This commit is contained in:
2025-09-12 16:22:12 -07:00
parent b1e5405f7b
commit 0607ced61e
3 changed files with 284 additions and 212 deletions

View File

@@ -1,6 +1,22 @@
# Version 1.1.1
- Fixed the number of rectangles in the progress bar to 19
- Fixed a crash when attempting to load a brain image on Windows
- Removed hardcoded event annotations. Fixes [Issue 16](https://git.research.dezeeuw.ca/tyler/flares/issues/16)
# Version 1.1.0
- Changelog details coming soon
- Revamped the Analysis window
- 4 Options of Participant, Participant Brain, Inter-Group, and Cross Group Brain are available.
- Customization is present to query different participants, images, events, brains, etc.
- Removed preprocessing options and reorganized their order to correlate with the actual order.
- Most preprocessing options removed will be coming back soon
- Added a group option when clicking on a participant's file
- If no group is specified, the participant will be added to the "Default" group
- Added option to update the optode positions in a snirf file from the Options menu (F6)
- Fixed [Issue 3](https://git.research.dezeeuw.ca/tyler/flares/issues/3), [Issue 4](https://git.research.dezeeuw.ca/tyler/flares/issues/4), [Issue 17](https://git.research.dezeeuw.ca/tyler/flares/issues/17), [Issue 21](https://git.research.dezeeuw.ca/tyler/flares/issues/21), [Issue 22](https://git.research.dezeeuw.ca/tyler/flares/issues/22)
# Version 1.0.1

125
flares.py
View File

@@ -48,6 +48,11 @@ from statsmodels.stats.multitest import multipletests
from scipy import stats
from scipy.spatial.distance import cdist
# Backend visualization needs to be defined for PyInstaller
import pyvistaqt # type: ignore
import vtkmodules.util.data_model
import vtkmodules.util.execution_model
# External library imports for mne
from mne import (
EvokedArray, SourceEstimate, Info, Epochs, Label,
@@ -125,6 +130,8 @@ TDDR: bool
ENHANCE_NEGATIVE_CORRELATION: bool
SHORT_CHANNEL: bool
VERBOSITY = True
# FIXME: Shouldn't need each ordering - just order it before checking
@@ -171,6 +178,7 @@ REQUIRED_KEYS: dict[str, Any] = {
"PSP_TIME_WINDOW": int,
"PSP_THRESHOLD": float,
"SHORT_CHANNEL": bool,
# "REJECT_PAIRS": bool,
# "FORCE_DROP_ANNOTATIONS": list,
# "FILTER_LOW_PASS": float,
@@ -1120,14 +1128,18 @@ def epochs_calculations(raw_haemo, events, event_dict):
fig.legend(lines, conditions, loc="lower right")
fig_epochs.append(("evoked_topo", help)) # Store with a unique name
# Evoked response for specific condition ("Reach")
evoked_stim1 = epochs['Reach'].average()
unique_annotations = set(raw_haemo.annotations.description)
for cond in unique_annotations:
# Evoked response for specific condition ("Activity")
evoked_stim1 = epochs[cond].average()
fig_evoked_hbo = evoked_stim1.copy().pick(picks='hbo').plot(time_unit='s', show=False)
fig_evoked_hbr = evoked_stim1.copy().pick(picks='hbr').plot(time_unit='s', show=False)
fig_epochs.append(("fig_evoked_hbo", fig_evoked_hbo)) # Store with a unique name
fig_epochs.append(("fig_evoked_hbr", fig_evoked_hbr)) # Store with a unique name
fig_epochs.append((f"fig_evoked_hbo_{cond}", fig_evoked_hbo)) # Store with a unique name
fig_epochs.append((f"fig_evoked_hbr_{cond}", fig_evoked_hbr)) # Store with a unique name
print("Evoked HbO peak amplitude:", evoked_stim1.copy().pick(picks='hbo').data.max())
@@ -1200,14 +1212,13 @@ def epochs_calculations(raw_haemo, events, event_dict):
def make_design_matrix(raw_haemo, short_chans):
raw_haemo.resample(1, npad="auto")
short_chans.resample(1)
raw_haemo._data = raw_haemo._data * 1e6
# 2) Create design matrix
if SHORT_CHANNEL:
short_chans.resample(1)
design_matrix = make_first_level_design_matrix(
raw=raw_haemo,
hrf_model='fir',
@@ -1220,6 +1231,17 @@ def make_design_matrix(raw_haemo, short_chans):
add_regs=short_chans.get_data().T,
add_reg_names=short_chans.ch_names
)
else:
design_matrix = make_first_level_design_matrix(
raw=raw_haemo,
hrf_model='fir',
stim_dur=0.5,
fir_delays=range(15),
drift_model='cosine',
high_pass=0.01,
oversampling=1,
min_onset=-125,
)
print(design_matrix.head())
print(design_matrix.columns)
@@ -1232,10 +1254,6 @@ def make_design_matrix(raw_haemo, short_chans):
def generate_montage_locations():
"""Get standard MNI montage locations in dataframe.
@@ -1600,10 +1618,16 @@ def fold_channels(raw: BaseRaw) -> None:
def individual_significance(raw_haemo, glm_est):
fig_individual_significances = [] # List to store figures
# TODO: BAD!
cha = glm_est.to_dataframe()
ch_summary = cha.query("Condition.str.startswith('Reach_delay_') and Chroma == 'hbo'", engine='python')
unique_annotations = set(raw_haemo.annotations.description)
for cond in unique_annotations:
ch_summary = cha.query(f"Condition.str.startswith('{cond}_delay_') and Chroma == 'hbo'", engine='python')
print(ch_summary.head())
@@ -1611,8 +1635,8 @@ def individual_significance(raw_haemo, glm_est):
print(channel_averages.head())
reach_ch_summary = ch_summary.query(
"Chroma == 'hbo' and Condition.str.startswith('Reach_delay_')", engine='python'
activity_ch_summary = ch_summary.query(
f"Chroma == 'hbo' and Condition.str.startswith('{cond}_delay_')", engine='python'
)
# Function to correct p-values per channel
@@ -1622,7 +1646,7 @@ def individual_significance(raw_haemo, glm_est):
return df
# Apply FDR correction grouped by channel
corrected = reach_ch_summary.groupby("ch_name", group_keys=False).apply(fdr_correct_per_channel)
corrected = activity_ch_summary.groupby("ch_name", group_keys=False).apply(fdr_correct_per_channel)
# Determine which channels are significant across any delay
sig_channels = (
@@ -1632,7 +1656,7 @@ def individual_significance(raw_haemo, glm_est):
)
# Merge with mean theta (optional for plotting)
mean_theta = reach_ch_summary.groupby('ch_name')['theta'].mean().reset_index()
mean_theta = activity_ch_summary.groupby('ch_name')['theta'].mean().reset_index()
sig_channels = sig_channels.merge(mean_theta, on='ch_name')
print(sig_channels)
@@ -1669,8 +1693,6 @@ def individual_significance(raw_haemo, glm_est):
ABS_SIGNIFICANCE_T_VALUE = 1
P_THRESHOLD = 0.05
SOURCE_DETECTOR_SEPARATOR = "_"
Reach = "Reach"
t_or_theta = 'theta'
for _, row in avg_df.iterrows(): # type: ignore
@@ -1728,11 +1750,11 @@ def individual_significance(raw_haemo, glm_est):
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
cbar = plt.colorbar(sm, ax=ax, shrink=0.85) # type: ignore
cbar.set_label(f'Average {Reach} {t_or_theta} value (hbo)', fontsize=11) # type: ignore
cbar.set_label(f'Average {cond} {t_or_theta} value (hbo)', fontsize=11) # type: ignore
# Formatting the subplots
ax.set_aspect('equal')
ax.set_title(f"Average {t_or_theta} values for {Reach} (HbO)", fontsize=14) # type: ignore
ax.set_title(f"Average {t_or_theta} values for {cond} (HbO)", fontsize=14) # type: ignore
ax.set_xlabel('X position (m)', fontsize=11) # type: ignore
ax.set_ylabel('Y position (m)', fontsize=11) # type: ignore
ax.grid(True, alpha=0.3) # type: ignore
@@ -1745,8 +1767,9 @@ def individual_significance(raw_haemo, glm_est):
fig.tight_layout()
fig_individual_significances.append((f"Condition {cond}", fig))
return fig
return fig_individual_significances
# TODO: Hardcoded
def group_significance(
@@ -1761,7 +1784,7 @@ def group_significance(
Args:
raw_haemo: Raw haemoglobin MNE object (used for optode positions)
all_cha: DataFrame with columns including 'ID', 'Condition', 'p_value', 'theta', 'df', 'ch_name', 'Chroma'
condition: condition prefix, e.g., 'Reach'
condition: condition prefix, e.g., 'Activity'
correction: p-value correction method ('fdr_bh' or 'bonferroni')
Returns:
@@ -1919,7 +1942,12 @@ def group_significance(
def plot_glm_results(file_path, raw_haemo, glm_est, design_matrix):
fig_glms = [] # List to store figures
dm = design_matrix.copy()
logger.info(design_matrix.shape)
logger.info(design_matrix.columns)
logger.info(design_matrix.head())
rois = dict(AllChannels=range(len(raw_haemo.ch_names)))
conditions = design_matrix.columns
@@ -1928,27 +1956,37 @@ def plot_glm_results(file_path, raw_haemo, glm_est, design_matrix):
df_individual["ID"] = file_path
# df_individual["theta"] = [t * 1.0e6 for t in df_individual["theta"]]
condition_of_interest="Reach"
first_onset_for_cond = {}
for onset, desc in zip(raw_haemo.annotations.onset, raw_haemo.annotations.description):
if desc not in first_onset_for_cond:
first_onset_for_cond[desc] = onset
# Get unique condition names from annotations (descriptions)
unique_annotations = set(raw_haemo.annotations.description)
for cond in unique_annotations:
logger.info(cond)
df_individual_filtered = df_individual.copy()
# Filter for the condition of interest and FIR delays
df_individual["isCondition"] = [condition_of_interest in n for n in df_individual["Condition"]]
df_individual["isDelay"] = ["delay" in n for n in df_individual["Condition"]]
df_individual = df_individual.query("isDelay and isCondition")
df_individual_filtered["isCondition"] = [cond in n for n in df_individual_filtered["Condition"]]
df_individual_filtered["isDelay"] = ["delay" in n for n in df_individual_filtered["Condition"]]
df_individual_filtered = df_individual_filtered.query("isDelay and isCondition")
# Remove other conditions from design matrix
dm_condition_cols = [col for col in dm.columns if condition_of_interest in col]
dm_condition_cols = [col for col in dm.columns if cond in col]
dm_cond = dm[dm_condition_cols]
# Add a numeric delay column
def extract_delay_number(condition_str):
# Extracts the number at the end of a string like 'Reach_delay_5'
# Extracts the number at the end of a string like 'Activity_delay_5'
return int(condition_str.split("_")[-1])
df_individual["DelayNum"] = df_individual["Condition"].apply(extract_delay_number)
df_individual_filtered["DelayNum"] = df_individual_filtered["Condition"].apply(extract_delay_number)
# Now separate and sort using numeric delay
df_hbo = df_individual[df_individual["Chroma"] == "hbo"].sort_values("DelayNum")
df_hbr = df_individual[df_individual["Chroma"] == "hbr"].sort_values("DelayNum")
df_hbo = df_individual_filtered[df_individual_filtered["Chroma"] == "hbo"].sort_values("DelayNum")
df_hbr = df_individual_filtered[df_individual_filtered["Chroma"] == "hbr"].sort_values("DelayNum")
vals_hbo = df_hbo["theta"].values
vals_hbr = df_hbr["theta"].values
@@ -1962,7 +2000,7 @@ def plot_glm_results(file_path, raw_haemo, glm_est, design_matrix):
dm_cond_scaled_hbr = dm_cond_values * vals_hbr.reshape(1, -1)
# Create time axis relative to stimulus onset
time = dm_cond.index - np.ceil(raw_haemo.annotations.onset[1])
time = dm_cond.index - np.ceil(first_onset_for_cond.get(cond, 0))
# Plot
axes[0].plot(time, dm_cond_values)
@@ -1978,8 +2016,8 @@ def plot_glm_results(file_path, raw_haemo, glm_est, design_matrix):
axes[1].set_ylim(-0.5, 1)
axes[2].set_ylim(-0.5, 1)
axes[0].set_title(f"FIR Model (Unscaled)")
axes[1].set_title(f"FIR Components (Scaled by {condition_of_interest} GLM Estimates)")
axes[2].set_title(f"Evoked Response ({condition_of_interest})")
axes[1].set_title(f"FIR Components (Scaled by {cond} GLM Estimates)")
axes[2].set_title(f"Evoked Response ({cond})")
axes[0].set_ylabel("FIR Model")
axes[1].set_ylabel("Oxyhaemoglobin (ΔμMol)")
axes[2].set_ylabel("Haemoglobin (ΔμMol)")
@@ -1992,8 +2030,9 @@ def plot_glm_results(file_path, raw_haemo, glm_est, design_matrix):
print(f"Mean theta (HbR): {np.mean(vals_hbr):.4f}")
print(f"Sum of theta (HbR): {np.sum(vals_hbr):.4f}")
return fig
fig_glms.append((f"Condition {cond}", fig))
return fig_glms
def plot_3d_evoked_array(
@@ -2871,9 +2910,12 @@ def process_participant(file_path, progress_callback=None):
logger.info("11")
# Step 11: Get short / long channels
if SHORT_CHANNEL:
short_chans = get_short_channels(raw_haemo, max_dist=0.015)
fig_short_chans = short_chans.plot(duration=raw_haemo.times[-1], n_channels=raw_haemo.info['nchan'], title="Short Channels Only", show=False)
fig_individual["short"] = fig_short_chans
else:
short_chans = None
raw_haemo = get_long_channels(raw_haemo)
if progress_callback: progress_callback(12)
logger.info("12")
@@ -2916,13 +2958,15 @@ def process_participant(file_path, progress_callback=None):
# Step 16: Plot GLM results
fig_glm_result = plot_glm_results(file_path, raw_haemo, glm_est, design_matrix)
fig_individual["GLM"] = fig_glm_result
for name, fig in fig_glm_result:
fig_individual[f"GLM {name}"] = fig
if progress_callback: progress_callback(17)
logger.info("17")
# Step 17: Plot channel significance
fig_significance = individual_significance(raw_haemo, glm_est)
fig_individual["Significance"] = fig_significance
for name, fig in fig_significance:
fig_individual[f"Significance {name}"] = fig
if progress_callback: progress_callback(18)
logger.info("18")
@@ -2975,6 +3019,9 @@ def process_participant(file_path, progress_callback=None):
contrast_dict[condition] = contrast_vector
if progress_callback: progress_callback(19)
logger.info("19")
# Compute contrast results
contrast_results = {}
@@ -2988,7 +3035,7 @@ def process_participant(file_path, progress_callback=None):
fig_bytes = convert_fig_dict_to_png_bytes(fig_individual)
if progress_callback: progress_callback(20)
logger.info("20")
return raw_haemo, epochs, fig_bytes, cha, contrast_results, df_ind, design_matrix, AGE, GENDER, GROUP, True
# Not 3000 lines yay!

13
main.py
View File

@@ -120,6 +120,12 @@ SECTIONS = [
#{"name": "FILTER", "default": True, "type": bool, "help": "Calculate Peak Spectral Power."},
]
},
{
"title": "Short Channels",
"params": [
{"name": "SHORT_CHANNEL", "default": True, "type": bool, "help": "Does the data have a short channel?"},
]
},
{
"title": "Extracting Events",
"params": [
@@ -242,6 +248,9 @@ class UpdateCheckThread(QThread):
error_occurred = Signal(str)
def run(self):
if not getattr(sys, 'frozen', False):
self.error_occurred.emit("Application is not frozen (Development mode).")
return
try:
latest_version, download_url = self.get_latest_release_for_platform()
if not latest_version:
@@ -646,9 +655,9 @@ class ProgressBubble(QWidget):
self.progress_layout = QHBoxLayout()
self.rects = []
for _ in range(12):
for _ in range(19):
rect = QFrame()
rect.setFixedSize(10, 20)
rect.setFixedSize(10, 18)
rect.setStyleSheet("background-color: white; border: 1px solid gray;")
self.progress_layout.addWidget(rect)
self.rects.append(rect)