From 0607ced61e7e44a4f166b7f6b7271909e78c26c8 Mon Sep 17 00:00:00 2001
From: tyler
Date: Fri, 12 Sep 2025 16:22:12 -0700
Subject: [PATCH] fixes

---
 changelog.md |  18 +-
 flares.py    | 465 ++++++++++++++++++++++++++++-----------------------
 main.py      |  13 +-
 3 files changed, 284 insertions(+), 212 deletions(-)

diff --git a/changelog.md b/changelog.md
index 8a7220e..02398dc 100644
--- a/changelog.md
+++ b/changelog.md
@@ -1,6 +1,22 @@
+# Version 1.1.1
+
+- Fixed the number of rectangles in the progress bar to 19
+- Fixed a crash when attempting to load a brain image on Windows
+- Removed hardcoded event annotations. Fixes [Issue 16](https://git.research.dezeeuw.ca/tyler/flares/issues/16)
+
+
 # Version 1.1.0
 
-- Changelog details coming soon
+- Revamped the Analysis window
+- Four options are available: Participant, Participant Brain, Inter-Group, and Cross Group Brain
+- Customization options allow querying different participants, images, events, brains, etc.
+- Removed some preprocessing options and reordered the remaining ones to match the actual processing order
+- Most of the removed preprocessing options will return soon
+- Added a group option when clicking on a participant's file
+- If no group is specified, the participant will be added to the "Default" group
+- Added an option to update the optode positions in a snirf file from the Options menu (F6)
+- Fixed [Issue 3](https://git.research.dezeeuw.ca/tyler/flares/issues/3), [Issue 4](https://git.research.dezeeuw.ca/tyler/flares/issues/4), [Issue 17](https://git.research.dezeeuw.ca/tyler/flares/issues/17), [Issue 21](https://git.research.dezeeuw.ca/tyler/flares/issues/21), [Issue 22](https://git.research.dezeeuw.ca/tyler/flares/issues/22)
+
 # Version 1.0.1
 
diff --git a/flares.py b/flares.py
index 5eedd78..24793ec 100644
--- a/flares.py
+++ b/flares.py
@@ -48,6 +48,11 @@ from statsmodels.stats.multitest import multipletests
 from scipy import stats
 from scipy.spatial.distance import cdist
 
+# Backend visualization modules must be imported explicitly so PyInstaller bundles them
+import pyvistaqt  # type: ignore
+import vtkmodules.util.data_model
+import vtkmodules.util.execution_model
+
 # External library imports for mne
 from mne import (
     EvokedArray, SourceEstimate, Info, Epochs, Label,
@@ -125,6 +130,8 @@
 TDDR: bool
 ENHANCE_NEGATIVE_CORRELATION: bool
 
+SHORT_CHANNEL: bool
+
 VERBOSITY = True
 
 # FIXME: Shouldn't need each ordering - just order it before checking
@@ -171,6 +178,7 @@ REQUIRED_KEYS: dict[str, Any] = {
     "PSP_TIME_WINDOW": int,
     "PSP_THRESHOLD": float,
+    "SHORT_CHANNEL": bool,
     # "REJECT_PAIRS": bool,
     # "FORCE_DROP_ANNOTATIONS": list,
     # "FILTER_LOW_PASS": float,
@@ -1120,16 +1128,20 @@ def epochs_calculations(raw_haemo, events, event_dict):
         fig.legend(lines, conditions, loc="lower right")
     fig_epochs.append(("evoked_topo", help))  # Store with a unique name
 
-    # Evoked response for specific condition ("Reach")
-    evoked_stim1 = epochs['Reach'].average()
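+    # Build an evoked response for every annotated condition instead of one hardcoded event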
+    unique_annotations = set(raw_haemo.annotations.description)
 
-    fig_evoked_hbo = evoked_stim1.copy().pick(picks='hbo').plot(time_unit='s', show=False)
-    fig_evoked_hbr = evoked_stim1.copy().pick(picks='hbr').plot(time_unit='s', show=False)
-
-    fig_epochs.append(("fig_evoked_hbo", fig_evoked_hbo))  # Store with a unique name
-    fig_epochs.append(("fig_evoked_hbr", fig_evoked_hbr))  # Store with a unique name
+    for cond in unique_annotations:
 
-    print("Evoked HbO peak amplitude:", evoked_stim1.copy().pick(picks='hbo').data.max())
+        # Evoked response for each condition
+        evoked_stim1 = epochs[cond].average()
+
+        fig_evoked_hbo = evoked_stim1.copy().pick(picks='hbo').plot(time_unit='s', show=False)
+        fig_evoked_hbr = evoked_stim1.copy().pick(picks='hbr').plot(time_unit='s', show=False)
+
+        fig_epochs.append((f"fig_evoked_hbo_{cond}", fig_evoked_hbo))  # Store with a unique name
+        fig_epochs.append((f"fig_evoked_hbr_{cond}", fig_evoked_hbr))  # Store with a unique name
+
+        print("Evoked HbO peak amplitude:", evoked_stim1.copy().pick(picks='hbo').data.max())
 
     evokeds = {}
     for condition in epochs2.event_id:
@@ -1200,26 +1212,36 @@ def epochs_calculations(raw_haemo, events, event_dict):
 
 
-
-
 def make_design_matrix(raw_haemo, short_chans):
 
     raw_haemo.resample(1, npad="auto")
-    short_chans.resample(1)
     raw_haemo._data = raw_haemo._data * 1e6
 
     # 2) Create design matrix
-    design_matrix = make_first_level_design_matrix(
-        raw=raw_haemo,
-        hrf_model='fir',
-        stim_dur=0.5,
-        fir_delays=range(15),
-        drift_model='cosine',
-        high_pass=0.01,
-        oversampling=1,
-        min_onset=-125,
-        add_regs=short_chans.get_data().T,
-        add_reg_names=short_chans.ch_names
-    )
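+    # Short-channel signals mainly reflect scalp haemodynamics, so they are only
+    # added as nuisance regressors when the montage provides them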
+    if SHORT_CHANNEL:
+        short_chans.resample(1)
+        design_matrix = make_first_level_design_matrix(
+            raw=raw_haemo,
+            hrf_model='fir',
+            stim_dur=0.5,
+            fir_delays=range(15),
+            drift_model='cosine',
+            high_pass=0.01,
+            oversampling=1,
+            min_onset=-125,
+            add_regs=short_chans.get_data().T,
+            add_reg_names=short_chans.ch_names
+        )
+    else:
+        design_matrix = make_first_level_design_matrix(
+            raw=raw_haemo,
+            hrf_model='fir',
+            stim_dur=0.5,
+            fir_delays=range(15),
+            drift_model='cosine',
+            high_pass=0.01,
+            oversampling=1,
+            min_onset=-125,
+        )
 
     print(design_matrix.head())
     print(design_matrix.columns)
@@ -1232,10 +1254,6 @@ def make_design_matrix(raw_haemo, short_chans):
 
 
-
-
-
-
 def generate_montage_locations():
     """Get standard MNI montage locations in dataframe.
 
@@ -1600,153 +1618,158 @@ def fold_channels(raw: BaseRaw) -> None:
 
 
 def individual_significance(raw_haemo, glm_est):
+    fig_individual_significances = []  # List to store figures
+
     # TODO: BAD!
     cha = glm_est.to_dataframe()
 
-    ch_summary = cha.query("Condition.str.startswith('Reach_delay_') and Chroma == 'hbo'", engine='python')
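+    # Repeat the channel-significance analysis once per annotated condition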
+    unique_annotations = set(raw_haemo.annotations.description)
 
-    print(ch_summary.head())
+    for cond in unique_annotations:
 
-    channel_averages = ch_summary.groupby('ch_name')['theta'].mean().reset_index()
-    print(channel_averages.head())
+        ch_summary = cha.query(f"Condition.str.startswith('{cond}_delay_') and Chroma == 'hbo'", engine='python')
+
+        print(ch_summary.head())
+
+        channel_averages = ch_summary.groupby('ch_name')['theta'].mean().reset_index()
+        print(channel_averages.head())
 
-    reach_ch_summary = ch_summary.query(
-        "Chroma == 'hbo' and Condition.str.startswith('Reach_delay_')", engine='python'
-    )
+        activity_ch_summary = ch_summary.query(
+            f"Chroma == 'hbo' and Condition.str.startswith('{cond}_delay_')", engine='python'
+        )
 
-    # Function to correct p-values per channel
-    def fdr_correct_per_channel(df):
-        df = df.copy()
-        df['pval_fdr'] = multipletests(df['p_value'], method='fdr_bh')[1]
-        return df
+        # Function to correct p-values per channel
+        def fdr_correct_per_channel(df):
+            df = df.copy()
+            df['pval_fdr'] = multipletests(df['p_value'], method='fdr_bh')[1]
+            return df
 
-    # Apply FDR correction grouped by channel
-    corrected = reach_ch_summary.groupby("ch_name", group_keys=False).apply(fdr_correct_per_channel)
+        # Apply FDR correction grouped by channel
+        corrected = activity_ch_summary.groupby("ch_name", group_keys=False).apply(fdr_correct_per_channel)
 
-    # Determine which channels are significant across any delay
-    sig_channels = (
-        corrected.groupby('ch_name')
-        .apply(lambda df: (df['pval_fdr'] < 0.05).any())
-        .reset_index(name='significant')
-    )
+        # Determine which channels are significant across any delay
+        sig_channels = (
+            corrected.groupby('ch_name')
+            .apply(lambda df: (df['pval_fdr'] < 0.05).any())
+            .reset_index(name='significant')
+        )
 
-    # Merge with mean theta (optional for plotting)
-    mean_theta = reach_ch_summary.groupby('ch_name')['theta'].mean().reset_index()
-    sig_channels = sig_channels.merge(mean_theta, on='ch_name')
-    print(sig_channels)
+        # Merge with mean theta (optional for plotting)
+        mean_theta = activity_ch_summary.groupby('ch_name')['theta'].mean().reset_index()
+        sig_channels = sig_channels.merge(mean_theta, on='ch_name')
+        print(sig_channels)
 
-    # For example, take the minimum corrected p-value per channel
-    summary_pvals = corrected.groupby('ch_name')['pval_fdr'].min().reset_index()
-    print(summary_pvals)
+        # For example, take the minimum corrected p-value per channel
+        summary_pvals = corrected.groupby('ch_name')['pval_fdr'].min().reset_index()
+        print(summary_pvals)
 
-    def parse_ch_name(ch_name):
-        # Extract numbers after S and D in names like 'S10_D5 hbo'
-        match = re.match(r'S(\d+)_D(\d+)', ch_name)
-        if match:
-            return int(match.group(1)), int(match.group(2))
-        else:
-            return None, None
+        def parse_ch_name(ch_name):
+            # Extract numbers after S and D in names like 'S10_D5 hbo'
+            match = re.match(r'S(\d+)_D(\d+)', ch_name)
+            if match:
+                return int(match.group(1)), int(match.group(2))
+            else:
+                return None, None
 
-    min_pvals = corrected.groupby('ch_name')['pval_fdr'].min().reset_index()
+        min_pvals = corrected.groupby('ch_name')['pval_fdr'].min().reset_index()
 
-    # Merge the real p-values into sig_channels / avg_df
-    avg_df = sig_channels.merge(min_pvals, on='ch_name')
+        # Merge the real p-values into sig_channels / avg_df
+        avg_df = sig_channels.merge(min_pvals, on='ch_name')
 
-    # Rename columns for consistency
-    avg_df = avg_df.rename(columns={'theta': 't_or_theta', 'pval_fdr': 'p_value'})
+        # Rename columns for consistency
+        avg_df = avg_df.rename(columns={'theta': 't_or_theta', 'pval_fdr': 'p_value'})
 
-    # Add Source and Detector columns again
-    avg_df['Source'], avg_df['Detector'] = zip(*avg_df['ch_name'].map(parse_ch_name))
+        # Add Source and Detector columns again
+        avg_df['Source'], avg_df['Detector'] = zip(*avg_df['ch_name'].map(parse_ch_name))
 
-    # Keep relevant columns
-    avg_df = avg_df[['Source', 'Detector', 't_or_theta', 'p_value']].dropna()
+        # Keep relevant columns
+        avg_df = avg_df[['Source', 'Detector', 't_or_theta', 'p_value']].dropna()
 
-    ABS_SIGNIFICANCE_THETA_VALUE = 1
-    ABS_SIGNIFICANCE_T_VALUE = 1
-    P_THRESHOLD = 0.05
-    SOURCE_DETECTOR_SEPARATOR = "_"
-    Reach = "Reach"
+        ABS_SIGNIFICANCE_THETA_VALUE = 1
+        ABS_SIGNIFICANCE_T_VALUE = 1
+        P_THRESHOLD = 0.05
+        SOURCE_DETECTOR_SEPARATOR = "_"
+        t_or_theta = 'theta'
+        for _, row in avg_df.iterrows():  # type: ignore
+            print(f"Source {row['Source']} <-> Detector {row['Detector']}: "
+                  f"Avg {t_or_theta}-value = {row['t_or_theta']:.3f}, Avg p-value = {row['p_value']:.3f}")
 
-    t_or_theta = 'theta'
-    for _, row in avg_df.iterrows():  # type: ignore
-        print(f"Source {row['Source']} <-> Detector {row['Detector']}: "
-              f"Avg {t_or_theta}-value = {row['t_or_theta']:.3f}, Avg p-value = {row['p_value']:.3f}")
 
+        # Extract the source and detector positions from raw
+        src_pos: dict[int, tuple[float, float]] = {}
+        det_pos: dict[int, tuple[float, float]] = {}
+        for ch in getattr(raw_haemo, "info")["chs"]:
+            ch_name = ch['ch_name']
+            if not ch_name or not ch['loc'].any():
+                continue
+            parts = ch_name.split()[0]
+            src_str, det_str = parts.split(SOURCE_DETECTOR_SEPARATOR)
+            src_num = int(src_str[1:])
+            det_num = int(det_str[1:])
+            src_pos[src_num] = ch['loc'][3:5]
+            det_pos[det_num] = ch['loc'][6:8]
 
-    # Extract the cource and detector positions from raw
-    src_pos: dict[int, tuple[float, float]] = {}
-    det_pos: dict[int, tuple[float, float]] = {}
-    for ch in getattr(raw_haemo, "info")["chs"]:
-        ch_name = ch['ch_name']
-        if not ch_name or not ch['loc'].any():
-            continue
-        parts = ch_name.split()[0]
-        src_str, det_str = parts.split(SOURCE_DETECTOR_SEPARATOR)
-        src_num = int(src_str[1:])
-        det_num = int(det_str[1:])
-        src_pos[src_num] = ch['loc'][3:5]
-        det_pos[det_num] = ch['loc'][6:8]
 
+        # Set up the plot
+        fig, ax = plt.subplots(figsize=(8, 6))  # type: ignore
 
-    # Set up the plot
-    fig, ax = plt.subplots(figsize=(8, 6))  # type: ignore
 
+        # Plot the sources
+        for pos in src_pos.values():
+            ax.scatter(pos[0], pos[1], s=120, c='k', marker='o', edgecolors='white', linewidths=1, zorder=3)  # type: ignore
 
-    # Plot the sources
-    for pos in src_pos.values():
-        ax.scatter(pos[0], pos[1], s=120, c='k', marker='o', edgecolors='white', linewidths=1, zorder=3)  # type: ignore
 
+        # Plot the detectors
+        for pos in det_pos.values():
+            ax.scatter(pos[0], pos[1], s=120, c='k', marker='s', edgecolors='white', linewidths=1, zorder=3)  # type: ignore
 
-    # Plot the detectors
-    for pos in det_pos.values():
-        ax.scatter(pos[0], pos[1], s=120, c='k', marker='s', edgecolors='white', linewidths=1, zorder=3)  # type: ignore
 
+        # Ensure that the colors stay within the boundaries even if they are over or under the max/min values
+        if t_or_theta == 't':
+            norm = mcolors.Normalize(vmin=-ABS_SIGNIFICANCE_T_VALUE, vmax=ABS_SIGNIFICANCE_T_VALUE)
+        elif t_or_theta == 'theta':
+            norm = mcolors.Normalize(vmin=-ABS_SIGNIFICANCE_THETA_VALUE, vmax=ABS_SIGNIFICANCE_THETA_VALUE)
 
-    # Ensure that the colors stay within the boundaries even if they are over or under the max/min values
-    if t_or_theta == 't':
-        norm = mcolors.Normalize(vmin=-ABS_SIGNIFICANCE_T_VALUE, vmax=ABS_SIGNIFICANCE_T_VALUE)
-    elif t_or_theta == 'theta':
-        norm = mcolors.Normalize(vmin=-ABS_SIGNIFICANCE_THETA_VALUE, vmax=ABS_SIGNIFICANCE_THETA_VALUE)
 
+        cmap: mcolors.Colormap = plt.get_cmap('seismic')
-    cmap: mcolors.Colormap = plt.get_cmap('seismic')
 
+        # Plot connections with avg t-values
+        for row in avg_df.itertuples():
+            src: int = cast(int, row.Source)  # type: ignore
+            det: int = cast(int, row.Detector)  # type: ignore
+            tval: float = cast(float, row.t_or_theta)  # type: ignore
+            pval: float = cast(float, row.p_value)  # type: ignore
+
-    # Plot connections with avg t-values
-    for row in avg_df.itertuples():
-        src: int = cast(int, row.Source)  # type: ignore
-        det: int = cast(int, row.Detector)  # type: ignore
-        tval: float = cast(float, row.t_or_theta)  # type: ignore
-        pval: float = cast(float, row.p_value)  # type: ignore
 
+            if src in src_pos and det in det_pos:
+                x = [src_pos[src][0], det_pos[det][0]]
+                y = [src_pos[src][1], det_pos[det][1]]
+                style = '-' if pval <= P_THRESHOLD else '--'
+                ax.plot(x, y, linestyle=style, color=cmap(norm(tval)), linewidth=4, alpha=0.9, zorder=2)  # type: ignore
+
+        # Format the Colorbar
+        sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
+        sm.set_array([])
+        cbar = plt.colorbar(sm, ax=ax, shrink=0.85)  # type: ignore
+        cbar.set_label(f'Average {cond} {t_or_theta} value (hbo)', fontsize=11)  # type: ignore
+
+        # Formatting the subplots
+        ax.set_aspect('equal')
+        ax.set_title(f"Average {t_or_theta} values for {cond} (HbO)", fontsize=14)  # type: ignore
+        ax.set_xlabel('X position (m)', fontsize=11)  # type: ignore
+        ax.set_ylabel('Y position (m)', fontsize=11)  # type: ignore
+        ax.grid(True, alpha=0.3)  # type: ignore
+
+        # Set axis limits to be 1cm more than the optode positions
+        all_x = [pos[0] for pos in src_pos.values()] + [pos[0] for pos in det_pos.values()]
+        all_y = [pos[1] for pos in src_pos.values()] + [pos[1] for pos in det_pos.values()]
+        ax.set_xlim(min(all_x)-0.01, max(all_x)+0.01)
+        ax.set_ylim(min(all_y)-0.01, max(all_y)+0.01)
+
+        fig.tight_layout()
+        fig_individual_significances.append((f"Condition {cond}", fig))
 
-    if src in src_pos and det in det_pos:
-        x = [src_pos[src][0], det_pos[det][0]]
-        y = [src_pos[src][1], det_pos[det][1]]
-        style = '-' if pval <= P_THRESHOLD else '--'
-        ax.plot(x, y, linestyle=style, color=cmap(norm(tval)), linewidth=4, alpha=0.9, zorder=2)  # type: ignore
-
-    # Format the Colorbar
-    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
-    sm.set_array([])
-    cbar = plt.colorbar(sm, ax=ax, shrink=0.85)  # type: ignore
-    cbar.set_label(f'Average {Reach} {t_or_theta} value (hbo)', fontsize=11)  # type: ignore
-
-    # Formatting the subplots
-    ax.set_aspect('equal')
-    ax.set_title(f"Average {t_or_theta} values for {Reach} (HbO)", fontsize=14)  # type: ignore
-    ax.set_xlabel('X position (m)', fontsize=11)  # type: ignore
-    ax.set_ylabel('Y position (m)', fontsize=11)  # type: ignore
-    ax.grid(True, alpha=0.3)  # type: ignore
-
-    # Set axis limits to be 1cm more than the optode positions
-    all_x = [pos[0] for pos in src_pos.values()] + [pos[0] for pos in det_pos.values()]
-    all_y = [pos[1] for pos in src_pos.values()] + [pos[1] for pos in det_pos.values()]
-    ax.set_xlim(min(all_x)-0.01, max(all_x)+0.01)
-    ax.set_ylim(min(all_y)-0.01, max(all_y)+0.01)
-
-    fig.tight_layout()
-
-
-    return fig
+    return fig_individual_significances
 
 # TODO: Hardcoded
 def group_significance(
@@ -1761,7 +1784,7 @@ def group_significance(
     Args:
         raw_haemo: Raw haemoglobin MNE object (used for optode positions)
        all_cha: DataFrame with columns including 'ID', 'Condition', 'p_value', 'theta', 'df', 'ch_name', 'Chroma'
-        condition: condition prefix, e.g., 'Reach'
+        condition: condition prefix, e.g., 'Activity'
         correction: p-value correction method ('fdr_bh' or 'bonferroni')
 
     Returns:
@@ -1919,7 +1942,12 @@ def group_significance(
 
 
 def plot_glm_results(file_path, raw_haemo, glm_est, design_matrix):
+    fig_glms = []  # List to store figures
+
     dm = design_matrix.copy()
+    logger.info(design_matrix.shape)
+    logger.info(design_matrix.columns)
+    logger.info(design_matrix.head())
 
     rois = dict(AllChannels=range(len(raw_haemo.ch_names)))
     conditions = design_matrix.columns
@@ -1928,72 +1956,83 @@ def plot_glm_results(file_path, raw_haemo, glm_est, design_matrix):
     df_individual["ID"] = file_path
 
     # df_individual["theta"] = [t * 1.0e6 for t in df_individual["theta"]]
 
-    condition_of_interest="Reach"
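+    # Record each condition's first onset so the FIR time axis can be aligned to stimulus onset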
+    first_onset_for_cond = {}
+    for onset, desc in zip(raw_haemo.annotations.onset, raw_haemo.annotations.description):
+        if desc not in first_onset_for_cond:
+            first_onset_for_cond[desc] = onset
 
-    # Filter for the condition of interest and FIR delays
-    df_individual["isCondition"] = [condition_of_interest in n for n in df_individual["Condition"]]
-    df_individual["isDelay"] = ["delay" in n for n in df_individual["Condition"]]
-    df_individual = df_individual.query("isDelay and isCondition")
-
-    # Remove other conditions from design matrix
-    dm_condition_cols = [col for col in dm.columns if condition_of_interest in col]
-    dm_cond = dm[dm_condition_cols]
+    # Get unique condition names from annotations (descriptions)
+    unique_annotations = set(raw_haemo.annotations.description)
+
+    for cond in unique_annotations:
+        logger.info(cond)
+        df_individual_filtered = df_individual.copy()
+
+        # Filter for the condition of interest and FIR delays
+        df_individual_filtered["isCondition"] = [cond in n for n in df_individual_filtered["Condition"]]
+        df_individual_filtered["isDelay"] = ["delay" in n for n in df_individual_filtered["Condition"]]
+        df_individual_filtered = df_individual_filtered.query("isDelay and isCondition")
 
-    # Add a numeric delay column
-    def extract_delay_number(condition_str):
-        # Extracts the number at the end of a string like 'Reach_delay_5'
-        return int(condition_str.split("_")[-1])
+        # Remove other conditions from design matrix
+        dm_condition_cols = [col for col in dm.columns if cond in col]
+        dm_cond = dm[dm_condition_cols]
+
+        # Add a numeric delay column
+        def extract_delay_number(condition_str):
+            # Extracts the number at the end of a string like 'Activity_delay_5'
+            return int(condition_str.split("_")[-1])
 
-    df_individual["DelayNum"] = df_individual["Condition"].apply(extract_delay_number)
+        df_individual_filtered["DelayNum"] = df_individual_filtered["Condition"].apply(extract_delay_number)
 
-    # Now separate and sort using numeric delay
-    df_hbo = df_individual[df_individual["Chroma"] == "hbo"].sort_values("DelayNum")
-    df_hbr = df_individual[df_individual["Chroma"] == "hbr"].sort_values("DelayNum")
+        # Now separate and sort using numeric delay
+        df_hbo = df_individual_filtered[df_individual_filtered["Chroma"] == "hbo"].sort_values("DelayNum")
+        df_hbr = df_individual_filtered[df_individual_filtered["Chroma"] == "hbr"].sort_values("DelayNum")
 
-    vals_hbo = df_hbo["theta"].values
-    vals_hbr = df_hbr["theta"].values
-
-    # Create the plot
-    fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(19, 10))
-
-    # Scale design matrix components using numpy arrays instead of pandas operations
-    dm_cond_values = dm_cond.values
-    dm_cond_scaled_hbo = dm_cond_values * vals_hbo.reshape(1, -1)
-    dm_cond_scaled_hbr = dm_cond_values * vals_hbr.reshape(1, -1)
-
-    # Create time axis relative to stimulus onset
-    time = dm_cond.index - np.ceil(raw_haemo.annotations.onset[1])
-
-    # Plot
-    axes[0].plot(time, dm_cond_values)
-    axes[1].plot(time, dm_cond_scaled_hbo)
-    axes[2].plot(time, np.sum(dm_cond_scaled_hbo, axis=1), 'r')
-    axes[2].plot(time, np.sum(dm_cond_scaled_hbr, axis=1), 'b')
-
-    # Format plots
-    for ax in range(3):
-        axes[ax].set_xlim(-5, 25)
-        axes[ax].set_xlabel("Time (s)")
-    axes[0].set_ylim(-0.2, 1.2)
-    axes[1].set_ylim(-0.5, 1)
-    axes[2].set_ylim(-0.5, 1)
-    axes[0].set_title(f"FIR Model (Unscaled)")
-    axes[1].set_title(f"FIR Components (Scaled by {condition_of_interest} GLM Estimates)")
-    axes[2].set_title(f"Evoked Response ({condition_of_interest})")
-    axes[0].set_ylabel("FIR Model")
-    axes[1].set_ylabel("Oxyhaemoglobin (ΔμMol)")
-    axes[2].set_ylabel("Haemoglobin (ΔμMol)")
-    axes[2].legend(["Oxyhaemoglobin", "Deoxyhaemoglobin"])
+        vals_hbo = df_hbo["theta"].values
+        vals_hbr = df_hbr["theta"].values
+
+        # Create the plot
+        fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(19, 10))
+
+        # Scale design matrix components using numpy arrays instead of pandas operations
+        dm_cond_values = dm_cond.values
+        dm_cond_scaled_hbo = dm_cond_values * vals_hbo.reshape(1, -1)
+        dm_cond_scaled_hbr = dm_cond_values * vals_hbr.reshape(1, -1)
+
+        # Create time axis relative to stimulus onset
+        time = dm_cond.index - np.ceil(first_onset_for_cond.get(cond, 0))
+
+        # Plot
+        axes[0].plot(time, dm_cond_values)
+        axes[1].plot(time, dm_cond_scaled_hbo)
+        axes[2].plot(time, np.sum(dm_cond_scaled_hbo, axis=1), 'r')
+        axes[2].plot(time, np.sum(dm_cond_scaled_hbr, axis=1), 'b')
+
+        # Format plots
+        for ax in range(3):
+            axes[ax].set_xlim(-5, 25)
+            axes[ax].set_xlabel("Time (s)")
+        axes[0].set_ylim(-0.2, 1.2)
+        axes[1].set_ylim(-0.5, 1)
+        axes[2].set_ylim(-0.5, 1)
+        axes[0].set_title("FIR Model (Unscaled)")
+        axes[1].set_title(f"FIR Components (Scaled by {cond} GLM Estimates)")
+        axes[2].set_title(f"Evoked Response ({cond})")
+        axes[0].set_ylabel("FIR Model")
+        axes[1].set_ylabel("Oxyhaemoglobin (ΔμMol)")
+        axes[2].set_ylabel("Haemoglobin (ΔμMol)")
+        axes[2].legend(["Oxyhaemoglobin", "Deoxyhaemoglobin"])
 
-
-    print(f"Number of FIR bins: {len(vals_hbo)}")
-    print(f"Mean theta (HbO): {np.mean(vals_hbo):.4f}")
-    print(f"Sum of theta (HbO): {np.sum(vals_hbo):.4f}")
-    print(f"Mean theta (HbR): {np.mean(vals_hbr):.4f}")
-    print(f"Sum of theta (HbR): {np.sum(vals_hbr):.4f}")
-
-    return fig
+
+        print(f"Number of FIR bins: {len(vals_hbo)}")
+        print(f"Mean theta (HbO): {np.mean(vals_hbo):.4f}")
+        print(f"Sum of theta (HbO): {np.sum(vals_hbo):.4f}")
+        print(f"Mean theta (HbR): {np.mean(vals_hbr):.4f}")
+        print(f"Sum of theta (HbR): {np.sum(vals_hbr):.4f}")
+
+        fig_glms.append((f"Condition {cond}", fig))
+
+    return fig_glms
 
 
 def plot_3d_evoked_array(
@@ -2871,9 +2910,12 @@ def process_participant(file_path, progress_callback=None):
     logger.info("11")
 
     # Step 11: Get short / long channels
-    short_chans = get_short_channels(raw_haemo, max_dist=0.015)
-    fig_short_chans = short_chans.plot(duration=raw_haemo.times[-1], n_channels=raw_haemo.info['nchan'], title="Short Channels Only", show=False)
-    fig_individual["short"] = fig_short_chans
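+    # Without short channels, short_chans stays None and make_design_matrix omits the extra regressors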
+    if SHORT_CHANNEL:
+        short_chans = get_short_channels(raw_haemo, max_dist=0.015)
+        fig_short_chans = short_chans.plot(duration=raw_haemo.times[-1], n_channels=raw_haemo.info['nchan'], title="Short Channels Only", show=False)
+        fig_individual["short"] = fig_short_chans
+    else:
+        short_chans = None
     raw_haemo = get_long_channels(raw_haemo)
     if progress_callback: progress_callback(12)
     logger.info("12")
@@ -2916,13 +2958,15 @@ def process_participant(file_path, progress_callback=None):
 
     # Step 16: Plot GLM results
     fig_glm_result = plot_glm_results(file_path, raw_haemo, glm_est, design_matrix)
-    fig_individual["GLM"] = fig_glm_result
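+    # plot_glm_results now returns (name, figure) pairs, one per condition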
+    for name, fig in fig_glm_result:
+        fig_individual[f"GLM {name}"] = fig
     if progress_callback: progress_callback(17)
     logger.info("17")
 
     # Step 17: Plot channel significance
     fig_significance = individual_significance(raw_haemo, glm_est)
-    fig_individual["Significance"] = fig_significance
+    for name, fig in fig_significance:
+        fig_individual[f"Significance {name}"] = fig
     if progress_callback: progress_callback(18)
     logger.info("18")
@@ -2975,6 +3019,9 @@ def process_participant(file_path, progress_callback=None):
 
         contrast_dict[condition] = contrast_vector
 
+    if progress_callback: progress_callback(19)
+    logger.info("19")
+
     # Compute contrast results
     contrast_results = {}
 
@@ -2988,7 +3035,7 @@
 
     fig_bytes = convert_fig_dict_to_png_bytes(fig_individual)
 
+    if progress_callback: progress_callback(20)
+    logger.info("20")
 
-    return raw_haemo, epochs, fig_bytes, cha, contrast_results, df_ind, design_matrix, AGE, GENDER, GROUP, True
-
-# Not 3000 lines yay!
\ No newline at end of file
+    return raw_haemo, epochs, fig_bytes, cha, contrast_results, df_ind, design_matrix, AGE, GENDER, GROUP, True
\ No newline at end of file
diff --git a/main.py b/main.py
index c4a6750..b100fe9 100644
--- a/main.py
+++ b/main.py
@@ -120,6 +120,12 @@ SECTIONS = [
         #{"name": "FILTER", "default": True, "type": bool, "help": "Calculate Peak Spectral Power."},
         ]
     },
+    {
+        "title": "Short Channels",
+        "params": [
+            {"name": "SHORT_CHANNEL", "default": True, "type": bool, "help": "Does the data have a short channel?"},
+        ]
+    },
     {
         "title": "Extracting Events",
         "params": [
@@ -242,6 +248,9 @@ class UpdateCheckThread(QThread):
     error_occurred = Signal(str)
 
     def run(self):
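+        # PyInstaller sets sys.frozen on bundled builds; skip the update check when running from source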
+        if not getattr(sys, 'frozen', False):
+            self.error_occurred.emit("Application is not frozen (Development mode).")
+            return
         try:
             latest_version, download_url = self.get_latest_release_for_platform()
             if not latest_version:
@@ -646,9 +655,9 @@ class ProgressBubble(QWidget):
         self.progress_layout = QHBoxLayout()
 
         self.rects = []
-        for _ in range(12):
+        for _ in range(19):
             rect = QFrame()
-            rect.setFixedSize(10, 20)
+            rect.setFixedSize(10, 18)
             rect.setStyleSheet("background-color: white; border: 1px solid gray;")
             self.progress_layout.addWidget(rect)
             self.rects.append(rect)