final changes for new release

This commit is contained in:
2026-03-28 14:30:20 -07:00
parent 70c4c1e779
commit 74ce2eeb2e
3 changed files with 158 additions and 67 deletions
+7 -1
View File
@@ -5,13 +5,19 @@
- AGE, GENDER, GROUP, loaded files, and all the parameters on the right side of the screen can now be saved before any data has been processed
- If processing has not been completed, the process button will be visible. If processing has completed, the analysis button will be visible
- If the values fail to load, they will fallback to the previous logic of retrieving these values after processing has occurred. Fixes [Issue 66](https://git.research.dezeeuw.ca/tyler/flares/issues/66)
- Added new parameters to the right side of the screen: MAX_SHIFT, T_MIN, T_MAX. Fixes [Issue 69](https://git.research.dezeeuw.ca/tyler/flares/issues/69)
- Added new parameters to the right side of the screen: MAX_SHIFT, T_MIN, T_MAX, MAX_BAD_CHANNELS. Fixes [Issue 69](https://git.research.dezeeuw.ca/tyler/flares/issues/69)
- Participants that are bad on channels will no longer continue to the GLM stage. Fixes [Issue 18](https://git.research.dezeeuw.ca/tyler/flares/issues/18)
- Added new sections to the right side of the screen to mark bad channels: Cross Validation, Median Absolute Deviation, PSD Noise, & Channel Variance
- These sections come with the new parameters CV, CV_THRESHOLD, MAD, MAD_THRESHOLD, PSD_NOISE, TARGET_FREQ_DIV, DB_LIMIT, CHANNEL_VAR, & CHANNEL_THRESH
- Changed number of rectangles in the progress bar to 28 to account for the new options and updated the User Guide with the new stages
- Added feedback when clicking an analysis option that opens up a new window. Fixes [Issue 20](https://git.research.dezeeuw.ca/tyler/flares/issues/20)
- Fixed an issue where projects cannot be saved to a different drive letter on Windows. Fixes [Issue 71](https://git.research.dezeeuw.ca/tyler/flares/issues/71)
- Fixed an issue where the fOLD files were not included in the Windows version. Fixes [Issue 60](https://git.research.dezeeuw.ca/tyler/flares/issues/60)
- Fixed an issue where the MacOS version would fail to perform some analysis options. Fixes [Issue 63](https://git.research.dezeeuw.ca/tyler/flares/issues/63)
- Fixed an issue where processing too many participants would cause the analysis button to not appear. Fixes [Issue 61](https://git.research.dezeeuw.ca/tyler/flares/issues/61)
- Fixed an issue where the error message when a participant fails would not appear. Fixes [Issue 68](https://git.research.dezeeuw.ca/tyler/flares/issues/68)
- Fixed an issue where changes would not be saved if a project was originally loaded from a save. Fixes [Issue 44](https://git.research.dezeeuw.ca/tyler/flares/issues/44)
- Fixed an issue where the significance image would be empty in the Inter-Group Viewer if only one participant was selected. Fixes [Issue 32](https://git.research.dezeeuw.ca/tyler/flares/issues/32)
- Fixed an issue where pressing the 'Clear' button after loading a save would cause the application to crash. Fixes [Issue 67](https://git.research.dezeeuw.ca/tyler/flares/issues/67)
- Fixed an issue where group dropdowns in the Cross-Group viewer would not be updated correctly based on the other groups selected value. Fixes [Issue 49](https://git.research.dezeeuw.ca/tyler/flares/issues/49)
- Fixed an issue where scrollbars were still present after clearing all data. Fixes [Issue 70](https://git.research.dezeeuw.ca/tyler/flares/issues/70)
+95 -44
View File
@@ -149,9 +149,23 @@ PSP: bool
PSP_TIME_WINDOW: int
PSP_THRESHOLD: float
CV: bool
CV_THRESHOLD: int
MAD: bool
MAD_THRESHOLD: int
PSD_NOISE: bool
TARGET_FREQ_DIV: int
DB_LIMIT: int
CHANNEL_VAR: bool
CHANNEL_THRESH: float
BAD_CHANNELS_HANDLING: str
MAX_DIST: float
MIN_NEIGHBORS: int
MAX_BAD_CHANNELS: int
TDDR: bool
@@ -2974,6 +2988,19 @@ def run_second_level_analysis(df_contrasts, raw, p, bounds):
'mean_beta': mean_beta,
'n_subjects': len(Y)
})
if not group_results:
# Create a "Warning" figure instead of a map
fig, ax = plt.subplots(figsize=(8, 4))
ax.text(0.5, 0.5,
f"Second-Level Analysis Aborted\n\n"
f"Reason: All {len(channels)} channels skipped.\n"
f"Requirement: At least 2 subjects (IDs) per channel.\n"
f"Current Subject Count: {df_contrasts['ID'].nunique()}",
ha='center', va='center', fontsize=12, color='darkred',
bbox=dict(facecolor='white', alpha=0.5, edgecolor='red'))
ax.set_axis_off()
plt.show()
return pd.DataFrame()
df_group = pd.DataFrame(group_results)
logger.info("Second-level results:\n%s", df_group)
@@ -3652,14 +3679,14 @@ def detect_sensor_displacement(raw, threshold_ratio=0.05):
def detect_high_freq_noise(raw, db_limit=-60):
def detect_high_freq_noise(raw, db_limit=-60, freq_div=4):
"""
Identifies channels with excessive power at high frequencies
(sfreq/4), usually indicating electronic interference.
"""
ch_names = raw.ch_names
sfreq = raw.info['sfreq']
target_freq = sfreq / 4
target_freq = sfreq / freq_div
# Compute PSD
spectrum = raw.compute_psd(fmin=0.1, fmax=sfreq/2)
@@ -3952,18 +3979,33 @@ def process_participant(file_path, progress_callback=None):
if progress_callback: progress_callback(8)
logger.info("Step 8 Completed.")
# TODO: Add callbacks and user defined parameters
bad_cv, fig_cv = find_bad_channels_cv(raw, cv_threshold=20.0)
fig_individual['cv'] = fig_cv
bad_cv = []
if CV:
bad_cv, fig_cv = find_bad_channels_cv(raw, cv_threshold=CV_THRESHOLD)
fig_individual['cv'] = fig_cv
if progress_callback: progress_callback(9)
logger.info("Step 9 Completed.")
bad_range, fig_range = find_bad_channels_range(raw, threshold=3.0)
fig_individual['range'] = fig_range
bad_range = []
if MAD:
bad_range, fig_range = find_bad_channels_range(raw, threshold=MAD_THRESHOLD)
fig_individual['range'] = fig_range
if progress_callback: progress_callback(10)
logger.info("Step 10 Completed.")
bad_noise, fig_noise = detect_high_freq_noise(raw, db_limit=-60)
fig_individual['psd_noise'] = fig_noise
bad_noise = []
if PSD_NOISE:
bad_noise, fig_noise = detect_high_freq_noise(raw, db_limit=DB_LIMIT, freq_div=TARGET_FREQ_DIV)
fig_individual['psd_noise'] = fig_noise
if progress_callback: progress_callback(11)
logger.info("Step 11 Completed.")
bad_disp, fig_disp = detect_sensor_displacement(raw, threshold_ratio=0.05)
fig_individual['displacement'] = fig_disp
bad_disp = []
if CHANNEL_VAR:
bad_disp, fig_disp = detect_sensor_displacement(raw, threshold_ratio=CHANNEL_THRESH)
fig_individual['displacement'] = fig_disp
if progress_callback: progress_callback(12)
logger.info("Step 12 Completed.")
# Step 9: Bad Channels Handling
if BAD_CHANNELS_HANDLING != "None":
@@ -3977,77 +4019,86 @@ def process_participant(file_path, progress_callback=None):
fig_individual["fig4"] = fig_raw_after
fig_individual["Compare"] = fig_compare
elif BAD_CHANNELS_HANDLING == "Remove":
#NOTE: testing this
num_bad = len(bad_channels)
# Check against the threshold
if num_bad > MAX_BAD_CHANNELS:
raise Exception(
f"Data Quality Error: {num_bad} channels flagged for removal, "
f"which exceeds the limit of {MAX_BAD_CHANNELS}. To avoid this,"
f"either lower your filtering parameters or increase MAX_BAD_CHANNELS."
)
raw.pick_types(fnirs=True, exclude='bads')
logger.info(f"Physically removed {len(bad_channels)} channels from the dataset.")
if progress_callback: progress_callback(9)
logger.info("Step 9 Completed.")
if progress_callback: progress_callback(13)
logger.info("Step 13 Completed.")
# Step 10: Optical Density
raw_od = optical_density(raw)
fig_raw_od = raw_od.plot(duration=raw.times[-1], n_channels=raw.info['nchan'], title="Optical Density", show=False)
fig_individual["Optical Density"] = fig_raw_od
if progress_callback: progress_callback(10)
logger.info("Step 10 Completed.")
if progress_callback: progress_callback(14)
logger.info("Step 14 Completed.")
# Step 11: Temporal Derivative Distribution Repair Filtering
if TDDR:
raw_od = temporal_derivative_distribution_repair(raw_od)
fig_raw_od_tddr = raw_od.plot(duration=raw.times[-1], n_channels=raw.info['nchan'], title="After TDDR (Motion Correction)", show=False)
fig_individual["TDDR"] = fig_raw_od_tddr
if progress_callback: progress_callback(11)
logger.info("Step 11 Completed.")
if progress_callback: progress_callback(15)
logger.info("Step 15 Completed.")
# Step 12: Wavelet Filtering
if WAVELET:
raw_od, fig = calculate_and_apply_wavelet(raw_od)
fig_individual["Wavelet"] = fig
if progress_callback: progress_callback(12)
logger.info("Step 12 Completed.")
if progress_callback: progress_callback(16)
logger.info("Step 16 Completed.")
# Step 13: Haemoglobin Concentration
raw_haemo = beer_lambert_law(raw_od, ppf=calculate_dpf(file_path))
fig_raw_haemo_bll = raw_haemo.plot(duration=raw_haemo.times[-1], n_channels=raw_haemo.info['nchan'], title="HbO and HbR Signals", show=False)
fig_individual["BLL"] = fig_raw_haemo_bll
if progress_callback: progress_callback(13)
logger.info("Step 13 Completed.")
if progress_callback: progress_callback(17)
logger.info("Step 17 Completed.")
# Step 14: Enhance Negative Correlation
if ENHANCE_NEGATIVE_CORRELATION:
raw_haemo = enhance_negative_correlation(raw_haemo)
fig_raw_haemo_enc = raw_haemo.plot(duration=raw_haemo.times[-1], n_channels=raw_haemo.info['nchan'], title="Enhance Negative Correlation", show=False)
fig_individual["ENC"] = fig_raw_haemo_enc
if progress_callback: progress_callback(14)
logger.info("Step 14 Completed.")
if progress_callback: progress_callback(18)
logger.info("Step 18 Completed.")
# Step 15: Filter
if FILTER:
raw_haemo, fig_filter, fig_raw_haemo_filter = filter_the_data(raw_haemo)
fig_individual["filter1"] = fig_filter
fig_individual["filter2"] = fig_raw_haemo_filter
if progress_callback: progress_callback(15)
logger.info("Step 15 Completed.")
if progress_callback: progress_callback(19)
logger.info("Step 19 Completed.")
# Step 16: Extracting Events
events, event_dict = events_from_annotations(raw_haemo)
fig_events = plot_events(events, event_id=event_dict, sfreq=raw_haemo.info["sfreq"], show=False)
fig_individual["events"] = fig_events
if progress_callback: progress_callback(16)
logger.info("Step 16 Completed.")
if progress_callback: progress_callback(20)
logger.info("Step 20 Completed.")
# Step 17: Epoch Calculations
epochs, fig_epochs = epochs_calculations(raw_haemo, events, event_dict)
for name, fig in fig_epochs:
fig_individual[f"epochs_{name}"] = fig
if progress_callback: progress_callback(17)
logger.info("Step 17 Completed.")
if progress_callback: progress_callback(21)
logger.info("Step 21 Completed.")
# Step 18: Design Matrix
design_matrix, fig_design_matrix = make_design_matrix(raw_haemo, short_chans)
fig_individual["Design Matrix"] = fig_design_matrix
if progress_callback: progress_callback(18)
logger.info("Step 18 Completed.")
if progress_callback: progress_callback(22)
logger.info("Step 22 Completed.")
# Step 19: Run GLM
@@ -4063,24 +4114,24 @@ def process_participant(file_path, progress_callback=None):
# A large p-value means the data do not provide strong evidence that the effect is different from zero.
if progress_callback: progress_callback(19)
logger.info("19")
if progress_callback: progress_callback(23)
logger.info("23")
# Step 20: Generate GLM Results
if "derivative" not in HRF_MODEL.lower():
fig_glm_result = plot_glm_results(file_path, raw_haemo, glm_est, design_matrix)
for name, fig in fig_glm_result:
fig_individual[f"GLM {name}"] = fig
if progress_callback: progress_callback(20)
logger.info("20")
if progress_callback: progress_callback(24)
logger.info("24")
# Step 21: Generate Channel Significance
if HRF_MODEL == "fir":
fig_significance = individual_significance(raw_haemo, glm_est)
for name, fig in fig_significance:
fig_individual[f"Significance {name}"] = fig
if progress_callback: progress_callback(21)
logger.info("21")
if progress_callback: progress_callback(25)
logger.info("25")
# Step 22: Generate Channel, Region of Interest, and Contrast Results
cha = glm_est.to_dataframe()
@@ -4136,8 +4187,8 @@ def process_participant(file_path, progress_callback=None):
contrast_dict[condition] = contrast_vector
if progress_callback: progress_callback(22)
logger.info("22")
if progress_callback: progress_callback(26)
logger.info("26")
# Step 23: Compute Contrast Results
contrast_results = {}
@@ -4152,16 +4203,16 @@ def process_participant(file_path, progress_callback=None):
cha["ID"] = file_path
if progress_callback: progress_callback(23)
logger.info("23")
if progress_callback: progress_callback(27)
logger.info("27")
# Step 24: Finishing Up
fig_bytes = convert_fig_dict_to_png_bytes(fig_individual)
sanitize_paths_for_pickle(raw_haemo, epochs)
if progress_callback: progress_callback(25)
logger.info("25")
if progress_callback: progress_callback(28)
logger.info("28")
# TODO: Tidy up
# Extract the parameters this file was ran with. No need to return age, gender, group?
+54 -20
View File
@@ -17,11 +17,11 @@ import platform
import traceback
import subprocess
import concurrent.futures
from queue import Empty
from enum import Enum, auto
from pathlib import Path, PurePosixPath
from datetime import datetime
from multiprocessing import Process, current_process, freeze_support, Manager, Queue
from queue import Empty
# External library imports
import numpy as np
@@ -87,7 +87,7 @@ SECTIONS = [
{
"title": "Heart Rate",
"params": [
{"name": "HEART_RATE", "default": True, "type": bool, "help": "Should an attempt be made to calculate participants heart rates?"},
{"name": "HEART_RATE", "default": True, "type": bool, "help": "Should an attempt be made to calculate the participants heart rate?"},
{"name": "SECONDS_TO_STRIP_HR", "default": 5, "type": int, "depends_on": "HEART_RATE", "help": "Will remove this many seconds from the start and end of the file. Useful if recording before cap is firmly placed, or participant removes cap while still recording."},
{"name": "MAX_LOW_HR", "default": 40, "type": int, "depends_on": "HEART_RATE", "help": "Any heart rate windows that average below this value will be rounded up to this value."},
{"name": "MAX_HIGH_HR", "default": 200, "type": int, "depends_on": "HEART_RATE", "help": "Any heart rate windows that average above this value will be rounded down to this value."},
@@ -118,12 +118,42 @@ SECTIONS = [
{"name": "PSP_THRESHOLD", "default": 0.1, "type": float, "depends_on": "PSP", "help": "PSP threshold. A typical scale would be 0-0.5, but it is possible for values to be above this range. Higher values correspond to a better signal. If PSP is True, any channels lower than this value will be marked as bad."},
]
},
{
"title": "Cross Validation",
"params": [
{"name": "CV", "default": True, "type": bool, "help": "Identifies bad channels using the Coefficient of Variation."},
{"name": "CV_THRESHOLD", "default": 20, "type": int, "depends_on": "CV", "help": "Noise threshold (%)."},
]
},
{
"title": "Median Absolute Deviation",
"params": [
{"name": "MAD", "default": True, "type": bool, "help": "Identifies bad channels using Median Absolute Deviation."},
{"name": "MAD_THRESHOLD", "default": 4, "type": int, "depends_on": "MAD", "help": "Amount of deviations before the channel is flagged bad."},
]
},
{
"title": "Power Spectral Density Noise",
"params": [
{"name": "PSD_NOISE", "default": True, "type": bool, "help": "Identifies bad channels based on their excessive power at high frequencies."},
{"name": "TARGET_FREQ_DIV", "default": 4, "type": int, "depends_on": "PSD_NOISE", "help": "What frequency to check for excessive power. Will take the recording frequency and divide by this number. Has to be greater than 2."},
{"name": "DB_LIMIT", "default": -60, "type": int, "depends_on": "PSD_NOISE", "help": "What db level the power level needs to be below at the target frequency."},
]
},
{
"title": "Channel Variance",
"params": [
{"name": "CHANNEL_VAR", "default": True, "type": bool, "help": "Identifies bad channels based on comparing the variance of the first 25% of the data to the last 25%."},
{"name": "CHANNEL_THRESH", "default": 0.05, "type": float, "depends_on": "CHANNEL_VAR", "help": "If the end variance is less than this % of the start variance, the channel will be marked as bad."},
]
},
{
"title": "Bad Channels Handling",
"params": [
{"name": "BAD_CHANNELS_HANDLING", "default": ["Interpolate"], "type": list, "options": ["Interpolate", "Remove", "None"], "exclusive": True, "help": "How should we deal with the bad channels that occurred? Note: Some analysis options will only work when this is set to 'Interpolate'."},
{"name": "MAX_DIST", "default": 0.03, "type": float, "depends_on": "BAD_CHANNELS_HANDLING", "depends_value": "Interpolate", "help": "The maximum distance to look for neighbours when interpolating. Used only when BAD_CHANNELS_HANDLING is set to 'Interpolate'."},
{"name": "MIN_NEIGHBORS", "default": 2, "type": int, "depends_on": "BAD_CHANNELS_HANDLING", "depends_value": "Interpolate", "help": "The minimum amount of neighbours needed within the MAX_DIST parameter. Used only when BAD_CHANNELS_HANDLING is set to 'Interpolate'."},
{"name": "MAX_BAD_CHANNELS", "default": 12, "type": int, "depends_on": "BAD_CHANNELS_HANDLING", "depends_value": "Remove", "help": "Maximum amount of bad channels before the participant as a whole is marked as bad (exclusive). If this occurs, the participant will be prevented from processing any further. Used only when BAD_CHANNELS_HANDLING is set to 'Remove'."},
]
},
{
@@ -377,22 +407,26 @@ class UserGuideWindow(QWidget):
"Stage 6: Scalp Coupling Index\n"
"Stage 7: Signal to Noise Ratio\n"
"Stage 8: Peak Spectral Power\n"
"Stage 9: Bad Channels Handling\n"
"Stage 10: Optical Density\n"
"Stage 11: Temporal Derivative Distribution Repair Filtering\n"
"Stage 12: Wavelet Filtering\n"
"Stage 13: Haemoglobin Concentration\n"
"Stage 14: Enhance Negative Correlation\n"
"Stage 15: Filter\n"
"Stage 16: Extracting Events\n"
"Stage 17: Epoch Calculations\n"
"Stage 18: Design Matrix\n"
"Stage 19: General Linear Model\n"
"Stage 20: Generate GLM Results\n"
"Stage 21: Generate Channel Significance\n"
"Stage 22: Generate Channel, Region of Interest, and Contrast Results\n"
"Stage 23: Compute Contrast Results\n"
"Stage 24: Finishing Up\n", self)
"Stage 9: Cross Validation\n"
"Stage 10: Median Absolute Deviation\n"
"Stage 11: Power Spectral Density Noise\n"
"Stage 12: Channel Variance\n"
"Stage 13: Bad Channels Handling\n"
"Stage 14: Optical Density\n"
"Stage 15: Temporal Derivative Distribution Repair Filtering\n"
"Stage 16: Wavelet Filtering\n"
"Stage 17: Haemoglobin Concentration\n"
"Stage 18: Enhance Negative Correlation\n"
"Stage 19: Filter\n"
"Stage 20: Extracting Events\n"
"Stage 21: Epoch Calculations\n"
"Stage 22: Design Matrix\n"
"Stage 23: General Linear Model\n"
"Stage 24: Generate GLM Results\n"
"Stage 25: Generate Channel Significance\n"
"Stage 26: Generate Channel, Region of Interest, and Contrast Results\n"
"Stage 27: Compute Contrast Results\n"
"Stage 28: Finishing Up\n", self)
label3 = QLabel(f"For more information, visit the Git wiki page <a href='https://git.research.dezeeuw.ca/tyler/{APP_NAME}/wiki'>here</a>.", self)
label3.setTextFormat(Qt.TextFormat.RichText)
@@ -1620,7 +1654,7 @@ class ProgressBubble(QWidget):
self.progress_layout = QHBoxLayout()
self.rects = []
for _ in range(24):
for _ in range(28):
rect = QFrame()
rect.setFixedSize(10, 18)
rect.setStyleSheet("background-color: white; border: 1px solid gray;")
@@ -4476,7 +4510,7 @@ class GroupBrainViewerWidget(FlaresBaseWidget):
class ViewerLauncherWidget(QWidget):
def __init__(self, haemo_dict, config_dict, fig_bytes_dict, cha_dict, contrast_results_dict, df_ind, design_matrix, epochs_dict):
super().__init__()
self.setWindowTitle("Viewer Launcher")
self.setWindowTitle(f"Viewer Launcher - {APP_NAME.upper()}")
group_dict = {
file_path: config.get("GROUP", "Unknown") # default if GROUP missing