changes and improvements
@@ -15,6 +15,10 @@
 - Fixed the User Guide window to properly display information about the 24 stages and added a link to the Git wiki page
 - MAX_WORKERS should now properly respect the value set
 - Added a new CSV export option to be used by other applications
+- Added support for updating optode positions directly from an .xlsx file from a Polhemius system
+- Fixed an issue where the dropdowns in the Viewer windows would immediately open and close when using a trackpad
+- The glover and spm HRF models now function as intended without crashing. Currently, group analysis is still only supported by fir
+- Revamped the fold channels viewer to no longer hang the application and to better process multiple participants at once


 # Version 1.1.7

flares.py (41 changed lines)
@@ -58,6 +58,7 @@ import neurokit2 as nk # type: ignore
 import pyvistaqt # type: ignore
 import vtkmodules.util.data_model
 import vtkmodules.util.execution_model
+import xlrd

 # External library imports for mne
 from mne import (
@@ -123,8 +124,6 @@ SECONDS_TO_KEEP: float
 OPTODE_PLACEMENT: bool
 SHOW_OPTODE_NAMES: bool

-HEART_RATE: bool
-
 SHORT_CHANNEL: bool
 SHORT_CHANNEL_THRESH: float
 LONG_CHANNEL_THRESH: float
@@ -928,11 +927,12 @@ def interpolate_fNIRS_bads_weighted_average(raw, max_dist=0.03, min_neighbors=2)
     raw.info['bads'] = [ch for ch in raw.info['bads'] if ch not in bad_ch_to_remove]

     print("\nInterpolation complete.\n")
+    print("Bads cleared:", raw.info['bads'])
+    raw.info['bads'] = []

     for ch in raw.info['bads']:
         print(f"Channel {ch} still marked as bad.")

-    print("Bads cleared:", raw.info['bads'])
     fig_raw_after = raw.plot(duration=raw.times[-1], n_channels=raw.info['nchan'], title="After interpolation", show=False)

     return raw, fig_raw_after
@@ -1333,7 +1333,7 @@ def make_design_matrix(raw_haemo, short_chans):
         drift_model=DRIFT_MODEL,
         high_pass=HIGH_PASS,
         drift_order=DRIFT_ORDER,
-        fir_delays=range(15),
+        fir_delays=FIR_DELAYS,
         add_regs=short_chans.get_data().T,
         add_reg_names=short_chans.ch_names,
         min_onset=MIN_ONSET,
@@ -1347,7 +1347,7 @@ def make_design_matrix(raw_haemo, short_chans):
         drift_model=DRIFT_MODEL,
         high_pass=HIGH_PASS,
         drift_order=DRIFT_ORDER,
-        fir_delays=range(15),
+        fir_delays=FIR_DELAYS,
         min_onset=MIN_ONSET,
         oversampling=OVERSAMPLING
     )
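Both design-matrix calls now take their FIR delay count from a FIR_DELAYS global instead of the hard-coded range(15). A minimal sketch of what that global is expected to hold, assuming the GUI hands over the spinbox value as shown later in main.py (the apply_config helper name here is hypothetical and not part of the diff):

    FIR_DELAYS: range  # module-level global in flares.py, like the other uppercase parameters

    def apply_config(values: dict) -> None:  # hypothetical helper, for illustration only
        global FIR_DELAYS
        n = values.get("FIR_DELAYS", 15)
        # main.py already converts the QSpinBox integer N into range(N); accept either form
        FIR_DELAYS = n if isinstance(n, range) else range(int(n))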
@@ -1577,7 +1577,7 @@ def resource_path(relative_path):



-def fold_channels(raw: BaseRaw) -> None:
+def fold_channels(raw: BaseRaw, progress_callback=None) -> None:

     # if getattr(sys, 'frozen', False):
     path = os.path.expanduser("~") + "/mne_data/fOLD/fOLD-public-master/Supplementary"
@@ -1659,8 +1659,11 @@ def fold_channels(raw: BaseRaw) -> None:
     landmark_color_map = {landmark: colors[i % len(colors)] for i, landmark in enumerate(landmarks)}

     # Iterate over each channel
+    print(len(hbo_channel_names))
+
     for idx, channel_name in enumerate(hbo_channel_names):

+        print(idx, channel_name)
         # Run the fOLD on the selected channel
         channel_data = raw.copy().pick(picks=channel_name) # type: ignore

@@ -1703,6 +1706,9 @@ def fold_channels(raw: BaseRaw) -> None:

         landmark_specificity_data = []

+        if progress_callback:
+            progress_callback(idx + 1)
+
     # TODO: Fix this
     if True:
         handles = [
@@ -1725,8 +1731,9 @@ def fold_channels(raw: BaseRaw) -> None:
     for ax in axes[len(hbo_channel_names):]:
         ax.axis('off')

-    plt.show()
-    return fig, legend_fig
+    #plt.show()
+    fig_dict = {"main": fig, "legend": legend_fig}
+    return convert_fig_dict_to_png_bytes(fig_dict)



@@ -2246,9 +2253,15 @@ def brain_3d_visualization(raw_haemo, df_cha, selected_event, t_or_theta: Litera
     # Get all activity conditions
     for cond in [f'{selected_event}']:

-        if True:
         ch_summary = df_cha.query(f"Condition.str.startswith('{cond}_delay_') and Chroma == 'hbo'", engine='python') # type: ignore
+
+        print(ch_summary)
+
+        if ch_summary.empty:
+            # not fir model
+            print("No data found for this condition.")
+            ch_summary = df_cha.query(f"Condition in [@cond] and Chroma == 'hbo'", engine='python')

         # Use ordinary least squares (OLS) if only one participant
         # TODO: Fix.
         if True:
@@ -2269,6 +2282,9 @@ def brain_3d_visualization(raw_haemo, df_cha, selected_event, t_or_theta: Litera
         valid_channels = ch_summary["ch_name"].unique().tolist() # type: ignore
         raw_for_plot = raw_haemo.copy().pick(picks=valid_channels) # type: ignore

+        print(f"DEBUG: Model DF rows: {len(model_df)}")
+        print(f"DEBUG: Raw channels: {len(raw_for_plot.ch_names)}")
+
         brain = plot_3d_evoked_array(raw_for_plot.pick(picks="hbo"), model_df, view="dorsal", distance=0.02, colorbar=True, clim=clim, mode="weighted", size=(800, 700)) # type: ignore

         if show_optodes == 'all' or show_optodes == 'sensors':
@@ -3299,7 +3315,7 @@ def hr_calc(raw):
     # --- Parameters for PSD ---
     desired_bin_hz = 0.1
     nperseg = int(sfreq / desired_bin_hz)
-    hr_range = (30, 180)
+    hr_range = (30, 180) # TODO: Should this not use the user-defined values?

     # --- Function to find strongest local peak ---
     def find_hr_from_psd(ch_data):
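The new TODO points out that the heart-rate search window is still hard-coded. A minimal sketch of what picking up the user-defined limits could look like, assuming the MAX_LOW_HR and MAX_HIGH_HR globals that the GUI already exposes are set in flares.py:

    # Sketch only: derive the PSD search range from the configured HR limits,
    # keeping the current hard-coded 30-180 bpm window as the fallback.
    try:
        hr_range = (MAX_LOW_HR, MAX_HIGH_HR)
    except NameError:
        hr_range = (30, 180)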
@@ -3527,6 +3543,7 @@ def process_participant(file_path, progress_callback=None):
     logger.info("19")

     # Step 20: Generate GLM Results
+    if "derivative" not in HRF_MODEL.lower():
         fig_glm_result = plot_glm_results(file_path, raw_haemo, glm_est, design_matrix)
         for name, fig in fig_glm_result:
             fig_individual[f"GLM {name}"] = fig
@@ -3534,6 +3551,7 @@ def process_participant(file_path, progress_callback=None):
     logger.info("20")

     # Step 21: Generate Channel Significance
+    if HRF_MODEL == "fir":
         fig_significance = individual_significance(raw_haemo, glm_est)
         for name, fig in fig_significance:
             fig_individual[f"Significance {name}"] = fig
@@ -3568,6 +3586,7 @@ def process_participant(file_path, progress_callback=None):
         [(column, contrast_matrix[i]) for i, column in enumerate(design_matrix.columns)]
     )

+    if HRF_MODEL == "fir":
         all_delay_cols = [col for col in design_matrix.columns if "_delay_" in col]
         all_conditions = sorted({col.split("_delay_")[0] for col in all_delay_cols})

@@ -3599,6 +3618,8 @@ def process_participant(file_path, progress_callback=None):
     # Step 23: Compute Contrast Results
     contrast_results = {}

+    if HRF_MODEL == "fir":
+
         for cond, contrast_vector in contrast_dict.items():
             contrast = glm_est.compute_contrast(contrast_vector) # type: ignore
             df = contrast.to_dataframe()
fold.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+# workers.py
+import flares
+
+# This function must be completely standalone.
+# No PyQt imports here!
+def run_fold_process(haemo_obj, label, shared_dict):
+    """
+    Runs in a separate OS process.
+    Writes progress to shared_dict so the GUI can see it.
+    """
+    try:
+        def progress_callback(value):
+            # Only update shared memory if the value has changed
+            # This significantly reduces "noise" on the GUI thread
+            if shared_dict.get(label) != value:
+                shared_dict[label] = value
+
+        # Run the heavy calculation
+        # Ensure 'flares' logic does not try to open any plots/GUIs itself!
+        figures = flares.fold_channels(haemo_obj, progress_callback=progress_callback)
+
+        return figures  # hand the PNG dict back to the caller in the GUI process
+    except Exception as e:
+        # If something breaks here, we return the error string
+        # so the main thread knows what happened.
+        return f"ERROR: {str(e)}"
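For reference, a minimal non-Qt sketch of how run_fold_process is meant to be driven from another process; the Manager dict is the only channel the GUI later polls for progress. Here haemo_obj is assumed to be an already-loaded MNE haemoglobin Raw object, and fold_channels is assumed to return the PNG dict per the flares.py change above:

    from multiprocessing import Manager
    from concurrent.futures import ProcessPoolExecutor
    from fold import run_fold_process

    if __name__ == "__main__":                        # required on spawn-based platforms
        manager = Manager()
        shared = manager.dict()                       # progress values visible across processes
        with ProcessPoolExecutor(max_workers=1) as pool:
            future = pool.submit(run_fold_process, haemo_obj, "Participant 1", shared)
            result = future.result()                  # PNG dict on success, "ERROR: ..." string on failure
        print(shared.get("Participant 1"), type(result))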
main.py (369 changed lines)
@@ -39,10 +39,10 @@ from mne import Annotations

 from PySide6.QtWidgets import (
     QApplication, QWidget, QMessageBox, QVBoxLayout, QHBoxLayout, QTextEdit, QScrollArea, QComboBox, QGridLayout,
-    QPushButton, QMainWindow, QFileDialog, QLabel, QLineEdit, QFrame, QSizePolicy, QGroupBox, QDialog, QListView, QMenu
+    QPushButton, QMainWindow, QFileDialog, QLabel, QLineEdit, QFrame, QSizePolicy, QGroupBox, QDialog, QListView, QMenu, QSpinBox, QProgressDialog, QProgressBar
 )
 from PySide6.QtCore import QThread, Signal, Qt, QTimer, QEvent, QSize, QPoint
-from PySide6.QtGui import QAction, QKeySequence, QIcon, QIntValidator, QDoubleValidator, QPixmap, QStandardItemModel, QStandardItem
+from PySide6.QtGui import QAction, QKeySequence, QIcon, QIntValidator, QDoubleValidator, QPixmap, QStandardItemModel, QStandardItem, QImage
 from PySide6.QtSvgWidgets import QSvgWidget # needed to show svgs when app is not frozen


@@ -59,71 +59,71 @@ SECTIONS = [
         "title": "Preprocessing",
         "params": [
             {"name": "DOWNSAMPLE", "default": True, "type": bool, "help": "Should the snirf files be downsampled? If this is set to True, DOWNSAMPLE_FREQUENCY will be used as the target frequency to downsample to."},
-            {"name": "DOWNSAMPLE_FREQUENCY", "default": 25, "type": int, "help": "Frequency (Hz) to downsample to. If this is set higher than the input data, new data will be interpolated. Only used if DOWNSAMPLE is set to True"},
+            {"name": "DOWNSAMPLE_FREQUENCY", "default": 25, "type": int, "depends_on": "DOWNSAMPLE", "help": "Frequency (Hz) to downsample to. If this is set higher than the input data, new data will be interpolated."},
         ]
     },
     {
         "title": "Trimming",
         "params": [
-            {"name": "TRIM", "default": True, "type": bool, "help": "Trim the file start."},
+            {"name": "TRIM", "default": True, "type": bool, "help": "Should the start of the files be trimmed?"},
-            {"name": "SECONDS_TO_KEEP", "default": 5, "type": float, "help": "Seconds to keep at the beginning of all loaded snirf files before the first annotation/event occurs. Calculation is done seperatly on all loaded snirf files. Setting this to 0 will have the first annotation/event be at time point 0."},
+            {"name": "SECONDS_TO_KEEP", "default": 5, "type": float, "depends_on": "TRIM", "help": "Seconds to keep at the beginning of all loaded snirf files before the first annotation/event occurs. Calculation is done separately on all loaded snirf files. Setting this to 0 will have the first annotation/event be at time point 0. Only used if TRIM is set to True."},
         ]
     },
     {
         "title": "Verify Optode Placement",
         "params": [
-            {"name": "OPTODE_PLACEMENT", "default": True, "type": bool, "help": "Generate an image for each participant outlining their optode placement."},
+            {"name": "OPTODE_PLACEMENT", "default": True, "type": bool, "help": "Should an image be generated for each participant outlining their optode placement on a head?"},
-            {"name": "SHOW_OPTODE_NAMES", "default": True, "type": bool, "help": "Should the optode names be written next to their location or not."},
+            {"name": "SHOW_OPTODE_NAMES", "default": True, "type": bool, "depends_on": "OPTODE_PLACEMENT", "help": "Should the optode names be written next to their location in the image?"},
         ]
     },
     {
         "title": "Short/Long Channels",
         "params": [
-            {"name": "SHORT_CHANNEL", "default": True, "type": bool, "help": "This should be set to True if the data has a short channel present in the data."},
+            {"name": "SHORT_CHANNEL", "default": True, "type": bool, "help": "This should be set to True if the data has a short channel present in the data. For more information about short channels, please visit the Wiki."},
-            {"name": "SHORT_CHANNEL_THRESH", "default": 0.015, "type": float, "help": "The maximum distance the short channel can be in metres."},
+            {"name": "SHORT_CHANNEL_THRESH", "default": 0.015, "type": float, "depends_on": "SHORT_CHANNEL", "help": "The maximum distance the short channel can be in metres before it is no longer considered a short channel."},
-            {"name": "LONG_CHANNEL_THRESH", "default": 0.045, "type": float, "help": "The maximum distance the long channel can be in metres."},
+            {"name": "LONG_CHANNEL_THRESH", "default": 0.045, "type": float, "help": "The maximum distance channels can be in metres. Any channel longer than this distance will be discarded."},
         ]
     },
     {
         "title": "Heart Rate",
         "params": [
-            {"name": "HEART_RATE", "default": True, "type": bool, "help": "Attempt to calculate the participants heart rate."},
+            {"name": "HEART_RATE", "default": True, "type": bool, "help": "Should an attempt be made to calculate participants' heart rates?"},
-            {"name": "SECONDS_TO_STRIP_HR", "default": 5, "type": int, "help": "Will remove this many seconds from the start and end of the file. Useful if recording before cap is firmly placed, or participant removes cap while still recording."},
+            {"name": "SECONDS_TO_STRIP_HR", "default": 5, "type": int, "depends_on": "HEART_RATE", "help": "Will remove this many seconds from the start and end of the file. Useful if recording before cap is firmly placed, or participant removes cap while still recording."},
-            {"name": "MAX_LOW_HR", "default": 40, "type": int, "help": "Any heart rate windows that average below this value will be rounded up to this value."},
+            {"name": "MAX_LOW_HR", "default": 40, "type": int, "depends_on": "HEART_RATE", "help": "Any heart rate windows that average below this value will be rounded up to this value."},
-            {"name": "MAX_HIGH_HR", "default": 200, "type": int, "help": "Any heart rate windows that average above this value will be rounded down to this value."},
+            {"name": "MAX_HIGH_HR", "default": 200, "type": int, "depends_on": "HEART_RATE", "help": "Any heart rate windows that average above this value will be rounded down to this value."},
-            {"name": "SMOOTHING_WINDOW_HR", "default": 100, "type": int, "help": "How many individual data points to smooth into a single window."},
+            {"name": "SMOOTHING_WINDOW_HR", "default": 100, "type": int, "depends_on": "HEART_RATE", "help": "How many individual data points are used to create a single data point/window."},
-            {"name": "HEART_RATE_WINDOW", "default": 25, "type": int, "help": "Used for visualization. Shows the range of the calculated heart rate +- this value."},
+            {"name": "HEART_RATE_WINDOW", "default": 25, "type": int, "depends_on": "HEART_RATE", "help": "Only used for visualization. Shows the 'range' of the calculated heart rate, which is just the average +- this value."},
         ]
     },
     {
         "title": "Scalp Coupling Index",
         "params": [
             {"name": "SCI", "default": True, "type": bool, "help": "Calculate and mark channels bad based on their Scalp Coupling Index. This metric calculates the quality of the connection between the optode and the scalp."},
-            {"name": "SCI_TIME_WINDOW", "default": 3, "type": int, "help": "Independent SCI calculations will be perfomed in a time window for the duration of the value provided, until the end of the file is reached."},
+            {"name": "SCI_TIME_WINDOW", "default": 3, "type": int, "depends_on": "SCI", "help": "Independent SCI calculations will be performed in a time window for the duration of the value provided, until the end of the file is reached."},
-            {"name": "SCI_THRESHOLD", "default": 0.6, "type": float, "help": "SCI threshold on a scale of 0-1. A value of 0 is bad coupling while a value of 1 is perfect coupling. If SCI is True, any channels lower than this value will be marked as bad."},
+            {"name": "SCI_THRESHOLD", "default": 0.6, "type": float, "depends_on": "SCI", "help": "SCI threshold on a scale of 0-1. A value of 0 is bad coupling while a value of 1 is perfect coupling. Any channels lower than this value will be marked as bad."},
         ]
     },
     {
         "title": "Signal to Noise Ratio",
         "params": [
             {"name": "SNR", "default": True, "type": bool, "help": "Calculate and mark channels bad based on their Signal to Noise Ratio. This metric calculates how much of the observed signal was noise versus how much of it was a useful signal."},
-            {"name": "SNR_THRESHOLD", "default": 5.0, "type": float, "help": "SNR threshold (dB). A typical scale would be 0-25, but it is possible for values to be both above and below this range. Higher values correspond to a better signal. If SNR is True, any channels lower than this value will be marked as bad."},
+            {"name": "SNR_THRESHOLD", "default": 5.0, "type": float, "depends_on": "SNR", "help": "SNR threshold (dB). A typical scale would be 0-25, but it is possible for values to be both above and below this range. Higher values correspond to a better signal. If SNR is True, any channels lower than this value will be marked as bad."},
         ]
     },
     {
         "title": "Peak Spectral Power",
         "params": [
             {"name": "PSP", "default": True, "type": bool, "help": "Calculate and mark channels bad based on their Peak Spectral Power. This metric calculates the amplitude or strength of a frequency component that is most prominent in a particular frequency range or spectrum."},
-            {"name": "PSP_TIME_WINDOW", "default": 3, "type": int, "help": "Independent PSP calculations will be perfomed in a time window for the duration of the value provided, until the end of the file is reached."},
+            {"name": "PSP_TIME_WINDOW", "default": 3, "type": int, "depends_on": "PSP", "help": "Independent PSP calculations will be performed in a time window for the duration of the value provided, until the end of the file is reached."},
-            {"name": "PSP_THRESHOLD", "default": 0.1, "type": float, "help": "PSP threshold. A typical scale would be 0-0.5, but it is possible for values to be above this range. Higher values correspond to a better signal. If PSP is True, any channels lower than this value will be marked as bad."},
+            {"name": "PSP_THRESHOLD", "default": 0.1, "type": float, "depends_on": "PSP", "help": "PSP threshold. A typical scale would be 0-0.5, but it is possible for values to be above this range. Higher values correspond to a better signal. If PSP is True, any channels lower than this value will be marked as bad."},
         ]
     },
     {
         "title": "Bad Channels Handling",
         "params": [
-            {"name": "BAD_CHANNELS_HANDLING", "default": [], "type": list, "options": ["Interpolate", "Remove", "None"], "exclusive": True, "help": "How should we deal with the bad channels that occurred? Note: Some analysis options will only work when this is set to 'Interpolate'."},
+            {"name": "BAD_CHANNELS_HANDLING", "default": ["Interpolate"], "type": list, "options": ["Interpolate", "Remove", "None"], "exclusive": True, "help": "How should we deal with the bad channels that occurred? Note: Some analysis options will only work when this is set to 'Interpolate'."},
-            {"name": "MAX_DIST", "default": 0.03, "type": float, "help": "The maximum distance to look for neighbours when interpolating. Used only when BAD_CHANNELS_HANDLING is set to 'Interpolate'."},
+            {"name": "MAX_DIST", "default": 0.03, "type": float, "depends_on": "BAD_CHANNELS_HANDLING", "depends_value": "Interpolate", "help": "The maximum distance to look for neighbours when interpolating. Used only when BAD_CHANNELS_HANDLING is set to 'Interpolate'."},
-            {"name": "MIN_NEIGHBORS", "default": 2, "type": int, "help": "The minimumn amount of neighbours needed within the MAX_DIST parameter. Used only when BAD_CHANNELS_HANDLING is set to 'Interpolate'."},
+            {"name": "MIN_NEIGHBORS", "default": 2, "type": int, "depends_on": "BAD_CHANNELS_HANDLING", "depends_value": "Interpolate", "help": "The minimum amount of neighbours needed within the MAX_DIST parameter. Used only when BAD_CHANNELS_HANDLING is set to 'Interpolate'."},
         ]
     },
     {
@@ -141,10 +141,10 @@ SECTIONS = [
     {
         "title": "Wavelet filtering",
         "params": [
-            {"name": "WAVELET", "default": True, "type": bool, "help": "Apply Wavelet filtering."},
+            {"name": "WAVELET", "default": True, "type": bool, "help": "Apply Wavelet filtering. It is a filtering method involving decomposition, thresholding, and reconstruction."},
-            {"name": "IQR", "default": 1.5, "type": float, "help": "Inter-Quartile Range."},
+            {"name": "IQR", "default": 1.5, "type": float, "depends_on": "WAVELET", "help": "Scaling factor for the Inter-Quartile Range."},
-            {"name": "WAVELET_TYPE", "default": "db4", "type": str, "help": "Wavelet type."},
+            {"name": "WAVELET_TYPE", "default": "db4", "type": str, "depends_on": "WAVELET", "help": "Wavelet type. Valid values are ['bior1.1', 'bior1.3', 'bior1.5', 'bior2.2', 'bior2.4', 'bior2.6', 'bior2.8', 'bior3.1', 'bior3.3', 'bior3.5', 'bior3.7', 'bior3.9', 'bior4.4', 'bior5.5', 'bior6.8', 'coif1', 'coif2', 'coif3', 'coif4', 'coif5', 'coif6', 'coif7', 'coif8', 'coif9', 'coif10', 'coif11', 'coif12', 'coif13', 'coif14', 'coif15', 'coif16', 'coif17', 'db1', 'db2', 'db3', 'db4', 'db5', 'db6', 'db7', 'db8', 'db9', 'db10', 'db11', 'db12', 'db13', 'db14', 'db15', 'db16', 'db17', 'db18', 'db19', 'db20', 'db21', 'db22', 'db23', 'db24', 'db25', 'db26', 'db27', 'db28', 'db29', 'db30', 'db31', 'db32', 'db33', 'db34', 'db35', 'db36', 'db37', 'db38', 'dmey', 'haar', 'rbio1.1', 'rbio1.3', 'rbio1.5', 'rbio2.2', 'rbio2.4', 'rbio2.6', 'rbio2.8', 'rbio3.1', 'rbio3.3', 'rbio3.5', 'rbio3.7', 'rbio3.9', 'rbio4.4', 'rbio5.5', 'rbio6.8', 'sym2', 'sym3', 'sym4', 'sym5', 'sym6', 'sym7', 'sym8', 'sym9', 'sym10', 'sym11', 'sym12', 'sym13', 'sym14', 'sym15', 'sym16', 'sym17', 'sym18', 'sym19', 'sym20']"},
-            {"name": "WAVELET_LEVEL", "default": 3, "type": int, "help": "Wavelet level."},
+            {"name": "WAVELET_LEVEL", "default": 3, "type": int, "depends_on": "WAVELET", "help": "Wavelet Decomposition level (must be >= 0)."},
         ]
     },
     {
@@ -162,11 +162,11 @@ SECTIONS = [
     {
         "title": "Filtering",
         "params": [
-            {"name": "FILTER", "default": True, "type": bool, "help": "Filter the data."},
+            {"name": "FILTER", "default": True, "type": bool, "help": "Should the data be bandpass filtered?"},
-            {"name": "L_FREQ", "default": 0.005, "type": float, "help": "Any frequencies lower than this value will be removed."},
+            {"name": "L_FREQ", "default": 0.005, "type": float, "depends_on": "FILTER", "help": "Any frequencies lower than this value will be removed."},
-            {"name": "H_FREQ", "default": 0.3, "type": float, "help": "Any frequencies higher than this value will be removed."},
+            {"name": "H_FREQ", "default": 0.3, "type": float, "depends_on": "FILTER", "help": "Any frequencies higher than this value will be removed."},
-            {"name": "L_TRANS_BANDWIDTH", "default": 0.002, "type": float, "help": "How wide the transitional period should be so the data doesn't just drop off on the lower bound."},
+            {"name": "L_TRANS_BANDWIDTH", "default": 0.002, "type": float, "depends_on": "FILTER", "help": "How wide the transitional period should be so the data doesn't just drop off on the lower bound."},
-            {"name": "H_TRANS_BANDWIDTH", "default": 0.002, "type": float, "help": "How wide the transitional period should be so the data doesn't just drop off on the upper bound."},
+            {"name": "H_TRANS_BANDWIDTH", "default": 0.002, "type": float, "depends_on": "FILTER", "help": "How wide the transitional period should be so the data doesn't just drop off on the upper bound."},
         ]
     },
     {
@@ -184,11 +184,11 @@ SECTIONS = [
     {
         "title": "Design Matrix",
         "params": [
-            {"name": "RESAMPLE", "default": True, "type": bool, "help": "The length of your stimulus."},
+            {"name": "RESAMPLE", "default": True, "type": bool, "help": "Should the data be resampled before calculating the design matrix? Downsampling is useful for speeding up calculations without losing overall data shape."},
-            {"name": "RESAMPLE_FREQ", "default": 1, "type": int, "help": "The length of your stimulus."},
+            {"name": "RESAMPLE_FREQ", "default": 1, "type": int, "help": "The frequency the data should be resampled to."},

+            {"name": "HRF_MODEL", "default": ["fir"], "type": list, "options": ["fir", "glover", "spm", "spm + derivative", "spm + derivative + dispersion", "glover + derivative", "glover + derivative + dispersion"], "exclusive": True, "help": "Specifies the hemodynamic response function."},
             {"name": "STIM_DUR", "default": 0.5, "type": float, "help": "The length of your stimulus."},
-            {"name": "HRF_MODEL", "default": "fir", "type": str, "help": "Specifies the hemodynamic response function."},
             {"name": "DRIFT_MODEL", "default": "cosine", "type": str, "help": "Specifies the desired drift model."},
             {"name": "HIGH_PASS", "default": 0.01, "type": float, "help": "High-pass frequency in case of a cosine model (in Hz)."},
             {"name": "DRIFT_ORDER", "default": 1, "type": int, "help": "Order of the drift model (in case it is polynomial)"},
@@ -196,15 +196,15 @@ SECTIONS = [
             {"name": "MIN_ONSET", "default": -24, "type": int, "help": "Minimal onset relative to frame times (in seconds)"},
             {"name": "OVERSAMPLING", "default": 50, "type": int, "help": "Oversampling factor used in temporal convolutions."},
             {"name": "REMOVE_EVENTS", "default": "None", "type": list, "help": "Remove events matching the names provided before generating the Design Matrix"},
-            {"name": "SHORT_CHANNEL_REGRESSION", "default": True, "type": bool, "help": "Whether to use short channel regression and regress out the short channels. Requires SHORT_CHANNELS to be True and at least one short channel to be found."},
+            {"name": "SHORT_CHANNEL_REGRESSION", "default": True, "type": bool, "depends_on": "SHORT_CHANNEL", "help": "Should short channel regression be used to create the design matrix? This will use the 'signal' from the short channel and regress it out of all other channels."},
         ]
     },
     {
         "title": "General Linear Model",
         "params": [
-            {"name": "NOISE_MODEL", "default": "ar1", "type": str, "help": "Number of jobs for GLM processing."},
+            {"name": "NOISE_MODEL", "default": "ar1", "type": str, "help": "The temporal variance model. Defaults to first order auto regressive model 'ar1'. The AR model can be set to any integer value by modifying the value of N. E.g. use ar5 for a fifth order model. If the string auto is provided a model with order 4 times the sample rate will be used."},
-            {"name": "BINS", "default": 0, "type": int, "help": "Number of jobs for GLM processing."},
+            {"name": "BINS", "default": 0, "type": int, "help": "Maximum number of discrete bins for the AR coef histogram/clustering. By default the value is 0, which will set the number of bins to the number of channels, effectively estimating the AR model for each channel."},
-            {"name": "N_JOBS", "default": 1, "type": int, "help": "Number of jobs for GLM processing."},
+            {"name": "N_JOBS", "default": 1, "type": int, "help": "The number of CPUs to use to do the GLM computation. -1 means 'all CPUs'."},
         ]
     },
     {
@@ -218,14 +218,56 @@ SECTIONS = [
         "params": [
             {"name": "TIME_WINDOW_START", "default": 0, "type": int, "help": "Where to start averaging the fir model bins. Only affects the significance and contrast images."},
             {"name": "TIME_WINDOW_END", "default": 15, "type": int, "help": "Where to end averaging the fir model bins. Only affects the significance and contrast images."},
-            {"name": "MAX_WORKERS", "default": 4, "type": str, "help": "Number of files to be processed at once. Setting this to a small integer value may help on underpowered systems. Remove the value to use an automatic amount."},
+            {"name": "MAX_WORKERS", "default": 6, "type": int, "help": "Number of files to be processed at once. Setting this to a small integer value may help on underpowered systems. Remove the value to use an automatic amount."},
-            {"name": "VERBOSITY", "default": False, "type": bool, "help": "True will log lots of debugging information to the log file. False will only log required data."},
+            {"name": "VERBOSITY", "default": False, "type": bool, "help": "Setting this to True will log lots of debugging information to the log file. Setting this to False will log minimal data."},
         ]
     },
 ]


+
+
+from concurrent.futures import ProcessPoolExecutor
+from PySide6.QtCore import QObject
+from fold import run_fold_process
+
+class FoldWorker(QObject):
+    progress_sig = Signal(int)
+    finished_sig = Signal(dict)
+    error_sig = Signal(str)
+
+    def __init__(self, haemo_obj, label, shared_dict):
+        super().__init__()
+        self.haemo_obj = haemo_obj
+        self.label = label
+        self.shared_dict = shared_dict
+
+    def run(self):
+
+        try:
+            with ProcessPoolExecutor(max_workers=1) as executor:
+                # Submit the function from the external file
+                future = executor.submit(run_fold_process, self.haemo_obj, self.label, self.shared_dict)
+
+                while not future.done():
+                    current_progress = self.shared_dict.get(self.label, 0)
+                    self.progress_sig.emit(current_progress)
+                    time.sleep(0.1)
+
+                result = future.result()
+
+                # Check if our worker returned an error string instead of a dict
+                if isinstance(result, str) and result.startswith("ERROR:"):
+                    raise Exception(result)
+
+                self.progress_sig.emit(100)
+                self.finished_sig.emit(result)
+
+        except Exception as e:
+            self.error_sig.emit(str(e))
+
+

 class TerminalWindow(QWidget):
     def __init__(self, parent=None):
         super().__init__(parent, Qt.WindowType.Window)
@@ -1447,7 +1489,7 @@ class ParamSection(QWidget):
         layout = QVBoxLayout()
         self.setLayout(layout)
         self.widgets = {}
+        self.dependencies = []
         self.selected_path = None

         # Title label
@@ -1489,6 +1531,7 @@ class ParamSection(QWidget):
                 widget = QComboBox()
                 widget.addItems(["True", "False"])
                 widget.setCurrentText(str(param["default"]))
+                widget.currentTextChanged.connect(self.update_dependencies)
             elif param["type"] == int:
                 widget = QLineEdit()
                 widget.setValidator(QIntValidator())
@@ -1502,12 +1545,31 @@ class ParamSection(QWidget):
                     widget = QComboBox()
                     widget.addItems(param.get("options", []))
                     widget.setCurrentText(str(param.get("default", "<None Selected>")))
+                    widget.currentTextChanged.connect(self.update_dependencies)
                 else:
                     widget = self._create_multiselect_dropdown(None)
+            elif param["type"] == range:
+                widget = QSpinBox()
+                widget.setRange(0, 999)  # Set a sensible maximum
+                # If default is "None" or range(15), handle it gracefully:
+                default_val = param["default"]
+                if isinstance(default_val, range):
+                    widget.setValue(default_val.stop)
+                elif str(default_val).isdigit():
+                    widget.setValue(int(default_val))
+                else:
+                    widget.setValue(15)  # Default fallback
             else:
                 widget = QLineEdit()
                 widget.setText(str(param["default"]))

+            if "depends_on" in param:
+                self.dependencies.append({
+                    "child_name": param["name"],
+                    "parent_name": param["depends_on"],
+                    "depends_value": param.get("depends_value", "True")
+                })
+
             widget.setToolTip(help_text)

             h_layout.addWidget(widget)
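None of the SECTIONS hunks above show a parameter declared with "type": range, so the entry this new QSpinBox branch serves is assumed rather than shown. Given the FIR_DELAYS change in flares.py, it would presumably look something like the following illustrative entry (names and help text are guesses, not part of the diff):

    {"name": "FIR_DELAYS", "default": range(15), "type": range, "depends_on": "HRF_MODEL",
     "depends_value": "fir", "help": "Number of FIR delay bins; stored as range(N)."},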
@@ -1516,9 +1578,34 @@ class ParamSection(QWidget):
             layout.addLayout(h_layout)
             self.widgets[param["name"]] = {
                 "widget": widget,
-                "type": param["type"]
+                "type": param["type"],
+                "h_layout": h_layout
             }
+
+        self.update_dependencies()
+
+    def update_dependencies(self):
+        """Disables/Enables widgets based on parent selection values."""
+        for dep in self.dependencies:
+            child_info = self.widgets.get(dep["child_name"])
+            parent_info = self.widgets.get(dep["parent_name"])
+
+            if child_info and parent_info:
+                parent_widget = parent_info["widget"]
+
+                # Get current value of parent (works for both bool-combos and list-combos)
+                current_parent_value = parent_widget.currentText()
+
+                # Check if it matches the required value
+                is_active = (current_parent_value == dep["depends_value"])
+
+                # Toggle the entire row (Button, Label, and Input)
+                h_layout = child_info["h_layout"]
+                for i in range(h_layout.count()):
+                    item = h_layout.itemAt(i).widget()
+                    if item:
+                        item.setEnabled(is_active)
+

     def _create_multiselect_dropdown(self, items):
         combo = FullClickComboBox()
         combo.setView(QListView())
@@ -1615,6 +1702,12 @@ class ParamSection(QWidget):
                 values[name] = [x.strip() for x in widget.lineEdit().text().split(",") if x.strip()]
             elif isinstance(widget, QComboBox):
                 values[name] = widget.currentText()
+            elif expected_type == range:
+                if isinstance(widget, QSpinBox):
+                    # Convert the integer N into range(N)
+                    values[name] = range(widget.value())
+                else:
+                    values[name] = range(15)  # Fallback
             else:
                 raw_text = widget.text()
                 try:
@@ -1768,21 +1861,24 @@ class ParamSection(QWidget):
         self.update_dropdown_items("REMOVE_EVENTS", common_annotations)



-class FullClickLineEdit(QLineEdit):
-    def mousePressEvent(self, event):
-        combo = self.parent()
-        if isinstance(combo, QComboBox):
-            combo.showPopup()
-        super().mousePressEvent(event)
-
-
 class FullClickComboBox(QComboBox):
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.setLineEdit(FullClickLineEdit(self))
+    def __init__(self, parent=None):
+        super().__init__(parent)
+        self.setEditable(True)
         self.lineEdit().setReadOnly(True)
+        self.lineEdit().installEventFilter(self)
+
+    def eventFilter(self, obj, event):
+        if obj == self.lineEdit():
+
+            if event.type() == QEvent.MouseButtonPress:
+                return True
+
+            if event.type() == QEvent.MouseButtonRelease:
+                self.showPopup()
+                return True
+
+        return super().eventFilter(obj, event)


 class ParticipantViewerWidget(QWidget):
@@ -2346,7 +2442,7 @@ class ParticipantBrainViewerWidget(QWidget):
             haemo_obj = self.haemo_dict.get(file_path)

             if haemo_obj is None:
-                continue
+                raise Exception("How did we get here?")

             cha = self.cha_dict.get(file_path)

@@ -2381,6 +2477,28 @@ class ParticipantBrainViewerWidget(QWidget):



+class MultiProgressDialog(QDialog):
+    def __init__(self, parent=None):
+        super().__init__(parent)
+        self.setWindowTitle("fOLD Analysis Progress")
+        self.setFixedWidth(400)
+        # Ensure it doesn't block the main thread
+        self.setWindowModality(Qt.WindowModality.NonModal)
+        self.layout = QVBoxLayout(self)
+        self.bars = {}
+
+    def add_participant(self, label, total_steps):
+        label_widget = QLabel(f"Analyzing {label}...")
+        pbar = QProgressBar()
+        pbar.setMaximum(total_steps)
+        self.layout.addWidget(label_widget)
+        self.layout.addWidget(pbar)
+        self.bars[label] = pbar
+
+    def update_bar(self, label, value):
+        if label in self.bars:
+            self.bars[label].setValue(value)
+

 class ParticipantFoldChannelsWidget(QWidget):
     def __init__(self, haemo_dict, cha_dict):
@@ -2388,7 +2506,7 @@ class ParticipantFoldChannelsWidget(QWidget):
         self.setWindowTitle("FLARES Participant Fold Channels Viewer")
         self.haemo_dict = haemo_dict
         self.cha_dict = cha_dict
+        self.active_threads = []
         # Create mappings: file_path -> participant label and dropdown display text
         self.participant_map = {} # file_path -> "Participant 1"
         self.participant_dropdown_items = [] # "Participant 1 (filename)"
@@ -2423,7 +2541,6 @@ class ParticipantFoldChannelsWidget(QWidget):
         self.top_bar.addWidget(self.participant_dropdown)
         self.top_bar.addWidget(QLabel("Fold Type:"))
         self.top_bar.addWidget(self.image_index_dropdown)
-        self.top_bar.addWidget(QLabel("This will cause the app to hang for ~30s/Participant!"))
         self.top_bar.addWidget(self.submit_button)

         self.scroll = QScrollArea()
@@ -2544,51 +2661,113 @@ class ParticipantFoldChannelsWidget(QWidget):
         self.image_index_dropdown.lineEdit().setText(", ".join(index_labels))

     def show_fold_images(self):
-        import flares

         selected_display_names = self._get_checked_items(self.participant_dropdown)
-        selected_file_paths = []
+        selected_indexes = [int(s.split(" ")[0]) for s in self._get_checked_items(self.image_index_dropdown)]
+
+        if not selected_display_names or 0 not in selected_indexes:
+            return
+
+        # 1. Setup the UI
+        self.progress_popup = MultiProgressDialog(self)
+        self.progress_popup.setWindowModality(Qt.WindowModality.NonModal) # Important!
+        self.active_threads = [] # Keep references alive
+
+        # 2. Create the Shared Memory Manager
+        # This allows the separate processes to "talk" to this GUI thread
+        self.process_manager = Manager()
+        self.shared_progress = self.process_manager.dict()
+
+        # 3. Launch Workers
         for display_name in selected_display_names:
-            for fp, short_label in self.participant_map.items():
-                expected_display = f"{short_label} ({os.path.basename(fp)})"
-                if display_name == expected_display:
-                    selected_file_paths.append(fp)
-                    break
-
-        selected_indexes = [
-            int(s.split(" ")[0]) for s in self._get_checked_items(self.image_index_dropdown)
-        ]
-
-        # Pass the necessary arguments to each method
-        for file_path in selected_file_paths:
-            haemo_obj = self.haemo_dict.get(file_path)
-
-            if haemo_obj is None:
-                continue
-
-            #cha = self.cha_dict.get(file_path)
-
-            for idx in selected_indexes:
-                if idx == 0:
-                    try:
-                        flares.fold_channels(haemo_obj)
-                    except:
-                        msg_box = QMessageBox()
+            file_path = next((fp for fp, lbl in self.participant_map.items()
+                              if f"{lbl} ({os.path.basename(fp)})" == display_name), None)
+
+            if not file_path: continue
+
+            # Use .copy() to ensure thread safety during pickling
+            haemo_obj = self.haemo_dict.get(file_path).copy()
+            label = self.participant_map[file_path]
+
+            # Initialize Shared Dict Entry
+            self.shared_progress[label] = 0
+
+            # Add Bar to Popup
+            hbo_count = len(haemo_obj.copy().pick('hbo').ch_names) # Just for display logic if needed
+            self.progress_popup.add_participant(label, 100) # We use 0-100% standard
+
+            # Create Thread & Worker
+            thread = QThread()
+            # Pass the shared dict to the worker
+            worker = FoldWorker(haemo_obj, label, self.shared_progress)
+            worker.moveToThread(thread)
+
+            # Connect Signals
+            thread.started.connect(worker.run)
+
+            # Lambda capture to ensure correct label is used for each bar
+            worker.progress_sig.connect(lambda val, l=label: self.progress_popup.update_bar(l, val))
+            worker.finished_sig.connect(lambda imgs, l=label: self.on_fold_finished(imgs, l))
+            worker.error_sig.connect(self.on_fold_error)
+
+            # Cleanup Logic
+            worker.finished_sig.connect(thread.quit)
+            worker.finished_sig.connect(worker.deleteLater)
+            thread.finished.connect(thread.deleteLater)
+
+            # Store references to prevent garbage collection
+            self.active_threads.append({'thread': thread, 'worker': worker})
+
+            thread.start()
+
+        self.progress_popup.show()
+
+    def on_fold_finished(self, png_dict, label):
+        # 1. Close progress popup if all threads are done
+        # We filter the list to see if any threads are still running
+        still_running = any(t['thread'].isRunning() for t in self.active_threads)
+        if not still_running:
+            self.progress_popup.close()
+            # Optional: Shutdown manager when absolutely done to free resources
+            # self.process_manager.shutdown()
+
+        # 2. Display Images
+        if not hasattr(self, 'result_windows'):
+            self.result_windows = []
+
+        offset = len(self.result_windows) * 30 # Cascade offset
+
+        for key, png_data in png_dict.items():
+            popup = QDialog(self)
+            popup.setWindowTitle(f"{label} - fOLD {key.capitalize()}")
+
+            # ... (Your existing layout/image code) ...
+
+            # Resize and Position
+            if key == "main":
+                popup.resize(1664, 936)
+                popup.move(100 + offset, 100 + offset)
+            else:
+                popup.resize(450, 800)
+                popup.move(1770 + offset, 100 + offset)
+
+            popup.show()
+            self.result_windows.append(popup)
+
+    def on_fold_error(self, error_msg):
+        if hasattr(self, 'progress_popup'):
+            self.progress_popup.close()
+        msg_box = QMessageBox(self)
         msg_box.setIcon(QMessageBox.Icon.Critical)
         msg_box.setWindowTitle("Something went wrong!")
         message = (
             "Unable to locate the fOLD files!<br><br>"
             f"Please download the 'Supplementary' folder from <a href='https://github.com/nirx/fOLD-public'>here</a>. "
-            "Once the folder is downloaded, place it in C:/Users/your username/mne_data/fOLD/fOLD-public-master/Supplementary.<br><br>"
-            "If you are not using Windows, please go to the FLARES Git page for more information."
+            "Once downloaded, place it in C:/Users/your username/mne_data/fOLD/fOLD-public-master/Supplementary."
         )
         msg_box.setTextFormat(Qt.TextFormat.RichText)
         msg_box.setText(message)
-        msg_box.setTextInteractionFlags(Qt.TextInteractionFlag.TextBrowserInteraction)
-        msg_box.setStandardButtons(QMessageBox.StandardButton.Ok)
         msg_box.exec()
-                else:
-                    print(f"No method defined for index {idx}")


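The "Lambda capture" comments above rely on Python's late-binding closures: without the l=label default argument, every connected signal handler would see the label of the last loop iteration. A minimal illustration of the difference:

    # Sketch only: why the l=label default argument matters inside the loop.
    callbacks_wrong = [lambda: print(label) for label in ["P1", "P2"]]
    callbacks_right = [lambda l=label: print(l) for label in ["P1", "P2"]]

    for cb in callbacks_wrong:
        cb()  # prints "P2" twice - the closure looks label up after the loop has finished
    for cb in callbacks_right:
        cb()  # prints "P1" then "P2" - the default argument froze the value per iteration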
@@ -4252,11 +4431,6 @@ class MainApplication(QMainWindow):
         right_content_layout = QVBoxLayout()
         self.right_content_widget.setLayout(right_content_layout)

-        # Option selector dropdown
-        self.option_selector = QComboBox()
-        self.option_selector.addItems(["FIR"])
-        right_content_layout.addWidget(self.option_selector)
-
         # Container for the sections
         self.rows_container = QWidget()
         self.rows_layout = QVBoxLayout()
@@ -4309,9 +4483,6 @@ class MainApplication(QMainWindow):
         self.right_container.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)
         self.right_scroll_area.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)

-        # Store ParamSection widgets
-        self.option_selector.currentIndexChanged.connect(self.update_sections)
-
         # Initial build
         self.update_sections(0)

@@ -5147,8 +5318,8 @@ class MainApplication(QMainWindow):
         self.files_results[file_path] = result_tuple

         # Initialize storage
-        # TODO: Is this check needed?
+        # TODO: Is this check needed? Edit: yes very much so
-        if not hasattr(self, 'raw_haemo_dict'):
+        if getattr(self, 'raw_haemo_dict', None) is None:
             self.raw_haemo_dict = {}
             self.epochs_dict = {}
             self.fig_bytes_dict = {}
@@ -5748,7 +5919,7 @@ def show_critical_error(error_msg):
     log_link = f"file:///{log_path2}"
     autosave_link = f"file:///{autosave_path}"

-    window.save_project(True)
+    window.save_project(True) # TODO: If the window is the one to crash, the file can't get saved. Could be fine as the window is what was storing the data to begin with?

     message = (
         "FLARES has encountered an unrecoverable error and needs to close.<br><br>"