Compare commits
5 Commits: v1.1.7...1b78f1904d
| Author | SHA1 | Date |
|---|---|---|
| | 1b78f1904d | |
| | 9779a63a9c | |
| | 2ecd357aca | |
| | fe4e8904b4 | |
| | 473c945563 | |
.gitignore (vendored): 1 change
```diff
@@ -174,3 +174,4 @@ cython_debug/
 # PyPI configuration file
 .pypirc
 
+/individual_images
```
changelog.md: 19 changes
```diff
@@ -1,3 +1,22 @@
+# Version 1.2.0
+
+- Added new parameters to the right side of the screen
+- These parameters include SHOW_OPTODE_NAMES, SECONDS_TO_STRIP_HR, MAX_LOW_HR, MAX_HIGH_HR, SMOOTHING_WINDOW_HR, HEART_RATE_WINDOW, BAD_CHANNELS_HANDLING, MAX_DIST, MIN_NEIGHBORS, L_TRANS_BANDWIDTH, H_TRANS_BANDWIDTH, RESAMPLE, RESAMPLE_FREQ, STIM_DUR, HRF_MODEL, HIGH_PASS, DRIFT_ORDER, FIR_DELAYS, MIN_ONSET, OVERSAMPLING, SHORT_CHANNEL_REGRESSION, NOISE_MODEL, BINS, and VERBOSITY.
+- All the new parameters have default values matching the underlying values in version 1.1.7
+- The order of the parameters has changed to match the order that the code runs when the Process button is clicked
+- Moved TIME_WINDOW_START and TIME_WINDOW_END to the 'Other' category
+- Fixed a bug causing SCI to not work when HEART_RATE was set to False
+- Bad channels can now be dealt with by taking no action, removing them completely, or interpolating them based on their neighbours. Interpolation remains the default option
+- Fixed an underlying deprecation warning
+- Fixed an issue causing some overlay elements to not render on the brain for certain devices
+- Fixed a crash when rendering some Inter-Group images with only one participant in a group
+- Fixed a crash when attempting to fOLD channels without the fOLD dataset installed
+- Lowered the number of rectangles in the progress bar to 24 after combining some actions
+- Fixed the User Guide window to properly display information about the 24 stages and added a link to the Git wiki page
+- MAX_WORKERS should now properly respect the value set
+- Added a new CSV export option to be used by other applications
+
 # Version 1.1.7
 
 - Fixed a bug where having both a L_FREQ and H_FREQ would cause only the L_FREQ to be used
```
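The MAX_WORKERS entry above caps how many participant files are processed in parallel. As a minimal sketch of how such a cap typically feeds `ProcessPoolExecutor` (which flares.py imports, per the diff below): the `process_all` helper and its arguments are hypothetical, only `process_participant` and the import come from the diff.

```python
from concurrent.futures import ProcessPoolExecutor, as_completed

def process_all(file_paths, max_workers=4):
    # Hypothetical driver: cap parallelism at MAX_WORKERS. Passing
    # max_workers=None lets Python pick a count from the CPU count,
    # matching the "automatic amount" described in the GUI help text.
    results = {}
    with ProcessPoolExecutor(max_workers=max_workers) as pool:
        # process_participant is the per-file entry point defined in flares.py
        futures = {pool.submit(process_participant, path): path for path in file_paths}
        for future in as_completed(futures):
            results[futures[future]] = future.result()
    return results
```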
flares.py: 363 changes
```diff
@@ -21,6 +21,7 @@ import os.path as op
 import re
 import traceback
 from concurrent.futures import ProcessPoolExecutor, as_completed
+from queue import Empty
 
 # External library imports
 import matplotlib.pyplot as plt
```
```diff
@@ -53,10 +54,10 @@ from scipy.signal import welch, butter, filtfilt  # type: ignore
 import pywt  # type: ignore
 import neurokit2 as nk  # type: ignore
 
-# Backen visualization needed to be defined for pyinstaller
+# Backend visualization needed to be defined for pyinstaller
 import pyvistaqt  # type: ignore
-#import vtkmodules.util.data_model
-#import vtkmodules.util.execution_model
+import vtkmodules.util.data_model
+import vtkmodules.util.execution_model
 
 # External library imports for mne
 from mne import (
```
```diff
@@ -89,9 +90,10 @@ from mne_nirs.io.fold import fold_channel_specificity  # type: ignore
 from mne_nirs.preprocessing import peak_power  # type: ignore
 from mne_nirs.statistics._glm_level_first import RegressionResults  # type: ignore
 
+# Needs to be set for mne
 os.environ["SUBJECTS_DIR"] = str(data_path()) + "/subjects"  # type: ignore
 
-
+# TODO: Tidy this up
 FIXED_CATEGORY_COLORS = {
     "SCI only": "skyblue",
     "PSP only": "salmon",
```
```diff
@@ -112,10 +114,6 @@ FIXED_CATEGORY_COLORS = {
 }
 
 
-AGE: float
-GENDER: str
-
-# SECONDS_TO_STRIP: int
 DOWNSAMPLE: bool
 DOWNSAMPLE_FREQUENCY: int
 
```
```diff
@@ -123,21 +121,37 @@ TRIM: bool
 SECONDS_TO_KEEP: float
 
 OPTODE_PLACEMENT: bool
+SHOW_OPTODE_NAMES: bool
 
 HEART_RATE: bool
 
+SHORT_CHANNEL: bool
+SHORT_CHANNEL_THRESH: float
+LONG_CHANNEL_THRESH: float
+
+HEART_RATE: bool
+SECONDS_TO_STRIP_HR: int
+MAX_LOW_HR: int
+MAX_HIGH_HR: int
+SMOOTHING_WINDOW_HR: int
+HEART_RATE_WINDOW: int
 
 SCI: bool
 SCI_TIME_WINDOW: int
 SCI_THRESHOLD: float
 
 SNR: bool
-# SNR_TIME_WINDOW : int
+# SNR_TIME_WINDOW : int #TODO: is this needed?
 SNR_THRESHOLD: float
 
 PSP: bool
 PSP_TIME_WINDOW: int
 PSP_THRESHOLD: float
 
+BAD_CHANNELS_HANDLING: str
+MAX_DIST: float
+MIN_NEIGHBORS: int
+
 TDDR: bool
 
 WAVELET: bool
```
```diff
@@ -145,57 +159,41 @@ IQR: float
 WAVELET_TYPE: str
 WAVELET_LEVEL: int
 
-HEART_RATE = True  # True if heart rate should be calculated. This helps the SCI, PSP, and SNR methods to be more accurate.
-SECONDS_TO_STRIP_HR =5 # Amount of seconds to temporarily strip from the data to calculate heart rate more effectively. Useful if participant removed cap while still recording.
-MAX_LOW_HR = 40 # Any heart rate values lower than this will be set to this value.
-MAX_HIGH_HR = 200 # Any heart rate values higher than this will be set to this value.
-SMOOTHING_WINDOW_HR = 100 # Heart rate will be calculated as a rolling average over this many amount of samples.
-HEART_RATE_WINDOW = 25 # Amount of BPM above and below the calculated average to use for a range of resting BPM.
-
 ENHANCE_NEGATIVE_CORRELATION: bool
 
 FILTER: bool
 L_FREQ: float
 H_FREQ: float
+L_TRANS_BANDWIDTH: float
+H_TRANS_BANDWIDTH: float
 
-SHORT_CHANNEL: bool
-SHORT_CHANNEL_THRESH: float
-LONG_CHANNEL_THRESH: float
+RESAMPLE: bool
+RESAMPLE_FREQ: int
+STIM_DUR: float
+HRF_MODEL: str
+DRIFT_MODEL: str
+HIGH_PASS: float
+DRIFT_ORDER: int
+FIR_DELAYS: range
+MIN_ONSET: int
+OVERSAMPLING: int
 REMOVE_EVENTS: list
+SHORT_CHANNEL_REGRESSION: bool
+
+NOISE_MODEL: str
+BINS: int
+N_JOBS: int
 
 TIME_WINDOW_START: int
 TIME_WINDOW_END: int
+MAX_WORKERS: int
+VERBOSITY: bool
 
-DRIFT_MODEL: str
-
-VERBOSITY = True
-
-
-# FIXME: Shouldn't need each ordering - just order it before checking
-FIXED_CATEGORY_COLORS = {
-    "SCI only": "skyblue",
-    "PSP only": "salmon",
-    "SNR only": "lightgreen",
-    "PSP + SCI": "orange",
-    "SCI + SNR": "violet",
-    "PSP + SNR": "gold",
-    "SCI + PSP": "orange",
-    "SNR + SCI": "violet",
-    "SNR + PSP": "gold",
-    "PSP + SNR + SCI": "gray",
-    "SCI + PSP + SNR": "gray",
-    "SCI + SNR + PSP": "gray",
-    "PSP + SCI + SNR": "gray",
-    "PSP + SNR + SCI": "gray",
-    "SNR + SCI + PSP": "gray",
-    "SNR + PSP + SCI": "gray",
-}
-
-
-AGE = 25
+AGE = 25  # Assume 25 if not set from the GUI. This will result in a reasonable PPF
 GENDER = ""
 GROUP = "Default"
 
+# These are parameters that are required for the analysis
 REQUIRED_KEYS: dict[str, Any] = {
 
     # "SECONDS_TO_STRIP": int,
```
```diff
@@ -262,7 +260,7 @@ PLATFORM_NAME = platform.system().lower()
 # Configure logging to file with timestamps and realtime flush
 if PLATFORM_NAME == 'darwin':
     logging.basicConfig(
-        filename=os.path.join(os.path.dirname(sys.executable), "../../../fnirs_analysis.log"),
+        filename=os.path.join(os.path.dirname(sys.executable), "../../../fnirs_analysis.log"),  # Needed to get out of the bundled application
         level=logging.INFO,
         format='%(asctime)s - %(processName)s - %(levelname)s - %(message)s',
         datefmt='%Y-%m-%d %H:%M:%S',
```
```diff
@@ -320,8 +318,6 @@ def set_metadata(file_path, metadata: dict[str, Any]) -> None:
         val = file_metadata.get(key, None)
         if val not in (None, '', [], {}, ()):  # check for "empty" values
             globals()[key] = val
-from queue import Empty  # This works with multiprocessing.Manager().Queue()
-
 
 
 def gui_entry(config: dict[str, Any], gui_queue: Queue, progress_queue: Queue) -> None:
     def forward_progress():
```
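For readers unfamiliar with the `globals()[key] = val` pattern in `set_metadata` above: it rebinds the module-level parameter names declared earlier in flares.py. A toy illustration of the mechanism (hypothetical values, not project data):

```python
AGE = 25  # module-level default, as in flares.py

def apply_metadata(file_metadata):
    for key, val in file_metadata.items():
        if val not in (None, '', [], {}, ()):  # skip "empty" values, as above
            globals()[key] = val  # rebind the module-level name

apply_metadata({"AGE": 31, "GENDER": ""})
print(AGE)  # 31; GENDER was left alone because its value was empty
```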
```diff
@@ -825,7 +821,7 @@ def get_hbo_hbr_picks(raw):
     return hbo_picks, hbr_picks, hbo_wl, hbr_wl
 
 
-def interpolate_fNIRS_bads_weighted_average(raw, bad_channels, max_dist=0.03, min_neighbors=2):
+def interpolate_fNIRS_bads_weighted_average(raw, max_dist=0.03, min_neighbors=2):
     """
     Interpolate bad fNIRS channels using a distance-weighted average of nearby good channels.
 
```
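The docstring above describes distance-weighted interpolation. A minimal NumPy sketch of that idea, assuming channel positions in metres; this is illustrative only, not the project's implementation:

```python
import numpy as np

def weighted_average_fill(bad_pos, good_pos, good_data, max_dist=0.03, min_neighbors=2):
    # Distance from the bad channel to every good channel.
    dists = np.linalg.norm(good_pos - bad_pos, axis=1)
    nearby = dists < max_dist
    if nearby.sum() < min_neighbors:
        return None  # too few neighbours; leave the channel alone
    weights = 1.0 / dists[nearby]       # closer channels count more
    weights /= weights.sum()            # normalise to sum to 1
    return weights @ good_data[nearby]  # weighted mean time series
```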
```diff
@@ -1117,17 +1113,17 @@ def mark_bads(raw, bad_sci, bad_snr, bad_psp):
 
 
 def filter_the_data(raw_haemo):
-    # --- STEP 5: Filtering (0.01–0.2 Hz bandpass) ---
+    # --- STEP 5: Filtering (0.01-0.2 Hz bandpass) ---
     fig_filter = raw_haemo.compute_psd(fmax=3).plot(
         average=True, color="r", show=False, amplitude=True
     )
 
     if L_FREQ == 0 and H_FREQ != 0:
-        raw_haemo = raw_haemo.filter(l_freq=None, h_freq=H_FREQ, h_trans_bandwidth=0.02)
+        raw_haemo = raw_haemo.filter(l_freq=None, h_freq=H_FREQ, h_trans_bandwidth=H_TRANS_BANDWIDTH)
     elif L_FREQ != 0 and H_FREQ == 0:
-        raw_haemo = raw_haemo.filter(l_freq=L_FREQ, h_freq=None, l_trans_bandwidth=0.002)
+        raw_haemo = raw_haemo.filter(l_freq=L_FREQ, h_freq=None, l_trans_bandwidth=L_TRANS_BANDWIDTH)
     elif L_FREQ != 0 and H_FREQ != 0:
-        raw_haemo = raw_haemo.filter(l_freq=L_FREQ, h_freq=H_FREQ, l_trans_bandwidth=0.002, h_trans_bandwidth=0.02)
+        raw_haemo = raw_haemo.filter(l_freq=L_FREQ, h_freq=H_FREQ, l_trans_bandwidth=L_TRANS_BANDWIDTH, h_trans_bandwidth=H_TRANS_BANDWIDTH)
     else:
         print("No filter")
         #raw_haemo = raw_haemo.filter(l_freq=None, h_freq=0.4, h_trans_bandwidth=0.2)
```
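With the GUI defaults added in main.py below (L_FREQ=0.005, H_FREQ=0.3, and 0.002 for both transition bandwidths), the bandpass branch above resolves to a call like this, shown with MNE's standard `Raw.filter` keywords as an illustration:

```python
raw_haemo = raw_haemo.filter(
    l_freq=0.005,             # high-pass edge
    h_freq=0.3,               # low-pass edge
    l_trans_bandwidth=0.002,  # transition width below the passband
    h_trans_bandwidth=0.002,  # transition width above the passband
)
```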
```diff
@@ -1258,7 +1254,7 @@ def epochs_calculations(raw_haemo, events, event_dict):
             continue
 
         data = evoked.data[picks_idx, :].mean(axis=0)
-        t_start, t_end = 0, 15
+        t_start, t_end = 0, 15  #TODO: Is this in seconds? or is it 1hz input that makes it 15s?
         times_mask = (evoked.times >= t_start) & (evoked.times <= t_end)
         data_segment = data[times_mask]
         times_segment = evoked.times[times_mask]
```
```diff
@@ -1307,33 +1303,53 @@ def epochs_calculations(raw_haemo, events, event_dict):
 
 def make_design_matrix(raw_haemo, short_chans):
 
-    raw_haemo.resample(1, npad="auto")
-    raw_haemo._data = raw_haemo._data * 1e6
+    events_to_remove = REMOVE_EVENTS
+    filtered_annotations = [ann for ann in raw_haemo.annotations if ann['description'] not in events_to_remove]
+
+    new_annot = Annotations(
+        onset=[ann['onset'] for ann in filtered_annotations],
+        duration=[ann['duration'] for ann in filtered_annotations],
+        description=[ann['description'] for ann in filtered_annotations]
+    )
+
+    # Set the new annotations
+    raw_haemo.set_annotations(new_annot)
+
+    if RESAMPLE:
+        raw_haemo.resample(RESAMPLE_FREQ, npad="auto")
+        raw_haemo._data = raw_haemo._data * 1e6
+        try:
+            short_chans.resample(RESAMPLE_FREQ)
+        except:
+            pass
 
     # 2) Create design matrix
-    if SHORT_CHANNEL:
-        short_chans.resample(1)
+    if SHORT_CHANNEL_REGRESSION:
         design_matrix = make_first_level_design_matrix(
             raw=raw_haemo,
-            hrf_model='fir',
-            stim_dur=0.5,
-            fir_delays=range(15),
+            stim_dur=STIM_DUR,
+            hrf_model=HRF_MODEL,
             drift_model=DRIFT_MODEL,
-            high_pass=0.01,
-            oversampling=1,
-            min_onset=-125,
+            high_pass=HIGH_PASS,
+            drift_order=DRIFT_ORDER,
+            fir_delays=range(15),
             add_regs=short_chans.get_data().T,
-            add_reg_names=short_chans.ch_names
+            add_reg_names=short_chans.ch_names,
+            min_onset=MIN_ONSET,
+            oversampling=OVERSAMPLING
         )
     else:
         design_matrix = make_first_level_design_matrix(
             raw=raw_haemo,
-            hrf_model='fir',
-            stim_dur=0.5,
-            fir_delays=range(15),
+            stim_dur=STIM_DUR,
+            hrf_model=HRF_MODEL,
             drift_model=DRIFT_MODEL,
-            high_pass=0.01,
-            oversampling=1,
-            min_onset=-125,
+            high_pass=HIGH_PASS,
+            drift_order=DRIFT_ORDER,
+            fir_delays=range(15),
+            min_onset=MIN_ONSET,
+            oversampling=OVERSAMPLING
         )
 
     print(design_matrix.head())
```
```diff
@@ -2569,7 +2585,10 @@ def plot_fir_model_results(df, raw_haemo, dm, selected_event, l_bound, u_bound):
     dm_cols_activity = np.where([f"{selected_event}" in c for c in dm.columns])[0]
     dm = dm[[dm.columns[i] for i in dm_cols_activity]]
 
-    lme = smf.mixedlm("theta ~ -1 + delay:TidyCond:Chroma", df, groups=df["ID"]).fit()
+    try:
+        lme = smf.mixedlm("theta ~ -1 + delay:TidyCond:Chroma", df, groups=df["ID"]).fit()
+    except:
+        lme = smf.ols("theta ~ -1 + delay:TidyCond:Chroma", df, groups=df["ID"]).fit()  # type: ignore
 
     df_sum = statsmodels_to_results(lme)
     df_sum["delay"] = [int(n) for n in df_sum["delay"]]
```
```diff
@@ -3310,18 +3329,20 @@ def hr_calc(raw):
     return fig, hr1, hr2, low, high
 
 
 
 def process_participant(file_path, progress_callback=None):
 
     fig_individual: dict[str, Figure] = {}
 
-    # Step 1: Load
+    # Step 1: Preprocessing
     raw = load_snirf(file_path)
     fig_raw = raw.plot(duration=raw.times[-1], n_channels=raw.info['nchan'], title="Loaded Raw", show=False)
     fig_individual["Loaded Raw"] = fig_raw
     if progress_callback: progress_callback(1)
-    logger.info("1")
+    logger.info("Step 1 Completed.")
 
+    # Step 2: Trimming
     if TRIM:
         if hasattr(raw, 'annotations') and len(raw.annotations) > 0:
             # Get time of first event
```
```diff
@@ -3329,17 +3350,16 @@ def process_participant(file_path, progress_callback=None):
             trim_time = max(0, first_event_time - SECONDS_TO_KEEP)  # Ensure we don't go negative
             raw.crop(tmin=trim_time)
             # Shift annotation onsets to match new t=0
-            import mne
 
             ann = raw.annotations
-            ann_shifted = mne.Annotations(
+            ann_shifted = Annotations(
                 onset=ann.onset - trim_time,  # shift to start at zero
                 duration=ann.duration,
                 description=ann.description
             )
             data = raw.get_data()
             info = raw.info.copy()
-            raw = mne.io.RawArray(data, info)
+            raw = RawArray(data, info)
             raw.set_annotations(ann_shifted)
 
             logger.info(f"Trimmed raw data: start at {trim_time}s (5s before first event), t=0 at new start")
```
```diff
@@ -3349,185 +3369,178 @@ def process_participant(file_path, progress_callback=None):
     fig_trimmed = raw.plot(duration=raw.times[-1], n_channels=raw.info['nchan'], title="Trimmed Raw", show=False)
     fig_individual["Trimmed Raw"] = fig_trimmed
     if progress_callback: progress_callback(2)
-    logger.info("2")
+    logger.info("Step 2 Completed.")
 
-    # Step 1.5: Verify optode positions
+    # Step 3: Verify Optode Placement
     if OPTODE_PLACEMENT:
-        fig_optodes = raw.plot_sensors(show_names=True, to_sphere=True, show=False)  # type: ignore
+        fig_optodes = raw.plot_sensors(show_names=SHOW_OPTODE_NAMES, to_sphere=True, show=False)  # type: ignore
         fig_individual["Plot Sensors"] = fig_optodes
         if progress_callback: progress_callback(3)
-        logger.info("3")
+        logger.info("Step 3 Completed.")
 
-    # Step 2: Bad from SCI
+    # Step 4: Short/Long Channels
+    if SHORT_CHANNEL:
+        short_chans = get_short_channels(raw, max_dist=SHORT_CHANNEL_THRESH)
+        fig_short_chans = short_chans.plot(duration=raw.times[-1], n_channels=raw.info['nchan'], title="Short Channels Only", show=False)
+        fig_individual["short"] = fig_short_chans
+    else:
+        short_chans = None
+    get_long_channels(raw, min_dist=SHORT_CHANNEL_THRESH, max_dist=LONG_CHANNEL_THRESH)  # Don't update the existing raw
+    if progress_callback: progress_callback(4)
+    logger.info("Step 4 Completed.")
+
+    # Step 5: Heart Rate
     if HEART_RATE:
         fig, hr1, hr2, low, high = hr_calc(raw)
         fig_individual["PSD"] = fig
         fig_individual['HeartRate_PSD'] = hr1
         fig_individual['HeartRate_Time'] = hr2
-        if progress_callback: progress_callback(4)
-        logger.info("4")
+        if progress_callback: progress_callback(5)
+        logger.info("Step 5 Completed.")
 
+    # Step 6: Scalp Coupling Index
     bad_sci = []
     if SCI:
-        bad_sci, fig_sci_1, fig_sci_2 = calculate_scalp_coupling(raw, low, high)
+        if HEART_RATE:
+            bad_sci, fig_sci_1, fig_sci_2 = calculate_scalp_coupling(raw, low, high)
+        else:
+            bad_sci, fig_sci_1, fig_sci_2 = calculate_scalp_coupling(raw)
         fig_individual["SCI1"] = fig_sci_1
         fig_individual["SCI2"] = fig_sci_2
-        if progress_callback: progress_callback(5)
-        logger.info("5")
+        if progress_callback: progress_callback(6)
+        logger.info("Step 6 Completed.")
 
-    # Step 2: Bad from SNR
+    # Step 7: Signal to Noise Ratio
     bad_snr = []
     if SNR:
         bad_snr, fig_snr = calculate_signal_noise_ratio(raw)
         fig_individual["SNR1"] = fig_snr
-        if progress_callback: progress_callback(6)
-        logger.info("6")
+        if progress_callback: progress_callback(7)
+        logger.info("Step 7 Completed.")
 
-    # Step 3: Bad from PSP
+    # Step 8: Peak Spectral Power
     bad_psp = []
     if PSP:
         bad_psp, fig_psp1, fig_psp2 = calculate_peak_power(raw)
         fig_individual["PSP1"] = fig_psp1
         fig_individual["PSP2"] = fig_psp2
-        if progress_callback: progress_callback(7)
-        logger.info("7")
-
-    # Step 4: Mark the bad channels
-    raw, fig_dropped, fig_raw_before, bad_channels = mark_bads(raw, bad_sci, bad_snr, bad_psp)
-    if fig_dropped and fig_raw_before is not None:
-        fig_individual["fig2"] = fig_dropped
-        fig_individual["fig3"] = fig_raw_before
     if progress_callback: progress_callback(8)
-    logger.info("8")
+    logger.info("Step 8 Completed.")
 
-    # Step 5: Interpolate the bad channels
-    if bad_channels:
-        raw, fig_raw_after = interpolate_fNIRS_bads_weighted_average(raw, bad_channels)
-        fig_individual["fig4"] = fig_raw_after
+    # Step 9: Bad Channels Handling
+    if BAD_CHANNELS_HANDLING != "None":
+        raw, fig_dropped, fig_raw_before, bad_channels = mark_bads(raw, bad_sci, bad_snr, bad_psp)
+        if fig_dropped and fig_raw_before is not None:
+            fig_individual["fig2"] = fig_dropped
+            fig_individual["fig3"] = fig_raw_before
+        if bad_channels:
+            if BAD_CHANNELS_HANDLING == "Interpolate":
+                raw, fig_raw_after = interpolate_fNIRS_bads_weighted_average(raw, max_dist=MAX_DIST, min_neighbors=MIN_NEIGHBORS)
+                fig_individual["fig4"] = fig_raw_after
+            elif BAD_CHANNELS_HANDLING == "Remove":
+                pass
+                #TODO: Is there more needed here?
     if progress_callback: progress_callback(9)
-    logger.info("9")
+    logger.info("Step 9 Completed.")
 
-    # Step 6: Optical Density
+    # Step 10: Optical Density
     raw_od = optical_density(raw)
     fig_raw_od = raw_od.plot(duration=raw.times[-1], n_channels=raw.info['nchan'], title="Optical Density", show=False)
     fig_individual["Optical Density"] = fig_raw_od
     if progress_callback: progress_callback(10)
-    logger.info("10")
+    logger.info("Step 10 Completed.")
 
-    # Step 7: TDDR
+    # Step 11: Temporal Derivative Distribution Repair Filtering
     if TDDR:
         raw_od = temporal_derivative_distribution_repair(raw_od)
         fig_raw_od_tddr = raw_od.plot(duration=raw.times[-1], n_channels=raw.info['nchan'], title="After TDDR (Motion Correction)", show=False)
         fig_individual["TDDR"] = fig_raw_od_tddr
         if progress_callback: progress_callback(11)
-        logger.info("11")
+        logger.info("Step 11 Completed.")
 
+    # Step 12: Wavelet Filtering
     if WAVELET:
         raw_od, fig = calculate_and_apply_wavelet(raw_od)
         fig_individual["Wavelet"] = fig
         if progress_callback: progress_callback(12)
-        logger.info("12")
+        logger.info("Step 12 Completed.")
 
-    # Step 8: BLL
+    # Step 13: Haemoglobin Concentration
     raw_haemo = beer_lambert_law(raw_od, ppf=calculate_dpf(file_path))
     fig_raw_haemo_bll = raw_haemo.plot(duration=raw_haemo.times[-1], n_channels=raw_haemo.info['nchan'], title="HbO and HbR Signals", show=False)
     fig_individual["BLL"] = fig_raw_haemo_bll
     if progress_callback: progress_callback(13)
-    logger.info("13")
+    logger.info("Step 13 Completed.")
 
-    # Step 9: ENC
+    # Step 14: Enhance Negative Correlation
     if ENHANCE_NEGATIVE_CORRELATION:
         raw_haemo = enhance_negative_correlation(raw_haemo)
-        fig_raw_haemo_enc = raw_haemo.plot(duration=raw_haemo.times[-1], n_channels=raw_haemo.info['nchan'], title="HbO and HbR Signals", show=False)
+        fig_raw_haemo_enc = raw_haemo.plot(duration=raw_haemo.times[-1], n_channels=raw_haemo.info['nchan'], title="Enhance Negative Correlation", show=False)
         fig_individual["ENC"] = fig_raw_haemo_enc
         if progress_callback: progress_callback(14)
-        logger.info("14")
+        logger.info("Step 14 Completed.")
 
-    # Step 10: Filter
+    # Step 15: Filter
     if FILTER:
         raw_haemo, fig_filter, fig_raw_haemo_filter = filter_the_data(raw_haemo)
         fig_individual["filter1"] = fig_filter
         fig_individual["filter2"] = fig_raw_haemo_filter
         if progress_callback: progress_callback(15)
-        logger.info("15")
+        logger.info("Step 15 Completed.")
 
-    # Step 11: Get short / long channels
-    if SHORT_CHANNEL:
-        short_chans = get_short_channels(raw_haemo, max_dist=SHORT_CHANNEL_THRESH)
-        fig_short_chans = short_chans.plot(duration=raw_haemo.times[-1], n_channels=raw_haemo.info['nchan'], title="Short Channels Only", show=False)
-        fig_individual["short"] = fig_short_chans
-    else:
-        short_chans = None
-    raw_haemo = get_long_channels(raw_haemo, min_dist=SHORT_CHANNEL_THRESH, max_dist=LONG_CHANNEL_THRESH)
-    if progress_callback: progress_callback(16)
-    logger.info("16")
-
-    # Step 12: Events from annotations
+    # Step 16: Extracting Events
     events, event_dict = events_from_annotations(raw_haemo)
     fig_events = plot_events(events, event_id=event_dict, sfreq=raw_haemo.info["sfreq"], show=False)
     fig_individual["events"] = fig_events
-    if progress_callback: progress_callback(17)
-    logger.info("17")
+    if progress_callback: progress_callback(16)
+    logger.info("Step 16 Completed.")
 
-    # Step 13: Epoch calculations
+    # Step 17: Epoch Calculations
     epochs, fig_epochs = epochs_calculations(raw_haemo, events, event_dict)
-    for name, fig in fig_epochs:  # Unpack the tuple here
-        fig_individual[f"epochs_{name}"] = fig  # Store only the figure, not the name
-    if progress_callback: progress_callback(18)
-    logger.info("18")
+    for name, fig in fig_epochs:
+        fig_individual[f"epochs_{name}"] = fig
+    if progress_callback: progress_callback(17)
+    logger.info("Step 17 Completed.")
 
-    # Step 14: Design Matrix
-    events_to_remove = REMOVE_EVENTS
-
-    filtered_annotations = [ann for ann in raw.annotations if ann['description'] not in events_to_remove]
-
-    new_annot = Annotations(
-        onset=[ann['onset'] for ann in filtered_annotations],
-        duration=[ann['duration'] for ann in filtered_annotations],
-        description=[ann['description'] for ann in filtered_annotations]
-    )
-
-    # Set the new annotations
-    raw_haemo.set_annotations(new_annot)
-
+    # Step 18: Design Matrix
     design_matrix, fig_design_matrix = make_design_matrix(raw_haemo, short_chans)
     fig_individual["Design Matrix"] = fig_design_matrix
-    if progress_callback: progress_callback(19)
-    logger.info("19")
+    if progress_callback: progress_callback(18)
+    logger.info("Step 18 Completed.")
 
-    # Step 15: Run GLM
-    glm_est = run_glm(raw_haemo, design_matrix)
+    # Step 19: Run GLM
+    glm_est = run_glm(raw_haemo, design_matrix, noise_model=NOISE_MODEL, bins=BINS, n_jobs=N_JOBS, verbose=VERBOSITY)
     # Not used AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\nilearn\glm\contrasts.py
     # Yes used AppData\Local\Packages\PythonSoftwareFoundation.Python.3.13_qbz5n2kfra8p0\LocalCache\local-packages\Python313\site-packages\mne_nirs\utils\_io.py
 
-    # The p-value is calculated from this t-statistic using the Student’s t-distribution with appropriate degrees of freedom.
+    # The p-value is calculated from this t-statistic using the Student's t-distribution with appropriate degrees of freedom.
     # p_value = 2 * stats.t.cdf(-abs(t_statistic), df)
     # It is a two-tailed p-value.
     # It says how likely it is to observe the effect you did (or something more extreme) if the true effect was zero (null hypothesis).
-    # A small p-value (e.g., < 0.05) suggests the effect is unlikely to be zero — it’s "statistically significant."
+    # A small p-value (e.g., < 0.05) suggests the effect is unlikely to be zero — it's "statistically significant."
     # A large p-value means the data do not provide strong evidence that the effect is different from zero.
 
-    if progress_callback: progress_callback(20)
-    logger.info("20")
+    if progress_callback: progress_callback(19)
+    logger.info("19")
 
-    # Step 16: Plot GLM results
+    # Step 20: Generate GLM Results
     fig_glm_result = plot_glm_results(file_path, raw_haemo, glm_est, design_matrix)
     for name, fig in fig_glm_result:
         fig_individual[f"GLM {name}"] = fig
-    if progress_callback: progress_callback(21)
-    logger.info("21")
+    if progress_callback: progress_callback(20)
+    logger.info("20")
 
-    # Step 17: Plot channel significance
+    # Step 21: Generate Channel Significance
     fig_significance = individual_significance(raw_haemo, glm_est)
     for name, fig in fig_significance:
         fig_individual[f"Significance {name}"] = fig
-    if progress_callback: progress_callback(22)
-    logger.info("22")
+    if progress_callback: progress_callback(21)
+    logger.info("21")
 
-    # Step 18: cha, con, roi
+    # Step 22: Generate Channel, Region of Interest, and Contrast Results
     cha = glm_est.to_dataframe()
 
     # HACK: Comment out line 588 (self._renderer.show()) in _brain.py from MNE
```
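The p-value comments in the hunk above amount to this small computation, a sketch of the quoted formula using scipy, not code from the repository:

```python
from scipy import stats

def two_tailed_p(t_statistic, df):
    # Probability of observing an effect at least this extreme
    # if the true effect is zero, per the comments above.
    return 2 * stats.t.cdf(-abs(t_statistic), df)

print(two_tailed_p(2.1, 40))  # ~0.042, significant at the 0.05 level
```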
```diff
@@ -3580,10 +3593,10 @@ def process_participant(file_path, progress_callback=None):
 
         contrast_dict[condition] = contrast_vector
 
-    if progress_callback: progress_callback(23)
-    logger.info("23")
+    if progress_callback: progress_callback(22)
+    logger.info("22")
 
-    # Compute contrast results
+    # Step 23: Compute Contrast Results
     contrast_results = {}
 
     for cond, contrast_vector in contrast_dict.items():
```
```diff
@@ -3594,10 +3607,10 @@ def process_participant(file_path, progress_callback=None):
 
     cha["ID"] = file_path
 
-    if progress_callback: progress_callback(24)
-    logger.info("24")
-
+    if progress_callback: progress_callback(23)
+    logger.info("23")
 
+    # Step 24: Finishing Up
     fig_bytes = convert_fig_dict_to_png_bytes(fig_individual)
 
     sanitize_paths_for_pickle(raw_haemo, epochs)
```
main.py: 468 changes
```diff
@@ -22,11 +22,11 @@ import subprocess
 from pathlib import Path, PurePosixPath
 from datetime import datetime
 from multiprocessing import Process, current_process, freeze_support, Manager
+from enum import Enum, auto
-import numpy as np
-import pandas as pd
 
 # External library imports
+import numpy as np
+import pandas as pd
 import psutil
 import requests
 
```
```diff
@@ -46,7 +46,7 @@ from PySide6.QtGui import QAction, QKeySequence, QIcon, QIntValidator, QDoubleVa
 from PySide6.QtSvgWidgets import QSvgWidget  # needed to show svgs when app is not frozen
 
 
-CURRENT_VERSION = "1.0.0"
+CURRENT_VERSION = "1.2.0"
 
 API_URL = "https://git.research.dezeeuw.ca/api/v1/repos/tyler/flares/releases"
 API_URL_SECONDARY = "https://git.research2.dezeeuw.ca/api/v1/repos/tyler/flares/releases"
```
```diff
@@ -58,7 +58,6 @@ SECTIONS = [
     {
         "title": "Preprocessing",
         "params": [
-            # {"name": "SECONDS_TO_STRIP", "default": 0, "type": int, "help": "Seconds to remove from beginning of all loaded snirf files. Setting this to 0 will remove nothing from the files."},
             {"name": "DOWNSAMPLE", "default": True, "type": bool, "help": "Should the snirf files be downsampled? If this is set to True, DOWNSAMPLE_FREQUENCY will be used as the target frequency to downsample to."},
             {"name": "DOWNSAMPLE_FREQUENCY", "default": 25, "type": int, "help": "Frequency (Hz) to downsample to. If this is set higher than the input data, new data will be interpolated. Only used if DOWNSAMPLE is set to True"},
         ]
```
```diff
@@ -74,12 +73,26 @@ SECTIONS = [
         "title": "Verify Optode Placement",
         "params": [
             {"name": "OPTODE_PLACEMENT", "default": True, "type": bool, "help": "Generate an image for each participant outlining their optode placement."},
+            {"name": "SHOW_OPTODE_NAMES", "default": True, "type": bool, "help": "Should the optode names be written next to their location or not."},
+        ]
+    },
+    {
+        "title": "Short/Long Channels",
+        "params": [
+            {"name": "SHORT_CHANNEL", "default": True, "type": bool, "help": "This should be set to True if the data has a short channel present in the data."},
+            {"name": "SHORT_CHANNEL_THRESH", "default": 0.015, "type": float, "help": "The maximum distance the short channel can be in metres."},
+            {"name": "LONG_CHANNEL_THRESH", "default": 0.045, "type": float, "help": "The maximum distance the long channel can be in metres."},
         ]
     },
     {
         "title": "Heart Rate",
         "params": [
             {"name": "HEART_RATE", "default": True, "type": bool, "help": "Attempt to calculate the participants heart rate."},
+            {"name": "SECONDS_TO_STRIP_HR", "default": 5, "type": int, "help": "Will remove this many seconds from the start and end of the file. Useful if recording before cap is firmly placed, or participant removes cap while still recording."},
+            {"name": "MAX_LOW_HR", "default": 40, "type": int, "help": "Any heart rate windows that average below this value will be rounded up to this value."},
+            {"name": "MAX_HIGH_HR", "default": 200, "type": int, "help": "Any heart rate windows that average above this value will be rounded down to this value."},
+            {"name": "SMOOTHING_WINDOW_HR", "default": 100, "type": int, "help": "How many individual data points to smooth into a single window."},
+            {"name": "HEART_RATE_WINDOW", "default": 25, "type": int, "help": "Used for visualization. Shows the range of the calculated heart rate +- this value."},
         ]
     },
     {
```
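Taken together, the heart-rate parameters above describe a smooth-and-clamp pass over the per-sample BPM estimates. A minimal sketch of that behaviour, illustrative only; the project's `hr_calc` in flares.py is more involved:

```python
import numpy as np

def smooth_and_clamp_hr(bpm, window=100, low=40, high=200):
    # Rolling average over SMOOTHING_WINDOW_HR samples, then round
    # out-of-range values up/down to MAX_LOW_HR / MAX_HIGH_HR.
    kernel = np.ones(window) / window
    smoothed = np.convolve(bpm, kernel, mode="same")
    return np.clip(smoothed, low, high)
```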
```diff
@@ -94,14 +107,12 @@ SECTIONS = [
         "title": "Signal to Noise Ratio",
         "params": [
             {"name": "SNR", "default": True, "type": bool, "help": "Calculate and mark channels bad based on their Signal to Noise Ratio. This metric calculates how much of the observed signal was noise versus how much of it was a useful signal."},
-            # {"name": "SNR_TIME_WINDOW", "default": -1, "type": int, "help": "SNR time window."},
             {"name": "SNR_THRESHOLD", "default": 5.0, "type": float, "help": "SNR threshold (dB). A typical scale would be 0-25, but it is possible for values to be both above and below this range. Higher values correspond to a better signal. If SNR is True, any channels lower than this value will be marked as bad."},
         ]
     },
     {
         "title": "Peak Spectral Power",
         "params": [
-
             {"name": "PSP", "default": True, "type": bool, "help": "Calculate and mark channels bad based on their Peak Spectral Power. This metric calculates the amplitude or strength of a frequency component that is most prominent in a particular frequency range or spectrum."},
             {"name": "PSP_TIME_WINDOW", "default": 3, "type": int, "help": "Independent PSP calculations will be performed in a time window for the duration of the value provided, until the end of the file is reached."},
             {"name": "PSP_THRESHOLD", "default": 0.1, "type": float, "help": "PSP threshold. A typical scale would be 0-0.5, but it is possible for values to be above this range. Higher values correspond to a better signal. If PSP is True, any channels lower than this value will be marked as bad."},
```
```diff
@@ -110,15 +121,15 @@ SECTIONS = [
     {
         "title": "Bad Channels Handling",
         "params": [
-            # {"name": "NOT_IMPLEMENTED", "default": True, "type": bool, "help": "Calculate Peak Spectral Power."},
-            # {"name": "NOT_IMPLEMENTED", "default": 3, "type": int, "help": "PSP time window."},
-            # {"name": "NOT_IMPLEMENTED", "default": 0.1, "type": float, "help": "PSP threshold."},
+            {"name": "BAD_CHANNELS_HANDLING", "default": [], "type": list, "options": ["Interpolate", "Remove", "None"], "exclusive": True, "help": "How should we deal with the bad channels that occurred? Note: Some analysis options will only work when this is set to 'Interpolate'."},
+            {"name": "MAX_DIST", "default": 0.03, "type": float, "help": "The maximum distance to look for neighbours when interpolating. Used only when BAD_CHANNELS_HANDLING is set to 'Interpolate'."},
+            {"name": "MIN_NEIGHBORS", "default": 2, "type": int, "help": "The minimum amount of neighbours needed within the MAX_DIST parameter. Used only when BAD_CHANNELS_HANDLING is set to 'Interpolate'."},
         ]
     },
     {
         "title": "Optical Density",
         "params": [
-            # Intentionally empty (TODO)
+            # NOTE: Intentionally empty
         ]
     },
     {
```
```diff
@@ -139,7 +150,7 @@ SECTIONS = [
     {
         "title": "Haemoglobin Concentration",
         "params": [
-            # Intentionally empty (TODO)
+            # NOTE: Intentionally empty
         ]
     },
     {
```
```diff
@@ -154,24 +165,18 @@ SECTIONS = [
             {"name": "FILTER", "default": True, "type": bool, "help": "Filter the data."},
             {"name": "L_FREQ", "default": 0.005, "type": float, "help": "Any frequencies lower than this value will be removed."},
             {"name": "H_FREQ", "default": 0.3, "type": float, "help": "Any frequencies higher than this value will be removed."},
+            {"name": "L_TRANS_BANDWIDTH", "default": 0.002, "type": float, "help": "How wide the transitional period should be so the data doesn't just drop off on the lower bound."},
+            {"name": "H_TRANS_BANDWIDTH", "default": 0.002, "type": float, "help": "How wide the transitional period should be so the data doesn't just drop off on the upper bound."},
         ]
     },
     {
-        "title": "Short/Long Channels",
-        "params": [
-            {"name": "SHORT_CHANNEL", "default": True, "type": bool, "help": "This should be set to True if the data has a short channel present in the data."},
-            {"name": "SHORT_CHANNEL_THRESH", "default": 0.015, "type": float, "help": "The maximum distance the short channel can be in metres."},
-            {"name": "LONG_CHANNEL_THRESH", "default": 0.045, "type": float, "help": "The maximum distance the long channel can be in metres."},
-        ]
-    },
-    {
-        "title": "Extracting Events",
+        "title": "Extracting Events*",
         "params": [
             #{"name": "EVENTS", "default": True, "type": bool, "help": "Calculate Peak Spectral Power."},
         ]
     },
     {
-        "title": "Epoch Calculations",
+        "title": "Epoch Calculations*",
         "params": [
             #{"name": "EVENTS", "default": True, "type": bool, "help": "Calculate Peak Spectral Power."},
         ]
```
```diff
@@ -179,18 +184,27 @@ SECTIONS = [
     {
         "title": "Design Matrix",
         "params": [
+            {"name": "RESAMPLE", "default": True, "type": bool, "help": "The length of your stimulus."},
+            {"name": "RESAMPLE_FREQ", "default": 1, "type": int, "help": "The length of your stimulus."},
+            {"name": "STIM_DUR", "default": 0.5, "type": float, "help": "The length of your stimulus."},
+            {"name": "HRF_MODEL", "default": "fir", "type": str, "help": "Specifies the hemodynamic response function."},
+            {"name": "DRIFT_MODEL", "default": "cosine", "type": str, "help": "Specifies the desired drift model."},
+            {"name": "HIGH_PASS", "default": 0.01, "type": float, "help": "High-pass frequency in case of a cosine model (in Hz)."},
+            {"name": "DRIFT_ORDER", "default": 1, "type": int, "help": "Order of the drift model (in case it is polynomial)"},
+            {"name": "FIR_DELAYS", "default": "None", "type": range, "help": "In case of FIR design, yields the array of delays used in the FIR model (in scans)."},
+            {"name": "MIN_ONSET", "default": -24, "type": int, "help": "Minimal onset relative to frame times (in seconds)"},
+            {"name": "OVERSAMPLING", "default": 50, "type": int, "help": "Oversampling factor used in temporal convolutions."},
             {"name": "REMOVE_EVENTS", "default": "None", "type": list, "help": "Remove events matching the names provided before generating the Design Matrix"},
-            {"name": "DRIFT_MODEL", "default": "cosine", "type": str, "help": "Drift model for GLM."},
-            # {"name": "DURATION_BETWEEN_ACTIVITIES", "default": 35, "type": int, "help": "Time between activities (s)."},
-            # {"name": "SHORT_CHANNEL_REGRESSION", "default": True, "type": bool, "help": "Use short channel regression."},
+            {"name": "SHORT_CHANNEL_REGRESSION", "default": True, "type": bool, "help": "Whether to use short channel regression and regress out the short channels. Requires SHORT_CHANNELS to be True and at least one short channel to be found."},
         ]
     },
     {
         "title": "General Linear Model",
         "params": [
-            {"name": "TIME_WINDOW_START", "default": "0", "type": int, "help": "Where to start averaging the fir model bins. Only affects the significance and contrast images."},
-            {"name": "TIME_WINDOW_END", "default": "15", "type": int, "help": "Where to end averaging the fir model bins. Only affects the significance and contrast images."},
-            #{"name": "N_JOBS", "default": 1, "type": int, "help": "Number of jobs for GLM processing."},
+            {"name": "NOISE_MODEL", "default": "ar1", "type": str, "help": "Number of jobs for GLM processing."},
+            {"name": "BINS", "default": 0, "type": int, "help": "Number of jobs for GLM processing."},
+            {"name": "N_JOBS", "default": 1, "type": int, "help": "Number of jobs for GLM processing."},
         ]
     },
     {
```
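These GUI defaults correspond to the `run_glm` call added in flares.py above. Spelled out with the defaults in place, as a hedged illustration; the keyword names come straight from that diff:

```python
from mne_nirs.statistics import run_glm

glm_est = run_glm(raw_haemo, design_matrix,
                  noise_model="ar1",  # first-order autoregressive noise model
                  bins=0,             # the GUI default shown above
                  n_jobs=1,
                  verbose=False)
```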
```diff
@@ -202,7 +216,10 @@ SECTIONS = [
     {
         "title": "Other",
         "params": [
-            {"name": "MAX_WORKERS", "default": 4, "type": int, "help": "Number of files to be processed at once. Lowering this value may help on underpowered systems."},
+            {"name": "TIME_WINDOW_START", "default": 0, "type": int, "help": "Where to start averaging the fir model bins. Only affects the significance and contrast images."},
+            {"name": "TIME_WINDOW_END", "default": 15, "type": int, "help": "Where to end averaging the fir model bins. Only affects the significance and contrast images."},
+            {"name": "MAX_WORKERS", "default": 4, "type": str, "help": "Number of files to be processed at once. Setting this to a small integer value may help on underpowered systems. Remove the value to use an automatic amount."},
+            {"name": "VERBOSITY", "default": False, "type": bool, "help": "True will log lots of debugging information to the log file. False will only log required data."},
         ]
     },
 ]
```
```diff
@@ -483,29 +500,38 @@ class UserGuideWindow(QWidget):
 
         layout = QVBoxLayout()
         label = QLabel("Progress Bar Stages:", self)
-        label2 = QLabel("Stage 1: Load the snirf file\n"
-                        "Stage 2: Check the optode positions\n"
-                        "Stage 3: Scalp Coupling Index\n"
-                        "Stage 4: Signal to Noise Ratio\n"
-                        "Stage 5: Peak Spectral Power\n"
-                        "Stage 6: Identify bad channels\n"
-                        "Stage 7: Interpolate bad channels\n"
-                        "Stage 8: Optical Density\n"
-                        "Stage 9: Temporal Derivative Distribution Repair\n"
-                        "Stage 10: Beer Lambert Law\n"
-                        "Stage 11: Heart Rate Filtering\n"
-                        "Stage 12: Get Short/Long Channels\n"
-                        "Stage 13: Calculate Events from Annotations\n"
-                        "Stage 14: Epoch Calculations\n"
-                        "Stage 15: Design Matrix\n"
-                        "Stage 16: General Linear Model\n"
-                        "Stage 17: Generate Plots from the GLM\n"
-                        "Stage 18: Individual Significance\n"
-                        "Stage 19: Channel, Region of Interest, and Contrast Results\n"
-                        "Stage 20: Image Conversion\n", self)
+        label2 = QLabel("Stage 1: Preprocessing\n"
+                        "Stage 2: Trimming\n"
+                        "Stage 3: Verify Optode Placement\n"
+                        "Stage 4: Short/Long Channels\n"
+                        "Stage 5: Heart Rate\n"
+                        "Stage 6: Scalp Coupling Index\n"
+                        "Stage 7: Signal to Noise Ratio\n"
+                        "Stage 8: Peak Spectral Power\n"
+                        "Stage 9: Bad Channels Handling\n"
+                        "Stage 10: Optical Density\n"
+                        "Stage 11: Temporal Derivative Distribution Repair Filtering\n"
+                        "Stage 12: Wavelet Filtering\n"
+                        "Stage 13: Haemoglobin Concentration\n"
+                        "Stage 14: Enhance Negative Correlation\n"
+                        "Stage 15: Filter\n"
+                        "Stage 16: Extracting Events\n"
+                        "Stage 17: Epoch Calculations\n"
+                        "Stage 18: Design Matrix\n"
+                        "Stage 19: General Linear Model\n"
+                        "Stage 20: Generate GLM Results\n"
+                        "Stage 21: Generate Channel Significance\n"
+                        "Stage 22: Generate Channel, Region of Interest, and Contrast Results\n"
+                        "Stage 23: Compute Contrast Results\n"
+                        "Stage 24: Finishing Up\n", self)
+
+        label3 = QLabel("For more information, visit the Git wiki page <a href='https://git.research.dezeeuw.ca/tyler/flares/wiki'>here</a>.", self)
+        label3.setTextFormat(Qt.TextFormat.RichText)
+        label3.setTextInteractionFlags(Qt.TextInteractionFlag.TextBrowserInteraction)
+        label3.setOpenExternalLinks(True)
         layout.addWidget(label)
         layout.addWidget(label2)
+        layout.addWidget(label3)
 
         self.setLayout(layout)
```
@@ -742,15 +768,23 @@ class UpdateOptodesWindow(QWidget):
         write_raw_snirf(raw, save_path)
 
 
+class EventUpdateMode(Enum):
+    WRITE_SNIRF = auto()  # destructive
+    WRITE_JSON = auto()   # non-destructive
+
+
 class UpdateEventsWindow(QWidget):
 
-    def __init__(self, parent=None):
+    def __init__(self, parent=None, mode=EventUpdateMode.WRITE_SNIRF, caller=None):
         super().__init__(parent, Qt.WindowType.Window)
 
+        self.mode = mode
+        self.caller = caller or self.__class__.__name__
         self.setWindowTitle("Update event markers")
         self.resize(760, 200)
 
+        print("INIT MODE:", mode)
+
         self.label_file_a = QLabel("SNIRF file:")
         self.line_edit_file_a = QLineEdit()
         self.line_edit_file_a.setReadOnly(True)
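Both call sites later in this diff choose a mode explicitly. A caller that only wants the non-destructive sidecar JSON would construct the window like this (a sketch using only names introduced above; mirrors the Video Alignment Tool call site further down):

    # Non-destructive: events go to a sidecar JSON, the SNIRF file is untouched.
    win = UpdateEventsWindow(parent=None, mode=EventUpdateMode.WRITE_JSON, caller="Video Alignment Tool")
    win.show()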
@@ -1051,120 +1085,139 @@ class UpdateEventsWindow(QWidget):
             QMessageBox.warning(self, "No SNIRF file", "Please select a SNIRF file.")
             return
 
+        boris_obs = self.boris_data["observations"][selected_obs]
+
+        # --- Extract videos + delays ---
+        files = boris_obs.get("file", {})
+        offsets = boris_obs.get("media_info", {}).get("offset", {})
+
+        videos = {}
+        for key, path in files.items():
+            if path:  # only include videos that exist
+                delay = offsets.get(key, 0.0)  # default 0 if missing
+                videos[key] = {"file": path, "delay": delay}
+
         base_name = os.path.splitext(os.path.basename(file_a))[0]
-        suggested_name = f"{base_name}_{suffix}.snirf"
-
-        # Open save dialog
-        save_path, _ = QFileDialog.getSaveFileName(
-            self,
-            "Save SNIRF File As",
-            suggested_name,
-            "SNIRF Files (*.snirf)"
-        )
-
-        if not save_path:
-            print("Save cancelled.")
-            return
-
-        if not save_path.lower().endswith(".snirf"):
-            save_path += ".snirf"
-
-        try:
-            raw = read_raw_snirf(snirf_path, preload=True)
-
-            onsets = []
-            durations = []
-            descriptions = []
-
-            open_events = {}  # label -> list of start times
-            label_counts = {}
-
-            used_times = set()
-            sfreq = raw.info['sfreq']  # sampling frequency in Hz
-            min_shift = 1.0 / sfreq
-            max_attempts = 10
-
-            for event in boris_events:
-                if not isinstance(event, list) or len(event) < 3:
-                    continue
-
-                event_time = event[0]
-                label = event[2]
-
-                count = label_counts.get(label, 0) + 1
-                label_counts[label] = count
-
-                if label not in open_events:
-                    open_events[label] = []
-
-                if count % 2 == 1:
-                    # Odd occurrence = start event
-                    open_events[label].append(event_time)
-                else:
-                    # Even occurrence = end event
-                    if open_events[label]:
-                        matched_start = open_events[label].pop(0)
-                        duration = event_time - matched_start
-
-                        if duration <= 0:
-                            print(f"Warning: Duration for {label} is non-positive ({duration}). Skipping.")
-                            continue
-
-                        shifted_start = matched_start + time_shift
-
-                        adjusted_time = shifted_start
-                        attempts = 0
-                        while round(adjusted_time, 6) in used_times and attempts < max_attempts:
-                            adjusted_time += min_shift
-                            attempts += 1
-
-                        if attempts == max_attempts:
-                            print(f"Warning: Couldn't find unique time for {label} @ {matched_start}s. Skipping.")
-                            continue
-
-                        adjusted_time = round(adjusted_time, 6)
-                        used_times.add(adjusted_time)
-
-                        print(f"Adding event: {label} @ {adjusted_time:.3f}s for {duration:.3f}s")
-
-                        onsets.append(adjusted_time)
-                        durations.append(duration)
-                        descriptions.append(label)
-                    else:
-                        print(f"Warning: Unmatched end for label '{label}' at {event_time:.3f}s. Skipping.")
-
-            # Optionally warn about any unmatched starts left open
-            for label, starts in open_events.items():
-                for start_time in starts:
-                    shifted_start = start_time + time_shift
-
-                    adjusted_time = shifted_start
-                    attempts = 0
-                    while round(adjusted_time, 6) in used_times and attempts < max_attempts:
-                        adjusted_time += min_shift
-                        attempts += 1
-
-                    if attempts == max_attempts:
-                        print(f"Warning: Couldn't find unique time for unmatched start {label} @ {start_time}s. Skipping.")
-                        continue
-
-                    adjusted_time = round(adjusted_time, 6)
-                    used_times.add(adjusted_time)
-
-                    print(f"Warning: Unmatched start for label '{label}' at {start_time:.3f}s. Adding with duration 0.")
-                    onsets.append(adjusted_time)
-                    durations.append(0.0)
-                    descriptions.append(label)
-
-            new_annotations = Annotations(onset=onsets, duration=durations, description=descriptions)
-            raw.set_annotations(new_annotations)
-            write_raw_snirf(raw, save_path)
-
-            QMessageBox.information(self, "Success", "SNIRF file updated with aligned BORIS events.")
-
-        except Exception as e:
-            QMessageBox.critical(self, "Error", f"Failed to update SNIRF file:\n{e}")
+
+        if self.mode == EventUpdateMode.WRITE_SNIRF:
+            # Open save dialog for SNIRF
+            base_name = os.path.splitext(os.path.basename(file_a))[0]
+            suggested_name = f"{base_name}_{suffix}.snirf"
+            save_path, _ = QFileDialog.getSaveFileName(
+                self,
+                "Save SNIRF File As",
+                suggested_name,
+                "SNIRF Files (*.snirf)"
+            )
+            if not save_path:
+                print("SNIRF save cancelled.")
+                return
+            if not save_path.lower().endswith(".snirf"):
+                save_path += ".snirf"
+
+            try:
+                raw = read_raw_snirf(file_a, preload=True)
+
+                # --- Align BORIS events to SNIRF ---
+                boris_events = boris_obs.get("events", [])
+                onsets, durations, descriptions = [], [], []
+                open_events = {}  # label -> list of start times
+                label_counts = {}
+                used_times = set()
+                sfreq = raw.info['sfreq']
+                min_shift = 1.0 / sfreq
+                max_attempts = 10
+
+                for event in boris_events:
+                    if not isinstance(event, list) or len(event) < 3:
+                        continue
+                    event_time = event[0]
+                    label = event[2]
+                    count = label_counts.get(label, 0) + 1
+                    label_counts[label] = count
+
+                    if label not in open_events:
+                        open_events[label] = []
+
+                    if count % 2 == 1:
+                        open_events[label].append(event_time)
+                    else:
+                        if open_events[label]:
+                            start_time = open_events[label].pop(0)
+                            duration = event_time - start_time
+                            if duration <= 0:
+                                continue
+
+                            adjusted_time = start_time + time_shift
+                            attempts = 0
+                            while round(adjusted_time, 6) in used_times and attempts < max_attempts:
+                                adjusted_time += min_shift
+                                attempts += 1
+                            if attempts == max_attempts:
+                                continue
+
+                            adjusted_time = round(adjusted_time, 6)
+                            used_times.add(adjusted_time)
+                            onsets.append(adjusted_time)
+                            durations.append(duration)
+                            descriptions.append(label)
+
+                # Handle unmatched starts
+                for label, starts in open_events.items():
+                    for start_time in starts:
+                        adjusted_time = start_time + time_shift
+                        attempts = 0
+                        while round(adjusted_time, 6) in used_times and attempts < max_attempts:
+                            adjusted_time += min_shift
+                            attempts += 1
+                        if attempts == max_attempts:
+                            continue
+
+                        adjusted_time = round(adjusted_time, 6)
+                        used_times.add(adjusted_time)
+                        onsets.append(adjusted_time)
+                        durations.append(0.0)
+                        descriptions.append(label)
+
+                new_annotations = Annotations(onset=onsets, duration=durations, description=descriptions)
+                raw.set_annotations(new_annotations)
+                write_raw_snirf(raw, save_path)
+                QMessageBox.information(self, "Success", "SNIRF file updated with aligned BORIS events.")
+
+            except Exception as e:
+                QMessageBox.critical(self, "Error", f"Failed to update SNIRF file:\n{e}")
+
+        elif self.mode == EventUpdateMode.WRITE_JSON:
+            # Open save dialog for JSON
+            base_name = os.path.splitext(os.path.basename(file_a))[0]
+            suggested_name = f"{base_name}_{suffix}_alignment.json"
+            save_path, _ = QFileDialog.getSaveFileName(
+                self,
+                "Save Event Alignment JSON As",
+                suggested_name,
+                "JSON Files (*.json)"
+            )
+            if not save_path:
+                print("JSON save cancelled.")
+                return
+            if not save_path.lower().endswith(".json"):
+                save_path += ".json"
+
+            # Build JSON dict
+            json_data = {
+                "observation": selected_obs,
+                "snirf_anchor": {"label": snirf_label, "time": snirf_anchor_time},
+                "boris_anchor": {"label": boris_label, "time": boris_anchor_time},
+                "time_shift": time_shift,
+                "videos": videos
+            }
+
+            # Write JSON
+            try:
+                with open(save_path, "w", encoding="utf-8") as f:
+                    json.dump(json_data, f, indent=4)
+                QMessageBox.information(self, "Success", f"Event alignment saved to:\n{save_path}")
+            except Exception as e:
+                QMessageBox.critical(self, "Error", f"Failed to write JSON:\n{e}")
 
 
     def update_optode_positions(self, file_a, file_b, save_path):
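The pairing rule in the hunk above (odd occurrences of a label open an interval, even occurrences close it; leftover starts become zero-duration events) is easier to follow in isolation. A self-contained sketch of the same logic, without the Qt and collision-avoidance details (the function name pair_boris_events is illustrative):

    def pair_boris_events(events, time_shift=0.0):
        """Pair alternating occurrences of each label into (onset, duration, label) tuples.

        `events` is a list of [time, ..., label] rows as exported by BORIS.
        Unmatched starts are kept with duration 0, matching the behaviour above.
        """
        open_events, counts, out = {}, {}, []
        for event in events:
            if not isinstance(event, (list, tuple)) or len(event) < 3:
                continue
            t, label = event[0], event[2]
            counts[label] = counts.get(label, 0) + 1
            if counts[label] % 2 == 1:
                open_events.setdefault(label, []).append(t)   # odd occurrence: start
            elif open_events.get(label):
                start = open_events[label].pop(0)              # even occurrence: end
                if t > start:
                    out.append((start + time_shift, t - start, label))
        for label, starts in open_events.items():              # leftover starts
            out.extend((s + time_shift, 0.0, label) for s in starts)
        return out

    # Example: two "talk" markers form one 2.5 s interval; the lone "look" start keeps duration 0.
    print(pair_boris_events([[1.0, "", "talk"], [3.5, "", "talk"], [4.0, "", "look"]]))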
@@ -1197,6 +1250,47 @@ class UpdateEventsWindow(QWidget):
         write_raw_snirf(raw, save_path)
 
 
+    def _apply_events_to_snirf(self, raw, new_annotations, save_path):
+        raw.set_annotations(new_annotations)
+        write_raw_snirf(raw, save_path)
+
+    def _write_event_mapping_json(
+        self,
+        file_a,
+        file_b,
+        selected_obs,
+        snirf_anchor,
+        boris_anchor,
+        time_shift,
+        mapped_events,
+        save_path
+    ):
+        import json
+        from datetime import datetime
+        import os
+
+        payload = {
+            "source": {
+                "called_from": self.caller,
+                "snirf_file": os.path.basename(file_a),
+                "boris_file": os.path.basename(file_b),
+                "observation": selected_obs
+            },
+            "alignment": {
+                "snirf_anchor": snirf_anchor,
+                "boris_anchor": boris_anchor,
+                "time_shift_seconds": time_shift
+            },
+            "events": mapped_events,
+            "created_at": datetime.utcnow().isoformat() + "Z"
+        }
+
+        with open(save_path, "w", encoding="utf-8") as f:
+            json.dump(payload, f, indent=2)
+
+        return save_path
+
+
 class ProgressBubble(QWidget):
     """
     A clickable widget displaying a progress bar made of colored rectangles and a label.
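A consumer of the _write_event_mapping_json payload needs only the standard library. A sketch of reading it back and applying the stored shift; the field names come from the payload above, but the per-event "onset" key is an assumption about the mapped_events structure, which this hunk does not show:

    import json

    def load_event_mapping(path):
        """Read an alignment JSON written above and return (time_shift, events)."""
        with open(path, "r", encoding="utf-8") as f:
            payload = json.load(f)
        return payload["alignment"]["time_shift_seconds"], payload["events"]

    # Hypothetical usage: shift BORIS-time onsets into SNIRF time.
    # shift, events = load_event_mapping("participant01_events_alignment.json")
    # snirf_onsets = [e["onset"] + shift for e in events]  # "onset" key assumed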
@@ -1229,7 +1323,7 @@ class ProgressBubble(QWidget):
         self.progress_layout = QHBoxLayout()
 
         self.rects = []
-        for _ in range(25):
+        for _ in range(24):
            rect = QFrame()
            rect.setFixedSize(10, 18)
            rect.setStyleSheet("background-color: white; border: 1px solid gray;")
@@ -1358,7 +1452,12 @@ class ParamSection(QWidget):
             widget.setValidator(QDoubleValidator())
             widget.setText(str(param["default"]))
         elif param["type"] == list:
-            widget = self._create_multiselect_dropdown(None)
+            if param.get("exclusive", True):
+                widget = QComboBox()
+                widget.addItems(param.get("options", []))
+                widget.setCurrentText(str(param.get("default", "<None Selected>")))
+            else:
+                widget = self._create_multiselect_dropdown(None)
         else:
            widget = QLineEdit()
            widget.setText(str(param["default"]))
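With the exclusive flag defaulting to True, a single-choice parameter needs only an options list in its definition. A sketch of what the new BAD_CHANNELS_HANDLING entry could look like; the exact option strings are assumptions based on the changelog, not copied from this diff:

    # Hypothetical single-choice entry in SECTIONS.
    bad_channels_param = {
        "name": "BAD_CHANNELS_HANDLING",
        "default": "Interpolate",
        "type": list,
        "exclusive": True,   # True (the default) renders a single-select QComboBox
        "options": ["No Action", "Remove", "Interpolate"],
        "help": "How bad channels are dealt with. Interpolation is the default.",
    }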
@@ -1466,7 +1565,10 @@ class ParamSection(QWidget):
             if expected_type == bool:
                 values[name] = widget.currentText() == "True"
             elif expected_type == list:
-                values[name] = [x.strip() for x in widget.lineEdit().text().split(",") if x.strip()]
+                if isinstance(widget, FullClickComboBox):
+                    values[name] = [x.strip() for x in widget.lineEdit().text().split(",") if x.strip()]
+                elif isinstance(widget, QComboBox):
+                    values[name] = widget.currentText()
             else:
                raw_text = widget.text()
                try:
@@ -2422,9 +2524,23 @@ class ParticipantFoldChannelsWidget(QWidget):
 
         for idx in selected_indexes:
             if idx == 0:
-                flares.fold_channels(haemo_obj)
+                try:
+                    flares.fold_channels(haemo_obj)
+                except Exception:
+                    msg_box = QMessageBox()
+                    msg_box.setIcon(QMessageBox.Icon.Critical)
+                    msg_box.setWindowTitle("Something went wrong!")
+                    message = (
+                        "Unable to locate the fOLD files!<br><br>"
+                        "Please download the 'Supplementary' folder from <a href='https://github.com/nirx/fOLD-public'>here</a>. "
+                        "Once the folder is downloaded, place it in C:/Users/your username/mne_data/fOLD/fOLD-public-master/Supplementary.<br><br>"
+                        "If you are not using Windows, please go to the FLARES Git page for more information."
+                    )
+                    msg_box.setTextFormat(Qt.TextFormat.RichText)
+                    msg_box.setText(message)
+                    msg_box.setTextInteractionFlags(Qt.TextInteractionFlag.TextBrowserInteraction)
+                    msg_box.setStandardButtons(QMessageBox.StandardButton.Ok)
+                    msg_box.exec()
             else:
                print(f"No method defined for index {idx}")
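The message box above fires on any failure inside fold_channels; a cheaper guard is to test for the fOLD dataset before processing starts. A minimal sketch, assuming only that the Supplementary folder lives under the path named in the dialog (the helper name fold_dataset_present is illustrative):

    import os

    def fold_dataset_present() -> bool:
        """Return True if the fOLD 'Supplementary' folder is where the dialog says to put it."""
        supplementary = os.path.join(
            os.path.expanduser("~"), "mne_data", "fOLD", "fOLD-public-master", "Supplementary"
        )
        return os.path.isdir(supplementary)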
@@ -2460,7 +2576,7 @@ class ExportDataAsCSVViewerWidget(QWidget):
 
         self.index_texts = [
             "0 (Export Data to CSV)",
-            # "1 (second image)",
+            "1 (CSV for SPARKS)",
             # "2 (third image)",
            # "3 (fourth image)",
         ]
@@ -2612,7 +2728,6 @@ class ExportDataAsCSVViewerWidget(QWidget):
         # Pass the necessary arguments to each method
         for file_path in selected_file_paths:
             haemo_obj = self.haemo_dict.get(file_path)
-
             if haemo_obj is None:
                continue
 
@@ -2646,10 +2761,63 @@ class ExportDataAsCSVViewerWidget(QWidget):
                     QMessageBox.critical(self, "Error", f"Failed to update SNIRF file:\n{e}")
 
+            elif idx == 1:
+                try:
+                    suggested_name = f"{file_path}_sparks.csv"
+
+                    # Open save dialog
+                    save_path, _ = QFileDialog.getSaveFileName(
+                        self,
+                        "Save CSV File As",
+                        suggested_name,
+                        "CSV Files (*.csv)"
+                    )
+
+                    if not save_path:
+                        print("Save cancelled.")
+                        return
+
+                    if not save_path.lower().endswith(".csv"):
+                        save_path += ".csv"
+
+                    # Save the CSV here
+                    raw = haemo_obj
+                    data, times = raw.get_data(return_times=True)
+
+                    ann_col = np.full(times.shape, "", dtype=object)
+                    if raw.annotations is not None and len(raw.annotations) > 0:
+                        for onset, duration, desc in zip(
+                            raw.annotations.onset,
+                            raw.annotations.duration,
+                            raw.annotations.description
+                        ):
+                            mask = (times >= onset) & (times < onset + duration)
+                            ann_col[mask] = desc
+
+                    df = pd.DataFrame(data.T, columns=raw.ch_names)
+                    df.insert(0, "annotation", ann_col)
+                    df.insert(0, "time", times)
+                    df.to_csv(save_path, index=False)
+                    QMessageBox.information(self, "Success", "CSV file has been saved.")
+
+                    win = UpdateEventsWindow(
+                        parent=self,
+                        mode=EventUpdateMode.WRITE_JSON,
+                        caller="Video Alignment Tool"
+                    )
+                    win.show()
+
+                except Exception as e:
+                    QMessageBox.critical(self, "Error", f"Failed to export the CSV file:\n{e}")
+
             else:
                 print(f"No method defined for index {idx}")
 
 
 class ClickableLabel(QLabel):
     def __init__(self, full_pixmap: QPixmap, thumbnail_pixmap: QPixmap):
         super().__init__()
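The exported layout is one row per sample: a time column, an annotation column (blank outside any event), then one column per channel. A sketch of loading the file back, e.g. from SPARKS or a notebook; the file name and the "talk" label are hypothetical:

    import pandas as pd

    df = pd.read_csv("participant01_sparks.csv")   # hypothetical file name
    channels = df.columns[2:]                      # everything after time/annotation
    talking = df[df["annotation"] == "talk"]       # samples inside a 'talk' event (label assumed)
    mean_during_talk = talking[channels].mean()    # per-channel mean over those samples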
@@ -4275,7 +4443,7 @@ class MainApplication(QMainWindow):
 
     def update_event_markers(self):
         if self.events is None or not self.events.isVisible():
-            self.events = UpdateEventsWindow(self)
+            self.events = UpdateEventsWindow(self, EventUpdateMode.WRITE_SNIRF, "Manual SNIRF Edit")
         self.events.show()
 
     def open_file_dialog(self):
@@ -12,7 +12,7 @@ from pathlib import Path
 import numpy as np
 from scipy import linalg
 from scipy.spatial.distance import cdist
-from scipy.special import sph_harm
+from scipy.special import sph_harm_y
 
 from ._fiff.constants import FIFF
 from ._fiff.open import fiff_open
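This tracks SciPy's deprecation of scipy.special.sph_harm in favour of sph_harm_y (SciPy 1.15). Note the two functions do not take arguments in the same order: sph_harm(m, n, theta, phi) versus sph_harm_y(n, m, theta, phi), with the angle conventions swapped as well, so call sites need more than a rename. A sketch of the equivalence for one harmonic:

    import numpy as np
    from scipy.special import sph_harm_y

    n, m = 2, 1                 # degree and order
    theta = np.pi / 3           # polar angle in sph_harm_y's convention
    phi = np.pi / 4             # azimuthal angle
    # Old call: sph_harm(m, n, phi, theta) -> same value, deprecated since SciPy 1.15
    value = sph_harm_y(n, m, theta, phi)
    print(value)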
@@ -1025,7 +1025,7 @@ def _handle_sensor_types(meg, eeg, fnirs):
     fnirs=dict(channels="fnirs", pairs="fnirs_pairs"),
 )
 sensor_alpha = {
-    key: dict(meg_helmet=0.25, meg=0.25).get(key, 0.8)
+    key: dict(meg_helmet=0.25, meg=0.25).get(key, 1.0)
     for ch_dict in alpha_map.values()
     for key in ch_dict.values()
 }
@@ -586,7 +586,7 @@ class _PyVistaRenderer(_AbstractRenderer):
             color = None
         else:
             scalars = None
-        tube = line.tube(radius, n_sides=self.tube_n_sides)
+        tube = line.tube(radius=radius, n_sides=self.tube_n_sides)
         actor = _add_mesh(
            plotter=self.plotter,
            mesh=tube,
@@ -18,7 +18,7 @@ VSVersionInfo(
         StringStruct('FileDescription', 'FLARES main application'),
         StringStruct('FileVersion', '1.0.0.0'),
         StringStruct('InternalName', 'flares.exe'),
-        StringStruct('LegalCopyright', '© 2025 Tyler de Zeeuw'),
+        StringStruct('LegalCopyright', '© 2025-2026 Tyler de Zeeuw'),
         StringStruct('OriginalFilename', 'flares.exe'),
         StringStruct('ProductName', 'FLARES'),
         StringStruct('ProductVersion', '1.0.0.0')])
@@ -18,7 +18,7 @@ VSVersionInfo(
         StringStruct('FileDescription', 'FLARES updater application'),
         StringStruct('FileVersion', '1.0.0.0'),
         StringStruct('InternalName', 'main.exe'),
-        StringStruct('LegalCopyright', '© 2025 Tyler de Zeeuw'),
+        StringStruct('LegalCopyright', '© 2025-2026 Tyler de Zeeuw'),
         StringStruct('OriginalFilename', 'flares_updater.exe'),
         StringStruct('ProductName', 'FLARES Updater'),
         StringStruct('ProductVersion', '1.0.0.0')])