11 Commits

Author SHA1 Message Date
7007478c3b update ignore 2026-01-28 10:10:26 -08:00
fb728d5033 added support updating optode positions from .xlsx 2026-01-28 10:09:06 -08:00
1b78f1904d further variable changes 2026-01-23 11:25:01 -08:00
9779a63a9c crash and documentation fixes 2026-01-15 12:04:55 -08:00
2ecd357aca fix bad dependency 2026-01-14 23:57:54 -08:00
fe4e8904b4 improvements 2026-01-14 23:54:03 -08:00
473c945563 fix for desktop windows 2025-11-30 15:42:56 -08:00
64ed6d2e87 more parameters 2025-11-03 16:56:05 -08:00
1aa2402d09 updates to hr 2025-10-31 21:21:10 -07:00
45c6176dba quick bug fixes 2025-10-21 18:05:30 -07:00
a4bbdb90c8 update to changelog for build 1.1.5 2025-10-20 16:08:34 -07:00
9 changed files with 1192 additions and 374 deletions

.gitignore vendored (2 changes)

@@ -174,3 +174,5 @@ cython_debug/
# PyPI configuration file
.pypirc
/individual_images
*.xlsx


@@ -1,4 +1,44 @@
# Next Release
# Version 1.2.0
- Added new parameters to the right side of the screen
- These parameters include SHOW_OPTODE_NAMES, SECONDS_TO_STRIP_HR, MAX_LOW_HR, MAX_HIGH_HR, SMOOTHING_WINDOW_HR, HEART_RATE_WINDOW, BAD_CHANNELS_HANDLING, MAX_DIST, MIN_NEIGHBORS, L_TRANS_BANDWIDTH, H_TRANS_BANDWIDTH, RESAMPLE, RESAMPLE_FREQ, STIM_DUR, HRF_MODEL, HIGH_PASS, DRIFT_ORDER, FIR_DELAYS, MIN_ONSET, OVERSAMPLING, SHORT_CHANNEL_REGRESSION, NOISE_MODEL, BINS, and VERBOSITY.
- All the new parameters have default values matching the underlying values in version 1.1.7
- The order of the parameters has changed to match the order in which the code runs when the Process button is clicked
- Moved TIME_WINDOW_START and TIME_WINDOW_END to the 'Other' category
- Fixed a bug causing SCI to not work when HEART_RATE was set to False
- Bad channels can now be dealt with by taking no action, removing them completely, or interpolating them based on their neighbours. Interpolation remains the default option
- Fixed an underlying deprecation warning
- Fixed an issue causing some overlay elements to not render on the brain for certain devices
- Fixed a crash when rendering some Inter-Group images with only one participant in a group
- Fixed a crash when attempting to fOLD channels without the fOLD dataset installed
- Lowered the number of rectangles in the progress bar to 24 after combining some actions
- Fixed the User Guide window to properly display information about the 24 stages and added a link to the Git wiki page
- MAX_WORKERS should now properly respect the value set
- Added a new CSV export option to be used by other applications
# Version 1.1.7
- Fixed a bug where having both an L_FREQ and an H_FREQ would cause only the L_FREQ to be used
- Changed the default H_FREQ from 0.7 to 0.3
- Added a PSD graph, along with 2 heart rate images to the individual participant viewer
- The PSD graph is used to help calculate the heart rate, whereas the other 2 are currently just for show
- SCI is now computed using a 0.6 Hz window around the calculated heart rate instead of a window around an average heart rate (see the sketch after this diff)
- Fixed an issue with some epochs figures not showing under the participant analysis
- Removed SECONDS_TO_STRIP from the preprocessing options
- Added new parameters to the right side of the screen
- These parameters include TRIM, SECONDS_TO_KEEP, OPTODE_PLACEMENT, HEART_RATE, WAVELET, IQR, WAVELET_TYPE, WAVELET_LEVEL, ENHANCE_NEGATIVE_CORRELATION, SHORT_CHANNEL_THRESH, LONG_CHANNEL_THRESH, and DRIFT_MODEL
- Changed number of rectangles in the progress bar to 25 to account for the new options
# Version 1.1.6
- Fixed the Process button appearing when no files are selected
- Fixed a bug that would cause an instant child process crash on Windows
- Added L_FREQ and H_FREQ parameters for more user control over low and high pass filtering
# Version 1.1.5
- Fixed saves created on Windows not opening on a Mac (hopefully the other way around too!)
- Added the option to right click loaded snirf files to reveal them in a file browser or delete them if they are no longer desired
@@ -14,6 +54,7 @@
- Added a clickable link below the selected file's metadata explaining the independent parameters and why they are useful
- Updated some tooltips to provide better, more accurate information
- Added details about the processing steps and their order into the user guide
- Changed the default bandpass filtering parameters
# Version 1.1.4
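A minimal sketch of the SCI change noted under Version 1.1.7 above (a 0.6 Hz window around the calculated heart rate), assuming an MNE-based pipeline; the file path and heart-rate value are hypothetical and this is not the exact flares.py implementation:

from mne.io import read_raw_snirf
from mne.preprocessing.nirs import optical_density, scalp_coupling_index

# Hypothetical input file; in FLARES the heart rate comes from the PSD-based estimate
raw = read_raw_snirf("participant_01.snirf", preload=True)
raw_od = optical_density(raw)

estimated_bpm = 72.0
hr_hz = estimated_bpm / 60.0          # beats per minute -> Hz
half_window = 0.3                     # 0.6 Hz total window around the heart rate
sci = scalp_coupling_index(raw_od, l_freq=hr_hz - half_window,
                           h_freq=hr_hz + half_window)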

flares.py (901 changes)

File diff suppressed because it is too large

main.py (460 changes)

@@ -22,11 +22,11 @@ import subprocess
from pathlib import Path, PurePosixPath
from datetime import datetime
from multiprocessing import Process, current_process, freeze_support, Manager
import numpy as np
import pandas as pd
from enum import Enum, auto
# External library imports
import numpy as np
import pandas as pd
import psutil
import requests
@@ -46,7 +46,7 @@ from PySide6.QtGui import QAction, QKeySequence, QIcon, QIntValidator, QDoubleVa
from PySide6.QtSvgWidgets import QSvgWidget # needed to show svgs when app is not frozen
CURRENT_VERSION = "1.0.0"
CURRENT_VERSION = "1.2.0"
API_URL = "https://git.research.dezeeuw.ca/api/v1/repos/tyler/flares/releases"
API_URL_SECONDARY = "https://git.research2.dezeeuw.ca/api/v1/repos/tyler/flares/releases"
@@ -58,11 +58,43 @@ SECTIONS = [
{
"title": "Preprocessing",
"params": [
{"name": "SECONDS_TO_STRIP", "default": 0, "type": int, "help": "Seconds to remove from beginning of all loaded snirf files. Setting this to 0 will remove nothing from the files."},
{"name": "DOWNSAMPLE", "default": True, "type": bool, "help": "Should the snirf files be downsampled? If this is set to True, DOWNSAMPLE_FREQUENCY will be used as the target frequency to downsample to."},
{"name": "DOWNSAMPLE_FREQUENCY", "default": 25, "type": int, "help": "Frequency (Hz) to downsample to. If this is set higher than the input data, new data will be interpolated. Only used if DOWNSAMPLE is set to True"},
]
},
{
"title": "Trimming",
"params": [
{"name": "TRIM", "default": True, "type": bool, "help": "Trim the file start."},
{"name": "SECONDS_TO_KEEP", "default": 5, "type": float, "help": "Seconds to keep at the beginning of all loaded snirf files before the first annotation/event occurs. Calculation is done seperatly on all loaded snirf files. Setting this to 0 will have the first annotation/event be at time point 0."},
]
},
{
"title": "Verify Optode Placement",
"params": [
{"name": "OPTODE_PLACEMENT", "default": True, "type": bool, "help": "Generate an image for each participant outlining their optode placement."},
{"name": "SHOW_OPTODE_NAMES", "default": True, "type": bool, "help": "Should the optode names be written next to their location or not."},
]
},
{
"title": "Short/Long Channels",
"params": [
{"name": "SHORT_CHANNEL", "default": True, "type": bool, "help": "This should be set to True if the data has a short channel present in the data."},
{"name": "SHORT_CHANNEL_THRESH", "default": 0.015, "type": float, "help": "The maximum distance the short channel can be in metres."},
{"name": "LONG_CHANNEL_THRESH", "default": 0.045, "type": float, "help": "The maximum distance the long channel can be in metres."},
]
},
{
"title": "Heart Rate",
"params": [
{"name": "HEART_RATE", "default": True, "type": bool, "help": "Attempt to calculate the participants heart rate."},
{"name": "SECONDS_TO_STRIP_HR", "default": 5, "type": int, "help": "Will remove this many seconds from the start and end of the file. Useful if recording before cap is firmly placed, or participant removes cap while still recording."},
{"name": "MAX_LOW_HR", "default": 40, "type": int, "help": "Any heart rate windows that average below this value will be rounded up to this value."},
{"name": "MAX_HIGH_HR", "default": 200, "type": int, "help": "Any heart rate windows that average above this value will be rounded down to this value."},
{"name": "SMOOTHING_WINDOW_HR", "default": 100, "type": int, "help": "How many individual data points to smooth into a single window."},
{"name": "HEART_RATE_WINDOW", "default": 25, "type": int, "help": "Used for visualization. Shows the range of the calculated heart rate +- this value."},
]
},
{
"title": "Scalp Coupling Index",
"params": [
@@ -75,14 +107,12 @@ SECTIONS = [
"title": "Signal to Noise Ratio",
"params": [
{"name": "SNR", "default": True, "type": bool, "help": "Calculate and mark channels bad based on their Signal to Noise Ratio. This metric calculates how much of the observed signal was noise versus how much of it was a useful signal."},
# {"name": "SNR_TIME_WINDOW", "default": -1, "type": int, "help": "SNR time window."},
{"name": "SNR_THRESHOLD", "default": 5.0, "type": float, "help": "SNR threshold (dB). A typical scale would be 0-25, but it is possible for values to be both above and below this range. Higher values correspond to a better signal. If SNR is True, any channels lower than this value will be marked as bad."},
]
},
{
"title": "Peak Spectral Power",
"params": [
{"name": "PSP", "default": True, "type": bool, "help": "Calculate and mark channels bad based on their Peak Spectral Power. This metric calculates the amplitude or strength of a frequency component that is most prominent in a particular frequency range or spectrum."},
{"name": "PSP_TIME_WINDOW", "default": 3, "type": int, "help": "Independent PSP calculations will be perfomed in a time window for the duration of the value provided, until the end of the file is reached."},
{"name": "PSP_THRESHOLD", "default": 0.1, "type": float, "help": "PSP threshold. A typical scale would be 0-0.5, but it is possible for values to be above this range. Higher values correspond to a better signal. If PSP is True, any channels lower than this value will be marked as bad."},
@@ -91,15 +121,15 @@ SECTIONS = [
{
"title": "Bad Channels Handling",
"params": [
# {"name": "NOT_IMPLEMENTED", "default": True, "type": bool, "help": "Calculate Peak Spectral Power."},
# {"name": "NOT_IMPLEMENTED", "default": 3, "type": int, "help": "PSP time window."},
# {"name": "NOT_IMPLEMENTED", "default": 0.1, "type": float, "help": "PSP threshold."},
{"name": "BAD_CHANNELS_HANDLING", "default": [], "type": list, "options": ["Interpolate", "Remove", "None"], "exclusive": True, "help": "How should we deal with the bad channels that occurred? Note: Some analysis options will only work when this is set to 'Interpolate'."},
{"name": "MAX_DIST", "default": 0.03, "type": float, "help": "The maximum distance to look for neighbours when interpolating. Used only when BAD_CHANNELS_HANDLING is set to 'Interpolate'."},
{"name": "MIN_NEIGHBORS", "default": 2, "type": int, "help": "The minimumn amount of neighbours needed within the MAX_DIST parameter. Used only when BAD_CHANNELS_HANDLING is set to 'Interpolate'."},
]
},
{
"title": "Optical Density",
"params": [
# Intentionally empty (TODO)
# NOTE: Intentionally empty
]
},
{
@@ -108,38 +138,45 @@ SECTIONS = [
{"name": "TDDR", "default": True, "type": bool, "help": "Apply Temporal Derivitave Distribution Repair filtering - a method that removes baseline shift and spike artifacts from the data."},
]
},
{
"title": "Wavelet filtering",
"params": [
{"name": "WAVELET", "default": True, "type": bool, "help": "Apply Wavelet filtering."},
{"name": "IQR", "default": 1.5, "type": float, "help": "Inter-Quartile Range."},
{"name": "WAVELET_TYPE", "default": "db4", "type": str, "help": "Wavelet type."},
{"name": "WAVELET_LEVEL", "default": 3, "type": int, "help": "Wavelet level."},
]
},
{
"title": "Haemoglobin Concentration",
"params": [
# Intentionally empty (TODO)
# NOTE: Intentionally empty
]
},
{
"title": "Enhance Negative Correlation",
"params": [
#{"name": "ENHANCE_NEGATIVE_CORRELATION", "default": False, "type": bool, "help": "Calculate Peak Spectral Power."},
{"name": "ENHANCE_NEGATIVE_CORRELATION", "default": False, "type": bool, "help": "Apply Enhance Negative Correlation."},
]
},
{
"title": "Filtering",
"params": [
#{"name": "FILTER", "default": True, "type": bool, "help": "Calculate Peak Spectral Power."},
{"name": "FILTER", "default": True, "type": bool, "help": "Filter the data."},
{"name": "L_FREQ", "default": 0.005, "type": float, "help": "Any frequencies lower than this value will be removed."},
{"name": "H_FREQ", "default": 0.3, "type": float, "help": "Any frequencies higher than this value will be removed."},
{"name": "L_TRANS_BANDWIDTH", "default": 0.002, "type": float, "help": "How wide the transitional period should be so the data doesn't just drop off on the lower bound."},
{"name": "H_TRANS_BANDWIDTH", "default": 0.002, "type": float, "help": "How wide the transitional period should be so the data doesn't just drop off on the upper bound."},
]
},
{
"title": "Short Channels",
"params": [
{"name": "SHORT_CHANNEL", "default": True, "type": bool, "help": "This should be set to True if the data has a short channel present in the data."},
]
},
{
"title": "Extracting Events",
"title": "Extracting Events*",
"params": [
#{"name": "EVENTS", "default": True, "type": bool, "help": "Calculate Peak Spectral Power."},
]
},
{
"title": "Epoch Calculations",
"title": "Epoch Calculations*",
"params": [
#{"name": "EVENTS", "default": True, "type": bool, "help": "Calculate Peak Spectral Power."},
]
@@ -147,18 +184,27 @@ SECTIONS = [
{
"title": "Design Matrix",
"params": [
{"name": "RESAMPLE", "default": True, "type": bool, "help": "The length of your stimulus."},
{"name": "RESAMPLE_FREQ", "default": 1, "type": int, "help": "The length of your stimulus."},
{"name": "STIM_DUR", "default": 0.5, "type": float, "help": "The length of your stimulus."},
{"name": "HRF_MODEL", "default": "fir", "type": str, "help": "Specifies the hemodynamic response function."},
{"name": "DRIFT_MODEL", "default": "cosine", "type": str, "help": "Specifies the desired drift model."},
{"name": "HIGH_PASS", "default": 0.01, "type": float, "help": "High-pass frequency in case of a cosine model (in Hz)."},
{"name": "DRIFT_ORDER", "default": 1, "type": int, "help": "Order of the drift model (in case it is polynomial)"},
{"name": "FIR_DELAYS", "default": "None", "type": range, "help": "In case of FIR design, yields the array of delays used in the FIR model (in scans)."},
{"name": "MIN_ONSET", "default": -24, "type": int, "help": "Minimal onset relative to frame times (in seconds)"},
{"name": "OVERSAMPLING", "default": 50, "type": int, "help": "Oversampling factor used in temporal convolutions."},
{"name": "REMOVE_EVENTS", "default": "None", "type": list, "help": "Remove events matching the names provided before generating the Design Matrix"},
# {"name": "DRIFT_MODEL", "default": "cosine", "type": str, "help": "Drift model for GLM."},
# {"name": "DURATION_BETWEEN_ACTIVITIES", "default": 35, "type": int, "help": "Time between activities (s)."},
# {"name": "SHORT_CHANNEL_REGRESSION", "default": True, "type": bool, "help": "Use short channel regression."},
{"name": "SHORT_CHANNEL_REGRESSION", "default": True, "type": bool, "help": "Whether to use short channel regression and regress out the short channels. Requires SHORT_CHANNELS to be True and at least one short channel to be found."},
]
},
{
"title": "General Linear Model",
"params": [
{"name": "TIME_WINDOW_START", "default": "0", "type": int, "help": "Where to start averaging the fir model bins. Only affects the significance and contrast images."},
{"name": "TIME_WINDOW_END", "default": "15", "type": int, "help": "Where to end averaging the fir model bins. Only affects the significance and contrast images."},
#{"name": "N_JOBS", "default": 1, "type": int, "help": "Number of jobs for GLM processing."},
{"name": "NOISE_MODEL", "default": "ar1", "type": str, "help": "Number of jobs for GLM processing."},
{"name": "BINS", "default": 0, "type": int, "help": "Number of jobs for GLM processing."},
{"name": "N_JOBS", "default": 1, "type": int, "help": "Number of jobs for GLM processing."},
]
},
{
@@ -170,7 +216,10 @@ SECTIONS = [
{
"title": "Other",
"params": [
{"name": "MAX_WORKERS", "default": 4, "type": int, "help": "Number of files to be processed at once. Lowering this value may help on underpowered systems."},
{"name": "TIME_WINDOW_START", "default": 0, "type": int, "help": "Where to start averaging the fir model bins. Only affects the significance and contrast images."},
{"name": "TIME_WINDOW_END", "default": 15, "type": int, "help": "Where to end averaging the fir model bins. Only affects the significance and contrast images."},
{"name": "MAX_WORKERS", "default": 4, "type": str, "help": "Number of files to be processed at once. Setting this to a small integer value may help on underpowered systems. Remove the value to use an automatic amount."},
{"name": "VERBOSITY", "default": False, "type": bool, "help": "True will log lots of debugging information to the log file. False will only log required data."},
]
},
]
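The MAX_DIST and MIN_NEIGHBORS help text above describes the criterion used when interpolating bad channels: a bad channel needs at least MIN_NEIGHBORS good channels within MAX_DIST metres. A minimal sketch of that check, with a hypothetical helper and an assumed (n_channels, 3) position array; this is not the code flares.py itself uses:

import numpy as np
from scipy.spatial.distance import cdist

def has_enough_neighbours(positions, bad_idx, max_dist=0.03, min_neighbors=2):
    # positions: (n_channels, 3) array of channel locations in metres
    dists = cdist(positions[bad_idx:bad_idx + 1], positions)[0]
    neighbours = np.count_nonzero((dists > 0) & (dists <= max_dist))
    return neighbours >= min_neighbors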
@@ -451,29 +500,38 @@ class UserGuideWindow(QWidget):
layout = QVBoxLayout()
label = QLabel("Progress Bar Stages:", self)
label2 = QLabel("Stage 1: Load the snirf file\n"
"Stage 2: Check the optode positions\n"
"Stage 3: Scalp Coupling Index\n"
"Stage 4: Signal to Noise Ratio\n"
"Stage 5: Peak Spectral Power\n"
"Stage 6: Identify bad channels\n"
"Stage 7: Interpolate bad channels\n"
"Stage 8: Optical Density\n"
"Stage 9: Temporal Derivative Distribution Repair\n"
"Stage 10: Beer Lambert Law\n"
"Stage 11: Heart Rate Filtering\n"
"Stage 12: Get Short/Long Channels\n"
"Stage 13: Calculate Events from Annotations\n"
"Stage 14: Epoch Calculations\n"
"Stage 15: Design Matrix\n"
"Stage 16: General Linear Model\n"
"Stage 17: Generate Plots from the GLM\n"
"Stage 18: Individual Significance\n"
"Stage 19: Channel, Region of Interest, and Contrast Results\n"
"Stage 20: Image Conversion\n", self)
label2 = QLabel("Stage 1: Preprocessing\n"
"Stage 2: Trimming\n"
"Stage 3: Verify Optode Placement\n"
"Stage 4: Short/Long Cannels\n"
"Stage 5: Heart Rate\n"
"Stage 6: Scalp Coupling Index\n"
"Stage 7: Signal to Noise Ratio\n"
"Stage 8: Peak Spectral Power\n"
"Stage 9: Bad Channels Handling\n"
"Stage 10: Optical Density\n"
"Stage 11: Temporal Derivative Distribution Repair Filtering\n"
"Stage 12: Wavelet Filtering\n"
"Stage 13: Haemoglobin Concentration\n"
"Stage 14: Enhance Negative Correlation\n"
"Stage 15: Filter\n"
"Stage 16: Extracting Events\n"
"Stage 17: Epoch Calculations\n"
"Stage 18: Design Matrix\n"
"Stage 19: General Linear Model\n"
"Stage 20: Generate GLM Results\n"
"Stage 21: Generate Channel Significance\n"
"Stage 22: Generate Channel, Region of Interest, and Contrast Results\n"
"Stage 23: Compute Contrast Results\n"
"Stage 24: Finishing Up\n", self)
label3 = QLabel("For more information, visit the Git wiki page <a href='https://git.research.dezeeuw.ca/tyler/flares/wiki'>here</a>.", self)
label3.setTextFormat(Qt.TextFormat.RichText)
label3.setTextInteractionFlags(Qt.TextInteractionFlag.TextBrowserInteraction)
label3.setOpenExternalLinks(True)
layout.addWidget(label)
layout.addWidget(label2)
layout.addWidget(label3)
self.setLayout(layout)
@@ -492,10 +550,10 @@ class UpdateOptodesWindow(QWidget):
self.btn_browse_a = QPushButton("Browse .snirf")
self.btn_browse_a.clicked.connect(self.browse_file_a)
self.label_file_b = QLabel("TXT file:")
self.label_file_b = QLabel("Text file:")
self.line_edit_file_b = QLineEdit()
self.line_edit_file_b.setReadOnly(True)
self.btn_browse_b = QPushButton("Browse .txt")
self.btn_browse_b = QPushButton("Browse .txt/.xlsx")
self.btn_browse_b.clicked.connect(self.browse_file_b)
self.label_suffix = QLabel("Suffix to append to filename:")
@@ -516,9 +574,10 @@ class UpdateOptodesWindow(QWidget):
self.description.setText("Some software when creating snirf files will insert a template of optode positions as the correct position of the optodes for the participant.<br>"
"This is rarely correct as each head differs slightly in shape or size, and a lot of calculations require the optodes to be in the correct location.<br>"
"Using a .txt file, we can update the positions in the snirf file to match those of a digitization system such as one from Polhemus or elsewhere.<br>"
"Using a .txt or .xlsx file, we can update the positions in the snirf file to match those of a digitization system such as one from Polhemus or elsewhere.<br>"
"The .txt file should have the fiducials, detectors, and sources clearly labeled, followed by the x, y, and z coordinates seperated by a space.<br>"
"An example format of what a digitization text file should look like can be found <a href='custom_link'>by clicking here</a>.")
"An example format of what a digitization text file should look like can be found <a href='custom_link'>by clicking here</a>. Currently only .xlsx files directly exported from a<br>"
"Polhemus system are supported.")
self.description.linkActivated.connect(self.handle_link_click)
layout.addWidget(self.description)
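For reference, a digitization text file in the format described above (a label followed by space-separated x, y, z coordinates on each line) could look like the hypothetical snippet below; the coordinates are invented for illustration, and the small parse loop mirrors the .txt branch further down in this diff rather than the exact flares code:

sample = """nz 0.0 0.095 0.0
rpa 0.075 0.0 0.0
lpa -0.075 0.0 0.0
s1 -0.030 0.080 0.050
d1 -0.045 0.070 0.055"""

positions = {}
for line in sample.splitlines():
    label, x, y, z = line.split()
    positions[label.upper()] = [float(x), float(y), float(z)]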
@@ -547,8 +606,7 @@ class UpdateOptodesWindow(QWidget):
file_a_layout.addWidget(file_a_container)
layout.addLayout(file_a_layout)
help_text_b = "Provide a .txt file with labeled optodes (e.g., nz, rpa, lpa, d1, s1) and their x, y, z coordinates."
help_text_b = "Provide a .txt file with labeled optodes (e.g., nz, rpa, lpa, d1, s1) and their x, y, z coordinates, or a .xlsx file from a Polhemius system."
file_b_layout = QHBoxLayout()
@@ -631,7 +689,7 @@ class UpdateOptodesWindow(QWidget):
self.line_edit_file_a.setText(file_path)
def browse_file_b(self):
file_path, _ = QFileDialog.getOpenFileName(self, "Select TXT File", "", "Text Files (*.txt)")
file_path, _ = QFileDialog.getOpenFileName(self, "Select File", "", "Text Files (*.txt), Excel Files (*.xlsx)")
if file_path:
self.line_edit_file_b.setText(file_path)
@@ -685,7 +743,10 @@ class UpdateOptodesWindow(QWidget):
fiducials = {}
ch_positions = {}
extension = Path(file_b).suffix
# Read the lines from the optode file
if extension == '.txt':
with open(file_b, 'r') as f:
for line in f:
if line.strip():
@@ -701,6 +762,49 @@ class UpdateOptodesWindow(QWidget):
else:
ch_positions[ch_name.upper()] = coords
elif extension == '.xlsx':
df = pd.read_excel(file_b, sheet_name='Sheet1')
def _get_block_data(df, block_id, row_mapping, scale=0.001):
"""Isolates a block, cleans numeric data, and returns a scaled dictionary."""
# 1. Isolate and clean
block = df[df['block_id'] == block_id].iloc[:, [1, 2, 3]].copy()
block = block.apply(pd.to_numeric, errors='coerce')
# 2. Extract into dictionary based on mapping
result = {}
# If row_mapping is a dict (like {0: 'nz'}), use it directly
if isinstance(row_mapping, dict):
for row_idx, key in row_mapping.items():
if row_idx < len(block):
result[key] = block.iloc[row_idx].to_numpy(dtype=float) * scale
# If row_mapping is a string prefix (like 'D' or 'S'), auto-generate keys
elif isinstance(row_mapping, str):
for i in range(len(block)):
result[f"{row_mapping}{i+1}"] = block.iloc[i].to_numpy(dtype=float) * scale
return result
# Identify blocks
is_empty = df.isnull().all(axis=1)
df['block_id'] = is_empty.cumsum()
clean_df = df[~is_empty].copy()
# Process Block 2: Landmarks
fiducials = _get_block_data(clean_df, 2, {0: 'nz', 2: 'rpa', 3: 'lpa'})
# Process Block 3: D-Points
d_points = _get_block_data(clean_df, 3, 'D')
# Process Block 4: S-Points
s_points = _get_block_data(clean_df, 4, 'S')
ch_positions = {**d_points, **s_points}
# Create montage with updated coords in head space
initial_montage = make_dig_montage(ch_pos=ch_positions, nasion=fiducials.get('nz'), lpa=fiducials.get('lpa'), rpa=fiducials.get('rpa'), coord_frame='head') # type: ignore
@@ -710,15 +814,23 @@ class UpdateOptodesWindow(QWidget):
write_raw_snirf(raw, save_path)
class EventUpdateMode(Enum):
WRITE_SNIRF = auto() # destructive
WRITE_JSON = auto() # non-destructive
class UpdateEventsWindow(QWidget):
def __init__(self, parent=None):
def __init__(self, parent=None, mode=EventUpdateMode.WRITE_SNIRF, caller=None):
super().__init__(parent, Qt.WindowType.Window)
self.mode = mode
self.caller = caller or self.__class__.__name__
self.setWindowTitle("Update event markers")
self.resize(760, 200)
print("INIT MODE:", mode)
self.label_file_a = QLabel("SNIRF file:")
self.line_edit_file_a = QLineEdit()
self.line_edit_file_a.setReadOnly(True)
@@ -1019,46 +1131,55 @@ class UpdateEventsWindow(QWidget):
QMessageBox.warning(self, "No SNIRF file", "Please select a SNIRF file.")
return
boris_obs = self.boris_data["observations"][selected_obs]
# --- Extract videos + delays ---
files = boris_obs.get("file", {})
offsets = boris_obs.get("media_info", {}).get("offset", {})
videos = {}
for key, path in files.items():
if path: # only include videos that exist
delay = offsets.get(key, 0.0) # default 0 if missing
videos[key] = {"file": path, "delay": delay}
base_name = os.path.splitext(os.path.basename(file_a))[0]
if self.mode == EventUpdateMode.WRITE_SNIRF:
# Open save dialog for SNIRF
base_name = os.path.splitext(os.path.basename(file_a))[0]
suggested_name = f"{base_name}_{suffix}.snirf"
# Open save dialog
save_path, _ = QFileDialog.getSaveFileName(
self,
"Save SNIRF File As",
suggested_name,
"SNIRF Files (*.snirf)"
)
if not save_path:
print("Save cancelled.")
print("SNIRF save cancelled.")
return
if not save_path.lower().endswith(".snirf"):
save_path += ".snirf"
try:
raw = read_raw_snirf(snirf_path, preload=True)
onsets = []
durations = []
descriptions = []
raw = read_raw_snirf(file_a, preload=True)
# --- Align BORIS events to SNIRF ---
boris_events = boris_obs.get("events", [])
onsets, durations, descriptions = [], [], []
open_events = {} # label -> list of start times
label_counts = {}
used_times = set()
sfreq = raw.info['sfreq'] # sampling frequency in Hz
sfreq = raw.info['sfreq']
min_shift = 1.0 / sfreq
max_attempts = 10
for event in boris_events:
if not isinstance(event, list) or len(event) < 3:
continue
event_time = event[0]
label = event[2]
count = label_counts.get(label, 0) + 1
label_counts[label] = count
@@ -1066,74 +1187,84 @@ class UpdateEventsWindow(QWidget):
open_events[label] = []
if count % 2 == 1:
# Odd occurrence = start event
open_events[label].append(event_time)
else:
# Even occurrence = end event
if open_events[label]:
matched_start = open_events[label].pop(0)
duration = event_time - matched_start
start_time = open_events[label].pop(0)
duration = event_time - start_time
if duration <= 0:
print(f"Warning: Duration for {label} is non-positive ({duration}). Skipping.")
continue
shifted_start = matched_start + time_shift
adjusted_time = shifted_start
adjusted_time = start_time + time_shift
attempts = 0
while round(adjusted_time, 6) in used_times and attempts < max_attempts:
adjusted_time += min_shift
attempts += 1
if attempts == max_attempts:
print(f"Warning: Couldn't find unique time for {label} @ {matched_start}s. Skipping.")
continue
adjusted_time = round(adjusted_time, 6)
used_times.add(adjusted_time)
print(f"Adding event: {label} @ {adjusted_time:.3f}s for {duration:.3f}s")
onsets.append(adjusted_time)
durations.append(duration)
descriptions.append(label)
else:
print(f"Warning: Unmatched end for label '{label}' at {event_time:.3f}s. Skipping.")
# Optionally warn about any unmatched starts left open
# Handle unmatched starts
for label, starts in open_events.items():
for start_time in starts:
shifted_start = start_time + time_shift
adjusted_time = shifted_start
adjusted_time = start_time + time_shift
attempts = 0
while round(adjusted_time, 6) in used_times and attempts < max_attempts:
adjusted_time += min_shift
attempts += 1
if attempts == max_attempts:
print(f"Warning: Couldn't find unique time for unmatched start {label} @ {start_time}s. Skipping.")
continue
adjusted_time = round(adjusted_time, 6)
used_times.add(adjusted_time)
print(f"Warning: Unmatched start for label '{label}' at {start_time:.3f}s. Adding with duration 0.")
onsets.append(adjusted_time)
durations.append(0.0)
descriptions.append(label)
new_annotations = Annotations(onset=onsets, duration=durations, description=descriptions)
raw.set_annotations(new_annotations)
write_raw_snirf(raw, save_path)
QMessageBox.information(self, "Success", "SNIRF file updated with aligned BORIS events.")
except Exception as e:
QMessageBox.critical(self, "Error", f"Failed to update SNIRF file:\n{e}")
elif self.mode == EventUpdateMode.WRITE_JSON:
# Open save dialog for JSON
base_name = os.path.splitext(os.path.basename(file_a))[0]
suggested_name = f"{base_name}_{suffix}_alignment.json"
save_path, _ = QFileDialog.getSaveFileName(
self,
"Save Event Alignment JSON As",
suggested_name,
"JSON Files (*.json)"
)
if not save_path:
print("JSON save cancelled.")
return
if not save_path.lower().endswith(".json"):
save_path += ".json"
# Build JSON dict
json_data = {
"observation": selected_obs,
"snirf_anchor": {"label": snirf_label, "time": snirf_anchor_time},
"boris_anchor": {"label": boris_label, "time": boris_anchor_time},
"time_shift": time_shift,
"videos": videos
}
# Write JSON
try:
with open(save_path, "w", encoding="utf-8") as f:
json.dump(json_data, f, indent=4)
QMessageBox.information(self, "Success", f"Event alignment saved to:\n{save_path}")
except Exception as e:
QMessageBox.critical(self, "Error", f"Failed to write JSON:\n{e}")
def update_optode_positions(self, file_a, file_b, save_path):
@@ -1165,6 +1296,47 @@ class UpdateEventsWindow(QWidget):
write_raw_snirf(raw, save_path)
def _apply_events_to_snirf(self, raw, new_annotations, save_path):
raw.set_annotations(new_annotations)
write_raw_snirf(raw, save_path)
def _write_event_mapping_json(
self,
file_a,
file_b,
selected_obs,
snirf_anchor,
boris_anchor,
time_shift,
mapped_events,
save_path
):
import json
from datetime import datetime
import os
payload = {
"source": {
"called_from": self.caller,
"snirf_file": os.path.basename(file_a),
"boris_file": os.path.basename(file_b),
"observation": selected_obs
},
"alignment": {
"snirf_anchor": snirf_anchor,
"boris_anchor": boris_anchor,
"time_shift_seconds": time_shift
},
"events": mapped_events,
"created_at": datetime.utcnow().isoformat() + "Z"
}
with open(save_path, "w", encoding="utf-8") as f:
json.dump(payload, f, indent=2)
return save_path
class ProgressBubble(QWidget):
"""
A clickable widget displaying a progress bar made of colored rectangles and a label.
@@ -1197,7 +1369,7 @@ class ProgressBubble(QWidget):
self.progress_layout = QHBoxLayout()
self.rects = []
for _ in range(20):
for _ in range(24):
rect = QFrame()
rect.setFixedSize(10, 18)
rect.setStyleSheet("background-color: white; border: 1px solid gray;")
@@ -1326,6 +1498,11 @@ class ParamSection(QWidget):
widget.setValidator(QDoubleValidator())
widget.setText(str(param["default"]))
elif param["type"] == list:
if param.get("exclusive", True):
widget = QComboBox()
widget.addItems(param.get("options", []))
widget.setCurrentText(str(param.get("default", "<None Selected>")))
else:
widget = self._create_multiselect_dropdown(None)
else:
widget = QLineEdit()
@@ -1434,7 +1611,10 @@ class ParamSection(QWidget):
if expected_type == bool:
values[name] = widget.currentText() == "True"
elif expected_type == list:
if isinstance(widget, FullClickComboBox):
values[name] = [x.strip() for x in widget.lineEdit().text().split(",") if x.strip()]
elif isinstance(widget, QComboBox):
values[name] = widget.currentText()
else:
raw_text = widget.text()
try:
@@ -2390,9 +2570,23 @@ class ParticipantFoldChannelsWidget(QWidget):
for idx in selected_indexes:
if idx == 0:
try:
flares.fold_channels(haemo_obj)
except Exception:
msg_box = QMessageBox()
msg_box.setIcon(QMessageBox.Icon.Critical)
msg_box.setWindowTitle("Something went wrong!")
message = (
"Unable to locate the fOLD files!<br><br>"
f"Please download the 'Supplementary' folder from <a href='https://github.com/nirx/fOLD-public'>here</a>. "
"Once the folder is downloaded, place it in C:/Users/your username/mne_data/fOLD/fOLD-public-master/Supplementary.<br><br>"
"If you are not using Windows, please go to the FLARES Git page for more information."
)
msg_box.setTextFormat(Qt.TextFormat.RichText)
msg_box.setText(message)
msg_box.setTextInteractionFlags(Qt.TextInteractionFlag.TextBrowserInteraction)
msg_box.setStandardButtons(QMessageBox.StandardButton.Ok)
msg_box.exec()
else:
print(f"No method defined for index {idx}")
@@ -2428,7 +2622,7 @@ class ExportDataAsCSVViewerWidget(QWidget):
self.index_texts = [
"0 (Export Data to CSV)",
# "1 (second image)",
"1 (CSV for SPARKS)",
# "2 (third image)",
# "3 (fourth image)",
]
@@ -2580,7 +2774,6 @@ class ExportDataAsCSVViewerWidget(QWidget):
# Pass the necessary arguments to each method
for file_path in selected_file_paths:
haemo_obj = self.haemo_dict.get(file_path)
if haemo_obj is None:
continue
@@ -2614,10 +2807,63 @@ class ExportDataAsCSVViewerWidget(QWidget):
QMessageBox.critical(self, "Error", f"Failed to update SNIRF file:\n{e}")
elif idx == 1:
try:
suggested_name = f"{file_path}_sparks.csv"
# Open save dialog
save_path, _ = QFileDialog.getSaveFileName(
self,
"Save SNIRF File As",
suggested_name,
"CSV Files (*.csv)"
)
if not save_path:
print("Save cancelled.")
return
if not save_path.lower().endswith(".csv"):
save_path += ".csv"
# Save the CSV here
raw = haemo_obj
data, times = raw.get_data(return_times=True)
ann_col = np.full(times.shape, "", dtype=object)
if raw.annotations is not None and len(raw.annotations) > 0:
for onset, duration, desc in zip(
raw.annotations.onset,
raw.annotations.duration,
raw.annotations.description
):
mask = (times >= onset) & (times < onset + duration)
ann_col[mask] = desc
df = pd.DataFrame(data.T, columns=raw.ch_names)
df.insert(0, "annotation", ann_col)
df.insert(0, "time", times)
df.to_csv(save_path, index=False)
QMessageBox.information(self, "Success", "CSV file has been saved.")
win = UpdateEventsWindow(
parent=self,
mode=EventUpdateMode.WRITE_JSON,
caller="Video Alignment Tool"
)
win.show()
except Exception as e:
QMessageBox.critical(self, "Error", f"Failed to update SNIRF file:\n{e}")
else:
print(f"No method defined for index {idx}")
class ClickableLabel(QLabel):
def __init__(self, full_pixmap: QPixmap, thumbnail_pixmap: QPixmap):
super().__init__()
@@ -4044,7 +4290,7 @@ class MainApplication(QMainWindow):
self.button2.setMinimumSize(100, 40)
self.button3.setMinimumSize(100, 40)
# self.button1.setVisible(False)
self.button1.setVisible(False)
self.button3.setVisible(False)
self.button1.clicked.connect(self.on_run_task)
@@ -4243,7 +4489,7 @@ class MainApplication(QMainWindow):
def update_event_markers(self):
if self.events is None or not self.events.isVisible():
self.events = UpdateEventsWindow(self)
self.events = UpdateEventsWindow(self, EventUpdateMode.WRITE_SNIRF, "Manual SNIRF Edit")
self.events.show()
def open_file_dialog(self):


@@ -12,7 +12,7 @@ from pathlib import Path
import numpy as np
from scipy import linalg
from scipy.spatial.distance import cdist
from scipy.special import sph_harm
from scipy.special import sph_harm_y
from ._fiff.constants import FIFF
from ._fiff.open import fiff_open
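The import change above follows SciPy's replacement of scipy.special.sph_harm with sph_harm_y. In the newer API the degree comes before the order and the angle naming is flipped relative to the old function, so call sites typically need their arguments adjusted as well; a hedged sketch of a direct call (values are arbitrary):

import numpy as np
from scipy.special import sph_harm_y

# sph_harm_y(n, m, theta, phi): n = degree, m = order,
# theta = polar (colatitude) angle, phi = azimuthal angle.
value = sph_harm_y(2, 1, np.pi / 4, np.pi / 3)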


@@ -1025,7 +1025,7 @@ def _handle_sensor_types(meg, eeg, fnirs):
fnirs=dict(channels="fnirs", pairs="fnirs_pairs"),
)
sensor_alpha = {
key: dict(meg_helmet=0.25, meg=0.25).get(key, 0.8)
key: dict(meg_helmet=0.25, meg=0.25).get(key, 1.0)
for ch_dict in alpha_map.values()
for key in ch_dict.values()
}


@@ -586,7 +586,7 @@ class _PyVistaRenderer(_AbstractRenderer):
color = None
else:
scalars = None
tube = line.tube(radius, n_sides=self.tube_n_sides)
tube = line.tube(radius=radius, n_sides=self.tube_n_sides)
actor = _add_mesh(
plotter=self.plotter,
mesh=tube,


@@ -18,7 +18,7 @@ VSVersionInfo(
StringStruct('FileDescription', 'FLARES main application'),
StringStruct('FileVersion', '1.0.0.0'),
StringStruct('InternalName', 'flares.exe'),
StringStruct('LegalCopyright', '© 2025 Tyler de Zeeuw'),
StringStruct('LegalCopyright', '© 2025-2026 Tyler de Zeeuw'),
StringStruct('OriginalFilename', 'flares.exe'),
StringStruct('ProductName', 'FLARES'),
StringStruct('ProductVersion', '1.0.0.0')])


@@ -18,7 +18,7 @@ VSVersionInfo(
StringStruct('FileDescription', 'FLARES updater application'),
StringStruct('FileVersion', '1.0.0.0'),
StringStruct('InternalName', 'main.exe'),
StringStruct('LegalCopyright', '© 2025 Tyler de Zeeuw'),
StringStruct('LegalCopyright', '© 2025-2026 Tyler de Zeeuw'),
StringStruct('OriginalFilename', 'flares_updater.exe'),
StringStruct('ProductName', 'FLARES Updater'),
StringStruct('ProductVersion', '1.0.0.0')])