Too many fixes to document individually; see the changelog for details.

This commit is contained in:
2026-03-21 15:26:59 -07:00
parent 4922405138
commit 540793d150
5 changed files with 705 additions and 441 deletions

View File

@@ -1,6 +1,29 @@
# Version 1.3.0
- This is a save-changing release due to a new save file format. Please update your project files to ensure compatibility
- It is still potentially possible to load older saves by enabling 'Incompatible Save Bypass' from the Preferences menu
- Fixed workers not releasing memory when processing multiple participants. Fixes [Issue 55](https://git.research.dezeeuw.ca/tyler/flares/issues/55)
- Fixed part of an issue where memory could increase over time despite clicking the clear button. There are still some edge cases where this can occur
- Fixed an issue when clearing a bubble, reloading the same file, and clicking it would cause the app to crash. Fixes [Issue 57](https://git.research.dezeeuw.ca/tyler/flares/issues/57)
- Picking a .txt or .xlsx file now has both in the same file selection instead of having to select which extension was desired
- Fixed an issue where the fOLD files were not included in the Windows version. Fixes [Issue 60](https://git.research.dezeeuw.ca/tyler/flares/issues/60)
- Added a new parameter to the right side of the screen: EPOCH_EVENTS_HANDLING. Fixes [Issue 58](https://git.research.dezeeuw.ca/tyler/flares/issues/58)
- EPOCH_EVENTS_HANDLING defaults to 'shift' compared to previous versions where the default would have been equivalent to 'strict'
- The label for ENHANCE_NEGATIVE_CORRELATION no longer gets cut off by its dropdown selection
- Loading files and folders now immediately shows a loading symbol on each respective bubble
- Once the file has been completely loaded and processed, the loading symbol will change to a green checkmark and clicking will be enabled
- The metadata in the File information widget is now saved to prevent recalculations every time the bubble is selected
- The status bar will now say loading while the bubbles are being processed, and loaded once the processing has completed
- This new loading method will prevent the application from hanging when loading lots of files at once. Fixes [Issue 59](https://git.research.dezeeuw.ca/tyler/flares/issues/59)
- Fixed text alignment for the first paragraph when a bubble is selected in the 'File information' widget
- The three main widgets are now resizable! All of them have minimum widths to ensure they do not get too squished
- Added a new option 'Reset Window Layout' under the View menu that will resize all widgets back to their default sizes
- Added a new terminal command 'version' that will print the application's current version
# Version 1.2.2
- Added 'Update events in snirf file (BLAZES)...' and renamed 'Update events in snirf file...' to 'Update events in snirf file (BORIS)...'
- Added 'Update events in snirf file (BLAZES)...' and renamed 'Update events in snirf file...' to 'Update events in snirf file (BORIS)...' under the Options menu
- The BLAZES option will assign events that are exported directly from the software [BLAZES](https://git.research.dezeeuw.ca/tyler/blazes)
- Moved the updating logic to a separate file for better reusability and generalization
- Fixed 'Toggle Status Bar' having no effect on the visibility of the status bar

199
flares.py
View File

@@ -16,12 +16,14 @@ from io import BytesIO
from typing import Any, Optional, cast, Literal, Union
from itertools import compress
from copy import deepcopy
from multiprocessing import Queue
from multiprocessing import Queue, Pool
import os.path as op
import re
import traceback
from concurrent.futures import ProcessPoolExecutor, as_completed
from queue import Empty
import time
import multiprocessing as mp
# External library imports
import matplotlib.pyplot as plt
@@ -169,6 +171,8 @@ H_FREQ: float
L_TRANS_BANDWIDTH: float
H_TRANS_BANDWIDTH: float
EPOCH_HANDLING: str
RESAMPLE: bool
RESAMPLE_FREQ: int
STIM_DUR: float
@@ -247,6 +251,28 @@ REQUIRED_KEYS: dict[str, Any] = {
}
import logging
import os
import psutil
import traceback
audit_log = logging.getLogger("memory_audit")
audit_log.setLevel(logging.INFO)
audit_log.propagate = False # This prevents it from talking to other loggers
# 2. Add a file handler specifically for this audit logger
if not audit_log.handlers:
fh = logging.FileHandler('flares_memory_audit.log')
fh.setFormatter(logging.Formatter('%(asctime)s | PID: %(process)d | %(message)s'))
audit_log.addHandler(fh)
def get_mem_mb():
return psutil.Process(os.getpid()).memory_info().rss / 1024 / 1024
class ProcessingError(Exception):
def __init__(self, message: str = "Something went wrong!"):
self.message = message
@@ -370,58 +396,92 @@ def gui_entry(config: dict[str, Any], gui_queue: Queue, progress_queue: Queue) -
t.join(timeout=5) # prevent permanent hang
def process_participant_worker(args):
file_path, file_params, file_metadata, progress_queue = args
set_config_me(file_params)
set_metadata(file_path, file_metadata)
logger.info(f"DEBUG: Metadata for {file_path}: AGE={globals().get('AGE')}, GENDER={globals().get('GENDER')}, GROUP={globals().get('GROUP')}")
def progress_callback(step_idx):
if progress_queue:
progress_queue.put(('progress', file_path, step_idx))
def process_participant_worker(file_path, file_params, file_metadata, result_queue, progress_queue):
file_name = os.path.basename(file_path)
try:
# 1. Setup
set_config_me(file_params)
set_metadata(file_path, file_metadata)
def progress_callback(step_idx):
if progress_queue:
# We use put_nowait to prevent the worker from hanging on a full queue
try:
progress_queue.put_nowait(('progress', file_path, step_idx))
except: pass
# 2. Process
result = process_participant(file_path, progress_callback=progress_callback)
return file_path, result, None
# 3. Report Success
result_queue.put((file_path, result, None))
except Exception as e:
error_trace = traceback.format_exc()
return file_path, None, (str(e), error_trace)
result_queue.put((file_path, None, str(e)))
finally:
# --- THE FIX: MANDATORY EXIT ---
# Explicitly flush the logs and force the process to terminate
audit_log.info(f"Worker for {file_name} calling hard exit.")
sys.stdout.flush()
sys.stderr.flush()
# We use os._exit(0) as a nuclear option if sys.exit() is being caught by a try/except
os._exit(0)
def process_multiple_participants(file_paths, file_params, file_metadata, progress_queue=None, max_workers=None):
def process_multiple_participants(file_paths, file_params, file_metadata, progress_queue=None, max_workers=6):
audit_log.info(f"--- SESSION START: {len(file_paths)} files ---")
pending_files = list(file_paths)
active_processes = [] # List of tuples: (Process object, file_path)
results_by_file = {}
# We use a manager queue so it handles IPC serialization cleanly
manager = mp.Manager()
result_queue = manager.Queue()
file_args = [(file_path, file_params, file_metadata, progress_queue) for file_path in file_paths]
# Loop continues as long as there are files to process OR workers still running
while pending_files or active_processes:
# 1. SPWAN WORKERS: Only spawn if we are under the limit AND have files left
while len(active_processes) < max_workers and pending_files:
file_path = pending_files.pop(0)
p = mp.Process(
target=process_participant_worker,
args=(file_path, file_params, file_metadata, result_queue, progress_queue)
)
p.start()
active_processes.append((p, file_path))
audit_log.info(f"Spawned worker. Active processes: {len(active_processes)}")
with ProcessPoolExecutor(max_workers=max_workers) as executor:
futures = {executor.submit(process_participant_worker, arg): arg[0] for arg in file_args}
for future in as_completed(futures):
file_path = futures[future]
# 2. COLLECT RESULTS: Drain the queue continuously so workers don't deadlock
while not result_queue.empty():
try:
file_path, result, error = future.result()
if error:
error_message, error_traceback = error
if progress_queue:
progress_queue.put({
"type": "error",
"file": file_path,
"error": error_message,
"traceback": error_traceback
})
continue
results_by_file[file_path] = result
except Exception as e:
print(f"Unexpected error processing {file_path}: {e}")
res_path, result, error = result_queue.get_nowait()
if not error:
results_by_file[res_path] = result
else:
audit_log.error(f"Worker failed on {os.path.basename(res_path)}: {error}")
except Exception:
break # Queue is empty or busy
# 3. CLEANUP: Check for finished processes and remove them
for p, f_path in active_processes[:]: # Iterate over a slice copy
if not p.is_alive():
p.join() # Formally close the process to free OS resources
active_processes.remove((p, f_path))
audit_log.info(f"Worker finished. Active processes dropping to: {len(active_processes)}")
# Brief pause to prevent this while loop from pegging your CPU to 100%
time.sleep(0.5)
audit_log.info("--- SESSION COMPLETE ---")
return results_by_file
def markbad(data, ax, ch_names: list[str]) -> None:
"""
Add a strikethrough to a plot for channels marked as bad.
@@ -1143,16 +1203,48 @@ def filter_the_data(raw_haemo):
def safe_create_epochs(raw, events, event_dict, tmin, tmax, baseline):
"""
Attempts to create epochs, shifting event times slightly if
sample collisions are detected.
"""
shift_increment = 1.0 / raw.info['sfreq'] # The duration of exactly one sample
for attempt in range(10): # Limit attempts to avoid infinite loops
try:
epochs = Epochs(
raw, events, event_id=event_dict,
tmin=tmin, tmax=tmax, baseline=baseline,
preload=True, verbose=False
)
return epochs
except RuntimeError as e:
if "Event time samples were not unique" in str(e):
# Find duplicates in the events array (column 0 is the sample index)
vals, counts = np.unique(events[:, 0], return_counts=True)
duplicates = vals[counts > 1]
# Shift the second occurrence of every duplicate by 1 sample
for dup in duplicates:
idx = np.where(events[:, 0] == dup)[0][1:] # Get all but the first
events[idx, 0] += 1
print(f"Collision detected. Nudging events by {shift_increment:.4f}s and retrying...")
continue
else:
raise e # Raise if it's a different Runtime Error
raise RuntimeError("Could not resolve event collisions after 10 attempts.")
def epochs_calculations(raw_haemo, events, event_dict):
fig_epochs = [] # List to store figures
# Create epochs from raw data
epochs = Epochs(raw_haemo,
events,
event_id=event_dict,
tmin=-5,
tmax=15,
baseline=(None, 0))
if EPOCH_HANDLING == 'shift':
epochs = safe_create_epochs(raw=raw_haemo, events=events, event_dict=event_dict, tmin=-5, tmax=15, baseline=(None, 0))
else:
epochs = Epochs(raw_haemo, events, event_id=event_dict, tmin=-5, tmax=15, baseline=(None, 0))
# Make a copy of the epochs and drop bad ones
epochs2 = epochs.copy()
@@ -1582,15 +1674,12 @@ def resource_path(relative_path):
def fold_channels(raw: BaseRaw) -> None:
# if getattr(sys, 'frozen', False):
path = os.path.expanduser("~") + "/mne_data/fOLD/fOLD-public-master/Supplementary"
logger.info(path)
set_config('MNE_NIRS_FOLD_PATH', resource_path(path)) # type: ignore
# # Locate the fOLD excel files
# else:
# logger.info("yabba")
# set_config('MNE_NIRS_FOLD_PATH', resource_path("../../mne_data/fOLD/fOLD-public-master/Supplementary")) # type: ignore
# Locate the fOLD excel files
if getattr(sys, 'frozen', False):
set_config('MNE_NIRS_FOLD_PATH', resource_path("../../mne_data/fOLD/fOLD-public-master/Supplementary")) # type: ignore
else:
path = os.path.expanduser("~") + "/mne_data/fOLD/fOLD-public-master/Supplementary"
set_config('MNE_NIRS_FOLD_PATH', resource_path(path)) # type: ignore
output = None

View File

@@ -1,2 +0,0 @@
[LocalizedFileNames]
updater.png=@updater.png,0

View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" height="24px" viewBox="0 -960 960 960" width="24px" fill="#1f1f1f"><path d="M200-120q-33 0-56.5-23.5T120-200v-560q0-33 23.5-56.5T200-840h560q33 0 56.5 23.5T840-760v560q0 33-23.5 56.5T760-120H200Zm0-80h320v-560H200v560Zm560 0v-560H600v560h160Z"/></svg>

After

Width:  |  Height:  |  Size: 291 B

919
main.py

File diff suppressed because it is too large Load Diff