6 Commits

Author SHA1 Message Date
tyler e1c7e1a0e6 changelog 1.4.3 2026-05-01 11:42:04 -07:00
tyler 69f3df8921 improvements for blazes and fix fold again 2026-05-01 11:40:34 -07:00
tyler d74be5cdec note for updating optode placements 2026-04-15 16:17:01 -07:00
tyler 0f6434121f qol improvements + optode location fix 2026-04-15 16:15:15 -07:00
tyler 8d922ecae9 fix for quick update that occurred 2026-04-14 08:01:23 -07:00
tyler 8655115cca fix to too many processes 2026-03-30 14:12:35 -07:00
3 changed files with 240 additions and 231 deletions
+21
View File
@@ -1,3 +1,24 @@
# Version 1.4.3
- Fixed an issue where the fOLD files could not be located
- Added better support for updating events from external software
# Version 1.4.2
- Fixed AGE, GENDER, GROUP not visually appearing on a bubble after the metadata has been set. Fixes [Issue 42](https://git.research.dezeeuw.ca/tyler/flares/issues/42)
- Fixed first stage of progress bar going yellow after loading from an unprocessed save
- Fixed AGE, GENDER, GROUP not visually appearing on a bubble when loading from a save
- Group images involving an optode template will now be the average of all participants selected in the group and not the last processed participant. Fixes [Issue 62](https://git.research.dezeeuw.ca/tyler/flares/issues/62)
- Group images will no longer crash if being made with participants that have a different number of channels
- Changed CSV files to now save to the same folder rather than asking each time for each file. Fixes [Issue 39](https://git.research.dezeeuw.ca/tyler/flares/issues/39)
# Version 1.4.1
- Hotfix to fix a recursive child loop that would cause the MacOS version to not open
# Version 1.4.0
- This is potentially a save-changing release due to changes in how file paths and parameters are stored. Please update your project files to ensure compatibility
+67 -2
View File
@@ -1743,7 +1743,7 @@ def fold_channels(raw: BaseRaw) -> None:
# Locate the fOLD excel files
if getattr(sys, 'frozen', False):
set_config('MNE_NIRS_FOLD_PATH', resource_path("/mne_data/fOLD/fOLD-public-master/Supplementary")) # type: ignore
set_config('MNE_NIRS_FOLD_PATH', resource_path("./mne_data/fOLD/fOLD-public-master/Supplementary")) # type: ignore
else:
path = os.path.expanduser("~") + "/mne_data/fOLD/fOLD-public-master/Supplementary"
set_config('MNE_NIRS_FOLD_PATH', resource_path(path)) # type: ignore
@@ -2403,6 +2403,71 @@ def plot_3d_evoked_array(
return brain
def aggregate_fnirs_group_geometry(raw_list):
    """
    Average fNIRS geometry across participants in two tiers:

    1. Average each channel pairing (S_D) location across all participants.
    2. Average each individual optode (S, D) position across all of the
       averaged pairings it participates in.

    Parameters
    ----------
    raw_list : list
        MNE Raw objects, one per participant. ``None`` entries are skipped.

    Returns
    -------
    mne.io.RawArray
        A unified Raw object (hbo channels only) with exactly one point per
        optode and zero-filled placeholder data of length 1.

    Raises
    ------
    ValueError
        If ``raw_list`` contains no usable (non-None) Raw objects.
    """
    import mne
    import numpy as np

    # The per-channel loop below tolerates None entries, so the template
    # participant must be chosen the same way instead of blindly taking
    # raw_list[0] (which may itself be None).
    valid_raws = [raw for raw in raw_list if raw is not None]
    if not valid_raws:
        raise ValueError("aggregate_fnirs_group_geometry requires at least one non-None Raw object")

    # Tier 1: collect every participant's loc vector for each channel pairing.
    channel_locs = {}
    all_ch_names = []  # preserves first-seen channel order across participants
    for raw in valid_raws:
        raw_hbo = raw.copy().pick(picks="hbo")
        for i, ch_name in enumerate(raw_hbo.ch_names):
            if ch_name not in channel_locs:
                channel_locs[ch_name] = []
                all_ch_names.append(ch_name)
            channel_locs[ch_name].append(raw_hbo.info['chs'][i]['loc'])
    avg_pairings = {name: np.nanmean(locs, axis=0) for name, locs in channel_locs.items()}

    # Tier 2: group the averaged coordinates by optode name. The MNE loc
    # vector layout used here is [midpoint 0:3, source 3:6, detector 6:9].
    optode_collections = {'sources': {}, 'detectors': {}}
    for ch_name, loc in avg_pairings.items():
        # Channel names look like "S1_D2 hbo": split off the wavelength/type
        # suffix, then split the pairing into its source and detector parts.
        parts = ch_name.split()[0].split('_')
        s_name, d_name = parts[0], parts[1]
        optode_collections['sources'].setdefault(s_name, []).append(loc[3:6])
        optode_collections['detectors'].setdefault(d_name, []).append(loc[6:9])
    final_sources = {s: np.nanmean(coords, axis=0) for s, coords in optode_collections['sources'].items()}
    final_detectors = {d: np.nanmean(coords, axis=0) for d, coords in optode_collections['detectors'].items()}

    # Build unified channel entries, using the first valid participant as a
    # template for all non-location channel fields.
    ref_raw = valid_raws[0].copy().pick(picks="hbo")
    template_lookup = {ch['ch_name']: ch for ch in ref_raw.info['chs']}
    final_chs = []
    for ch_name in all_ch_names:
        unified_loc = avg_pairings[ch_name].copy()
        parts = ch_name.split()[0].split('_')
        s_name, d_name = parts[0], parts[1]
        unified_loc[3:6] = final_sources[s_name]
        unified_loc[6:9] = final_detectors[d_name]
        unified_loc[0:3] = (final_sources[s_name] + final_detectors[d_name]) / 2.0
        # Create the new channel object; fall back to the first template
        # channel if this name is somehow absent from the reference raw.
        new_ch = template_lookup.get(ch_name, ref_raw.info['chs'][0]).copy()
        new_ch['ch_name'] = ch_name
        new_ch['loc'] = unified_loc
        final_chs.append(new_ch)

    # Create the final MNE Info and overwrite its channel structs with ours.
    fake_info = mne.create_info(ch_names=all_ch_names, sfreq=ref_raw.info['sfreq'], ch_types='hbo')
    with fake_info._unlock():
        fake_info['chs'] = final_chs
    return mne.io.RawArray(np.zeros((len(all_ch_names), 1)), fake_info)
def brain_3d_visualization(raw_haemo, df_cha, selected_event, t_or_theta: Literal['t', 'theta'] = 'theta', show_optodes: Literal['sensors', 'labels', 'none', 'all'] = 'all', show_text: bool = True, brain_bounds: float = 1.0) -> None:
@@ -2446,7 +2511,7 @@ def brain_3d_visualization(raw_haemo, df_cha, selected_event, t_or_theta: Litera
brain = plot_3d_evoked_array(raw_for_plot.pick(picks="hbo"), model_df, view="dorsal", distance=0.02, colorbar=True, clim=clim, mode="weighted", size=(800, 700)) # type: ignore
if show_optodes == 'all' or show_optodes == 'sensors':
brain.add_sensors(getattr(raw_for_plot, "info"), trans=Transform('head', 'mri', np.eye(4)), fnirs=["channels", "pairs", "sources", "detectors"], verbose=False) # type: ignore
brain.add_sensors(raw_for_plot.pick(picks="hbo").info, trans=Transform('head', 'mri', np.eye(4)), fnirs=["channels", "pairs", "sources", "detectors"], verbose=False) # type: ignore
if True:
display_text = ('Folder: ' + '\nGroup: ' + '\nCondition: '+ cond + '\nShort Channel Regression: '
+115 -192
View File
@@ -46,7 +46,7 @@ from PySide6.QtGui import QAction, QKeySequence, QIcon, QIntValidator, QDoubleVa
from PySide6.QtSvgWidgets import QSvgWidget # needed to show svgs when app is not frozen
CURRENT_VERSION = "1.4.0"
CURRENT_VERSION = "1.4.3"
APP_NAME = "flares"
API_URL = f"https://git.research.dezeeuw.ca/api/v1/repos/tyler/{APP_NAME}/releases"
API_URL_SECONDARY = f"https://git.research2.dezeeuw.ca/api/v1/repos/tyler/{APP_NAME}/releases"
@@ -664,6 +664,7 @@ class UpdateOptodesWindow(QWidget):
elif extension == '.xlsx':
# TODO: Bad! Why assume sheet1 has the contents?
df = pd.read_excel(file_b, sheet_name='Sheet1')
def _get_block_data(df, block_id, row_mapping, scale=0.001):
@@ -1412,43 +1413,43 @@ class UpdateEventsBlazesWindow(QWidget):
self.combo_snirf_events.setEnabled(False)
def browse_file_b(self):
file_path, _ = QFileDialog.getOpenFileName(self, "Select BLAZES File", "", "BLAZES project Files (*.blaze)")
file_path, _ = QFileDialog.getOpenFileName(self, "Select JSON Timeline File", "", "JSON Files (*.json)")
if file_path:
self.line_edit_file_b.setText(file_path)
try:
with open(file_path, 'r', encoding='utf-8') as f:
data = json.load(f)
self.blazes_data = data
self.json_data = data
obs_keys = self.extract_blazes_observation_strings(data)
obs_keys = self.extract_json_observation_strings(data)
self.combo_events.clear()
if obs_keys:
self.combo_events.addItems(obs_keys)
self.combo_events.setEnabled(True)
else:
QMessageBox.information(self, "No Events", "No observation keys found in BLAZES file.")
QMessageBox.information(self, "No Events", "No events found in JSON file.")
self.combo_events.setEnabled(False)
except (json.JSONDecodeError, FileNotFoundError, KeyError, TypeError) as e:
QMessageBox.warning(self, "Error", f"Failed to parse BLAZES file:\n{e}")
QMessageBox.warning(self, "Error", f"Failed to parse JSON file:\n{e}")
self.combo_events.clear()
self.combo_events.setEnabled(False)
def extract_blazes_observation_strings(self, data):
if "obs" not in data:
raise KeyError("Missing 'obs' key in BLAZES file.")
def extract_json_observation_strings(self, data):
if "events" not in data:
raise KeyError("Missing 'events' key in JSON file.")
obs = data["obs"]
event_strings = []
for event_name, occurrences in obs.items():
# occurrences is a list of dicts: [{"start_frame": 642, "start_time_sec": 26.777, ...}]
for entry in occurrences:
onset = entry.get("start_time_sec", 0.0)
# The new format is a flat list chronologically ordered
for event in data["events"]:
track_name = event.get("track_name", "Unknown")
onset = event.get("start_sec", 0.0)
# Formatting to match your SNIRF style: "Event Name @ 0.000s"
display_str = f"{event_name} @ {onset:.3f}s"
display_str = f"{track_name} @ {onset:.3f}s"
event_strings.append(display_str)
return event_strings
@@ -1464,16 +1465,16 @@ class UpdateEventsBlazesWindow(QWidget):
file_b = self.line_edit_file_b.text()
suffix = APP_NAME
if not hasattr(self, "blazes_data") or self.combo_events.count() == 0 or self.combo_snirf_events.count() == 0:
QMessageBox.warning(self, "Missing data", "Please make sure a BLAZES and SNIRF event are selected.")
if not hasattr(self, "json_data") or self.combo_events.count() == 0 or self.combo_snirf_events.count() == 0:
QMessageBox.warning(self, "Missing data", "Please make sure a JSON and SNIRF event are selected.")
return
try:
blaze_text = self.combo_events.currentText()
_, blaze_time_str = blaze_text.split(" @ ")
blaze_anchor_time = float(blaze_time_str.replace("s", "").strip())
json_text = self.combo_events.currentText()
_, json_time_str = json_text.split(" @ ")
json_anchor_time = float(json_time_str.replace("s", "").strip())
except Exception as e:
QMessageBox.critical(self, "BLAZE Event Error", f"Could not parse BLAZE anchor:\n{e}")
QMessageBox.critical(self, "JSON Event Error", f"Could not parse JSON anchor:\n{e}")
return
try:
@@ -1484,20 +1485,20 @@ class UpdateEventsBlazesWindow(QWidget):
QMessageBox.critical(self, "SNIRF Event Error", f"Could not parse SNIRF anchor:\n{e}")
return
time_shift = snirf_anchor_time - blaze_anchor_time
time_shift = snirf_anchor_time - json_anchor_time
onsets, durations, descriptions = [], [], []
skipped_count = 0
try:
ai_data = self.blazes_data.get("ai_tracks", {})
events_list = self.json_data.get("events", [])
for track_name, events in ai_data.items():
for event in events_list:
track_name = event.get("track_name", "Unknown")
clean_name = track_name.replace("AI: ", "").strip()
for event in events:
original_start = event.get("start_time_sec", 0.0)
original_end = event.get("end_time_sec", original_start)
original_start = event.get("start_sec", 0.0)
original_end = event.get("end_sec", original_start)
duration = original_end - original_start
# FILTER: Minimum 0.1s duration
@@ -1537,7 +1538,7 @@ class UpdateEventsBlazesWindow(QWidget):
description=descriptions
)
# Replace existing annotations with the new aligned AI tracks
# Replace existing annotations with the new aligned JSON tracks
raw.set_annotations(new_annotations)
write_raw_snirf(raw, save_path)
@@ -1547,75 +1548,6 @@ class UpdateEventsBlazesWindow(QWidget):
QMessageBox.critical(self, "Error", f"Failed to update SNIRF file:\n{e}")
def update_optode_positions(self, file_a, file_b, save_path):
    """
    Rewrite a SNIRF file's optode montage from a plain-text coordinate file.

    ``file_b`` is expected to contain one entry per line in the form
    ``NAME: x y z`` with coordinates in millimetres; fiducial names
    (``lpa``/``nz``/``rpa``, case-insensitive) become montage fiducials and
    every other name is treated as a source/detector position. The SNIRF file
    ``file_a`` is re-read, given the new montage, and written to ``save_path``.
    """
    fiducials = {}
    ch_positions = {}
    # Read the lines from the optode file
    with open(file_b, 'r') as f:
        for line in f:
            if line.strip():
                # Split on the colon, then convert the coordinates from mm to meters
                ch_name, coords_str = line.split(":")
                coords = np.array(list(map(float, coords_str.strip().split()))) * 0.001
                # The key we have is a fiducial
                if ch_name.lower() in ['lpa', 'nz', 'rpa']:
                    fiducials[ch_name.lower()] = coords
                # The key we have is a source or detector
                else:
                    ch_positions[ch_name.upper()] = coords
    # Create montage with updated coords in head space
    initial_montage = make_dig_montage(ch_pos=ch_positions, nasion=fiducials.get('nz'), lpa=fiducials.get('lpa'), rpa=fiducials.get('rpa'), coord_frame='head') # type: ignore
    # Read the SNIRF file, set the montage, and write it back
    # TODO: Bad! read_raw_snirf doesn't release memory properly! Should be spawned in a separate process and killed once completed
    raw = read_raw_snirf(file_a, preload=True)
    raw.set_montage(initial_montage)
    write_raw_snirf(raw, save_path)
def _apply_events_to_snirf(self, raw, new_annotations, save_path):
    """Replace ``raw``'s annotations with ``new_annotations`` and write the result to ``save_path`` as SNIRF."""
    raw.set_annotations(new_annotations)
    write_raw_snirf(raw, save_path)
def _write_event_mapping_json(
self,
file_a,
file_b,
selected_obs,
snirf_anchor,
boris_anchor,
time_shift,
mapped_events,
save_path
):
payload = {
"source": {
"called_from": self.caller,
"snirf_file": os.path.basename(file_a),
"boris_file": os.path.basename(file_b),
"observation": selected_obs
},
"alignment": {
"snirf_anchor": snirf_anchor,
"boris_anchor": boris_anchor,
"time_shift_seconds": time_shift
},
"events": mapped_events,
"created_at": datetime.utcnow().isoformat() + "Z"
}
with open(save_path, "w", encoding="utf-8") as f:
json.dump(payload, f, indent=2)
return save_path
class ProgressBubble(QWidget):
"""
@@ -1685,13 +1617,14 @@ class ProgressBubble(QWidget):
# Transition to a green checkmark
self.setSuffixText(" <span style='color: green;'>✔</span>")
def update_progress(self, step_index):
def update_progress(self, step_index, active=True):
self.current_step = step_index
for i, rect in enumerate(self.rects):
if i < step_index:
rect.setStyleSheet("background-color: green; border: 1px solid gray;")
elif i == step_index:
rect.setStyleSheet("background-color: yellow; border: 1px solid gray;")
color = "yellow" if active else "white"
rect.setStyleSheet(f"background-color: {color}; border: 1px solid gray;")
else:
rect.setStyleSheet("background-color: white; border: 1px solid gray;")
@@ -1749,8 +1682,6 @@ class ParamSection(QWidget):
self.dependencies = []
self.selected_path = None
# Load the mne data in a separate process
self.file_executor = concurrent.futures.ProcessPoolExecutor(max_workers=1)
# Title label
title_label = QLabel(section_data["title"])
@@ -3684,8 +3615,22 @@ class ExportDataAsCSVViewerWidget(FlaresBaseWidget):
int(s.split(" ")[0]) for s in self._get_checked_items(self.image_index_dropdown)
]
if not selected_file_paths or not selected_indexes:
QMessageBox.warning(self, "Selection Missing", "Please select at least one participant and one export type.")
return
# 2. ASK ONCE: Select Output Directory
output_dir = QFileDialog.getExistingDirectory(self, "Select Output Folder for CSV Exports")
if not output_dir:
print("Export cancelled: No folder selected.")
return
success_count = 0
# Pass the necessary arguments to each method
for file_path in selected_file_paths:
base_filename = os.path.splitext(os.path.basename(file_path))[0]
haemo_obj = self.haemo_dict.get(file_path)
if haemo_obj is None:
continue
@@ -3693,58 +3638,20 @@ class ExportDataAsCSVViewerWidget(FlaresBaseWidget):
cha = self.cha_dict.get(file_path)
for idx in selected_indexes:
if idx == 0:
try:
suggested_name = f"{file_path}.csv"
# Open save dialog
save_path, _ = QFileDialog.getSaveFileName(
self,
"Save CSV File As",
suggested_name,
"CSV Files (*.csv)"
)
if not save_path:
print("Save cancelled.")
return
if not save_path.lower().endswith(".csv"):
save_path += ".csv"
# Save the CSV here
if idx == 0:
save_path = os.path.join(output_dir, f"{base_filename}_exported.csv")
if cha is not None:
cha.to_csv(save_path)
QMessageBox.information(self, "Success", "CSV file has been saved.")
except Exception as e:
QMessageBox.critical(self, "Error", f"Failed to update CSV file:\n{e}")
success_count += 1
elif idx == 1:
try:
suggested_name = f"{file_path}_sparks.csv"
# Open save dialog
save_path, _ = QFileDialog.getSaveFileName(
self,
"Save CSV File As",
suggested_name,
"CSV Files (*.csv)"
)
if not save_path:
print("Save cancelled.")
return
if not save_path.lower().endswith(".csv"):
save_path += ".csv"
# Save the CSV here
# SPARKS Export
save_path = os.path.join(output_dir, f"{base_filename}_sparks.csv")
if haemo_obj is not None:
raw = haemo_obj
data, times = raw.get_data(return_times=True)
ann_col = np.full(times.shape, "", dtype=object)
if raw.annotations is not None and len(raw.annotations) > 0:
@@ -3758,25 +3665,30 @@ class ExportDataAsCSVViewerWidget(FlaresBaseWidget):
df = pd.DataFrame(data.T, columns=raw.ch_names)
df.insert(0, "annotation", ann_col)
df.insert(0, "time", times)
df.to_csv(save_path, index=False)
QMessageBox.information(self, "Success", "CSV file has been saved.")
win = UpdateEventsWindow(
parent=self,
mode=EventUpdateMode.WRITE_JSON,
caller="Video Alignment Tool"
)
win.show()
except Exception as e:
QMessageBox.critical(self, "Error", f"Failed to update CSV file:\n{e}")
success_count += 1
else:
print(f"No method defined for index {idx}")
except Exception as e:
print(f"Failed to export {file_path} (Type {idx}): {e}")
# 4. Final Notification
if success_count > 0:
QMessageBox.information(self, "Export Complete", f"Successfully saved {success_count} CSV files to:\n{output_dir}")
# # If SPARKS export was included, show the Event Window once at the end
# if 1 in selected_indexes:
# win = UpdateEventsWindow(
# parent=self,
# mode=EventUpdateMode.WRITE_JSON,
# caller="Video Alignment Tool"
# )
# win.show()
class ClickableLabel(QLabel):
def __init__(self, full_pixmap: QPixmap, thumbnail_pixmap: QPixmap):
super().__init__()
@@ -4206,8 +4118,15 @@ class GroupViewerWidget(FlaresBaseWidget):
print(f"Missing parameters for index {idx}, skipping.")
continue
flares.brain_3d_visualization(haemo_obj, all_cha, selected_event, t_or_theta=t_or_theta, show_optodes=show_optodes, show_text=show_text, brain_bounds=brain_bounds)
raw_list = [self.haemo_dict.get(fp) for fp in selected_file_paths]
if len(selected_file_paths) > 1:
print(f"Aggregating geometry for {len(selected_file_paths)} participants...")
processed_raw = flares.aggregate_fnirs_group_geometry(raw_list)
else:
processed_raw = raw_list[0].copy().pick(picks="hbo")
flares.brain_3d_visualization(processed_raw, all_cha, selected_event, t_or_theta=t_or_theta, show_optodes=show_optodes, show_text=show_text, brain_bounds=brain_bounds)
elif idx == 3:
pass
@@ -4385,6 +4304,11 @@ class GroupBrainViewerWidget(FlaresBaseWidget):
int(s.split(" ")[0]) for s in self._get_checked_items(self.image_index_dropdown)
]
all_selected_paths = list(set(file_paths_a + file_paths_b))
if not all_selected_paths:
print("No participants selected.")
return
parameterized_indexes = {
0: [
@@ -4465,14 +4389,12 @@ class GroupBrainViewerWidget(FlaresBaseWidget):
print("contrast_df_a empty?", contrast_df_a.empty)
print("contrast_df_b empty?", contrast_df_b.empty)
# Get one person for their layout
rep_raw = None
for fp in file_paths_a + file_paths_b:
rep_raw = self.haemo_dict.get(fp)
if rep_raw:
break
all_raw_objs = [self.haemo_dict.get(fp) for fp in all_selected_paths if self.haemo_dict.get(fp)]
print(rep_raw)
if len(all_raw_objs) > 1:
processed_raw = flares.aggregate_fnirs_group_geometry(all_raw_objs)
else:
processed_raw = all_raw_objs[0].copy().pick(picks="hbo")
# Visualizations
for idx in selected_indexes:
@@ -4488,12 +4410,12 @@ class GroupBrainViewerWidget(FlaresBaseWidget):
print(f"Missing parameters for index {idx}, skipping.")
continue
if not contrast_df_a.empty and not contrast_df_b.empty and rep_raw:
if not contrast_df_a.empty and not contrast_df_b.empty and processed_raw:
flares.plot_2d_3d_contrasts_between_groups(
contrast_df_a,
contrast_df_b,
raw_haemo=rep_raw,
raw_haemo=processed_raw,
group_a_name=self.group_a_dropdown.currentText(),
group_b_name=self.group_b_dropdown.currentText(),
is_3d=is_3d,
@@ -4616,6 +4538,9 @@ class MainApplication(QMainWindow):
self.setWindowTitle(f"{APP_NAME.upper()}")
self.setGeometry(100, 100, 1280, 720)
# Load the mne data in a separate process
self.file_executor = concurrent.futures.ProcessPoolExecutor(max_workers=1)
self.about = None
self.help = None
self.optodes = None
@@ -4712,6 +4637,7 @@ class MainApplication(QMainWindow):
label = QLabel(key.capitalize())
right_column_layout.addWidget(label)
right_column_layout.addWidget(field)
field.textChanged.connect(self.sync_bubble_data)
label_desc = QLabel('<a href="#">Why are these useful?</a>')
label_desc.setTextInteractionFlags(Qt.TextInteractionFlag.TextBrowserInteraction)
@@ -5404,8 +5330,6 @@ class MainApplication(QMainWindow):
for rel_path, step in raw_progress.items()
}
self.show_files_as_bubbles_from_list(file_list, progress_states, filename)
for rel_path in data["file_list"]:
abs_path = str((project_dir / Path(rel_path)).resolve())
@@ -5424,14 +5348,7 @@ class MainApplication(QMainWindow):
# Scenario C: Empty default
self.file_metadata[abs_path] = {"AGE": "", "GENDER": "", "GROUP": ""}
if file_list:
self.current_file = file_list[0]
self.right_column_widget.show()
# Update Metadata fields (Age/Gender/Group) for the selected file
curr_meta = self.file_metadata.get(self.current_file, {"AGE": "", "GENDER": "", "GROUP": ""})
for key, field in self.meta_fields.items():
field.setText(curr_meta.get(key, ""))
self.show_files_as_bubbles_from_list(file_list, progress_states, filename)
if "current_ui_params" in data:
self.restore_sections_from_config(data["current_ui_params"])
@@ -5588,6 +5505,18 @@ class MainApplication(QMainWindow):
bubble.clicked.connect(self.on_bubble_clicked)
bubble.rightClicked.connect(self.on_bubble_right_clicked)
if hasattr(self, 'file_metadata') and file_path in self.file_metadata:
meta = self.file_metadata[file_path]
parts = []
for key in ["AGE", "GENDER", "GROUP"]:
value = meta.get(key, "").strip()
if value:
parts.append(f"{key}: {value}")
suffix = f"{', '.join(parts)}" if parts else ""
bubble.setSuffixText(suffix)
# Track it
self.bubble_widgets[file_path] = bubble
if file_path not in self.selected_paths:
@@ -5595,7 +5524,7 @@ class MainApplication(QMainWindow):
# Restore saved progress but keep loading state active
step = progress_states.get(file_path, 0)
bubble.update_progress(step)
bubble.update_progress(step, active=False)
# Add to layout
self.bubble_layout.addWidget(bubble, index, 1)
@@ -5756,21 +5685,15 @@ class MainApplication(QMainWindow):
if getattr(self, 'last_clicked_bubble', None) is bubble:
self.last_clicked_bubble = None
def eventFilter(self, watched, event):
if event.type() == QEvent.Type.MouseButtonPress:
widget = self.childAt(event.pos())
if isinstance(widget, ProgressBubble):
pass
else:
if self.last_clicked_bubble:
if not self.last_clicked_bubble.isAncestorOf(widget):
if self.current_file:
def sync_bubble_data(self):
    """
    Refresh the active bubble and persist metadata in real time.

    Runs on every metadata field edit: saves the current field values for
    the selected file, then immediately updates the last-clicked bubble's
    suffix text so the change is visible without re-selecting the bubble.
    No-op when no file is selected or no bubble has been clicked yet.
    """
    if self.current_file and self.last_clicked_bubble:
        # Save the current state of all fields
        self.save_metadata(self.current_file)
        # Grab the updated suffix and apply it immediately
        suffix = self.get_suffix_from_meta_fields()
        self.last_clicked_bubble.setSuffixText(suffix)
self.last_clicked_bubble = None
return super().eventFilter(watched, event)
def placeholder(self):
QMessageBox.information(self, "Placeholder", "This feature is not implemented yet.")