diff --git a/main.py b/main.py
index 7d3f2b7..a2367cf 100644
--- a/main.py
+++ b/main.py
@@ -33,11 +33,11 @@ from predictor import GeneralPredictor
from batch_processing import BatchProcessorDialog
import PySide6
-from PySide6.QtWidgets import (QApplication, QMainWindow, QWidget, QVBoxLayout, QGraphicsView, QGraphicsScene,
+from PySide6.QtWidgets import (QApplication, QMainWindow, QTabBar, QWidget, QVBoxLayout, QGraphicsView, QGraphicsScene,
QHBoxLayout, QSplitter, QLabel, QPushButton, QComboBox, QInputDialog,
- QFileDialog, QScrollArea, QMessageBox, QSlider, QTextEdit)
+ QFileDialog, QScrollArea, QMessageBox, QSlider, QTextEdit, QGroupBox, QGridLayout, QCheckBox, QTabWidget)
from PySide6.QtCore import Qt, QThread, Signal, QUrl, QRectF, QPointF, QRect, QSizeF
-from PySide6.QtGui import QPainter, QColor, QFont, QPen, QBrush, QAction, QKeySequence, QIcon, QTextOption
+from PySide6.QtGui import QPainter, QColor, QFont, QPen, QBrush, QAction, QKeySequence, QIcon, QTextOption, QImage, QPixmap
from PySide6.QtMultimedia import QMediaPlayer, QAudioOutput
from PySide6.QtMultimediaWidgets import QGraphicsVideoItem
@@ -54,7 +54,7 @@ PLATFORM_NAME = platform.system().lower()
def debug_print():
    """Print the caller's qualified name when VERBOSITY is enabled.

    Debug aid only; does nothing unless the module-level VERBOSITY flag is
    truthy.
    """
    if VERBOSITY:
        frame = inspect.currentframe().f_back
        # co_qualname requires Python 3.11+; fall back to the bare code name
        # on older interpreters instead of printing the filename.
        qualname = getattr(frame.f_code, "co_qualname", frame.f_code.co_name)
        print(qualname)
@@ -86,6 +86,279 @@ TRACK_COLORS = BASE_COLORS + REMAINING_COLORS


import os
import json
import cv2
from PySide6.QtWidgets import (QWidget, QVBoxLayout, QHBoxLayout, QPushButton,
                               QFileDialog, QCheckBox, QComboBox, QLabel,
                               QGridLayout, QGroupBox, QStackedWidget, QInputDialog, QMessageBox)
from PySide6.QtGui import QPixmap, QImage
from PySide6.QtCore import Qt

+
class OpenFileWindow(QWidget):
    """Pre-flight window for configuring a new analysis session.

    Gathers the video file, an optional behavioral-data source (BORIS
    project JSON, trained .pkl model, or bypass/manual), and pose-inference
    settings. The owning window connects to ``btn_confirm`` and reads the
    public attributes (``video_path``, ``obs_file``, ``current_video_fps``,
    ``current_video_offset``) back out once the user confirms.
    """

    def __init__(self, parent=None):
        super().__init__(parent, Qt.WindowType.Window)
        self.setWindowTitle(f"Load Video - {APP_NAME.upper()}")
        self.setMinimumWidth(650)

        # State read by the parent window after confirmation.
        self.video_path = None            # absolute path of the chosen video
        self.obs_file = None              # path of the loaded .boris/.json file
        self.full_json_data = None        # parsed BORIS project (dict) or None
        self.current_video_fps = 30.0     # FPS probed from the video; 30 until probed
        self.current_video_offset = 0.0   # per-slot offset from the BORIS media_info

        self.setup_ui()

    def setup_ui(self):
        """Build all widgets, apply styling and wire signal connections."""
        self.setStyleSheet("""
            QWidget { background-color: #1e1e1e; color: #ffffff; font-family: 'Segoe UI'; }
            QGroupBox {
                border: 1px solid #3d3d3d; border-radius: 8px; margin-top: 15px;
                padding-top: 15px; font-weight: bold; color: #00aaff; text-transform: uppercase;
            }
            QLabel { color: #ffffff; font-weight: 500; }
            QLabel:disabled { color: #444444; }
            QLabel#Metadata { color: #00ffaa; font-family: 'Consolas'; font-size: 11px; }
            QLabel#Preview { background-color: #000000; border: 2px solid #3d3d3d; }
            QLabel#Warning { color: #ff5555; font-size: 11px; font-style: italic; font-weight: bold; }

            QPushButton { background-color: #3d3d3d; border: 1px solid #555; padding: 6px; border-radius: 4px; }
            QPushButton:hover { background-color: #00aaff; color: #000; }
            QPushButton:disabled { color: #444; background-color: #252525; }

            QComboBox { background-color: #2d2d2d; border: 1px solid #555; padding: 4px; border-radius: 4px; }
            QComboBox:disabled { background-color: #222; color: #444; border: 1px solid #2a2a2a; }
        """)

        main_layout = QVBoxLayout(self)

        # --- Section 1: Video ---
        video_group = QGroupBox("Primary Video Source")
        v_grid = QGridLayout(video_group)
        self.btn_pick_video = QPushButton("Select Video")
        self.lbl_video_path = QLabel("No video selected...")
        self.lbl_video_metadata = QLabel("Metadata: N/A")
        self.lbl_video_metadata.setObjectName("Metadata")
        self.video_preview = QLabel("NO PREVIEW")
        self.video_preview.setFixedSize(160, 90)
        self.video_preview.setObjectName("Preview")

        v_grid.addWidget(QLabel("Target Video:"), 0, 0)
        v_grid.addWidget(self.btn_pick_video, 0, 1)
        v_grid.addWidget(self.video_preview, 0, 2, 3, 1)
        v_grid.addWidget(QLabel("Path:"), 1, 0)
        v_grid.addWidget(self.lbl_video_path, 1, 1)
        v_grid.addWidget(self.lbl_video_metadata, 2, 0, 1, 2)
        main_layout.addWidget(video_group)

        # --- Section 2: Analysis Modes ---
        # A combo box drives a QStackedWidget with one page per mode.
        self.adv_group = QGroupBox("Behavioral Analysis Mode")
        adv_layout = QVBoxLayout(self.adv_group)
        self.combo_mode = QComboBox()
        self.combo_mode.addItems(["BORIS Project (JSON)", "Trained ML Model (.pkl)", "Bypass / Manual"])
        adv_layout.addWidget(self.combo_mode)

        self.mode_stack = QStackedWidget()

        # Mode 1: BORIS
        self.page_boris = QWidget()
        m1_grid = QGridLayout(self.page_boris)
        self.btn_boris_file = QPushButton("Load .boris File")
        self.lbl_session = QLabel("Session Key:")
        self.combo_boris_keys = QComboBox()
        self.lbl_slot = QLabel("Video Slot:")
        self.combo_video_slot = QComboBox()

        # Disabled until a BORIS file is successfully parsed.
        for w in [self.lbl_session, self.combo_boris_keys, self.lbl_slot, self.combo_video_slot]:
            w.setEnabled(False)

        m1_grid.addWidget(QLabel("BORIS File:"), 0, 0)
        m1_grid.addWidget(self.btn_boris_file, 0, 1)
        m1_grid.addWidget(self.lbl_session, 1, 0)
        m1_grid.addWidget(self.combo_boris_keys, 1, 1)
        m1_grid.addWidget(self.lbl_slot, 2, 0)
        m1_grid.addWidget(self.combo_video_slot, 2, 1)

        # Mode 2: PKL Model
        self.page_pkl = QWidget()
        m2_grid = QGridLayout(self.page_pkl)
        self.btn_pkl_file = QPushButton("Load .pkl Model")
        self.lbl_pkl_path = QLabel("No model selected...")
        m2_grid.addWidget(QLabel("Model File:"), 0, 0)
        m2_grid.addWidget(self.btn_pkl_file, 0, 1)
        m2_grid.addWidget(QLabel("Selected:"), 1, 0)
        m2_grid.addWidget(self.lbl_pkl_path, 1, 1)

        # Mode 3: Bypass
        self.page_bypass = QWidget()
        m3_layout = QVBoxLayout(self.page_bypass)
        self.lbl_bypass_info = QLabel("Bypass Mode: No behavioral data will be loaded.\nManual annotation mode enabled.")
        self.lbl_bypass_info.setStyleSheet("color: #888; font-style: italic;")
        m3_layout.addWidget(self.lbl_bypass_info)

        self.mode_stack.addWidget(self.page_boris)
        self.mode_stack.addWidget(self.page_pkl)
        self.mode_stack.addWidget(self.page_bypass)
        adv_layout.addWidget(self.mode_stack)
        main_layout.addWidget(self.adv_group)

        # --- Section 3: Inference ---
        self.cfg_group = QGroupBox("Inference Settings")
        c_grid = QGridLayout(self.cfg_group)
        self.check_use_cache = QCheckBox("Auto-search pose cache (.npy)")
        self.check_use_cache.setChecked(True)

        self.lbl_model_prompt = QLabel("Pose Model:")
        self.combo_inference_model = QComboBox()
        self.combo_inference_model.addItems(["YOLO11n-Pose", "YOLO11m-Pose", "Mediapipe BlazePose"])

        self.check_bypass_inference = QCheckBox("Bypass Pose Inference")
        self.lbl_inf_warning = QLabel("⚠ WARNING: Nothing fancy. Raw video playback only.")
        self.lbl_inf_warning.setObjectName("Warning")
        self.lbl_inf_warning.setVisible(False)

        c_grid.addWidget(self.check_use_cache, 0, 0, 1, 2)
        c_grid.addWidget(self.lbl_model_prompt, 1, 0)
        c_grid.addWidget(self.combo_inference_model, 1, 1)
        c_grid.addWidget(self.check_bypass_inference, 2, 0)
        c_grid.addWidget(self.lbl_inf_warning, 2, 1)
        main_layout.addWidget(self.cfg_group)

        # --- Bottom Buttons ---
        btn_layout = QHBoxLayout()
        self.btn_cancel = QPushButton("Cancel")
        self.btn_confirm = QPushButton("Initialize BLAZE Engine")
        self.btn_confirm.setStyleSheet("background-color: #00aaff; color: #1e1e1e; font-weight: bold;")
        btn_layout.addWidget(self.btn_cancel)
        btn_layout.addWidget(self.btn_confirm)
        main_layout.addLayout(btn_layout)

        # Connections (btn_confirm is wired by the owning window).
        self.btn_pick_video.clicked.connect(self.handle_video_selection)
        self.combo_mode.currentIndexChanged.connect(self.mode_stack.setCurrentIndex)
        self.btn_boris_file.clicked.connect(self.handle_boris_load)
        self.combo_boris_keys.currentIndexChanged.connect(self.handle_session_change)
        self.combo_video_slot.currentIndexChanged.connect(self.handle_slot_change)
        self.btn_pkl_file.clicked.connect(self.handle_pkl_selection)
        self.check_bypass_inference.toggled.connect(self.handle_inference_toggle)
        self.btn_cancel.clicked.connect(self.close)

    def format_time(self, seconds):
        """Format a duration in seconds as an HH:MM:SS string."""
        h, m, s = int(seconds // 3600), int((seconds % 3600) // 60), int(seconds % 60)
        return f"{h:02d}:{m:02d}:{s:02d}"

    def handle_video_selection(self):
        """Prompt for a video, probe its metadata and render a preview frame."""
        path, _ = QFileDialog.getOpenFileName(self, "Open Video", "", "Video Files (*.mp4 *.avi *.mkv)")
        if not path:
            return
        self.video_path = path
        self.lbl_video_path.setText(os.path.basename(path))
        cap = cv2.VideoCapture(path)
        if cap.isOpened():
            # Some containers report 0 FPS; fall back to 30 to avoid a
            # division by zero below.
            fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
            self.current_video_fps = fps
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            time_str = self.format_time(cap.get(cv2.CAP_PROP_FRAME_COUNT) / fps)
            self.lbl_video_metadata.setText(f"RES: {width}x{height} | FPS: {fps:.2f} | LEN: {time_str}")
        cap.release()
        self.render_preview(path)
        if self.full_json_data:
            self.attempt_auto_match()

    def handle_boris_load(self):
        """Load and parse a BORIS/JSON project file and list its sessions."""
        path, _ = QFileDialog.getOpenFileName(self, "Select JSON", "", "JSON Files (*.json *.boris)")
        if not path:
            return
        self.obs_file = path
        self.btn_boris_file.setText(os.path.basename(path))
        try:
            with open(path, 'r') as f:
                self.full_json_data = json.load(f)
            obs = self.full_json_data.get("observations", {})
            self.combo_boris_keys.setEnabled(True)
            self.lbl_session.setEnabled(True)
            self.combo_boris_keys.clear()
            self.combo_boris_keys.addItems(list(obs.keys()))
            if self.video_path:
                self.attempt_auto_match()
        except (OSError, ValueError) as e:
            # ValueError covers json.JSONDecodeError; OSError covers file I/O.
            QMessageBox.warning(self, "Parse Error", str(e))

    def handle_session_change(self):
        """Populate the video-slot combo for the newly selected session."""
        session_key = self.combo_boris_keys.currentText()
        if not self.full_json_data or not session_key:
            return
        session_data = self.full_json_data.get("observations", {}).get(session_key, {})
        file_map = session_data.get("file", {})

        # Repopulating the combo should not fire handle_slot_change per item.
        self.combo_video_slot.blockSignals(True)
        self.combo_video_slot.clear()
        slots = list(file_map.keys())
        self.combo_video_slot.addItems(slots)
        self.combo_video_slot.setEnabled(True)
        self.lbl_slot.setEnabled(True)
        self.combo_video_slot.blockSignals(False)

        # Pre-select the slot whose file list mentions the chosen video.
        if self.video_path:
            video_filename = os.path.basename(self.video_path)
            for i, slot in enumerate(slots):
                if any(video_filename in f for f in file_map[slot]):
                    self.combo_video_slot.setCurrentIndex(i)
                    break

    def handle_slot_change(self):
        """Read the per-slot playback offset from the BORIS media_info."""
        session_key = self.combo_boris_keys.currentText()
        slot_key = self.combo_video_slot.currentText()
        # Guard full_json_data too: signals can fire before a file is loaded.
        if not self.full_json_data or not session_key or not slot_key:
            return
        session_data = self.full_json_data.get("observations", {}).get(session_key, {})
        val = session_data.get("media_info", {}).get("offset", {}).get(str(slot_key))
        if val is not None:
            self.current_video_offset = float(val)
            txt = self.lbl_video_metadata.text().split(" | Offset:")[0]
            self.lbl_video_metadata.setText(f"{txt} | Offset: {self.current_video_offset}s")

    def attempt_auto_match(self):
        """Try to select the BORIS session matching the video's filename.

        Compares a fingerprint built from the first, second and last
        underscore-separated parts of the video name against session keys
        of the form ``a_b_c`` (case-insensitive).
        """
        obs = self.full_json_data.get("observations", {})
        v_name = os.path.splitext(os.path.basename(self.video_path))[0]
        v_parts = v_name.split('_')
        v_fp = f"{v_parts[0]}_{v_parts[1]}_{v_parts[-1]}" if len(v_parts) >= 3 else v_name
        for i, sk in enumerate(obs.keys()):
            s_parts = sk.split('_')
            if len(s_parts) == 3 and f"{s_parts[0]}_{s_parts[1]}_{s_parts[2]}".lower() == v_fp.lower():
                self.combo_boris_keys.setCurrentIndex(i)
                return

    def handle_pkl_selection(self):
        """Prompt for a trained .pkl model and display its filename."""
        path, _ = QFileDialog.getOpenFileName(self, "Select Model", "", "Pickle Files (*.pkl)")
        if path:
            self.lbl_pkl_path.setText(os.path.basename(path))

    def handle_inference_toggle(self, checked):
        """Grey out the model controls while pose inference is bypassed."""
        # Target the model label and checkbox explicitly for greying out
        for w in [self.check_use_cache, self.combo_inference_model, self.lbl_model_prompt]:
            w.setEnabled(not checked)
        self.lbl_inf_warning.setVisible(checked)

    def render_preview(self, path):
        """Render the first frame of *path* into the preview label."""
        cap = cv2.VideoCapture(path)
        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            h, w = frame.shape[:2]
            # Use the real row stride instead of assuming w*3 contiguity.
            image = QImage(frame.data, w, h, frame.strides[0], QImage.Format_RGB888)
            # QPixmap.fromImage copies the data, so `frame` may be freed after.
            pixmap = QPixmap.fromImage(image)
            self.video_preview.setPixmap(pixmap.scaled(self.video_preview.size(), Qt.KeepAspectRatio))
        cap.release()
+
+
+
+
class AboutWindow(QWidget):
"""
Simple About window displaying basic application information.
@@ -1204,18 +1477,16 @@ class VideoView(QGraphicsView):
# ==========================================
# MAIN PREMIERE WINDOW
# ==========================================
+
class PremiereWindow(QMainWindow):
def __init__(self):
- debug_print()
super().__init__()
- self.setWindowTitle("Pose Analysis Timeline")
+ self.setWindowTitle(f"Pose Analysis Timeline - {APP_NAME}")
self.resize(1200, 900)
- self.about = None
- self.help = None
-
self.platform_suffix = "-" + PLATFORM_NAME
+ # Application-wide Updaters
self.updater = UpdateManager(
main_window=self,
api_url=API_URL,
@@ -1226,93 +1497,196 @@ class PremiereWindow(QMainWindow):
app_name=APP_NAME
)
- # self.setStyleSheet("background-color: #1e1e1e; color: #ffffff;")
self.setStyleSheet("""
- QMainWindow, QWidget#centralWidget {
- background-color: #1e1e1e;
- }
- QLabel, QStatusBar, QMenuBar {
- color: #ffffff;
- }
- /* Target the Timeline specifically */
- TimelineWidget {
- background-color: #1e1e1e;
- border: 1px solid #333333;
- }
- /* Button styling with Grey borders */
- QDialog, QMessageBox, QFileDialog {
- background-color: #2b2b2b;
- }
- QDialog QLabel, QMessageBox QLabel {
- color: #ffffff;
- }
- QPushButton {
- background-color: #2b2b2b;
- color: #ffffff;
- border: 1px solid #555555; /* Subtle Grey border */
- border-radius: 3px;
- padding: 4px;
- }
- QPushButton:hover {
- background-color: #3d3d3d;
- border-color: #888888; /* Brightens border on hover */
- }
- QPushButton:pressed {
- background-color: #111111;
- }
- QPushButton:disabled {
- border-color: #333333;
- color: #444444;
- }
- /* Splitter/Divider styling */
- QSplitter::handle {
- background-color: #333333; /* Dark grey dividers */
- }
- QSplitter::handle:horizontal {
- width: 2px;
- }
- QSplitter::handle:vertical {
- height: 2px;
- }
- /* ScrollArea styling to keep it dark */
- QScrollArea, QScrollArea > QWidget > QWidget {
- background-color: #1e1e1e;
- border: none;
- }
+ QMainWindow, QWidget#centralWidget { background-color: #1e1e1e; }
+ QLabel, QStatusBar, QMenuBar { color: #ffffff; }
+ QTabWidget::pane { border: 1px solid #333333; background: #1e1e1e; }
+ QTabBar::tab { background: #2b2b2b; color: #aaa; padding: 8px 15px; border: 1px solid #333; border-bottom: none; border-top-left-radius: 4px; border-top-right-radius: 4px; }
+ QTabBar::tab:selected { background: #3d3d3d; color: #fff; font-weight: bold; }
+ QTabBar::tab:hover { background: #444; }
""")
- self.predictor = GeneralPredictor()
+ # --- Tab System ---
+ self.tabs = QTabWidget()
+ self.tabs.setTabsClosable(True)
+ self.tabs.tabCloseRequested.connect(self.close_tab)
+ self.setCentralWidget(self.tabs)
- self.file_path = None
- self.obs_file = None
- self.selected_obs_subkey = None
- self.current_video_offset = 0.0
+ self.create_welcome_tab()
+ self.create_menu_bar()
- # Core Layout
- main_splitter = QSplitter(Qt.Vertical)
- top_splitter = QSplitter(Qt.Horizontal)
+ # Update checks
+ self.local_check_thread = LocalPendingUpdateCheckThread(CURRENT_VERSION, self.platform_suffix, PLATFORM_NAME, APP_NAME)
+ self.local_check_thread.pending_update_found.connect(self.updater.on_pending_update_found)
+ self.local_check_thread.no_pending_update.connect(self.updater.on_no_pending_update)
+ self.local_check_thread.start()
+
+ # Window instances
+ self.load_window = None
+ self.about = None
+ self.help = None
+
+ def create_welcome_tab(self):
+ welcome_widget = QWidget()
+ layout = QVBoxLayout(welcome_widget)
- # --- Top Left: Video Player ---
+ title = QLabel(f"Welcome to {APP_NAME}")
+ title.setStyleSheet("font-size: 24px; font-weight: bold; color: #00aaff;")
+ title.setAlignment(Qt.AlignCenter)
+
+ subtitle = QLabel("Click 'File' > 'Load Video...' to begin a new analysis session.")
+ subtitle.setStyleSheet("font-size: 14px; color: #aaaaaa;")
+ subtitle.setAlignment(Qt.AlignCenter)
+
+ layout.addStretch()
+ layout.addWidget(title)
+ layout.addWidget(subtitle)
+ layout.addStretch()
+
+ self.tabs.addTab(welcome_widget, "Welcome")
+ # Disable the close button on the welcome tab
+ self.tabs.tabBar().setTabButton(0, QTabBar.ButtonPosition.RightSide, None)
+
+ def create_menu_bar(self):
+ menu_bar = self.menuBar()
+ self.statusbar = self.statusBar()
+
+ def make_action(name, shortcut=None, slot=None, checkable=False, checked=False, icon=None):
+ action = QAction(name, self)
+ if shortcut: action.setShortcut(QKeySequence(shortcut))
+ if slot: action.triggered.connect(slot)
+ if checkable:
+ action.setCheckable(True)
+ action.setChecked(checked)
+ if icon: action.setIcon(QIcon(icon))
+ return action
+
+ # File Menu
+ file_menu = menu_bar.addMenu("File")
+ file_menu.addAction(make_action("Load Video...", "Ctrl+O", self.open_load_video_dialog))
+ file_menu.addSeparator()
+ file_menu.addAction(make_action("Exit", "Ctrl+Q", QApplication.instance().quit))
+
+ # Edit Menu (Routes to current tab)
+ edit_menu = menu_bar.addMenu("Edit")
+ edit_menu.addAction(make_action("Cut", "Ctrl+X", self.route_cut))
+ edit_menu.addAction(make_action("Copy", "Ctrl+C", self.route_copy))
+ edit_menu.addAction(make_action("Paste", "Ctrl+V", self.route_paste))
+
+ # View Menu
+ view_menu = menu_bar.addMenu("View")
+ toggle_sb = make_action("Toggle Status Bar", checkable=True, checked=True)
+ toggle_sb.toggled.connect(self.statusbar.setVisible)
+ view_menu.addAction(toggle_sb)
+
+ self.statusbar.showMessage("Ready")
+
+ # --- Tab & Loading Logic ---
+
+ def open_load_video_dialog(self):
+ if self.load_window is None or not self.load_window.isVisible():
+ self.load_window = OpenFileWindow(self)
+ # Connect the initialization button from OpenFileWindow to our tab creator
+ self.load_window.btn_confirm.clicked.connect(self.handle_new_video_session)
+ self.load_window.show()
+
+ def handle_new_video_session(self):
+ # Extract properties from the OpenFileWindow before closing it
+ video_path = self.load_window.video_path
+ obs_file = self.load_window.obs_file
+ offset = self.load_window.current_video_offset
+ # ... grab any other needed parameters (like selected ML model, etc.)
+
+ self.load_window.close()
+
+ # Create a new, independent tab
+ new_tab = VideoAnalysisTab(video_path, obs_file, offset)
+
+ # Add to TabWidget and switch to it
+ tab_name = os.path.basename(video_path)
+ index = self.tabs.addTab(new_tab, tab_name)
+ self.tabs.setCurrentIndex(index)
+
+ def close_tab(self, index):
+ # Prevent closing the Welcome tab if it's the only one left
+ if index == 0 and self.tabs.count() == 1:
+ return
+
+ widget = self.tabs.widget(index)
+ if widget:
+ # If the widget has cleanup routines (like stopping video), call them here
+ if hasattr(widget, 'cleanup'):
+ widget.cleanup()
+ widget.deleteLater()
+ self.tabs.removeTab(index)
+
+ # --- Routing Menu Actions to Current Tab ---
+
    def get_current_tab(self):
        """Return the widget of the active tab (may be the Welcome page)."""
        return self.tabs.currentWidget()
+
+ def route_copy(self):
+ tab = self.get_current_tab()
+ if hasattr(tab, 'info_label'):
+ tab.info_label.copy()
+ self.statusbar.showMessage("Copied to clipboard")
+
+ def route_cut(self):
+ tab = self.get_current_tab()
+ if hasattr(tab, 'info_label'):
+ tab.info_label.cut()
+ self.statusbar.showMessage("Cut to clipboard")
+
+ def route_paste(self):
+ tab = self.get_current_tab()
+ if hasattr(tab, 'info_label'):
+ tab.info_label.paste()
+ self.statusbar.showMessage("Pasted from clipboard")
+
+
+
+from PySide6.QtWidgets import (QWidget, QVBoxLayout, QHBoxLayout, QSplitter,
+ QScrollArea, QPushButton, QComboBox, QLabel,
+ QSlider, QTextEdit, QMessageBox)
+from PySide6.QtCore import Qt, QSizeF, QUrl, Slot
+from PySide6.QtMultimedia import QMediaPlayer, QAudioOutput
+from PySide6.QtMultimediaWidgets import QGraphicsVideoItem
+
+class VideoAnalysisTab(QWidget):
    def __init__(self, video_path, obs_file=None, offset=0.0, parent=None):
        """One self-contained analysis session for a single video.

        Args:
            video_path: Path of the video file to load and analyze.
            obs_file: Optional BORIS/JSON observation file path.
            offset: Playback offset in seconds taken from the BORIS project.
            parent: Optional Qt parent widget.
        """
        super().__init__(parent)

        # State
        self.file_path = video_path
        self.obs_file = obs_file
        self.current_video_offset = offset
        self.predictor = GeneralPredictor()  # per-tab ML predictor instance
        self.data = None  # Will be populated by worker

        self.setup_ui()
        #self.reprocess_current_video()
+
+ def setup_ui(self):
+ main_layout = QVBoxLayout(self)
+ main_layout.setContentsMargins(0, 0, 0, 0)
+
+ self.main_splitter = QSplitter(Qt.Vertical)
+ top_splitter = QSplitter(Qt.Horizontal)
+
+ # --- Video Area ---
video_container = QWidget()
video_layout = QVBoxLayout(video_container)
- video_layout.setContentsMargins(0, 0, 0, 0)
self.scene = QGraphicsScene()
- # Use our new subclass instead of standard QGraphicsView
self.view = VideoView(self.scene)
self.view.resized.connect(self.update_video_geometry)
- # Video item (NOT native)
self.video_item = QGraphicsVideoItem()
self.scene.addItem(self.video_item)
- # Overlay widget (normal QWidget)
+ # Overlay initialization
self.skeleton_overlay = SkeletonOverlay(self.view.viewport())
- self.skeleton_overlay.setAttribute(Qt.WA_TransparentForMouseEvents)
- self.skeleton_overlay.setAttribute(Qt.WA_TranslucentBackground)
- self.skeleton_overlay.show()
-
- # Media player
+
self.player = QMediaPlayer()
self.audio_output = QAudioOutput()
self.player.setAudioOutput(self.audio_output)
@@ -1320,12 +1694,12 @@ class PremiereWindow(QMainWindow):
video_layout.addWidget(self.view)
- # --- Control Bar Container (Vertical Stack) ---
+ # --- Controls Area ---
+
controls_container = QWidget()
stacked_controls = QVBoxLayout(controls_container)
stacked_controls.setSpacing(5) # Tight spacing between rows
-
- # --- ROW 1: ML & Training Controls ---
+
ml_row = QHBoxLayout()
ml_row.addStretch()
@@ -1337,17 +1711,17 @@ class PremiereWindow(QMainWindow):
ml_row.addWidget(QLabel("Target:"))
self.target_dropdown = QComboBox()
self.target_dropdown.addItems(["Mouthing", "Head Movement", "Kick (Left)", "Kick (Right)", "Reach (Left)", "Reach (Right)"])
- self.target_dropdown.currentTextChanged.connect(self.update_predictor_target)
+ #self.target_dropdown.currentTextChanged.connect(self.update_predictor_target)
ml_row.addWidget(self.target_dropdown)
self.btn_add_to_pool = QPushButton("Add to Pool")
- self.btn_add_to_pool.clicked.connect(self.add_current_to_ml_pool)
+ #self.btn_add_to_pool.clicked.connect(self.add_current_to_ml_pool)
self.btn_add_to_pool.setFixedWidth(120)
ml_row.addWidget(self.btn_add_to_pool)
self.btn_train_final = QPushButton("Train Global Model")
self.btn_train_final.setStyleSheet("background-color: #2e7d32; font-weight: bold;")
- self.btn_train_final.clicked.connect(self.run_final_training)
+ #self.btn_train_final.clicked.connect(self.run_final_training)
ml_row.addWidget(self.btn_train_final)
self.lbl_pool_status = QLabel("Pool: 0 Participants")
@@ -1358,11 +1732,11 @@ class PremiereWindow(QMainWindow):
self.btn_clear_pool = QPushButton("Clear Pool")
self.btn_clear_pool.setFixedWidth(100)
self.btn_clear_pool.setStyleSheet("color: #ff5555; border: 1px solid #ff5555;")
- self.btn_clear_pool.clicked.connect(self.clear_ml_pool)
+ #self.btn_clear_pool.clicked.connect(self.clear_ml_pool)
ml_row.addWidget(self.btn_clear_pool)
self.btn_extract_ai = QPushButton("Extract AI Data")
- self.btn_extract_ai.clicked.connect(self.extract_ai_to_json)
+ #self.btn_extract_ai.clicked.connect(self.extract_ai_to_json)
ml_row.addWidget(self.btn_extract_ai)
@@ -1419,196 +1793,89 @@ class PremiereWindow(QMainWindow):
# --- Add Rows to Stack ---
stacked_controls.addLayout(ml_row)
stacked_controls.addLayout(playback_row)
-
- # Add the whole stacked container to the main video layout
+
video_layout.addWidget(controls_container)
+
+ # --- Inspector & Timeline ---
+ self.info_label = QTextEdit()
+ self.info_label.setReadOnly(True)
+
+ self.timeline = TimelineWidget()
+ self.timeline.seek_requested.connect(self.seek_video)
+
+ top_splitter.addWidget(video_container)
+ top_splitter.addWidget(self.info_label)
+
+ self.main_splitter.addWidget(top_splitter)
+ self.main_splitter.addWidget(self.timeline)
+ main_layout.addWidget(self.main_splitter)
+
+ self.setup_transport() # Start with empty workspace until worker finishes
- # --- Button Connections ---
+ def setup_transport(self):
+ """Sets up player controls that don't depend on skeleton analysis."""
+ # Enable buttons immediately
+ for btn in [self.btn_play, self.btn_prev, self.btn_next, self.btn_start, self.btn_end]:
+ btn.setEnabled(True)
+
+ # Connections (Use disconnect first to avoid double-firing if re-called)
+ try: self.btn_play.clicked.disconnect()
+ except: pass
+
self.btn_play.clicked.connect(self.toggle_playback)
- # Use lambda to pass the target frame to your existing seek_video method
- self.btn_start.clicked.connect(lambda: self.seek_video(0))
- self.btn_end.clicked.connect(lambda: self.seek_video(self.data['total_frames'] - 1))
+ self.btn_start.clicked.connect(lambda: self.player.setPosition(0))
+ # Note: 'End' and 'Step' need FPS/Duration, handled in the methods themselves
+ self.btn_end.clicked.connect(lambda: self.player.setPosition(self.player.duration()))
self.btn_prev.clicked.connect(lambda: self.step_frame(-1))
self.btn_next.clicked.connect(lambda: self.step_frame(1))
- # --- Top Right: Media Info & Loader ---
- info_container = QWidget()
- info_layout = QVBoxLayout(info_container)
+ self.player.setSource(QUrl.fromLocalFile(self.file_path))
- # NEW: Wrap the info_label in a Scroll Area
- self.inspector_scroll = QScrollArea()
- self.inspector_scroll.setWidgetResizable(True)
- self.inspector_scroll.setStyleSheet("border: none; background-color: transparent;")
- # Create the label as the scroll area's content
- self.info_label = QTextEdit()
- self.info_label.setText("No video loaded.\nClick 'File' > 'Load Video' to begin.")
- self.info_label.setAlignment(Qt.AlignTop | Qt.AlignLeft)
- self.info_label.setWordWrapMode(QTextOption.WordWrap)
- self.info_label.setReadOnly(True)
-
- # self.info_label.setWordWrap(True) # Ensure long text wraps instead o
- # f stretching horizontally
- self.info_label.setStyleSheet("padding: 5px; font-family: 'Segoe UI', Arial; color: #ffffff;")
+ @Slot(dict)
+ def setup_workspace(self, analyzed_data):
+ """Only handles skeleton/analysis-specific data."""
+ self.data = analyzed_data
- self.inspector_scroll.setWidget(self.info_label)
+ # Update timeline and overlay now that we have data
+ if hasattr(self, 'timeline'):
+ self.timeline.set_data(self.data)
- # Add the scroll area to the layout instead of the naked label
- info_layout.addWidget(self.inspector_scroll)
-
- top_splitter.addWidget(video_container)
- top_splitter.addWidget(info_container)
- top_splitter.setSizes([800, 400])
-
- # --- Bottom: Timeline in a Scroll Area ---
- self.timeline = TimelineWidget()
- self.timeline.seek_requested.connect(self.seek_video)
- self.timeline.visibility_changed.connect(self.skeleton_overlay.set_hidden_tracks)
- self.timeline.track_selected.connect(self.on_track_selected)
+ if hasattr(self, 'skeleton_overlay'):
+ self.skeleton_overlay.set_data(self.data)
- scroll_area = QScrollArea()
- scroll_area.setWidgetResizable(True)
- scroll_area.setWidget(self.timeline)
+ # Trigger a geometry refresh now that we have accurate video dims from data
+ self.update_video_geometry()
+ print(f"Analysis complete for {self.file_path}")
+
    def update_status(self, message):
        """Updates the inspector or a status bar with worker progress."""
        # Appends to the read-only inspector pane; acts as a progress log.
        self.info_label.append(message)
+
+ def toggle_playback(self):
+ if self.player.playbackState() == QMediaPlayer.PlayingState:
+ self.player.pause()
+ self.btn_play.setText("Play")
+ else:
+ self.player.play()
+ self.btn_play.setText("Pause")
+
    def seek_video(self, ms):
        """Jump playback to the absolute position *ms* (milliseconds)."""
        self.player.setPosition(ms)
- main_splitter.addWidget(top_splitter)
- main_splitter.addWidget(scroll_area)
- main_splitter.setSizes([500, 400])
+ def step_frame(self, delta):
+ # Fallback to 30 FPS if worker data isn't ready
+ fps = self.data["fps"] if (hasattr(self, 'data') and self.data) else 30.0
- self.setCentralWidget(main_splitter)
- self.player.positionChanged.connect(self.update_timeline_playhead)
- self.player.positionChanged.connect(self.update_inspector)
- self.create_menu_bar()
-
- self.local_check_thread = LocalPendingUpdateCheckThread(CURRENT_VERSION, self.platform_suffix, PLATFORM_NAME, APP_NAME)
- self.local_check_thread.pending_update_found.connect(self.updater.on_pending_update_found)
- self.local_check_thread.no_pending_update.connect(self.updater.on_no_pending_update)
- self.local_check_thread.start()
-
-
-
- def create_menu_bar(self):
- '''Menu Bar at the top of the screen'''
-
- menu_bar = self.menuBar()
- self.statusbar = self.statusBar()
-
- def make_action(name, shortcut=None, slot=None, checkable=False, checked=False, icon=None):
- action = QAction(name, self)
-
- if shortcut:
- action.setShortcut(QKeySequence(shortcut))
- if slot:
- action.triggered.connect(slot)
- if checkable:
- action.setCheckable(True)
- action.setChecked(checked)
- if icon:
- action.setIcon(QIcon(icon))
- return action
-
- # File menu and actions
- file_menu = menu_bar.addMenu("File")
- file_actions = [
- ("Load Video...", "Ctrl+O", self.load_video, resource_path("icons/file_open_24dp_1F1F1F.svg")),
- # ("Open Folder...", "Ctrl+Alt+O", self.not_implemented, resource_path("icons/folder_24dp_1F1F1F.svg")),
- # ("Open Folders...", "Ctrl+Shift+O", self.open_folder_dialog, resource_path("icons/folder_copy_24dp_1F1F1F.svg")),
- # ("Load Project...", "Ctrl+L", self.not_implemented, resource_path("icons/article_24dp_1F1F1F.svg")),
- # ("Save Project...", "Ctrl+S", self.not_implemented, resource_path("icons/save_24dp_1F1F1F.svg")),
- # ("Save Project As...", "Ctrl+Shift+S", self.not_implemented, resource_path("icons/save_as_24dp_1F1F1F.svg")),
- ]
-
- for i, (name, shortcut, slot, icon) in enumerate(file_actions):
- file_menu.addAction(make_action(name, shortcut, slot, icon=icon))
- if i == 1: # after the first 3 actions (0,1,2)
- file_menu.addSeparator()
-
- file_menu.addSeparator()
- file_menu.addAction(make_action("Exit", "Ctrl+Q", QApplication.instance().quit, icon=resource_path("icons/exit_to_app_24dp_1F1F1F.svg")))
-
- # Edit menu
- edit_menu = menu_bar.addMenu("Edit")
- edit_actions = [
- ("Cut", "Ctrl+X", self.cut_text, resource_path("icons/content_cut_24dp_1F1F1F.svg")),
- ("Copy", "Ctrl+C", self.copy_text, resource_path("icons/content_copy_24dp_1F1F1F.svg")),
- ("Paste", "Ctrl+V", self.paste_text, resource_path("icons/content_paste_24dp_1F1F1F.svg"))
- ]
- for name, shortcut, slot, icon in edit_actions:
- edit_menu.addAction(make_action(name, shortcut, slot, icon=icon))
-
- # View menu
- view_menu = menu_bar.addMenu("View")
- toggle_statusbar_action = make_action("Toggle Status Bar", checkable=True, checked=True, slot=None)
- view_menu.addAction(toggle_statusbar_action)
- toggle_statusbar_action.toggled.connect(self.statusbar.setVisible)
+ current_ms = self.player.position()
+ # One frame in ms = 1000 / fps
+ frame_ms = 1000.0 / fps
+ target_ms = int(current_ms + (delta * frame_ms))
+
+ # Ensure we don't seek past duration
+ target_ms = max(0, min(target_ms, self.player.duration()))
+ self.player.setPosition(target_ms)
- # Options menu (Help & About)
- options_menu = menu_bar.addMenu("Options")
-
- options_actions = [
- ("User Guide", "F1", self.user_guide, resource_path("icons/help_24dp_1F1F1F.svg")),
- ("Check for Updates", "F5", self.updater.manual_check_for_updates, resource_path("icons/update_24dp_1F1F1F.svg")),
- ("Batch YOLO processing...", "F6", self.open_batch_tool, resource_path("icons/upgrade_24dp_1F1F1F.svg")),
- ("About", "F12", self.about_window, resource_path("icons/info_24dp_1F1F1F.svg"))
- ]
-
- for i, (name, shortcut, slot, icon) in enumerate(options_actions):
- options_menu.addAction(make_action(name, shortcut, slot, icon=icon))
- if i == 1 or i == 3: # after the first 2 actions (0,1)
- options_menu.addSeparator()
-
- preferences_menu = menu_bar.addMenu("Preferences")
- preferences_actions = [
- ("Not Implemented", "", self.not_implemented, resource_path("icons/info_24dp_1F1F1F.svg")),
- ]
- for name, shortcut, slot, icon in preferences_actions:
- preferences_menu.addAction(make_action(name, shortcut, slot, icon=icon, checkable=True, checked=False))
-
- terminal_menu = menu_bar.addMenu("Terminal")
- terminal_actions = [
- ("Not Implemented", "", self.not_implemented, resource_path("icons/terminal_24dp_1F1F1F.svg")),
- ]
- for name, shortcut, slot, icon in terminal_actions:
- terminal_menu.addAction(make_action(name, shortcut, slot, icon=icon))
-
- self.statusbar.showMessage("Ready")
-
-
- def not_implemented(self):
- self.statusbar.showMessage("Not Implemented.") # Show status message
-
- def copy_text(self):
- self.info_label.copy() # Trigger copy
- self.statusbar.showMessage("Copied to clipboard") # Show status message
-
- def cut_text(self):
- self.info_label.cut() # Trigger cut
- self.statusbar.showMessage("Cut to clipboard") # Show status message
-
- def about_window(self):
- if self.about is None or not self.about.isVisible():
- self.about = AboutWindow(self)
- self.about.show()
-
- def user_guide(self):
- if self.help is None or not self.help.isVisible():
- self.help = UserGuideWindow(self)
- self.help.show()
-
- def paste_text(self):
- self.info_label.paste() # Trigger paste
- self.statusbar.showMessage("Pasted from clipboard") # Show status message
-
- def open_batch_tool(self):
- dialog = BatchProcessorDialog(self) # Pass 'self' to keep it centered
- dialog.exec()
-
- def toggle_mute(self):
- is_muted = self.btn_mute.isChecked()
- self.audio_output.setMuted(is_muted)
- self.btn_mute.setText("Mute" if is_muted else "Vol")
- # Optional: Dim the slider when muted
- self.sld_volume.setEnabled(not is_muted)
-
def update_volume(self, value):
# QAudioOutput expects a float between 0.0 and 1.0
volume = value / 100.0
@@ -1618,64 +1885,37 @@ class PremiereWindow(QMainWindow):
if self.btn_mute.isChecked() and value > 0:
self.btn_mute.setChecked(False)
self.toggle_mute()
-
- def clear_ml_pool(self):
- """Removes all participants from the training buffer."""
- debug_print()
- # Confirm with the user first to prevent accidental deletions
- reply = QMessageBox.question(self, 'Clear Pool?',
- f"This will remove all {len(self.predictor.raw_participant_buffer)} "
- "participants from the training memory. Continue?",
- QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
-
- if reply == QMessageBox.Yes:
- # 1. Clear the actual list in the predictor
- self.predictor.raw_participant_buffer = []
- # 2. Update the UI label
- self.lbl_pool_status.setText("Pool: 0 Participants")
-
- # 3. Optional: Visual feedback
- # self.statusBar().showMessage("ML Pool cleared.", 3000)
- print("DEBUG: ML Pool manually cleared.")
+ def toggle_mute(self):
+ is_muted = self.btn_mute.isChecked()
+ self.audio_output.setMuted(is_muted)
+ self.btn_mute.setText("Mute" if is_muted else "Vol")
+ # Optional: Dim the slider when muted
+ self.sld_volume.setEnabled(not is_muted)
-
- def update_predictor_target(self):
- debug_print()
- # This physically changes the string from "Mouthing" to "Head Movement"
- self.predictor.current_target = self.target_dropdown.currentText()
-
- print(f"Predictor is now targeting: {self.predictor.current_target}")
-
-
- def reprocess_current_video(self):
- """Restarts the analysis worker to pick up new models."""
- debug_print()
-
- # Start the worker (passing the predictor so it can run AI models)
- self.worker = PoseAnalyzerWorker(
- self.file_path,
- obs_info=self.selected_obs_subkey,
- predictor=self.predictor
- )
-
- self.worker.progress.connect(self.update_status)
- self.worker.finished_data.connect(self.setup_workspace)
- self.worker.start()
-
-
def update_video_geometry(self):
- debug_print()
- if not hasattr(self, "video_item") or not hasattr(self, "data"):
+ if not hasattr(self, "video_item"):
return
+ # 1. Get viewport dimensions
viewport_rect = self.view.viewport().rect()
v_w, v_h = viewport_rect.width(), viewport_rect.height()
- if v_w <= 0 or v_h <= 0: return
+ if v_w <= 0 or v_h <= 0:
+ return
- video_w, video_h = self.data['width'], self.data['height']
+ # 2. Get Video Dimensions (Fall back to native size if worker data is missing)
+ if hasattr(self, "data") and self.data:
+ video_w, video_h = self.data['width'], self.data['height']
+ else:
+ native_size = self.video_item.nativeSize()
+ video_w, video_h = native_size.width(), native_size.height()
+
+ # If the video hasn't loaded metadata yet, it will be -1 or 0
+ if video_w <= 0 or video_h <= 0:
+ return
+
+ # 3. Calculate Aspect Ratio Scaling
aspect = video_w / video_h
-
if v_w / v_h > aspect:
target_h = v_h
target_w = int(v_h * aspect)
@@ -1686,573 +1926,30 @@ class PremiereWindow(QMainWindow):
x_off = (v_w - target_w) / 2
y_off = (v_h - target_h) / 2
+ # 4. Apply transformations
self.scene.setSceneRect(0, 0, v_w, v_h)
self.video_item.setPos(x_off, y_off)
self.video_item.setSize(QSizeF(target_w, target_h))
- self.skeleton_overlay.setGeometry(int(x_off), int(y_off), target_w, target_h)
-
+
+ # Only update overlay if it exists and we have data
+ if hasattr(self, "skeleton_overlay"):
+ self.skeleton_overlay.setGeometry(int(x_off), int(y_off), target_w, target_h)
+
def resizeEvent(self, event):
- debug_print()
+ # debug_print()
super().resizeEvent(event)
self.update_video_geometry()
if hasattr(self, 'timeline'):
self.timeline.set_zoom(self.timeline.zoom_factor)
-
- # def eventFilter(self, source, event):
- # if source is self.video_widget and event.type() == QEvent.Resize:
- # self.skeleton_overlay.resize(event.size())
- # return super().eventFilter(source, event)
-
-
- def add_current_to_ml_pool(self):
- """Adds raw kinematic data and current OBS labels to the buffer."""
- debug_print()
- if not hasattr(self, 'data') or 'raw_kps' not in self.data:
- QMessageBox.warning(self, "No Data", "Load a video first.")
- return
-
- # 1. Grab everything the Worker produced
- payload = {
- "z_kps": self.data['z_kps'],
- "directions": self.data['directions'],
- "raw_kps": self.data['raw_kps']
- }
-
- all_labels = self.timeline.get_all_binary_labels(self.current_video_offset, self.data["fps"])
-
- # 3. Hand off to predictor
- msg = self.predictor.add_to_raw_buffer(payload, all_labels)
- self.lbl_pool_status.setText(f"Pool: {len(self.predictor.raw_participant_buffer)} Participants")
- print(f"DEBUG: Added to Predictor at {hex(id(self.predictor))}")
- print(f"DEBUG: Buffer size is now: {len(self.predictor.raw_participant_buffer)}")
- QMessageBox.information(self, "Success", msg)
-
-
- def run_final_training(self):
- """
- Triggers training
- """
- debug_print()
- # DEBUG: Check the buffer directly before the IF statement
- actual_buffer = self.predictor.raw_participant_buffer
- current_count = len(actual_buffer)
-
- if current_count < 1:
- # If this triggers, let's see WHY it's empty
- QMessageBox.warning(self, "Empty Pool",
- f"Buffer is empty (Size: {current_count}).\n"
- f"Predictor ID: {hex(id(self.predictor))}")
- return
-
- model_type = self.ml_dropdown.currentText()
- current_target = self.target_dropdown.currentText()
-
- reply = QMessageBox.question(self, 'Confirm Training',
- f"Train {model_type} for '{current_target}' using "
- f"{current_count} participants?",
- QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
-
- if reply == QMessageBox.Yes:
- self.btn_train_final.setEnabled(False)
- self.btn_train_final.setText(f"Training...")
+ def cleanup(self):
+ if self.player:
+ self.player.stop()
+ if hasattr(self, 'worker') and self.worker.isRunning():
+ self.worker.terminate()
- try:
- # Force the target update right before training
- self.predictor.current_target = current_target
- report_html = self.predictor.calculate_and_train(model_type, current_target)
-
- self.reprocess_current_video()
-
- self.info_label.setText(report_html)
- msg = QMessageBox(self)
- msg.setWindowTitle("Results")
- msg.setTextFormat(Qt.RichText)
- msg.setText(report_html)
- msg.exec()
-
-
-
- except Exception as e:
- traceback.print_exc()
- QMessageBox.critical(self, "Error", f"{str(e)}")
- finally:
- self.btn_train_final.setEnabled(True)
- self.btn_train_final.setText("Train Global Model")
-
-
-
- # def import_json_observations(self):
- # debug_print()
- # file_path, _ = QFileDialog.getOpenFileName(self, "Select JSON Observations", "", "JSON Files (*.json)")
- # if not file_path: return
-
- # with open(file_path, 'r') as f:
- # full_data = json.load(f)
-
- # # Get the subkeys under "observations"
- # subkeys = list(full_data.get("observations", {}).keys())
-
- # if not subkeys:
- # print("No observations found in JSON.")
- # return
-
- # item, ok = QInputDialog.getItem(self, "Select Session", "Pick an observation set:", subkeys, 0, False)
-
- # if ok and item:
- # new_obs_data = self.load_external_observations(file_path, item)
- # self.append_new_tracks(new_obs_data)
-
- def append_new_tracks(self, new_obs_data):
- debug_print()
- # 1. Update global TRACK_NAMES and TRACK_COLORS
- for name in new_obs_data.keys():
- if name not in TRACK_NAMES:
- TRACK_NAMES.append(name)
- # Assign a distinct color (e.g., a dark purple/magenta for observations)
- TRACK_COLORS.append("#AA00FF")
-
- # 2. Merge into existing data dictionary
- self.data["events"].update(new_obs_data)
-
- # 3. Refresh Timeline
- global NUM_TRACKS
- NUM_TRACKS = len(TRACK_NAMES)
- self.timeline.set_data(self.data)
- self.timeline.update_geometry()
-
- # def load_external_observations(self, file_path, subkey):
- # debug_print()
- # with open(file_path, 'r') as f:
- # data = json.load(f)
-
- # raw_events = data["observations"][subkey]["events"]
- # # We only care about: [time_seconds (0), _, label (2), _, _, _]
-
- # new_tracks = {}
-
- # # Sort events by time just in case the JSON is unsorted
- # raw_events.sort(key=lambda x: x[0])
-
- # # Group timestamps by their label (e.g., "Kick", "Baseline")
- # temp_storage = {}
- # for event in raw_events:
- # time_sec = event[0]
- # label = event[2]
- # frame = int(time_sec * self.data["fps"])
-
- # if label not in temp_storage:
- # temp_storage[label] = []
- # temp_storage[label].append(frame)
-
- # # Convert pairs of frames into (start, end) blocks
- # for label, frames in temp_storage.items():
- # processed_blocks = []
- # # Step through frames in pairs (start, end)
- # for i in range(0, len(frames) - 1, 2):
- # start = frames[i]
- # end = frames[i+1]
- # # Format: (start, end, severity, direction)
- # processed_blocks.append((start, end, "External", "Manual Obs"))
-
- # new_tracks[f"OBS: {label}"] = processed_blocks
-
- # return new_tracks
-
-
- def load_video(self):
- debug_print()
- self.file_path, _ = QFileDialog.getOpenFileName(self, "Open Video", "", "Video Files (*.mp4 *.avi *.mkv)")
- if not self.file_path: return
-
- cap = cv2.VideoCapture(self.file_path)
- if cap.isOpened():
- self.current_video_fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
- #total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
- # Optional: Initialize timeline with blank data so it can at least draw the ruler
- #self.timeline.data = {"total_frames": total_frames, "fps": self.current_video_fps, "events": {}}
- cap.release()
- else:
- self.current_video_fps = 30.0 # Fallback
-
- # --- NEW: JSON Observation Prompt ---
- self.obs_file, _ = QFileDialog.getOpenFileName(self, "Select JSON Observations (Optional)", "", "JSON Files (*.json)")
-
-
- if self.obs_file:
- try:
- with open(self.obs_file, 'r') as f:
- full_json = json.load(f)
-
- observations = full_json.get("observations", {})
- subkeys = list(observations.keys())
-
- # --- AUTO-MATCHING LOGIC ---
- # 1. Get the video filename without extension (e.g., 'T4_2T_WORD_F')
- video_name = os.path.splitext(os.path.basename(self.file_path))[0]
- v_parts = video_name.split('_')
-
- # Build the 'fingerprint' from the video (Blocks 1, 2, and the Last one)
- # This ignores the 'WORDHERE' block in the middle
- if len(v_parts) >= 3:
- video_fingerprint = f"{v_parts[0]}_{v_parts[1]}_{v_parts[-1]}"
- else:
- video_fingerprint = video_name # Fallback
-
- match = None
- for sk in subkeys:
- s_parts = sk.split('_')
- # Subkeys are shorter: Block 1, 2, and 3
- if len(s_parts) == 3:
- sk_fingerprint = f"{s_parts[0]}_{s_parts[1]}_{s_parts[2]}"
- if sk_fingerprint.lower() == video_fingerprint.lower():
- match = sk
- break
-
- # 2. Decision: Use match or prompt user
- if match:
- self.selected_obs_subkey = (self.obs_file, match)
- self.statusBar().showMessage(f"Auto-matched JSON session: {match}", 5000)
- elif subkeys:
- # No match found, only then show the popup
- item, ok = QInputDialog.getItem(self, "Select Session",
- f"Could not auto-match '{video_name}'.\nPick manually:",
- subkeys, 0, False)
- if ok and item:
- self.selected_obs_subkey = (self.obs_file, item)
-
- # --- NEW: Offset & File Matching Logic ---
- if self.selected_obs_subkey:
- _, session_key = self.selected_obs_subkey
- session_data = observations.get(session_key, {})
- file_map = session_data.get("file", {})
-
- video_filename = os.path.basename(self.file_path)
- found_index = None
-
- # 1. Attempt Auto-Match by filename
- for idx_str, file_list in file_map.items():
- # Check if our loaded video is in this list (e.g., "Videos\\T4_2T_WORD_F.mp4")
- if any(video_filename in path for path in file_list):
- found_index = idx_str
- print(f"DEBUG: Auto-matched video to Camera Index {idx_str}")
- break
-
- # 2. If Auto-Match fails, prompt user for Camera Index
- if not found_index:
- available_indices = [k for k, v in file_map.items() if v] # Only indices with files
- if available_indices:
- item, ok = QInputDialog.getItem(self, "Identify Camera",
- f"Could not find '{video_filename}' in JSON.\n"
- "Which camera index is this video?",
- available_indices, 0, False)
- if ok:
- found_index = item
-
- # 3. Retrieve and Print the Offset
- if found_index:
- offsets = session_data.get("media_info", {}).get("offset", {})
- search_key = str(found_index)
- # Note: offsets dict might use integers or strings as keys
- # We check both to be safe
- actual_offset = offsets.get(search_key)
-
- if actual_offset is not None:
- print(f"MATCHED OFFSET: {actual_offset:.4f}")
- # Store this if you need it for timeline syncing later
- self.current_video_offset = float(actual_offset)
- self.timeline.set_sync_params(
- offset_seconds=self.current_video_offset,
- fps=self.current_video_fps
- )
-
- print(f"Timeline synced with {actual_offset}s offset.")
- else:
- print(f"DEBUG: No offset found for index {found_index}")
-
- except Exception as e:
- QMessageBox.warning(self, "JSON Error", f"Could not parse JSON: {e}")
-
- # --- Cache Logic ---
- # cache_path = self.file_path.rsplit('.', 1)[0] + "_pose_cache.csv"
- # use_cache = None
- # if os.path.exists(cache_path):
- # reply = QMessageBox.question(self, 'Cache Found',
- # "Use existing pose cache?",
- # QMessageBox.Yes | QMessageBox.No)
- # use_cache = cache_path if reply == QMessageBox.Yes else None
-
-
- # Pass the observation info to the worker
- self.worker = PoseAnalyzerWorker(self.file_path, self.selected_obs_subkey, self.predictor)
- self.worker.progress.connect(self.update_status)
- self.worker.finished_data.connect(self.setup_workspace)
- self.worker.start()
-
- def update_status(self, msg):
- debug_print()
-
- self.info_label.setText(f"Status:\n{msg}")
-
- def setup_workspace(self, data):
- debug_print()
- self.data = data
- self.player.setSource(QUrl.fromLocalFile(data["video_path"]))
- self.player.play()
- self.player.pause()
- self.timeline.set_data(data)
- self.skeleton_overlay.set_data(data)
- self.update_video_geometry()
- for btn in self.transport_btns:
- btn.setEnabled(True)
- total_f = data['total_frames']
- fps = data['fps']
- tot_s = int(total_f / fps)
-
- # Display 0 / Total
- self.lbl_time_counter.setText(f"00:00 / {tot_s//60:02d}:{tot_s%60:02d}")
- self.lbl_frame_counter.setText(f"0 / {total_f-1}")
-
- # Sync widgets
- self.timeline.set_data(data)
- self.skeleton_overlay.set_data(data)
-
- # Force a seek to frame 0 to initialize the video buffer
- self.seek_video(0)
- self.btn_load.setEnabled(True)
-
- info_text = (
- f"File: {os.path.basename(data['video_path'])}\n"
- f"Resolution: {data['width']}x{data['height']}\n"
- f"FPS: {data['fps']:.2f}\n"
- f"Total Frames: {data['total_frames']}\n\n"
- f"Timeline Legend (Opacity):\n"
- f"255 Alpha = Large Deviation\n"
- f"160 Alpha = Moderate Deviation\n"
- f"80 Alpha = Small Deviation\n"
- f"Empty = Rest (Baseline)"
- )
- self.info_label.setText(info_text)
-
-
- def toggle_playback(self):
- debug_print()
-
- if not hasattr(self, 'data'): return
-
- # If we are at the end, jump to the start first
- fps = self.data["fps"]
- current_frame = int((self.player.position() / 1000.0) * fps + 0.5)
- if current_frame >= self.data["total_frames"] - 1:
- self.seek_video(0)
-
- if self.player.playbackState() == QMediaPlayer.PlayingState:
- self.player.pause()
- self.btn_play.setText("Play")
- else:
- self.player.play()
- self.btn_play.setText("Pause")
-
- def update_timeline_playhead(self, position_ms):
- debug_print()
- if hasattr(self, 'data') and self.data["fps"] > 0:
- fps = self.data["fps"]
- total_f = self.data["total_frames"]
-
- # Current frame calculation
- current_f = int((position_ms / 1000.0) * fps)
-
- # --- PREVENT BLACK FRAME AT END ---
- # If we are within 1 frame of the end, stop and lock to the last valid frame
- if current_f >= total_f - 1:
- if self.player.playbackState() == QMediaPlayer.PlayingState:
- self.player.pause()
- self.btn_play.setText("Play")
- current_f = total_f - 1
- # Seek slightly back from total duration to keep the image visible
- last_valid_ms = int(((total_f - 1) / fps) * 1000)
- self.player.setPosition(last_valid_ms)
-
- # Sync UI
- self.timeline.set_playhead(current_f)
- self.skeleton_overlay.set_frame(current_f)
- self.update_counters(current_f)
-
-
- def on_track_selected(self, track_name):
- debug_print()
-
- self.selected_track = track_name
-
- if not track_name:
- self.info_label.setText("No track selected.\nClick a data track to inspect.")
- self.info_label.setStyleSheet("color: #AAAAAA; font-family: 'Segoe UI'; font-size: 10pt;")
- else:
- self.info_label.setStyleSheet("color: #00FF00; font-family: 'Segoe UI'; font-size: 10pt;")
- self.update_inspector() # Refresh immediately on click
-
-
- def update_inspector(self):
- debug_print()
- if not hasattr(self, 'selected_track') or not self.selected_track or not self.data:
- return
-
- # 1. Temporal Logic
- current_f = int((self.player.position() / 1000.0) * self.data["fps"])
- current_f = max(0, min(current_f, self.data["total_frames"] - 1))
-
- is_ai = "AI:" in self.selected_track
- is_obs = "OBS:" in self.selected_track
-
- # 2. Status/Raw Logic
- if is_ai or is_obs:
- # Check Activity for Behavior Tracks
- events = self.data["events"].get(self.selected_track, [])
- is_active = any(start <= current_f <= end for start, end, *rest in events)
- active_color = "#ff5555" if is_active else "#888888"
-
- status_line = f"ACTIVE: {'YES' if is_active else 'NO'}"
- raw_line = "" # Do not display raw for AI/OBS
- else:
- # Kinematics Logic (No Active status)
- status_line = ""
- raw_info = "N/A"
- cache_path = self.file_path.rsplit('.', 1)[0] + "_pose_raw.csv"
- print(cache_path)
-
- if os.path.exists(cache_path):
- try:
-
- # Row 2 in CSV is Frame 0. pandas.read_csv uses Row 1 as header.
- # So Frame 0 is df.iloc[0].
- print(current_f)
- print(len(self.worker.pose_df))
- if current_f < len(self.worker.pose_df):
- row = self.worker.pose_df.iloc[current_f]
- print(self.selected_track)
- col_x, col_y, col_c = f"{self.selected_track}_x", f"{self.selected_track}_y", f"{self.selected_track}_conf"
- print(self.worker.pose_df.columns)
-
- if col_x in self.worker.pose_df.columns and col_y in self.worker.pose_df.columns:
- print("me")
- rx, ry = row[col_x], row[col_y]
- rc = row[col_c] if col_c in self.worker.pose_df.columns else 0.0
- raw_info = f"X: {rx:.2f} | Y: {ry:.2f} | Conf: {rc:.2f}"
- except Exception as e:
- print(f"Inspector CSV Error: {e}")
- raw_info = "Index Error"
-
- raw_line = f"RAW (CSV): {raw_info}"
-
- # 3. Construct Display
- display_text = (
- f"TRACK: {self.selected_track}<br>"
- f"FRAME: {current_f}<br>"
- f"{status_line}"
- f"{raw_line}"
- )
-
- # 4. Performance Report
- if is_ai:
- target_name = self.selected_track.replace("AI: ", "")
- pattern = f"ml_{target_name}_performance_*.txt"
- report_files = sorted(glob.glob(pattern))
-
- report_content = "No report found."
- if report_files:
- try:
- with open(report_files[-1], 'r') as f:
- report_content = f.read().replace('\n', '<br>')
- except: pass
-
- display_text += f"