diff --git a/README.md b/README.md
index b52d5bf..ca137f6 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,44 @@
-# flares
+FLARES (fNIRS Lightweight Analysis, Research, & Evaluation Suite)
+=================================================================
-fNIRS Lightweight Analysis, Research, & Evaluation Suite
\ No newline at end of file
+FLARES is a lightweight standalone application for extracting meaningful data from .snirf files.
+
+FLARES is free and open-source software that runs on Windows, MacOS, and Linux. Please read the information for your operating system below.
+
+Visit the official [FLARES website](https://research.dezeeuw.ca/flares).
+
+[![Python web site](https://img.shields.io/badge/Made%20with-Python-1f425f.svg)](https://www.python.org)
+
+# For MacOS Users
+
+Because an Apple Developer account is costly, the application is not certified by Apple. The first time you extract the application and attempt to launch it, you will see a popup stating:
+
+"Apple could not verify flares.app is free of malware that may harm your Mac or compromise your privacy.", with the options "Done" or "Move to Trash".
+
+The workaround is to open Finder and navigate to the flares-darwin folder. Once you have located the folder, right-click it and choose "New Terminal at Folder". When the terminal opens, run the following command (you can copy and paste it):
+
+```xattr -dr com.apple.quarantine flares.app & pid1=$!; xattr -dr com.apple.quarantine flares_updater.app & pid2=$!; wait $pid1 $pid2; exit```
+
+Once the command has executed and the text "[Process completed]" appears, you may close the terminal window and open the application again. If you instead unrestrict the app through Settings > Privacy & Security, it may not be able to update correctly in the future.
+
+This only applies the first time you run FLARES. Subsequent launches, including after updates, will work as-is.
+
+# For Windows Users
+
+Because a code signing certificate is costly, the application is not digitally signed. The first time you extract the application and attempt to launch it, you will see a popup stating:
+
+"Windows protected your PC - Microsoft Defender SmartScreen prevented an unrecognized app from starting. Running this app might put your PC at risk.", with the options "More info" or "Don't run".
+
+The workaround is to click "More info" and then select "Run anyway".
+
+This only applies the first time you run FLARES. Subsequent launches, including after updates, will work as-is.
+
+# For Linux Users
+
+No additional steps are required for Linux users at this time.
+
+# License
+
+FLARES is distributed under the GPL-3.0 license.
+ +Copyright (C) 2025 Tyler de Zeeuw \ No newline at end of file diff --git a/fNIRS_module.py b/fNIRS_module.py new file mode 100644 index 0000000..c9ce53a --- /dev/null +++ b/fNIRS_module.py @@ -0,0 +1,4074 @@ +""" +Filename: fNIRS_module.py +Description: Core functionality for FLARES + +Author: Tyler de Zeeuw +License: GPL-3.0 +""" + +# Built-in imports +import os +import sys +import time +import logging +import platform +import warnings +import threading +from io import BytesIO +from copy import deepcopy +from pathlib import Path +from zipfile import ZipFile +from datetime import datetime +from itertools import compress +from multiprocessing import Queue +from typing import Any, Optional, cast, Literal, Iterator, Union + +# External library imports +import pywt # type: ignore +import qtpy # type: ignore +import xlrd # type: ignore +import psutil +import scooby # type: ignore +import requests +import pyvistaqt # type: ignore +import darkdetect # type: ignore +import numpy as np +import pandas as pd +from PIL import Image +import seaborn as sns +import neurokit2 as nk # type: ignore +from tqdm.auto import tqdm +from pandas import DataFrame +import matplotlib.pyplot as plt +from matplotlib.axes import Axes +from numpy.typing import NDArray +import vtkmodules.util.data_model +from numpy import floating, float64 +from matplotlib.lines import Line2D +import matplotlib.colors as mcolors +from scipy.stats import ttest_1samp # type: ignore +from matplotlib.figure import Figure +import statsmodels.formula.api as smf # type: ignore +import vtkmodules.util.execution_model +from nilearn.plotting import plot_design_matrix # type: ignore +from scipy.signal import welch, butter, filtfilt # type: ignore +from matplotlib.colors import LinearSegmentedColormap +from IPython.display import display, Markdown, clear_output # type: ignore +from statsmodels.tools.sm_exceptions import ConvergenceWarning # type: ignore +from concurrent.futures import ProcessPoolExecutor, as_completed +from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas + +# External library imports for mne +import mne +from mne import EvokedArray, Info, read_source_spaces, stc_near_sensors # type: ignore +from mne.source_space import SourceSpaces +from mne.transforms import Transform # type: ignore +from mne.io import BaseRaw, read_raw_snirf # type: ignore +from mne.annotations import Annotations # type: ignore +from mne_nirs.visualisation import plot_glm_group_topo # type: ignore +from mne_nirs.channels import get_long_channels, get_short_channels, picks_pair_to_idx # type: ignore +from mne_nirs.experimental_design import make_first_level_design_matrix # type: ignore +from mne_nirs.statistics import run_glm, statsmodels_to_results # type: ignore +from mne_nirs.signal_enhancement import enhance_negative_correlation, short_channel_regression # type: ignore +from mne.preprocessing.nirs import beer_lambert_law, optical_density, temporal_derivative_distribution_repair, source_detector_distances, short_channels # type: ignore +from mne_nirs.io.fold import fold_channel_specificity # type: ignore +from mne_nirs.preprocessing import peak_power # type: ignore +from mne.viz import Brain +from mne_nirs.statistics._glm_level_first import RegressionResults # type: ignore +from mne.filter import filter_data # type: ignore + +CURRENT_VERSION = "1.0.0" + +GUI = False +PLATFORM_NAME = platform.system().lower() + +BASE_SNIRF_FOLDER: str +SNIRF_SUBFOLDERS: list[str] +STIM_DURATION: list[float] +MAX_WORKERS: int + +SECONDS_TO_STRIP: int 
+DOWNSAMPLE: bool +DOWNSAMPLE_FREQUENCY: int +FORCE_DROP_CHANNELS: list[str] +SOURCE_DETECTOR_SEPARATOR: str + +OPTODE_FILE: bool +OPTODE_FILE_PATH: str +OPTODE_FILE_SEPARATOR: str + +TDDR: bool + +WAVELET: bool +IQR: float + +HEART_RATE: bool +SECONDS_TO_STRIP_HR: int +MAX_LOW_HR: int +MAX_HIGH_HR: int +SMOOTHING_WINDOW_HR: int +HEART_RATE_WINDOW: int +SHORT_CHANNEL: bool +SHORT_CHANNEL_THRESH: float + +SCI: bool +SCI_TIME_WINDOW: int +SCI_THRESHOLD: float +PSP: bool +PSP_TIME_WINDOW: int +PSP_THRESHOLD: float + +# TODO: Implement +SNR: bool +SNR_TIME_WINDOW : int +SNR_THRESHOLD: float + +EXCLUDE_CHANNELS: bool +MAX_BAD_CHANNELS: int +LONG_CHANNEL_THRESH: float + +PPF: float + +DRIFT_MODEL: str +DURATION_BETWEEN_ACTIVITIES: int +HRF_MODEL: str +SHORT_CHANNEL_REGRESSION: bool + +N_JOBS: int + +TARGET_ACTIVITY: str +TARGET_CONTROL: str +ROI_GROUP_1: list[list[int]] +ROI_GROUP_2: list[list[int]] +ROI_GROUP_1_NAME: str +ROI_GROUP_2_NAME: str +P_THRESHOLD: float + +SEE_BAD_IMAGES: bool +ABS_T_VALUE: int +ABS_THETA_VALUE: int +ABS_CONTRAST_T_VALUE: int +ABS_CONTRAST_THETA_VALUE: int +ABS_SIGNIFICANCE_T_VALUE: int +ABS_SIGNIFICANCE_THETA_VALUE: int +BRAIN_DISTANCE: float +BRAIN_MODE: str + +EPOCH_REJECT_CRITERIA_THRESH: float +TIME_MIN_THRESH: int +TIME_MAX_THRESH: int + +VERBOSITY: bool + +REJECT_PAIRS = None +FORCE_DROP_ANNOTATIONS = None +FILTER_LOW_PASS = None +FILTER_HIGH_PASS = None +EPOCH_PAIR_TOLERANCE_WINDOW = None + +# FIXME: Shouldn't need each ordering - just order it before checking +FIXED_CATEGORY_COLORS = { + "SCI only": "skyblue", + "PSP only": "salmon", + "SNR only": "lightgreen", + "PSP + SCI": "orange", + "SCI + SNR": "violet", + "PSP + SNR": "gold", + "SCI + PSP": "orange", + "SNR + SCI": "violet", + "SNR + PSP": "gold", + "PSP + SNR + SCI": "gray", + "SCI + PSP + SNR": "gray", + "SCI + SNR + PSP": "gray", + "PSP + SCI + SNR": "gray", + "PSP + SNR + SCI": "gray", + "SNR + SCI + PSP": "gray", + "SNR + PSP + SCI": "gray", +} + + +REQUIRED_KEYS: dict[str, Any] = { + "BASE_SNIRF_FOLDER": str, + "SNIRF_SUBFOLDERS": list, + "STIM_DURATION": list, + "MAX_WORKERS": int, + "SECONDS_TO_STRIP": int, + "DOWNSAMPLE": bool, + "DOWNSAMPLE_FREQUENCY": int, + "FORCE_DROP_CHANNELS": list, + "SOURCE_DETECTOR_SEPARATOR": str, + "OPTODE_FILE": bool, + "OPTODE_FILE_PATH": str, + "OPTODE_FILE_SEPARATOR": str, + "TDDR": bool, + "WAVELET": bool, + "IQR": float, + "HEART_RATE": bool, + "SECONDS_TO_STRIP_HR": int, + "MAX_LOW_HR": int, + "MAX_HIGH_HR": int, + "SMOOTHING_WINDOW_HR": int, + "HEART_RATE_WINDOW": int, + "SHORT_CHANNEL": bool, + "SHORT_CHANNEL_THRESH": float, + "SCI": bool, + "SCI_TIME_WINDOW": int, + "SCI_THRESHOLD": float, + "PSP": bool, + "PSP_TIME_WINDOW": int, + "PSP_THRESHOLD": float, + "SNR": bool, + "SNR_TIME_WINDOW": int, + "SNR_THRESHOLD": float, + "EXCLUDE_CHANNELS": bool, + "MAX_BAD_CHANNELS": int, + "LONG_CHANNEL_THRESH": float, + "PPF": float, + "DRIFT_MODEL": str, + "DURATION_BETWEEN_ACTIVITIES": int, + "HRF_MODEL": str, + "SHORT_CHANNEL_REGRESSION": bool, + "N_JOBS": int, + "TARGET_ACTIVITY": str, + "TARGET_CONTROL": str, + "ROI_GROUP_1": list, + "ROI_GROUP_2": list, + "ROI_GROUP_1_NAME": str, + "ROI_GROUP_2_NAME": str, + "P_THRESHOLD": float, + "SEE_BAD_IMAGES": bool, + "ABS_T_VALUE": int, + "ABS_THETA_VALUE": int, + "ABS_CONTRAST_T_VALUE": int, + "ABS_CONTRAST_THETA_VALUE": int, + "ABS_SIGNIFICANCE_T_VALUE": int, + "ABS_SIGNIFICANCE_THETA_VALUE": int, + "BRAIN_DISTANCE": float, + "BRAIN_MODE": str, + "EPOCH_REJECT_CRITERIA_THRESH": float, + "TIME_MIN_THRESH": int, + 
"TIME_MAX_THRESH": int, + "VERBOSITY": bool, + + # "REJECT_PAIRS": bool, + # "FORCE_DROP_ANNOTATIONS": list, + # "FILTER_LOW_PASS": float, + # "FILTER_HIGH_PASS": float, + # "EPOCH_PAIR_TOLERANCE_WINDOW": int, +} + + +# Ensure that we are working in the directory of this file +script_dir = os.path.dirname(os.path.abspath(__file__)) +os.chdir(script_dir) + + +# Configure logging to file with timestamps and realtime flush +if PLATFORM_NAME == 'darwin': + logging.basicConfig( + filename=os.path.join(os.path.dirname(sys.executable), "../../../fnirs_analysis.log"), + level=logging.INFO, + format='%(asctime)s - %(processName)s - %(levelname)s - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + filemode='a' + ) + +else: + logging.basicConfig( + filename='fnirs_analysis.log', + level=logging.INFO, + format='%(asctime)s - %(processName)s - %(levelname)s - %(message)s', + datefmt='%Y-%m-%d %H:%M:%S', + filemode='a' + ) + +logger = logging.getLogger() + + + +class ProcessingError(Exception): + def __init__(self, message: str = "Something went wrong!"): + self.message = message + super().__init__(self.message) + + +def gui_entry(config: dict[str, Any], gui_queue: Queue, progress_queue: Queue) -> None: + try: + print("setting config") + set_config(config, True) + + # Start a thread to forward progress messages back to GUI + def forward_progress(): + while True: + try: + msg = progress_queue.get(timeout=1) + if msg == "__done__": + break + gui_queue.put(msg) + except: + continue + + t = threading.Thread(target=forward_progress, daemon=True) + t.start() + + # Run the actual processing, with progress_queue passed down + print("actual call") + result = run_groups(config, True, progress_queue=progress_queue) + + # Signal end of progress + progress_queue.put("__done__") + t.join() + + gui_queue.put({"success": True, "result": result}) + + + except Exception as e: + import traceback + gui_queue.put({ + "success": False, + "error": str(e), + "traceback": traceback.format_exc() + }) + + +def set_config(config: dict[str, Any], gui: bool = False) -> None: + """ + Validates and applies the given configuration dictionary. + + Parameters + ---------- + config : dict[str, Any] + Dictionary containing configuration keys and their values. + """ + + if gui: + globals().update({"GUI": True}) + + # Ensure all keys are present + for key, expected_type in REQUIRED_KEYS.items(): + if key not in config: + raise KeyError(f"Missing config key: {key}") + + value = config[key] + if not isinstance(value, expected_type): + # Special handling for lists to check list contents + if expected_type == list and isinstance(value, list): + continue # optionally: validate inner types too + raise TypeError(f"Key '{key}' has incorrect type. Expected {expected_type.__name__}, got {type(value).__name__}") + + # Update the global variables to match the values in the config keys + globals().update(config) + + # Ensure that passed through variables are correct or that they actually exist + assert Path(BASE_SNIRF_FOLDER).is_dir(), "BASE_SNIRF_FOLDER was not found. Please check the folder location and try again." + + for folder in SNIRF_SUBFOLDERS: + assert Path(os.path.join(BASE_SNIRF_FOLDER, folder)).is_dir(), f"The subfolder {folder} could not be found. Please check the folder location and try again." + + assert len(SNIRF_SUBFOLDERS) == len(STIM_DURATION), f"The amount of subfolders do not match the amount of stim durations. 
Subfolders: {len(SNIRF_SUBFOLDERS)} Stim durations: {len(STIM_DURATION)}" + + if OPTODE_FILE: + path = Path(OPTODE_FILE_PATH) + assert path.is_file(), "OPTODE_FILE was specified, but OPTODE_FILE_PATH is not a file." + assert path.suffix == ".txt", "OPTODE_FILE_PATH does not end with a .txt extension." + + # Ensure that the BASE_SNIRF_FOLDER is an absolute path - helpful when logger.infoing later + if 'BASE_SNIRF_FOLDER' in globals(): + abs_path = str(Path(BASE_SNIRF_FOLDER).resolve()) + globals()['BASE_SNIRF_FOLDER'] = abs_path + + # Supress MNE's warnings + if not VERBOSITY: + warnings.filterwarnings("ignore", category=ConvergenceWarning) + warnings.filterwarnings("ignore", category=RuntimeWarning) + + logger.info("[Config] Configuration successfully set.") + + + +def run_groups(config, gui: bool = False, progress_queue=None) -> tuple[dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]], dict[str, dict[str, BaseRaw]], dict[str, list[Figure]], dict[str, str], float]: + """ + Process multiple data folders and aggregate results, haemoglobin, figures, and processing details. + + Returns + ------- + tuple[dict[str, tuple[DataFrame, DataFrame, DataFrame]], dict[str, dict[str, BaseRaw]], dict[str, list[Figure]], dict[str, str]] + - dict[str, tuple[DataFrame, DataFrame, DataFrame]]: Results dataframes grouped by folder. + - dict[str, dict[str, BaseRaw]]: Raw haemoglobin data indexed by file ID. + - dict[str, list[Figure]]: Figures generated during processing grouped by step. + - dict[str, str]: Processing status messages indexed by file ID. + - float: Elapsed time + """ + + # Create dictionaries to store our results + all_results: dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]] = {} + all_figures: dict[str, list[Figure]] = {} + all_raw_haemo: dict[str, dict[str, BaseRaw]] = {} + all_processes: dict[str, str] = {} + + # Variables to store our total files to be processed and the remaining amount of files while the program is running + total_files = 0 + files_remaining = {'count': 0} + + start_time = time.time() + + # Iterate over all the folders and determine how many files are in the folder + logger.info("Calculating how many files there are...") + for folder in SNIRF_SUBFOLDERS: + full_path = os.path.join(BASE_SNIRF_FOLDER, folder) + num_items = len([ + f for f in os.listdir(full_path) + if os.path.isfile(os.path.join(full_path, f)) + ]) + total_files += num_items + logger.info(f"Total of {total_files} files.") + + # Set the remaining count to be the total amount of files + files_remaining['count'] = total_files + + # Iterate over all the folders + for folder, stim_duration in zip(SNIRF_SUBFOLDERS, STIM_DURATION): + full_path = os.path.join(BASE_SNIRF_FOLDER, folder) + try: + # Process all participants in the folder + logger.info(f"Processing all files in {folder}...") + raw_haemo, df_roi, df_cha, df_con, df_design_matrix, figures, process = process_folder(full_path, stim_duration, files_remaining, config, gui, progress_queue=progress_queue) + + # Store the results into the corresponding dictionaries + logger.info(f"Storing the results from the {folder} folder...") + + # TODO: This looks yucky + try: + all_results[folder] = (df_roi, df_cha, df_con, df_design_matrix) + logger.info(f"Applied all results.") + except: + pass + try: + for step, fig_list in figures.items(): + all_figures.setdefault(step, []).extend(fig_list) + logger.info(f"Applied all figures.") + except: + pass + try: + for file_id, raw in raw_haemo.items(): + all_raw_haemo[file_id] = raw + logger.info(f"Applied 
all haemo.") + except: + pass + try: + for file_id, p in process.items(): + all_processes[file_id] = p + logger.info(f"Applied all processes.") + except: + pass + + except ProcessingError as e: + logger.info(f"Something happened! {e}") + # Something really bad happened. No partial return + raise Exception(e) + + except Exception as e: + logger.info(f"Something happened! {e}") + # Still return a partial analysis even if something goes wrong + return all_results, all_raw_haemo, all_figures, all_processes, time.time() - start_time + + return all_results, all_raw_haemo, all_figures, all_processes, time.time() - start_time + + + +def create_image_montage(images: list[Image.Image], cols: int) -> Optional[Image.Image]: + """ + Creates a grid montage image from a list of PIL Images. + + Parameters + ---------- + images : list[Image.Image] + List of images to arrange in the montage. + cols : int + Number of columns in the montage grid. + + Returns + ------- + Optional[Image.Image] + The combined montage image, or None if the input list of images is empty. + """ + + # Verify that we have images to process + if not images: + return None + + # Calculate the width, height, and rows + logger.info("Calculating the montage parameters...") + widths, heights = zip(*(i.size for i in images)) + max_width = max(widths) + max_height = max(heights) + rows = (len(images) + cols - 1) // cols + + # Create the montage image + logger.info("Creating the montage...") + montage = Image.new('RGBA', (cols * max_width, rows * max_height), (255, 255, 255, 255)) + for idx, image in enumerate(images): + x = (idx % cols) * max_width + y = (idx // cols) * max_height + montage.paste(image, (x, y)) # type: ignore + + return montage + + + +def show_all_images(figures: dict[str, list[BytesIO]], inline: bool = False) -> None: + """ + Displays montages of figures either inline or in separate windows. + + Parameters + ---------- + figures : dict[str, list[Figure]] + Dictionary containing lists of figures categorized by type. + inline : bool, optional + If True, display images inline (e.g., in Jupyter notebooks). Otherwise, opens them in separate windows (default is False). + """ + + if inline: + logger.info("Inline was selected.") + else: + logger.info("Inline was not selected.") + + # If we have less than 4 figures, the columns should be the exact amount of images we have. If we have more, enforce 4 columns + logger.info("Calculating columns...") + if len(figures.get('Raw', [])) < 4: + cols = len(figures.get('Raw', [])) + else: + cols = 4 + + # Iterate over all of the types of figure, create a montage with figures of the same type, and display the resulting image + logger.info("Generating images...") + for _, fig_bytes_list in figures.items(): + pil_images = [] + for b in fig_bytes_list: + try: + img = Image.open(BytesIO(b)).convert("RGB") + pil_images.append(img) + except Exception as e: + logger.warning(f"Could not open image from bytes: {e}") + continue + + montage = create_image_montage(pil_images, cols) + + if montage: + # Determine how to display the images to the user + if inline: + display(montage) + else: + montage.show() + + + +def save_all_images(figures: dict[str, list[Figure]]) -> None: + """ + Saves montages of figures as timestamped PNG files in folder called 'images'. + + Parameters + ---------- + figures : dict[str, list[Figure]] + Dictionary containing lists of figures categorized by type. 
+ """ + + # Get the current working directory and create a folder called images if it does not exist + logger.info("Getting the current directory...") + if PLATFORM_NAME == 'darwin': + images_folder = os.path.join(os.path.dirname(sys.executable), "../../../images") + else: + cwd = os.getcwd() + images_folder = os.path.join(cwd, "images") + + logger.info("Attempting to create the images folder...") + os.makedirs(images_folder, exist_ok=True) + + # Generate a timestamp to be appended to the end of the file name + logger.info("Generating the timestamp...") + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + + # If we have less than 4 figures, the columns should be the exact value. If we have more, enforce 4 columns + logger.info("Calculating columns...") + raw_fig_count = len(figures.get('Raw', [])) + if raw_fig_count < 4: + cols = raw_fig_count + else: + cols = 4 + + # Iterate over all of the types of figures, create a montage with figures of the same type, and save the resulting image + logger.info("Generating images...") + for step, fig_bytes_list in figures.items(): + pil_images = [] + for b in fig_bytes_list: + try: + img = Image.open(BytesIO(b)).convert("RGB") + pil_images.append(img) + except Exception as e: + logger.warning(f"Could not open image from bytes: {e}") + continue + + montage = create_image_montage(pil_images, cols) + if montage: + filename = f"{step}_{timestamp}.png" + save_path = os.path.join(images_folder, filename) + montage.save(save_path) # type: ignore + logger.info(f"Saved image to {save_path}") + + logger.info(f"All images have been saved to '{images_folder}'.") + + + +def load_snirf(file_path: str, ID: str, drop_prefixes: list[str]) -> tuple[BaseRaw, Figure]: + """ + Loads a snirf file, optionally drops channels, downsamples, and creates a figure showing the results. + + Parameters + ---------- + file_path : str + Path of the snirf file to load. + ID : str + File name of the the snirf file that was loaded. + drop_prefixes : list[str] + List of channel name prefixes to drop from the data. + + Returns + ------- + tuple[BaseRaw, Figure] + - BaseRaw: The processed data object. + - Figure: The corresponding Matplotlib figure. 
+ """ + + logger.info(f"Loading the snirf file ({ID})...") + + # Read the snirf file + raw = read_raw_snirf(file_path, verbose=VERBOSITY) # type: ignore + raw.load_data(verbose=VERBOSITY) # type: ignore + + # Strip the specified amount of seconds from the start of the file + total_duration = getattr(raw, "times")[-1] + if total_duration > SECONDS_TO_STRIP: + raw.crop(tmin=SECONDS_TO_STRIP, tmax=total_duration, verbose=VERBOSITY) # type: ignore + logger.info(f"Stripped first {SECONDS_TO_STRIP} second(s) of data.") + else: + logger.info(f"Data length ({total_duration:.2f}s) less than strip duration; no cropping applied.") + + # If the user forcibly dropped channels, remove them now before any processing occurs + logger.info("Checking if there are channels to forcibly drop...") + if drop_prefixes: + logger.info("Force dropped channels was specified.") + channels_to_drop = [ch for ch in cast(list[str], getattr(raw, "ch_names")) if any(ch.startswith(prefix) for prefix in drop_prefixes)] + raw.drop_channels(channels_to_drop, "raise") # type: ignore + logger.info("Force dropped channels:", channels_to_drop) + + # If the user wants to downsample, do it right away + logger.info("Checking if we should downsample...") + if DOWNSAMPLE: + logger.info("Downsample was specified.") + sfreq_old = getattr(raw, "info")["sfreq"] + raw.resample(DOWNSAMPLE_FREQUENCY, verbose=VERBOSITY) # type: ignore + sfreq_new = getattr(raw, "info")["sfreq"] + logger.info(f"Finished downsampling. Old frequency: {sfreq_old}. New frequency: {sfreq_new}.") + + # Create a figure for the results + logger.info("Creating the figure...") + fig = cast(Figure, raw.plot(show=False, n_channels=len(getattr(raw, "ch_names")), duration=raw.times[-1]).figure) # type: ignore + fig.suptitle(f"Raw fNIRS Data for {ID}", fontsize=16) # type: ignore + fig.subplots_adjust(top=0.92) + plt.close(fig) + + logger.info("Successfully loaded the snirf file.") + + return raw, fig + + + +def calculate_and_apply_updated_optode_coordinates(data: BaseRaw) -> BaseRaw: + """ + Update optode coordinates on the given MNE Raw data using a specified optode file. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process with new optode coordinates. + + Returns + ------- + BaseRaw + The processed data object with the updated montage applied. 
+ """ + + logger.info("Updating optode coordinates...") + + fiducials: dict[str, NDArray[floating[Any]]] = {} + ch_positions: dict[str, NDArray[floating[Any]]] = {} + + # Read the lines from the optode file + logger.info(f"Reading optode file from {OPTODE_FILE_PATH}") + with open(OPTODE_FILE_PATH, 'r') as f: + for line in f: + if line.strip(): + # Split by the semicolon and convert to meters + ch_name, coords_str = line.split(OPTODE_FILE_SEPARATOR) + coords = np.array(list(map(float, coords_str.strip().split()))) * 0.001 + logger.info(f"Read line: {ch_name} with coords (m): {coords}") + + # The key we have is a fiducial + if ch_name.lower() in ['lpa', 'nz', 'rpa']: + fiducials[ch_name.lower()] = coords + + # The key we have is a source or detector + else: + ch_positions[ch_name.upper()] = coords + + + # Create montage with updated coords in head space + logger.info("Creating and applying the montage...") + initial_montage = mne.channels.make_dig_montage(ch_pos=ch_positions, nasion=fiducials.get('nz'), lpa=fiducials.get('lpa'), rpa=fiducials.get('rpa'), coord_frame='head') # type: ignore + data.set_montage(initial_montage, verbose=VERBOSITY) # type: ignore + logger.info("Successfully updated optode coordinates.") + + return data + + + +def calculate_and_apply_tddr(data: BaseRaw, ID: str) -> tuple[BaseRaw, Figure]: + """ + Applies Temporal Derivative Distribution Repair (TDDR) to the raw data and creates a figure showing the results. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. + ID : str + File name of the the snirf file that was loaded. + + Returns + ------- + tuple[BaseRaw, Figure] + - BaseRaw: The processed data object. + - Figure: The corresponding Matplotlib figure. + """ + + # Apply TDDR + logger.info("Applying temporal derivative distribution repair...") + raw_with_tddr = cast(BaseRaw, temporal_derivative_distribution_repair(data, verbose=VERBOSITY)) + + # Create a figure for the results + logger.info("Creating the figure...") + fig = cast(Figure, raw_with_tddr.plot(show=False, n_channels=len(getattr(data, "ch_names")), duration=data.times[-1]).figure) # type: ignore + fig.suptitle(f"TDDR for {ID}", fontsize=16) # type: ignore + fig.subplots_adjust(top=0.92) + plt.close(fig) + + logger.info("Successfully applied temporal derivative distribution repair.") + return raw_with_tddr, fig + + + +def iqr_threshold(coeffs: NDArray[float64], k: float = 1.5) -> floating[Any]: + + """ + Calculate the interquartile range (IQR) threshold scaled by a factor, k. + + Parameters + ---------- + coeffs : NDArray[float64] + Array of coefficients to compute the IQR from. + k : float, optional + Scaling factor for the IQR (default is 1.5). + + Returns + ------- + floating[Any] + The scaled IQR threshold value. + """ + + # Calculate the IQR + q1 = np.percentile(coeffs, 25) + q3 = np.percentile(coeffs, 75) + iqr = q3 - q1 + + return k * iqr + + + +def wavelet_iqr_denoise(signal: NDArray[float64], wavelet: str = 'db4', level: int = 3) -> NDArray[float64]: + """ + Denoises a signal using wavelet decomposition and IQR-based thresholding on detail coefficients. + + Parameters + ---------- + signal : NDArray[float64] + The input signal array to denoise. + wavelet : str, optional + The type of wavelet to use for decomposition (default is 'db4'). + level : int, optional + Decomposition level for wavelet transform (default is 3). + + Returns + ------- + NDArray[float64] + The denoised signal array, with the same length as the input. 
+ """ + + # Decompose the signal using wavelet transform and initialize a list with approximation coefficients + coeffs: list[NDArray[float64]] = pywt.wavedec(signal, wavelet, level=level) # type: ignore + cA = coeffs[0] + denoised_coeffs = [cA] + + # Threshold detail coefficients to reduce noise + for cD in coeffs[1:]: + threshold = iqr_threshold(cD, IQR) + cD_thresh = np.sign(cD) * np.maximum(np.abs(cD) - threshold, 0.0) # np.where((cD < lower) | (cD > upper), 0, cD) + cD_thresh = cD_thresh.astype(float64) + denoised_coeffs.append(cD_thresh) + + # Reconstruct the denoised signal + denoised_signal = cast(NDArray[float64], pywt.waverec(denoised_coeffs, wavelet)) # type: ignore + return denoised_signal[:len(signal)] + + + +def calculate_and_apply_wavelet(data: BaseRaw, ID: str) -> tuple[BaseRaw, Figure]: + """ + Applies a wavelet IQR denoising filter to the data and generates a plot. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. + ID : str + File name of the the snirf file that was loaded. + + Returns + ------- + tuple[BaseRaw, Figure] + - BaseRaw: The processed data object. + - Figure: The corresponding Matplotlib figure. + """ + + logger.info("Applying the wavelet filter...") + + # Denoise the data + logger.info("Denoising the data...") + loaded_data: NDArray[float64] = data.get_data(verbose=VERBOSITY) # type: ignore + denoised_data = np.zeros_like(loaded_data) + + logger.info("Calculating the IQR, decomposing the signal, and thresholding the coefficients...") + for ch in range(loaded_data.shape[0]): + denoised_data[ch, :] = wavelet_iqr_denoise(loaded_data[ch, :], wavelet='db4', level=3) + + # Reconstruct the data with the annotations + logger.info("Reconstructing the data with annotations...") + raw_with_tddr_and_wavelet = mne.io.RawArray(denoised_data, cast(mne.Info, data.info), verbose=VERBOSITY) + raw_with_tddr_and_wavelet.set_annotations(data.annotations.copy(), verbose=VERBOSITY) # type: ignore + + # Create a figure for the results + logger.info("Creating the figure...") + fig = cast(Figure, raw_with_tddr_and_wavelet.plot(show=False, n_channels=len(getattr(data, "ch_names")), duration=data.times[-1]).figure) # type: ignore + fig.suptitle(f"Wavelet for {ID}", fontsize=16) # type: ignore + fig.subplots_adjust(top=0.92) + plt.close(fig) + + logger.info("Successfully applied the wavelet filter.") + + return raw_with_tddr_and_wavelet, fig + + + +def short_channel_processing_for_hr(data: BaseRaw, short_chans: BaseRaw | None) -> tuple[float, NDArray[float64], NDArray[float64]]: + """ + Extract and trim short-channel fNIRS signal for heart rate analysis. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. + short_chans : BaseRaw | None + Data object with only short separation channels, or None if unavailable. + + Returns + ------- + tuple[float, NDArray[float64], NDArray[float64]] + - float: Sampling frequency of the signal. + - NDArray[float64]: Trimmed short-channel signal. + - NDArray[float64]: Corresponding time values. + """ + + # Find the short channel (or best candidate) and extract signal data and sampling frequency + logger.info("Extracting the signal and calculating the sampling frequency...") + + # If a short channel exists, use it for our signal. 
Otherwise just take the first channel in the data + # TODO: Find a better way around this + if short_chans is not None: + signal = cast(NDArray[float64], short_chans.get_data(picks=[0], verbose=VERBOSITY))[0] # type: ignore + else: + signal = cast(NDArray[float64], data.get_data(picks=[0], verbose=VERBOSITY))[0] # type: ignore + + # Calculate the sampling frequency + sfreq = cast(int, data.info['sfreq']) + + # Trim start and end of the signal to remove edge artifacts + logger.info(f"Removing {SECONDS_TO_STRIP_HR} seconds from the beginning and end of the file...") + strip_samples = int(sfreq * SECONDS_TO_STRIP_HR) + signal_trimmed = signal[strip_samples:-strip_samples] + times_trimmed = cast(NDArray[float64], getattr(data, "times"))[strip_samples:-strip_samples] + + return sfreq, signal_trimmed, times_trimmed + + + +def calculate_heart_rate_neurokit(sfreq: float, signal_trimmed: NDArray[float64]) -> tuple[NDArray[float64], float]: + """ + Calculate and smooth heart rate from a trimmed signal using NeuroKit. + + Parameters + ---------- + sfreq : float + Sampling frequency of the signal. + signal_trimmed : NDArray[float64] + Preprocessed and trimmed fNIRS signal. + + Returns + ------- + tuple[NDArray[float64], float] + - NDArray[float64]: Smoothed heart rate time series (BPM). + - float: Mean heart rate. + """ + + logger.info("Calculating heart rate using NeuroKit...") + + # Filter signal to isolate heart rate frequencies and detect peaks + logger.info("Filtering the signal and detecting peaks...") + signal_filtered = cast(NDArray[float64], nk.signal_filter(signal_trimmed, sampling_rate=sfreq, lowcut=0.8, highcut=2.5)) # type: ignore + peaks_dict = cast(dict[str, Any], nk.signal_findpeaks(signal_filtered)) # type: ignore + peaks = peaks_dict['Peaks'] + hr = cast(NDArray[float64], nk.signal_rate(peaks, sampling_rate=sfreq, desired_length=len(signal_trimmed))) # type: ignore + hr_clean = np.clip(hr, MAX_LOW_HR, MAX_HIGH_HR) + + # Smooth heart rate time series by replacing spikes with local rolling mean and calculate the mean + logger.info("Smoothing the signal and calculating the mean...") + hr_series = pd.Series(hr_clean) + local_median = hr_series.rolling(window=SMOOTHING_WINDOW_HR, center=True, min_periods=1).median() + spikes = hr_series > (local_median + 10) + smoothed_values = hr_series.copy() + smoothed_spikes = hr_series.rolling(window=SMOOTHING_WINDOW_HR, center=True, min_periods=1).mean() + smoothed_values[spikes] = smoothed_spikes[spikes] + hr_smooth_nk = cast(NDArray[float64], smoothed_values.to_numpy()) # type: ignore + mean_hr_nk = hr_smooth_nk.mean() + + logger.info("Original HR min/max: %f, %f", hr_clean.min(), hr_clean.max()) + logger.info("Smoothed HR min/max:%f, %f", hr_smooth_nk.min(), hr_smooth_nk.max()) + logger.info(f"Estimated mean HR nk: {mean_hr_nk:.1f} BPM") + + logger.info("Successfully calculated heart rate using NeuroKit.") + + return hr_smooth_nk, mean_hr_nk + + + +def calculate_heart_rate_scipy(sfreq: float, signal_trimmed: NDArray[float64]) -> tuple[NDArray[floating[Any]], NDArray[float64], np.ndarray[Any, np.dtype[np.bool_]], float]: + """ + Estimate heart rate using spectral analysis on a high-pass filtered signal. + + Parameters + ---------- + sfreq : float + Sampling frequency of the input signal. + signal_trimmed : NDArray[float64] + Trimmed fNIRS signal to analyze. + + Returns + ------- + tuple[NDArray[floating[Any]], NDArray[float64], np.ndarray[Any, np.dtype[np.bool_]], float] + - NDArray[floating[Any]]: Frequencies converted to beats per minute (BPM). 
+ - NDArray[float64]: Power spectral density (PSD) of the signal. + - np.ndarray[Any, np.dtype[np.bool_]]: Boolean mask indicating frequencies within heart rate range (30-300 BPM). + - float: Estimated mean heart rate in BPM corresponding to the PSD peak within the range. + """ + + logger.info("Calculating heart rate using SciPy...") + + # Apply a high-pass Butterworth filter to remove slow trends below 0.5 Hz from the trimmed signal (actual data) + logger.info("Applying a butterworth filter...") + b, a = cast(tuple[NDArray[float64], NDArray[float64]], butter(2, 0.5 / (sfreq / 2), btype='high')) + signal_hp = cast(NDArray[float64],filtfilt(b, a, signal_trimmed)) + + # Calculate the Power Spectral Density (PSD) of the filtered signal using Welch's method + logger.info("Calculating the PSD...") + nperseg = min(len(signal_hp), 4096) + frequencies_scipy, psd_scipy = cast(tuple[NDArray[float64], NDArray[float64]], welch(signal_hp, fs=sfreq, nperseg=nperseg, noverlap=nperseg//2)) + + # Convert frequency values to beats per minute (BPM) and set a heart rate range (30-300 BPM) + logger.info("Converting to BPM...") + freq_bpm_scipy = frequencies_scipy * 60 + freq_range_scipy = (freq_bpm_scipy > 30) & (freq_bpm_scipy < 300) + + # Identify the peak frequency within the heart rate range and estimate the mean heart rate in BPM + logger.info("Finding the mean...") + peak_index = np.argmax(psd_scipy[freq_range_scipy]) + mean_hr_scipy = freq_bpm_scipy[freq_range_scipy][peak_index] + + logger.info("Successfully calculated heart rate using SciPy.") + + return freq_bpm_scipy, psd_scipy, freq_range_scipy, mean_hr_scipy + + +def plot_heart_rate( + freq_bpm_scipy: NDArray[floating[Any]], + psd_scipy: NDArray[float64], + freq_range_scipy: np.ndarray[Any, np.dtype[np.bool_]], + mean_hr_scipy: float, + hr_smooth_nk: NDArray[floating[Any]], + mean_hr_nk: float, + times_trimmed: NDArray[floating[Any]], + overruled: bool +) -> tuple[Figure, Figure]: + """ + Generate plots comparing heart rate estimates from SciPy PSD and NeuroKit2. + + Parameters + ---------- + freq_bpm_scipy : NDArray[floating[Any]] + Frequencies in beats per minute from SciPy PSD analysis. + psd_scipy : NDArray[float64] + Power spectral density values corresponding to freq_bpm_scipy. + freq_range_scipy : np.ndarray[Any, np.dtype[np.bool_]] + Boolean mask indicating the heart rate frequency range used in PSD. + mean_hr_scipy : float + Mean heart rate estimated from SciPy PSD peak. + hr_smooth_nk : NDArray[floating[Any]] + Smoothed instantaneous heart rate from NeuroKit2. + mean_hr_nk : float + Mean heart rate estimated from NeuroKit2 data. + times_trimmed : NDArray[floating[Any]] + Time points corresponding to hr_smooth_nk values. + overruled: bool + True if the heart rate from NeuroKit2 is overriding the results from the PSD. + + Returns + ------- + tuple[Figure, Figure] + - Figure showing the PSD and SciPy heart rate estimate. + - Figure showing the time series comparison of heart rates. + """ + + # Create the first plot for the PSD. Add a yellow range to show what we will be filtering to. 
+ logger.info("Creating the figure...") + fig1, ax1 = plt.subplots(figsize=(10, 5)) # type: ignore + ax1.set_xlim(30, 300) + ax1.plot(freq_bpm_scipy[freq_range_scipy], psd_scipy[freq_range_scipy]) # type: ignore + ax1.axvline(x=mean_hr_scipy, color='red', linestyle='--', label=f'Mean HR: {mean_hr_scipy:.1f} BPM') # type: ignore + ax1.axvspan(min(mean_hr_nk - HEART_RATE_WINDOW, mean_hr_scipy - HEART_RATE_WINDOW), max(mean_hr_nk + HEART_RATE_WINDOW, mean_hr_scipy + HEART_RATE_WINDOW), color='yellow', alpha=0.3, label=f'HR Range ±{HEART_RATE_WINDOW} BPM') # type: ignore + ax1.set_xlabel('Heart Rate (BPM)') # type: ignore + ax1.set_ylabel('Power Spectral Density') # type: ignore + ax1.set_title('PSD of fNIRS signal - Peak indicates Heart Rate') # type: ignore + ax1.grid(True) # type: ignore + + # Was the value we reported here correct for the data on the graph or was it overruled? + if overruled: + note = ( + '\n' + 'Note: Calculation was bad!\n' + 'Data has been set to match\n' + 'the value from NeuroKit2.' + ) + phantom = Line2D([0], [0], color='none', label=note) + handles, _ = ax1.get_legend_handles_labels() + ax1.legend(handles=handles + [phantom]) # type: ignore + + else: + ax1.legend() # type: ignore + plt.close(fig1) + + # Create the second plot showing the rolling heart rate, as well as the two averages that were calculated + logger.info("Creating the figure...") + fig2, ax2 = plt.subplots(figsize=(14, 6)) # type: ignore + ax2.plot(times_trimmed, hr_smooth_nk, label='Instantaneous HR (NeuroKit2)', color='blue', alpha=0.7) # type: ignore + ax2.axhline(mean_hr_nk, color='red', linestyle='--', label=f'Mean HR NeuroKit2: {mean_hr_nk:.1f} BPM') # type: ignore + ax2.axhline(mean_hr_scipy, color='orange', linestyle=':', label=f'SciPy Welch PSD (HP filtered): {mean_hr_scipy:.1f} BPM') # type: ignore + ax2.set_xlabel('Time (seconds)') # type: ignore + ax2.set_ylabel('Heart Rate (BPM)') # type: ignore + ax2.set_title('Heart Rate Estimates Comparison') # type: ignore + ax2.legend() # type: ignore + ax2.grid(True) # type: ignore + fig2.tight_layout() + plt.close(fig2) + + return fig1, fig2 + + + +def plot_timechannel_quality_metrics(data: BaseRaw, scores: NDArray[float64], times: list[tuple[float]], color_stops: tuple[list[float], list[float]], threshold: float, title: Optional[str] = None) -> tuple[Figure, Figure]: + + """ + Generate two heatmaps visualizing channel quality metrics over time. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. + scores : NDArray[float64] + A 2D array of quality scores for each channel over time. + times : list[tuple[float]] + List of time boundaries used to label each score column. + color_stops : tuple[list[float], list[float]] + Two lists of color values for custom colormaps. + threshold : float, + Threshold value for the color bar. + title : Optional[str], optional + Base title for the figures, (default is None). + + Returns + ------- + tuple[Figure, Figure] + - Figure: Heatmap of all scores across channels and time. + - Figure: Binary heatmap showing only scores above the threshold. 
+ """ + + # Get only the hbo / hbr channels once as we dont need to see the same results twice + half_ch = len(getattr(data, "ch_names")) // 2 + ch_names = getattr(data, "ch_names")[:half_ch] + scores = scores[:half_ch, :] + + # Extract rounded time points to use as column headers + cols = [np.round(t[0]) for t in times] + n_chans = len(ch_names) + vsize = 0.2 * n_chans + + # Create the first figure + fig1, ax1 = plt.subplots(figsize=(10, vsize), layout="constrained") # type: ignore + fig1.suptitle(title + " - All Scores", fontsize=16, fontweight="bold") # type: ignore + + # Create a DataFrame to structure data for the heatmap + data_to_plot = DataFrame( + data=scores, + columns=pd.Index(cols, name="Time (s)"), + index=pd.Index(ch_names, name="Channel"), + ) + + # Define a custom colormap using provided color stops and base colors + base_colors = ['red', 'red', 'yellow', 'green', 'green'] + colors = list(zip(color_stops[0], base_colors[:len(color_stops[0])])) + cmap = mcolors.LinearSegmentedColormap.from_list('gyr', colors) + + # Plot heatmap of scores + sns.heatmap( # type: ignore + data=data_to_plot, + cmap=cmap, + vmin=0, + vmax=1, + cbar_kws=dict(label="Score"), + ax=ax1, + ) + + # Add vertical dashed lines at each time boundary, sit the title, and place a black strikethrough through a bad channel + for x in range(1, len(times)): + ax1.axvline(x, ls="dashed", lw=0.25, dashes=(25, 15), color="gray") # type: ignore + ax1.set_title("All Scores", fontweight="bold") # type: ignore + markbad(data, ax1, ch_names) + + # Calculate average score per channel and annotate to the right of the heatmap + avg_sci_subset: pd.Series[float] = data_to_plot.mean(axis=1) # type: ignore + norm = mcolors.Normalize(vmin=0, vmax=1) + text_x = data_to_plot.shape[1] + 0.5 + for i, val in enumerate(avg_sci_subset): + color = cmap(norm(val)) + ax1.text( # type: ignore + text_x, + i + 0.5, + f"{val:.3f}", + va='center', + ha='left', + fontsize=9, + color=color + ) + ax1.set_xlim(right=text_x + 1.5) + + plt.close(fig1) + + # Create the second figure + fig2, ax2 = plt.subplots(figsize=(10, vsize), layout="constrained") # type: ignore + fig2.suptitle(title + " - Scores Above Threshold", fontsize=16, fontweight="bold") # type: ignore + + # Create a DataFrame to structure data for the heatmap + data_to_plot = DataFrame( + data=scores > threshold, + columns=pd.Index(cols, name="Time (s)"), + index=pd.Index(ch_names, name="Channel"), + ) + + # Define a custom colormap using provided color stops and base colors + base_colors = ['red', 'red', 'white', 'white'] + colors = list(zip(color_stops[1], base_colors[:len(color_stops[1])])) + cmap = mcolors.LinearSegmentedColormap.from_list('gyr', colors) + + # Plot heatmap of scores + sns.heatmap( # type: ignore + data=data_to_plot, + vmin=0, + vmax=1, + cmap=cmap, + cbar_kws=dict(label="Score"), + ax=ax2, + ) + + # Add vertical dashed lines at each time boundary, sit the title, and place a black strikethrough through a bad channel + for x in range(1, len(times)): + ax2.axvline(x, ls="dashed", lw=0.25, dashes=(25, 15), color="gray") # type: ignore + ax2.set_title("Scores > Threshold", fontweight="bold") # type: ignore + markbad(data, ax2, ch_names) + + plt.close(fig2) + + return fig1, fig2 + + + +def markbad(data: BaseRaw, ax: Axes, ch_names: list[str]) -> None: + """ + Add a strikethrough to a plot for channels marked as bad. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. 
+ ax : Axes + Matplotlib Axes object where the strikethrough lines will be drawn. + ch_names : list[str] + List of channel names corresponding to the y-axis of the plot. + """ + + # Iterate over all the channels + for i, ch in enumerate(ch_names): + + # If it is marked as bad, place a strikethrough on the channel + if ch in data.info["bads"]: + ax.axhline(i + 0.5, ls="solid", lw=4, color="black", zorder=10) # type: ignore + + + +def calculate_scalp_coupling(data: BaseRaw, l_freq: float = 0.7, h_freq: float = 1.5) -> tuple[list[str], Figure, Figure]: + """ + Calculate the scalp coupling index (SCI) and identify bad channels based on a threshold. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. + l_freq : float, optional + Low cutoff frequency for bandpass filtering in Hz (default is 0.7). + h_freq : float, optional + High cutoff frequency for bandpass filtering in Hz (default is 1.5) + + Returns + ------- + tuple[list[str], Figure, Figure] + - list[str]: Channel names identified as bad based on SCI threshold. + - Figure: Heatmap of all SCI scores across time and channels. + - Figure: Binary heatmap of SCI scores exceeding the threshold. + """ + + logger.info("Calculating scalp coupling index...") + + # Compute the SCI + _, scores, times = cast(tuple[NDArray[float64], NDArray[float64], list[tuple[float]]], scalp_coupling_index_windowed_raw(data, time_window=SCI_TIME_WINDOW, l_freq=l_freq, h_freq=h_freq)) + + # Identify channels that don't meet the provided threshold + logger.info("Identifying channels that do not meet the threshold...") + sci = scores.mean(axis=1) + data.info["bads"] = list(compress(cast(list[str], getattr(data, "ch_names")), sci < SCI_THRESHOLD)) + + # Determine the colors based on the threshold, and create the figures + logger.info("Creating the figures...") + color_stops = ([0.0, SCI_THRESHOLD, SCI_THRESHOLD+0.1, 0.8, 1.0], [0.0, SCI_THRESHOLD, SCI_THRESHOLD, 1.0]) + fig1, fig2 = plot_timechannel_quality_metrics(data, scores, times, color_stops, SCI_THRESHOLD, "Scalp Coupling Index") + + logger.info("Successfully calculated scalp coupling index.") + + return list(compress(cast(list[str], getattr(data, "ch_names")), sci < SCI_THRESHOLD)), fig1, fig2 + + + +def scalp_coupling_index_windowed_raw(data: BaseRaw, time_window: float = 3.0, l_freq: float = 0.7, h_freq: float = 1.5, l_trans_bandwidth: float = 0.3, h_trans_bandwidth: float = 0.3) -> tuple[BaseRaw, NDArray[float64], list[tuple[float, float]]]: + """ + Compute windowed scalp coupling index (SCI) across fNIRS channels. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. + time_window : float, optional + Length of each time window in seconds (default is 3.0). + l_freq : float, optional + Low cutoff frequency for filtering in Hz (default is 0.7). + h_freq : float, optional + High cutoff frequency for filtering in Hz (default is 1.5). + l_trans_bandwidth : float, optional + Transition bandwidth for the low cutoff in Hz (default is 0.3). + h_trans_bandwidth : float, optional + Transition bandwidth for the high cutoff in Hz (default is 0.3). + + Returns + ------- + tuple[BaseRaw, NDArray[float64], list[tuple[float, float]]] + - BaseRaw: The original data object (unchanged). Ensures compatibility with peak_power(). + - NDArray[float64]: Correlation scores for each channel and time window. + - list[tuple[float, float]]: Time intervals for each window in seconds. 
+ """ + + # Pick only fNIRS channels and sort them by channel name + picks: NDArray[np.intp] = mne.pick_types(cast(mne.Info, data.info), fnirs=True) # type: ignore + picks = picks[np.argsort([getattr(data, "ch_names")[pick] for pick in picks])] + + # FIXME: This may happen if the heart rate calculation tries to set a value way too low + if l_freq < 0.3: + l_freq = 0.3 + + # Band-pass filter the selected fNIRS channels + filtered_data = cast(NDArray[float64], filter_data( + getattr(data, "_data"), + getattr(data, "info")["sfreq"], + l_freq, + h_freq, + picks=picks, + verbose=False, + l_trans_bandwidth=l_trans_bandwidth, # type: ignore + h_trans_bandwidth=h_trans_bandwidth, # type: ignore + )) + + # Calculate number of samples per time window, the total number of windows, and prepare output variables + window_samples = int(np.ceil(time_window * getattr(data, "info")["sfreq"])) + n_windows = int(np.floor(len(data) / window_samples)) + scores = np.zeros((len(picks), n_windows)) + times: list[tuple[float, float]] = [] + + # Slide through the data in windows to compute scalp coupling index (SCI) + for window in range(n_windows): + start_sample = int(window * window_samples) + end_sample = start_sample + window_samples + end_sample = np.min([end_sample, len(data) - 1]) + + # Track time boundaries for each window + t_start = getattr(data, "times")[start_sample] + t_stop = getattr(data, "times")[end_sample] + times.append((t_start, t_stop)) + + # Iterate through channels in pairs (hbo, hbr). This requires them to be sorted by channel name + for ii in range(0, len(picks), 2): + c1: NDArray[float64] = filtered_data[picks[ii]][start_sample:end_sample] + c2 = filtered_data[picks[ii + 1]][start_sample:end_sample] + + # Ensure the correlation data is valid + if np.std(c1) == 0 or np.std(c2) == 0 or np.any(np.isnan(c1)) or np.any(np.isnan(c2)): + c = 0 + else: + c = np.corrcoef(c1, c2)[0][1] + + # Assign the computed correlation to both channels in the pair + scores[ii, window] = c + scores[ii + 1, window] = c + + scores = scores[np.argsort(picks)] + + return data, scores, times + + + +def calculate_peak_power(data: BaseRaw, l_freq: float = 0.7, h_freq: float = 1.5) -> tuple[list[str], Figure, Figure]: + """ + Calculate peak spectral power (PSP) for fNIRS channels and identify bad channels. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. + l_freq : float, optional + Low cutoff frequency for filtering in Hz (default is 0.7) + h_freq : float, optional + High cutoff frequency for filtering in Hz (default is 1.5) + + Returns + ------- + tuple[list[str], Figure, Figure] + - list[str]: Names of channels below the PSP threshold. + - Figure: Heatmap of all PSP scores. + - Figure: Heatmap of scores above the PSP threshold. 
+ """ + + logger.info("Calculating peak spectral power...") + + # Compute the PSP + _, scores, times = cast(tuple[NDArray[float64], NDArray[float64], list[tuple[float]]], peak_power(data, time_window=PSP_TIME_WINDOW, threshold=PSP_THRESHOLD, l_freq=l_freq, h_freq=h_freq, verbose=False)) + + # Identify channels that don't meet the provided threshold + logger.info("Identifying channels that do not meet the threshold...") + psp = scores.mean(axis=1) + data.info["bads"] = list(compress(cast(list[str], getattr(data, "ch_names")), psp < PSP_THRESHOLD)) + + # Determine the colors based on the threshold, and create the figures + logger.info("Creating the figures...") + color_stops = ([0.0, PSP_THRESHOLD, PSP_THRESHOLD+0.1, 0.3, 1.0], [0.0, PSP_THRESHOLD, PSP_THRESHOLD, 1.0]) + psp1, psp2 = plot_timechannel_quality_metrics(data, scores, times, color_stops, PSP_THRESHOLD, "Peak Spectral Power") + + logger.info("Successfully calculated peak spectral power.") + + return list(compress(cast(list[str], getattr(data, "ch_names")), psp < PSP_THRESHOLD)), psp1, psp2 + + + +def calculate_signal_noise_ratio(data: BaseRaw) -> tuple[list[str], Figure]: + """ + Calculates the signal-to-noise ratio (SNR) for each channel and identifies those below a defined threshold. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. + + Returns + ------- + tuple[list[str], Figure] + - list[str]: A list of channel names that fall below the SNR threshold and are considered bad. + - Figure: A matplotlib Figure showing the channels' SNR values. + """ + + logger.info("Calculating signal to noise ratio...") + + # Compute the signal-to-noise ratio values + logger.info("Computing the signal to noise power...") + signal_band=(0.01, 0.5) + noise_band=(1.0, 10.0) + data_signal = data.copy().filter(*signal_band, verbose=False) #type: ignore + data_noise = data.copy().filter(*noise_band, verbose=False) #type: ignore + signal_power = np.mean(data_signal.get_data()**2, axis=1) #type: ignore + noise_power = np.mean(data_noise.get_data()**2, axis=1) #type: ignore + + # Calculate the snr using the standard formula for dB + snr = 10 * np.log10(signal_power / (noise_power + np.finfo(float).eps)) + + # TODO: Understand what this does + groups: dict[str, list[str]] = {} + for ch in getattr(data, "ch_names"): + # Look for the space in the channel names and remove the characters after + # This is so we can get both oxy and deoxy to remove, as they will have the same source and detector + base = ch.rsplit(' ', 1)[0] + groups.setdefault(base, []).append(ch) # type: ignore + + # If any of the channels do not meet our threshold, they will get inserted into the bad_channels set + bad_channels: set[str] = set() + for base, ch_list in groups.items(): + if any(s < SNR_THRESHOLD for s, ch in zip(snr, getattr(data, "ch_names")) if ch in ch_list): + bad_channels.update(ch_list) + + # Design and create the figure + logger.info("Creating the figure...") + snr_fig, ax = plt.subplots(figsize=(12, 4), layout="constrained") # type: ignore + colors = [(0/20, 'red'), (SNR_THRESHOLD/20, 'red'), ((SNR_THRESHOLD+.5)/20, 'yellow'), ((SNR_THRESHOLD+1)/20, 'green'), (20/20, 'green')] + cmap = LinearSegmentedColormap.from_list('custom_snr_cmap', colors) + norm = mcolors.Normalize(vmin=0, vmax=20) + scatter = ax.scatter(range(len(snr)), snr, c=snr, cmap=cmap, alpha=0.8, s=100, norm=norm) # type: ignore + ax.set(xlabel="Channel Number", ylabel="Signal-to-Noise Ratio (dB)", xlim=[0, len(snr)], ylim=[0, 20]) + ax.axhline(SNR_THRESHOLD, color='black', 
linestyle='--', alpha=0.3, linewidth=1) # type: ignore + cbar = snr_fig.colorbar(scatter, ax=ax, label="SNR Thresholds (dB)") # type: ignore + cbar.set_ticks([0, SNR_THRESHOLD, SNR_THRESHOLD+1, 20]) # type: ignore + cbar.set_ticklabels(['0', str(SNR_THRESHOLD), str(SNR_THRESHOLD+1), '20']) # type: ignore + + plt.close() + + logger.info("Successfully calculated signal to noise ratio.") + + return list(bad_channels), snr_fig + + + +def mark_bad_channels(data: BaseRaw, ID: str, bad_channels_sci: set[str], bad_channels_psp: set[str], bad_channels_snr: set[str]) -> tuple[BaseRaw, Figure, int]: + """ + Drops bad channels from the data and generates a bar plot showing which channels were removed and why. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. + ID : str + File name of the the snirf file that was loaded. + bad_channels_sci : set[str] + Channels marked as bad by the SCI method. + bad_channels_psp : set[str] + Channels marked as bad by the PSP method. + bad_channels_snr : set[str] + Channels marked as bad by the SNR method. + + Returns + ------- + tuple[BaseRaw, Figure] + - BaseRaw: The modified data object with bad channels removed. + - Figure: A matplotlib Figure showing the dropped channels categorized by method. + """ + + logger.info("Dropping the channels that were marked bad...") + + # Combine all of the bad channels into one and ensure the short channel is not present + bad_channels = bad_channels_sci | bad_channels_psp | bad_channels_snr + + logger.info(f"Channels that were bad on SCI: {bad_channels_sci}") + logger.info(f"Channels that were bad on PSP: {bad_channels_psp}") + logger.info(f"Channels that were bad on SNR: {bad_channels_snr}") + logger.info(f"Total bad channels: {bad_channels}") + + # Add the channles to the bads key and drop the bads key from the data + data.info["bads"] = list(bad_channels) + data = cast(BaseRaw, data.drop_channels(getattr(data, "info")["bads"])) # type: ignore + + # Organize channels into categories + sets = [ + (bad_channels_sci, "SCI"), + (bad_channels_psp, "PSP"), + (bad_channels_snr, "SNR"), + ] + + # Graph what channels were dropped and why they were dropped + channel_categories: dict[str, str] = {} + + for ch in bad_channels: + present_in = [name for s, name in sets if ch in s] + # Create a label for the category + if len(present_in) == 1: + label = f"{present_in[0]} only" + else: + label = " + ".join(sorted(present_in)) + channel_categories[ch] = label + + # Sort channels alphabetically within categories for nicer visualization + logger.info("Sorting the bad channels by type...") + categories = sorted(set(channel_categories.values())) + channel_names: list[str] = [] + category_labels: list[str] = [] + for cat in categories: + chs_in_cat = sorted([ch for ch, c in channel_categories.items() if c == cat]) + channel_names.extend(chs_in_cat) + category_labels.extend([cat] * len(chs_in_cat)) + + colors = {cat: FIXED_CATEGORY_COLORS[cat] for cat in categories} + + logger.info("Creating the figure...") + # Create the figure + fig, ax = plt.subplots(figsize=(10, max(3, len(channel_names) * 0.3))) # type: ignore + y_pos = range(len(channel_names)) + ax.barh(y_pos, [1]*len(channel_names), color=[colors[cat] for cat in category_labels]) # type: ignore + ax.set_yticks(y_pos) # type: ignore + ax.set_yticklabels(channel_names) # type: ignore + ax.set_xlabel("Marked as Bad") # type: ignore + ax.set_title(f"Bad Channels by Method for {ID}") # type: ignore + ax.set_xlim(0, 1) + ax.set_xticks([]) # type: ignore + 
+ ax.grid(axis='x', linestyle='--', alpha=0.7) # type: ignore
+
+ # Add a legend denoting why the channels were bad
+ for label, color in colors.items():
+ ax.bar(0, 0, color=color, label=label) # type: ignore
+ ax.legend() # type: ignore
+
+ fig.tight_layout()
+ plt.close(fig)
+
+ logger.info("Successfully dropped the channels that were marked bad.")
+
+ return data, fig, len(bad_channels)
+
+
+
+ def calculate_optical_density(data: BaseRaw, ID: str) -> tuple[BaseRaw, Figure]:
+ """
+ Converts raw intensity data to optical density and generates a plot of the transformed signals.
+
+ Parameters
+ ----------
+ data : BaseRaw
+ The loaded data object to process.
+ ID : str
+ File name of the snirf file that was loaded.
+
+ Returns
+ -------
+ tuple[BaseRaw, Figure]
+ - BaseRaw: The transformed data in optical density format.
+ - Figure: A matplotlib figure displaying the optical density signals across all channels.
+ """
+
+ logger.info("Calculating optical density...")
+
+ # Calculate the optical density from the raw data
+ optical_density_data = cast(BaseRaw, optical_density(data))
+
+ logger.info("Creating the figure...")
+ fig = cast(Figure, optical_density_data.plot(show=False, n_channels=len(getattr(data, "ch_names")), duration=getattr(data, "times")[-1]).figure) # type: ignore
+ fig.suptitle(f"Optical density data for {ID}", fontsize=16) # type: ignore
+ fig.subplots_adjust(top=0.92)
+ plt.close(fig)
+
+ logger.info("Successfully calculated optical density.")
+
+ return optical_density_data, fig
+
+
+
+ # STEP 9: Haemoglobin concentration
+ def calculate_haemoglobin_concentration(optical_density_data: BaseRaw, ID: str) -> tuple[BaseRaw, Figure]:
+ """
+ Calculates haemoglobin concentration from optical density data using the Beer-Lambert law and generates a plot.
+
+ Parameters
+ ----------
+ optical_density_data : BaseRaw
+ The data in optical density format.
+ ID : str
+ File name of the snirf file that was loaded.
+
+ Returns
+ -------
+ tuple[BaseRaw, Figure]
+ - BaseRaw: The haemoglobin concentration data object.
+ - Figure: A matplotlib figure displaying the haemoglobin concentration signals.
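+
+ Examples
+ --------
+ A minimal sketch; ``od_data`` would come from ``calculate_optical_density``
+ and the file name is purely illustrative:
+
+ >>> haemo, fig = calculate_haemoglobin_concentration(od_data, "participant_01.snirf")
+ >>> fig.savefig("haemoglobin_participant_01.png")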
+ """ + + logger.info("Calculating haemoglobin concentration data...") + + # Get the haemoglobin concentration using beer lambert law + haemoglobin_concentration_data = beer_lambert_law(optical_density_data, PPF) + + logger.info("Creating the figure...") + fig = cast(Figure, optical_density_data.plot(show=False, n_channels=len(getattr(optical_density_data, "ch_names")), duration=getattr(optical_density_data, "times")[-1]).figure) # type: ignore + fig.suptitle(f"Haemoglobin concentration data for {ID}", fontsize=16) # type: ignore + fig.subplots_adjust(top=0.92) + plt.close(fig) + + logger.info("Successfully calculated haemoglobin concentration data.") + + return haemoglobin_concentration_data, fig + + + +# -------------------------------------- HARDCODED ----------------------------------------------- + +def extract_normal_epochs(haemoglobin_concentration_data: BaseRaw) -> dict[str, list[Any] | mne.evoked.EvokedArray]: + + events, _ = mne.events_from_annotations(haemoglobin_concentration_data, event_id={"Reach": 1, "Start of Rest": 2}, verbose=VERBOSITY) # type: ignore + event_dict = {"Reach": 1, "Start of Rest": 2} + + epochs = mne.Epochs( + haemoglobin_concentration_data, + events, + event_id=event_dict, + tmin=TIME_MIN_THRESH, + tmax=TIME_MAX_THRESH, + reject=dict(hbo=EPOCH_REJECT_CRITERIA_THRESH), + reject_by_annotation=True, + proj=True, + baseline=(None, 0), + preload=True, + detrend=None, + verbose=VERBOSITY, + ) + + evoked_dict: dict[str, list[Any] | mne.evoked.EvokedArray] = { + "Reach/HbO": epochs["Reach"].average(picks="hbo"), # type: ignore + "Reach/HbR": epochs["Reach"].average(picks="hbr"), # type: ignore + } + + # Rename channels until the encoding of frequency in ch_name is fixed + for condition in evoked_dict: + evoked_dict[condition].rename_channels(lambda x: x[:-4]) # type: ignore + + return evoked_dict + + + +def calculate_and_apply_negative_correlation_enhancement(haemoglobin_concentration_data: BaseRaw) -> dict[str, list[Any] | mne.evoked.EvokedArray]: + + events, _ = mne.events_from_annotations(haemoglobin_concentration_data, event_id={"Reach": 1, "Start of Rest": 2}, verbose=VERBOSITY) # type: ignore + event_dict = {"Reach": 1, "Start of Rest": 2} + + raw_anti = enhance_negative_correlation(haemoglobin_concentration_data) + + epochs_anti = mne.Epochs( + raw_anti, + events, + event_id=event_dict, + tmin=TIME_MIN_THRESH, + tmax=TIME_MAX_THRESH, + reject=dict(hbo=EPOCH_REJECT_CRITERIA_THRESH), + reject_by_annotation=True, + proj=True, + baseline=(None, 0), + preload=True, + detrend=None, + verbose=VERBOSITY, + ) + + evoked_dict_anti: dict[str, list[Any] | mne.evoked.EvokedArray] = { + "Reach/HbO": epochs_anti["Reach"].average(picks="hbo"), # type: ignore + "Reach/HbR": epochs_anti["Reach"].average(picks="hbr"), # type: ignore + } + + # Rename channels until the encoding of frequency in ch_name is fixed + for condition in evoked_dict_anti: + evoked_dict_anti[condition].rename_channels(lambda x: x[:-4]) # type: ignore + + return evoked_dict_anti + + + +def calculate_and_apply_short_channel_correction(optical_density_data: BaseRaw) -> dict[str, list[Any] | mne.evoked.EvokedArray]: + + od_corrected = short_channel_regression(optical_density_data, SHORT_CHANNEL_THRESH) + haemoglobin_concentration_data = beer_lambert_law(od_corrected, PPF) + + events, _ = mne.events_from_annotations(haemoglobin_concentration_data, event_id={"Reach": 1, "Start of Rest": 2}, verbose=VERBOSITY) # type: ignore + event_dict = {"Reach": 1, "Start of Rest": 2} + + epochs_corr = mne.Epochs( + 
haemoglobin_concentration_data, + events, + event_id=event_dict, + tmin=TIME_MIN_THRESH, + tmax=TIME_MAX_THRESH, + reject=dict(hbo=EPOCH_REJECT_CRITERIA_THRESH), + reject_by_annotation=True, + proj=True, + baseline=(None, 0), + preload=True, + detrend=None, + verbose=VERBOSITY, + ) + + evoked_dict_corr: dict[str, list[Any] | mne.evoked.EvokedArray] = { + "Reach/HbO": epochs_corr["Reach"].average(picks="hbo"), # type: ignore + "Reach/HbR": epochs_corr["Reach"].average(picks="hbr"), # type: ignore + } + + # Rename channels until the encoding of frequency in ch_name is fixed + for condition in evoked_dict_corr: + evoked_dict_corr[condition].rename_channels(lambda x: x[:-4]) # type: ignore + + return evoked_dict_corr + + + +def signal_enhancement_techniques_images(evoked_dict: dict[str, list[Any] | mne.evoked.EvokedArray], evoked_dict_anti: dict[str, list[Any] | mne.evoked.EvokedArray], evoked_dict_corr:dict[str, list[Any] | mne.evoked.EvokedArray] | None): + + # If we have two images, ensure we only have two columns + if evoked_dict_corr is None: + fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) # type: ignore + + # If we have three images, ensure we have three columns + else: + fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15, 6)) # type: ignore + + color_dict = dict(HbO="#AA3377", HbR="b") + + # TODO: This is to prevent the warning that we are only plotting one channel. Don't we want all though? + mne.set_log_level('WARNING') # type: ignore + + logger.info("Creating the figure...") + + # Plot the graph for the original data + mne.viz.plot_compare_evokeds( # type: ignore + evoked_dict, + combine="mean", + ci=0.95, # type: ignore + axes=axes[0], + colors=color_dict, + ylim=dict(hbo=[-10, 15]), + show=False, + ) + + # Plot the graph for the enhanced anticorrelation data + mne.viz.plot_compare_evokeds( # type: ignore + evoked_dict_anti, + combine="mean", + ci=0.95, # type: ignore + axes=axes[1], + colors=color_dict, + ylim=dict(hbo=[-10, 15]), + show=False, + ) + + # Plot the graph for short channel regression data, if it exists + if evoked_dict_corr is not None: + mne.viz.plot_compare_evokeds( # type: ignore + evoked_dict_corr, + combine="mean", + ci=0.95, # type: ignore + axes=axes[2], + colors=color_dict, + ylim=dict(hbo=[-10, 15]), + show=False, + ) + + mne.set_log_level('INFO') # type: ignore + + # If we have a short channel, set three titles + if evoked_dict_corr is not None: + for column, condition in enumerate( + ["Original Data", "With Enhanced Anticorrelation", "With Short Regression"] + ): + axes[column].set_title(f"{condition}") + + # If we do not have a short channel, set two titles + else: + for column, condition in enumerate( + ["Original Data", "With Enhanced Anticorrelation"] + ): + axes[column].set_title(f"{condition}") + + plt.close(fig) + + return fig + + + +def create_design_matrix(data: BaseRaw, stim_duration: float, short_chans: BaseRaw | None) -> tuple[DataFrame, Figure]: + """ + Creates a design matrix for first-level analysis including optional short channel regression, and generates a plot. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. + stim_duration : float + Duration of the stimulus/event in seconds. + short_chans : BaseRaw | None + Data object containing only short channels for systemic component regression, or None if there is no short channels. + + Returns + ------- + tuple[DataFrame, Figure] + - DataFrame: The generated design matrix. + - Figure: A matplotlib figure visualizing the design matrix. 
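+
+ Examples
+ --------
+ A minimal sketch; ``haemo_data`` and ``shorts`` are illustrative names for the
+ outputs of the preprocessing steps above (pass ``short_chans=None`` when no
+ short channel exists):
+
+ >>> dm, fig = create_design_matrix(haemo_data, stim_duration=5.0, short_chans=shorts)
+ >>> list(dm.columns) # condition regressors, drift terms, and short_0..short_N when regression is enabled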
+ """ + + # Create the design martix + logger.info("Creating the design matrix... (This may take some time)") + + # If the design matrix is fir, calculate some of the extra required parameters before creating the matrix + if HRF_MODEL == "fir": + sfreq = getattr(data, "info")["sfreq"] + fir_delays = range(int(sfreq*15)) + + design_matrix = make_first_level_design_matrix( + data, + stim_dur=0.1, + hrf_model=HRF_MODEL, + drift_model=DRIFT_MODEL, + high_pass=1/(2*DURATION_BETWEEN_ACTIVITIES), + fir_delays=fir_delays + ) + + # Using a canonical hrf model + else: + design_matrix = make_first_level_design_matrix( + data, + stim_dur=stim_duration, + hrf_model=HRF_MODEL, + drift_model=DRIFT_MODEL, + high_pass=1/(2*DURATION_BETWEEN_ACTIVITIES), + ) + + # If we have a short channel, and short channel regression was specified, apply it to the design matrix + if short_chans is not None: + if SHORT_CHANNEL_REGRESSION: + logger.info("Applying short channel regression...") + for chan in range(len(short_chans.ch_names)): # type: ignore + design_matrix[f"short_{chan}"] = short_chans.get_data(chan).T # type: ignore + + logger.info("Creating the figure...") + fig, ax1 = plt.subplots(figsize=(10, 6), constrained_layout=True) # type: ignore + plot_design_matrix(design_matrix, axes=ax1) + plt.close(fig) + + logger.info("Successfully created the design matrix.") + + return design_matrix, fig + + + +def run_GLM_analysis(data: BaseRaw, design_matrix: DataFrame) -> RegressionResults: + """ + Runs a General Linear Model (GLM) analysis on the provided data using the specified design matrix. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. + design_matrix : DataFrame + The design matrix specifying regressors for the GLM. + + Returns + ------- + RegressionResults + The fitted GLM results object containing regression coefficients and statistics. + """ + + logger.info("Running the GLM...") + glm_est = run_glm(data, design_matrix, n_jobs=N_JOBS) + logger.info("Successfully ran the GLM.") + return glm_est + + + +def individual_GLM_analysis(file_path: str, ID: str, stim_duration: float = 5.0, progress_callback=None) -> tuple[BaseRaw, BaseRaw, DataFrame, DataFrame, DataFrame, DataFrame, dict[str, Figure], str, bool, bool]: + """ + Performs individual-level General Linear Model (GLM) analysis on fNIRS data from a SNIRF file. + + Parameters + ---------- + file_path : str + Path to the SNIRF file containing the participant's raw data. + ID : str + Unique identifier for the participant, used for labeling output. 
+ stim_duration : float, optional
+ Duration of the stimulus in seconds for constructing the design matrix (default is 5.0).
+ progress_callback : callable, optional
+ If provided, called with the step number as each processing step completes (default is None).
+
+ Returns
+ -------
+ tuple[BaseRaw, BaseRaw, DataFrame, DataFrame, DataFrame, DataFrame, dict[str, Figure], str, bool, bool]
+ - BaseRaw: Processed fNIRS data
+ - BaseRaw: Full layout raw data prior to bad channel rejection
+ - DataFrame: Region of interest statistics
+ - DataFrame: Channel-level GLM statistics
+ - DataFrame: Contrast results
+ - DataFrame: Design matrix used for GLM
+ - dict[str, Figure]: Dictionary of figures generated during processing
+ - str: Description of processing steps applied
+ - bool: Whether the GLM successfully ran to completion
+ - bool: Whether the analysis result is valid based on quality checks
+ """
+
+ # Setting up variables to be used later
+ fig_dict: dict[str, Figure] = {}
+ bad_channels_sci = []
+ bad_channels_psp = []
+ bad_channels_snr = []
+ mean_hr_nk = 70
+ mean_hr_scipy = 70
+ num_bad_channels = 0
+ valid = True
+ short_chans = None
+ roi: DataFrame = DataFrame()
+ cha: DataFrame = DataFrame()
+ con: DataFrame = DataFrame()
+ design_matrix = DataFrame()
+
+ # Load the file, get the sources and detectors, update their position, and calculate the short channel and any large distance channels
+ # STEP 1
+ data, fig = load_snirf(file_path, ID, FORCE_DROP_CHANNELS)
+ fig_dict['Raw'] = fig
+ order_of_operations = "Loaded Raw File"
+ if progress_callback: progress_callback(1)
+
+ # Initialize the participant's full layout to be the current data, regardless of whether it is updated later
+ raw_full_layout = data
+
+ try:
+ # Did the user want to load new channel positions from an optode file?
+ # STEP 2
+ if OPTODE_FILE:
+ data = calculate_and_apply_updated_optode_coordinates(data)
+ order_of_operations += " + Updated Optode Placements"
+ if progress_callback: progress_callback(2)
+
+ # STEP 2.5
+ # TODO: revisit why this conversion is needed. The goal is for a participant's whole
+ # layout to plot without any bads, but going through optical density and the
+ # Beer-Lambert law may be avoidable by just checking the channel name suffixes
+ temp = data.copy()
+ temp_od = cast(BaseRaw, optical_density(temp, verbose=VERBOSITY))
+ raw_full_layout = beer_lambert_law(temp_od, ppf=PPF)
+
+ # If specified, apply TDDR to the data
+ # STEP 3
+ if TDDR:
+ data, fig = calculate_and_apply_tddr(data, ID)
+ order_of_operations += " + TDDR Filter"
+ fig_dict['TDDR'] = fig
+ if progress_callback: progress_callback(3)
+
+ # If specified, apply a wavelet filter to the data
+ # STEP 4
+ if WAVELET:
+ data, fig = calculate_and_apply_wavelet(data, ID)
+ order_of_operations += " + Wavelet Filter"
+ fig_dict['Wavelet'] = fig
+ if progress_callback: progress_callback(4)
+
+ # If specified, attempt to get short channels from the data
+ # STEP 4.5
+ if SHORT_CHANNEL:
+ try:
+ short_chans = get_short_channels(data, SHORT_CHANNEL_THRESH)
+ except Exception as e:
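+ # A missing short channel is unrecoverable here: ProcessingError is re-raised
+ # by the handler at the bottom of this function, so the participant is
+ # aborted rather than returned with partial results.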
+ raise ProcessingError("SHORT_CHANNEL was specified, but no short channel was found. Please ensure the data has a short channel and that SHORT_CHANNEL_THRESH is set correctly.") from e
+
+ # Ensure that there are no short or overly long channels in the data
+ data = get_long_channels(data, SHORT_CHANNEL_THRESH, LONG_CHANNEL_THRESH)
+
+ # STEP 5
+ if HEART_RATE:
+ sfreq, signal_trimmed, times_trimmed = short_channel_processing_for_hr(data, short_chans)
+ hr_smooth_nk, mean_hr_nk = calculate_heart_rate_neurokit(sfreq, signal_trimmed)
+ freq_bpm_scipy, psd_scipy, freq_range_scipy, mean_hr_scipy = calculate_heart_rate_scipy(sfreq, signal_trimmed)
+
+ # HACK: Not ideal, but based on the graphs neurokit2 is the more trustworthy estimate,
+ # so if the scipy estimate disagrees by more than 15 bpm it gets overruled
+ overruled = False
+ if mean_hr_scipy < mean_hr_nk - 15:
+ mean_hr_scipy = mean_hr_nk
+ overruled = True
+ if mean_hr_scipy > mean_hr_nk + 15:
+ mean_hr_scipy = mean_hr_nk
+ overruled = True
+
+ hr1, hr2 = plot_heart_rate(freq_bpm_scipy, psd_scipy, freq_range_scipy, mean_hr_scipy, hr_smooth_nk, mean_hr_nk, times_trimmed, overruled)
+ order_of_operations += " + Heart Rate Calculation"
+ fig_dict['HeartRate_PSD'] = hr1
+ fig_dict['HeartRate_Time'] = hr2
+ if progress_callback: progress_callback(5)
+
+ # If specified, calculate and apply SCI
+ # STEP 6
+ if SCI:
+ bad_channels_sci, sci1, sci2 = calculate_scalp_coupling(data.copy(), min(mean_hr_nk - HEART_RATE_WINDOW, mean_hr_scipy - HEART_RATE_WINDOW) / 60, max(mean_hr_nk + HEART_RATE_WINDOW, mean_hr_scipy + HEART_RATE_WINDOW) / 60)
+ order_of_operations += " + SCI Calculation"
+ fig_dict['SCI1'] = sci1
+ fig_dict['SCI2'] = sci2
+ if progress_callback: progress_callback(6)
+
+ # If specified, calculate and apply PSP
+ if PSP:
+ bad_channels_psp, psp1, psp2 = calculate_peak_power(data.copy(), min(mean_hr_nk - HEART_RATE_WINDOW, mean_hr_scipy - HEART_RATE_WINDOW) / 60, max(mean_hr_nk + HEART_RATE_WINDOW, mean_hr_scipy + HEART_RATE_WINDOW) / 60)
+ order_of_operations += " + PSP Calculation"
+ fig_dict['PSP1'] = psp1
+ fig_dict['PSP2'] = psp2
+ if progress_callback: progress_callback(7)
+
+ # If specified, calculate and apply SNR
+ if SNR:
+ bad_channels_snr, fig = calculate_signal_noise_ratio(data.copy())
+ order_of_operations += " + SNR Calculation"
+ fig_dict['SNR'] = fig
+
+ # If specified, drop channels that were marked as bad
+ # STEP 7
+ if EXCLUDE_CHANNELS:
+ data, fig, num_bad_channels = mark_bad_channels(data, ID, set(bad_channels_sci), set(bad_channels_psp), set(bad_channels_snr))
+ order_of_operations += " + Excluded Bad Channels"
+ fig_dict['Bads'] = fig
+ if progress_callback: progress_callback(7)
+
+ # Calculate the optical density
+ # STEP 8
+ data, fig = calculate_optical_density(data, ID)
+ order_of_operations += " + Optical Density"
+ fig_dict['OpticalDensity'] = fig
+ if progress_callback: progress_callback(8)
+
+ # Mainly for visualization. Could be implemented in the future
+ # STEP 8.5
+ evoked_dict_corr = None
+ if SHORT_CHANNEL:
+ short_chans_od = cast(BaseRaw, optical_density(short_chans))
+ data_recombined = cast(BaseRaw, data.copy().add_channels([short_chans_od])) # type: ignore
+ evoked_dict_corr = calculate_and_apply_short_channel_correction(data_recombined.copy())
+
+ # Calculate the haemoglobin concentration
+ # STEP 9
+ data, fig = calculate_haemoglobin_concentration(data, ID)
+ order_of_operations += " + Haemoglobin Concentration"
+ fig_dict['HaemoglobinConcentration'] = fig
+ if progress_callback: progress_callback(9)
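+ # (Step 9.5 below builds evoked responses three ways: the plain epochs, the
+ # negative-correlation enhanced epochs, and, when a short channel exists, the
+ # short-channel corrected epochs, so the techniques can be compared side by side.)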
+ # Mainly for visualization. Could be implemented in the future
+ # STEP 9.5
+ evoked_dict = extract_normal_epochs(data.copy())
+ evoked_dict_anti = calculate_and_apply_negative_correlation_enhancement(data.copy())
+ fig = signal_enhancement_techniques_images(evoked_dict, evoked_dict_anti, evoked_dict_corr)
+ fig_dict['SignalEnhancement'] = fig
+
+ # Create the design matrix
+ # STEP 10
+ # HACK FIXME - Downsampling to 10 Hz is certainly not the best way... right?
+ if HRF_MODEL == 'fir':
+ data.resample(10, verbose=VERBOSITY) # type: ignore
+ if short_chans is not None:
+ short_chans.resample(10, verbose=VERBOSITY) # type: ignore
+
+ design_matrix, fig = create_design_matrix(data, stim_duration, short_chans)
+ order_of_operations += " + Design Matrix"
+ fig_dict['DesignMatrix'] = fig
+ if progress_callback: progress_callback(10)
+
+ # Run the GLM on the design matrix
+ # STEP 11
+ glm_est: RegressionResults = run_GLM_analysis(data, design_matrix)
+ order_of_operations += " + GLM"
+ if progress_callback: progress_callback(11)
+
+ # Add the regions of interest to the groups
+ # STEP 12
+ logger.info("Performing the finishing touches...")
+ order_of_operations += " + Finishing Touches"
+
+ # Extract the channel metrics
+ logger.info("Calculating channel results...")
+ cha = cast(DataFrame, glm_est.to_dataframe()) # type: ignore
+
+ logger.info("Creating groups...")
+ if HRF_MODEL == "fir":
+ groups = dict(AllChannels=range(len(data.ch_names))) # type: ignore
+
+ else:
+ groups: dict[str, list[int]] = dict(
+ group_1_picks = picks_pair_to_idx(data, ROI_GROUP_1, on_missing="ignore"), # type: ignore
+ group_2_picks = picks_pair_to_idx(data, ROI_GROUP_2, on_missing="ignore"), # type: ignore
+ )
+
+ # Compute region of interest results from the channel data
+ logger.info("Calculating region of interest results...")
+ roi = glm_est.to_dataframe_region_of_interest(groups, design_matrix.columns, demographic_info=True) # type: ignore
+
+ # Create the contrast matrix
+ logger.info("Creating the contrast matrix...")
+ contrast_matrix = np.eye(design_matrix.shape[1])
+ basic_conts = dict(
+ [(column, contrast_matrix[i]) for i, column in enumerate(design_matrix.columns)]
+ )
+
+ # Calculate contrast differently depending on the hrf model
+ if HRF_MODEL == 'fir':
+ # Find all FIR regressors for TARGET_ACTIVITY
+ delay_cols = [col for col in design_matrix.columns if col.startswith(f"{TARGET_ACTIVITY}_delay_")]
+
+ if not delay_cols:
+ raise ValueError(f"No FIR regressors found for condition {TARGET_ACTIVITY}.")
+
+ # Average their contrast vectors
+ fir_contrast = np.sum([basic_conts[col] for col in delay_cols], axis=0)
+ fir_contrast /= len(delay_cols)
+
+ # Compute contrast
+ contrast = glm_est.compute_contrast(fir_contrast) # type: ignore
+ con = cast(DataFrame, contrast.to_dataframe()) # type: ignore
+
+ else:
+ # Create and compute the contrast
+ contrast_t = basic_conts[TARGET_ACTIVITY]
+ contrast = glm_est.compute_contrast(contrast_t) # type: ignore
+ con = cast(DataFrame, contrast.to_dataframe()) # type: ignore
+
+ # Add the participant ID to the dataframes
+ roi["ID"] = cha["ID"] = con["ID"] = design_matrix["ID"] = ID
+
+ # Convert to uM for nicer plotting below.
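+ # (The GLM estimates are in molar units at this point; scaling by 1e6 expresses
+ # theta/effect values in micromolar, matching the µM axis labels used later.)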
+ logger.info("Converting to uM...") + cha["theta"] = cha["theta"].astype(float) * 1.0e6 + roi["theta"] = roi["theta"].astype(float) * 1.0e6 + con["effect"] = con["effect"].astype(float) * 1.0e6 + + # If we exceed the maximum allowed bad channels, apply an X over the figures + logger.info("Checking amount of bad channels...") + if num_bad_channels >= MAX_BAD_CHANNELS: + valid=False + logger.info("Drawing some big X's...") + for _, fig in fig_dict.items(): + add_x_overlay(fig, 'Too many bad channels!', 'red') + + logger.info("Completed individual analysis.") + + if progress_callback: progress_callback(12) + + # Clear the output for the next participant unless we are told to be verbose + if not VERBOSITY: + clear_output(wait=True) + + + # Something really went wrong and we should not continue + except ProcessingError as e: + logger.info("An error occured!", e) + raise + + # Something went wrong at one of the steps. Return what data we gathered, but set the validity of this run to False + except Exception as e: + logger.info("An error occured!", e) + fig_dict_bytes = convert_fig_dict_to_png_bytes(fig_dict) + return data, raw_full_layout, roi, cha, con, design_matrix, fig_dict, order_of_operations, False, False + + fig_dict_bytes = convert_fig_dict_to_png_bytes(fig_dict) + + return data, raw_full_layout, roi, cha, con, design_matrix, fig_dict_bytes, order_of_operations, True, valid + + + +def add_x_overlay(fig: Figure, reason: str, color: str) -> None: + """ + Adds a large 'X' across the figure if the participant met the bad channel criteria. + + Parameters + ---------- + fig : Figure + Matplotlib figure to draw the X on. + reason: str + Why the X is being drawn. + color: str + What color the reason should be. + """ + + # Draw the big X on the graph + ax = fig.add_axes([0, 0, 1, 1], zorder=100) # type: ignore + ax.set_axis_off() + ax.plot([0, 1], [0, 1], color='red', linewidth=8, transform=fig.transFigure, clip_on=False) # type: ignore + ax.plot([0, 1], [1, 0], color='red', linewidth=8, transform=fig.transFigure, clip_on=False) # type: ignore + ax.text(0.5, 0.5, reason, color=color, fontsize=26, fontweight='bold', ha='center', va='center', transform=fig.transFigure, zorder=101, bbox=dict(facecolor='white', alpha=0.8, edgecolor='red', boxstyle='round,pad=0.4')) # type: ignore + + + +from io import BytesIO + +def convert_fig_dict_to_png_bytes(fig_dict): + result = {} + for key, fig in fig_dict.items(): + buf = BytesIO() + fig.savefig(buf, format='png') + buf.seek(0) + result[key] = buf.read() + return result + + + +def process_file_worker(args): + file_path, file_name, stim_duration, config, gui, progress_queue = args + try: + set_config(config, gui) + + def progress_callback(step_idx): + print(f"[Worker] Step {step_idx} for {file_name}") + if progress_queue: + progress_queue.put(('progress', file_name, step_idx)) + + result = individual_GLM_analysis( + file_path, file_name, stim_duration, + progress_callback=progress_callback + ) + return file_name, result, None + except Exception as e: + return file_name, None, e + + + +def process_folder(folder_path: str, stim_duration: float, files_remaining: dict[str, int], config , gui: bool = False, progress_queue=None) -> tuple[dict[str, dict[str, BaseRaw]], DataFrame, DataFrame, DataFrame, DataFrame, dict[str, list[Figure]], dict[str, str]]: + df_roi = DataFrame() + df_cha = DataFrame() + df_con = DataFrame() + df_design_matrix = DataFrame() + + raw_haemo_dict: dict[str, dict[str, BaseRaw]] = {} + process_dict: dict[str, str] = {} + figures_by_step: 
dict[str, list[Figure]] = { + step: [] for step in [ + 'Raw', 'TDDR', 'Wavelet', 'HeartRate_PSD', 'HeartRate_Time', + 'SCI1', 'SCI2', 'PSP1', 'PSP2', 'SNR', 'Bads', + 'OpticalDensity', 'HaemoglobinConcentration', 'SignalEnhancement', 'DesignMatrix' + ] + } + + file_args = [ + (os.path.join(folder_path, file_name), file_name, stim_duration, config, gui, progress_queue) + for file_name in os.listdir(folder_path) + if os.path.isfile(os.path.join(folder_path, file_name)) + ] + + print("[process_folder] File args:", file_args) + + available_mem = psutil.virtual_memory().available + if (MAX_WORKERS >= available_mem / (1024 ** 3)): + print(f"WARNING: You have set MAX_WORKERS to {MAX_WORKERS}. Each worker should have at least 1GB of system memory. Your device currently has a total of {available_mem / (1024 ** 3):.2f}GB free.\nPlease consider lowering MAX_WORKERS to prevent potential crashing due to insufficient system memory.") + + with ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor: + future_to_file = { + executor.submit(process_file_worker, args): args[1] for args in file_args + } + + with tqdm(total=len(file_args), desc="Processing files") as pbar: + for future in as_completed(future_to_file): + file_name = future_to_file[future] + files_remaining['count'] -= 1 + logger.info(f"Files remaining: {files_remaining['count']}") + pbar.update(1) + + try: + file_name, result, error = future.result() + if error: + logger.info(f"Error processing {file_name}: {error}") + continue + + raw_haemo_filtered, raw_haemo_full, roi, channel, contrast, design_matrix, fig_dict, process, finished, valid = result + + if finished and valid: + logger.info(f"Finished processing {file_name}. This participant was valid.") + raw_haemo_dict[file_name] = { + "filtered": raw_haemo_filtered, + "full_layout": raw_haemo_full + } + process_dict[file_name] = process + + for step in figures_by_step: + if step in fig_dict: + figures_by_step[step].append(fig_dict[step]) + + df_roi = pd.concat([df_roi, roi], ignore_index=True) + df_cha = pd.concat([df_cha, channel], ignore_index=True) + df_con = pd.concat([df_con, contrast], ignore_index=True) + df_design_matrix = pd.concat([df_design_matrix, design_matrix], ignore_index=True) + + else: + logger.info(f"Finished processing {file_name}. This participant was NOT valid.") + if SEE_BAD_IMAGES: + for step in figures_by_step: + if step in fig_dict: + figures_by_step[step].append(fig_dict[step]) + except Exception as e: + logger.info(f"Unexpected error processing {file_name}: {e}") + raise + + return raw_haemo_dict, df_roi, df_cha, df_con, df_design_matrix, figures_by_step, process_dict + + + +def verify_channel_positions(data: BaseRaw) -> None: + """ + Visualizes the sensor/channel positions of the raw data for verification. + + Parameters + ---------- + data : BaseRaw + The loaded data object to process. 
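+
+ Examples
+ --------
+ A minimal sketch with an illustrative file name:
+
+ >>> raw = read_raw_snirf("participant_01.snirf")
+ >>> verify_channel_positions(raw)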
+ """ + logger.info("Creating the figure...") + data.plot_sensors(show_names=True, to_sphere=True, show=False, verbose=VERBOSITY) # type: ignore + plt.show() # type: ignore + + + +def plot_3d_evoked_array( + inst: Union[BaseRaw, EvokedArray, Info], + statsmodel_df: DataFrame, + picks: Optional[Union[str, list[str]]] = "hbo", + value: str = "Coef.", + background: str = "w", + figure: Optional[object] = None, + clim: Union[str, dict[str, Union[str, list[float]]]] = "auto", + mode: str = "weighted", + colormap: str = "RdBu_r", + surface: str = "pial", + hemi: str = "both", + size: int = 800, + view: Optional[Union[str, dict[str, float]]] = None, + colorbar: bool = True, + distance: float = 0.03, + subjects_dir: Optional[str] = None, + src: Optional[SourceSpaces] = None, + verbose: bool = False, +) -> Brain: + '''Ported from MNE''' + + info: Info = cast(Info, deepcopy(inst if isinstance(inst, Info) else inst.info)) # type: ignore + if not (getattr(info, "ch_names") == list(statsmodel_df["ch_name"].values)): # type: ignore + raise RuntimeError( + 'MNE data structure does not match dataframe ' + f'results.\nMNE = {getattr(info, "ch_names")}.\n' + f'GLM = {list(statsmodel_df["ch_name"].values)}' # type: ignore + ) + + ea = EvokedArray(np.tile(statsmodel_df[value].values.T, (1, 1)).T, info.copy()) # type: ignore + + # TODO: mimic behaviour of other MNE-NIRS glm plotting options + if picks is not None: + ea = ea.pick(picks=picks) # type: ignore + + if subjects_dir is None: + subjects_dir = os.environ["SUBJECTS_DIR"] + if src is None: + fname_src_fs = os.path.join( + subjects_dir, "fsaverage", "bem", "fsaverage-ico-5-src.fif" + ) + src = read_source_spaces(fname_src_fs, verbose=verbose) + + picks = getattr(ea, "info")["ch_names"] + + # Set coord frame + for idx in range(len(getattr(ea, "ch_names"))): + getattr(ea, "info")["chs"][idx]["coord_frame"] = 4 + + # Generate source estimate + kwargs = dict( + evoked=ea, + subject="fsaverage", + trans=Transform('head', 'mri', np.eye(4)), + distance=distance, + mode=mode, + surface=surface, + subjects_dir=subjects_dir, + src=src, + project=True, + ) + + stc = stc_near_sensors(picks=picks, **kwargs, verbose=verbose) # type: ignore + + + from mne import SourceEstimate + assert isinstance(stc, SourceEstimate) # or your specific subclass + + # Produce brain plot + brain: Brain = stc.plot( # type: ignore + src=src, + subjects_dir=subjects_dir, + hemi=hemi, + surface=surface, + initial_time=0, + clim=clim, # type: ignore + size=size, + colormap=colormap, + figure=figure, + background=background, + colorbar=colorbar, + verbose=verbose, + ) + if view is not None: + brain.show_view(view) # type: ignore + + return brain + + + +def brain_3d_visualization(all_results: dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]], all_haemo: dict[str, dict[str, BaseRaw]], participant_number: int, t_or_theta: Literal['t', 'theta'] = 'theta', show_optodes: Literal['sensors', 'labels', 'none', 'all'] = 'all', show_text: bool = True) -> None: + + + # Determine if we are visualizing t or theta to set the appropriate limit + if t_or_theta == 't': + clim = dict(kind="value", pos_lims=(0, ABS_T_VALUE/2, ABS_T_VALUE)) + elif t_or_theta == 'theta': + clim = dict(kind="value", pos_lims=(0, ABS_THETA_VALUE/2, ABS_THETA_VALUE)) + + + # Loop over all groups + for index, group_name in enumerate(all_results): + + # We only care for their channel results + (_, df_cha, _, _) = all_results[group_name] + + # Get all activity conditions + for cond in [TARGET_ACTIVITY]: + + if HRF_MODEL == 
'fir': + ch_summary = df_cha.query(f"Condition.str.startswith('{cond}_delay_') and Chroma == 'hbo'", engine='python') # type: ignore + + else: + # Filter for the condition and chromophore + ch_summary = df_cha.query("Condition in [@cond] and Chroma == 'hbo'") # type: ignore + + # Determine number of unique participants based on their ID + n_participants = ch_summary["ID"].nunique() + + # WE JUST NEED SOMEONES OPTODE DATA TO PLOT ON THE BRAIN! + # TODO: This should take the average positions of all participants + # We will just take the passed through parameter + participant_to_plot = ch_summary["ID"].unique()[participant_number] # type: ignore + participant_raw_full: BaseRaw = all_haemo[participant_to_plot]["full_layout"] + + # Use ordinary least squares (OLS) if only one participant + if n_participants == 1: + + # t values + if t_or_theta == 't': + ch_model = smf.ols("t ~ -1 + ch_name", ch_summary).fit() # type: ignore + + # theta values + elif t_or_theta == 'theta': + ch_model = smf.ols("theta ~ -1 + ch_name", ch_summary).fit() # type: ignore + + logger.info("OLS model is being used as there is only one participant!") + + # Use mixed effects model if there is multiple participants + else: + + # t values + if t_or_theta == 't': + ch_model = smf.mixedlm("t ~ -1 + ch_name", ch_summary, groups=ch_summary["ID"]).fit(method="nm") # type: ignore + + # theta values + elif t_or_theta == 'theta': + ch_model = smf.mixedlm("theta ~ -1 + ch_name", ch_summary, groups=ch_summary["ID"]).fit(method="nm") # type: ignore + + # Convert model results + model_df = cast(DataFrame, statsmodels_to_results(ch_model, order=ch_summary["ch_name"].unique())) # type: ignore + + valid_channels = ch_summary["ch_name"].unique().tolist() # type: ignore + raw_for_plot = participant_raw_full.copy().pick(picks=valid_channels) # type: ignore + + + brain = plot_3d_evoked_array(raw_for_plot.pick(picks="hbo"), model_df, view="dorsal", distance=BRAIN_DISTANCE, colorbar=True, clim=clim, mode=BRAIN_MODE, size=(800, 700)) # type: ignore + + if show_optodes == 'all' or show_optodes == 'sensors': + brain.add_sensors(getattr(raw_for_plot, "info"), trans=Transform('head', 'mri', np.eye(4)), fnirs=["channels", "pairs", "sources", "detectors"], verbose=VERBOSITY) # type: ignore + + + # Read and parse the file + if show_optodes == 'all' or show_optodes == 'labels': + positions: list[tuple[str, list[float]]] = [] + with open(OPTODE_FILE_PATH, 'r') as f: + for line in f: + line = line.strip() + if not line or ':' not in line: + continue # skip empty/malformed lines + name, coords = line.split(':', 1) + coords = [float(x) for x in coords.strip().split()] + positions.append((name.strip(), coords)) + + for name, (x, y, z) in positions: + brain._renderer.text3d(x, y, z, name, color=('red' if name.startswith('s') else 'blue' if name.startswith('d') else 'gray'), scale=0.002) # type: ignore + + # Set the display text for the brain image + # display_text = ('Folder: ' + str(BASE_SNIRF_FOLDER) + '\nGroup: ' + group_name + '\nCondition: '+ cond + '\nReject Criteria Threshold: ' + str(EPOCH_REJECT_CRITERIA_THRESH) + '\nMin Time Threshold: ' + # + str(TIME_MIN_THRESH) + 's\nMax Time Threshold: ' + str(TIME_MAX_THRESH) + 's\nShort Channel Regression: ' + str(SHORT_CHANNEL_REGRESSION) + '\nStim Duration: ' + # + str(STIM_DURATION[index]) + 's\nLooking at: ' + t_or_theta + ' values') + '\nBrain Distance: ' + str(BRAIN_DISTANCE) + '\nBrain Mode: ' + BRAIN_MODE + if HRF_MODEL == 'fir': + display_text = ('Folder: ' + str(BASE_SNIRF_FOLDER) + '\nGroup: ' + 
group_name + '\nCondition: '+ cond + '\nShort Channel Regression: ' + str(SHORT_CHANNEL_REGRESSION) + + '\nLooking at: ' + t_or_theta + ' values') + '\nBrain Distance: ' + str(BRAIN_DISTANCE) + '\nBrain Mode: ' + BRAIN_MODE + else: + display_text = ('Folder: ' + str(BASE_SNIRF_FOLDER) + '\nGroup: ' + group_name + '\nCondition: '+ cond + '\nShort Channel Regression: ' + str(SHORT_CHANNEL_REGRESSION) + '\nStim Duration: ' + + str(STIM_DURATION[index]) + '\nLooking at: ' + t_or_theta + ' values') + '\nBrain Distance: ' + str(BRAIN_DISTANCE) + '\nBrain Mode: ' + BRAIN_MODE + + # Apply the text onto the brain + if show_text: + brain.add_text(0.12, 0.64, display_text, "title", font_size=11, color="k") # type: ignore + + + +def plot_fir_model_results(all_results: dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]], all_haemo: dict[str, dict[str, BaseRaw]], participant_number: int, t_or_theta: Literal['t', 'theta'] = 'theta') -> None: + + + if HRF_MODEL != 'fir': + logger.info("This method only works when HRF_MODEL is set to 'fir'.") + + else: + for group_name in all_results: + + (df_roi, _, _, df_design_matrix) = all_results[group_name] + first_id = df_design_matrix["ID"].unique()[participant_number] # type: ignore + first_dm = df_design_matrix.query(f"ID == '{first_id}'").copy() # type: ignore + first_dm.index = np.round([0.1 * i for i in range(len(first_dm))], decimals=1) # type: ignore + df_design_matrix = first_dm + + participant = all_haemo[first_id]["full_layout"] + + df_roi["isActivity"] = [TARGET_ACTIVITY in n for n in df_roi["Condition"]] # type: ignore + df_roi["isDelay"] = ["delay" in n for n in df_roi["Condition"]] # type: ignore + df_roi = df_roi.query("isDelay in [True]") # type: ignore + df_roi = df_roi.query("isActivity in [True]") # type: ignore + + + df_roi.loc[:, "TidyCond"] = "" + df_roi.loc[df_roi["isActivity"] == True, "TidyCond"] = TARGET_ACTIVITY # noqa: E712 + # Finally, extract the FIR delay in to its own column in data frame + df_roi.loc[:, "delay"] = [n.split("_")[-1] for n in df_roi.Condition] # type: ignore + + if t_or_theta == 'theta': + lme = smf.mixedlm("theta ~ -1 + delay:TidyCond:Chroma", df_roi, groups=df_roi["ID"]).fit() # type: ignore + elif t_or_theta == 't': + lme = smf.mixedlm("t ~ -1 + delay:TidyCond:Chroma", df_roi, groups=df_roi["ID"]).fit() # type: ignore + + df_sum: DataFrame = statsmodels_to_results(lme) # type: ignore + + + df_sum["delay"] = [int(n) for n in df_sum["delay"]] # type: ignore + df_sum = df_sum.sort_values("delay") # type: ignore + + # logger.info the result for the oxyhaemoglobin data in the Reach condition + df_sum.query(f"TidyCond in ['{TARGET_ACTIVITY}']").query("Chroma in ['hbo']") # type: ignore + + axes1: list[Axes] + fig, axes1 = plt.subplots(nrows=1, ncols=3, figsize=(20, 10)) # type: ignore + + # Extract design matrix columns that correspond to the condition of interest + dm_cond_idxs = np.where([TARGET_ACTIVITY in n for n in df_design_matrix.columns])[0] + dm_cond_colnames: list[str] = [df_design_matrix.columns[i] for i in dm_cond_idxs] + dm_cond = df_design_matrix[dm_cond_colnames] + + # 2. Extract hbo GLM estimates + df_hbo = df_sum.query(f"TidyCond in ['{TARGET_ACTIVITY}']").query("Chroma in ['hbo']") # type: ignore + vals_hbo = [float(v) for v in df_hbo["Coef."]] # type: ignore + + dm_cond_scaled_hbo = dm_cond * vals_hbo + + # 3. 
Extract hbr GLM estimates
+ df_hbr = df_sum.query(f"TidyCond in ['{TARGET_ACTIVITY}']").query("Chroma in ['hbr']") # type: ignore
+ vals_hbr = [float(v) for v in df_hbr["Coef."]] # type: ignore
+
+ dm_cond_scaled_hbr = dm_cond * vals_hbr
+
+ # Extract the time scale for plotting.
+ # Set time zero to be the onset.
+ index_values = cast(NDArray[float64], dm_cond_scaled_hbo.index.to_numpy(dtype=float) - participant.annotations.onset[1]) # type: ignore
+
+ # Plot the result
+ axes1[0].plot(index_values, np.asarray(dm_cond)) # type: ignore
+ axes1[1].plot(index_values, np.asarray(dm_cond_scaled_hbo)) # type: ignore
+ axes1[2].plot(index_values, np.sum(dm_cond_scaled_hbo, axis=1), "r") # type: ignore
+ axes1[2].plot(index_values, np.sum(dm_cond_scaled_hbr, axis=1), "b") # type: ignore
+
+ # Find the peak of the summed HbO response within the 0-15 s window
+ valid_mask = (index_values >= 0) & (index_values <= 15)
+ hbo_sum_window = np.sum(dm_cond_scaled_hbo.loc[valid_mask, :], axis=1)
+ peak_idx_in_window = np.argmax(hbo_sum_window)
+ peak_idx = np.where(valid_mask)[0][peak_idx_in_window]
+ peak_time = float(round(index_values[peak_idx], 2)) # type: ignore
+
+ axes1[2].axvline(x=peak_time, color='k', linestyle='--', linewidth=1.5, label='Peak time') # type: ignore
+
+ # Format the plot
+ for ax in range(3):
+ axes1[ax].set_xlim(-5, 20)
+ axes1[ax].set_xlabel("Time (s)") # type: ignore
+ axes1[0].set_ylim(-0.2, 1.2)
+ axes1[1].set_ylim(-4, 8)
+ axes1[2].set_ylim(-4, 8)
+ axes1[0].set_title(f"FIR Model for {group_name} (Unscaled by GLM {TARGET_ACTIVITY} estimates) ({t_or_theta})") # type: ignore
+ axes1[1].set_title(f"FIR Components for {group_name} (Scaled by GLM {TARGET_ACTIVITY} estimates) ({t_or_theta})") # type: ignore
+ axes1[2].set_title(f"Evoked Response for {group_name} ({TARGET_ACTIVITY}) ({t_or_theta})") # type: ignore
+ axes1[0].set_ylabel("FIR Model") # type: ignore
+ axes1[1].set_ylabel("Oxyhaemoglobin (ΔμMol)") # type: ignore
+ axes1[2].set_ylabel("Haemoglobin (ΔμMol)") # type: ignore
+ axes1[2].legend(["Oxyhaemoglobin", "Deoxyhaemoglobin", f"Peak {peak_time}s"]) # type: ignore
+
+ fig.tight_layout()
+
+ # We can also extract the 95% confidence intervals of the estimates
+ l95_hbo = [float(v) for v in df_hbo["[0.025"]] # type: ignore
+ u95_hbo = [float(v) for v in df_hbo["0.975]"]] # type: ignore
+ dm_cond_scaled_hbo_l95 = dm_cond * l95_hbo
+ dm_cond_scaled_hbo_u95 = dm_cond * u95_hbo
+ l95_hbr = [float(v) for v in df_hbr["[0.025"]] # type: ignore
+ u95_hbr = [float(v) for v in df_hbr["0.975]"]] # type: ignore
+ dm_cond_scaled_hbr_l95 = dm_cond * l95_hbr
+ dm_cond_scaled_hbr_u95 = dm_cond * u95_hbr
+
+ axes2: Axes
+ fig, axes2 = plt.subplots(nrows=1, ncols=1, figsize=(7, 7)) # type: ignore
+
+ # Plot the result
+ axes2.plot(index_values, np.sum(dm_cond_scaled_hbo, axis=1), "r") # type: ignore
+ axes2.plot(index_values, np.sum(dm_cond_scaled_hbr, axis=1), "b") # type: ignore
+ axes2.axvline(x=peak_time, color='k', linestyle='--', linewidth=1.5, label='Peak time') # type: ignore
+
+ axes2.fill_between( # type: ignore
+ index_values,
+ np.asarray(np.sum(dm_cond_scaled_hbo_l95, axis=1)),
+ np.asarray(np.sum(dm_cond_scaled_hbo_u95, axis=1)),
+ facecolor="red",
+ alpha=0.25,
+ )
+ axes2.fill_between( # type: ignore
+ index_values,
+ np.asarray(np.sum(dm_cond_scaled_hbr_l95, axis=1)),
+ np.asarray(np.sum(dm_cond_scaled_hbr_u95, axis=1)),
+ facecolor="blue",
+ alpha=0.25,
+ )
+
+ # Format the plot
+ axes2.set_xlim(-5, 20)
+ axes2.set_ylim(-8, 12)
+ axes2.set_title(f"Evoked Response with 95% confidence intervals for {group_name} ({TARGET_ACTIVITY}) ({t_or_theta})") # type: ignore
({t_or_theta})") # type: ignore + axes2.set_ylabel("Haemoglobin (ΔμMol)") # type: ignore + axes2.legend(["Oyxhaemoglobin", "Deoyxhaemoglobin", f"Peak {peak_time}s"]) # type: ignore + axes2.set_xlabel("Time (s)") # type: ignore + + fig.tight_layout() + + plt.show() # type: ignore + + + +def plot_2d_theta_graph(all_results: dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]]) -> None: + '''This method will create a 2d boxplot showing the theta values for each channel and group as independent ranges on the same graph.\n + Inputs:\n + all_results (dict) - Contains the df_roi, df_cha, and df_con for each group\n + ''' + + + # Create a list to store the channel results of all groups + df_all_cha_list: list[DataFrame] = [] + + # Iterate over each group in all_results + for group_name, (_, df_cha, _, _) in all_results.items(): + df_cha["group"] = group_name # Add the group name to the data + df_all_cha_list.append(df_cha) # Append the dataframe to the list + + # Combine all the data into a single DataFrame + df_all_cha = pd.concat(df_all_cha_list, ignore_index=True) + + # Filter for the target activity + if HRF_MODEL == 'fir': + df_target = df_all_cha[df_all_cha['Condition'].str.startswith(f"{TARGET_ACTIVITY}_delay_")] # type: ignore + else: + df_target = df_all_cha[df_all_cha["Condition"] == TARGET_ACTIVITY] + + # Get the number of unique groups to know how many colors are needed for the boxplot + unique_groups = df_target["group"].nunique() + palette = sns.color_palette("Set2", unique_groups) + + # Create the boxplot + fig = plt.figure(figsize=(15, 6)) # type: ignore + sns.boxplot( + data=df_target, + x="ch_name", + y="theta", + hue="group", + palette=palette + ) + + # Format the boxplot + plt.title("Theta Coefficients by Channel and Group") # type: ignore + plt.xticks(rotation=90) # type: ignore + plt.ylabel("Theta (µM)") # type: ignore + plt.xlabel("Channel") # type: ignore + plt.legend(title="Group") # type: ignore + plt.tight_layout() + plt.show() # type: ignore + + + +def plot_individual_theta_averages(all_results: dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]]) -> None: + + if HRF_MODEL == 'fir': + logger.info("This method does not work when HRF_MODEL is set to 'fir'.") + return + else: + # Iterate over all the groups + for group_name in all_results: + + # Store the region of interest data + (df_roi, _, _, _) = all_results[group_name] + + # Filter the results down to what we want + grp_results = df_roi.query(f"Condition in ['{TARGET_ACTIVITY}', '{TARGET_CONTROL}']").copy() # type: ignore + grp_results = grp_results.query("Chroma in ['hbo']").copy() # type: ignore + + # Rename the ROI's to be the friendly name + roi_label_map = { + "group_1_picks": ROI_GROUP_1_NAME, + "group_2_picks": ROI_GROUP_2_NAME, + } + grp_results["ROI"] = grp_results["ROI"].replace(roi_label_map) # type: ignore + + # Create the catplot + sns.catplot( + x="Condition", + y="theta", + col="ID", + hue="ROI", + data=grp_results, + col_wrap=5, + errorbar=None, + palette="muted", + height=4, + s=10, + dodge=False, + ) + + plt.show() # type: ignore + + + +def plot_group_theta_averages(all_results: dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]]) -> None: + '''This method will create a stripplot showing the theta vaules for each region of interest for each group.\n + Inputs:\n + all_results (dict) - Contains the df_roi, df_cha, and df_con for each group\n''' + + if HRF_MODEL == 'fir': + logger.info("This method does not work when HRF_MODEL is set to 'fir'.") + return + else: + # Rename the 
ROI's to be the friendly name + roi_label_map = { + "group_1_picks": ROI_GROUP_1_NAME, + "group_2_picks": ROI_GROUP_2_NAME, + } + + # Setup subplot grid + n = len(all_results) + ncols = 2 + nrows = (n + 1) // ncols # round up + fig, axes = cast(tuple[Figure, np.ndarray[Any, Any]], plt.subplots(nrows=nrows, ncols=ncols, figsize=(12, 5 * nrows), squeeze=False)) # type: ignore + + index = -1 + # Iterate over all groups + for index, (group_name, ax) in enumerate(zip(all_results, axes.flatten())): + + # Store the region of interest data + (df_roi, _, _, _) = all_results[group_name] + + # Filter the results down to what we want + grp_results = df_roi.query(f"Condition in ['{TARGET_ACTIVITY}', '{TARGET_CONTROL}']").copy() # type: ignore + + # Run a mixedlm model on the data + roi_model = smf.mixedlm("theta ~ -1 + ROI:Condition:Chroma", grp_results, groups=grp_results["ID"]).fit(method="nm") # type: ignore + + # Apply the new friendly names on to the data + df = cast(DataFrame, statsmodels_to_results(roi_model)) + df["ROI"] = df["ROI"].map(roi_label_map) # type: ignore + + # Create a stripplot: + sns.stripplot( + x="Condition", + y="Coef.", + hue="ROI", + data=df.query("Chroma == 'hbo'"), # type: ignore + dodge=False, + jitter=False, + size=5, + palette="muted", + ax=ax, + ) + + # Format the stripplot + ax.set_title(f"Results for {group_name}") + ax.legend(title="ROI", loc="upper right") + + if index == -1: + # No groups, so remove all axes + for ax in axes.flatten(): + fig.delaxes(ax) + # Remove any unused axes and apply final touches + else: + for j in range(index + 1, len(axes.flatten())): + fig.delaxes(axes.flatten()[j]) + + fig.tight_layout() + fig.suptitle("Theta Averages Across Groups", fontsize=16, y=1.02) # type: ignore + plt.show() # type: ignore + + + +def compute_p_group_stats(df_cha: DataFrame, bad_pairs: set[tuple[int, int]], t_or_theta: Literal['t', 'theta'] = 't') -> DataFrame: + + if HRF_MODEL == 'fir': + # Filter: All delays for the target activity + df_activity = df_cha[df_cha['Condition'].str.startswith(f"{TARGET_ACTIVITY}_delay_") & (df_cha['Chroma'] == 'hbo')] # type: ignore + + # Aggregate across FIR delays *per subject* for each channel + df_agg = (df_activity.groupby(['Source', 'Detector', 'ID'])[['t', 'theta']].mean().reset_index()) # type: ignore + + else: + # Canonical HRF case + df_agg = df_cha[(df_cha['Condition'] == TARGET_ACTIVITY) & (df_cha['Chroma'] == 'hbo')].copy() + + # Filter the channel data down to what we want + grouped = cast(Iterator[tuple[tuple[int, int], Any]], df_agg.groupby(['Source', 'Detector'])) # type: ignore + + # Create an empty list to store the data for our result + data: list[dict[str, Any]] = [] + + # Iterate over the filtered channel data + for (src, det), group in grouped: + + # If it is a bad channel pairing, do not process it + if (src, det) in bad_pairs: + logger.info(f"Skipping bad channel Source {src} - Detector {det}") + continue + + # Drop any missing values that could exist + t_values = group['t'].dropna().values + t_values = np.array(t_values, dtype=float) + theta_values = group['theta'].dropna().values + theta_values = np.array(theta_values, dtype=float) + + # Ensure that we still have our two t values, otherwise do not process this pairing + # TODO: is the t values throwing a warning good enough? 
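+ # ttest_1samp needs at least two observations to estimate the across-subject
+ # variance, so pairs with a single subject are skipped rather than tested.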
+ if len(t_values) < 2:
+ logger.info(f"Skipping Source {src} - Detector {det}: not enough data (n={len(t_values)})")
+ continue
+
+ # NOTE: This is still calculated with t values as it is a t-test
+ # Perform one-sample t-test on t-values across subjects
+ _, pval = ttest_1samp(t_values, popmean=0)
+
+ # Store all of the data for this t-test using the mean t-value (or mean theta) for visualization
+ if t_or_theta == 't':
+ data.append({
+ 'Source': src,
+ 'Detector': det,
+ 't_or_theta': np.mean(t_values),
+ 'p_value': pval
+ })
+
+ else:
+ data.append({
+ 'Source': src,
+ 'Detector': det,
+ 't_or_theta': np.mean(theta_values),
+ 'p_value': pval
+ })
+
+ # Create a DataFrame with the data and ensure it is not empty
+ result = DataFrame(data)
+ if result.empty:
+ logger.info("No valid channel pairs with enough data for group-level testing.")
+
+ return result
+
+
+
+ def get_bad_src_det_pairs(raw: BaseRaw) -> set[tuple[int, int]]:
+ '''This method figures out the bad source and detector pairings for the 2D t+p graph to prevent them from being plotted.
+ Inputs:\n
+ raw (RawSNIRF) - Contains all the snirf data for the last participant processed. Only used to get the channels\n
+ Outputs:\n
+ bad_pairs (set) - Set containing all of the bad pairs of sources and detectors'''
+
+ # Create a set to store the bad pairs
+ bad_pairs: set[tuple[int, int]] = set()
+
+ # Iterate over all the channels in the bads key
+ for ch_name in getattr(raw, "info")["bads"]:
+ try:
+ # Get all characters before the space
+ parts = ch_name.split()[0]
+
+ # Split with the separator
+ src_str, det_str = parts.split(SOURCE_DETECTOR_SEPARATOR)
+ src = int(src_str[1:])
+ det = int(det_str[1:])
+
+ # Add to the set
+ bad_pairs.add((src, det))
+
+ except Exception as e:
+ logger.info(f"Could not parse bad channel '{ch_name}': {e}")
+
+ return bad_pairs
+
+
+
+ def plot_avg_significant_activity(raw: BaseRaw, all_results: dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]], t_or_theta: Literal['t', 'theta'] = 't') -> None:
+ '''This method plots the average t values for the groups on a 2D graph. p values less than or equal to P_THRESHOLD are solid lines, while greater p values are dashed lines.\n
+ Inputs:\n
+ raw (RawSNIRF) - Contains all the snirf data for the last participant processed.
Only used to get the channel locations.\n + all_results (dict) - Contains the df_roi, df_cha, and df_con for each group\n''' + + # Iterate over all the groups + for group_name in all_results: + (_, df_cha, _, _) = all_results[group_name] + + if HRF_MODEL == 'fir': + mask = df_cha['Condition'].str.startswith(f"{TARGET_ACTIVITY}_delay_") & (df_cha['Chroma'] == 'hbo') # type: ignore + filtered_df = df_cha[mask] + num_tests = filtered_df.groupby(['Source', 'Detector']).ngroups # type: ignore + else: + num_tests = len(cast(Iterator[tuple[tuple[int, int], Any]], df_cha.query(f"Condition == '{TARGET_ACTIVITY}' and Chroma == 'hbo'").groupby(['Source', 'Detector']))) # type: ignore + + logger.info(f"Number of tests: {num_tests}") + + # Compute average t-value across individuals for each channel pairing + bad_pairs = get_bad_src_det_pairs(raw) + avg_df = compute_p_group_stats(df_cha, bad_pairs, t_or_theta) + + logger.info(f"Average {t_or_theta}-values and p-values for {TARGET_ACTIVITY}:") + for _, row in avg_df.iterrows(): # type: ignore + logger.info(f"Source {row['Source']} <-> Detector {row['Detector']}: " + f"Avg {t_or_theta}-value = {row['t_or_theta']:.3f}, Avg p-value = {row['p_value']:.3f}") + + # Extract the cource and detector positions from raw + src_pos: dict[int, tuple[float, float]] = {} + det_pos: dict[int, tuple[float, float]] = {} + for ch in getattr(raw, "info")["chs"]: + ch_name = ch['ch_name'] + if not ch_name or not ch['loc'].any(): + continue + parts = ch_name.split()[0] + src_str, det_str = parts.split(SOURCE_DETECTOR_SEPARATOR) + src_num = int(src_str[1:]) + det_num = int(det_str[1:]) + src_pos[src_num] = ch['loc'][3:5] + det_pos[det_num] = ch['loc'][6:8] + + # Set up the plot + fig, ax = plt.subplots(figsize=(8, 6)) # type: ignore + + # Plot the sources + for pos in src_pos.values(): + ax.scatter(pos[0], pos[1], s=120, c='k', marker='o', edgecolors='white', linewidths=1, zorder=3) # type: ignore + + # Plot the detectors + for pos in det_pos.values(): + ax.scatter(pos[0], pos[1], s=120, c='k', marker='s', edgecolors='white', linewidths=1, zorder=3) # type: ignore + + # Ensure that the colors stay within the boundaries even if they are over or under the max/min values + if t_or_theta == 't': + norm = mcolors.Normalize(vmin=-ABS_SIGNIFICANCE_T_VALUE, vmax=ABS_SIGNIFICANCE_T_VALUE) + elif t_or_theta == 'theta': + norm = mcolors.Normalize(vmin=-ABS_SIGNIFICANCE_THETA_VALUE, vmax=ABS_SIGNIFICANCE_THETA_VALUE) + + cmap: mcolors.Colormap = plt.get_cmap('seismic') + + # Plot connections with avg t-values + for row in avg_df.itertuples(): + src: int = cast(int, row.Source) # type: ignore + det: int = cast(int, row.Detector) # type: ignore + tval: float = cast(float, row.t_or_theta) # type: ignore + pval: float = cast(float, row.p_value) # type: ignore + + + if src in src_pos and det in det_pos: + x = [src_pos[src][0], det_pos[det][0]] + y = [src_pos[src][1], det_pos[det][1]] + style = '-' if pval <= P_THRESHOLD else '--' + ax.plot(x, y, linestyle=style, color=cmap(norm(tval)), linewidth=4, alpha=0.9, zorder=2) # type: ignore + + # Format the Colorbar + sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm) + sm.set_array([]) + cbar = plt.colorbar(sm, ax=ax, shrink=0.85) # type: ignore + cbar.set_label(f'Average {TARGET_ACTIVITY} {t_or_theta} value (hbo)', fontsize=11) # type: ignore + + # Formatting the subplots + ax.set_aspect('equal') + ax.set_title(f"Average {t_or_theta} values for {TARGET_ACTIVITY} (HbO) for {group_name}", fontsize=14) # type: ignore + ax.set_xlabel('X position (m)', 
fontsize=11) # type: ignore + ax.set_ylabel('Y position (m)', fontsize=11) # type: ignore + ax.grid(True, alpha=0.3) # type: ignore + + # Set axis limits to be 1cm more than the optode positions + all_x = [pos[0] for pos in src_pos.values()] + [pos[0] for pos in det_pos.values()] + all_y = [pos[1] for pos in src_pos.values()] + [pos[1] for pos in det_pos.values()] + ax.set_xlim(min(all_x)-0.01, max(all_x)+0.01) + ax.set_ylim(min(all_y)-0.01, max(all_y)+0.01) + + fig.tight_layout() + plt.show() # type: ignore + + + +def generate_montage_locations(): + """Get standard MNI montage locations in dataframe. + + Data is returned in the same format as the eeg_positions library. + """ + # standard_1020 and standard_1005 are in MNI (fsaverage) space already, + # but we need to undo the scaling that head_scale will do + montage = mne.channels.make_standard_montage( + "standard_1005", head_size=0.09700884729534559 + ) + for d in montage.dig: + d["coord_frame"] = 2003 + montage.dig[:] = montage.dig[3:] + montage.add_mni_fiducials() # now in fsaverage space + coords = pd.DataFrame.from_dict(montage.get_positions()["ch_pos"]).T + coords["label"] = coords.index + coords = coords.rename(columns={0: "x", 1: "y", 2: "z"}) + + return coords.reset_index(drop=True) + + +def _find_closest_standard_location(position, reference, *, out="label"): + """Return closest montage label to coordinates. + + Parameters + ---------- + position : array, shape (3,) + Coordinates. + reference : dataframe + As generated by _generate_montage_locations. + trans_pos : str + Apply a transformation to positions to specified frame. + Use None for no transformation. + """ + from scipy.spatial.distance import cdist + + p0 = np.array(position) + p0.shape = (-1, 3) + # head_mri_t, _ = _get_trans("fsaverage", "head", "mri") + # p0 = apply_trans(head_mri_t, p0) + dists = cdist(p0, np.asarray(reference[["x", "y", "z"]], float)) + + if out == "label": + min_idx = np.argmin(dists) + return reference["label"][min_idx] + else: + assert out == "dists" + return dists + + + +def _source_detector_fold_table(raw, cidx, reference, fold_tbl, interpolate): + src = raw.info["chs"][cidx]["loc"][3:6] + det = raw.info["chs"][cidx]["loc"][6:9] + + ref_lab = list(reference["label"]) + dists = _find_closest_standard_location([src, det], reference, out="dists") + src_min, det_min = np.argmin(dists, axis=1) + src_name, det_name = ref_lab[src_min], ref_lab[det_min] + + tbl = fold_tbl.query("Source == @src_name and Detector == @det_name") + dist = np.linalg.norm(dists[[0, 1], [src_min, det_min]]) + # Try reversing source and detector + if len(tbl) == 0: + tbl = fold_tbl.query("Source == @det_name and Detector == @src_name") + if len(tbl) == 0 and interpolate: + # Try something hopefully not too terrible: pick the one with the + # smallest net distance + good = np.isin(fold_tbl["Source"], reference["label"]) & np.isin( + fold_tbl["Detector"], reference["label"] + ) + assert good.any() + tbl = fold_tbl[good] + assert len(tbl) + src_idx = [ref_lab.index(src) for src in tbl["Source"]] + det_idx = [ref_lab.index(det) for det in tbl["Detector"]] + # Original + tot_dist = np.linalg.norm([dists[0, src_idx], dists[1, det_idx]], axis=0) + assert tot_dist.shape == (len(tbl),) + idx = np.argmin(tot_dist) + dist_1 = tot_dist[idx] + src_1, det_1 = ref_lab[src_idx[idx]], ref_lab[det_idx[idx]] + # And the reverse + tot_dist = np.linalg.norm([dists[0, det_idx], dists[1, src_idx]], axis=0) + idx = np.argmin(tot_dist) + dist_2 = tot_dist[idx] + src_2, det_2 = ref_lab[det_idx[idx]], 
ref_lab[src_idx[idx]]
+        if dist_1 < dist_2:
+            new_dist, src_use, det_use = dist_1, src_1, det_1
+        else:
+            new_dist, src_use, det_use = dist_2, det_2, src_2
+
+
+        tbl = fold_tbl.query("Source == @src_use and Detector == @det_use")
+        tbl = tbl.copy()
+        tbl["BestSource"] = src_name
+        tbl["BestDetector"] = det_name
+        tbl["BestMatchDistance"] = dist
+        tbl["MatchDistance"] = new_dist
+        assert len(tbl)
+    else:
+        tbl = tbl.copy()
+        tbl["BestSource"] = src_name
+        tbl["BestDetector"] = det_name
+        tbl["BestMatchDistance"] = dist
+        tbl["MatchDistance"] = dist
+
+    tbl = tbl.copy()  # don't get warnings about setting values later
+    return tbl
+
+from mne.utils import _check_fname, _validate_type, warn
+
+
+def _read_fold_xls(fname, atlas="Juelich"):
+    """Read fOLD toolbox xls file.
+
+    The values are then manipulated into a tidy dataframe.
+
+    Note the xls files are not included as no license is provided.
+
+    Parameters
+    ----------
+    fname : str
+        Path to xls file.
+    atlas : str
+        Requested atlas.
+    """
+    page_reference = {"AAL2": 2, "AICHA": 5, "Brodmann": 8, "Juelich": 11, "Loni": 14}
+
+    tbl = pd.read_excel(fname, sheet_name=page_reference[atlas])
+
+    # Remove the spacing between rows
+    empty_rows = np.where(np.isnan(tbl["Specificity"]))[0]
+    tbl = tbl.drop(empty_rows).reset_index(drop=True)
+
+    # Empty values in the table mean the value is the same as the row above
+    for row_idx in range(1, tbl.shape[0]):
+        for col_idx, col in enumerate(tbl.columns):
+            if not isinstance(tbl[col][row_idx], str):
+                if np.isnan(tbl[col][row_idx]):
+                    tbl.iloc[row_idx, col_idx] = tbl.iloc[row_idx - 1, col_idx]
+
+    tbl["Specificity"] = tbl["Specificity"] * 100
+    tbl["brainSens"] = tbl["brainSens"] * 100
+    return tbl
+
+import os.path as op
+
+def _check_load_fold(fold_files, atlas):
+    # _validate_type(fold_files, (list, "path-like", None), "fold_files")
+    if fold_files is None:
+        fold_files = mne.get_config("MNE_NIRS_FOLD_PATH")
+        if fold_files is None:
+            raise ValueError(
+                "MNE_NIRS_FOLD_PATH not set, either set it using "
+                "mne.set_config or pass fold_files as str or list"
+            )
+    if not isinstance(fold_files, list):  # path-like
+        fold_files = _check_fname(
+            fold_files,
+            overwrite="read",
+            must_exist=True,
+            name="fold_files",
+            need_dir=True,
+        )
+        fold_files = [op.join(fold_files, f"10-{x}.xls") for x in (5, 10)]
+
+    fold_tbl = pd.DataFrame()
+    for fi, fname in enumerate(fold_files):
+        fname = _check_fname(
+            fname, overwrite="read", must_exist=True, name=f"fold_files[{fi}]"
+        )
+        fold_tbl = pd.concat(
+            [fold_tbl, _read_fold_xls(fname, atlas=atlas)], ignore_index=True
+        )
+    return fold_tbl
+
+
+
+def fold_channel_specificity_normal(raw, fold_files=None, atlas="Juelich", interpolate=False):
+    """Return the landmarks and specificity a channel is sensitive to.
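+
+    A minimal usage sketch, assuming ``raw`` is an fNIRS recording loaded
+    with ``read_raw_snirf`` and the fOLD xls files are available (via the
+    ``MNE_NIRS_FOLD_PATH`` config or the ``fold_files`` argument)::
+
+        specs = fold_channel_specificity_normal(raw, atlas="Brodmann")
+        print(specs[0][["Landmark", "Specificity"]])  # table for the first channel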
+
+    Parameters
+    ----------
+    raw : BaseRaw
+        The fNIRS data.
+    fold_files : list | path-like | None
+        The fOLD xls file(s), or the folder containing them. Defaults to the
+        MNE_NIRS_FOLD_PATH config value.
+    atlas : str
+        Requested atlas.
+    interpolate : bool
+        If True and no exact source/detector match exists in the tables,
+        fall back to the closest available pairing.
+    """ # noqa: E501
+    _validate_type(raw, BaseRaw, "raw")
+
+    reference_locations = generate_montage_locations()
+
+    fold_tbl = _check_load_fold(fold_files, atlas)
+
+    chan_spec = list()
+    for cidx in range(len(raw.ch_names)):
+        tbl = _source_detector_fold_table(
+            raw, cidx, reference_locations, fold_tbl, interpolate
+        )
+        chan_spec.append(tbl.reset_index(drop=True))
+
+    return chan_spec
+
+
+
+def fold_channels(raw: BaseRaw, all_results: dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]], fold_path: str) -> None:
+
+
+    # Locate the fOLD excel files
+    mne.set_config('MNE_NIRS_FOLD_PATH', fold_path) # type: ignore
+
+    # Iterate over all of the groups
+    for group_name in all_results:
+
+        output = None
+
+        # List to store the results
+        landmark_specificity_data: list[dict[str, Any]] = []
+
+        # Filter the data to only what we want
+        hbo_channel_names = cast(list[str], getattr(raw.copy().pick(picks='hbo'), "ch_names")) # type: ignore
+
+        # Format the output to make it slightly easier to read
+        logger.info("*" * 60)
+        logger.info(f'Landmark Specificity for {group_name}:')
+        logger.info("*" * 60)
+
+        if GUI:
+
+            num_channels = len(hbo_channel_names)
+            rows, cols = 4, 7 # 4 rows and 7 columns of pie charts
+            fig, axes = plt.subplots(rows, cols, figsize=(16, 10), constrained_layout=True)
+            axes = axes.flatten() # Flatten the axes array for easier indexing
+
+            # If more pie charts than subplots, create extra subplots
+            if num_channels > rows * cols:
+                fig, axes = plt.subplots((num_channels // cols) + 1, cols, figsize=(16, 10), constrained_layout=True)
+                axes = axes.flatten()
+
+        # Create a list for consistent color mapping
+        landmarks = [
+            "1 - Primary Somatosensory Cortex",
+            "2 - Primary Somatosensory Cortex",
+            "3 - Primary Somatosensory Cortex",
+            "4 - Primary Motor Cortex",
+            "5 - Somatosensory Association Cortex",
+            "6 - Pre-Motor and Supplementary Motor Cortex",
+            "7 - Somatosensory Association Cortex",
+            "8 - Includes Frontal eye fields",
+            "9 - Dorsolateral prefrontal cortex",
+            "10 - Frontopolar area",
+            "11 - Orbitofrontal area",
+            "17 - Primary Visual Cortex (V1)",
+            "18 - Visual Association Cortex (V2)",
+            "19 - V3",
+            "20 - Inferior Temporal gyrus",
+            "21 - Middle Temporal gyrus",
+            "22 - Superior Temporal Gyrus",
+            "23 - Ventral Posterior cingulate cortex",
+            "24 - Ventral Anterior cingulate cortex",
+            "25 - Subgenual cortex",
+            "32 - Dorsal anterior cingulate cortex",
+            "37 - Fusiform gyrus",
+            "38 - Temporopolar area",
+            "39 - Angular gyrus, part of Wernicke's area",
+            "40 - Supramarginal gyrus part of Wernicke's area",
+            "41 - Primary and Auditory Association Cortex",
+            "42 - Primary and Auditory Association Cortex",
+            "43 - Subcentral area",
+            "44 - pars opercularis, part of Broca's area",
+            "45 - pars triangularis Broca's area",
+            "46 - Dorsolateral prefrontal cortex",
+            "47 - Inferior prefrontal gyrus",
+            "48 - Retrosubicular area",
+            "Brain_Outside",
+        ]
+
+        # plt.cm.get_cmap was removed in matplotlib 3.9; plt.get_cmap works across versions
+        cmap1 = plt.get_cmap('tab20') # First 20 colors
+        cmap2 = plt.get_cmap('tab20b') # Next 20 colors
+
+        # Combine the colors from both colormaps
+        colors = [cmap1(i) for i in range(20)] + [cmap2(i) for i in range(20)] # Total 40 colors
+
+        landmarks.sort(key=lambda x: (int(x.split(" - ")[0]) if x.split(" - ")[0].isdigit() else float('inf')))
+
+        landmark_color_map = {landmark: colors[i % len(colors)] for i, landmark in enumerate(landmarks)}
+
+        # Iterate over each channel
+        for idx, channel_name in enumerate(hbo_channel_names):
+
+            # Run the fOLD on the selected channel
+            channel_data = 
raw.copy().pick(picks=channel_name) # type: ignore
+
+            output = cast(list[DataFrame], fold_channel_specificity_normal(channel_data, interpolate=True, atlas='Brodmann'))
+
+            # Process each DataFrame that fold_channel_specificity_normal returns
+            for df_data in output:
+
+                # Extract the relevant columns
+                useful_data = df_data[['Landmark', 'Specificity']]
+
+                # Store the results
+                landmark_specificity_data.append({
+                    'Channel': channel_name,
+                    'Data': useful_data,
+                })
+
+            # Log the results
+            for data in landmark_specificity_data:
+                logger.info(f"Channel: {data['Channel']}")
+                logger.info(f"{data['Data']}")
+                logger.info("-" * 60)
+
+
+            # If the GUI is enabled, plot the results
+            if GUI:
+                unique_landmarks = sorted(useful_data['Landmark'].unique())
+                color_list = [landmark_color_map[landmark] for landmark in useful_data['Landmark']]
+
+                # Plot specificity for each channel
+                ax = axes[idx] # Use the correct axis for this channel
+
+                labels = [f'{landmark.split(" - ")[0]}' if landmark != 'Brain_Outside' else 'B' for landmark in useful_data['Landmark']]
+
+                wedges, texts, autotexts = ax.pie(
+                    useful_data['Specificity'],
+                    autopct='%1.1f%%',
+                    startangle=90,
+                    labels=labels, # Add the labels here
+                    labeldistance=1.05, # Adjust label position to avoid overlap with the wedges
+                    colors=color_list) # Ensure color consistency
+
+                ax.set_title(f'{channel_name}')
+                ax.axis('equal') # Equal aspect ratio ensures the pie chart is circular.
+
+
+            # Reset the list for the next participant
+            landmark_specificity_data = []
+
+        if GUI:
+            handles = [
+                plt.Line2D([0], [0], marker='o', color='w', label=landmark, markersize=10,
+                           markerfacecolor=landmark_color_map[landmark])
+                for landmark in landmarks
+            ]
+            n_landmarks = len(landmarks)
+
+            # Calculate the figure size based on number of rows and columns
+            fig_width = 5
+            fig_height = n_landmarks / 4
+
+            # Create a new figure window for the legend
+            legend_fig = plt.figure(figsize=(fig_width, fig_height))
+            legend_axes = legend_fig.add_subplot(111)
+            legend_axes.axis('off') # Turn off axis for the legend window
+            legend_axes.legend(handles=handles, loc='center', fontsize=10, title="Landmarks")
+
+        if GUI:
+            for ax in axes[len(hbo_channel_names):]:
+                ax.axis('off')
+            plt.show()
+
+
+
+
+def brain_landmarks_3d(raw_haemo: BaseRaw, show_optodes: Literal['sensors', 'labels', 'none', 'all'] = 'all') -> None:
+
+    brain = Brain("fsaverage", background="white", size=(800, 700)) # type: ignore
+
+    # Add the optode sensors to the brain
+    if show_optodes == 'all' or show_optodes == 'sensors':
+        brain.add_sensors(getattr(raw_haemo, "info"), trans=Transform('head', 'mri', np.eye(4)), fnirs=["channels", "pairs", "sources", "detectors"], verbose=VERBOSITY) # type: ignore
+
+    # Read and parse the optode label file, then add the text labels manually
+    if show_optodes == 'all' or show_optodes == 'labels':
+        positions: list[tuple[str, list[float]]] = []
+        with open(OPTODE_FILE_PATH, 'r') as f:
+            for line in f:
+                line = line.strip()
+                if not line or ':' not in line:
+                    continue # skip empty/malformed lines
+                name, coords = line.split(':', 1)
+                coords = [float(x) for x in coords.strip().split()]
+                positions.append((name.strip(), coords))
+
+        for name, (x, y, z) in positions:
+            brain._renderer.text3d(x, y, z, name, color=('red' if name.startswith('s') else 'blue' if name.startswith('d') else 'gray'), scale=0.002) # type: ignore
+
+    # Log each channel location
+    for ch in getattr(raw_haemo, "info")['chs']:
+        logger.info(f"{ch['ch_name']} {ch['loc'][:3]}")
+
+    # Add Brodmann labels
+    labels = cast(list[mne.Label], mne.read_labels_from_annot("fsaverage", 
"PALS_B12_Brodmann", "rh", verbose=VERBOSITY)) # type: ignore + + label_colors = { + "Brodmann.39-rh": "blue", + "Brodmann.40-rh": "green", + "Brodmann.6-rh": "pink", + "Brodmann.7-rh": "orange", + "Brodmann.17-rh": "red", + "Brodmann.1-rh": "yellow", + "Brodmann.2-rh": "yellow", + "Brodmann.3-rh": "yellow", + "Brodmann.18-rh": "red", + "Brodmann.19-rh": "red", + "Brodmann.4-rh": "purple", + "Brodmann.8-rh": "white" + } + + for label in labels: + name = getattr(label, "name", None) + if not isinstance(name, str): + continue + if name in label_colors: + brain.add_label(label, borders=False, color=label_colors[name]) # type: ignore + + + +def data_to_csv(all_results: dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]]): + + logger.info("Getting the current directory...") + if PLATFORM_NAME == 'darwin': + csvs_folder = os.path.join(os.path.dirname(sys.executable), "../../../csvs") + else: + cwd = os.getcwd() + csvs_folder = os.path.join(cwd, "csvs") + + logger.info("Attempting to create the csvs folder...") + os.makedirs(csvs_folder, exist_ok=True) + + # Generate a timestamp to be appended to the end of the file name + logger.info("Generating the timestamp...") + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + + # Iterate over all groups + for group_name in all_results: + + # Get the channel data and generate the file name + (_, df_cha, _, _) = all_results[group_name] + + filename = f"{group_name}_{timestamp}.csv" + save_path = os.path.join(csvs_folder, filename) + + # Filter to just the target condition and store it in the csv + if HRF_MODEL == 'fir': + filtered_df = df_cha[ + df_cha["Condition"].str.startswith(TARGET_ACTIVITY) & + (df_cha["Chroma"] == "hbo") + ] + + # Step 2: Define the aggregation logic + agg_funcs = { + 'df': 'mean', + 'mse': 'mean', + 'p_value': 'mean', + 'se': 'mean', + 't': 'mean', + 'theta': 'mean', + 'Source': 'mean', + 'Detector': 'mean', + 'Significant': lambda x: x.sum() > (len(x) / 2), + 'Chroma': 'first', # assuming all are the same + 'ch_name': 'first', # same ch_name in the group + 'ID': 'first', # same ID in the group + } + + # Step 3: Group and aggregate + averaged_df = ( + filtered_df + .groupby(['ch_name', 'ID'], as_index=False) + .agg(agg_funcs) + ) + + # Step 4: Rename and add 'Condition' as TARGET_ACTIVITY + averaged_df.insert(0, 'Condition', TARGET_ACTIVITY) + + averaged_df["df"] = averaged_df["df"].round().astype(int) + averaged_df["Source"] = averaged_df["Source"].round().astype(int) + averaged_df["Detector"] = averaged_df["Detector"].round().astype(int) + + # Step 5: Reset index and reorder columns + ordered_cols = [ + 'Condition', 'df', 'mse', 'p_value', 'se', 't', 'theta', + 'Source', 'Detector', 'Chroma', 'Significant', 'ch_name', 'ID' + ] + averaged_df = averaged_df[ordered_cols].reset_index(drop=True) + averaged_df = averaged_df.sort_values(by=["ID", "Detector", "Source"]).reset_index(drop=True) + + output_df = averaged_df + else: + output_df = df_cha.query(f"Condition == '{TARGET_ACTIVITY}' and Chroma == 'hbo'") # type: ignore + output_df.to_csv(save_path) + + + +def all_data_to_csv(all_results: dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]]): + + logger.info("Getting the current directory...") + if PLATFORM_NAME == 'darwin': + csvs_folder = os.path.join(os.path.dirname(sys.executable), "../../../csvs") + else: + cwd = os.getcwd() + csvs_folder = os.path.join(cwd, "csvs") + + logger.info("Attempting to create the csvs folder...") + os.makedirs(csvs_folder, exist_ok=True) + + # Generate a timestamp to be appended 
to the end of the file name + logger.info("Generating the timestamp...") + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + + # Iterate over all groups + for group_name in all_results: + + # Get the channel data and generate the file name + (_, df_cha, _, _) = all_results[group_name] + + filename = f"{group_name}_{timestamp}_all.csv" + save_path = os.path.join(csvs_folder, filename) + + # Filter to just the target condition and store it in the csv + if HRF_MODEL == 'fir': + output_df = df_cha + else: + output_df = df_cha.query(f"Condition == '{TARGET_ACTIVITY}' and Chroma == 'hbo'") # type: ignore + output_df.to_csv(save_path) + + + +def brain_3d_contrast(con_model_df: DataFrame, con_model_df_filtered: BaseRaw, common_channels: list[str], first_name: str, second_name: str, first_stim: float, second_stim: float, t_or_theta: Literal['t', 'theta'] = 'theta', show_optodes: Literal['sensors', 'labels', 'none', 'all'] = 'all', show_text: bool = True) -> None: + # Filter DataFrame to only common channels, and sort by raw order + con_model = con_model_df + + con_model["ch_name"] = pd.Categorical( + con_model["ch_name"], categories=common_channels, ordered=True + ) + con_model = con_model.sort_values("ch_name").reset_index(drop=True) # type: ignore + + + if t_or_theta == 't': + clim=dict(kind="value", pos_lims=(0, ABS_T_VALUE/2, ABS_T_VALUE)) + elif t_or_theta == 'theta': + clim=dict(kind="value", pos_lims=(0, ABS_THETA_VALUE/2, ABS_THETA_VALUE)) + + # Plot brain figure + brain = plot_3d_evoked_array(con_model_df_filtered.copy().pick(picks="hbo"), con_model, view="dorsal", distance=BRAIN_DISTANCE, colorbar=True, mode=BRAIN_MODE, clim=clim, size=(800, 700), verbose=VERBOSITY) # type: ignore + + if show_optodes == 'all' or show_optodes == 'sensors': + brain.add_sensors(getattr(con_model_df_filtered, "info"), trans=Transform('head', 'mri', np.eye(4)), fnirs=["channels", "pairs", "sources", "detectors"], verbose=VERBOSITY) # type: ignore + + # Read and parse the file + if show_optodes == 'all' or show_optodes == 'labels': + positions: list[tuple[str, list[float]]] = [] + with open(OPTODE_FILE_PATH, 'r') as f: + for line in f: + line = line.strip() + if not line or ':' not in line: + continue # skip empty/malformed lines + name, coords = line.split(':', 1) + coords = [float(x) for x in coords.strip().split()] + positions.append((name.strip(), coords)) + + for name, (x, y, z) in positions: + brain._renderer.text3d(x, y, z, name, color=('red' if name.startswith('s') else 'blue' if name.startswith('d') else 'gray'), scale=0.002) # type: ignore + + # Set the display text for the brain image + # display_text = ('Folder: ' + str(BASE_SNIRF_FOLDER) + '\nContrast: ' + first_name + ' - ' + second_name + '\nReject Criteria Threshold: ' + str(EPOCH_REJECT_CRITERIA_THRESH) + '\nMin Time Threshold: ' + + # str(TIME_MIN_THRESH) + 's\nMax Time Threshold: ' + str(TIME_MAX_THRESH) + 's\nShort Channel Regression: ' + str(SHORT_CHANNEL_REGRESSION) + '\nStim Duration: ' + str(first_stim) + ', ' + + # str(second_stim) + '\nBrain Distance: ' + str(BRAIN_DISTANCE) + '\nBrain Mode: ' + BRAIN_MODE + '\nLooking at: ' + t_or_theta + ' values') + + if HRF_MODEL == 'fir': + display_text = ('Folder: ' + str(BASE_SNIRF_FOLDER) + '\nContrast: ' + first_name + ' - ' + second_name + '\nShort Channel Regression: ' + str(SHORT_CHANNEL_REGRESSION) + + '\nBrain Distance: ' + str(BRAIN_DISTANCE) + '\nBrain Mode: ' + BRAIN_MODE + '\nLooking at: ' + t_or_theta + ' values') + else: + display_text = ('Folder: ' + str(BASE_SNIRF_FOLDER) + 
'\nContrast: ' + first_name + ' - ' + second_name + '\nShort Channel Regression: ' + str(SHORT_CHANNEL_REGRESSION) + + '\nStim Duration: ' + str(first_stim) + ', ' + str(second_stim) + '\nBrain Distance: ' + str(BRAIN_DISTANCE) + '\nBrain Mode: ' + BRAIN_MODE + '\nLooking at: ' + t_or_theta + ' values') + + # Apply the text onto the brain + if show_text: + brain.add_text(0.12, 0.70, display_text, "title", font_size=11, color="k") # type: ignore + + + +def plot_2d_3d_contrasts_between_groups(all_results: dict[str, tuple[DataFrame, DataFrame, DataFrame, DataFrame]], all_raw_haemo: dict[str, dict[str, BaseRaw]], t_or_theta: Literal['t', 'theta'] = 'theta', show_optodes: Literal['sensors', 'labels', 'none', 'all'] = 'all', show_text: bool = True) -> None: + + + # Dictionary to store data for each group + group_dfs: dict[str, DataFrame] = {} + + # GET RAW HAEMO OF THE FIRST PARTICIPANT + raw_haemo = all_raw_haemo[list(all_raw_haemo.keys())[0]]["full_layout"] + + # Store all contrasts with the corresponding group name + for group_name, (_, _, df_con, _) in all_results.items(): + group_dfs[group_name] = df_con + group_dfs[group_name]["group"] = group_name + + # Concatenate all groups together + df_combined = pd.concat(group_dfs.values(), ignore_index=True) + + con_summary = df_combined.query("Chroma == 'hbo'").copy() # type: ignore + + valid_channels = cast(DataFrame, (pd.crosstab(con_summary['group'], con_summary['ch_name']) > 1).all()) # type: ignore + valid_channels = valid_channels[valid_channels].index.tolist() + + # Filter data to only these channels + con_summary = con_summary[con_summary['ch_name'].isin(valid_channels)] # type: ignore + + + # # Verify your data looks as expected + # logger.info(con_summary[['group', 'ch_name', 'Chroma', 'effect']].head()) + # logger.info("\nUnique values:") + # logger.info("Groups:", con_summary['group'].unique()) + # logger.info("Channels:", con_summary['ch_name'].unique()) + # logger.info("Chroma:", con_summary['Chroma'].unique()) # Should be just 'hbo' + + model_formula = "effect ~ -1 + group:ch_name:Chroma" + con_model = smf.mixedlm(model_formula, con_summary, groups=con_summary["ID"]).fit(method="nm") # type: ignore + + # logger.info(con_model.summary()) + + + # # Fit the mixed-effects model + # model_formula = "effect ~ -1 + group:ch_name:Chroma" + + # #model_formula = "effect ~ -1 + group + ch_name" + + # con_model = smf.mixedlm( + # model_formula, con_summary_filtered, groups=con_summary_filtered["ID"] + # ).fit(method="nm") + + # Get the t values if we are comparing them + + t_values: pd.Series[float] = pd.Series(dtype=float) + if t_or_theta == 't': + t_values = con_model.tvalues + + # Get all the group names from the dictionary and how many groups we have + group_names = list(group_dfs.keys()) + n_groups = len(group_names) + + # Store DataFrames for each contrast + for i in range(n_groups): + for j in range(i + 1, n_groups): + group1_name = group_names[i] + group2_name = group_names[j] + + if t_or_theta == 't': + # Extract the t-values for both groups + group1_vals = t_values.filter(like=f"group[{group1_name}]") # type: ignore + group2_vals = t_values.filter(like=f"group[{group2_name}]") # type: ignore + vlim_value = ABS_CONTRAST_T_VALUE + + elif t_or_theta == 'theta': + # Extract the coefficients for both groups + group1_vals = con_model.params.filter(like=f"group[{group1_name}]") + group2_vals = con_model.params.filter(like=f"group[{group2_name}]") + vlim_value = ABS_CONTRAST_THETA_VALUE + + # TODO: Does this work for all separators? 
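+            # The parameter names statsmodels generates for this formula look
+            # like "group[G1]:ch_name[S1_D1 hbo]:Chroma[hbo]" (illustrative
+            # example, not taken from real output). The split below assumes
+            # that exact term order; a pattern such as
+            # re.search(r"ch_name\[(.*?)\]", name) would be a more robust
+            # sketch if the separator or ordering ever changes.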
+ # Extract channel names + + group1_channels: list[str] = [ + name.split(":")[1].split("[")[1].split("]")[0] + for name in getattr(group1_vals, "index") + ] + group2_channels: list[str] = [ + name.split(":")[1].split("[")[1].split("]")[0] + for name in getattr(group2_vals, "index") + ] + + # Create the DataFrames with channel indices + df_group1 = DataFrame( + {"Coef.": group1_vals.values}, index=group1_channels # type: ignore + ) + df_group2 = DataFrame( + {"Coef.": group2_vals.values}, index=group2_channels # type: ignore + ) + + # Merge the two DataFrames on the channel names + df_contrast = df_group1.join(df_group2, how="inner", lsuffix=f"_{group1_name}", rsuffix=f"_{group2_name}") # type: ignore + + # Compute the contrasts + contrast_1_2 = df_contrast[f"Coef._{group1_name}"] - df_contrast[f"Coef._{group2_name}"] + contrast_2_1 = df_contrast[f"Coef._{group2_name}"] - df_contrast[f"Coef._{group1_name}"] + + # Add the a-b / 1-2 contrast to the DataFrame. The order and names of the keys in the DataFrame are important! + df_contrast["Coef."] = contrast_1_2 + con_model_df_1_2 = DataFrame({ + "ch_name": df_contrast.index, + "Coef.": df_contrast["Coef."], + "Chroma": "hbo" + }) + + + mne_ch_names = getattr(raw_haemo.copy().pick(picks="hbo"), "ch_names") # type: ignore + glm_ch_names = cast(list[DataFrame], con_model_df_1_2["ch_name"].tolist()) + + # Get ordered common channels + common_channels = [ch for ch in mne_ch_names if ch in glm_ch_names] + + # Filter raw data to these channels + con_model_df_filtered = raw_haemo.copy().pick(picks=common_channels) # type: ignore + + # Reindex GLM results to match MNE channel order + con_model_df_1_2 = con_model_df_1_2.set_index("ch_name").loc[common_channels].reset_index() # type: ignore + + # Create the 3d visualization + brain_3d_contrast(con_model_df_1_2, con_model_df_filtered, common_channels, group1_name, group2_name, STIM_DURATION[i], STIM_DURATION[j], t_or_theta, show_optodes, show_text) + + plot_glm_group_topo(con_model_df_filtered.copy().pick(picks="hbo"), con_model_df_1_2, names=True, res=128, vlim=(-vlim_value, vlim_value)) # type: ignore + + + # TODO: The title currently goes on the colorbar. Low priority + plt.title(f"Contrast: {group1_name} vs {group2_name}") # type: ignore + plt.show() # type: ignore + + # Add the b-a / 2-1 contrast to the DataFrame. The order and names of the keys in the DataFrame are important! + df_contrast["Coef."] = contrast_2_1 + con_model_df_2_1 = DataFrame({ + "ch_name": df_contrast.index, + "Coef.": df_contrast["Coef."], + "Chroma": "hbo" + }) + + + mne_ch_names = getattr(raw_haemo.copy().pick(picks="hbo"), "ch_names") # type: ignore + glm_ch_names = cast(list[DataFrame], con_model_df_2_1["ch_name"].tolist()) + + # Get ordered common channels + common_channels = [ch for ch in mne_ch_names if ch in glm_ch_names] + + # Filter raw data to these channels + con_model_df_filtered = raw_haemo.copy().pick(picks=common_channels) # type: ignore + + # Reindex GLM results to match MNE channel order + con_model_df_2_1 = con_model_df_2_1.set_index("ch_name").loc[common_channels].reset_index() # type: ignore + + # Create the 3d visualization + brain_3d_contrast(con_model_df_2_1, con_model_df_filtered, common_channels, group2_name, group1_name, STIM_DURATION[j], STIM_DURATION[i], t_or_theta, show_optodes, show_text) + + plot_glm_group_topo(con_model_df_filtered.copy().pick(picks="hbo"), con_model_df_2_1, names=True, res=128, vlim=(-vlim_value, vlim_value)) # type: ignore + + # TODO: The title currently goes on the colorbar. 
Low priority
+                plt.title(f"Contrast: {group2_name} vs {group1_name}") # type: ignore
+                plt.show() # type: ignore
+
+
+
+
+# TODO: Is any of this still useful?
+
+def calculate_annotations(raw_haemo_filtered, file_name, output_folder=None, save_images=None):
+    '''Method that extracts the annotations from the data.\n
+    Input:\n
+    raw_haemo_filtered (RawSNIRF) - The filtered haemoglobin concentration data\n
+    file_name (string) - The file name of the current file\n
+    output_folder (string) - (optional) Where to save the images. Default is None\n
+    save_images (bool) - (optional) Whether to save the images or not. Default is None\n
+    Output:\n
+    events (ndarray) - Array containing row number and what index the event is\n
+    event_dict (dict) - Contains the names of the events'''
+
+    # Get when the events occur and what they are called, and display a figure with the result
+    events, event_dict = mne.events_from_annotations(raw_haemo_filtered)
+
+    # Do we save the image?
+    if save_images:
+        fig = mne.viz.plot_events(events, event_id=event_dict, sfreq=raw_haemo_filtered.info["sfreq"], show=False)
+        save_path = output_folder + "/8. Annotations for " + file_name + ".png"
+        fig.savefig(save_path)
+
+    return events, event_dict
+
+
+
+def calculate_good_epochs(raw_haemo_filtered, events, event_dict, file_name, tmin=None, tmax=None, reject_thresh=None, target_activity=None, target_control=None, output_folder=None, save_images=None):
+    '''Calculates which epochs are good and creates a graph showing if any are dropped.\n
+    Input:\n
+    raw_haemo_filtered (RawSNIRF) - The filtered haemoglobin concentration data\n
+    events (ndarray) - Array containing row number and what index the event is\n
+    event_dict (dict) - Contains the names of the events\n
+    file_name (string) - The file name of the current file\n
+    tmin (float) - (optional) Time in seconds to display before the event. Default is TIME_MIN_THRESH\n
+    tmax (float) - (optional) Time in seconds to display after the event. Default is TIME_MAX_THRESH\n
+    reject_thresh (float) - (optional) Value that determines the threshold for rejecting epochs. Default is EPOCH_REJECT_CRITERIA_THRESH\n
+    target_activity (string) - (optional) The target activity. Default is TARGET_ACTIVITY\n
+    target_control (string) - (optional) The target control. Default is TARGET_CONTROL\n
+    output_folder (string) - (optional) Where to save the images. Default is None\n
+    save_images (bool) - (optional) Whether to save the images or not.
Default is None\n
+    Output:\n
+    good_epochs (Epochs) - The remaining good epochs\n
+    all_epochs (Epochs) - All of the epochs'''
+
+    if tmin is None:
+        tmin = TIME_MIN_THRESH
+    if tmax is None:
+        tmax = TIME_MAX_THRESH
+    if reject_thresh is None:
+        reject_thresh = EPOCH_REJECT_CRITERIA_THRESH
+    if target_activity is None:
+        target_activity = TARGET_ACTIVITY
+    if target_control is None:
+        target_control = TARGET_CONTROL
+
+    # Get all the good epochs
+    good_epochs = mne.Epochs(
+        raw_haemo_filtered,
+        events,
+        event_id=event_dict,
+        tmin=tmin,
+        tmax=tmax,
+        reject=dict(hbo=reject_thresh),
+        reject_by_annotation=True,
+        proj=True,
+        baseline=(None, 0),
+        preload=True,
+        detrend=None,
+        verbose=True,
+    )
+
+    # Get all the epochs
+    all_epochs = mne.Epochs(
+        raw_haemo_filtered,
+        events,
+        event_id=event_dict,
+        tmin=tmin,
+        tmax=tmax,
+        proj=True,
+        baseline=(None, 0),
+        preload=True,
+        detrend=None,
+        verbose=True,
+    )
+
+    if REJECT_PAIRS:
+        # Calculate which epochs were in all but not in good
+        all_idx = all_epochs.selection
+        good_idx = good_epochs.selection
+        bad_idx = np.setdiff1d(all_idx, good_idx)
+
+        # Split the controls and the activities
+        event_ids = all_epochs.events[:, 2]
+        control_id = event_dict[target_control]
+        activity_id = event_dict[target_activity]
+
+        to_reject_extra = set()
+
+        for i, idx in enumerate(all_idx):
+            if idx in bad_idx:
+                ev = event_ids[i]
+                # If the control was bad, drop the following activity
+                if ev == control_id and i + 1 < len(all_idx):
+                    if event_ids[i + 1] == activity_id:
+                        to_reject_extra.add(all_idx[i + 1])
+                # If the activity was bad, drop the preceding control
+                if ev == activity_id and i - 1 >= 0:
+                    if event_ids[i - 1] == control_id:
+                        to_reject_extra.add(all_idx[i - 1])
+
+        # Create a list to store all the new drops, only adding them if they are currently classified as good
+        drop_idx_in_good = [
+            np.where(good_idx == idx)[0][0] for idx in to_reject_extra if idx in good_idx
+        ]
+
+        # Drop the pairings of the bad epochs
+        good_epochs.drop(drop_idx_in_good)
+
+    # Do we save the image?
+    if save_images:
+        drop_log_fig = good_epochs.plot_drop_log(show=False)
+        save_path = output_folder + "/8. Epoch drops for " + file_name + ".png"
+        drop_log_fig.savefig(save_path)
+
+    return good_epochs, all_epochs
+
+
+
+def bad_check(raw_od, max_bad_channels=None):
+    '''Method to see if we have more bad channels than our allowed threshold.\n
+    Inputs:\n
+    raw_od (RawSNIRF) - The optical density data\n
+    max_bad_channels (int) - (optional) The max amount of bad channels we want to tolerate. Default is MAX_BAD_CHANNELS\n
+    Output:\n
+    (bool) - True if we had more bad channels than the threshold, False if we did not'''
+
+    if max_bad_channels is None:
+        max_bad_channels = MAX_BAD_CHANNELS
+
+    # Check if there are more bad channels in the bads key than the allowed amount
+    if len(raw_od.info.get('bads', [])) >= max_bad_channels:
+        return True
+    else:
+        return False
+
+
+
+def remove_bad_epoch_pairings(raw_haemo_filtered_minus_short, good_epochs, epoch_pair_tolerance_window=None):
+    '''Method to apply our new epochs to the loaded data in working memory. This is to ensure that the GLM does not see these epochs.
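+    A minimal usage sketch, assuming raw_haemo_filtered_minus_short and good_epochs come from the preprocessing steps above:\n
+    raw_haemo_good = remove_bad_epoch_pairings(raw_haemo_filtered_minus_short, good_epochs)\n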
+    Inputs:\n
+    raw_haemo_filtered_minus_short (RawSNIRF) - The filtered haemoglobin concentration data\n
+    good_epochs (Epochs) - The epochs we want the loaded data to take on\n
+    epoch_pair_tolerance_window (int) - (optional) The amount of data points the paired epoch can deviate from the expected amount. Default is EPOCH_PAIR_TOLERANCE_WINDOW\n
+    Output:\n
+    raw_haemo_filtered_good_epochs (RawSNIRF) - The filtered haemoglobin concentration data with only the good epochs'''
+
+    if epoch_pair_tolerance_window is None:
+        epoch_pair_tolerance_window = EPOCH_PAIR_TOLERANCE_WINDOW
+    # Copy the input haemoglobin concentration data and drop the bad channels
+    raw_haemo_filtered_good_epochs = raw_haemo_filtered_minus_short.copy()
+    raw_haemo_filtered_good_epochs = raw_haemo_filtered_good_epochs.drop_channels(raw_haemo_filtered_good_epochs.info['bads'])
+
+    # Get the sample indices of the good events
+    good_event_samples = set(good_epochs.events[:, 0])
+    logger.info(f"Total good events (epochs): {len(good_event_samples)}")
+
+    # Get the current annotations
+    raw_annots = raw_haemo_filtered_good_epochs.annotations
+
+    # Create lists to use for processing
+    clean_descriptions = []
+    clean_onsets = []
+    clean_durations = []
+    dropped = []
+
+    # Get the sampling frequency of the file
+    sfreq = raw_haemo_filtered_good_epochs.info['sfreq']
+
+    for desc, onset, dur in zip(raw_annots.description, raw_annots.onset, raw_annots.duration):
+        # Convert annotation onset time to sample index
+        sample = int(onset * sfreq)
+
+        # Force-drop any annotations that were explicitly listed. The continue
+        # must apply to this outer loop, otherwise the annotation would still
+        # be processed (and possibly kept) below.
+        if FORCE_DROP_ANNOTATIONS and desc in FORCE_DROP_ANNOTATIONS:
+            dropped.append((desc, onset))
+            continue
+
+        # Check if the annotation is within the tolerance of any good event
+        matched = any(abs(sample - event_sample) <= epoch_pair_tolerance_window for event_sample in good_event_samples)
+
+        # We found a matching event
+        if matched:
+            clean_descriptions.append(desc)
+            clean_onsets.append(onset)
+            clean_durations.append(dur)
+        else:
+            dropped.append((desc, onset))
+
+    # Create the new filtered annotations
+    new_annots = Annotations(
+        onset=clean_onsets,
+        duration=clean_durations,
+        description=clean_descriptions,
+    )
+
+    # Assign the new annotations
+    raw_haemo_filtered_good_epochs.set_annotations(new_annots)
+
+    # Log the results
+    logger.info(f"Original annotations: {len(raw_annots)}")
+    logger.info(f"Kept annotations: {len(clean_descriptions)}")
+    logger.info(f"Kept annotation types: {set(clean_descriptions)}")
+    if dropped:
+        logger.info(f"Dropped annotations: {len(dropped)}")
+        logger.info("Dropped annotations:")
+        for desc, onset in dropped:
+            logger.info(f" - {desc} at {onset:.2f}s")
+    else:
+        logger.info("No annotations were dropped!")
+
+    return raw_haemo_filtered_good_epochs
\ No newline at end of file
diff --git a/flares_updater.py b/flares_updater.py
new file mode 100644
index 0000000..13d2973
--- /dev/null
+++ b/flares_updater.py
@@ -0,0 +1,255 @@
+"""
+Filename: flares_updater.py
+Description: FLARES updater executable
+
+Author: Tyler de Zeeuw
+License: GPL-3.0
+"""
+
+# Built-in imports
+import os
+import sys
+import time
+import shlex
+import shutil
+import platform
+import subprocess
+from datetime import datetime
+
+# External library imports
+import psutil
+
+PLATFORM_NAME = platform.system().lower()
+
+if PLATFORM_NAME == 'darwin':
+    LOG_FILE = os.path.join(os.path.dirname(sys.executable), "../../../flares_updater.log")
+else:
+    LOG_FILE = os.path.join(os.getcwd(), "flares_updater.log")
+
+
+def log(msg):
+    with open(LOG_FILE, "a", encoding="utf-8") as f:
+        timestamp = 
datetime.now().strftime("%Y-%m-%d %H:%M:%S") + f.write(f"{timestamp} - {msg}\n") + + +def kill_all_processes_by_executable(exe_path): + terminated_any = False + exe_path = os.path.realpath(exe_path) + + if PLATFORM_NAME == 'windows': + for proc in psutil.process_iter(['pid', 'exe']): + try: + proc_exe = proc.info.get('exe') + if proc_exe and os.path.samefile(os.path.realpath(proc_exe), exe_path): + log(f"Terminating process: PID {proc.pid}") + _terminate_process(proc) + terminated_any = True + except Exception as e: + log(f"Error terminating process (Windows): {e}") + elif PLATFORM_NAME == 'linux': + for proc in psutil.process_iter(['pid', 'cmdline']): + try: + cmdline = proc.info.get('cmdline', []) + if cmdline: + proc_cmd = os.path.realpath(cmdline[0]) + if os.path.samefile(proc_cmd, exe_path): + log(f"Terminating process: PID {proc.pid}") + _terminate_process(proc) + terminated_any = True + except Exception as e: + log(f"Error terminating process (Linux): {e}") + + if not terminated_any: + log(f"No running processes found for {exe_path}") + return terminated_any + + +def _terminate_process(proc): + try: + proc.terminate() + proc.wait(timeout=10) + log(f"Process {proc.pid} terminated gracefully.") + except psutil.TimeoutExpired: + log(f"Process {proc.pid} did not terminate in time. Killing forcefully.") + proc.kill() + proc.wait(timeout=5) + log(f"Process {proc.pid} killed.") + + +def wait_for_unlock(path, timeout=100): + start_time = time.time() + while time.time() - start_time < timeout: + try: + if os.path.isdir(path): + shutil.rmtree(path) + else: + os.remove(path) + log(f"Deleted (after wait): {path}") + return + except Exception as e: + log(f"Still locked: {path} - {e}") + time.sleep(1) + log(f"Failed to delete after wait: {path}") + + +def delete_path(path): + if os.path.exists(path): + try: + if os.path.isdir(path): + shutil.rmtree(path) + log(f"Deleted directory: {path}") + else: + os.remove(path) + log(f"Deleted file: {path}") + except Exception as e: + log(f"Error deleting {path}: {e}") + + +def copy_update_files(src_folder, dest_folder, updater_name): + for item in os.listdir(src_folder): + if item.lower() == updater_name.lower(): + log(f"Skipping updater executable: {item}") + continue + s = os.path.join(src_folder, item) + d = os.path.join(dest_folder, item) + delete_path(d) + try: + if os.path.isdir(s): + shutil.copytree(s, d) + log(f"Copied folder: {s} -> {d}") + else: + shutil.copy2(s, d) + log(f"Copied file: {s} -> {d}") + except Exception as e: + log(f"Error copying {s} -> {d}: {e}") + + +def copy_update_files_darwin(src_folder, dest_folder, updater_name): + + updater_name = updater_name + ".app" + + for item in os.listdir(src_folder): + if item.lower() == updater_name.lower(): + log(f"Skipping updater executable: {item}") + continue + s = os.path.join(src_folder, item) + d = os.path.join(dest_folder, item) + delete_path(d) + try: + if os.path.isdir(s): + subprocess.check_call(["ditto", s, d]) + log(f"Copied folder with ditto: {s} -> {d}") + else: + shutil.copy2(s, d) + log(f"Copied file: {s} -> {d}") + except Exception as e: + log(f"Error copying {s} -> {d}: {e}") + + +def remove_quarantine(app_path): + script = f''' + do shell script "xattr -d -r com.apple.quarantine {shlex.quote(app_path)}" with administrator privileges with prompt "FLARES needs privileges to finish the update. 
(1/2)"
+    '''
+    try:
+        subprocess.run(['osascript', '-e', script], check=True)
+        print("✅ Quarantine attribute removed.")
+    except subprocess.CalledProcessError as e:
+        print("❌ Failed to remove quarantine attribute.")
+        print(e)
+
+
+def main():
+    try:
+        log(f"[Updater] sys.argv: {sys.argv}")
+
+        if len(sys.argv) != 3:
+            log("Invalid arguments. Usage: flares_updater <update_folder> <main_exe>")
+            sys.exit(1)
+
+        update_folder = sys.argv[1]
+        main_exe = sys.argv[2]
+
+        # Interesting naming convention: each extra "p" walks one directory up
+        parent_dir = os.path.dirname(os.path.abspath(main_exe))
+        pparent_dir = os.path.dirname(parent_dir)
+        ppparent_dir = os.path.dirname(pparent_dir)
+        pppparent_dir = os.path.dirname(ppparent_dir)
+
+        updater_name = os.path.basename(sys.argv[0])
+
+        log("Updater started.")
+        log(f"Update folder: {update_folder}")
+        log(f"Main EXE: {main_exe}")
+        log(f"Updater EXE: {updater_name}")
+        if PLATFORM_NAME == 'darwin':
+            log(f"Main App Folder: {ppparent_dir}")
+
+        # Kill all instances of main app
+        kill_all_processes_by_executable(main_exe)
+
+        # Wait until main_exe process is fully gone (polling)
+        for _ in range(20): # wait max 10 seconds
+            running = False
+            for proc in psutil.process_iter(['exe', 'cmdline']):
+                try:
+                    if PLATFORM_NAME == 'windows':
+                        proc_exe = proc.info.get('exe')
+                        if proc_exe and os.path.samefile(os.path.realpath(proc_exe), os.path.realpath(main_exe)):
+                            running = True
+                            break
+                    elif PLATFORM_NAME == 'linux':
+                        cmdline = proc.info.get('cmdline', [])
+                        if cmdline:
+                            proc_cmd = os.path.realpath(cmdline[0])
+                            if os.path.samefile(proc_cmd, os.path.realpath(main_exe)):
+                                running = True
+                                break
+                except Exception as e:
+                    log(f"Polling error: {e}")
+            if not running:
+                break
+            time.sleep(0.5)
+        else:
+            log("Warning: main executable still running after wait timeout.")
+
+        # Delete old version files
+        if PLATFORM_NAME == 'darwin':
+            log(f'Attempting to delete {ppparent_dir}')
+            delete_path(ppparent_dir)
+            update_folder = os.path.join(sys.argv[1], "flares-darwin")
+            copy_update_files_darwin(update_folder, pppparent_dir, updater_name)
+
+        else:
+            delete_path(main_exe)
+            wait_for_unlock(os.path.join(parent_dir, "_internal"))
+
+            # Copy new files excluding the updater itself
+            copy_update_files(update_folder, parent_dir, updater_name)
+
+    except Exception as e:
+        log(f"Something went wrong: {e}")
+
+    # Relaunch main app
+    try:
+        if PLATFORM_NAME == 'linux':
+            os.chmod(main_exe, 0o755)
+            log("Added executable bit")
+
+        if PLATFORM_NAME == 'darwin':
+            os.chmod(ppparent_dir, 0o755)
+            log("Added executable bit")
+            remove_quarantine(ppparent_dir)
+            log(f"Removed the quarantine flag on {ppparent_dir}")
+            subprocess.Popen(['open', ppparent_dir, "--args", "--finish-update"])
+        else:
+            subprocess.Popen([main_exe, "--finish-update"], cwd=parent_dir)
+
+        log("Relaunched main app.")
+    except Exception as e:
+        log(f"Failed to relaunch main app: {e}")
+
+    log("Updater completed. 
Exiting.") + sys.exit(0) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/icons/article_24dp_1F1F1F.svg b/icons/article_24dp_1F1F1F.svg new file mode 100644 index 0000000..66793ed --- /dev/null +++ b/icons/article_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/content_copy_24dp_1F1F1F.svg b/icons/content_copy_24dp_1F1F1F.svg new file mode 100644 index 0000000..aeabcb9 --- /dev/null +++ b/icons/content_copy_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/content_cut_24dp_1F1F1F.svg b/icons/content_cut_24dp_1F1F1F.svg new file mode 100644 index 0000000..1a03cdb --- /dev/null +++ b/icons/content_cut_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/content_paste_24dp_1F1F1F.svg b/icons/content_paste_24dp_1F1F1F.svg new file mode 100644 index 0000000..6eea988 --- /dev/null +++ b/icons/content_paste_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/desktop.ini b/icons/desktop.ini new file mode 100644 index 0000000..54844b6 --- /dev/null +++ b/icons/desktop.ini @@ -0,0 +1,2 @@ +[LocalizedFileNames] +updater.png=@updater.png,0 diff --git a/icons/exit_to_app_24dp_1F1F1F.svg b/icons/exit_to_app_24dp_1F1F1F.svg new file mode 100644 index 0000000..7da9840 --- /dev/null +++ b/icons/exit_to_app_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/file_open_24dp_1F1F1F.svg b/icons/file_open_24dp_1F1F1F.svg new file mode 100644 index 0000000..29343b7 --- /dev/null +++ b/icons/file_open_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/folder_24dp_1F1F1F.svg b/icons/folder_24dp_1F1F1F.svg new file mode 100644 index 0000000..c4edc42 --- /dev/null +++ b/icons/folder_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/folder_copy_24dp_1F1F1F.svg b/icons/folder_copy_24dp_1F1F1F.svg new file mode 100644 index 0000000..d903aa9 --- /dev/null +++ b/icons/folder_copy_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/help_24dp_1F1F1F.svg b/icons/help_24dp_1F1F1F.svg new file mode 100644 index 0000000..ea47319 --- /dev/null +++ b/icons/help_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/info_24dp_1F1F1F.svg b/icons/info_24dp_1F1F1F.svg new file mode 100644 index 0000000..f749f5c --- /dev/null +++ b/icons/info_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/main.icns b/icons/main.icns new file mode 100644 index 0000000..d579a69 Binary files /dev/null and b/icons/main.icns differ diff --git a/icons/main.ico b/icons/main.ico new file mode 100644 index 0000000..5a64c66 Binary files /dev/null and b/icons/main.ico differ diff --git a/icons/save_24dp_1F1F1F.svg b/icons/save_24dp_1F1F1F.svg new file mode 100644 index 0000000..a8b8172 --- /dev/null +++ b/icons/save_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/save_as_24dp_1F1F1F.svg b/icons/save_as_24dp_1F1F1F.svg new file mode 100644 index 0000000..671d587 --- /dev/null +++ b/icons/save_as_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/update_24dp_1F1F1F.svg b/icons/update_24dp_1F1F1F.svg new file mode 100644 index 0000000..c62cfde --- /dev/null +++ b/icons/update_24dp_1F1F1F.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/icons/updater.icns b/icons/updater.icns new file mode 100644 index 0000000..d579a69 Binary files /dev/null and b/icons/updater.icns differ diff --git 
a/icons/updater.ico b/icons/updater.ico new file mode 100644 index 0000000..70f1181 Binary files /dev/null and b/icons/updater.ico differ diff --git a/main.py b/main.py new file mode 100644 index 0000000..2a0d1c1 --- /dev/null +++ b/main.py @@ -0,0 +1,1725 @@ +""" +Filename: main.py +Description: FLARES main executable + +Author: Tyler de Zeeuw +License: GPL-3.0 +""" + +# Built-in imports +import os +import re +import sys +import time +import shlex +import pickle +import shutil +import zipfile +import platform +import traceback +import subprocess +from pathlib import Path +from datetime import datetime +from multiprocessing import Process, current_process, freeze_support, Manager + +# External library imports +import psutil +import requests +from PySide6.QtWidgets import ( + QApplication, QWidget, QMessageBox, QVBoxLayout, QHBoxLayout, QTextEdit, QScrollArea, QComboBox, QGridLayout, + QPushButton, QMainWindow, QFileDialog, QLabel, QLineEdit, QFrame, QSizePolicy +) +from PySide6.QtCore import QThread, Signal, Qt, QTimer +from PySide6.QtGui import QAction, QKeySequence, QIcon, QIntValidator, QDoubleValidator + +CURRENT_VERSION = "1.0.0" + +API_URL = "https://git.research.dezeeuw.ca/api/v1/repos/tyler/flares/releases" +PLATFORM_NAME = platform.system().lower() + +# Selectable parameters on the right side of the window +SECTIONS = [ + { + "title": "Preprocessing", + "params": [ + {"name": "SECONDS_TO_STRIP", "default": 0, "type": int, "help": "Seconds to remove from beginning of file. Setting this to 0 will remove nothing from the file."}, + {"name": "DOWNSAMPLE", "default": True, "type": bool, "help": "Downsample snirf files."}, + {"name": "DOWNSAMPLE_FREQUENCY", "default": 25, "type": int, "help": "Frequency (Hz) to downsample to. If this is set higher than the input data, new data will be interpolated."}, + {"name": "FORCE_DROP_CHANNELS", "default": "", "type": str, "help": "Channels to forcibly drop (comma separated)."}, + {"name": "SOURCE_DETECTOR_SEPARATOR", "default": "_", "type": str, "help": "Separator between source and detector."}, + ] + }, + { + "title": "Update Optode Positions", + "params": [ + {"name": "OPTODE_FILE", "default": True, "type": bool, "help": "Use optode file to update positions."}, + {"name": "OPTODE_FILE_PATH", "default": "", "type": str, "help": "Optode file location."}, + {"name": "OPTODE_FILE_SEPARATOR", "default": ":", "type": str, "help": "Separator in optode file."}, + ] + }, + { + "title": "Temporal Derivative Distribution Repair filtering", + "params": [ + {"name": "TDDR", "default": True, "type": bool, "help": "Apply TDDR filtering."}, + ] + }, + { + "title": "Wavelet filtering", + "params": [ + {"name": "WAVELET", "default": True, "type": bool, "help": "Apply Wavelet filtering."}, + {"name": "IQR", "default": 1.5, "type": float, "help": "Interquartile Range for Wavelet filter."}, + ] + }, + { + "title": "Heart rate", + "params": [ + {"name": "HEART_RATE", "default": True, "type": bool, "help": "Calculate heart rate."}, + {"name": "SECONDS_TO_STRIP_HR", "default": 5, "type": int, "help": "Seconds to strip for HR calculation."}, + {"name": "MAX_LOW_HR", "default": 40, "type": int, "help": "Minimum heart rate value."}, + {"name": "MAX_HIGH_HR", "default": 200, "type": int, "help": "Maximum heart rate value."}, + {"name": "SMOOTHING_WINDOW_HR", "default": 100, "type": int, "help": "Rolling average window for HR."}, + {"name": "HEART_RATE_WINDOW", "default": 25, "type": int, "help": "Range of BPM around average."}, + {"name": "SHORT_CHANNEL", "default": True, 
"type": bool, "help": "Indicates if data has short channel."}, + {"name": "SHORT_CHANNEL_THRESH", "default": 0.013, "type": float, "help": "Threshold for short channel (m)."}, + ] + }, + { + "title": "Scalp Coupling Index / Peak Spectral Power / Signal to Noise Ratio", + "params": [ + {"name": "SCI", "default": True, "type": bool, "help": "Calculate Scalp Coupling Index."}, + {"name": "SCI_TIME_WINDOW", "default": 3, "type": int, "help": "SCI time window."}, + {"name": "SCI_THRESHOLD", "default": 0.6, "type": float, "help": "SCI threshold (0-1)."}, + {"name": "PSP", "default": True, "type": bool, "help": "Calculate Peak Spectral Power."}, + {"name": "PSP_TIME_WINDOW", "default": 3, "type": int, "help": "PSP time window."}, + {"name": "PSP_THRESHOLD", "default": 0.1, "type": float, "help": "PSP threshold."}, + {"name": "SNR", "default": True, "type": bool, "help": "Calculate Signal to Noise Ratio."}, + {"name": "SNR_TIME_WINDOW", "default": -1, "type": int, "help": "SNR time window."}, + {"name": "SNR_THRESHOLD", "default": 2.0, "type": float, "help": "SNR threshold (dB)."}, + ] + }, + { + "title": "Drop bad channels", + "params": [ + {"name": "EXCLUDE_CHANNELS", "default": True, "type": bool, "help": "Drop channels failing metrics."}, + {"name": "MAX_BAD_CHANNELS", "default": 15, "type": int, "help": "Max bad channels allowed."}, + {"name": "LONG_CHANNEL_THRESH", "default": 0.045, "type": float, "help": "Max distance (m) for channel."}, + ] + }, + { + "title": "Optical Density", + "params": [ + # Intentionally empty (TODO) + ] + }, + { + "title": "Haemoglobin Concentration", + "params": [ + {"name": "PPF", "default": 0.1, "type": float, "help": "Partial Pathlength Factor."}, + ] + }, + { + "title": "Design Matrix", + "params": [ + {"name": "DRIFT_MODEL", "default": "cosine", "type": str, "help": "Drift model for GLM."}, + {"name": "DURATION_BETWEEN_ACTIVITIES", "default": 35, "type": int, "help": "Time between activities (s)."}, + {"name": "SHORT_CHANNEL_REGRESSION", "default": True, "type": bool, "help": "Use short channel regression."}, + ] + }, + { + "title": "General Linear Model", + "params": [ + {"name": "N_JOBS", "default": 1, "type": int, "help": "Number of jobs for processing."}, + ] + }, + { + "title": "Finishing Touches", + "params": [ + # Intentionally empty (TODO) + ] + }, +] + + + +class UpdateDownloadThread(QThread): + """ + Thread that downloads and extracts an update package and emits a signal on completion or error. + + Args: + download_url (str): URL of the update zip file to download. + latest_version (str): Version string of the latest update. 
+ """ + + update_ready = Signal(str, str) + error_occurred = Signal(str) + + def __init__(self, download_url, latest_version): + super().__init__() + self.download_url = download_url + self.latest_version = latest_version + + def run(self): + try: + local_filename = os.path.basename(self.download_url) + + if PLATFORM_NAME == 'darwin': + tmp_dir = '/tmp/flarestempupdate' + os.makedirs(tmp_dir, exist_ok=True) + local_path = os.path.join(tmp_dir, local_filename) + else: + local_path = os.path.join(os.getcwd(), local_filename) + + # Download the file + with requests.get(self.download_url, stream=True, timeout=15) as r: + r.raise_for_status() + with open(local_path, 'wb') as f: + for chunk in r.iter_content(chunk_size=8192): + if chunk: + f.write(chunk) + + # Extract folder name (remove .zip) + if PLATFORM_NAME == 'darwin': + extract_folder = os.path.splitext(local_filename)[0] + extract_path = os.path.join(tmp_dir, extract_folder) + + else: + extract_folder = os.path.splitext(local_filename)[0] + extract_path = os.path.join(os.getcwd(), extract_folder) + + # Create the folder if not exists + os.makedirs(extract_path, exist_ok=True) + + # Extract the zip file contents + if PLATFORM_NAME == 'darwin': + subprocess.run(['ditto', '-xk', local_path, extract_path], check=True) + else: + with zipfile.ZipFile(local_path, 'r') as zip_ref: + zip_ref.extractall(extract_path) + + # Remove the zip once extracted and emit a signal + os.remove(local_path) + self.update_ready.emit(self.latest_version, extract_path) + + except Exception as e: + # Emit a signal signifying failure + self.error_occurred.emit(str(e)) + + + +class UpdateCheckThread(QThread): + """ + Thread that checks for updates by querying the API and emits a signal based on the result. + + Signals: + download_requested(str, str): Emitted with (download_url, latest_version) when an update is available. + no_update_available(): Emitted when no update is found or current version is up to date. + error_occurred(str): Emitted with an error message if the update check fails. + """ + + download_requested = Signal(str, str) + no_update_available = Signal() + error_occurred = Signal(str) + + def run(self): + try: + latest_version, download_url = self.get_latest_release_for_platform() + if not latest_version: + self.no_update_available.emit() + return + + if not download_url: + self.error_occurred.emit(f"No download available for platform '{PLATFORM_NAME}'") + return + + if self.version_compare(latest_version, CURRENT_VERSION) > 0: + self.download_requested.emit(download_url, latest_version) + else: + self.no_update_available.emit() + + except Exception as e: + self.error_occurred.emit(f"Update check failed: {e}") + + def version_compare(self, v1, v2): + def normalize(v): return [int(x) for x in v.split(".")] + return (normalize(v1) > normalize(v2)) - (normalize(v1) < normalize(v2)) + + def get_latest_release_for_platform(self): + response = requests.get(API_URL, timeout=5) + response.raise_for_status() + releases = response.json() + + if not releases: + return None, None + + latest = releases[0] + tag = latest["tag_name"].lstrip("v") + + for asset in latest.get("assets", []): + if PLATFORM_NAME in asset["name"].lower(): + return tag, asset["browser_download_url"] + + return tag, None + + + +class LocalPendingUpdateCheckThread(QThread): + """ + Thread that checks for locally pending updates by scanning the download directory and emits a signal accordingly. + + Args: + current_version (str): Current application version. 
+ platform_suffix (str): Platform-specific suffix to identify update folders. + """ + + pending_update_found = Signal(str, str) + no_pending_update = Signal() + + def __init__(self, current_version, platform_suffix): + super().__init__() + self.current_version = current_version + self.platform_suffix = platform_suffix + + def version_compare(self, v1, v2): + def normalize(v): return [int(x) for x in v.split(".")] + return (normalize(v1) > normalize(v2)) - (normalize(v1) < normalize(v2)) + + def run(self): + if PLATFORM_NAME == 'darwin': + cwd = '/tmp/flarestempupdate' + else: + cwd = os.getcwd() + + pattern = re.compile(r".*-(\d+\.\d+\.\d+)" + re.escape(self.platform_suffix) + r"$") + found = False + + try: + for item in os.listdir(cwd): + folder_path = os.path.join(cwd, item) + if os.path.isdir(folder_path) and item.endswith(self.platform_suffix): + match = pattern.match(item) + if match: + folder_version = match.group(1) + if self.version_compare(folder_version, self.current_version) > 0: + self.pending_update_found.emit(folder_version, folder_path) + found = True + break + except: + pass + + if not found: + self.no_pending_update.emit() + + + +class AboutWindow(QWidget): + """ + Simple About window displaying basic application information. + + Args: + parent (QWidget, optional): Parent widget of this window. Defaults to None. + """ + + def __init__(self, parent=None): + super().__init__(parent, Qt.WindowType.Window) + self.setWindowTitle("About FLARES") + self.resize(250, 100) + + layout = QVBoxLayout() + label = QLabel("About FLARES", self) + label2 = QLabel("fNIRS Lightweight Analysis, Research, & Evaluation Suite", self) + label3 = QLabel("FLARES is licensed under the GPL-3.0 licence. For more information, visit https://www.gnu.org/licenses/gpl-3.0.en.html", self) + label4 = QLabel(f"Version v{CURRENT_VERSION}") + + layout.addWidget(label) + layout.addWidget(label2) + layout.addWidget(label3) + layout.addWidget(label4) + + self.setLayout(layout) + + + +class UserGuideWindow(QWidget): + """ + Simple User Guide window displaying basic information on how to use the software. + + Args: + parent (QWidget, optional): Parent widget of this window. Defaults to None. + """ + + def __init__(self, parent=None): + super().__init__(parent, Qt.WindowType.Window) + self.setWindowTitle("User Guide for FLARES") + self.resize(250, 100) + + layout = QVBoxLayout() + label = QLabel("No user guide available yet!", self) + + layout.addWidget(label) + + self.setLayout(layout) + + + +class ProgressBubble(QWidget): + """ + A clickable widget displaying a progress bar made of colored rectangles and a label. + + Args: + display_name (str): Text to display above the progress bar. + file_path (str): Associated file path stored with the bubble. 
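+
+    Signals:
+        clicked(object): Emitted with this ProgressBubble instance when the bubble is clicked.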
+ + """ + + clicked = Signal(object) + + def __init__(self, display_name, file_path): + super().__init__() + + self.layout = QVBoxLayout() + self.label = QLabel(display_name) + self.label.setAlignment(Qt.AlignmentFlag.AlignCenter) + self.label.setStyleSheet(""" + QLabel { + border: 1px solid #888; + border-radius: 10px; + padding: 8px 12px; + background-color: #e0f0ff; + } + """) + + self.progress_layout = QHBoxLayout() + + self.rects = [] + for _ in range(12): + rect = QFrame() + rect.setFixedSize(10, 20) + rect.setStyleSheet("background-color: white; border: 1px solid gray;") + self.progress_layout.addWidget(rect) + self.rects.append(rect) + + self.layout.addWidget(self.label) + self.layout.addLayout(self.progress_layout) + self.setLayout(self.layout) + + # Store the file path + self.file_path = file_path + + self.current_step = 0 + # Make the bubble clickable + self.setCursor(Qt.CursorShape.PointingHandCursor) + + # Resize policy to make bubbles responsive + # TODO: Not only do this once but when window is resized too + self.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Minimum) + + def update_progress(self, step_index): + self.current_step = step_index + for i, rect in enumerate(self.rects): + if i < step_index: + rect.setStyleSheet("background-color: green; border: 1px solid gray;") + elif i == step_index: + rect.setStyleSheet("background-color: yellow; border: 1px solid gray;") + else: + rect.setStyleSheet("background-color: white; border: 1px solid gray;") + + def mousePressEvent(self, event): + self.clicked.emit(self) + super().mousePressEvent(event) + + + +class ParamSection(QWidget): + """ + A widget section that dynamically creates labeled input fields from parameter metadata. + + Args: + section_data (dict): Dictionary containing section title and list of parameter info. + Expected format: + { + "title": str, + "params": [ + { + "name": str, + "type": type, + "default": any, + "help": str (optional) + }, + ... 
+ ] + } + """ + + def __init__(self, section_data): + super().__init__() + layout = QVBoxLayout() + self.setLayout(layout) + self.widgets = {} + + self.selected_path = None + + # Title label + title_label = QLabel(section_data["title"]) + title_label.setStyleSheet("font-weight: bold; font-size: 14px; margin-top: 10px; margin-bottom: 5px;") + layout.addWidget(title_label) + + # Horizontal line + line = QFrame() + line.setFrameShape(QFrame.Shape.HLine) + line.setFrameShadow(QFrame.Shadow.Sunken) + layout.addWidget(line) + + for param in section_data["params"]: + h_layout = QHBoxLayout() + + label = QLabel(param["name"]) + label.setFixedWidth(180) + + label.setToolTip(param.get("help", "")) + + help_text = param.get("help", "") + + help_btn = QPushButton("?") + help_btn.setFixedWidth(25) + help_btn.setToolTip(help_text) + help_btn.clicked.connect(lambda _, text=help_text: self.show_help_popup(text)) + + + h_layout.addWidget(help_btn) + h_layout.setStretch(0, 1) # Set stretch factor for button (10%) + + h_layout.addWidget(label) + h_layout.setStretch(1, 3) # Set the stretch factor for label (40%) + + + # Create input widget based on type + if param["type"] == bool: + widget = QComboBox() + widget.addItems(["True", "False"]) + widget.setCurrentText(str(param["default"])) + elif param["type"] == int: + widget = QLineEdit() + widget.setValidator(QIntValidator()) + widget.setText(str(param["default"])) + elif param["type"] == float: + widget = QLineEdit() + widget.setValidator(QDoubleValidator()) + widget.setText(str(param["default"])) + else: # str or list, treat as text for now + widget = QLineEdit() + widget.setText(str(param["default"])) + + widget.setToolTip(help_text) + + h_layout.addWidget(widget) + h_layout.setStretch(2, 5) # Set stretch factor for input field (50%) + + layout.addLayout(h_layout) + self.widgets[param["name"]] = widget + + + def show_help_popup(self, text): + msg = QMessageBox(self) + msg.setWindowTitle("Parameter Info") + msg.setText(text) + msg.exec() + + + +class ViewerWindow(QWidget): + """ + Window displaying various fNIRS data visualization and analysis options via buttons. + + Args: + all_results (dict): Analysis results data. + all_haemo (dict): Haemodynamic data per subject. + all_figures (dict): Figures generated from the data. + config_snapshot (dict): Configuration snapshot used for analysis. + parent (QWidget, optional): Parent widget. Defaults to None. 
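+
+    Note:
+        Each button's callback is wrapped by make_safe_callback, so an exception
+        raised by a plotting or export routine is shown in an error dialog
+        instead of crashing the window.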
+ """ + + def __init__(self, all_results, all_haemo, all_figures, config_snapshot, parent=None): + try: + super().__init__(parent, Qt.WindowType.Window) + + self.all_results = all_results + self.all_haemo = all_haemo + self.all_figures = all_figures + self.config_snapshot = config_snapshot + + if not self.all_haemo: + QMessageBox.critical(self, "Data Error", "No haemodynamic data available!") + return + + subjects_dir = resource_path("mne_data/MNE-sample-data/subjects") + os.environ["SUBJECTS_DIR"] = subjects_dir + + # TODO: Thread all of this to not freeze main window + import fNIRS_module + fNIRS_module.set_config(self.config_snapshot, True) # Set globals in this process + + layout = QVBoxLayout() + button_actions = [ + ("Show all Images", lambda: fNIRS_module.show_all_images(self.all_figures)), + ("save_all_images", lambda: fNIRS_module.save_all_images(self.all_figures)), + ("data_to_csv", lambda: fNIRS_module.data_to_csv(self.all_results)), + ("plot_2d_theta_graph", lambda: fNIRS_module.plot_2d_theta_graph(self.all_results)), + ("verify_channel_positions", lambda: fNIRS_module.verify_channel_positions(self.all_haemo[list(self.all_haemo.keys())[0]]["full_layout"])), + ("brain_landmarks_3d", lambda: fNIRS_module.brain_landmarks_3d(self.all_haemo[list(self.all_haemo.keys())[0]]["full_layout"], 'all')), + ("plot_2d_3d_contrasts_between_groups", lambda: fNIRS_module.plot_2d_3d_contrasts_between_groups(self.all_results, self.all_haemo, 'theta', 'all', True)), + ("brain_3d_visualization", lambda: fNIRS_module.brain_3d_visualization(self.all_results, self.all_haemo, 0, 't', 'all', True)), + ("plot_fir_model_results", lambda: fNIRS_module.plot_fir_model_results(self.all_results, self.all_haemo, 0, 'theta')), + ("plot_individual_theta_averages", lambda: fNIRS_module.plot_individual_theta_averages(self.all_results)), + ("plot_group_theta_averages", lambda: fNIRS_module.plot_group_theta_averages(self.all_results)), + ("plot_avg_significant_activity", lambda: fNIRS_module.plot_avg_significant_activity(self.all_haemo[list(self.all_haemo.keys())[0]]["full_layout"], self.all_results, 'theta')), + ("fold_channels", lambda: fNIRS_module.fold_channels(self.all_haemo[list(self.all_haemo.keys())[0]]["full_layout"].copy(), self.all_results, resource_path("mne_data/fOLD/fOLD-public-master/Supplementary"))), + ] + + for text, func in button_actions: + btn = QPushButton(text) + btn.clicked.connect(self.make_safe_callback(func)) + layout.addWidget(btn) + + self.setLayout(layout) + + except Exception as e: + QMessageBox.critical(None, "Startup Error", f"ViewerWindow failed:\n{str(e)}") + + def make_safe_callback(self, func): + def safe_func(): + try: + func() + except Exception as e: + QMessageBox.critical(self, "Error", f"An error occurred:\n{str(e)}") + return safe_func + + + +class MainApplication(QMainWindow): + """ + Main application window that creates and sets up the UI. 
+ """ + + progress_update_signal = Signal(str, int) + + def __init__(self): + super().__init__() + self.setWindowTitle("FLARES") + self.setGeometry(100, 100, 1280, 720) + + self.about = None + self.help = None + self.bubble_widgets = {} + self.param_sections = [] + self.folder_paths = [] + + self.init_ui() + self.create_menu_bar() + + self.platform_suffix = "-" + PLATFORM_NAME + self.pending_update_version = None + self.pending_update_path = None + + # Start local pending update check thread + self.local_check_thread = LocalPendingUpdateCheckThread(CURRENT_VERSION, self.platform_suffix) + self.local_check_thread.pending_update_found.connect(self.on_pending_update_found) + self.local_check_thread.no_pending_update.connect(self.on_no_pending_update) + self.local_check_thread.start() + + + def init_ui(self): + + # Central widget and main horizontal layout + central = QWidget() + self.setCentralWidget(central) + + main_layout = QHBoxLayout() + central.setLayout(main_layout) + + # Left container with vertical layout: top left + bottom left + left_container = QWidget() + left_layout = QVBoxLayout() + left_container.setLayout(left_layout) + left_container.setMinimumWidth(300) + + # Top left widget (for demo, use QTextEdit) + self.top_left_widget = QTextEdit() + self.top_left_widget.setReadOnly(True) + self.top_left_widget.setPlaceholderText("Click a file below to get started! No files below? Open one using File > Open!") + self.top_left_widget.setFixedHeight(250) + + # Bottom left: the bubbles inside a scroll area + self.bubble_container = QWidget() + self.bubble_layout = QGridLayout() + self.bubble_layout.setAlignment(Qt.AlignmentFlag.AlignTop) + self.bubble_container.setLayout(self.bubble_layout) + + self.scroll_area = QScrollArea() + self.scroll_area.setWidgetResizable(True) + self.scroll_area.setWidget(self.bubble_container) + self.scroll_area.setMinimumHeight(300) + + # Add top left and bottom left to left layout + left_layout.addWidget(self.top_left_widget) + left_layout.addWidget(self.scroll_area) + + self.progress_update_signal.connect(self.update_file_progress) + + # Right widget (full height on right side) — example QTextEdit + self.right_container = QWidget() + right_container_layout = QVBoxLayout() + self.right_container.setLayout(right_container_layout) + + # Content widget inside scroll area + self.right_content_widget = QWidget() + right_content_layout = QVBoxLayout() + self.right_content_widget.setLayout(right_content_layout) + + # Option selector dropdown + self.option_selector = QComboBox() + self.option_selector.addItems(["FIR"]) + right_content_layout.addWidget(self.option_selector) + + # Container for the sections + self.rows_container = QWidget() + self.rows_layout = QVBoxLayout() + self.rows_layout.setSpacing(10) + self.rows_container.setLayout(self.rows_layout) + right_content_layout.addWidget(self.rows_container) + + # Spacer at bottom inside scroll area content to push content up + right_content_layout.addStretch() + + # Scroll area for the right side content + self.right_scroll_area = QScrollArea() + self.right_scroll_area.setWidgetResizable(True) + self.right_scroll_area.setWidget(self.right_content_widget) + + # Buttons widget (fixed below the scroll area) + buttons_widget = QWidget() + buttons_layout = QHBoxLayout() + buttons_widget.setLayout(buttons_layout) + buttons_layout.addStretch() + + self.button1 = QPushButton("Process") + self.button2 = QPushButton("Clear") + self.button3 = QPushButton("Analysis") + + buttons_layout.addWidget(self.button1) + 
buttons_layout.addWidget(self.button2) + buttons_layout.addWidget(self.button3) + + self.button1.setMinimumSize(100, 40) + self.button2.setMinimumSize(100, 40) + self.button3.setMinimumSize(100, 40) + + self.button3.setVisible(False) + + self.button1.clicked.connect(self.on_run_task) + self.button2.clicked.connect(self.clear_all) + self.button3.clicked.connect(self.open_viewer_window) + + # Add scroll area and buttons widget to right container layout + right_container_layout.addWidget(self.right_scroll_area) + right_container_layout.addWidget(buttons_widget) + + # Add left and right containers to main layout + main_layout.addWidget(left_container, stretch=55) + main_layout.addWidget(self.right_container, stretch=45) + + # Set size policy to expand + self.right_container.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding) + self.right_scroll_area.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding) + + # Store ParamSection widgets + self.option_selector.currentIndexChanged.connect(self.update_sections) + + # Initial build + self.update_sections(0) + + + def create_menu_bar(self): + '''Menu Bar at the top of the screen''' + + menu_bar = self.menuBar() + + def make_action(name, shortcut=None, slot=None, checkable=False, checked=False, icon=None): + action = QAction(name, self) + + if shortcut: + action.setShortcut(QKeySequence(shortcut)) + if slot: + action.triggered.connect(slot) + if checkable: + action.setCheckable(True) + action.setChecked(checked) + if icon: + action.setIcon(QIcon(icon)) + return action + + # File menu and actions + file_menu = menu_bar.addMenu("File") + file_actions = [ + ("Open File...", "Ctrl+O", self.open_file_dialog, resource_path("icons/file_open_24dp_1F1F1F.svg")), + ("Open Folder...", "Ctrl+Alt+O", self.open_folder_dialog, resource_path("icons/folder_24dp_1F1F1F.svg")), + ("Open Folders...", "Ctrl+Shift+O", self.open_multiple_folders_dialog, resource_path("icons/folder_copy_24dp_1F1F1F.svg")), + ("Load Project...", "Ctrl+L", self.load_project, resource_path("icons/article_24dp_1F1F1F.svg")), + ("Save Project...", "Ctrl+S", self.save_project, resource_path("icons/save_24dp_1F1F1F.svg")), + ("Save Project As...", "Ctrl+Shift+S", self.save_project, resource_path("icons/save_as_24dp_1F1F1F.svg")), # Maybe connect a separate method if different + ] + + for i, (name, shortcut, slot, icon) in enumerate(file_actions): + file_menu.addAction(make_action(name, shortcut, slot, icon=icon)) + if i == 2: # after the first 3 actions (0,1,2) + file_menu.addSeparator() + + file_menu.addSeparator() + file_menu.addAction(make_action("Exit", "Ctrl+Q", QApplication.instance().quit, icon=resource_path("icons/exit_to_app_24dp_1F1F1F.svg"))) + + # Edit menu + edit_menu = menu_bar.addMenu("Edit") + edit_actions = [ + ("Cut", "Ctrl+X", self.cut_text, resource_path("icons/content_cut_24dp_1F1F1F.svg")), + ("Copy", "Ctrl+C", self.copy_text, resource_path("icons/content_copy_24dp_1F1F1F.svg")), + ("Paste", "Ctrl+V", self.paste_text, resource_path("icons/content_paste_24dp_1F1F1F.svg")) + ] + for name, shortcut, slot, icon in edit_actions: + edit_menu.addAction(make_action(name, shortcut, slot, icon=icon)) + + # View menu + view_menu = menu_bar.addMenu("View") + toggle_statusbar_action = make_action("Toggle Status Bar", checkable=True, checked=True, slot=None) + view_menu.addAction(toggle_statusbar_action) + + # Options menu (Help & About) + options_menu = menu_bar.addMenu("Options") + + options_actions = [ + ("User Guide", "F1", self.user_guide, 
resource_path("icons/help_24dp_1F1F1F.svg")), + ("Check for Updates", "F5", self.manual_check_for_updates, resource_path("icons/update_24dp_1F1F1F.svg")), + ("About", "F12", self.about_window, resource_path("icons/info_24dp_1F1F1F.svg")) + ] + + for i, (name, shortcut, slot, icon) in enumerate(options_actions): + options_menu.addAction(make_action(name, shortcut, slot, icon=icon)) + if i == 1: # after the first 2 actions (0,1) + options_menu.addSeparator() + + # Optional: status bar + self.statusbar = self.statusBar() + self.statusbar.showMessage("Ready") + + + def update_sections(self, index): + # Clear previous sections + for i in reversed(range(self.rows_layout.count())): + widget = self.rows_layout.itemAt(i).widget() + if widget is not None: + widget.deleteLater() + self.param_sections.clear() + + # Add ParamSection widgets from SECTIONS + for section in SECTIONS: + section_widget = ParamSection(section) + self.rows_layout.addWidget(section_widget) + + self.param_sections.append(section_widget) + + + def clear_all(self): + + # Clear the bubble layout + while self.bubble_layout.count(): + item = self.bubble_layout.takeAt(0) + widget = item.widget() + if widget: + widget.deleteLater() + + # Clear file data + self.bubble_widgets.clear() + self.statusBar().clearMessage() + + # Reset any other data variables + self.collected_data_snapshot = None + self.all_results = None + self.all_haemo = None + self.all_figures = None + + # Reset any visible UI elements + self.button3.setVisible(False) + self.top_left_widget.clear() + + + def open_viewer_window(self): + if not hasattr(self, "all_figures") or self.all_figures is None: + QMessageBox.warning(self, "No Data", "No figures to show yet!") + return + self.viewer_window = ViewerWindow(self.all_results, self.all_haemo, self.all_figures, self.collected_data_snapshot, self) + self.viewer_window.show() + + + def copy_text(self): + self.top_left_widget.copy() # Trigger copy + self.statusbar.showMessage("Copied to clipboard") # Show status message + + def cut_text(self): + self.top_left_widget.cut() # Trigger cut + self.statusbar.showMessage("Cut to clipboard") # Show status message + + def paste_text(self): + self.top_left_widget.paste() # Trigger paste + self.statusbar.showMessage("Pasted from clipboard") # Show status message + + + def about_window(self): + if self.about is None or not self.about.isVisible(): + self.about = AboutWindow(self) + self.about.show() + + def user_guide(self): + if self.help is None or not self.help.isVisible(): + self.help = UserGuideWindow(self) + self.help.show() + + + def open_file_dialog(self): + file_path, _ = QFileDialog.getOpenFileName( + self, "Open File", "", "All Files (*);;Text Files (*.txt)" + ) + if file_path: + self.selected_path = file_path # store the file path + self.show_files_as_bubbles(file_path) + + def open_folder_dialog(self): + folder_path = QFileDialog.getExistingDirectory( + self, "Select Folder", "" + ) + if folder_path: + self.selected_path = folder_path # store the folder path + self.show_files_as_bubbles(folder_path) + + + def open_multiple_folders_dialog(self): + while True: + folder = QFileDialog.getExistingDirectory(self, "Select Folder") + if not folder: + break + + if not hasattr(self, 'selected_paths'): + self.selected_paths = [] + if folder not in self.selected_paths: + self.selected_paths.append(folder) + + self.show_files_as_bubbles(self.selected_paths) + + # Ask if the user wants to add another + more = QMessageBox.question( + self, + "Add Another?", + "Do you want to select another 
folder?", + QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No, + ) + if more == QMessageBox.StandardButton.No: + break + + def save_project(self): + + filename, _ = QFileDialog.getSaveFileName( + self, "Save Project", "", "FLARE Project (*.flare)" + ) + if not filename: + return + + try: + project_data = { + "file_list": [bubble.file_path for bubble in self.bubble_widgets.values()], + "progress_states": { + bubble.file_path: bubble.current_step for bubble in self.bubble_widgets.values() + }, + "all_results": self.all_results, + "all_haemo": self.all_haemo, + "all_figures": self.all_figures, + "config_snapshot": self.collected_data_snapshot, + } + + with open(filename, "wb") as f: + pickle.dump(project_data, f) + + QMessageBox.information(self, "Success", f"Project saved to:\n{filename}") + + except Exception as e: + QMessageBox.critical(self, "Error", f"Failed to save project:\n{e}") + + + + def load_project(self): + + filename, _ = QFileDialog.getOpenFileName( + self, "Load Project", "", "FLARE Project (*.flare)" + ) + if not filename: + return + + try: + with open(filename, "rb") as f: + data = pickle.load(f) + + self.collected_data_snapshot = data["config_snapshot"] + self.all_results = data["all_results"] + self.all_haemo = data["all_haemo"] + self.all_figures = data["all_figures"] + + for section_widget in self.param_sections: + for name, widget in section_widget.widgets.items(): + if name not in self.collected_data_snapshot: + continue + value = self.collected_data_snapshot[name] + + if isinstance(widget, QComboBox): + widget.setCurrentText("True" if value else "False") + + elif isinstance(widget, QLineEdit): + validator = widget.validator() + + if isinstance(validator, QIntValidator): + widget.setText(str(int(value))) + elif isinstance(validator, QDoubleValidator): + widget.setText(str(float(value))) + else: + widget.setText(str(value)) + + self.show_files_as_bubbles_from_list(data["file_list"], data.get("progress_states", {}), filename) + + # Re-enable the "Viewer" button + self.button3.setVisible(True) + + QMessageBox.information(self, "Loaded", f"Project loaded from:\n{filename}") + + except Exception as e: + QMessageBox.critical(self, "Error", f"Failed to load project:\n{e}") + + + + def show_files_as_bubbles(self, folder_paths): + + if isinstance(folder_paths, str): + folder_paths = [folder_paths] + + # Clear previous bubbles + while self.bubble_layout.count(): + item = self.bubble_layout.takeAt(0) + widget = item.widget() + if widget: + widget.deleteLater() + + temp_bubble = ProgressBubble("Test Bubble", "") # A dummy bubble for measurement + temp_bubble.setSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy. Preferred) + # temp_bubble.setAttribute(Qt.WA_OpaquePaintEvent) # Improve rendering? 
+        temp_bubble.adjustSize()  # Adjust size after the widget is created
+        bubble_width = temp_bubble.width()  # Get the actual width of a bubble
+        available_width = self.bubble_container.width()
+
+        cols = max(1, available_width // bubble_width)  # Ensure at least 1 column
+
+        index = 0
+
+        for folder_path in folder_paths:
+            if not os.path.isdir(folder_path):
+                continue
+
+            files = os.listdir(folder_path)
+            files = [f for f in files if os.path.isfile(os.path.join(folder_path, f))]
+
+            for filename in files:
+                full_path = os.path.join(folder_path, filename)
+                display_name = f"{os.path.basename(folder_path)} / {filename}"
+
+                bubble = ProgressBubble(display_name, full_path)
+                bubble.clicked.connect(self.on_bubble_clicked)
+                self.bubble_widgets[filename] = bubble
+
+                row = index // cols
+                col = index % cols
+                self.bubble_layout.addWidget(bubble, row, col)
+                index += 1
+
+        self.statusBar().showMessage(f"{index} file(s) loaded from: {', '.join(folder_paths)}")
+
+
+    def show_files_as_bubbles_from_list(self, file_list, progress_states=None, filenames=None):
+        progress_states = progress_states or {}
+
+        # Clear old
+        while self.bubble_layout.count():
+            item = self.bubble_layout.takeAt(0)
+            widget = item.widget()
+            if widget:
+                widget.deleteLater()
+
+        self.bubble_widgets = {}
+
+        temp_bubble = ProgressBubble("Test Bubble", "")  # A dummy bubble for measurement
+        temp_bubble.setSizePolicy(QSizePolicy.Policy.Preferred, QSizePolicy.Policy.Preferred)
+        # temp_bubble.setAttribute(Qt.WA_OpaquePaintEvent)  # Improves rendering?
+        temp_bubble.adjustSize()  # Adjust size after the widget is created
+        bubble_width = temp_bubble.width()  # Get the actual width of a bubble
+        available_width = self.bubble_container.width()
+
+        cols = max(1, available_width // bubble_width)  # Ensure at least 1 column
+
+        for index, file_path in enumerate(file_list):
+            filename = os.path.basename(file_path)
+            display_name = f"{os.path.basename(os.path.dirname(file_path))} / {filename}"
+
+            # Create bubble with full path
+            bubble = ProgressBubble(display_name, file_path)
+            bubble.clicked.connect(self.on_bubble_clicked)
+            self.bubble_widgets[file_path] = bubble
+
+            step = progress_states.get(file_path, 0)
+            bubble.update_progress(step)
+
+            row = index // cols
+            col = index % cols
+            self.bubble_layout.addWidget(bubble, row, col)
+
+        self.statusBar().showMessage(f"{len(file_list)} file(s) loaded from {os.path.abspath(filenames)}.")
+
+
+    def on_bubble_clicked(self, bubble):
+        file_path = bubble.file_path
+        if not os.path.exists(file_path):
+            self.top_left_widget.setText("File not found.")
+            return
+
+        size = os.path.getsize(file_path)
+        created = time.ctime(os.path.getctime(file_path))
+        modified = time.ctime(os.path.getmtime(file_path))
+
+        # snirf_info = self.get_snirf_metadata_mne(file_path)
+
+        info = f"""\
+        File: {os.path.basename(file_path)}
+        Size: {size:,} bytes
+        Created: {created}
+        Modified: {modified}
+        Full Path: {file_path}
+        """
+
+        # if "Error" in snirf_info:
+        #     info += f"\nSNIRF Metadata could not be loaded: {snirf_info['Error']}"
+        # else:
+        #     info += "\nSNIRF Metadata:\n"
+        #     for k, v in snirf_info.items():
+        #         if isinstance(v, list):
+        #             info += f"  {k}:\n"
+        #             for item in v:
+        #                 info += f"    - {item}\n"
+        #         else:
+        #             info += f"  {k}: {v}\n"
+
+        self.top_left_widget.setText(info)
+
+    def placeholder(self):
+        QMessageBox.information(self, "Placeholder", "This feature is not implemented yet.")
+
+
+
+
+    '''MODULE FILE'''
+    def on_run_task(self):
+
+        collected_data = {}
+
+        # Add all parameter key-value pairs
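+        # Illustrative shape of the flattened dict (values depend on the form):
+        #   {"P_THRESHOLD": 0.05, "VERBOSITY": True, "BRAIN_MODE": "weighted", ...}
+        # QComboBox fields become bools; QLineEdit fields become int/float/str
+        # according to the validator ParamSection attached.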
+        for section_widget in self.param_sections:
+            for name, widget in section_widget.widgets.items():
+                if isinstance(widget, QComboBox):
+                    val = widget.currentText() == "True"
+                elif isinstance(widget, QLineEdit):
+                    text = widget.text()
+                    validator = widget.validator()
+                    if isinstance(validator, QIntValidator):
+                        val = int(text or 0)
+                    elif isinstance(validator, QDoubleValidator):
+                        val = float(text or 0.0)
+                    else:
+                        val = text
+                else:
+                    val = None
+                collected_data[name] = val  # Flattened!
+
+
+        if hasattr(self, "selected_paths") and self.selected_paths:
+            # Handle multiple folders
+            parents = [Path(p).parent for p in self.selected_paths]
+            base_parents = set(str(p) for p in parents)
+            if len(base_parents) > 1:
+                raise ValueError("Selected folders must have the same parent directory")
+
+            collected_data["BASE_SNIRF_FOLDER"] = base_parents.pop()
+            collected_data["SNIRF_SUBFOLDERS"] = [Path(p).name for p in self.selected_paths]
+            collected_data["STIM_DURATION"] = [0 for _ in self.selected_paths]
+
+        elif hasattr(self, "selected_path") and self.selected_path:
+            # Handle single folder
+            selected_path = Path(self.selected_path)
+            collected_data["BASE_SNIRF_FOLDER"] = str(selected_path.parent)
+            collected_data["SNIRF_SUBFOLDERS"] = [selected_path.name]
+            collected_data["STIM_DURATION"] = [0]
+
+        else:
+            # No folder selected - handle gracefully or raise error
+            raise ValueError("No folder(s) selected")
+
+
+        collected_data["HRF_MODEL"] = 'fir'
+
+        collected_data["MAX_WORKERS"] = 12
+        collected_data["FORCE_DROP_CHANNELS"] = []
+
+        collected_data["TARGET_ACTIVITY"] = "Reach"
+        collected_data["TARGET_CONTROL"] = "Start of Rest"
+        collected_data["ROI_GROUP_1"] = [[1, 1], [1, 2], [2, 1], [2, 4], [3, 1],  # Channel pairings for a region of interest.
+                                         [2, 2], [4, 3], [4, 4], [5, 5], [6, 4]]
+        collected_data["ROI_GROUP_2"] = [[6, 5], [6, 8], [7, 7], [7, 8], [8, 5],  # Channel pairings for another region of interest.
+                                         [8, 6], [9, 6], [9, 7], [10, 7], [10, 8]]
+        collected_data["ROI_GROUP_1_NAME"] = "Parieto-Occipital"  # Friendly name for the first region of interest group.
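+        # The two ROI lists above are [source, detector] pairings, presumably
+        # resolved to channel indices downstream (e.g. via mne_nirs'
+        # picks_pair_to_idx); the *_NAME values are friendly display labels only.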
+ collected_data["ROI_GROUP_2_NAME"] = "Fronto-Parietal" + collected_data["P_THRESHOLD"] = 0.05 + + collected_data["SEE_BAD_IMAGES"] = True + collected_data["ABS_T_VALUE"] = 6 + collected_data["ABS_THETA_VALUE"] = 10 + collected_data["ABS_CONTRAST_T_VALUE"] = 6 + collected_data["ABS_CONTRAST_THETA_VALUE"] = 10 + collected_data["ABS_SIGNIFICANCE_T_VALUE"] = 6 + collected_data["ABS_SIGNIFICANCE_THETA_VALUE"] = 10 + collected_data["BRAIN_DISTANCE"] = 0.02 + collected_data["BRAIN_MODE"] = "weighted" + + collected_data["EPOCH_REJECT_CRITERIA_THRESH"] = 20e-2 + collected_data["TIME_MIN_THRESH"] = -5 + collected_data["TIME_MAX_THRESH"] = 15 + collected_data["VERBOSITY"] = True + + self.collected_data_snapshot = collected_data.copy() + + if current_process().name == 'MainProcess': + + + self.manager = Manager() + self.result_queue = self.manager.Queue() + self.progress_queue = self.manager.Queue() + self.result_process = Process( + target=run_gui_entry_wrapper, + args=(collected_data, self.result_queue, self.progress_queue) + ) + + self.result_process.daemon = False + + + self.result_process.start() + print("start was called") + + self.statusbar.showMessage("Running processing in background...") + + # Poll the queue periodically + self.result_timer = QTimer() + self.result_timer.timeout.connect(self.check_for_pipeline_results) + self.result_timer.start() + + self.statusbar.showMessage("Task started in separate process.") + + + + + def check_for_pipeline_results(self): + while not self.result_queue.empty(): + msg = self.result_queue.get() + + if isinstance(msg, dict): + if msg.get("success"): + all_results, all_haemo, all_figures, all_processes, elapsed_time = msg["result"] + + self.all_results = all_results + self.all_haemo = all_haemo + self.all_figures = all_figures + self.all_processes = all_processes + self.elapsed_time = elapsed_time + + self.statusbar.showMessage(f"Processing complete! Time elapsed: {elapsed_time:.2f} seconds") + + self.button3.setVisible(True) + + else: + error_msg = msg.get("error", "Unknown error") + print("Error during processing:", error_msg) + self.statusbar.showMessage(f"Processing failed! 
{error_msg}") + + self.result_timer.stop() + + self.cleanup_after_process() + return + + elif isinstance(msg, tuple) and msg[0] == 'progress': + _, file_name, step_index = msg + self.progress_update_signal.emit(file_name, step_index) + + + def cleanup_after_process(self): + + if hasattr(self, 'result_process'): + self.result_process.join(timeout=0) + if self.result_process.is_alive(): + self.result_process.terminate() + self.result_process.join() + + if hasattr(self, 'result_queue'): + if 'AutoProxy' in repr(self.result_queue): + pass + else: + self.result_queue.close() + self.result_queue.join_thread() + + if hasattr(self, 'progress_queue'): + if 'AutoProxy' in repr(self.progress_queue): + pass + else: + self.progress_queue.close() + self.progress_queue.join_thread() + + # Shutdown manager to kill its server process and clean up + if hasattr(self, 'manager'): + self.manager.shutdown() + + + def update_file_progress(self, filename, step_index): + bubble = self.bubble_widgets.get(filename) + if bubble: + bubble.update_progress(step_index) + + + '''UPDATER''' + def manual_check_for_updates(self): + self.local_check_thread = LocalPendingUpdateCheckThread(CURRENT_VERSION, self.platform_suffix) + self.local_check_thread.pending_update_found.connect(self.on_pending_update_found) + self.local_check_thread.no_pending_update.connect(self.on_no_pending_update) + self.local_check_thread.start() + + def on_pending_update_found(self, version, folder_path): + self.statusBar().showMessage(f"Pending update found: version {version}") + self.pending_update_version = version + self.pending_update_path = folder_path + self.show_pending_update_popup() + + def on_no_pending_update(self): + # No pending update found locally, start server check directly + self.statusBar().showMessage("No pending local update found. Checking server...") + self.start_update_check_thread() + + def show_pending_update_popup(self): + msg_box = QMessageBox(self) + msg_box.setWindowTitle("Pending Update Found") + msg_box.setText(f"A previously downloaded update (version {self.pending_update_version}) is available at:\n{self.pending_update_path}\nWould you like to install it now?") + install_now_button = msg_box.addButton("Install Now", QMessageBox.ButtonRole.AcceptRole) + install_later_button = msg_box.addButton("Install Later", QMessageBox.ButtonRole.RejectRole) + msg_box.exec() + + if msg_box.clickedButton() == install_now_button: + self.install_update(self.pending_update_path) + else: + self.statusBar().showMessage("Pending update available. Install later.") + # After user dismisses, still check the server for new updates + self.start_update_check_thread() + + def start_update_check_thread(self): + self.check_thread = UpdateCheckThread() + self.check_thread.download_requested.connect(self.on_server_update_requested) + self.check_thread.no_update_available.connect(self.on_server_no_update) + self.check_thread.error_occurred.connect(self.on_error) + self.check_thread.start() + + def on_server_no_update(self): + self.statusBar().showMessage("No new updates found on server.", 5000) + + def on_server_update_requested(self, download_url, latest_version): + if self.pending_update_version: + cmp = self.version_compare(latest_version, self.pending_update_version) + if cmp > 0: + # Server version is newer than pending update + self.statusBar().showMessage(f"Newer version {latest_version} available on server. 
Removing old pending update...") + try: + shutil.rmtree(self.pending_update_path) + self.statusBar().showMessage(f"Deleted old update folder: {self.pending_update_path}") + except Exception as e: + self.statusBar().showMessage(f"Failed to delete old update folder: {e}") + + # Clear pending update info so new download proceeds + self.pending_update_version = None + self.pending_update_path = None + + # Download the new update + self.download_update(download_url, latest_version) + elif cmp == 0: + # Versions equal, no download needed + self.statusBar().showMessage(f"Pending update version {self.pending_update_version} is already latest. No download needed.") + else: + # Server version older than pending? Unlikely but just keep pending update + self.statusBar().showMessage(f"Pending update version {self.pending_update_version} is newer than server version. No action.") + else: + # No pending update, just download + self.download_update(download_url, latest_version) + + def download_update(self, download_url, latest_version): + self.statusBar().showMessage("Downloading update...") + self.download_thread = UpdateDownloadThread(download_url, latest_version) + self.download_thread.update_ready.connect(self.on_update_ready) + self.download_thread.error_occurred.connect(self.on_error) + self.download_thread.start() + + def on_update_ready(self, latest_version, extract_folder): + self.statusBar().showMessage("Update downloaded and extracted.") + + msg_box = QMessageBox(self) + msg_box.setWindowTitle("Update Ready") + msg_box.setText(f"Version {latest_version} has been downloaded and extracted to:\n{extract_folder}\nWould you like to install it now?") + install_now_button = msg_box.addButton("Install Now", QMessageBox.ButtonRole.AcceptRole) + install_later_button = msg_box.addButton("Install Later", QMessageBox.ButtonRole.RejectRole) + + msg_box.exec() + + if msg_box.clickedButton() == install_now_button: + self.install_update(extract_folder) + else: + self.statusBar().showMessage("Update ready. Install later.") + + + def install_update(self, extract_folder): + # Path to updater executable + + if PLATFORM_NAME == 'windows': + updater_path = os.path.join(os.getcwd(), "flares_updater.exe") + elif PLATFORM_NAME == 'darwin': + if getattr(sys, 'frozen', False): + updater_path = os.path.join(os.path.dirname(sys.executable), "../../../flares_updater.app") + else: + updater_path = os.path.join(os.getcwd(), "../flares_updater.app") + + elif PLATFORM_NAME == 'linux': + updater_path = os.path.join(os.getcwd(), "flares_updater") + else: + updater_path = os.getcwd() + + if not os.path.exists(updater_path): + QMessageBox.critical(self, "Error", f"Updater not found at:\n{updater_path}. 
The absolute path was {os.path.abspath(updater_path)}") + return + + # Launch updater with extracted folder path as argument + try: + # Pass current app's executable path for updater to relaunch + main_app_executable = os.path.abspath(sys.argv[0]) + + print(f'Launching updater with: "{updater_path}" "{extract_folder}" "{main_app_executable}"') + + if PLATFORM_NAME == 'darwin': + subprocess.Popen(['open', updater_path, '--args', extract_folder, main_app_executable]) + else: + subprocess.Popen([updater_path, f'{extract_folder}', f'{main_app_executable}'], cwd=os.path.dirname(updater_path)) + + # Close the current app so updater can replace files + sys.exit(0) + + except Exception as e: + QMessageBox.critical(self, "Error", f"[Updater Launch Failed]\n{str(e)}\n{traceback.format_exc()}") + + def on_error(self, message): + # print(f"Error: {message}") + self.statusBar().showMessage(f"Error occurred during update process. {message}") + + def version_compare(self, v1, v2): + def normalize(v): return [int(x) for x in v.split(".")] + return (normalize(v1) > normalize(v2)) - (normalize(v1) < normalize(v2)) + + + def closeEvent(self, event): + # Gracefully shut down multiprocessing children + print("Window is closing. Cleaning up...") + + if hasattr(self, 'manager'): + self.manager.shutdown() + + for child in self.findChildren(QWidget): + if child is not self and child.isVisible(): + child.close() + + kill_child_processes() + + event.accept() + + +def wait_for_process_to_exit(process_name, timeout=10): + """ + Waits for a process with the specified name to exit within a timeout period. + + Args: + process_name (str): Name (or part of the name) of the process to wait for. + timeout (int, optional): Maximum time to wait in seconds. Defaults to 10. + + Returns: + bool: True if the process exited before the timeout, False otherwise. + """ + + print(f"Waiting for {process_name} to exit...") + deadline = time.time() + timeout + while time.time() < deadline: + still_running = False + for proc in psutil.process_iter(['name']): + try: + if proc.info['name'] and process_name.lower() in proc.info['name'].lower(): + still_running = True + print(f"Still running: {proc.info['name']} (PID: {proc.pid})") + break + except (psutil.NoSuchProcess, psutil.AccessDenied): + continue + if not still_running: + print(f"{process_name} has exited.") + return True + time.sleep(0.5) + print(f"{process_name} did not exit in time.") + return False + + +def finish_update_if_needed(): + """ + Completes a pending application update if '--finish-update' is present in the command-line arguments. + """ + + if "--finish-update" in sys.argv: + print("Finishing update...") + + if PLATFORM_NAME == 'darwin': + app_dir = '/tmp/flarestempupdate' + else: + app_dir = os.getcwd() + + # 1. Find update folder + update_folder = None + for entry in os.listdir(app_dir): + entry_path = os.path.join(app_dir, entry) + if os.path.isdir(entry_path) and entry.startswith("flares-") and entry.endswith("-" + PLATFORM_NAME): + update_folder = os.path.join(app_dir, entry) + break + + if update_folder is None: + print("No update folder found. Skipping update steps.") + return + + if PLATFORM_NAME == 'darwin': + update_folder = os.path.join(update_folder, "flares-darwin") + + # 2. 
Wait for flares_updater to exit + print("Waiting for flares_updater to exit...") + for proc in psutil.process_iter(['pid', 'name']): + if proc.info['name'] and "flares_updater" in proc.info['name'].lower(): + try: + proc.wait(timeout=5) + except psutil.TimeoutExpired: + print("Force killing lingering flares_updater") + proc.kill() + + # 3. Replace the updater + if PLATFORM_NAME == 'windows': + new_updater = os.path.join(update_folder, "flares_updater.exe") + dest_updater = os.path.join(app_dir, "flares_updater.exe") + + elif PLATFORM_NAME == 'darwin': + new_updater = os.path.join(update_folder, "flares_updater.app") + dest_updater = os.path.abspath(os.path.join(sys.executable, "../../../../flares_updater.app")) + + elif PLATFORM_NAME == 'linux': + new_updater = os.path.join(update_folder, "flares_updater") + dest_updater = os.path.join(app_dir, "flares_updater") + + else: + print("No platform??") + new_updater = os.getcwd() + dest_updater = os.getcwd() + + print(f"New updater is {new_updater}") + print(f"Dest updater is {dest_updater}") + + print("Writable?", os.access(dest_updater, os.W_OK)) + print("Executable path:", sys.executable) + print("Trying to copy:", new_updater, "->", dest_updater) + + if os.path.exists(new_updater): + try: + if os.path.exists(dest_updater): + if PLATFORM_NAME == 'darwin': + try: + if os.path.isdir(dest_updater): + shutil.rmtree(dest_updater) + print(f"Deleted directory: {dest_updater}") + else: + os.remove(dest_updater) + print(f"Deleted file: {dest_updater}") + except Exception as e: + print(f"Error deleting {dest_updater}: {e}") + else: + os.remove(dest_updater) + + if PLATFORM_NAME == 'darwin': + wait_for_process_to_exit("flares_updater", timeout=10) + subprocess.check_call(["ditto", new_updater, dest_updater]) + else: + shutil.copy2(new_updater, dest_updater) + + if PLATFORM_NAME in ('linux', 'darwin'): + os.chmod(dest_updater, 0o755) + + if PLATFORM_NAME == 'darwin': + remove_quarantine(dest_updater) + + print("flares_updater replaced.") + except Exception as e: + print(f"Failed to replace flares_updater: {e}") + + # 4. Delete the update folder + try: + if PLATFORM_NAME == 'darwin': + shutil.rmtree(app_dir) + else: + shutil.rmtree(update_folder) + except Exception as e: + print(f"Failed to delete update folder: {e}") + + QMessageBox.information(None, "Update Complete", "The application has been successfully updated.") + sys.argv.remove("--finish-update") + + +def remove_quarantine(app_path): + """ + Removes the macOS quarantine attribute from the specified application path. + """ + + script = f''' + do shell script "xattr -d -r com.apple.quarantine {shlex.quote(app_path)}" with administrator privileges with prompt "FLARES needs privileges to finish the update. 
(2/2)" + ''' + try: + subprocess.run(['osascript', '-e', script], check=True) + print("✅ Quarantine attribute removed.") + except subprocess.CalledProcessError as e: + print("⌠Failed to remove quarantine attribute.") + print(e) + + +def run_gui_entry_wrapper(config, gui_queue, progress_queue): + """ + Where the processing happens + """ + + try: + + import fNIRS_module + fNIRS_module.gui_entry(config, gui_queue, progress_queue) + + sys.exit(0) + + except Exception as e: + tb_str = traceback.format_exc() + gui_queue.put({ + "success": False, + "error": f"Child process crashed: {str(e)}\nTraceback:\n{tb_str}" + }) + + +def resource_path(relative_path): + """ + Get absolute path to resource regardless of running directly or packaged using PyInstaller + """ + + if hasattr(sys, '_MEIPASS'): + # PyInstaller bundle path + base_path = sys._MEIPASS + else: + base_path = os.path.abspath(".") + + return os.path.join(base_path, relative_path) + + +def kill_child_processes(): + """ + Goodbye children + """ + + try: + parent = psutil.Process(os.getpid()) + children = parent.children(recursive=True) + for child in children: + try: + child.kill() + except psutil.NoSuchProcess: + pass + psutil.wait_procs(children, timeout=5) + except Exception as e: + print(f"Error killing child processes: {e}") + + +def exception_hook(exc_type, exc_value, exc_traceback): + """ + Method that will display a popup when the program hard crashes containg what went wrong + """ + + error_msg = "".join(traceback.format_exception(exc_type, exc_value, exc_traceback)) + print(error_msg) # also print to console + + kill_child_processes() + + # Show error message box + # Make sure QApplication exists (or create a minimal one) + app = QApplication.instance() + if app is None: + app = QApplication(sys.argv) + + QMessageBox.critical(None, "Unexpected Error", f"An unhandled exception occurred:\n\n{error_msg}") + + # Exit the app after user acknowledges + sys.exit(1) + + +if __name__ == "__main__": + # Redirect exceptions to the popup window + sys.excepthook = exception_hook + + # Set up application logging + if PLATFORM_NAME == "darwin": + log_path = os.path.join(os.path.dirname(sys.executable), "../../../flares.log") + else: + log_path = os.path.join(os.getcwd(), "flares.log") + + os.remove(log_path) + sys.stdout = open(log_path, "a", buffering=1) + sys.stderr = sys.stdout + print(f"\n=== App started at {datetime.now()} ===\n") + + freeze_support() # Required for PyInstaller + multiprocessing + + # Only run GUI in the main process + if current_process().name == 'MainProcess': + app = QApplication(sys.argv) + finish_update_if_needed() + window = MainApplication() + + if PLATFORM_NAME == "darwin": + app.setWindowIcon(QIcon(resource_path("icons/main.icns"))) + window.setWindowIcon(QIcon(resource_path("icons/main.icns"))) + else: + app.setWindowIcon(QIcon(resource_path("icons/main.ico"))) + window.setWindowIcon(QIcon(resource_path("icons/main.ico"))) + window.show() + sys.exit(app.exec()) \ No newline at end of file diff --git a/mne/__init__.py b/mne/__init__.py new file mode 100644 index 0000000..6abe2bc --- /dev/null +++ b/mne/__init__.py @@ -0,0 +1,36 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +"""MNE software for MEG and EEG data analysis.""" +# PEP0440 compatible formatted version, see: +# https://www.python.org/dev/peps/pep-0440/ +# +# Generic release markers: +# X.Y +# X.Y.Z # For bugfix releases +# +# Admissible pre-release markers: +# X.YaN # Alpha release +# X.YbN # Beta release +# X.YrcN # Release Candidate +# X.Y # Final release +# +# Dev branch marker is: 'X.Y.devN' where N is an integer. +# +import lazy_loader as lazy + +try: + from importlib.metadata import version + + __version__ = version("mne") +except Exception: + __version__ = "0.0.0" + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) + +# initialize logging +from .utils import set_log_level, set_log_file + +set_log_level(None, False) +set_log_file() diff --git a/mne/__init__.pyi b/mne/__init__.pyi new file mode 100644 index 0000000..d50b520 --- /dev/null +++ b/mne/__init__.pyi @@ -0,0 +1,430 @@ +__all__ = [ + "AcqParserFIF", + "Annotations", + "BaseEpochs", + "BiHemiLabel", + "Covariance", + "Dipole", + "DipoleFixed", + "Epochs", + "EpochsArray", + "Evoked", + "EvokedArray", + "Forward", + "Info", + "Label", + "MixedSourceEstimate", + "MixedVectorSourceEstimate", + "Projection", + "Report", + "SourceEstimate", + "SourceMorph", + "SourceSpaces", + "Transform", + "VectorSourceEstimate", + "VolSourceEstimate", + "VolVectorSourceEstimate", + "add_reference_channels", + "add_source_space_distances", + "annotations_from_events", + "apply_forward", + "apply_forward_raw", + "average_forward_solutions", + "beamformer", + "channel_indices_by_type", + "channel_type", + "channels", + "chpi", + "combine_evoked", + "commands", + "compute_covariance", + "compute_proj_epochs", + "compute_proj_evoked", + "compute_proj_raw", + "compute_rank", + "compute_raw_covariance", + "compute_source_morph", + "concatenate_epochs", + "concatenate_events", + "concatenate_raws", + "convert_forward_solution", + "coreg", + "count_annotations", + "count_events", + "create_default_subject", + "create_info", + "cuda", + "datasets", + "decimate_surface", + "decoding", + "defaults", + "dig_mri_distances", + "dipole", + "epochs", + "equalize_channels", + "event", + "events_from_annotations", + "export", + "extract_label_time_course", + "filter", + "find_events", + "find_layout", + "find_stim_steps", + "fit_dipole", + "forward", + "get_config", + "get_config_path", + "get_head_surf", + "get_meg_helmet_surf", + "get_montage_volume_labels", + "get_volume_labels_from_aseg", + "get_volume_labels_from_src", + "grade_to_tris", + "grade_to_vertices", + "grand_average", + "grow_labels", + "gui", + "head_to_mni", + "head_to_mri", + "inverse_sparse", + "io", + "label_sign_flip", + "labels_to_stc", + "make_ad_hoc_cov", + "make_bem_model", + "make_bem_solution", + "make_field_map", + "make_fixed_length_epochs", + "make_fixed_length_events", + "make_forward_dipole", + "make_forward_solution", + "make_sphere_model", + "match_channel_orders", + "merge_events", + "minimum_norm", + "morph_labels", + "morph_source_spaces", + "open_docs", + "open_report", + "parse_config", + "pick_channels", + "pick_channels_cov", + "pick_channels_forward", + "pick_channels_regexp", + "pick_events", + "pick_info", + "pick_types", + "pick_types_forward", + "preprocessing", + "random_parcellation", + "read_annotations", + "read_bem_solution", + "read_bem_surfaces", + "read_cov", + "read_dipole", + "read_epochs", + "read_epochs_eeglab", + "read_epochs_fieldtrip", + "read_epochs_kit", + "read_events", + "read_evoked_besa", + "read_evoked_fieldtrip", + "read_evokeds", 
+ "read_evokeds_mff", + "read_forward_solution", + "read_freesurfer_lut", + "read_label", + "read_labels_from_annot", + "read_lta", + "read_morph_map", + "read_proj", + "read_reject_parameters", + "read_source_estimate", + "read_source_morph", + "read_source_spaces", + "read_surface", + "read_talxfm", + "read_trans", + "read_tri", + "read_vectorview_selection", + "rename_channels", + "report", + "scale_bem", + "scale_labels", + "scale_mri", + "scale_source_space", + "sensitivity_map", + "set_bipolar_reference", + "set_cache_dir", + "set_config", + "set_eeg_reference", + "set_log_file", + "set_log_level", + "set_memmap_min_size", + "setup_source_space", + "setup_volume_source_space", + "simulation", + "source_space", + "spatial_dist_adjacency", + "spatial_inter_hemi_adjacency", + "spatial_src_adjacency", + "spatial_tris_adjacency", + "spatio_temporal_dist_adjacency", + "spatio_temporal_src_adjacency", + "spatio_temporal_tris_adjacency", + "split_label", + "stats", + "stc_near_sensors", + "stc_to_label", + "surface", + "sys_info", + "time_frequency", + "transform_surface_to", + "use_coil_def", + "use_log_level", + "verbose", + "vertex_to_mni", + "viz", + "what", + "whiten_evoked", + "write_bem_solution", + "write_bem_surfaces", + "write_cov", + "write_events", + "write_evokeds", + "write_forward_solution", + "write_head_bem", + "write_label", + "write_labels_to_annot", + "write_proj", + "write_source_spaces", + "write_surface", + "write_trans", +] +from . import ( + beamformer, + channels, + chpi, + commands, + coreg, + cuda, + datasets, + decoding, + defaults, + dipole, + epochs, + event, + export, + filter, + forward, + gui, + inverse_sparse, + io, + minimum_norm, + preprocessing, + report, + simulation, + source_space, + stats, + surface, + time_frequency, + viz, +) +from ._fiff.meas_info import Info, create_info +from ._fiff.pick import ( + channel_indices_by_type, + channel_type, + pick_channels, + pick_channels_cov, + pick_channels_forward, + pick_channels_regexp, + pick_info, + pick_types, + pick_types_forward, +) +from ._fiff.proj import Projection +from ._fiff.reference import ( + add_reference_channels, + set_bipolar_reference, + set_eeg_reference, +) +from ._fiff.what import what +from ._freesurfer import ( + get_volume_labels_from_aseg, + head_to_mni, + head_to_mri, + read_freesurfer_lut, + read_lta, + read_talxfm, + vertex_to_mni, +) +from .annotations import ( + Annotations, + annotations_from_events, + count_annotations, + events_from_annotations, + read_annotations, +) +from .bem import ( + make_bem_model, + make_bem_solution, + make_sphere_model, + read_bem_solution, + read_bem_surfaces, + write_bem_solution, + write_bem_surfaces, + write_head_bem, +) +from .channels import ( + equalize_channels, + find_layout, + read_vectorview_selection, + rename_channels, +) +from .coreg import ( + create_default_subject, + scale_bem, + scale_labels, + scale_mri, + scale_source_space, +) +from .cov import ( + Covariance, + compute_covariance, + compute_raw_covariance, + make_ad_hoc_cov, + read_cov, + whiten_evoked, + write_cov, +) +from .dipole import Dipole, DipoleFixed, fit_dipole, read_dipole +from .epochs import ( + BaseEpochs, + Epochs, + EpochsArray, + concatenate_epochs, + make_fixed_length_epochs, + read_epochs, +) +from .event import ( + AcqParserFIF, + concatenate_events, + count_events, + find_events, + find_stim_steps, + make_fixed_length_events, + merge_events, + pick_events, + read_events, + write_events, +) +from .evoked import Evoked, EvokedArray, combine_evoked, 
read_evokeds, write_evokeds +from .forward import ( + Forward, + apply_forward, + apply_forward_raw, + average_forward_solutions, + convert_forward_solution, + make_field_map, + make_forward_dipole, + make_forward_solution, + read_forward_solution, + use_coil_def, + write_forward_solution, +) +from .io import ( + read_epochs_fieldtrip, + read_evoked_besa, + read_evoked_fieldtrip, + read_evokeds_mff, +) +from .io.base import concatenate_raws, match_channel_orders +from .io.eeglab import read_epochs_eeglab +from .io.kit import read_epochs_kit +from .label import ( + BiHemiLabel, + Label, + grow_labels, + label_sign_flip, + labels_to_stc, + morph_labels, + random_parcellation, + read_label, + read_labels_from_annot, + split_label, + stc_to_label, + write_label, + write_labels_to_annot, +) +from .misc import parse_config, read_reject_parameters +from .morph import ( + SourceMorph, + compute_source_morph, + grade_to_vertices, + read_source_morph, +) +from .morph_map import read_morph_map +from .proj import ( + compute_proj_epochs, + compute_proj_evoked, + compute_proj_raw, + read_proj, + sensitivity_map, + write_proj, +) +from .rank import compute_rank +from .report import Report, open_report +from .source_estimate import ( + MixedSourceEstimate, + MixedVectorSourceEstimate, + SourceEstimate, + VectorSourceEstimate, + VolSourceEstimate, + VolVectorSourceEstimate, + extract_label_time_course, + grade_to_tris, + read_source_estimate, + spatial_dist_adjacency, + spatial_inter_hemi_adjacency, + spatial_src_adjacency, + spatial_tris_adjacency, + spatio_temporal_dist_adjacency, + spatio_temporal_src_adjacency, + spatio_temporal_tris_adjacency, + stc_near_sensors, +) +from .source_space._source_space import ( + SourceSpaces, + add_source_space_distances, + get_volume_labels_from_src, + morph_source_spaces, + read_source_spaces, + setup_source_space, + setup_volume_source_space, + write_source_spaces, +) +from .surface import ( + decimate_surface, + dig_mri_distances, + get_head_surf, + get_meg_helmet_surf, + get_montage_volume_labels, + read_surface, + read_tri, + write_surface, +) +from .transforms import Transform, read_trans, transform_surface_to, write_trans +from .utils import ( + get_config, + get_config_path, + grand_average, + open_docs, + set_cache_dir, + set_config, + set_log_file, + set_log_level, + set_memmap_min_size, + sys_info, + use_log_level, + verbose, +) diff --git a/mne/__main__.py b/mne/__main__.py new file mode 100644 index 0000000..e0a0dc5 --- /dev/null +++ b/mne/__main__.py @@ -0,0 +1,8 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .commands.utils import main + +if __name__ == "__main__": + main() diff --git a/mne/_fiff/__init__.py b/mne/_fiff/__init__.py new file mode 100644 index 0000000..48a5871 --- /dev/null +++ b/mne/_fiff/__init__.py @@ -0,0 +1,8 @@ +"""Private module for FIF basic I/O routines.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# All imports should be done directly to submodules, so we don't import +# anything here or use lazy_loader. diff --git a/mne/_fiff/_digitization.py b/mne/_fiff/_digitization.py new file mode 100644 index 0000000..e55fd5d --- /dev/null +++ b/mne/_fiff/_digitization.py @@ -0,0 +1,605 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +import heapq +from collections import Counter + +import numpy as np + +from ..utils import Bunch, _check_fname, _validate_type, logger, verbose, warn +from .constants import FIFF, _coord_frame_named +from .tag import read_tag +from .tree import dir_tree_find +from .write import _safe_name_list, start_and_end_file, write_dig_points + +_dig_kind_dict = { + "cardinal": FIFF.FIFFV_POINT_CARDINAL, + "hpi": FIFF.FIFFV_POINT_HPI, + "eeg": FIFF.FIFFV_POINT_EEG, + "extra": FIFF.FIFFV_POINT_EXTRA, +} +_dig_kind_ints = tuple(sorted(_dig_kind_dict.values())) +_dig_kind_proper = { + "cardinal": "Cardinal", + "hpi": "HPI", + "eeg": "EEG", + "extra": "Extra", + "unknown": "Unknown", +} +_dig_kind_rev = {val: key for key, val in _dig_kind_dict.items()} +_cardinal_kind_rev = {1: "LPA", 2: "Nasion", 3: "RPA", 4: "Inion"} + + +def _format_dig_points(dig, enforce_order=False): + """Format the dig points nicely.""" + if enforce_order and dig is not None: + # reorder points based on type: + # Fiducials/HPI, EEG, extra (headshape) + fids_digpoints = [] + hpi_digpoints = [] + eeg_digpoints = [] + extra_digpoints = [] + head_digpoints = [] + + # use a heap to enforce order on FIDS, EEG, Extra + for idx, digpoint in enumerate(dig): + ident = digpoint["ident"] + kind = digpoint["kind"] + + # push onto heap based on 'ident' (for the order) for + # each of the possible DigPoint 'kind's + # keep track of 'idx' in case of any clashes in + # the 'ident' variable, which can occur when + # user passes in DigMontage + DigMontage + if kind == FIFF.FIFFV_POINT_CARDINAL: + heapq.heappush(fids_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_HPI: + heapq.heappush(hpi_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_EEG: + heapq.heappush(eeg_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_EXTRA: + heapq.heappush(extra_digpoints, (ident, idx, digpoint)) + elif kind == FIFF.FIFFV_POINT_HEAD: + heapq.heappush(head_digpoints, (ident, idx, digpoint)) + + # now recreate dig based on sorted order + fids_digpoints.sort(), hpi_digpoints.sort() + eeg_digpoints.sort() + extra_digpoints.sort(), head_digpoints.sort() + new_dig = [] + for idx, d in enumerate( + fids_digpoints + + hpi_digpoints + + extra_digpoints + + eeg_digpoints + + head_digpoints + ): + new_dig.append(d[-1]) + dig = new_dig + + return [DigPoint(d) for d in dig] if dig is not None else dig + + +def _get_dig_eeg(dig): + return [d for d in dig if d["kind"] == FIFF.FIFFV_POINT_EEG] + + +def _count_points_by_type(dig): + """Get the number of points of each type.""" + occurrences = Counter([d["kind"] for d in dig]) + return dict( + fid=occurrences[FIFF.FIFFV_POINT_CARDINAL], + hpi=occurrences[FIFF.FIFFV_POINT_HPI], + eeg=occurrences[FIFF.FIFFV_POINT_EEG], + extra=occurrences[FIFF.FIFFV_POINT_EXTRA], + ) + + +_dig_keys = {"kind", "ident", "r", "coord_frame"} + + +class DigPoint(dict): + """Container for a digitization point. + + This is a simple subclass of the standard dict type designed to provide + a readable string representation. + + Parameters + ---------- + kind : int + The kind of channel, + e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``. + r : array, shape (3,) + 3D position in m. and coord_frame. + ident : int + Number specifying the identity of the point. + e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``, + or 42 if kind is ``FIFFV_POINT_EEG``. + coord_frame : int + The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``. 
+ """ + + def __repr__(self): # noqa: D105 + from ..transforms import _coord_frame_name + + if self["kind"] == FIFF.FIFFV_POINT_CARDINAL: + id_ = _cardinal_kind_rev.get(self["ident"], "Unknown cardinal") + else: + id_ = _dig_kind_proper[_dig_kind_rev.get(self["kind"], "unknown")] + id_ = f"{id_} #{self['ident']}" + id_ = id_.rjust(10) + cf = _coord_frame_name(self["coord_frame"]) + x, y, z = self["r"] + if "voxel" in cf: + pos = (f"({x:0.1f}, {y:0.1f}, {z:0.1f})").ljust(25) + else: + pos = (f"({x * 1e3:0.1f}, {y * 1e3:0.1f}, {z * 1e3:0.1f}) mm").ljust(25) + return f"" + + # speed up info copy by only deep copying the mutable item + def __deepcopy__(self, memodict): + """Make a deepcopy.""" + return DigPoint( + kind=self["kind"], + r=self["r"].copy(), + ident=self["ident"], + coord_frame=self["coord_frame"], + ) + + def __eq__(self, other): # noqa: D105 + """Compare two DigPoints. + + Two digpoints are equal if they are the same kind, share the same + coordinate frame and position. + """ + my_keys = ["kind", "ident", "coord_frame"] + if set(self.keys()) != set(other.keys()): + return False + elif any(self[_] != other[_] for _ in my_keys): + return False + else: + return np.allclose(self["r"], other["r"]) + + +def _read_dig_fif(fid, meas_info, *, return_ch_names=False): + """Read digitizer data from a FIFF file.""" + isotrak = dir_tree_find(meas_info, FIFF.FIFFB_ISOTRAK) + dig = None + ch_names = None + if len(isotrak) == 0: + logger.info("Isotrak not found") + elif len(isotrak) > 1: + warn("Multiple Isotrak found") + else: + isotrak = isotrak[0] + coord_frame = FIFF.FIFFV_COORD_HEAD + dig = [] + for k in range(isotrak["nent"]): + kind = isotrak["directory"][k].kind + pos = isotrak["directory"][k].pos + if kind == FIFF.FIFF_DIG_POINT: + tag = read_tag(fid, pos) + dig.append(tag.data) + elif kind == FIFF.FIFF_MNE_COORD_FRAME: + tag = read_tag(fid, pos) + coord_frame = _coord_frame_named.get(int(tag.data.item())) + elif kind == FIFF.FIFF_MNE_CH_NAME_LIST: + tag = read_tag(fid, pos) + ch_names = _safe_name_list(tag.data, "read", "ch_names") + for d in dig: + d["coord_frame"] = coord_frame + out = _format_dig_points(dig) + if return_ch_names: + out = (out, ch_names) + return out + + +@verbose +def write_dig( + fname, pts, coord_frame=None, *, ch_names=None, overwrite=False, verbose=None +): + """Write digitization data to a FIF file. + + Parameters + ---------- + fname : path-like + Destination file name. + pts : iterator of dict + Iterator through digitizer points. Each point is a dictionary with + the keys 'kind', 'ident' and 'r'. + coord_frame : int | str | None + If all the points have the same coordinate frame, specify the type + here. Can be None (default) if the points could have varying + coordinate frames. + ch_names : list of str | None + Channel names associated with the digitization points, if available. + + .. versionadded:: 1.9 + %(overwrite)s + + .. versionadded:: 1.0 + %(verbose)s + + .. versionadded:: 1.0 + """ + from ..transforms import _to_const + + fname = _check_fname(fname, overwrite=overwrite) + if coord_frame is not None: + coord_frame = _to_const(coord_frame) + pts_frames = {pt.get("coord_frame", coord_frame) for pt in pts} + bad_frames = pts_frames - {coord_frame} + if len(bad_frames) > 0: + raise ValueError( + "Points have coord_frame entries that are incompatible with " + f"coord_frame={coord_frame}: {tuple(bad_frames)}." 
+ ) + _validate_type(ch_names, (None, list, tuple), "ch_names") + if ch_names is not None: + for ci, ch_name in enumerate(ch_names): + _validate_type(ch_name, str, f"ch_names[{ci}]") + + with start_and_end_file(fname) as fid: + write_dig_points( + fid, pts, block=True, coord_frame=coord_frame, ch_names=ch_names + ) + + +_cardinal_ident_mapping = { + FIFF.FIFFV_POINT_NASION: "nasion", + FIFF.FIFFV_POINT_LPA: "lpa", + FIFF.FIFFV_POINT_RPA: "rpa", +} + + +def _ensure_fiducials_head(dig): + # Ensure that there are all three fiducials in the head coord frame + fids = dict() + for d in dig: + if d["kind"] == FIFF.FIFFV_POINT_CARDINAL: + name = _cardinal_ident_mapping.get(d["ident"], None) + if name is not None: + fids[name] = d + radius = None + mults = dict( + lpa=[-1, 0, 0], + rpa=[1, 0, 0], + nasion=[0, 1, 0], + ) + for ident, name in _cardinal_ident_mapping.items(): + if name not in fids: + if radius is None: + radius = [ + np.linalg.norm(d["r"]) + for d in dig + if d["coord_frame"] == FIFF.FIFFV_COORD_HEAD + and not np.isnan(d["r"]).any() + ] + if not radius: + return # can't complete, no head points + radius = np.mean(radius) + dig.append( + DigPoint( + kind=FIFF.FIFFV_POINT_CARDINAL, + ident=ident, + r=np.array(mults[name], float) * radius, + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + ) + + +# XXXX: +# This does something really similar to _read_dig_montage_fif but: +# - does not check coord_frame +# - does not do any operation that implies assumptions with the names +def _get_data_as_dict_from_dig(dig, exclude_ref_channel=True): + """Obtain coordinate data from a Dig. + + Parameters + ---------- + dig : list of dicts + A container of DigPoints to be added to the info['dig']. + + Returns + ------- + ch_pos : dict + The container of all relevant channel positions inside dig. 
+ """ + # Split up the dig points by category + hsp, hpi, elp = list(), list(), list() + fids, dig_ch_pos_location = dict(), list() + dig = [] if dig is None else dig + + for d in dig: + if d["kind"] == FIFF.FIFFV_POINT_CARDINAL: + fids[_cardinal_ident_mapping[d["ident"]]] = d["r"] + elif d["kind"] == FIFF.FIFFV_POINT_HPI: + hpi.append(d["r"]) + elp.append(d["r"]) + elif d["kind"] == FIFF.FIFFV_POINT_EXTRA: + hsp.append(d["r"]) + elif d["kind"] == FIFF.FIFFV_POINT_EEG: + if d["ident"] != 0 or not exclude_ref_channel: + dig_ch_pos_location.append(d["r"]) + + dig_coord_frames = set([d["coord_frame"] for d in dig]) + if len(dig_coord_frames) == 0: + dig_coord_frames = set([FIFF.FIFFV_COORD_HEAD]) + if len(dig_coord_frames) != 1: + raise RuntimeError( + "Only single coordinate frame in dig is supported, " + f"got {dig_coord_frames}" + ) + dig_ch_pos_location = np.array(dig_ch_pos_location) + dig_ch_pos_location.shape = (-1, 3) # empty will be (0, 3) + return Bunch( + nasion=fids.get("nasion", None), + lpa=fids.get("lpa", None), + rpa=fids.get("rpa", None), + hsp=np.array(hsp) if len(hsp) else None, + hpi=np.array(hpi) if len(hpi) else None, + elp=np.array(elp) if len(elp) else None, + dig_ch_pos_location=dig_ch_pos_location, + coord_frame=dig_coord_frames.pop(), + ) + + +def _get_fid_coords(dig, raise_error=True): + fid_coords = Bunch(nasion=None, lpa=None, rpa=None) + fid_coord_frames = dict() + + for d in dig: + if d["kind"] == FIFF.FIFFV_POINT_CARDINAL: + key = _cardinal_ident_mapping[d["ident"]] + fid_coords[key] = d["r"] + fid_coord_frames[key] = d["coord_frame"] + + if len(fid_coord_frames) > 0 and raise_error: + if set(fid_coord_frames.keys()) != set(["nasion", "lpa", "rpa"]): + raise ValueError( + f"Some fiducial points are missing (got {fid_coord_frames.keys()})." + ) + + if len(set(fid_coord_frames.values())) > 1: + raise ValueError( + "All fiducial points must be in the same coordinate system " + f"(got {len(fid_coord_frames)})" + ) + + coord_frame = fid_coord_frames.popitem()[1] if fid_coord_frames else None + + return fid_coords, coord_frame + + +def _coord_frame_const(coord_frame): + from ..transforms import _str_to_frame + + if not isinstance(coord_frame, str) or coord_frame not in _str_to_frame: + raise ValueError( + f"coord_frame must be one of {sorted(_str_to_frame.keys())}, got " + f"{coord_frame}" + ) + return _str_to_frame[coord_frame] + + +def _make_dig_points( + nasion=None, + lpa=None, + rpa=None, + hpi=None, + extra_points=None, + dig_ch_pos=None, + *, + coord_frame="head", + add_missing_fiducials=False, +): + """Construct digitizer info for the info. + + Parameters + ---------- + nasion : array-like | numpy.ndarray, shape (3,) | None + Point designated as the nasion point. + lpa : array-like | numpy.ndarray, shape (3,) | None + Point designated as the left auricular point. + rpa : array-like | numpy.ndarray, shape (3,) | None + Point designated as the right auricular point. + hpi : array-like | numpy.ndarray, shape (n_points, 3) | None + Points designated as head position indicator points. + extra_points : array-like | numpy.ndarray, shape (n_points, 3) + Points designed as the headshape points. + dig_ch_pos : dict + Dict of EEG channel positions. + coord_frame : str + The coordinate frame of the points. Usually this is "unknown" + for native digitizer space. Defaults to "head". + add_missing_fiducials : bool + If True, add fiducials to the dig points if they are not present. + Requires that coord_frame='head' and that lpa, nasion, and rpa are all + None. 
+ + Returns + ------- + dig : list of dicts + A container of DigPoints to be added to the info['dig']. + """ + coord_frame = _coord_frame_const(coord_frame) + + dig = [] + if lpa is not None: + lpa = np.asarray(lpa) + if lpa.shape != (3,): + raise ValueError(f"LPA should have the shape (3,) instead of {lpa.shape}") + dig.append( + { + "r": lpa, + "ident": FIFF.FIFFV_POINT_LPA, + "kind": FIFF.FIFFV_POINT_CARDINAL, + "coord_frame": coord_frame, + } + ) + if nasion is not None: + nasion = np.asarray(nasion) + if nasion.shape != (3,): + raise ValueError( + f"Nasion should have the shape (3,) instead of {nasion.shape}" + ) + dig.append( + { + "r": nasion, + "ident": FIFF.FIFFV_POINT_NASION, + "kind": FIFF.FIFFV_POINT_CARDINAL, + "coord_frame": coord_frame, + } + ) + if rpa is not None: + rpa = np.asarray(rpa) + if rpa.shape != (3,): + raise ValueError(f"RPA should have the shape (3,) instead of {rpa.shape}") + dig.append( + { + "r": rpa, + "ident": FIFF.FIFFV_POINT_RPA, + "kind": FIFF.FIFFV_POINT_CARDINAL, + "coord_frame": coord_frame, + } + ) + if hpi is not None: + hpi = np.asarray(hpi) + if hpi.ndim != 2 or hpi.shape[1] != 3: + raise ValueError( + f"HPI should have the shape (n_points, 3) instead of {hpi.shape}" + ) + for idx, point in enumerate(hpi): + dig.append( + { + "r": point, + "ident": idx + 1, + "kind": FIFF.FIFFV_POINT_HPI, + "coord_frame": coord_frame, + } + ) + if extra_points is not None: + extra_points = np.asarray(extra_points) + if len(extra_points) and extra_points.shape[1] != 3: + raise ValueError( + "Points should have the shape (n_points, 3) instead of " + f"{extra_points.shape}" + ) + for idx, point in enumerate(extra_points): + dig.append( + { + "r": point, + "ident": idx + 1, + "kind": FIFF.FIFFV_POINT_EXTRA, + "coord_frame": coord_frame, + } + ) + if dig_ch_pos is not None: + idents = [] + use_arange = False + for key, value in dig_ch_pos.items(): + _validate_type(key, str, "dig_ch_pos") + try: + idents.append(int(key[-3:])) + except ValueError: + use_arange = True + _validate_type(value, (np.ndarray, list, tuple), "dig_ch_pos") + value = np.array(value, dtype=float) + dig_ch_pos[key] = value + if value.shape != (3,): + raise RuntimeError( + "The position should be a 1D array of 3 floats. " + f"Provided shape {value.shape}." + ) + if use_arange: + idents = np.arange(1, len(dig_ch_pos) + 1) + for key, ident in zip(dig_ch_pos, idents): + dig.append( + { + "r": dig_ch_pos[key], + "ident": int(ident), + "kind": FIFF.FIFFV_POINT_EEG, + "coord_frame": coord_frame, + } + ) + if add_missing_fiducials: + assert coord_frame == FIFF.FIFFV_COORD_HEAD + # These being none is really an assumption that if you have one you + # should have all three. But we can relax this later if necessary. 
+ assert lpa is None + assert rpa is None + assert nasion is None + _ensure_fiducials_head(dig) + + return _format_dig_points(dig) + + +def _call_make_dig_points(nasion, lpa, rpa, hpi, extra, convert=True): + from ..transforms import ( + Transform, + apply_trans, + get_ras_to_neuromag_trans, + ) + + if convert: + neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa) + nasion = apply_trans(neuromag_trans, nasion) + lpa = apply_trans(neuromag_trans, lpa) + rpa = apply_trans(neuromag_trans, rpa) + + if hpi is not None: + hpi = apply_trans(neuromag_trans, hpi) + + extra = apply_trans(neuromag_trans, extra).astype(np.float32) + else: + neuromag_trans = None + + ctf_head_t = Transform(fro="ctf_head", to="head", trans=neuromag_trans) + + info_dig = _make_dig_points( + nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, extra_points=extra + ) + + return info_dig, ctf_head_t + + +############################################################################## +# From artemis123 (we have modified the function a bit) +def _artemis123_read_pos(nas, lpa, rpa, hpi, extra): + # move into MNE head coords + dig_points, _ = _call_make_dig_points(nas, lpa, rpa, hpi, extra) + return dig_points + + +############################################################################## +# From bti +def _make_bti_dig_points( + nasion, + lpa, + rpa, + hpi, + extra, + convert=False, + use_hpi=False, + bti_dev_t=False, + dev_ctf_t=False, +): + from ..transforms import ( + Transform, + combine_transforms, + invert_transform, + ) + + _hpi = hpi if use_hpi else None + info_dig, ctf_head_t = _call_make_dig_points(nasion, lpa, rpa, _hpi, extra, convert) + + if convert: + t = combine_transforms( + invert_transform(bti_dev_t), dev_ctf_t, "meg", "ctf_head" + ) + dev_head_t = combine_transforms(t, ctf_head_t, "meg", "head") + else: + dev_head_t = Transform("meg", "head", trans=None) + + return info_dig, dev_head_t, ctf_head_t # ctf_head_t should not be needed diff --git a/mne/_fiff/compensator.py b/mne/_fiff/compensator.py new file mode 100644 index 0000000..914dc1b --- /dev/null +++ b/mne/_fiff/compensator.py @@ -0,0 +1,168 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
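+
+# ---------------------------------------------------------------------------
+# Editor's note: a minimal usage sketch for _make_dig_points() from
+# _digitization.py above. Illustrative only (not part of the module); the
+# coordinates are made-up head-frame values in meters, and the import paths
+# assume the package layout added in this diff.
+#
+#     >>> from mne._fiff._digitization import _make_dig_points
+#     >>> from mne._fiff.constants import FIFF
+#     >>> dig = _make_dig_points(
+#     ...     nasion=[0.0, 0.10, 0.0],
+#     ...     lpa=[-0.08, 0.0, 0.0],
+#     ...     rpa=[0.08, 0.0, 0.0],
+#     ...     coord_frame="head",
+#     ... )
+#     >>> [d["kind"] == FIFF.FIFFV_POINT_CARDINAL for d in dig]
+#     [True, True, True]
+# ---------------------------------------------------------------------------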
+ +import numpy as np + +from ..utils import fill_doc +from .constants import FIFF + + +def get_current_comp(info): + """Get the current compensation in effect in the data.""" + comp = None + first_comp = -1 + for k, chan in enumerate(info["chs"]): + if chan["kind"] == FIFF.FIFFV_MEG_CH: + comp = int(chan["coil_type"]) >> 16 + if first_comp < 0: + first_comp = comp + elif comp != first_comp: + raise ValueError("Compensation is not set equally on all MEG channels") + return comp + + +def set_current_comp(info, comp): + """Set the current compensation in effect in the data.""" + comp_now = get_current_comp(info) + for k, chan in enumerate(info["chs"]): + if chan["kind"] == FIFF.FIFFV_MEG_CH: + rem = chan["coil_type"] - (comp_now << 16) + chan["coil_type"] = int(rem + (comp << 16)) + + +def _make_compensator(info, grade): + """Auxiliary function for make_compensator.""" + for k in range(len(info["comps"])): + if info["comps"][k]["kind"] == grade: + this_data = info["comps"][k]["data"] + + # Create the preselector + presel = np.zeros((this_data["ncol"], info["nchan"])) + for col, col_name in enumerate(this_data["col_names"]): + ind = [k for k, ch in enumerate(info["ch_names"]) if ch == col_name] + if len(ind) == 0: + raise ValueError(f"Channel {col_name} is not available in data") + elif len(ind) > 1: + raise ValueError(f"Ambiguous channel {col_name}") + presel[col, ind[0]] = 1.0 + + # Create the postselector (zero entries for channels not found) + postsel = np.zeros((info["nchan"], this_data["nrow"])) + for c, ch_name in enumerate(info["ch_names"]): + ind = [ + k for k, ch in enumerate(this_data["row_names"]) if ch == ch_name + ] + if len(ind) > 1: + raise ValueError(f"Ambiguous channel {ch_name}") + elif len(ind) == 1: + postsel[c, ind[0]] = 1.0 + # else, don't use it at all (postsel[c, ?] = 0.0) by allocation + this_comp = np.dot(postsel, np.dot(this_data["data"], presel)) + return this_comp + + raise ValueError(f"Desired compensation matrix (grade = {grade:d}) not found") + + +@fill_doc +def make_compensator(info, from_, to, exclude_comp_chs=False): + """Return compensation matrix eg. for CTF system. + + Create a compensation matrix to bring the data from one compensation + state to another. + + Parameters + ---------- + %(info_not_none)s + from_ : int + Compensation in the input data. + to : int + Desired compensation in the output. + exclude_comp_chs : bool + Exclude compensation channels from the output. + + Returns + ------- + comp : array | None. + The compensation matrix. Might be None if no compensation + is needed (from == to). 
+ """ + if from_ == to: + return None + + # s_orig = s_from + C1*s_from = (I + C1)*s_from + # s_to = s_orig - C2*s_orig = (I - C2)*s_orig + # s_to = (I - C2)*(I + C1)*s_from = (I + C1 - C2 - C2*C1)*s_from + if from_ != 0: + C1 = _make_compensator(info, from_) + comp_from_0 = np.linalg.inv(np.eye(info["nchan"]) - C1) + if to != 0: + C2 = _make_compensator(info, to) + comp_0_to = np.eye(info["nchan"]) - C2 + if from_ != 0: + if to != 0: + # This is mathematically equivalent, but has higher numerical + # error than using the inverse to always go to zero and back + # comp = np.eye(info['nchan']) + C1 - C2 - np.dot(C2, C1) + comp = np.dot(comp_0_to, comp_from_0) + else: + comp = comp_from_0 + else: + # from == 0, to != 0 guaranteed here + comp = comp_0_to + + if exclude_comp_chs: + pick = [ + k for k, c in enumerate(info["chs"]) if c["kind"] != FIFF.FIFFV_REF_MEG_CH + ] + + if len(pick) == 0: + raise ValueError( + "Nothing remains after excluding the compensation channels" + ) + + comp = comp[pick, :] + + return comp + + +# @verbose +# def compensate_to(data, to, verbose=None): +# """ +# % +# % [newdata] = mne_compensate_to(data,to) +# % +# % Apply compensation to the data as desired +# % +# """ +# +# newdata = data.copy() +# now = get_current_comp(newdata['info']) +# +# # Are we there already? +# if now == to: +# logger.info('Data are already compensated as desired') +# +# # Make the compensator and apply it to all data sets +# comp = make_compensator(newdata['info'], now, to) +# for k in range(len(newdata['evoked'])): +# newdata['evoked'][k]['epochs'] = np.dot(comp, +# newdata['evoked'][k]['epochs']) +# +# # Update the compensation info in the channel descriptors +# newdata['info']['chs'] = set_current_comp(newdata['info']['chs'], to) +# return newdata + + +# def set_current_comp(chs, value): +# """Set the current compensation value in the channel info structures +# """ +# new_chs = chs +# +# lower_half = int('FFFF', 16) # hex2dec('FFFF') +# for k in range(len(chs)): +# if chs[k]['kind'] == FIFF.FIFFV_MEG_CH: +# coil_type = float(chs[k]['coil_type']) & lower_half +# new_chs[k]['coil_type'] = int(coil_type | (value << 16)) +# +# return new_chs diff --git a/mne/_fiff/constants.py b/mne/_fiff/constants.py new file mode 100644 index 0000000..7020e87 --- /dev/null +++ b/mne/_fiff/constants.py @@ -0,0 +1,1218 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +from ..utils._bunch import BunchConstNamed + +FIFF = BunchConstNamed() + +# +# FIFF version number in use +# +FIFF.FIFFC_MAJOR_VERSION = 1 +FIFF.FIFFC_MINOR_VERSION = 4 +FIFF.FIFFC_VERSION = FIFF.FIFFC_MAJOR_VERSION << 16 | FIFF.FIFFC_MINOR_VERSION + +# +# Blocks +# +FIFF.FIFFB_ROOT = 999 +FIFF.FIFFB_MEAS = 100 +FIFF.FIFFB_MEAS_INFO = 101 +FIFF.FIFFB_RAW_DATA = 102 +FIFF.FIFFB_PROCESSED_DATA = 103 +FIFF.FIFFB_EVOKED = 104 +FIFF.FIFFB_ASPECT = 105 +FIFF.FIFFB_SUBJECT = 106 +FIFF.FIFFB_ISOTRAK = 107 +FIFF.FIFFB_HPI_MEAS = 108 # HPI measurement +FIFF.FIFFB_HPI_RESULT = 109 # Result of a HPI fitting procedure +FIFF.FIFFB_HPI_COIL = 110 # Data acquired from one HPI coil +FIFF.FIFFB_PROJECT = 111 +FIFF.FIFFB_CONTINUOUS_DATA = 112 +FIFF.FIFFB_CH_INFO = 113 # Extra channel information +FIFF.FIFFB_VOID = 114 +FIFF.FIFFB_EVENTS = 115 +FIFF.FIFFB_INDEX = 116 +FIFF.FIFFB_DACQ_PARS = 117 +FIFF.FIFFB_REF = 118 +FIFF.FIFFB_IAS_RAW_DATA = 119 +FIFF.FIFFB_IAS_ASPECT = 120 +FIFF.FIFFB_HPI_SUBSYSTEM = 121 +# FIFF.FIFFB_PHANTOM_SUBSYSTEM = 122 +# FIFF.FIFFB_STATUS_SUBSYSTEM = 123 +FIFF.FIFFB_DEVICE = 124 +FIFF.FIFFB_HELIUM = 125 +FIFF.FIFFB_CHANNEL_INFO = 126 + +FIFF.FIFFB_SPHERE = 300 # Concentric sphere model related +FIFF.FIFFB_BEM = 310 # Boundary-element method +FIFF.FIFFB_BEM_SURF = 311 # Boundary-element method surfaces +FIFF.FIFFB_CONDUCTOR_MODEL = 312 # One conductor model definition +FIFF.FIFFB_PROJ = 313 +FIFF.FIFFB_PROJ_ITEM = 314 +FIFF.FIFFB_MRI = 200 +FIFF.FIFFB_MRI_SET = 201 +FIFF.FIFFB_MRI_SLICE = 202 +FIFF.FIFFB_MRI_SCENERY = 203 # These are for writing unrelated 'slices' +FIFF.FIFFB_MRI_SCENE = 204 # Which are actually 3D scenes... +FIFF.FIFFB_MRI_SEG = 205 # MRI segmentation data +FIFF.FIFFB_MRI_SEG_REGION = 206 # One MRI segmentation region +FIFF.FIFFB_PROCESSING_HISTORY = 900 +FIFF.FIFFB_PROCESSING_RECORD = 901 + +FIFF.FIFFB_DATA_CORRECTION = 500 +FIFF.FIFFB_CHANNEL_DECOUPLER = 501 +FIFF.FIFFB_SSS_INFO = 502 +FIFF.FIFFB_SSS_CAL = 503 +FIFF.FIFFB_SSS_ST_INFO = 504 +FIFF.FIFFB_SSS_BASES = 505 +FIFF.FIFFB_IAS = 510 +# +# Of general interest +# +FIFF.FIFF_FILE_ID = 100 +FIFF.FIFF_DIR_POINTER = 101 +FIFF.FIFF_BLOCK_ID = 103 +FIFF.FIFF_BLOCK_START = 104 +FIFF.FIFF_BLOCK_END = 105 +FIFF.FIFF_FREE_LIST = 106 +FIFF.FIFF_FREE_BLOCK = 107 +FIFF.FIFF_NOP = 108 +FIFF.FIFF_PARENT_FILE_ID = 109 +FIFF.FIFF_PARENT_BLOCK_ID = 110 +FIFF.FIFF_BLOCK_NAME = 111 +FIFF.FIFF_BLOCK_VERSION = 112 +FIFF.FIFF_CREATOR = 113 # Program that created the file (string) +FIFF.FIFF_MODIFIER = 114 # Program that modified the file (string) +FIFF.FIFF_REF_ROLE = 115 +FIFF.FIFF_REF_FILE_ID = 116 +FIFF.FIFF_REF_FILE_NUM = 117 +FIFF.FIFF_REF_FILE_NAME = 118 +# +# Megacq saves the parameters in these tags +# +FIFF.FIFF_DACQ_PARS = 150 +FIFF.FIFF_DACQ_STIM = 151 + +FIFF.FIFF_DEVICE_TYPE = 152 +FIFF.FIFF_DEVICE_MODEL = 153 +FIFF.FIFF_DEVICE_SERIAL = 154 +FIFF.FIFF_DEVICE_SITE = 155 + +FIFF.FIFF_HE_LEVEL_RAW = 156 +FIFF.FIFF_HELIUM_LEVEL = 157 +FIFF.FIFF_ORIG_FILE_GUID = 158 +FIFF.FIFF_UTC_OFFSET = 159 + +FIFF.FIFF_NCHAN = 200 +FIFF.FIFF_SFREQ = 201 +FIFF.FIFF_DATA_PACK = 202 +FIFF.FIFF_CH_INFO = 203 +FIFF.FIFF_MEAS_DATE = 204 +FIFF.FIFF_SUBJECT = 205 +FIFF.FIFF_COMMENT = 206 +FIFF.FIFF_NAVE = 207 +FIFF.FIFF_FIRST_SAMPLE = 208 # The first sample of an epoch +FIFF.FIFF_LAST_SAMPLE = 209 # The last sample of an epoch +FIFF.FIFF_ASPECT_KIND = 210 +FIFF.FIFF_REF_EVENT = 211 +FIFF.FIFF_EXPERIMENTER = 212 +FIFF.FIFF_DIG_POINT = 213 +FIFF.FIFF_CH_POS = 214 +FIFF.FIFF_HPI_SLOPES = 215 # HPI data +FIFF.FIFF_HPI_NCOIL = 216 
+FIFF.FIFF_REQ_EVENT = 217 +FIFF.FIFF_REQ_LIMIT = 218 +FIFF.FIFF_LOWPASS = 219 +FIFF.FIFF_BAD_CHS = 220 +FIFF.FIFF_ARTEF_REMOVAL = 221 +FIFF.FIFF_COORD_TRANS = 222 +FIFF.FIFF_HIGHPASS = 223 +FIFF.FIFF_CH_CALS = 224 # This will not occur in new files +FIFF.FIFF_HPI_BAD_CHS = 225 # List of channels considered to be bad in hpi +FIFF.FIFF_HPI_CORR_COEFF = 226 # HPI curve fit correlations +FIFF.FIFF_EVENT_COMMENT = 227 # Comment about the events used in averaging +FIFF.FIFF_NO_SAMPLES = 228 # Number of samples in an epoch +FIFF.FIFF_FIRST_TIME = 229 # Time scale minimum + +FIFF.FIFF_SUBAVE_SIZE = 230 # Size of a subaverage +FIFF.FIFF_SUBAVE_FIRST = 231 # The first epoch # contained in the subaverage +FIFF.FIFF_NAME = 233 # Intended to be a short name. +FIFF.FIFF_DESCRIPTION = FIFF.FIFF_COMMENT # (Textual) Description of an object +FIFF.FIFF_DIG_STRING = 234 # String of digitized points +FIFF.FIFF_LINE_FREQ = 235 # Line frequency +FIFF.FIFF_GANTRY_ANGLE = 282 # Tilt angle of the gantry in degrees. + +# +# HPI fitting program tags +# +FIFF.FIFF_HPI_COIL_FREQ = 236 # HPI coil excitation frequency +FIFF.FIFF_HPI_COIL_MOMENTS = ( + 240 # Estimated moment vectors for the HPI coil magnetic dipoles +) +FIFF.FIFF_HPI_FIT_GOODNESS = 241 # Three floats indicating the goodness of fit +FIFF.FIFF_HPI_FIT_ACCEPT = 242 # Bitmask indicating acceptance (see below) +FIFF.FIFF_HPI_FIT_GOOD_LIMIT = 243 # Limit for the goodness-of-fit +FIFF.FIFF_HPI_FIT_DIST_LIMIT = 244 # Limit for the coil distance difference +FIFF.FIFF_HPI_COIL_NO = 245 # Coil number listed by HPI measurement +FIFF.FIFF_HPI_COILS_USED = ( + 246 # List of coils finally used when the transformation was computed +) +FIFF.FIFF_HPI_DIGITIZATION_ORDER = ( + 247 # Which Isotrak digitization point corresponds to each of the coils energized +) + + +# +# Tags used for storing channel info +# +FIFF.FIFF_CH_SCAN_NO = ( + 250 # Channel scan number. Corresponds to fiffChInfoRec.scanNo field +) +FIFF.FIFF_CH_LOGICAL_NO = ( + 251 # Channel logical number. Corresponds to fiffChInfoRec.logNo field +) +FIFF.FIFF_CH_KIND = 252 # Channel type. Corresponds to fiffChInfoRec.kind field" +FIFF.FIFF_CH_RANGE = ( + 253 # Conversion from recorded number to (possibly virtual) voltage at the output" +) +FIFF.FIFF_CH_CAL = 254 # Calibration coefficient from output voltage to some real units +FIFF.FIFF_CH_LOC = 255 # Channel loc +FIFF.FIFF_CH_UNIT = 256 # Unit of the data +FIFF.FIFF_CH_UNIT_MUL = 257 # Unit multiplier exponent +FIFF.FIFF_CH_DACQ_NAME = 258 # Name of the channel in the data acquisition system. Corresponds to fiffChInfoRec.name. 
+FIFF.FIFF_CH_COIL_TYPE = 350 # Coil type in coil_def.dat +FIFF.FIFF_CH_COORD_FRAME = 351 # Coordinate frame (integer) + +# +# Pointers +# +FIFF.FIFFV_NEXT_SEQ = 0 +FIFF.FIFFV_NEXT_NONE = -1 +# +# Channel types +# +FIFF.FIFFV_BIO_CH = 102 +FIFF.FIFFV_MEG_CH = 1 +FIFF.FIFFV_REF_MEG_CH = 301 +FIFF.FIFFV_EEG_CH = 2 +FIFF.FIFFV_MCG_CH = 201 +FIFF.FIFFV_STIM_CH = 3 +FIFF.FIFFV_EOG_CH = 202 +FIFF.FIFFV_EMG_CH = 302 +FIFF.FIFFV_ECG_CH = 402 +FIFF.FIFFV_MISC_CH = 502 +FIFF.FIFFV_RESP_CH = 602 # Respiration monitoring +FIFF.FIFFV_SEEG_CH = 802 # stereotactic EEG +FIFF.FIFFV_DBS_CH = 803 # deep brain stimulation +FIFF.FIFFV_SYST_CH = 900 # some system status information (on Triux systems only) +FIFF.FIFFV_ECOG_CH = 902 +FIFF.FIFFV_IAS_CH = 910 # Internal Active Shielding data (maybe on Triux only) +FIFF.FIFFV_EXCI_CH = 920 # flux excitation channel used to be a stimulus channel +FIFF.FIFFV_DIPOLE_WAVE = 1000 # Dipole time curve (xplotter/xfit) +FIFF.FIFFV_GOODNESS_FIT = 1001 # Goodness of fit (xplotter/xfit) +FIFF.FIFFV_FNIRS_CH = 1100 # Functional near-infrared spectroscopy +FIFF.FIFFV_TEMPERATURE_CH = 1200 # Functional near-infrared spectroscopy +FIFF.FIFFV_GALVANIC_CH = 1300 # Galvanic skin response +FIFF.FIFFV_EYETRACK_CH = 1400 # Eye-tracking + +_ch_kind_named = { + key: key + for key in ( + FIFF.FIFFV_BIO_CH, + FIFF.FIFFV_MEG_CH, + FIFF.FIFFV_REF_MEG_CH, + FIFF.FIFFV_EEG_CH, + FIFF.FIFFV_MCG_CH, + FIFF.FIFFV_STIM_CH, + FIFF.FIFFV_EOG_CH, + FIFF.FIFFV_EMG_CH, + FIFF.FIFFV_ECG_CH, + FIFF.FIFFV_MISC_CH, + FIFF.FIFFV_RESP_CH, + FIFF.FIFFV_SEEG_CH, + FIFF.FIFFV_DBS_CH, + FIFF.FIFFV_SYST_CH, + FIFF.FIFFV_ECOG_CH, + FIFF.FIFFV_IAS_CH, + FIFF.FIFFV_EXCI_CH, + FIFF.FIFFV_DIPOLE_WAVE, + FIFF.FIFFV_GOODNESS_FIT, + FIFF.FIFFV_FNIRS_CH, + FIFF.FIFFV_GALVANIC_CH, + FIFF.FIFFV_TEMPERATURE_CH, + FIFF.FIFFV_EYETRACK_CH, + ) +} + +# +# Quaternion channels for head position monitoring +# +FIFF.FIFFV_QUAT_0 = 700 # Quaternion param q0 obsolete for unit quaternion +FIFF.FIFFV_QUAT_1 = 701 # Quaternion param q1 rotation +FIFF.FIFFV_QUAT_2 = 702 # Quaternion param q2 rotation +FIFF.FIFFV_QUAT_3 = 703 # Quaternion param q3 rotation +FIFF.FIFFV_QUAT_4 = 704 # Quaternion param q4 translation +FIFF.FIFFV_QUAT_5 = 705 # Quaternion param q5 translation +FIFF.FIFFV_QUAT_6 = 706 # Quaternion param q6 translation +FIFF.FIFFV_HPI_G = 707 # Goodness-of-fit in continuous hpi +FIFF.FIFFV_HPI_ERR = 708 # Estimation error in continuous hpi +FIFF.FIFFV_HPI_MOV = 709 # Estimated head movement speed in continuous hpi +# +# Coordinate frames +# +FIFF.FIFFV_COORD_UNKNOWN = 0 +FIFF.FIFFV_COORD_DEVICE = 1 +FIFF.FIFFV_COORD_ISOTRAK = 2 +FIFF.FIFFV_COORD_HPI = 3 +FIFF.FIFFV_COORD_HEAD = 4 +FIFF.FIFFV_COORD_MRI = 5 +FIFF.FIFFV_COORD_MRI_SLICE = 6 +FIFF.FIFFV_COORD_MRI_DISPLAY = 7 +FIFF.FIFFV_COORD_DICOM_DEVICE = 8 +FIFF.FIFFV_COORD_IMAGING_DEVICE = 9 +_coord_frame_named = { + key: key + for key in ( + FIFF.FIFFV_COORD_UNKNOWN, + FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_COORD_ISOTRAK, + FIFF.FIFFV_COORD_HPI, + FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_COORD_MRI, + FIFF.FIFFV_COORD_MRI_SLICE, + FIFF.FIFFV_COORD_MRI_DISPLAY, + FIFF.FIFFV_COORD_DICOM_DEVICE, + FIFF.FIFFV_COORD_IMAGING_DEVICE, + ) +} +# +# Needed for raw and evoked-response data +# +FIFF.FIFF_DATA_BUFFER = 300 # Buffer containing measurement data +FIFF.FIFF_DATA_SKIP = 301 # Data skip in buffers +FIFF.FIFF_EPOCH = 302 # Buffer containing one epoch and channel +FIFF.FIFF_DATA_SKIP_SAMP = 303 # Data skip in samples + +# +# Info on subject +# +FIFF.FIFF_SUBJ_ID = 400 # Subject ID 
+FIFF.FIFF_SUBJ_FIRST_NAME = 401 # First name of the subject
+FIFF.FIFF_SUBJ_MIDDLE_NAME = 402 # Middle name of the subject
+FIFF.FIFF_SUBJ_LAST_NAME = 403 # Last name of the subject
+FIFF.FIFF_SUBJ_BIRTH_DAY = 404 # Birthday of the subject
+FIFF.FIFF_SUBJ_SEX = 405 # Sex of the subject
+FIFF.FIFF_SUBJ_HAND = 406 # Handedness of the subject
+FIFF.FIFF_SUBJ_WEIGHT = 407 # Weight of the subject in kg
+FIFF.FIFF_SUBJ_HEIGHT = 408 # Height of the subject in m
+FIFF.FIFF_SUBJ_COMMENT = 409 # Comment about the subject
+FIFF.FIFF_SUBJ_HIS_ID = 410 # ID used in the Hospital Information System
+
+FIFF.FIFFV_SUBJ_HAND_RIGHT = 1 # Righthanded
+FIFF.FIFFV_SUBJ_HAND_LEFT = 2 # Lefthanded
+FIFF.FIFFV_SUBJ_HAND_AMBI = 3 # Ambidextrous
+
+FIFF.FIFFV_SUBJ_SEX_UNKNOWN = 0 # Unknown gender
+FIFF.FIFFV_SUBJ_SEX_MALE = 1 # Male
+FIFF.FIFFV_SUBJ_SEX_FEMALE = 2 # Female
+
+FIFF.FIFF_PROJ_ID = 500
+FIFF.FIFF_PROJ_NAME = 501
+FIFF.FIFF_PROJ_AIM = 502
+FIFF.FIFF_PROJ_PERSONS = 503
+FIFF.FIFF_PROJ_COMMENT = 504
+
+FIFF.FIFF_EVENT_CHANNELS = 600 # Event channel numbers
+FIFF.FIFF_EVENT_LIST = 601 # List of events (integers: <sample before after>
+FIFF.FIFF_EVENT_CHANNEL = 602 # Event channel
+FIFF.FIFF_EVENT_BITS = 603 # Event bits array
+
+#
+# Tags used in saving SQUID characteristics etc.
+#
+FIFF.FIFF_SQUID_BIAS = 701
+FIFF.FIFF_SQUID_OFFSET = 702
+FIFF.FIFF_SQUID_GATE = 703
+#
+# Aspect values used to save characteristic curves of SQUIDs. (mjk)
+#
+FIFF.FIFFV_ASPECT_IFII_LOW = 1100
+FIFF.FIFFV_ASPECT_IFII_HIGH = 1101
+FIFF.FIFFV_ASPECT_GATE = 1102
+
+#
+# Values for file references
+#
+FIFF.FIFFV_ROLE_PREV_FILE = 1
+FIFF.FIFFV_ROLE_NEXT_FILE = 2
+
+#
+# References
+#
+FIFF.FIFF_REF_PATH = 1101
+
+#
+# Different aspects of data
+#
+FIFF.FIFFV_ASPECT_AVERAGE = 100 # Normal average of epochs
+FIFF.FIFFV_ASPECT_STD_ERR = 101 # Std.
error of mean +FIFF.FIFFV_ASPECT_SINGLE = 102 # Single epoch cut out from the continuous data +FIFF.FIFFV_ASPECT_SUBAVERAGE = 103 # Partial average (subaverage) +FIFF.FIFFV_ASPECT_ALTAVERAGE = 104 # Alternating subaverage +FIFF.FIFFV_ASPECT_SAMPLE = 105 # A sample cut out by graph +FIFF.FIFFV_ASPECT_POWER_DENSITY = 106 # Power density spectrum +FIFF.FIFFV_ASPECT_DIPOLE_WAVE = 200 # Dipole amplitude curve + +# +# BEM surface IDs +# +FIFF.FIFFV_BEM_SURF_ID_UNKNOWN = -1 +FIFF.FIFFV_BEM_SURF_ID_NOT_KNOWN = 0 +FIFF.FIFFV_BEM_SURF_ID_BRAIN = 1 +FIFF.FIFFV_BEM_SURF_ID_CSF = 2 +FIFF.FIFFV_BEM_SURF_ID_SKULL = 3 +FIFF.FIFFV_BEM_SURF_ID_HEAD = 4 + +FIFF.FIFF_SPHERE_ORIGIN = 3001 +FIFF.FIFF_SPHERE_RADIUS = 3002 + +FIFF.FIFF_BEM_SURF_ID = 3101 # int surface number +FIFF.FIFF_BEM_SURF_NAME = 3102 # string surface name +FIFF.FIFF_BEM_SURF_NNODE = 3103 # int number of nodes on a surface +FIFF.FIFF_BEM_SURF_NTRI = 3104 # int number of triangles on a surface +FIFF.FIFF_BEM_SURF_NODES = 3105 # float surface nodes (nnode,3) +FIFF.FIFF_BEM_SURF_TRIANGLES = 3106 # int surface triangles (ntri,3) +FIFF.FIFF_BEM_SURF_NORMALS = 3107 # float surface node normal unit vectors + +FIFF.FIFF_BEM_POT_SOLUTION = 3110 # float ** The solution matrix +FIFF.FIFF_BEM_APPROX = 3111 # int approximation method, see below +FIFF.FIFF_BEM_COORD_FRAME = 3112 # The coordinate frame of the model +FIFF.FIFF_BEM_SIGMA = 3113 # Conductivity of a compartment +FIFF.FIFFV_BEM_APPROX_CONST = 1 # The constant potential approach +FIFF.FIFFV_BEM_APPROX_LINEAR = 2 # The linear potential approach + +# +# More of those defined in MNE +# +FIFF.FIFFV_MNE_SURF_UNKNOWN = -1 +FIFF.FIFFV_MNE_SURF_LEFT_HEMI = 101 +FIFF.FIFFV_MNE_SURF_RIGHT_HEMI = 102 +FIFF.FIFFV_MNE_SURF_MEG_HELMET = 201 # Use this irrespective of the system +# +# These relate to the Isotrak data (enum(point)) +# +FIFF.FIFFV_POINT_CARDINAL = 1 +FIFF.FIFFV_POINT_HPI = 2 +FIFF.FIFFV_POINT_EEG = 3 +FIFF.FIFFV_POINT_ECG = FIFF.FIFFV_POINT_EEG +FIFF.FIFFV_POINT_EXTRA = 4 +FIFF.FIFFV_POINT_HEAD = 5 # Point on the surface of the head +_dig_kind_named = { + key: key + for key in ( + FIFF.FIFFV_POINT_CARDINAL, + FIFF.FIFFV_POINT_HPI, + FIFF.FIFFV_POINT_EEG, + FIFF.FIFFV_POINT_EXTRA, + FIFF.FIFFV_POINT_HEAD, + ) +} +# +# Cardinal point types (enum(cardinal_point)) +# +FIFF.FIFFV_POINT_LPA = 1 +FIFF.FIFFV_POINT_NASION = 2 +FIFF.FIFFV_POINT_RPA = 3 +FIFF.FIFFV_POINT_INION = 4 +_dig_cardinal_named = { + key: key + for key in ( + FIFF.FIFFV_POINT_LPA, + FIFF.FIFFV_POINT_NASION, + FIFF.FIFFV_POINT_RPA, + FIFF.FIFFV_POINT_INION, + ) +} +# +# SSP +# +FIFF.FIFF_PROJ_ITEM_KIND = 3411 +FIFF.FIFF_PROJ_ITEM_TIME = 3412 +FIFF.FIFF_PROJ_ITEM_NVEC = 3414 +FIFF.FIFF_PROJ_ITEM_VECTORS = 3415 +FIFF.FIFF_PROJ_ITEM_DEFINITION = 3416 +FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST = 3417 +# XPlotter +FIFF.FIFF_XPLOTTER_LAYOUT = 3501 # string - "Xplotter layout tag" +# +# MRIs +# +FIFF.FIFF_MRI_SOURCE_PATH = FIFF.FIFF_REF_PATH +FIFF.FIFF_MRI_SOURCE_FORMAT = 2002 +FIFF.FIFF_MRI_PIXEL_ENCODING = 2003 +FIFF.FIFF_MRI_PIXEL_DATA_OFFSET = 2004 +FIFF.FIFF_MRI_PIXEL_SCALE = 2005 +FIFF.FIFF_MRI_PIXEL_DATA = 2006 +FIFF.FIFF_MRI_PIXEL_OVERLAY_ENCODING = 2007 +FIFF.FIFF_MRI_PIXEL_OVERLAY_DATA = 2008 +FIFF.FIFF_MRI_BOUNDING_BOX = 2009 +FIFF.FIFF_MRI_WIDTH = 2010 +FIFF.FIFF_MRI_WIDTH_M = 2011 +FIFF.FIFF_MRI_HEIGHT = 2012 +FIFF.FIFF_MRI_HEIGHT_M = 2013 +FIFF.FIFF_MRI_DEPTH = 2014 +FIFF.FIFF_MRI_DEPTH_M = 2015 +FIFF.FIFF_MRI_THICKNESS = 2016 +FIFF.FIFF_MRI_SCENE_AIM = 2017 +FIFF.FIFF_MRI_ORIG_SOURCE_PATH = 2020 +FIFF.FIFF_MRI_ORIG_SOURCE_FORMAT = 
2021 +FIFF.FIFF_MRI_ORIG_PIXEL_ENCODING = 2022 +FIFF.FIFF_MRI_ORIG_PIXEL_DATA_OFFSET = 2023 +FIFF.FIFF_MRI_VOXEL_DATA = 2030 +FIFF.FIFF_MRI_VOXEL_ENCODING = 2031 +FIFF.FIFF_MRI_MRILAB_SETUP = 2100 +FIFF.FIFF_MRI_SEG_REGION_ID = 2200 +# +FIFF.FIFFV_MRI_PIXEL_UNKNOWN = 0 +FIFF.FIFFV_MRI_PIXEL_BYTE = 1 +FIFF.FIFFV_MRI_PIXEL_WORD = 2 +FIFF.FIFFV_MRI_PIXEL_SWAP_WORD = 3 +FIFF.FIFFV_MRI_PIXEL_FLOAT = 4 +FIFF.FIFFV_MRI_PIXEL_BYTE_INDEXED_COLOR = 5 +FIFF.FIFFV_MRI_PIXEL_BYTE_RGB_COLOR = 6 +FIFF.FIFFV_MRI_PIXEL_BYTE_RLE_RGB_COLOR = 7 +FIFF.FIFFV_MRI_PIXEL_BIT_RLE = 8 +# +# These are the MNE fiff definitions (range 350-390 reserved for MNE) +# +FIFF.FIFFB_MNE = 350 +FIFF.FIFFB_MNE_SOURCE_SPACE = 351 +FIFF.FIFFB_MNE_FORWARD_SOLUTION = 352 +FIFF.FIFFB_MNE_PARENT_MRI_FILE = 353 +FIFF.FIFFB_MNE_PARENT_MEAS_FILE = 354 +FIFF.FIFFB_MNE_COV = 355 +FIFF.FIFFB_MNE_INVERSE_SOLUTION = 356 +FIFF.FIFFB_MNE_NAMED_MATRIX = 357 +FIFF.FIFFB_MNE_ENV = 358 +FIFF.FIFFB_MNE_BAD_CHANNELS = 359 +FIFF.FIFFB_MNE_VERTEX_MAP = 360 +FIFF.FIFFB_MNE_EVENTS = 361 +FIFF.FIFFB_MNE_MORPH_MAP = 362 +FIFF.FIFFB_MNE_SURFACE_MAP = 363 +FIFF.FIFFB_MNE_SURFACE_MAP_GROUP = 364 + +# +# CTF compensation data +# +FIFF.FIFFB_MNE_CTF_COMP = 370 +FIFF.FIFFB_MNE_CTF_COMP_DATA = 371 +FIFF.FIFFB_MNE_DERIVATIONS = 372 + +FIFF.FIFFB_MNE_EPOCHS = 373 +FIFF.FIFFB_MNE_ICA = 374 +# +# Fiff tags associated with MNE computations (3500...) +# +# +# 3500... Bookkeeping +# +FIFF.FIFF_MNE_ROW_NAMES = 3502 +FIFF.FIFF_MNE_COL_NAMES = 3503 +FIFF.FIFF_MNE_NROW = 3504 +FIFF.FIFF_MNE_NCOL = 3505 +FIFF.FIFF_MNE_COORD_FRAME = 3506 # Coordinate frame employed. Defaults: +# FIFFB_MNE_SOURCE_SPACE FIFFV_COORD_MRI +# FIFFB_MNE_FORWARD_SOLUTION FIFFV_COORD_HEAD +# FIFFB_MNE_INVERSE_SOLUTION FIFFV_COORD_HEAD +FIFF.FIFF_MNE_CH_NAME_LIST = 3507 +FIFF.FIFF_MNE_FILE_NAME = ( + 3508 # This removes the collision with fiff_file.h (used to be 3501) +) +# +# 3510... 3590... 
Source space or surface +# +FIFF.FIFF_MNE_SOURCE_SPACE_POINTS = 3510 # The vertices +FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS = 3511 # The vertex normals +FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS = 3512 # How many vertices +FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION = 3513 # Which are selected to the source space +FIFF.FIFF_MNE_SOURCE_SPACE_NUSE = 3514 # How many are in use +FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST = ( + 3515 # Nearest source space vertex for all vertices +) +FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST = ( + 3516 # Distance to the Nearest source space vertex for all vertices +) +FIFF.FIFF_MNE_SOURCE_SPACE_ID = 3517 # Identifier +FIFF.FIFF_MNE_SOURCE_SPACE_TYPE = 3518 # Surface or volume +FIFF.FIFF_MNE_SOURCE_SPACE_VERTICES = 3519 # List of vertices (zero based) + +FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS = ( + 3596 # Voxel space dimensions in a volume source space +) +FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR = ( + 3597 # Matrix to interpolate a volume source space into a mri volume +) +FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE = 3598 # MRI file used in the interpolation + +FIFF.FIFF_MNE_SOURCE_SPACE_NTRI = 3590 # Number of triangles +FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES = 3591 # The triangulation +FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI = ( + 3592 # Number of triangles corresponding to the number of vertices in use +) +FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES = ( + 3593 # The triangulation of the used vertices in the source space +) +FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS = 3594 # Number of neighbors for each source space point (used for volume source spaces) +FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS = ( + 3595 # Neighbors for each source space point (used for volume source spaces) +) + +FIFF.FIFF_MNE_SOURCE_SPACE_DIST = ( + 3599 # Distances between vertices in use (along the surface) +) +FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT = ( + 3600 # If distance is above this limit (in the volume) it has not been calculated +) + +FIFF.FIFF_MNE_SURFACE_MAP_DATA = 3610 # Surface map data +FIFF.FIFF_MNE_SURFACE_MAP_KIND = 3611 # Type of map + +# +# 3520... Forward solution +# +FIFF.FIFF_MNE_FORWARD_SOLUTION = 3520 +FIFF.FIFF_MNE_SOURCE_ORIENTATION = 3521 # Fixed or free +FIFF.FIFF_MNE_INCLUDED_METHODS = 3522 +FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD = 3523 +# +# 3530... Covariance matrix +# +FIFF.FIFF_MNE_COV_KIND = 3530 # What kind of a covariance matrix +FIFF.FIFF_MNE_COV_DIM = 3531 # Matrix dimension +FIFF.FIFF_MNE_COV = 3532 # Full matrix in packed representation (lower triangle) +FIFF.FIFF_MNE_COV_DIAG = 3533 # Diagonal matrix +FIFF.FIFF_MNE_COV_EIGENVALUES = 3534 # Eigenvalues and eigenvectors of the above +FIFF.FIFF_MNE_COV_EIGENVECTORS = 3535 +FIFF.FIFF_MNE_COV_NFREE = 3536 # Number of degrees of freedom +FIFF.FIFF_MNE_COV_METHOD = 3537 # The estimator used +FIFF.FIFF_MNE_COV_SCORE = 3538 # Negative log-likelihood + +# +# 3540... 
Inverse operator +# +# We store the inverse operator as the eigenleads, eigenfields, +# and weights +# +FIFF.FIFF_MNE_INVERSE_LEADS = 3540 # The eigenleads +FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED = ( + 3546 # The eigenleads (already weighted with R^0.5) +) +FIFF.FIFF_MNE_INVERSE_FIELDS = 3541 # The eigenfields +FIFF.FIFF_MNE_INVERSE_SING = 3542 # The singular values +FIFF.FIFF_MNE_PRIORS_USED = ( + 3543 # Which kind of priors have been used for the source covariance matrix +) +FIFF.FIFF_MNE_INVERSE_FULL = 3544 # Inverse operator as one matrix +# This matrix includes the whitening operator as well +# The regularization is applied +FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS = ( + 3545 # Contains the orientation of one source per row +) +# The source orientations must be expressed in the coordinate system +# given by FIFF_MNE_COORD_FRAME +FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT = 3547 # Are the sources given in Am or Am/m^2 ? +# +# 3550... Saved environment info +# +FIFF.FIFF_MNE_ENV_WORKING_DIR = 3550 # Working directory where the file was created +FIFF.FIFF_MNE_ENV_COMMAND_LINE = 3551 # The command used to create the file +FIFF.FIFF_MNE_EXTERNAL_BIG_ENDIAN = ( + 3552 # Reference to an external binary file (big-endian) */ +) +FIFF.FIFF_MNE_EXTERNAL_LITTLE_ENDIAN = ( + 3553 # Reference to an external binary file (little-endian) */ +) +# +# 3560... Miscellaneous +# +FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE = 3560 # Is this projection item active? +FIFF.FIFF_MNE_EVENT_LIST = 3561 # An event list (for STI101 / STI 014) +FIFF.FIFF_MNE_HEMI = 3562 # Hemisphere association for general purposes +FIFF.FIFF_MNE_DATA_SKIP_NOP = 3563 # A data skip turned off in the raw data +FIFF.FIFF_MNE_ORIG_CH_INFO = 3564 # Channel information before any changes +FIFF.FIFF_MNE_EVENT_TRIGGER_MASK = 3565 # Mask applied to the trigger channel values +FIFF.FIFF_MNE_EVENT_COMMENTS = 3566 # Event comments merged into one long string +FIFF.FIFF_MNE_CUSTOM_REF = 3567 # Whether a custom reference was applied to the data +FIFF.FIFF_MNE_BASELINE_MIN = 3568 # Time of baseline beginning +FIFF.FIFF_MNE_BASELINE_MAX = 3569 # Time of baseline end +# +# 3570... Morphing maps +# +FIFF.FIFF_MNE_MORPH_MAP = 3570 # Mapping of closest vertices on the sphere +FIFF.FIFF_MNE_MORPH_MAP_FROM = 3571 # Which subject is this map from +FIFF.FIFF_MNE_MORPH_MAP_TO = 3572 # Which subject is this map to +# +# 3580... CTF compensation data +# +FIFF.FIFF_MNE_CTF_COMP_KIND = 3580 # What kind of compensation +FIFF.FIFF_MNE_CTF_COMP_DATA = 3581 # The compensation data itself +FIFF.FIFF_MNE_CTF_COMP_CALIBRATED = 3582 # Are the coefficients calibrated? + +FIFF.FIFF_MNE_DERIVATION_DATA = ( + 3585 # Used to store information about EEG and other derivations +) +# +# 3601... 
values associated with ICA decomposition +# +FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS = 3601 # ICA interface parameters +FIFF.FIFF_MNE_ICA_CHANNEL_NAMES = 3602 # ICA channel names +FIFF.FIFF_MNE_ICA_WHITENER = 3603 # ICA whitener +FIFF.FIFF_MNE_ICA_PCA_COMPONENTS = 3604 # PCA components +FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR = 3605 # PCA explained variance +FIFF.FIFF_MNE_ICA_PCA_MEAN = 3606 # PCA mean +FIFF.FIFF_MNE_ICA_MATRIX = 3607 # ICA unmixing matrix +FIFF.FIFF_MNE_ICA_BADS = 3608 # ICA bad sources +FIFF.FIFF_MNE_ICA_MISC_PARAMS = 3609 # ICA misc params +# +# Miscellaneous +# +FIFF.FIFF_MNE_KIT_SYSTEM_ID = 3612 # Unique ID assigned to KIT systems +# +# Maxfilter tags +# +FIFF.FIFF_SSS_FRAME = 263 +FIFF.FIFF_SSS_JOB = 264 +FIFF.FIFF_SSS_ORIGIN = 265 +FIFF.FIFF_SSS_ORD_IN = 266 +FIFF.FIFF_SSS_ORD_OUT = 267 +FIFF.FIFF_SSS_NMAG = 268 +FIFF.FIFF_SSS_COMPONENTS = 269 +FIFF.FIFF_SSS_CAL_CHANS = 270 +FIFF.FIFF_SSS_CAL_CORRS = 271 +FIFF.FIFF_SSS_ST_CORR = 272 +FIFF.FIFF_SSS_NFREE = 278 +FIFF.FIFF_SSS_ST_LENGTH = 279 +FIFF.FIFF_DECOUPLER_MATRIX = 800 +# +# Fiff values associated with MNE computations +# +FIFF.FIFFV_MNE_UNKNOWN_ORI = 0 +FIFF.FIFFV_MNE_FIXED_ORI = 1 +FIFF.FIFFV_MNE_FREE_ORI = 2 + +FIFF.FIFFV_MNE_MEG = 1 +FIFF.FIFFV_MNE_EEG = 2 +FIFF.FIFFV_MNE_MEG_EEG = 3 + +FIFF.FIFFV_MNE_PRIORS_NONE = 0 +FIFF.FIFFV_MNE_PRIORS_DEPTH = 1 +FIFF.FIFFV_MNE_PRIORS_LORETA = 2 +FIFF.FIFFV_MNE_PRIORS_SULCI = 3 + +FIFF.FIFFV_MNE_UNKNOWN_COV = 0 +FIFF.FIFFV_MNE_SENSOR_COV = 1 +FIFF.FIFFV_MNE_NOISE_COV = 1 # This is what it should have been called +FIFF.FIFFV_MNE_SOURCE_COV = 2 +FIFF.FIFFV_MNE_FMRI_PRIOR_COV = 3 +FIFF.FIFFV_MNE_SIGNAL_COV = 4 # This will be potentially employed in beamformers +FIFF.FIFFV_MNE_DEPTH_PRIOR_COV = 5 # The depth weighting prior +FIFF.FIFFV_MNE_ORIENT_PRIOR_COV = 6 # The orientation prior + +# +# Output map types +# +FIFF.FIFFV_MNE_MAP_UNKNOWN = -1 # Unspecified +FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT = 1 # Scalar current value +FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_SIZE = 2 # Absolute value of the above +FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT = 3 # Current vector components +FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_SIZE = 4 # Vector current size +FIFF.FIFFV_MNE_MAP_T_STAT = 5 # Student's t statistic +FIFF.FIFFV_MNE_MAP_F_STAT = 6 # F statistic +FIFF.FIFFV_MNE_MAP_F_STAT_SQRT = 7 # Square root of the F statistic +FIFF.FIFFV_MNE_MAP_CHI2_STAT = 8 # (Approximate) chi^2 statistic +FIFF.FIFFV_MNE_MAP_CHI2_STAT_SQRT = ( + 9 # Square root of the (approximate) chi^2 statistic +) +FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_NOISE = 10 # Current noise approximation (scalar) +FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_NOISE = 11 # Current noise approximation (vector) +# +# Source space types (values of FIFF_MNE_SOURCE_SPACE_TYPE) +# +FIFF.FIFFV_MNE_SPACE_UNKNOWN = -1 +FIFF.FIFFV_MNE_SPACE_SURFACE = 1 +FIFF.FIFFV_MNE_SPACE_VOLUME = 2 +FIFF.FIFFV_MNE_SPACE_DISCRETE = 3 +# +# Covariance matrix channel classification +# +FIFF.FIFFV_MNE_COV_CH_UNKNOWN = -1 # No idea +FIFF.FIFFV_MNE_COV_CH_MEG_MAG = 0 # Axial gradiometer or magnetometer [T] +FIFF.FIFFV_MNE_COV_CH_MEG_GRAD = 1 # Planar gradiometer [T/m] +FIFF.FIFFV_MNE_COV_CH_EEG = 2 # EEG [V] +# +# Projection item kinds +# +FIFF.FIFFV_PROJ_ITEM_NONE = 0 +FIFF.FIFFV_PROJ_ITEM_FIELD = 1 +FIFF.FIFFV_PROJ_ITEM_DIP_FIX = 2 +FIFF.FIFFV_PROJ_ITEM_DIP_ROT = 3 +FIFF.FIFFV_PROJ_ITEM_HOMOG_GRAD = 4 +FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD = 5 +FIFF.FIFFV_PROJ_ITEM_EEG_AVREF = ( + 10 # Linear projection related to EEG average reference +) +FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF = ( + FIFF.FIFFV_PROJ_ITEM_EEG_AVREF 
+) # backward compat alias +# +# Custom EEG references +# +FIFF.FIFFV_MNE_CUSTOM_REF_OFF = 0 +FIFF.FIFFV_MNE_CUSTOM_REF_ON = 1 +FIFF.FIFFV_MNE_CUSTOM_REF_CSD = 2 +# +# SSS job options +# +FIFF.FIFFV_SSS_JOB_NOTHING = 0 # No SSS, just copy input to output +FIFF.FIFFV_SSS_JOB_CTC = 1 # No SSS, only cross-talk correction +FIFF.FIFFV_SSS_JOB_FILTER = 2 # Spatial maxwell filtering +FIFF.FIFFV_SSS_JOB_VIRT = 3 # Transform data to another sensor array +FIFF.FIFFV_SSS_JOB_HEAD_POS = 4 # Estimate head positions, no SSS +FIFF.FIFFV_SSS_JOB_MOVEC_FIT = 5 # Estimate and compensate head movement +FIFF.FIFFV_SSS_JOB_MOVEC_QUA = ( + 6 # Compensate head movement from previously estimated head positions +) +FIFF.FIFFV_SSS_JOB_REC_ALL = 7 # Reconstruct inside and outside signals +FIFF.FIFFV_SSS_JOB_REC_IN = 8 # Reconstruct inside signals +FIFF.FIFFV_SSS_JOB_REC_OUT = 9 # Reconstruct outside signals +FIFF.FIFFV_SSS_JOB_ST = 10 # Spatio-temporal maxwell filtering +FIFF.FIFFV_SSS_JOB_TPROJ = 11 # Temporal projection, no SSS +FIFF.FIFFV_SSS_JOB_XSSS = 12 # Cross-validation SSS +FIFF.FIFFV_SSS_JOB_XSUB = 13 # Cross-validation subtraction, no SSS +FIFF.FIFFV_SSS_JOB_XWAV = 14 # Cross-validation noise waveforms +FIFF.FIFFV_SSS_JOB_NCOV = 15 # Noise covariance estimation +FIFF.FIFFV_SSS_JOB_SCOV = 16 # SSS sample covariance estimation +# } + +# +# Additional coordinate frames +# +FIFF.FIFFV_MNE_COORD_TUFTS_EEG = 300 # For Tufts EEG data +FIFF.FIFFV_MNE_COORD_CTF_DEVICE = 1001 # CTF device coordinates +FIFF.FIFFV_MNE_COORD_CTF_HEAD = 1004 # CTF head coordinates +FIFF.FIFFV_MNE_COORD_DIGITIZER = ( + FIFF.FIFFV_COORD_ISOTRAK +) # Original (Polhemus) digitizer coordinates +FIFF.FIFFV_MNE_COORD_SURFACE_RAS = FIFF.FIFFV_COORD_MRI # The surface RAS coordinates +FIFF.FIFFV_MNE_COORD_MRI_VOXEL = 2001 # The MRI voxel coordinates +FIFF.FIFFV_MNE_COORD_RAS = 2002 # Surface RAS coordinates with non-zero origin +FIFF.FIFFV_MNE_COORD_MNI_TAL = 2003 # MNI Talairach coordinates +FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ = 2004 # FreeSurfer Talairach coordinates (MNI z > 0) +FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ = 2005 # FreeSurfer Talairach coordinates (MNI z < 0) +FIFF.FIFFV_MNE_COORD_FS_TAL = 2006 # FreeSurfer Talairach coordinates +# +# 4D and KIT use the same head coordinate system definition as CTF +# +FIFF.FIFFV_MNE_COORD_4D_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD +FIFF.FIFFV_MNE_COORD_KIT_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD + +# +# FWD Types +# + +FWD = BunchConstNamed() + +FWD.COIL_UNKNOWN = 0 +FWD.COILC_UNKNOWN = 0 +FWD.COILC_EEG = 1000 +FWD.COILC_MAG = 1 +FWD.COILC_AXIAL_GRAD = 2 +FWD.COILC_PLANAR_GRAD = 3 +FWD.COILC_AXIAL_GRAD2 = 4 + +FWD.COIL_ACCURACY_POINT = 0 +FWD.COIL_ACCURACY_NORMAL = 1 +FWD.COIL_ACCURACY_ACCURATE = 2 + +FWD.BEM_IP_APPROACH_LIMIT = 0.1 + +FWD.BEM_LIN_FIELD_SIMPLE = 1 +FWD.BEM_LIN_FIELD_FERGUSON = 2 +FWD.BEM_LIN_FIELD_URANKAR = 3 + +# +# Data types +# +FIFF.FIFFT_VOID = 0 +FIFF.FIFFT_BYTE = 1 +FIFF.FIFFT_SHORT = 2 +FIFF.FIFFT_INT = 3 +FIFF.FIFFT_FLOAT = 4 +FIFF.FIFFT_DOUBLE = 5 +FIFF.FIFFT_JULIAN = 6 +FIFF.FIFFT_USHORT = 7 +FIFF.FIFFT_UINT = 8 +FIFF.FIFFT_ULONG = 9 +FIFF.FIFFT_STRING = 10 +FIFF.FIFFT_LONG = 11 +FIFF.FIFFT_DAU_PACK13 = 13 +FIFF.FIFFT_DAU_PACK14 = 14 +FIFF.FIFFT_DAU_PACK16 = 16 +FIFF.FIFFT_COMPLEX_FLOAT = 20 +FIFF.FIFFT_COMPLEX_DOUBLE = 21 +FIFF.FIFFT_OLD_PACK = 23 +FIFF.FIFFT_CH_INFO_STRUCT = 30 +FIFF.FIFFT_ID_STRUCT = 31 +FIFF.FIFFT_DIR_ENTRY_STRUCT = 32 +FIFF.FIFFT_DIG_POINT_STRUCT = 33 +FIFF.FIFFT_CH_POS_STRUCT = 34 +FIFF.FIFFT_COORD_TRANS_STRUCT = 35 +FIFF.FIFFT_DIG_STRING_STRUCT = 36 
+FIFF.FIFFT_STREAM_SEGMENT_STRUCT = 37 +FIFF.FIFFT_MATRIX = 0x40000000 # 1073741824, 1 << 30 +FIFF.FIFFT_SPARSE_CCS_MATRIX = 0x00100000 # 1048576 +FIFF.FIFFT_SPARSE_RCS_MATRIX = 0x00200000 # 2097152 + +# +# Units of measurement +# +FIFF.FIFF_UNIT_NONE = -1 +# +# SI base units +# +FIFF.FIFF_UNIT_UNITLESS = 0 +FIFF.FIFF_UNIT_M = 1 # meter +FIFF.FIFF_UNIT_KG = 2 # kilogram +FIFF.FIFF_UNIT_SEC = 3 # second +FIFF.FIFF_UNIT_A = 4 # ampere +FIFF.FIFF_UNIT_K = 5 # Kelvin +FIFF.FIFF_UNIT_MOL = 6 # mole +# +# SI Supplementary units +# +FIFF.FIFF_UNIT_RAD = 7 # radian +FIFF.FIFF_UNIT_SR = 8 # steradian +# +# SI base candela +# +FIFF.FIFF_UNIT_CD = 9 # candela +# +# SI derived units +# +FIFF.FIFF_UNIT_MOL_M3 = 10 # mol/m^3 +FIFF.FIFF_UNIT_HZ = 101 # hertz +FIFF.FIFF_UNIT_N = 102 # Newton +FIFF.FIFF_UNIT_PA = 103 # pascal +FIFF.FIFF_UNIT_J = 104 # joule +FIFF.FIFF_UNIT_W = 105 # watt +FIFF.FIFF_UNIT_C = 106 # coulomb +FIFF.FIFF_UNIT_V = 107 # volt +FIFF.FIFF_UNIT_F = 108 # farad +FIFF.FIFF_UNIT_OHM = 109 # ohm +FIFF.FIFF_UNIT_S = 110 # Siemens (same as Moh, what fiff-constants calls it) +FIFF.FIFF_UNIT_WB = 111 # weber +FIFF.FIFF_UNIT_T = 112 # tesla +FIFF.FIFF_UNIT_H = 113 # Henry +FIFF.FIFF_UNIT_CEL = 114 # celsius +FIFF.FIFF_UNIT_LM = 115 # lumen +FIFF.FIFF_UNIT_LX = 116 # lux +FIFF.FIFF_UNIT_V_M2 = 117 # V/m^2 +# +# Others we need +# +FIFF.FIFF_UNIT_T_M = 201 # T/m +FIFF.FIFF_UNIT_AM = 202 # Am +FIFF.FIFF_UNIT_AM_M2 = 203 # Am/m^2 +FIFF.FIFF_UNIT_AM_M3 = 204 # Am/m^3 + +FIFF.FIFF_UNIT_PX = 210 # Pixel +_ch_unit_named = { + key: key + for key in ( + FIFF.FIFF_UNIT_NONE, + FIFF.FIFF_UNIT_UNITLESS, + FIFF.FIFF_UNIT_M, + FIFF.FIFF_UNIT_KG, + FIFF.FIFF_UNIT_SEC, + FIFF.FIFF_UNIT_A, + FIFF.FIFF_UNIT_K, + FIFF.FIFF_UNIT_MOL, + FIFF.FIFF_UNIT_RAD, + FIFF.FIFF_UNIT_SR, + FIFF.FIFF_UNIT_CD, + FIFF.FIFF_UNIT_MOL_M3, + FIFF.FIFF_UNIT_HZ, + FIFF.FIFF_UNIT_N, + FIFF.FIFF_UNIT_PA, + FIFF.FIFF_UNIT_J, + FIFF.FIFF_UNIT_W, + FIFF.FIFF_UNIT_C, + FIFF.FIFF_UNIT_V, + FIFF.FIFF_UNIT_F, + FIFF.FIFF_UNIT_OHM, + FIFF.FIFF_UNIT_S, + FIFF.FIFF_UNIT_WB, + FIFF.FIFF_UNIT_T, + FIFF.FIFF_UNIT_H, + FIFF.FIFF_UNIT_CEL, + FIFF.FIFF_UNIT_LM, + FIFF.FIFF_UNIT_LX, + FIFF.FIFF_UNIT_V_M2, + FIFF.FIFF_UNIT_T_M, + FIFF.FIFF_UNIT_AM, + FIFF.FIFF_UNIT_AM_M2, + FIFF.FIFF_UNIT_AM_M3, + FIFF.FIFF_UNIT_PX, + ) +} +# +# Multipliers +# +FIFF.FIFF_UNITM_E = 18 +FIFF.FIFF_UNITM_PET = 15 +FIFF.FIFF_UNITM_T = 12 +FIFF.FIFF_UNITM_GIG = 9 +FIFF.FIFF_UNITM_MEG = 6 +FIFF.FIFF_UNITM_K = 3 +FIFF.FIFF_UNITM_H = 2 +FIFF.FIFF_UNITM_DA = 1 +FIFF.FIFF_UNITM_NONE = 0 +FIFF.FIFF_UNITM_D = -1 +FIFF.FIFF_UNITM_C = -2 +FIFF.FIFF_UNITM_M = -3 +FIFF.FIFF_UNITM_MU = -6 +FIFF.FIFF_UNITM_N = -9 +FIFF.FIFF_UNITM_P = -12 +FIFF.FIFF_UNITM_F = -15 +FIFF.FIFF_UNITM_A = -18 +_ch_unit_mul_named = { + key: key + for key in ( + FIFF.FIFF_UNITM_E, + FIFF.FIFF_UNITM_PET, + FIFF.FIFF_UNITM_T, + FIFF.FIFF_UNITM_GIG, + FIFF.FIFF_UNITM_MEG, + FIFF.FIFF_UNITM_K, + FIFF.FIFF_UNITM_H, + FIFF.FIFF_UNITM_DA, + FIFF.FIFF_UNITM_NONE, + FIFF.FIFF_UNITM_D, + FIFF.FIFF_UNITM_C, + FIFF.FIFF_UNITM_M, + FIFF.FIFF_UNITM_MU, + FIFF.FIFF_UNITM_N, + FIFF.FIFF_UNITM_P, + FIFF.FIFF_UNITM_F, + FIFF.FIFF_UNITM_A, + ) +} + +# +# Coil types +# +FIFF.FIFFV_COIL_NONE = 0 # The location info contains no data +FIFF.FIFFV_COIL_EEG = 1 # EEG electrode position in r0 +FIFF.FIFFV_COIL_NM_122 = 2 # Neuromag 122 coils +FIFF.FIFFV_COIL_NM_24 = 3 # Old 24 channel system in HUT +FIFF.FIFFV_COIL_NM_MCG_AXIAL = 4 # The axial devices in the HUCS MCG system +FIFF.FIFFV_COIL_EEG_BIPOLAR = 5 # Bipolar EEG lead 
+FIFF.FIFFV_COIL_EEG_CSD = 6 # CSD-transformed EEG lead + +FIFF.FIFFV_COIL_DIPOLE = 200 # Time-varying dipole definition +# The coil info contains dipole location (r0) and +# direction (ex) +FIFF.FIFFV_COIL_FNIRS_HBO = 300 # fNIRS oxyhemoglobin +FIFF.FIFFV_COIL_FNIRS_HBR = 301 # fNIRS deoxyhemoglobin +FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE = 302 # fNIRS continuous wave amplitude +FIFF.FIFFV_COIL_FNIRS_OD = 303 # fNIRS optical density +FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE = 304 # fNIRS frequency domain AC amplitude +FIFF.FIFFV_COIL_FNIRS_FD_PHASE = 305 # fNIRS frequency domain phase +FIFF.FIFFV_COIL_FNIRS_RAW = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE # old alias +FIFF.FIFFV_COIL_FNIRS_TD_GATED_AMPLITUDE = 306 # fNIRS time-domain gated amplitude +FIFF.FIFFV_COIL_FNIRS_TD_MOMENTS_AMPLITUDE = 307 # fNIRS time-domain moments amplitude + +FIFF.FIFFV_COIL_EYETRACK_POS = 400 # Eye-tracking gaze position +FIFF.FIFFV_COIL_EYETRACK_PUPIL = 401 # Eye-tracking pupil size + +FIFF.FIFFV_COIL_MCG_42 = 1000 # For testing the MCG software + +FIFF.FIFFV_COIL_POINT_MAGNETOMETER = 2000 # Simple point magnetometer +FIFF.FIFFV_COIL_AXIAL_GRAD_5CM = 2001 # Generic axial gradiometer + +FIFF.FIFFV_COIL_VV_PLANAR_W = 3011 # VV prototype wirewound planar sensor +FIFF.FIFFV_COIL_VV_PLANAR_T1 = 3012 # Vectorview SQ20483N planar gradiometer +FIFF.FIFFV_COIL_VV_PLANAR_T2 = 3013 # Vectorview SQ20483N-A planar gradiometer +FIFF.FIFFV_COIL_VV_PLANAR_T3 = 3014 # Vectorview SQ20950N planar gradiometer +FIFF.FIFFV_COIL_VV_PLANAR_T4 = 3015 # Vectorview planar gradiometer (MEG-MRI) +FIFF.FIFFV_COIL_VV_MAG_W = 3021 # VV prototype wirewound magnetometer +FIFF.FIFFV_COIL_VV_MAG_T1 = 3022 # Vectorview SQ20483N magnetometer +FIFF.FIFFV_COIL_VV_MAG_T2 = 3023 # Vectorview SQ20483-A magnetometer +FIFF.FIFFV_COIL_VV_MAG_T3 = 3024 # Vectorview SQ20950N magnetometer +FIFF.FIFFV_COIL_VV_MAG_T4 = 3025 # Vectorview magnetometer (MEG-MRI) + +FIFF.FIFFV_COIL_MAGNES_MAG = 4001 # Magnes WH magnetometer +FIFF.FIFFV_COIL_MAGNES_GRAD = 4002 # Magnes WH gradiometer +# +# Magnes reference sensors +# +FIFF.FIFFV_COIL_MAGNES_REF_MAG = 4003 +FIFF.FIFFV_COIL_MAGNES_REF_GRAD = 4004 +FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD = 4005 +FIFF.FIFFV_COIL_MAGNES_R_MAG = FIFF.FIFFV_COIL_MAGNES_REF_MAG +FIFF.FIFFV_COIL_MAGNES_R_GRAD = FIFF.FIFFV_COIL_MAGNES_REF_GRAD +FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF = FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD + +# +# CTF coil and channel types +# +FIFF.FIFFV_COIL_CTF_GRAD = 5001 +FIFF.FIFFV_COIL_CTF_REF_MAG = 5002 +FIFF.FIFFV_COIL_CTF_REF_GRAD = 5003 +FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD = 5004 +# +# KIT system coil types +# +FIFF.FIFFV_COIL_KIT_GRAD = 6001 +FIFF.FIFFV_COIL_KIT_REF_MAG = 6002 +# +# BabySQUID sensors +# +FIFF.FIFFV_COIL_BABY_GRAD = 7001 +# +# BabyMEG sensors +# +FIFF.FIFFV_COIL_BABY_MAG = 7002 +FIFF.FIFFV_COIL_BABY_REF_MAG = 7003 +FIFF.FIFFV_COIL_BABY_REF_MAG2 = 7004 +# +# Artemis123 sensors +# +FIFF.FIFFV_COIL_ARTEMIS123_GRAD = 7501 +FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG = 7502 +FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD = 7503 +# +# QuSpin sensors +# +FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG = 8001 +FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2 = 8002 +# +# FieldLine sensors +# +FIFF.FIFFV_COIL_FIELDLINE_OPM_MAG_GEN1 = 8101 +# +# Kernel sensors +# +FIFF.FIFFV_COIL_KERNEL_OPM_MAG_GEN1 = 8201 +# +# KRISS sensors +# +FIFF.FIFFV_COIL_KRISS_GRAD = 9001 +# +# Compumedics adult/pediatric gradiometer +# +FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD = 9101 +FIFF.FIFFV_COIL_COMPUMEDICS_PEDIATRIC_GRAD = 9102 +_ch_coil_type_named = { + key: key + for key in ( + 
FIFF.FIFFV_COIL_NONE, + FIFF.FIFFV_COIL_EEG, + FIFF.FIFFV_COIL_NM_122, + FIFF.FIFFV_COIL_NM_24, + FIFF.FIFFV_COIL_NM_MCG_AXIAL, + FIFF.FIFFV_COIL_EEG_BIPOLAR, + FIFF.FIFFV_COIL_EEG_CSD, + FIFF.FIFFV_COIL_DIPOLE, + FIFF.FIFFV_COIL_FNIRS_HBO, + FIFF.FIFFV_COIL_FNIRS_HBR, + FIFF.FIFFV_COIL_FNIRS_RAW, + FIFF.FIFFV_COIL_FNIRS_OD, + FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE, + FIFF.FIFFV_COIL_FNIRS_FD_PHASE, + FIFF.FIFFV_COIL_FNIRS_TD_GATED_AMPLITUDE, + FIFF.FIFFV_COIL_FNIRS_TD_MOMENTS_AMPLITUDE, + FIFF.FIFFV_COIL_MCG_42, + FIFF.FIFFV_COIL_EYETRACK_POS, + FIFF.FIFFV_COIL_EYETRACK_PUPIL, + FIFF.FIFFV_COIL_POINT_MAGNETOMETER, + FIFF.FIFFV_COIL_AXIAL_GRAD_5CM, + FIFF.FIFFV_COIL_VV_PLANAR_W, + FIFF.FIFFV_COIL_VV_PLANAR_T1, + FIFF.FIFFV_COIL_VV_PLANAR_T2, + FIFF.FIFFV_COIL_VV_PLANAR_T3, + FIFF.FIFFV_COIL_VV_PLANAR_T4, + FIFF.FIFFV_COIL_VV_MAG_W, + FIFF.FIFFV_COIL_VV_MAG_T1, + FIFF.FIFFV_COIL_VV_MAG_T2, + FIFF.FIFFV_COIL_VV_MAG_T3, + FIFF.FIFFV_COIL_VV_MAG_T4, + FIFF.FIFFV_COIL_MAGNES_MAG, + FIFF.FIFFV_COIL_MAGNES_GRAD, + FIFF.FIFFV_COIL_MAGNES_REF_MAG, + FIFF.FIFFV_COIL_MAGNES_REF_GRAD, + FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD, + FIFF.FIFFV_COIL_CTF_GRAD, + FIFF.FIFFV_COIL_CTF_REF_MAG, + FIFF.FIFFV_COIL_CTF_REF_GRAD, + FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD, + FIFF.FIFFV_COIL_KIT_GRAD, + FIFF.FIFFV_COIL_KIT_REF_MAG, + FIFF.FIFFV_COIL_BABY_GRAD, + FIFF.FIFFV_COIL_BABY_MAG, + FIFF.FIFFV_COIL_BABY_REF_MAG, + FIFF.FIFFV_COIL_BABY_REF_MAG2, + FIFF.FIFFV_COIL_ARTEMIS123_GRAD, + FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG, + FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD, + FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG, + FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2, + FIFF.FIFFV_COIL_FIELDLINE_OPM_MAG_GEN1, + FIFF.FIFFV_COIL_KERNEL_OPM_MAG_GEN1, + FIFF.FIFFV_COIL_KRISS_GRAD, + FIFF.FIFFV_COIL_COMPUMEDICS_ADULT_GRAD, + FIFF.FIFFV_COIL_COMPUMEDICS_PEDIATRIC_GRAD, + ) +} + +# MNE RealTime +FIFF.FIFF_MNE_RT_COMMAND = 3700 # realtime command +FIFF.FIFF_MNE_RT_CLIENT_ID = 3701 # realtime client + +# MNE epochs bookkeeping +FIFF.FIFF_MNE_EPOCHS_SELECTION = 3800 # the epochs selection +FIFF.FIFF_MNE_EPOCHS_DROP_LOG = 3801 # the drop log +FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT = 3802 # rejection and flat params +FIFF.FIFF_MNE_EPOCHS_RAW_SFREQ = 3803 # original raw sfreq + +# MNE annotations +FIFF.FIFFB_MNE_ANNOTATIONS = 3810 # annotations block + +# MNE Metadata Dataframes +FIFF.FIFFB_MNE_METADATA = 3811 # metadata dataframes block + +# Table to match unrecognized channel location names to their known aliases +CHANNEL_LOC_ALIASES = { + # this set of aliases are published in doi:10.1097/WNP.0000000000000316 and + # doi:10.1016/S1388-2457(00)00527-7. + "Cb1": "POO7", + "Cb2": "POO8", + "CB1": "POO7", + "CB2": "POO8", + "T1": "T9", + "T2": "T10", + "T3": "T7", + "T4": "T8", + "T5": "T9", + "T6": "T10", + "M1": "TP9", + "M2": "TP10", + # EGI ref chan is named VREF/Vertex Ref. + # In the standard montages for EGI, the ref is named Cz + "VREF": "Cz", + "Vertex Reference": "Cz" + # add a comment here (with doi of a published source) above any new + # aliases, as they are added +} diff --git a/mne/_fiff/ctf_comp.py b/mne/_fiff/ctf_comp.py new file mode 100644 index 0000000..87269ba --- /dev/null +++ b/mne/_fiff/ctf_comp.py @@ -0,0 +1,189 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
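+
+# ---------------------------------------------------------------------------
+# Editor's note on _add_kind() below (illustrative, not part of the module):
+# the hex magic numbers are the CTF compensation-grade names packed as ASCII,
+#
+#     >>> bytes.fromhex("47314252").decode()
+#     'G1BR'
+#
+# so int("47314252", 16) is "G1BR" (1st-order gradient balancing), and
+# "47324252"/"47334252" are "G2BR"/"G3BR", which _add_kind() maps to the
+# simple MNE kinds 1, 2, and 3.
+# ---------------------------------------------------------------------------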
+ +from copy import deepcopy + +import numpy as np + +from ..utils import _pl, logger, verbose +from .constants import FIFF +from .matrix import _read_named_matrix, write_named_matrix +from .tag import read_tag +from .tree import dir_tree_find +from .write import end_block, start_block, write_int + + +def _add_kind(one): + """Convert CTF kind to MNE kind.""" + if one["ctfkind"] == int("47314252", 16): + one["kind"] = 1 + elif one["ctfkind"] == int("47324252", 16): + one["kind"] = 2 + elif one["ctfkind"] == int("47334252", 16): + one["kind"] = 3 + else: + one["kind"] = int(one["ctfkind"]) + + +def _calibrate_comp( + comp, chs, row_names, col_names, mult_keys=("range", "cal"), flip=False +): + """Get row and column cals.""" + ch_names = [c["ch_name"] for c in chs] + row_cals = np.zeros(len(row_names)) + col_cals = np.zeros(len(col_names)) + for names, cals, inv in zip( + (row_names, col_names), (row_cals, col_cals), (False, True) + ): + for ii in range(len(cals)): + p = ch_names.count(names[ii]) + if p != 1: + raise RuntimeError( + f"Channel {names[ii]} does not appear exactly once " + f"in data, found {p:d} instance{_pl(p)}" + ) + idx = ch_names.index(names[ii]) + val = chs[idx][mult_keys[0]] * chs[idx][mult_keys[1]] + val = float(1.0 / val) if inv else float(val) + val = 1.0 / val if flip else val + cals[ii] = val + comp["rowcals"] = row_cals + comp["colcals"] = col_cals + comp["data"]["data"] = row_cals[:, None] * comp["data"]["data"] * col_cals[None, :] + + +@verbose +def read_ctf_comp(fid, node, chs, verbose=None): + """Read the CTF software compensation data from the given node. + + Parameters + ---------- + fid : file + The file descriptor. + node : dict + The node in the FIF tree. + chs : list + The list of channels from info['chs'] to match with + compensators that are read. + %(verbose)s + + Returns + ------- + compdata : list + The compensation data + """ + return _read_ctf_comp(fid, node, chs, None) + + +def _read_ctf_comp(fid, node, chs, ch_names_mapping): + """Read the CTF software compensation data from the given node. + + Parameters + ---------- + fid : file + The file descriptor. + node : dict + The node in the FIF tree. + chs : list + The list of channels from info['chs'] to match with + compensators that are read. + ch_names_mapping : dict | None + The channel renaming to use. + %(verbose)s + + Returns + ------- + compdata : list + The compensation data + """ + from .meas_info import _rename_comps + + ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping + compdata = [] + comps = dir_tree_find(node, FIFF.FIFFB_MNE_CTF_COMP_DATA) + + for node in comps: + # Read the data we need + mat = _read_named_matrix(fid, node, FIFF.FIFF_MNE_CTF_COMP_DATA) + for p in range(node["nent"]): + kind = node["directory"][p].kind + pos = node["directory"][p].pos + if kind == FIFF.FIFF_MNE_CTF_COMP_KIND: + tag = read_tag(fid, pos) + break + else: + raise Exception("Compensation type not found") + + # Get the compensation kind and map it to a simple number + one = dict(ctfkind=tag.data.item()) + del tag + _add_kind(one) + for p in range(node["nent"]): + kind = node["directory"][p].kind + pos = node["directory"][p].pos + if kind == FIFF.FIFF_MNE_CTF_COMP_CALIBRATED: + tag = read_tag(fid, pos) + calibrated = tag.data + break + else: + calibrated = False + + one["save_calibrated"] = bool(calibrated) + one["data"] = mat + _rename_comps([one], ch_names_mapping) + if not calibrated: + # Calibrate... 
+ _calibrate_comp(one, chs, mat["row_names"], mat["col_names"]) + else: + one["rowcals"] = np.ones(mat["data"].shape[0], dtype=np.float64) + one["colcals"] = np.ones(mat["data"].shape[1], dtype=np.float64) + + compdata.append(one) + + if len(compdata) > 0: + logger.info(f" Read {len(compdata)} compensation matrices") + + return compdata + + +############################################################################### +# Writing + + +def write_ctf_comp(fid, comps): + """Write the CTF compensation data into a fif file. + + Parameters + ---------- + fid : file + The open FIF file descriptor + + comps : list + The compensation data to write + """ + if len(comps) <= 0: + return + + # This is very simple in fact + start_block(fid, FIFF.FIFFB_MNE_CTF_COMP) + for comp in comps: + start_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA) + # Write the compensation kind + write_int(fid, FIFF.FIFF_MNE_CTF_COMP_KIND, comp["ctfkind"]) + if comp.get("save_calibrated", False): + write_int(fid, FIFF.FIFF_MNE_CTF_COMP_CALIBRATED, comp["save_calibrated"]) + + if not comp.get("save_calibrated", True): + # Undo calibration + comp = deepcopy(comp) + data = ( + (1.0 / comp["rowcals"][:, None]) + * comp["data"]["data"] + * (1.0 / comp["colcals"][None, :]) + ) + comp["data"]["data"] = data + write_named_matrix(fid, FIFF.FIFF_MNE_CTF_COMP_DATA, comp["data"]) + end_block(fid, FIFF.FIFFB_MNE_CTF_COMP_DATA) + + end_block(fid, FIFF.FIFFB_MNE_CTF_COMP) diff --git a/mne/_fiff/matrix.py b/mne/_fiff/matrix.py new file mode 100644 index 0000000..f27d868 --- /dev/null +++ b/mne/_fiff/matrix.py @@ -0,0 +1,137 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ..utils import logger +from .constants import FIFF +from .tag import find_tag, has_tag +from .write import ( + end_block, + start_block, + write_float_matrix, + write_int, + write_name_list, +) + + +def _transpose_named_matrix(mat): + """Transpose mat inplace (no copy).""" + mat["nrow"], mat["ncol"] = mat["ncol"], mat["nrow"] + mat["row_names"], mat["col_names"] = mat["col_names"], mat["row_names"] + mat["data"] = mat["data"].T + + +def _read_named_matrix(fid, node, matkind, indent=" ", transpose=False): + """Read named matrix from the given node. + + Parameters + ---------- + fid : file + The opened file descriptor. + node : dict + The node in the tree. + matkind : int + The type of matrix. + transpose : bool + If True, transpose the matrix. Default is False. 
+    %(verbose)s
+
+    Returns
+    -------
+    mat: dict
+        The matrix data
+    """
+    # Descend one level if necessary
+    if node["block"] != FIFF.FIFFB_MNE_NAMED_MATRIX:
+        for k in range(node["nchild"]):
+            if node["children"][k]["block"] == FIFF.FIFFB_MNE_NAMED_MATRIX:
+                if has_tag(node["children"][k], matkind):
+                    node = node["children"][k]
+                    break
+        else:
+            logger.info(
+                f"{indent}Desired named matrix (kind = {matkind}) not available"
+            )
+            return None
+    else:
+        if not has_tag(node, matkind):
+            logger.info(
+                f"{indent}Desired named matrix (kind = {matkind}) not available"
+            )
+            return None
+
+    # Read everything we need
+    tag = find_tag(fid, node, matkind)
+    if tag is None:
+        raise ValueError("Matrix data missing")
+    else:
+        data = tag.data
+
+    nrow, ncol = data.shape
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_NROW)
+    if tag is not None and tag.data != nrow:
+        raise ValueError(
+            "Number of rows in matrix data and FIFF_MNE_NROW tag do not match"
+        )
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_NCOL)
+    if tag is not None and tag.data != ncol:
+        raise ValueError(
+            "Number of columns in matrix data and FIFF_MNE_NCOL tag do not match"
+        )
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_ROW_NAMES)
+    row_names = tag.data.split(":") if tag is not None else []
+
+    tag = find_tag(fid, node, FIFF.FIFF_MNE_COL_NAMES)
+    col_names = tag.data.split(":") if tag is not None else []
+
+    mat = dict(
+        nrow=nrow, ncol=ncol, row_names=row_names, col_names=col_names, data=data
+    )
+    if transpose:
+        _transpose_named_matrix(mat)
+    return mat
+
+
+def write_named_matrix(fid, kind, mat):
+    """Write named matrix from the given node.
+
+    Parameters
+    ----------
+    fid : file
+        The opened file descriptor.
+    kind : int
+        The kind of the matrix.
+    mat : dict
+        The named matrix to write (a dict with entries ``nrow``, ``ncol``,
+        ``row_names``, ``col_names``, and ``data``).
+    """
+    # let's save ourselves from disaster
+    n_tot = mat["nrow"] * mat["ncol"]
+    if mat["data"].size != n_tot:
+        ratio = n_tot / float(mat["data"].size)
+        if n_tot < mat["data"].size and ratio > 0:
+            ratio = 1 / ratio
+        raise ValueError(
+            f"Cannot write matrix: row ({mat['nrow']}) and column ({mat['ncol']}) "
+            f"total element ({n_tot}) mismatch with data size ({mat['data'].size}), "
+            f"appears to be off by a factor of {ratio:g}x"
+        )
+    start_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
+    write_int(fid, FIFF.FIFF_MNE_NROW, mat["nrow"])
+    write_int(fid, FIFF.FIFF_MNE_NCOL, mat["ncol"])
+
+    if len(mat["row_names"]) > 0:
+        # let's prevent unintentional stupidity
+        if len(mat["row_names"]) != mat["nrow"]:
+            raise ValueError('len(mat["row_names"]) != mat["nrow"]')
+        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, mat["row_names"])
+
+    if len(mat["col_names"]) > 0:
+        # let's prevent unintentional stupidity
+        if len(mat["col_names"]) != mat["ncol"]:
+            raise ValueError('len(mat["col_names"]) != mat["ncol"]')
+        write_name_list(fid, FIFF.FIFF_MNE_COL_NAMES, mat["col_names"])
+
+    write_float_matrix(fid, kind, mat["data"])
+    end_block(fid, FIFF.FIFFB_MNE_NAMED_MATRIX)
diff --git a/mne/_fiff/meas_info.py b/mne/_fiff/meas_info.py
new file mode 100644
index 0000000..629d9a4
--- /dev/null
+++ b/mne/_fiff/meas_info.py
@@ -0,0 +1,3764 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
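+
+# Usage sketch (illustrative only; the file name below is hypothetical):
+# the measurement info assembled by this module is normally obtained through
+# the public reader rather than the low-level helpers defined here, e.g.
+#
+#     import mne
+#     info = mne.io.read_info("sample_raw.fif")
+#     print(info["sfreq"], info["nchan"], info["bads"])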
+ +import contextlib +import datetime +import operator +import re +import string +from collections import Counter, OrderedDict +from collections.abc import Mapping +from copy import deepcopy +from functools import partial +from io import BytesIO +from textwrap import shorten + +import numpy as np + +from ..defaults import _handle_default +from ..html_templates import _get_html_template +from ..utils import ( + _check_fname, + _check_on_missing, + _check_option, + _dt_to_stamp, + _on_missing, + _pl, + _stamp_to_dt, + _validate_type, + check_fname, + fill_doc, + logger, + object_diff, + repr_html, + verbose, + warn, +) +from ._digitization import ( + DigPoint, + _dig_kind_ints, + _dig_kind_proper, + _dig_kind_rev, + _format_dig_points, + _get_data_as_dict_from_dig, + _read_dig_fif, + write_dig, +) +from .compensator import get_current_comp +from .constants import FIFF, _ch_unit_mul_named, _coord_frame_named +from .ctf_comp import _read_ctf_comp, write_ctf_comp +from .open import fiff_open +from .pick import ( + _DATA_CH_TYPES_SPLIT, + _contains_ch_type, + _picks_to_idx, + channel_type, + get_channel_type_constants, + pick_types, +) +from .proc_history import _read_proc_history, _write_proc_history +from .proj import ( + Projection, + _normalize_proj, + _proj_equal, + _read_proj, + _uniquify_projs, + _write_proj, +) +from .tag import ( + _ch_coord_dict, + _float_item, + _int_item, + _rename_list, + _update_ch_info_named, + find_tag, + read_tag, +) +from .tree import dir_tree_find +from .write import ( + DATE_NONE, + _safe_name_list, + end_block, + start_and_end_file, + start_block, + write_ch_info, + write_coord_trans, + write_dig_points, + write_float, + write_float_matrix, + write_id, + write_int, + write_julian, + write_name_list_sanitized, + write_string, +) + +b = bytes # alias + +_SCALAR_CH_KEYS = ( + "scanno", + "logno", + "kind", + "range", + "cal", + "coil_type", + "unit", + "unit_mul", + "coord_frame", +) +_ALL_CH_KEYS_SET = set(_SCALAR_CH_KEYS + ("loc", "ch_name")) +# XXX we need to require these except when doing simplify_info +_MIN_CH_KEYS_SET = set(("kind", "cal", "unit", "loc", "ch_name")) + + +def _get_valid_units(): + """Get valid units according to the International System of Units (SI). + + The International System of Units (SI, :footcite:`WikipediaSI`) is the + default system for describing units in the Brain Imaging Data Structure + (BIDS). For more information, see the BIDS specification + :footcite:`BIDSdocs` and the appendix "Units" therein. + + References + ---------- + .. 
footbibliography:: + """ + valid_prefix_names = [ + "yocto", + "zepto", + "atto", + "femto", + "pico", + "nano", + "micro", + "milli", + "centi", + "deci", + "deca", + "hecto", + "kilo", + "mega", + "giga", + "tera", + "peta", + "exa", + "zetta", + "yotta", + ] + valid_prefix_symbols = [ + "y", + "z", + "a", + "f", + "p", + "n", + "µ", + "m", + "c", + "d", + "da", + "h", + "k", + "M", + "G", + "T", + "P", + "E", + "Z", + "Y", + ] + valid_unit_names = [ + "metre", + "kilogram", + "second", + "ampere", + "kelvin", + "mole", + "candela", + "radian", + "steradian", + "hertz", + "newton", + "pascal", + "joule", + "watt", + "coulomb", + "volt", + "farad", + "ohm", + "siemens", + "weber", + "tesla", + "henry", + "degree Celsius", + "lumen", + "lux", + "becquerel", + "gray", + "sievert", + "katal", + ] + valid_unit_symbols = [ + "m", + "kg", + "s", + "A", + "K", + "mol", + "cd", + "rad", + "sr", + "Hz", + "N", + "Pa", + "J", + "W", + "C", + "V", + "F", + "Ω", + "S", + "Wb", + "T", + "H", + "°C", + "lm", + "lx", + "Bq", + "Gy", + "Sv", + "kat", + ] + + # Valid units are all possible combinations of either prefix name or prefix + # symbol together with either unit name or unit symbol. E.g., nV for + # nanovolt + valid_units = [] + valid_units += [ + "".join([prefix, unit]) + for prefix in valid_prefix_names + for unit in valid_unit_names + ] + valid_units += [ + "".join([prefix, unit]) + for prefix in valid_prefix_names + for unit in valid_unit_symbols + ] + valid_units += [ + "".join([prefix, unit]) + for prefix in valid_prefix_symbols + for unit in valid_unit_names + ] + valid_units += [ + "".join([prefix, unit]) + for prefix in valid_prefix_symbols + for unit in valid_unit_symbols + ] + + # units are also valid without a prefix + valid_units += valid_unit_names + valid_units += valid_unit_symbols + + # we also accept "n/a" as a unit, which is the default missing value in + # BIDS + valid_units += ["n/a"] + + return tuple(valid_units) + + +@verbose +def _unique_channel_names(ch_names, max_length=None, verbose=None): + """Ensure unique channel names.""" + suffixes = tuple(string.ascii_lowercase) + if max_length is not None: + ch_names[:] = [name[:max_length] for name in ch_names] + unique_ids = np.unique(ch_names, return_index=True)[1] + if len(unique_ids) != len(ch_names): + dups = {ch_names[x] for x in np.setdiff1d(range(len(ch_names)), unique_ids)} + warn( + "Channel names are not unique, found duplicates for: " + f"{dups}. Applying running numbers for duplicates." + ) + for ch_stem in dups: + overlaps = np.where(np.array(ch_names) == ch_stem)[0] + # We need an extra character since we append '-'. + # np.ceil(...) is the maximum number of appended digits. + if max_length is not None: + n_keep = max_length - 1 - int(np.ceil(np.log10(len(overlaps)))) + else: + n_keep = np.inf + n_keep = min(len(ch_stem), n_keep) + ch_stem = ch_stem[:n_keep] + for idx, ch_idx in enumerate(overlaps): + # try idx first, then loop through lower case chars + for suffix in (idx,) + suffixes: + ch_name = ch_stem + f"-{suffix}" + if ch_name not in ch_names: + break + if ch_name not in ch_names: + ch_names[ch_idx] = ch_name + else: + raise ValueError( + "Adding a single alphanumeric for a " + "duplicate resulted in another " + f"duplicate name {ch_name}" + ) + return ch_names + + +# %% Mixin classes + + +class MontageMixin: + """Mixin for Montage getting and setting.""" + + @fill_doc + def get_montage(self): + """Get a DigMontage from instance. 
+ + Returns + ------- + montage : None | DigMontage + A copy of the channel positions, if available, otherwise ``None``. + """ + from ..channels.montage import make_dig_montage + from ..transforms import _frame_to_str + + info = self if isinstance(self, Info) else self.info + if info["dig"] is None: + return None + # obtain coord_frame, and landmark coords + # (nasion, lpa, rpa, hsp, hpi) from DigPoints + montage_bunch = _get_data_as_dict_from_dig(info["dig"]) + coord_frame = _frame_to_str.get(montage_bunch.coord_frame) + + # get the channel names and chs data structure + ch_names, chs = info["ch_names"], info["chs"] + picks = pick_types( + info, + meg=False, + eeg=True, + seeg=True, + ecog=True, + dbs=True, + fnirs=True, + exclude=[], + ) + + # channel positions from dig do not match ch_names one to one, + # so use loc[:3] instead + ch_pos = {ch_names[ii]: chs[ii]["loc"][:3] for ii in picks} + + # fNIRS uses multiple channels for the same sensors, we use + # a private function to format these for dig montage. + fnirs_picks = pick_types(info, fnirs=True, exclude=[]) + if len(ch_pos) == len(fnirs_picks): + ch_pos = _get_fnirs_ch_pos(info) + elif len(fnirs_picks) > 0: + raise ValueError( + "MNE does not support getting the montage " + "for a mix of fNIRS and other data types. " + "Please raise a GitHub issue if you " + "require this feature." + ) + + # create montage + montage = make_dig_montage( + ch_pos=ch_pos, + coord_frame=coord_frame, + nasion=montage_bunch.nasion, + lpa=montage_bunch.lpa, + rpa=montage_bunch.rpa, + hsp=montage_bunch.hsp, + hpi=montage_bunch.hpi, + ) + return montage + + @verbose + def set_montage( + self, + montage, + match_case=True, + match_alias=False, + on_missing="raise", + verbose=None, + ): + """Set %(montage_types)s channel positions and digitization points. + + Parameters + ---------- + %(montage)s + %(match_case)s + %(match_alias)s + %(on_missing_montage)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The instance, modified in-place. + + See Also + -------- + mne.channels.make_standard_montage + mne.channels.make_dig_montage + mne.channels.read_custom_montage + + Notes + ----- + .. warning:: + Only %(montage_types)s channels can have their positions set using + a montage. Other channel types (e.g., MEG channels) should have + their positions defined properly using their data reading + functions. + .. warning:: + Applying a montage will only set locations of channels that exist + at the time it is applied. 
This means when
+            :ref:`re-referencing <tut-set-eeg-ref>`
+            make sure to apply the montage only after calling
+            :func:`mne.add_reference_channels`
+        """
+        # How to set up a montage to old named fif file (walk through example)
+        # https://gist.github.com/massich/f6a9f4799f1fbeb8f5e8f8bc7b07d3df
+
+        from ..channels.montage import _set_montage
+
+        info = self if isinstance(self, Info) else self.info
+        _set_montage(info, montage, match_case, match_alias, on_missing)
+        return self
+
+
+channel_type_constants = get_channel_type_constants(include_defaults=True)
+_human2fiff = {
+    k: v.get("kind", FIFF.FIFFV_COIL_NONE) for k, v in channel_type_constants.items()
+}
+_human2unit = {
+    k: v.get("unit", FIFF.FIFF_UNIT_NONE) for k, v in channel_type_constants.items()
+}
+_unit2human = {
+    FIFF.FIFF_UNIT_V: "V",
+    FIFF.FIFF_UNIT_T: "T",
+    FIFF.FIFF_UNIT_T_M: "T/m",
+    FIFF.FIFF_UNIT_MOL: "M",
+    FIFF.FIFF_UNIT_NONE: "NA",
+    FIFF.FIFF_UNIT_CEL: "C",
+    FIFF.FIFF_UNIT_S: "S",
+    FIFF.FIFF_UNIT_PX: "px",
+}
+
+
+def _check_set(ch, projs, ch_type):
+    """Ensure type change is compatible with projectors."""
+    new_kind = _human2fiff[ch_type]
+    if ch["kind"] != new_kind:
+        for proj in projs:
+            if ch["ch_name"] in proj["data"]["col_names"]:
+                raise RuntimeError(
+                    f'Cannot change channel type for channel {ch["ch_name"]} in '
+                    f'projector "{proj["desc"]}"'
+                )
+    ch["kind"] = new_kind
+
+
+class SetChannelsMixin(MontageMixin):
+    """Mixin class for Raw, Evoked, Epochs."""
+
+    def _get_channel_positions(self, picks=None):
+        """Get channel locations from info.
+
+        Parameters
+        ----------
+        picks : str | list | slice | None
+            None gets good data indices.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        info = self if isinstance(self, Info) else self.info
+        picks = _picks_to_idx(info, picks)
+        chs = info["chs"]
+        pos = np.array([chs[k]["loc"][:3] for k in picks])
+        n_zero = np.sum(np.sum(np.abs(pos), axis=1) == 0)
+        if n_zero > 1:  # XXX some systems have origin (0, 0, 0)
+            raise ValueError(
+                f"Could not extract channel positions for {n_zero} channels"
+            )
+        return pos
+
+    def _set_channel_positions(self, pos, names):
+        """Update channel locations in info.
+
+        Parameters
+        ----------
+        pos : array-like | np.ndarray, shape (n_points, 3)
+            The channel positions to be set.
+        names : list of str
+            The names of the channels to be set.
+
+        Notes
+        -----
+        .. versionadded:: 0.9.0
+        """
+        info = self if isinstance(self, Info) else self.info
+        if len(pos) != len(names):
+            raise ValueError(
+                "Number of channel positions not equal to the number of names given."
+            )
+        pos = np.asarray(pos, dtype=np.float64)
+        if pos.shape[-1] != 3 or pos.ndim != 2:
+            msg = (
+                f"Channel positions must have the shape (n_points, 3) not {pos.shape}."
+            )
+            raise ValueError(msg)
+        for name, p in zip(names, pos):
+            if name in self.ch_names:
+                idx = self.ch_names.index(name)
+                info["chs"][idx]["loc"][:3] = p
+            else:
+                msg = f"{name} was not found in the info. Cannot be updated."
+                raise ValueError(msg)
+
+    @verbose
+    def set_channel_types(self, mapping, *, on_unit_change="warn", verbose=None):
+        """Specify the sensor types of channels.
+
+        Parameters
+        ----------
+        mapping : dict
+            A dictionary mapping channel names to sensor types, e.g.,
+            ``{'EEG061': 'eog'}``.
+        on_unit_change : ``'raise'`` | ``'warn'`` | ``'ignore'``
+            What to do if the measurement unit of a channel is changed
+            automatically to match the new sensor type.
+
+            .. versionadded:: 1.4
+        %(verbose)s
+
+        Returns
+        -------
+        inst : instance of Raw | Epochs | Evoked
+            The instance (modified in place).
+ + .. versionchanged:: 0.20 + Return the instance. + + Notes + ----- + The following :term:`sensor types` are accepted: + + bio, chpi, csd, dbs, dipole, ecg, ecog, eeg, emg, eog, exci, + eyegaze, fnirs_cw_amplitude, fnirs_fd_ac_amplitude, fnirs_fd_phase, + fnirs_od, gof, gsr, hbo, hbr, ias, misc, pupil, ref_meg, resp, + seeg, stim, syst, temperature. + + When working with eye-tracking data, see + :func:`mne.preprocessing.eyetracking.set_channel_types_eyetrack`. + + .. versionadded:: 0.9.0 + """ + info = self if isinstance(self, Info) else self.info + ch_names = info["ch_names"] + + # first check and assemble clean mappings of index and name + unit_changes = dict() + for ch_name, ch_type in mapping.items(): + if ch_name not in ch_names: + raise ValueError( + f"This channel name ({ch_name}) doesn't exist in info." + ) + + c_ind = ch_names.index(ch_name) + if ch_type not in _human2fiff: + raise ValueError( + f"This function cannot change to this channel type: {ch_type}. " + "Accepted channel types are " + f"{', '.join(sorted(_human2unit.keys()))}." + ) + # Set sensor type + _check_set(info["chs"][c_ind], info["projs"], ch_type) + unit_old = info["chs"][c_ind]["unit"] + unit_new = _human2unit[ch_type] + if unit_old not in _unit2human: + raise ValueError( + f"Channel '{ch_name}' has unknown unit ({unit_old}). Please fix the" + " measurement info of your data." + ) + if unit_old != _human2unit[ch_type]: + this_change = (_unit2human[unit_old], _unit2human[unit_new]) + if this_change not in unit_changes: + unit_changes[this_change] = list() + unit_changes[this_change].append(ch_name) + # reset unit multiplication factor since the unit has now changed + info["chs"][c_ind]["unit_mul"] = _ch_unit_mul_named[0] + info["chs"][c_ind]["unit"] = _human2unit[ch_type] + if ch_type in ["eeg", "seeg", "ecog", "dbs"]: + coil_type = FIFF.FIFFV_COIL_EEG + elif ch_type == "hbo": + coil_type = FIFF.FIFFV_COIL_FNIRS_HBO + elif ch_type == "hbr": + coil_type = FIFF.FIFFV_COIL_FNIRS_HBR + elif ch_type == "fnirs_cw_amplitude": + coil_type = FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE + elif ch_type == "fnirs_fd_ac_amplitude": + coil_type = FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE + elif ch_type == "fnirs_fd_phase": + coil_type = FIFF.FIFFV_COIL_FNIRS_FD_PHASE + elif ch_type == "fnirs_od": + coil_type = FIFF.FIFFV_COIL_FNIRS_OD + elif ch_type == "eyetrack_pos": + coil_type = FIFF.FIFFV_COIL_EYETRACK_POS + elif ch_type == "eyetrack_pupil": + coil_type = FIFF.FIFFV_COIL_EYETRACK_PUPIL + else: + coil_type = FIFF.FIFFV_COIL_NONE + info["chs"][c_ind]["coil_type"] = coil_type + + msg = "The unit for channel(s) {0} has changed from {1} to {2}." + for this_change, names in unit_changes.items(): + _on_missing( + on_missing=on_unit_change, + msg=msg.format(", ".join(sorted(names)), *this_change), + name="on_unit_change", + ) + + return self + + @verbose + def rename_channels(self, mapping, allow_duplicates=False, *, verbose=None): + """Rename channels. + + Parameters + ---------- + %(mapping_rename_channels_duplicates)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The instance (modified in place). + + .. versionchanged:: 0.20 + Return the instance. + + Notes + ----- + .. 
versionadded:: 0.9.0 + """ + from ..channels.channels import rename_channels + from ..io import BaseRaw + + info = self if isinstance(self, Info) else self.info + + ch_names_orig = list(info["ch_names"]) + rename_channels(info, mapping, allow_duplicates) + + # Update self._orig_units for Raw + if isinstance(self, BaseRaw): + # whatever mapping was provided, now we can just use a dict + mapping = dict(zip(ch_names_orig, info["ch_names"])) + for old_name, new_name in mapping.items(): + if old_name in self._orig_units: + self._orig_units[new_name] = self._orig_units.pop(old_name) + ch_names = self.annotations.ch_names + for ci, ch in enumerate(ch_names): + ch_names[ci] = tuple(mapping.get(name, name) for name in ch) + + return self + + @verbose + def plot_sensors( + self, + kind="topomap", + ch_type=None, + title=None, + show_names=False, + ch_groups=None, + to_sphere=True, + axes=None, + block=False, + show=True, + sphere=None, + *, + verbose=None, + ): + """Plot sensor positions. + + Parameters + ---------- + kind : str + Whether to plot the sensors as 3d, topomap or as an interactive + sensor selection dialog. Available options 'topomap', '3d', + 'select'. If 'select', a set of channels can be selected + interactively by using lasso selector or clicking while holding + control key. The selected channels are returned along with the + figure instance. Defaults to 'topomap'. + ch_type : None | str + The channel type to plot. Available options ``'mag'``, ``'grad'``, + ``'eeg'``, ``'seeg'``, ``'dbs'``, ``'ecog'``, ``'all'``. If ``'all'``, all + the available mag, grad, eeg, seeg, dbs, and ecog channels are plotted. If + None (default), then channels are chosen in the order given above. + title : str | None + Title for the figure. If None (default), equals to ``'Sensor + positions (%%s)' %% ch_type``. + show_names : bool | array of str + Whether to display all channel names. If an array, only the channel + names in the array are shown. Defaults to False. + ch_groups : 'position' | array of shape (n_ch_groups, n_picks) | None + Channel groups for coloring the sensors. If None (default), default + coloring scheme is used. If 'position', the sensors are divided + into 8 regions. See ``order`` kwarg of :func:`mne.viz.plot_raw`. If + array, the channels are divided by picks given in the array. + + .. versionadded:: 0.13.0 + to_sphere : bool + Whether to project the 3d locations to a sphere. When False, the + sensor array appears similar as to looking downwards straight above + the subject's head. Has no effect when kind='3d'. Defaults to True. + + .. versionadded:: 0.14.0 + axes : instance of Axes | instance of Axes3D | None + Axes to draw the sensors to. If ``kind='3d'``, axes must be an + instance of Axes3D. If None (default), a new axes will be created. + + .. versionadded:: 0.13.0 + block : bool + Whether to halt program execution until the figure is closed. + Defaults to False. + + .. versionadded:: 0.13.0 + show : bool + Show figure if True. Defaults to True. + %(sphere_topomap_auto)s + %(verbose)s + + Returns + ------- + fig : instance of Figure + Figure containing the sensor topography. + selection : list + A list of selected channels. Only returned if ``kind=='select'``. + + See Also + -------- + mne.viz.plot_layout + + Notes + ----- + This function plots the sensor locations from the info structure using + matplotlib. For drawing the sensors using PyVista see + :func:`mne.viz.plot_alignment`. + + .. 
versionadded:: 0.12.0 + """ + from ..viz.utils import plot_sensors + + return plot_sensors( + self if isinstance(self, Info) else self.info, + kind=kind, + ch_type=ch_type, + title=title, + show_names=show_names, + ch_groups=ch_groups, + to_sphere=to_sphere, + axes=axes, + block=block, + show=show, + sphere=sphere, + verbose=verbose, + ) + + @verbose + def anonymize(self, daysback=None, keep_his=False, verbose=None): + """Anonymize measurement information in place. + + Parameters + ---------- + %(daysback_anonymize_info)s + %(keep_his_anonymize_info)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The modified instance. + + Notes + ----- + %(anonymize_info_notes)s + + .. versionadded:: 0.13.0 + """ + info = self if isinstance(self, Info) else self.info + anonymize_info(info, daysback=daysback, keep_his=keep_his, verbose=verbose) + self.set_meas_date(info["meas_date"]) # unify annot update + return self + + def set_meas_date(self, meas_date): + """Set the measurement start date. + + Parameters + ---------- + meas_date : datetime | float | tuple | None + The new measurement date. + If datetime object, it must be timezone-aware and in UTC. + A tuple of (seconds, microseconds) or float (alias for + ``(meas_date, 0)``) can also be passed and a datetime + object will be automatically created. If None, will remove + the time reference. + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The modified raw instance. Operates in place. + + See Also + -------- + mne.io.Raw.anonymize + + Notes + ----- + If you want to remove all time references in the file, call + :func:`mne.io.anonymize_info(inst.info) ` + after calling ``inst.set_meas_date(None)``. + + .. versionadded:: 0.20 + """ + from ..annotations import _handle_meas_date + + info = self if isinstance(self, Info) else self.info + + meas_date = _handle_meas_date(meas_date) + with info._unlock(): + info["meas_date"] = meas_date + + # clear file_id and meas_id if needed + if meas_date is None: + for key in ("file_id", "meas_id"): + value = info.get(key) + if value is not None: + assert "msecs" not in value + value["secs"] = DATE_NONE[0] + value["usecs"] = DATE_NONE[1] + # The following copy is needed for a test CTF dataset + # otherwise value['machid'][:] = 0 would suffice + _tmp = value["machid"].copy() + _tmp[:] = 0 + value["machid"] = _tmp + + if hasattr(self, "annotations"): + self.annotations._orig_time = meas_date + return self + + +class ContainsMixin: + """Mixin class for Raw, Evoked, Epochs and Info.""" + + def __contains__(self, ch_type): + """Check channel type membership. + + Parameters + ---------- + ch_type : str + Channel type to check for. Can be e.g. ``'meg'``, ``'eeg'``, + ``'stim'``, etc. + + Returns + ------- + in : bool + Whether or not the instance contains the given channel type. + + Examples + -------- + Channel type membership can be tested as:: + + >>> 'meg' in inst # doctest: +SKIP + True + >>> 'seeg' in inst # doctest: +SKIP + False + + """ + # this method is not supported by Info object. An Info object inherits from a + # dictionary and the 'key' in Info call is present all across MNE codebase, e.g. 
+ # to check for the presence of a key: + # >>> 'bads' in info + if ch_type == "meg": + has_ch_type = _contains_ch_type(self.info, "mag") or _contains_ch_type( + self.info, "grad" + ) + else: + has_ch_type = _contains_ch_type(self.info, ch_type) + return has_ch_type + + @property + def compensation_grade(self): + """The current gradient compensation grade.""" + info = self if isinstance(self, Info) else self.info + return get_current_comp(info) + + @fill_doc + def get_channel_types(self, picks=None, unique=False, only_data_chs=False): + """Get a list of channel type for each channel. + + Parameters + ---------- + %(picks_all)s + unique : bool + Whether to return only unique channel types. Default is ``False``. + only_data_chs : bool + Whether to ignore non-data channels. Default is ``False``. + + Returns + ------- + channel_types : list + The channel types. + """ + info = self if isinstance(self, Info) else self.info + none = "data" if only_data_chs else "all" + picks = _picks_to_idx(info, picks, none, (), allow_empty=False) + ch_types = [channel_type(info, pick) for pick in picks] + if only_data_chs: + ch_types = [ + ch_type for ch_type in ch_types if ch_type in _DATA_CH_TYPES_SPLIT + ] + if unique: + # set does not preserve order but dict does, so let's just use it + ch_types = list({k: k for k in ch_types}.keys()) + return ch_types + + +# %% ValidatedDict class + + +class ValidatedDict(dict): + _attributes = {} # subclasses should set this to validated attributes + + def __init__(self, *args, **kwargs): + self._unlocked = True + super().__init__(*args, **kwargs) + self._unlocked = False + + def __getstate__(self): + """Get state (for pickling).""" + return {"_unlocked": self._unlocked} + + def __setstate__(self, state): + """Set state (for pickling).""" + self._unlocked = state["_unlocked"] + + def __setitem__(self, key, val): + """Attribute setter.""" + # During unpickling, the _unlocked attribute has not been set, so + # let __setstate__ do it later and act unlocked now + unlocked = getattr(self, "_unlocked", True) + if key in self._attributes: + if isinstance(self._attributes[key], str): + if not unlocked: + raise RuntimeError(self._attributes[key]) + else: + val = self._attributes[key]( + val, info=self + ) # attribute checker function + else: + class_name = self.__class__.__name__ + extra = "" + if "temp" in self._attributes: + var_name = _camel_to_snake(class_name) + extra = ( + f"You can set {var_name}['temp'] to store temporary objects in " + f"{class_name} instances, but these will not survive an I/O " + "round-trip." + ) + raise RuntimeError( + f"{class_name} does not support directly setting the key {repr(key)}. " + + extra + ) + super().__setitem__(key, val) + + def update(self, other=None, **kwargs): + """Update method using __setitem__().""" + iterable = other.items() if isinstance(other, Mapping) else other + if other is not None: + for key, val in iterable: + self[key] = val + for key, val in kwargs.items(): + self[key] = val + + def copy(self): + """Copy the instance. + + Returns + ------- + info : instance of Info + The copied info. 
+ """ + return deepcopy(self) + + def __repr__(self): + """Return a string representation.""" + mapping = ", ".join(f"{key}: {val}" for key, val in self.items()) + return f"<{_camel_to_snake(self.__class__.__name__)} | {mapping}>" + + +# %% Subject info + + +def _check_types(x, *, info, name, types, cast=None): + _validate_type(x, types, name) + if cast is not None and x is not None: + x = cast(x) + return x + + +class SubjectInfo(ValidatedDict): + _attributes = { + "id": partial(_check_types, name='subject_info["id"]', types=int), + "his_id": partial(_check_types, name='subject_info["his_id"]', types=str), + "last_name": partial(_check_types, name='subject_info["last_name"]', types=str), + "first_name": partial( + _check_types, name='subject_info["first_name"]', types=str + ), + "middle_name": partial( + _check_types, name='subject_info["middle_name"]', types=str + ), + "birthday": partial( + _check_types, name='subject_info["birthday"]', types=(datetime.date, None) + ), + "sex": partial(_check_types, name='subject_info["sex"]', types=int), + "hand": partial(_check_types, name='subject_info["hand"]', types=int), + "weight": partial( + _check_types, name='subject_info["weight"]', types="numeric", cast=float + ), + "height": partial( + _check_types, name='subject_info["height"]', types="numeric", cast=float + ), + } + + def __init__(self, initial): + _validate_type(initial, dict, "subject_info") + super().__init__() + for key, val in initial.items(): + self[key] = val + + +class HeliumInfo(ValidatedDict): + _attributes = { + "he_level_raw": partial( + _check_types, + name='helium_info["he_level_raw"]', + types="numeric", + cast=float, + ), + "helium_level": partial( + _check_types, + name='helium_info["helium_level"]', + types="numeric", + cast=float, + ), + "orig_file_guid": partial( + _check_types, name='helium_info["orig_file_guid"]', types=str + ), + "meas_date": partial( + _check_types, + name='helium_info["meas_date"]', + types=(datetime.datetime, None), + ), + } + + def __init__(self, initial): + _validate_type(initial, dict, "helium_info") + super().__init__() + for key, val in initial.items(): + self[key] = val + + +# %% Info class and helpers + + +def _format_trans(obj, key): + from ..transforms import Transform + + try: + t = obj[key] + except KeyError: + pass + else: + if t is not None: + obj[key] = Transform(t["from"], t["to"], t["trans"]) + + +def _check_ch_keys(ch, ci, name='info["chs"]', check_min=True): + ch_keys = set(ch) + bad = sorted(ch_keys.difference(_ALL_CH_KEYS_SET)) + if bad: + raise KeyError(f"key{_pl(bad)} errantly present for {name}[{ci}]: {bad}") + if check_min: + bad = sorted(_MIN_CH_KEYS_SET.difference(ch_keys)) + if bad: + raise KeyError( + f"key{_pl(bad)} missing for {name}[{ci}]: {bad}", + ) + + +def _check_bads_info_compat(bads, info): + _validate_type(bads, list, "bads") + if not len(bads): + return # e.g. in empty_info + for bi, bad in enumerate(bads): + _validate_type(bad, str, f"bads[{bi}]") + if "ch_names" not in info: # somewhere in init, or deepcopy, or _empty_info, etc. 
+        return
+    missing = [bad for bad in bads if bad not in info["ch_names"]]
+    if len(missing) > 0:
+        raise ValueError(f"bad channel(s) {missing} marked as bad do not exist in info")
+
+
+class MNEBadsList(list):
+    """Subclass of bads that checks inplace operations."""
+
+    def __init__(self, *, bads, info):
+        _check_bads_info_compat(bads, info)
+        self._mne_info = info
+        super().__init__(bads)
+
+    def extend(self, iterable):
+        if not isinstance(iterable, list):
+            iterable = list(iterable)
+        # can happen during pickling
+        try:
+            info = self._mne_info
+        except AttributeError:
+            pass  # can happen during pickling
+        else:
+            _check_bads_info_compat(iterable, info)
+        return super().extend(iterable)
+
+    def append(self, x):
+        return self.extend([x])
+
+    def __iadd__(self, x):
+        self.extend(x)
+        return self
+
+
+# As options are added here, test_meas_info.py:test_info_bad should be updated
+def _check_bads(bads, *, info):
+    return MNEBadsList(bads=bads, info=info)
+
+
+def _check_dev_head_t(dev_head_t, *, info):
+    from ..transforms import Transform, _ensure_trans
+
+    _validate_type(dev_head_t, (Transform, None), "info['dev_head_t']")
+    if dev_head_t is not None:
+        dev_head_t = _ensure_trans(dev_head_t, "meg", "head")
+    return dev_head_t
+
+
+# TODO: Add fNIRS convention to loc
+class Info(ValidatedDict, SetChannelsMixin, MontageMixin, ContainsMixin):
+    """Measurement information.
+
+    This data structure behaves like a dictionary. It contains all metadata
+    that is available for a recording. However, its keys are restricted to
+    those provided by the
+    `FIF format specification <https://github.com/mne-tools/fiff-constants>`__,
+    so new entries should not be manually added.
+
+    .. note::
+        This class should not be instantiated directly via
+        ``mne.Info(...)``. Instead, use :func:`mne.create_info` to create
+        measurement information from scratch.
+
+    .. warning::
+        The only entries that should be manually changed by the user are:
+        ``info['bads']``, ``info['description']``, ``info['device_info']``,
+        ``info['dev_head_t']``, ``info['experimenter']``,
+        ``info['helium_info']``, ``info['line_freq']``, ``info['temp']``,
+        and ``info['subject_info']``.
+
+        All other entries should be considered read-only, though they can be
+        modified by various MNE-Python functions or methods (which have
+        safeguards to ensure all fields remain in sync).
+
+    Parameters
+    ----------
+    *args : list
+        Arguments.
+    **kwargs : dict
+        Keyword arguments.
+
+    Attributes
+    ----------
+    acq_pars : str | None
+        MEG system acquisition parameters.
+        See :class:`mne.AcqParserFIF` for details.
+    acq_stim : str | None
+        MEG system stimulus parameters.
+    bads : list of str
+        List of bad (noisy/broken) channels, by name. These channels will by
+        default be ignored by many processing steps.
+    ch_names : list of str
+        The names of the channels.
+    chs : list of dict
+        A list of channel information dictionaries, one per channel.
+        See Notes for more information.
+    command_line : str
+        Contains the command and arguments used to create the source space
+        (used for source estimation).
+    comps : list of dict
+        CTF software gradient compensation data.
+        See Notes for more information.
+    ctf_head_t : Transform | None
+        The transformation from 4D/CTF head coordinates to Neuromag head
+        coordinates. This is only present in 4D/CTF data.
+    custom_ref_applied : int
+        Whether a custom (=other than an average projector) reference has been
+        applied to the EEG data. This flag is checked by some algorithms that
+        require an average reference to be set.
+    description : str | None
+        String description of the recording.
+    dev_ctf_t : Transform | None
+        The transformation from device coordinates to 4D/CTF head coordinates.
+        This is only present in 4D/CTF data.
+    dev_head_t : Transform | None
+        The device to head transformation.
+    device_info : dict | None
+        Information about the acquisition device. See Notes for details.
+
+        .. versionadded:: 0.19
+    dig : list of dict | None
+        The Polhemus digitization data in head coordinates.
+        See Notes for more information.
+    events : list of dict
+        Event list, sometimes extracted from the stim channels by Neuromag
+        systems. In general this should not be used and
+        :func:`mne.find_events` should be used for event processing.
+        See Notes for more information.
+    experimenter : str | None
+        Name of the person that ran the experiment.
+    file_id : dict | None
+        The FIF globally unique ID. See Notes for more information.
+    gantry_angle : float | None
+        Tilt angle of the gantry in degrees.
+    helium_info : dict | None
+        Information about the device helium. See Notes for details.
+
+        .. versionadded:: 0.19
+    highpass : float
+        Highpass corner frequency in Hertz. Zero indicates a DC recording.
+    hpi_meas : list of dict
+        HPI measurements that were taken at the start of the recording
+        (e.g. coil frequencies).
+        See Notes for details.
+    hpi_results : list of dict
+        Head position indicator (HPI) digitization points and fit information
+        (e.g., the resulting transform).
+        See Notes for details.
+    hpi_subsystem : dict | None
+        Information about the HPI subsystem that was used (e.g., event
+        channel used for cHPI measurements).
+        See Notes for details.
+    kit_system_id : int
+        Identifies the KIT system.
+    line_freq : float | None
+        Frequency of the power line in Hertz.
+    lowpass : float
+        Lowpass corner frequency in Hertz.
+        It is automatically set to half the sampling rate if there is
+        otherwise no low-pass applied to the data.
+    maxshield : bool
+        True if active shielding (IAS) was active during recording.
+    meas_date : datetime
+        The time (UTC) of the recording.
+
+        .. versionchanged:: 0.20
+            This is stored as a :class:`~python:datetime.datetime` object
+            instead of a tuple of seconds/microseconds.
+    meas_file : str | None
+        Raw measurement file (used for source estimation).
+    meas_id : dict | None
+        The ID assigned to this measurement by the acquisition system or
+        during file conversion. Follows the same format as ``file_id``.
+    mri_file : str | None
+        File containing the MRI to head transformation (used for source
+        estimation).
+    mri_head_t : dict | None
+        Transformation from MRI to head coordinates (used for source
+        estimation).
+    mri_id : dict | None
+        MRI unique ID (used for source estimation).
+    nchan : int
+        Number of channels.
+    proc_history : list of dict
+        The MaxFilter processing history.
+        See Notes for details.
+    proj_id : int | None
+        ID number of the project the experiment belongs to.
+    proj_name : str | None
+        Name of the project the experiment belongs to.
+    projs : list of Projection
+        List of SSP operators that operate on the data.
+        See :class:`mne.Projection` for details.
+    sfreq : float
+        Sampling frequency in Hertz.
+    subject_info : dict | None
+        Information about the subject.
+        See Notes for details.
+    temp : object | None
+        Can be used to store temporary objects in an Info instance. It will not
+        survive an I/O roundtrip.
+
+        .. versionadded:: 0.24
+    utc_offset : str
+        UTC offset of related meas_date (sHH:MM).
+
+        .. versionadded:: 0.19
+    working_dir : str
+        Working directory used when the source space was created (used for
+        source estimation).
+    xplotter_layout : str
+        Layout of the Xplotter (Neuromag system only).
+
+    See Also
+    --------
+    mne.create_info
+
+    Notes
+    -----
+    The following parameters have a nested structure.
+
+    * ``chs`` list of dict:
+
+        cal : float
+            The calibration factor to bring the channels to physical
+            units. Used in product with ``range`` to scale the data read
+            from disk.
+        ch_name : str
+            The channel name.
+        coil_type : int
+            Coil type, e.g. ``FIFFV_COIL_MEG``.
+        coord_frame : int
+            The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``.
+        kind : int
+            The kind of channel, e.g. ``FIFFV_EEG_CH``.
+        loc : array, shape (12,)
+            Channel location information. The first three elements ``[:3]``
+            always store the nominal channel position. The remaining 9
+            elements store different information based on the channel type:
+
+            MEG
+                Remaining 9 elements ``[3:]`` contain the EX, EY, and EZ normal
+                triplets (columns) of the coil rotation/orientation matrix.
+            EEG
+                Elements ``[3:6]`` contain the reference channel position.
+            Eyetrack
+                Element ``[3]`` contains information about which eye was tracked
+                (-1 for left, 1 for right), and element ``[4]`` contains
+                information about the axis of coordinate data (-1 for
+                x-coordinate data, 1 for y-coordinate data).
+            Dipole
+                Elements ``[3:6]`` contain dipole orientation information.
+        logno : int
+            Logical channel number, conventions in the usage of this
+            number vary.
+        range : float
+            The hardware-oriented part of the calibration factor.
+            This should be only applied to the continuous raw data.
+            Used in product with ``cal`` to scale data read from disk.
+        scanno : int
+            Scanning order number, starting from 1.
+        unit : int
+            The unit to use, e.g. ``FIFF_UNIT_T_M``.
+        unit_mul : int
+            Unit multipliers, most commonly ``FIFF_UNITM_NONE``.
+
+    * ``comps`` list of dict:
+
+        ctfkind : int
+            CTF compensation grade.
+        colcals : ndarray
+            Column calibrations.
+        mat : dict
+            A named matrix dictionary (with entries "data", "col_names", etc.)
+            containing the compensation matrix.
+        rowcals : ndarray
+            Row calibrations.
+        save_calibrated : bool
+            Were the compensation data saved in calibrated form.
+
+    * ``device_info`` dict:
+
+        type : str
+            Device type.
+        model : str
+            Device model.
+        serial : str
+            Device serial.
+        site : str
+            Device site.
+
+    * ``dig`` list of dict:
+
+        kind : int
+            The kind of channel,
+            e.g. ``FIFFV_POINT_EEG``, ``FIFFV_POINT_CARDINAL``.
+        r : array, shape (3,)
+            3D position, in m, in the coordinate frame given by ``coord_frame``.
+        ident : int
+            Number specifying the identity of the point.
+            e.g. ``FIFFV_POINT_NASION`` if kind is ``FIFFV_POINT_CARDINAL``, or
+            42 if kind is ``FIFFV_POINT_EEG``.
+        coord_frame : int
+            The coordinate frame used, e.g. ``FIFFV_COORD_HEAD``.
+
+    * ``events`` list of dict:
+
+        channels : list of int
+            Channel indices for the events.
+        list : ndarray, shape (n_events * 3,)
+            Events in triplets as number of samples, before, after.
+
+    * ``file_id`` dict:
+
+        version : int
+            FIF format version, i.e. ``FIFFC_VERSION``.
+        machid : ndarray, shape (2,)
+            Unique machine ID, usually derived from the MAC address.
+        secs : int
+            Time in seconds.
+        usecs : int
+            Time in microseconds.
+
+    * ``helium_info`` dict:
+
+        he_level_raw : float
+            Helium level (%) before position correction.
+        helium_level : float
+            Helium level (%) after position correction.
+        orig_file_guid : str
+            Original file GUID.
+        meas_date : datetime.datetime
+            The helium level measurement date.
+
+            .. versionchanged:: 1.8
+                This is stored as a :class:`~python:datetime.datetime` object
+                instead of a tuple of seconds/microseconds.
+
+    * ``hpi_meas`` list of dict:
+
+        creator : str
+            Program that did the measurement.
+        sfreq : float
+            Sample rate.
+        nchan : int
+            Number of channels used.
+        nave : int
+            Number of averages used.
+        ncoil : int
+            Number of coils used.
+        first_samp : int
+            First sample used.
+        last_samp : int
+            Last sample used.
+        hpi_coils : list of dict
+            Coils, containing:
+
+                number : int
+                    Coil number
+                epoch : ndarray
+                    Buffer containing one epoch and channel.
+                slopes : ndarray, shape (n_channels,)
+                    HPI data.
+                corr_coeff : ndarray, shape (n_channels,)
+                    HPI curve fit correlations.
+                coil_freq : float
+                    HPI coil excitation frequency
+
+    * ``hpi_results`` list of dict:
+
+        dig_points : list
+            Digitization points (see ``dig`` definition) for the HPI coils.
+        order : ndarray, shape (ncoil,)
+            The determined digitization order.
+        used : ndarray, shape (nused,)
+            The indices of the used coils.
+        moments : ndarray, shape (ncoil, 3)
+            The coil moments.
+        goodness : ndarray, shape (ncoil,)
+            The goodness of fits.
+        good_limit : float
+            The goodness of fit limit.
+        dist_limit : float
+            The distance limit.
+        accept : int
+            Whether or not the fit was accepted.
+        coord_trans : instance of Transform
+            The resulting MEG<->head transformation.
+
+    * ``hpi_subsystem`` dict:
+
+        ncoil : int
+            The number of coils.
+        event_channel : str
+            The event channel used to encode cHPI status (e.g., STI201).
+        hpi_coils : list of ndarray
+            List of length ``ncoil``, each 4-element ndarray contains the
+            event bits used on the event channel to indicate cHPI status
+            (using the first element of these arrays is typically
+            sufficient).
+
+    * ``mri_id`` dict:
+
+        version : int
+            FIF format version, i.e. ``FIFFC_VERSION``.
+        machid : ndarray, shape (2,)
+            Unique machine ID, usually derived from the MAC address.
+        secs : int
+            Time in seconds.
+        usecs : int
+            Time in microseconds.
+
+    * ``proc_history`` list of dict:
+
+        block_id : dict
+            See ``id`` above.
+        date : ndarray, shape (2,)
+            2-element tuple of seconds and microseconds.
+        experimenter : str
+            Name of the person who ran the program.
+        creator : str
+            Program that did the processing.
+        max_info : dict
+            Maxwell filtering info, can contain:
+
+                sss_info : dict
+                    SSS processing information.
+                max_st : dict
+                    tSSS processing information.
+                sss_ctc : dict
+                    Cross-talk processing information.
+                sss_cal : dict
+                    Fine-calibration information.
+        smartshield : dict
+            MaxShield information. This dictionary is (always?) empty,
+            but its presence implies that MaxShield was used during
+            acquisition.
+
+    * ``subject_info`` dict:
+
+        id : int
+            Integer subject identifier.
+        his_id : str
+            String subject identifier.
+        last_name : str
+            Last name.
+        first_name : str
+            First name.
+        middle_name : str
+            Middle name.
+        birthday : datetime.date
+            The subject birthday.
+
+            .. versionchanged:: 1.8
+                This is stored as a :class:`~python:datetime.date` object
+                instead of a tuple of seconds/microseconds.
+        sex : int
+            Subject sex (0=unknown, 1=male, 2=female).
+        hand : int
+            Handedness (1=right, 2=left, 3=ambidextrous).
+        weight : float
+            Weight in kilograms.
+        height : float
+            Height in meters.
+    """
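+
+    # Illustrative sketch of the validation behavior described above (the
+    # session below is hypothetical): keys that are not user-settable raise,
+    # while validated keys such as ``bads`` are checked against ``ch_names``:
+    #
+    #     >>> info["sfreq"] = 250.0        # doctest: +SKIP
+    #     RuntimeError: sfreq cannot be set directly. ...
+    #     >>> info["bads"] = ["EEG 053"]   # doctest: +SKIP  (validated, OK)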
+
+    _attributes = {
+        "acq_pars": "acq_pars cannot be set directly. "
+        "See mne.AcqParserFIF() for details.",
" + "See mne.AcqParserFIF() for details.", + "acq_stim": "acq_stim cannot be set directly.", + "bads": _check_bads, + "ch_names": "ch_names cannot be set directly. " + "Please use methods inst.add_channels(), " + "inst.drop_channels(), inst.pick(), " + "inst.rename_channels(), inst.reorder_channels() " + "and inst.set_channel_types() instead.", + "chs": "chs cannot be set directly. " + "Please use methods inst.add_channels(), " + "inst.drop_channels(), inst.pick(), " + "inst.rename_channels(), inst.reorder_channels() " + "and inst.set_channel_types() instead.", + "command_line": "command_line cannot be set directly.", + "comps": "comps cannot be set directly. " + "Please use method Raw.apply_gradient_compensation() " + "instead.", + "ctf_head_t": "ctf_head_t cannot be set directly.", + "custom_ref_applied": "custom_ref_applied cannot be set directly. " + "Please use method inst.set_eeg_reference() " + "instead.", + "description": partial(_check_types, name="description", types=(str, None)), + "dev_ctf_t": "dev_ctf_t cannot be set directly.", + "dev_head_t": _check_dev_head_t, + "device_info": partial(_check_types, name="device_info", types=(dict, None)), + "dig": "dig cannot be set directly. " + "Please use method inst.set_montage() instead.", + "events": "events cannot be set directly.", + "experimenter": partial(_check_types, name="experimenter", types=(str, None)), + "file_id": "file_id cannot be set directly.", + "gantry_angle": "gantry_angle cannot be set directly.", + "helium_info": partial( + _check_types, name="helium_info", types=(dict, None), cast=HeliumInfo + ), + "highpass": "highpass cannot be set directly. " + "Please use method inst.filter() instead.", + "hpi_meas": "hpi_meas can not be set directly.", + "hpi_results": "hpi_results cannot be set directly.", + "hpi_subsystem": "hpi_subsystem cannot be set directly.", + "kit_system_id": "kit_system_id cannot be set directly.", + "line_freq": partial( + _check_types, name="line_freq", types=("numeric", None), cast=float + ), + "lowpass": "lowpass cannot be set directly. " + "Please use method inst.filter() instead.", + "maxshield": "maxshield cannot be set directly.", + "meas_date": "meas_date cannot be set directly. " + "Please use method inst.set_meas_date() instead.", + "meas_file": "meas_file cannot be set directly.", + "meas_id": "meas_id cannot be set directly.", + "mri_file": "mri_file cannot be set directly.", + "mri_head_t": "mri_head_t cannot be set directly.", + "mri_id": "mri_id cannot be set directly.", + "nchan": "nchan cannot be set directly. " + "Please use methods inst.add_channels(), " + "inst.drop_channels(), and inst.pick() instead.", + "proc_history": "proc_history cannot be set directly.", + "proj_id": "proj_id cannot be set directly.", + "proj_name": "proj_name cannot be set directly.", + "projs": "projs cannot be set directly. " + "Please use methods inst.add_proj() and inst.del_proj() " + "instead.", + "sfreq": "sfreq cannot be set directly. 
" + "Please use method inst.resample() instead.", + "subject_info": partial( + _check_types, name="subject_info", types=(dict, None), cast=SubjectInfo + ), + "temp": lambda x, info=None: x, + "utc_offset": "utc_offset cannot be set directly.", + "working_dir": "working_dir cannot be set directly.", + "xplotter_layout": "xplotter_layout cannot be set directly.", + } + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._unlocked = True + # Deal with h5io writing things as dict + if "bads" in self: + self["bads"] = MNEBadsList(bads=self["bads"], info=self) + for key in ("dev_head_t", "ctf_head_t", "dev_ctf_t"): + _format_trans(self, key) + for res in self.get("hpi_results", []): + _format_trans(res, "coord_trans") + if self.get("dig", None) is not None and len(self["dig"]): + if isinstance(self["dig"], dict): # needs to be unpacked + self["dig"] = _dict_unpack(self["dig"], _DIG_CAST) + if not isinstance(self["dig"][0], DigPoint): + self["dig"] = _format_dig_points(self["dig"]) + if isinstance(self.get("chs", None), dict): + self["chs"]["ch_name"] = [ + str(x) for x in np.char.decode(self["chs"]["ch_name"], encoding="utf8") + ] + self["chs"] = _dict_unpack(self["chs"], _CH_CAST) + for pi, proj in enumerate(self.get("projs", [])): + if not isinstance(proj, Projection): + self["projs"][pi] = Projection(**proj) + # Old files could have meas_date as tuple instead of datetime + try: + meas_date = self["meas_date"] + except KeyError: + pass + else: + self["meas_date"] = _ensure_meas_date_none_or_dt(meas_date) + self._unlocked = False + # with validation and casting + for key in ("helium_info", "subject_info"): + if key in self: + self[key] = self[key] + + def __setstate__(self, state): + """Set state (for pickling).""" + super().__setstate__(state) + self["bads"] = MNEBadsList(bads=self["bads"], info=self) + + @contextlib.contextmanager + def _unlock(self, *, update_redundant=False, check_after=False): + """Context manager unlocking access to attributes.""" + # needed for nested _unlock() + state = self._unlocked if hasattr(self, "_unlocked") else False + + self._unlocked = True + try: + yield + except Exception: + raise + else: + if update_redundant: + self._update_redundant() + if check_after: + self._check_consistency() + finally: + self._unlocked = state + + def normalize_proj(self): + """(Re-)Normalize projection vectors after subselection. + + Applying projection after sub-selecting a set of channels that + were originally used to compute the original projection vectors + can be dangerous (e.g., if few channels remain, most power was + in channels that are no longer picked, etc.). By default, mne + will emit a warning when this is done. + + This function will re-normalize projectors to use only the + remaining channels, thus avoiding that warning. Only use this + function if you're confident that the projection vectors still + adequately capture the original signal of interest. 
+ """ + _normalize_proj(self) + + def __repr__(self): + """Summarize info instead of printing all.""" + from ..io.kit.constants import KIT_SYSNAMES + from ..transforms import Transform, _coord_frame_name + + MAX_WIDTH = 68 + strs = [" {frame2} transform" + else: + entr = "" + elif k in ["sfreq", "lowpass", "highpass"]: + entr = f"{v:.1f} Hz" + elif isinstance(v, str): + entr = shorten(v, MAX_WIDTH, placeholder=" ...") + elif k == "chs": + # TODO someday we should refactor with _repr_html_ with + # bad vs good + ch_types = [channel_type(self, idx) for idx in range(len(v))] + ch_counts = Counter(ch_types) + entr = ", ".join( + f"{count} {titles.get(ch_type, ch_type.upper())}" + for ch_type, count in ch_counts.items() + ) + elif k == "custom_ref_applied": + entr = str(bool(v)) + if not v: + non_empty -= 1 # don't count if 0 + elif isinstance(v, ValidatedDict): + entr = repr(v) + else: + try: + this_len = len(v) + except TypeError: + entr = f"{v}" if v is not None else "" + else: + if this_len > 0: + entr = f"{this_len} item{_pl(this_len)} ({type(v).__name__})" + else: + entr = "" + if entr != "": + non_empty += 1 + strs.append(f"{k}: {entr}") + st = "\n ".join(sorted(strs)) + st += "\n>" + st %= non_empty + return st + + def __deepcopy__(self, memodict): + """Make a deepcopy.""" + result = Info.__new__(Info) + result._unlocked = True + for k, v in self.items(): + # chs is roughly half the time but most are immutable + if k == "chs": + # dict shallow copy is fast, so use it then overwrite + result[k] = list() + for ch in v: + ch = ch.copy() # shallow + ch["loc"] = ch["loc"].copy() + result[k].append(ch) + elif k == "ch_names": + # we know it's list of str, shallow okay and saves ~100 µs + result[k] = v.copy() + elif k == "hpi_meas": + hms = list() + for hm in v: + hm = hm.copy() + # the only mutable thing here is some entries in coils + hm["hpi_coils"] = [coil.copy() for coil in hm["hpi_coils"]] + # There is a *tiny* risk here that someone could write + # raw.info['hpi_meas'][0]['hpi_coils'][1]['epoch'] = ... 
+ # and assume that info.copy() will make an actual copy, + # but copying these entries has a 2x slowdown penalty so + # probably not worth it for such a deep corner case: + # for coil in hpi_coils: + # for key in ('epoch', 'slopes', 'corr_coeff'): + # coil[key] = coil[key].copy() + hms.append(hm) + result[k] = hms + else: + result[k] = deepcopy(v, memodict) + result._unlocked = False + return result + + def _check_consistency(self, prepend_error=""): + """Do some self-consistency checks and datatype tweaks.""" + meas_date = self.get("meas_date") + if meas_date is not None: + if ( + not isinstance(self["meas_date"], datetime.datetime) + or self["meas_date"].tzinfo is None + or self["meas_date"].tzinfo is not datetime.timezone.utc + ): + raise RuntimeError( + f'{prepend_error}info["meas_date"] must be a datetime object in UTC' + f' or None, got {repr(self["meas_date"])!r}' + ) + + chs = [ch["ch_name"] for ch in self["chs"]] + if ( + len(self["ch_names"]) != len(chs) + or any(ch_1 != ch_2 for ch_1, ch_2 in zip(self["ch_names"], chs)) + or self["nchan"] != len(chs) + ): + raise RuntimeError( + f"{prepend_error}info channel name inconsistency detected, please " + "notify MNE-Python developers" + ) + + # make sure we have the proper datatypes + with self._unlock(): + for key in ("sfreq", "highpass", "lowpass"): + if self.get(key) is not None: + self[key] = float(self[key]) + + for pi, proj in enumerate(self.get("projs", [])): + _validate_type(proj, Projection, f'info["projs"][{pi}]') + for key in ("kind", "active", "desc", "data", "explained_var"): + if key not in proj: + raise RuntimeError(f"Projection incomplete, missing {key}") + + # Ensure info['chs'] has immutable entries (copies much faster) + for ci, ch in enumerate(self["chs"]): + _check_ch_keys(ch, ci) + ch_name = ch["ch_name"] + _validate_type(ch_name, str, f'info["chs"][{ci}]["ch_name"]') + for key in _SCALAR_CH_KEYS: + val = ch.get(key, 1) + _validate_type(val, "numeric", f'info["chs"][{ci}][{key}]') + loc = ch["loc"] + if not (isinstance(loc, np.ndarray) and loc.shape == (12,)): + raise TypeError( + f'Bad info: info["chs"][{ci}]["loc"] must be ndarray with ' + f"12 elements, got {repr(loc)}" + ) + + # make sure channel names are unique + with self._unlock(): + self["ch_names"] = _unique_channel_names(self["ch_names"]) + for idx, ch_name in enumerate(self["ch_names"]): + self["chs"][idx]["ch_name"] = ch_name + + def _update_redundant(self): + """Update the redundant entries.""" + with self._unlock(): + self["ch_names"] = [ch["ch_name"] for ch in self["chs"]] + self["nchan"] = len(self["chs"]) + + @property + def ch_names(self): + try: + ch_names = self["ch_names"] + except KeyError: + ch_names = [] + + return ch_names + + @repr_html + def _repr_html_(self): + """Summarize info for HTML representation.""" + info_template = _get_html_template("repr", "info.html.jinja") + return info_template.render(info=self) + + def save(self, fname): + """Write measurement info in fif file. + + Parameters + ---------- + fname : path-like + The name of the file. Should end by ``'-info.fif'``. 
+ """ + write_info(fname, self) + + +def _simplify_info(info, *, keep=()): + """Return a simplified info structure to speed up picking.""" + chs = [ + {key: ch[key] for key in ("ch_name", "kind", "unit", "coil_type", "loc", "cal")} + for ch in info["chs"] + ] + keys = ("bads", "comps", "projs", "custom_ref_applied") + keep + sub_info = Info((key, info[key]) for key in keys if key in info) + with sub_info._unlock(): + sub_info["chs"] = chs + sub_info._update_redundant() + return sub_info + + +@verbose +def read_fiducials(fname, verbose=None): + """Read fiducials from a fiff file. + + Parameters + ---------- + fname : path-like + The filename to read. + %(verbose)s + + Returns + ------- + pts : list of dict + List of digitizer points (each point in a dict). + coord_frame : int + The coordinate frame of the points (one of + ``mne.io.constants.FIFF.FIFFV_COORD_...``). + """ + fname = _check_fname(fname=fname, overwrite="read", must_exist=True) + fid, tree, _ = fiff_open(fname) + with fid: + isotrak = dir_tree_find(tree, FIFF.FIFFB_ISOTRAK) + isotrak = isotrak[0] + pts = [] + coord_frame = FIFF.FIFFV_COORD_HEAD + for k in range(isotrak["nent"]): + kind = isotrak["directory"][k].kind + pos = isotrak["directory"][k].pos + if kind == FIFF.FIFF_DIG_POINT: + tag = read_tag(fid, pos) + pts.append(DigPoint(tag.data)) + elif kind == FIFF.FIFF_MNE_COORD_FRAME: + tag = read_tag(fid, pos) + coord_frame = tag.data[0] + coord_frame = _coord_frame_named.get(coord_frame, coord_frame) + + # coord_frame is not stored in the tag + for pt in pts: + pt["coord_frame"] = coord_frame + + return pts, coord_frame + + +@verbose +def write_fiducials( + fname, pts, coord_frame="unknown", *, overwrite=False, verbose=None +): + """Write fiducials to a fiff file. + + Parameters + ---------- + fname : path-like + Destination file name. + pts : iterator of dict + Iterator through digitizer points. Each point is a dictionary with + the keys 'kind', 'ident' and 'r'. + coord_frame : str | int + The coordinate frame of the points. If a string, must be one of + ``'meg'``, ``'mri'``, ``'mri_voxel'``, ``'head'``, + ``'mri_tal'``, ``'ras'``, ``'fs_tal'``, ``'ctf_head'``, + ``'ctf_meg'``, and ``'unknown'`` + If an integer, must be one of the constants defined as + ``mne.io.constants.FIFF.FIFFV_COORD_...``. + %(overwrite)s + + .. versionadded:: 1.0 + %(verbose)s + """ + write_dig(fname, pts, coord_frame, overwrite=overwrite) + + +@verbose +def read_info(fname, verbose=None): + """Read measurement info from a file. + + Parameters + ---------- + fname : path-like + File name. + %(verbose)s + + Returns + ------- + %(info_not_none)s + """ + check_fname(fname, "Info", (".fif", ".fif.gz")) + fname = _check_fname(fname, must_exist=True, overwrite="read") + f, tree, _ = fiff_open(fname) + with f as fid: + info = read_meas_info(fid, tree)[0] + return info + + +def read_bad_channels(fid, node): + """Read bad channels. + + Parameters + ---------- + fid : file + The file descriptor. + node : dict + The node of the FIF tree that contains info on the bad channels. + + Returns + ------- + bads : list + A list of bad channel's names. 
+ """ + return _read_bad_channels(fid, node) + + +def _read_bad_channels(fid, node, ch_names_mapping): + ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping + nodes = dir_tree_find(node, FIFF.FIFFB_MNE_BAD_CHANNELS) + + bads = [] + if len(nodes) > 0: + for node in nodes: + tag = find_tag(fid, node, FIFF.FIFF_MNE_CH_NAME_LIST) + if tag is not None and tag.data is not None: + bads = _safe_name_list(tag.data, "read", "bads") + bads[:] = _rename_list(bads, ch_names_mapping) + return bads + + +def _write_bad_channels(fid, bads, ch_names_mapping): + if bads is not None and len(bads) > 0: + ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping + bads = _rename_list(bads, ch_names_mapping) + start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) + write_name_list_sanitized(fid, FIFF.FIFF_MNE_CH_NAME_LIST, bads, "bads") + end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS) + + +@verbose +def read_meas_info(fid, tree, clean_bads=False, verbose=None): + """Read the measurement info. + + Parameters + ---------- + fid : file + Open file descriptor. + tree : tree + FIF tree structure. + clean_bads : bool + If True, clean info['bads'] before running consistency check. + Should only be needed for old files where we did not check bads + before saving. + %(verbose)s + + Returns + ------- + %(info_not_none)s + meas : dict + Node in tree that contains the info. + """ + from ..transforms import Transform, invert_transform + + # Find the desired blocks + meas = dir_tree_find(tree, FIFF.FIFFB_MEAS) + if len(meas) == 0: + raise ValueError("Could not find measurement data") + if len(meas) > 1: + raise ValueError("Cannot read more that 1 measurement data") + meas = meas[0] + + meas_info = dir_tree_find(meas, FIFF.FIFFB_MEAS_INFO) + if len(meas_info) == 0: + raise ValueError("Could not find measurement info") + if len(meas_info) > 1: + raise ValueError("Cannot read more that 1 measurement info") + meas_info = meas_info[0] + + # Read measurement info + dev_head_t = None + ctf_head_t = None + dev_ctf_t = None + meas_date = None + utc_offset = None + highpass = None + lowpass = None + nchan = None + sfreq = None + chs = [] + experimenter = None + description = None + proj_id = None + proj_name = None + line_freq = None + gantry_angle = None + custom_ref_applied = FIFF.FIFFV_MNE_CUSTOM_REF_OFF + xplotter_layout = None + kit_system_id = None + for k in range(meas_info["nent"]): + kind = meas_info["directory"][k].kind + pos = meas_info["directory"][k].pos + if kind == FIFF.FIFF_NCHAN: + tag = read_tag(fid, pos) + nchan = int(tag.data.item()) + elif kind == FIFF.FIFF_SFREQ: + tag = read_tag(fid, pos) + sfreq = float(tag.data.item()) + elif kind == FIFF.FIFF_CH_INFO: + tag = read_tag(fid, pos) + chs.append(tag.data) + elif kind == FIFF.FIFF_LOWPASS: + tag = read_tag(fid, pos) + if not np.isnan(tag.data.item()): + lowpass = float(tag.data.item()) + elif kind == FIFF.FIFF_HIGHPASS: + tag = read_tag(fid, pos) + if not np.isnan(tag.data): + highpass = float(tag.data.item()) + elif kind == FIFF.FIFF_MEAS_DATE: + tag = read_tag(fid, pos) + meas_date = tuple(tag.data) + if len(meas_date) == 1: # can happen from old C conversions + meas_date = (meas_date[0], 0) + elif kind == FIFF.FIFF_UTC_OFFSET: + tag = read_tag(fid, pos) + utc_offset = str(tag.data) + elif kind == FIFF.FIFF_COORD_TRANS: + tag = read_tag(fid, pos) + cand = tag.data + + if ( + cand["from"] == FIFF.FIFFV_COORD_DEVICE + and cand["to"] == FIFF.FIFFV_COORD_HEAD + ): + dev_head_t = cand + elif ( + cand["from"] == FIFF.FIFFV_COORD_HEAD + and 
cand["to"] == FIFF.FIFFV_COORD_DEVICE + ): + # this reversal can happen with BabyMEG data + dev_head_t = invert_transform(cand) + elif ( + cand["from"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD + and cand["to"] == FIFF.FIFFV_COORD_HEAD + ): + ctf_head_t = cand + elif ( + cand["from"] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE + and cand["to"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD + ): + dev_ctf_t = cand + elif kind == FIFF.FIFF_EXPERIMENTER: + tag = read_tag(fid, pos) + experimenter = tag.data + elif kind == FIFF.FIFF_DESCRIPTION: + tag = read_tag(fid, pos) + description = tag.data + elif kind == FIFF.FIFF_PROJ_ID: + tag = read_tag(fid, pos) + proj_id = tag.data + elif kind == FIFF.FIFF_PROJ_NAME: + tag = read_tag(fid, pos) + proj_name = tag.data + elif kind == FIFF.FIFF_LINE_FREQ: + tag = read_tag(fid, pos) + line_freq = float(tag.data.item()) + elif kind == FIFF.FIFF_GANTRY_ANGLE: + tag = read_tag(fid, pos) + gantry_angle = float(tag.data.item()) + elif kind in [FIFF.FIFF_MNE_CUSTOM_REF, 236]: # 236 used before v0.11 + tag = read_tag(fid, pos) + custom_ref_applied = int(tag.data.item()) + elif kind == FIFF.FIFF_XPLOTTER_LAYOUT: + tag = read_tag(fid, pos) + xplotter_layout = str(tag.data) + elif kind == FIFF.FIFF_MNE_KIT_SYSTEM_ID: + tag = read_tag(fid, pos) + kit_system_id = int(tag.data.item()) + ch_names_mapping = _read_extended_ch_info(chs, meas_info, fid) + + # Check that we have everything we need + if nchan is None: + raise ValueError("Number of channels is not defined") + + if sfreq is None: + raise ValueError("Sampling frequency is not defined") + + if len(chs) == 0: + raise ValueError("Channel information not defined") + + if len(chs) != nchan: + raise ValueError("Incorrect number of channel definitions found") + + if dev_head_t is None or ctf_head_t is None: + hpi_result = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT) + if len(hpi_result) == 1: + hpi_result = hpi_result[0] + for k in range(hpi_result["nent"]): + kind = hpi_result["directory"][k].kind + pos = hpi_result["directory"][k].pos + if kind == FIFF.FIFF_COORD_TRANS: + tag = read_tag(fid, pos) + cand = tag.data + if ( + cand["from"] == FIFF.FIFFV_COORD_DEVICE + and cand["to"] == FIFF.FIFFV_COORD_HEAD + and dev_head_t is None + ): + dev_head_t = cand + elif ( + cand["from"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD + and cand["to"] == FIFF.FIFFV_COORD_HEAD + and ctf_head_t is None + ): + ctf_head_t = cand + + # Locate the Polhemus data + dig = _read_dig_fif(fid, meas_info) + + # Locate the acquisition information + acqpars = dir_tree_find(meas_info, FIFF.FIFFB_DACQ_PARS) + acq_pars = None + acq_stim = None + if len(acqpars) == 1: + acqpars = acqpars[0] + for k in range(acqpars["nent"]): + kind = acqpars["directory"][k].kind + pos = acqpars["directory"][k].pos + if kind == FIFF.FIFF_DACQ_PARS: + tag = read_tag(fid, pos) + acq_pars = tag.data + elif kind == FIFF.FIFF_DACQ_STIM: + tag = read_tag(fid, pos) + acq_stim = tag.data + + # Load the SSP data + projs = _read_proj(fid, meas_info, ch_names_mapping=ch_names_mapping) + + # Load the CTF compensation data + comps = _read_ctf_comp(fid, meas_info, chs, ch_names_mapping=ch_names_mapping) + + # Load the bad channel list + bads = _read_bad_channels(fid, meas_info, ch_names_mapping=ch_names_mapping) + + # + # Put the data together + # + info = Info(file_id=tree["id"]) + info._unlocked = True + + # Locate events list + events = dir_tree_find(meas_info, FIFF.FIFFB_EVENTS) + evs = list() + for event in events: + ev = dict() + for k in range(event["nent"]): + kind = event["directory"][k].kind + pos = 
event["directory"][k].pos + if kind == FIFF.FIFF_EVENT_CHANNELS: + ev["channels"] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_EVENT_LIST: + ev["list"] = read_tag(fid, pos).data + evs.append(ev) + info["events"] = evs + + # Locate HPI result + hpi_results = dir_tree_find(meas_info, FIFF.FIFFB_HPI_RESULT) + hrs = list() + for hpi_result in hpi_results: + hr = dict() + hr["dig_points"] = [] + for k in range(hpi_result["nent"]): + kind = hpi_result["directory"][k].kind + pos = hpi_result["directory"][k].pos + if kind == FIFF.FIFF_DIG_POINT: + hr["dig_points"].append(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_HPI_DIGITIZATION_ORDER: + hr["order"] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_COILS_USED: + hr["used"] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_COIL_MOMENTS: + hr["moments"] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_FIT_GOODNESS: + hr["goodness"] = read_tag(fid, pos).data + elif kind == FIFF.FIFF_HPI_FIT_GOOD_LIMIT: + hr["good_limit"] = float(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_HPI_FIT_DIST_LIMIT: + hr["dist_limit"] = float(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_HPI_FIT_ACCEPT: + hr["accept"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_COORD_TRANS: + hr["coord_trans"] = read_tag(fid, pos).data + hrs.append(hr) + info["hpi_results"] = hrs + + # Locate HPI Measurement + hpi_meass = dir_tree_find(meas_info, FIFF.FIFFB_HPI_MEAS) + hms = list() + for hpi_meas in hpi_meass: + hm = dict() + for k in range(hpi_meas["nent"]): + kind = hpi_meas["directory"][k].kind + pos = hpi_meas["directory"][k].pos + if kind == FIFF.FIFF_CREATOR: + hm["creator"] = str(read_tag(fid, pos).data) + elif kind == FIFF.FIFF_SFREQ: + hm["sfreq"] = float(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_NCHAN: + hm["nchan"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_NAVE: + hm["nave"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_HPI_NCOIL: + hm["ncoil"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_FIRST_SAMPLE: + hm["first_samp"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_LAST_SAMPLE: + hm["last_samp"] = int(read_tag(fid, pos).data.item()) + hpi_coils = dir_tree_find(hpi_meas, FIFF.FIFFB_HPI_COIL) + hcs = [] + for hpi_coil in hpi_coils: + hc = dict() + for k in range(hpi_coil["nent"]): + kind = hpi_coil["directory"][k].kind + pos = hpi_coil["directory"][k].pos + if kind == FIFF.FIFF_HPI_COIL_NO: + hc["number"] = int(read_tag(fid, pos).data.item()) + elif kind == FIFF.FIFF_EPOCH: + hc["epoch"] = read_tag(fid, pos).data + hc["epoch"].flags.writeable = False + elif kind == FIFF.FIFF_HPI_SLOPES: + hc["slopes"] = read_tag(fid, pos).data + hc["slopes"].flags.writeable = False + elif kind == FIFF.FIFF_HPI_CORR_COEFF: + hc["corr_coeff"] = read_tag(fid, pos).data + hc["corr_coeff"].flags.writeable = False + elif kind == FIFF.FIFF_HPI_COIL_FREQ: + hc["coil_freq"] = float(read_tag(fid, pos).data.item()) + hcs.append(hc) + hm["hpi_coils"] = hcs + hms.append(hm) + info["hpi_meas"] = hms + del hms + + subject_info = dir_tree_find(meas_info, FIFF.FIFFB_SUBJECT) + si = None + if len(subject_info) == 1: + subject_info = subject_info[0] + si = dict() + for k in range(subject_info["nent"]): + kind = subject_info["directory"][k].kind + pos = subject_info["directory"][k].pos + if kind == FIFF.FIFF_SUBJ_ID: + tag = read_tag(fid, pos) + si["id"] = int(tag.data.item()) + elif kind == FIFF.FIFF_SUBJ_HIS_ID: + tag = read_tag(fid, pos) + si["his_id"] = 
str(tag.data) + elif kind == FIFF.FIFF_SUBJ_LAST_NAME: + tag = read_tag(fid, pos) + si["last_name"] = str(tag.data) + elif kind == FIFF.FIFF_SUBJ_FIRST_NAME: + tag = read_tag(fid, pos) + si["first_name"] = str(tag.data) + elif kind == FIFF.FIFF_SUBJ_MIDDLE_NAME: + tag = read_tag(fid, pos) + si["middle_name"] = str(tag.data) + elif kind == FIFF.FIFF_SUBJ_BIRTH_DAY: + try: + tag = read_tag(fid, pos) + except OverflowError: + warn( + "Encountered an error while trying to read the " + "birthday from the input data. No birthday will be " + "set. Please check the integrity of the birthday " + "information in the input data." + ) + continue + si["birthday"] = tag.data + elif kind == FIFF.FIFF_SUBJ_SEX: + tag = read_tag(fid, pos) + si["sex"] = int(tag.data.item()) + elif kind == FIFF.FIFF_SUBJ_HAND: + tag = read_tag(fid, pos) + si["hand"] = int(tag.data.item()) + elif kind == FIFF.FIFF_SUBJ_WEIGHT: + tag = read_tag(fid, pos) + si["weight"] = float(tag.data.item()) + elif kind == FIFF.FIFF_SUBJ_HEIGHT: + tag = read_tag(fid, pos) + si["height"] = float(tag.data.item()) + info["subject_info"] = si + del si + + device_info = dir_tree_find(meas_info, FIFF.FIFFB_DEVICE) + di = None + if len(device_info) == 1: + device_info = device_info[0] + di = dict() + for k in range(device_info["nent"]): + kind = device_info["directory"][k].kind + pos = device_info["directory"][k].pos + if kind == FIFF.FIFF_DEVICE_TYPE: + tag = read_tag(fid, pos) + di["type"] = str(tag.data) + elif kind == FIFF.FIFF_DEVICE_MODEL: + tag = read_tag(fid, pos) + di["model"] = str(tag.data) + elif kind == FIFF.FIFF_DEVICE_SERIAL: + tag = read_tag(fid, pos) + di["serial"] = str(tag.data) + elif kind == FIFF.FIFF_DEVICE_SITE: + tag = read_tag(fid, pos) + di["site"] = str(tag.data) + info["device_info"] = di + del di + + helium_info = dir_tree_find(meas_info, FIFF.FIFFB_HELIUM) + hi = None + if len(helium_info) == 1: + helium_info = helium_info[0] + hi = dict() + for k in range(helium_info["nent"]): + kind = helium_info["directory"][k].kind + pos = helium_info["directory"][k].pos + if kind == FIFF.FIFF_HE_LEVEL_RAW: + tag = read_tag(fid, pos) + hi["he_level_raw"] = float(tag.data.item()) + elif kind == FIFF.FIFF_HELIUM_LEVEL: + tag = read_tag(fid, pos) + hi["helium_level"] = float(tag.data.item()) + elif kind == FIFF.FIFF_ORIG_FILE_GUID: + tag = read_tag(fid, pos) + hi["orig_file_guid"] = str(tag.data) + elif kind == FIFF.FIFF_MEAS_DATE: + tag = read_tag(fid, pos) + hi["meas_date"] = _ensure_meas_date_none_or_dt( + tuple(int(t) for t in tag.data), + ) + info["helium_info"] = hi + del hi + + hpi_subsystem = dir_tree_find(meas_info, FIFF.FIFFB_HPI_SUBSYSTEM) + hs = None + if len(hpi_subsystem) == 1: + hpi_subsystem = hpi_subsystem[0] + hs = dict() + for k in range(hpi_subsystem["nent"]): + kind = hpi_subsystem["directory"][k].kind + pos = hpi_subsystem["directory"][k].pos + if kind == FIFF.FIFF_HPI_NCOIL: + tag = read_tag(fid, pos) + hs["ncoil"] = int(tag.data.item()) + elif kind == FIFF.FIFF_EVENT_CHANNEL: + tag = read_tag(fid, pos) + hs["event_channel"] = str(tag.data) + hpi_coils = dir_tree_find(hpi_subsystem, FIFF.FIFFB_HPI_COIL) + hc = [] + for coil in hpi_coils: + this_coil = dict() + for j in range(coil["nent"]): + kind = coil["directory"][j].kind + pos = coil["directory"][j].pos + if kind == FIFF.FIFF_EVENT_BITS: + tag = read_tag(fid, pos) + this_coil["event_bits"] = np.array(tag.data) + hc.append(this_coil) + hs["hpi_coils"] = hc + info["hpi_subsystem"] = hs + + # Read processing history + info["proc_history"] = 
_read_proc_history(fid, tree) + + # Make the most appropriate selection for the measurement id + if meas_info["parent_id"] is None: + if meas_info["id"] is None: + if meas["id"] is None: + if meas["parent_id"] is None: + info["meas_id"] = info["file_id"] + else: + info["meas_id"] = meas["parent_id"] + else: + info["meas_id"] = meas["id"] + else: + info["meas_id"] = meas_info["id"] + else: + info["meas_id"] = meas_info["parent_id"] + info["experimenter"] = experimenter + info["description"] = description + info["proj_id"] = proj_id + info["proj_name"] = proj_name + if meas_date is None: + meas_date = (info["meas_id"]["secs"], info["meas_id"]["usecs"]) + info["meas_date"] = _ensure_meas_date_none_or_dt(meas_date) + info["utc_offset"] = utc_offset + + info["sfreq"] = sfreq + info["highpass"] = highpass if highpass is not None else 0.0 + info["lowpass"] = lowpass if lowpass is not None else info["sfreq"] / 2.0 + info["line_freq"] = line_freq + info["gantry_angle"] = gantry_angle + + # Add the channel information and make a list of channel names + # for convenience + info["chs"] = chs + + # + # Add the coordinate transformations + # + info["dev_head_t"] = dev_head_t + info["ctf_head_t"] = ctf_head_t + info["dev_ctf_t"] = dev_ctf_t + if dev_head_t is not None and ctf_head_t is not None and dev_ctf_t is None: + head_ctf_trans = np.linalg.inv(ctf_head_t["trans"]) + dev_ctf_trans = np.dot(head_ctf_trans, info["dev_head_t"]["trans"]) + info["dev_ctf_t"] = Transform("meg", "ctf_head", dev_ctf_trans) + + # All kinds of auxliary stuff + info["dig"] = _format_dig_points(dig) + info["bads"] = bads + info._update_redundant() + if clean_bads: + info["bads"] = [b for b in bads if b in info["ch_names"]] + info["projs"] = projs + info["comps"] = comps + info["acq_pars"] = acq_pars + info["acq_stim"] = acq_stim + info["custom_ref_applied"] = custom_ref_applied + info["xplotter_layout"] = xplotter_layout + info["kit_system_id"] = kit_system_id + info._check_consistency() + info._unlocked = False + return info, meas + + +def _read_extended_ch_info(chs, parent, fid): + ch_infos = dir_tree_find(parent, FIFF.FIFFB_CH_INFO) + if len(ch_infos) == 0: + return + _check_option("length of channel infos", len(ch_infos), [len(chs)]) + logger.info(" Reading extended channel information") + + # Here we assume that ``remap`` is in the same order as the channels + # themselves, which is hopefully safe enough. 
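+    # The loop below walks each FIFFB_CH_INFO block in parallel with ``chs``:
+    # every known tag kind is looked up in ``_CH_READ_MAP`` (kind -> (key,
+    # cast)), cast to the right type, and written back into the channel dict;
+    # renamed channels are collected as old-name -> new-name pairs so that
+    # bads, projectors, and compensators can be remapped afterwards.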
+ ch_names_mapping = dict() + for new, ch in zip(ch_infos, chs): + for k in range(new["nent"]): + kind = new["directory"][k].kind + try: + key, cast = _CH_READ_MAP[kind] + except KeyError: + # This shouldn't happen if we're up to date with the FIFF + # spec + warn(f"Discarding extra channel information kind {kind}") + continue + assert key in ch + data = read_tag(fid, new["directory"][k].pos).data + if data is not None: + data = cast(data) + if key == "ch_name": + ch_names_mapping[ch[key]] = data + ch[key] = data + _update_ch_info_named(ch) + # we need to return ch_names_mapping so that we can also rename the + # bad channels + return ch_names_mapping + + +def _rename_comps(comps, ch_names_mapping): + if not (comps and ch_names_mapping): + return + for comp in comps: + data = comp["data"] + for key in ("row_names", "col_names"): + data[key][:] = _rename_list(data[key], ch_names_mapping) + + +def _ensure_meas_date_none_or_dt(meas_date): + if meas_date is None or np.array_equal(meas_date, DATE_NONE): + meas_date = None + elif not isinstance(meas_date, datetime.datetime): + meas_date = _stamp_to_dt(meas_date) + return meas_date + + +def _check_dates(info, prepend_error=""): + """Check dates before writing as fif files. + + It's needed because of the limited integer precision + of the fix standard. + """ + for key in ("file_id", "meas_id"): + value = info.get(key) + if value is not None: + assert "msecs" not in value + for key_2 in ("secs", "usecs"): + if ( + value[key_2] < np.iinfo(">i4").min + or value[key_2] > np.iinfo(">i4").max + ): + raise RuntimeError( + f"{prepend_error}info[{key}][{key_2}] must be between " + f'"{np.iinfo(">i4").min!r}" and "{np.iinfo(">i4").max!r}", got ' + f'"{value[key_2]!r}"' + ) + + meas_date = info.get("meas_date") + if meas_date is None: + return + + meas_date_stamp = _dt_to_stamp(meas_date) + if ( + meas_date_stamp[0] < np.iinfo(">i4").min + or meas_date_stamp[0] > np.iinfo(">i4").max + ): + raise RuntimeError( + f'{prepend_error}info["meas_date"] seconds must be between ' + f'"{(np.iinfo(">i4").min, 0)!r}" and "{(np.iinfo(">i4").max, 0)!r}", got ' + f'"{meas_date_stamp[0]!r}"' + ) + + +@fill_doc +def write_meas_info(fid, info, data_type=None, reset_range=True): + """Write measurement info into a file id (from a fif file). + + Parameters + ---------- + fid : file + Open file descriptor. + %(info_not_none)s + data_type : int + The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), + 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for + raw data. + reset_range : bool + If True, info['chs'][k]['range'] will be set to unity. + + Notes + ----- + Tags are written in a particular order for compatibility with maxfilter. 
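+
+    A minimal calling sketch, mirroring :func:`write_info` further below
+    (``info`` is assumed to be complete and consistent)::
+
+        with start_and_end_file("sample-info.fif") as fid:
+            start_block(fid, FIFF.FIFFB_MEAS)
+            write_meas_info(fid, info)
+            end_block(fid, FIFF.FIFFB_MEAS)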
+ """ + info._check_consistency() + _check_dates(info) + + # Measurement info + start_block(fid, FIFF.FIFFB_MEAS_INFO) + + # Add measurement id + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info["meas_id"]) + + for event in info["events"]: + start_block(fid, FIFF.FIFFB_EVENTS) + if event.get("channels") is not None: + write_int(fid, FIFF.FIFF_EVENT_CHANNELS, event["channels"]) + if event.get("list") is not None: + write_int(fid, FIFF.FIFF_EVENT_LIST, event["list"]) + end_block(fid, FIFF.FIFFB_EVENTS) + + # HPI Result + for hpi_result in info["hpi_results"]: + start_block(fid, FIFF.FIFFB_HPI_RESULT) + write_dig_points(fid, hpi_result["dig_points"]) + if "order" in hpi_result: + write_int(fid, FIFF.FIFF_HPI_DIGITIZATION_ORDER, hpi_result["order"]) + if "used" in hpi_result: + write_int(fid, FIFF.FIFF_HPI_COILS_USED, hpi_result["used"]) + if "moments" in hpi_result: + write_float_matrix(fid, FIFF.FIFF_HPI_COIL_MOMENTS, hpi_result["moments"]) + if "goodness" in hpi_result: + write_float(fid, FIFF.FIFF_HPI_FIT_GOODNESS, hpi_result["goodness"]) + if "good_limit" in hpi_result: + write_float(fid, FIFF.FIFF_HPI_FIT_GOOD_LIMIT, hpi_result["good_limit"]) + if "dist_limit" in hpi_result: + write_float(fid, FIFF.FIFF_HPI_FIT_DIST_LIMIT, hpi_result["dist_limit"]) + if "accept" in hpi_result: + write_int(fid, FIFF.FIFF_HPI_FIT_ACCEPT, hpi_result["accept"]) + if "coord_trans" in hpi_result: + write_coord_trans(fid, hpi_result["coord_trans"]) + end_block(fid, FIFF.FIFFB_HPI_RESULT) + + # HPI Measurement + for hpi_meas in info["hpi_meas"]: + start_block(fid, FIFF.FIFFB_HPI_MEAS) + if hpi_meas.get("creator") is not None: + write_string(fid, FIFF.FIFF_CREATOR, hpi_meas["creator"]) + if hpi_meas.get("sfreq") is not None: + write_float(fid, FIFF.FIFF_SFREQ, hpi_meas["sfreq"]) + if hpi_meas.get("nchan") is not None: + write_int(fid, FIFF.FIFF_NCHAN, hpi_meas["nchan"]) + if hpi_meas.get("nave") is not None: + write_int(fid, FIFF.FIFF_NAVE, hpi_meas["nave"]) + if hpi_meas.get("ncoil") is not None: + write_int(fid, FIFF.FIFF_HPI_NCOIL, hpi_meas["ncoil"]) + if hpi_meas.get("first_samp") is not None: + write_int(fid, FIFF.FIFF_FIRST_SAMPLE, hpi_meas["first_samp"]) + if hpi_meas.get("last_samp") is not None: + write_int(fid, FIFF.FIFF_LAST_SAMPLE, hpi_meas["last_samp"]) + for hpi_coil in hpi_meas["hpi_coils"]: + start_block(fid, FIFF.FIFFB_HPI_COIL) + if hpi_coil.get("number") is not None: + write_int(fid, FIFF.FIFF_HPI_COIL_NO, hpi_coil["number"]) + if hpi_coil.get("epoch") is not None: + write_float_matrix(fid, FIFF.FIFF_EPOCH, hpi_coil["epoch"]) + if hpi_coil.get("slopes") is not None: + write_float(fid, FIFF.FIFF_HPI_SLOPES, hpi_coil["slopes"]) + if hpi_coil.get("corr_coeff") is not None: + write_float(fid, FIFF.FIFF_HPI_CORR_COEFF, hpi_coil["corr_coeff"]) + if hpi_coil.get("coil_freq") is not None: + write_float(fid, FIFF.FIFF_HPI_COIL_FREQ, hpi_coil["coil_freq"]) + end_block(fid, FIFF.FIFFB_HPI_COIL) + end_block(fid, FIFF.FIFFB_HPI_MEAS) + + # Polhemus data + write_dig_points(fid, info["dig"], block=True) + + # megacq parameters + if info["acq_pars"] is not None or info["acq_stim"] is not None: + start_block(fid, FIFF.FIFFB_DACQ_PARS) + if info["acq_pars"] is not None: + write_string(fid, FIFF.FIFF_DACQ_PARS, info["acq_pars"]) + + if info["acq_stim"] is not None: + write_string(fid, FIFF.FIFF_DACQ_STIM, info["acq_stim"]) + + end_block(fid, FIFF.FIFFB_DACQ_PARS) + + # Coordinate transformations if the HPI result block was not there + if info["dev_head_t"] is not None: + 
write_coord_trans(fid, info["dev_head_t"]) + + if info["ctf_head_t"] is not None: + write_coord_trans(fid, info["ctf_head_t"]) + + if info["dev_ctf_t"] is not None: + write_coord_trans(fid, info["dev_ctf_t"]) + + # Projectors + ch_names_mapping = _make_ch_names_mapping(info["chs"]) + _write_proj(fid, info["projs"], ch_names_mapping=ch_names_mapping) + + # Bad channels + _write_bad_channels(fid, info["bads"], ch_names_mapping=ch_names_mapping) + + # General + if info.get("experimenter") is not None: + write_string(fid, FIFF.FIFF_EXPERIMENTER, info["experimenter"]) + if info.get("description") is not None: + write_string(fid, FIFF.FIFF_DESCRIPTION, info["description"]) + if info.get("proj_id") is not None: + write_int(fid, FIFF.FIFF_PROJ_ID, info["proj_id"]) + if info.get("proj_name") is not None: + write_string(fid, FIFF.FIFF_PROJ_NAME, info["proj_name"]) + if info.get("meas_date") is not None: + write_int(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(info["meas_date"])) + if info.get("utc_offset") is not None: + write_string(fid, FIFF.FIFF_UTC_OFFSET, info["utc_offset"]) + write_int(fid, FIFF.FIFF_NCHAN, info["nchan"]) + write_float(fid, FIFF.FIFF_SFREQ, info["sfreq"]) + if info["lowpass"] is not None: + write_float(fid, FIFF.FIFF_LOWPASS, info["lowpass"]) + if info["highpass"] is not None: + write_float(fid, FIFF.FIFF_HIGHPASS, info["highpass"]) + if info.get("line_freq") is not None: + write_float(fid, FIFF.FIFF_LINE_FREQ, info["line_freq"]) + if info.get("gantry_angle") is not None: + write_float(fid, FIFF.FIFF_GANTRY_ANGLE, info["gantry_angle"]) + if data_type is not None: + write_int(fid, FIFF.FIFF_DATA_PACK, data_type) + if info.get("custom_ref_applied"): + write_int(fid, FIFF.FIFF_MNE_CUSTOM_REF, info["custom_ref_applied"]) + if info.get("xplotter_layout"): + write_string(fid, FIFF.FIFF_XPLOTTER_LAYOUT, info["xplotter_layout"]) + + # Channel information + _write_ch_infos(fid, info["chs"], reset_range, ch_names_mapping) + + # Subject information + if info.get("subject_info") is not None: + start_block(fid, FIFF.FIFFB_SUBJECT) + si = info["subject_info"] + if si.get("id") is not None: + write_int(fid, FIFF.FIFF_SUBJ_ID, si["id"]) + if si.get("his_id") is not None: + write_string(fid, FIFF.FIFF_SUBJ_HIS_ID, si["his_id"]) + if si.get("last_name") is not None: + write_string(fid, FIFF.FIFF_SUBJ_LAST_NAME, si["last_name"]) + if si.get("first_name") is not None: + write_string(fid, FIFF.FIFF_SUBJ_FIRST_NAME, si["first_name"]) + if si.get("middle_name") is not None: + write_string(fid, FIFF.FIFF_SUBJ_MIDDLE_NAME, si["middle_name"]) + if si.get("birthday") is not None: + write_julian(fid, FIFF.FIFF_SUBJ_BIRTH_DAY, si["birthday"]) + if si.get("sex") is not None: + write_int(fid, FIFF.FIFF_SUBJ_SEX, si["sex"]) + if si.get("hand") is not None: + write_int(fid, FIFF.FIFF_SUBJ_HAND, si["hand"]) + if si.get("weight") is not None: + write_float(fid, FIFF.FIFF_SUBJ_WEIGHT, si["weight"]) + if si.get("height") is not None: + write_float(fid, FIFF.FIFF_SUBJ_HEIGHT, si["height"]) + end_block(fid, FIFF.FIFFB_SUBJECT) + del si + + if info.get("device_info") is not None: + start_block(fid, FIFF.FIFFB_DEVICE) + di = info["device_info"] + if di.get("type") is not None: + write_string(fid, FIFF.FIFF_DEVICE_TYPE, di["type"]) + for key in ("model", "serial", "site"): + if di.get(key) is not None: + write_string(fid, getattr(FIFF, "FIFF_DEVICE_" + key.upper()), di[key]) + end_block(fid, FIFF.FIFFB_DEVICE) + del di + + if info.get("helium_info") is not None: + start_block(fid, FIFF.FIFFB_HELIUM) + hi = 
info["helium_info"] + if hi.get("he_level_raw") is not None: + write_float(fid, FIFF.FIFF_HE_LEVEL_RAW, hi["he_level_raw"]) + if hi.get("helium_level") is not None: + write_float(fid, FIFF.FIFF_HELIUM_LEVEL, hi["helium_level"]) + if hi.get("orig_file_guid") is not None: + write_string(fid, FIFF.FIFF_ORIG_FILE_GUID, hi["orig_file_guid"]) + write_int(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(hi["meas_date"])) + end_block(fid, FIFF.FIFFB_HELIUM) + del hi + + if info.get("hpi_subsystem") is not None: + hs = info["hpi_subsystem"] + start_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM) + if hs.get("ncoil") is not None: + write_int(fid, FIFF.FIFF_HPI_NCOIL, hs["ncoil"]) + if hs.get("event_channel") is not None: + write_string(fid, FIFF.FIFF_EVENT_CHANNEL, hs["event_channel"]) + if hs.get("hpi_coils") is not None: + for coil in hs["hpi_coils"]: + start_block(fid, FIFF.FIFFB_HPI_COIL) + if coil.get("event_bits") is not None: + write_int(fid, FIFF.FIFF_EVENT_BITS, coil["event_bits"]) + end_block(fid, FIFF.FIFFB_HPI_COIL) + end_block(fid, FIFF.FIFFB_HPI_SUBSYSTEM) + del hs + + # CTF compensation info + comps = info["comps"] + if ch_names_mapping: + comps = deepcopy(comps) + _rename_comps(comps, ch_names_mapping) + write_ctf_comp(fid, comps) + + # KIT system ID + if info.get("kit_system_id") is not None: + write_int(fid, FIFF.FIFF_MNE_KIT_SYSTEM_ID, info["kit_system_id"]) + + end_block(fid, FIFF.FIFFB_MEAS_INFO) + + # Processing history + _write_proc_history(fid, info) + + +@fill_doc +def write_info(fname, info, data_type=None, reset_range=True): + """Write measurement info in fif file. + + Parameters + ---------- + fname : path-like + The name of the file. Should end by ``-info.fif``. + %(info_not_none)s + data_type : int + The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), + 5 (FIFFT_DOUBLE), or 16 (FIFFT_DAU_PACK16) for + raw data. + reset_range : bool + If True, info['chs'][k]['range'] will be set to unity. + """ + with start_and_end_file(fname) as fid: + start_block(fid, FIFF.FIFFB_MEAS) + write_meas_info(fid, info, data_type, reset_range) + end_block(fid, FIFF.FIFFB_MEAS) + + +@verbose +def _merge_info_values(infos, key, verbose=None): + """Merge things together. + + Fork for {'dict', 'list', 'array', 'other'} + and consider cases where one or all are of the same type. + + Does special things for "projs", "bads", and "meas_date". + """ + values = [d[key] for d in infos] + msg = ( + f"Don't know how to merge '{key}'. 
Make sure values are compatible, got types:"
+        f"\n    {[type(v) for v in values]}"
+    )
+
+    def _flatten(lists):
+        return [item for sublist in lists for item in sublist]
+
+    def _check_isinstance(values, kind, func):
+        return func([isinstance(v, kind) for v in values])
+
+    def _where_isinstance(values, kind):
+        """Get indices of instances."""
+        return np.where([isinstance(v, kind) for v in values])[0]
+
+    # list
+    if _check_isinstance(values, list, all):
+        lists = (d[key] for d in infos)
+        if key == "projs":
+            return _uniquify_projs(_flatten(lists))
+        elif key == "bads":
+            return sorted(set(_flatten(lists)))
+        else:
+            return _flatten(lists)
+    elif _check_isinstance(values, list, any):
+        idx = _where_isinstance(values, list)
+        if len(idx) == 1:
+            return values[int(idx)]
+        elif len(idx) > 1:
+            lists = (d[key] for d in infos if isinstance(d[key], list))
+            return _flatten(lists)
+    # dict
+    elif _check_isinstance(values, dict, all):
+        is_qual = all(object_diff(values[0], v) == "" for v in values[1:])
+        if is_qual:
+            return values[0]
+        else:
+            raise RuntimeError(msg)
+    elif _check_isinstance(values, dict, any):
+        idx = _where_isinstance(values, dict)
+        if len(idx) == 1:
+            return values[int(idx)]
+        elif len(idx) > 1:
+            raise RuntimeError(msg)
+    # ndarray
+    elif _check_isinstance(values, np.ndarray, all) or _check_isinstance(
+        values, tuple, all
+    ):
+        is_qual = all(np.array_equal(values[0], x) for x in values[1:])
+        if is_qual:
+            return values[0]
+        elif key == "meas_date":
+            logger.info(f"Found multiple entries for {key}. Setting value to `None`")
+            return None
+        else:
+            raise RuntimeError(msg)
+    elif _check_isinstance(values, (np.ndarray, tuple), any):
+        idx = _where_isinstance(values, np.ndarray)
+        if len(idx) == 1:
+            return values[int(idx)]
+        elif len(idx) > 1:
+            raise RuntimeError(msg)
+    # other
+    else:
+        unique_values = set(values)
+        if len(unique_values) == 1:
+            return list(values)[0]
+        elif isinstance(list(unique_values)[0], BytesIO):
+            logger.info("Found multiple StringIO instances. Setting value to `None`")
+            return None
+        elif isinstance(list(unique_values)[0], str):
+            logger.info("Found multiple filenames. Setting value to `None`")
+            return None
+        else:
+            raise RuntimeError(msg)
+
+
+@verbose
+def _merge_info(infos, force_update_to_first=False, verbose=None):
+    """Merge multiple measurement info dictionaries.
+
+    - Fields that are present in only one info object will be used in the
+      merged info.
+    - Fields that are present in multiple info objects and are the same
+      will be used in the merged info.
+    - Fields that are present in multiple info objects and are different
+      will result in a None value in the merged info.
+    - Channels will be concatenated. If multiple info objects contain
+      channels with the same name, an exception is raised.
+
+    Parameters
+    ----------
+    infos : list of instance of Info
+        Info objects to merge into one info object.
+    force_update_to_first : bool
+        If True, the fields of the remaining objects in ``infos`` will be
+        forcibly updated to match those in the first item. Use at your own
+        risk, as this may overwrite important metadata.
+    %(verbose)s
+
+    Returns
+    -------
+    info : instance of Info
+        The merged info object.
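+
+    A hedged sketch (``info_run1``/``info_run2`` are illustrative and assumed
+    to share acquisition settings while carrying disjoint channel sets)::
+
+        merged = _merge_info([info_run1, info_run2])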
+ """ + for info in infos: + info._check_consistency() + if force_update_to_first is True: + infos = deepcopy(infos) + _force_update_info(infos[0], infos[1:]) + info = Info() + info._unlocked = True + info["chs"] = [] + for this_info in infos: + info["chs"].extend(this_info["chs"]) + info._update_redundant() + duplicates = {ch for ch in info["ch_names"] if info["ch_names"].count(ch) > 1} + if len(duplicates) > 0: + msg = ( + "The following channels are present in more than one input " + f"measurement info objects: {list(duplicates)}" + ) + raise ValueError(msg) + + transforms = ["ctf_head_t", "dev_head_t", "dev_ctf_t"] + for trans_name in transforms: + trans = [i[trans_name] for i in infos if i[trans_name]] + if len(trans) == 0: + info[trans_name] = None + elif len(trans) == 1: + info[trans_name] = trans[0] + elif all( + np.all(trans[0]["trans"] == x["trans"]) + and trans[0]["from"] == x["from"] + and trans[0]["to"] == x["to"] + for x in trans[1:] + ): + info[trans_name] = trans[0] + else: + msg = f"Measurement infos provide mutually inconsistent {trans_name}" + raise ValueError(msg) + + # KIT system-IDs + kit_sys_ids = [i["kit_system_id"] for i in infos if i["kit_system_id"]] + if len(kit_sys_ids) == 0: + info["kit_system_id"] = None + elif len(set(kit_sys_ids)) == 1: + info["kit_system_id"] = kit_sys_ids[0] + else: + raise ValueError("Trying to merge channels from different KIT systems") + + # hpi infos and digitization data: + fields = ["hpi_results", "hpi_meas", "dig"] + for k in fields: + values = [i[k] for i in infos if i[k]] + if len(values) == 0: + info[k] = [] + elif len(values) == 1: + info[k] = values[0] + elif all(object_diff(values[0], v) == "" for v in values[1:]): + info[k] = values[0] + else: + msg = f"Measurement infos are inconsistent for {k}" + raise ValueError(msg) + + # other fields + other_fields = [ + "acq_pars", + "acq_stim", + "bads", + "comps", + "custom_ref_applied", + "description", + "experimenter", + "file_id", + "highpass", + "utc_offset", + "hpi_subsystem", + "events", + "device_info", + "helium_info", + "line_freq", + "lowpass", + "meas_id", + "proj_id", + "proj_name", + "projs", + "sfreq", + "gantry_angle", + "subject_info", + "sfreq", + "xplotter_layout", + "proc_history", + ] + + for k in other_fields: + info[k] = _merge_info_values(infos, k) + + info["meas_date"] = infos[0]["meas_date"] + info._unlocked = False + + return info + + +@verbose +def create_info(ch_names, sfreq, ch_types="misc", verbose=None): + """Create a basic Info instance suitable for use with create_raw. + + Parameters + ---------- + ch_names : list of str | int + Channel names. If an int, a list of channel names will be created + from ``range(ch_names)``. + sfreq : float + Sample rate of the data. + ch_types : list of str | str + Channel types, default is ``'misc'`` which is a + :term:`non-data channel `. + Currently supported fields are 'bio', 'chpi', 'csd', 'dbs', 'dipole', + 'ecg', 'ecog', 'eeg', 'emg', 'eog', 'exci', 'eyegaze', + 'fnirs_cw_amplitude', 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', + 'fnirs_od', 'gof', 'gsr', 'hbo', 'hbr', 'ias', 'misc', 'pupil', + 'ref_meg', 'resp', 'seeg', 'stim', 'syst', 'temperature' (see also + :term:`sensor types`). + If str, then all channels are assumed to be of the same type. + %(verbose)s + + Returns + ------- + %(info_not_none)s + + Notes + ----- + The info dictionary will be sparsely populated to enable functionality + within the rest of the package. 
Advanced functionality such as source + localization can only be obtained through substantial, proper + modifications of the info structure (not recommended). + + Note that the MEG device-to-head transform ``info['dev_head_t']`` will + be initialized to the identity transform. + + Proper units of measure: + + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog, resp, fnirs_fd_ac_amplitude, + fnirs_cw_amplitude, fnirs_od + * T: mag, chpi, ref_meg + * T/m: grad + * M: hbo, hbr + * rad: fnirs_fd_phase + * Am: dipole + * S: gsr + * C: temperature + * V/m²: csd + * GOF: gof + * AU: misc, stim, eyegaze, pupil + """ + try: + ch_names = operator.index(ch_names) # int-like + except TypeError: + pass + else: + ch_names = list(np.arange(ch_names).astype(str)) + _validate_type(ch_names, (list, tuple), "ch_names", ("list, tuple, or int")) + sfreq = float(sfreq) + if sfreq <= 0: + raise ValueError("sfreq must be positive") + nchan = len(ch_names) + if isinstance(ch_types, str): + ch_types = [ch_types] * nchan + ch_types = np.atleast_1d(np.array(ch_types, np.str_)) + if ch_types.ndim != 1 or len(ch_types) != nchan: + raise ValueError( + f"ch_types and ch_names must be the same length ({len(ch_types)} != " + f"{nchan}) for ch_types={ch_types}" + ) + info = _empty_info(sfreq) + ch_types_dict = get_channel_type_constants(include_defaults=True) + for ci, (ch_name, ch_type) in enumerate(zip(ch_names, ch_types)): + _validate_type(ch_name, "str", "each entry in ch_names") + _validate_type(ch_type, "str", "each entry in ch_types") + if ch_type not in ch_types_dict: + raise KeyError(f"kind must be one of {list(ch_types_dict)}, not {ch_type}") + this_ch_dict = ch_types_dict[ch_type] + kind = this_ch_dict["kind"] + # handle chpi, where kind is a *list* of FIFF constants: + kind = kind[0] if isinstance(kind, list | tuple) else kind + # mirror what tag.py does here + coord_frame = _ch_coord_dict.get(kind, FIFF.FIFFV_COORD_UNKNOWN) + coil_type = this_ch_dict.get("coil_type", FIFF.FIFFV_COIL_NONE) + unit = this_ch_dict.get("unit", FIFF.FIFF_UNIT_NONE) + chan_info = dict( + loc=np.full(12, np.nan), + unit_mul=FIFF.FIFF_UNITM_NONE, + range=1.0, + cal=1.0, + kind=kind, + coil_type=coil_type, + unit=unit, + coord_frame=coord_frame, + ch_name=str(ch_name), + scanno=ci + 1, + logno=ci + 1, + ) + info["chs"].append(chan_info) + + info._update_redundant() + info._check_consistency() + info._unlocked = False + return info + + +RAW_INFO_FIELDS = ( + "acq_pars", + "acq_stim", + "bads", + "ch_names", + "chs", + "comps", + "ctf_head_t", + "custom_ref_applied", + "description", + "dev_ctf_t", + "dev_head_t", + "dig", + "experimenter", + "events", + "utc_offset", + "device_info", + "file_id", + "highpass", + "hpi_meas", + "hpi_results", + "helium_info", + "hpi_subsystem", + "kit_system_id", + "line_freq", + "lowpass", + "meas_date", + "meas_id", + "nchan", + "proj_id", + "proj_name", + "projs", + "sfreq", + "subject_info", + "xplotter_layout", + "proc_history", + "gantry_angle", +) + + +def _empty_info(sfreq): + """Create an empty info dictionary.""" + from ..transforms import Transform + + _none_keys = ( + "acq_pars", + "acq_stim", + "ctf_head_t", + "description", + "dev_ctf_t", + "dig", + "experimenter", + "utc_offset", + "device_info", + "file_id", + "highpass", + "hpi_subsystem", + "kit_system_id", + "helium_info", + "line_freq", + "lowpass", + "meas_date", + "meas_id", + "proj_id", + "proj_name", + "subject_info", + "xplotter_layout", + "gantry_angle", + ) + _list_keys = ( + "bads", + "chs", + "comps", + "events", + "hpi_meas", + 
"hpi_results", + "projs", + "proc_history", + ) + info = Info() + info._unlocked = True + for k in _none_keys: + info[k] = None + for k in _list_keys: + info[k] = list() + info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF + info["highpass"] = 0.0 + info["sfreq"] = float(sfreq) + info["lowpass"] = info["sfreq"] / 2.0 + info["dev_head_t"] = Transform("meg", "head") + info._update_redundant() + info._check_consistency() + return info + + +def _force_update_info(info_base, info_target): + """Update target info objects with values from info base. + + Note that values in info_target will be overwritten by those in info_base. + This will overwrite all fields except for: 'chs', 'ch_names', 'nchan'. + + Parameters + ---------- + info_base : mne.Info + The Info object you want to use for overwriting values + in target Info objects. + info_target : mne.Info | list of mne.Info + The Info object(s) you wish to overwrite using info_base. These objects + will be modified in-place. + """ + exclude_keys = ["chs", "ch_names", "nchan", "bads"] + info_target = np.atleast_1d(info_target).ravel() + all_infos = np.hstack([info_base, info_target]) + for ii in all_infos: + if not isinstance(ii, Info): + raise ValueError(f"Inputs must be of type Info. Found type {type(ii)}") + for key, val in info_base.items(): + if key in exclude_keys: + continue + for i_targ in info_target: + with i_targ._unlock(): + i_targ[key] = val + + +def _add_timedelta_to_stamp(meas_date_stamp, delta_t): + """Add a timedelta to a meas_date tuple.""" + if meas_date_stamp is not None: + meas_date_stamp = _dt_to_stamp(_stamp_to_dt(meas_date_stamp) + delta_t) + return meas_date_stamp + + +@verbose +def anonymize_info(info, daysback=None, keep_his=False, verbose=None): + """Anonymize measurement information in place. + + .. warning:: If ``info`` is part of an object like + :class:`raw.info `, you should directly use + the method :meth:`raw.anonymize() ` + to ensure that all parts of the data are anonymized and + stay synchronized (e.g., + :class:`raw.annotations `). + + Parameters + ---------- + %(info_not_none)s + %(daysback_anonymize_info)s + %(keep_his_anonymize_info)s + %(verbose)s + + Returns + ------- + info : instance of Info + The anonymized measurement information. + + Notes + ----- + %(anonymize_info_notes)s + """ + _validate_type(info, "info", "self") + + default_anon_dos = datetime.datetime( + 2000, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc + ) + default_str = "mne_anonymize" + default_subject_id = 0 + default_sex = 0 + default_desc = "Anonymized using a time shift to preserve age at acquisition" + + none_meas_date = info["meas_date"] is None + + if none_meas_date: + if daysback is not None: + warn( + 'Input info has "meas_date" set to None. ' + "Removing all information from time/date structures, " + "*NOT* performing any time shifts!" 
+ ) + else: + # compute timeshift delta + if daysback is None: + delta_t = info["meas_date"] - default_anon_dos + else: + delta_t = datetime.timedelta(days=daysback) + with info._unlock(): + info["meas_date"] = info["meas_date"] - delta_t + + # file_id and meas_id + for key in ("file_id", "meas_id"): + value = info.get(key) + if value is not None: + assert "msecs" not in value + if none_meas_date or ((value["secs"], value["usecs"]) == DATE_NONE): + # Don't try to shift backwards in time when no measurement + # date is available or when file_id is already a place holder + tmp = DATE_NONE + else: + tmp = _add_timedelta_to_stamp((value["secs"], value["usecs"]), -delta_t) + value["secs"] = tmp[0] + value["usecs"] = tmp[1] + # The following copy is needed for a test CTF dataset + # otherwise value['machid'][:] = 0 would suffice + _tmp = value["machid"].copy() + _tmp[:] = 0 + value["machid"] = _tmp + + # subject info + subject_info = info.get("subject_info") + if subject_info is not None: + if subject_info.get("id") is not None: + subject_info["id"] = default_subject_id + if keep_his: + logger.info( + "Not fully anonymizing info - keeping his_id, sex, and hand info" + ) + else: + if subject_info.get("his_id") is not None: + subject_info["his_id"] = str(default_subject_id) + if subject_info.get("sex") is not None: + subject_info["sex"] = default_sex + if subject_info.get("hand") is not None: + del subject_info["hand"] # there's no "unknown" setting + + for key in ("last_name", "first_name", "middle_name"): + if subject_info.get(key) is not None: + subject_info[key] = default_str + + # anonymize the subject birthday + if none_meas_date: + subject_info.pop("birthday", None) + elif subject_info.get("birthday") is not None: + subject_info["birthday"] = subject_info["birthday"] - delta_t + + for key in ("weight", "height"): + if subject_info.get(key) is not None: + subject_info[key] = 0 + + info["experimenter"] = default_str + info["description"] = default_desc + with info._unlock(): + if info["proj_id"] is not None: + info["proj_id"] = np.zeros_like(info["proj_id"]) + if info["proj_name"] is not None: + info["proj_name"] = default_str + if info["utc_offset"] is not None: + info["utc_offset"] = None + + proc_hist = info.get("proc_history") + if proc_hist is not None: + for record in proc_hist: + record["block_id"]["machid"][:] = 0 + record["experimenter"] = default_str + if none_meas_date: + record["block_id"]["secs"] = DATE_NONE[0] + record["block_id"]["usecs"] = DATE_NONE[1] + record["date"] = DATE_NONE + else: + this_t0 = (record["block_id"]["secs"], record["block_id"]["usecs"]) + this_t1 = _add_timedelta_to_stamp(this_t0, -delta_t) + record["block_id"]["secs"] = this_t1[0] + record["block_id"]["usecs"] = this_t1[1] + record["date"] = _add_timedelta_to_stamp(record["date"], -delta_t) + + hi = info.get("helium_info") + if hi is not None: + if hi.get("orig_file_guid") is not None: + hi["orig_file_guid"] = default_str + if none_meas_date and hi.get("meas_date") is not None: + hi["meas_date"] = _ensure_meas_date_none_or_dt(DATE_NONE) + elif hi.get("meas_date") is not None: + hi["meas_date"] = hi["meas_date"] - delta_t + + di = info.get("device_info") + if di is not None: + for k in ("serial", "site"): + if di.get(k) is not None: + di[k] = default_str + + err_mesg = ( + "anonymize_info generated an inconsistent info object. Underlying Error:\n" + ) + info._check_consistency(prepend_error=err_mesg) + err_mesg = ( + "anonymize_info generated an inconsistent info object. 
" + "daysback parameter was too large. " + "Underlying Error:\n" + ) + _check_dates(info, prepend_error=err_mesg) + + return info + + +@fill_doc +def _bad_chans_comp(info, ch_names): + """Check if channel names are consistent with current compensation status. + + Parameters + ---------- + %(info_not_none)s + + ch_names : list of str + The channel names to check. + + Returns + ------- + status : bool + True if compensation is *currently* in use but some compensation + channels are not included in picks + + False if compensation is *currently* not being used + or if compensation is being used and all compensation channels + in info and included in picks. + + missing_ch_names: array-like of str, shape (n_missing,) + The names of compensation channels not included in picks. + Returns [] if no channels are missing. + + """ + if "comps" not in info: + # should this be thought of as a bug? + return False, [] + + # only include compensation channels that would affect selected channels + ch_names_s = set(ch_names) + comp_names = [] + for comp in info["comps"]: + if len(ch_names_s.intersection(comp["data"]["row_names"])) > 0: + comp_names.extend(comp["data"]["col_names"]) + comp_names = sorted(set(comp_names)) + + missing_ch_names = sorted(set(comp_names).difference(ch_names)) + + if get_current_comp(info) != 0 and len(missing_ch_names) > 0: + return True, missing_ch_names + + return False, missing_ch_names + + +_DIG_CAST = dict(kind=int, ident=int, r=lambda x: x, coord_frame=int) +# key -> const, cast, write +_CH_INFO_MAP = OrderedDict( + scanno=(FIFF.FIFF_CH_SCAN_NO, _int_item, write_int), + logno=(FIFF.FIFF_CH_LOGICAL_NO, _int_item, write_int), + kind=(FIFF.FIFF_CH_KIND, _int_item, write_int), + range=(FIFF.FIFF_CH_RANGE, _float_item, write_float), + cal=(FIFF.FIFF_CH_CAL, _float_item, write_float), + coil_type=(FIFF.FIFF_CH_COIL_TYPE, _int_item, write_int), + loc=(FIFF.FIFF_CH_LOC, lambda x: x, write_float), + unit=(FIFF.FIFF_CH_UNIT, _int_item, write_int), + unit_mul=(FIFF.FIFF_CH_UNIT_MUL, _int_item, write_int), + ch_name=(FIFF.FIFF_CH_DACQ_NAME, str, write_string), + coord_frame=(FIFF.FIFF_CH_COORD_FRAME, _int_item, write_int), +) +# key -> cast +_CH_CAST = OrderedDict((key, val[1]) for key, val in _CH_INFO_MAP.items()) +# const -> key, cast +_CH_READ_MAP = OrderedDict((val[0], (key, val[1])) for key, val in _CH_INFO_MAP.items()) + + +@contextlib.contextmanager +def _writing_info_hdf5(info): + # Make info writing faster by packing chs and dig into numpy arrays + orig_dig = info.get("dig", None) + orig_chs = info["chs"] + with info._unlock(): + try: + if orig_dig is not None and len(orig_dig) > 0: + info["dig"] = _dict_pack(info["dig"], _DIG_CAST) + info["chs"] = _dict_pack(info["chs"], _CH_CAST) + info["chs"]["ch_name"] = np.char.encode( + info["chs"]["ch_name"], encoding="utf8" + ) + yield + finally: + if orig_dig is not None: + info["dig"] = orig_dig + info["chs"] = orig_chs + + +def _dict_pack(obj, casts): + # pack a list of dict into dict of array + return {key: np.array([o[key] for o in obj]) for key in casts} + + +def _dict_unpack(obj, casts): + # unpack a dict of array into a list of dict + n = len(obj[list(casts)[0]]) + return [{key: cast(obj[key][ii]) for key, cast in casts.items()} for ii in range(n)] + + +def _make_ch_names_mapping(chs): + orig_ch_names = [c["ch_name"] for c in chs] + ch_names = orig_ch_names.copy() + _unique_channel_names(ch_names, max_length=15, verbose="error") + ch_names_mapping = dict() + if orig_ch_names != ch_names: + 
ch_names_mapping.update(zip(orig_ch_names, ch_names)) + return ch_names_mapping + + +def _write_ch_infos(fid, chs, reset_range, ch_names_mapping): + ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping + for k, c in enumerate(chs): + # Scan numbers may have been messed up + c = c.copy() + c["ch_name"] = ch_names_mapping.get(c["ch_name"], c["ch_name"]) + assert len(c["ch_name"]) <= 15 + c["scanno"] = k + 1 + # for float/double, the "range" param is unnecessary + if reset_range: + c["range"] = 1.0 + write_ch_info(fid, c) + # only write new-style channel information if necessary + if len(ch_names_mapping): + logger.info( + " Writing channel names to FIF truncated to 15 characters " + "with remapping" + ) + for ch in chs: + start_block(fid, FIFF.FIFFB_CH_INFO) + assert set(ch) == set(_CH_INFO_MAP) + for key, (const, _, write) in _CH_INFO_MAP.items(): + write(fid, const, ch[key]) + end_block(fid, FIFF.FIFFB_CH_INFO) + + +def _ensure_infos_match(info1, info2, name, *, on_mismatch="raise"): + """Check if infos match. + + Parameters + ---------- + info1, info2 : instance of Info + The infos to compare. + name : str + The name of the object appearing in the error message of the comparison + fails. + on_mismatch : 'raise' | 'warn' | 'ignore' + What to do in case of a mismatch of ``dev_head_t`` between ``info1`` + and ``info2``. + """ + _check_on_missing(on_missing=on_mismatch, name="on_mismatch") + + info1._check_consistency() + info2._check_consistency() + + if info1["nchan"] != info2["nchan"]: + raise ValueError(f"{name}.info['nchan'] must match") + if set(info1["bads"]) != set(info2["bads"]): + raise ValueError(f"{name}.info['bads'] must match") + if info1["sfreq"] != info2["sfreq"]: + raise ValueError(f"{name}.info['sfreq'] must match") + if set(info1["ch_names"]) != set(info2["ch_names"]): + raise ValueError(f"{name}.info['ch_names'] must match") + if info1["ch_names"] != info2["ch_names"]: + msg = ( + f"{name}.info['ch_names']: Channel order must match. Use " + '"mne.match_channel_orders()" to sort channels.' + ) + raise ValueError(msg) + if len(info2["projs"]) != len(info1["projs"]): + raise ValueError(f"SSP projectors in {name} must be the same") + if any(not _proj_equal(p1, p2) for p1, p2 in zip(info2["projs"], info1["projs"])): + raise ValueError(f"SSP projectors in {name} must be the same") + if (info1["dev_head_t"] is None) ^ (info2["dev_head_t"] is None) or ( + info1["dev_head_t"] is not None + and not np.allclose( + info1["dev_head_t"]["trans"], + info2["dev_head_t"]["trans"], + rtol=1e-6, + equal_nan=True, + ) + ): + msg = ( + f"{name}.info['dev_head_t'] differs. The " + f"instances probably come from different runs, and " + f"are therefore associated with different head " + f"positions. Manually change info['dev_head_t'] to " + f"avoid this message but beware that this means the " + f"MEG sensors will not be properly spatially aligned. " + f"See mne.preprocessing.maxwell_filter to realign the " + f"runs to a common head position." + ) + _on_missing(on_missing=on_mismatch, msg=msg, name="on_mismatch") + + +def _get_fnirs_ch_pos(info): + """Return positions of each fNIRS optode. + + fNIRS uses two types of optodes, sources and detectors. + There can be multiple connections between each source + and detector at different wavelengths. This function + returns the location of each source and detector. 
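+
+    A short sketch, assuming ``info`` comes from an fNIRS recording; the
+    optode names shown are only illustrative::
+
+        ch_pos = _get_fnirs_ch_pos(info)
+        # e.g. {"S1": array([x, y, z]), ..., "D1": array([x, y, z]), ...}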
+ """ + from ..preprocessing.nirs import _fnirs_optode_names, _optode_position + + srcs, dets = _fnirs_optode_names(info) + ch_pos = {} + for optode in [*srcs, *dets]: + ch_pos[optode] = _optode_position(info, optode) + return ch_pos + + +def _camel_to_snake(s): + return re.sub(r"(? 0: + dir_tag = read_tag(fid, dirpos) + if dir_tag is None or dir_tag.data is None: + fid.seek(0, 2) # move to end of file + size = fid.tell() + extra = "" if size > dirpos else f" > file size {size}" + warn( + "FIF tag directory missing at the end of the file " + f"(at byte {dirpos}{extra}), possibly corrupted file: {fname}" + ) + else: + directory = dir_tag.data + read_slow = False + if read_slow: + pos = 0 + fid.seek(pos, 0) + directory = list() + while pos is not None: + tag = _read_tag_header(fid, pos) + if tag is None: + break # HACK : to fix file ending with empty tag... + pos = tag.next_pos + directory.append(tag) + + tree, _ = make_dir_tree(fid, directory, indent=1) + + logger.debug("[done]") + + # Back to the beginning + fid.seek(0) + + return fid, tree, directory + + +@verbose +def show_fiff( + fname, + indent=" ", + read_limit=np.inf, + max_str=30, + output=str, + tag=None, + *, + show_bytes=False, + verbose=None, +): + """Show FIFF information. + + This function is similar to mne_show_fiff. + + Parameters + ---------- + fname : path-like + Filename to evaluate. + indent : str + How to indent the lines. + read_limit : int + Max number of bytes of data to read from a tag. Can be np.inf + to always read all data (helps test read completion). + max_str : int + Max number of characters of string representation to print for + each tag's data. + output : type + Either str or list. str is a convenience output for printing. + tag : int | None + Provide information about this tag. If None (default), all information + is shown. + show_bytes : bool + If True (default False), print the byte offsets of each tag. + %(verbose)s + + Returns + ------- + contents : str + The contents of the file. 
+ """ + if output not in [list, str]: + raise ValueError("output must be list or str") + if isinstance(tag, str): # command mne show_fiff passes string + tag = int(tag) + fname = _check_fname(fname, "read", True) + f, tree, _ = fiff_open(fname) + # This gets set to 0 (unknown) by fiff_open, but FIFFB_ROOT probably + # makes more sense for display + tree["block"] = FIFF.FIFFB_ROOT + with f as fid: + out = _show_tree( + fid, + tree, + indent=indent, + level=0, + read_limit=read_limit, + max_str=max_str, + tag_id=tag, + show_bytes=show_bytes, + ) + if output is str: + out = "\n".join(out) + return out + + +def _find_type(value, fmts=("FIFF_",), exclude=("FIFF_UNIT",)): + """Find matching values.""" + value = int(value) + vals = [ + k + for k, v in FIFF.items() + if v == value + and any(fmt in k for fmt in fmts) + and not any(exc in k for exc in exclude) + ] + if len(vals) == 0: + vals = ["???"] + return vals + + +def _show_tree( + fid, + tree, + indent, + level, + read_limit, + max_str, + tag_id, + *, + show_bytes=False, +): + """Show FIFF tree.""" + this_idt = indent * level + next_idt = indent * (level + 1) + # print block-level information + found_types = "/".join(_find_type(tree["block"], fmts=["FIFFB_"])) + out = [f"{this_idt}{str(int(tree['block'])).ljust(4)} = {found_types}"] + tag_found = False + if tag_id is None or out[0].strip().startswith(str(tag_id)): + tag_found = True + + if tree["directory"] is not None: + kinds = [ent.kind for ent in tree["directory"]] + [-1] + types = [ent.type for ent in tree["directory"]] + sizes = [ent.size for ent in tree["directory"]] + poss = [ent.pos for ent in tree["directory"]] + counter = 0 + good = True + for k, kn, size, pos, type_ in zip(kinds[:-1], kinds[1:], sizes, poss, types): + if not tag_found and k != tag_id: + continue + tag = Tag(kind=k, type=type_, size=size, next=FIFF.FIFFV_NEXT_NONE, pos=pos) + if read_limit is None or size <= read_limit: + try: + tag = read_tag(fid, pos) + except Exception: + good = False + + if kn == k: + # don't print if the next item is the same type (count 'em) + counter += 1 + else: + if show_bytes: + at = f" @{pos}" + else: + at = "" + # find the tag type + this_type = _find_type(k, fmts=["FIFF_"]) + # prepend a count if necessary + prepend = "x" + str(counter + 1) + ": " if counter > 0 else "" + postpend = "" + # print tag data nicely + if tag.data is not None: + postpend = " = " + str(tag.data)[:max_str] + if isinstance(tag.data, np.ndarray): + if tag.data.size > 1: + postpend += " ... array size=" + str(tag.data.size) + elif isinstance(tag.data, dict): + postpend += " ... dict len=" + str(len(tag.data)) + elif isinstance(tag.data, str): + postpend += " ... str len=" + str(len(tag.data)) + elif isinstance(tag.data, list | tuple): + postpend += " ... list len=" + str(len(tag.data)) + elif issparse(tag.data): + postpend += ( + f" ... sparse ({tag.data.getformat()}) shape=" + f"{tag.data.shape}" + ) + else: + postpend += " ... 
type=" + str(type(tag.data)) + postpend = ">" * 20 + f"BAD @{pos}" if not good else postpend + matrix_info = _matrix_info(tag) + if matrix_info is not None: + _, type_, _, _ = matrix_info + type_ = _call_dict_names.get(type_, f"?{type_}?") + this_type = "/".join(this_type) + out += [ + f"{next_idt}{prepend}{str(k).ljust(4)} = " + f"{this_type}{at} ({size}b {type_}) {postpend}" + ] + out[-1] = out[-1].replace("\n", "¶") + counter = 0 + good = True + if tag_id in kinds: + tag_found = True + if not tag_found: + out = [""] + level = -1 # removes extra indent + # deal with children + for branch in tree["children"]: + out += _show_tree( + fid, + branch, + indent, + level + 1, + read_limit, + max_str, + tag_id, + show_bytes=show_bytes, + ) + return out diff --git a/mne/_fiff/pick.py b/mne/_fiff/pick.py new file mode 100644 index 0000000..ec3479f --- /dev/null +++ b/mne/_fiff/pick.py @@ -0,0 +1,1413 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import re +from copy import deepcopy + +import numpy as np + +from ..utils import ( + _check_option, + _ensure_int, + _validate_type, + fill_doc, + logger, + verbose, +) +from .constants import FIFF + + +def get_channel_type_constants(include_defaults=False): + """Return all known channel types, and associated FIFF constants. + + Parameters + ---------- + include_defaults : bool + Whether to include default values for "unit" and "coil_type" for all + entries (see Notes). Defaults are generally based on values normally + present for a VectorView MEG system. Defaults to ``False``. + + Returns + ------- + channel_types : dict + The keys are channel type strings, and the values are dictionaries of + FIFF constants for "kind", and possibly "unit" and "coil_type". + + Notes + ----- + Values which might vary within a channel type across real data + recordings are excluded unless ``include_defaults=True``. For example, + "ref_meg" channels may have coil type + ``FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD``, ``FIFFV_COIL_VV_MAG_T3``, etc + (depending on the recording system), so no "coil_type" entry is given + for "ref_meg" unless ``include_defaults`` is requested. 
+ """ + base = dict( + grad=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T_M), + mag=dict(kind=FIFF.FIFFV_MEG_CH, unit=FIFF.FIFF_UNIT_T), + ref_meg=dict(kind=FIFF.FIFFV_REF_MEG_CH), + eeg=dict( + kind=FIFF.FIFFV_EEG_CH, unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_EEG + ), + seeg=dict( + kind=FIFF.FIFFV_SEEG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG, + ), + dbs=dict( + kind=FIFF.FIFFV_DBS_CH, unit=FIFF.FIFF_UNIT_V, coil_type=FIFF.FIFFV_COIL_EEG + ), + ecog=dict( + kind=FIFF.FIFFV_ECOG_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_EEG, + ), + eog=dict(kind=FIFF.FIFFV_EOG_CH, unit=FIFF.FIFF_UNIT_V), + emg=dict(kind=FIFF.FIFFV_EMG_CH, unit=FIFF.FIFF_UNIT_V), + ecg=dict(kind=FIFF.FIFFV_ECG_CH, unit=FIFF.FIFF_UNIT_V), + resp=dict(kind=FIFF.FIFFV_RESP_CH, unit=FIFF.FIFF_UNIT_V), + bio=dict(kind=FIFF.FIFFV_BIO_CH, unit=FIFF.FIFF_UNIT_V), + misc=dict(kind=FIFF.FIFFV_MISC_CH, unit=FIFF.FIFF_UNIT_V), + stim=dict(kind=FIFF.FIFFV_STIM_CH), + exci=dict(kind=FIFF.FIFFV_EXCI_CH), + syst=dict(kind=FIFF.FIFFV_SYST_CH), + ias=dict(kind=FIFF.FIFFV_IAS_CH), + gof=dict(kind=FIFF.FIFFV_GOODNESS_FIT), + dipole=dict(kind=FIFF.FIFFV_DIPOLE_WAVE), + chpi=dict( + kind=[ + FIFF.FIFFV_QUAT_0, + FIFF.FIFFV_QUAT_1, + FIFF.FIFFV_QUAT_2, + FIFF.FIFFV_QUAT_3, + FIFF.FIFFV_QUAT_4, + FIFF.FIFFV_QUAT_5, + FIFF.FIFFV_QUAT_6, + FIFF.FIFFV_HPI_G, + FIFF.FIFFV_HPI_ERR, + FIFF.FIFFV_HPI_MOV, + ] + ), + fnirs_cw_amplitude=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE, + ), + fnirs_fd_ac_amplitude=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_V, + coil_type=FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE, + ), + fnirs_fd_phase=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_RAD, + coil_type=FIFF.FIFFV_COIL_FNIRS_FD_PHASE, + ), + fnirs_od=dict(kind=FIFF.FIFFV_FNIRS_CH, coil_type=FIFF.FIFFV_COIL_FNIRS_OD), + hbo=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_MOL, + coil_type=FIFF.FIFFV_COIL_FNIRS_HBO, + ), + hbr=dict( + kind=FIFF.FIFFV_FNIRS_CH, + unit=FIFF.FIFF_UNIT_MOL, + coil_type=FIFF.FIFFV_COIL_FNIRS_HBR, + ), + csd=dict( + kind=FIFF.FIFFV_EEG_CH, + unit=FIFF.FIFF_UNIT_V_M2, + coil_type=FIFF.FIFFV_COIL_EEG_CSD, + ), + temperature=dict(kind=FIFF.FIFFV_TEMPERATURE_CH, unit=FIFF.FIFF_UNIT_CEL), + gsr=dict(kind=FIFF.FIFFV_GALVANIC_CH, unit=FIFF.FIFF_UNIT_S), + eyegaze=dict( + kind=FIFF.FIFFV_EYETRACK_CH, coil_type=FIFF.FIFFV_COIL_EYETRACK_POS + ), + pupil=dict( + kind=FIFF.FIFFV_EYETRACK_CH, coil_type=FIFF.FIFFV_COIL_EYETRACK_PUPIL + ), + ) + if include_defaults: + coil_none = dict(coil_type=FIFF.FIFFV_COIL_NONE) + unit_none = dict(unit=FIFF.FIFF_UNIT_NONE) + defaults = dict( + grad=dict(coil_type=FIFF.FIFFV_COIL_VV_PLANAR_T1), + mag=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3), + ref_meg=dict(coil_type=FIFF.FIFFV_COIL_VV_MAG_T3, unit=FIFF.FIFF_UNIT_T), + misc=dict(**coil_none, **unit_none), # NB: overwrites UNIT_V + stim=dict(unit=FIFF.FIFF_UNIT_V, **coil_none), + eog=coil_none, + ecg=coil_none, + emg=coil_none, + bio=coil_none, + fnirs_od=unit_none, + pupil=unit_none, + eyegaze=dict(unit=FIFF.FIFF_UNIT_PX), + ) + for key, value in defaults.items(): + base[key].update(value) + return base + + +_first_rule = { + FIFF.FIFFV_MEG_CH: "meg", + FIFF.FIFFV_REF_MEG_CH: "ref_meg", + FIFF.FIFFV_EEG_CH: "eeg", + FIFF.FIFFV_STIM_CH: "stim", + FIFF.FIFFV_EOG_CH: "eog", + FIFF.FIFFV_EMG_CH: "emg", + FIFF.FIFFV_ECG_CH: "ecg", + FIFF.FIFFV_RESP_CH: "resp", + FIFF.FIFFV_MISC_CH: "misc", + FIFF.FIFFV_EXCI_CH: "exci", + FIFF.FIFFV_IAS_CH: 
"ias", + FIFF.FIFFV_SYST_CH: "syst", + FIFF.FIFFV_SEEG_CH: "seeg", + FIFF.FIFFV_DBS_CH: "dbs", + FIFF.FIFFV_BIO_CH: "bio", + FIFF.FIFFV_QUAT_0: "chpi", + FIFF.FIFFV_QUAT_1: "chpi", + FIFF.FIFFV_QUAT_2: "chpi", + FIFF.FIFFV_QUAT_3: "chpi", + FIFF.FIFFV_QUAT_4: "chpi", + FIFF.FIFFV_QUAT_5: "chpi", + FIFF.FIFFV_QUAT_6: "chpi", + FIFF.FIFFV_HPI_G: "chpi", + FIFF.FIFFV_HPI_ERR: "chpi", + FIFF.FIFFV_HPI_MOV: "chpi", + FIFF.FIFFV_DIPOLE_WAVE: "dipole", + FIFF.FIFFV_GOODNESS_FIT: "gof", + FIFF.FIFFV_ECOG_CH: "ecog", + FIFF.FIFFV_FNIRS_CH: "fnirs", + FIFF.FIFFV_TEMPERATURE_CH: "temperature", + FIFF.FIFFV_GALVANIC_CH: "gsr", + FIFF.FIFFV_EYETRACK_CH: "eyetrack", +} +# How to reduce our categories in channel_type (originally) +_second_rules = { + "meg": ("unit", {FIFF.FIFF_UNIT_T_M: "grad", FIFF.FIFF_UNIT_T: "mag"}), + "fnirs": ( + "coil_type", + { + FIFF.FIFFV_COIL_FNIRS_HBO: "hbo", + FIFF.FIFFV_COIL_FNIRS_HBR: "hbr", + FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE: "fnirs_cw_amplitude", + FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE: "fnirs_fd_ac_amplitude", + FIFF.FIFFV_COIL_FNIRS_FD_PHASE: "fnirs_fd_phase", + FIFF.FIFFV_COIL_FNIRS_OD: "fnirs_od", + }, + ), + "eeg": ( + "coil_type", + { + FIFF.FIFFV_COIL_EEG: "eeg", + FIFF.FIFFV_COIL_EEG_BIPOLAR: "eeg", + FIFF.FIFFV_COIL_NONE: "eeg", # MNE-C backward compat + FIFF.FIFFV_COIL_EEG_CSD: "csd", + }, + ), + "eyetrack": ( + "coil_type", + { + FIFF.FIFFV_COIL_EYETRACK_POS: "eyegaze", + FIFF.FIFFV_COIL_EYETRACK_PUPIL: "pupil", + }, + ), +} + + +@fill_doc +def channel_type(info, idx): + """Get channel type. + + Parameters + ---------- + %(info_not_none)s + idx : int + Index of channel. + + Returns + ------- + type : str + Type of channel. Will be one of:: + + {'bio', 'chpi', 'dbs', 'dipole', 'ecg', 'ecog', 'eeg', 'emg', + 'eog', 'exci', 'eyetrack', 'fnirs', 'gof', 'gsr', 'ias', 'misc', + 'meg', 'ref_meg', 'resp', 'seeg', 'stim', 'syst', 'temperature'} + """ + # This is faster than the original _channel_type_old now in test_pick.py + # because it uses (at most!) two dict lookups plus one conditional + # to get the channel type string. + ch = info["chs"][idx] + try: + first_kind = _first_rule[ch["kind"]] + except KeyError: + raise ValueError( + f'Unknown channel type ({ch["kind"]}) for channel "{ch["ch_name"]}"' + ) + if first_kind in _second_rules: + key, second_rule = _second_rules[first_kind] + first_kind = second_rule[ch[key]] + return first_kind + + +@verbose +def pick_channels(ch_names, include, exclude=(), ordered=True, *, verbose=None): + """Pick channels by names. + + Returns the indices of ``ch_names`` in ``include`` but not in ``exclude``. + + Parameters + ---------- + ch_names : list of str + List of channels. + include : list of str + List of channels to include (if empty include all available). + + .. note:: This is to be treated as a set. The order of this list + is not used or maintained in ``sel``. + + exclude : list of str + List of channels to exclude (if empty do not exclude any channel). + Defaults to []. + %(ordered)s + %(verbose)s + + Returns + ------- + sel : array of int + Indices of good channels. 
+ + See Also + -------- + pick_channels_regexp, pick_types + """ + if len(np.unique(ch_names)) != len(ch_names): + raise RuntimeError("ch_names is not a unique list, picking is unsafe") + _validate_type(ordered, bool, "ordered") + _check_excludes_includes(include) + _check_excludes_includes(exclude) + if not isinstance(include, list): + include = list(include) + if len(include) == 0: + include = list(ch_names) + if not isinstance(exclude, list): + exclude = list(exclude) + sel, missing = list(), list() + for name in include: + if name in ch_names: + if name not in exclude: + sel.append(ch_names.index(name)) + else: + missing.append(name) + if len(missing) and ordered: + raise ValueError( + f"Missing channels from ch_names required by include:\n{missing}" + ) + if not ordered: + sel = np.unique(sel) + return np.array(sel, int) + + +def pick_channels_regexp(ch_names, regexp): + """Pick channels using regular expression. + + Returns the indices of the good channels in ch_names. + + Parameters + ---------- + ch_names : list of str + List of channels. + + regexp : str + The regular expression. See python standard module for regular + expressions. + + Returns + ------- + sel : array of int + Indices of good channels. + + See Also + -------- + pick_channels + + Examples + -------- + >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG ...1') + [0] + >>> pick_channels_regexp(['MEG 2331', 'MEG 2332', 'MEG 2333'], 'MEG *') + [0, 1, 2] + """ + r = re.compile(regexp) + return [k for k, name in enumerate(ch_names) if r.match(name)] + + +def _triage_meg_pick(ch, meg): + """Triage an MEG pick type.""" + if meg is True: + return True + elif ch["unit"] == FIFF.FIFF_UNIT_T_M: + if meg == "grad": + return True + elif meg == "planar1" and ch["ch_name"].endswith("2"): + return True + elif meg == "planar2" and ch["ch_name"].endswith("3"): + return True + elif meg == "mag" and ch["unit"] == FIFF.FIFF_UNIT_T: + return True + return False + + +def _triage_fnirs_pick(ch, fnirs, warned): + """Triage an fNIRS pick type.""" + if fnirs is True: + return True + elif ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_HBO and "hbo" in fnirs: + return True + elif ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_HBR and "hbr" in fnirs: + return True + elif ( + ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE + and "fnirs_cw_amplitude" in fnirs + ): + return True + elif ( + ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_FD_AC_AMPLITUDE + and "fnirs_fd_ac_amplitude" in fnirs + ): + return True + elif ( + ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_FD_PHASE and "fnirs_fd_phase" in fnirs + ): + return True + elif ch["coil_type"] == FIFF.FIFFV_COIL_FNIRS_OD and "fnirs_od" in fnirs: + return True + return False + + +def _triage_eyetrack_pick(ch, eyetrack): + """Triage an eyetrack pick type.""" + if eyetrack is False: + return False + elif eyetrack is True: + return True + elif ch["coil_type"] == FIFF.FIFFV_COIL_EYETRACK_PUPIL and "pupil" in eyetrack: + return True + elif ch["coil_type"] == FIFF.FIFFV_COIL_EYETRACK_POS and "eyegaze" in eyetrack: + return True + return False + + +def _check_meg_type(meg, allow_auto=False): + """Ensure a valid meg type.""" + if isinstance(meg, str): + allowed_types = ["grad", "mag", "planar1", "planar2"] + allowed_types += ["auto"] if allow_auto else [] + if meg not in allowed_types: + raise ValueError( + f"meg value must be one of {allowed_types} or bool, not {meg}" + ) + + +def _check_info_exclude(info, exclude): + _validate_type(info, "info") + info._check_consistency() + if exclude is None: + raise 
ValueError('exclude must be a list of strings or "bads"') + elif exclude == "bads": + exclude = info.get("bads", []) + elif not isinstance(exclude, list | tuple): + raise ValueError( + 'exclude must either be "bads" or a list of strings.' + " If only one channel is to be excluded, use " + "[ch_name] instead of passing ch_name." + ) + return exclude + + +@fill_doc +def pick_types( + info, + meg=False, + eeg=False, + stim=False, + eog=False, + ecg=False, + emg=False, + ref_meg="auto", + *, + misc=False, + resp=False, + chpi=False, + exci=False, + ias=False, + syst=False, + seeg=False, + dipole=False, + gof=False, + bio=False, + ecog=False, + fnirs=False, + csd=False, + dbs=False, + temperature=False, + gsr=False, + eyetrack=False, + include=(), + exclude="bads", + selection=None, +): + """Pick channels by type and names. + + Parameters + ---------- + %(info_not_none)s + %(pick_types_params)s + + Returns + ------- + sel : array of int + Indices of good channels. + """ + # NOTE: Changes to this function's signature should also be changed in + # PickChannelsMixin + _validate_type(meg, (bool, str), "meg") + + exclude = _check_info_exclude(info, exclude) + nchan = info["nchan"] + pick = np.zeros(nchan, dtype=bool) + + _check_meg_type(ref_meg, allow_auto=True) + _check_meg_type(meg) + if isinstance(ref_meg, str) and ref_meg == "auto": + ref_meg = ( + "comps" in info + and info["comps"] is not None + and len(info["comps"]) > 0 + and meg is not False + ) + + for param in ( + eeg, + stim, + eog, + ecg, + emg, + misc, + resp, + chpi, + exci, + ias, + syst, + seeg, + dipole, + gof, + bio, + ecog, + csd, + dbs, + temperature, + gsr, + ): + if not isinstance(param, bool): + w = ( + "Parameters for all channel types (with the exception of " + '"meg", "ref_meg", "fnirs", and "eyetrack") must be of type ' + "bool, not {}." 
+ ) + raise ValueError(w.format(type(param))) + + param_dict = dict( + eeg=eeg, + stim=stim, + eog=eog, + ecg=ecg, + emg=emg, + misc=misc, + resp=resp, + chpi=chpi, + exci=exci, + ias=ias, + syst=syst, + seeg=seeg, + dbs=dbs, + dipole=dipole, + gof=gof, + bio=bio, + ecog=ecog, + csd=csd, + temperature=temperature, + gsr=gsr, + eyetrack=eyetrack, + ) + + # avoid triage if possible + if isinstance(meg, bool): + for key in ("grad", "mag"): + param_dict[key] = meg + if isinstance(fnirs, bool): + for key in _FNIRS_CH_TYPES_SPLIT: + param_dict[key] = fnirs + warned = [False] + for k in range(nchan): + ch_type = channel_type(info, k) + try: + pick[k] = param_dict[ch_type] + except KeyError: # not so simple + assert ( + ch_type + in ("grad", "mag", "ref_meg") + + _FNIRS_CH_TYPES_SPLIT + + _EYETRACK_CH_TYPES_SPLIT + ) + if ch_type in ("grad", "mag"): + pick[k] = _triage_meg_pick(info["chs"][k], meg) + elif ch_type == "ref_meg": + pick[k] = _triage_meg_pick(info["chs"][k], ref_meg) + elif ch_type in ("eyegaze", "pupil"): + pick[k] = _triage_eyetrack_pick(info["chs"][k], eyetrack) + else: # ch_type in ('hbo', 'hbr') + pick[k] = _triage_fnirs_pick(info["chs"][k], fnirs, warned) + + # restrict channels to selection if provided + if selection is not None: + # the selection only restricts these types of channels + sel_kind = [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH, FIFF.FIFFV_EEG_CH] + for k in np.where(pick)[0]: + if ( + info["chs"][k]["kind"] in sel_kind + and info["ch_names"][k] not in selection + ): + pick[k] = False + + myinclude = [info["ch_names"][k] for k in range(nchan) if pick[k]] + myinclude += include + + if len(myinclude) == 0: + sel = np.array([], int) + else: + sel = pick_channels(info["ch_names"], myinclude, exclude, ordered=False) + + return sel + + +@verbose +def pick_info(info, sel=(), copy=True, verbose=None): + """Restrict an info structure to a selection of channels. + + Parameters + ---------- + %(info_not_none)s + sel : list of int | None + Indices of channels to include. If None, all channels + are included. + copy : bool + If copy is False, info is modified inplace. + %(verbose)s + + Returns + ------- + res : dict + Info structure restricted to a selection of channels. + """ + # avoid circular imports + from .meas_info import _bad_chans_comp + + info._check_consistency() + info = info.copy() if copy else info + if sel is None: + return info + elif len(sel) == 0: + raise ValueError("No channels match the selection.") + ch_set = set(info["ch_names"][k] for k in sel) + n_unique = len(ch_set) + if n_unique != len(sel): + raise ValueError( + f"Found {n_unique} / {len(sel)} unique names, sel is not unique" + ) + + # make sure required the compensation channels are present + if len(info.get("comps", [])) > 0: + ch_names = [info["ch_names"][idx] for idx in sel] + _, comps_missing = _bad_chans_comp(info, ch_names) + if len(comps_missing) > 0: + logger.info( + f"Removing {len(info['comps'])} compensators from info because " + "not all compensation channels were picked." 
+ ) + with info._unlock(): + info["comps"] = [] + with info._unlock(): + info["chs"] = [info["chs"][k] for k in sel] + info._update_redundant() + info["bads"] = [ch for ch in info["bads"] if ch in info["ch_names"]] + if "comps" in info: + comps = deepcopy(info["comps"]) + for c in comps: + row_idx = [ + k for k, n in enumerate(c["data"]["row_names"]) if n in info["ch_names"] + ] + row_names = [c["data"]["row_names"][i] for i in row_idx] + rowcals = c["rowcals"][row_idx] + c["rowcals"] = rowcals + c["data"]["nrow"] = len(row_names) + c["data"]["row_names"] = row_names + c["data"]["data"] = c["data"]["data"][row_idx] + with info._unlock(): + info["comps"] = comps + if info.get("custom_ref_applied", False) and not _electrode_types(info): + with info._unlock(): + info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF + # remove unused projectors + if info.get("projs", False): + projs = list() + for p in info["projs"]: + if any(ch_name in ch_set for ch_name in p["data"]["col_names"]): + projs.append(p) + if len(projs) != len(info["projs"]): + with info._unlock(): + info["projs"] = projs + info._check_consistency() + + return info + + +def _has_kit_refs(info, picks): + """Determine if KIT ref channels are chosen. + + This is currently only used by make_forward_solution, which cannot + run when KIT reference channels are included. + """ + for p in picks: + if info["chs"][p]["coil_type"] == FIFF.FIFFV_COIL_KIT_REF_MAG: + return True + return False + + +@verbose +def pick_channels_forward( + orig, include=(), exclude=(), ordered=True, copy=True, *, verbose=None +): + """Pick channels from forward operator. + + Parameters + ---------- + orig : dict + A forward solution. + include : list of str + List of channels to include (if empty, include all available). + Defaults to []. + exclude : list of str | 'bads' + Channels to exclude (if empty, do not exclude any). Defaults to []. + If 'bads', then exclude bad channels in orig. + %(ordered)s + copy : bool + If True (default), make a copy. + + .. versionadded:: 0.19 + %(verbose)s + + Returns + ------- + res : dict + Forward solution restricted to selected channels. If include and + exclude are empty it returns orig without copy. + """ + orig["info"]._check_consistency() + if len(include) == 0 and len(exclude) == 0: + return orig.copy() if copy else orig + exclude = _check_excludes_includes(exclude, info=orig["info"], allow_bads=True) + + # Allow for possibility of channel ordering in forward solution being + # different from that of the M/EEG file it is based on. + sel_sol = pick_channels( + orig["sol"]["row_names"], include=include, exclude=exclude, ordered=ordered + ) + sel_info = pick_channels( + orig["info"]["ch_names"], include=include, exclude=exclude, ordered=ordered + ) + + fwd = deepcopy(orig) if copy else orig + + # Check that forward solution and original data file agree on #channels + if len(sel_sol) != len(sel_info): + raise ValueError( + "Forward solution and functional data appear to " + "have different channel names, please check." + ) + + # Do we have something? 
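+    # ("sel_sol" indexes rows of the solution matrix; if the include/exclude
+    # selection removed every channel, there is nothing sensible to return.)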
+ nuse = len(sel_sol) + if nuse == 0: + raise ValueError("Nothing remains after picking") + + logger.info(f" {nuse:d} out of {fwd['nchan']} channels remain after picking") + + # Pick the correct rows of the forward operator using sel_sol + fwd["sol"]["data"] = fwd["sol"]["data"][sel_sol, :] + fwd["_orig_sol"] = fwd["_orig_sol"][sel_sol, :] + fwd["sol"]["nrow"] = nuse + + ch_names = [fwd["sol"]["row_names"][k] for k in sel_sol] + fwd["nchan"] = nuse + fwd["sol"]["row_names"] = ch_names + + # Pick the appropriate channel names from the info-dict using sel_info + with fwd["info"]._unlock(): + fwd["info"]["chs"] = [fwd["info"]["chs"][k] for k in sel_info] + fwd["info"]._update_redundant() + fwd["info"]["bads"] = [b for b in fwd["info"]["bads"] if b in ch_names] + + if fwd["sol_grad"] is not None: + fwd["sol_grad"]["data"] = fwd["sol_grad"]["data"][sel_sol, :] + fwd["_orig_sol_grad"] = fwd["_orig_sol_grad"][sel_sol, :] + fwd["sol_grad"]["nrow"] = nuse + fwd["sol_grad"]["row_names"] = [ + fwd["sol_grad"]["row_names"][k] for k in sel_sol + ] + + return fwd + + +def pick_types_forward( + orig, + meg=False, + eeg=False, + ref_meg=True, + seeg=False, + ecog=False, + dbs=False, + include=(), + exclude=(), +): + """Pick by channel type and names from a forward operator. + + Parameters + ---------- + orig : dict + A forward solution. + meg : bool | str + If True include MEG channels. If string it can be 'mag', 'grad', + 'planar1' or 'planar2' to select only magnetometers, all gradiometers, + or a specific type of gradiometer. + eeg : bool + If True include EEG channels. + ref_meg : bool + If True include CTF / 4D reference channels. + seeg : bool + If True include stereotactic EEG channels. + ecog : bool + If True include electrocorticography channels. + dbs : bool + If True include deep brain stimulation channels. + include : list of str + List of additional channels to include. If empty do not include any. + exclude : list of str | str + List of channels to exclude. If empty do not exclude any (default). + If 'bads', exclude channels in orig['info']['bads']. + + Returns + ------- + res : dict + Forward solution restricted to selected channel types. + """ + info = orig["info"] + sel = pick_types( + info, + meg, + eeg, + ref_meg=ref_meg, + seeg=seeg, + ecog=ecog, + dbs=dbs, + include=include, + exclude=exclude, + ) + if len(sel) == 0: + raise ValueError("No valid channels found") + include_ch_names = [info["ch_names"][k] for k in sel] + + return pick_channels_forward(orig, include_ch_names) + + +@fill_doc +def channel_indices_by_type(info, picks=None): + """Get indices of channels by type. + + Parameters + ---------- + %(info_not_none)s + %(picks_all)s + + Returns + ------- + idx_by_type : dict + A dictionary that maps each channel type to a (possibly empty) list of + channel indices. 
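+
+    Examples
+    --------
+    A rough sketch (assuming ``raw`` is a loaded Raw instance; the actual
+    indices depend entirely on the recording)::
+
+        idx = channel_indices_by_type(raw.info)
+        idx["eeg"]   # e.g. [0, 1, 2, ...]
+        idx["stim"]  # e.g. [64], or [] if no stim channel is present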
+ """ + idx_by_type = { + key: list() + for key in _PICK_TYPES_KEYS + if key not in ("meg", "fnirs", "eyetrack") + } + idx_by_type.update( + mag=list(), + grad=list(), + hbo=list(), + hbr=list(), + fnirs_cw_amplitude=list(), + fnirs_fd_ac_amplitude=list(), + fnirs_fd_phase=list(), + fnirs_od=list(), + eyegaze=list(), + pupil=list(), + ) + picks = _picks_to_idx(info, picks, none="all", exclude=(), allow_empty=True) + for k in picks: + ch_type = channel_type(info, k) + for key in idx_by_type.keys(): + if ch_type == key: + idx_by_type[key].append(k) + return idx_by_type + + +@verbose +def pick_channels_cov( + orig, include=(), exclude="bads", ordered=True, copy=True, *, verbose=None +): + """Pick channels from covariance matrix. + + Parameters + ---------- + orig : Covariance + A covariance. + include : list of str, (optional) + List of channels to include (if empty, include all available). + exclude : list of str, (optional) | 'bads' + Channels to exclude (if empty, do not exclude any). Defaults to 'bads'. + %(ordered)s + copy : bool + If True (the default), return a copy of the covariance matrix with the + modified channels. If False, channels are modified in-place. + + .. versionadded:: 0.20.0 + %(verbose)s + + Returns + ------- + res : dict + Covariance solution restricted to selected channels. + """ + if copy: + orig = orig.copy() + # A little peculiarity of the cov objects is that these two fields + # should not be copied over when None. + if "method" in orig and orig["method"] is None: + del orig["method"] + if "loglik" in orig and orig["loglik"] is None: + del orig["loglik"] + + exclude = orig["bads"] if exclude == "bads" else exclude + sel = pick_channels( + orig["names"], include=include, exclude=exclude, ordered=ordered + ) + data = orig["data"][sel][:, sel] if not orig["diag"] else orig["data"][sel] + names = [orig["names"][k] for k in sel] + bads = [name for name in orig["bads"] if name in orig["names"]] + + orig["data"] = data + orig["names"] = names + orig["bads"] = bads + orig["dim"] = len(data) + + return orig + + +def _mag_grad_dependent(info): + """Determine of mag and grad should be dealt with jointly.""" + # right now just uses SSS, could be computed / checked from cov + # but probably overkill + return any( + ph.get("max_info", {}).get("sss_info", {}).get("in_order", 0) + for ph in info.get("proc_history", []) + ) + + +@fill_doc +def _contains_ch_type(info, ch_type): + """Check whether a certain channel type is in an info object. + + Parameters + ---------- + %(info_not_none)s + ch_type : str + the channel type to be checked for + + Returns + ------- + has_ch_type : bool + Whether the channel type is present or not. + """ + _validate_type(ch_type, "str", "ch_type") + + meg_extras = list(_MEG_CH_TYPES_SPLIT) + fnirs_extras = list(_FNIRS_CH_TYPES_SPLIT) + et_extras = list(_EYETRACK_CH_TYPES_SPLIT) + valid_channel_types = sorted( + [key for key in _PICK_TYPES_KEYS if key != "meg"] + + meg_extras + + fnirs_extras + + et_extras + ) + _check_option("ch_type", ch_type, valid_channel_types) + if info is None: + raise ValueError( + f'Cannot check for channels of type "{ch_type}" because info is None' + ) + return any(ch_type == channel_type(info, ii) for ii in range(info["nchan"])) + + +@fill_doc +def _picks_by_type(info, meg_combined=False, ref_meg=False, exclude="bads"): + """Get data channel indices as separate list of tuples. + + Parameters + ---------- + %(info_not_none)s + meg_combined : bool | 'auto' + Whether to return combined picks for grad and mag. 
+ Can be 'auto' to choose based on Maxwell filtering status. + ref_meg : bool + If True include CTF / 4D reference channels + exclude : list of str | str + List of channels to exclude. If 'bads' (default), exclude channels + in info['bads']. + + Returns + ------- + picks_list : list of tuples + The list of tuples of picks and the type string. + """ + _validate_type(ref_meg, bool, "ref_meg") + exclude = _check_info_exclude(info, exclude) + if meg_combined == "auto": + meg_combined = _mag_grad_dependent(info) + + picks_list = {ch_type: list() for ch_type in _DATA_CH_TYPES_SPLIT} + for k in range(info["nchan"]): + if info["chs"][k]["ch_name"] not in exclude: + this_type = channel_type(info, k) + try: + picks_list[this_type].append(k) + except KeyError: + # This annoyance is due to differences in pick_types + # and channel_type behavior + if this_type == "ref_meg": + ch = info["chs"][k] + if _triage_meg_pick(ch, ref_meg): + if ch["unit"] == FIFF.FIFF_UNIT_T: + picks_list["mag"].append(k) + elif ch["unit"] == FIFF.FIFF_UNIT_T_M: + picks_list["grad"].append(k) + else: + pass # not a data channel type + picks_list = [ + (ch_type, np.array(picks_list[ch_type], int)) + for ch_type in _DATA_CH_TYPES_SPLIT + ] + assert _DATA_CH_TYPES_SPLIT[:2] == ("mag", "grad") + if meg_combined and len(picks_list[0][1]) and len(picks_list[1][1]): + picks_list.insert( + 0, + ( + "meg", + np.unique(np.concatenate([picks_list.pop(0)[1], picks_list.pop(0)[1]])), + ), + ) + picks_list = [p for p in picks_list if len(p[1])] + return picks_list + + +def _check_excludes_includes(chs, info=None, allow_bads=False): + """Ensure that inputs to exclude/include are list-like or "bads". + + Parameters + ---------- + chs : any input, should be list, tuple, set, str + The channels passed to include or exclude. + allow_bads : bool + Allow the user to supply "bads" as a string for auto exclusion. + + Returns + ------- + chs : list + Channels to be excluded/excluded. If allow_bads, and chs=="bads", + this will be the bad channels found in 'info'. + """ + from .meas_info import Info + + if not isinstance(chs, list | tuple | set | np.ndarray): + if allow_bads is True: + if not isinstance(info, Info): + raise ValueError("Supply an info object if allow_bads is true") + elif chs != "bads": + raise ValueError('If chs is a string, it must be "bads"') + else: + chs = info["bads"] + else: + raise ValueError( + 'include/exclude must be list, tuple, ndarray, or "bads". You provided ' + f"type {type(chs)}." 
+ ) + return chs + + +_PICK_TYPES_DATA_DICT = dict( + meg=True, + eeg=True, + csd=True, + stim=False, + eog=False, + ecg=False, + emg=False, + misc=False, + resp=False, + chpi=False, + exci=False, + ias=False, + syst=False, + seeg=True, + dipole=False, + gof=False, + bio=False, + ecog=True, + fnirs=True, + dbs=True, + temperature=False, + gsr=False, + eyetrack=True, +) +_PICK_TYPES_KEYS = tuple(list(_PICK_TYPES_DATA_DICT) + ["ref_meg"]) +_MEG_CH_TYPES_SPLIT = ("mag", "grad", "planar1", "planar2") +_FNIRS_CH_TYPES_SPLIT = ( + "hbo", + "hbr", + "fnirs_cw_amplitude", + "fnirs_fd_ac_amplitude", + "fnirs_fd_phase", + "fnirs_od", +) +_EYETRACK_CH_TYPES_SPLIT = ("eyegaze", "pupil") +_DATA_CH_TYPES_ORDER_DEFAULT = ( + ( + "mag", + "grad", + "eeg", + "csd", + "eog", + "ecg", + "resp", + "emg", + "ref_meg", + "misc", + "stim", + "chpi", + "exci", + "ias", + "syst", + "seeg", + "bio", + "ecog", + "dbs", + "temperature", + "gsr", + "gof", + "dipole", + ) + + _FNIRS_CH_TYPES_SPLIT + + _EYETRACK_CH_TYPES_SPLIT + + ("whitened",) +) + +# Valid data types, ordered for consistency, used in viz/evoked. +_VALID_CHANNEL_TYPES = ( + ( + "eeg", + "grad", + "mag", + "seeg", + "eog", + "ecg", + "resp", + "emg", + "dipole", + "gof", + "bio", + "ecog", + "dbs", + ) + + _FNIRS_CH_TYPES_SPLIT + + _EYETRACK_CH_TYPES_SPLIT + + ("misc", "csd") +) +_DATA_CH_TYPES_SPLIT = ( + "mag", + "grad", + "eeg", + "csd", + "seeg", + "ecog", + "dbs", +) + _FNIRS_CH_TYPES_SPLIT +# Electrode types (e.g., can be average-referenced together or separately) +_ELECTRODE_CH_TYPES = ("eeg", "ecog", "seeg", "dbs") + + +def _electrode_types(info, *, exclude="bads"): + return [ + ch_type + for ch_type in _ELECTRODE_CH_TYPES + if len(pick_types(info, exclude=exclude, **{ch_type: True})) + ] + + +def _pick_data_channels(info, exclude="bads", with_ref_meg=True, with_aux=False): + """Pick only data channels.""" + kwargs = _PICK_TYPES_DATA_DICT + if with_aux: + kwargs = kwargs.copy() + kwargs.update(eog=True, ecg=True, emg=True, bio=True) + return pick_types(info, ref_meg=with_ref_meg, exclude=exclude, **kwargs) + + +def _pick_data_or_ica(info, exclude=()): + """Pick only data or ICA channels.""" + if any(ch_name.startswith("ICA") for ch_name in info["ch_names"]): + picks = pick_types(info, exclude=exclude, misc=True) + else: + picks = _pick_data_channels(info, exclude=exclude, with_ref_meg=True) + return picks + + +def _picks_to_idx( + info, + picks, + none="data", + exclude="bads", + allow_empty=False, + with_ref_meg=True, + return_kind=False, + picks_on="channels", +): + """Convert and check pick validity. + + Parameters + ---------- + picks_on : str + 'channels' (default) for error messages about selection of channels. + 'components' for error messages about selection of components. 
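+
+    Notes
+    -----
+    ``picks`` itself may be None, a slice, integer indices, channel names,
+    or channel-type strings such as ``"eeg"``; for instance,
+    ``_picks_to_idx(info, "eeg")`` and ``_picks_to_idx(info, slice(0, 4))``
+    both normalize to an ndarray of integer indices.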
+ """ + from .meas_info import Info + + picked_ch_type_or_generic = False + # + # None -> all, data, or data_or_ica (ndarray of int) + # + if isinstance(info, Info): + n_chan = info["nchan"] + else: + info = _ensure_int(info, "info", "an int or Info") + n_chan = info + assert n_chan >= 0 + + orig_picks = picks + # We do some extra_repr gymnastics to avoid calling repr(orig_picks) too + # soon as it can be a performance bottleneck (repr on ndarray is slow) + extra_repr = "" + if picks is None: + if isinstance(info, int): # special wrapper for no real info + picks = np.arange(n_chan) + extra_repr = ", treated as range({n_chan})" + else: + picks = none # let _picks_str_to_idx handle it + extra_repr = f'None, treated as "{none}"' + + # + # slice + # + if isinstance(picks, slice): + picks = np.arange(n_chan)[picks] + + # + # -> ndarray of int (and make a copy) + # + picks = np.atleast_1d(picks) # this works even for picks == 'something' + picks = np.array([], dtype=int) if len(picks) == 0 else picks + if picks.ndim != 1: + raise ValueError(f"picks must be 1D, got {picks.ndim}D") + if picks.dtype.char in ("S", "U"): + picks = _picks_str_to_idx( + info, + picks, + exclude, + with_ref_meg, + return_kind, + extra_repr, + allow_empty, + orig_picks, + ) + if return_kind: + picked_ch_type_or_generic = picks[1] + picks = picks[0] + if picks.dtype.kind not in ["i", "u"]: + extra_ch = " or list of str (names)" if picks_on == "channels" else "" + msg = ( + f"picks must be a list of int (indices){extra_ch}. " + f"The provided data type {picks.dtype} is invalid." + ) + raise TypeError(msg) + del extra_repr + picks = picks.astype(int) + + # + # ensure we have (optionally non-empty) ndarray of valid int + # + if len(picks) == 0 and not allow_empty: + raise ValueError( + f"No appropriate {picks_on} found for the given picks ({orig_picks!r})" + ) + if (picks < -n_chan).any(): + raise IndexError(f"All picks must be >= {-n_chan}, got {repr(orig_picks)}") + if (picks >= n_chan).any(): + raise IndexError( + f"All picks must be < n_{picks_on} ({n_chan}), got {repr(orig_picks)}" + ) + picks %= n_chan # ensure positive + if return_kind: + return picks, picked_ch_type_or_generic + return picks + + +def _picks_str_to_idx( + info, picks, exclude, with_ref_meg, return_kind, extra_repr, allow_empty, orig_picks +): + """Turn a list of str into ndarray of int.""" + # special case for _picks_to_idx w/no info: shouldn't really happen + if isinstance(info, int): + raise ValueError( + "picks as str can only be used when measurement info is available" + ) + + # + # first: check our special cases + # + + picks_generic = list() + if len(picks) == 1: + if picks[0] in ("all", "data", "data_or_ica"): + if picks[0] == "all": + use_exclude = info["bads"] if exclude == "bads" else exclude + picks_generic = pick_channels( + info["ch_names"], info["ch_names"], exclude=use_exclude + ) + elif picks[0] == "data": + picks_generic = _pick_data_channels( + info, exclude=exclude, with_ref_meg=with_ref_meg + ) + elif picks[0] == "data_or_ica": + picks_generic = _pick_data_or_ica(info, exclude=exclude) + if len(picks_generic) == 0 and orig_picks is None and not allow_empty: + raise ValueError( + f"picks ({repr(orig_picks) + extra_repr}) yielded no channels, " + "consider passing picks explicitly" + ) + + # + # second: match all to channel names + # + + bad_names = [] + picks_name = list() + for pick in picks: + try: + picks_name.append(info["ch_names"].index(pick)) + except ValueError: + bad_names.append(pick) + + # + # third: match all to types + 
# + bad_type = None + picks_type = list() + kwargs = dict(meg=False) + meg, fnirs, eyetrack = set(), set(), set() + for pick in picks: + if pick in _PICK_TYPES_KEYS: + kwargs[pick] = True + elif pick in _MEG_CH_TYPES_SPLIT: + meg |= {pick} + elif pick in _FNIRS_CH_TYPES_SPLIT: + fnirs |= {pick} + elif pick in _EYETRACK_CH_TYPES_SPLIT: + eyetrack |= {pick} + else: + bad_type = pick + break + else: + # bad_type is None but this could still be empty + bad_type = list(picks) + # triage MEG, FNIRS, and eyetrack, which are complicated due to non-bool entries + extra_picks = set() + if "ref_meg" not in picks and not with_ref_meg: + kwargs["ref_meg"] = False + if len(meg) > 0 and not kwargs.get("meg", False): + # easiest just to iterate + for use_meg in meg: + extra_picks |= set( + pick_types(info, meg=use_meg, ref_meg=False, exclude=exclude) + ) + if len(fnirs) and not kwargs.get("fnirs", False): + idx = 0 if len(fnirs) == 1 else slice(None) + kwargs["fnirs"] = list(fnirs)[idx] + if len(eyetrack) and not kwargs.get("eyetrack", False): + idx = 0 if len(eyetrack) == 1 else slice(None) + kwargs["eyetrack"] = list(eyetrack)[idx] # slice(None) is equivalent to all + picks_type = pick_types(info, exclude=exclude, **kwargs) + if len(extra_picks) > 0: + picks_type = sorted(set(picks_type) | set(extra_picks)) + + # + # finally: ensure we have exactly one usable list + # + all_picks = (picks_generic, picks_name, picks_type) + any_found = [len(p) > 0 for p in all_picks] + if sum(any_found) == 0: + if not allow_empty: + raise ValueError( + f"picks ({repr(orig_picks) + extra_repr}) could not be interpreted as " + f'channel names (no channel "{bad_names}"), channel types (no type' + f' "{bad_type}" present), or a generic type (just "all" or "data")' + ) + picks = np.array([], int) + elif sum(any_found) > 1: + raise RuntimeError( + "Some channel names are ambiguously equivalent to " + "channel types, cannot use string-based " + "picks for these" + ) + else: + picks = np.array(all_picks[np.where(any_found)[0][0]]) + + picked_ch_type_or_generic = not len(picks_name) + if len(bad_names) > 0 and not picked_ch_type_or_generic: + raise ValueError( + f"Channel(s) {bad_names} could not be picked, because " + "they are not present in the info instance." + ) + + if return_kind: + return picks, picked_ch_type_or_generic + return picks diff --git a/mne/_fiff/proc_history.py b/mne/_fiff/proc_history.py new file mode 100644 index 0000000..caa2d3d --- /dev/null +++ b/mne/_fiff/proc_history.py @@ -0,0 +1,345 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
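+
+# This module round-trips the MaxFilter (SSS) processing-history blocks of a
+# FIF file: _read_proc_history/_write_proc_history handle whole records, and
+# the _sss_* tuples below map FIFF tag IDs to dict keys, casters, and writers.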
+
+import numpy as np
+
+from ..fixes import _csc_array_cast
+from ..utils import _check_fname, warn
+from .constants import FIFF
+from .open import fiff_open, read_tag
+from .tag import _float_item, _int_item, find_tag
+from .tree import dir_tree_find
+from .write import (
+    _safe_name_list,
+    end_block,
+    start_block,
+    write_float,
+    write_float_matrix,
+    write_float_sparse,
+    write_id,
+    write_int,
+    write_int_matrix,
+    write_name_list_sanitized,
+    write_string,
+)
+
+_proc_map = dict(  # ID, caster, writer
+    parent_file_id=(
+        FIFF.FIFF_PARENT_FILE_ID,
+        dict,
+        write_id,
+    ),
+    block_id=(
+        FIFF.FIFF_BLOCK_ID,
+        dict,
+        write_id,
+    ),
+    parent_block_id=(
+        FIFF.FIFF_PARENT_BLOCK_ID,
+        dict,
+        write_id,
+    ),
+    date=(
+        FIFF.FIFF_MEAS_DATE,
+        lambda d: tuple(int(dd) for dd in d),
+        write_int,
+    ),
+    experimenter=(
+        FIFF.FIFF_EXPERIMENTER,
+        str,
+        write_string,
+    ),
+    creator=(
+        FIFF.FIFF_CREATOR,
+        str,
+        write_string,
+    ),
+)
+
+
+def _read_proc_history(fid, tree):
+    """Read processing history from fiff file.
+
+    This function reads the SSS info, the CTC correction, and the
+    calibrations from the SSS processing logs inside of a raw file
+    (cf. Maxfilter v2.2 manual (October 2010), page 21)::
+
+        104 = {                 900 = proc. history
+          104 = {               901 = proc. record
+            103 = block ID
+            204 = date
+            212 = scientist
+            113 = creator program
+            104 = {             502 = SSS info
+              264 = SSS task
+              263 = SSS coord frame
+              265 = SSS origin
+              266 = SSS ins.order
+              267 = SSS outs.order
+              268 = SSS nr chnls
+              269 = SSS components
+              278 = SSS nfree
+              243 = HPI g limit    0.98
+              244 = HPI dist limit 0.005
+            105 = }             502 = SSS info
+            104 = {             504 = MaxST info
+              264 = SSS task
+              272 = SSST subspace correlation
+              279 = SSST buffer length
+            105 = }
+            104 = {             501 = CTC correction
+              103 = block ID
+              204 = date
+              113 = creator program
+              800 = CTC matrix
+              3417 = proj item chs
+            105 = }             501 = CTC correction
+            104 = {             503 = SSS finecalib.
+              270 = SSS cal chnls
+              271 = SSS cal coeff
+            105 = }             503 = SSS finecalib.
+          105 = }               901 = proc. record
+        105 = }                 900 = proc.
history + """ + proc_history = dir_tree_find(tree, FIFF.FIFFB_PROCESSING_HISTORY) + out = list() + if len(proc_history) > 0: + proc_history = proc_history[0] + proc_records = dir_tree_find(proc_history, FIFF.FIFFB_PROCESSING_RECORD) + for proc_record in proc_records: + record = dict() + for i_ent in range(proc_record["nent"]): + kind = proc_record["directory"][i_ent].kind + pos = proc_record["directory"][i_ent].pos + for key, (id_, cast, _) in _proc_map.items(): + if kind == id_: + tag = read_tag(fid, pos) + record[key] = cast(tag.data) + break + else: + warn(f"Unknown processing history item {kind}") + record["max_info"] = _read_maxfilter_record(fid, proc_record) + iass = dir_tree_find(proc_record, FIFF.FIFFB_IAS) + if len(iass) > 0: + # XXX should eventually populate this + ss = [dict() for _ in range(len(iass))] + record["ias"] = ss + if len(record["max_info"]) > 0: + out.append(record) + return out + + +def _write_proc_history(fid, info): + """Write processing history to file.""" + if len(info["proc_history"]) > 0: + start_block(fid, FIFF.FIFFB_PROCESSING_HISTORY) + for record in info["proc_history"]: + start_block(fid, FIFF.FIFFB_PROCESSING_RECORD) + for key, (id_, _, writer) in _proc_map.items(): + if key in record: + writer(fid, id_, record[key]) + _write_maxfilter_record(fid, record["max_info"]) + if "ias" in record: + for _ in record["ias"]: + start_block(fid, FIFF.FIFFB_IAS) + # XXX should eventually populate this + end_block(fid, FIFF.FIFFB_IAS) + end_block(fid, FIFF.FIFFB_PROCESSING_RECORD) + end_block(fid, FIFF.FIFFB_PROCESSING_HISTORY) + + +_sss_info_keys = ( + "job", + "frame", + "origin", + "in_order", + "out_order", + "nchan", + "components", + "nfree", + "hpi_g_limit", + "hpi_dist_limit", +) +_sss_info_ids = ( + FIFF.FIFF_SSS_JOB, + FIFF.FIFF_SSS_FRAME, + FIFF.FIFF_SSS_ORIGIN, + FIFF.FIFF_SSS_ORD_IN, + FIFF.FIFF_SSS_ORD_OUT, + FIFF.FIFF_SSS_NMAG, + FIFF.FIFF_SSS_COMPONENTS, + FIFF.FIFF_SSS_NFREE, + FIFF.FIFF_HPI_FIT_GOOD_LIMIT, + FIFF.FIFF_HPI_FIT_DIST_LIMIT, +) +_sss_info_writers = ( + write_int, + write_int, + write_float, + write_int, + write_int, + write_int, + write_int, + write_int, + write_float, + write_float, +) +_sss_info_casters = ( + _int_item, + _int_item, + np.array, + _int_item, + _int_item, + _int_item, + np.array, + _int_item, + _float_item, + _float_item, +) + +_max_st_keys = ("job", "subspcorr", "buflen") +_max_st_ids = (FIFF.FIFF_SSS_JOB, FIFF.FIFF_SSS_ST_CORR, FIFF.FIFF_SSS_ST_LENGTH) +_max_st_writers = (write_int, write_float, write_float) +_max_st_casters = (_int_item, _float_item, _float_item) + +_sss_ctc_keys = ("block_id", "date", "creator", "decoupler") +_sss_ctc_ids = ( + FIFF.FIFF_BLOCK_ID, + FIFF.FIFF_MEAS_DATE, + FIFF.FIFF_CREATOR, + FIFF.FIFF_DECOUPLER_MATRIX, +) +_sss_ctc_writers = (write_id, write_int, write_string, write_float_sparse) +_sss_ctc_casters = (dict, np.array, str, _csc_array_cast) + +_sss_cal_keys = ("cal_chans", "cal_corrs") +_sss_cal_ids = (FIFF.FIFF_SSS_CAL_CHANS, FIFF.FIFF_SSS_CAL_CORRS) +_sss_cal_writers = (write_int_matrix, write_float_matrix) +_sss_cal_casters = (np.array, np.array) + + +def _read_ctc(fname): + """Read cross-talk correction matrix.""" + fname = _check_fname(fname, overwrite="read", must_exist=True) + f, tree, _ = fiff_open(fname) + with f as fid: + sss_ctc = _read_maxfilter_record(fid, tree)["sss_ctc"] + bad_str = f"Invalid cross-talk FIF: {fname}" + if len(sss_ctc) == 0: + raise ValueError(bad_str) + node = dir_tree_find(tree, FIFF.FIFFB_DATA_CORRECTION)[0] + comment = find_tag(fid, node, 
FIFF.FIFF_COMMENT).data + if comment != "cross-talk compensation matrix": + raise ValueError(bad_str) + sss_ctc["creator"] = find_tag(fid, node, FIFF.FIFF_CREATOR).data + sss_ctc["date"] = find_tag(fid, node, FIFF.FIFF_MEAS_DATE).data + return sss_ctc + + +def _read_maxfilter_record(fid, tree): + """Read maxfilter processing record from file.""" + sss_info_block = dir_tree_find(tree, FIFF.FIFFB_SSS_INFO) # 502 + sss_info = dict() + if len(sss_info_block) > 0: + sss_info_block = sss_info_block[0] + for i_ent in range(sss_info_block["nent"]): + kind = sss_info_block["directory"][i_ent].kind + pos = sss_info_block["directory"][i_ent].pos + for key, id_, cast in zip(_sss_info_keys, _sss_info_ids, _sss_info_casters): + if kind == id_: + tag = read_tag(fid, pos) + sss_info[key] = cast(tag.data) + break + + max_st_block = dir_tree_find(tree, FIFF.FIFFB_SSS_ST_INFO) # 504 + max_st = dict() + if len(max_st_block) > 0: + max_st_block = max_st_block[0] + for i_ent in range(max_st_block["nent"]): + kind = max_st_block["directory"][i_ent].kind + pos = max_st_block["directory"][i_ent].pos + for key, id_, cast in zip(_max_st_keys, _max_st_ids, _max_st_casters): + if kind == id_: + tag = read_tag(fid, pos) + max_st[key] = cast(tag.data) + break + + sss_ctc_block = dir_tree_find(tree, FIFF.FIFFB_CHANNEL_DECOUPLER) # 501 + sss_ctc = dict() + if len(sss_ctc_block) > 0: + sss_ctc_block = sss_ctc_block[0] + for i_ent in range(sss_ctc_block["nent"]): + kind = sss_ctc_block["directory"][i_ent].kind + pos = sss_ctc_block["directory"][i_ent].pos + for key, id_, cast in zip(_sss_ctc_keys, _sss_ctc_ids, _sss_ctc_casters): + if kind == id_: + tag = read_tag(fid, pos) + sss_ctc[key] = cast(tag.data) + break + else: + if kind == FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST: + tag = read_tag(fid, pos) + chs = _safe_name_list(tag.data, "read", "proj_items_chs") + # This list can null chars in the last entry, e.g.: + # [..., 'MEG2642', 'MEG2643', 'MEG2641\x00 ... 
\x00'] + chs[-1] = chs[-1].split("\x00")[0] + sss_ctc["proj_items_chs"] = chs + + sss_cal_block = dir_tree_find(tree, FIFF.FIFFB_SSS_CAL) # 503 + sss_cal = dict() + if len(sss_cal_block) > 0: + sss_cal_block = sss_cal_block[0] + for i_ent in range(sss_cal_block["nent"]): + kind = sss_cal_block["directory"][i_ent].kind + pos = sss_cal_block["directory"][i_ent].pos + for key, id_, cast in zip(_sss_cal_keys, _sss_cal_ids, _sss_cal_casters): + if kind == id_: + tag = read_tag(fid, pos) + sss_cal[key] = cast(tag.data) + break + + max_info = dict(sss_info=sss_info, sss_ctc=sss_ctc, sss_cal=sss_cal, max_st=max_st) + return max_info + + +def _write_maxfilter_record(fid, record): + """Write maxfilter processing record to file.""" + sss_info = record["sss_info"] + if len(sss_info) > 0: + start_block(fid, FIFF.FIFFB_SSS_INFO) + for key, id_, writer in zip(_sss_info_keys, _sss_info_ids, _sss_info_writers): + if key in sss_info: + writer(fid, id_, sss_info[key]) + end_block(fid, FIFF.FIFFB_SSS_INFO) + + max_st = record["max_st"] + if len(max_st) > 0: + start_block(fid, FIFF.FIFFB_SSS_ST_INFO) + for key, id_, writer in zip(_max_st_keys, _max_st_ids, _max_st_writers): + if key in max_st: + writer(fid, id_, max_st[key]) + end_block(fid, FIFF.FIFFB_SSS_ST_INFO) + + sss_ctc = record["sss_ctc"] + if len(sss_ctc) > 0: # dict has entries + start_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER) + for key, id_, writer in zip(_sss_ctc_keys, _sss_ctc_ids, _sss_ctc_writers): + if key in sss_ctc: + writer(fid, id_, sss_ctc[key]) + if "proj_items_chs" in sss_ctc: + write_name_list_sanitized( + fid, + FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST, + sss_ctc["proj_items_chs"], + "proj_items_chs", + ) + end_block(fid, FIFF.FIFFB_CHANNEL_DECOUPLER) + + sss_cal = record["sss_cal"] + if len(sss_cal) > 0: + start_block(fid, FIFF.FIFFB_SSS_CAL) + for key, id_, writer in zip(_sss_cal_keys, _sss_cal_ids, _sss_cal_writers): + if key in sss_cal: + writer(fid, id_, sss_cal[key]) + end_block(fid, FIFF.FIFFB_SSS_CAL) diff --git a/mne/_fiff/proj.py b/mne/_fiff/proj.py new file mode 100644 index 0000000..0376826 --- /dev/null +++ b/mne/_fiff/proj.py @@ -0,0 +1,1189 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import re +from copy import deepcopy +from itertools import count + +import numpy as np + +from ..defaults import _BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT, _INTERPOLATION_DEFAULT +from ..fixes import _safe_svd +from ..utils import ( + _check_option, + _validate_type, + fill_doc, + logger, + object_diff, + verbose, + warn, +) +from .constants import FIFF +from .pick import _ELECTRODE_CH_TYPES, _electrode_types, pick_info, pick_types +from .tag import _rename_list, find_tag +from .tree import dir_tree_find +from .write import ( + _safe_name_list, + end_block, + start_block, + write_float, + write_float_matrix, + write_int, + write_name_list_sanitized, + write_string, +) + + +class Projection(dict): + """Dictionary-like object holding a projection vector. + + Projection vectors are stored in a list in ``inst.info["projs"]``. Each projection + vector has 5 keys: ``active``, ``data``, ``desc``, ``explained_var``, ``kind``. + + .. warning:: This class is generally not meant to be instantiated + directly, use ``compute_proj_*`` functions instead. + + Parameters + ---------- + data : dict + The data dictionary. + desc : str + The projector description. + kind : int + The projector kind. + active : bool + Whether or not the projector has been applied. 
+    explained_var : float | None
+        The proportion of explained variance.
+    """
+
+    def __init__(
+        self,
+        *,
+        data,
+        desc="",
+        kind=FIFF.FIFFV_PROJ_ITEM_FIELD,
+        active=False,
+        explained_var=None,
+    ):
+        super().__init__(
+            desc=desc, kind=kind, active=active, data=data, explained_var=explained_var
+        )
+
+    def __repr__(self):  # noqa: D105
+        s = str(self["desc"])
+        s += f", active : {self['active']}"
+        s += f", n_channels : {len(self['data']['col_names'])}"
+        if self["explained_var"] is not None:
+            s += f', exp. var : {self["explained_var"] * 100:0.2f}%'
+        return f"<Projection | {s}>"
+
+    # speed up info copy by taking advantage of mutability
+    def __deepcopy__(self, memodict):
+        """Make a deepcopy."""
+        cls = self.__class__
+        result = cls.__new__(cls)
+        for k, v in self.items():
+            if k == "data":
+                v = v.copy()
+                v["data"] = v["data"].copy()
+                result[k] = v
+            else:
+                result[k] = v  # kind, active, desc, explained_var immutable
+        return result
+
+    def __eq__(self, other):
+        """Equality == method."""
+        return True if len(object_diff(self, other)) == 0 else False
+
+    def __ne__(self, other):
+        """Different != method."""
+        return not self.__eq__(other)
+
+    @fill_doc
+    def plot_topomap(
+        self,
+        info,
+        *,
+        sensors=True,
+        show_names=False,
+        contours=6,
+        outlines="head",
+        sphere=None,
+        image_interp=_INTERPOLATION_DEFAULT,
+        extrapolate=_EXTRAPOLATE_DEFAULT,
+        border=_BORDER_DEFAULT,
+        res=64,
+        size=1,
+        cmap=None,
+        vlim=(None, None),
+        cnorm=None,
+        colorbar=False,
+        cbar_fmt="%3.1f",
+        units=None,
+        axes=None,
+        show=True,
+    ):
+        """Plot topographic maps of SSP projections.
+
+        Parameters
+        ----------
+        %(info_not_none)s Used to determine the layout.
+        %(sensors_topomap)s
+        %(show_names_topomap)s
+
+            .. versionadded:: 1.2
+        %(contours_topomap)s
+        %(outlines_topomap)s
+        %(sphere_topomap_auto)s
+        %(image_interp_topomap)s
+        %(extrapolate_topomap)s
+
+            .. versionadded:: 1.2
+        %(border_topomap)s
+
+            .. versionadded:: 0.20
+        %(res_topomap)s
+        %(size_topomap)s
+        %(cmap_topomap)s
+        %(vlim_plot_topomap_proj)s
+        %(cnorm)s
+
+            .. versionadded:: 1.2
+        %(colorbar_topomap)s
+        %(cbar_fmt_topomap)s
+
+            .. versionadded:: 1.2
+        %(units_topomap)s
+
+            .. versionadded:: 1.2
+        %(axes_plot_projs_topomap)s
+        %(show)s
+
+        Returns
+        -------
+        fig : instance of Figure
+            Figure distributing one image per channel across sensor topography.
+
+        Notes
+        -----
+        .. versionadded:: 0.15.0
+        """  # noqa: E501
+        from ..viz.topomap import plot_projs_topomap
+
+        return plot_projs_topomap(
+            self,
+            info,
+            sensors=sensors,
+            show_names=show_names,
+            contours=contours,
+            outlines=outlines,
+            sphere=sphere,
+            image_interp=image_interp,
+            extrapolate=extrapolate,
+            border=border,
+            res=res,
+            size=size,
+            cmap=cmap,
+            vlim=vlim,
+            cnorm=cnorm,
+            colorbar=colorbar,
+            cbar_fmt=cbar_fmt,
+            units=units,
+            axes=axes,
+            show=show,
+        )
+
+
+class ProjMixin:
+    """Mixin class for Raw, Evoked, Epochs.
+
+    Notes
+    -----
+    This mixin adds a proj attribute as a property to data containers.
+    It is True if at least one proj is present and all of them are active.
+    The projs might not be applied yet if data are not preloaded. In
+    this case it's the _projector attribute that does the job.
+    If a private _data attribute is present then the projs applied
+    to it are the ones marked as active.
+
+    A proj parameter passed in constructor of raw or epochs calls
+    apply_proj and hence after the .proj attribute is True.
+
+    As soon as you've applied the projs it will stay active in the
+    remaining pipeline.
+ + The suggested pipeline is proj=True in epochs (it's cheaper than for raw). + + When you use delayed SSP in Epochs, projs are applied when you call + get_data() method. They are not applied to the evoked._data unless you call + apply_proj(). The reason is that you want to reject with projs although + it's not stored in proj mode. + """ + + @property + def proj(self): + """Whether or not projections are active.""" + return len(self.info["projs"]) > 0 and all( + p["active"] for p in self.info["projs"] + ) + + @verbose + def add_proj(self, projs, remove_existing=False, verbose=None): + """Add SSP projection vectors. + + Parameters + ---------- + projs : list + List with projection vectors. + remove_existing : bool + Remove the projection vectors currently in the file. + %(verbose)s + + Returns + ------- + self : instance of Raw | Epochs | Evoked + The data container. + """ + if isinstance(projs, Projection): + projs = [projs] + + if not isinstance(projs, list) and not all( + isinstance(p, Projection) for p in projs + ): + raise ValueError("Only projs can be added. You supplied something else.") + + # mark proj as inactive, as they have not been applied + projs = deactivate_proj(projs, copy=True) + if remove_existing: + # we cannot remove the proj if they are active + if any(p["active"] for p in self.info["projs"]): + raise ValueError( + "Cannot remove projectors that have already been applied" + ) + with self.info._unlock(): + self.info["projs"] = projs + else: + self.info["projs"].extend(projs) + # We don't want to add projectors that are activated again. + with self.info._unlock(): + self.info["projs"] = _uniquify_projs( + self.info["projs"], check_active=False, sort=False + ) + return self + + @verbose + def apply_proj(self, verbose=None): + """Apply the signal space projection (SSP) operators to the data. + + Parameters + ---------- + %(verbose)s + + Returns + ------- + self : instance of Raw | Epochs | Evoked + The instance. + + Notes + ----- + Once the projectors have been applied, they can no longer be + removed. It is usually not recommended to apply the projectors at + too early stages, as they are applied automatically later on + (e.g. when computing inverse solutions). + Hint: using the copy method individual projection vectors + can be tested without affecting the original data. + With evoked data, consider the following example:: + + projs_a = mne.read_proj('proj_a.fif') + projs_b = mne.read_proj('proj_b.fif') + # add the first, copy, apply and see ... + evoked.add_proj(a).copy().apply_proj().plot() + # add the second, copy, apply and see ... + evoked.add_proj(b).copy().apply_proj().plot() + # drop the first and see again + evoked.copy().del_proj(0).apply_proj().plot() + evoked.apply_proj() # finally keep both + """ + from ..epochs import BaseEpochs + from ..evoked import Evoked + from ..io import BaseRaw + + if self.info["projs"] is None or len(self.info["projs"]) == 0: + logger.info( + "No projector specified for this dataset. " + "Please consider the method self.add_proj." + ) + return self + + # Exit delayed mode if you apply proj + if isinstance(self, BaseEpochs) and self._do_delayed_proj: + logger.info("Leaving delayed SSP mode.") + self._do_delayed_proj = False + + if all(p["active"] for p in self.info["projs"]): + logger.info( + "Projections have already been applied. " + "Setting proj attribute to True." 
+ ) + return self + + _projector, info = setup_proj( + deepcopy(self.info), add_eeg_ref=False, activate=True + ) + # let's not raise a RuntimeError here, otherwise interactive plotting + if _projector is None: # won't be fun. + logger.info("The projections don't apply to these data. Doing nothing.") + return self + self._projector, self.info = _projector, info + if isinstance(self, BaseRaw | Evoked): + if self.preload: + self._data = np.dot(self._projector, self._data) + else: # BaseEpochs + if self.preload: + for ii, e in enumerate(self._data): + self._data[ii] = self._project_epoch(e) + else: + self.load_data() # will automatically apply + logger.info("SSP projectors applied...") + return self + + def del_proj(self, idx="all"): + """Remove SSP projection vector. + + .. note:: The projection vector can only be removed if it is inactive + (has not been applied to the data). + + Parameters + ---------- + idx : int | list of int | str + Index of the projector to remove. Can also be "all" (default) + to remove all projectors. + + Returns + ------- + self : instance of Raw | Epochs | Evoked + The instance. + """ + if isinstance(idx, str) and idx == "all": + idx = list(range(len(self.info["projs"]))) + idx = np.atleast_1d(np.array(idx, int)).ravel() + + for ii in idx: + proj = self.info["projs"][ii] + if proj["active"] and set(self.info["ch_names"]) & set( + proj["data"]["col_names"] + ): + msg = ( + f"Cannot remove projector that has already been " + f"applied, unless you first remove all channels it " + f"applies to. The problematic projector is: {proj}" + ) + raise ValueError(msg) + + keep = np.ones(len(self.info["projs"])) + keep[idx] = False # works with negative indexing and does checks + with self.info._unlock(): + self.info["projs"] = [p for p, k in zip(self.info["projs"], keep) if k] + return self + + @fill_doc + def plot_projs_topomap( + self, + ch_type=None, + *, + sensors=True, + show_names=False, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap=None, + vlim=(None, None), + cnorm=None, + colorbar=False, + cbar_fmt="%3.1f", + units=None, + axes=None, + show=True, + ): + """Plot SSP vector. + + Parameters + ---------- + %(ch_type_topomap_proj)s + %(sensors_topomap)s + %(show_names_topomap)s + + .. versionadded:: 1.2 + %(contours_topomap)s + %(outlines_topomap)s + %(sphere_topomap_auto)s + %(image_interp_topomap)s + %(extrapolate_topomap)s + + .. versionadded:: 0.20 + + .. versionchanged:: 0.21 + + - The default was changed to ``'local'`` for MEG sensors. + - ``'local'`` was changed to use a convex hull mask + - ``'head'`` was changed to extrapolate out to the clipping circle. + %(border_topomap)s + + .. versionadded:: 0.20 + %(res_topomap)s + %(size_topomap)s + Only applies when plotting multiple topomaps at a time. + %(cmap_topomap)s + %(vlim_plot_topomap_proj)s + %(cnorm)s + + .. versionadded:: 1.2 + %(colorbar_topomap)s + %(cbar_fmt_topomap)s + + .. versionadded:: 1.2 + %(units_topomap)s + + .. versionadded:: 1.2 + %(axes_plot_projs_topomap)s + %(show)s + + Returns + ------- + fig : instance of Figure + Figure distributing one image per channel across sensor topography. 
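A hedged end-to-end sketch of the add/apply/delete lifecycle implemented above, assuming a preloaded recording (file name illustrative):

```
import mne

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)  # illustrative
projs = mne.compute_proj_raw(raw, n_eeg=1, n_mag=0, n_grad=0)
raw.add_proj(projs)  # stored inactive; the data are untouched so far
print(raw.proj)      # False until every stored projector is active
raw.apply_proj()     # projects the data and marks the projectors active
```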
+ """ + _projs = [deepcopy(_proj) for _proj in self.info["projs"]] + if _projs is None or len(_projs) == 0: + raise ValueError("No projectors in Info; nothing to plot.") + if ch_type is not None: + # make sure the requested channel type(s) exist + _validate_type(ch_type, (str, list, tuple), "ch_type") + if isinstance(ch_type, str): + ch_type = [ch_type] + bad_ch_types = [_type not in self for _type in ch_type] + if any(bad_ch_types): + raise ValueError( + f"ch_type {ch_type[bad_ch_types]} not " + f"present in {self.__class__.__name__}." + ) + # remove projs from unrequested channel types. This is a bit + # convoluted because Projection objects don't store channel types, + # only channel names + available_ch_types = np.array(self.get_channel_types()) + for _proj in _projs[::-1]: + idx = np.isin(self.ch_names, _proj["data"]["col_names"]) + proj_ch_type = np.unique(available_ch_types[idx]) + err_msg = "Projector contains multiple channel types" + assert len(proj_ch_type) == 1, err_msg + if proj_ch_type[0] != ch_type: + _projs.remove(_proj) + if len(_projs) == 0: + raise ValueError( + f"Nothing to plot (no projectors for channel type {ch_type})." + ) + # now we have non-empty _projs list with correct channel type(s) + from ..viz.topomap import plot_projs_topomap + + fig = plot_projs_topomap( + _projs, + self.info, + sensors=sensors, + show_names=show_names, + contours=contours, + outlines=outlines, + sphere=sphere, + image_interp=image_interp, + extrapolate=extrapolate, + border=border, + res=res, + size=size, + cmap=cmap, + vlim=vlim, + cnorm=cnorm, + colorbar=colorbar, + cbar_fmt=cbar_fmt, + units=units, + axes=axes, + show=show, + ) + return fig + + def _reconstruct_proj(self, mode="accurate", origin="auto"): + from ..forward import _map_meg_or_eeg_channels + + if len(self.info["projs"]) == 0: + return self + self.apply_proj() + for kind in ("meg", "eeg"): + kwargs = dict(meg=False) + kwargs[kind] = True + picks = pick_types(self.info, **kwargs) + if len(picks) == 0: + continue + info_from = pick_info(self.info, picks) + info_to = info_from.copy() + with info_to._unlock(): + info_to["projs"] = [] + if kind == "eeg" and _has_eeg_average_ref_proj(info_from): + info_to["projs"] = [ + make_eeg_average_ref_proj(info_to, verbose=False) + ] + mapping = _map_meg_or_eeg_channels( + info_from, info_to, mode=mode, origin=origin + ) + self.data[..., picks, :] = np.matmul(mapping, self.data[..., picks, :]) + return self + + +def _proj_equal(a, b, check_active=True): + """Test if two projectors are equal.""" + equal = ( + (a["active"] == b["active"] or not check_active) + and a["kind"] == b["kind"] + and a["desc"] == b["desc"] + and a["data"]["col_names"] == b["data"]["col_names"] + and a["data"]["row_names"] == b["data"]["row_names"] + and a["data"]["ncol"] == b["data"]["ncol"] + and a["data"]["nrow"] == b["data"]["nrow"] + and np.all(a["data"]["data"] == b["data"]["data"]) + ) + return equal + + +@verbose +def _read_proj(fid, node, *, ch_names_mapping=None, verbose=None): + ch_names_mapping = {} if ch_names_mapping is None else ch_names_mapping + projs = list() + + # Locate the projection data + nodes = dir_tree_find(node, FIFF.FIFFB_PROJ) + if len(nodes) == 0: + return projs + + # This might exist but we won't use it: + # global_nchan = None + # tag = find_tag(fid, nodes[0], FIFF.FIFF_NCHAN) + # if tag is not None: + # global_nchan = int(tag.data.item()) + + items = dir_tree_find(nodes[0], FIFF.FIFFB_PROJ_ITEM) + for item in items: + # Find all desired tags in one item + + # This probably also exists 
but used to be written incorrectly + # sometimes + # tag = find_tag(fid, item, FIFF.FIFF_NCHAN) + # if tag is not None: + # nchan = int(tag.data.item()) + # else: + # nchan = global_nchan + + tag = find_tag(fid, item, FIFF.FIFF_DESCRIPTION) + if tag is not None: + desc = tag.data + else: + tag = find_tag(fid, item, FIFF.FIFF_NAME) + if tag is not None: + desc = tag.data + else: + raise ValueError("Projection item description missing") + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_KIND) + if tag is not None: + kind = int(tag.data.item()) + else: + raise ValueError("Projection item kind missing") + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_NVEC) + if tag is not None: + nvec = int(tag.data.item()) + else: + raise ValueError("Number of projection vectors not specified") + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST) + if tag is not None: + names = _safe_name_list(tag.data, "read", "names") + else: + raise ValueError("Projection item channel list missing") + + tag = find_tag(fid, item, FIFF.FIFF_PROJ_ITEM_VECTORS) + if tag is not None: + data = tag.data + else: + raise ValueError("Projection item data missing") + + tag = find_tag(fid, item, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE) + if tag is not None: + active = bool(tag.data.item()) + else: + active = False + + tag = find_tag(fid, item, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR) + if tag is not None: + explained_var = float(tag.data.item()) + else: + explained_var = None + + # handle the case when data is transposed for some reason + if data.shape[0] == len(names) and data.shape[1] == nvec: + data = data.T + + if data.shape[1] != len(names): + raise ValueError( + "Number of channel names does not match the size of data matrix" + ) + + # just always use this, we used to have bugs with writing the + # number correctly... + nchan = len(names) + names[:] = _rename_list(names, ch_names_mapping) + # Use exactly the same fields in data as in a named matrix + one = Projection( + kind=kind, + active=active, + desc=desc, + data=dict( + nrow=nvec, ncol=nchan, row_names=None, col_names=names, data=data + ), + explained_var=explained_var, + ) + + projs.append(one) + + if len(projs) > 0: + logger.info(f" Read a total of {len(projs)} projection items:") + for proj in projs: + misc = "active" if proj["active"] else " idle" + logger.info( + f' {proj["desc"]} ' + f'({proj["data"]["nrow"]} x ' + f'{len(proj["data"]["col_names"])}) {misc}' + ) + + return projs + + +############################################################################### +# Write + + +def _write_proj(fid, projs, *, ch_names_mapping=None): + """Write a projection operator to a file. + + Parameters + ---------- + fid : file + The file descriptor of the open file. + projs : dict + The projection operator. 
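The public wrappers around these low-level reader/writer helpers are `mne.read_proj` and `mne.write_proj`; a minimal sketch (file names illustrative):

```
import mne

projs = mne.read_proj("sample-proj.fif")  # illustrative file name
mne.write_proj("sample-proj-copy.fif", projs, overwrite=True)
```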
+ """ + if len(projs) == 0: + return + + ch_names_mapping = dict() if ch_names_mapping is None else ch_names_mapping + # validation + _validate_type(projs, (list, tuple), "projs") + for pi, proj in enumerate(projs): + _validate_type(proj, Projection, f"projs[{pi}]") + + start_block(fid, FIFF.FIFFB_PROJ) + + for proj in projs: + start_block(fid, FIFF.FIFFB_PROJ_ITEM) + write_int(fid, FIFF.FIFF_NCHAN, len(proj["data"]["col_names"])) + names = _rename_list(proj["data"]["col_names"], ch_names_mapping) + write_name_list_sanitized( + fid, FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST, names, "col_names" + ) + write_string(fid, FIFF.FIFF_NAME, proj["desc"]) + write_int(fid, FIFF.FIFF_PROJ_ITEM_KIND, proj["kind"]) + if proj["kind"] == FIFF.FIFFV_PROJ_ITEM_FIELD: + write_float(fid, FIFF.FIFF_PROJ_ITEM_TIME, 0.0) + + write_int(fid, FIFF.FIFF_PROJ_ITEM_NVEC, proj["data"]["nrow"]) + write_int(fid, FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE, proj["active"]) + write_float_matrix(fid, FIFF.FIFF_PROJ_ITEM_VECTORS, proj["data"]["data"]) + if proj["explained_var"] is not None: + write_float(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR, proj["explained_var"]) + end_block(fid, FIFF.FIFFB_PROJ_ITEM) + + end_block(fid, FIFF.FIFFB_PROJ) + + +############################################################################### +# Utils + + +def _check_projs(projs, copy=True): + """Check that projs is a list of Projection.""" + _validate_type(projs, (list, tuple), "projs") + for pi, p in enumerate(projs): + _validate_type(p, Projection, f"projs[{pi}]") + return deepcopy(projs) if copy else projs + + +def make_projector(projs, ch_names, bads=(), include_active=True): + """Create an SSP operator from SSP projection vectors. + + Parameters + ---------- + projs : list + List of projection vectors. + ch_names : list of str + List of channels to include in the projection matrix. + bads : list of str + Some bad channels to exclude. If bad channels were marked + in the raw file when projs were calculated using mne-python, + they should not need to be included here as they will + have been automatically omitted from the projectors. + include_active : bool + Also include projectors that are already active. + + Returns + ------- + proj : array of shape [n_channels, n_channels] + The projection operator to apply to the data. + nproj : int + How many items in the projector. + U : array + The orthogonal basis of the projection vectors. + """ + return _make_projector(projs, ch_names, bads, include_active) + + +def _make_projector(projs, ch_names, bads=(), include_active=True, inplace=False): + """Subselect projs based on ch_names and bads. + + Use inplace=True mode to modify ``projs`` inplace so that no + warning will be raised next time projectors are constructed with + the given inputs. If inplace=True, no meaningful data are returned. 
+ """ + nchan = len(ch_names) + if nchan == 0: + raise ValueError("No channel names specified") + + default_return = (np.eye(nchan, nchan), 0, np.empty((nchan, 0))) + + # Check trivial cases first + if projs is None: + return default_return + + nvec = 0 + nproj = 0 + for p in projs: + if not p["active"] or include_active: + nproj += 1 + nvec += p["data"]["nrow"] + + if nproj == 0: + return default_return + + # Pick the appropriate entries + vecs = np.zeros((nchan, nvec)) + nvec = 0 + nonzero = 0 + bads = set(bads) + for k, p in enumerate(projs): + if not p["active"] or include_active: + if len(p["data"]["col_names"]) != len(np.unique(p["data"]["col_names"])): + raise ValueError( + f"Channel name list in projection item {k}" + " contains duplicate items" + ) + + # Get the two selection vectors to pick correct elements from + # the projection vectors omitting bad channels + sel = [] + vecsel = [] + p_set = set(p["data"]["col_names"]) # faster membership access + for c, name in enumerate(ch_names): + if name not in bads and name in p_set: + sel.append(c) + vecsel.append(p["data"]["col_names"].index(name)) + + # If there is something to pick, pickit + nrow = p["data"]["nrow"] + this_vecs = vecs[:, nvec : nvec + nrow] + if len(sel) > 0: + this_vecs[sel] = p["data"]["data"][:, vecsel].T + + # Rescale for better detection of small singular values + for v in range(p["data"]["nrow"]): + psize = np.linalg.norm(this_vecs[:, v]) + if psize > 0: + orig_n = p["data"]["data"].any(axis=0).sum() + # Average ref still works if channels are removed + # Use relative power to determine if we're in trouble. + # 10% loss is hopefully a reasonable threshold. + if ( + psize < 0.9 + and not inplace + and ( + p["kind"] != FIFF.FIFFV_PROJ_ITEM_EEG_AVREF + or len(vecsel) == 1 + ) + ): + warn( + f'Projection vector {repr(p["desc"])} has been ' + f"reduced to {100 * psize:0.2f}% of its " + "original magnitude by subselecting " + f"{len(vecsel)}/{orig_n} of the original " + "channels. If the ignored channels were bad " + "during SSP computation, we recommend " + "recomputing proj (via compute_proj_raw " + "or related functions) with the bad channels " + "properly marked, because computing SSP with bad " + "channels present in the data but unmarked is " + "dangerous (it can bias the PCA used by SSP). " + "On the other hand, if you know that all channels " + "were good during SSP computation, you can safely " + "use info.normalize_proj() to suppress this " + "warning during projection." + ) + this_vecs[:, v] /= psize + nonzero += 1 + # If doing "inplace" mode, "fix" the projectors to only operate + # on this subset of channels. + if inplace: + p["data"]["data"] = this_vecs[sel].T + p["data"]["col_names"] = [p["data"]["col_names"][ii] for ii in vecsel] + p["data"]["ncol"] = len(p["data"]["col_names"]) + nvec += p["data"]["nrow"] + + # Check whether all of the vectors are exactly zero + if nonzero == 0 or inplace: + return default_return + + # Reorthogonalize the vectors + U, S, _ = _safe_svd(vecs[:, :nvec], full_matrices=False) + + # Throw away the linearly dependent guys + nproj = np.sum((S / S[0]) > 1e-2) + U = U[:, :nproj] + + # Here is the celebrated result + proj = np.eye(nchan, nchan) - np.dot(U, U.T) + if nproj >= nchan: # e.g., 3 channels and 3 projectors + raise RuntimeError( + f"Application of {nproj} projectors for {nchan} channels " + "will yield no components." + ) + + return proj, nproj, U + + +def _normalize_proj(info): + """Normalize proj after subselection to avoid warnings. 
+ + This is really only useful for tests, and might not be needed + eventually if we change or improve our handling of projectors + with picks. + """ + # Here we do info.get b/c info can actually be a noise cov + _make_projector( + info["projs"], + info.get("ch_names", info.get("names")), + info["bads"], + include_active=True, + inplace=True, + ) + + +@fill_doc +def make_projector_info(info, include_active=True): + """Make an SSP operator using the measurement info. + + Calls make_projector on good channels. + + Parameters + ---------- + %(info_not_none)s + include_active : bool + Also include projectors that are already active. + + Returns + ------- + proj : array of shape [n_channels, n_channels] + The projection operator to apply to the data. + nproj : int + How many items in the projector. + """ + proj, nproj, _ = make_projector( + info["projs"], info["ch_names"], info["bads"], include_active + ) + return proj, nproj + + +@verbose +def activate_proj(projs, copy=True, verbose=None): + """Set all projections to active. + + Useful before passing them to make_projector. + + Parameters + ---------- + projs : list + The projectors. + copy : bool + Modify projs in place or operate on a copy. + %(verbose)s + + Returns + ------- + projs : list + The projectors. + """ + if copy: + projs = deepcopy(projs) + + # Activate the projection items + for proj in projs: + proj["active"] = True + + logger.info(f"{len(projs)} projection items activated") + + return projs + + +@verbose +def deactivate_proj(projs, copy=True, verbose=None): + """Set all projections to inactive. + + Useful before saving raw data without projectors applied. + + Parameters + ---------- + projs : list + The projectors. + copy : bool + Modify projs in place or operate on a copy. + %(verbose)s + + Returns + ------- + projs : list + The projectors. + """ + if copy: + projs = deepcopy(projs) + + # Deactivate the projection items + for proj in projs: + proj["active"] = False + + logger.info(f"{len(projs)} projection items deactivated") + + return projs + + +# Keep in sync with doc below +_EEG_AVREF_PICK_DICT = {k: True for k in _ELECTRODE_CH_TYPES} + + +@verbose +def make_eeg_average_ref_proj(info, activate=True, *, ch_type="eeg", verbose=None): + """Create an EEG average reference SSP projection vector. + + Parameters + ---------- + %(info_not_none)s + activate : bool + If True projections are activated. + ch_type : str + The channel type to use for reference projection. + Valid types are ``'eeg'``, ``'ecog'``, ``'seeg'`` and ``'dbs'``. + + .. versionadded:: 1.2 + %(verbose)s + + Returns + ------- + proj: instance of Projection + The SSP/PCA projector. + """ + if info.get("custom_ref_applied", False): + raise RuntimeError( + "A custom reference has been applied to the " + "data earlier. Please use the " + "mne.io.set_eeg_reference function to move from " + "one EEG reference to another." 
+        )
+
+    _validate_type(ch_type, (list, tuple, str), "ch_type")
+    singleton = False
+    if isinstance(ch_type, str):
+        ch_type = [ch_type]
+        singleton = True
+    for ci, this_ch_type in enumerate(ch_type):
+        _check_option(
+            "ch_type" + ("" if singleton else f"[{ci}]"),
+            this_ch_type,
+            list(_EEG_AVREF_PICK_DICT),
+        )
+
+    ch_type_name = "/".join(c.upper() for c in ch_type)
+    logger.info(f"Adding average {ch_type_name} reference projection.")
+
+    ch_dict = {c: True for c in ch_type}
+    for c in ch_type:
+        one_picks = pick_types(info, exclude="bads", **{c: True})
+        if len(one_picks) == 0:
+            raise ValueError(
+                f"Cannot create {ch_type_name} average reference "
+                f"projector (no {c.upper()} data found)"
+            )
+    del ch_type
+    ch_sel = pick_types(info, **ch_dict, exclude="bads")
+    ch_names = info["ch_names"]
+    ch_names = [ch_names[k] for k in ch_sel]
+    n_chs = len(ch_sel)
+    vec = np.ones((1, n_chs))
+    vec /= np.sqrt(n_chs)
+    explained_var = None
+    proj_data = dict(col_names=ch_names, row_names=None, data=vec, nrow=1, ncol=n_chs)
+    proj = Projection(
+        active=activate,
+        data=proj_data,
+        explained_var=explained_var,
+        desc=f"Average {ch_type_name} reference",
+        kind=FIFF.FIFFV_PROJ_ITEM_EEG_AVREF,
+    )
+    return proj
+
+
+@verbose
+def _has_eeg_average_ref_proj(
+    info, *, projs=None, check_active=False, ch_type=None, verbose=None
+):
+    """Determine if a list of projectors has an average EEG ref.
+
+    Optionally, set check_active=True to additionally check if the CAR
+    has already been applied.
+    """
+    from .meas_info import Info
+
+    _validate_type(info, Info, "info")
+    projs = info.get("projs", []) if projs is None else projs
+    if ch_type is None:
+        pick_kwargs = _EEG_AVREF_PICK_DICT
+    else:
+        ch_type = [ch_type] if isinstance(ch_type, str) else ch_type
+        pick_kwargs = {ch_type: True for ch_type in ch_type}
+    ch_type = "/".join(c.upper() for c in pick_kwargs)
+    want_names = [
+        info["ch_names"][pick]
+        for pick in pick_types(info, exclude="bads", **pick_kwargs)
+    ]
+    if not want_names:
+        return False
+    found_names = list()
+    for proj in projs:
+        if proj["kind"] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF or re.match(
+            "^Average .* reference$", proj["desc"]
+        ):
+            if not check_active or proj["active"]:
+                found_names.extend(proj["data"]["col_names"])
+    # If some are missing we have a problem (keep order for the message,
+    # otherwise we could use set logic)
+    missing = [name for name in want_names if name not in found_names]
+    if missing:
+        if found_names:  # found some but not all: warn
+            warn(f"Incomplete {ch_type} projector, missing channel(s) {missing}")
+        return False
+    return True
+
+
+def _needs_eeg_average_ref_proj(info):
+    """Determine if the EEG needs an average EEG reference.
+
+    This returns True if no custom reference has been applied and no average
+    reference projection is present in the list of projections.
+    """
+    if info["custom_ref_applied"]:
+        return False
+    if not _electrode_types(info):
+        return False
+    if _has_eeg_average_ref_proj(info):
+        return False
+    return True
+
+
+@verbose
+def setup_proj(
+    info, add_eeg_ref=True, activate=True, *, eeg_ref_ch_type="eeg", verbose=None
+):
+    """Set up projection for Raw and Epochs.
+
+    Parameters
+    ----------
+    %(info_not_none)s Warning: will be modified in-place.
+    add_eeg_ref : bool
+        If True, an EEG average reference will be added (unless one
+        already exists).
+    activate : bool
+        If True projections are activated.
+    eeg_ref_ch_type : str
+        The channel type to use for reference projection.
+        Valid types are 'eeg', 'ecog', 'seeg' and 'dbs'.
+
+        .. 
versionadded:: 1.2 + %(verbose)s + + Returns + ------- + projector : array of shape [n_channels, n_channels] + The projection operator to apply to the data. + info : mne.Info + The modified measurement info. + """ + # Add EEG ref reference proj if necessary + if add_eeg_ref and _needs_eeg_average_ref_proj(info): + eeg_proj = make_eeg_average_ref_proj( + info, activate=activate, ch_type=eeg_ref_ch_type + ) + info["projs"].append(eeg_proj) + + # Create the projector + projector, nproj = make_projector_info(info) + if nproj == 0: + if verbose: + logger.info("The projection vectors do not apply to these channels") + projector = None + else: + logger.info(f"Created an SSP operator (subspace dimension = {nproj})") + + # The projection items have been activated + if activate: + with info._unlock(): + info["projs"] = activate_proj(info["projs"], copy=False) + + return projector, info + + +def _uniquify_projs(projs, check_active=True, sort=True): + """Make unique projs.""" + final_projs = [] + for proj in projs: # flatten + if not any(_proj_equal(p, proj, check_active) for p in final_projs): + final_projs.append(proj) + + my_count = count(len(final_projs)) + + def sorter(x): + """Sort in a nice way.""" + digits = [s for s in x["desc"] if s.isdigit()] + if digits: + sort_idx = int(digits[-1]) + else: + sort_idx = next(my_count) + return (sort_idx, x["desc"]) + + return sorted(final_projs, key=sorter) if sort else final_projs diff --git a/mne/_fiff/reference.py b/mne/_fiff/reference.py new file mode 100644 index 0000000..e70bf5e --- /dev/null +++ b/mne/_fiff/reference.py @@ -0,0 +1,738 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +from ..defaults import DEFAULTS +from ..utils import ( + _check_option, + _check_preload, + _on_missing, + _validate_type, + fill_doc, + logger, + pinv, + verbose, + warn, +) +from .constants import FIFF +from .meas_info import _check_ch_keys +from .pick import _ELECTRODE_CH_TYPES, pick_channels, pick_channels_forward, pick_types +from .proj import _has_eeg_average_ref_proj, make_eeg_average_ref_proj, setup_proj + + +def _check_before_reference(inst, ref_from, ref_to, ch_type): + """Prepare instance for referencing.""" + # Check to see that data is preloaded + _check_preload(inst, "Applying a reference") + + ch_type = _get_ch_type(inst, ch_type) + ch_dict = {**{type_: True for type_ in ch_type}, "meg": False, "ref_meg": False} + eeg_idx = pick_types(inst.info, **ch_dict) + + if ref_to is None: + ref_to = [inst.ch_names[i] for i in eeg_idx] + extra = "EEG channels found" + else: + extra = "channels supplied" + if len(ref_to) == 0: + raise ValueError(f"No {extra} to apply the reference to") + + _check_ssp(inst, ref_from + ref_to) + + # If the reference touches EEG/ECoG/sEEG/DBS electrodes, note in the + # info that a non-CAR has been applied. 
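In user code, `make_eeg_average_ref_proj` and `setup_proj` are usually driven through the public API; a minimal sketch, assuming a preloaded Raw with EEG channels (file name illustrative):

```
import mne

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)  # illustrative
raw.set_eeg_reference("average", projection=True)  # adds an inactive CAR projector
raw.apply_proj()  # subtracts the mean of the good EEG channels
```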
+    ref_to_channels = pick_channels(inst.ch_names, ref_to, ordered=True)
+    if len(np.intersect1d(ref_to_channels, eeg_idx)) > 0:
+        with inst.info._unlock():
+            inst.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_ON
+
+    return ref_to
+
+
+def _check_ssp(inst, ref_items):
+    """Check for SSPs that may block re-referencing."""
+    projs_to_remove = []
+    for i, proj in enumerate(inst.info["projs"]):
+        # Remove any average reference projections
+        if (
+            proj["desc"] == "Average EEG reference"
+            or proj["kind"] == FIFF.FIFFV_PROJ_ITEM_EEG_AVREF
+        ):
+            logger.info("Removing existing average EEG reference projection.")
+            # Don't remove the projection right away, but do this at the end of
+            # this loop.
+            projs_to_remove.append(i)
+
+        # Inactive SSPs may block re-referencing
+        elif (
+            not proj["active"]
+            and len([ch for ch in ref_items if ch in proj["data"]["col_names"]]) > 0
+        ):
+            raise RuntimeError(
+                "Inactive signal space projection (SSP) operators are "
+                "present that operate on sensors involved in the desired "
+                "referencing scheme. These projectors need to be applied "
+                "using the apply_proj() method before the desired "
+                "reference can be set."
+            )
+
+    # Delete in reverse order so earlier indices stay valid as items are removed
+    for i in sorted(projs_to_remove, reverse=True):
+        del inst.info["projs"][i]
+
+    # Need to call setup_proj after changing the projs:
+    inst._projector, _ = setup_proj(inst.info, add_eeg_ref=False, activate=False)
+
+
+def _check_before_dict_reference(inst, ref_dict):
+    """Prepare instance for dict-based referencing."""
+    # Check to see that data is preloaded
+    _check_preload(inst, "Applying a reference")
+
+    # Promote all values to list-like. This simplifies our logic and also helps catch
+    # self-referencing cases like `{"Cz": ["Cz"]}`
+    _refdict = {k: [v] if isinstance(v, str) else list(v) for k, v in ref_dict.items()}
+
+    # Check that keys are strings and values are lists-of-strings
+    key_types = {type(k) for k in _refdict}
+    value_types = {type(v) for val in _refdict.values() for v in val}
+    for elem_name, elem in dict(key=key_types, value=value_types).items():
+        if bad_elem := elem - {str}:
+            raise TypeError(
+                f"{elem_name.capitalize()}s in the ref_channels dict must be strings. "
+                f"Your dict has {elem_name}s of type "
+                f'{", ".join(map(lambda x: x.__name__, bad_elem))}.'
+            )
+
+    # Check that keys are valid channels and values are lists-of-valid-channels
+    ch_set = set(inst.ch_names)
+    bad_ch_set = set(inst.info["bads"])
+    keys = set(_refdict)
+    values = set(sum(_refdict.values(), []))
+    for elem_name, elem in dict(key=keys, value=values).items():
+        if bad_elem := elem - ch_set:
+            raise ValueError(
+                f'ref_channels dict contains invalid {elem_name}(s) '
+                f'({", ".join(bad_elem)}) '
+                "that are not names of channels in the instance."
+            )
+        # Check that values are not bad channels
+        if bad_elem := elem.intersection(bad_ch_set):
+            warn(
+                f"ref_channels dict contains {elem_name}(s) "
+                f"({', '.join(bad_elem)}) "
+                "that are marked as bad channels."
+            )
+
+    _check_ssp(inst, keys.union(values))
+
+    # Check for self-referencing
+    self_ref = [[k] == v for k, v in _refdict.items()]
+    if any(self_ref):
+        which = np.array(list(_refdict))[np.nonzero(self_ref)]
+        for ch in which:
+            warn(f"Channel {ch} is self-referenced, which will nullify the channel.")
+
+    # Check that channel types match. 
First unpack list-like vals into separate items: + pairs = [(k, v) for k in _refdict for v in _refdict[k]] + ch_type_map = dict(zip(inst.ch_names, inst.get_channel_types())) + mismatch = [ch_type_map[k] != ch_type_map[v] for k, v in pairs] + if any(mismatch): + mismatch_pairs = np.array(pairs)[mismatch] + for k, v in mismatch_pairs: + warn( + f"Channel {k} ({ch_type_map[k]}) is referenced to channel {v} which is " + f"a different channel type ({ch_type_map[v]})." + ) + + # convert channel names to indices + keys_ix = pick_channels(inst.ch_names, list(_refdict), ordered=True) + vals_ix = (pick_channels(inst.ch_names, v, ordered=True) for v in _refdict.values()) + return dict(zip(keys_ix, vals_ix)) + + +def _apply_reference(inst, ref_from, ref_to=None, forward=None, ch_type="auto"): + """Apply a custom EEG referencing scheme.""" + ref_to = _check_before_reference(inst, ref_from, ref_to, ch_type) + + # Compute reference + if len(ref_from) > 0: + # this is guaranteed below, but we should avoid the crazy pick_channels + # behavior that [] gives all. Also use ordered=True just to make sure + # that all supplied channels actually exist. + assert len(ref_to) > 0 + ref_names = ref_from + ref_from = pick_channels(inst.ch_names, ref_from, ordered=True) + ref_to = pick_channels(inst.ch_names, ref_to, ordered=True) + + data = inst._data + ref_data = data[..., ref_from, :].mean(-2, keepdims=True) + data[..., ref_to, :] -= ref_data + ref_data = ref_data[..., 0, :] + + # REST + if forward is not None: + # use ch_sel and the given forward + forward = pick_channels_forward(forward, ref_names, ordered=True) + # 1-3. Compute a forward (G) and avg-ref'ed data (done above) + G = forward["sol"]["data"] + assert G.shape[0] == len(ref_names) + # 4. Compute the forward (G) and average-reference it (Ga): + Ga = G - np.mean(G, axis=0, keepdims=True) + # 5. Compute the Ga_inv by SVD + Ga_inv = pinv(Ga, rtol=1e-6) + # 6. Compute Ra = (G @ Ga_inv) in eq (8) from G and Ga_inv + Ra = G @ Ga_inv + # 7-8. Compute Vp = Ra @ Va; then Vpa=average(Vp) + Vpa = np.mean(Ra @ data[..., ref_from, :], axis=-2, keepdims=True) + data[..., ref_to, :] += Vpa + else: + ref_data = None + + return inst, ref_data + + +def _apply_dict_reference(inst, ref_dict): + """Apply a dict-based custom EEG referencing scheme.""" + # this converts all keys to channel indices and all values to arrays of ch. indices: + ref_dict = _check_before_dict_reference(inst, ref_dict) + + data = inst._data + orig_data = data.copy() + for ref_to, ref_from in ref_dict.items(): + ref_data = orig_data[..., ref_from, :].mean(-2, keepdims=True) + data[..., [ref_to], :] -= ref_data + + with inst.info._unlock(): + inst.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_ON + return inst, None + + +@fill_doc +def add_reference_channels(inst, ref_channels, copy=True): + """Add reference channels to data that consists of all zeros. + + Adds reference channels to data that were not included during recording. + This is useful when you need to re-reference your data to different + channels. These added channels will consist of all zeros. + + Parameters + ---------- + inst : instance of Raw | Epochs | Evoked + Instance of Raw or Epochs with EEG channels and reference channel(s). + %(ref_channels)s + copy : bool + Specifies whether the data will be copied (True) or modified in-place + (False). Defaults to True. + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + Data with added EEG reference channels. + + Notes + ----- + .. 
warning::
+        When :ref:`re-referencing <tut-set-eeg-ref>`,
+        make sure to apply the montage using :meth:`mne.io.Raw.set_montage`
+        only after calling this function. Applying a montage will only set
+        locations of channels that exist at the time it is applied.
+    """
+    from ..epochs import BaseEpochs
+    from ..evoked import Evoked
+    from ..io import BaseRaw
+
+    # Check to see that data is preloaded
+    _check_preload(inst, "add_reference_channels")
+    _validate_type(ref_channels, (list, tuple, str), "ref_channels")
+    if isinstance(ref_channels, str):
+        ref_channels = [ref_channels]
+    for ch in ref_channels:
+        if ch in inst.info["ch_names"]:
+            raise ValueError(f"Channel {ch} already specified in inst.")
+
+    # Once CAR is applied (active), don't allow adding channels
+    if _has_eeg_average_ref_proj(inst.info, check_active=True):
+        raise RuntimeError("Average reference already applied to data.")
+
+    if copy:
+        inst = inst.copy()
+
+    if isinstance(inst, BaseRaw | Evoked):
+        data = inst._data
+        refs = np.zeros((len(ref_channels), data.shape[1]))
+        data = np.vstack((data, refs))
+        inst._data = data
+    elif isinstance(inst, BaseEpochs):
+        data = inst._data
+        x, y, z = data.shape
+        refs = np.zeros((x * len(ref_channels), z))
+        data = np.vstack((data.reshape((x * y, z), order="F"), refs))
+        data = data.reshape(x, y + len(ref_channels), z, order="F")
+        inst._data = data
+    else:
+        raise TypeError(
+            f"inst should be Raw, Epochs, or Evoked instead of {type(inst)}."
+        )
+    nchan = len(inst.info["ch_names"])
+
+    if inst.info.get("dig", None) is not None:
+        # A montage has been set. Try to infer location of reference channels.
+        # "zeroth" EEG electrode dig points is reference
+        ref_dig_loc = [
+            dl
+            for dl in inst.info["dig"]
+            if (dl["kind"] == FIFF.FIFFV_POINT_EEG and dl["ident"] == 0)
+        ]
+        if len(ref_channels) > 1 or len(ref_dig_loc) != len(ref_channels):
+            ref_dig_array = np.full(12, np.nan)
+            warn(
+                "Location for this channel is unknown or ambiguous; consider calling "
+                "set_montage() after adding new reference channels if needed. "
+                "Applying a montage will only set locations of channels that "
+                "exist at the time it is applied."
+            )
+        else:  # n_ref_channels == 1 and a single ref digitization exists
+            ref_dig_array = np.concatenate(
+                (ref_dig_loc[0]["r"], ref_dig_loc[0]["r"], np.zeros(6))
+            )
+            # Replace the (possibly new) Ref location for each channel
+            for idx in pick_types(inst.info, meg=False, eeg=True, exclude=[]):
+                inst.info["chs"][idx]["loc"][3:6] = ref_dig_loc[0]["r"]
+    else:
+        # If no montage has ever been set, we cannot even try to infer a location.
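A usage sketch for `add_reference_channels` that respects the montage caveat above (channel and montage names are illustrative):

```
import mne

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)  # illustrative
raw = mne.add_reference_channels(raw, ["FCz"])  # restored channel is all zeros
raw.set_montage("standard_1020")  # set locations only after adding the channel
raw.set_eeg_reference("average")
```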
+ ref_dig_array = np.full(12, np.nan) + + for ch in ref_channels: + chan_info = { + "ch_name": ch, + "coil_type": FIFF.FIFFV_COIL_EEG, + "kind": FIFF.FIFFV_EEG_CH, + "logno": nchan + 1, + "scanno": nchan + 1, + "cal": 1, + "range": 1.0, + "unit_mul": FIFF.FIFF_UNITM_NONE, + "unit": FIFF.FIFF_UNIT_V, + "coord_frame": FIFF.FIFFV_COORD_HEAD, + "loc": ref_dig_array, + } + inst.info["chs"].append(chan_info) + inst.info._update_redundant() + range_ = np.arange(1, len(ref_channels) + 1) + if isinstance(inst, BaseRaw): + inst._cals = np.hstack((inst._cals, [1] * len(ref_channels))) + for pi, picks in enumerate(inst._read_picks): + inst._read_picks[pi] = np.concatenate([picks, np.max(picks) + range_]) + elif isinstance(inst, BaseEpochs): + picks = inst.picks + inst.picks = np.concatenate([picks, np.max(picks) + range_]) + inst.info._check_consistency() + set_eeg_reference(inst, ref_channels=ref_channels, copy=False, verbose=False) + return inst + + +_ref_dict = { + FIFF.FIFFV_MNE_CUSTOM_REF_ON: "on", + FIFF.FIFFV_MNE_CUSTOM_REF_OFF: "off", + FIFF.FIFFV_MNE_CUSTOM_REF_CSD: "CSD", +} + + +def _check_can_reref(inst): + from ..epochs import BaseEpochs + from ..evoked import Evoked + from ..io import BaseRaw + + _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), "Instance") + current_custom = inst.info["custom_ref_applied"] + if current_custom not in ( + FIFF.FIFFV_MNE_CUSTOM_REF_ON, + FIFF.FIFFV_MNE_CUSTOM_REF_OFF, + ): + raise RuntimeError( + "Cannot set new reference on data with custom reference type " + f"{_ref_dict[current_custom]!r}" + ) + + +@verbose +def set_eeg_reference( + inst, + ref_channels="average", + copy=True, + projection=False, + ch_type="auto", + forward=None, + *, + joint=False, + verbose=None, +): + """Specify which reference to use for EEG data. + + Use this function to explicitly specify the desired reference for EEG. + This can be either an existing electrode or a new virtual channel. + This function will re-reference the data according to the desired + reference. + + Note that it is also possible to re-reference the signal using a + Laplacian (LAP) "reference-free" transformation using the + :func:`.compute_current_source_density` function. + + Parameters + ---------- + inst : instance of Raw | Epochs | Evoked + Instance of Raw or Epochs with EEG channels and reference channel(s). + %(ref_channels_set_eeg_reference)s + copy : bool + Specifies whether the data will be copied (True) or modified in-place + (False). Defaults to True. + %(projection_set_eeg_reference)s + %(ch_type_set_eeg_reference)s + %(forward_set_eeg_reference)s + %(joint_set_eeg_reference)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + Data with EEG channels re-referenced. If ``ref_channels="average"`` and + ``projection=True`` a projection will be added instead of directly + re-referencing the data. + ref_data : array + Array of reference data subtracted from EEG channels. This will be + ``None`` if ``projection=True``, or if ``ref_channels`` is ``"REST"`` or a + :class:`dict`. + %(set_eeg_reference_see_also_notes)s + """ + from ..forward import Forward + + _check_can_reref(inst) + + if isinstance(ref_channels, dict): + logger.info("Applying a custom dict-based reference.") + return _apply_dict_reference(inst, ref_channels) + + ch_type = _get_ch_type(inst, ch_type) + + if projection: # average reference projector + if ref_channels != "average": + raise ValueError( + 'Setting projection=True is only supported for ref_channels="average", ' + f"got {ref_channels!r}." 
+ ) + # We need verbose='error' here in case we add projs sequentially + if _has_eeg_average_ref_proj(inst.info, ch_type=ch_type, verbose="error"): + warn( + "An average reference projection was already added. The data " + "has been left untouched." + ) + else: + # Creating an average reference may fail. In this case, make + # sure that the custom_ref_applied flag is left untouched. + custom_ref_applied = inst.info["custom_ref_applied"] + + try: + with inst.info._unlock(): + inst.info["custom_ref_applied"] = FIFF.FIFFV_MNE_CUSTOM_REF_OFF + if joint: + inst.add_proj( + make_eeg_average_ref_proj( + inst.info, ch_type=ch_type, activate=False + ) + ) + else: + for this_ch_type in ch_type: + inst.add_proj( + make_eeg_average_ref_proj( + inst.info, ch_type=this_ch_type, activate=False + ) + ) + except Exception: + with inst.info._unlock(): + inst.info["custom_ref_applied"] = custom_ref_applied + raise + # If the data has been preloaded, projections will no + # longer be automatically applied. + if inst.preload: + logger.info( + "Average reference projection was added, " + "but has not been applied yet. Use the " + "apply_proj method to apply it." + ) + return inst, None + del projection # not used anymore + + inst = inst.copy() if copy else inst + ch_dict = {**{type_: True for type_ in ch_type}, "meg": False, "ref_meg": False} + ch_sel = [inst.ch_names[i] for i in pick_types(inst.info, **ch_dict)] + + if ref_channels == "REST": + _validate_type(forward, Forward, 'forward when ref_channels="REST"') + else: + forward = None # signal to _apply_reference not to do REST + + if ref_channels in ("average", "REST"): + logger.info(f"Applying {ref_channels} reference.") + ref_channels = ch_sel + + if ref_channels == []: + logger.info("EEG data marked as already having the desired reference.") + else: + logger.info( + "Applying a custom " + f"{tuple(DEFAULTS['titles'][type_] for type_ in ch_type)} " + "reference." + ) + + return _apply_reference(inst, ref_channels, ch_sel, forward, ch_type=ch_type) + + +def _get_ch_type(inst, ch_type): + _validate_type(ch_type, (str, list, tuple), "ch_type") + valid_ch_types = ("auto",) + _ELECTRODE_CH_TYPES + if isinstance(ch_type, str): + _check_option("ch_type", ch_type, valid_ch_types) + if ch_type != "auto": + ch_type = [ch_type] + elif isinstance(ch_type, list | tuple): + for type_ in ch_type: + _validate_type(type_, str, "ch_type") + _check_option("ch_type", type_, valid_ch_types[1:]) + ch_type = list(ch_type) + + # if ch_type is 'auto', search through list to find first reasonable + # reference-able channel type. + if ch_type == "auto": + for type_ in _ELECTRODE_CH_TYPES: + if type_ in inst: + ch_type = [type_] + logger.info( + f"{DEFAULTS['titles'][type_]} channel type selected for " + "re-referencing" + ) + break + # if auto comes up empty, or the user specifies a bad ch_type. + else: + raise ValueError("No EEG, ECoG, sEEG or DBS channels found to rereference.") + return ch_type + + +@verbose +def set_bipolar_reference( + inst, + anode, + cathode, + ch_name=None, + ch_info=None, + drop_refs=True, + copy=True, + on_bad="warn", + verbose=None, +): + """Re-reference selected channels using a bipolar referencing scheme. + + A bipolar reference takes the difference between two channels (the anode + minus the cathode) and adds it as a new virtual channel. The original + channels will be dropped by default. + + Multiple anodes and cathodes can be specified, in which case multiple + virtual channels will be created. 
The 1st cathode will be subtracted
+    from the 1st anode, the 2nd cathode from the 2nd anode, etc.
+
+    By default, the virtual channels will be annotated with the channel info
+    and location of the anodes, and their coil types will be set to EEG_BIPOLAR.
+
+    Parameters
+    ----------
+    inst : instance of Raw | Epochs | Evoked
+        Data containing the unreferenced channels.
+    anode : str | list of str
+        The name(s) of the channel(s) to use as anode in the bipolar reference.
+    cathode : str | list of str
+        The name(s) of the channel(s) to use as cathode in the bipolar
+        reference.
+    ch_name : str | list of str | None
+        The channel name(s) for the virtual channel(s) containing the resulting
+        signal. By default, bipolar channels are named after the anode and
+        cathode, but it is recommended to supply a more meaningful name.
+    ch_info : dict | list of dict | None
+        This parameter can be used to supply a dictionary (or a dictionary for
+        each bipolar channel) containing channel information to merge in,
+        overwriting the default values. Defaults to None.
+    drop_refs : bool
+        Whether to drop the anode/cathode channels from the instance.
+    copy : bool
+        Whether to operate on a copy of the data (True) or modify it in-place
+        (False). Defaults to True.
+    on_bad : str
+        If a bipolar channel is created from a bad anode or a bad cathode, mne
+        warns if on_bad="warn", raises ValueError if on_bad="raise", and does
+        nothing if on_bad="ignore". For "warn" and "ignore", the new bipolar
+        channel will be marked as bad. Defaults to on_bad="warn".
+    %(verbose)s
+
+    Returns
+    -------
+    inst : instance of Raw | Epochs | Evoked
+        Data with the specified channels re-referenced.
+
+    See Also
+    --------
+    set_eeg_reference : Convenience function for creating an EEG reference.
+
+    Notes
+    -----
+    1. If the anodes contain any EEG channels, this function removes
+       any pre-existing average reference projections.
+
+    2. During source localization, the EEG signal should have an average
+       reference.
+
+    3. The data must be preloaded.
+
+    .. versionadded:: 0.9.0
+    """
+    from ..epochs import BaseEpochs, EpochsArray
+    from ..evoked import EvokedArray
+    from ..io import BaseRaw, RawArray
+    from .meas_info import create_info
+
+    _check_can_reref(inst)
+    if not isinstance(anode, list):
+        anode = [anode]
+
+    if not isinstance(cathode, list):
+        cathode = [cathode]
+
+    if len(anode) != len(cathode):
+        raise ValueError(
+            f"Number of anodes (got {len(anode)}) must equal the number "
+            f"of cathodes (got {len(cathode)})."
+        )
+
+    if ch_name is None:
+        ch_name = [f"{a}-{c}" for (a, c) in zip(anode, cathode)]
+    elif not isinstance(ch_name, list):
+        ch_name = [ch_name]
+    if len(ch_name) != len(anode):
+        raise ValueError(
+            "Number of channel names must equal the number of "
+            f"anodes/cathodes (got {len(ch_name)})."
+        )
+
+    # Check for duplicate channel names (it is allowed to give the name of the
+    # anode or cathode channel, as they will be replaced).
+    for ch, a, c in zip(ch_name, anode, cathode):
+        if ch not in [a, c] and ch in inst.ch_names:
+            raise ValueError(
+                f'There is already a channel named "{ch}", please '
+                "specify a different name for the bipolar "
+                "channel using the ch_name parameter."
+            )
+
+    if ch_info is None:
+        ch_info = [{} for _ in anode]
+    elif not isinstance(ch_info, list):
+        ch_info = [ch_info]
+    if len(ch_info) != len(anode):
+        raise ValueError(
+            "Number of channel info dictionaries must equal the "
+            "number of anodes/cathodes."
+ ) + + if copy: + inst = inst.copy() + + anode = _check_before_reference( + inst, ref_from=cathode, ref_to=anode, ch_type="auto" + ) + + # Create bipolar reference channels by multiplying the data + # (channels x time) with a matrix (n_virtual_channels x channels) + # and add them to the instance. + multiplier = np.zeros((len(anode), len(inst.ch_names))) + for idx, (a, c) in enumerate(zip(anode, cathode)): + multiplier[idx, inst.ch_names.index(a)] = 1 + multiplier[idx, inst.ch_names.index(c)] = -1 + + ref_info = create_info( + ch_names=ch_name, + sfreq=inst.info["sfreq"], + ch_types=inst.get_channel_types(picks=anode), + ) + + # Update "chs" in Reference-Info. + for ch_idx, (an, info) in enumerate(zip(anode, ch_info)): + _check_ch_keys(info, ch_idx, name="ch_info", check_min=False) + an_idx = inst.ch_names.index(an) + # Copy everything from anode (except ch_name). + an_chs = {k: v for k, v in inst.info["chs"][an_idx].items() if k != "ch_name"} + ref_info["chs"][ch_idx].update(an_chs) + # Set coil-type to bipolar. + ref_info["chs"][ch_idx]["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR + # Update with info from ch_info-parameter. + ref_info["chs"][ch_idx].update(info) + + # Set other info-keys from original instance. + pick_info = { + k: v + for k, v in inst.info.items() + if k not in ["chs", "ch_names", "bads", "nchan", "sfreq"] + } + + with ref_info._unlock(): + ref_info.update(pick_info) + + # Rereferencing of data. + ref_data = multiplier @ inst._data + + if isinstance(inst, BaseRaw): + ref_inst = RawArray(ref_data, ref_info, first_samp=inst.first_samp, copy=None) + elif isinstance(inst, BaseEpochs): + ref_inst = EpochsArray( + ref_data, + ref_info, + events=inst.events, + tmin=inst.tmin, + event_id=inst.event_id, + metadata=inst.metadata, + ) + else: + ref_inst = EvokedArray( + ref_data, + ref_info, + tmin=inst.tmin, + comment=inst.comment, + nave=inst.nave, + kind="average", + ) + + # Add referenced instance to original instance. + inst.add_channels([ref_inst], force_update_info=True) + + # Handle bad channels. + bad_bipolar_chs = [] + for ch_idx, (a, c) in enumerate(zip(anode, cathode)): + if a in inst.info["bads"] or c in inst.info["bads"]: + bad_bipolar_chs.append(ch_name[ch_idx]) + + # Add warnings if bad channels are present. + if bad_bipolar_chs: + msg = f"Bipolar channels are based on bad channels: {bad_bipolar_chs}." + _on_missing(on_bad, msg) + inst.info["bads"] += bad_bipolar_chs + + added_channels = ", ".join([name for name in ch_name]) + logger.info(f"Added the following bipolar channels:\n{added_channels}") + + for attr_name in ["picks", "_projector"]: + setattr(inst, attr_name, None) + + # Drop remaining channels. + if drop_refs: + drop_channels = list((set(anode) | set(cathode)) & set(inst.ch_names)) + inst.drop_channels(drop_channels) + + return inst diff --git a/mne/_fiff/tag.py b/mne/_fiff/tag.py new file mode 100644 index 0000000..abc7d32 --- /dev/null +++ b/mne/_fiff/tag.py @@ -0,0 +1,523 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
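Before moving on to the tag-reading layer, a usage sketch for `set_bipolar_reference` defined above (channel names are illustrative):

```
import mne

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)  # illustrative
raw = mne.set_bipolar_reference(
    raw, anode="EEG 001", cathode="EEG 002", ch_name="EEG 001-002"
)
```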
+ +import html +import re +import struct +from dataclasses import dataclass +from functools import partial +from typing import Any + +import numpy as np +from scipy.sparse import csc_array, csr_array + +from ..utils import _check_option, warn +from ..utils.numerics import _julian_to_date +from .constants import ( + FIFF, + _ch_coil_type_named, + _ch_kind_named, + _ch_unit_mul_named, + _ch_unit_named, + _dig_cardinal_named, + _dig_kind_named, +) + +############################################################################## +# HELPERS + + +@dataclass +class Tag: + """Tag in FIF tree structure.""" + + kind: int + type: int + size: int + next: int + pos: int + data: Any = None + + def __eq__(self, tag): # noqa: D105 + return int( + self.kind == tag.kind + and self.type == tag.type + and self.size == tag.size + and self.next == tag.next + and self.pos == tag.pos + and self.data == tag.data + ) + + @property + def next_pos(self): + """The next tag position.""" + if self.next == FIFF.FIFFV_NEXT_SEQ: # 0 + return self.pos + 16 + self.size + elif self.next > 0: + return self.next + else: # self.next should be -1 if we get here + return None # safest to return None so that things like fid.seek die + + +def _frombuffer_rows(fid, tag_size, dtype=None, shape=None, rlims=None): + """Get a range of rows from a large tag.""" + if shape is not None: + item_size = np.dtype(dtype).itemsize + if not len(shape) == 2: + raise ValueError("Only implemented for 2D matrices") + want_shape = np.prod(shape) + have_shape = tag_size // item_size + if want_shape != have_shape: + raise ValueError( + f"Wrong shape specified, requested {want_shape} but got " + f"{have_shape}" + ) + if not len(rlims) == 2: + raise ValueError("rlims must have two elements") + n_row_out = rlims[1] - rlims[0] + if n_row_out <= 0: + raise ValueError("rlims must yield at least one output") + row_size = item_size * shape[1] + # # of bytes to skip at the beginning, # to read, where to end + start_skip = int(rlims[0] * row_size) + read_size = int(n_row_out * row_size) + end_pos = int(fid.tell() + tag_size) + # Move the pointer ahead to the read point + fid.seek(start_skip, 1) + # Do the reading + out = np.frombuffer(fid.read(read_size), dtype=dtype) + # Move the pointer ahead to the end of the tag + fid.seek(end_pos) + else: + out = np.frombuffer(fid.read(tag_size), dtype=dtype) + return out + + +def _loc_to_coil_trans(loc): + """Convert loc vector to coil_trans.""" + assert loc.shape[-1] == 12 + coil_trans = np.zeros(loc.shape[:-1] + (4, 4)) + coil_trans[..., :3, 3] = loc[..., :3] + coil_trans[..., :3, :3] = np.reshape( + loc[..., 3:], loc.shape[:-1] + (3, 3) + ).swapaxes(-1, -2) + coil_trans[..., -1, -1] = 1.0 + return coil_trans + + +def _coil_trans_to_loc(coil_trans): + """Convert coil_trans to loc.""" + coil_trans = coil_trans.astype(np.float64) + return np.roll(coil_trans.T[:, :3], 1, 0).flatten() + + +def _loc_to_eeg_loc(loc): + """Convert a loc to an EEG loc.""" + if not np.isfinite(loc[:3]).all(): + raise RuntimeError("Missing EEG channel location") + if np.isfinite(loc[3:6]).all() and (loc[3:6]).any(): + return np.array([loc[0:3], loc[3:6]]).T + else: + return loc[0:3][:, np.newaxis].copy() + + +############################################################################## +# READING FUNCTIONS + +# None of these functions have docstring because it's more compact that way, +# and hopefully it's clear what they do by their names and variable values. +# See ``read_tag`` for variable descriptions. 
Return values are implied
+# by the function names.
+
+
+def _read_tag_header(fid, pos):
+    """Read only the header of a Tag."""
+    fid.seek(pos, 0)
+    s = fid.read(16)
+    if len(s) != 16:
+        where = fid.tell() - len(s)
+        extra = f" in file {fid.name}" if hasattr(fid, "name") else ""
+        warn(f"Invalid tag with only {len(s)}/16 bytes at position {where}{extra}")
+        return None
+    # struct.unpack faster than np.frombuffer, saves ~10% of time some places
+    kind, type_, size, next_ = struct.unpack(">iIii", s)
+    return Tag(kind, type_, size, next_, pos)
+
+
+def _read_matrix(fid, tag, shape, rlims):
+    """Read a matrix (dense or sparse) tag."""
+    # This should be easy to implement (see _frombuffer_rows)
+    # if we need it, but for now, it's not...
+    if shape is not None or rlims is not None:
+        raise ValueError("Row reading not implemented for matrices yet")
+
+    matrix_coding, matrix_type, bit, dtype = _matrix_info(tag)
+
+    pos = tag.pos + 16
+    fid.seek(pos + tag.size - 4, 0)
+    if matrix_coding == "dense":
+        # Find dimensions and return to the beginning of tag data
+        ndim = int(np.frombuffer(fid.read(4), dtype=">i4").item())
+        fid.seek(-(ndim + 1) * 4, 1)
+        dims = np.frombuffer(fid.read(4 * ndim), dtype=">i4")[::-1]
+        #
+        # Back to where the data start
+        #
+        fid.seek(pos, 0)
+
+        if ndim > 3:
+            raise Exception(
+                "Only 2 or 3-dimensional matrices are supported at this time"
+            )
+
+        data = fid.read(int(bit * dims.prod()))
+        data = np.frombuffer(data, dtype=dtype)
+        # Note: we need the non-conjugate transpose here
+        if matrix_type == FIFF.FIFFT_COMPLEX_FLOAT:
+            data = data.view(">c8")
+        elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE:
+            data = data.view(">c16")
+        data.shape = dims
+    else:
+        # Find dimensions and return to the beginning of tag data
+        ndim = int(np.frombuffer(fid.read(4), dtype=">i4").item())
+        fid.seek(-(ndim + 2) * 4, 1)
+        dims = np.frombuffer(fid.read(4 * (ndim + 1)), dtype=">i4")
+        if ndim != 2:
+            raise Exception("Only two-dimensional matrices are supported at this time")
+
+        # Back to where the data start
+        fid.seek(pos, 0)
+        nnz = int(dims[0])
+        nrow = int(dims[1])
+        ncol = int(dims[2])
+        # We need to make a copy so that we can own the data, otherwise we get:
+        #     _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
+        # E   ValueError: WRITEBACKIFCOPY base is read-only
+        data = np.frombuffer(fid.read(bit * nnz), dtype=dtype).astype(np.float32)
+        shape = (dims[1], dims[2])
+        if matrix_coding == "sparse CCS":
+            tmp_indices = fid.read(4 * nnz)
+            indices = np.frombuffer(tmp_indices, dtype=">i4")
+            tmp_ptr = fid.read(4 * (ncol + 1))
+            indptr = np.frombuffer(tmp_ptr, dtype=">i4")
+            swap = nrow
+            klass = csc_array
+        else:
+            assert matrix_coding == "sparse RCS", matrix_coding
+            tmp_indices = fid.read(4 * nnz)
+            indices = np.frombuffer(tmp_indices, dtype=">i4")
+            tmp_ptr = fid.read(4 * (nrow + 1))
+            indptr = np.frombuffer(tmp_ptr, dtype=">i4")
+            swap = ncol
+            klass = csr_array
+        if indptr[-1] > len(indices) or np.any(indptr < 0):
+            # There was a bug in MNE-C that caused some data to be
+            # stored without byte swapping
+            indices = np.concatenate(
+                (
+                    np.frombuffer(tmp_indices[: 4 * (swap + 1)], dtype=">i4"),
+                    np.frombuffer(tmp_indices[4 * (swap + 1) :], dtype="<i4"),
+                )
+            )
+            indptr = np.frombuffer(tmp_ptr, dtype="<i4")
+        data = klass((data, indices, indptr), shape=shape)
+    return data
+
+
+def _read_simple(fid, tag, shape, rlims, dtype):
+    """Read simple datatype tag."""
+    return _frombuffer_rows(fid, tag.size, dtype=dtype, shape=shape, rlims=rlims)
+
+
+def _read_string(fid, tag, shape, rlims):
+    """Read a string tag."""
+    # Always decode to ISO-8859-1 / latin1 (FIFF standard).
+    d = _frombuffer_rows(fid, tag.size, dtype=">c", shape=shape, rlims=rlims)
+    string = str(d.tobytes().decode("latin1", "ignore"))
+    return string.rstrip("\x00")
+
+
+def _read_complex_float(fid, tag, shape, rlims):
+    """Read complex float tag."""
+    # data gets stored twice as large
+    if shape is not None:
+        shape = (shape[0], shape[1] * 2)
+    d = _frombuffer_rows(fid, tag.size, dtype=">f4", shape=shape, rlims=rlims)
+    d = d.view(">c8")
+    return d
+
+
+def _read_complex_double(fid, tag, shape, rlims):
+    """Read complex double tag."""
+    # data gets stored twice as large
+    if shape is not None:
+        shape = (shape[0], shape[1] * 2)
+    d = _frombuffer_rows(fid, tag.size, dtype=">f8", shape=shape, rlims=rlims)
+    d = d.view(">c16")
+    return d
+
+
+def 
_read_id_struct(fid, tag, shape, rlims): + """Read ID struct tag.""" + return dict( + version=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + machid=np.frombuffer(fid.read(8), dtype=">i4"), + secs=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + usecs=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + ) + + +def _read_dig_point_struct(fid, tag, shape, rlims): + """Read dig point struct tag.""" + kind = int(np.frombuffer(fid.read(4), dtype=">i4").item()) + kind = _dig_kind_named.get(kind, kind) + ident = int(np.frombuffer(fid.read(4), dtype=">i4").item()) + if kind == FIFF.FIFFV_POINT_CARDINAL: + ident = _dig_cardinal_named.get(ident, ident) + return dict( + kind=kind, + ident=ident, + r=np.frombuffer(fid.read(12), dtype=">f4"), + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + ) + + +def _read_coord_trans_struct(fid, tag, shape, rlims): + """Read coord trans struct tag.""" + from ..transforms import Transform + + fro = int(np.frombuffer(fid.read(4), dtype=">i4").item()) + to = int(np.frombuffer(fid.read(4), dtype=">i4").item()) + rot = np.frombuffer(fid.read(36), dtype=">f4").reshape(3, 3) + move = np.frombuffer(fid.read(12), dtype=">f4") + trans = np.r_[np.c_[rot, move], np.array([[0], [0], [0], [1]]).T] + data = Transform(fro, to, trans) + fid.seek(48, 1) # Skip over the inverse transformation + return data + + +_ch_coord_dict = { + FIFF.FIFFV_MEG_CH: FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_REF_MEG_CH: FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_EEG_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_ECOG_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_SEEG_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_DBS_CH: FIFF.FIFFV_COORD_HEAD, + FIFF.FIFFV_FNIRS_CH: FIFF.FIFFV_COORD_HEAD, +} + + +def _read_ch_info_struct(fid, tag, shape, rlims): + """Read channel info struct tag.""" + d = dict( + scanno=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + logno=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + kind=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + range=float(np.frombuffer(fid.read(4), dtype=">f4").item()), + cal=float(np.frombuffer(fid.read(4), dtype=">f4").item()), + coil_type=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + # deal with really old OSX Anaconda bug by casting to float64 + loc=np.frombuffer(fid.read(48), dtype=">f4").astype(np.float64), + # unit and exponent + unit=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + unit_mul=int(np.frombuffer(fid.read(4), dtype=">i4").item()), + ) + # channel name + ch_name = np.frombuffer(fid.read(16), dtype=">c") + ch_name = ch_name[: np.argmax(ch_name == b"")].tobytes() + d["ch_name"] = ch_name.decode() + # coil coordinate system definition + _update_ch_info_named(d) + return d + + +def _update_ch_info_named(d): + d["coord_frame"] = _ch_coord_dict.get(d["kind"], FIFF.FIFFV_COORD_UNKNOWN) + d["kind"] = _ch_kind_named.get(d["kind"], d["kind"]) + d["coil_type"] = _ch_coil_type_named.get(d["coil_type"], d["coil_type"]) + d["unit"] = _ch_unit_named.get(d["unit"], d["unit"]) + d["unit_mul"] = _ch_unit_mul_named.get(d["unit_mul"], d["unit_mul"]) + + +def _read_old_pack(fid, tag, shape, rlims): + """Read old pack tag.""" + offset = float(np.frombuffer(fid.read(4), dtype=">f4").item()) + scale = float(np.frombuffer(fid.read(4), dtype=">f4").item()) + data = np.frombuffer(fid.read(tag.size - 8), dtype=">i2") + data = data * scale # to float64 + data += offset + return data + + +def _read_dir_entry_struct(fid, tag, shape, rlims): + """Read dir entry struct tag.""" + pos = tag.pos + 16 + entries = list() + for offset in range(1, tag.size // 
16): + ent = _read_tag_header(fid, pos + offset * 16) + # The position of the real tag on disk is stored in the "next" entry within the + # directory, so we need to overwrite ent.pos. For safety let's also overwrite + # ent.next to point nowhere + ent.pos, ent.next = ent.next, FIFF.FIFFV_NEXT_NONE + entries.append(ent) + return entries + + +def _read_julian(fid, tag, shape, rlims): + """Read julian tag.""" + return _julian_to_date(int(np.frombuffer(fid.read(4), dtype=">i4").item())) + + +# Read types call dict +_call_dict = { + FIFF.FIFFT_STRING: _read_string, + FIFF.FIFFT_COMPLEX_FLOAT: _read_complex_float, + FIFF.FIFFT_COMPLEX_DOUBLE: _read_complex_double, + FIFF.FIFFT_ID_STRUCT: _read_id_struct, + FIFF.FIFFT_DIG_POINT_STRUCT: _read_dig_point_struct, + FIFF.FIFFT_COORD_TRANS_STRUCT: _read_coord_trans_struct, + FIFF.FIFFT_CH_INFO_STRUCT: _read_ch_info_struct, + FIFF.FIFFT_OLD_PACK: _read_old_pack, + FIFF.FIFFT_DIR_ENTRY_STRUCT: _read_dir_entry_struct, + FIFF.FIFFT_JULIAN: _read_julian, +} +_call_dict_names = { + FIFF.FIFFT_STRING: "str", + FIFF.FIFFT_COMPLEX_FLOAT: "c8", + FIFF.FIFFT_COMPLEX_DOUBLE: "c16", + FIFF.FIFFT_ID_STRUCT: "ids", + FIFF.FIFFT_DIG_POINT_STRUCT: "dps", + FIFF.FIFFT_COORD_TRANS_STRUCT: "cts", + FIFF.FIFFT_CH_INFO_STRUCT: "cis", + FIFF.FIFFT_OLD_PACK: "op_", + FIFF.FIFFT_DIR_ENTRY_STRUCT: "dir", + FIFF.FIFFT_JULIAN: "jul", + FIFF.FIFFT_VOID: "nul", # 0 +} + +# Append the simple types +_simple_dict = { + FIFF.FIFFT_BYTE: ">B", + FIFF.FIFFT_SHORT: ">i2", + FIFF.FIFFT_INT: ">i4", + FIFF.FIFFT_USHORT: ">u2", + FIFF.FIFFT_UINT: ">u4", + FIFF.FIFFT_FLOAT: ">f4", + FIFF.FIFFT_DOUBLE: ">f8", + FIFF.FIFFT_DAU_PACK16: ">i2", +} +for key, dtype in _simple_dict.items(): + _call_dict[key] = partial(_read_simple, dtype=dtype) + _call_dict_names[key] = dtype + + +def read_tag(fid, pos, shape=None, rlims=None): + """Read a Tag from a file at a given position. + + Parameters + ---------- + fid : file + The open FIF file descriptor. + pos : int + The position of the Tag in the file. + shape : tuple | None + If tuple, the shape of the stored matrix. Only to be used with + data stored as a vector (not implemented for matrices yet). + rlims : tuple | None + If tuple, the first (inclusive) and last (exclusive) rows to retrieve. + Note that data are assumed to be stored row-major in the file. Only to + be used with data stored as a vector (not implemented for matrices + yet). + + Returns + ------- + tag : Tag + The Tag read. + """ + tag = _read_tag_header(fid, pos) + if tag is None: + return tag + if tag.size > 0: + if _matrix_info(tag) is not None: + tag.data = _read_matrix(fid, tag, shape, rlims) + else: + # All other data types + try: + fun = _call_dict[tag.type] + except KeyError: + raise Exception(f"Unimplemented tag data type {tag.type}") from None + tag.data = fun(fid, tag, shape, rlims) + return tag + + +def find_tag(fid, node, findkind): + """Find Tag in an open FIF file descriptor. + + Parameters + ---------- + fid : file-like + Open file. + node : dict + Node to search. + findkind : int + Tag kind to find. + + Returns + ------- + tag : instance of Tag + The first tag found. 
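The `_call_dict` dispatch above is the heart of `read_tag`: one reader per FIFF type code, with all fixed-width scalar types sharing a single reader through `functools.partial`. To make the pattern concrete in isolation, here is a minimal, self-contained sketch; the type codes and payloads are invented for illustration and are not real FIFF constants:

```python
import struct
from functools import partial

import numpy as np

# Hypothetical type codes standing in for FIFF.FIFFT_* constants
T_INT, T_FLOAT, T_STRING = 3, 4, 10


def _decode_simple(buf, dtype):
    """Decode a fixed-width big-endian payload (shared via partial)."""
    return np.frombuffer(buf, dtype=dtype)


readers = {
    T_INT: partial(_decode_simple, dtype=">i4"),
    T_FLOAT: partial(_decode_simple, dtype=">f4"),
    T_STRING: lambda buf: buf.decode("latin1"),
}


def read_payload(type_, buf):
    try:
        fun = readers[type_]
    except KeyError:
        raise Exception(f"Unimplemented tag data type {type_}") from None
    return fun(buf)


assert read_payload(T_INT, struct.pack(">2i", 1, 2)).tolist() == [1, 2]
assert read_payload(T_STRING, b"hello") == "hello"
```

Registering a new tag type is then a one-line dictionary entry, which is why the vendored reader covers so many tag kinds with so little branching.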
+ """ + if node["directory"] is not None: + for subnode in node["directory"]: + if subnode.kind == findkind: + return read_tag(fid, subnode.pos) + return None + + +def has_tag(node, kind): + """Check if the node contains a Tag of a given kind.""" + for d in node["directory"]: + if d.kind == kind: + return True + return False + + +def _rename_list(bads, ch_names_mapping): + return [ch_names_mapping.get(bad, bad) for bad in bads] + + +def _int_item(x): + return int(x.item()) + + +def _float_item(x): + return float(x.item()) + + +def _matrix_info(tag): + matrix_coding = tag.type & 0xFFFF0000 + if matrix_coding == 0 or tag.size == 0: + return None + matrix_type = tag.type & 0x0000FFFF + matrix_coding_dict = { + FIFF.FIFFT_MATRIX: "dense", + FIFF.FIFFT_MATRIX | FIFF.FIFFT_SPARSE_CCS_MATRIX: "sparse CCS", + FIFF.FIFFT_MATRIX | FIFF.FIFFT_SPARSE_RCS_MATRIX: "sparse RCS", + } + _check_option("matrix_coding", matrix_coding, list(matrix_coding_dict)) + matrix_coding = matrix_coding_dict[matrix_coding] + matrix_bit_dtype = { + FIFF.FIFFT_INT: (4, ">i4"), + FIFF.FIFFT_JULIAN: (4, ">i4"), + FIFF.FIFFT_FLOAT: (4, ">f4"), + FIFF.FIFFT_DOUBLE: (8, ">f8"), + FIFF.FIFFT_COMPLEX_FLOAT: (8, ">f4"), + FIFF.FIFFT_COMPLEX_DOUBLE: (16, ">f8"), + } + _check_option("matrix_type", matrix_type, list(matrix_bit_dtype)) + bit, dtype = matrix_bit_dtype[matrix_type] + return matrix_coding, matrix_type, bit, dtype diff --git a/mne/_fiff/tree.py b/mne/_fiff/tree.py new file mode 100644 index 0000000..142c40a --- /dev/null +++ b/mne/_fiff/tree.py @@ -0,0 +1,108 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + + +from ..utils import logger, verbose +from .constants import FIFF +from .tag import read_tag + + +def dir_tree_find(tree, kind): + """Find nodes of the given kind from a directory tree structure. + + Parameters + ---------- + tree : dict + Directory tree. + kind : int + Kind to find. + + Returns + ------- + nodes : list + List of matching nodes. + """ + nodes = [] + + if isinstance(tree, list): + for t in tree: + nodes += dir_tree_find(t, kind) + else: + # Am I desirable myself? 
+ if tree["block"] == kind: + nodes.append(tree) + + # Search the subtrees + for child in tree["children"]: + nodes += dir_tree_find(child, kind) + return nodes + + +@verbose +def make_dir_tree(fid, directory, start=0, indent=0, verbose=None): + """Create the directory tree structure.""" + if directory[start].kind == FIFF.FIFF_BLOCK_START: + tag = read_tag(fid, directory[start].pos) + block = tag.data.item() + else: + block = 0 + + start_separate = False + + this = start + + tree = dict() + tree["block"] = block + tree["id"] = None + tree["parent_id"] = None + tree["nent"] = 0 + tree["nchild"] = 0 + tree["directory"] = directory[this] + tree["children"] = [] + + while this < len(directory): + if directory[this].kind == FIFF.FIFF_BLOCK_START: + if this != start: + if not start_separate: + start_separate = True + logger.debug(" " * indent + f"start {{ {block}") + child, this = make_dir_tree(fid, directory, this, indent + 1) + tree["nchild"] += 1 + tree["children"].append(child) + elif directory[this].kind == FIFF.FIFF_BLOCK_END: + tag = read_tag(fid, directory[start].pos) + if tag.data == block: + break + else: + tree["nent"] += 1 + if tree["nent"] == 1: + tree["directory"] = list() + tree["directory"].append(directory[this]) + + # Add the id information if available + if block == 0: + if directory[this].kind == FIFF.FIFF_FILE_ID: + tag = read_tag(fid, directory[this].pos) + tree["id"] = tag.data + else: + if directory[this].kind == FIFF.FIFF_BLOCK_ID: + tag = read_tag(fid, directory[this].pos) + tree["id"] = tag.data + elif directory[this].kind == FIFF.FIFF_PARENT_BLOCK_ID: + tag = read_tag(fid, directory[this].pos) + tree["parent_id"] = tag.data + + this += 1 + + # Eliminate the empty directory + if tree["nent"] == 0: + tree["directory"] = None + + content = f"block = {tree['block']} nent = {tree['nent']} nchild = {tree['nchild']}" + if start_separate: + logger.debug(" " * indent + f"end }} {content}") + else: + logger.debug(" " * indent + content) + last = this + return tree, last diff --git a/mne/_fiff/utils.py b/mne/_fiff/utils.py new file mode 100644 index 0000000..b158914 --- /dev/null +++ b/mne/_fiff/utils.py @@ -0,0 +1,331 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +import os.path as op +from pathlib import Path + +import numpy as np + +from .constants import FIFF +from .meas_info import _get_valid_units + + +def _check_orig_units(orig_units): + """Check original units from a raw file. + + Units that are close to a valid_unit but not equal can be remapped to fit + into the valid_units. All other units that are not valid will be replaced + with "n/a". + + Parameters + ---------- + orig_units : dict + Dictionary mapping channel names to their units as specified in + the header file. Example: {'FC1': 'nV'} + + Returns + ------- + orig_units_remapped : dict + Dictionary mapping channel names to their VALID units as specified in + the header file. Invalid units are now labeled "n/a". + Example: {'FC1': 'nV', 'Hfp3erz': 'n/a'} + """ + if orig_units is None: + return + valid_units = _get_valid_units() + valid_units_lowered = [unit.lower() for unit in valid_units] + orig_units_remapped = dict(orig_units) + for ch_name, unit in orig_units.items(): + # Be lenient: we ignore case for now. 
+ if unit.lower() in valid_units_lowered: + continue + + # Common "invalid units" can be remapped to their valid equivalent + remap_dict = dict() + remap_dict["uv"] = "µV" + remap_dict["μv"] = "µV" # greek letter mu vs micro sign. use micro + remap_dict["\x83\xeav"] = "µV" # for shift-jis mu, use micro + if unit.lower() in remap_dict: + orig_units_remapped[ch_name] = remap_dict[unit.lower()] + continue + + # Some units cannot be saved, they are invalid: assign "n/a" + orig_units_remapped[ch_name] = "n/a" + + return orig_units_remapped + + +def _find_channels(ch_names, ch_type="EOG"): + """Find EOG channel.""" + substrings = (ch_type,) + substrings = [s.upper() for s in substrings] + if ch_type == "EOG": + substrings = ("EOG", "EYE") + eog_idx = [ + idx + for idx, ch in enumerate(ch_names) + if any(substring in ch.upper() for substring in substrings) + ] + return eog_idx + + +def _mult_cal_one(data_view, one, idx, cals, mult): + """Take a chunk of raw data, multiply by mult or cals, and store.""" + one = np.asarray(one, dtype=data_view.dtype) + assert data_view.shape[1] == one.shape[1], ( + data_view.shape[1], + one.shape[1], + ) # noqa: E501 + if mult is not None: + assert mult.ndim == one.ndim == 2 + data_view[:] = mult @ one[idx] + else: + assert cals is not None + if isinstance(idx, slice): + data_view[:] = one[idx] + else: + # faster than doing one = one[idx] + np.take(one, idx, axis=0, out=data_view) + data_view *= cals + + +def _blk_read_lims(start, stop, buf_len): + """Deal with indexing in the middle of a data block. + + Parameters + ---------- + start : int + Starting index. + stop : int + Ending index (exclusive). + buf_len : int + Buffer size in samples. + + Returns + ------- + block_start_idx : int + The first block to start reading from. + r_lims : list + The read limits. + d_lims : list + The write limits. + + Notes + ----- + Consider this example:: + + >>> start, stop, buf_len = 2, 27, 10 + + +---------+---------+--------- + File structure: | buf0 | buf1 | buf2 | + +---------+---------+--------- + File time: 0 10 20 30 + +---------+---------+--------- + Requested time: 2 27 + + | | + blockstart blockstop + | | + start stop + + We need 27 - 2 = 25 samples (per channel) to store our data, and + we need to read from 3 buffers (30 samples) to get all of our data. + + On all reads but the first, the data we read starts at + the first sample of the buffer. On all reads but the last, + the data we read ends on the last sample of the buffer. + + We call ``this_data`` the variable that stores the current buffer's data, + and ``data`` the variable that stores the total output. 
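`_mult_cal_one` above has two paths: scale the selected channels by per-channel calibrations, or apply a projection/mixing matrix that already folds the calibration in. A small numeric check of both branches; the shapes and values here are arbitrary:

```python
import numpy as np

one = np.arange(12.0).reshape(4, 3)   # 4 channels x 3 samples as read from disk
idx = [0, 2]                          # the channels actually requested
cals = np.array([[2.0], [0.5]])       # per-channel calibration, column-shaped
data_view = np.empty((2, 3))

# cals branch: select rows straight into the output, then scale in place
np.take(one, idx, axis=0, out=data_view)
data_view *= cals
assert np.allclose(data_view, one[idx] * cals)

# mult branch: the matrix acts on the selected channels and already
# includes the calibration factors
mult = np.array([[2.0, 0.0], [0.0, 0.5]])
assert np.allclose(mult @ one[idx], data_view)
```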
+ + On the first read, we need to do this:: + + >>> data[0:buf_len-2] = this_data[2:buf_len] # doctest: +SKIP + + On the second read, we need to do:: + + >>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len] # doctest: +SKIP + + On the final read, we need to do:: + + >>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3] # doctest: +SKIP + + This function encapsulates this logic to allow a loop over blocks, where + data is stored using the following limits:: + + >>> data[d_lims[ii, 0]:d_lims[ii, 1]] = this_data[r_lims[ii, 0]:r_lims[ii, 1]] # doctest: +SKIP + + """ # noqa: E501 + # this is used to deal with indexing in the middle of a sampling period + assert all(isinstance(x, int) for x in (start, stop, buf_len)) + block_start_idx = start // buf_len + block_start = block_start_idx * buf_len + last_used_samp = stop - 1 + block_stop = last_used_samp - last_used_samp % buf_len + buf_len + read_size = block_stop - block_start + n_blk = read_size // buf_len + (read_size % buf_len != 0) + start_offset = start - block_start + end_offset = block_stop - stop + d_lims = np.empty((n_blk, 2), int) + r_lims = np.empty((n_blk, 2), int) + for bi in range(n_blk): + # Triage start (sidx) and end (eidx) indices for + # data (d) and read (r) + if bi == 0: + d_sidx = 0 + r_sidx = start_offset + else: + d_sidx = bi * buf_len - start_offset + r_sidx = 0 + if bi == n_blk - 1: + d_eidx = stop - start + r_eidx = buf_len - end_offset + else: + d_eidx = (bi + 1) * buf_len - start_offset + r_eidx = buf_len + d_lims[bi] = [d_sidx, d_eidx] + r_lims[bi] = [r_sidx, r_eidx] + return block_start_idx, r_lims, d_lims + + +def _file_size(fname): + """Get the file size in bytes.""" + with open(fname, "rb") as f: + f.seek(0, os.SEEK_END) + return f.tell() + + +def _read_segments_file( + raw, + data, + idx, + fi, + start, + stop, + cals, + mult, + dtype, + n_channels=None, + offset=0, + trigger_ch=None, +): + """Read a chunk of raw data.""" + if n_channels is None: + n_channels = raw._raw_extras[fi]["orig_nchan"] + + n_bytes = np.dtype(dtype).itemsize + # data_offset and data_left count data samples (channels x time points), + # not bytes. 
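The worked example in the `_blk_read_lims` docstring (start=2, stop=27, buf_len=10) can be verified with a few lines of arithmetic. This standalone sketch mirrors the indexing rather than calling the vendored helper:

```python
import numpy as np

start, stop, buf_len = 2, 27, 10  # the values from the docstring figure

last = stop - 1
block_start = (start // buf_len) * buf_len    # file offset of the first buffer
block_stop = last - last % buf_len + buf_len  # one past the last buffer -> 30
n_blk = (block_stop - block_start) // buf_len  # 3 buffers must be read

src = np.arange(block_stop)                   # stand-in file contents
out = np.full(stop - start, -1)
for bi in range(n_blk):
    lo = block_start + bi * buf_len           # this buffer's file offset
    this = src[lo : lo + buf_len]             # one buffered read
    r0, r1 = max(start - lo, 0), min(stop - lo, buf_len)  # read limits
    d0 = lo + r0 - start                                  # write offset
    out[d0 : d0 + (r1 - r0)] = this[r0:r1]

assert (out == np.arange(start, stop)).all()  # 25 samples, no gaps or overlap
```

The first iteration writes `out[0:8]` from `this[2:10]` and the last writes `out[18:25]` from `this[0:7]`, exactly as the docstring claims.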
+ data_offset = n_channels * start * n_bytes + offset + data_left = (stop - start) * n_channels + + # Read up to 100 MB of data at a time, block_size is in data samples + block_size = ((int(100e6) // n_bytes) // n_channels) * n_channels + block_size = min(data_left, block_size) + with open(raw.filenames[fi], "rb", buffering=0) as fid: + fid.seek(data_offset) + # extract data in chunks + for sample_start in np.arange(0, data_left, block_size) // n_channels: + count = min(block_size, data_left - sample_start * n_channels) + block = np.fromfile(fid, dtype, count) + if block.size != count: + raise RuntimeError( + f"Incorrect number of samples ({block.size} != {count}), please " + "report this error to MNE-Python developers" + ) + block = block.reshape(n_channels, -1, order="F") + n_samples = block.shape[1] # = count // n_channels + sample_stop = sample_start + n_samples + if trigger_ch is not None: + stim_ch = trigger_ch[start:stop][sample_start:sample_stop] + block = np.vstack((block, stim_ch)) + data_view = data[:, sample_start:sample_stop] + _mult_cal_one(data_view, block, idx, cals, mult) + + +def read_str(fid, count=1): + """Read string from a binary file in a python version compatible way.""" + dtype = np.dtype(f">S{count}") + string = fid.read(dtype.itemsize) + data = np.frombuffer(string, dtype=dtype)[0] + bytestr = b"".join([data[0 : data.index(b"\x00") if b"\x00" in data else count]]) + + return str(bytestr.decode("ascii")) # Return native str type for Py2/3 + + +def _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc): + """Initialize info['chs'] for eeg channels.""" + chs = list() + for idx, ch_name in enumerate(ch_names): + if ch_name in eog or idx in eog: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_EOG_CH + elif ch_name in ecg or idx in ecg: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_ECG_CH + elif ch_name in emg or idx in emg: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_EMG_CH + elif ch_name in misc or idx in misc: + coil_type = FIFF.FIFFV_COIL_NONE + kind = FIFF.FIFFV_MISC_CH + else: + coil_type = ch_coil + kind = ch_kind + + chan_info = { + "cal": cals[idx], + "logno": idx + 1, + "scanno": idx + 1, + "range": 1.0, + "unit_mul": FIFF.FIFF_UNITM_NONE, + "ch_name": ch_name, + "unit": FIFF.FIFF_UNIT_V, + "coord_frame": FIFF.FIFFV_COORD_HEAD, + "coil_type": coil_type, + "kind": kind, + "loc": np.zeros(12), + } + if coil_type == FIFF.FIFFV_COIL_EEG: + chan_info["loc"][:3] = np.nan + chs.append(chan_info) + return chs + + +def _construct_bids_filename(base, ext, part_idx, validate=True): + """Construct a BIDS compatible filename for split files.""" + # insert index in filename + dirname = op.dirname(base) + base = op.basename(base) + deconstructed_base = base.split("_") + if len(deconstructed_base) < 2 and validate: + raise ValueError( + "Filename base must end with an underscore followed " + f"by the modality (e.g., _eeg or _meg), got {base}" + ) + suffix = deconstructed_base[-1] + base = "_".join(deconstructed_base[:-1]) + use_fname = f"{base}_split-{part_idx + 1:02}_{suffix}{ext}" + if dirname: + use_fname = op.join(dirname, use_fname) + return use_fname + + +def _make_split_fnames(fname, n_splits, split_naming): + """Make a list of split filenames.""" + if n_splits == 1: + fname = Path(fname) + return [fname] + res = [] + base, ext = op.splitext(fname) + for i in range(n_splits): + if split_naming == "neuromag": + path = Path(f"{base}-{i:d}{ext}" if i else fname) + res.append(path) + else: + assert split_naming == "bids" + path = 
Path(_construct_bids_filename(base, ext, i)) + res.append(path) + return res diff --git a/mne/_fiff/what.py b/mne/_fiff/what.py new file mode 100644 index 0000000..d91f79d --- /dev/null +++ b/mne/_fiff/what.py @@ -0,0 +1,70 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from collections import OrderedDict +from inspect import signature + +from ..utils import _check_fname, logger + + +def what(fname): + """Try to determine the type of the FIF file. + + Parameters + ---------- + fname : path-like + The filename. Should end in ``.fif`` or ``.fif.gz``. + + Returns + ------- + what : str | None + The type of the file. Will be 'unknown' if it could not be determined. + + Notes + ----- + .. versionadded:: 0.19 + """ + from ..bem import read_bem_solution, read_bem_surfaces + from ..cov import read_cov + from ..epochs import read_epochs + from ..event import read_events + from ..evoked import read_evokeds + from ..forward import read_forward_solution + from ..io import read_raw_fif + from ..minimum_norm import read_inverse_operator + from ..preprocessing import read_ica + from ..proj import read_proj + from ..source_space import read_source_spaces + from ..transforms import read_trans + from .meas_info import read_fiducials + + fname = _check_fname(fname, overwrite="read", must_exist=True) + checks = OrderedDict() + checks["raw"] = read_raw_fif + checks["ica"] = read_ica + checks["epochs"] = read_epochs + checks["evoked"] = read_evokeds + checks["forward"] = read_forward_solution + checks["inverse"] = read_inverse_operator + checks["src"] = read_source_spaces + checks["bem solution"] = read_bem_solution + checks["bem surfaces"] = read_bem_surfaces + checks["cov"] = read_cov + checks["transform"] = read_trans + checks["events"] = read_events + checks["fiducials"] = read_fiducials + checks["proj"] = read_proj + for what, func in checks.items(): + args = signature(func).parameters + assert "verbose" in args, func + kwargs = dict(verbose="error") + if "preload" in args: + kwargs["preload"] = False + try: + func(fname, **kwargs) + except Exception as exp: + logger.debug(f"Not {what}: {exp}") + else: + return what + return "unknown" diff --git a/mne/_fiff/write.py b/mne/_fiff/write.py new file mode 100644 index 0000000..1fc32f0 --- /dev/null +++ b/mne/_fiff/write.py @@ -0,0 +1,454 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import datetime +import os.path as op +import re +import time +import uuid +from contextlib import contextmanager +from gzip import GzipFile + +import numpy as np +from scipy.sparse import csc_array, csr_array + +from ..utils import _file_like, _validate_type, logger +from ..utils.numerics import _date_to_julian +from .constants import FIFF + +# We choose a "magic" date to store (because meas_date is obligatory) +# to treat as meas_date=None. This one should be impossible for systems +# to write -- the second field is microseconds, so anything >= 1e6 +# should be moved into the first field (seconds). +DATE_NONE = (0, 2**31 - 1) + + +def _write(fid, data, kind, data_size, FIFFT_TYPE, dtype): + """Write data.""" + if isinstance(data, np.ndarray): + data_size *= data.size + + # XXX for string types the data size is used as + # computed in ``write_string``. 
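`what()` above identifies a FIF file by simply attempting each reader in a fixed order and returning the label of the first one that succeeds; the order matters because some readers accept a superset of what others do. The same pattern in miniature, with made-up parsers in place of the MNE readers:

```python
from collections import OrderedDict


def what_is(text):
    """Return the name of the first parser that accepts ``text``."""
    checks = OrderedDict([("int", int), ("float", float)])
    for name, func in checks.items():
        try:
            func(text)
        except Exception:
            continue  # not this kind; try the next one
        return name
    return "unknown"


assert what_is("3") == "int"      # order matters: int is tried before float
assert what_is("3.5") == "float"
assert what_is("abc") == "unknown"
```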
+ + fid.write(np.array(kind, dtype=">i4").tobytes()) + fid.write(np.array(FIFFT_TYPE, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + fid.write(np.array(data, dtype=dtype).tobytes()) + + +def _get_split_size(split_size): + """Convert human-readable bytes to machine-readable bytes.""" + if isinstance(split_size, str): + exp = dict(MB=20, GB=30).get(split_size[-2:], None) + if exp is None: + raise ValueError('split_size has to end with either "MB" or "GB"') + split_size = int(float(split_size[:-2]) * 2**exp) + + if split_size > 2147483648: + raise ValueError("split_size cannot be larger than 2GB") + return split_size + + +_NEXT_FILE_BUFFER = 1048576 # 2 ** 20 extra cushion for last post-data tags + + +def write_nop(fid, last=False): + """Write a FIFF_NOP.""" + fid.write(np.array(FIFF.FIFF_NOP, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFT_VOID, dtype=">i4").tobytes()) + fid.write(np.array(0, dtype=">i4").tobytes()) + next_ = FIFF.FIFFV_NEXT_NONE if last else FIFF.FIFFV_NEXT_SEQ + fid.write(np.array(next_, dtype=">i4").tobytes()) + + +INT32_MAX = 2147483647 + + +def write_int(fid, kind, data): + """Write a 32-bit integer tag to a fif file.""" + data_size = 4 + data = np.asarray(data) + if data.dtype.kind not in "uib" and data.size > 0: + raise TypeError( + f"Cannot safely write data kind {kind} with dtype {data.dtype} as int", + ) + max_val = data.max() if data.size > 0 else 0 + if max_val > INT32_MAX: + raise TypeError( + f"Value {max_val} exceeds maximum allowed ({INT32_MAX}) for tag {kind}" + ) + data = data.astype(">i4").T + _write(fid, data, kind, data_size, FIFF.FIFFT_INT, ">i4") + + +def write_double(fid, kind, data): + """Write a double-precision floating point tag to a fif file.""" + data_size = 8 + data = np.array(data, dtype=">f8").T + _write(fid, data, kind, data_size, FIFF.FIFFT_DOUBLE, ">f8") + + +def write_float(fid, kind, data): + """Write a single-precision floating point tag to a fif file.""" + data_size = 4 + data = np.array(data, dtype=">f4").T + _write(fid, data, kind, data_size, FIFF.FIFFT_FLOAT, ">f4") + + +def write_dau_pack16(fid, kind, data): + """Write a dau_pack16 tag to a fif file.""" + data_size = 2 + data = np.array(data, dtype=">i2").T + _write(fid, data, kind, data_size, FIFF.FIFFT_DAU_PACK16, ">i2") + + +def write_complex64(fid, kind, data): + """Write a 64 bit complex floating point tag to a fif file.""" + data_size = 8 + data = np.array(data, dtype=">c8").T + _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, ">c8") + + +def write_complex128(fid, kind, data): + """Write a 128 bit complex floating point tag to a fif file.""" + data_size = 16 + data = np.array(data, dtype=">c16").T + _write(fid, data, kind, data_size, FIFF.FIFFT_COMPLEX_FLOAT, ">c16") + + +def write_julian(fid, kind, data): + """Write a Julian-formatted date to a FIF file.""" + assert isinstance(data, datetime.date), type(data) + data_size = 4 + jd = _date_to_julian(data) + data = np.array(jd, dtype=">i4") + _write(fid, data, kind, data_size, FIFF.FIFFT_JULIAN, ">i4") + + +def write_string(fid, kind, data): + """Write a string tag.""" + try: + str_data = str(data).encode("latin1") + except UnicodeEncodeError: + str_data = str(data).encode("latin1", errors="xmlcharrefreplace") + data_size = len(str_data) # therefore compute size here + if data_size > 0: + _write(fid, str_data, kind, data_size, FIFF.FIFFT_STRING, ">S") + + +def write_name_list(fid, kind, data): + """Write a 
colon-separated list of names. + + Parameters + ---------- + data : list of strings + """ + write_string(fid, kind, ":".join(data)) + + +def write_name_list_sanitized(fid, kind, lst, name): + """Write a sanitized, colon-separated list of names.""" + write_string(fid, kind, _safe_name_list(lst, "write", name)) + + +def _safe_name_list(lst, operation, name): + if operation == "write": + assert isinstance(lst, list | tuple | np.ndarray), type(lst) + if any("{COLON}" in val for val in lst): + raise ValueError(f'The substring "{{COLON}}" in {name} not supported.') + return ":".join(val.replace(":", "{COLON}") for val in lst) + else: + # take a sanitized string and return a list of strings + assert operation == "read" + assert lst is None or isinstance(lst, str) + if not lst: # None or empty string + return [] + return [val.replace("{COLON}", ":") for val in lst.split(":")] + + +def write_float_matrix(fid, kind, mat): + """Write a single-precision floating-point matrix tag.""" + _write_matrix_data(fid, kind, mat, FIFF.FIFFT_FLOAT) + + +def write_double_matrix(fid, kind, mat): + """Write a double-precision floating-point matrix tag.""" + _write_matrix_data(fid, kind, mat, FIFF.FIFFT_DOUBLE) + + +def write_int_matrix(fid, kind, mat): + """Write integer 32 matrix tag.""" + _write_matrix_data(fid, kind, mat, FIFF.FIFFT_INT) + + +def write_complex_float_matrix(fid, kind, mat): + """Write complex 64 matrix tag.""" + _write_matrix_data(fid, kind, mat, FIFF.FIFFT_COMPLEX_FLOAT) + + +def write_complex_double_matrix(fid, kind, mat): + """Write complex 128 matrix tag.""" + _write_matrix_data(fid, kind, mat, FIFF.FIFFT_COMPLEX_DOUBLE) + + +def _write_matrix_data(fid, kind, mat, data_type): + dtype = { + FIFF.FIFFT_FLOAT: ">f4", + FIFF.FIFFT_DOUBLE: ">f8", + FIFF.FIFFT_COMPLEX_FLOAT: ">c8", + FIFF.FIFFT_COMPLEX_DOUBLE: ">c16", + FIFF.FIFFT_INT: ">i4", + }[data_type] + dtype = np.dtype(dtype) + data_size = dtype.itemsize * mat.size + 4 * (mat.ndim + 1) + matrix_type = data_type | FIFF.FIFFT_MATRIX + fid.write(np.array(kind, dtype=">i4").tobytes()) + fid.write(np.array(matrix_type, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + fid.write(np.array(mat, dtype=dtype).tobytes()) + dims = np.empty(mat.ndim + 1, dtype=np.int32) + dims[: mat.ndim] = mat.shape[::-1] + dims[-1] = mat.ndim + fid.write(np.array(dims, dtype=">i4").tobytes()) + check_fiff_length(fid) + + +def get_machid(): + """Get (mostly) unique machine ID. + + Returns + ------- + ids : array (length 2, int32) + The machine identifier used in MNE. 
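Because FIF stores a name list as one colon-joined string, `_safe_name_list` above escapes literal colons as `{COLON}` on write and reverses the substitution on read. A standalone round trip of the same convention, re-implemented here rather than imported:

```python
def pack(names):
    if any("{COLON}" in n for n in names):
        raise ValueError('The substring "{COLON}" is not supported.')
    return ":".join(n.replace(":", "{COLON}") for n in names)


def unpack(text):
    if not text:  # None or empty string -> empty list
        return []
    return [n.replace("{COLON}", ":") for n in text.split(":")]


names = ["S1_D1 hbo", "EEG 001", "weird:name"]
assert unpack(pack(names)) == names
assert pack(names).count(":") == 2  # only separators remain after escaping
```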
+ """ + mac = f"{uuid.getnode():012x}".encode() # byte conversion for Py3 + mac = re.findall(b"..", mac) # split string + mac += [b"00", b"00"] # add two more fields + + # Convert to integer in reverse-order (for some reason) + from codecs import encode + + mac = b"".join([encode(h, "hex_codec") for h in mac[::-1]]) + ids = np.flipud(np.frombuffer(mac, np.int32, count=2)) + return ids + + +def get_new_file_id(): + """Create a new file ID tag.""" + secs, usecs = divmod(time.time(), 1.0) + secs, usecs = int(secs), int(usecs * 1e6) + return { + "machid": get_machid(), + "version": FIFF.FIFFC_VERSION, + "secs": secs, + "usecs": usecs, + } + + +def write_id(fid, kind, id_=None): + """Write fiff id.""" + id_ = _generate_meas_id() if id_ is None else id_ + + data_size = 5 * 4 # The id comprises five integers + fid.write(np.array(kind, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFT_ID_STRUCT, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + + # Collect the bits together for one write + arr = np.array( + [id_["version"], id_["machid"][0], id_["machid"][1], id_["secs"], id_["usecs"]], + dtype=">i4", + ) + fid.write(arr.tobytes()) + + +def start_block(fid, kind): + """Write a FIFF_BLOCK_START tag.""" + write_int(fid, FIFF.FIFF_BLOCK_START, kind) + + +def end_block(fid, kind): + """Write a FIFF_BLOCK_END tag.""" + write_int(fid, FIFF.FIFF_BLOCK_END, kind) + + +def start_file(fname, id_=None): + """Open a fif file for writing and writes the compulsory header tags. + + Parameters + ---------- + fname : path-like | fid + The name of the file to open. It is recommended + that the name ends with .fif or .fif.gz. Can also be an + already opened file. + id_ : dict | None + ID to use for the FIFF_FILE_ID. + """ + if _file_like(fname): + logger.debug(f"Writing using {type(fname)} I/O") + fid = fname + fid.seek(0) + else: + fname = str(fname) + if op.splitext(fname)[1].lower() == ".gz": + logger.debug("Writing using gzip") + # defaults to compression level 9, which is barely smaller but much + # slower. 2 offers a good compromise. 
+ fid = GzipFile(fname, "wb", compresslevel=2) + else: + logger.debug("Writing using normal I/O") + fid = open(fname, "wb") + # Write the compulsory items + write_id(fid, FIFF.FIFF_FILE_ID, id_) + write_int(fid, FIFF.FIFF_DIR_POINTER, -1) + write_int(fid, FIFF.FIFF_FREE_LIST, -1) + return fid + + +@contextmanager +def start_and_end_file(fname, id_=None): + """Start and (if successfully written) close the file.""" + with start_file(fname, id_=id_) as fid: + yield fid + end_file(fid) # we only hit this line if the yield does not err + + +def check_fiff_length(fid, close=True): + """Ensure our file hasn't grown too large to work properly.""" + if fid.tell() > 2147483648: # 2 ** 31, FIFF uses signed 32-bit locations + if close: + fid.close() + raise OSError( + "FIFF file exceeded 2GB limit, please split file, reduce" + " split_size (if possible), or save to a different " + "format" + ) + + +def end_file(fid): + """Write the closing tags to a fif file and closes the file.""" + write_nop(fid, last=True) + check_fiff_length(fid) + fid.close() + + +def write_coord_trans(fid, trans): + """Write a coordinate transformation structure.""" + data_size = 4 * 2 * 12 + 4 * 2 + fid.write(np.array(FIFF.FIFF_COORD_TRANS, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFT_COORD_TRANS_STRUCT, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + fid.write(np.array(trans["from"], dtype=">i4").tobytes()) + fid.write(np.array(trans["to"], dtype=">i4").tobytes()) + + # The transform... + rot = trans["trans"][:3, :3] + move = trans["trans"][:3, 3] + fid.write(np.array(rot, dtype=">f4").tobytes()) + fid.write(np.array(move, dtype=">f4").tobytes()) + + # ...and its inverse + trans_inv = np.linalg.inv(trans["trans"]) + rot = trans_inv[:3, :3] + move = trans_inv[:3, 3] + fid.write(np.array(rot, dtype=">f4").tobytes()) + fid.write(np.array(move, dtype=">f4").tobytes()) + + +def write_ch_info(fid, ch): + """Write a channel information record to a fif file.""" + data_size = 4 * 13 + 4 * 7 + 16 + + fid.write(np.array(FIFF.FIFF_CH_INFO, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFT_CH_INFO_STRUCT, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + + # Start writing fiffChInfoRec + fid.write(np.array(ch["scanno"], dtype=">i4").tobytes()) + fid.write(np.array(ch["logno"], dtype=">i4").tobytes()) + fid.write(np.array(ch["kind"], dtype=">i4").tobytes()) + fid.write(np.array(ch["range"], dtype=">f4").tobytes()) + fid.write(np.array(ch["cal"], dtype=">f4").tobytes()) + fid.write(np.array(ch["coil_type"], dtype=">i4").tobytes()) + fid.write(np.array(ch["loc"], dtype=">f4").tobytes()) # writing 12 values + + # unit and unit multiplier + fid.write(np.array(ch["unit"], dtype=">i4").tobytes()) + fid.write(np.array(ch["unit_mul"], dtype=">i4").tobytes()) + + # Finally channel name + ch_name = ch["ch_name"][:15] + fid.write(np.array(ch_name, dtype=">c").tobytes()) + fid.write(b"\0" * (16 - len(ch_name))) + + +def write_dig_points(fid, dig, block=False, coord_frame=None, *, ch_names=None): + """Write a set of digitizer data points into a fif file.""" + if dig is not None: + data_size = 5 * 4 + if block: + start_block(fid, FIFF.FIFFB_ISOTRAK) + if coord_frame is not None: + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, coord_frame) + for d in dig: + fid.write(np.array(FIFF.FIFF_DIG_POINT, ">i4").tobytes()) + 
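`start_and_end_file` above relies on a property of generator-based context managers: code after the `yield` only runs if the `with` body completes, so the closing tags are written only on success. A toy version makes the control flow explicit:

```python
from contextlib import contextmanager

events = []


@contextmanager
def start_and_end(name):
    events.append(f"open {name}")
    yield name
    events.append(f"close {name}")  # reached only if the with-body succeeds


with start_and_end("good.fif"):
    pass

try:
    with start_and_end("bad.fif"):
        raise RuntimeError("boom")
except RuntimeError:
    pass

assert events == ["open good.fif", "close good.fif", "open bad.fif"]
```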
fid.write(np.array(FIFF.FIFFT_DIG_POINT_STRUCT, ">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, ">i4").tobytes()) + # Start writing fiffDigPointRec + fid.write(np.array(d["kind"], ">i4").tobytes()) + fid.write(np.array(d["ident"], ">i4").tobytes()) + fid.write(np.array(d["r"][:3], ">f4").tobytes()) + if ch_names is not None: + write_name_list_sanitized( + fid, FIFF.FIFF_MNE_CH_NAME_LIST, ch_names, "ch_names" + ) + if block: + end_block(fid, FIFF.FIFFB_ISOTRAK) + + +def write_float_sparse_rcs(fid, kind, mat): + """Write a single-precision sparse compressed row matrix tag.""" + return write_float_sparse(fid, kind, mat, fmt="csr") + + +def write_float_sparse(fid, kind, mat, fmt="auto"): + """Write a single-precision floating-point sparse matrix tag.""" + if fmt == "auto": + fmt = "csr" if isinstance(mat, csr_array) else "csc" + need = csr_array if fmt == "csr" else csc_array + matrix_type = getattr(FIFF, f"FIFFT_SPARSE_{fmt[-1].upper()}CS_MATRIX") + _validate_type(mat, need, "sparse") + matrix_type = matrix_type | FIFF.FIFFT_MATRIX | FIFF.FIFFT_FLOAT + nnzm = mat.nnz + nrow = mat.shape[0] + data_size = 4 * nnzm + 4 * nnzm + 4 * (nrow + 1) + 4 * 4 + + fid.write(np.array(kind, dtype=">i4").tobytes()) + fid.write(np.array(matrix_type, dtype=">i4").tobytes()) + fid.write(np.array(data_size, dtype=">i4").tobytes()) + fid.write(np.array(FIFF.FIFFV_NEXT_SEQ, dtype=">i4").tobytes()) + + fid.write(np.array(mat.data, dtype=">f4").tobytes()) + fid.write(np.array(mat.indices, dtype=">i4").tobytes()) + fid.write(np.array(mat.indptr, dtype=">i4").tobytes()) + + dims = [nnzm, mat.shape[0], mat.shape[1], 2] + fid.write(np.array(dims, dtype=">i4").tobytes()) + check_fiff_length(fid) + + +def _generate_meas_id(): + """Generate a new meas_id dict.""" + id_ = dict() + id_["version"] = FIFF.FIFFC_VERSION + id_["machid"] = get_machid() + id_["secs"], id_["usecs"] = DATE_NONE + return id_ diff --git a/mne/_freesurfer.py b/mne/_freesurfer.py new file mode 100644 index 0000000..9b4ec3d --- /dev/null +++ b/mne/_freesurfer.py @@ -0,0 +1,854 @@ +"""Freesurfer handling functions.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
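In `write_float_sparse` below, `data_size` counts the payload bytes: 4 per nonzero value, 4 per index, 4 per `indptr` entry, plus four int32 dimension words. A quick consistency check against SciPy arrays, assuming the same CSR layout:

```python
import numpy as np
from scipy.sparse import csr_array

mat = csr_array(np.array([[0.0, 1.5, 0.0], [2.5, 0.0, 3.5]], dtype=np.float32))
nnzm, nrow = mat.nnz, mat.shape[0]

data_size = 4 * nnzm + 4 * nnzm + 4 * (nrow + 1) + 4 * 4  # formula from the code

payload = (
    mat.data.astype(">f4").nbytes
    + mat.indices.astype(">i4").nbytes
    + mat.indptr.astype(">i4").nbytes
    + np.array([nnzm, mat.shape[0], mat.shape[1], 2], dtype=">i4").nbytes
)
assert data_size == payload  # 12 + 12 + 12 + 16 = 52 bytes for this matrix
```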
+ +import os.path as op +from gzip import GzipFile +from pathlib import Path + +import numpy as np + +from ._fiff.constants import FIFF +from ._fiff.meas_info import read_fiducials +from .surface import _read_mri_surface, read_surface +from .transforms import ( + Transform, + _ensure_trans, + apply_trans, + combine_transforms, + invert_transform, + read_ras_mni_t, +) +from .utils import ( + _check_fname, + _check_option, + _import_nibabel, + _validate_type, + get_subjects_dir, + logger, + verbose, +) + + +def _check_subject_dir(subject, subjects_dir): + """Check that the Freesurfer subject directory is as expected.""" + subjects_dir = Path(get_subjects_dir(subjects_dir, raise_error=True)) + for img_name in ("T1", "brain", "aseg"): + if not (subjects_dir / subject / "mri" / f"{img_name}.mgz").is_file(): + raise ValueError( + "Freesurfer recon-all subject folder " + "is incorrect or improperly formatted, " + f"got {subjects_dir / subject}" + ) + return subjects_dir / subject + + +def _get_aseg(aseg, subject, subjects_dir): + """Check that the anatomical segmentation file exists and load it.""" + nib = _import_nibabel("load aseg") + subjects_dir = Path(get_subjects_dir(subjects_dir, raise_error=True)) + if aseg == "auto": # use aparc+aseg if auto + aseg = _check_fname( + subjects_dir / subject / "mri" / "aparc+aseg.mgz", + overwrite="read", + must_exist=False, + ) + if not aseg: # if doesn't exist use wmparc + aseg = subjects_dir / subject / "mri" / "wmparc.mgz" + else: + aseg = subjects_dir / subject / "mri" / f"{aseg}.mgz" + _check_fname(aseg, overwrite="read", must_exist=True) + aseg = nib.load(aseg) + aseg_data = np.array(aseg.dataobj) + return aseg, aseg_data + + +def _reorient_image(img, axcodes="RAS"): + """Reorient an image to a given orientation. + + Parameters + ---------- + img : instance of SpatialImage + The MRI image. + axcodes : tuple | str + The axis codes specifying the orientation, e.g. "RAS". + See :func:`nibabel.orientations.aff2axcodes`. + + Returns + ------- + img_data : ndarray + The reoriented image data. + vox_ras_t : ndarray + The new transform from the new voxels to surface RAS. + + Notes + ----- + .. versionadded:: 0.24 + """ + nib = _import_nibabel("reorient MRI image") + orig_data = np.array(img.dataobj).astype(np.float32) + # reorient data to RAS + ornt = nib.orientations.axcodes2ornt( + nib.orientations.aff2axcodes(img.affine) + ).astype(int) + ras_ornt = nib.orientations.axcodes2ornt(axcodes) + ornt_trans = nib.orientations.ornt_transform(ornt, ras_ornt) + img_data = nib.orientations.apply_orientation(orig_data, ornt_trans) + orig_mgh = nib.MGHImage(orig_data, img.affine) + aff_trans = nib.orientations.inv_ornt_aff(ornt_trans, img.shape) + vox_ras_t = np.dot(orig_mgh.header.get_vox2ras_tkr(), aff_trans) + return img_data, vox_ras_t + + +def _mri_orientation(orientation): + """Get MRI orientation information from an image. + + Parameters + ---------- + orientation : str + Orientation that you want. Can be "axial", "sagittal", or "coronal". + + Returns + ------- + axis : int + The dimension of the axis to take slices over when plotting. + x : int + The dimension of the x axis. + y : int + The dimension of the y axis. + + Notes + ----- + .. versionadded:: 0.21 + .. 
versionchanged:: 0.24
+    """
+    _check_option("orientation", orientation, ("coronal", "axial", "sagittal"))
+    axis = dict(coronal=1, axial=2, sagittal=0)[orientation]
+    x, y = sorted(set([0, 1, 2]).difference(set([axis])))
+    return axis, x, y
+
+
+def _get_mri_info_data(mri, data):
+    # Read the segmentation data using nibabel
+    if data:
+        _import_nibabel("load MRI atlas data")
+    out = dict()
+    _, out["vox_mri_t"], out["mri_ras_t"], dims, _, mgz = _read_mri_info(
+        mri, return_img=True
+    )
+    out.update(
+        mri_width=dims[0], mri_height=dims[1], mri_depth=dims[2], mri_volume_name=mri
+    )
+    if data:
+        assert mgz is not None
+        out["mri_vox_t"] = invert_transform(out["vox_mri_t"])
+        out["data"] = np.asarray(mgz.dataobj)
+    return out
+
+
+def _get_mgz_header(fname):
+    """Adapted from nibabel to quickly extract header info."""
+    fname = _check_fname(fname, overwrite="read", must_exist=True, name="MRI image")
+    if fname.suffix != ".mgz":
+        raise OSError("Filename must end with .mgz")
+    header_dtd = [
+        ("version", ">i4"),
+        ("dims", ">i4", (4,)),
+        ("type", ">i4"),
+        ("dof", ">i4"),
+        ("goodRASFlag", ">i2"),
+        ("delta", ">f4", (3,)),
+        ("Mdc", ">f4", (3, 3)),
+        ("Pxyz_c", ">f4", (3,)),
+    ]
+    header_dtype = np.dtype(header_dtd)
+    with GzipFile(fname, "rb") as fid:
+        hdr_str = fid.read(header_dtype.itemsize)
+    header = np.ndarray(shape=(), dtype=header_dtype, buffer=hdr_str)
+    # dims
+    dims = header["dims"].astype(int)
+    dims = dims[:3] if len(dims) == 4 else dims
+    # vox2ras_tkr
+    delta = header["delta"]
+    ds = np.array(delta, float)
+    ns = np.array(dims * ds) / 2.0
+    v2rtkr = np.array(
+        [
+            [-ds[0], 0, 0, ns[0]],
+            [0, 0, ds[2], -ns[2]],
+            [0, -ds[1], 0, ns[1]],
+            [0, 0, 0, 1],
+        ],
+        dtype=np.float32,
+    )
+    # ras2vox
+    d = np.diag(delta)
+    pcrs_c = dims / 2.0
+    Mdc = header["Mdc"].T
+    pxyz_0 = header["Pxyz_c"] - np.dot(Mdc, np.dot(d, pcrs_c))
+    M = np.eye(4, 4)
+    M[0:3, 0:3] = np.dot(Mdc, d)
+    M[0:3, 3] = pxyz_0.T
+    header = dict(dims=dims, vox2ras_tkr=v2rtkr, vox2ras=M, zooms=header["delta"])
+    return header
+
+
+def _get_atlas_values(vol_info, rr):
+    # Transform MRI coordinates (where our surfaces live) to voxels
+    rr_vox = apply_trans(vol_info["mri_vox_t"], rr)
+    good = (
+        (rr_vox >= -0.5) & (rr_vox < np.array(vol_info["data"].shape, int) - 0.5)
+    ).all(-1)
+    idx = np.round(rr_vox[good].T).astype(np.int64)
+    values = np.full(rr.shape[0], np.nan)
+    values[good] = vol_info["data"][tuple(idx)]
+    return values
+
+
+def get_volume_labels_from_aseg(mgz_fname, return_colors=False, atlas_ids=None):
+    """Return a list of names and colors of segmented volumes.
+
+    Parameters
+    ----------
+    mgz_fname : path-like
+        Filename to read. Typically ``aseg.mgz`` or some variant in the
+        freesurfer pipeline.
+    return_colors : bool
+        If True returns also the labels colors.
+    atlas_ids : dict | None
+        A lookup table providing a mapping from region names (str) to ID values
+        (int). Can be None to use the standard Freesurfer LUT.
+
+        .. versionadded:: 0.21.0
+
+    Returns
+    -------
+    label_names : list of str
+        The names of segmented volumes included in this mgz file.
+    label_colors : list of str
+        The RGB colors of the labels included in this mgz file.
+
+    See Also
+    --------
+    read_freesurfer_lut
+
+    Notes
+    -----
+    .. versionchanged:: 0.21.0
+       The label names are now sorted in the same order as their corresponding
+       values in the MRI file.
+
+    .. 
versionadded:: 0.9.0 + """ + nib = _import_nibabel("load MRI atlas data") + mgz_fname = _check_fname( + mgz_fname, overwrite="read", must_exist=True, name="mgz_fname" + ) + atlas = nib.load(mgz_fname) + data = np.asarray(atlas.dataobj) # don't need float here + want = np.unique(data) + if atlas_ids is None: + atlas_ids, colors = read_freesurfer_lut() + elif return_colors: + raise ValueError("return_colors must be False if atlas_ids are provided") + # restrict to the ones in the MRI, sorted by label name + keep = np.isin(list(atlas_ids.values()), want) + keys = sorted( + (key for ki, key in enumerate(atlas_ids.keys()) if keep[ki]), + key=lambda x: atlas_ids[x], + ) + if return_colors: + colors = [colors[k] for k in keys] + out = keys, colors + else: + out = keys + return out + + +############################################################################## +# Head to MRI volume conversion + + +@verbose +def head_to_mri( + pos, + subject, + mri_head_t, + subjects_dir=None, + *, + kind="mri", + unscale=False, + verbose=None, +): + """Convert pos from head coordinate system to MRI ones. + + Parameters + ---------- + pos : array, shape (n_pos, 3) + The coordinates (in m) in head coordinate system. + %(subject)s + mri_head_t : instance of Transform + MRI<->Head coordinate transformation. + %(subjects_dir)s + kind : str + The MRI coordinate frame kind, can be ``'mri'`` (default) for + FreeSurfer surface RAS or ``'ras'`` (default in 1.2) to use MRI RAS + (scanner RAS). + + .. versionadded:: 1.2 + unscale : bool + For surrogate MRIs (e.g., scaled using ``mne coreg``), if True + (default False), use the MRI scaling parameters to obtain points in + the original/surrogate subject's MRI space. + + .. versionadded:: 1.2 + %(verbose)s + + Returns + ------- + coordinates : array, shape (n_pos, 3) + The MRI RAS coordinates (in mm) of pos. + + Notes + ----- + This function requires nibabel. + """ + from .coreg import read_mri_cfg + + _validate_type(kind, str, "kind") + _check_option("kind", kind, ("ras", "mri")) + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + t1_fname = subjects_dir / subject / "mri" / "T1.mgz" + head_mri_t = _ensure_trans(mri_head_t, "head", "mri") + if kind == "ras": + _, _, mri_ras_t, _, _ = _read_mri_info(t1_fname) + head_ras_t = combine_transforms(head_mri_t, mri_ras_t, "head", "ras") + head_dest_t = head_ras_t + else: + assert kind == "mri" + head_dest_t = head_mri_t + pos_dest = apply_trans(head_dest_t, pos) + # unscale if requested + if unscale: + params = read_mri_cfg(subject, subjects_dir) + pos_dest /= params["scale"] + pos_dest *= 1e3 # mm + return pos_dest + + +############################################################################## +# Surface to MNI conversion + + +@verbose +def vertex_to_mni(vertices, hemis, subject, subjects_dir=None, verbose=None): + """Convert the array of vertices for a hemisphere to MNI coordinates. + + Parameters + ---------- + vertices : int, or list of int + Vertex number(s) to convert. + hemis : int, or list of int + Hemisphere(s) the vertices belong to. + %(subject)s + subjects_dir : str, or None + Path to ``SUBJECTS_DIR`` if it is not set in the environment. + %(verbose)s + + Returns + ------- + coordinates : array, shape (n_vertices, 3) + The MNI coordinates (in mm) of the vertices. 
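At its core, `head_to_mri` above applies a 4x4 homogeneous transform to row vectors of positions and rescales metres to millimetres. A self-contained numpy version of just that step; the affine here is a made-up translation, not a real head-to-MRI transform:

```python
import numpy as np


def apply_affine(trans, pts):
    """Apply a 4x4 homogeneous transform to an (n, 3) array of points."""
    pts = np.atleast_2d(pts)
    homo = np.hstack([pts, np.ones((len(pts), 1))])  # append w=1 to each point
    return (homo @ trans.T)[:, :3]


head_mri_t = np.eye(4)
head_mri_t[:3, 3] = [0.001, -0.002, 0.03]  # translation in metres

pos_head = np.array([[0.01, 0.02, 0.04]])  # a point in head coords (m)
pos_mri_mm = apply_affine(head_mri_t, pos_head) * 1e3  # -> MRI coords in mm
assert np.allclose(pos_mri_mm, [[11.0, 18.0, 70.0]])
```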
+ """ + singleton = False + if not isinstance(vertices, list) and not isinstance(vertices, np.ndarray): + singleton = True + vertices = [vertices] + + if not isinstance(hemis, list) and not isinstance(hemis, np.ndarray): + hemis = [hemis] * len(vertices) + + if not len(hemis) == len(vertices): + raise ValueError("hemi and vertices must match in length") + + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + + surfs = [subjects_dir / subject / "surf" / f"{h}.white" for h in ["lh", "rh"]] + + # read surface locations in MRI space + rr = [read_surface(s)[0] for s in surfs] + + # take point locations in MRI space and convert to MNI coordinates + xfm = read_talxfm(subject, subjects_dir) + xfm["trans"][:3, 3] *= 1000.0 # m->mm + data = np.array([rr[h][v, :] for h, v in zip(hemis, vertices)]) + if singleton: + data = data[0] + return apply_trans(xfm["trans"], data) + + +############################################################################## +# Volume to MNI conversion + + +@verbose +def head_to_mni(pos, subject, mri_head_t, subjects_dir=None, verbose=None): + """Convert pos from head coordinate system to MNI ones. + + Parameters + ---------- + pos : array, shape (n_pos, 3) + The coordinates (in m) in head coordinate system. + %(subject)s + mri_head_t : instance of Transform + MRI<->Head coordinate transformation. + %(subjects_dir)s + %(verbose)s + + Returns + ------- + coordinates : array, shape (n_pos, 3) + The MNI coordinates (in mm) of pos. + + Notes + ----- + This function requires either nibabel. + """ + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + + # before we go from head to MRI (surface RAS) + head_mni_t = combine_transforms( + _ensure_trans(mri_head_t, "head", "mri"), + read_talxfm(subject, subjects_dir), + "head", + "mni_tal", + ) + return apply_trans(head_mni_t, pos) * 1000.0 + + +@verbose +def get_mni_fiducials(subject, subjects_dir=None, verbose=None): + """Estimate fiducials for a subject. + + Parameters + ---------- + %(subject)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + fids_mri : list + List of estimated fiducials (each point in a dict), in the order + LPA, nasion, RPA. + + Notes + ----- + This takes the ``fsaverage-fiducials.fif`` file included with MNE—which + contain the LPA, nasion, and RPA for the ``fsaverage`` subject—and + transforms them to the given FreeSurfer subject's MRI space. + The MRI of ``fsaverage`` is already in MNI Talairach space, so applying + the inverse of the given subject's MNI Talairach affine transformation + (``$SUBJECTS_DIR/$SUBJECT/mri/transforms/talairach.xfm``) is used + to estimate the subject's fiducial locations. + + For more details about the coordinate systems and transformations involved, + see https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems and + :ref:`tut-source-alignment`. + """ + # Eventually we might want to allow using the MNI Talairach with-skull + # transformation rather than the standard brain-based MNI Talaranch + # transformation, and/or project the points onto the head surface + # (if available). + fname_fids_fs = ( + Path(__file__).parent / "data" / "fsaverage" / "fsaverage-fiducials.fif" + ) + + # Read fsaverage fiducials file and subject Talairach. 
+ fids, coord_frame = read_fiducials(fname_fids_fs) + assert coord_frame == FIFF.FIFFV_COORD_MRI + if subject == "fsaverage": + return fids # special short-circuit for fsaverage + mni_mri_t = invert_transform(read_talxfm(subject, subjects_dir)) + for f in fids: + f["r"] = apply_trans(mni_mri_t, f["r"]) + return fids + + +@verbose +def estimate_head_mri_t(subject, subjects_dir=None, verbose=None): + """Estimate the head->mri transform from fsaverage fiducials. + + A subject's fiducials can be estimated given a Freesurfer ``recon-all`` + by transforming ``fsaverage`` fiducials using the inverse Talairach + transform, see :func:`mne.coreg.get_mni_fiducials`. + + Parameters + ---------- + %(subject)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + %(trans_not_none)s + """ + from .channels.montage import compute_native_head_t, make_dig_montage + + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + lpa, nasion, rpa = get_mni_fiducials(subject, subjects_dir) + montage = make_dig_montage( + lpa=lpa["r"], nasion=nasion["r"], rpa=rpa["r"], coord_frame="mri" + ) + return invert_transform(compute_native_head_t(montage)) + + +def _get_affine_from_lta_info(lines): + """Get the vox2ras affine from lta file info.""" + volume_data = np.loadtxt([line.split("=")[1] for line in lines]) + # get the size of the volume (number of voxels), slice resolution. + # the matrix of directional cosines and the ras at the center of the bore + dims, deltas, dir_cos, center_ras = ( + volume_data[0], + volume_data[1], + volume_data[2:5], + volume_data[5], + ) + dir_cos_delta = dir_cos.T * deltas + vol_center = (dir_cos_delta @ dims[:3]) / 2 + affine = np.eye(4) + affine[:3, :3] = dir_cos_delta + affine[:3, 3] = center_ras - vol_center + return affine + + +@verbose +def read_lta(fname, verbose=None): + """Read a Freesurfer linear transform array file. + + Parameters + ---------- + fname : path-like + The transform filename. + %(verbose)s + + Returns + ------- + affine : ndarray + The affine transformation described by the lta file. + """ + _check_fname(fname, "read", must_exist=True) + with open(fname) as fid: + lines = fid.readlines() + # 0 is linear vox2vox, 1 is linear ras2ras + trans_type = int(lines[0].split("=")[1].strip()[0]) + assert trans_type in (0, 1) + affine = np.loadtxt(lines[5:9]) + if trans_type == 1: + return affine + + src_affine = _get_affine_from_lta_info(lines[12:18]) + dst_affine = _get_affine_from_lta_info(lines[21:27]) + + # don't compute if src and dst are already identical + if np.allclose(src_affine, dst_affine): + return affine + + ras2ras = src_affine @ np.linalg.inv(affine) @ np.linalg.inv(dst_affine) + affine = np.linalg.inv(np.linalg.inv(src_affine) @ ras2ras @ src_affine) + return affine + + +@verbose +def read_talxfm(subject, subjects_dir=None, verbose=None): + """Compute MRI-to-MNI transform from FreeSurfer talairach.xfm file. + + Parameters + ---------- + %(subject)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + mri_mni_t : instance of Transform + The affine transformation from MRI to MNI space for the subject. + """ + # Adapted from freesurfer m-files. Altered to deal with Norig + # and Torig correctly + subjects_dir = get_subjects_dir(subjects_dir) + # Setup the RAS to MNI transform + ras_mni_t = read_ras_mni_t(subject, subjects_dir) + ras_mni_t["trans"][:3, 3] /= 1000.0 # mm->m + + # We want to get from Freesurfer surface RAS ('mri') to MNI ('mni_tal'). + # This file only gives us RAS (non-zero origin) ('ras') to MNI ('mni_tal'). 
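`_get_affine_from_lta_info` above rebuilds a vox2ras affine from voxel counts, voxel sizes, direction cosines, and the RAS coordinate of the volume centre: the rotation/zoom block is `dir_cos.T * deltas`, and the translation places the volume centre at `center_ras`. The construction is easy to sanity-check on an axis-aligned volume:

```python
import numpy as np

dims = np.array([256.0, 256.0, 256.0])   # volume size in voxels
deltas = np.array([1.0, 1.0, 1.0])       # voxel size in mm
dir_cos = np.eye(3)                      # axis-aligned direction cosines
center_ras = np.array([5.0, -7.0, 2.0])  # RAS of the volume centre

dir_cos_delta = dir_cos.T * deltas
affine = np.eye(4)
affine[:3, :3] = dir_cos_delta
affine[:3, 3] = center_ras - (dir_cos_delta @ dims) / 2

# The centre voxel must land exactly on center_ras
center_vox = np.append(dims / 2, 1.0)
assert np.allclose((affine @ center_vox)[:3], center_ras)
```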
+    # So we need to get the ras->mri transform from the MRI headers.
+
+    # To do this, we get Norig and Torig
+    # (i.e. vox_ras_t and vox_mri_t, respectively)
+    path = subjects_dir / subject / "mri" / "orig.mgz"
+    if not path.is_file():
+        path = subjects_dir / subject / "mri" / "T1.mgz"
+    if not path.is_file():
+        raise OSError(f"mri not found: {path}")
+    _, _, mri_ras_t, _, _ = _read_mri_info(path)
+    mri_mni_t = combine_transforms(mri_ras_t, ras_mni_t, "mri", "mni_tal")
+    return mri_mni_t
+
+
+def _check_mri(mri, subject, subjects_dir) -> str:
+    """Check whether an mri exists in the Freesurfer subject directory."""
+    _validate_type(mri, "path-like", mri)
+    mri = Path(mri)
+    if mri.is_file() and mri.name != mri:
+        return str(mri)
+    elif not mri.is_file():
+        if subject is None:
+            raise FileNotFoundError(
+                f"MRI file {mri!r} not found and no subject provided."
+            )
+        subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+        mri = subjects_dir / subject / "mri" / mri
+        if not mri.is_file():
+            raise FileNotFoundError(
+                f"MRI file {mri!r} not found in the subjects directory "
+                f"{subjects_dir!r} for subject {subject}."
+            )
+    if mri.name == mri:
+        raise OSError(
+            f"Ambiguous filename - found {mri!r} in current folder. "
+            "If this is correct prefix name with relative or absolute path."
+        )
+    return str(mri)
+
+
+def _read_mri_info(path, units="m", return_img=False, use_nibabel=False):
+    # This is equivalent but 100x slower, so only use nibabel if we need to
+    # (later):
+    if use_nibabel:
+        nib = _import_nibabel()
+        hdr = nib.load(path).header
+        n_orig = hdr.get_vox2ras()
+        t_orig = hdr.get_vox2ras_tkr()
+        dims = hdr.get_data_shape()
+        zooms = hdr.get_zooms()[:3]
+    else:
+        hdr = _get_mgz_header(path)
+        n_orig = hdr["vox2ras"]
+        t_orig = hdr["vox2ras_tkr"]
+        dims = hdr["dims"]
+        zooms = hdr["zooms"]
+
+    # extract the MRI_VOXEL to RAS (non-zero origin) transform
+    vox_ras_t = Transform("mri_voxel", "ras", n_orig)
+
+    # extract the MRI_VOXEL to MRI transform
+    vox_mri_t = Transform("mri_voxel", "mri", t_orig)
+
+    # construct the MRI to RAS (non-zero origin) transform
+    mri_ras_t = combine_transforms(invert_transform(vox_mri_t), vox_ras_t, "mri", "ras")
+
+    assert units in ("m", "mm")
+    if units == "m":
+        conv = np.array([[1e-3, 1e-3, 1e-3, 1]]).T
+        # scaling and translation terms
+        vox_ras_t["trans"] *= conv
+        vox_mri_t["trans"] *= conv
+        # just the translation term
+        mri_ras_t["trans"][:, 3:4] *= conv
+
+    out = (vox_ras_t, vox_mri_t, mri_ras_t, dims, zooms)
+    if return_img:
+        nibabel = _import_nibabel()
+        out += (nibabel.load(path),)
+    return out
+
+
+def read_freesurfer_lut(fname=None):
+    """Read a Freesurfer-formatted LUT.
+
+    Parameters
+    ----------
+    fname : path-like | None
+        The filename. Can be None to read the standard Freesurfer LUT.
+
+    Returns
+    -------
+    atlas_ids : dict
+        Mapping from label names to IDs.
+    colors : dict
+        Mapping from label names to colors.
+    """
+    lut = _get_lut(fname)
+    names, ids = lut["name"], lut["id"]
+    colors = np.array([lut["R"], lut["G"], lut["B"], lut["A"]], float).T
+    atlas_ids = dict(zip(names, ids))
+    colors = dict(zip(names, colors))
+    return atlas_ids, colors
+
+
+def _get_lut(fname=None):
+    """Get a FreeSurfer LUT."""
+    if fname is None:
+        fname = Path(__file__).parent / "data" / "FreeSurferColorLUT.txt"
+    _check_fname(fname, "read", must_exist=True)
+    dtype = [
+        ("id", "<i8"),
+        ("name", "U47"),
+        ("R", "<i8"),
+        ("G", "<i8"),
+        ("B", "<i8"),
+        ("A", "<i8"),
+    ]
+    lut = np.genfromtxt(fname, dtype=dtype)
+    assert len(lut) > 0
+    lut["name"] = [str(name) for name in lut["name"]]
+    return lut
+
+
+@verbose
+def _get_head_surface(surf, subject, subjects_dir, bem=None, verbose=None):
+    """Get a head surface from the Freesurfer subject directory.
+
+    Parameters
+    ----------
+    surf : str
+        The name of the surface 'auto', 'head', 'outer_skin', 'head-dense'
+        or 'seghead'.
+    %(subject)s
+    %(subjects_dir)s
+    bem : mne.bem.ConductorModel | None
+        The conductor model that stores information about the head surface.
+    %(verbose)s
+
+    Returns
+    -------
+    head_surf : dict | None
+        A dictionary with keys 'rr', 'tris', 'ntri', 'use_tris', 'np'
+        and 'coord_frame' that store information for mesh plotting and other
+        useful information about the head surface.
+
+    Notes
+    -----
+    .. versionadded: 0.24
+    """
+    from .bem import _bem_find_surface, read_bem_surfaces
+
+    _check_option("surf", surf, ("auto", "head", "outer_skin", "head-dense", "seghead"))
+    if surf in ("auto", "head", "outer_skin"):
+        if bem is not None:
+            try:
+                return _bem_find_surface(bem, "head")
+            except RuntimeError:
+                logger.info(
+                    "Could not find the surface for "
+                    "head in the provided BEM model, "
+                    "looking in the subject directory."
+                )
+    if subject is None:
+        if surf == "auto":
+            return
+        raise ValueError(
+            "To plot the head surface, the BEM/sphere"
+            " model must contain a head surface "
+            'or "subject" must be provided (got '
+            "None)"
+        )
+    subject_dir = op.join(get_subjects_dir(subjects_dir, raise_error=True), subject)
+    if surf in ("head-dense", "seghead"):
+        try_fnames = [
+            op.join(subject_dir, "bem", f"{subject}-head-dense.fif"),
+            op.join(subject_dir, "surf", "lh.seghead"),
+        ]
+    else:
+        try_fnames = [
+            op.join(subject_dir, "bem", "outer_skin.surf"),
+            op.join(subject_dir, "bem", "flash", "outer_skin.surf"),
+            op.join(subject_dir, "bem", f"{subject}-head-sparse.fif"),
+            op.join(subject_dir, "bem", f"{subject}-head.fif"),
+        ]
+    for fname in try_fnames:
+        if op.exists(fname):
+            logger.info(f"Using {op.basename(fname)} for head surface.")
+            if op.splitext(fname)[-1] == ".fif":
+                return read_bem_surfaces(fname, on_defects="warn")[0]
+            else:
+                return _read_mri_surface(fname)
+    raise OSError(
+        "No head surface found for subject "
+        f"{subject} after trying:\n" + "\n".join(try_fnames)
+    )
+
+
+@verbose
+def _get_skull_surface(surf, subject, subjects_dir, bem=None, verbose=None):
+    """Get a skull surface from the Freesurfer subject directory.
+
+    Parameters
+    ----------
+    surf : str
+        The name of the surface 'outer' or 'inner'.
+    %(subject)s
+    %(subjects_dir)s
+    bem : mne.bem.ConductorModel | None
+        The conductor model that stores information about the skull surface.
+    %(verbose)s
+
+    Returns
+    -------
+    skull_surf : dict | None
+        A dictionary with keys 'rr', 'tris', 'ntri', 'use_tris', 'np'
+        and 'coord_frame' that store information for mesh plotting and other
+        useful information about the head surface.
+
+    Notes
+    -----
+    .. 
versionadded: 0.24 + """ + from .bem import _bem_find_surface + + if bem is not None: + try: + return _bem_find_surface(bem, surf + "_skull") + except RuntimeError: + logger.info( + "Could not find the surface for " + "skull in the provided BEM model, " + "looking in the subject directory." + ) + subjects_dir = Path(get_subjects_dir(subjects_dir, raise_error=True)) + fname = _check_fname( + subjects_dir / subject / "bem" / (surf + "_skull.surf"), + overwrite="read", + must_exist=True, + name=f"{surf} skull surface", + ) + return _read_mri_surface(fname) + + +def _estimate_talxfm_rigid(subject, subjects_dir): + from .coreg import _trans_from_params, fit_matched_points + + xfm = read_talxfm(subject, subjects_dir) + # XYZ+origin + halfway + pts_tal = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5]) + pts_subj = apply_trans(invert_transform(xfm), pts_tal) + # we fit with scaling enabled, but then discard it (we just need + # the rigid-body components) + params = fit_matched_points(pts_subj, pts_tal, scale=3, out="params") + rigid = _trans_from_params((True, True, False), params[:6]) + return rigid diff --git a/mne/_ola.py b/mne/_ola.py new file mode 100644 index 0000000..135ff83 --- /dev/null +++ b/mne/_ola.py @@ -0,0 +1,463 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np +from scipy.signal import get_window + +from .utils import _ensure_int, logger, verbose + +############################################################################### +# Class for interpolation between adjacent points + + +class _Interp2: + r"""Interpolate between two points. + + Parameters + ---------- + control_points : array, shape (n_changes,) + The control points (indices) to use. + values : callable | array, shape (n_changes, ...) + Callable that takes the control point and returns a list of + arrays that must be interpolated. + interp : str + Can be 'zero', 'linear', 'hann', or 'cos2' (same as hann). + + Notes + ----- + This will process data using overlapping windows of potentially + different sizes to achieve a constant output value using different + 2-point interpolation schemes. For example, for linear interpolation, + and window sizes of 6 and 17, this would look like:: + + 1 _ _ + |\ / '-. .-' + | \ / '-. .-' + | x |-.-| + | / \ .-' '-. + |/ \_.-' '-. 
+ 0 +----|----|----|----|--- + 0 5 10 15 20 25 + + """ + + def __init__(self, control_points, values, interp="hann"): + # set up interpolation + self.control_points = np.array(control_points, int).ravel() + if not np.array_equal(np.unique(self.control_points), self.control_points): + raise ValueError("Control points must be sorted and unique") + if len(self.control_points) == 0: + raise ValueError("Must be at least one control point") + if not (self.control_points >= 0).all(): + raise ValueError( + f"All control points must be positive (got {self.control_points[:3]})" + ) + if isinstance(values, np.ndarray): + values = [values] + if isinstance(values, list | tuple): + for v in values: + if not (v is None or isinstance(v, np.ndarray)): + raise TypeError( + 'All entries in "values" must be ndarray or None, got ' + f"{type(v)}" + ) + if v is not None and v.shape[0] != len(self.control_points): + raise ValueError( + "Values, if provided, must be the same length as the number of " + f"control points ({len(self.control_points)}), got {v.shape[0]}" + ) + use_values = values + + def val(pt): + idx = np.where(control_points == pt)[0][0] + return [v[idx] if v is not None else None for v in use_values] + + values = val + self.values = values + self.n_last = None + self._position = 0 # start at zero + self._left_idx = 0 + self._left = self._right = self._use_interp = None + known_types = ("cos2", "linear", "zero", "hann") + if interp not in known_types: + raise ValueError(f'interp must be one of {known_types}, got "{interp}"') + self._interp = interp + + def feed_generator(self, n_pts): + """Feed data and get interpolators as a generator.""" + self.n_last = 0 + n_pts = _ensure_int(n_pts, "n_pts") + original_position = self._position + stop = self._position + n_pts + logger.debug(f"Feed {n_pts} ({self._position}-{stop})") + used = np.zeros(n_pts, bool) + if self._left is None: # first one + logger.debug(f" Eval @ 0 ({self.control_points[0]})") + self._left = self.values(self.control_points[0]) + if len(self.control_points) == 1: + self._right = self._left + n_used = 0 + + # Left zero-order hold condition + if self._position < self.control_points[self._left_idx]: + n_use = min(self.control_points[self._left_idx] - self._position, n_pts) + logger.debug(f" Left ZOH {n_use}") + this_sl = slice(None, n_use) + assert used[this_sl].size == n_use + assert not used[this_sl].any() + used[this_sl] = True + yield [this_sl, self._left, None, None] + self._position += n_use + n_used += n_use + self.n_last += 1 + + # Standard interpolation condition + stop_right_idx = np.where(self.control_points >= stop)[0] + if len(stop_right_idx) == 0: + stop_right_idx = [len(self.control_points) - 1] + stop_right_idx = stop_right_idx[0] + left_idxs = np.arange(self._left_idx, stop_right_idx) + self.n_last += max(len(left_idxs) - 1, 0) + for bi, left_idx in enumerate(left_idxs): + if left_idx != self._left_idx or self._right is None: + if self._right is not None: + assert left_idx == self._left_idx + 1 + self._left = self._right + self._left_idx += 1 + self._use_interp = None # need to recreate it + eval_pt = self.control_points[self._left_idx + 1] + logger.debug(f" Eval @ {self._left_idx + 1} ({eval_pt})") + self._right = self.values(eval_pt) + assert self._right is not None + left_point = self.control_points[self._left_idx] + right_point = self.control_points[self._left_idx + 1] + if self._use_interp is None: + interp_span = right_point - left_point + if self._interp == "zero": + self._use_interp = None + elif self._interp == 
"linear": + self._use_interp = np.linspace( + 1.0, 0.0, interp_span, endpoint=False + ) + else: # self._interp in ('cos2', 'hann'): + self._use_interp = np.cos( + np.linspace(0, np.pi / 2.0, interp_span, endpoint=False) + ) + self._use_interp *= self._use_interp + n_use = min(stop, right_point) - self._position + if n_use > 0: + logger.debug( + f" Interp {self._interp} {n_use} ({left_point}-{right_point})" + ) + interp_start = self._position - left_point + assert interp_start >= 0 + if self._use_interp is None: + this_interp = None + else: + this_interp = self._use_interp[interp_start : interp_start + n_use] + assert this_interp.size == n_use + this_sl = slice(n_used, n_used + n_use) + assert used[this_sl].size == n_use + assert not used[this_sl].any() + used[this_sl] = True + yield [this_sl, self._left, self._right, this_interp] + self._position += n_use + n_used += n_use + + # Right zero-order hold condition + if self.control_points[self._left_idx] <= self._position: + n_use = stop - self._position + if n_use > 0: + logger.debug(f" Right ZOH {n_use}") + this_sl = slice(n_pts - n_use, None) + assert not used[this_sl].any() + used[this_sl] = True + assert self._right is not None + yield [this_sl, self._right, None, None] + self._position += n_use + n_used += n_use + self.n_last += 1 + assert self._position == stop + assert n_used == n_pts + assert used.all() + assert self._position == original_position + n_pts + + def feed(self, n_pts): + """Feed data and get interpolated values.""" + # Convenience function for assembly + out_arrays = None + for o in self.feed_generator(n_pts): + if out_arrays is None: + out_arrays = [ + np.empty(v.shape + (n_pts,)) if v is not None else None + for v in o[1] + ] + for ai, arr in enumerate(out_arrays): + if arr is not None: + if o[3] is None: + arr[..., o[0]] = o[1][ai][..., np.newaxis] + else: + arr[..., o[0]] = o[1][ai][..., np.newaxis] * o[3] + o[2][ai][ + ..., np.newaxis + ] * (1.0 - o[3]) + assert out_arrays is not None + return out_arrays + + +############################################################################### +# Constant overlap-add processing class + + +def _check_store(store): + if isinstance(store, np.ndarray): + store = [store] + if isinstance(store, list | tuple) and all( + isinstance(s, np.ndarray) for s in store + ): + store = _Storer(*store) + if not callable(store): + raise TypeError(f"store must be callable, got type {type(store)}") + return store + + +class _COLA: + r"""Constant overlap-add processing helper. + + Parameters + ---------- + process : callable + A function that takes a chunk of input data with shape + ``(n_channels, n_samples)`` and processes it. + store : callable | ndarray + A function that takes a completed chunk of output data. + Can also be an ``ndarray``, in which case it is treated as the + output data in which to store the results. + n_total : int + The total number of samples. + n_samples : int + The number of samples per window. + n_overlap : int + The overlap between windows. + window : str + The window to use. Default is "hann". + tol : float + The tolerance for COLA checking. + + Notes + ----- + This will process data using overlapping windows to achieve a constant + output value. 
For example, for ``n_total=27``, ``n_samples=10``, + ``n_overlap=5`` and ``window='triang'``:: + + 1 _____ _______ + | \ /\ /\ / + | \ / \ / \ / + | x x x + | / \ / \ / \ + | / \/ \/ \ + 0 +----|----|----|----|----|- + 0 5 10 15 20 25 + + This produces four windows: the first three are the requested length + (10 samples) and the last one is longer (12 samples). The first and last + window are asymmetric. + """ + + @verbose + def __init__( + self, + process, + store, + n_total, + n_samples, + n_overlap, + sfreq, + window="hann", + tol=1e-10, + *, + verbose=None, + ): + n_samples = _ensure_int(n_samples, "n_samples") + n_overlap = _ensure_int(n_overlap, "n_overlap") + n_total = _ensure_int(n_total, "n_total") + if n_samples <= 0: + raise ValueError(f"n_samples must be > 0, got {n_samples}") + if n_overlap < 0: + raise ValueError(f"n_overlap must be >= 0, got {n_overlap}") + if n_total < 0: + raise ValueError(f"n_total must be >= 0, got {n_total}") + self._n_samples = int(n_samples) + self._n_overlap = int(n_overlap) + del n_samples, n_overlap + if n_total < self._n_samples: + raise ValueError( + f"Number of samples per window ({self._n_samples}) must be at " + f"most the total number of samples ({n_total})" + ) + if not callable(process): + raise TypeError(f"process must be callable, got type {type(process)}") + self._process = process + self._step = self._n_samples - self._n_overlap + self._store = _check_store(store) + self._idx = 0 + self._in_buffers = self._out_buffers = None + + # Create our window boundaries + window_name = window if isinstance(window, str) else "custom" + self._window = get_window( + window, self._n_samples, fftbins=(self._n_samples - 1) % 2 + ) + self._window /= _check_cola( + self._window, self._n_samples, self._step, window_name, tol=tol + ) + self.starts = np.arange(0, n_total - self._n_samples + 1, self._step) + self.stops = self.starts + self._n_samples + delta = n_total - self.stops[-1] + self.stops[-1] = n_total + sfreq = float(sfreq) + pl = "s" if len(self.starts) != 1 else "" + logger.info( + f" Processing {len(self.starts):4d} data chunk{pl} of (at least) " + f"{self._n_samples / sfreq:0.1f} s with " + f"{self._n_overlap / sfreq:0.1f} s overlap and {window_name} windowing" + ) + del window, window_name + if delta > 0: + logger.info( + f" The final {delta / sfreq} s will be lumped into the final window" + ) + + @property + def _in_offset(self): + """Compute from current processing window start and buffer len.""" + return self.starts[self._idx] + self._in_buffers[0].shape[-1] + + @verbose + def feed(self, *datas, verbose=None, **kwargs): + """Pass in a chunk of data.""" + # Append to our input buffer + if self._in_buffers is None: + self._in_buffers = [None] * len(datas) + if len(datas) != len(self._in_buffers): + raise ValueError( + f"Got {len(datas)} array(s), needed {len(self._in_buffers)}" + ) + for di, data in enumerate(datas): + if not isinstance(data, np.ndarray) or data.ndim < 1: + raise TypeError( + f"data entry {di} must be an 2D ndarray, got {type(data)}" + ) + if self._in_buffers[di] is None: + # In practice, users can give large chunks, so we use + # dynamic allocation of the in buffer. We could save some + # memory allocation by only ever processing max_len at once, + # but this would increase code complexity. 
+                self._in_buffers[di] = np.empty(data.shape[:-1] + (0,), data.dtype)
+            if (
+                data.shape[:-1] != self._in_buffers[di].shape[:-1]
+                or self._in_buffers[di].dtype != data.dtype
+            ):
+                raise TypeError(
+                    f"data must have dtype {self._in_buffers[di].dtype} and "
+                    f"shape[:-1]=={self._in_buffers[di].shape[:-1]}, got dtype "
+                    f"{data.dtype} shape[:-1]={data.shape[:-1]}"
+                )
+            logger.debug(
+                f"    + Appending {self._in_offset:d}->"
+                f"{self._in_offset + data.shape[-1]:d}"
+            )
+            self._in_buffers[di] = np.concatenate([self._in_buffers[di], data], -1)
+            if self._in_offset > self.stops[-1]:
+                raise ValueError(
+                    f"data (shape {data.shape}) exceeded expected total buffer size ("
+                    f"{self._in_offset} > {self.stops[-1]})"
+                )
+        # Check to see if we can process the next chunk and dump outputs
+        while self._idx < len(self.starts) and self._in_offset >= self.stops[self._idx]:
+            start, stop = self.starts[self._idx], self.stops[self._idx]
+            this_len = stop - start
+            this_window = self._window.copy()
+            if self._idx == len(self.starts) - 1:
+                this_window = np.pad(
+                    self._window, (0, this_len - len(this_window)), "constant"
+                )
+                for offset in range(self._step, len(this_window), self._step):
+                    n_use = len(this_window) - offset
+                    this_window[offset:] += self._window[:n_use]
+            if self._idx == 0:
+                for offset in range(self._n_samples - self._step, 0, -self._step):
+                    this_window[:offset] += self._window[-offset:]
+            logger.debug(f"    * Processing {start}->{stop}")
+            this_proc = [in_[..., :this_len].copy() for in_ in self._in_buffers]
+            if not all(
+                proc.shape[-1] == this_len == this_window.size for proc in this_proc
+            ):
+                raise RuntimeError("internal indexing error")
+            outs = self._process(*this_proc, **kwargs)
+            if self._out_buffers is None:
+                max_len = np.max(self.stops - self.starts)
+                self._out_buffers = [
+                    np.zeros(o.shape[:-1] + (max_len,), o.dtype) for o in outs
+                ]
+            for oi, out in enumerate(outs):
+                out *= this_window
+                self._out_buffers[oi][..., : stop - start] += out
+            self._idx += 1
+            if self._idx < len(self.starts):
+                next_start = self.starts[self._idx]
+            else:
+                next_start = self.stops[-1]
+            delta = next_start - self.starts[self._idx - 1]
+            for di in range(len(self._in_buffers)):
+                self._in_buffers[di] = self._in_buffers[di][..., delta:]
+            logger.debug(f"    - Shifting input/output buffers by {delta:d} samples")
+            self._store(*[o[..., :delta] for o in self._out_buffers])
+            for ob in self._out_buffers:
+                ob[..., :-delta] = ob[..., delta:]
+                ob[..., -delta:] = 0.0
+
+
+def _check_cola(win, nperseg, step, window_name, tol=1e-10):
+    """Check whether the Constant OverLap Add (COLA) constraint is met."""
+    # adapted from SciPy
+    binsums = np.sum(
+        [win[ii * step : (ii + 1) * step] for ii in range(nperseg // step)], axis=0
+    )
+    if nperseg % step != 0:
+        binsums[: nperseg % step] += win[-(nperseg % step) :]
+    const = np.median(binsums)
+    deviation = np.max(np.abs(binsums - const))
+    if deviation > tol:
+        raise ValueError(
+            f"segment length {nperseg:d} with step {step:d} for {window_name} window "
+            "type does not provide a constant output "
+            f"({100 * deviation / const:g}% deviation)"
+        )
+    return const
+
+
+class _Storer:
+    """Store data in chunks."""
+
+    def __init__(self, *outs, picks=None):
+        for oi, out in enumerate(outs):
+            if not isinstance(out, np.ndarray) or out.ndim < 1:
+                raise TypeError(f"outs[{oi}] must be >= 1D ndarray, got {out}")
+        self.outs = outs
+        self.idx = 0
+        self.picks = picks
+
+    def __call__(self, *outs):
+        if len(outs) != len(self.outs) or not all(
+            out.shape[-1] == outs[0].shape[-1]
for out in outs + ): + raise ValueError("Bad outs") + idx = (Ellipsis,) + if self.picks is not None: + idx += (self.picks,) + stop = self.idx + outs[0].shape[-1] + idx += (slice(self.idx, stop),) + for o1, o2 in zip(self.outs, outs): + o1[idx] = o2 + self.idx = stop diff --git a/mne/annotations.py b/mne/annotations.py new file mode 100644 index 0000000..629ee7b --- /dev/null +++ b/mne/annotations.py @@ -0,0 +1,1736 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import json +import re +import warnings +from collections import Counter, OrderedDict +from collections.abc import Iterable +from copy import deepcopy +from datetime import datetime, timedelta, timezone +from itertools import takewhile +from textwrap import shorten + +import numpy as np +from scipy.io import loadmat + +from ._fiff.constants import FIFF +from ._fiff.open import fiff_open +from ._fiff.tag import read_tag +from ._fiff.tree import dir_tree_find +from ._fiff.write import ( + _safe_name_list, + end_block, + start_and_end_file, + start_block, + write_double, + write_float, + write_name_list_sanitized, + write_string, +) +from .utils import ( + _check_dict_keys, + _check_dt, + _check_fname, + _check_option, + _check_pandas_installed, + _check_time_format, + _convert_times, + _DefaultEventParser, + _dt_to_stamp, + _is_numeric, + _mask_to_onsets_offsets, + _on_missing, + _pl, + _stamp_to_dt, + _validate_type, + check_fname, + fill_doc, + int_like, + logger, + verbose, + warn, +) + +# For testing windows_like_datetime, we monkeypatch "datetime" in this module. +# Keep the true datetime object around for _validate_type use. +_datetime = datetime + + +def _check_o_d_s_c(onset, duration, description, ch_names): + onset = np.atleast_1d(np.array(onset, dtype=float)) + if onset.ndim != 1: + raise ValueError( + f"Onset must be a one dimensional array, got {onset.ndim} (shape " + f"{onset.shape})." + ) + duration = np.array(duration, dtype=float) + if duration.ndim == 0 or duration.shape == (1,): + duration = np.repeat(duration, len(onset)) + if duration.ndim != 1: + raise ValueError( + f"Duration must be a one dimensional array, got {duration.ndim}." + ) + + description = np.array(description, dtype=str) + if description.ndim == 0 or description.shape == (1,): + description = np.repeat(description, len(onset)) + if description.ndim != 1: + raise ValueError( + f"Description must be a one dimensional array, got {description.ndim}." + ) + _safe_name_list(description, "write", "description") + + # ch_names: convert to ndarray of tuples + _validate_type(ch_names, (None, tuple, list, np.ndarray), "ch_names") + if ch_names is None: + ch_names = [()] * len(onset) + ch_names = list(ch_names) + for ai, ch in enumerate(ch_names): + _validate_type(ch, (list, tuple, np.ndarray), f"ch_names[{ai}]") + ch_names[ai] = tuple(ch) + for ci, name in enumerate(ch_names[ai]): + _validate_type(name, str, f"ch_names[{ai}][{ci}]") + ch_names = _ndarray_ch_names(ch_names) + + if not (len(onset) == len(duration) == len(description) == len(ch_names)): + raise ValueError( + "Onset, duration, description, and ch_names must be " + f"equal in sizes, got {len(onset)}, {len(duration)}, " + f"{len(description)}, and {len(ch_names)}." + ) + return onset, duration, description, ch_names + + +def _ndarray_ch_names(ch_names): + # np.array(..., dtype=object) if all entries are empty will give + # an empty array of shape (n_entries, 0) which is not helpful. 
So let's
+    # force it to give us an array of shape (n_entries,) full of empty
+    # tuples
+    out = np.empty(len(ch_names), dtype=object)
+    out[:] = ch_names
+    return out
+
+
+@fill_doc
+class Annotations:
+    """Annotation object for annotating segments of raw data.
+
+    .. note::
+        To convert events to `~mne.Annotations`, use
+        `~mne.annotations_from_events`. To convert existing `~mne.Annotations`
+        to events, use `~mne.events_from_annotations`.
+
+    Parameters
+    ----------
+    onset : array of float, shape (n_annotations,)
+        The starting time of annotations in seconds after ``orig_time``.
+    duration : array of float, shape (n_annotations,) | float
+        Durations of the annotations in seconds. If a float, all the
+        annotations are given the same duration.
+    description : array of str, shape (n_annotations,) | str
+        Array of strings containing description for each annotation. If a
+        string, all the annotations are given the same description. To reject
+        epochs, use description starting with keyword 'bad'. See example below.
+    orig_time : float | str | datetime | tuple of int | None
+        A POSIX Timestamp, datetime or a tuple containing the timestamp as the
+        first element and microseconds as the second element. Determines the
+        starting time of annotation acquisition. If None (default),
+        starting time is determined from beginning of raw data acquisition.
+        In general, ``raw.info['meas_date']`` (or None) can be used for syncing
+        the annotations with raw data if their acquisition is started at the
+        same time. If it is a string, it should conform to the ISO8601 format.
+        More precisely to this '%%Y-%%m-%%d %%H:%%M:%%S.%%f' particular case of
+        the ISO8601 format where the delimiter between date and time is ' '.
+    %(ch_names_annot)s
+
+        .. versionadded:: 0.23
+
+    See Also
+    --------
+    mne.annotations_from_events
+    mne.events_from_annotations
+
+    Notes
+    -----
+    Annotations are added to an instance of :class:`mne.io.Raw` as the
+    attribute :attr:`raw.annotations <mne.io.Raw.annotations>`.
+
+    To reject bad epochs using annotations, use
+    annotation description starting with 'bad' keyword. The epochs with
+    overlapping bad segments are then rejected automatically by default.
+
+    To remove epochs with blinks you can do:
+
+    >>> eog_events = mne.preprocessing.find_eog_events(raw)  # doctest: +SKIP
+    >>> n_blinks = len(eog_events)  # doctest: +SKIP
+    >>> onset = eog_events[:, 0] / raw.info['sfreq'] - 0.25  # doctest: +SKIP
+    >>> duration = np.repeat(0.5, n_blinks)  # doctest: +SKIP
+    >>> description = ['bad blink'] * n_blinks  # doctest: +SKIP
+    >>> annotations = mne.Annotations(onset, duration, description)  # doctest: +SKIP
+    >>> raw.set_annotations(annotations)  # doctest: +SKIP
+    >>> epochs = mne.Epochs(raw, events, event_id, tmin, tmax)  # doctest: +SKIP
+
+    **ch_names**
+
+    Specifying channel names allows the creation of channel-specific
+    annotations. Once the annotations are assigned to a raw instance with
+    :meth:`mne.io.Raw.set_annotations`, if channels are renamed by the raw
+    instance, the annotation channels also get renamed. If channels are dropped
+    from the raw instance, any channel-specific annotation that has no channels
+    left in the raw instance will also be removed.
+
+    **orig_time**
+
+    If ``orig_time`` is None, the annotations are synced to the start of the
+    data (0 seconds). Otherwise the annotations are synced to sample 0 and
+    ``raw.first_samp`` is taken into account the same way as with events.
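+
+    For instance, a minimal sketch (the times and description are
+    illustrative) of annotations created without an ``orig_time`` and
+    therefore synced to the start of the data:
+
+    >>> annot = Annotations(onset=[1.0], duration=[0.5],
+    ...                     description=['bad blink'])  # doctest: +SKIP
+    >>> raw.set_annotations(annot)  # doctest: +SKIP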
+ + When setting annotations, the following alignments + between ``raw.info['meas_date']`` and ``annotation.orig_time`` take place: + + :: + + ----------- meas_date=XX, orig_time=YY ----------------------------- + + | +------------------+ + |______________| RAW | + | | | + | +------------------+ + meas_date first_samp + . + . | +------+ + . |_________| ANOT | + . | | | + . | +------+ + . orig_time onset[0] + . + | +------+ + |___________________| | + | | | + | +------+ + orig_time onset[0]' + + ----------- meas_date=XX, orig_time=None --------------------------- + + | +------------------+ + |______________| RAW | + | | | + | +------------------+ + . N +------+ + . o_________| ANOT | + . n | | + . e +------+ + . + | +------+ + |________________________| | + | | | + | +------+ + orig_time onset[0]' + + ----------- meas_date=None, orig_time=YY --------------------------- + + N +------------------+ + o______________| RAW | + n | | + e +------------------+ + | +------+ + |_________| ANOT | + | | | + | +------+ + + [[[ CRASH ]]] + + ----------- meas_date=None, orig_time=None ------------------------- + + N +------------------+ + o______________| RAW | + n | | + e +------------------+ + . N +------+ + . o_________| ANOT | + . n | | + . e +------+ + . + N +------+ + o________________________| | + n | | + e +------+ + orig_time onset[0]' + + .. warning:: + This means that when ``raw.info['meas_date'] is None``, doing + ``raw.set_annotations(raw.annotations)`` will not alter ``raw`` if and + only if ``raw.first_samp == 0``. When it's non-zero, + ``raw.set_annotations`` will assume that the "new" annotations refer to + the original data (with ``first_samp==0``), and will be re-referenced to + the new time offset! + + **Specific annotation** + + ``BAD_ACQ_SKIP`` annotation leads to specific reading/writing file + behaviours. See :meth:`mne.io.read_raw_fif` and + :meth:`Raw.save() ` notes for details. + """ # noqa: E501 + + def __init__(self, onset, duration, description, orig_time=None, ch_names=None): + self._orig_time = _handle_meas_date(orig_time) + self.onset, self.duration, self.description, self.ch_names = _check_o_d_s_c( + onset, duration, description, ch_names + ) + self._sort() # ensure we're sorted + + @property + def orig_time(self): + """The time base of the Annotations.""" + return self._orig_time + + def __eq__(self, other): + """Compare to another Annotations instance.""" + if not isinstance(other, Annotations): + return False + return ( + np.array_equal(self.onset, other.onset) + and np.array_equal(self.duration, other.duration) + and np.array_equal(self.description, other.description) + and np.array_equal(self.ch_names, other.ch_names) + and self.orig_time == other.orig_time + ) + + def __repr__(self): + """Show the representation.""" + counter = Counter(self.description) + kinds = ", ".join(["{} ({})".format(*k) for k in sorted(counter.items())]) + kinds = (": " if len(kinds) > 0 else "") + kinds + ch_specific = ", channel-specific" if self._any_ch_names() else "" + s = ( + f"Annotations | {len(self.onset)} segment" + f"{_pl(len(self.onset))}{ch_specific}{kinds}" + ) + return "<" + shorten(s, width=77, placeholder=" ...") + ">" + + def __len__(self): + """Return the number of annotations. + + Returns + ------- + n_annot : int + The number of annotations. 
+ """ + return len(self.duration) + + def __add__(self, other): + """Add (concatencate) two Annotation objects.""" + out = self.copy() + out += other + return out + + def __iadd__(self, other): + """Add (concatencate) two Annotation objects in-place. + + Both annotations must have the same orig_time + """ + if len(self) == 0: + self._orig_time = other.orig_time + if self.orig_time != other.orig_time: + raise ValueError( + "orig_time should be the same to add/concatenate 2 annotations (got " + f"{self.orig_time} != {other.orig_time})" + ) + return self.append( + other.onset, other.duration, other.description, other.ch_names + ) + + def __iter__(self): + """Iterate over the annotations.""" + # Figure this out once ahead of time for consistency and speed (for + # thousands of annotations) + with_ch_names = self._any_ch_names() + for idx in range(len(self.onset)): + yield self.__getitem__(idx, with_ch_names=with_ch_names) + + def __getitem__(self, key, *, with_ch_names=None): + """Propagate indexing and slicing to the underlying numpy structure.""" + if isinstance(key, int_like): + out_keys = ("onset", "duration", "description", "orig_time") + out_vals = ( + self.onset[key], + self.duration[key], + self.description[key], + self.orig_time, + ) + if with_ch_names or (with_ch_names is None and self._any_ch_names()): + out_keys += ("ch_names",) + out_vals += (self.ch_names[key],) + return OrderedDict(zip(out_keys, out_vals)) + else: + key = list(key) if isinstance(key, tuple) else key + return Annotations( + onset=self.onset[key], + duration=self.duration[key], + description=self.description[key], + orig_time=self.orig_time, + ch_names=self.ch_names[key], + ) + + @fill_doc + def append(self, onset, duration, description, ch_names=None): + """Add an annotated segment. Operates inplace. + + Parameters + ---------- + onset : float | array-like + Annotation time onset from the beginning of the recording in + seconds. + duration : float | array-like + Duration of the annotation in seconds. + description : str | array-like + Description for the annotation. To reject epochs, use description + starting with keyword 'bad'. + %(ch_names_annot)s + + .. versionadded:: 0.23 + + Returns + ------- + self : mne.Annotations + The modified Annotations object. + + Notes + ----- + The array-like support for arguments allows this to be used similarly + to not only ``list.append``, but also + `list.extend `__. + """ # noqa: E501 + onset, duration, description, ch_names = _check_o_d_s_c( + onset, duration, description, ch_names + ) + self.onset = np.append(self.onset, onset) + self.duration = np.append(self.duration, duration) + self.description = np.append(self.description, description) + self.ch_names = np.append(self.ch_names, ch_names) + self._sort() + return self + + def copy(self): + """Return a copy of the Annotations. + + Returns + ------- + inst : instance of Annotations + A copy of the object. + """ + return deepcopy(self) + + def delete(self, idx): + """Remove an annotation. Operates inplace. + + Parameters + ---------- + idx : int | array-like of int + Index of the annotation to remove. Can be array-like to + remove multiple indices. + """ + self.onset = np.delete(self.onset, idx) + self.duration = np.delete(self.duration, idx) + self.description = np.delete(self.description, idx) + self.ch_names = np.delete(self.ch_names, idx) + + @fill_doc + def to_data_frame(self, time_format="datetime"): + """Export annotations in tabular structure as a pandas DataFrame. 
+ + Parameters + ---------- + %(time_format_df_raw)s + + .. versionadded:: 1.7 + + Returns + ------- + result : pandas.DataFrame + Returns a pandas DataFrame with onset, duration, and + description columns. A column named ch_names is added if any + annotations are channel-specific. + """ + pd = _check_pandas_installed(strict=True) + valid_time_formats = ["ms", "timedelta", "datetime"] + dt = _handle_meas_date(self.orig_time) + if dt is None: + dt = _handle_meas_date(0) + time_format = _check_time_format(time_format, valid_time_formats, dt) + dt = dt.replace(tzinfo=None) + times = _convert_times(self.onset, time_format, dt) + df = dict(onset=times, duration=self.duration, description=self.description) + if self._any_ch_names(): + df.update(ch_names=self.ch_names) + df = pd.DataFrame(df) + return df + + def count(self): + """Count annotations. + + Returns + ------- + counts : dict + A dictionary containing unique annotation descriptions as keys with their + counts as values. + """ + return count_annotations(self) + + def _any_ch_names(self): + return any(len(ch) for ch in self.ch_names) + + def _prune_ch_names(self, info, on_missing): + # this prunes channel names and if a given channel-specific annotation + # no longer has any channels left, it gets dropped + keep = set(info["ch_names"]) + ch_names = self.ch_names + warned = False + drop_idx = list() + for ci, ch in enumerate(ch_names): + if len(ch): + names = list() + for name in ch: + if name not in keep: + if not warned: + _on_missing( + on_missing, + "At least one channel name in " + f"annotations missing from info: {name}", + ) + warned = True + else: + names.append(name) + ch_names[ci] = tuple(names) + if not len(ch_names[ci]): + drop_idx.append(ci) + if len(drop_idx): + self.delete(drop_idx) + return self + + @verbose + def save(self, fname, *, overwrite=False, verbose=None): + """Save annotations to FIF, CSV or TXT. + + Typically annotations get saved in the FIF file for raw data + (e.g., as ``raw.annotations``), but this offers the possibility + to also save them to disk separately in different file formats + which are easier to share between packages. + + Parameters + ---------- + fname : path-like + The filename to use. + %(overwrite)s + + .. versionadded:: 0.23 + %(verbose)s + + Notes + ----- + The format of the information stored in the saved annotation objects + depends on the chosen file format. :file:`.csv` files store the onset + as timestamps (e.g., ``2002-12-03 19:01:56.676071``), + whereas :file:`.txt` files store onset as seconds since start of the + recording (e.g., ``45.95597082905339``). 
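+
+        Examples
+        --------
+        A minimal sketch (the filename is illustrative; ``-annot.fif`` is one
+        of the suffixes this method accepts):
+
+        >>> annotations.save('session1-annot.fif', overwrite=True)  # doctest: +SKIP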
+ """ + check_fname( + fname, + "annotations", + ( + "-annot.fif", + "-annot.fif.gz", + "_annot.fif", + "_annot.fif.gz", + ".txt", + ".csv", + ), + ) + fname = _check_fname(fname, overwrite=overwrite) + if fname.suffix == ".txt": + _write_annotations_txt(fname, self) + elif fname.suffix == ".csv": + _write_annotations_csv(fname, self) + else: + with start_and_end_file(fname) as fid: + _write_annotations(fid, self) + + def _sort(self): + """Sort in place.""" + # instead of argsort here we use sorted so that it gives us + # the onset-then-duration hierarchy + vals = sorted(zip(self.onset, self.duration, range(len(self)))) + order = list(list(zip(*vals))[-1]) if len(vals) else [] + self.onset = self.onset[order] + self.duration = self.duration[order] + self.description = self.description[order] + self.ch_names = self.ch_names[order] + + @verbose + def crop( + self, tmin=None, tmax=None, emit_warning=False, use_orig_time=True, verbose=None + ): + """Remove all annotation that are outside of [tmin, tmax]. + + The method operates inplace. + + Parameters + ---------- + tmin : float | datetime | None + Start time of selection in seconds. + tmax : float | datetime | None + End time of selection in seconds. + emit_warning : bool + Whether to emit warnings when limiting or omitting annotations. + Defaults to False. + use_orig_time : bool + Whether to use orig_time as an offset. + Defaults to True. + %(verbose)s + + Returns + ------- + self : instance of Annotations + The cropped Annotations object. + """ + if len(self) == 0: + return self # no annotations, nothing to do + if not use_orig_time or self.orig_time is None: + offset = _handle_meas_date(0) + else: + offset = self.orig_time + if tmin is None: + tmin = timedelta(seconds=self.onset.min()) + offset + if tmax is None: + tmax = timedelta(seconds=(self.onset + self.duration).max()) + offset + for key, val in [("tmin", tmin), ("tmax", tmax)]: + _validate_type( + val, ("numeric", _datetime), key, "numeric, datetime, or None" + ) + absolute_tmin = _handle_meas_date(tmin) + absolute_tmax = _handle_meas_date(tmax) + del tmin, tmax + if absolute_tmin > absolute_tmax: + raise ValueError( + f"tmax should be greater than or equal to tmin ({absolute_tmin} < " + f"{absolute_tmax})." 
+ ) + logger.debug(f"Cropping annotations {absolute_tmin} - {absolute_tmax}") + + onsets, durations, descriptions, ch_names = [], [], [], [] + out_of_bounds, clip_left_elem, clip_right_elem = [], [], [] + for idx, (onset, duration, description, ch) in enumerate( + zip(self.onset, self.duration, self.description, self.ch_names) + ): + # if duration is NaN behave like a zero + if np.isnan(duration): + duration = 0.0 + # convert to absolute times + absolute_onset = timedelta(seconds=onset) + offset + absolute_offset = absolute_onset + timedelta(seconds=duration) + out_of_bounds.append( + absolute_onset > absolute_tmax or absolute_offset < absolute_tmin + ) + if out_of_bounds[-1]: + clip_left_elem.append(False) + clip_right_elem.append(False) + logger.debug( + f" [{idx}] Dropping " + f"({absolute_onset} - {absolute_offset}: {description})" + ) + else: + # clip the left side + clip_left_elem.append(absolute_onset < absolute_tmin) + if clip_left_elem[-1]: + absolute_onset = absolute_tmin + clip_right_elem.append(absolute_offset > absolute_tmax) + if clip_right_elem[-1]: + absolute_offset = absolute_tmax + if clip_left_elem[-1] or clip_right_elem[-1]: + durations.append((absolute_offset - absolute_onset).total_seconds()) + else: + durations.append(duration) + onsets.append((absolute_onset - offset).total_seconds()) + logger.debug( + f" [{idx}] Keeping " + f"({absolute_onset} - {absolute_offset} -> " + f"{onset} - {onset + duration})" + ) + descriptions.append(description) + ch_names.append(ch) + logger.debug(f"Cropping complete (kept {len(onsets)})") + self.onset = np.array(onsets, float) + self.duration = np.array(durations, float) + assert (self.duration >= 0).all() + self.description = np.array(descriptions, dtype=str) + self.ch_names = _ndarray_ch_names(ch_names) + + if emit_warning: + omitted = np.array(out_of_bounds).sum() + if omitted > 0: + warn(f"Omitted {omitted} annotation(s) that were outside data range.") + limited = (np.array(clip_left_elem) | np.array(clip_right_elem)).sum() + if limited > 0: + warn( + f"Limited {limited} annotation(s) that were expanding outside the" + " data range." + ) + + return self + + @verbose + def set_durations(self, mapping, verbose=None): + """Set annotation duration(s). Operates inplace. + + Parameters + ---------- + mapping : dict | float + A dictionary mapping the annotation description to a duration in + seconds e.g. ``{'ShortStimulus' : 3, 'LongStimulus' : 12}``. + Alternatively, if a number is provided, then all annotations + durations are set to the single provided value. + %(verbose)s + + Returns + ------- + self : mne.Annotations + The modified Annotations object. + + Notes + ----- + .. versionadded:: 0.24.0 + """ + _validate_type(mapping, (int, float, dict)) + + if isinstance(mapping, dict): + _check_dict_keys( + mapping, + self.description, + valid_key_source="data", + key_description="Annotation description(s)", + ) + for stim in mapping: + map_idx = [desc == stim for desc in self.description] + self.duration[map_idx] = mapping[stim] + + elif _is_numeric(mapping): + self.duration = np.ones(self.description.shape) * mapping + + else: + raise ValueError( + "Setting durations requires the mapping of " + "descriptions to times to be provided as a dict. " + f"Instead {type(mapping)} was provided." + ) + + return self + + @verbose + def rename(self, mapping, verbose=None): + """Rename annotation description(s). Operates inplace. + + Parameters + ---------- + mapping : dict + A dictionary mapping the old description to a new description, + e.g. 
{'1.0' : 'Control', '2.0' : 'Stimulus'}. + %(verbose)s + + Returns + ------- + self : mne.Annotations + The modified Annotations object. + + Notes + ----- + .. versionadded:: 0.24.0 + """ + _validate_type(mapping, dict) + _check_dict_keys( + mapping, + self.description, + valid_key_source="data", + key_description="Annotation description(s)", + ) + self.description = np.array([str(mapping.get(d, d)) for d in self.description]) + return self + + +class EpochAnnotationsMixin: + """Mixin class for Annotations in Epochs.""" + + @property + def annotations(self): # noqa: D102 + return self._annotations + + @verbose + def set_annotations(self, annotations, on_missing="raise", *, verbose=None): + """Setter for Epoch annotations from Raw. + + This method does not handle offsetting the times based + on first_samp or measurement dates, since that is expected + to occur in Raw.set_annotations(). + + Parameters + ---------- + annotations : instance of mne.Annotations | None + Annotations to set. + %(on_missing_ch_names)s + %(verbose)s + + Returns + ------- + self : instance of Epochs + The epochs object with annotations. + + Notes + ----- + Annotation onsets and offsets are stored as time in seconds (not as + sample numbers). + + If you have an ``-epo.fif`` file saved to disk created before 1.0, + annotations can be added correctly only if no decimation or + resampling was performed. We thus suggest to regenerate your + :class:`mne.Epochs` from raw and re-save to disk with 1.0+ if you + want to safely work with :class:`~mne.Annotations` in epochs. + + Since this method does not handle offsetting the times based + on first_samp or measurement dates, the recommended way to add + Annotations is:: + + raw.set_annotations(annotations) + annotations = raw.annotations + epochs.set_annotations(annotations) + + .. versionadded:: 1.0 + """ + _validate_type(annotations, (Annotations, None), "annotations") + if annotations is None: + self._annotations = None + else: + if getattr(self, "_unsafe_annot_add", False): + warn( + "Adding annotations to Epochs created (and saved to disk) before " + "1.0 will yield incorrect results if decimation or resampling was " + "performed on the instance, we recommend regenerating the Epochs " + "and re-saving them to disk." + ) + new_annotations = annotations.copy() + new_annotations._prune_ch_names(self.info, on_missing) + self._annotations = new_annotations + return self + + def get_annotations_per_epoch(self): + """Get a list of annotations that occur during each epoch. + + Returns + ------- + epoch_annots : list + A list of lists (with length equal to number of epochs) where each + inner list contains any annotations that overlap the corresponding + epoch. Annotations are stored as a :class:`tuple` of onset, + duration, description (not as a :class:`~mne.Annotations` object), + where the onset is now relative to time=0 of the epoch, rather than + time=0 of the original continuous (raw) data. + """ + # create a list of annotations for each epoch + epoch_annot_list = [[] for _ in range(len(self.events))] + + # check if annotations exist + if self.annotations is None: + return epoch_annot_list + + # when each epoch and annotation starts/stops + # no need to account for first_samp here... + epoch_tzeros = self.events[:, 0] / self._raw_sfreq + epoch_starts, epoch_stops = ( + np.atleast_2d(epoch_tzeros) + np.atleast_2d(self.times[[0, -1]]).T + ) + # ... 
because first_samp isn't accounted for here either + annot_starts = self._annotations.onset + annot_stops = annot_starts + self._annotations.duration + + # the first two cases (annot_straddles_epoch_{start|end}) will both + # (redundantly) capture cases where an annotation fully encompasses + # an epoch (e.g., annot from 1-4s, epoch from 2-3s). The redundancy + # doesn't matter because results are summed and then cast to bool (all + # we care about is presence/absence of overlap). + annot_straddles_epoch_start = np.logical_and( + np.atleast_2d(epoch_starts) >= np.atleast_2d(annot_starts).T, + np.atleast_2d(epoch_starts) < np.atleast_2d(annot_stops).T, + ) + + annot_straddles_epoch_end = np.logical_and( + np.atleast_2d(epoch_stops) > np.atleast_2d(annot_starts).T, + np.atleast_2d(epoch_stops) <= np.atleast_2d(annot_stops).T, + ) + + # this captures the only remaining case we care about: annotations + # fully contained within an epoch (or exactly coextensive with it). + annot_fully_within_epoch = np.logical_and( + np.atleast_2d(epoch_starts) <= np.atleast_2d(annot_starts).T, + np.atleast_2d(epoch_stops) >= np.atleast_2d(annot_stops).T, + ) + + # combine all cases to get array of shape (n_annotations, n_epochs). + # Nonzero entries indicate overlap between the corresponding + # annotation (row index) and epoch (column index). + all_cases = ( + annot_straddles_epoch_start + + annot_straddles_epoch_end + + annot_fully_within_epoch + ) + + # for each Epoch-Annotation overlap occurrence: + for annot_ix, epo_ix in zip(*np.nonzero(all_cases)): + this_annot = self._annotations[annot_ix] + this_tzero = epoch_tzeros[epo_ix] + # adjust annotation onset to be relative to epoch tzero... + annot = ( + this_annot["onset"] - this_tzero, + this_annot["duration"], + this_annot["description"], + ) + # ...then add it to the correct sublist of `epoch_annot_list` + epoch_annot_list[epo_ix].append(annot) + return epoch_annot_list + + def add_annotations_to_metadata(self, overwrite=False): + """Add raw annotations into the Epochs metadata data frame. + + Adds three columns to the ``metadata`` consisting of a list + in each row: + - ``annot_onset``: the onset of each Annotation within + the Epoch relative to the start time of the Epoch (in seconds). + - ``annot_duration``: the duration of each Annotation + within the Epoch in seconds. + - ``annot_description``: the free-form text description of each + Annotation. + + Parameters + ---------- + overwrite : bool + Whether to overwrite existing columns in metadata or not. + Default is False. + + Returns + ------- + self : instance of Epochs + The modified instance (instance is also modified inplace). + + Notes + ----- + .. versionadded:: 1.0 + """ + pd = _check_pandas_installed() + + # check if annotations exist + if self.annotations is None: + warn( + f"There were no Annotations stored in {self}, so " + "metadata was not modified." + ) + return self + + # get existing metadata DataFrame or instantiate an empty one + if self._metadata is not None: + metadata = self._metadata + else: + data = np.empty((len(self.events), 0)) + metadata = pd.DataFrame(data=data) + + if ( + any( + name in metadata.columns + for name in ["annot_onset", "annot_duration", "annot_description"] + ) + and not overwrite + ): + raise RuntimeError( + "Metadata for Epochs already contains columns " + '"annot_onset", "annot_duration", or "annot_description".' 
+ ) + + # get the Epoch annotations, then convert to separate lists for + # onsets, durations, and descriptions + epoch_annot_list = self.get_annotations_per_epoch() + onset, duration, description = [], [], [] + for epoch_annot in epoch_annot_list: + for ix, annot_prop in enumerate((onset, duration, description)): + entry = [annot[ix] for annot in epoch_annot] + + # round onset and duration to avoid IO round trip mismatch + if ix < 2: + entry = np.round(entry, decimals=12).tolist() + + annot_prop.append(entry) + + # Create a new Annotations column that is instantiated as an empty + # list per Epoch. + metadata["annot_onset"] = pd.Series(onset) + metadata["annot_duration"] = pd.Series(duration) + metadata["annot_description"] = pd.Series(description) + + # reset the metadata + self.metadata = metadata + return self + + +def _combine_annotations( + one, two, one_n_samples, one_first_samp, two_first_samp, sfreq +): + """Combine a tuple of annotations.""" + assert one is not None + assert two is not None + shift = one_n_samples / sfreq # to the right by the number of samples + shift += one_first_samp / sfreq # to the right by the offset + shift -= two_first_samp / sfreq # undo its offset + onset = np.concatenate([one.onset, two.onset + shift]) + duration = np.concatenate([one.duration, two.duration]) + description = np.concatenate([one.description, two.description]) + ch_names = np.concatenate([one.ch_names, two.ch_names]) + return Annotations(onset, duration, description, one.orig_time, ch_names) + + +def _handle_meas_date(meas_date): + """Convert meas_date to datetime or None. + + If `meas_date` is a string, it should conform to the ISO8601 format. + More precisely to this '%Y-%m-%d %H:%M:%S.%f' particular case of the + ISO8601 format where the delimiter between date and time is ' '. + Note that ISO8601 allows for ' ' or 'T' as delimiters between date and + time. + """ + if isinstance(meas_date, str): + ACCEPTED_ISO8601 = "%Y-%m-%d %H:%M:%S.%f" + try: + meas_date = datetime.strptime(meas_date, ACCEPTED_ISO8601) + except ValueError: + meas_date = None + else: + meas_date = meas_date.replace(tzinfo=timezone.utc) + elif isinstance(meas_date, tuple): + # old way + meas_date = _stamp_to_dt(meas_date) + if meas_date is not None: + if np.isscalar(meas_date): + # It would be nice just to do: + # + # meas_date = datetime.fromtimestamp(meas_date, timezone.utc) + # + # But Windows does not like timestamps < 0. So we'll use + # our specialized wrapper instead: + meas_date = np.array(np.modf(meas_date)[::-1]) + meas_date *= [1, 1e6] + meas_date = _stamp_to_dt(np.round(meas_date)) + _check_dt(meas_date) # run checks + return meas_date + + +def _sync_onset(raw, onset, inverse=False): + """Adjust onsets in relation to raw data.""" + offset = (-1 if inverse else 1) * raw._first_time + assert raw.info["meas_date"] == raw.annotations.orig_time + annot_start = onset - offset + return annot_start + + +def _annotations_starts_stops(raw, kinds, name="skip_by_annotation", invert=False): + """Get starts and stops from given kinds. + + onsets and ends are inclusive. 
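+
+    For example, a sketch (assuming ``raw`` carries annotations whose
+    descriptions start with 'bad')::
+
+        onsets, ends = _annotations_starts_stops(raw, 'bad')
+
+    Both arrays hold sample indices into ``raw``, one entry per matching
+    annotation.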
+ """ + _validate_type(kinds, (str, list, tuple), name) + if isinstance(kinds, str): + kinds = [kinds] + else: + for kind in kinds: + _validate_type(kind, "str", "All entries") + + if len(raw.annotations) == 0: + onsets, ends = np.array([], int), np.array([], int) + else: + idxs = [ + idx + for idx, desc in enumerate(raw.annotations.description) + if any(desc.upper().startswith(kind.upper()) for kind in kinds) + ] + # onsets are already sorted + onsets = raw.annotations.onset[idxs] + onsets = _sync_onset(raw, onsets) + ends = onsets + raw.annotations.duration[idxs] + onsets = raw.time_as_index(onsets, use_rounding=True) + ends = raw.time_as_index(ends, use_rounding=True) + assert (onsets <= ends).all() # all durations >= 0 + if invert: + # We need to eliminate overlaps here, otherwise wacky things happen, + # so we carefully invert the relationship + mask = np.zeros(len(raw.times), bool) + for onset, end in zip(onsets, ends): + mask[onset:end] = True + mask = ~mask + extras = onsets == ends + extra_onsets, extra_ends = onsets[extras], ends[extras] + onsets, ends = _mask_to_onsets_offsets(mask) + # Keep ones where things were exactly equal + del extras + # we could do this with a np.insert+np.searchsorted, but our + # ordered-ness should get us it for free + onsets = np.sort(np.concatenate([onsets, extra_onsets])) + ends = np.sort(np.concatenate([ends, extra_ends])) + assert (onsets <= ends).all() + return onsets, ends + + +def _write_annotations(fid, annotations): + """Write annotations.""" + start_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS) + write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, annotations.onset) + write_float( + fid, FIFF.FIFF_MNE_BASELINE_MAX, annotations.duration + annotations.onset + ) + write_name_list_sanitized( + fid, FIFF.FIFF_COMMENT, annotations.description, name="description" + ) + if annotations.orig_time is not None: + write_double(fid, FIFF.FIFF_MEAS_DATE, _dt_to_stamp(annotations.orig_time)) + if annotations._any_ch_names(): + write_string( + fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG, json.dumps(tuple(annotations.ch_names)) + ) + end_block(fid, FIFF.FIFFB_MNE_ANNOTATIONS) + + +def _write_annotations_csv(fname, annot): + annot = annot.to_data_frame() + if "ch_names" in annot: + annot["ch_names"] = [ + _safe_name_list(ch, "write", name=f'annot["ch_names"][{ci}') + for ci, ch in enumerate(annot["ch_names"]) + ] + annot.to_csv(fname, index=False) + + +def _write_annotations_txt(fname, annot): + content = "# MNE-Annotations\n" + if annot.orig_time is not None: + # for backward compat, we do not write tzinfo (assumed UTC) + content += f"# orig_time : {annot.orig_time.replace(tzinfo=None)}\n" + content += "# onset, duration, description" + data = [annot.onset, annot.duration, annot.description] + if annot._any_ch_names(): + content += ", ch_names" + data.append( + [ + _safe_name_list(ch, "write", f"annot.ch_names[{ci}]") + for ci, ch in enumerate(annot.ch_names) + ] + ) + content += "\n" + data = np.array(data, dtype=str).T + assert data.ndim == 2 + assert data.shape[0] == len(annot.onset) + assert data.shape[1] in (3, 4) + with open(fname, "wb") as fid: + fid.write(content.encode()) + np.savetxt(fid, data, delimiter=",", fmt="%s") + + +@fill_doc +def read_annotations( + fname, sfreq="auto", uint16_codec=None, encoding="utf8", ignore_marker_types=False +) -> Annotations: + r"""Read annotations from a file. 
+ + This function reads a ``.fif``, ``.fif.gz``, ``.vmrk``, ``.amrk``, + ``.edf``, ``.bdf``, ``.gdf``, ``.txt``, ``.csv``, ``.cnt``, ``.cef``, or + ``.set`` file and makes an :class:`mne.Annotations` object. + + Parameters + ---------- + fname : path-like + The filename. + sfreq : float | ``'auto'`` + The sampling frequency in the file. This parameter is necessary for + \*.vmrk, \*.amrk, and \*.cef files as Annotations are expressed in + seconds and \*.vmrk/\*.amrk/\*.cef files are in samples. For any other + file format, ``sfreq`` is omitted. If set to 'auto' then the ``sfreq`` + is taken from the respective info file of the same name with according + file extension (\*.vhdr/\*.ahdr for brainvision; \*.dap for Curry 7; + \*.cdt.dpa for Curry 8). So data.vmrk/amrk looks for sfreq in + data.vhdr/ahdr, data.cef looks in data.dap and data.cdt.cef looks in + data.cdt.dpa. + uint16_codec : str | None + This parameter is only used in EEGLAB (\*.set) and omitted otherwise. + If your \*.set file contains non-ascii characters, sometimes reading + it may fail and give rise to error message stating that "buffer is + too small". ``uint16_codec`` allows to specify what codec (for example: + ``'latin1'`` or ``'utf-8'``) should be used when reading character + arrays and can therefore help you solve this problem. + %(encoding_edf)s + Only used when reading EDF annotations. + ignore_marker_types : bool + If ``True``, ignore marker types in BrainVision files (and only use their + descriptions). Defaults to ``False``. + + Returns + ------- + annot : instance of Annotations + The annotations. + + Notes + ----- + The annotations stored in a ``.csv`` require the onset columns to be + timestamps. If you have onsets as floats (in seconds), you should use the + ``.txt`` extension. 
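+
+    Examples
+    --------
+    A minimal sketch (the filename is illustrative):
+
+    >>> annot = read_annotations('session1-annot.fif')  # doctest: +SKIP
+    >>> raw.set_annotations(annot)  # doctest: +SKIP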
+ """ + from .io.brainvision.brainvision import _read_annotations_brainvision + from .io.cnt.cnt import _read_annotations_cnt + from .io.ctf.markers import _read_annotations_ctf + from .io.curry.curry import _read_annotations_curry + from .io.edf.edf import _read_annotations_edf + from .io.eeglab.eeglab import _read_annotations_eeglab + + fname = _check_fname( + fname, + overwrite="read", + must_exist=True, + need_dir=str(fname).endswith(".ds"), # for CTF + name="fname", + ) + readers = { + ".csv": _read_annotations_csv, + ".cnt": _read_annotations_cnt, + ".ds": _read_annotations_ctf, + ".cef": _read_annotations_curry, + ".set": _read_annotations_eeglab, + ".edf": _read_annotations_edf, + ".bdf": _read_annotations_edf, + ".gdf": _read_annotations_edf, + ".vmrk": _read_annotations_brainvision, + ".amrk": _read_annotations_brainvision, + ".txt": _read_annotations_txt, + } + kwargs = { + ".vmrk": {"sfreq": sfreq, "ignore_marker_types": ignore_marker_types}, + ".amrk": {"sfreq": sfreq, "ignore_marker_types": ignore_marker_types}, + ".cef": {"sfreq": sfreq}, + ".set": {"uint16_codec": uint16_codec}, + ".edf": {"encoding": encoding}, + ".bdf": {"encoding": encoding}, + ".gdf": {"encoding": encoding}, + } + if fname.suffix in readers: + annotations = readers[fname.suffix](fname, **kwargs.get(fname.suffix, {})) + elif fname.name.endswith(("fif", "fif.gz")): + # Read FiF files + ff, tree, _ = fiff_open(fname, preload=False) + with ff as fid: + annotations = _read_annotations_fif(fid, tree) + elif fname.name.startswith("events_") and fname.suffix == ".mat": + annotations = _read_brainstorm_annotations(fname) + else: + raise OSError(f'Unknown annotation file format "{fname}"') + + if annotations is None: + raise OSError(f'No annotation data found in file "{fname}"') + return annotations + + +def _read_annotations_csv(fname): + """Read annotations from csv. + + Parameters + ---------- + fname : path-like + The filename. + + Returns + ------- + annot : instance of Annotations + The annotations. + """ + pd = _check_pandas_installed(strict=True) + df = pd.read_csv(fname, keep_default_na=False) + orig_time = df["onset"].values[0] + try: + float(orig_time) + warn( + "It looks like you have provided annotation onsets as floats. " + "These will be interpreted as MILLISECONDS. If that is not what " + "you want, save your CSV as a TXT file; the TXT reader accepts " + "onsets in seconds." + ) + except ValueError: + pass + onset_dt = pd.to_datetime(df["onset"]) + onset = (onset_dt - onset_dt[0]).dt.total_seconds() + duration = df["duration"].values.astype(float) + description = df["description"].values + ch_names = None + if "ch_names" in df.columns: + ch_names = [ + _safe_name_list(val, "read", "annotation channel name") + for val in df["ch_names"].values + ] + return Annotations(onset, duration, description, orig_time, ch_names) + + +def _read_brainstorm_annotations(fname, orig_time=None): + """Read annotations from a Brainstorm events_ file. + + Parameters + ---------- + fname : path-like + The filename + orig_time : float | int | instance of datetime | array of int | None + A POSIX Timestamp, datetime or an array containing the timestamp as the + first element and microseconds as the second element. Determines the + starting time of annotation acquisition. If None (default), + starting time is determined from beginning of raw data acquisition. + In general, ``raw.info['meas_date']`` (or None) can be used for syncing + the annotations with raw data if their acquisition is started at the + same time. 
+ + Returns + ------- + annot : instance of Annotations | None + The annotations. + """ + + def get_duration_from_times(t): + return t[1] - t[0] if t.shape[0] == 2 else np.zeros(len(t[0])) + + annot_data = loadmat(fname) + onsets, durations, descriptions = (list(), list(), list()) + for label, _, _, _, times, _, _ in annot_data["events"][0]: + onsets.append(times[0]) + durations.append(get_duration_from_times(times)) + n_annot = len(times[0]) + descriptions += [str(label[0])] * n_annot + + return Annotations( + onset=np.concatenate(onsets), + duration=np.concatenate(durations), + description=descriptions, + orig_time=orig_time, + ) + + +def _is_iso8601(candidate_str): + ISO8601 = r"^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}\.\d{6}$" + return re.compile(ISO8601).match(candidate_str) is not None + + +def _read_annotations_txt_parse_header(fname): + def is_orig_time(x): + return x.startswith("# orig_time :") + + with open(fname) as fid: + header = list(takewhile(lambda x: x.startswith("#"), fid)) + + orig_values = [h[13:].strip() for h in header if is_orig_time(h)] + orig_values = [_handle_meas_date(orig) for orig in orig_values if _is_iso8601(orig)] + + return None if not orig_values else orig_values[0] + + +def _read_annotations_txt(fname): + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore") + out = np.loadtxt(fname, delimiter=",", dtype=np.bytes_, unpack=True) + ch_names = None + if len(out) == 0: + onset, duration, desc = [], [], [] + else: + _check_option("text header", len(out), (3, 4)) + if len(out) == 3: + onset, duration, desc = out + else: + onset, duration, desc, ch_names = out + + onset = [float(o.decode()) for o in np.atleast_1d(onset)] + duration = [float(d.decode()) for d in np.atleast_1d(duration)] + desc = [str(d.decode()).strip() for d in np.atleast_1d(desc)] + if ch_names is not None: + ch_names = [ + _safe_name_list(ch.decode().strip(), "read", f"ch_names[{ci}]") + for ci, ch in enumerate(ch_names) + ] + + orig_time = _read_annotations_txt_parse_header(fname) + + annotations = Annotations( + onset=onset, + duration=duration, + description=desc, + orig_time=orig_time, + ch_names=ch_names, + ) + + return annotations + + +def _read_annotations_fif(fid, tree): + """Read annotations.""" + annot_data = dir_tree_find(tree, FIFF.FIFFB_MNE_ANNOTATIONS) + if len(annot_data) == 0: + annotations = None + else: + annot_data = annot_data[0] + orig_time = ch_names = None + onset, duration, description = list(), list(), list() + for ent in annot_data["directory"]: + kind = ent.kind + pos = ent.pos + tag = read_tag(fid, pos) + if kind == FIFF.FIFF_MNE_BASELINE_MIN: + onset = tag.data + onset = list() if onset is None else onset + elif kind == FIFF.FIFF_MNE_BASELINE_MAX: + duration = tag.data + duration = list() if duration is None else duration - onset + elif kind == FIFF.FIFF_COMMENT: + description = _safe_name_list(tag.data, "read", "description") + elif kind == FIFF.FIFF_MEAS_DATE: + orig_time = tag.data + try: + orig_time = float(orig_time) # old way + except TypeError: + orig_time = tuple(orig_time) # new way + elif kind == FIFF.FIFF_MNE_EPOCHS_DROP_LOG: + ch_names = tuple(tuple(x) for x in json.loads(tag.data)) + assert len(onset) == len(duration) == len(description) + annotations = Annotations(onset, duration, description, orig_time, ch_names) + return annotations + + +def _select_annotations_based_on_description(descriptions, event_id, regexp): + """Get a collection of descriptions and returns index of selected.""" + regexp_comp = re.compile(".*" if regexp 
is None else regexp)
+
+    event_id_ = dict()
+    dropped = []
+    # Iterate over the sorted descriptions so that the Counter mapping
+    # is slightly less arbitrary
+    for desc in sorted(descriptions):
+        if desc in event_id_:
+            continue
+
+        if regexp_comp.match(desc) is None:
+            continue
+
+        if isinstance(event_id, dict):
+            if desc in event_id:
+                event_id_[desc] = event_id[desc]
+            else:
+                continue
+        else:
+            trigger = event_id(desc)
+            if trigger is not None:
+                event_id_[desc] = trigger
+            else:
+                dropped.append(desc)
+
+    event_sel = [ii for ii, kk in enumerate(descriptions) if kk in event_id_]
+
+    if len(event_sel) == 0 and regexp is not None:
+        raise ValueError("Could not find any of the events you specified.")
+
+    return event_sel, event_id_
+
+
+def _select_events_based_on_id(events, event_desc):
+    """Get a collection of events and returns index of selected."""
+    event_desc_ = dict()
+    func = event_desc.get if isinstance(event_desc, dict) else event_desc
+    event_ids = events[np.unique(events[:, 2], return_index=True)[1], 2]
+    for e in event_ids:
+        trigger = func(e)
+        if trigger is not None:
+            event_desc_[e] = trigger
+
+    event_sel = [ii for ii, e in enumerate(events) if e[2] in event_desc_]
+
+    if len(event_sel) == 0:
+        raise ValueError("Could not find any of the events you specified.")
+
+    return event_sel, event_desc_
+
+
+def _check_event_id(event_id, raw):
+    from .io import Raw, RawArray
+    from .io.brainvision.brainvision import (
+        RawBrainVision,
+        _BVEventParser,
+        _check_bv_annot,
+    )
+
+    if event_id is None:
+        return _DefaultEventParser()
+    elif event_id == "auto":
+        if isinstance(raw, RawBrainVision):
+            return _BVEventParser()
+        elif isinstance(raw, Raw | RawArray) and _check_bv_annot(
+            raw.annotations.description
+        ):
+            logger.info("Non-RawBrainVision raw using brainvision markers")
+            return _BVEventParser()
+        else:
+            return _DefaultEventParser()
+    elif callable(event_id) or isinstance(event_id, dict):
+        return event_id
+    else:
+        raise ValueError(
+            "Invalid type for event_id (should be None, str, "
+            f"dict or callable). Got {type(event_id)}."
+        )
+
+
+def _check_event_description(event_desc, events):
+    """Check event_id and convert to default format."""
+    if event_desc is None:  # convert to int to make typing-checks happy
+        event_desc = list(np.unique(events[:, 2]))
+
+    if isinstance(event_desc, dict):
+        for val in event_desc.values():
+            _validate_type(val, (str, None), "Event names")
+    elif isinstance(event_desc, Iterable):
+        event_desc = np.asarray(event_desc)
+        if event_desc.ndim != 1:
+            raise ValueError(f"event_desc must be 1D, got shape {event_desc.shape}")
+        event_desc = dict(zip(event_desc, map(str, event_desc)))
+    elif callable(event_desc):
+        pass
+    else:
+        raise ValueError(
+            "Invalid type for event_desc (should be None, list, "
+            f"1darray, dict or callable). Got {type(event_desc)}."
+        )
+
+    return event_desc
+
+
+@verbose
+def events_from_annotations(
+    raw,
+    event_id="auto",
+    regexp=r"^(?![Bb][Aa][Dd]|[Ee][Dd][Gg][Ee]).*$",
+    use_rounding=True,
+    chunk_duration=None,
+    tol=1e-8,
+    verbose=None,
+):
+    """Get :term:`events` and ``event_id`` from an Annotations object.
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        The raw data for which Annotations are defined.
+    event_id : dict | callable | None | ``'auto'``
+        Can be:
+
+        - **dict**: map descriptions (keys) to integer event codes (values).
+          Only the descriptions present will be mapped, others will be ignored.
+        - **callable**: must take a string input and return an integer event
+          code, or return ``None`` to ignore the event.
+        - **None**: Map descriptions to unique integer values based on their
+          ``sorted`` order.
+        - **'auto' (default)**: prefer a raw-format-specific parser:
+
+          - Brainvision: map stimulus events to their integer part; response
+            events to integer part + 1000; optic events to integer part + 2000;
+            'SyncStatus/Sync On' to 99998; 'New Segment/' to 99999;
+            all others like ``None`` with an offset of 10000.
+          - Other raw formats: Behaves like None.
+
+          .. versionadded:: 0.18
+    regexp : str | None
+        Regular expression used to filter the annotations: only annotations
+        whose descriptions match it are converted. The default ignores
+        descriptions beginning ``'bad'`` or ``'edge'`` (case-insensitive).
+
+        .. versionchanged:: 0.18
+           Default ignores bad and edge descriptions.
+    use_rounding : bool
+        If True, use rounding (instead of truncation) when converting
+        times to indices. This can help avoid non-unique indices.
+    chunk_duration : float | None
+        Chunk duration in seconds. If ``chunk_duration`` is set to None
+        (default), generated events correspond to the annotation onsets.
+        If not, :func:`mne.events_from_annotations` returns as many events as
+        they fit within the annotation duration spaced according to
+        ``chunk_duration``. As a consequence annotations with duration shorter
+        than ``chunk_duration`` will not contribute events.
+    tol : float
+        The tolerance used to check if a chunk fits within an annotation when
+        ``chunk_duration`` is not ``None``. If the duration from a computed
+        chunk onset to the end of the annotation is smaller than
+        ``chunk_duration`` minus ``tol``, the onset will be discarded.
+    %(verbose)s
+
+    Returns
+    -------
+    %(events)s
+    event_id : dict
+        The event_id variable that can be passed to :class:`~mne.Epochs`.
+
+    See Also
+    --------
+    mne.annotations_from_events
+
+    Notes
+    -----
+    For data formats that store integer events as strings (e.g., NeuroScan
+    ``.cnt`` files), passing the Python built-in function :class:`int` as the
+    ``event_id`` parameter will do what most users probably want in those
+    circumstances: return an ``event_id`` dictionary that maps event ``'1'`` to
+    integer event code ``1``, ``'2'`` to ``2``, etc.
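+
+    Examples
+    --------
+    Assuming ``raw`` is an annotated :class:`~mne.io.Raw` instance, the
+    returned mapping can be fed straight to :class:`~mne.Epochs`::
+
+        events, event_id = mne.events_from_annotations(raw)
+        epochs = mne.Epochs(raw, events, event_id=event_id)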
+ """ + if len(raw.annotations) == 0: + event_id = dict() if not isinstance(event_id, dict) else event_id + return np.empty((0, 3), dtype=int), event_id + + annotations = raw.annotations + + event_id = _check_event_id(event_id, raw) + + event_sel, event_id_ = _select_annotations_based_on_description( + annotations.description, event_id=event_id, regexp=regexp + ) + + if chunk_duration is None: + inds = raw.time_as_index( + annotations.onset, use_rounding=use_rounding, origin=annotations.orig_time + ) + if annotations.orig_time is not None: + inds += raw.first_samp + values = [event_id_[kk] for kk in annotations.description[event_sel]] + inds = inds[event_sel] + else: + inds = values = np.array([]).astype(int) + for annot in annotations[event_sel]: + annot_offset = annot["onset"] + annot["duration"] + _onsets = np.arange(annot["onset"], annot_offset, chunk_duration) + good_events = annot_offset - _onsets >= chunk_duration - tol + if good_events.any(): + _onsets = _onsets[good_events] + _inds = raw.time_as_index( + _onsets, use_rounding=use_rounding, origin=annotations.orig_time + ) + _inds += raw.first_samp + inds = np.append(inds, _inds) + _values = np.full( + shape=len(_inds), + fill_value=event_id_[annot["description"]], + dtype=int, + ) + values = np.append(values, _values) + + events = np.c_[inds, np.zeros(len(inds)), values].astype(int) + + logger.info(f"Used Annotations descriptions: {list(event_id_.keys())}") + + return events, event_id_ + + +@verbose +def annotations_from_events( + events, sfreq, event_desc=None, first_samp=0, orig_time=None, verbose=None +): + """Convert an event array to an Annotations object. + + Parameters + ---------- + events : ndarray, shape (n_events, 3) + The events. + sfreq : float + Sampling frequency. + event_desc : dict | array-like | callable | None + Events description. Can be: + + - **dict**: map integer event codes (keys) to descriptions (values). + Only the descriptions present will be mapped, others will be ignored. + - **array-like**: list, or 1d array of integers event codes to include. + Only the event codes present will be mapped, others will be ignored. + Event codes will be passed as string descriptions. + - **callable**: must take a integer event code as input and return a + string description or None to ignore it. + - **None**: Use integer event codes as descriptions. + first_samp : int + The first data sample (default=0). See :attr:`mne.io.Raw.first_samp` + docstring. + orig_time : float | str | datetime | tuple of int | None + Determines the starting time of annotation acquisition. If None + (default), starting time is determined from beginning of raw data + acquisition. For details, see :meth:`mne.Annotations` docstring. + %(verbose)s + + Returns + ------- + annot : instance of Annotations + The annotations. + + See Also + -------- + mne.events_from_annotations + + Notes + ----- + Annotations returned by this function will all have zero (null) duration. + + Creating events from annotations via the function + `mne.events_from_annotations` takes in event mappings with + key→value pairs as description→ID, whereas `mne.annotations_from_events` + takes in event mappings with key→value pairs as ID→description. 
+    If you need to use these together, you can invert the mapping by doing::
+
+        event_desc = {v: k for k, v in event_id.items()}
+    """
+    event_desc = _check_event_description(event_desc, events)
+    event_sel, event_desc_ = _select_events_based_on_id(events, event_desc)
+    events_sel = events[event_sel]
+    onsets = (events_sel[:, 0] - first_samp) / sfreq
+    descriptions = [event_desc_[e[2]] for e in events_sel]
+    durations = np.zeros(len(events_sel))  # dummy durations
+
+    # Create annotations
+    annots = Annotations(
+        onset=onsets, duration=durations, description=descriptions, orig_time=orig_time
+    )
+
+    return annots
+
+
+def _adjust_onset_meas_date(annot, raw):
+    """Adjust the annotation onsets based on raw meas_date."""
+    # If there is a non-None meas date, then the onset should take into
+    # account the first_samp / first_time.
+    if raw.info["meas_date"] is not None:
+        annot.onset += raw.first_time
+
+
+def count_annotations(annotations):
+    """Count annotations.
+
+    Parameters
+    ----------
+    annotations : mne.Annotations
+        The annotations instance.
+
+    Returns
+    -------
+    counts : dict
+        A dictionary containing unique annotation descriptions as keys with their
+        counts as values.
+
+    Examples
+    --------
+    >>> annotations = mne.Annotations([0, 1, 2], [1, 2, 1], ["T0", "T1", "T0"])
+    >>> count_annotations(annotations)
+    {'T0': 2, 'T1': 1}
+    """
+    types, counts = np.unique(annotations.description, return_counts=True)
+    return {str(t): int(count) for t, count in zip(types, counts)}
diff --git a/mne/baseline.py b/mne/baseline.py
new file mode 100644
index 0000000..4e73ed0
--- /dev/null
+++ b/mne/baseline.py
@@ -0,0 +1,224 @@
+"""Utility functions to baseline-correct data."""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import numpy as np
+
+from .utils import _check_option, _validate_type, logger, verbose
+
+
+def _log_rescale(baseline, mode="mean"):
+    """Log the rescaling method."""
+    if baseline is not None:
+        _check_option(
+            "mode",
+            mode,
+            ["logratio", "ratio", "zscore", "mean", "percent", "zlogratio"],
+        )
+        msg = f"Applying baseline correction (mode: {mode})"
+    else:
+        msg = "No baseline correction applied"
+    return msg
+
+
+@verbose
+def rescale(data, times, baseline, mode="mean", copy=True, picks=None, verbose=None):
+    """Rescale (baseline correct) data.
+
+    Parameters
+    ----------
+    data : array
+        It can be of any shape. The only constraint is that the last
+        dimension should be time.
+    times : 1D array
+        Time instants in seconds.
+    %(baseline_rescale)s
+    mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
+        Perform baseline correction by
+
+        - subtracting the mean of baseline values ('mean')
+        - dividing by the mean of baseline values ('ratio')
+        - dividing by the mean of baseline values and taking the log
+          ('logratio')
+        - subtracting the mean of baseline values followed by dividing by
+          the mean of baseline values ('percent')
+        - subtracting the mean of baseline values and dividing by the
+          standard deviation of baseline values ('zscore')
+        - dividing by the mean of baseline values, taking the log, and
+          dividing by the standard deviation of log baseline values
+          ('zlogratio')
+
+    copy : bool
+        Whether to return a new instance or modify in place.
+    picks : list of int | None
+        Data to process along the axis=-2 (None, default, processes all).
+    %(verbose)s
+
+    Returns
+    -------
+    data_scaled : array
+        Array of same shape as data after rescaling.
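+
+    Examples
+    --------
+    Assuming ``data`` is an array whose last axis is time and ``times`` is the
+    matching time vector, subtracting the pre-stimulus mean reads::
+
+        data_corrected = rescale(data, times, baseline=(None, 0), mode="mean")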
+ """ + if copy: + data = data.copy() + if verbose is not False: + msg = _log_rescale(baseline, mode) + logger.info(msg) + if baseline is None or data.shape[-1] == 0: + return data + + bmin, bmax = baseline + if bmin is None: + imin = 0 + else: + imin = np.where(times >= bmin)[0] + if len(imin) == 0: + raise ValueError( + f"bmin is too large ({bmin}), it exceeds the largest time value" + ) + imin = int(imin[0]) + if bmax is None: + imax = len(times) + else: + imax = np.where(times <= bmax)[0] + if len(imax) == 0: + raise ValueError( + f"bmax is too small ({bmax}), it is smaller than the smallest time " + "value" + ) + imax = int(imax[-1]) + 1 + if imin >= imax: + raise ValueError( + f"Bad rescaling slice ({imin}:{imax}) from time values {bmin}, {bmax}" + ) + + # technically this is inefficient when `picks` is given, but assuming + # that we generally pick most channels for rescaling, it's not so bad + mean = np.mean(data[..., imin:imax], axis=-1, keepdims=True) + + if mode == "mean": + + def fun(d, m): + d -= m + + elif mode == "ratio": + + def fun(d, m): + d /= m + + elif mode == "logratio": + + def fun(d, m): + d /= m + np.log10(d, out=d) + + elif mode == "percent": + + def fun(d, m): + d -= m + d /= m + + elif mode == "zscore": + + def fun(d, m): + d -= m + d /= np.std(d[..., imin:imax], axis=-1, keepdims=True) + + elif mode == "zlogratio": + + def fun(d, m): + d /= m + np.log10(d, out=d) + d /= np.std(d[..., imin:imax], axis=-1, keepdims=True) + + if picks is None: + fun(data, mean) + else: + for pi in picks: + fun(data[..., pi, :], mean[..., pi, :]) + return data + + +def _check_baseline(baseline, times, sfreq, on_baseline_outside_data="raise"): + """Check if the baseline is valid and adjust it if requested. + + ``None`` values inside ``baseline`` will be replaced with ``times[0]`` and + ``times[-1]``. + + Parameters + ---------- + baseline : array-like, shape (2,) | None + Beginning and end of the baseline period, in seconds. If ``None``, + assume no baseline and return immediately. + times : array + The time points. + sfreq : float + The sampling rate. + on_baseline_outside_data : 'raise' | 'info' | 'adjust' + What to do if the baseline period exceeds the data. + If ``'raise'``, raise an exception (default). + If ``'info'``, log an info message. + If ``'adjust'``, adjust the baseline such that it is within the data range. + + Returns + ------- + (baseline_tmin, baseline_tmax) | None + The baseline with ``None`` values replaced with times, and with adjusted times + if ``on_baseline_outside_data='adjust'``; or ``None``, if ``baseline`` is + ``None``. + """ + if baseline is None: + return None + + _validate_type(baseline, "array-like") + baseline = tuple(baseline) + + if len(baseline) != 2: + raise ValueError( + f"baseline must have exactly two elements (got {len(baseline)})." + ) + + tmin, tmax = times[0], times[-1] + tstep = 1.0 / float(sfreq) + + # check default value of baseline and `tmin=0` + if baseline == (None, 0) and tmin == 0: + raise ValueError( + "Baseline interval is only one sample. Use `baseline=(0, 0)` if this is " + "desired." 
+ ) + + baseline_tmin, baseline_tmax = baseline + + if baseline_tmin is None: + baseline_tmin = tmin + baseline_tmin = float(baseline_tmin) + + if baseline_tmax is None: + baseline_tmax = tmax + baseline_tmax = float(baseline_tmax) + + if baseline_tmin > baseline_tmax: + raise ValueError( + f"Baseline min ({baseline_tmin}) must be less than baseline max (" + f"{baseline_tmax})" + ) + + if (baseline_tmin < tmin - tstep) or (baseline_tmax > tmax + tstep): + msg = ( + f"Baseline interval [{baseline_tmin}, {baseline_tmax}] s is outside of " + f"epochs data [{tmin}, {tmax}] s. Epochs were probably cropped." + ) + if on_baseline_outside_data == "raise": + raise ValueError(msg) + elif on_baseline_outside_data == "info": + logger.info(msg) + elif on_baseline_outside_data == "adjust": + if baseline_tmin < tmin - tstep: + baseline_tmin = tmin + if baseline_tmax > tmax + tstep: + baseline_tmax = tmax + + return baseline_tmin, baseline_tmax diff --git a/mne/beamformer/__init__.py b/mne/beamformer/__init__.py new file mode 100644 index 0000000..16a5b36 --- /dev/null +++ b/mne/beamformer/__init__.py @@ -0,0 +1,8 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Beamformers for source localization.""" +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/beamformer/__init__.pyi b/mne/beamformer/__init__.pyi new file mode 100644 index 0000000..f7d6eb9 --- /dev/null +++ b/mne/beamformer/__init__.pyi @@ -0,0 +1,34 @@ +__all__ = [ + "Beamformer", + "apply_dics", + "apply_dics_csd", + "apply_dics_epochs", + "apply_dics_tfr_epochs", + "apply_lcmv", + "apply_lcmv_cov", + "apply_lcmv_epochs", + "apply_lcmv_raw", + "make_dics", + "make_lcmv", + "make_lcmv_resolution_matrix", + "rap_music", + "read_beamformer", + "trap_music", +] +from ._compute_beamformer import Beamformer, read_beamformer +from ._dics import ( + apply_dics, + apply_dics_csd, + apply_dics_epochs, + apply_dics_tfr_epochs, + make_dics, +) +from ._lcmv import ( + apply_lcmv, + apply_lcmv_cov, + apply_lcmv_epochs, + apply_lcmv_raw, + make_lcmv, +) +from ._rap_music import rap_music, trap_music +from .resolution_matrix import make_lcmv_resolution_matrix diff --git a/mne/beamformer/_compute_beamformer.py b/mne/beamformer/_compute_beamformer.py new file mode 100644 index 0000000..bb947cd --- /dev/null +++ b/mne/beamformer/_compute_beamformer.py @@ -0,0 +1,603 @@ +"""Functions shared between different beamformer types.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +from copy import deepcopy + +import numpy as np + +from .._fiff.proj import Projection, make_projector +from ..cov import Covariance, make_ad_hoc_cov +from ..forward.forward import _restrict_forward_to_src_sel, is_fixed_orient +from ..minimum_norm.inverse import _get_vertno, _prepare_forward +from ..source_space._source_space import label_src_vertno_sel +from ..time_frequency.csd import CrossSpectralDensity +from ..utils import ( + _check_option, + _check_src_normal, + _import_h5io_funcs, + _pl, + _reg_pinv, + _sym_mat_pow, + check_fname, + logger, + verbose, + warn, +) + + +def _check_proj_match(proj, filters): + """Check whether SSP projections in data and spatial filter match.""" + proj_data, _, _ = make_projector(proj, filters["ch_names"]) + if not np.allclose( + proj_data, filters["proj"], atol=np.finfo(float).eps, rtol=1e-13 + ): + raise ValueError( + "The SSP projections present in the data " + "do not match the projections used when " + "calculating the spatial filter." + ) + + +def _check_src_type(filters): + """Check whether src_type is in filters and set custom warning.""" + if "src_type" not in filters: + filters["src_type"] = None + warn_text = ( + "The spatial filter does not contain src_type and a robust " + "guess of src_type is not possible without src. Consider " + "recomputing the filter." + ) + return filters, warn_text + + +def _prepare_beamformer_input( + info, + forward, + label=None, + pick_ori=None, + noise_cov=None, + rank=None, + pca=False, + loose=None, + combine_xyz="fro", + exp=None, + limit=None, + allow_fixed_depth=True, + limit_depth_chs=False, +): + """Input preparation common for LCMV, DICS, and RAP-MUSIC.""" + _check_option("pick_ori", pick_ori, ("normal", "max-power", "vector", None)) + + # Restrict forward solution to selected vertices + if label is not None: + _, src_sel = label_src_vertno_sel(label, forward["src"]) + forward = _restrict_forward_to_src_sel(forward, src_sel) + + if loose is None: + loose = 0.0 if is_fixed_orient(forward) else 1.0 + # TODO: Deduplicate with _check_one_ch_type, should not be necessary + # (DICS hits this code path, LCMV does not) + if noise_cov is None: + noise_cov = make_ad_hoc_cov(info, std=1.0) + ( + forward, + info_picked, + gain, + _, + orient_prior, + _, + trace_GRGT, + noise_cov, + whitener, + ) = _prepare_forward( + forward, + info, + noise_cov, + "auto", + loose, + rank=rank, + pca=pca, + use_cps=True, + exp=exp, + limit_depth_chs=limit_depth_chs, + combine_xyz=combine_xyz, + limit=limit, + allow_fixed_depth=allow_fixed_depth, + ) + is_free_ori = not is_fixed_orient(forward) # could have been changed + nn = forward["source_nn"] + if is_free_ori: # take Z coordinate + nn = nn[2::3] + nn = nn.copy() + vertno = _get_vertno(forward["src"]) + if forward["surf_ori"]: + nn[...] = [0, 0, 1] # align to local +Z coordinate + if pick_ori is not None and not is_free_ori: + raise ValueError( + f"Normal or max-power orientation (got {pick_ori!r}) can only be picked " + "when a forward operator with free orientation is used." + ) + if pick_ori == "normal" and not forward["surf_ori"]: + raise ValueError( + "Normal orientation can only be picked when a forward operator oriented in " + "surface coordinates is used." 
+ ) + _check_src_normal(pick_ori, forward["src"]) + del forward, info + + # Undo the scaling that MNE prefers + scale = np.sqrt((noise_cov["eig"] > 0).sum() / trace_GRGT) + gain /= scale + if orient_prior is not None: + orient_std = np.sqrt(orient_prior) + else: + orient_std = np.ones(gain.shape[1]) + + # Get the projector + proj, _, _ = make_projector(info_picked["projs"], info_picked["ch_names"]) + return (is_free_ori, info_picked, proj, vertno, gain, whitener, nn, orient_std) + + +def _reduce_leadfield_rank(G): + """Reduce the rank of the leadfield.""" + # decompose lead field + u, s, v = np.linalg.svd(G, full_matrices=False) + + # backproject, omitting one direction (equivalent to setting the smallest + # singular value to zero) + G = np.matmul(u[:, :, :-1], s[:, :-1, np.newaxis] * v[:, :-1, :]) + + return G + + +def _sym_inv_sm(x, reduce_rank, inversion, sk): + """Symmetric inversion with single- or matrix-style inversion.""" + if x.shape[1:] == (1, 1): + with np.errstate(divide="ignore", invalid="ignore"): + x_inv = 1.0 / x + x_inv[~np.isfinite(x_inv)] = 1.0 + else: + assert x.shape[1:] == (3, 3) + if inversion == "matrix": + x_inv = _sym_mat_pow(x, -1, reduce_rank=reduce_rank) + # Reapply source covariance after inversion + x_inv *= sk[:, :, np.newaxis] + x_inv *= sk[:, np.newaxis, :] + else: + # Invert for each dipole separately using plain division + diags = np.diagonal(x, axis1=1, axis2=2) + assert not reduce_rank # guaranteed earlier + with np.errstate(divide="ignore"): + diags = 1.0 / diags + # set the diagonal of each 3x3 + x_inv = np.zeros_like(x) + for k in range(x.shape[0]): + this = diags[k] + # Reapply source covariance after inversion + this *= sk[k] * sk[k] + x_inv[k].flat[::4] = this + return x_inv + + +def _compute_beamformer( + G, + Cm, + reg, + n_orient, + weight_norm, + pick_ori, + reduce_rank, + rank, + inversion, + nn, + orient_std, + whitener, +): + """Compute a spatial beamformer filter (LCMV or DICS). + + For more detailed information on the parameters, see the docstrings of + `make_lcmv` and `make_dics`. + + Parameters + ---------- + G : ndarray, shape (n_dipoles, n_channels) + The leadfield. + Cm : ndarray, shape (n_channels, n_channels) + The data covariance matrix. + reg : float + Regularization parameter. + n_orient : int + Number of dipole orientations defined at each source point + weight_norm : None | 'unit-noise-gain' | 'nai' + The weight normalization scheme to use. + pick_ori : None | 'normal' | 'max-power' + The source orientation to compute the beamformer in. + reduce_rank : bool + Whether to reduce the rank by one during computation of the filter. + rank : dict | None | 'full' | 'info' + See compute_rank. + inversion : 'matrix' | 'single' + The inversion scheme to compute the weights. + nn : ndarray, shape (n_dipoles, 3) + The source normals. + orient_std : ndarray, shape (n_dipoles,) + The std of the orientation prior used in weighting the lead fields. + whitener : ndarray, shape (n_channels, n_channels) + The whitener. + + Returns + ------- + W : ndarray, shape (n_dipoles, n_channels) + The beamformer filter weights. 
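+    max_power_ori : ndarray, shape (n_sources, 3) | None
+        The orientation of maximum power for each source, or ``None`` unless
+        ``pick_ori='max-power'``.
+
+    Notes
+    -----
+    Per source, the unit-gain filter is ``W_ug = inv(G.T @ Cm_inv @ G) @
+    G.T @ Cm_inv``; the selected ``weight_norm`` then rescales ``W_ug``
+    (see the step-by-step comments in the body).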
+ """ + _check_option( + "weight_norm", + weight_norm, + ["unit-noise-gain-invariant", "unit-noise-gain", "nai", None], + ) + + # Whiten the data covariance + Cm = whitener @ Cm @ whitener.T.conj() + # Restore to properly Hermitian as large whitening coefs can have bad + # rounding error + Cm[:] = (Cm + Cm.T.conj()) / 2.0 + + assert Cm.shape == (G.shape[0],) * 2 + s, _ = np.linalg.eigh(Cm) + if not (s >= -s.max() * 1e-7).all(): + # This shouldn't ever happen, but just in case + warn( + "data covariance does not appear to be positive semidefinite, " + "results will likely be incorrect" + ) + # Tikhonov regularization using reg parameter to control for + # trade-off between spatial resolution and noise sensitivity + # eq. 25 in Gross and Ioannides, 1999 Phys. Med. Biol. 44 2081 + Cm_inv, loading_factor, rank = _reg_pinv(Cm, reg, rank) + + assert orient_std.shape == (G.shape[1],) + n_sources = G.shape[1] // n_orient + assert nn.shape == (n_sources, 3) + + logger.info(f"Computing beamformer filters for {n_sources} source{_pl(n_sources)}") + n_channels = G.shape[0] + assert n_orient in (3, 1) + Gk = np.reshape(G.T, (n_sources, n_orient, n_channels)).transpose(0, 2, 1) + assert Gk.shape == (n_sources, n_channels, n_orient) + sk = np.reshape(orient_std, (n_sources, n_orient)) + del G, orient_std + + _check_option("reduce_rank", reduce_rank, (True, False)) + + # inversion of the denominator + _check_option("inversion", inversion, ("matrix", "single")) + if ( + inversion == "single" + and n_orient > 1 + and pick_ori == "vector" + and weight_norm == "unit-noise-gain-invariant" + ): + raise ValueError( + 'Cannot use pick_ori="vector" with inversion="single" and ' + 'weight_norm="unit-noise-gain-invariant"' + ) + if reduce_rank and inversion == "single": + raise ValueError( + 'reduce_rank cannot be used with inversion="single"; ' + 'consider using inversion="matrix" if you have a ' + "rank-deficient forward model (i.e., from a sphere " + "model with MEG channels), otherwise consider using " + "reduce_rank=False" + ) + if n_orient > 1: + _, Gk_s, _ = np.linalg.svd(Gk, full_matrices=False) + assert Gk_s.shape == (n_sources, n_orient) + if not reduce_rank and (Gk_s[:, 0] > 1e6 * Gk_s[:, 2]).any(): + raise ValueError( + "Singular matrix detected when estimating spatial filters. " + "Consider reducing the rank of the forward operator by using " + "reduce_rank=True." + ) + del Gk_s + + # + # 1. Reduce rank of the lead field + # + if reduce_rank: + Gk = _reduce_leadfield_rank(Gk) + + def _compute_bf_terms(Gk, Cm_inv): + bf_numer = np.matmul(Gk.swapaxes(-2, -1).conj(), Cm_inv) + bf_denom = np.matmul(bf_numer, Gk) + return bf_numer, bf_denom + + # + # 2. Reorient lead field in direction of max power or normal + # + if pick_ori == "max-power": + assert n_orient == 3 + _, bf_denom = _compute_bf_terms(Gk, Cm_inv) + if weight_norm is None: + ori_numer = np.eye(n_orient)[np.newaxis] + ori_denom = bf_denom + else: + # compute power, cf Sekihara & Nagarajan 2008, eq. 4.47 + ori_numer = bf_denom + # Cm_inv should be Hermitian so no need for .T.conj() + ori_denom = np.matmul( + np.matmul(Gk.swapaxes(-2, -1).conj(), Cm_inv @ Cm_inv), Gk + ) + ori_denom_inv = _sym_inv_sm(ori_denom, reduce_rank, inversion, sk) + ori_pick = np.matmul(ori_denom_inv, ori_numer) + assert ori_pick.shape == (n_sources, n_orient, n_orient) + + # pick eigenvector that corresponds to maximum eigenvalue: + eig_vals, eig_vecs = np.linalg.eig(ori_pick.real) # not Hermitian! 
+ # sort eigenvectors by eigenvalues for picking: + order = np.argsort(np.abs(eig_vals), axis=-1) + # eig_vals = np.take_along_axis(eig_vals, order, axis=-1) + max_power_ori = eig_vecs[np.arange(len(eig_vecs)), :, order[:, -1]] + assert max_power_ori.shape == (n_sources, n_orient) + + # set the (otherwise arbitrary) sign to match the normal + signs = np.sign(np.sum(max_power_ori * nn, axis=1, keepdims=True)) + signs[signs == 0] = 1.0 + max_power_ori *= signs + + # Compute the lead field for the optimal orientation, + # and adjust numer/denom + Gk = np.matmul(Gk, max_power_ori[..., np.newaxis]) + n_orient = 1 + else: + max_power_ori = None + if pick_ori == "normal": + Gk = Gk[..., 2:3] + n_orient = 1 + + # + # 3. Compute numerator and denominator of beamformer formula (unit-gain) + # + + bf_numer, bf_denom = _compute_bf_terms(Gk, Cm_inv) + assert bf_denom.shape == (n_sources,) + (n_orient,) * 2 + assert bf_numer.shape == (n_sources, n_orient, n_channels) + del Gk # lead field has been adjusted and should not be used anymore + + # + # 4. Invert the denominator + # + + # Here W is W_ug, i.e.: + # G.T @ Cm_inv / (G.T @ Cm_inv @ G) + bf_denom_inv = _sym_inv_sm(bf_denom, reduce_rank, inversion, sk) + assert bf_denom_inv.shape == (n_sources, n_orient, n_orient) + W = np.matmul(bf_denom_inv, bf_numer) + assert W.shape == (n_sources, n_orient, n_channels) + del bf_denom_inv, sk + + # + # 5. Re-scale filter weights according to the selected weight_norm + # + + # Weight normalization is done by computing, for each source:: + # + # W_ung = W_ug / sqrt(W_ug @ W_ug.T) + # + # with W_ung referring to the unit-noise-gain (weight normalized) filter + # and W_ug referring to the above-calculated unit-gain filter stored in W. + + if weight_norm is not None: + # Three different ways to calculate the normalization factors here. + # Only matters when in vector mode, as otherwise n_orient == 1 and + # they are all equivalent. + # + # In MNE < 0.21, we just used the Frobenius matrix norm: + # + # noise_norm = np.linalg.norm(W, axis=(1, 2), keepdims=True) + # assert noise_norm.shape == (n_sources, 1, 1) + # W /= noise_norm + # + # Sekihara 2008 says to use sqrt(diag(W_ug @ W_ug.T)), which is not + # rotation invariant: + if weight_norm in ("unit-noise-gain", "nai"): + noise_norm = np.matmul(W, W.swapaxes(-2, -1).conj()).real + noise_norm = np.reshape( # np.diag operation over last two axes + noise_norm, (n_sources, -1, 1) + )[:, :: n_orient + 1] + np.sqrt(noise_norm, out=noise_norm) + noise_norm[noise_norm == 0] = np.inf + assert noise_norm.shape == (n_sources, n_orient, 1) + W /= noise_norm + else: + assert weight_norm == "unit-noise-gain-invariant" + # Here we use sqrtm. The shortcut: + # + # use = W + # + # ... does not match the direct route (it is rotated!), so we'll + # use the direct one to match FieldTrip: + use = bf_numer + inner = np.matmul(use, use.swapaxes(-2, -1).conj()) + W = np.matmul(_sym_mat_pow(inner, -0.5), use) + noise_norm = 1.0 + + if weight_norm == "nai": + # Estimate noise level based on covariance matrix, taking the + # first eigenvalue that falls outside the signal subspace or the + # loading factor used during regularization, whichever is largest. + if rank > len(Cm): + # Covariance matrix is full rank, no noise subspace! + # Use the loading factor as noise ceiling. + if loading_factor == 0: + raise RuntimeError( + "Cannot compute noise subspace with a full-rank " + "covariance matrix and no regularization. 
Try " + "manually specifying the rank of the covariance " + "matrix or using regularization." + ) + noise = loading_factor + else: + noise, _ = np.linalg.eigh(Cm) + noise = noise[-rank] + noise = max(noise, loading_factor) + W /= np.sqrt(noise) + + W = W.reshape(n_sources * n_orient, n_channels) + logger.info("Filter computation complete") + return W, max_power_ori + + +def _compute_power(Cm, W, n_orient): + """Use beamformer filters to compute source power. + + Parameters + ---------- + Cm : ndarray, shape (n_channels, n_channels) + Data covariance matrix or CSD matrix. + W : ndarray, shape (nvertices*norient, nchannels) + Beamformer weights. + + Returns + ------- + power : ndarray, shape (nvertices,) + Source power. + """ + n_sources = W.shape[0] // n_orient + + Wk = W.reshape(n_sources, n_orient, W.shape[1]) + source_power = np.trace( + (Wk @ Cm @ Wk.conj().transpose(0, 2, 1)).real, axis1=1, axis2=2 + ) + + return source_power + + +class Beamformer(dict): + """A computed beamformer. + + Notes + ----- + .. versionadded:: 0.17 + """ + + def copy(self): + """Copy the beamformer. + + Returns + ------- + beamformer : instance of Beamformer + A deep copy of the beamformer. + """ + return deepcopy(self) + + def __repr__(self): # noqa: D105 + n_verts = sum(len(v) for v in self["vertices"]) + n_channels = len(self["ch_names"]) + if self["subject"] is None: + subject = "unknown" + else: + subject = f'"{self["subject"]}"' + out = " 1: + logger.info( + " computing DICS spatial filter at " + f"{round(freq, 2)} Hz ({i + 1}/{n_freqs})" + ) + + Cm = csd.get_data(index=i) + + # XXX: Weird that real_filter happens *before* whitening, which could + # make things complex again...? + if real_filter: + Cm = Cm.real + + # compute spatial filter + n_orient = 3 if is_free_ori else 1 + W, max_power_ori = _compute_beamformer( + G, + Cm, + reg, + n_orient, + weight_norm, + pick_ori, + reduce_rank, + rank=csd_int_rank[i], + inversion=inversion, + nn=nn, + orient_std=orient_std, + whitener=whitener, + ) + Ws.append(W) + max_oris.append(max_power_ori) + + Ws = np.array(Ws) + if pick_ori == "max-power": + max_oris = np.array(max_oris) + else: + max_oris = None + + src_type = _get_src_type(forward["src"], vertices) + subject = _subject_from_forward(forward) + is_free_ori = is_free_ori if pick_ori in [None, "vector"] else False + n_sources = np.sum([len(v) for v in vertices]) + + filters = Beamformer( + kind="DICS", + weights=Ws, + csd=csd, + ch_names=ch_names, + proj=proj, + vertices=vertices, + n_sources=n_sources, + subject=subject, + pick_ori=pick_ori, + inversion=inversion, + weight_norm=weight_norm, + src_type=src_type, + source_nn=forward["source_nn"].copy(), + is_free_ori=is_free_ori, + whitener=whitener, + max_power_ori=max_oris, + ) + + return filters + + +def _prepare_noise_csd(csd, noise_csd, real_filter): + if noise_csd is not None: + csd, noise_csd = equalize_channels([csd, noise_csd]) + # Use the same noise CSD for all frequencies + if len(noise_csd.frequencies) > 1: + noise_csd = noise_csd.mean() + noise_csd = noise_csd.get_data(as_cov=True) + if real_filter: + noise_csd["data"] = noise_csd["data"].real + return csd, noise_csd + + +def _apply_dics(data, filters, info, tmin, tfr=False): + """Apply DICS spatial filter to data for source reconstruction.""" + if isinstance(data, np.ndarray) and data.ndim == (2 + tfr): + data = [data] + one_epoch = True + else: + one_epoch = False + + Ws = filters["weights"] + one_freq = len(Ws) == 1 + + subject = filters["subject"] + # compatibility with 0.16, add src_type 
+    filters, warn_text = _check_src_type(filters)
+
+    for i, M in enumerate(data):
+        if not one_epoch:
+            logger.info(f"Processing epoch : {i + 1}")
+
+        # Apply SSPs
+        if not tfr:  # save computation, only compute once
+            M_w = _proj_whiten_data(M, info["projs"], filters)
+
+        stcs = []
+        for j, W in enumerate(Ws):
+            if tfr:  # must compute for each frequency
+                M_w = _proj_whiten_data(M[:, j], info["projs"], filters)
+
+            # project to source space using beamformer weights
+            sol = np.dot(W, M_w)
+
+            if filters["is_free_ori"] and filters["pick_ori"] != "vector":
+                logger.info("combining the current components...")
+                sol = combine_xyz(sol)
+
+            tstep = 1.0 / info["sfreq"]
+
+            stcs.append(
+                _make_stc(
+                    sol,
+                    vertices=filters["vertices"],
+                    src_type=filters["src_type"],
+                    tmin=tmin,
+                    tstep=tstep,
+                    subject=subject,
+                    vector=(filters["pick_ori"] == "vector"),
+                    source_nn=filters["source_nn"],
+                    warn_text=warn_text,
+                )
+            )
+        if one_freq:
+            yield stcs[0]
+        else:
+            yield stcs
+
+    logger.info("[done]")
+
+
+@verbose
+def apply_dics(evoked, filters, verbose=None):
+    """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights.
+
+    Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights
+    on evoked data.
+
+    .. warning:: The result of this function is meant as an intermediate step
+                 for further processing (such as computing connectivity). If
+                 you are interested in estimating source time courses, use an
+                 LCMV beamformer (:func:`make_lcmv`, :func:`apply_lcmv`)
+                 instead. If you are interested in estimating spectral power at
+                 the source level, use :func:`apply_dics_csd`.
+    .. warning:: This implementation has not been heavily tested so please
+                 report any issues or suggestions.
+
+    Parameters
+    ----------
+    evoked : Evoked
+        Evoked data to apply the DICS beamformer weights to.
+    filters : instance of Beamformer
+        DICS spatial filter (beamformer weights)
+        Filter weights returned from :func:`make_dics`.
+    %(verbose)s
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate | list
+        Source time courses. If the DICS beamformer has been computed for more
+        than one frequency, a list is returned containing for each frequency
+        the corresponding time courses.
+
+    See Also
+    --------
+    apply_dics_epochs
+    apply_dics_tfr_epochs
+    apply_dics_csd
+    """  # noqa: E501
+    _check_reference(evoked)
+
+    info = evoked.info
+    data = evoked.data
+    tmin = evoked.times[0]
+
+    sel = _check_channels_spatial_filter(evoked.ch_names, filters)
+    data = data[sel]
+
+    stc = _apply_dics(data=data, filters=filters, info=info, tmin=tmin)
+
+    return next(stc)
+
+
+@verbose
+def apply_dics_epochs(epochs, filters, return_generator=False, verbose=None):
+    """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights.
+
+    Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights
+    on single trial data.
+
+    .. warning:: The result of this function is meant as an intermediate step
+                 for further processing (such as computing connectivity). If
+                 you are interested in estimating source time courses, use an
+                 LCMV beamformer (:func:`make_lcmv`, :func:`apply_lcmv`)
+                 instead. If you are interested in estimating spectral power at
+                 the source level, use :func:`apply_dics_csd`.
+    .. warning:: This implementation has not been heavily tested so please
+                 report any issues or suggestions.
+
+    Parameters
+    ----------
+    epochs : Epochs
+        Single trial epochs.
+    filters : instance of Beamformer
+        DICS spatial filter (beamformer weights)
+        Filter weights returned from :func:`make_dics`. The DICS filters must
+        have been computed for a single frequency only.
+    return_generator : bool
+        Return a generator object instead of a list. This allows iterating
+        over the stcs without having to keep them all in memory.
+    %(verbose)s
+
+    Returns
+    -------
+    stc : list | generator of (SourceEstimate | VolSourceEstimate)
+        The source estimates for all epochs.
+
+    See Also
+    --------
+    apply_dics
+    apply_dics_tfr_epochs
+    apply_dics_csd
+    """
+    _check_reference(epochs)
+
+    if len(filters["weights"]) > 1:
+        raise ValueError(
+            "This function only works on DICS beamformer weights that have "
+            "been computed for a single frequency. When calling make_dics(), "
+            "make sure to use a CSD object with only a single frequency (or "
+            "frequency-bin) defined."
+        )
+
+    info = epochs.info
+    tmin = epochs.times[0]
+
+    sel = _check_channels_spatial_filter(epochs.ch_names, filters)
+    data = epochs.get_data(sel)
+
+    stcs = _apply_dics(data=data, filters=filters, info=info, tmin=tmin)
+
+    if not return_generator:
+        stcs = list(stcs)
+
+    return stcs
+
+
+@verbose
+def apply_dics_tfr_epochs(epochs_tfr, filters, return_generator=False, verbose=None):
+    """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights.
+
+    Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights
+    on single trial time-frequency data.
+
+    Parameters
+    ----------
+    epochs_tfr : EpochsTFR
+        Single trial time-frequency epochs.
+    filters : instance of Beamformer
+        DICS spatial filter (beamformer weights)
+        Filter weights returned from :func:`make_dics`.
+    return_generator : bool
+        Return a generator object instead of a list. This allows iterating
+        over the stcs without having to keep them all in memory.
+    %(verbose)s
+
+    Returns
+    -------
+    stcs : list of list of (SourceEstimate | VectorSourceEstimate | VolSourceEstimate)
+        The source estimates for all epochs (outside list) and for
+        all frequencies (inside list).
+
+    See Also
+    --------
+    apply_dics
+    apply_dics_epochs
+    apply_dics_csd
+    """  # noqa E501
+    _validate_type(epochs_tfr, EpochsTFR)
+    _check_tfr_complex(epochs_tfr)
+
+    if filters["pick_ori"] == "vector":
+        warn(
+            "Using a vector solution to compute power will lead to "
+            "inaccurate directions (only in the first quadrant) "
+            "because power is a strictly positive (squared) metric. "
+            "Using singular value decomposition (SVD) to determine "
+            "the direction is not yet supported in MNE."
+        )
+
+    sel = _check_channels_spatial_filter(epochs_tfr.ch_names, filters)
+    data = epochs_tfr.data[:, sel, :, :]
+
+    stcs = _apply_dics(data, filters, epochs_tfr.info, epochs_tfr.tmin, tfr=True)
+    if not return_generator:
+        stcs = [[stc for stc in tfr_stcs] for tfr_stcs in stcs]
+    return stcs
+
+
+@verbose
+def apply_dics_csd(csd, filters, verbose=None):
+    """Apply Dynamic Imaging of Coherent Sources (DICS) beamformer weights.
+
+    Apply a previously computed DICS beamformer to a cross-spectral density
+    (CSD) object to estimate source power in time and frequency windows
+    specified in the CSD object :footcite:`GrossEtAl2001`.
+
+    .. note:: Only power can be computed from the cross-spectral density, not
+              complex phase-amplitude, so vector DICS filters will be
+              converted to scalar source estimates since power is strictly
+              positive and so 3D directions cannot be combined meaningfully
+              (the direction would be confined to the positive quadrant).
+
+    Parameters
+    ----------
+    csd : instance of CrossSpectralDensity
+        The data cross-spectral density (CSD) matrices.
A source estimate is + performed for each frequency or frequency-bin defined in the CSD + object. + filters : instance of Beamformer + DICS spatial filter (beamformer weights) + Filter weights returned from `make_dics`. + %(verbose)s + + Returns + ------- + stc : SourceEstimate + Source power with frequency instead of time. + frequencies : list of float + The frequencies for which the source power has been computed. If the + data CSD object defines frequency-bins instead of exact frequencies, + the mean of each bin is returned. + + See Also + -------- + apply_dics + apply_dics_epochs + apply_dics_tfr_epochs + + References + ---------- + .. footbibliography:: + """ # noqa: E501 + ch_names = filters["ch_names"] + vertices = filters["vertices"] + n_orient = 3 if filters["is_free_ori"] else 1 + subject = filters["subject"] + whitener = filters["whitener"] + n_sources = filters["n_sources"] + + # If CSD is summed over multiple frequencies, take the average frequency + frequencies = [np.mean(dfreq) for dfreq in csd.frequencies] + n_freqs = len(frequencies) + + source_power = np.zeros((n_sources, len(csd.frequencies))) + + # Ensure the CSD is in the same order as the weights + csd_picks = [csd.ch_names.index(ch) for ch in ch_names] + + logger.info("Computing DICS source power...") + for i, freq in enumerate(frequencies): + if n_freqs > 1: + logger.info( + " applying DICS spatial filter at " + f"{round(freq, 2)} Hz ({i + 1}/{n_freqs})" + ) + + Cm = csd.get_data(index=i) + Cm = Cm[csd_picks, :][:, csd_picks] + W = filters["weights"][i] + + # Whiten the CSD + Cm = np.dot(whitener, np.dot(Cm, whitener.conj().T)) + + source_power[:, i] = _compute_power(Cm, W, n_orient) + + logger.info("[done]") + + # compatibility with 0.16, add src_type as None if not present: + filters, warn_text = _check_src_type(filters) + + return ( + _make_stc( + source_power, + vertices=vertices, + src_type=filters["src_type"], + tmin=0.0, + tstep=1.0, + subject=subject, + warn_text=warn_text, + ), + frequencies, + ) diff --git a/mne/beamformer/_lcmv.py b/mne/beamformer/_lcmv.py new file mode 100644 index 0000000..cd3b291 --- /dev/null +++ b/mne/beamformer/_lcmv.py @@ -0,0 +1,503 @@ +"""Compute Linearly constrained minimum variance (LCMV) beamformer.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +from .._fiff.meas_info import _simplify_info +from .._fiff.pick import pick_channels_cov, pick_info +from ..forward import _subject_from_forward +from ..minimum_norm.inverse import _check_depth, _check_reference, combine_xyz +from ..rank import compute_rank +from ..source_estimate import _get_src_type, _make_stc +from ..utils import ( + _check_channels_spatial_filter, + _check_info_inv, + _check_one_ch_type, + logger, + verbose, +) +from ._compute_beamformer import ( + Beamformer, + _check_src_type, + _compute_beamformer, + _compute_power, + _prepare_beamformer_input, + _proj_whiten_data, +) + + +@verbose +def make_lcmv( + info, + forward, + data_cov, + reg=0.05, + noise_cov=None, + label=None, + pick_ori=None, + rank="info", + weight_norm="unit-noise-gain-invariant", + reduce_rank=False, + depth=None, + inversion="matrix", + verbose=None, +): + """Compute LCMV spatial filter. + + Parameters + ---------- + %(info_not_none)s + Specifies the channels to include. Bad channels (in ``info['bads']``) + are not used. + forward : instance of Forward + Forward operator. + data_cov : instance of Covariance + The data covariance. 
+    reg : float
+        The regularization for the whitened data covariance.
+    noise_cov : instance of Covariance
+        The noise covariance. If provided, whitening will be done. Providing a
+        noise covariance is mandatory if you mix sensor types, e.g.
+        gradiometers with magnetometers or EEG with MEG.
+
+        .. note::
+            If ``noise_cov`` is ``None`` and ``weight_norm='unit-noise-gain'``,
+            the unit noise is assumed to be 1 in SI units, e.g., 1 T for
+            magnetometers, 1 V for EEG, so resulting amplitudes will be tiny.
+            Consider using :func:`mne.make_ad_hoc_cov` to provide a
+            ``noise_cov`` to set noise values that are more reasonable for
+            neural data or using ``weight_norm='nai'`` for weight-normalized
+            beamformer output that is scaled by a noise estimate.
+    label : instance of Label
+        Restricts the LCMV solution to a given label.
+    %(pick_ori_bf)s
+
+        - ``'vector'``
+            Keeps the currents for each direction separate
+    %(rank_info)s
+    %(weight_norm)s
+
+        Defaults to ``'unit-noise-gain-invariant'``.
+    %(reduce_rank)s
+    %(depth)s
+
+        .. versionadded:: 0.18
+    %(inversion_bf)s
+
+        .. versionadded:: 0.21
+    %(verbose)s
+
+    Returns
+    -------
+    filters : instance of Beamformer
+        Dictionary containing filter weights from LCMV beamformer.
+        Contains the following keys:
+
+        'kind' : str
+            The type of beamformer, in this case 'LCMV'.
+        'weights' : array
+            The filter weights of the beamformer.
+        'data_cov' : instance of Covariance
+            The data covariance matrix used to compute the beamformer.
+        'noise_cov' : instance of Covariance | None
+            The noise covariance matrix used to compute the beamformer.
+        'whitener' : None | ndarray, shape (n_channels, n_channels)
+            Whitening matrix, provided if whitening was applied to the
+            covariance matrix and leadfield during computation of the
+            beamformer weights.
+        'weight_norm' : str | None
+            Type of weight normalization used to compute the filter
+            weights.
+        'pick_ori' : None | 'max-power' | 'normal' | 'vector'
+            The orientation in which the beamformer filters were computed.
+        'ch_names' : list of str
+            Channels used to compute the beamformer.
+        'proj' : ndarray, shape (n_channels, n_channels)
+            Projections used to compute the beamformer.
+        'is_ssp' : bool
+            If True, projections were applied prior to filter computation.
+        'vertices' : list
+            Vertices for which the filter weights were computed.
+        'is_free_ori' : bool
+            If True, the filter was computed with free source orientation.
+        'n_sources' : int
+            Number of source locations for which the filter weights were
+            computed.
+        'src_type' : str
+            Type of source space.
+        'source_nn' : ndarray, shape (n_sources, 3)
+            For each source location, the surface normal.
+        'subject' : str
+            The subject ID.
+        'rank' : int
+            The rank of the data covariance matrix used to compute the
+            beamformer weights.
+        'max_power_ori' : ndarray, shape (n_sources, 3) | None
+            When pick_ori='max-power', this field contains the estimated
+            direction of maximum power at each source location.
+        'inversion' : 'single' | 'matrix'
+            Whether the spatial filters were computed for each dipole
+            separately or jointly for all dipoles at each vertex using a
+            matrix inversion.
+
+    Notes
+    -----
+    The original reference is :footcite:`VanVeenEtAl1997`.
+
+    To obtain the Sekihara unit-noise-gain vector beamformer, you should use
+    ``weight_norm='unit-noise-gain', pick_ori='vector'`` followed by
+    :meth:`vec_stc.project('pca', src) <mne.VectorSourceEstimate.project>`.
+
+    ..
versionchanged:: 0.21 + The computations were extensively reworked, and the default for + ``weight_norm`` was set to ``'unit-noise-gain-invariant'``. + + References + ---------- + .. footbibliography:: + """ + # check number of sensor types present in the data and ensure a noise cov + info = _simplify_info(info, keep=("proc_history",)) + noise_cov, _, allow_mismatch = _check_one_ch_type( + "lcmv", info, forward, data_cov, noise_cov + ) + # XXX we need this extra picking step (can't just rely on minimum norm's + # because there can be a mismatch. Should probably add an extra arg to + # _prepare_beamformer_input at some point (later) + picks = _check_info_inv(info, forward, data_cov, noise_cov) + info = pick_info(info, picks) + data_rank = compute_rank(data_cov, rank=rank, info=info) + noise_rank = compute_rank(noise_cov, rank=rank, info=info) + for key in data_rank: + if ( + key not in noise_rank or data_rank[key] != noise_rank[key] + ) and not allow_mismatch: + raise ValueError( + f"{key} data rank ({data_rank[key]}) did not match the noise rank (" + f"{noise_rank.get(key, None)})" + ) + del noise_rank + rank = data_rank + logger.info(f"Making LCMV beamformer with rank {rank}") + del data_rank + depth = _check_depth(depth, "depth_sparse") + if inversion == "single": + depth["combine_xyz"] = False + + ( + is_free_ori, + info, + proj, + vertno, + G, + whitener, + nn, + orient_std, + ) = _prepare_beamformer_input( + info, + forward, + label, + pick_ori, + noise_cov=noise_cov, + rank=rank, + pca=False, + **depth, + ) + ch_names = list(info["ch_names"]) + + data_cov = pick_channels_cov(data_cov, include=ch_names) + Cm = data_cov._get_square() + if "estimator" in data_cov: + del data_cov["estimator"] + rank_int = sum(rank.values()) + del rank + + # compute spatial filter + n_orient = 3 if is_free_ori else 1 + W, max_power_ori = _compute_beamformer( + G, + Cm, + reg, + n_orient, + weight_norm, + pick_ori, + reduce_rank, + rank_int, + inversion=inversion, + nn=nn, + orient_std=orient_std, + whitener=whitener, + ) + + # get src type to store with filters for _make_stc + src_type = _get_src_type(forward["src"], vertno) + + # get subject to store with filters + subject_from = _subject_from_forward(forward) + + # Is the computed beamformer a scalar or vector beamformer? 
+ is_free_ori = is_free_ori if pick_ori in [None, "vector"] else False + is_ssp = bool(info["projs"]) + + filters = Beamformer( + kind="LCMV", + weights=W, + data_cov=data_cov, + noise_cov=noise_cov, + whitener=whitener, + weight_norm=weight_norm, + pick_ori=pick_ori, + ch_names=ch_names, + proj=proj, + is_ssp=is_ssp, + vertices=vertno, + is_free_ori=is_free_ori, + n_sources=forward["nsource"], + src_type=src_type, + source_nn=forward["source_nn"].copy(), + subject=subject_from, + rank=rank_int, + max_power_ori=max_power_ori, + inversion=inversion, + ) + + return filters + + +def _apply_lcmv(data, filters, info, tmin): + """Apply LCMV spatial filter to data for source reconstruction.""" + if isinstance(data, np.ndarray) and data.ndim == 2: + data = [data] + return_single = True + else: + return_single = False + + W = filters["weights"] + + for i, M in enumerate(data): + if len(M) != len(filters["ch_names"]): + raise ValueError("data and picks must have the same length") + + if not return_single: + logger.info(f"Processing epoch : {i + 1}") + + M = _proj_whiten_data(M, info["projs"], filters) + + # project to source space using beamformer weights + vector = False + if filters["is_free_ori"]: + sol = np.dot(W, M) + if filters["pick_ori"] == "vector": + vector = True + else: + logger.info("combining the current components...") + sol = combine_xyz(sol) + else: + # Linear inverse: do computation here or delayed + if M.shape[0] < W.shape[0] and filters["pick_ori"] != "max-power": + sol = (W, M) + else: + sol = np.dot(W, M) + + tstep = 1.0 / info["sfreq"] + + # compatibility with 0.16, add src_type as None if not present: + filters, warn_text = _check_src_type(filters) + + yield _make_stc( + sol, + vertices=filters["vertices"], + tmin=tmin, + tstep=tstep, + subject=filters["subject"], + vector=vector, + source_nn=filters["source_nn"], + src_type=filters["src_type"], + warn_text=warn_text, + ) + + logger.info("[done]") + + +@verbose +def apply_lcmv(evoked, filters, *, verbose=None): + """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. + + Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights + on evoked data. + + Parameters + ---------- + evoked : Evoked + Evoked data to invert. + filters : instance of Beamformer + LCMV spatial filter (beamformer weights). + Filter weights returned from :func:`make_lcmv`. + %(verbose)s + + Returns + ------- + stc : SourceEstimate | VolSourceEstimate | VectorSourceEstimate + Source time courses. + + See Also + -------- + make_lcmv, apply_lcmv_raw, apply_lcmv_epochs, apply_lcmv_cov + + Notes + ----- + .. versionadded:: 0.18 + """ + _check_reference(evoked) + + info = evoked.info + data = evoked.data + tmin = evoked.times[0] + + sel = _check_channels_spatial_filter(evoked.ch_names, filters) + data = data[sel] + + stc = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin) + + return next(stc) + + +@verbose +def apply_lcmv_epochs(epochs, filters, *, return_generator=False, verbose=None): + """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights. + + Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights + on single trial data. + + Parameters + ---------- + epochs : Epochs + Single trial epochs. + filters : instance of Beamformer + LCMV spatial filter (beamformer weights) + Filter weights returned from :func:`make_lcmv`. + return_generator : bool + Return a generator object instead of a list. This allows iterating + over the stcs without having to keep them all in memory. 
+    %(verbose)s
+
+    Returns
+    -------
+    stc : list | generator of (SourceEstimate | VolSourceEstimate)
+        The source estimates for all epochs.
+
+    See Also
+    --------
+    make_lcmv, apply_lcmv_raw, apply_lcmv, apply_lcmv_cov
+    """
+    _check_reference(epochs)
+
+    info = epochs.info
+    tmin = epochs.times[0]
+
+    sel = _check_channels_spatial_filter(epochs.ch_names, filters)
+    data = epochs.get_data(sel)
+    stcs = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin)
+
+    if not return_generator:
+        stcs = [s for s in stcs]
+
+    return stcs
+
+
+@verbose
+def apply_lcmv_raw(raw, filters, start=None, stop=None, *, verbose=None):
+    """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights.
+
+    Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights
+    on raw data.
+
+    Parameters
+    ----------
+    raw : mne.io.Raw
+        Raw data to invert.
+    filters : instance of Beamformer
+        LCMV spatial filter (beamformer weights).
+        Filter weights returned from :func:`make_lcmv`.
+    start : int
+        Index of first time sample (an index, not a time in seconds).
+    stop : int
+        Index of first time sample not to include (an index, not a time in
+        seconds).
+    %(verbose)s
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        Source time courses.
+
+    See Also
+    --------
+    make_lcmv, apply_lcmv_epochs, apply_lcmv, apply_lcmv_cov
+    """
+    _check_reference(raw)
+
+    info = raw.info
+
+    sel = _check_channels_spatial_filter(raw.ch_names, filters)
+    data, times = raw[sel, start:stop]
+    tmin = times[0]
+
+    stc = _apply_lcmv(data=data, filters=filters, info=info, tmin=tmin)
+
+    return next(stc)
+
+
+@verbose
+def apply_lcmv_cov(data_cov, filters, verbose=None):
+    """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights.
+
+    Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights
+    to a data covariance matrix to estimate source power.
+
+    Parameters
+    ----------
+    data_cov : instance of Covariance
+        Data covariance matrix.
+    filters : instance of Beamformer
+        LCMV spatial filter (beamformer weights).
+        Filter weights returned from :func:`make_lcmv`.
+    %(verbose)s
+
+    Returns
+    -------
+    stc : SourceEstimate | VolSourceEstimate
+        Source power.
+
+    See Also
+    --------
+    make_lcmv, apply_lcmv, apply_lcmv_epochs, apply_lcmv_raw
+    """
+    sel = _check_channels_spatial_filter(data_cov.ch_names, filters)
+    sel_names = [data_cov.ch_names[ii] for ii in sel]
+    data_cov = pick_channels_cov(data_cov, sel_names)
+
+    n_orient = filters["weights"].shape[0] // filters["n_sources"]
+    # Need to project and whiten along both dimensions
+    data = _proj_whiten_data(data_cov["data"].T, data_cov["projs"], filters)
+    data = _proj_whiten_data(data.T, data_cov["projs"], filters)
+    del data_cov
+    source_power = _compute_power(data, filters["weights"], n_orient)
+
+    # compatibility with 0.16, add src_type as None if not present:
+    filters, warn_text = _check_src_type(filters)
+
+    return _make_stc(
+        source_power,
+        vertices=filters["vertices"],
+        src_type=filters["src_type"],
+        tmin=0.0,
+        tstep=1.0,
+        subject=filters["subject"],
+        source_nn=filters["source_nn"],
+        warn_text=warn_text,
+    )
diff --git a/mne/beamformer/_rap_music.py b/mne/beamformer/_rap_music.py
new file mode 100644
index 0000000..b09e1d2
--- /dev/null
+++ b/mne/beamformer/_rap_music.py
@@ -0,0 +1,315 @@
+"""Compute a Recursively Applied and Projected MUltiple Signal Classification (RAP-MUSIC)."""  # noqa
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
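The LCMV functions above chain together into a short pipeline. A minimal sketch, assuming `evoked`, `forward`, `data_cov`, and `noise_cov` already exist (for example from `mne.read_evokeds`, `mne.read_forward_solution`, and `mne.compute_covariance`):

```python
from mne.beamformer import make_lcmv, apply_lcmv

# Build the spatial filter; a noise covariance is needed when sensor
# types are mixed (see the make_lcmv docstring above).
filters = make_lcmv(
    evoked.info,
    forward,
    data_cov,
    reg=0.05,
    noise_cov=noise_cov,
    pick_ori="max-power",
    weight_norm="unit-noise-gain-invariant",
)

# Apply the same filter to the evoked data to get source time courses.
stc = apply_lcmv(evoked, filters)
```

The same `filters` object can also be applied to epochs, raw data, or a covariance matrix via `apply_lcmv_epochs`, `apply_lcmv_raw`, and `apply_lcmv_cov`, as defined above.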
+ +import numpy as np +from scipy import linalg + +from .._fiff.pick import pick_channels_forward, pick_info +from ..fixes import _safe_svd +from ..forward import convert_forward_solution, is_fixed_orient +from ..inverse_sparse.mxne_inverse import _make_dipoles_sparse +from ..minimum_norm.inverse import _log_exp_var +from ..utils import _check_info_inv, fill_doc, logger, verbose +from ._compute_beamformer import _prepare_beamformer_input + + +@fill_doc +def _apply_rap_music( + data, info, times, forward, noise_cov, n_dipoles=2, picks=None, use_trap=False +): + """RAP-MUSIC or TRAP-MUSIC for evoked data. + + Parameters + ---------- + data : array, shape (n_channels, n_times) + Evoked data. + %(info_not_none)s + times : array + Times. + forward : instance of Forward + Forward operator. + noise_cov : instance of Covariance + The noise covariance. + n_dipoles : int + The number of dipoles to estimate. The default value is 2. + picks : list of int + Caller ensures this is a list of int. + use_trap : bool + Use the TRAP-MUSIC variant if True (default False). + + Returns + ------- + dipoles : list of instances of Dipole + The dipole fits. + explained_data : array | None + Data explained by the dipoles using a least square fitting with the + selected active dipoles and their estimated orientation. + """ + info = pick_info(info, picks) + del picks + # things are much simpler if we avoid surface orientation + align = forward["source_nn"].copy() + if forward["surf_ori"] and not is_fixed_orient(forward): + forward = convert_forward_solution(forward, surf_ori=False) + is_free_ori, info, _, _, G, whitener, _, _ = _prepare_beamformer_input( + info, forward, noise_cov=noise_cov, rank=None + ) + forward = pick_channels_forward(forward, info["ch_names"], ordered=True) + del info + + # whiten the data (leadfield already whitened) + M = np.dot(whitener, data) + del data + + _, eig_vectors = linalg.eigh(np.dot(M, M.T)) + phi_sig = eig_vectors[:, -n_dipoles:] + + n_orient = 3 if is_free_ori else 1 + G.shape = (G.shape[0], -1, n_orient) + gain = forward["sol"]["data"].copy() + gain.shape = G.shape + n_channels = G.shape[0] + A = np.empty((n_channels, n_dipoles)) + gain_dip = np.empty((n_channels, n_dipoles)) + oris = np.empty((n_dipoles, 3)) + poss = np.empty((n_dipoles, 3)) + + G_proj = G.copy() + phi_sig_proj = phi_sig.copy() + + idxs = list() + for k in range(n_dipoles): + subcorr_max = -1.0 + source_idx, source_ori, source_pos = 0, [0, 0, 0], [0, 0, 0] + for i_source in range(G.shape[1]): + Gk = G_proj[:, i_source] + subcorr, ori = _compute_subcorr(Gk, phi_sig_proj) + if subcorr > subcorr_max: + subcorr_max = subcorr + source_idx = i_source + source_ori = ori + source_pos = forward["source_rr"][i_source] + if n_orient == 3 and align is not None: + surf_normal = forward["source_nn"][3 * i_source + 2] + # make sure ori is aligned to the surface orientation + source_ori *= np.sign(source_ori @ surf_normal) or 1.0 + if n_orient == 1: + source_ori = forward["source_nn"][i_source] + + idxs.append(source_idx) + if n_orient == 3: + Ak = np.dot(G[:, source_idx], source_ori) + else: + Ak = G[:, source_idx, 0] + A[:, k] = Ak + oris[k] = source_ori + poss[k] = source_pos + + logger.info(f"source {k + 1} found: p = {source_idx}") + if n_orient == 3: + logger.info("ori = {} {} {}".format(*tuple(oris[k]))) + + projection = _compute_proj(A[:, : k + 1]) + G_proj = np.einsum("ab,bso->aso", projection, G) + phi_sig_proj = np.dot(projection, phi_sig) + if use_trap: + phi_sig_proj = phi_sig_proj[:, -(n_dipoles - k) :] + del G, 
G_proj
+
+    sol = linalg.lstsq(A, M)[0]
+    if n_orient == 3:
+        X = sol[:, np.newaxis] * oris[:, :, np.newaxis]
+        X.shape = (-1, len(times))
+    else:
+        X = sol
+
+    gain_active = gain[:, idxs]
+    if n_orient == 3:
+        gain_dip = (oris * gain_active).sum(-1)
+        idxs = np.array(idxs)
+        active_set = np.array([[3 * idxs, 3 * idxs + 1, 3 * idxs + 2]]).T.ravel()
+    else:
+        gain_dip = gain_active[:, :, 0]
+        active_set = idxs
+    gain_active = whitener @ gain_active.reshape(gain.shape[0], -1)
+    assert gain_active.shape == (n_channels, X.shape[0])
+
+    explained_data = gain_dip @ sol
+    M_estimate = whitener @ explained_data
+    _log_exp_var(M, M_estimate)
+    tstep = np.median(np.diff(times)) if len(times) > 1 else 1.0
+    dipoles = _make_dipoles_sparse(
+        X, active_set, forward, times[0], tstep, M, gain_active, active_is_idx=True
+    )
+    for dipole, ori in zip(dipoles, oris):
+        signs = np.sign((dipole.ori * ori).sum(-1, keepdims=True))
+        dipole.ori *= signs
+        dipole.amplitude *= signs[:, 0]
+    logger.info("[done]")
+    return dipoles, explained_data
+
+
+def _compute_subcorr(G, phi_sig):
+    """Compute the subspace correlation."""
+    Ug, Sg, Vg = _safe_svd(G, full_matrices=False)
+    # Now we look at the actual rank of the forward fields
+    # in G and handle the fact that it might be rank deficient,
+    # e.g. when using MEG and a sphere model for which the
+    # radial component will be truly 0.
+    rank = np.sum(Sg > (Sg[0] * 1e-6))
+    if rank == 0:
+        return 0, np.zeros(len(G))
+    rank = max(rank, 2)  # rank cannot be 1
+    Ug, Sg, Vg = Ug[:, :rank], Sg[:rank], Vg[:rank]
+    tmp = np.dot(Ug.T.conjugate(), phi_sig)
+    Uc, Sc, _ = _safe_svd(tmp, full_matrices=False)
+    X = np.dot(Vg.T / Sg[None, :], Uc[:, 0])  # subcorr
+    return Sc[0], X / np.linalg.norm(X)
+
+
+def _compute_proj(A):
+    """Compute the orthogonal projection operation for a manifold vector A."""
+    U, _, _ = _safe_svd(A, full_matrices=False)
+    return np.identity(A.shape[0]) - np.dot(U, U.T.conjugate())
+
+
+def _rap_music(evoked, forward, noise_cov, n_dipoles, return_residual, use_trap):
+    """RAP-/TRAP-MUSIC implementation."""
+    info = evoked.info
+    data = evoked.data
+    times = evoked.times
+
+    picks = _check_info_inv(info, forward, data_cov=None, noise_cov=noise_cov)
+
+    data = data[picks]
+
+    dipoles, explained_data = _apply_rap_music(
+        data, info, times, forward, noise_cov, n_dipoles, picks, use_trap
+    )
+
+    if return_residual:
+        residual = evoked.copy().pick([info["ch_names"][p] for p in picks])
+        residual.data -= explained_data
+        active_projs = [p for p in residual.info["projs"] if p["active"]]
+        for p in active_projs:
+            p["active"] = False
+        residual.add_proj(active_projs, remove_existing=True)
+        residual.apply_proj()
+        return dipoles, residual
+    else:
+        return dipoles
+
+
+@verbose
+def rap_music(
+    evoked,
+    forward,
+    noise_cov,
+    n_dipoles=5,
+    return_residual=False,
+    *,
+    verbose=None,
+):
+    """RAP-MUSIC source localization method.
+
+    Compute Recursively Applied and Projected MUltiple SIgnal Classification
+    (RAP-MUSIC) :footcite:`MosherLeahy1999,MosherLeahy1996` on evoked data.
+
+    .. note:: The goodness of fit (GOF) of all the returned dipoles is the
+              same and corresponds to the GOF of the full set of dipoles.
+
+    Parameters
+    ----------
+    evoked : instance of Evoked
+        Evoked data to localize.
+    forward : instance of Forward
+        Forward operator.
+    noise_cov : instance of Covariance
+        The noise covariance.
+    n_dipoles : int
+        The number of dipoles to look for. The default value is 5.
+    return_residual : bool
+        If True, the residual is returned as an Evoked instance.
+ %(verbose)s + + Returns + ------- + dipoles : list of instance of Dipole + The dipole fits. + residual : instance of Evoked + The residual a.k.a. data not explained by the dipoles. + Only returned if return_residual is True. + + See Also + -------- + mne.fit_dipole + mne.beamformer.trap_music + + Notes + ----- + .. versionadded:: 0.9.0 + + References + ---------- + .. footbibliography:: + """ + return _rap_music(evoked, forward, noise_cov, n_dipoles, return_residual, False) + + +@verbose +def trap_music( + evoked, + forward, + noise_cov, + n_dipoles=5, + return_residual=False, + *, + verbose=None, +): + """TRAP-MUSIC source localization method. + + Compute Truncated Recursively Applied and Projected MUltiple SIgnal Classification + (TRAP-MUSIC) :footcite:`Makela2018` on evoked data. + + .. note:: The goodness of fit (GOF) of all the returned dipoles is the + same and corresponds to the GOF of the full set of dipoles. + + Parameters + ---------- + evoked : instance of Evoked + Evoked data to localize. + forward : instance of Forward + Forward operator. + noise_cov : instance of Covariance + The noise covariance. + n_dipoles : int + The number of dipoles to look for. The default value is 5. + return_residual : bool + If True, the residual is returned as an Evoked instance. + %(verbose)s + + Returns + ------- + dipoles : list of instance of Dipole + The dipole fits. + residual : instance of Evoked + The residual a.k.a. data not explained by the dipoles. + Only returned if return_residual is True. + + See Also + -------- + mne.fit_dipole + mne.beamformer.rap_music + + Notes + ----- + .. versionadded:: 1.4 + + References + ---------- + .. footbibliography:: + """ + return _rap_music(evoked, forward, noise_cov, n_dipoles, return_residual, True) diff --git a/mne/beamformer/resolution_matrix.py b/mne/beamformer/resolution_matrix.py new file mode 100644 index 0000000..e2dd258 --- /dev/null +++ b/mne/beamformer/resolution_matrix.py @@ -0,0 +1,86 @@ +"""Compute resolution matrix for beamformers.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +from .._fiff.pick import pick_channels, pick_channels_forward, pick_info +from ..evoked import EvokedArray +from ..utils import fill_doc, logger +from ._lcmv import apply_lcmv + + +@fill_doc +def make_lcmv_resolution_matrix(filters, forward, info): + """Compute resolution matrix for LCMV beamformer. + + Parameters + ---------- + filters : instance of Beamformer + Dictionary containing filter weights from LCMV beamformer + (see mne.beamformer.make_lcmv). + forward : instance of Forward + Forward Solution with leadfield matrix. + %(info_not_none)s Used to compute LCMV filters. + + Returns + ------- + resmat : array, shape (n_dipoles_lcmv, n_dipoles_fwd) + Resolution matrix (filter matrix multiplied to leadfield from + forward solution). Numbers of rows (n_dipoles_lcmv) and columns + (n_dipoles_fwd) may differ by a factor depending on orientation + constraints of filter and forward solution, respectively (e.g. factor 3 + for free dipole orientation versus factor 1 for scalar beamformers). 
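+
+    A minimal usage sketch (``data_cov`` and ``noise_cov`` here are
+    hypothetical covariance objects; ``filters`` comes from
+    :func:`mne.beamformer.make_lcmv`)::
+
+        filters = make_lcmv(info, forward, data_cov, noise_cov=noise_cov)
+        resmat = make_lcmv_resolution_matrix(filters, forward, info)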
+ """ + # don't include bad channels from noise covariance matrix + bads_filt = filters["noise_cov"]["bads"] + ch_names = filters["noise_cov"]["names"] + + # good channels + ch_names = [c for c in ch_names if (c not in bads_filt)] + + # adjust channels in forward solution + forward = pick_channels_forward(forward, ch_names, ordered=True) + + # get leadfield matrix from forward solution + leadfield = forward["sol"]["data"] + + # get the filter weights for beamformer as matrix + filtmat = _get_matrix_from_lcmv(filters, forward, info) + + # compute resolution matrix + resmat = filtmat.dot(leadfield) + + logger.info(f"Dimensions of LCMV resolution matrix: {resmat.shape}.") + + return resmat + + +def _get_matrix_from_lcmv(filters, forward, info, verbose=None): + """Get inverse matrix for LCMV beamformer. + + Returns + ------- + invmat : array, shape (n_dipoles, n_channels) + Inverse matrix associated with LCMV beamformer filters. + """ + # number of channels for identity matrix + info = pick_info(info, pick_channels(info["ch_names"], filters["ch_names"])) + n_chs = len(info["ch_names"]) + + # create identity matrix as input for inverse operator + # set elements to zero for non-selected channels + id_mat = np.eye(n_chs) + + # convert identity matrix to evoked data type (pretending it's an epochs + evo_ident = EvokedArray(id_mat, info=info, tmin=0.0) + + # apply beamformer to identity matrix + stc_lcmv = apply_lcmv(evo_ident, filters, verbose=verbose) + + # turn source estimate into numpsy array + invmat = stc_lcmv.data + + return invmat diff --git a/mne/bem.py b/mne/bem.py new file mode 100644 index 0000000..d361272 --- /dev/null +++ b/mne/bem.py @@ -0,0 +1,2540 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# The computations in this code were primarily derived from Matti Hämäläinen's +# C code. 
+ +import glob +import json +import os +import os.path as op +import shutil +from collections import OrderedDict +from copy import deepcopy +from functools import partial +from pathlib import Path + +import numpy as np +from scipy.optimize import fmin_cobyla + +from ._fiff._digitization import _dig_kind_dict, _dig_kind_ints, _dig_kind_rev +from ._fiff.constants import FIFF, FWD +from ._fiff.open import fiff_open +from ._fiff.tag import find_tag +from ._fiff.tree import dir_tree_find +from ._fiff.write import ( + end_block, + start_and_end_file, + start_block, + write_float, + write_float_matrix, + write_int, + write_int_matrix, + write_string, +) +from .fixes import _compare_version, _safe_svd +from .surface import ( + _complete_sphere_surf, + _compute_nearest, + _fast_cross_nd_sum, + _get_ico_surface, + _get_solids, + complete_surface_info, + decimate_surface, + read_surface, + read_tri, + transform_surface_to, + write_surface, +) +from .transforms import Transform, _ensure_trans, apply_trans +from .utils import ( + _check_fname, + _check_freesurfer_home, + _check_head_radius, + _check_option, + _ensure_int, + _import_h5io_funcs, + _import_nibabel, + _on_missing, + _path_like, + _pl, + _TempDir, + _validate_type, + _verbose_safe_false, + get_subjects_dir, + logger, + path_like, + run_subprocess, + verbose, + warn, +) +from .viz.misc import plot_bem + +# ############################################################################ +# Compute BEM solution + +# The following approach is based on: +# +# de Munck JC: "A linear discretization of the volume conductor boundary +# integral equation using analytically integrated elements", +# IEEE Trans Biomed Eng. 1992 39(9) : 986 - 990 +# + + +class ConductorModel(dict): + """BEM or sphere model. + + See :func:`~mne.make_bem_model` and :func:`~mne.make_bem_solution` to create a + :class:`mne.bem.ConductorModel`. 
+ """ + + def __repr__(self): # noqa: D105 + if self["is_sphere"]: + center = ", ".join(f"{x * 1000.:.1f}" for x in self["r0"]) + rad = self.radius + if rad is None: # no radius / MEG only + extra = f"Sphere (no layers): r0=[{center}] mm" + else: + extra = ( + f"Sphere ({len(self['layers']) - 1} layer{_pl(self['layers'])}): " + f"r0=[{center}] R={rad * 1000.0:1.0f} mm" + ) + else: + extra = f"BEM ({len(self['surfs'])} layer{_pl(self['surfs'])})" + extra += f" solver={self['solver']}" + return f"" + + def copy(self): + """Return copy of ConductorModel instance.""" + return deepcopy(self) + + @property + def radius(self): + """Sphere radius if an EEG sphere model.""" + if not self["is_sphere"]: + raise RuntimeError("radius undefined for BEM") + return None if len(self["layers"]) == 0 else self["layers"][-1]["rad"] + + +def _calc_beta(rk, rk_norm, rk1, rk1_norm): + """Compute coefficients for calculating the magic vector omega.""" + rkk1 = rk1[0] - rk[0] + size = np.linalg.norm(rkk1) + rkk1 /= size + num = rk_norm + np.dot(rk, rkk1) + den = rk1_norm + np.dot(rk1, rkk1) + res = np.log(num / den) / size + return res + + +def _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area): + """Compute the linear potential matrix element computations.""" + omega = np.zeros((len(fros), 3)) + + # we replicate a little bit of the _get_solids code here for speed + # (we need some of the intermediate values later) + v1 = tri_rr[np.newaxis, 0, :] - fros + v2 = tri_rr[np.newaxis, 1, :] - fros + v3 = tri_rr[np.newaxis, 2, :] - fros + triples = _fast_cross_nd_sum(v1, v2, v3) + l1 = np.linalg.norm(v1, axis=1) + l2 = np.linalg.norm(v2, axis=1) + l3 = np.linalg.norm(v3, axis=1) + ss = l1 * l2 * l3 + ss += np.einsum("ij,ij,i->i", v1, v2, l3) + ss += np.einsum("ij,ij,i->i", v1, v3, l2) + ss += np.einsum("ij,ij,i->i", v2, v3, l1) + solids = np.arctan2(triples, ss) + + # We *could* subselect the good points from v1, v2, v3, triples, solids, + # l1, l2, and l3, but there are *very* few bad points. So instead we do + # some unnecessary calculations, and then omit them from the final + # solution. These three lines ensure we don't get invalid values in + # _calc_beta. + bad_mask = np.abs(solids) < np.pi / 1e6 + l1[bad_mask] = 1.0 + l2[bad_mask] = 1.0 + l3[bad_mask] = 1.0 + + # Calculate the magic vector vec_omega + beta = [ + _calc_beta(v1, l1, v2, l2)[:, np.newaxis], + _calc_beta(v2, l2, v3, l3)[:, np.newaxis], + _calc_beta(v3, l3, v1, l1)[:, np.newaxis], + ] + vec_omega = (beta[2] - beta[0]) * v1 + vec_omega += (beta[0] - beta[1]) * v2 + vec_omega += (beta[1] - beta[2]) * v3 + + area2 = 2.0 * tri_area + n2 = 1.0 / (area2 * area2) + # leave omega = 0 otherwise + # Put it all together... + yys = [v1, v2, v3] + idx = [0, 1, 2, 0, 2] + for k in range(3): + diff = yys[idx[k - 1]] - yys[idx[k + 1]] + zdots = _fast_cross_nd_sum(yys[idx[k + 1]], yys[idx[k - 1]], tri_nn) + omega[:, k] = -n2 * ( + area2 * zdots * 2.0 * solids - triples * (diff * vec_omega).sum(axis=-1) + ) + # omit the bad points from the solution + omega[bad_mask] = 0.0 + return omega + + +def _correct_auto_elements(surf, mat): + """Improve auto-element approximation.""" + pi2 = 2.0 * np.pi + tris_flat = surf["tris"].ravel() + misses = pi2 - mat.sum(axis=1) + for j, miss in enumerate(misses): + # How much is missing? + n_memb = len(surf["neighbor_tri"][j]) + assert n_memb > 0 # should be guaranteed by our surface checks + # The node itself receives one half + mat[j, j] = miss / 2.0 + # The rest is divided evenly among the member nodes... 
+ miss /= 4.0 * n_memb + members = np.where(j == tris_flat)[0] + mods = members % 3 + offsets = np.array([[1, 2], [-1, 1], [-1, -2]]) + tri_1 = members + offsets[mods, 0] + tri_2 = members + offsets[mods, 1] + for t1, t2 in zip(tri_1, tri_2): + mat[j, tris_flat[t1]] += miss + mat[j, tris_flat[t2]] += miss + return + + +def _fwd_bem_lin_pot_coeff(surfs): + """Calculate the coefficients for linear collocation approach.""" + # taken from fwd_bem_linear_collocation.c + nps = [surf["np"] for surf in surfs] + np_tot = sum(nps) + coeff = np.zeros((np_tot, np_tot)) + offsets = np.cumsum(np.concatenate(([0], nps))) + for si_1, surf1 in enumerate(surfs): + rr_ord = np.arange(nps[si_1]) + for si_2, surf2 in enumerate(surfs): + logger.info( + f" {_bem_surf_name[surf1['id']]} ({nps[si_1]:d}) -> " + f"{_bem_surf_name[surf2['id']]} ({nps[si_2]}) ..." + ) + tri_rr = surf2["rr"][surf2["tris"]] + tri_nn = surf2["tri_nn"] + tri_area = surf2["tri_area"] + submat = coeff[ + offsets[si_1] : offsets[si_1 + 1], offsets[si_2] : offsets[si_2 + 1] + ] # view + for k in range(surf2["ntri"]): + tri = surf2["tris"][k] + if si_1 == si_2: + skip_idx = ( + (rr_ord == tri[0]) | (rr_ord == tri[1]) | (rr_ord == tri[2]) + ) + else: + skip_idx = list() + # No contribution from a triangle that + # this vertex belongs to + # if sidx1 == sidx2 and (tri == j).any(): + # continue + # Otherwise do the hard job + coeffs = _lin_pot_coeff( + fros=surf1["rr"], + tri_rr=tri_rr[k], + tri_nn=tri_nn[k], + tri_area=tri_area[k], + ) + coeffs[skip_idx] = 0.0 + submat[:, tri] -= coeffs + if si_1 == si_2: + _correct_auto_elements(surf1, submat) + return coeff + + +def _fwd_bem_multi_solution(solids, gamma, nps): + """Do multi surface solution. + + * Invert I - solids/(2*M_PI) + * Take deflation into account + * The matrix is destroyed after inversion + * This is the general multilayer case + """ + pi2 = 1.0 / (2 * np.pi) + n_tot = np.sum(nps) + assert solids.shape == (n_tot, n_tot) + nsurf = len(nps) + defl = 1.0 / n_tot + # Modify the matrix + offsets = np.cumsum(np.concatenate(([0], nps))) + for si_1 in range(nsurf): + for si_2 in range(nsurf): + mult = pi2 if gamma is None else pi2 * gamma[si_1, si_2] + slice_j = slice(offsets[si_1], offsets[si_1 + 1]) + slice_k = slice(offsets[si_2], offsets[si_2 + 1]) + solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult + solids += np.eye(n_tot) + return np.linalg.inv(solids) + + +def _fwd_bem_homog_solution(solids, nps): + """Make a homogeneous solution.""" + return _fwd_bem_multi_solution(solids, gamma=None, nps=nps) + + +def _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri): + """Modify the solution according to the IP approach.""" + n_last = n_tri[-1] + mult = (1.0 + ip_mult) / ip_mult + + logger.info(" Combining...") + offsets = np.cumsum(np.concatenate(([0], n_tri))) + for si in range(len(n_tri)): + # Pick the correct submatrix (right column) and multiply + sub = solution[offsets[si] : offsets[si + 1], np.sum(n_tri[:-1]) :] + # Multiply + sub -= 2 * np.dot(sub, ip_solution) + + # The lower right corner is a special case + sub[-n_last:, -n_last:] += mult * ip_solution + + # Final scaling + logger.info(" Scaling...") + solution *= ip_mult + return + + +def _check_complete_surface(surf, copy=False, incomplete="raise", extra=""): + surf = complete_surface_info(surf, copy=copy, verbose=_verbose_safe_false()) + fewer = np.where([len(t) < 3 for t in surf["neighbor_tri"]])[0] + if len(fewer) > 0: + fewer = list(fewer) + fewer = (fewer[:80] + ["..."]) if len(fewer) > 80 else 
fewer
+        fewer = ", ".join(str(f) for f in fewer)
+        msg = (
+            f"Surface {_bem_surf_name[surf['id']]} has topological defects: "
+            f"{len(fewer)} / {len(surf['rr'])} vertices have fewer than three "
+            f"neighboring triangles [{fewer}]{extra}"
+        )
+        _on_missing(on_missing=incomplete, msg=msg, name="on_defects")
+    return surf
+
+
+def _fwd_bem_linear_collocation_solution(bem):
+    """Compute the linear collocation potential solution."""
+    # first, add surface geometries
+    logger.info("Computing the linear collocation solution...")
+    logger.info(" Matrix coefficients...")
+    coeff = _fwd_bem_lin_pot_coeff(bem["surfs"])
+    bem["nsol"] = len(coeff)
+    logger.info(" Inverting the coefficient matrix...")
+    nps = [surf["np"] for surf in bem["surfs"]]
+    bem["solution"] = _fwd_bem_multi_solution(coeff, bem["gamma"], nps)
+    if len(bem["surfs"]) == 3:
+        ip_mult = bem["sigma"][1] / bem["sigma"][2]
+        if ip_mult <= FWD.BEM_IP_APPROACH_LIMIT:
+            logger.info("IP approach required...")
+            logger.info(" Matrix coefficients (homog)...")
+            coeff = _fwd_bem_lin_pot_coeff([bem["surfs"][-1]])
+            logger.info(" Inverting the coefficient matrix (homog)...")
+            ip_solution = _fwd_bem_homog_solution(coeff, [bem["surfs"][-1]["np"]])
+            logger.info(
+                " Modify the original solution to incorporate IP approach..."
+            )
+            _fwd_bem_ip_modify_solution(bem["solution"], ip_solution, ip_mult, nps)
+    bem["bem_method"] = FIFF.FIFFV_BEM_APPROX_LINEAR
+    bem["solver"] = "mne"
+
+
+def _import_openmeeg(what="compute a BEM solution using OpenMEEG"):
+    try:
+        import openmeeg as om
+    except Exception as exc:
+        raise ImportError(
+            f"The OpenMEEG module must be installed to {what}, but "
+            f'"import openmeeg" resulted in: {exc}'
+        ) from None
+    if not _compare_version(om.__version__, ">=", "2.5.6"):
+        raise ImportError(f"OpenMEEG 2.5.6+ is required, got {om.__version__}")
+    return om
+
+
+def _make_openmeeg_geometry(bem, mri_head_t=None):
+    # OpenMEEG
+    om = _import_openmeeg()
+    meshes = []
+    for surf in bem["surfs"][::-1]:
+        if mri_head_t is not None:
+            surf = transform_surface_to(surf, "head", mri_head_t, copy=True)
+        points, faces = surf["rr"], surf["tris"]
+        faces = faces[:, [1, 0, 2]]  # swap faces
+        meshes.append((points, faces))
+
+    conductivity = bem["sigma"][::-1]
+    return om.make_nested_geometry(meshes, conductivity)
+
+
+def _fwd_bem_openmeeg_solution(bem):
+    om = _import_openmeeg()
+    logger.info("Creating BEM solution using OpenMEEG")
+    logger.info("Computing the openmeeg head matrix solution...")
+    logger.info(" Matrix coefficients...")
+
+    geom = _make_openmeeg_geometry(bem)
+
+    hm = om.HeadMat(geom)
+    bem["nsol"] = hm.nlin()
+
+    logger.info(" Inverting the coefficient matrix...")
+    hm.invert()  # invert inplace
+    bem["solution"] = hm.array_flat()
+    bem["bem_method"] = FIFF.FIFFV_BEM_APPROX_LINEAR
+    bem["solver"] = "openmeeg"
+
+
+@verbose
+def make_bem_solution(surfs, *, solver="mne", verbose=None):
+    """Create a BEM solution using the linear collocation approach.
+
+    Parameters
+    ----------
+    surfs : list of dict
+        The BEM surfaces to use (from :func:`mne.make_bem_model`).
+    solver : str
+        Can be ``'mne'`` (default) to use MNE-Python, or ``'openmeeg'`` to use the
+        `OpenMEEG <https://openmeeg.github.io>`__ package.
+
+        .. versionadded:: 1.2
+    %(verbose)s
+
+    Returns
+    -------
+    bem : instance of ConductorModel
+        The BEM solution.
+
+    See Also
+    --------
+    make_bem_model
+    read_bem_surfaces
+    write_bem_surfaces
+    read_bem_solution
+    write_bem_solution
+
+    Notes
+    -----
+    .. 
versionadded:: 0.10.0 + """ + _validate_type(solver, str, "solver") + _check_option("method", solver.lower(), ("mne", "openmeeg")) + bem = _ensure_bem_surfaces(surfs) + _add_gamma_multipliers(bem) + if len(bem["surfs"]) == 3: + logger.info("Three-layer model surfaces loaded.") + elif len(bem["surfs"]) == 1: + logger.info("Homogeneous model surface loaded.") + else: + raise RuntimeError("Only 1- or 3-layer BEM computations supported") + _check_bem_size(bem["surfs"]) + for surf in bem["surfs"]: + _check_complete_surface(surf) + if solver.lower() == "openmeeg": + _fwd_bem_openmeeg_solution(bem) + else: + assert solver.lower() == "mne" + _fwd_bem_linear_collocation_solution(bem) + logger.info("Solution ready.") + logger.info("BEM geometry computations complete.") + return bem + + +# ############################################################################ +# Make BEM model + + +def _ico_downsample(surf, dest_grade): + """Downsample the surface if isomorphic to a subdivided icosahedron.""" + n_tri = len(surf["tris"]) + bad_msg = ( + f"Cannot decimate to requested ico grade {dest_grade}. The provided " + f"BEM surface has {n_tri} triangles, which cannot be isomorphic with " + "a subdivided icosahedron. Consider manually decimating the surface to " + "a suitable density and then use ico=None in make_bem_model." + ) + if n_tri % 20 != 0: + raise RuntimeError(bad_msg) + n_tri = n_tri // 20 + found = int(round(np.log(n_tri) / np.log(4))) + if n_tri != 4**found: + raise RuntimeError(bad_msg) + del n_tri + + if dest_grade > found: + raise RuntimeError( + f"For this surface, decimation grade should be {found} or less, " + f"not {dest_grade}." + ) + + source = _get_ico_surface(found) + dest = _get_ico_surface(dest_grade, patch_stats=True) + del dest["tri_cent"] + del dest["tri_nn"] + del dest["neighbor_tri"] + del dest["tri_area"] + if not np.array_equal(source["tris"], surf["tris"]): + raise RuntimeError( + "The source surface has a matching number of " + "triangles but ordering is wrong" + ) + logger.info( + f"Going from {found}th to {dest_grade}th subdivision of an icosahedron " + f"(n_tri: {len(surf['tris'])} -> {len(dest['tris'])})" + ) + # Find the mapping + dest["rr"] = surf["rr"][_get_ico_map(source, dest)] + return dest + + +def _get_ico_map(fro, to): + """Get a mapping between ico surfaces.""" + nearest, dists = _compute_nearest(fro["rr"], to["rr"], return_dists=True) + n_bads = (dists > 5e-3).sum() + if n_bads > 0: + raise RuntimeError(f"No matching vertex for {n_bads} destination vertices") + return nearest + + +def _order_surfaces(surfs): + """Reorder the surfaces.""" + if len(surfs) != 3: + return surfs + # we have three surfaces + surf_order = [ + FIFF.FIFFV_BEM_SURF_ID_HEAD, + FIFF.FIFFV_BEM_SURF_ID_SKULL, + FIFF.FIFFV_BEM_SURF_ID_BRAIN, + ] + ids = np.array([surf["id"] for surf in surfs]) + if set(ids) != set(surf_order): + raise RuntimeError(f"bad surface ids: {ids}") + order = [np.where(ids == id_)[0][0] for id_ in surf_order] + surfs = [surfs[idx] for idx in order] + return surfs + + +def _assert_complete_surface(surf, incomplete="raise"): + """Check the sum of solid angles as seen from inside.""" + # from surface_checks.c + # Center of mass.... 
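+    # (for a closed, consistently oriented triangulation, the normalized solid
+    # angle subtended at an interior point such as the center of mass is
+    # exactly 1; holes or flipped triangles show up as deviations)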
+ cm = surf["rr"].mean(axis=0) + logger.info( + f"{_bem_surf_name[surf['id']]} CM is " + f"{1000 * cm[0]:6.2f} " + f"{1000 * cm[1]:6.2f} " + f"{1000 * cm[2]:6.2f} mm" + ) + tot_angle = _get_solids(surf["rr"][surf["tris"]], cm[np.newaxis, :])[0] + prop = tot_angle / (2 * np.pi) + if np.abs(prop - 1.0) > 1e-5: + msg = ( + f'Surface {_bem_surf_name[surf["id"]]} is not complete (sum of ' + f"solid angles yielded {prop}, should be 1.)" + ) + _on_missing(incomplete, msg, name="incomplete", error_klass=RuntimeError) + + +def _assert_inside(fro, to): + """Check one set of points is inside a surface.""" + # this is "is_inside" in surface_checks.c + fro_name = _bem_surf_name[fro["id"]] + to_name = _bem_surf_name[to["id"]] + logger.info(f"Checking that surface {fro_name} is inside surface {to_name} ...") + tot_angle = _get_solids(to["rr"][to["tris"]], fro["rr"]) + if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any(): + raise RuntimeError( + f"Surface {fro_name} is not completely inside surface {to_name}" + ) + + +def _check_surfaces(surfs, incomplete="raise"): + """Check that the surfaces are complete and non-intersecting.""" + for surf in surfs: + _assert_complete_surface(surf, incomplete=incomplete) + # Then check the topology + for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]): + _assert_inside(surf_2, surf_1) + + +def _check_surface_size(surf): + """Check that the coordinate limits are reasonable.""" + sizes = surf["rr"].max(axis=0) - surf["rr"].min(axis=0) + if (sizes < 0.05).any(): + raise RuntimeError( + f'Dimensions of the surface {_bem_surf_name[surf["id"]]} seem too ' + f"small ({1000 * sizes.min():9.5f}). Maybe the unit of measure" + " is meters instead of mm" + ) + + +def _check_thicknesses(surfs): + """Compute how close we are.""" + for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]): + min_dist = _compute_nearest(surf_1["rr"], surf_2["rr"], return_dists=True)[1] + min_dist = min_dist.min() + fro = _bem_surf_name[surf_1["id"]] + to = _bem_surf_name[surf_2["id"]] + logger.info(f"Checking distance between {fro} and {to} surfaces...") + logger.info( + f"Minimum distance between the {fro} and {to} surfaces is " + f"approximately {1000 * min_dist:6.1f} mm" + ) + + +def _surfaces_to_bem( + surfs, ids, sigmas, ico=None, rescale=True, incomplete="raise", extra="" +): + """Convert surfaces to a BEM.""" + # equivalent of mne_surf2bem + # surfs can be strings (filenames) or surface dicts + if len(surfs) not in (1, 3) or not (len(surfs) == len(ids) == len(sigmas)): + raise ValueError( + "surfs, ids, and sigmas must all have the same " + "number of elements (1 or 3)" + ) + for si, surf in enumerate(surfs): + if isinstance(surf, str | Path | os.PathLike): + surfs[si] = surf = read_surface(surf, return_dict=True)[-1] + # Downsampling if the surface is isomorphic with a subdivided icosahedron + if ico is not None: + for si, surf in enumerate(surfs): + surfs[si] = _ico_downsample(surf, ico) + for surf, id_ in zip(surfs, ids): + # Do topology checks (but don't save data) to fail early + surf["id"] = id_ + _check_complete_surface(surf, copy=True, incomplete=incomplete, extra=extra) + surf["coord_frame"] = surf.get("coord_frame", FIFF.FIFFV_COORD_MRI) + surf.update(np=len(surf["rr"]), ntri=len(surf["tris"])) + if rescale: + surf["rr"] /= 1000.0 # convert to meters + + # Shifting surfaces is not implemented here... 
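+    # (the checks below rely on a fixed outermost-to-innermost ordering:
+    # _check_surfaces walks consecutive pairs and verifies that each surface
+    # is completely contained inside the previous one)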
+ + # Order the surfaces for the benefit of the topology checks + for surf, sigma in zip(surfs, sigmas): + surf["sigma"] = sigma + surfs = _order_surfaces(surfs) + + # Check topology as best we can + _check_surfaces(surfs, incomplete=incomplete) + for surf in surfs: + _check_surface_size(surf) + _check_thicknesses(surfs) + logger.info("Surfaces passed the basic topology checks.") + return surfs + + +@verbose +def make_bem_model( + subject, ico=4, conductivity=(0.3, 0.006, 0.3), subjects_dir=None, verbose=None +): + """Create a BEM model for a subject. + + Use :func:`~mne.make_bem_solution` to turn the returned surfaces into a + :class:`~mne.bem.ConductorModel` suitable for forward calculation. + + .. note:: To get a single layer bem corresponding to the --homog flag in + the command line tool set the ``conductivity`` parameter + to a float (e.g. ``0.3``). + + Parameters + ---------- + %(subject)s + ico : int | None + The surface ico downsampling to use, e.g. ``5=20484``, ``4=5120``, + ``3=1280``. If None, no subsampling is applied. + conductivity : float | array of float of shape (3,) or (1,) + The conductivities to use for each shell. Should be a single element + for a one-layer model, or three elements for a three-layer model. + Defaults to ``[0.3, 0.006, 0.3]``. The MNE-C default for a + single-layer model is ``[0.3]``. + %(subjects_dir)s + %(verbose)s + + Returns + ------- + surfaces : list of dict + The BEM surfaces. Use :func:`~mne.make_bem_solution` to turn these into a + :class:`~mne.bem.ConductorModel` suitable for forward calculation. + + See Also + -------- + make_bem_solution + make_sphere_model + read_bem_surfaces + write_bem_surfaces + + Notes + ----- + .. versionadded:: 0.10.0 + """ + conductivity = np.atleast_1d(conductivity).astype(float) + if conductivity.ndim != 1 or conductivity.size not in (1, 3): + raise ValueError( + "conductivity must be a float or a 1D array-like with 1 or 3 elements" + ) + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + subject_dir = subjects_dir / subject + bem_dir = subject_dir / "bem" + inner_skull = bem_dir / "inner_skull.surf" + outer_skull = bem_dir / "outer_skull.surf" + outer_skin = bem_dir / "outer_skin.surf" + surfaces = [inner_skull, outer_skull, outer_skin] + ids = [ + FIFF.FIFFV_BEM_SURF_ID_BRAIN, + FIFF.FIFFV_BEM_SURF_ID_SKULL, + FIFF.FIFFV_BEM_SURF_ID_HEAD, + ] + logger.info("Creating the BEM geometry...") + if len(conductivity) == 1: + surfaces = surfaces[:1] + ids = ids[:1] + surfaces = _surfaces_to_bem(surfaces, ids, conductivity, ico) + _check_bem_size(surfaces) + logger.info("Complete.\n") + return surfaces + + +# ############################################################################ +# Compute EEG sphere model + + +def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms): + """Get the model depended weighting factor for n.""" + nlayer = len(m["layers"]) + if nlayer in (0, 1): + return 1.0 + + # Initialize the arrays + c1 = np.zeros(nlayer - 1) + c2 = np.zeros(nlayer - 1) + cr = np.zeros(nlayer - 1) + cr_mult = np.zeros(nlayer - 1) + for k in range(nlayer - 1): + c1[k] = m["layers"][k]["sigma"] / m["layers"][k + 1]["sigma"] + c2[k] = c1[k] - 1.0 + cr_mult[k] = m["layers"][k]["rel_rad"] + cr[k] = cr_mult[k] + cr_mult[k] *= cr_mult[k] + + coeffs = np.zeros(n_terms - 1) + for n in range(1, n_terms): + # Increment the radius coefficients + for k in range(nlayer - 1): + cr[k] *= cr_mult[k] + + # Multiply the matrices + M = np.eye(2) + n1 = n + 1.0 + for k in range(nlayer - 2, -1, -1): + M = np.dot( + [ + [n + 
n1 * c1[k], n1 * c2[k] / cr[k]], + [n * c2[k] * cr[k], n1 + n * c1[k]], + ], + M, + ) + num = n * (2.0 * n + 1.0) ** (nlayer - 1) + coeffs[n - 1] = num / (n * M[1, 1] + n1 * M[1, 0]) + return coeffs + + +def _compose_linear_fitting_data(mu, u): + """Get the linear fitting data.""" + k1 = np.arange(1, u["nterms"]) + mu1ns = mu[0] ** k1 + # data to be fitted + y = u["w"][:-1] * (u["fn"][1:] - mu1ns * u["fn"][0]) + # model matrix + M = u["w"][:-1, np.newaxis] * (mu[1:] ** k1[:, np.newaxis] - mu1ns[:, np.newaxis]) + uu, sing, vv = _safe_svd(M, full_matrices=False) + ncomp = u["nfit"] - 1 + uu, sing, vv = uu[:, :ncomp], sing[:ncomp], vv[:ncomp] + return y, uu, sing, vv + + +def _compute_linear_parameters(mu, u): + """Compute the best-fitting linear parameters.""" + y, uu, sing, vv = _compose_linear_fitting_data(mu, u) + + # Compute the residuals + vec = np.dot(y, uu) + resi = y - np.dot(uu, vec) + vec /= sing + + lambda_ = np.zeros(u["nfit"]) + lambda_[1:] = np.dot(vec, vv) + lambda_[0] = u["fn"][0] - np.sum(lambda_[1:]) + rv = np.dot(resi, resi) / np.dot(y, y) + return rv, lambda_ + + +def _one_step(mu, u): + """Evaluate the residual sum of squares fit for one set of mu values.""" + if np.abs(mu).max() >= 1.0: + return 100.0 + + # Compose the data for the linear fitting, compute SVD, then residuals + y, uu, sing, vv = _compose_linear_fitting_data(mu, u) + resi = y - np.dot(uu, np.dot(y, uu)) + return np.dot(resi, resi) + + +def _fwd_eeg_fit_berg_scherg(m, nterms, nfit): + """Fit the Berg-Scherg equivalent spherical model dipole parameters.""" + assert nfit >= 2 + u = dict(nfit=nfit, nterms=nterms) + + # (1) Calculate the coefficients of the true expansion + u["fn"] = _fwd_eeg_get_multi_sphere_model_coeffs(m, nterms + 1) + + # (2) Calculate the weighting + f = min([layer["rad"] for layer in m["layers"]]) / max( + [layer["rad"] for layer in m["layers"]] + ) + + # correct weighting + k = np.arange(1, nterms + 1) + u["w"] = np.sqrt((2.0 * k + 1) * (3.0 * k + 1.0) / k) * np.power(f, (k - 1.0)) + u["w"][-1] = 0 + + # Do the nonlinear minimization, constraining mu to the interval [-1, +1] + mu_0 = np.zeros(3) + fun = partial(_one_step, u=u) + catol = 1e-6 + max_ = 1.0 - 2 * catol + + def cons(x): + return max_ - np.abs(x) + + mu = fmin_cobyla(fun, mu_0, [cons], rhobeg=0.5, rhoend=1e-5, catol=catol) + + # (6) Do the final step: calculation of the linear parameters + rv, lambda_ = _compute_linear_parameters(mu, u) + order = np.argsort(mu)[::-1] + mu, lambda_ = mu[order], lambda_[order] # sort: largest mu first + + m["mu"] = mu + # This division takes into account the actual conductivities + m["lambda"] = lambda_ / m["layers"][-1]["sigma"] + m["nfit"] = nfit + return rv + + +@verbose +def make_sphere_model( + r0=(0.0, 0.0, 0.04), + head_radius=0.09, + info=None, + relative_radii=(0.90, 0.92, 0.97, 1.0), + sigmas=(0.33, 1.0, 0.004, 0.33), + verbose=None, +): + """Create a spherical model for forward solution calculation. + + Parameters + ---------- + r0 : array-like | str + Head center to use (in head coordinates). If 'auto', the head + center will be calculated from the digitization points in info. + head_radius : float | str | None + If float, compute spherical shells for EEG using the given radius. + If ``'auto'``, estimate an appropriate radius from the dig points in the + :class:`~mne.Info` provided by the argument ``info``. + If None, exclude shells (single layer sphere model). + %(info)s Only needed if ``r0`` or ``head_radius`` are ``'auto'``. 
+    relative_radii : array-like
+        Relative radii for the spherical shells.
+    sigmas : array-like
+        Sigma values for the spherical shells.
+    %(verbose)s
+
+    Returns
+    -------
+    sphere : instance of ConductorModel
+        The resulting spherical conductor model.
+
+    See Also
+    --------
+    make_bem_model
+    make_bem_solution
+
+    Notes
+    -----
+    The default model has::
+
+        relative_radii = (0.90, 0.92, 0.97, 1.0)
+        sigmas = (0.33, 1.0, 0.004, 0.33)
+
+    These correspond to compartments (with unitless relative radii and
+    conductivities σ in ``S/m``) for the brain, CSF, skull, and scalp,
+    respectively.
+
+    .. versionadded:: 0.9.0
+    """
+    for name in ("r0", "head_radius"):
+        param = locals()[name]
+        if isinstance(param, str):
+            if param != "auto":
+                raise ValueError(f'{name}, if str, must be "auto" not "{param}"')
+    relative_radii = np.array(relative_radii, float).ravel()
+    sigmas = np.array(sigmas, float).ravel()
+    if len(relative_radii) != len(sigmas):
+        raise ValueError(
+            f"relative_radii length ({len(relative_radii)}) must match that of sigmas ("
+            f"{len(sigmas)})"
+        )
+    if len(sigmas) <= 1 and head_radius is not None:
+        raise ValueError(
+            "at least 2 sigmas must be supplied if head_radius is not None, got "
+            f"{len(sigmas)}"
+        )
+    if (isinstance(r0, str) and r0 == "auto") or (
+        isinstance(head_radius, str) and head_radius == "auto"
+    ):
+        if info is None:
+            raise ValueError("Info must not be None for auto mode")
+        head_radius_fit, r0_fit = fit_sphere_to_headshape(info, units="m")[:2]
+        if isinstance(r0, str):
+            r0 = r0_fit
+        if isinstance(head_radius, str):
+            head_radius = head_radius_fit
+    sphere = ConductorModel(
+        is_sphere=True, r0=np.array(r0), coord_frame=FIFF.FIFFV_COORD_HEAD
+    )
+    sphere["layers"] = list()
+    if head_radius is not None:
+        # Eventually these could be configurable...
+        relative_radii = np.array(relative_radii, float)
+        sigmas = np.array(sigmas, float)
+        order = np.argsort(relative_radii)
+        relative_radii = relative_radii[order]
+        sigmas = sigmas[order]
+        for rel_rad, sig in zip(relative_radii, sigmas):
+            # sort layers by (relative) radius, and scale radii
+            layer = dict(rad=rel_rad, sigma=sig)
+            layer["rel_rad"] = layer["rad"] = rel_rad
+            sphere["layers"].append(layer)
+
+        # scale the radii
+        R = sphere["layers"][-1]["rad"]
+        rR = sphere["layers"][-1]["rel_rad"]
+        for layer in sphere["layers"]:
+            layer["rad"] /= R
+            layer["rel_rad"] /= rR
+
+        #
+        # Setup the EEG sphere model calculations
+        #
+
+        # Scale the relative radii
+        for k in range(len(relative_radii)):
+            sphere["layers"][k]["rad"] = head_radius * sphere["layers"][k]["rel_rad"]
+        rv = _fwd_eeg_fit_berg_scherg(sphere, 200, 3)
+        logger.info(f"\nEquiv. model fitting -> RV = {100 * rv:g} %")
+        for k in range(3):
+            s_k = sphere["layers"][-1]["sigma"] * sphere["lambda"][k]
+            logger.info(f"mu{k + 1} = {sphere['mu'][k]:g} lambda{k + 1} = {s_k:g}")
+        logger.info(
+            f"Set up EEG sphere model with scalp radius {1000 * head_radius:7.1f} mm\n"
+        )
+    return sphere
+
+
+# #############################################################################
+# Sphere fitting
+
+
+@verbose
+def fit_sphere_to_headshape(info, dig_kinds="auto", units="m", verbose=None):
+    """Fit a sphere to the headshape points to determine head center.
+
+    Parameters
+    ----------
+    %(info_not_none)s
+    %(dig_kinds)s
+    units : str
+        Can be ``"m"`` (default) or ``"mm"``.
+
+        .. versionadded:: 0.12
+    %(verbose)s
+
+    Returns
+    -------
+    radius : float
+        Sphere radius.
+    origin_head : ndarray, shape (3,)
+        Head center in head coordinates.
+    origin_device : ndarray, shape (3,)
+        Head center in device coordinates.
+
+    Notes
+    -----
+    This function excludes any points that are low and frontal
+    (``z < 0 and y > 0``) to improve the fit.
+    """
+    if not isinstance(units, str) or units not in ("m", "mm"):
+        raise ValueError('units must be "m" or "mm"')
+    radius, origin_head, origin_device = _fit_sphere_to_headshape(info, dig_kinds)
+    if units == "mm":
+        radius *= 1e3
+        origin_head *= 1e3
+        origin_device *= 1e3
+    return radius, origin_head, origin_device
+
+
+@verbose
+def get_fitting_dig(info, dig_kinds="auto", exclude_frontal=True, verbose=None):
+    """Get digitization points suitable for sphere fitting.
+
+    Parameters
+    ----------
+    %(info_not_none)s
+    %(dig_kinds)s
+    %(exclude_frontal)s
+        Default is True.
+
+        .. versionadded:: 0.19
+    %(verbose)s
+
+    Returns
+    -------
+    dig : array, shape (n_pts, 3)
+        The digitization points (in head coordinates) to use for fitting.
+
+    Notes
+    -----
+    This will exclude digitization locations that have ``z < 0 and y > 0``,
+    i.e. points on the nose and below the nose on the face.
+
+    .. versionadded:: 0.14
+    """
+    _validate_type(info, "info")
+    if info["dig"] is None:
+        raise RuntimeError(
+            'Cannot fit headshape without digitization, info["dig"] is None'
+        )
+    if isinstance(dig_kinds, str):
+        if dig_kinds == "auto":
+            # try "extra" first
+            try:
+                return get_fitting_dig(info, "extra")
+            except ValueError:
+                pass
+            return get_fitting_dig(info, ("extra", "eeg"))
+        else:
+            dig_kinds = (dig_kinds,)
+    # convert string args to ints (first make dig_kinds mutable in case tuple)
+    dig_kinds = list(dig_kinds)
+    for di, d in enumerate(dig_kinds):
+        dig_kinds[di] = _dig_kind_dict.get(d, d)
+        if dig_kinds[di] not in _dig_kind_ints:
+            raise ValueError(
+                f"dig_kinds[{di}] ({d}) must be one of {sorted(_dig_kind_dict)}"
+            )
+
+    # get head digitization points of the specified kind(s)
+    dig = [p for p in info["dig"] if p["kind"] in dig_kinds]
+    if len(dig) == 0:
+        raise ValueError(f"No digitization points found for dig_kinds={dig_kinds}")
+    if any(p["coord_frame"] != FIFF.FIFFV_COORD_HEAD for p in dig):
+        raise RuntimeError(
+            f"Digitization points dig_kinds={dig_kinds} not in head "
+            "coordinates, contact mne-python developers"
+        )
+    hsp = [p["r"] for p in dig]
+    del dig
+
+    # exclude some frontal points (nose etc.)
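+    # (in the head coordinate frame +y points through the nasion and +z up,
+    # so z < 0 and y > 0 selects points on the nose / lower face, which would
+    # otherwise bias the sphere fit)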
+ if exclude_frontal: + hsp = [p for p in hsp if not (p[2] < -1e-6 and p[1] > 1e-6)] + hsp = np.array(hsp) + + if len(hsp) <= 10: + kinds_str = ", ".join([f'"{_dig_kind_rev[d]}"' for d in sorted(dig_kinds)]) + msg = ( + f"Only {len(hsp)} head digitization points of the specified " + f"kind{_pl(dig_kinds)} ({kinds_str},)" + ) + if len(hsp) < 4: + raise ValueError(msg + ", at least 4 required") + else: + warn(msg + ", fitting may be inaccurate") + return hsp + + +@verbose +def _fit_sphere_to_headshape(info, dig_kinds, verbose=None): + """Fit a sphere to the given head shape.""" + hsp = get_fitting_dig(info, dig_kinds) + radius, origin_head = _fit_sphere(np.array(hsp), disp=False) + # compute origin in device coordinates + dev_head_t = info["dev_head_t"] + if dev_head_t is None: + dev_head_t = Transform("meg", "head") + head_to_dev = _ensure_trans(dev_head_t, "head", "meg") + origin_device = apply_trans(head_to_dev, origin_head) + logger.info("Fitted sphere radius:".ljust(30) + f"{radius * 1e3:0.1f} mm") + _check_head_radius(radius) + + # > 2 cm away from head center in X or Y is strange + o_mm = origin_head * 1e3 + o_d = origin_device * 1e3 + if np.linalg.norm(origin_head[:2]) > 0.02: + warn( + f"(X, Y) fit ({o_mm[0]:0.1f}, {o_mm[1]:0.1f}) " + "more than 20 mm from head frame origin" + ) + logger.info( + "Origin head coordinates:".ljust(30) + + f"{o_mm[0]:0.1f} {o_mm[1]:0.1f} {o_mm[2]:0.1f} mm" + ) + logger.info( + "Origin device coordinates:".ljust(30) + + f"{o_d[0]:0.1f} {o_d[1]:0.1f} {o_d[2]:0.1f} mm" + ) + return radius, origin_head, origin_device + + +def _fit_sphere(points, disp="auto"): + """Fit a sphere to an arbitrary set of points.""" + if isinstance(disp, str) and disp == "auto": + disp = True if logger.level <= 20 else False + # initial guess for center and radius + radii = (np.max(points, axis=1) - np.min(points, axis=1)) / 2.0 + radius_init = radii.mean() + center_init = np.median(points, axis=0) + + # optimization + x0 = np.concatenate([center_init, [radius_init]]) + + def cost_fun(center_rad): + d = np.linalg.norm(points - center_rad[:3], axis=1) - center_rad[3] + d *= d + return d.sum() + + def constraint(center_rad): + return center_rad[3] # radius must be >= 0 + + x_opt = fmin_cobyla( + cost_fun, + x0, + constraint, + rhobeg=radius_init, + rhoend=radius_init * 1e-6, + disp=disp, + ) + + origin, radius = x_opt[:3], x_opt[3] + return radius, origin + + +def _check_origin(origin, info, coord_frame="head", disp=False): + """Check or auto-determine the origin.""" + if isinstance(origin, str): + if origin != "auto": + raise ValueError( + f'origin must be a numerical array, or "auto", not {origin}' + ) + if coord_frame == "head": + R, origin = fit_sphere_to_headshape( + info, verbose=_verbose_safe_false(), units="m" + )[:2] + logger.info(f" Automatic origin fit: head of radius {R * 1000:0.1f} mm") + del R + else: + origin = (0.0, 0.0, 0.0) + origin = np.array(origin, float) + if origin.shape != (3,): + raise ValueError("origin must be a 3-element array") + if disp: + origin_str = ", ".join([f"{o * 1000:0.1f}" for o in origin]) + msg = f" Using origin {origin_str} mm in the {coord_frame} frame" + if coord_frame == "meg" and info["dev_head_t"] is not None: + o_dev = apply_trans(info["dev_head_t"], origin) + origin_str = ", ".join(f"{o * 1000:0.1f}" for o in o_dev) + msg += f" ({origin_str} mm in the head frame)" + logger.info(msg) + return origin + + +# ############################################################################ +# Create BEM surfaces + + +@verbose +def 
make_watershed_bem(
+    subject,
+    subjects_dir=None,
+    overwrite=False,
+    volume="T1",
+    atlas=False,
+    gcaatlas=False,
+    preflood=None,
+    show=False,
+    copy=True,
+    T1=None,
+    brainmask="ws.mgz",
+    verbose=None,
+):
+    """Create BEM surfaces using the FreeSurfer watershed algorithm.
+
+    See :ref:`bem_watershed_algorithm` for additional information.
+
+    Parameters
+    ----------
+    subject : str
+        Subject name.
+    %(subjects_dir)s
+    %(overwrite)s
+    volume : str
+        Defaults to T1.
+    atlas : bool
+        Specify the ``--atlas`` option for ``mri_watershed``.
+    gcaatlas : bool
+        Specify the ``--brain_atlas`` option for ``mri_watershed``.
+    preflood : int
+        Change the preflood height.
+    show : bool
+        Show surfaces to visually inspect all three BEM surfaces (recommended).
+
+        .. versionadded:: 0.12
+
+    copy : bool
+        If True (default), use copies instead of symlinks for surfaces
+        (if they do not already exist).
+
+        .. versionadded:: 0.18
+        .. versionchanged:: 1.1 Use copies instead of symlinks.
+    T1 : bool | None
+        If True, pass the ``-T1`` flag.
+        By default (None), this takes the same value as ``gcaatlas``.
+
+        .. versionadded:: 0.19
+    brainmask : str
+        The filename for the brainmask output file relative to the
+        ``$SUBJECTS_DIR/$SUBJECT/bem/watershed/`` directory.
+        Can be for example ``"../../mri/brainmask.mgz"`` to overwrite
+        the brainmask obtained via ``recon-all -autorecon1``.
+
+        .. versionadded:: 0.19
+    %(verbose)s
+
+    See Also
+    --------
+    mne.viz.plot_bem
+
+    Notes
+    -----
+    If your BEM meshes do not look correct when viewed in
+    :func:`mne.viz.plot_alignment` or :func:`mne.viz.plot_bem`, consider
+    potential solutions from the :ref:`FAQ `.
+
+    .. versionadded:: 0.10
+    """
+    env, mri_dir, bem_dir = _prepare_env(subject, subjects_dir)
+    tempdir = _TempDir()  # fsl and Freesurfer create some random junk in CWD
+    run_subprocess_env = partial(run_subprocess, env=env, cwd=tempdir)
+
+    subjects_dir = env["SUBJECTS_DIR"]  # Set by _prepare_env() above.
+    subject_dir = op.join(subjects_dir, subject)
+    ws_dir = op.join(bem_dir, "watershed")
+    T1_dir = op.join(mri_dir, volume)
+    T1_mgz = T1_dir
+    if not T1_dir.endswith(".mgz"):
+        T1_mgz += ".mgz"
+
+    if not op.isdir(bem_dir):
+        os.makedirs(bem_dir)
+    _check_fname(T1_mgz, overwrite="read", must_exist=True, name="MRI data")
+    if op.isdir(ws_dir):
+        if not overwrite:
+            raise RuntimeError(
+                f"{ws_dir} already exists. Use the --overwrite option"
+                " to recreate it."
+ ) + else: + shutil.rmtree(ws_dir) + + # put together the command + cmd = ["mri_watershed"] + if preflood: + cmd += ["-h", f"{int(preflood)}"] + + if T1 is None: + T1 = gcaatlas + if T1: + cmd += ["-T1"] + if gcaatlas: + fname = op.join(env["FREESURFER_HOME"], "average", "RB_all_withskull_*.gca") + fname = sorted(glob.glob(fname))[::-1][0] + logger.info(f"Using GCA atlas: {fname}") + cmd += [ + "-atlas", + "-brain_atlas", + fname, + subject_dir + "/mri/transforms/talairach_with_skull.lta", + ] + elif atlas: + cmd += ["-atlas"] + if op.exists(T1_mgz): + cmd += [ + "-useSRAS", + "-surf", + op.join(ws_dir, subject), + T1_mgz, + op.join(ws_dir, brainmask), + ] + else: + cmd += [ + "-useSRAS", + "-surf", + op.join(ws_dir, subject), + T1_dir, + op.join(ws_dir, brainmask), + ] + # report and run + logger.info( + "\nRunning mri_watershed for BEM segmentation with the following parameters:\n" + f"\nResults dir = {ws_dir}\nCommand = {' '.join(cmd)}\n" + ) + os.makedirs(op.join(ws_dir)) + run_subprocess_env(cmd) + del tempdir # clean up directory + if op.isfile(T1_mgz): + new_info = _extract_volume_info(T1_mgz) + if not new_info: + warn( + "nibabel is not available or the volume info is invalid. Volume info " + "not updated in the written surface." + ) + surfs = ["brain", "inner_skull", "outer_skull", "outer_skin"] + for s in surfs: + surf_ws_out = op.join(ws_dir, f"{subject}_{s}_surface") + + rr, tris, volume_info = read_surface(surf_ws_out, read_metadata=True) + # replace volume info, 'head' stays + volume_info.update(new_info) + write_surface( + surf_ws_out, rr, tris, volume_info=volume_info, overwrite=True + ) + + # Create symbolic links + surf_out = op.join(bem_dir, f"{s}.surf") + if not overwrite and op.exists(surf_out): + skip_symlink = True + else: + if op.exists(surf_out): + os.remove(surf_out) + _symlink(surf_ws_out, surf_out, copy) + skip_symlink = False + + if skip_symlink: + logger.info( + "Unable to create all symbolic links to .surf files in bem folder. Use " + "--overwrite option to recreate them." + ) + dest = op.join(bem_dir, "watershed") + else: + logger.info("Symbolic links to .surf files created in bem folder") + dest = bem_dir + + logger.info( + "\nThank you for waiting.\nThe BEM triangulations for this subject are now " + f"available at:\n{dest}." 
+ ) + + # Write a head file for coregistration + fname_head = op.join(bem_dir, subject + "-head.fif") + if op.isfile(fname_head): + os.remove(fname_head) + + surf = _surfaces_to_bem( + [op.join(ws_dir, subject + "_outer_skin_surface")], + [FIFF.FIFFV_BEM_SURF_ID_HEAD], + sigmas=[1], + ) + write_bem_surfaces(fname_head, surf) + + # Show computed BEM surfaces + if show: + plot_bem( + subject=subject, + subjects_dir=subjects_dir, + orientation="coronal", + slices=None, + show=True, + ) + + logger.info(f"Created {fname_head}\n\nComplete.") + + +def _extract_volume_info(mgz): + """Extract volume info from a mgz file.""" + nib = _import_nibabel() + header = nib.load(mgz).header + version = header["version"] + vol_info = dict() + if version == 1: + version = f"{version} # volume info valid" + vol_info["valid"] = version + vol_info["filename"] = mgz + vol_info["volume"] = header["dims"][:3] + vol_info["voxelsize"] = header["delta"] + vol_info["xras"], vol_info["yras"], vol_info["zras"] = header["Mdc"] + vol_info["cras"] = header["Pxyz_c"] + + return vol_info + + +# ############################################################################ +# Read + + +@verbose +def read_bem_surfaces( + fname, patch_stats=False, s_id=None, on_defects="raise", verbose=None +): + """Read the BEM surfaces from a FIF file. + + Parameters + ---------- + fname : path-like + The name of the file containing the surfaces. + patch_stats : bool, optional (default False) + Calculate and add cortical patch statistics to the surfaces. + s_id : int | None + If int, only read and return the surface with the given ``s_id``. + An error will be raised if it doesn't exist. If None, all + surfaces are read and returned. + %(on_defects)s + + .. versionadded:: 0.23 + %(verbose)s + + Returns + ------- + surf: list | dict + A list of dictionaries that each contain a surface. If ``s_id`` + is not None, only the requested surface will be returned. 
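+
+    A minimal usage sketch (the filename here is hypothetical)::
+
+        surfs = read_bem_surfaces("sample-bem.fif", patch_stats=True)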
+ + See Also + -------- + write_bem_surfaces, write_bem_solution, make_bem_model + """ + # Open the file, create directory + _validate_type(s_id, ("int-like", None), "s_id") + fname = _check_fname(fname, "read", True, "fname") + if fname.suffix == ".h5": + surf = _read_bem_surfaces_h5(fname, s_id) + else: + surf = _read_bem_surfaces_fif(fname, s_id) + if s_id is not None and len(surf) != 1: + raise ValueError(f"surface with id {s_id} not found") + for this in surf: + if patch_stats or this["nn"] is None: + _check_complete_surface(this, incomplete=on_defects) + return surf[0] if s_id is not None else surf + + +def _read_bem_surfaces_h5(fname, s_id): + read_hdf5, _ = _import_h5io_funcs() + bem = read_hdf5(fname) + try: + [s["id"] for s in bem["surfs"]] + except Exception: # not our format + raise ValueError("BEM data not found") + surf = bem["surfs"] + if s_id is not None: + surf = [s for s in surf if s["id"] == s_id] + return surf + + +def _read_bem_surfaces_fif(fname, s_id): + # Default coordinate frame + coord_frame = FIFF.FIFFV_COORD_MRI + f, tree, _ = fiff_open(fname) + with f as fid: + # Find BEM + bem = dir_tree_find(tree, FIFF.FIFFB_BEM) + if bem is None or len(bem) == 0: + raise ValueError("BEM data not found") + + bem = bem[0] + # Locate all surfaces + bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF) + if bemsurf is None: + raise ValueError("BEM surface data not found") + + logger.info(f" {len(bemsurf)} BEM surfaces found") + # Coordinate frame possibly at the top level + tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME) + if tag is not None: + coord_frame = tag.data + # Read all surfaces + if s_id is not None: + surf = [ + _read_bem_surface(fid, bsurf, coord_frame, s_id) for bsurf in bemsurf + ] + surf = [s for s in surf if s is not None] + else: + surf = list() + for bsurf in bemsurf: + logger.info(" Reading a surface...") + this = _read_bem_surface(fid, bsurf, coord_frame) + surf.append(this) + logger.info("[done]") + logger.info(f" {len(surf)} BEM surfaces read") + return surf + + +def _read_bem_surface(fid, this, def_coord_frame, s_id=None): + """Read one bem surface.""" + # fid should be open as a context manager here + res = dict() + # Read all the interesting stuff + tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID) + + if tag is None: + res["id"] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN + else: + res["id"] = int(tag.data.item()) + + if s_id is not None and res["id"] != s_id: + return None + + tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA) + res["sigma"] = 1.0 if tag is None else float(tag.data.item()) + + tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NNODE) + if tag is None: + raise ValueError("Number of vertices not found") + + res["np"] = int(tag.data.item()) + + tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI) + if tag is None: + raise ValueError("Number of triangles not found") + res["ntri"] = int(tag.data.item()) + + tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME) + if tag is None: + tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME) + if tag is None: + res["coord_frame"] = def_coord_frame + else: + res["coord_frame"] = int(tag.data.item()) + else: + res["coord_frame"] = int(tag.data.item()) + + # Vertices, normals, and triangles + tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES) + if tag is None: + raise ValueError("Vertex data not found") + + res["rr"] = tag.data.astype(np.float64) + if res["rr"].shape[0] != res["np"]: + raise ValueError("Vertex information is incorrect") + + tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS) + if tag is None: + tag 
= find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS) + if tag is None: + res["nn"] = None + else: + res["nn"] = tag.data.astype(np.float64) + if res["nn"].shape[0] != res["np"]: + raise ValueError("Vertex normal information is incorrect") + + tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES) + if tag is None: + raise ValueError("Triangulation not found") + + res["tris"] = tag.data - 1 # index start at 0 in Python + if res["tris"].shape[0] != res["ntri"]: + raise ValueError("Triangulation information is incorrect") + + return res + + +@verbose +def read_bem_solution(fname, *, verbose=None): + """Read the BEM solution from a file. + + Parameters + ---------- + fname : path-like + The file containing the BEM solution. + %(verbose)s + + Returns + ------- + bem : instance of ConductorModel + The BEM solution. + + See Also + -------- + read_bem_surfaces + write_bem_surfaces + make_bem_solution + write_bem_solution + """ + fname = _check_fname(fname, "read", True, "fname") + # mirrors fwd_bem_load_surfaces from fwd_bem_model.c + if fname.suffix == ".h5": + read_hdf5, _ = _import_h5io_funcs() + logger.info("Loading surfaces and solution...") + bem = read_hdf5(fname) + if "solver" not in bem: + bem["solver"] = "mne" + else: + bem = _read_bem_solution_fif(fname) + + if len(bem["surfs"]) == 3: + logger.info("Three-layer model surfaces loaded.") + needed = np.array( + [ + FIFF.FIFFV_BEM_SURF_ID_HEAD, + FIFF.FIFFV_BEM_SURF_ID_SKULL, + FIFF.FIFFV_BEM_SURF_ID_BRAIN, + ] + ) + if not all(x["id"] in needed for x in bem["surfs"]): + raise RuntimeError("Could not find necessary BEM surfaces") + # reorder surfaces as necessary (shouldn't need to?) + reorder = [None] * 3 + for x in bem["surfs"]: + reorder[np.where(x["id"] == needed)[0][0]] = x + bem["surfs"] = reorder + elif len(bem["surfs"]) == 1: + if not bem["surfs"][0]["id"] == FIFF.FIFFV_BEM_SURF_ID_BRAIN: + raise RuntimeError("BEM Surfaces not found") + logger.info("Homogeneous model surface loaded.") + + assert set(bem.keys()) == set(("surfs", "solution", "bem_method", "solver")) + bem = ConductorModel(bem) + bem["is_sphere"] = False + # sanity checks and conversions + _check_option( + "BEM approximation method", bem["bem_method"], (FIFF.FIFFV_BEM_APPROX_LINEAR,) + ) # CONSTANT not supported + dim = 0 + solver = bem.get("solver", "mne") + _check_option("BEM solver", solver, ("mne", "openmeeg")) + for si, surf in enumerate(bem["surfs"]): + assert bem["bem_method"] == FIFF.FIFFV_BEM_APPROX_LINEAR + dim += surf["np"] + if solver == "openmeeg" and si != 0: + dim += surf["ntri"] + dims = bem["solution"].shape + if solver == "openmeeg": + sz = (dim * (dim + 1)) // 2 + if len(dims) != 1 or dims[0] != sz: + raise RuntimeError( + "For the given BEM surfaces, OpenMEEG should produce a " + f"solution matrix of shape ({sz},) but got {dims}" + ) + bem["nsol"] = dim + else: + if len(dims) != 2 and solver != "openmeeg": + raise RuntimeError( + "Expected a two-dimensional solution matrix " + f"instead of a {dims[0]} dimensional one" + ) + if dims[0] != dim or dims[1] != dim: + raise RuntimeError( + f"Expected a {dim} x {dim} solution matrix instead of " + f"a {dims[1]} x {dims[0]} one" + ) + bem["nsol"] = bem["solution"].shape[0] + # Gamma factors and multipliers + _add_gamma_multipliers(bem) + extra = f"made by {solver}" if solver != "mne" else "" + logger.info(f"Loaded linear collocation BEM solution{extra} from {fname}") + return bem + + +def _read_bem_solution_fif(fname): + logger.info("Loading surfaces...") + surfs = read_bem_surfaces(fname, patch_stats=True, 
verbose=_verbose_safe_false()) + + # convert from surfaces to solution + logger.info("\nLoading the solution matrix...\n") + solver = "mne" + f, tree, _ = fiff_open(fname) + with f as fid: + # Find the BEM data + nodes = dir_tree_find(tree, FIFF.FIFFB_BEM) + if len(nodes) == 0: + raise RuntimeError(f"No BEM data in {fname}") + bem_node = nodes[0] + + # Approximation method + tag = find_tag(f, bem_node, FIFF.FIFF_DESCRIPTION) + if tag is not None: + tag = json.loads(tag.data) + solver = tag["solver"] + tag = find_tag(f, bem_node, FIFF.FIFF_BEM_APPROX) + if tag is None: + raise RuntimeError(f"No BEM solution found in {fname}") + method = tag.data[0] + tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION) + sol = tag.data + + return dict(solution=sol, bem_method=method, surfs=surfs, solver=solver) + + +def _add_gamma_multipliers(bem): + """Add gamma and multipliers in-place.""" + bem["sigma"] = np.array([surf["sigma"] for surf in bem["surfs"]]) + # Dirty trick for the zero conductivity outside + sigma = np.r_[0.0, bem["sigma"]] + bem["source_mult"] = 2.0 / (sigma[1:] + sigma[:-1]) + bem["field_mult"] = sigma[1:] - sigma[:-1] + # make sure subsequent "zip"s work correctly + assert len(bem["surfs"]) == len(bem["field_mult"]) + bem["gamma"] = (sigma[1:] - sigma[:-1])[np.newaxis, :] / (sigma[1:] + sigma[:-1])[ + :, np.newaxis + ] + + +# In our BEM code we do not model the CSF so we assign the innermost surface +# the id BRAIN. Our 4-layer sphere we model CSF (at least by default), so when +# searching for and referring to surfaces we need to keep track of this. +_sm_surf_dict = OrderedDict( + [ + ("brain", FIFF.FIFFV_BEM_SURF_ID_BRAIN), + ("inner_skull", FIFF.FIFFV_BEM_SURF_ID_CSF), + ("outer_skull", FIFF.FIFFV_BEM_SURF_ID_SKULL), + ("head", FIFF.FIFFV_BEM_SURF_ID_HEAD), + ] +) +_bem_surf_dict = { + "inner_skull": FIFF.FIFFV_BEM_SURF_ID_BRAIN, + "outer_skull": FIFF.FIFFV_BEM_SURF_ID_SKULL, + "head": FIFF.FIFFV_BEM_SURF_ID_HEAD, +} +_bem_surf_name = { + FIFF.FIFFV_BEM_SURF_ID_BRAIN: "inner skull", + FIFF.FIFFV_BEM_SURF_ID_SKULL: "outer skull", + FIFF.FIFFV_BEM_SURF_ID_HEAD: "outer skin ", + FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: "unknown ", + FIFF.FIFFV_MNE_SURF_MEG_HELMET: "MEG helmet ", +} +_sm_surf_name = { + FIFF.FIFFV_BEM_SURF_ID_BRAIN: "brain", + FIFF.FIFFV_BEM_SURF_ID_CSF: "csf", + FIFF.FIFFV_BEM_SURF_ID_SKULL: "outer skull", + FIFF.FIFFV_BEM_SURF_ID_HEAD: "outer skin ", + FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: "unknown ", + FIFF.FIFFV_MNE_SURF_MEG_HELMET: "helmet", +} + + +def _bem_find_surface(bem, id_): + """Find surface from already-loaded conductor model.""" + if bem["is_sphere"]: + _surf_dict = _sm_surf_dict + _name_dict = _sm_surf_name + kind = "Sphere model" + tri = "boundary" + else: + _surf_dict = _bem_surf_dict + _name_dict = _bem_surf_name + kind = "BEM" + tri = "triangulation" + if isinstance(id_, str): + name = id_ + id_ = _surf_dict[id_] + else: + name = _name_dict[id_] + kind = "Sphere model" if bem["is_sphere"] else "BEM" + idx = np.where(np.array([s["id"] for s in bem["surfs"]]) == id_)[0] + if len(idx) != 1: + raise RuntimeError(f"{kind} does not have the {name} {tri}") + return bem["surfs"][idx[0]] + + +# ############################################################################ +# Write + + +@verbose +def write_bem_surfaces(fname, surfs, overwrite=False, *, verbose=None): + """Write BEM surfaces to a FIF file. + + Parameters + ---------- + fname : path-like + Filename to write. Can end with ``.h5`` to write using HDF5. 
+ surfs : dict | list of dict + The surfaces, or a single surface. + %(overwrite)s + %(verbose)s + """ + if isinstance(surfs, dict): + surfs = [surfs] + fname = _check_fname(fname, overwrite=overwrite, name="fname") + + if fname.suffix == ".h5": + _, write_hdf5 = _import_h5io_funcs() + write_hdf5(fname, dict(surfs=surfs), overwrite=True) + else: + with start_and_end_file(fname) as fid: + start_block(fid, FIFF.FIFFB_BEM) + write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surfs[0]["coord_frame"]) + _write_bem_surfaces_block(fid, surfs) + end_block(fid, FIFF.FIFFB_BEM) + + +@verbose +def write_head_bem( + fname, rr, tris, on_defects="raise", overwrite=False, *, verbose=None +): + """Write a head surface to a FIF file. + + Parameters + ---------- + fname : path-like + Filename to write. + rr : array, shape (n_vertices, 3) + Coordinate points in the MRI coordinate system. + tris : ndarray of int, shape (n_tris, 3) + Triangulation (each line contains indices for three points which + together form a face). + %(on_defects)s + %(overwrite)s + %(verbose)s + """ + surf = _surfaces_to_bem( + [dict(rr=rr, tris=tris)], + [FIFF.FIFFV_BEM_SURF_ID_HEAD], + [1], + rescale=False, + incomplete=on_defects, + ) + write_bem_surfaces(fname, surf, overwrite=overwrite) + + +def _write_bem_surfaces_block(fid, surfs): + """Write bem surfaces to open file handle.""" + for surf in surfs: + start_block(fid, FIFF.FIFFB_BEM_SURF) + if "sigma" in surf: + write_float(fid, FIFF.FIFF_BEM_SIGMA, surf["sigma"]) + write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf["id"]) + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf["coord_frame"]) + write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf["np"]) + write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf["ntri"]) + write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf["rr"]) + # index start at 0 in Python + write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES, surf["tris"] + 1) + if "nn" in surf and surf["nn"] is not None and len(surf["nn"]) > 0: + write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf["nn"]) + end_block(fid, FIFF.FIFFB_BEM_SURF) + + +@verbose +def write_bem_solution(fname, bem, overwrite=False, *, verbose=None): + """Write a BEM model with solution. + + Parameters + ---------- + fname : path-like + The filename to use. Can end with ``.h5`` to write using HDF5. + bem : instance of ConductorModel + The BEM model with solution to save. 
+    %(overwrite)s
+    %(verbose)s
+
+    See Also
+    --------
+    read_bem_solution
+    """
+    fname = _check_fname(fname, overwrite=overwrite, name="fname")
+    if fname.suffix == ".h5":
+        _, write_hdf5 = _import_h5io_funcs()
+        bem = {k: bem[k] for k in ("surfs", "solution", "bem_method")}
+        write_hdf5(fname, bem, overwrite=True)
+    else:
+        _write_bem_solution_fif(fname, bem)
+
+
+def _write_bem_solution_fif(fname, bem):
+    _check_bem_size(bem["surfs"])
+    with start_and_end_file(fname) as fid:
+        start_block(fid, FIFF.FIFFB_BEM)
+        # Coordinate frame (mainly for backward compatibility)
+        write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, bem["surfs"][0]["coord_frame"])
+        solver = bem.get("solver", "mne")
+        if solver != "mne":
+            write_string(fid, FIFF.FIFF_DESCRIPTION, json.dumps(dict(solver=solver)))
+        # Surfaces
+        _write_bem_surfaces_block(fid, bem["surfs"])
+        # The potential solution
+        if "solution" in bem:
+            _check_option(
+                "bem_method", bem["bem_method"], (FIFF.FIFFV_BEM_APPROX_LINEAR,)
+            )
+            write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR)
+            write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION, bem["solution"])
+        end_block(fid, FIFF.FIFFB_BEM)
+
+
+# #############################################################################
+# Create 3-layer BEM model from flash MRI images
+
+
+def _prepare_env(subject, subjects_dir):
+    """Prepare an env object for subprocess calls."""
+    env = os.environ.copy()
+
+    fs_home = _check_freesurfer_home()
+
+    _validate_type(subject, "str")
+
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    subject_dir = subjects_dir / subject
+    if not subject_dir.is_dir():
+        raise RuntimeError(f'Could not find the subject data directory "{subject_dir}"')
+    env.update(SUBJECT=subject, SUBJECTS_DIR=str(subjects_dir), FREESURFER_HOME=fs_home)
+    mri_dir = subject_dir / "mri"
+    bem_dir = subject_dir / "bem"
+    return env, mri_dir, bem_dir
+
+
+def _write_echos(mri_dir, flash_echos, angle):
+    nib = _import_nibabel("write echoes")
+    from nibabel.spatialimages import SpatialImage
+
+    if _path_like(flash_echos):
+        flash_echos = nib.load(flash_echos)
+    if isinstance(flash_echos, SpatialImage):
+        flash_echo_imgs = []
+        data = np.asanyarray(flash_echos.dataobj)
+        affine = flash_echos.affine
+        if data.ndim == 3:
+            data = data[..., np.newaxis]
+        for echo_idx in range(data.shape[3]):
+            this_echo_img = flash_echos.__class__(
+                data[..., echo_idx], affine=affine, header=deepcopy(flash_echos.header)
+            )
+            flash_echo_imgs.append(this_echo_img)
+        flash_echos = flash_echo_imgs
+        del flash_echo_imgs
+    for idx, flash_echo in enumerate(flash_echos, 1):
+        if _path_like(flash_echo):
+            flash_echo = nib.load(flash_echo)
+        nib.save(flash_echo, op.join(mri_dir, "flash", f"mef{angle}_{idx:03d}.mgz"))
+
+
+@verbose
+def convert_flash_mris(
+    subject, flash30=True, unwarp=False, subjects_dir=None, flash5=True, verbose=None
+):
+    """Synthesize the flash 5 files for use with make_flash_bem.
+
+    This function aims to produce a synthesized flash 5 MRI from
+    multiecho flash (MEF) MRI data. This function can use MEF data
+    with flip angles of 5 or 30 degrees. If flash5 (and flash30) images are
+    not explicitly provided, it will assume that the different echos are
+    available in the mri/flash folder of the subject with the following naming
+    convention "mef<angle>_<echo>.mgz", e.g. "mef05_001.mgz"
+    or "mef30_001.mgz".
+
+    Parameters
+    ----------
+    %(subject)s
+    flash30 : bool | list of SpatialImage or path-like | SpatialImage | path-like
+        If False, do not use 30-degree flip angle data.
+        Otherwise, the list of flash 30 echos to use. If True it will look for
+        files named mef30_*.mgz in the subject's mri/flash directory;
+        otherwise the given flash 30 echo images will be written to the
+        mri/flash folder with convention mef30_<echo>.mgz. If a SpatialImage
+        object, each frame of the image will be interpreted as an echo.
+    unwarp : bool
+        Run grad_unwarp with -unwarp option on each of the converted
+        data sets. It requires FreeSurfer's MATLAB toolbox to be properly
+        installed.
+    %(subjects_dir)s
+    flash5 : list of SpatialImage or path-like | SpatialImage | path-like | True
+        The list of flash 5 echos to use. If True it will look for files
+        named mef05_*.mgz in the subject's mri/flash directory; otherwise the
+        given flash 5 echo images will be written to the mri/flash folder
+        with convention mef05_<echo>.mgz. If a SpatialImage object, each
+        frame of the image will be interpreted as an echo.
+    %(verbose)s
+
+    Returns
+    -------
+    flash5_img : path-like
+        The path to the synthesized flash 5 MRI.
+
+    Notes
+    -----
+    This function assumes that the FreeSurfer segmentation of the subject
+    has been completed. In particular, the T1.mgz and brain.mgz MRI volumes
+    should be, as usual, in the subject's mri directory.
+    """  # noqa: E501
+    env, mri_dir = _prepare_env(subject, subjects_dir)[:2]
+    tempdir = _TempDir()  # fsl and Freesurfer create some random junk in CWD
+    run_subprocess_env = partial(run_subprocess, env=env, cwd=tempdir)
+
+    mri_dir = Path(mri_dir)
+    # Step 1a : Data conversion to mgz format
+    flash_dir = mri_dir / "flash"
+    pm_dir = flash_dir / "parameter_maps"
+    pm_dir.mkdir(parents=True, exist_ok=True)
+    echos_done = 0
+
+    if not isinstance(flash5, bool):
+        _write_echos(mri_dir, flash5, angle="05")
+    if not isinstance(flash30, bool):
+        _write_echos(mri_dir, flash30, angle="30")
+
+    # Step 1b : Run grad_unwarp on converted files
+    template = op.join(flash_dir, "mef*_*.mgz")
+    files = sorted(glob.glob(template))
+    if len(files) == 0:
+        raise ValueError(f"No suitable source files found ({template})")
+    if unwarp:
+        logger.info("\n---- Unwarp mgz data sets ----")
+        for infile in files:
+            outfile = infile.replace(".mgz", "u.mgz")
+            cmd = ["grad_unwarp", "-i", infile, "-o", outfile, "-unwarp", "true"]
+            run_subprocess_env(cmd)
+    # Clear parameter maps if some of the data were reconverted
+    if echos_done > 0 and pm_dir.exists():
+        shutil.rmtree(pm_dir)
+        logger.info("\nParameter maps directory cleared")
+    if not pm_dir.exists():
+        pm_dir.mkdir(parents=True, exist_ok=True)
+    # Step 2 : Create the parameter maps
+    if flash30:
+        logger.info("\n---- Creating the parameter maps ----")
+        if unwarp:
+            files = sorted(glob.glob(op.join(flash_dir, "mef05_*u.mgz")))
+        if len(os.listdir(pm_dir)) == 0:
+            cmd = ["mri_ms_fitparms"] + files + [str(pm_dir)]
+            run_subprocess_env(cmd)
+        else:
+            logger.info("Parameter maps were already computed")
+        # Step 3 : Synthesize the flash 5 images
+        logger.info("\n---- Synthesizing flash 5 images ----")
+        if not (pm_dir / "flash5.mgz").exists():
+            cmd = [
+                "mri_synthesize",
+                "20",
+                "5",
+                "5",
+                (pm_dir / "T1.mgz"),
+                (pm_dir / "PD.mgz"),
+                (pm_dir / "flash5.mgz"),
+            ]
+            run_subprocess_env(cmd)
+            (pm_dir / "flash5_reg.mgz").unlink(missing_ok=True)
+        else:
+            logger.info("Synthesized flash 5 volume is already there")
+    else:
+        logger.info("\n---- Averaging flash5 echoes ----")
+        template = "mef05_*u.mgz" if unwarp else "mef05_*.mgz"
+        files = sorted(flash_dir.glob(template))
+        if len(files) == 0:
+            raise ValueError(f"No suitable source files found ({template})")
+        cmd = ["mri_average", 
"-noconform"] + files + [pm_dir / "flash5.mgz"] + run_subprocess_env(cmd) + (pm_dir / "flash5_reg.mgz").unlink(missing_ok=True) + del tempdir # finally done running subprocesses + assert (pm_dir / "flash5.mgz").exists() + return pm_dir / "flash5.mgz" + + +@verbose +def make_flash_bem( + subject, + overwrite=False, + show=True, + subjects_dir=None, + copy=True, + *, + flash5_img=None, + register=True, + verbose=None, +): + """Create 3-Layer BEM model from prepared flash MRI images. + + See :ref:`bem_flash_algorithm` for additional information. + + Parameters + ---------- + %(subject)s + overwrite : bool + Write over existing .surf files in bem folder. + show : bool + Show surfaces to visually inspect all three BEM surfaces (recommended). + %(subjects_dir)s + copy : bool + If True (default), use copies instead of symlinks for surfaces + (if they do not already exist). + + .. versionadded:: 0.18 + .. versionchanged:: 1.1 Use copies instead of symlinks. + flash5_img : None | path-like | Nifti1Image + The path to the synthesized flash 5 MRI image or the image itself. If + None (default), the path defaults to + ``mri/flash/parameter_maps/flash5.mgz`` within the subject + reconstruction. If not present the image is copied or written to the + default location. + + .. versionadded:: 1.1.0 + register : bool + Register the flash 5 image with T1.mgz file. If False, we assume + that the images are already coregistered. + + .. versionadded:: 1.1.0 + %(verbose)s + + See Also + -------- + convert_flash_mris + + Notes + ----- + This program assumes that FreeSurfer is installed and sourced properly. + + This function extracts the BEM surfaces (outer skull, inner skull, and + outer skin) from a FLASH 5 MRI image synthesized from multiecho FLASH + images acquired with spin angles of 5 and 30 degrees. 
+ """ + env, mri_dir, bem_dir = _prepare_env(subject, subjects_dir) + tempdir = _TempDir() # fsl and Freesurfer create some random junk in CWD + run_subprocess_env = partial(run_subprocess, env=env, cwd=tempdir) + + mri_dir = Path(mri_dir) + bem_dir = Path(bem_dir) + subjects_dir = env["SUBJECTS_DIR"] + flash_path = (mri_dir / "flash" / "parameter_maps").resolve() + flash_path.mkdir(exist_ok=True, parents=True) + + logger.info( + "\nProcessing the flash MRI data to produce BEM meshes with the following " + f"parameters:\nSUBJECTS_DIR = {subjects_dir}\nSUBJECT = {subject}\nResult dir =" + f"{bem_dir / 'flash'}\n" + ) + # Step 4 : Register with MPRAGE + flash5 = flash_path / "flash5.mgz" + + if _path_like(flash5_img): + logger.info(f"Copying flash 5 image {flash5_img} to {flash5}") + cmd = ["mri_convert", Path(flash5_img).resolve(), flash5] + run_subprocess_env(cmd) + elif flash5_img is None: + if not flash5.exists(): + raise ValueError(f"Flash 5 image cannot be found at {flash5}.") + else: + logger.info(f"Writing flash 5 image at {flash5}") + nib = _import_nibabel("write an MRI image") + nib.save(flash5_img, flash5) + + if register: + logger.info("\n---- Registering flash 5 with T1 MPRAGE ----") + flash5_reg = flash_path / "flash5_reg.mgz" + if not flash5_reg.exists(): + if (mri_dir / "T1.mgz").exists(): + ref_volume = mri_dir / "T1.mgz" + else: + ref_volume = mri_dir / "T1" + cmd = [ + "fsl_rigid_register", + "-r", + str(ref_volume), + "-i", + str(flash5), + "-o", + str(flash5_reg), + ] + run_subprocess_env(cmd) + else: + logger.info("Registered flash 5 image is already there") + else: + flash5_reg = flash5 + + # Step 5a : Convert flash5 into COR + logger.info("\n---- Converting flash5 volume into COR format ----") + flash5_dir = mri_dir / "flash5" + shutil.rmtree(flash5_dir, ignore_errors=True) + flash5_dir.mkdir(exist_ok=True, parents=True) + cmd = ["mri_convert", flash5_reg, flash5_dir] + run_subprocess_env(cmd) + # Step 5b and c : Convert the mgz volumes into COR + convert_T1 = False + T1_dir = mri_dir / "T1" + if not T1_dir.is_dir() or next(T1_dir.glob("COR*")) is None: + convert_T1 = True + convert_brain = False + brain_dir = mri_dir / "brain" + if not brain_dir.is_dir() or next(brain_dir.glob("COR*")) is None: + convert_brain = True + logger.info("\n---- Converting T1 volume into COR format ----") + if convert_T1: + T1_fname = mri_dir / "T1.mgz" + if not T1_fname.is_file(): + raise RuntimeError("Both T1 mgz and T1 COR volumes missing.") + T1_dir.mkdir(exist_ok=True, parents=True) + cmd = ["mri_convert", T1_fname, T1_dir] + run_subprocess_env(cmd) + else: + logger.info("T1 volume is already in COR format") + logger.info("\n---- Converting brain volume into COR format ----") + if convert_brain: + brain_fname = mri_dir / "brain.mgz" + if not brain_fname.is_file(): + raise RuntimeError("Both brain mgz and brain COR volumes missing.") + brain_dir.mkdir(exist_ok=True, parents=True) + cmd = ["mri_convert", brain_fname, brain_dir] + run_subprocess_env(cmd) + else: + logger.info("Brain volume is already in COR format") + # Finally ready to go + logger.info("\n---- Creating the BEM surfaces ----") + cmd = ["mri_make_bem_surfaces", subject] + run_subprocess_env(cmd) + del tempdir # ran our last subprocess; clean up directory + + logger.info("\n---- Converting the tri files into surf files ----") + flash_bem_dir = bem_dir / "flash" + flash_bem_dir.mkdir(exist_ok=True, parents=True) + surfs = ["inner_skull", "outer_skull", "outer_skin"] + for surf in surfs: + out_fname = flash_bem_dir / (surf + 
".tri") + shutil.move(bem_dir / (surf + ".tri"), out_fname) + nodes, tris = read_tri(out_fname, swap=True) + # Do not write volume info here because the tris are already in + # standard Freesurfer coords + write_surface(op.splitext(out_fname)[0] + ".surf", nodes, tris, overwrite=True) + + # Cleanup section + logger.info("\n---- Cleaning up ----") + (bem_dir / "inner_skull_tmp.tri").unlink() + if convert_T1: + shutil.rmtree(T1_dir) + logger.info("Deleted the T1 COR volume") + if convert_brain: + shutil.rmtree(brain_dir) + logger.info("Deleted the brain COR volume") + shutil.rmtree(flash5_dir) + logger.info("Deleted the flash5 COR volume") + # Create symbolic links to the .surf files in the bem folder + logger.info("\n---- Creating symbolic links ----") + # os.chdir(bem_dir) + for surf in surfs: + surf = bem_dir / (surf + ".surf") + if not overwrite and surf.exists(): + skip_symlink = True + else: + if surf.exists(): + surf.unlink() + _symlink(flash_bem_dir / surf.name, surf, copy) + skip_symlink = False + if skip_symlink: + logger.info( + "Unable to create all symbolic links to .surf files " + "in bem folder. Use --overwrite option to recreate them." + ) + dest = bem_dir / "flash" + else: + logger.info("Symbolic links to .surf files created in bem folder") + dest = bem_dir + logger.info( + "\nThank you for waiting.\nThe BEM triangulations for this " + f"subject are now available at:\n{dest}.\nWe hope the BEM meshes " + "created will facilitate your MEG and EEG data analyses." + ) + # Show computed BEM surfaces + if show: + plot_bem( + subject=subject, + subjects_dir=subjects_dir, + orientation="coronal", + slices=None, + show=True, + ) + + +def _check_bem_size(surfs): + """Check bem surface sizes.""" + if len(surfs) > 1 and surfs[0]["np"] > 10000: + warn( + f"The bem surfaces have {surfs[0]['np']} data points. 5120 (ico grade=4) " + "should be enough. Dense 3-layer bems may not save properly." + ) + + +def _symlink(src, dest, copy=False): + """Create a relative symlink (or just copy).""" + if not copy: + src_link = op.relpath(src, op.dirname(dest)) + try: + os.symlink(src_link, dest) + except OSError: + warn( + f"Could not create symbolic link {dest}. Check that your " + "partition handles symbolic links. The file will be copied " + "instead." + ) + copy = True + if copy: + shutil.copy(src, dest) + + +def _ensure_bem_surfaces(bem, extra_allow=(), name="bem"): + # by default only allow path-like and list, but handle None and + # ConductorModel properly if need be. Always return a ConductorModel + # even though it's incomplete (and might have is_sphere=True). 
+ assert all(extra in (None, ConductorModel) for extra in extra_allow) + allowed = ("path-like", list) + extra_allow + _validate_type(bem, allowed, name) + if isinstance(bem, path_like): + # Load the surfaces + logger.info(f"Loading BEM surfaces from {bem}...") + bem = read_bem_surfaces(bem) + bem = ConductorModel(is_sphere=False, surfs=bem) + elif isinstance(bem, list): + for ii, this_surf in enumerate(bem): + _validate_type(this_surf, dict, f"{name}[{ii}]") + if isinstance(bem, list): + bem = ConductorModel(is_sphere=False, surfs=bem) + # add surfaces in the spherical case + if isinstance(bem, ConductorModel) and bem["is_sphere"]: + bem = bem.copy() + bem["surfs"] = [] + if len(bem["layers"]) == 4: + for idx, id_ in enumerate(_sm_surf_dict.values()): + bem["surfs"].append(_complete_sphere_surf(bem, idx, 4, complete=False)) + bem["surfs"][-1]["id"] = id_ + + return bem + + +def _check_file(fname, overwrite): + """Prevent overwrites.""" + if op.isfile(fname) and not overwrite: + raise OSError(f"File {fname} exists, use --overwrite to overwrite it") + + +_tri_levels = dict( + medium=30000, + sparse=2500, +) + + +@verbose +def make_scalp_surfaces( + subject, + subjects_dir=None, + force=True, + overwrite=False, + no_decimate=False, + *, + threshold=20, + mri="T1.mgz", + verbose=None, +): + """Create surfaces of the scalp and neck. + + The scalp surfaces are required for using the MNE coregistration GUI, and + allow for a visualization of the alignment between anatomy and channel + locations. + + Parameters + ---------- + %(subject)s + %(subjects_dir)s + force : bool + Force creation of the surface even if it has some topological defects. + Defaults to ``True``. See :ref:`tut-fix-meshes` for ideas on how to + fix problematic meshes. + %(overwrite)s + no_decimate : bool + Disable the "medium" and "sparse" decimations. In this case, only + a "dense" surface will be generated. Defaults to ``False``, i.e., + create surfaces for all three types of decimations. + threshold : int + The threshold to use with the MRI in the call to ``mkheadsurf``. + The default is ``20``. + + .. versionadded:: 1.1 + mri : str + The MRI to use. Should exist in ``$SUBJECTS_DIR/$SUBJECT/mri``. + + .. versionadded:: 1.1 + %(verbose)s + """ + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + incomplete = "warn" if force else "raise" + subj_path = subjects_dir / subject + if not subj_path.exists(): + raise RuntimeError( + f"{subj_path} does not exist. Please check your subject directory path." + ) + + # Backward compat for old FreeSurfer (?) + _validate_type(mri, str, "mri") + if mri == "T1.mgz": + mri = mri if (subj_path / "mri" / mri).exists() else "T1" + + logger.info("1. 
Creating a dense scalp tessellation with mkheadsurf...") + + def check_seghead(surf_path=subj_path / "surf"): + surf = None + for k in ["lh.seghead", "lh.smseghead"]: + this_surf = surf_path / k + if this_surf.exists(): + surf = this_surf + break + return surf + + my_seghead = check_seghead() + threshold = _ensure_int(threshold, "threshold") + if my_seghead is None: + this_env = deepcopy(os.environ) + this_env["SUBJECTS_DIR"] = str(subjects_dir) + this_env["SUBJECT"] = subject + this_env["subjdir"] = str(subj_path) + if "FREESURFER_HOME" not in this_env: + raise RuntimeError( + "The FreeSurfer environment needs to be set up to use " + "make_scalp_surfaces to create the outer skin surface " + "lh.seghead" + ) + run_subprocess( + [ + "mkheadsurf", + "-subjid", + subject, + "-srcvol", + mri, + "-thresh1", + str(threshold), + "-thresh2", + str(threshold), + ], + env=this_env, + ) + + surf = check_seghead() + if surf is None: + raise RuntimeError("mkheadsurf did not produce the standard output file.") + + bem_dir = subjects_dir / subject / "bem" + if not bem_dir.is_dir(): + os.mkdir(bem_dir) + fname_template = bem_dir / (f"{subject}-head-{{}}.fif") + dense_fname = str(fname_template).format("dense") + logger.info(f"2. Creating {dense_fname} ...") + _check_file(dense_fname, overwrite) + # Helpful message if we get a topology error + msg = ( + "\n\nConsider using pymeshfix directly to fix the mesh, or --force " + "to ignore the problem." + ) + surf = _surfaces_to_bem( + [surf], [FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], incomplete=incomplete, extra=msg + )[0] + write_bem_surfaces(dense_fname, surf, overwrite=overwrite) + if os.getenv("_MNE_TESTING_SCALP", "false") == "true": + tris = [len(surf["tris"])] # don't actually decimate + for ii, (level, n_tri) in enumerate(_tri_levels.items(), 3): + if no_decimate: + break + logger.info(f"{ii}. Creating {level} tessellation...") + logger.info( + f"{ii}.1 Decimating the dense tessellation " + f'({len(surf["tris"])} -> {n_tri} triangles)...' + ) + points, tris = decimate_surface( + points=surf["rr"], triangles=surf["tris"], n_triangles=n_tri + ) + dec_fname = str(fname_template).format(level) + logger.info(f"{ii}.2 Creating {dec_fname}") + _check_file(dec_fname, overwrite) + dec_surf = _surfaces_to_bem( + [dict(rr=points, tris=tris)], + [FIFF.FIFFV_BEM_SURF_ID_HEAD], + [1], + rescale=False, + incomplete=incomplete, + extra=msg, + ) + write_bem_surfaces(dec_fname, dec_surf, overwrite=overwrite) + logger.info("[done]") + + +@verbose +def distance_to_bem(pos, bem, trans=None, verbose=None): + """Calculate the distance of positions to inner skull surface. + + Parameters + ---------- + pos : array, shape (..., 3) + Position(s) in m, in head coordinates. + bem : instance of ConductorModel + Conductor model. + %(trans)s If None (default), assumes bem is in head coordinates. + + .. versionchanged:: 0.19 + Support for 'fsaverage' argument. + %(verbose)s + + Returns + ------- + distances : float | array, shape (...) + The computed distance(s). A float is returned if pos is + an array of shape (3,) corresponding to a single position. + + Notes + ----- + .. versionadded:: 1.1 + """ + ndim = pos.ndim + if ndim == 1: + pos = pos[np.newaxis, :] + + n = pos.shape[0] + distance = np.zeros((n,)) + + logger.info( + "Computing distance to inner skull surface for " + f"{n} position{_pl(n)}..." 
+ ) + + if bem["is_sphere"]: + center = bem["r0"] + + if trans: + center = apply_trans(trans, center, move=True) + radius = bem["layers"][0]["rad"] + + distance = np.abs(radius - np.linalg.norm(pos - center, axis=1)) + + else: # is BEM + surface_points = bem["surfs"][0]["rr"] + + if trans: + surface_points = apply_trans(trans, surface_points, move=True) + + _, distance = _compute_nearest(surface_points, pos, return_dists=True) + + if ndim == 1: + distance = distance[0] # return just a float if one pos is passed + + return distance diff --git a/mne/channels/__init__.py b/mne/channels/__init__.py new file mode 100644 index 0000000..901a237 --- /dev/null +++ b/mne/channels/__init__.py @@ -0,0 +1,12 @@ +"""Module dedicated to manipulation of channels. + +Can be used for setting of sensor locations used for processing and plotting. +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/channels/__init__.pyi b/mne/channels/__init__.pyi new file mode 100644 index 0000000..05f273a --- /dev/null +++ b/mne/channels/__init__.pyi @@ -0,0 +1,78 @@ +__all__ = [ + "DigMontage", + "Layout", + "_EEG_SELECTIONS", + "_SELECTIONS", + "_divide_to_regions", + "combine_channels", + "compute_dev_head_t", + "compute_native_head_t", + "equalize_channels", + "find_ch_adjacency", + "find_layout", + "fix_mag_coil_types", + "generate_2d_layout", + "get_builtin_ch_adjacencies", + "get_builtin_montages", + "make_1020_channel_selections", + "make_dig_montage", + "make_eeg_layout", + "make_grid_layout", + "make_standard_montage", + "read_ch_adjacency", + "read_custom_montage", + "read_dig_captrak", + "read_dig_dat", + "read_dig_egi", + "read_dig_fif", + "read_dig_hpts", + "read_dig_localite", + "read_dig_polhemus_isotrak", + "read_layout", + "read_polhemus_fastscan", + "read_vectorview_selection", + "rename_channels", + "transform_to_head", + "unify_bad_channels", +] +from .channels import ( + _EEG_SELECTIONS, + _SELECTIONS, + _divide_to_regions, + combine_channels, + equalize_channels, + find_ch_adjacency, + fix_mag_coil_types, + get_builtin_ch_adjacencies, + make_1020_channel_selections, + read_ch_adjacency, + read_vectorview_selection, + rename_channels, + unify_bad_channels, +) +from .layout import ( + Layout, + find_layout, + generate_2d_layout, + make_eeg_layout, + make_grid_layout, + read_layout, +) +from .montage import ( + DigMontage, + compute_dev_head_t, + compute_native_head_t, + get_builtin_montages, + make_dig_montage, + make_standard_montage, + read_custom_montage, + read_dig_captrak, + read_dig_dat, + read_dig_egi, + read_dig_fif, + read_dig_hpts, + read_dig_localite, + read_dig_polhemus_isotrak, + read_polhemus_fastscan, + transform_to_head, +) diff --git a/mne/channels/_dig_montage_utils.py b/mne/channels/_dig_montage_utils.py new file mode 100644 index 0000000..a59e209 --- /dev/null +++ b/mne/channels/_dig_montage_utils.py @@ -0,0 +1,96 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
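+
+# Helper parsers for digitized montage files (EGI XML and BrainVision CapTrak
+# XML). Both return fiducial and per-channel positions in an "unknown"
+# coordinate frame; the public readers (read_dig_egi, read_dig_captrak) wrap
+# these into a DigMontage (see transform_to_head for head-coordinate
+# conversion).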
+ +import numpy as np + +from ..utils import Bunch, _check_fname, _soft_import, warn + + +def _read_dig_montage_egi( + fname, + _scaling, + _all_data_kwargs_are_none, +): + if not _all_data_kwargs_are_none: + raise ValueError( + "hsp, hpi, elp, point_names, fif must all be None if egi is not None" + ) + _check_fname(fname, overwrite="read", must_exist=True) + defusedxml = _soft_import("defusedxml", "reading EGI montages") + root = defusedxml.ElementTree.parse(fname).getroot() + ns = root.tag[root.tag.index("{") : root.tag.index("}") + 1] + sensors = root.find(f"{ns}sensorLayout/{ns}sensors") + fids = dict() + dig_ch_pos = dict() + + fid_name_map = { + "Nasion": "nasion", + "Right periauricular point": "rpa", + "Left periauricular point": "lpa", + } + + for s in sensors: + name, number, kind = s[0].text, int(s[1].text), int(s[2].text) + coordinates = np.array([float(s[3].text), float(s[4].text), float(s[5].text)]) + + coordinates *= _scaling + + # EEG Channels + if kind == 0: + dig_ch_pos[f"EEG {number:03d}"] = coordinates + # Reference + elif kind == 1: + dig_ch_pos[f"EEG {len(dig_ch_pos) + 1:03d}"] = coordinates + # Fiducials + elif kind == 2: + fid_name = fid_name_map[name] + fids[fid_name] = coordinates + # Unknown + else: + warn( + f"Unknown sensor type {kind} detected. Skipping sensor..." + "Proceed with caution!" + ) + + return Bunch( + # EGI stuff + nasion=fids["nasion"], + lpa=fids["lpa"], + rpa=fids["rpa"], + ch_pos=dig_ch_pos, + coord_frame="unknown", + ) + + +def _parse_brainvision_dig_montage(fname, scale): + FID_NAME_MAP = {"Nasion": "nasion", "RPA": "rpa", "LPA": "lpa"} + defusedxml = _soft_import("defusedxml", "reading BrainVision montages") + root = defusedxml.ElementTree.parse(fname).getroot() + sensors = root.find("CapTrakElectrodeList") + + fids, dig_ch_pos = dict(), dict() + + for s in sensors: + name = s.find("Name").text + + is_fid = name in FID_NAME_MAP + coordinates = scale * np.array( + [float(s.find("X").text), float(s.find("Y").text), float(s.find("Z").text)] + ) + + # Fiducials + if is_fid: + fids[FID_NAME_MAP[name]] = coordinates + # EEG Channels + else: + dig_ch_pos[name] = coordinates + + return dict( + # BVCT stuff + nasion=fids["nasion"], + lpa=fids["lpa"], + rpa=fids["rpa"], + ch_pos=dig_ch_pos, + coord_frame="unknown", + ) diff --git a/mne/channels/_standard_montage_utils.py b/mne/channels/_standard_montage_utils.py new file mode 100644 index 0000000..eb3dc10 --- /dev/null +++ b/mne/channels/_standard_montage_utils.py @@ -0,0 +1,421 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import csv +import os.path as op +from collections import OrderedDict +from functools import partial + +import numpy as np + +from .._freesurfer import get_mni_fiducials +from ..transforms import _sph_to_cart +from ..utils import _pl, _soft_import, warn +from . 
import __file__ as _CHANNELS_INIT_FILE +from .montage import make_dig_montage + +MONTAGE_PATH = op.join(op.dirname(_CHANNELS_INIT_FILE), "data", "montages") + +_str = "U100" + + +# In standard_1020, T9=LPA, T10=RPA, Nasion is the same as Iz with a +# sign-flipped Y value + + +def _egi_256(head_size): + fname = op.join(MONTAGE_PATH, "EGI_256.csd") + montage = _read_csd(fname, head_size) + ch_pos = montage._get_ch_pos() + + # For this cap, the Nasion is the frontmost electrode, + # LPA/RPA we approximate by putting 75% of the way (toward the front) + # between the two electrodes that are halfway down the ear holes + nasion = ch_pos["E31"] + lpa = 0.75 * ch_pos["E67"] + 0.25 * ch_pos["E94"] + rpa = 0.75 * ch_pos["E219"] + 0.25 * ch_pos["E190"] + + fids_montage = make_dig_montage( + coord_frame="unknown", + nasion=nasion, + lpa=lpa, + rpa=rpa, + ) + + montage += fids_montage # add fiducials to montage + + return montage + + +def _easycap(basename, head_size): + fname = op.join(MONTAGE_PATH, basename) + montage = _read_theta_phi_in_degrees(fname, head_size, add_fiducials=True) + return montage + + +def _hydrocel(basename, head_size): + fname = op.join(MONTAGE_PATH, basename) + return _read_sfp(fname, head_size) + + +def _str_names(ch_names): + return [str(ch_name) for ch_name in ch_names] + + +def _safe_np_loadtxt(fname, **kwargs): + out = np.genfromtxt(fname, **kwargs) + ch_names = _str_names(out["f0"]) + others = tuple(out[f"f{ii}"] for ii in range(1, len(out.dtype.fields))) + return (ch_names,) + others + + +def _biosemi(basename, head_size): + fname = op.join(MONTAGE_PATH, basename) + fid_names = ("Nz", "LPA", "RPA") + return _read_theta_phi_in_degrees(fname, head_size, fid_names) + + +def _mgh_or_standard(basename, head_size, coord_frame="unknown"): + fid_names = ("Nz", "LPA", "RPA") + fname = op.join(MONTAGE_PATH, basename) + + ch_names_, pos = [], [] + with open(fname) as fid: + # Ignore units as we will scale later using the norms anyway + for line in fid: + if "Positions\n" in line: + break + pos = [] + for line in fid: + if "Labels\n" in line: + break + pos.append(list(map(float, line.split()))) + for line in fid: + if not line or not set(line) - {" "}: + break + ch_names_.append(line.strip(" ").strip("\n")) + + pos = np.array(pos) / 1000.0 + ch_pos = _check_dupes_odict(ch_names_, pos) + nasion, lpa, rpa = (ch_pos.pop(n) for n in fid_names) + if head_size is None: + scale = 1.0 + else: + scale = head_size / np.median(np.linalg.norm(pos, axis=1)) + for value in ch_pos.values(): + value *= scale + # if we are in MRI/MNI coordinates, we need to replace nasion, LPA, and RPA + # with those of fsaverage for ``trans='fsaverage'`` to work + if coord_frame == "mri": + lpa, nasion, rpa = (x["r"].copy() for x in get_mni_fiducials("fsaverage")) + nasion *= scale + lpa *= scale + rpa *= scale + + return make_dig_montage( + ch_pos=ch_pos, coord_frame=coord_frame, nasion=nasion, lpa=lpa, rpa=rpa + ) + + +standard_montage_look_up_table = { + "EGI_256": _egi_256, + "easycap-M1": partial(_easycap, basename="easycap-M1.txt"), + "easycap-M10": partial(_easycap, basename="easycap-M10.txt"), + "easycap-M43": partial(_easycap, basename="easycap-M43.txt"), + "GSN-HydroCel-128": partial(_hydrocel, basename="GSN-HydroCel-128.sfp"), + "GSN-HydroCel-129": partial(_hydrocel, basename="GSN-HydroCel-129.sfp"), + "GSN-HydroCel-256": partial(_hydrocel, basename="GSN-HydroCel-256.sfp"), + "GSN-HydroCel-257": partial(_hydrocel, basename="GSN-HydroCel-257.sfp"), + "GSN-HydroCel-32": partial(_hydrocel, 
basename="GSN-HydroCel-32.sfp"), + "GSN-HydroCel-64_1.0": partial(_hydrocel, basename="GSN-HydroCel-64_1.0.sfp"), + "GSN-HydroCel-65_1.0": partial(_hydrocel, basename="GSN-HydroCel-65_1.0.sfp"), + "biosemi128": partial(_biosemi, basename="biosemi128.txt"), + "biosemi16": partial(_biosemi, basename="biosemi16.txt"), + "biosemi160": partial(_biosemi, basename="biosemi160.txt"), + "biosemi256": partial(_biosemi, basename="biosemi256.txt"), + "biosemi32": partial(_biosemi, basename="biosemi32.txt"), + "biosemi64": partial(_biosemi, basename="biosemi64.txt"), + "mgh60": partial(_mgh_or_standard, basename="mgh60.elc", coord_frame="mri"), + "mgh70": partial(_mgh_or_standard, basename="mgh70.elc", coord_frame="mri"), + "standard_1005": partial( + _mgh_or_standard, basename="standard_1005.elc", coord_frame="mri" + ), + "standard_1020": partial( + _mgh_or_standard, basename="standard_1020.elc", coord_frame="mri" + ), + "standard_alphabetic": partial( + _mgh_or_standard, basename="standard_alphabetic.elc", coord_frame="mri" + ), + "standard_postfixed": partial( + _mgh_or_standard, basename="standard_postfixed.elc", coord_frame="mri" + ), + "standard_prefixed": partial( + _mgh_or_standard, basename="standard_prefixed.elc", coord_frame="mri" + ), + "standard_primed": partial( + _mgh_or_standard, basename="standard_primed.elc", coord_frame="mri" + ), + "artinis-octamon": partial( + _mgh_or_standard, coord_frame="mri", basename="artinis-octamon.elc" + ), + "artinis-brite23": partial( + _mgh_or_standard, coord_frame="mri", basename="artinis-brite23.elc" + ), + "brainproducts-RNP-BA-128": partial( + _easycap, basename="brainproducts-RNP-BA-128.txt" + ), +} + + +def _read_sfp(fname, head_size): + """Read .sfp BESA/EGI files.""" + # fname has been already checked + fid_names = ("FidNz", "FidT9", "FidT10") + options = dict(dtype=(_str, "f4", "f4", "f4")) + ch_names, xs, ys, zs = _safe_np_loadtxt(fname, **options) + # deal with "headshape" + mask = np.array([ch_name == "headshape" for ch_name in ch_names], bool) + hsp = np.stack([xs[mask], ys[mask], zs[mask]], axis=-1) + mask = ~mask + pos = np.stack([xs[mask], ys[mask], zs[mask]], axis=-1) + ch_names = [ch_name for ch_name, m in zip(ch_names, mask) if m] + ch_pos = _check_dupes_odict(ch_names, pos) + del xs, ys, zs, ch_names + # no one grants that fid names are there. 
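+    # (i.e., the file may not contain them), so pop with a default of None;
+    # make_dig_montage accepts missing fiducials.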
+    nasion, lpa, rpa = (ch_pos.pop(n, None) for n in fid_names)
+
+    if head_size is not None:
+        scale = head_size / np.median(np.linalg.norm(pos, axis=-1))
+        for value in ch_pos.values():
+            value *= scale
+        nasion = nasion * scale if nasion is not None else None
+        lpa = lpa * scale if lpa is not None else None
+        rpa = rpa * scale if rpa is not None else None
+
+    return make_dig_montage(
+        ch_pos=ch_pos, coord_frame="unknown", nasion=nasion, rpa=rpa, lpa=lpa, hsp=hsp
+    )
+
+
+def _read_csd(fname, head_size):
+    # Label, Theta, Phi, Radius, X, Y, Z, off sphere surface
+    options = dict(
+        comments="//", dtype=(_str, "f4", "f4", "f4", "f4", "f4", "f4", "f4")
+    )
+    ch_names, _, _, _, xs, ys, zs, _ = _safe_np_loadtxt(fname, **options)
+    pos = np.stack([xs, ys, zs], axis=-1)
+
+    if head_size is not None:
+        pos *= head_size / np.median(np.linalg.norm(pos, axis=1))
+
+    return make_dig_montage(ch_pos=_check_dupes_odict(ch_names, pos))
+
+
+def _check_dupes_odict(ch_names, pos):
+    """Warn if there are duplicates, then turn to ordered dict."""
+    ch_names = list(ch_names)
+    dups = OrderedDict((ch_name, ch_names.count(ch_name)) for ch_name in ch_names)
+    dups = OrderedDict((ch_name, count) for ch_name, count in dups.items() if count > 1)
+    n = len(dups)
+    if n:
+        dups = ", ".join(f"{ch_name} ({count})" for ch_name, count in dups.items())
+        warn(
+            f"Duplicate channel position{_pl(n)} found, the last will be "
+            f"used for {dups}"
+        )
+    return OrderedDict(zip(ch_names, pos))
+
+
+def _read_elc(fname, head_size):
+    """Read .elc files.
+
+    The `.elc` files are so-called "asa electrode files". ASA here stands for
+    Advanced Source Analysis, and is a software package developed and sold by
+    the ANT Neuro company. They provide a device for sensor digitization,
+    called 'xensor', which produces the `.elc` files.
+
+    Parameters
+    ----------
+    fname : str
+        File extension is expected to be '.elc'.
+    head_size : float | None
+        The size of the head in [m]. If None, returns the values read from the
+        file with no modification.
+
+    Returns
+    -------
+    montage : instance of DigMontage
+        The montage units are [m].
+    """
+    fid_names = ("Nz", "LPA", "RPA")
+
+    with open(fname) as fid:
+        # Read units
+        # Unlike _mgh_or_standard, _read_elc needs to detect the units itself.
+        for line in fid:
+            if "UnitPosition" in line:
+                units = line.split()[1]
+                scale = dict(m=1.0, mm=1e-3)[units]
+                break
+        else:
+            raise RuntimeError(f"Could not detect units in file {fname}")
+        for line in fid:
+            if "Positions\n" in line:
+                break
+
+        # Read positions
+        new_style = False
+        pos = []
+        for line in fid:
+            if "Labels\n" in line:
+                break
+            if ":" in line:
+                # Of the 'new' format: `E01 : 5.288 -3.658 119.693`
+                pos.append(list(map(float, line.split(":")[1].split())))
+                new_style = True
+            else:
+                # Of the 'old' format: `5.288 -3.658 119.693`
+                pos.append(list(map(float, line.split())))
+
+        # Read labels
+        ch_names_ = []
+        for line in fid:
+            if not line or not set(line) - {" "}:
+                break
+            if new_style:
+                # Not sure how this format would deal with spaces in channel
+                # labels, but none of my test files had this, so let's wait
+                # until it comes up.
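+                # Splitting on whitespace means a label containing spaces
+                # would be parsed as multiple labels.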
+ parsed = line.strip(" ").strip("\n").split() + else: + parsed = [line.strip(" ").strip("\n")] + ch_names_.extend(parsed) + + pos = np.array(pos) * scale + if head_size is not None: + pos *= head_size / np.median(np.linalg.norm(pos, axis=1)) + + ch_pos = _check_dupes_odict(ch_names_, pos) + nasion, lpa, rpa = (ch_pos.pop(n, None) for n in fid_names) + + return make_dig_montage( + ch_pos=ch_pos, coord_frame="unknown", nasion=nasion, lpa=lpa, rpa=rpa + ) + + +def _read_theta_phi_in_degrees(fname, head_size, fid_names=None, add_fiducials=False): + ch_names, theta, phi = _safe_np_loadtxt( + fname, skip_header=1, dtype=(_str, "i4", "i4") + ) + if add_fiducials: + # Add fiducials based on 10/20 spherical coordinate definitions + # http://chgd.umich.edu/wp-content/uploads/2014/06/ + # 10-20_system_positioning.pdf + # extrapolated from other sensor coordinates in the Easycap layouts + # https://www.easycap.de/wp-content/uploads/2018/02/ + # Easycap-Equidistant-Layouts.pdf + assert fid_names is None + fid_names = ["Nasion", "LPA", "RPA"] + ch_names.extend(fid_names) + theta = np.append(theta, [115, -115, 115]) + phi = np.append(phi, [90, 0, 0]) + + radii = np.full(len(phi), head_size) + pos = _sph_to_cart(np.array([radii, np.deg2rad(phi), np.deg2rad(theta)]).T) + ch_pos = _check_dupes_odict(ch_names, pos) + + nasion, lpa, rpa = None, None, None + if fid_names is not None: + nasion, lpa, rpa = (ch_pos.pop(n, None) for n in fid_names) + + return make_dig_montage( + ch_pos=ch_pos, coord_frame="unknown", nasion=nasion, lpa=lpa, rpa=rpa + ) + + +def _read_elp_besa(fname, head_size): + # This .elp is not the same as polhemus elp. see _read_isotrak_elp_points + dtype = np.dtype("S8, S8, f8, f8, f8") + data = np.loadtxt(fname, dtype=dtype) + + ch_names = data["f1"].astype(str).tolist() + az = data["f2"] + horiz = data["f3"] + radius = np.abs(az / 180.0) + az = np.deg2rad(np.array([h if a >= 0.0 else 180 + h for h, a in zip(horiz, az)])) + pol = radius * np.pi + rad = data["f4"] / 100 + pos = _sph_to_cart(np.array([rad, az, pol]).T) + + if head_size is not None: + pos *= head_size / np.median(np.linalg.norm(pos, axis=1)) + + ch_pos = _check_dupes_odict(ch_names, pos) + + fid_names = ("Nz", "LPA", "RPA") + # No one grants that the fid names actually exist. 
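+    # (Unlike _read_sfp, any head-size rescaling was already applied to pos
+    # above), so simply pop them with a default of None.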
+    nasion, lpa, rpa = (ch_pos.pop(n, None) for n in fid_names)
+
+    return make_dig_montage(ch_pos=ch_pos, nasion=nasion, lpa=lpa, rpa=rpa)
+
+
+def _read_brainvision(fname, head_size):
+    # 'BrainVision Electrodes File' format
+    # Based on BrainVision Analyzer coordinate system: Defined between
+    # standard electrode positions: X-axis from T7 to T8, Y-axis from Oz to
+    # Fpz, Z-axis orthogonal from XY-plane through Cz, fit to a sphere if
+    # idealized (when radius=1), specified in millimeters
+    defusedxml = _soft_import("defusedxml", "reading BrainVision montages")
+    root = defusedxml.ElementTree.parse(fname).getroot()
+    ch_names = [s.text for s in root.findall("./Electrode/Name")]
+    theta = [float(s.text) for s in root.findall("./Electrode/Theta")]
+    pol = np.deg2rad(np.array(theta))
+    phi = [float(s.text) for s in root.findall("./Electrode/Phi")]
+    az = np.deg2rad(np.array(phi))
+    rad = [float(s.text) for s in root.findall("./Electrode/Radius")]
+    rad = np.array(rad)  # specified in mm
+    pos = _sph_to_cart(np.array([rad, az, pol]).T)
+
+    if head_size is not None:
+        pos *= head_size / np.median(np.linalg.norm(pos, axis=1))
+
+    return make_dig_montage(ch_pos=_check_dupes_odict(ch_names, pos))
+
+
+def _read_xyz(fname):
+    """Import EEG channel locations from CSV, TSV, or XYZ files.
+
+    CSV and TSV files should have 4 columns containing ch_name, x, y, and z.
+    Each row represents one channel. XYZ files should have 5 columns
+    containing count, x, y, z, and ch_name. Each row represents one channel.
+    CSV files should be separated by commas, TSV and XYZ files should be
+    separated by tabs.
+
+    Parameters
+    ----------
+    fname : str
+        Name of the file to read channel locations from.
+
+    Returns
+    -------
+    montage : instance of DigMontage
+        The montage.
+    """
+    ch_names = []
+    pos = []
+    file_format = op.splitext(fname)[1].lower()
+    with open(fname) as f:
+        if file_format != ".xyz":
+            f.readline()  # skip header
+        delimiter = "," if file_format == ".csv" else "\t"
+        for row in csv.reader(f, delimiter=delimiter):
+            if file_format == ".xyz":
+                _, x, y, z, ch_name, *_ = row
+                ch_name = ch_name.strip()  # deals with variable tab size
+            else:
+                ch_name, x, y, z, *_ = row
+            ch_names.append(ch_name)
+            pos.append((x, y, z))
+    d = _check_dupes_odict(ch_names, np.array(pos, dtype=float))
+    return make_dig_montage(ch_pos=d)
diff --git a/mne/channels/channels.py b/mne/channels/channels.py
new file mode 100644
index 0000000..8fbff33
--- /dev/null
+++ b/mne/channels/channels.py
@@ -0,0 +1,2150 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
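+
+# High-level channel manipulation shared by Raw, Epochs, Evoked, and the
+# spectral classes: picking, dropping, reordering, re-referencing, and
+# equalizing channels across objects. A minimal usage sketch (the file names
+# are placeholders):
+#
+#     import mne
+#     raws = [mne.io.read_raw_fif(f) for f in ("a_raw.fif", "b_raw.fif")]
+#     raws = mne.channels.equalize_channels(raws, copy=True)
+#     raws = mne.channels.unify_bad_channels(raws)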
+
+
+from __future__ import annotations  # only needed for Python ≤ 3.9
+
+import os.path as op
+import string
+import sys
+from collections import OrderedDict
+from copy import deepcopy
+from dataclasses import dataclass
+from functools import partial
+from pathlib import Path
+
+import numpy as np
+from scipy.io import loadmat
+from scipy.sparse import csr_array, lil_array
+from scipy.spatial import Delaunay
+from scipy.stats import zscore
+
+from .._fiff.constants import FIFF
+from .._fiff.meas_info import (  # noqa F401
+    Info,
+    MontageMixin,
+    _merge_info,
+    _rename_comps,
+    _unit2human,  # TODO: pybv relies on this, should be made public
+    create_info,
+)
+from .._fiff.pick import (
+    _check_excludes_includes,
+    _pick_data_channels,
+    _picks_by_type,
+    _picks_to_idx,
+    _second_rules,
+    channel_indices_by_type,
+    channel_type,
+    pick_channels,
+    pick_info,
+    pick_types,
+)
+from .._fiff.proj import setup_proj
+from .._fiff.reference import add_reference_channels, set_eeg_reference
+from .._fiff.tag import _rename_list
+from ..bem import _check_origin
+from ..defaults import HEAD_SIZE_DEFAULT, _handle_default
+from ..utils import (
+    _check_dict_keys,
+    _check_fname,
+    _check_option,
+    _check_preload,
+    _get_stim_channel,
+    _on_missing,
+    _validate_type,
+    fill_doc,
+    legacy,
+    logger,
+    verbose,
+    warn,
+)
+
+
+def _get_meg_system(info):
+    """Educated guess for the helmet type based on channels."""
+    have_helmet = True
+    for ch in info["chs"]:
+        if ch["kind"] == FIFF.FIFFV_MEG_CH:
+            # Only take first 16 bits, as higher bits store CTF grad comp order
+            coil_type = ch["coil_type"] & 0xFFFF
+            nmag = np.sum([c["kind"] == FIFF.FIFFV_MEG_CH for c in info["chs"]])
+            if coil_type == FIFF.FIFFV_COIL_NM_122:
+                system = "122m"
+                break
+            elif coil_type // 1000 == 3:  # All Vectorview coils are 30xx
+                system = "306m"
+                break
+            elif (
+                coil_type == FIFF.FIFFV_COIL_MAGNES_MAG
+                or coil_type == FIFF.FIFFV_COIL_MAGNES_GRAD
+            ):
+                system = "Magnes_3600wh" if nmag > 150 else "Magnes_2500wh"
+                break
+            elif coil_type == FIFF.FIFFV_COIL_CTF_GRAD:
+                system = "CTF_275"
+                break
+            elif coil_type == FIFF.FIFFV_COIL_KIT_GRAD:
+                system = "KIT"
+                # Our helmet does not match very well, so let's just create it
+                have_helmet = False
+                break
+            elif coil_type == FIFF.FIFFV_COIL_BABY_GRAD:
+                system = "BabySQUID"
+                break
+            elif coil_type == FIFF.FIFFV_COIL_ARTEMIS123_GRAD:
+                system = "ARTEMIS123"
+                have_helmet = False
+                break
+            elif coil_type == FIFF.FIFFV_COIL_KERNEL_OPM_MAG_GEN1:
+                system = "Kernel_Flux"
+                have_helmet = True
+                break
+    else:
+        system = "unknown"
+        have_helmet = False
+    return system, have_helmet
+
+
+@verbose
+def equalize_channels(instances, copy=True, verbose=None):
+    """Equalize channel picks and ordering across multiple MNE-Python objects.
+
+    First, all channels that are not common to each object are dropped. Then,
+    using the first object in the list as a template, the channels of each
+    object are re-ordered to match the template. The end result is that all
+    given objects define the same channels, in the same order.
+
+    Parameters
+    ----------
+    instances : list
+        A list of MNE-Python objects to equalize the channels for. Objects can
+        be of type Raw, Epochs, Evoked, AverageTFR, Forward, Covariance,
+        CrossSpectralDensity or Info.
+    copy : bool
+        When dropping and/or re-ordering channels, an object will be copied
+        when this parameter is set to ``True`` (the default). When set to
+        ``False``, the dropping and re-ordering of channels happens in-place.
+
+        .. versionadded:: 0.20.0
+    %(verbose)s
+
+    Returns
+    -------
+    equalized_instances : list
+        A list of MNE-Python objects that have the same channels defined in the
+        same order.
+
+    Notes
+    -----
+    When ``copy=False``, this function operates in-place.
+    """
+    from ..cov import Covariance
+    from ..epochs import BaseEpochs
+    from ..evoked import Evoked
+    from ..forward import Forward
+    from ..io import BaseRaw
+    from ..time_frequency import BaseTFR, CrossSpectralDensity
+
+    # Instances need to have a `ch_names` attribute and a `pick_channels`
+    # method that supports `ordered=True`.
+    allowed_types = (
+        BaseRaw,
+        BaseEpochs,
+        Evoked,
+        BaseTFR,
+        Forward,
+        Covariance,
+        CrossSpectralDensity,
+        Info,
+    )
+    allowed_types_str = (
+        "Raw, Epochs, Evoked, TFR, Forward, Covariance, CrossSpectralDensity or Info"
+    )
+    for inst in instances:
+        _validate_type(
+            inst, allowed_types, "Instances to be modified", allowed_types_str
+        )
+
+    chan_template = instances[0].ch_names
+    logger.info("Identifying common channels ...")
+    channels = [set(inst.ch_names) for inst in instances]
+    common_channels = set(chan_template).intersection(*channels)
+    all_channels = set(chan_template).union(*channels)
+    dropped = list(set(all_channels - common_channels))
+
+    # Preserve the order of chan_template
+    order = np.argsort([chan_template.index(ch) for ch in common_channels])
+    common_channels = np.array(list(common_channels))[order].tolist()
+
+    # Update all instances to match the common_channels list
+    reordered = False
+    equalized_instances = []
+    for inst in instances:
+        # Only perform picking when needed
+        if inst.ch_names != common_channels:
+            if isinstance(inst, Info):
+                sel = pick_channels(
+                    inst.ch_names, common_channels, exclude=[], ordered=True
+                )
+                inst = pick_info(inst, sel, copy=copy, verbose=False)
+            else:
+                if copy:
+                    inst = inst.copy()
+                # TODO change to .pick() once CSD, Cov, and Fwd have `.pick()` methods
+                inst.pick_channels(common_channels, ordered=True)
+            if len(inst.ch_names) == len(common_channels):
+                reordered = True
+        equalized_instances.append(inst)
+
+    if dropped:
+        logger.info(f"Dropped the following channels:\n{dropped}")
+    elif reordered:
+        logger.info("Channels have been re-ordered.")
+
+    return equalized_instances
+
+
+def unify_bad_channels(insts):
+    """Unify bad channels across a list of instances.
+
+    All instances must be of the same type and have matching channel names and channel
+    order. The ``.info["bads"]`` of each instance will be set to the union of
+    ``.info["bads"]`` across all instances.
+
+    Parameters
+    ----------
+    insts : list
+        List of instances (:class:`~mne.io.Raw`, :class:`~mne.Epochs`,
+        :class:`~mne.Evoked`, :class:`~mne.time_frequency.Spectrum`,
+        :class:`~mne.time_frequency.EpochsSpectrum`) across which to unify bad channels.
+
+    Returns
+    -------
+    insts : list
+        List of instances with bad channels unified across instances.
+
+    See Also
+    --------
+    mne.channels.equalize_channels
+    mne.channels.rename_channels
+    mne.channels.combine_channels
+
+    Notes
+    -----
+    This function modifies the instances in-place.
+
+    .. 
versionadded:: 1.6 + """ + from ..epochs import Epochs + from ..evoked import Evoked + from ..io import BaseRaw + from ..time_frequency.spectrum import BaseSpectrum + + # ensure input is list-like + _validate_type(insts, (list, tuple), "insts") + # ensure non-empty + if len(insts) == 0: + raise ValueError("insts must not be empty") + # ensure all insts are MNE objects, and all the same type + inst_type = type(insts[0]) + valid_types = (BaseRaw, Epochs, Evoked, BaseSpectrum) + for inst in insts: + _validate_type(inst, valid_types, "each object in insts") + if type(inst) is not inst_type: + raise ValueError("All insts must be the same type") + + # ensure all insts have the same channels and channel order + ch_names = insts[0].ch_names + for inst in insts[1:]: + dif = set(inst.ch_names) ^ set(ch_names) + if len(dif): + raise ValueError( + "Channels do not match across the objects in insts. Consider calling " + "equalize_channels before calling this function." + ) + elif inst.ch_names != ch_names: + raise ValueError( + "Channel names are sorted differently across instances. Please use " + "mne.channels.equalize_channels." + ) + + # collect bads as dict keys so that insertion order is preserved, then cast to list + all_bads = dict() + for inst in insts: + all_bads.update(dict.fromkeys(inst.info["bads"])) + all_bads = list(all_bads) + + # update bads on all instances + for inst in insts: + inst.info["bads"] = all_bads + + return insts + + +class ReferenceMixin(MontageMixin): + """Mixin class for Raw, Evoked, Epochs.""" + + @verbose + def set_eeg_reference( + self, + ref_channels="average", + projection=False, + ch_type="auto", + forward=None, + *, + joint=False, + verbose=None, + ): + """Specify which reference to use for EEG data. + + Use this function to explicitly specify the desired reference for EEG. + This can be either an existing electrode or a new virtual channel. + This function will re-reference the data according to the desired + reference. + + Parameters + ---------- + %(ref_channels_set_eeg_reference)s + %(projection_set_eeg_reference)s + %(ch_type_set_eeg_reference)s + %(forward_set_eeg_reference)s + %(joint_set_eeg_reference)s + %(verbose)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + Data with EEG channels re-referenced. If ``ref_channels='average'`` + and ``projection=True`` a projection will be added instead of + directly re-referencing the data. + %(set_eeg_reference_see_also_notes)s + """ + return set_eeg_reference( + self, + ref_channels=ref_channels, + copy=False, + projection=projection, + ch_type=ch_type, + forward=forward, + joint=joint, + )[0] + + +class UpdateChannelsMixin: + """Mixin class for Raw, Evoked, Epochs, Spectrum, AverageTFR.""" + + @verbose + @legacy(alt="inst.pick(...)") + def pick_types( + self, + meg=False, + eeg=False, + stim=False, + eog=False, + ecg=False, + emg=False, + ref_meg="auto", + *, + misc=False, + resp=False, + chpi=False, + exci=False, + ias=False, + syst=False, + seeg=False, + dipole=False, + gof=False, + bio=False, + ecog=False, + fnirs=False, + csd=False, + dbs=False, + temperature=False, + gsr=False, + eyetrack=False, + include=(), + exclude="bads", + selection=None, + verbose=None, + ): + """Pick some channels by type and names. + + Parameters + ---------- + %(pick_types_params)s + %(verbose)s + + Returns + ------- + inst : instance of Raw, Epochs, or Evoked + The modified instance. + + See Also + -------- + pick_channels + + Notes + ----- + .. 
versionadded:: 0.9.0 + """ + idx = pick_types( + self.info, + meg=meg, + eeg=eeg, + stim=stim, + eog=eog, + ecg=ecg, + emg=emg, + ref_meg=ref_meg, + misc=misc, + resp=resp, + chpi=chpi, + exci=exci, + ias=ias, + syst=syst, + seeg=seeg, + dipole=dipole, + gof=gof, + bio=bio, + ecog=ecog, + fnirs=fnirs, + csd=csd, + dbs=dbs, + temperature=temperature, + gsr=gsr, + eyetrack=eyetrack, + include=include, + exclude=exclude, + selection=selection, + ) + + self._pick_drop_channels(idx) + + # remove dropped channel types from reject and flat + if getattr(self, "reject", None) is not None: + # use list(self.reject) to avoid RuntimeError for changing dictionary size + # during iteration + for ch_type in list(self.reject): + if ch_type not in self: + del self.reject[ch_type] + + if getattr(self, "flat", None) is not None: + for ch_type in list(self.flat): + if ch_type not in self: + del self.flat[ch_type] + + return self + + @verbose + @legacy(alt="inst.pick(...)") + def pick_channels(self, ch_names, ordered=True, *, verbose=None): + """Pick some channels. + + Parameters + ---------- + ch_names : list + The list of channels to select. + %(ordered)s + %(verbose)s + + .. versionadded:: 1.1 + + Returns + ------- + inst : instance of Raw, Epochs, or Evoked + The modified instance. + + See Also + -------- + drop_channels + pick_types + reorder_channels + + Notes + ----- + If ``ordered`` is ``False``, the channel names given via ``ch_names`` are + assumed to be a set, that is, their order does not matter. In that case, the + original order of the channels in the data is preserved. Apart from using + ``ordered=True``, you may also use ``reorder_channels`` to set channel order, + if necessary. + + .. versionadded:: 0.9.0 + """ + picks = pick_channels(self.info["ch_names"], ch_names, ordered=ordered) + return self._pick_drop_channels(picks) + + @verbose + def pick(self, picks, exclude=(), *, verbose=None): + """Pick a subset of channels. + + Parameters + ---------- + %(picks_all)s + exclude : list | str + Set of channels to exclude, only used when picking based on + types (e.g., exclude="bads" when picks="meg"). + %(verbose)s + + .. versionadded:: 0.24.0 + + Returns + ------- + inst : instance of Raw, Epochs, or Evoked + The modified instance. + """ + picks = _picks_to_idx(self.info, picks, "all", exclude, allow_empty=False) + self._pick_drop_channels(picks) + + # remove dropped channel types from reject and flat + if getattr(self, "reject", None) is not None: + # use list(self.reject) to avoid RuntimeError for changing dictionary size + # during iteration + for ch_type in list(self.reject): + if ch_type not in self: + del self.reject[ch_type] + + if getattr(self, "flat", None) is not None: + for ch_type in list(self.flat): + if ch_type not in self: + del self.flat[ch_type] + + return self + + def reorder_channels(self, ch_names): + """Reorder channels. + + Parameters + ---------- + ch_names : list + The desired channel order. + + Returns + ------- + inst : instance of Raw, Epochs, or Evoked + The modified instance. + + See Also + -------- + drop_channels + pick_types + pick_channels + + Notes + ----- + Channel names must be unique. Channels that are not in ``ch_names`` + are dropped. + + .. 
versionadded:: 0.16.0 + """ + _check_excludes_includes(ch_names) + idx = list() + for ch_name in ch_names: + ii = self.ch_names.index(ch_name) + if ii in idx: + raise ValueError(f"Channel name repeated: {ch_name}") + idx.append(ii) + return self._pick_drop_channels(idx) + + @fill_doc + def drop_channels(self, ch_names, on_missing="raise"): + """Drop channel(s). + + Parameters + ---------- + ch_names : iterable or str + Iterable (e.g. list) of channel name(s) or channel name to remove. + %(on_missing_ch_names)s + + Returns + ------- + inst : instance of Raw, Epochs, or Evoked + The modified instance. + + See Also + -------- + reorder_channels + pick_channels + pick_types + + Notes + ----- + .. versionadded:: 0.9.0 + """ + if isinstance(ch_names, str): + ch_names = [ch_names] + + try: + all_str = all([isinstance(ch, str) for ch in ch_names]) + except TypeError: + raise ValueError( + f"'ch_names' must be iterable, got type {type(ch_names)} ({ch_names})." + ) + + if not all_str: + raise ValueError( + "Each element in 'ch_names' must be str, got " + f"{[type(ch) for ch in ch_names]}." + ) + + missing = [ch for ch in ch_names if ch not in self.ch_names] + if len(missing) > 0: + msg = "Channel(s) {0} not found, nothing dropped." + _on_missing(on_missing, msg.format(", ".join(missing))) + + bad_idx = [self.ch_names.index(ch) for ch in ch_names if ch in self.ch_names] + idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx) + if len(idx) == 0: + raise ValueError("All channels would be dropped.") + return self._pick_drop_channels(idx) + + @verbose + def _pick_drop_channels(self, idx, *, verbose=None): + # avoid circular imports + from ..io import BaseRaw + + msg = "adding, dropping, or reordering channels" + if isinstance(self, BaseRaw): + if self._projector is not None: + _check_preload(self, f"{msg} after calling .apply_proj()") + else: + _check_preload(self, msg) + + if getattr(self, "picks", None) is not None: + self.picks = self.picks[idx] + + if getattr(self, "_read_picks", None) is not None: + self._read_picks = [r[idx] for r in self._read_picks] + + if hasattr(self, "_cals"): + self._cals = self._cals[idx] + + pick_info(self.info, idx, copy=False) + + for key in ("_comp", "_projector"): + mat = getattr(self, key, None) + if mat is not None: + setattr(self, key, mat[idx][:, idx]) + + if hasattr(self, "_dims"): # Spectrum and "new-style" TFRs + axis = self._dims.index("channel") + else: # All others (Evoked, Epochs, Raw) have chs axis=-2 + axis = -2 + if hasattr(self, "_data"): # skip non-preloaded Raw + self._data = self._data.take(idx, axis=axis) + else: + assert isinstance(self, BaseRaw) and not self.preload + + if isinstance(self, BaseRaw): + self.annotations._prune_ch_names(self.info, on_missing="ignore") + self._orig_units = { + k: v for k, v in self._orig_units.items() if k in self.ch_names + } + + self._pick_projs() + return self + + def _pick_projs(self): + """Keep only projectors which apply to at least 1 data channel.""" + drop_idx = [] + for idx, proj in enumerate(self.info["projs"]): + if not set(self.info["ch_names"]) & set(proj["data"]["col_names"]): + drop_idx.append(idx) + + for idx in drop_idx: + logger.info(f"Removing projector {self.info['projs'][idx]}") + + if drop_idx and hasattr(self, "del_proj"): + self.del_proj(drop_idx) + + return self + + def add_channels(self, add_list, force_update_info=False): + """Append new channels to the instance. + + Parameters + ---------- + add_list : list + A list of objects to append to self. 
Must contain all the same + type as the current object. + force_update_info : bool + If True, force the info for objects to be appended to match the + values in ``self``. This should generally only be used when adding + stim channels for which important metadata won't be overwritten. + + .. versionadded:: 0.12 + + Returns + ------- + inst : instance of Raw, Epochs, or Evoked + The modified instance. + + See Also + -------- + drop_channels + + Notes + ----- + If ``self`` is a Raw instance that has been preloaded into a + :obj:`numpy.memmap` instance, the memmap will be resized. + """ + # avoid circular imports + from ..epochs import BaseEpochs + from ..io import BaseRaw + from ..time_frequency import EpochsTFR + + _validate_type(add_list, (list, tuple), "Input") + + # Object-specific checks + for inst in add_list + [self]: + _check_preload(inst, "adding channels") + if isinstance(self, BaseRaw): + con_axis = 0 + comp_class = BaseRaw + elif isinstance(self, BaseEpochs): + con_axis = 1 + comp_class = BaseEpochs + elif isinstance(self, EpochsTFR): + con_axis = 1 + comp_class = EpochsTFR + else: + con_axis = 0 + comp_class = type(self) + for inst in add_list: + _validate_type(inst, comp_class, "All input") + data = [inst._data for inst in [self] + add_list] + + # Make sure that all dimensions other than channel axis are the same + compare_axes = [i for i in range(data[0].ndim) if i != con_axis] + shapes = np.array([dat.shape for dat in data])[:, compare_axes] + for shape in shapes: + if not ((shapes[0] - shape) == 0).all(): + raise ValueError( + "All data dimensions except channels must match, got " + f"{shapes[0]} != {shape}" + ) + del shapes + + # Create final data / info objects + infos = [self.info] + [inst.info for inst in add_list] + new_info = _merge_info(infos, force_update_to_first=force_update_info) + + # Now update the attributes + if ( + isinstance(self._data, np.memmap) + and con_axis == 0 + and sys.platform != "darwin" + ): # resizing not available--no mremap + # Use a resize and fill in other ones + out_shape = (sum(d.shape[0] for d in data),) + data[0].shape[1:] + n_bytes = np.prod(out_shape) * self._data.dtype.itemsize + self._data.flush() + self._data.base.resize(n_bytes) + self._data = np.memmap( + self._data.filename, mode="r+", dtype=self._data.dtype, shape=out_shape + ) + assert self._data.shape == out_shape + assert self._data.nbytes == n_bytes + offset = len(data[0]) + for d in data[1:]: + this_len = len(d) + self._data[offset : offset + this_len] = d + offset += this_len + else: + self._data = np.concatenate(data, axis=con_axis) + self.info = new_info + if isinstance(self, BaseRaw): + self._cals = np.concatenate( + [getattr(inst, "_cals") for inst in [self] + add_list] + ) + # We should never use these since data are preloaded, let's just + # set it to something large and likely to break (2 ** 31 - 1) + extra_idx = [2147483647] * sum(info["nchan"] for info in infos[1:]) + assert all(len(r) == infos[0]["nchan"] for r in self._read_picks) + self._read_picks = [ + np.concatenate([r, extra_idx]) for r in self._read_picks + ] + assert all(len(r) == self.info["nchan"] for r in self._read_picks) + for other in add_list: + self._orig_units.update(other._orig_units) + elif isinstance(self, BaseEpochs): + self.picks = np.arange(self._data.shape[1]) + if hasattr(self, "_projector"): + activate = False if self._do_delayed_proj else self.proj + self._projector, self.info = setup_proj( + self.info, False, activate=activate + ) + + return self + + @fill_doc + def 
add_reference_channels(self, ref_channels): + """Add reference channels to data that consists of all zeros. + + Adds reference channels to data that were not included during + recording. This is useful when you need to re-reference your data + to different channels. These added channels will consist of all zeros. + + Parameters + ---------- + %(ref_channels)s + + Returns + ------- + inst : instance of Raw | Epochs | Evoked + The modified instance. + """ + return add_reference_channels(self, ref_channels, copy=False) + + +class InterpolationMixin: + """Mixin class for Raw, Evoked, Epochs.""" + + @verbose + def interpolate_bads( + self, + reset_bads=True, + mode="accurate", + origin="auto", + method=None, + exclude=(), + verbose=None, + ): + """Interpolate bad MEG and EEG channels. + + Operates in place. + + Parameters + ---------- + reset_bads : bool + If True, remove the bads from info. + mode : str + Either ``'accurate'`` or ``'fast'``, determines the quality of the + Legendre polynomial expansion used for interpolation of channels + using the minimum-norm method. + origin : array-like, shape (3,) | str + Origin of the sphere in the head coordinate frame and in meters. + Can be ``'auto'`` (default), which means a head-digitization-based + origin fit. + + .. versionadded:: 0.17 + method : dict | str | None + Method to use for each channel type. + + - ``"meg"`` channels support ``"MNE"`` (default) and ``"nan"`` + - ``"eeg"`` channels support ``"spline"`` (default), ``"MNE"`` and ``"nan"`` + - ``"fnirs"`` channels support ``"nearest"`` (default) and ``"nan"`` + - ``"ecog"`` channels support ``"spline"`` (default) and ``"nan"`` + - ``"seeg"`` channels support ``"spline"`` (default) and ``"nan"`` + + None is an alias for:: + + method=dict(meg="MNE", eeg="spline", fnirs="nearest") + + If a :class:`str` is provided, the method will be applied to all channel + types supported and available in the instance. The method ``"nan"`` will + replace the channel data with ``np.nan``. + + .. warning:: + Be careful when using ``method="nan"``; the default value + ``reset_bads=True`` may not be what you want. + + .. versionadded:: 0.21 + exclude : list | tuple + The channels to exclude from interpolation. If excluded a bad + channel will stay in bads. + %(verbose)s + + Returns + ------- + inst : instance of Raw, Epochs, or Evoked + The modified instance. + + Notes + ----- + The ``"MNE"`` method uses minimum-norm projection to a sphere and back. + + .. versionadded:: 0.9.0 + """ + from .interpolation import ( + _interpolate_bads_ecog, + _interpolate_bads_eeg, + _interpolate_bads_meeg, + _interpolate_bads_nan, + _interpolate_bads_nirs, + _interpolate_bads_seeg, + ) + + _check_preload(self, "interpolation") + _validate_type(method, (dict, str, None), "method") + method = _handle_default("interpolation_method", method) + ch_types = self.get_channel_types(unique=True) + # figure out if we have "mag" for "meg", "hbo" for "fnirs", ... to filter the + # "method" dictionary and keep only keys that correspond to existing channels. 
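+        # For example, a recording with only "mag" and "hbo" channels gives
+        # ch_types == ["mag", "hbo"]; the loop below folds such sub-types back
+        # into their parent types so that ch_types becomes ["meg", "fnirs"]
+        # and matches the keys used in the "method" dict.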
+ for ch_type in ("meg", "fnirs"): + for sub_ch_type in _second_rules[ch_type][1].values(): + if sub_ch_type in ch_types: + ch_types.remove(sub_ch_type) + if ch_type not in ch_types: + ch_types.append(ch_type) + keys2delete = set(method) - set(ch_types) + for key in keys2delete: + del method[key] + valids = { + "eeg": ("spline", "MNE", "nan"), + "meg": ("MNE", "nan"), + "fnirs": ("nearest", "nan"), + "ecog": ("spline", "nan"), + "seeg": ("spline", "nan"), + } + for key in method: + _check_option("method[key]", key, tuple(valids)) + _check_option(f"method['{key}']", method[key], valids[key]) + logger.info("Setting channel interpolation method to %s.", method) + idx = _picks_to_idx(self.info, list(method), exclude=(), allow_empty=True) + if idx.size == 0 or len(pick_info(self.info, idx)["bads"]) == 0: + warn("No bad channels to interpolate. Doing nothing...") + return self + for ch_type in method.copy(): + idx = _picks_to_idx(self.info, ch_type, exclude=(), allow_empty=True) + if len(pick_info(self.info, idx)["bads"]) == 0: + method.pop(ch_type) + logger.info("Interpolating bad channels.") + needs_origin = [key != "seeg" and val != "nan" for key, val in method.items()] + if any(needs_origin): + origin = _check_origin(origin, self.info) + for ch_type, interp in method.items(): + if interp == "nan": + _interpolate_bads_nan(self, ch_type, exclude=exclude) + if method.get("eeg", "") == "spline": + _interpolate_bads_eeg(self, origin=origin, exclude=exclude) + meg_mne = method.get("meg", "") == "MNE" + eeg_mne = method.get("eeg", "") == "MNE" + if meg_mne or eeg_mne: + _interpolate_bads_meeg( + self, + mode=mode, + meg=meg_mne, + eeg=eeg_mne, + origin=origin, + exclude=exclude, + method=method, + ) + if method.get("fnirs", "") == "nearest": + _interpolate_bads_nirs(self, exclude=exclude) + if method.get("ecog", "") == "spline": + _interpolate_bads_ecog(self, origin=origin, exclude=exclude) + if method.get("seeg", "") == "spline": + _interpolate_bads_seeg(self, exclude=exclude) + + if reset_bads is True: + if "nan" in method.values(): + warn( + "interpolate_bads was called with method='nan' and " + "reset_bads=True. Consider setting reset_bads=False so that the " + "nan-containing channels can be easily excluded from later " + "computations." + ) + self.info["bads"] = [ch for ch in self.info["bads"] if ch in exclude] + + return self + + +@verbose +def rename_channels(info, mapping, allow_duplicates=False, *, verbose=None): + """Rename channels. + + Parameters + ---------- + %(info_not_none)s Note: modified in place. 
+ %(mapping_rename_channels_duplicates)s + %(verbose)s + """ + _validate_type(info, Info, "info") + info._check_consistency() + bads = list(info["bads"]) # make our own local copies + ch_names = list(info["ch_names"]) + + # first check and assemble clean mappings of index and name + if isinstance(mapping, dict): + _check_dict_keys( + mapping, + ch_names, + key_description="channel name(s)", + valid_key_source="info", + ) + new_names = [ + (ch_names.index(ch_name), new_name) for ch_name, new_name in mapping.items() + ] + elif callable(mapping): + new_names = [(ci, mapping(ch_name)) for ci, ch_name in enumerate(ch_names)] + else: + raise ValueError(f"mapping must be callable or dict, not {type(mapping)}") + + # check we got all strings out of the mapping + for new_name in new_names: + _validate_type(new_name[1], "str", "New channel mappings") + + # do the remapping locally + for c_ind, new_name in new_names: + for bi, bad in enumerate(bads): + if bad == ch_names[c_ind]: + bads[bi] = new_name + ch_names[c_ind] = new_name + + # check that all the channel names are unique + if len(ch_names) != len(np.unique(ch_names)) and not allow_duplicates: + raise ValueError("New channel names are not unique, renaming failed") + + # do the remapping in info + info["bads"] = [] + ch_names_mapping = dict() + for ch, ch_name in zip(info["chs"], ch_names): + ch_names_mapping[ch["ch_name"]] = ch_name + ch["ch_name"] = ch_name + # .get b/c fwd info omits it + _rename_comps(info.get("comps", []), ch_names_mapping) + if "projs" in info: # fwd might omit it + for proj in info["projs"]: + proj["data"]["col_names"][:] = _rename_list( + proj["data"]["col_names"], ch_names_mapping + ) + info._update_redundant() + info["bads"] = bads + info._check_consistency() + + +def _recursive_flatten(cell, dtype): + """Unpack mat files in Python.""" + if len(cell) > 0: + while not isinstance(cell[0], dtype): + cell = [c for d in cell for c in d] + return cell + + +@dataclass +class _BuiltinChannelAdjacency: + name: str + description: str + fname: str + source_url: str | None + + +_ft_neighbor_url_t = string.Template( + "https://github.com/fieldtrip/fieldtrip/raw/master/template/neighbours/$fname" +) + +_BUILTIN_CHANNEL_ADJACENCIES = [ + _BuiltinChannelAdjacency( + name="biosemi16", + description="Biosemi 16-electrode cap", + fname="biosemi16_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="biosemi16_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="biosemi32", + description="Biosemi 32-electrode cap", + fname="biosemi32_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="biosemi32_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="biosemi64", + description="Biosemi 64-electrode cap", + fname="biosemi64_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="biosemi64_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="bti148", + description="BTI 148-channel system", + fname="bti148_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="bti148_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="bti248", + description="BTI 248-channel system", + fname="bti248_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="bti248_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="bti248grad", + description="BTI 248 gradiometer system", + fname="bti248grad_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="bti248grad_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="ctf64", + description="CTF 64 axial gradiometer", + fname="ctf64_neighb.mat", + 
source_url=_ft_neighbor_url_t.substitute(fname="ctf64_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="ctf151", + description="CTF 151 axial gradiometer", + fname="ctf151_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="ctf151_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="ctf275", + description="CTF 275 axial gradiometer", + fname="ctf275_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="ctf275_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="easycap32ch-avg", + description="", + fname="easycap32ch-avg_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycap32ch-avg_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="easycap64ch-avg", + description="", + fname="easycap64ch-avg_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycap64ch-avg_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="easycap128ch-avg", + description="", + fname="easycap128ch-avg_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycap128ch-avg_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="easycapM1", + description="Easycap M1", + fname="easycapM1_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycapM1_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="easycapM11", + description="Easycap M11", + fname="easycapM11_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycapM11_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="easycapM14", + description="Easycap M14", + fname="easycapM14_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycapM14_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="easycapM15", + description="Easycap M15", + fname="easycapM15_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="easycapM15_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="KIT-157", + description="", + fname="KIT-157_neighb.mat", + source_url=None, + ), + _BuiltinChannelAdjacency( + name="KIT-208", + description="", + fname="KIT-208_neighb.mat", + source_url=None, + ), + _BuiltinChannelAdjacency( + name="KIT-NYU-2019", + description="", + fname="KIT-NYU-2019_neighb.mat", + source_url=None, + ), + _BuiltinChannelAdjacency( + name="KIT-UMD-1", + description="", + fname="KIT-UMD-1_neighb.mat", + source_url=None, + ), + _BuiltinChannelAdjacency( + name="KIT-UMD-2", + description="", + fname="KIT-UMD-2_neighb.mat", + source_url=None, + ), + _BuiltinChannelAdjacency( + name="KIT-UMD-3", + description="", + fname="KIT-UMD-3_neighb.mat", + source_url=None, + ), + _BuiltinChannelAdjacency( + name="KIT-UMD-4", + description="", + fname="KIT-UMD-4_neighb.mat", + source_url=None, + ), + _BuiltinChannelAdjacency( + name="neuromag306mag", + description="Neuromag306, only magnetometers", + fname="neuromag306mag_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="neuromag306mag_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="neuromag306planar", + description="Neuromag306, only planar gradiometers", + fname="neuromag306planar_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="neuromag306planar_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="neuromag122cmb", + description="Neuromag122, only combined planar gradiometers", + fname="neuromag122cmb_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="neuromag122cmb_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="neuromag306cmb", + description="Neuromag306, only combined planar gradiometers", + 
fname="neuromag306cmb_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="neuromag306cmb_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="ecog256", + description="ECOG 256channels, average referenced", + fname="ecog256_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="ecog256_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="ecog256bipolar", + description="ECOG 256channels, bipolar referenced", + fname="ecog256bipolar_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="ecog256bipolar_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="eeg1010_neighb", + description="", + fname="eeg1010_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="eeg1010_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="elec1005", + description="Standard 10-05 system", + fname="elec1005_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="elec1005_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="elec1010", + description="Standard 10-10 system", + fname="elec1010_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="elec1010_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="elec1020", + description="Standard 10-20 system", + fname="elec1020_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="elec1020_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="itab28", + description="ITAB 28-channel system", + fname="itab28_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="itab28_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="itab153", + description="ITAB 153-channel system", + fname="itab153_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="itab153_neighb.mat"), + ), + _BuiltinChannelAdjacency( + name="language29ch-avg", + description="MPI for Psycholinguistic: Averaged 29-channel cap", + fname="language29ch-avg_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="language29ch-avg_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="mpi_59_channels", + description="MPI for Psycholinguistic: 59-channel cap", + fname="mpi_59_channels_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="mpi_59_channels_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="yokogawa160", + description="", + fname="yokogawa160_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="yokogawa160_neighb.mat"), # noqa: E501 + ), + _BuiltinChannelAdjacency( + name="yokogawa440", + description="", + fname="yokogawa440_neighb.mat", + source_url=_ft_neighbor_url_t.substitute(fname="yokogawa440_neighb.mat"), # noqa: E501 + ), +] + + +@fill_doc +def get_builtin_ch_adjacencies(*, descriptions=False): + """Get a list of all FieldTrip neighbor definitions shipping with MNE. + + The names of the these neighbor definitions can be passed to + :func:`read_ch_adjacency`. + + Parameters + ---------- + descriptions : bool + Whether to return not only the neighbor definition names, but also + their corresponding descriptions. If ``True``, a list of tuples is + returned, where the first tuple element is the neighbor definition name + and the second is the description. If ``False`` (default), only the + names are returned. + + Returns + ------- + neighbor_name : list of str | list of tuple + If ``descriptions=False``, the names of all builtin FieldTrip neighbor + definitions that can be loaded directly via :func:`read_ch_adjacency`. + + If ``descriptions=True``, a list of tuples ``(name, description)``. + + Notes + ----- + .. 
versionadded:: 1.1
+    """
+    if descriptions:
+        return sorted(
+            [(m.name, m.description) for m in _BUILTIN_CHANNEL_ADJACENCIES],
+            key=lambda x: x[0].casefold(),  # only sort based on name
+        )
+    else:
+        return sorted([m.name for m in _BUILTIN_CHANNEL_ADJACENCIES], key=str.casefold)
+
+
+@fill_doc
+def read_ch_adjacency(fname, picks=None):
+    """Read a channel adjacency ("neighbors") file that ships with MNE.
+
+    More information on these neighbor definitions can be found on the related
+    `FieldTrip documentation pages
+    <https://www.fieldtriptoolbox.org/template/neighbours/>`__.
+
+    Parameters
+    ----------
+    fname : path-like | str
+        The path to the file to load, or the name of a channel adjacency
+        matrix that ships with MNE-Python.
+
+        .. note::
+            You can retrieve the names of all
+            built-in channel adjacencies via
+            :func:`mne.channels.get_builtin_ch_adjacencies`.
+    %(picks_all_notypes)s
+
+    Returns
+    -------
+    ch_adjacency : scipy.sparse.csr_array, shape (n_channels, n_channels)
+        The adjacency matrix.
+    ch_names : list
+        The list of channel names present in adjacency matrix.
+
+    See Also
+    --------
+    get_builtin_ch_adjacencies
+    mne.viz.plot_ch_adjacency
+    find_ch_adjacency
+    mne.stats.combine_adjacency
+
+    Notes
+    -----
+    If the neighbor definition you need is not shipped by MNE-Python,
+    you may use :func:`find_ch_adjacency` to compute the
+    adjacency matrix based on your 2D sensor locations.
+
+    Note that depending on your use case, you may need to additionally use
+    :func:`mne.stats.combine_adjacency` to prepare a final "adjacency"
+    to pass to the eventual function.
+    """
+    if op.isabs(fname):
+        fname = str(
+            _check_fname(
+                fname=fname,
+                overwrite="read",
+                must_exist=True,
+            )
+        )
+    else:  # built-in FieldTrip neighbors
+        ch_adj_name = fname
+        del fname
+        if ch_adj_name.endswith("_neighb.mat"):  # backward-compat
+            ch_adj_name = ch_adj_name.replace("_neighb.mat", "")
+
+        if ch_adj_name not in get_builtin_ch_adjacencies():
+            raise ValueError(
+                f"No built-in channel adjacency matrix found with name: "
+                f"{ch_adj_name}. Valid names are: "
+                f'{", ".join(get_builtin_ch_adjacencies())}'
+            )
+
+        ch_adj = [a for a in _BUILTIN_CHANNEL_ADJACENCIES if a.name == ch_adj_name][0]
+        fname = ch_adj.fname
+        templates_dir = Path(__file__).resolve().parent / "data" / "neighbors"
+        fname = str(
+            _check_fname(  # only needed to convert to a string
+                fname=templates_dir / fname,
+                overwrite="read",
+                must_exist=True,
+            )
+        )
+
+    nb = loadmat(fname)["neighbours"]
+    ch_names = _recursive_flatten(nb["label"], str)
+    temp_info = create_info(ch_names, 1.0)
+    picks = _picks_to_idx(temp_info, picks, none="all")
+    neighbors = [_recursive_flatten(c, str) for c in nb["neighblabel"].flatten()]
+    assert len(ch_names) == len(neighbors)
+    adjacency = _ch_neighbor_adjacency(ch_names, neighbors)
+    # picking before constructing matrix is buggy
+    adjacency = adjacency[picks][:, picks]
+    ch_names = [ch_names[p] for p in picks]
+
+    return adjacency, ch_names
+
+
+def _ch_neighbor_adjacency(ch_names, neighbors):
+    """Compute sensor adjacency matrix.
+
+    Parameters
+    ----------
+    ch_names : list of str
+        The channel names.
+    neighbors : list of list
+        A list of lists of channel names: the neighbors to which each channel
+        in ``ch_names`` is connected. Must be of the same length as
+        ``ch_names``.
+
+    Returns
+    -------
+    ch_adjacency : scipy.sparse.csr_array
+        The adjacency matrix.
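+
+    Examples
+    --------
+    A minimal sketch with three hypothetical channels in a chain
+    (``A - B - C``), where each channel lists its direct neighbors:
+
+    >>> adj = _ch_neighbor_adjacency(["A", "B", "C"], [["B"], ["A", "C"], ["B"]])
+    >>> adj.toarray()  # doctest: +SKIP
+    array([[ True,  True, False],
+           [ True,  True,  True],
+           [False,  True,  True]])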
+ """ + if len(ch_names) != len(neighbors): + raise ValueError("`ch_names` and `neighbors` must have the same length") + set_neighbors = {c for d in neighbors for c in d} + rest = set_neighbors - set(ch_names) + if len(rest) > 0: + raise ValueError( + "Some of your neighbors are not present in the list of channel names" + ) + + for neigh in neighbors: + if not isinstance(neigh, list) and not all(isinstance(c, str) for c in neigh): + raise ValueError("`neighbors` must be a list of lists of str") + + ch_adjacency = np.eye(len(ch_names), dtype=bool) + for ii, neigbs in enumerate(neighbors): + ch_adjacency[ii, [ch_names.index(i) for i in neigbs]] = True + ch_adjacency = csr_array(ch_adjacency) + return ch_adjacency + + +@fill_doc +def find_ch_adjacency(info, ch_type): + """Find the adjacency matrix for the given channels. + + This function tries to infer the appropriate adjacency matrix template + for the given channels. If a template is not found, the adjacency matrix + is computed using Delaunay triangulation based on 2D sensor locations. + + Parameters + ---------- + %(info_not_none)s + ch_type : str | None + The channel type for computing the adjacency matrix. Currently + supports ``'mag'``, ``'grad'``, ``'eeg'`` and ``None``. + If ``None``, the info must contain only one channel type. + + Returns + ------- + ch_adjacency : scipy.sparse.csr_array, shape (n_channels, n_channels) + The adjacency matrix. + ch_names : list + The list of channel names present in adjacency matrix. + + See Also + -------- + mne.viz.plot_ch_adjacency + mne.stats.combine_adjacency + get_builtin_ch_adjacencies + read_ch_adjacency + + Notes + ----- + .. versionadded:: 0.15 + + Automatic detection of an appropriate adjacency matrix template only + works for MEG data at the moment. This means that the adjacency matrix + is always computed for EEG data and never loaded from a template file. If + you want to load a template for a given montage use + :func:`read_ch_adjacency` directly. + + .. warning:: + If Delaunay triangulation is used to calculate the adjacency matrix it + may yield partially unexpected results (e.g., include unwanted edges + between non-adjacent sensors). Therefore, it is recommended to check + (and, if necessary, manually modify) the result by inspecting it + via :func:`mne.viz.plot_ch_adjacency`. + + Note that depending on your use case, you may need to additionally use + :func:`mne.stats.combine_adjacency` to prepare a final "adjacency" + to pass to the eventual function. + """ + from ..io.kit.constants import KIT_NEIGHBORS + + if ch_type is None: + picks = channel_indices_by_type(info) + if sum([len(p) != 0 for p in picks.values()]) != 1: + raise ValueError( + "info must contain only one channel type if ch_type is None." 
+            )
+        ch_type = channel_type(info, 0)
+    else:
+        _check_option("ch_type", ch_type, ["mag", "grad", "eeg"])
+    (
+        has_vv_mag,
+        has_vv_grad,
+        is_old_vv,
+        has_4D_mag,
+        ctf_other_types,
+        has_CTF_grad,
+        n_kit_grads,
+        has_any_meg,
+        has_eeg_coils,
+        has_eeg_coils_and_meg,
+        has_eeg_coils_only,
+        has_neuromag_122_grad,
+        has_csd_coils,
+    ) = _get_ch_info(info)
+    conn_name = None
+    if has_vv_mag and ch_type == "mag":
+        conn_name = "neuromag306mag"
+    elif has_vv_grad and ch_type == "grad":
+        conn_name = "neuromag306planar"
+    elif has_4D_mag:
+        if "MEG 248" in info["ch_names"]:
+            idx = info["ch_names"].index("MEG 248")
+            grad = info["chs"][idx]["coil_type"] == FIFF.FIFFV_COIL_MAGNES_GRAD
+            mag = info["chs"][idx]["coil_type"] == FIFF.FIFFV_COIL_MAGNES_MAG
+            if ch_type == "grad" and grad:
+                conn_name = "bti248grad"
+            elif ch_type == "mag" and mag:
+                conn_name = "bti248"
+        elif "MEG 148" in info["ch_names"] and ch_type == "mag":
+            idx = info["ch_names"].index("MEG 148")
+            if info["chs"][idx]["coil_type"] == FIFF.FIFFV_COIL_MAGNES_MAG:
+                conn_name = "bti148"
+    elif has_CTF_grad and ch_type == "mag":
+        if info["nchan"] < 100:
+            conn_name = "ctf64"
+        elif info["nchan"] > 200:
+            conn_name = "ctf275"
+        else:
+            conn_name = "ctf151"
+    elif n_kit_grads > 0:
+        conn_name = KIT_NEIGHBORS.get(info["kit_system_id"])
+
+    if conn_name is not None:
+        logger.info(f"Reading adjacency matrix for {conn_name}.")
+        adjacency, ch_names = read_ch_adjacency(conn_name)
+        if conn_name.startswith("neuromag") and info["ch_names"][0].startswith("MEG "):
+            ch_names = [ch_name.replace("MEG", "MEG ") for ch_name in ch_names]
+        return adjacency, ch_names
+    logger.info(
+        "Could not find an adjacency matrix for the data. "
+        "Computing adjacency based on Delaunay triangulations."
+    )
+    return _compute_ch_adjacency(info, ch_type)
+
+
+@fill_doc
+def _compute_ch_adjacency(info, ch_type):
+    """Compute channel adjacency matrix using Delaunay triangulations.
+
+    Parameters
+    ----------
+    %(info_not_none)s
+    ch_type : str
+        The channel type for computing the adjacency matrix. Currently
+        supports ``'mag'``, ``'grad'`` and ``'eeg'``.
+
+    Returns
+    -------
+    ch_adjacency : scipy.sparse.csr_array, shape (n_channels, n_channels)
+        The adjacency matrix.
+    ch_names : list
+        The list of channel names present in adjacency matrix.
+    """
+    from ..channels.layout import _find_topomap_coords, _pair_grad_sensors
+    from ..source_estimate import spatial_tris_adjacency
+
+    combine_grads = ch_type == "grad" and any(
+        [
+            coil_type in [ch["coil_type"] for ch in info["chs"]]
+            for coil_type in [FIFF.FIFFV_COIL_VV_PLANAR_T1, FIFF.FIFFV_COIL_NM_122]
+        ]
+    )
+
+    picks = dict(_picks_by_type(info, exclude=[]))[ch_type]
+    ch_names = [info["ch_names"][pick] for pick in picks]
+    if combine_grads:
+        pairs = _pair_grad_sensors(info, topomap_coords=False, exclude=[])
+        if len(pairs) != len(picks):
+            raise RuntimeError(
+                "Cannot find a pair for some of the "
+                "gradiometers. Cannot compute adjacency "
+                "matrix."
+ ) + # only for one of the pair + xy = _find_topomap_coords(info, picks[::2], sphere=HEAD_SIZE_DEFAULT) + else: + xy = _find_topomap_coords(info, picks, sphere=HEAD_SIZE_DEFAULT) + tri = Delaunay(xy) + neighbors = spatial_tris_adjacency(tri.simplices) + + if combine_grads: + ch_adjacency = np.eye(len(picks), dtype=bool) + for idx, neigbs in zip(neighbors.row, neighbors.col): + for ii in range(2): # make sure each pair is included + for jj in range(2): + ch_adjacency[idx * 2 + ii, neigbs * 2 + jj] = True + ch_adjacency[idx * 2 + ii, idx * 2 + jj] = True # pair + ch_adjacency = csr_array(ch_adjacency) + else: + ch_adjacency = lil_array(neighbors) + ch_adjacency.setdiag(np.repeat(1, ch_adjacency.shape[0])) + ch_adjacency = ch_adjacency.tocsr() + + return ch_adjacency, ch_names + + +@fill_doc +def fix_mag_coil_types(info, use_cal=False): + """Fix magnetometer coil types. + + Parameters + ---------- + %(info_not_none)s Corrections are done in-place. + use_cal : bool + If True, further refine the check for old coil types by checking + ``info['chs'][ii]['cal']``. + + Notes + ----- + This function changes magnetometer coil types 3022 (T1: SQ20483N) and + 3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition + records in the info structure. + + Neuromag Vectorview systems can contain magnetometers with two + different coil sizes (3022 and 3023 vs. 3024). The systems + incorporating coils of type 3024 were introduced last and are used at + the majority of MEG sites. At some sites with 3024 magnetometers, + the data files have still defined the magnetometers to be of type + 3022 to ensure compatibility with older versions of Neuromag software. + In the MNE software as well as in the present version of Neuromag + software coil type 3024 is fully supported. Therefore, it is now safe + to upgrade the data files to use the true coil type. + + .. note:: The effect of the difference between the coil sizes on the + current estimates computed by the MNE software is very small. + Therefore the use of ``fix_mag_coil_types`` is not mandatory. + """ + old_mag_inds = _get_T1T2_mag_inds(info, use_cal) + n_mag = len(pick_types(info, meg="mag", exclude=[])) + for ii in old_mag_inds: + info["chs"][ii]["coil_type"] = FIFF.FIFFV_COIL_VV_MAG_T3 + logger.info(f"{len(old_mag_inds)} of {n_mag} magnetometer types replaced with T3.") + info._check_consistency() + + +def _get_T1T2_mag_inds(info, use_cal=False): + """Find T1/T2 magnetometer coil types.""" + picks = pick_types(info, meg="mag", exclude=[]) + old_mag_inds = [] + # From email exchanges, systems with the larger T2 coil only use the cal + # value of 2.09e-11. Newer T3 magnetometers use 4.13e-11 or 1.33e-10 + # (Triux). So we can use a simple check for > 3e-11. 
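+    # A channel is flagged below when its coil_type still reads T1/T2; with
+    # use_cal=True we additionally require a T3-like calibration factor
+    # (> 3e-11) as evidence that only the label, not the hardware, is outdated.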
+ for ii in picks: + ch = info["chs"][ii] + if ch["coil_type"] in (FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2): + if use_cal: + if ch["cal"] > 3e-11: + old_mag_inds.append(ii) + else: + old_mag_inds.append(ii) + return old_mag_inds + + +def _get_ch_info(info): + """Get channel info for inferring acquisition device.""" + chs = info["chs"] + # Only take first 16 bits, as higher bits store CTF comp order + coil_types = {ch["coil_type"] & 0xFFFF for ch in chs} + channel_types = {ch["kind"] for ch in chs} + + has_vv_mag = any( + k in coil_types + for k in [ + FIFF.FIFFV_COIL_VV_MAG_T1, + FIFF.FIFFV_COIL_VV_MAG_T2, + FIFF.FIFFV_COIL_VV_MAG_T3, + ] + ) + has_vv_grad = any( + k in coil_types + for k in [ + FIFF.FIFFV_COIL_VV_PLANAR_T1, + FIFF.FIFFV_COIL_VV_PLANAR_T2, + FIFF.FIFFV_COIL_VV_PLANAR_T3, + ] + ) + has_neuromag_122_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_NM_122]) + + is_old_vv = " " in chs[0]["ch_name"] + + has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types + ctf_other_types = ( + FIFF.FIFFV_COIL_CTF_REF_MAG, + FIFF.FIFFV_COIL_CTF_REF_GRAD, + FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD, + ) + has_CTF_grad = FIFF.FIFFV_COIL_CTF_GRAD in coil_types or ( + FIFF.FIFFV_MEG_CH in channel_types + and any(k in ctf_other_types for k in coil_types) + ) + # hack due to MNE-C bug in IO of CTF + # only take first 16 bits, as higher bits store CTF comp order + n_kit_grads = sum( + ch["coil_type"] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD for ch in chs + ) + + has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad, n_kit_grads]) + has_eeg_coils = ( + FIFF.FIFFV_COIL_EEG in coil_types and FIFF.FIFFV_EEG_CH in channel_types + ) + has_eeg_coils_and_meg = has_eeg_coils and has_any_meg + has_eeg_coils_only = has_eeg_coils and not has_any_meg + has_csd_coils = ( + FIFF.FIFFV_COIL_EEG_CSD in coil_types and FIFF.FIFFV_EEG_CH in channel_types + ) + + return ( + has_vv_mag, + has_vv_grad, + is_old_vv, + has_4D_mag, + ctf_other_types, + has_CTF_grad, + n_kit_grads, + has_any_meg, + has_eeg_coils, + has_eeg_coils_and_meg, + has_eeg_coils_only, + has_neuromag_122_grad, + has_csd_coils, + ) + + +@fill_doc +def make_1020_channel_selections(info, midline="z", *, return_ch_names=False): + """Map hemisphere names to corresponding EEG channel names or indices. + + This function uses a simple heuristic to separate channel names into three + Region of Interest-based selections: ``Left``, ``Midline`` and ``Right``. + + The heuristic is that any of the channel names ending + with odd numbers are filed under ``Left``; those ending with even numbers + are filed under ``Right``; and those ending with the character(s) specified + in ``midline`` are filed under ``Midline``. Other channels are ignored. + + This is appropriate for 10/20, 10/10, 10/05, …, sensor arrangements, but + not for other naming conventions. + + Parameters + ---------- + %(info_not_none)s If channel locations are present, the channel lists will + be sorted from posterior to anterior; otherwise, the order specified in + ``info["ch_names"]`` will be kept. + midline : str + Names ending in any of these characters are stored under the + ``Midline`` key. Defaults to ``'z'``. Capitalization is ignored. + return_ch_names : bool + Whether to return channel names instead of channel indices. + + .. versionadded:: 1.4.0 + + Returns + ------- + selections : dict + A dictionary mapping from region of interest name to a list of channel + indices (if ``return_ch_names=False``) or to a list of channel names + (if ``return_ch_names=True``). 
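+
+    Examples
+    --------
+    A minimal sketch using three hypothetical 10-20 channel names:
+
+    >>> info = mne.create_info(["Fp1", "Fpz", "Fp2"], 1000.0, "eeg")  # doctest: +SKIP
+    >>> make_1020_channel_selections(info)  # doctest: +SKIP
+    {'Left': [0], 'Midline': [1], 'Right': [2]}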
+ """ + _validate_type(info, "info") + + try: + from .layout import find_layout + + layout = find_layout(info) + pos = layout.pos + ch_names = layout.names + except RuntimeError: # no channel positions found + ch_names = info["ch_names"] + pos = None + + selections = dict(Left=[], Midline=[], Right=[]) + for pick, channel in enumerate(ch_names): + last_char = channel[-1].lower() # in 10/20, last char codes hemisphere + if last_char in midline: + selection = "Midline" + elif last_char.isdigit(): + selection = "Left" if int(last_char) % 2 else "Right" + else: # ignore the channel + continue + selections[selection].append(pick) + + if pos is not None: + # sort channels from front to center + # (y-coordinate of the position info in the layout) + selections = { + selection: np.array(picks)[pos[picks, 1].argsort()] + for selection, picks in selections.items() + } + + # convert channel indices to names if requested + if return_ch_names: + for selection, ch_indices in selections.items(): + selections[selection] = [info.ch_names[idx] for idx in ch_indices] + + return selections + + +@verbose +def combine_channels( + inst, groups, method="mean", keep_stim=False, drop_bad=False, verbose=None +): + """Combine channels based on specified channel grouping. + + Parameters + ---------- + inst : instance of Raw, Epochs, or Evoked + An MNE-Python object to combine the channels for. The object can be of + type Raw, Epochs, or Evoked. + groups : dict + Specifies which channels are aggregated into a single channel, with + aggregation method determined by the ``method`` parameter. One new + pseudo-channel is made per dict entry; the dict values must be lists of + picks (integer indices of ``ch_names``). For example:: + + groups=dict(Left=[1, 2, 3, 4], Right=[5, 6, 7, 8]) + + Note that within a dict entry all channels must have the same type. + method : str | callable + Which method to use to combine channels. If a :class:`str`, must be one + of 'mean', 'median', or 'std' (standard deviation). If callable, the + callable must accept one positional input (data of shape ``(n_channels, + n_times)``, or ``(n_epochs, n_channels, n_times)``) and return an + :class:`array ` of shape ``(n_times,)``, or ``(n_epochs, + n_times)``. For example with an instance of Raw or Evoked:: + + method = lambda data: np.mean(data, axis=0) + + Another example with an instance of Epochs:: + + method = lambda data: np.median(data, axis=1) + + Defaults to ``'mean'``. + keep_stim : bool + If ``True``, include stimulus channels in the resulting object. + Defaults to ``False``. + drop_bad : bool + If ``True``, drop channels marked as bad before combining. Defaults to + ``False``. + %(verbose)s + + Returns + ------- + combined_inst : instance of Raw, Epochs, or Evoked + An MNE-Python object of the same type as the input ``inst``, containing + one virtual channel for each group in ``groups`` (and, if ``keep_stim`` + is ``True``, also containing stimulus channels). 
+ """ + from ..epochs import BaseEpochs, EpochsArray + from ..evoked import Evoked, EvokedArray + from ..io import BaseRaw, RawArray + + ch_axis = 1 if isinstance(inst, BaseEpochs) else 0 + ch_idx = list(range(inst.info["nchan"])) + ch_names = inst.info["ch_names"] + ch_types = inst.get_channel_types() + kwargs = dict() + if isinstance(inst, BaseEpochs): + kwargs["copy"] = False + inst_data = inst.get_data(**kwargs) + groups = OrderedDict(deepcopy(groups)) + + # Convert string values of ``method`` into callables + # XXX Possibly de-duplicate with _make_combine_callable of mne/viz/utils.py + if isinstance(method, str): + method_dict = { + key: partial(getattr(np, key), axis=ch_axis) + for key in ("mean", "median", "std") + } + try: + method = method_dict[method] + except KeyError: + raise ValueError( + '"method" must be a callable, or one of "mean", ' + f'"median", or "std"; got "{method}".' + ) + + # Instantiate channel info and data + new_ch_names, new_ch_types, new_data = [], [], [] + if not isinstance(keep_stim, bool): + raise TypeError(f'"keep_stim" must be of type bool, not {type(keep_stim)}.') + if keep_stim: + stim_ch_idx = list(pick_types(inst.info, meg=False, stim=True)) + if stim_ch_idx: + new_ch_names = [ch_names[idx] for idx in stim_ch_idx] + new_ch_types = [ch_types[idx] for idx in stim_ch_idx] + new_data = [np.take(inst_data, idx, axis=ch_axis) for idx in stim_ch_idx] + else: + warn("Could not find stimulus channels.") + + # Get indices of bad channels + ch_idx_bad = [] + if not isinstance(drop_bad, bool): + raise TypeError(f'"drop_bad" must be of type bool, not {type(drop_bad)}.') + if drop_bad and inst.info["bads"]: + ch_idx_bad = pick_channels(ch_names, inst.info["bads"]) + + # Check correctness of combinations + for this_group, this_picks in groups.items(): + # Check if channel indices are out of bounds + if not all(idx in ch_idx for idx in this_picks): + raise ValueError("Some channel indices are out of bounds.") + # Check if heterogeneous sensor type combinations + this_ch_type = np.array(ch_types)[this_picks] + if len(set(this_ch_type)) > 1: + types = ", ".join(set(this_ch_type)) + raise ValueError( + "Cannot combine sensors of different types; " + f'"{this_group}" contains types {types}.' + ) + # Remove bad channels + these_bads = [idx for idx in this_picks if idx in ch_idx_bad] + this_picks = [idx for idx in this_picks if idx not in ch_idx_bad] + if these_bads: + logger.info( + f"Dropped the following channels in group {this_group}: {these_bads}" + ) + # Check if combining less than 2 channel + if len(set(this_picks)) < 2: + warn( + f'Less than 2 channels in group "{this_group}" when ' + f'combining by method "{method}".' 
+ ) + # If all good create more detailed dict without bad channels + groups[this_group] = dict(picks=this_picks, ch_type=this_ch_type[0]) + + # Combine channels and add them to the new instance + for this_group, this_group_dict in groups.items(): + new_ch_names.append(this_group) + new_ch_types.append(this_group_dict["ch_type"]) + this_picks = this_group_dict["picks"] + this_data = np.take(inst_data, this_picks, axis=ch_axis) + new_data.append(method(this_data)) + new_data = np.swapaxes(new_data, 0, ch_axis) + info = create_info( + sfreq=inst.info["sfreq"], ch_names=new_ch_names, ch_types=new_ch_types + ) + # create new instances and make sure to copy important attributes + if isinstance(inst, BaseRaw): + combined_inst = RawArray(new_data, info, first_samp=inst.first_samp) + elif isinstance(inst, BaseEpochs): + combined_inst = EpochsArray( + new_data, + info, + events=inst.events, + event_id=inst.event_id, + tmin=inst.times[0], + baseline=inst.baseline, + ) + if inst.metadata is not None: + combined_inst.metadata = inst.metadata.copy() + elif isinstance(inst, Evoked): + combined_inst = EvokedArray( + new_data, info, tmin=inst.times[0], baseline=inst.baseline + ) + + return combined_inst + + +# NeuroMag channel groupings +_SELECTIONS = [ + "Vertex", + "Left-temporal", + "Right-temporal", + "Left-parietal", + "Right-parietal", + "Left-occipital", + "Right-occipital", + "Left-frontal", + "Right-frontal", +] +_EEG_SELECTIONS = ["EEG 1-32", "EEG 33-64", "EEG 65-96", "EEG 97-128"] + + +def _divide_to_regions(info, add_stim=True): + """Divide channels to regions by positions.""" + picks = _pick_data_channels(info, exclude=[]) + chs_in_lobe = len(picks) // 4 + pos = np.array([ch["loc"][:3] for ch in info["chs"]]) + x, y, z = pos.T + + frontal = picks[np.argsort(y[picks])[-chs_in_lobe:]] + picks = np.setdiff1d(picks, frontal) + + occipital = picks[np.argsort(y[picks])[:chs_in_lobe]] + picks = np.setdiff1d(picks, occipital) + + temporal = picks[np.argsort(z[picks])[:chs_in_lobe]] + picks = np.setdiff1d(picks, temporal) + + lt, rt = _divide_side(temporal, x) + lf, rf = _divide_side(frontal, x) + lo, ro = _divide_side(occipital, x) + lp, rp = _divide_side(picks, x) # Parietal lobe from the remaining picks. + + # Because of the way the sides are divided, there may be outliers in the + # temporal lobes. Here we switch the sides for these outliers. For other + # lobes it is not a big problem because of the vicinity of the lobes. 
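+    # Outliers are channels whose x-coordinate lies more than two standard
+    # deviations from the rest of their assigned temporal lobe; they are
+    # removed here and re-assigned below to whichever side's mean is closer.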
+    with np.errstate(invalid="ignore"):  # invalid division, greater compare
+        zs = np.abs(zscore(x[rt]))
+        outliers = np.array(rt)[np.where(zs > 2.0)[0]]
+    rt = list(np.setdiff1d(rt, outliers))
+
+    with np.errstate(invalid="ignore"):  # invalid division, greater compare
+        zs = np.abs(zscore(x[lt]))
+        outliers = np.append(outliers, (np.array(lt)[np.where(zs > 2.0)[0]]))
+    lt = list(np.setdiff1d(lt, outliers))
+
+    l_mean = np.mean(x[lt])
+    r_mean = np.mean(x[rt])
+    for outlier in outliers:
+        if abs(l_mean - x[outlier]) < abs(r_mean - x[outlier]):
+            lt.append(outlier)
+        else:
+            rt.append(outlier)
+
+    if add_stim:
+        stim_ch = _get_stim_channel(None, info, raise_error=False)
+        if len(stim_ch) > 0:
+            for region in [lf, rf, lo, ro, lp, rp, lt, rt]:
+                region.append(info["ch_names"].index(stim_ch[0]))
+    return OrderedDict(
+        [
+            ("Left-frontal", lf),
+            ("Right-frontal", rf),
+            ("Left-parietal", lp),
+            ("Right-parietal", rp),
+            ("Left-occipital", lo),
+            ("Right-occipital", ro),
+            ("Left-temporal", lt),
+            ("Right-temporal", rt),
+        ]
+    )
+
+
+def _divide_side(lobe, x):
+    """Make a separation between left and right lobe evenly."""
+    lobe = np.asarray(lobe)
+    median = np.median(x[lobe])
+
+    left = lobe[np.where(x[lobe] < median)[0]]
+    right = lobe[np.where(x[lobe] > median)[0]]
+    medians = np.where(x[lobe] == median)[0]
+
+    left = np.sort(np.concatenate([left, lobe[medians[1::2]]]))
+    right = np.sort(np.concatenate([right, lobe[medians[::2]]]))
+    return list(left), list(right)
+
+
+@verbose
+def read_vectorview_selection(name, fname=None, info=None, verbose=None):
+    """Read Neuromag Vector View channel selection from a file.
+
+    Parameters
+    ----------
+    name : str | list of str
+        Name of the selection. If a list, the selections are combined.
+        Supported selections are: ``'Vertex'``, ``'Left-temporal'``,
+        ``'Right-temporal'``, ``'Left-parietal'``, ``'Right-parietal'``,
+        ``'Left-occipital'``, ``'Right-occipital'``, ``'Left-frontal'`` and
+        ``'Right-frontal'``. Selections can also be matched and combined by
+        specifying common substrings. For example, ``name='temporal'`` will
+        produce a combination of ``'Left-temporal'`` and ``'Right-temporal'``.
+    fname : path-like
+        Filename of the selection file (if ``None``, built-in selections are
+        used).
+    %(info)s Used to determine which channel naming convention to use, e.g.
+        ``'MEG 0111'`` (with space) for old Neuromag systems and ``'MEG0111'``
+        (without space) for new ones.
+    %(verbose)s
+
+    Returns
+    -------
+    sel : list of str
+        List with channel names in the selection.
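+
+    Examples
+    --------
+    A minimal sketch using the built-in selection file; ``'temporal'`` matches
+    both ``'Left-temporal'`` and ``'Right-temporal'``:
+
+    >>> sel = read_vectorview_selection("temporal")  # doctest: +SKIP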
+ """ + # convert name to list of string + if not isinstance(name, list | tuple): + name = [name] + if isinstance(info, Info): + picks = pick_types(info, meg=True, exclude=()) + if len(picks) > 0 and " " not in info["ch_names"][picks[0]]: + spacing = "new" + else: + spacing = "old" + elif info is not None: + raise TypeError(f"info must be an instance of Info or None, not {type(info)}") + else: # info is None + spacing = "old" + + # use built-in selections by default + if fname is None: + fname = op.join(op.dirname(__file__), "..", "data", "mne_analyze.sel") + + fname = str(_check_fname(fname, must_exist=True, overwrite="read")) + + # use this to make sure we find at least one match for each name + name_found = {n: False for n in name} + with open(fname) as fid: + sel = [] + for line in fid: + line = line.strip() + # skip blank lines and comments + if len(line) == 0 or line[0] == "#": + continue + # get the name of the selection in the file + pos = line.find(":") + if pos < 0: + logger.info('":" delimiter not found in selections file, skipping line') + continue + sel_name_file = line[:pos] + # search for substring match with name provided + for n in name: + if sel_name_file.find(n) >= 0: + sel.extend(line[pos + 1 :].split("|")) + name_found[n] = True + break + + # make sure we found at least one match for each name + for n, found in name_found.items(): + if not found: + raise ValueError(f'No match for selection name "{n}" found') + + # make the selection a sorted list with unique elements + sel = list(set(sel)) + sel.sort() + if spacing == "new": # "new" or "old" by now, "old" is default + sel = [s.replace("MEG ", "MEG") for s in sel] + return sel diff --git a/mne/channels/data/layouts/CTF-275.lout b/mne/channels/data/layouts/CTF-275.lout new file mode 100644 index 0000000..53d924c --- /dev/null +++ b/mne/channels/data/layouts/CTF-275.lout @@ -0,0 +1,276 @@ + -42.27 42.33 -39.99 31.80 +001 -4.09 10.91 4.00 3.00 MLC11-2622 +002 -7.25 8.87 4.00 3.00 MLC12-2622 +003 -10.79 7.43 4.00 3.00 MLC13-2622 +004 -14.40 5.31 4.00 3.00 MLC14-2622 +005 -17.45 2.88 4.00 3.00 MLC15-2622 +006 -19.94 -0.21 4.00 3.00 MLC16-2622 +007 -22.30 -3.88 4.00 3.00 MLC17-2622 +008 -7.70 5.16 4.00 3.00 MLC21-2622 +009 -11.18 3.69 4.00 3.00 MLC22-2622 +010 -14.17 1.40 4.00 3.00 MLC23-2622 +011 -16.42 -1.52 4.00 3.00 MLC24-2622 +012 -18.64 -4.88 4.00 3.00 MLC25-2622 +013 -12.55 -2.00 4.00 3.00 MLC31-2622 +014 -15.13 -5.41 4.00 3.00 MLC32-2622 +015 -9.57 0.28 4.00 3.00 MLC41-2622 +016 -11.51 -5.56 4.00 3.00 MLC42-2622 +017 -4.04 4.58 4.00 3.00 MLC51-2622 +018 -6.04 1.35 4.00 3.00 MLC52-2622 +019 -8.79 -3.34 4.00 3.00 MLC53-2622 +020 -8.32 -7.10 4.00 3.00 MLC54-2622 +021 -6.60 -10.22 4.00 3.00 MLC55-2622 +022 -4.01 -1.76 4.00 3.00 MLC61-2622 +023 -5.55 -4.97 4.00 3.00 MLC62-2622 +024 -3.74 -8.12 4.00 3.00 MLC63-2622 +025 -7.63 28.14 4.00 3.00 MLF11-2622 +026 -12.92 27.01 4.00 3.00 MLF12-2622 +027 -18.14 25.41 4.00 3.00 MLF13-2622 +028 -23.34 23.65 4.00 3.00 MLF14-2622 +029 -4.64 25.47 4.00 3.00 MLF21-2622 +030 -9.22 24.68 4.00 3.00 MLF22-2622 +031 -13.60 23.41 4.00 3.00 MLF23-2622 +032 -18.31 21.53 4.00 3.00 MLF24-2622 +033 -22.68 19.69 4.00 3.00 MLF25-2622 +034 -6.57 22.14 4.00 3.00 MLF31-2622 +035 -10.75 21.22 4.00 3.00 MLF32-2622 +036 -15.16 19.49 4.00 3.00 MLF33-2622 +037 -19.01 17.57 4.00 3.00 MLF34-2622 +038 -22.93 15.25 4.00 3.00 MLF35-2622 +039 -4.25 19.38 4.00 3.00 MLF41-2622 +040 -8.17 18.80 4.00 3.00 MLF42-2622 +041 -12.29 17.37 4.00 3.00 MLF43-2622 +042 -15.93 15.49 4.00 3.00 MLF44-2622 +043 -19.89 13.39 4.00 
3.00 MLF45-2622 +044 -24.12 10.50 4.00 3.00 MLF46-2622 +045 -5.48 16.15 4.00 3.00 MLF51-2622 +046 -9.58 15.10 4.00 3.00 MLF52-2622 +047 -13.17 13.43 4.00 3.00 MLF53-2622 +048 -16.66 11.39 4.00 3.00 MLF54-2622 +049 -20.76 9.06 4.00 3.00 MLF55-2622 +050 -24.71 5.73 4.00 3.00 MLF56-2622 +051 -7.17 12.78 4.00 3.00 MLF61-2622 +052 -10.58 11.08 4.00 3.00 MLF62-2622 +053 -13.93 9.16 4.00 3.00 MLF63-2622 +054 -17.37 7.29 4.00 3.00 MLF64-2622 +055 -20.83 4.87 4.00 3.00 MLF65-2622 +056 -23.40 1.59 4.00 3.00 MLF66-2622 +057 -25.90 -2.51 4.00 3.00 MLF67-2622 +058 -6.96 -27.32 4.00 3.00 MLO11-2622 +059 -11.88 -25.97 4.00 3.00 MLO12-2622 +060 -16.48 -23.69 4.00 3.00 MLO13-2622 +061 -20.64 -20.44 4.00 3.00 MLO14-2622 +062 -4.82 -30.75 4.00 3.00 MLO21-2622 +063 -10.11 -29.77 4.00 3.00 MLO22-2622 +064 -15.52 -27.87 4.00 3.00 MLO23-2622 +065 -20.40 -24.85 4.00 3.00 MLO24-2622 +066 -7.92 -33.45 4.00 3.00 MLO31-2622 +067 -13.84 -31.94 4.00 3.00 MLO32-2622 +068 -19.61 -29.16 4.00 3.00 MLO33-2622 +069 -24.70 -25.44 4.00 3.00 MLO34-2622 +070 -5.16 -36.86 4.00 3.00 MLO41-2622 +071 -11.67 -35.84 4.00 3.00 MLO42-2622 +072 -17.98 -33.55 4.00 3.00 MLO43-2622 +073 -23.91 -30.00 4.00 3.00 MLO44-2622 +074 -8.79 -39.34 4.00 3.00 MLO51-2622 +075 -15.83 -37.54 4.00 3.00 MLO52-2622 +076 -22.47 -34.34 4.00 3.00 MLO53-2622 +077 -4.98 -13.36 4.00 3.00 MLP11-2622 +078 -10.20 -10.01 4.00 3.00 MLP12-2622 +079 -3.80 -16.69 4.00 3.00 MLP21-2622 +080 -8.73 -13.30 4.00 3.00 MLP22-2622 +081 -13.58 -8.80 4.00 3.00 MLP23-2622 +082 -5.66 -19.72 4.00 3.00 MLP31-2622 +083 -8.41 -16.83 4.00 3.00 MLP32-2622 +084 -12.08 -14.80 4.00 3.00 MLP33-2622 +085 -15.13 -11.95 4.00 3.00 MLP34-2622 +086 -17.18 -8.63 4.00 3.00 MLP35-2622 +087 -9.92 -20.16 4.00 3.00 MLP41-2622 +088 -13.37 -18.09 4.00 3.00 MLP42-2622 +089 -16.59 -15.58 4.00 3.00 MLP43-2622 +090 -19.06 -11.87 4.00 3.00 MLP44-2622 +091 -20.87 -8.06 4.00 3.00 MLP45-2622 +092 -4.02 -24.07 4.00 3.00 MLP51-2622 +093 -8.77 -23.79 4.00 3.00 MLP52-2622 +094 -12.92 -22.08 4.00 3.00 MLP53-2622 +095 -16.83 -19.50 4.00 3.00 MLP54-2622 +096 -20.23 -16.32 4.00 3.00 MLP55-2622 +097 -22.76 -11.97 4.00 3.00 MLP56-2622 +098 -24.58 -7.58 4.00 3.00 MLP57-2622 +099 -27.14 12.98 4.00 3.00 MLT11-2622 +100 -28.19 7.51 4.00 3.00 MLT12-2622 +101 -28.08 2.09 4.00 3.00 MLT13-2622 +102 -28.56 -5.98 4.00 3.00 MLT14-2622 +103 -26.96 -11.17 4.00 3.00 MLT15-2622 +104 -24.11 -16.46 4.00 3.00 MLT16-2622 +105 -27.30 17.85 4.00 3.00 MLT21-2622 +106 -31.47 10.04 4.00 3.00 MLT22-2622 +107 -31.85 3.70 4.00 3.00 MLT23-2622 +108 -32.08 -2.62 4.00 3.00 MLT24-2622 +109 -31.09 -9.80 4.00 3.00 MLT25-2622 +110 -28.71 -15.38 4.00 3.00 MLT26-2622 +111 -24.78 -20.78 4.00 3.00 MLT27-2622 +112 -28.61 21.64 4.00 3.00 MLT31-2622 +113 -32.09 15.32 4.00 3.00 MLT32-2622 +114 -35.40 5.79 4.00 3.00 MLT33-2622 +115 -35.85 -1.29 4.00 3.00 MLT34-2622 +116 -34.97 -7.76 4.00 3.00 MLT35-2622 +117 -32.89 -13.91 4.00 3.00 MLT36-2622 +118 -29.32 -20.20 4.00 3.00 MLT37-2622 +119 -33.87 18.93 4.00 3.00 MLT41-2622 +120 -36.68 11.37 4.00 3.00 MLT42-2622 +121 -38.92 2.11 4.00 3.00 MLT43-2622 +122 -38.70 -5.16 4.00 3.00 MLT44-2622 +123 -36.95 -12.13 4.00 3.00 MLT45-2622 +124 -33.72 -18.79 4.00 3.00 MLT46-2622 +125 -29.28 -25.28 4.00 3.00 MLT47-2622 +126 -38.78 14.74 4.00 3.00 MLT51-2622 +127 -41.29 6.62 4.00 3.00 MLT52-2622 +128 -41.87 -1.80 4.00 3.00 MLT53-2622 +129 -40.62 -9.63 4.00 3.00 MLT54-2622 +130 -37.78 -16.89 4.00 3.00 MLT55-2622 +131 -33.73 -24.02 4.00 3.00 MLT56-2622 +132 -28.51 -29.92 4.00 3.00 MLT57-2622 +133 -0.24 10.97 4.00 3.00 MRC11-2622 +134 
2.99 8.95 4.00 3.00 MRC12-2622 +135 6.57 7.62 4.00 3.00 MRC13-2622 +136 10.22 5.56 4.00 3.00 MRC14-2622 +137 13.27 3.22 4.00 3.00 MRC15-2622 +138 15.86 0.21 4.00 3.00 MRC16-2622 +139 18.32 -3.45 4.00 3.00 MRC17-2622 +140 3.53 5.28 4.00 3.00 MRC21-2622 +141 7.00 3.85 4.00 3.00 MRC22-2622 +142 10.06 1.68 4.00 3.00 MRC23-2622 +143 12.33 -1.20 4.00 3.00 MRC24-2622 +144 14.73 -4.52 4.00 3.00 MRC25-2622 +145 8.51 -1.76 4.00 3.00 MRC31-2622 +146 11.17 -5.14 4.00 3.00 MRC32-2622 +147 5.51 0.46 4.00 3.00 MRC41-2622 +148 7.56 -5.33 4.00 3.00 MRC42-2622 +149 -0.17 4.62 4.00 3.00 MRC51-2622 +150 1.93 1.46 4.00 3.00 MRC52-2622 +151 4.78 -3.16 4.00 3.00 MRC53-2622 +152 4.39 -6.98 4.00 3.00 MRC54-2622 +153 2.73 -10.10 4.00 3.00 MRC55-2622 +154 -0.07 -1.75 4.00 3.00 MRC61-2622 +155 1.58 -4.86 4.00 3.00 MRC62-2622 +156 -0.15 -8.08 4.00 3.00 MRC63-2622 +157 2.97 28.24 4.00 3.00 MRF11-2622 +158 8.25 27.25 4.00 3.00 MRF12-2622 +159 13.54 25.74 4.00 3.00 MRF13-2622 +160 18.74 24.12 4.00 3.00 MRF14-2622 +161 0.03 25.52 4.00 3.00 MRF21-2622 +162 4.63 24.85 4.00 3.00 MRF22-2622 +163 9.03 23.67 4.00 3.00 MRF23-2622 +164 13.78 21.87 4.00 3.00 MRF24-2622 +165 18.19 20.13 4.00 3.00 MRF25-2622 +166 2.05 22.22 4.00 3.00 MRF31-2622 +167 6.27 21.38 4.00 3.00 MRF32-2622 +168 10.63 19.79 4.00 3.00 MRF33-2622 +169 14.57 17.90 4.00 3.00 MRF34-2622 +170 18.54 15.70 4.00 3.00 MRF35-2622 +171 -0.22 19.42 4.00 3.00 MRF41-2622 +172 3.75 18.84 4.00 3.00 MRF42-2622 +173 7.86 17.57 4.00 3.00 MRF43-2622 +174 11.53 15.78 4.00 3.00 MRF44-2622 +175 15.55 13.76 4.00 3.00 MRF45-2622 +176 19.83 10.96 4.00 3.00 MRF46-2622 +177 1.08 16.23 4.00 3.00 MRF51-2622 +178 5.20 15.33 4.00 3.00 MRF52-2622 +179 8.81 13.68 4.00 3.00 MRF53-2622 +180 12.37 11.71 4.00 3.00 MRF54-2622 +181 16.53 9.44 4.00 3.00 MRF55-2622 +182 20.54 6.21 4.00 3.00 MRF56-2622 +183 2.82 12.87 4.00 3.00 MRF61-2622 +184 6.27 11.29 4.00 3.00 MRF62-2622 +185 9.66 9.43 4.00 3.00 MRF63-2622 +186 13.14 7.59 4.00 3.00 MRF64-2622 +187 16.52 5.22 4.00 3.00 MRF65-2622 +188 19.31 2.05 4.00 3.00 MRF66-2622 +189 21.91 -1.92 4.00 3.00 MRF67-2622 +190 3.46 -27.20 4.00 3.00 MRO11-2622 +191 8.35 -25.76 4.00 3.00 MRO12-2622 +192 12.92 -23.40 4.00 3.00 MRO13-2622 +193 17.02 -20.06 4.00 3.00 MRO14-2622 +194 1.43 -30.69 4.00 3.00 MRO21-2622 +195 6.66 -29.60 4.00 3.00 MRO22-2622 +196 12.02 -27.57 4.00 3.00 MRO23-2622 +197 16.88 -24.46 4.00 3.00 MRO24-2622 +198 4.55 -33.35 4.00 3.00 MRO31-2622 +199 10.46 -31.70 4.00 3.00 MRO32-2622 +200 16.07 -28.88 4.00 3.00 MRO33-2622 +201 21.16 -24.93 4.00 3.00 MRO34-2622 +202 1.88 -36.78 4.00 3.00 MRO41-2622 +203 8.37 -35.64 4.00 3.00 MRO42-2622 +204 14.63 -33.19 4.00 3.00 MRO43-2622 +205 20.45 -29.57 4.00 3.00 MRO44-2622 +206 5.57 -39.20 4.00 3.00 MRO51-2622 +207 12.57 -37.26 4.00 3.00 MRO52-2622 +208 19.11 -33.96 4.00 3.00 MRO53-2622 +209 1.20 -13.27 4.00 3.00 MRP11-2622 +210 6.34 -9.81 4.00 3.00 MRP12-2622 +211 0.06 -16.65 4.00 3.00 MRP21-2622 +212 4.94 -13.15 4.00 3.00 MRP22-2622 +213 9.72 -8.56 4.00 3.00 MRP23-2622 +214 2.03 -19.64 4.00 3.00 MRP31-2622 +215 4.72 -16.72 4.00 3.00 MRP32-2622 +216 8.28 -14.64 4.00 3.00 MRP33-2622 +217 11.32 -11.68 4.00 3.00 MRP34-2622 +218 13.30 -8.29 4.00 3.00 MRP35-2622 +219 6.32 -19.99 4.00 3.00 MRP41-2622 +220 9.66 -17.86 4.00 3.00 MRP42-2622 +221 12.83 -15.29 4.00 3.00 MRP43-2622 +222 15.21 -11.53 4.00 3.00 MRP44-2622 +223 16.99 -7.64 4.00 3.00 MRP45-2622 +224 0.42 -24.03 4.00 3.00 MRP51-2622 +225 5.29 -23.71 4.00 3.00 MRP52-2622 +226 9.32 -21.86 4.00 3.00 MRP53-2622 +227 13.19 -19.21 4.00 3.00 MRP54-2622 +228 16.49 -15.99 
4.00 3.00 MRP55-2622 +229 18.98 -11.54 4.00 3.00 MRP56-2622 +230 20.69 -7.11 4.00 3.00 MRP57-2622 +231 22.81 13.51 4.00 3.00 MRT11-2622 +232 23.97 8.09 4.00 3.00 MRT12-2622 +233 23.97 2.65 4.00 3.00 MRT13-2622 +234 24.63 -5.42 4.00 3.00 MRT14-2622 +235 23.16 -10.65 4.00 3.00 MRT15-2622 +236 20.37 -16.02 4.00 3.00 MRT16-2622 +237 22.88 18.38 4.00 3.00 MRT21-2622 +238 27.23 10.62 4.00 3.00 MRT22-2622 +239 27.73 4.35 4.00 3.00 MRT23-2622 +240 28.08 -1.95 4.00 3.00 MRT24-2622 +241 27.24 -9.21 4.00 3.00 MRT25-2622 +242 24.97 -14.84 4.00 3.00 MRT26-2622 +243 21.15 -20.30 4.00 3.00 MRT27-2622 +244 24.07 22.26 4.00 3.00 MRT31-2622 +245 27.72 15.94 4.00 3.00 MRT32-2622 +246 31.24 6.55 4.00 3.00 MRT33-2622 +247 31.84 -0.55 4.00 3.00 MRT34-2622 +248 31.09 -7.10 4.00 3.00 MRT35-2622 +249 29.13 -13.33 4.00 3.00 MRT36-2622 +250 25.63 -19.73 4.00 3.00 MRT37-2622 +251 29.40 19.66 4.00 3.00 MRT41-2622 +252 32.38 12.17 4.00 3.00 MRT42-2622 +253 34.86 2.97 4.00 3.00 MRT43-2622 +254 34.80 -4.39 4.00 3.00 MRT44-2622 +255 33.11 -11.36 4.00 3.00 MRT45-2622 +256 30.03 -18.16 4.00 3.00 MRT46-2622 +257 25.54 -24.88 4.00 3.00 MRT47-2622 +258 34.47 15.52 4.00 3.00 MRT51-2622 +259 37.12 7.54 4.00 3.00 MRT52-2622 +260 37.93 -0.94 4.00 3.00 MRT53-2622 +261 36.82 -8.89 4.00 3.00 MRT54-2622 +262 34.10 -16.25 4.00 3.00 MRT55-2622 +263 30.13 -23.45 4.00 3.00 MRT56-2622 +264 25.07 -29.43 4.00 3.00 MRT57-2622 +265 -2.13 7.84 4.00 3.00 MZC01-2622 +266 -2.05 1.38 4.00 3.00 MZC02-2622 +267 -1.99 -5.04 4.00 3.00 MZC03-2622 +268 -1.93 -11.44 4.00 3.00 MZC04-2622 +269 -2.33 28.50 4.00 3.00 MZF01-2622 +270 -2.28 22.54 4.00 3.00 MZF02-2622 +271 -2.20 14.52 4.00 3.00 MZF03-2622 +272 -1.77 -27.22 4.00 3.00 MZO01-2622 +273 -1.71 -34.04 4.00 3.00 MZO02-2622 +274 -1.66 -39.69 4.00 3.00 MZO03-2622 +275 -1.81 -21.05 4.00 3.00 MZP01-2622 diff --git a/mne/channels/data/layouts/CTF151.lay b/mne/channels/data/layouts/CTF151.lay new file mode 100644 index 0000000..c9d68f3 --- /dev/null +++ b/mne/channels/data/layouts/CTF151.lay @@ -0,0 +1,153 @@ +1 -0.440000 -4.000000 0.551100 0.351100 MLC11 +2 -1.200000 -4.130000 0.551100 0.351100 MLC12 +3 -2.220000 -4.270000 0.551100 0.351100 MLC13 +4 -2.820000 -4.710000 0.551100 0.351100 MLC14 +5 -3.340000 -5.230000 0.551100 0.351100 MLC15 +6 -0.820000 -4.550000 0.551100 0.351100 MLC21 +7 -1.620000 -4.570000 0.551100 0.351100 MLC22 +8 -2.160000 -4.970000 0.551100 0.351100 MLC23 +9 -2.640000 -5.370000 0.551100 0.351100 MLC24 +10 -1.270000 -5.050000 0.551100 0.351100 MLC31 +11 -1.780000 -5.450000 0.551100 0.351100 MLC32 +12 -1.300000 -5.930000 0.551100 0.351100 MLC33 +13 -0.440000 -5.050000 0.551100 0.351100 MLC41 +14 -0.820000 -5.530000 0.551100 0.351100 MLC42 +15 -0.400000 -6.010000 0.551100 0.351100 MLC43 +16 -1.170000 -2.010000 0.551100 0.351100 MLF11 +17 -2.260000 -2.230000 0.551100 0.351100 MLF12 +18 -0.490000 -2.300000 0.551100 0.351100 MLF21 +19 -1.540000 -2.470000 0.551100 0.351100 MLF22 +20 -2.540000 -2.750000 0.551100 0.351100 MLF23 +21 -1.000000 -2.750000 0.551100 0.351100 MLF31 +22 -1.950000 -2.980000 0.551100 0.351100 MLF32 +23 -2.780000 -3.300000 0.551100 0.351100 MLF33 +24 -3.440000 -3.770000 0.551100 0.351100 MLF34 +25 -0.450000 -3.100000 0.551100 0.351100 MLF41 +26 -1.380000 -3.260000 0.551100 0.351100 MLF42 +27 -2.280000 -3.570000 0.551100 0.351100 MLF43 +28 -2.870000 -4.060000 0.551100 0.351100 MLF44 +29 -3.500000 -4.510000 0.551100 0.351100 MLF45 +30 -0.850000 -3.580000 0.551100 0.351100 MLF51 +31 -1.700000 -3.790000 0.551100 0.351100 MLF52 +32 -0.470000 -7.690000 0.551100 0.351100 MLO11 
+33 -1.650000 -7.420000 0.551100 0.351100 MLO12 +34 -1.210000 -7.930000 0.551100 0.351100 MLO21 +35 -2.350000 -7.580000 0.551100 0.351100 MLO22 +36 -0.600000 -8.400000 0.551100 0.351100 MLO31 +37 -1.920000 -8.120000 0.551100 0.351100 MLO32 +38 -3.110000 -7.670000 0.551100 0.351100 MLO33 +39 -1.400000 -8.560000 0.551100 0.351100 MLO41 +40 -2.750000 -8.210000 0.551100 0.351100 MLO42 +41 -3.910000 -7.620000 0.551100 0.351100 MLO43 +42 -0.840000 -6.390000 0.551100 0.351100 MLP11 +43 -1.710000 -6.320000 0.551100 0.351100 MLP12 +44 -2.240000 -5.870000 0.551100 0.351100 MLP13 +45 -0.440000 -6.900000 0.551100 0.351100 MLP21 +46 -1.220000 -6.760000 0.551100 0.351100 MLP22 +47 -0.970000 -7.220000 0.551100 0.351100 MLP31 +48 -1.900000 -6.880000 0.551100 0.351100 MLP32 +49 -2.470000 -6.390000 0.551100 0.351100 MLP33 +50 -2.990000 -5.850000 0.551100 0.351100 MLP34 +51 -3.420000 -3.120000 0.551100 0.351100 MLT11 +52 -4.100000 -4.200000 0.551100 0.351100 MLT12 +53 -4.040000 -5.030000 0.551100 0.351100 MLT13 +54 -3.780000 -5.770000 0.551100 0.351100 MLT14 +55 -3.210000 -6.440000 0.551100 0.351100 MLT15 +56 -2.570000 -7.010000 0.551100 0.351100 MLT16 +57 -3.320000 -2.550000 0.551100 0.351100 MLT21 +58 -4.260000 -3.520000 0.551100 0.351100 MLT22 +59 -4.720000 -4.710000 0.551100 0.351100 MLT23 +60 -4.520000 -5.590000 0.551100 0.351100 MLT24 +61 -4.040000 -6.350000 0.551100 0.351100 MLT25 +62 -3.280000 -7.060000 0.551100 0.351100 MLT26 +63 -4.340000 -2.900000 0.551100 0.351100 MLT31 +64 -5.040000 -4.050000 0.551100 0.351100 MLT32 +65 -5.200000 -5.210000 0.551100 0.351100 MLT33 +66 -4.820000 -6.140000 0.551100 0.351100 MLT34 +67 -4.090000 -7.000000 0.551100 0.351100 MLT35 +68 -5.210000 -3.450000 0.551100 0.351100 MLT41 +69 -5.640000 -4.620000 0.551100 0.351100 MLT42 +70 -5.500000 -5.730000 0.551100 0.351100 MLT43 +71 -4.910000 -6.720000 0.551100 0.351100 MLT44 +72 0.410000 -4.000000 0.551100 0.351100 MRC11 +73 1.170000 -4.130000 0.551100 0.351100 MRC12 +74 2.200000 -4.270000 0.551100 0.351100 MRC13 +75 2.800000 -4.710000 0.551100 0.351100 MRC14 +76 3.320000 -5.230000 0.551100 0.351100 MRC15 +77 0.800000 -4.560000 0.551100 0.351100 MRC21 +78 1.600000 -4.570000 0.551100 0.351100 MRC22 +79 2.140000 -4.970000 0.551100 0.351100 MRC23 +80 2.620000 -5.370000 0.551100 0.351100 MRC24 +81 1.260000 -5.050000 0.551100 0.351100 MRC31 +82 1.760000 -5.450000 0.551100 0.351100 MRC32 +83 1.280000 -5.930000 0.551100 0.351100 MRC33 +84 0.420000 -5.050000 0.551100 0.351100 MRC41 +85 0.810000 -5.540000 0.551100 0.351100 MRC42 +86 0.380000 -6.010000 0.551100 0.351100 MRC43 +87 1.130000 -2.010000 0.551100 0.351100 MRF11 +88 2.240000 -2.230000 0.551100 0.351100 MRF12 +89 0.460000 -2.290000 0.551100 0.351100 MRF21 +90 1.510000 -2.470000 0.551100 0.351100 MRF22 +91 2.520000 -2.740000 0.551100 0.351100 MRF23 +92 0.970000 -2.740000 0.551100 0.351100 MRF31 +93 1.920000 -2.980000 0.551100 0.351100 MRF32 +94 2.760000 -3.300000 0.551100 0.351100 MRF33 +95 3.420000 -3.770000 0.551100 0.351100 MRF34 +96 0.420000 -3.100000 0.551100 0.351100 MRF41 +97 1.360000 -3.260000 0.551100 0.351100 MRF42 +98 2.260000 -3.570000 0.551100 0.351100 MRF43 +99 2.840000 -4.050000 0.551100 0.351100 MRF44 +100 3.480000 -4.510000 0.551100 0.351100 MRF45 +101 0.820000 -3.580000 0.551100 0.351100 MRF51 +102 1.670000 -3.790000 0.551100 0.351100 MRF52 +103 0.470000 -7.690000 0.551100 0.351100 MRO11 +104 1.640000 -7.420000 0.551100 0.351100 MRO12 +105 1.200000 -7.930000 0.551100 0.351100 MRO21 +106 2.350000 -7.580000 0.551100 0.351100 MRO22 +107 0.580000 -8.390000 
0.551100 0.351100 MRO31 +108 1.910000 -8.110000 0.551100 0.351100 MRO32 +109 3.110000 -7.670000 0.551100 0.351100 MRO33 +110 1.380000 -8.570000 0.551100 0.351100 MRO41 +111 2.750000 -8.220000 0.551100 0.351100 MRO42 +112 3.900000 -7.610000 0.551100 0.351100 MRO43 +113 0.820000 -6.380000 0.551100 0.351100 MRP11 +114 1.700000 -6.320000 0.551100 0.351100 MRP12 +115 2.220000 -5.870000 0.551100 0.351100 MRP13 +116 0.420000 -6.900000 0.551100 0.351100 MRP21 +117 1.200000 -6.750000 0.551100 0.351100 MRP22 +118 0.960000 -7.220000 0.551100 0.351100 MRP31 +119 1.880000 -6.870000 0.551100 0.351100 MRP32 +120 2.470000 -6.390000 0.551100 0.351100 MRP33 +121 2.990000 -5.850000 0.551100 0.351100 MRP34 +122 3.390000 -3.120000 0.551100 0.351100 MRT11 +123 4.070000 -4.190000 0.551100 0.351100 MRT12 +124 4.020000 -5.030000 0.551100 0.351100 MRT13 +125 3.760000 -5.770000 0.551100 0.351100 MRT14 +126 3.200000 -6.430000 0.551100 0.351100 MRT15 +127 2.570000 -7.010000 0.551100 0.351100 MRT16 +128 3.300000 -2.540000 0.551100 0.351100 MRT21 +129 4.230000 -3.510000 0.551100 0.351100 MRT22 +130 4.700000 -4.710000 0.551100 0.351100 MRT23 +131 4.500000 -5.590000 0.551100 0.351100 MRT24 +132 4.020000 -6.360000 0.551100 0.351100 MRT25 +133 3.260000 -7.060000 0.551100 0.351100 MRT26 +134 4.310000 -2.900000 0.551100 0.351100 MRT31 +135 5.020000 -4.050000 0.551100 0.351100 MRT32 +136 5.180000 -5.210000 0.551100 0.351100 MRT33 +137 4.800000 -6.140000 0.551100 0.351100 MRT34 +138 4.080000 -7.000000 0.551100 0.351100 MRT35 +139 5.200000 -3.450000 0.551100 0.351100 MRT41 +140 5.620000 -4.610000 0.551100 0.351100 MRT42 +141 5.480000 -5.730000 0.551100 0.351100 MRT43 +142 4.900000 -6.710000 0.551100 0.351100 MRT44 +143 0.000000 -4.510000 0.551100 0.351100 MZC01 +144 0.000000 -5.550000 0.551100 0.351100 MZC02 +145 0.000000 -1.930000 0.551100 0.351100 MZF01 +146 0.000000 -2.660000 0.551100 0.351100 MZF02 +147 0.000000 -3.510000 0.551100 0.351100 MZF03 +148 0.000000 -8.050000 0.551100 0.351100 MZO01 +149 0.000000 -8.660000 0.551100 0.351100 MZO02 +150 0.000000 -6.470000 0.551100 0.351100 MZP01 +151 0.000000 -7.290000 0.551100 0.351100 MZP02 +152 5.000000 -2.000000 0.551100 0.351100 SCALE +153 -5.50000 -1.500000 0.551100 0.351100 COMNT diff --git a/mne/channels/data/layouts/CTF275.lay b/mne/channels/data/layouts/CTF275.lay new file mode 100644 index 0000000..2af28d3 --- /dev/null +++ b/mne/channels/data/layouts/CTF275.lay @@ -0,0 +1,275 @@ +1 -0.029414 0.428191 0.100000 0.040000 MLC11 +2 -0.105398 0.378716 0.100000 0.040000 MLC12 +3 -0.187924 0.341472 0.100000 0.040000 MLC13 +4 -0.268071 0.285079 0.100000 0.040000 MLC14 +5 -0.330692 0.221374 0.100000 0.040000 MLC15 +6 -0.378697 0.144627 0.100000 0.040000 MLC16 +7 -0.411309 0.049716 0.100000 0.040000 MLC17 +8 -0.112105 0.295427 0.100000 0.040000 MLC21 +9 -0.189457 0.259287 0.100000 0.040000 MLC22 +10 -0.254180 0.203140 0.100000 0.040000 MLC23 +11 -0.298355 0.137997 0.100000 0.040000 MLC24 +12 -0.337649 0.050767 0.100000 0.040000 MLC25 +13 -0.213750 0.138862 0.100000 0.040000 MLC31 +14 -0.266243 0.056433 0.100000 0.040000 MLC32 +15 -0.150010 0.191395 0.100000 0.040000 MLC41 +16 -0.188739 0.067511 0.100000 0.040000 MLC42 +17 -0.027405 0.285532 0.100000 0.040000 MLC51 +18 -0.072194 0.217381 0.100000 0.040000 MLC52 +19 -0.130467 0.119358 0.100000 0.040000 MLC53 +20 -0.119656 0.041473 0.100000 0.040000 MLC54 +21 -0.083927 -0.021961 0.100000 0.040000 MLC55 +22 -0.027810 0.155198 0.100000 0.040000 MLC61 +23 -0.062042 0.088583 0.100000 0.040000 MLC62 +24 -0.025587 0.023975 0.100000 0.040000 
MLC63 +25 -0.154623 0.879985 0.100000 0.040000 MLF11 +26 -0.322264 0.823233 0.100000 0.040000 MLF12 +27 -0.478342 0.740223 0.100000 0.040000 MLF13 +28 -0.622338 0.633371 0.100000 0.040000 MLF14 +29 -0.052995 0.810917 0.100000 0.040000 MLF21 +30 -0.193258 0.778479 0.100000 0.040000 MLF22 +31 -0.319702 0.726613 0.100000 0.040000 MLF23 +32 -0.447065 0.639878 0.100000 0.040000 MLF24 +33 -0.551024 0.545805 0.100000 0.040000 MLF25 +34 -0.106993 0.717661 0.100000 0.040000 MLF31 +35 -0.227303 0.683510 0.100000 0.040000 MLF32 +36 -0.344973 0.613898 0.100000 0.040000 MLF33 +37 -0.437794 0.535071 0.100000 0.040000 MLF34 +38 -0.516944 0.440135 0.100000 0.040000 MLF35 +39 -0.037498 0.646457 0.100000 0.040000 MLF41 +40 -0.145663 0.629747 0.100000 0.040000 MLF42 +41 -0.257022 0.575998 0.100000 0.040000 MLF43 +42 -0.344741 0.511350 0.100000 0.040000 MLF44 +43 -0.434608 0.430669 0.100000 0.040000 MLF45 +44 -0.512928 0.325699 0.100000 0.040000 MLF46 +45 -0.065241 0.564676 0.100000 0.040000 MLF51 +46 -0.176866 0.530203 0.100000 0.040000 MLF52 +47 -0.264799 0.476609 0.100000 0.040000 MLF53 +48 -0.344149 0.409817 0.100000 0.040000 MLF54 +49 -0.432009 0.328939 0.100000 0.040000 MLF55 +50 -0.502082 0.225317 0.100000 0.040000 MLF56 +51 -0.108196 0.473300 0.100000 0.040000 MLF61 +52 -0.191454 0.428184 0.100000 0.040000 MLF62 +53 -0.268505 0.371569 0.100000 0.040000 MLF63 +54 -0.343162 0.314227 0.100000 0.040000 MLF64 +55 -0.415355 0.241209 0.100000 0.040000 MLF65 +56 -0.459435 0.157639 0.100000 0.040000 MLF66 +57 -0.484998 0.050963 0.100000 0.040000 MLF67 +58 -0.086701 -0.382545 0.100000 0.040000 MLO11 +59 -0.173621 -0.361571 0.100000 0.040000 MLO12 +60 -0.257557 -0.329066 0.100000 0.040000 MLO13 +61 -0.337129 -0.278810 0.100000 0.040000 MLO14 +62 -0.050176 -0.456757 0.100000 0.040000 MLO21 +63 -0.138937 -0.440153 0.100000 0.040000 MLO22 +64 -0.234625 -0.414329 0.100000 0.040000 MLO23 +65 -0.323700 -0.370345 0.100000 0.040000 MLO24 +66 -0.099528 -0.519048 0.100000 0.040000 MLO31 +67 -0.201576 -0.499713 0.100000 0.040000 MLO32 +68 -0.300736 -0.464088 0.100000 0.040000 MLO33 +69 -0.395767 -0.412426 0.100000 0.040000 MLO34 +70 -0.054171 -0.598130 0.100000 0.040000 MLO41 +71 -0.162924 -0.587463 0.100000 0.040000 MLO42 +72 -0.270457 -0.559057 0.100000 0.040000 MLO43 +73 -0.375045 -0.514503 0.100000 0.040000 MLO44 +74 -0.114841 -0.674066 0.100000 0.040000 MLO51 +75 -0.232779 -0.654920 0.100000 0.040000 MLO52 +76 -0.347032 -0.617457 0.100000 0.040000 MLO53 +77 -0.050706 -0.086860 0.100000 0.040000 MLP11 +78 -0.157880 -0.022819 0.100000 0.040000 MLP12 +79 -0.027384 -0.156541 0.100000 0.040000 MLP21 +80 -0.125969 -0.090281 0.100000 0.040000 MLP22 +81 -0.229468 -0.007021 0.100000 0.040000 MLP23 +82 -0.063851 -0.221282 0.100000 0.040000 MLP31 +83 -0.117483 -0.164444 0.100000 0.040000 MLP32 +84 -0.191075 -0.130343 0.100000 0.040000 MLP33 +85 -0.256310 -0.076997 0.100000 0.040000 MLP34 +86 -0.301408 -0.017428 0.100000 0.040000 MLP35 +87 -0.145628 -0.236552 0.100000 0.040000 MLP41 +88 -0.211609 -0.201084 0.100000 0.040000 MLP42 +89 -0.277557 -0.161143 0.100000 0.040000 MLP43 +90 -0.330491 -0.093163 0.100000 0.040000 MLP44 +91 -0.372987 -0.024823 0.100000 0.040000 MLP45 +92 -0.032003 -0.311166 0.100000 0.040000 MLP51 +93 -0.120201 -0.309697 0.100000 0.040000 MLP52 +94 -0.197411 -0.282930 0.100000 0.040000 MLP53 +95 -0.273221 -0.242434 0.100000 0.040000 MLP54 +96 -0.341326 -0.192353 0.100000 0.040000 MLP55 +97 -0.397869 -0.117824 0.100000 0.040000 MLP56 +98 -0.439023 -0.040798 0.100000 0.040000 MLP57 +99 -0.600517 0.341742 
0.100000 0.040000 MLT11 +100 -0.583854 0.221014 0.100000 0.040000 MLT12 +101 -0.546672 0.118228 0.100000 0.040000 MLT13 +102 -0.525679 -0.043954 0.100000 0.040000 MLT14 +103 -0.482366 -0.132402 0.100000 0.040000 MLT15 +104 -0.408785 -0.217740 0.100000 0.040000 MLT16 +105 -0.657080 0.441193 0.100000 0.040000 MLT21 +106 -0.681569 0.225254 0.100000 0.040000 MLT22 +107 -0.647357 0.101107 0.100000 0.040000 MLT23 +108 -0.618158 -0.017119 0.100000 0.040000 MLT24 +109 -0.570925 -0.147553 0.100000 0.040000 MLT25 +110 -0.505869 -0.237678 0.100000 0.040000 MLT26 +111 -0.406336 -0.310886 0.100000 0.040000 MLT27 +112 -0.758025 0.508412 0.100000 0.040000 MLT31 +113 -0.761740 0.316423 0.100000 0.040000 MLT32 +114 -0.751268 0.088675 0.100000 0.040000 MLT33 +115 -0.712573 -0.047448 0.100000 0.040000 MLT34 +116 -0.658112 -0.159355 0.100000 0.040000 MLT35 +117 -0.592395 -0.256839 0.100000 0.040000 MLT36 +118 -0.495312 -0.345113 0.100000 0.040000 MLT37 +119 -0.885393 0.353401 0.100000 0.040000 MLT41 +120 -0.847844 0.160648 0.100000 0.040000 MLT42 +121 -0.823787 -0.043736 0.100000 0.040000 MLT43 +122 -0.758805 -0.175411 0.100000 0.040000 MLT44 +123 -0.684634 -0.280647 0.100000 0.040000 MLT45 +124 -0.591783 -0.373867 0.100000 0.040000 MLT46 +125 -0.476572 -0.454666 0.100000 0.040000 MLT47 +126 -0.983285 0.161080 0.100000 0.040000 MLT51 +127 -0.944753 -0.028756 0.100000 0.040000 MLT52 +128 -0.872989 -0.188195 0.100000 0.040000 MLT53 +129 -0.785517 -0.310620 0.100000 0.040000 MLT54 +130 -0.688014 -0.407791 0.100000 0.040000 MLT55 +131 -0.571347 -0.497554 0.100000 0.040000 MLT56 +132 -0.457303 -0.565438 0.100000 0.040000 MLT57 +133 0.063389 0.426606 0.100000 0.040000 MRC11 +134 0.137902 0.375428 0.100000 0.040000 MRC12 +135 0.219516 0.336386 0.100000 0.040000 MRC13 +136 0.297688 0.277771 0.100000 0.040000 MRC14 +137 0.355955 0.213304 0.100000 0.040000 MRC15 +138 0.404150 0.135598 0.100000 0.040000 MRC16 +139 0.434870 0.040656 0.100000 0.040000 MRC17 +140 0.142678 0.292126 0.100000 0.040000 MRC21 +141 0.219470 0.254066 0.100000 0.040000 MRC22 +142 0.281922 0.196472 0.100000 0.040000 MRC23 +143 0.325059 0.128269 0.100000 0.040000 MRC24 +144 0.361805 0.044213 0.100000 0.040000 MRC25 +145 0.240157 0.132538 0.100000 0.040000 MRC31 +146 0.290750 0.048681 0.100000 0.040000 MRC32 +147 0.178346 0.187415 0.100000 0.040000 MRC41 +148 0.213493 0.062545 0.100000 0.040000 MRC42 +149 0.058440 0.284194 0.100000 0.040000 MRC51 +150 0.101359 0.215083 0.100000 0.040000 MRC52 +151 0.156968 0.115486 0.100000 0.040000 MRC53 +152 0.144211 0.038238 0.100000 0.040000 MRC54 +153 0.106635 -0.024115 0.100000 0.040000 MRC55 +154 0.055338 0.153928 0.100000 0.040000 MRC61 +155 0.088138 0.086634 0.100000 0.040000 MRC62 +156 0.049557 0.022680 0.100000 0.040000 MRC63 +157 0.197726 0.874477 0.100000 0.040000 MRF11 +158 0.364689 0.811426 0.100000 0.040000 MRF12 +159 0.518245 0.722181 0.100000 0.040000 MRF13 +160 0.658136 0.611411 0.100000 0.040000 MRF14 +161 0.095713 0.807816 0.100000 0.040000 MRF21 +162 0.233999 0.772267 0.100000 0.040000 MRF22 +163 0.358821 0.715911 0.100000 0.040000 MRF23 +164 0.484765 0.623142 0.100000 0.040000 MRF24 +165 0.585405 0.526324 0.100000 0.040000 MRF25 +166 0.147633 0.713396 0.100000 0.040000 MRF31 +167 0.265823 0.676341 0.100000 0.040000 MRF32 +168 0.382256 0.601823 0.100000 0.040000 MRF33 +169 0.473850 0.521768 0.100000 0.040000 MRF34 +170 0.548726 0.424836 0.100000 0.040000 MRF35 +171 0.075451 0.644959 0.100000 0.040000 MRF41 +172 0.182924 0.624842 0.100000 0.040000 MRF42 +173 0.292900 0.568899 0.100000 0.040000 
MRF43 +174 0.379529 0.501620 0.100000 0.040000 MRF44 +175 0.465778 0.418231 0.100000 0.040000 MRF45 +176 0.541913 0.311405 0.100000 0.040000 MRF46 +177 0.102375 0.561860 0.100000 0.040000 MRF51 +178 0.212879 0.524802 0.100000 0.040000 MRF52 +179 0.299077 0.468924 0.100000 0.040000 MRF53 +180 0.376186 0.400507 0.100000 0.040000 MRF54 +181 0.461150 0.316311 0.100000 0.040000 MRF55 +182 0.527532 0.213125 0.100000 0.040000 MRF56 +183 0.143360 0.469857 0.100000 0.040000 MRF61 +184 0.224730 0.422291 0.100000 0.040000 MRF62 +185 0.301012 0.364856 0.100000 0.040000 MRF63 +186 0.373056 0.305526 0.100000 0.040000 MRF64 +187 0.443172 0.230008 0.100000 0.040000 MRF65 +188 0.482916 0.144546 0.100000 0.040000 MRF66 +189 0.509363 0.039864 0.100000 0.040000 MRF67 +190 0.101312 -0.384464 0.100000 0.040000 MRO11 +191 0.188777 -0.365285 0.100000 0.040000 MRO12 +192 0.274286 -0.333994 0.100000 0.040000 MRO13 +193 0.354824 -0.285987 0.100000 0.040000 MRO14 +194 0.062633 -0.457476 0.100000 0.040000 MRO21 +195 0.152570 -0.440791 0.100000 0.040000 MRO22 +196 0.248565 -0.418432 0.100000 0.040000 MRO23 +197 0.338845 -0.376241 0.100000 0.040000 MRO24 +198 0.111160 -0.521375 0.100000 0.040000 MRO31 +199 0.212466 -0.502957 0.100000 0.040000 MRO32 +200 0.313063 -0.468465 0.100000 0.040000 MRO33 +201 0.409385 -0.418933 0.100000 0.040000 MRO34 +202 0.063270 -0.599845 0.100000 0.040000 MRO41 +203 0.172480 -0.589865 0.100000 0.040000 MRO42 +204 0.279919 -0.563495 0.100000 0.040000 MRO43 +205 0.386742 -0.520993 0.100000 0.040000 MRO44 +206 0.121969 -0.676100 0.100000 0.040000 MRO51 +207 0.240331 -0.658743 0.100000 0.040000 MRO52 +208 0.356156 -0.623026 0.100000 0.040000 MRO53 +209 0.071855 -0.088269 0.100000 0.040000 MRP11 +210 0.180874 -0.026656 0.100000 0.040000 MRP12 +211 0.047839 -0.157479 0.100000 0.040000 MRP21 +212 0.147221 -0.093053 0.100000 0.040000 MRP22 +213 0.252807 -0.012686 0.100000 0.040000 MRP23 +214 0.082012 -0.222790 0.100000 0.040000 MRP31 +215 0.136825 -0.166819 0.100000 0.040000 MRP32 +216 0.210796 -0.134697 0.100000 0.040000 MRP33 +217 0.277587 -0.083946 0.100000 0.040000 MRP34 +218 0.322867 -0.024718 0.100000 0.040000 MRP35 +219 0.162954 -0.240118 0.100000 0.040000 MRP41 +220 0.230510 -0.205793 0.100000 0.040000 MRP42 +221 0.296283 -0.169213 0.100000 0.040000 MRP43 +222 0.351532 -0.101316 0.100000 0.040000 MRP44 +223 0.395383 -0.032706 0.100000 0.040000 MRP45 +224 0.048690 -0.312307 0.100000 0.040000 MRP51 +225 0.137008 -0.312230 0.100000 0.040000 MRP52 +226 0.214275 -0.287336 0.100000 0.040000 MRP53 +227 0.290637 -0.248388 0.100000 0.040000 MRP54 +228 0.360555 -0.199475 0.100000 0.040000 MRP55 +229 0.419086 -0.126737 0.100000 0.040000 MRP56 +230 0.463976 -0.050387 0.100000 0.040000 MRP57 +231 0.628409 0.323946 0.100000 0.040000 MRT11 +232 0.609835 0.205866 0.100000 0.040000 MRT12 +233 0.571838 0.105198 0.100000 0.040000 MRT13 +234 0.544252 -0.054539 0.100000 0.040000 MRT14 +235 0.500732 -0.143104 0.100000 0.040000 MRT15 +236 0.427582 -0.225716 0.100000 0.040000 MRT16 +237 0.685440 0.421411 0.100000 0.040000 MRT21 +238 0.705800 0.208084 0.100000 0.040000 MRT22 +239 0.667392 0.088109 0.100000 0.040000 MRT23 +240 0.637062 -0.030086 0.100000 0.040000 MRT24 +241 0.588417 -0.159092 0.100000 0.040000 MRT25 +242 0.522350 -0.247039 0.100000 0.040000 MRT26 +243 0.422093 -0.318167 0.100000 0.040000 MRT27 +244 0.789789 0.482334 0.100000 0.040000 MRT31 +245 0.786599 0.293212 0.100000 0.040000 MRT32 +246 0.770320 0.070984 0.100000 0.040000 MRT33 +247 0.731214 -0.061690 0.100000 0.040000 MRT34 +248 0.674802 
-0.172109 0.100000 0.040000 MRT35 +249 0.607500 -0.268226 0.100000 0.040000 MRT36 +250 0.510484 -0.353209 0.100000 0.040000 MRT37 +251 0.910695 0.324672 0.100000 0.040000 MRT41 +252 0.867982 0.137317 0.100000 0.040000 MRT42 +253 0.839920 -0.060661 0.100000 0.040000 MRT43 +254 0.773256 -0.189639 0.100000 0.040000 MRT44 +255 0.698444 -0.293384 0.100000 0.040000 MRT45 +256 0.604482 -0.385347 0.100000 0.040000 MRT46 +257 0.489291 -0.462983 0.100000 0.040000 MRT47 +258 1.000000 0.135648 0.100000 0.040000 MRT51 +259 0.959092 -0.049055 0.100000 0.040000 MRT52 +260 0.886964 -0.204289 0.100000 0.040000 MRT53 +261 0.796842 -0.324881 0.100000 0.040000 MRT54 +262 0.698769 -0.420596 0.100000 0.040000 MRT55 +263 0.582500 -0.506810 0.100000 0.040000 MRT56 +264 0.467934 -0.572706 0.100000 0.040000 MRT57 +265 0.016063 0.355556 0.100000 0.040000 MZC01 +266 0.014747 0.217488 0.100000 0.040000 MZC02 +267 0.013199 0.087763 0.100000 0.040000 MZC03 +268 0.011197 -0.046263 0.100000 0.040000 MZC04 +269 0.022267 0.897778 0.100000 0.040000 MZF01 +270 0.019840 0.730557 0.100000 0.040000 MZF02 +271 0.017559 0.517279 0.100000 0.040000 MZF03 +272 0.007392 -0.378522 0.100000 0.040000 MZO01 +273 0.005634 -0.528155 0.100000 0.040000 MZO02 +274 0.003722 -0.675585 0.100000 0.040000 MZO03 +275 0.008864 -0.248776 0.100000 0.040000 MZP01 diff --git a/mne/channels/data/layouts/EEG1005.lay b/mne/channels/data/layouts/EEG1005.lay new file mode 100644 index 0000000..a600468 --- /dev/null +++ b/mne/channels/data/layouts/EEG1005.lay @@ -0,0 +1,337 @@ +1 -0.485328 1.493835 0.069221 0.051916 Fp1 +2 0.000000 1.570696 0.069221 0.051916 Fpz +3 0.485501 1.493884 0.069221 0.051916 Fp2 +4 -1.154207 1.588656 0.069221 0.051916 AF9 +5 -0.923319 1.270781 0.069221 0.051916 AF7 +6 -0.706117 1.226029 0.069221 0.051916 AF5 +7 -0.477022 1.197254 0.069221 0.051916 AF3 +8 -0.240008 1.182594 0.069221 0.051916 AF1 +9 0.000000 1.178022 0.069221 0.051916 AFz +10 0.240008 1.182594 0.069221 0.051916 AF2 +11 0.476904 1.197159 0.069221 0.051916 AF4 +12 0.706117 1.226029 0.069221 0.051916 AF6 +13 0.923319 1.270781 0.069221 0.051916 AF8 +14 1.154207 1.588656 0.069221 0.051916 AF10 +15 -1.588376 1.154294 0.069221 0.051916 F9 +16 -1.270781 0.923319 0.069221 0.051916 F7 +17 -0.968950 0.852434 0.069221 0.051916 F5 +18 -0.652084 0.812357 0.069221 0.051916 F3 +19 -0.327689 0.791876 0.069221 0.051916 F1 +20 0.000000 0.785398 0.069221 0.051916 Fz +21 0.327689 0.791876 0.069221 0.051916 F2 +22 0.652084 0.812357 0.069221 0.051916 F4 +23 0.968950 0.852434 0.069221 0.051916 F6 +24 1.270781 0.923319 0.069221 0.051916 F8 +25 1.588496 1.154168 0.069221 0.051916 F10 +26 -1.867677 0.606883 0.069221 0.051916 FT9 +27 -1.493930 0.485359 0.069221 0.051916 FT7 +28 -1.126134 0.436152 0.069221 0.051916 FC5 +29 -0.752811 0.409634 0.069221 0.051916 FC3 +30 -0.376942 0.396836 0.069221 0.051916 FC1 +31 0.000000 0.392844 0.069221 0.051916 FCz +32 0.376942 0.396836 0.069221 0.051916 FC2 +33 0.752811 0.409634 0.069221 0.051916 FC4 +34 1.126134 0.436152 0.069221 0.051916 FC6 +35 1.493930 0.485359 0.069221 0.051916 FT8 +36 1.867677 0.606883 0.069221 0.051916 FT10 +37 -1.963487 -0.000213 0.069221 0.051916 T9 +38 -1.570796 0.000000 0.069221 0.051916 T7 +39 -1.178106 0.000128 0.069221 0.051916 C5 +40 -0.785398 0.000111 0.069221 0.051916 C3 +41 -0.392736 0.000205 0.069221 0.051916 C1 +42 0.000000 0.000200 0.069221 0.051916 Cz +43 0.392736 0.000103 0.069221 0.051916 C2 +44 0.785398 0.000111 0.069221 0.051916 C4 +45 1.178106 0.000128 0.069221 0.051916 C6 +46 1.570796 -0.000000 0.069221 0.051916 T8 
+47 1.963487 -0.000000 0.069221 0.051916 T10 +48 -1.867677 -0.606883 0.069221 0.051916 TP9 +49 -1.494026 -0.485389 0.069221 0.051916 TP7 +50 -1.126048 -0.435839 0.069221 0.051916 CP5 +51 -0.752775 -0.409460 0.069221 0.051916 CP3 +52 -0.376804 -0.396486 0.069221 0.051916 CP1 +53 -0.000000 -0.392551 0.069221 0.051916 CPz +54 0.376804 -0.396486 0.069221 0.051916 CP2 +55 0.752795 -0.409357 0.069221 0.051916 CP4 +56 1.126048 -0.435839 0.069221 0.051916 CP6 +57 1.494026 -0.485389 0.069221 0.051916 TP8 +58 1.867603 -0.607072 0.069221 0.051916 TP10 +59 -1.588496 -1.154168 0.069221 0.051916 P9 +60 -1.270862 -0.923378 0.069221 0.051916 P7 +61 -0.969077 -0.852293 0.069221 0.051916 P5 +62 -0.652231 -0.811998 0.069221 0.051916 P3 +63 -0.327776 -0.791360 0.069221 0.051916 P1 +64 -0.000000 -0.785257 0.069221 0.051916 Pz +65 0.327776 -0.791360 0.069221 0.051916 P2 +66 0.652231 -0.811998 0.069221 0.051916 P4 +67 0.969077 -0.852293 0.069221 0.051916 P6 +68 1.270862 -0.923378 0.069221 0.051916 P8 +69 1.588496 -1.154168 0.069221 0.051916 P10 +70 -1.154207 -1.588656 0.069221 0.051916 PO9 +71 -0.923319 -1.270781 0.069221 0.051916 PO7 +72 -0.706303 -1.225606 0.069221 0.051916 PO5 +73 -0.476710 -1.197888 0.069221 0.051916 PO3 +74 -0.240097 -1.182523 0.069221 0.051916 PO1 +75 -0.000000 -1.178022 0.069221 0.051916 POz +76 0.240223 -1.182505 0.069221 0.051916 PO2 +77 0.476710 -1.197888 0.069221 0.051916 PO4 +78 0.706303 -1.225606 0.069221 0.051916 PO6 +79 0.923319 -1.270781 0.069221 0.051916 PO8 +80 1.154207 -1.588656 0.069221 0.051916 PO10 +81 -0.485359 -1.493930 0.069221 0.051916 O1 +82 -0.000000 -1.570796 0.069221 0.051916 Oz +83 0.485359 -1.493930 0.069221 0.051916 O2 +84 -0.606613 -1.867239 0.069221 0.051916 I1 +85 -0.000000 -1.963478 0.069221 0.051916 Iz +86 0.606613 -1.867239 0.069221 0.051916 I2 +87 -0.802226 1.574520 0.069221 0.051916 AFp9h +88 -0.626475 1.393612 0.069221 0.051916 AFp7h +89 -0.451133 1.382849 0.069221 0.051916 AFp5h +90 -0.271959 1.376738 0.069221 0.051916 AFp3h +91 -0.090887 1.374548 0.069221 0.051916 AFp1h +92 0.090887 1.374548 0.069221 0.051916 AFp2h +93 0.271959 1.376738 0.069221 0.051916 AFp4h +94 0.451133 1.382849 0.069221 0.051916 AFp6h +95 0.626475 1.393612 0.069221 0.051916 AFp8h +96 0.802226 1.574520 0.069221 0.051916 AFp10h +97 -1.249550 1.249550 0.069221 0.051916 AFF9h +98 -0.982948 1.075122 0.069221 0.051916 AFF7h +99 -0.713694 1.024626 0.069221 0.051916 AFF5h +100 -0.432315 0.996167 0.069221 0.051916 AFF3h +101 -0.144727 0.983315 0.069221 0.051916 AFF1h +102 0.144727 0.983315 0.069221 0.051916 AFF2h +103 0.432315 0.996167 0.069221 0.051916 AFF4h +104 0.713694 1.024626 0.069221 0.051916 AFF6h +105 0.982881 1.075049 0.069221 0.051916 AFF8h +106 1.249550 1.249550 0.069221 0.051916 AFF10h +107 -1.574645 0.802293 0.069221 0.051916 FFT9h +108 -1.232019 0.675885 0.069221 0.051916 FFT7h +109 -0.886990 0.627578 0.069221 0.051916 FFC5h +110 -0.534535 0.601827 0.069221 0.051916 FFC3h +111 -0.178478 0.590622 0.069221 0.051916 FFC1h +112 0.178478 0.590622 0.069221 0.051916 FFC2h +113 0.534535 0.601827 0.069221 0.051916 FFC4h +114 0.886990 0.627578 0.069221 0.051916 FFC6h +115 1.232019 0.675885 0.069221 0.051916 FFT8h +116 1.574645 0.802293 0.069221 0.051916 FFT10h +117 -1.745475 0.276484 0.069221 0.051916 FTT9h +118 -1.358553 0.230430 0.069221 0.051916 FTT7h +119 -0.971386 0.211155 0.069221 0.051916 FCC5h +120 -0.583084 0.201295 0.069221 0.051916 FCC3h +121 -0.194460 0.196994 0.069221 0.051916 FCC1h +122 0.194460 0.196994 0.069221 0.051916 FCC2h +123 0.583084 0.201295 0.069221 0.051916 
FCC4h +124 0.971386 0.211155 0.069221 0.051916 FCC6h +125 1.358553 0.230430 0.069221 0.051916 FTT8h +126 1.745475 0.276484 0.069221 0.051916 FTT10h +127 -1.745506 -0.276309 0.069221 0.051916 TTP9h +128 -1.358573 -0.230293 0.069221 0.051916 TTP7h +129 -0.971375 -0.211008 0.069221 0.051916 CCP5h +130 -0.583085 -0.200906 0.069221 0.051916 CCP3h +131 -0.194448 -0.196679 0.069221 0.051916 CCP1h +132 0.194448 -0.196679 0.069221 0.051916 CCP2h +133 0.583078 -0.201010 0.069221 0.051916 CCP4h +134 0.971375 -0.211008 0.069221 0.051916 CCP6h +135 1.358573 -0.230293 0.069221 0.051916 TTP8h +136 1.745475 -0.276484 0.069221 0.051916 TTP10h +137 -1.574667 -0.802213 0.069221 0.051916 TPP9h +138 -1.232021 -0.675979 0.069221 0.051916 TPP7h +139 -0.887025 -0.627306 0.069221 0.051916 CPP5h +140 -0.534524 -0.601312 0.069221 0.051916 CPP3h +141 -0.178473 -0.590144 0.069221 0.051916 CPP1h +142 0.178473 -0.590144 0.069221 0.051916 CPP2h +143 0.534524 -0.601312 0.069221 0.051916 CPP4h +144 0.887025 -0.627306 0.069221 0.051916 CPP6h +145 1.231976 -0.676032 0.069221 0.051916 TPP8h +146 1.574586 -0.802352 0.069221 0.051916 TPP10h +147 -1.249639 -1.249639 0.069221 0.051916 PPO9h +148 -0.983137 -1.074700 0.069221 0.051916 PPO7h +149 -0.713821 -1.024109 0.069221 0.051916 PPO5h +150 -0.432363 -0.995909 0.069221 0.051916 PPO3h +151 -0.144761 -0.982953 0.069221 0.051916 PPO1h +152 0.144761 -0.982953 0.069221 0.051916 PPO2h +153 0.432253 -0.995937 0.069221 0.051916 PPO4h +154 0.713967 -1.023998 0.069221 0.051916 PPO6h +155 0.983137 -1.074700 0.069221 0.051916 PPO8h +156 1.249639 -1.249639 0.069221 0.051916 PPO10h +157 -0.802293 -1.574645 0.069221 0.051916 POO9h +158 -0.626849 -1.393237 0.069221 0.051916 POO7h +159 -0.451236 -1.382715 0.069221 0.051916 POO5h +160 -0.271951 -1.377572 0.069221 0.051916 POO3h +161 -0.090910 -1.374606 0.069221 0.051916 POO1h +162 0.090910 -1.374606 0.069221 0.051916 POO2h +163 0.271951 -1.377572 0.069221 0.051916 POO4h +164 0.451236 -1.382715 0.069221 0.051916 POO6h +165 0.626849 -1.393237 0.069221 0.051916 POO8h +166 0.802293 -1.574645 0.069221 0.051916 POO10h +167 -0.276453 -1.745460 0.069221 0.051916 OI1h +168 0.276453 -1.745460 0.069221 0.051916 OI2h +169 -0.245655 1.551367 0.069221 0.051916 Fp1h +170 0.245655 1.551367 0.069221 0.051916 Fp2h +171 -1.038573 1.429729 0.069221 0.051916 AF9h +172 -0.816811 1.245775 0.069221 0.051916 AF7h +173 -0.592502 1.210176 0.069221 0.051916 AF5h +174 -0.359066 1.188527 0.069221 0.051916 AF3h +175 -0.120203 1.179114 0.069221 0.051916 AF1h +176 0.120212 1.179076 0.069221 0.051916 AF2h +177 0.359066 1.188527 0.069221 0.051916 AF4h +178 0.592545 1.210263 0.069221 0.051916 AF6h +179 0.816811 1.245775 0.069221 0.051916 AF8h +180 1.038668 1.429679 0.069221 0.051916 AF10h +181 -1.429588 1.038701 0.069221 0.051916 F9h +182 -1.122287 0.883303 0.069221 0.051916 F7h +183 -0.811863 0.829210 0.069221 0.051916 F5h +184 -0.490601 0.800049 0.069221 0.051916 F3h +185 -0.164017 0.787126 0.069221 0.051916 F1h +186 0.164017 0.787126 0.069221 0.051916 F2h +187 0.490601 0.800049 0.069221 0.051916 F4h +188 0.811863 0.829210 0.069221 0.051916 F6h +189 1.122287 0.883303 0.069221 0.051916 F8h +190 1.429588 1.038701 0.069221 0.051916 F10h +191 -1.680799 0.546075 0.069221 0.051916 FT9h +192 -1.310995 0.457012 0.069221 0.051916 FT7h +193 -0.939857 0.420814 0.069221 0.051916 FC5h +194 -0.565142 0.401905 0.069221 0.051916 FC3h +195 -0.188491 0.393826 0.069221 0.051916 FC1h +196 0.188491 0.393826 0.069221 0.051916 FC2h +197 0.565142 0.401905 0.069221 0.051916 FC4h +198 0.939857 0.420814 
0.069221 0.051916 FC6h +199 1.310995 0.457012 0.069221 0.051916 FT8h +200 1.680740 0.546236 0.069221 0.051916 FT10h +201 -1.767191 0.000000 0.069221 0.051916 T9h +202 -1.374500 0.000000 0.069221 0.051916 T7h +203 -0.981850 0.000118 0.069221 0.051916 C5h +204 -0.589058 0.000212 0.069221 0.051916 C3h +205 -0.196395 0.000101 0.069221 0.051916 C1h +206 0.196395 0.000201 0.069221 0.051916 C2h +207 0.589058 0.000212 0.069221 0.051916 C4h +208 0.981850 0.000118 0.069221 0.051916 C6h +209 1.374500 -0.000000 0.069221 0.051916 T8h +210 1.767191 -0.000000 0.069221 0.051916 T10h +211 -1.680646 -0.546088 0.069221 0.051916 TP9h +212 -1.310970 -0.456960 0.069221 0.051916 TP7h +213 -0.939815 -0.420500 0.069221 0.051916 CP5h +214 -0.565062 -0.401491 0.069221 0.051916 CP3h +215 -0.188515 -0.393352 0.069221 0.051916 CP1h +216 0.188515 -0.393352 0.069221 0.051916 CP2h +217 0.565062 -0.401491 0.069221 0.051916 CP4h +218 0.939815 -0.420500 0.069221 0.051916 CP6h +219 1.310970 -0.456960 0.069221 0.051916 TP8h +220 1.680646 -0.546088 0.069221 0.051916 TP10h +221 -1.429668 -1.038758 0.069221 0.051916 P9h +222 -1.122286 -0.883271 0.069221 0.051916 P7h +223 -0.812037 -0.829137 0.069221 0.051916 P5h +224 -0.490726 -0.799336 0.069221 0.051916 P3h +225 -0.164146 -0.786762 0.069221 0.051916 P1h +226 0.164146 -0.786762 0.069221 0.051916 P2h +227 0.490600 -0.799436 0.069221 0.051916 P4h +228 0.812037 -0.829137 0.069221 0.051916 P6h +229 1.122286 -0.883271 0.069221 0.051916 P8h +230 1.429668 -1.038758 0.069221 0.051916 P10h +231 -1.038821 -1.429709 0.069221 0.051916 PO9h +232 -0.816502 -1.246067 0.069221 0.051916 PO7h +233 -0.593079 -1.209372 0.069221 0.051916 PO5h +234 -0.359230 -1.188332 0.069221 0.051916 PO3h +235 -0.120221 -1.179168 0.069221 0.051916 PO1h +236 0.120348 -1.179159 0.069221 0.051916 PO2h +237 0.359230 -1.188332 0.069221 0.051916 PO4h +238 0.593079 -1.209372 0.069221 0.051916 PO6h +239 0.816502 -1.246067 0.069221 0.051916 PO8h +240 1.038710 -1.429804 0.069221 0.051916 PO10h +241 -0.245671 -1.551466 0.069221 0.051916 O1h +242 0.245671 -1.551466 0.069221 0.051916 O2h +243 -0.307129 -1.939338 0.069221 0.051916 I1h +244 0.307129 -1.939338 0.069221 0.051916 I2h +245 -0.891328 1.749684 0.069221 0.051916 AFp9 +246 -0.713143 1.399582 0.069221 0.051916 AFp7 +247 -0.539182 1.387878 0.069221 0.051916 AFp5 +248 -0.361777 1.379743 0.069221 0.051916 AFp3 +249 -0.181624 1.374948 0.069221 0.051916 AFp1 +250 0.000000 1.374461 0.069221 0.051916 AFpz +251 0.181624 1.374948 0.069221 0.051916 AFp2 +252 0.361802 1.379839 0.069221 0.051916 AFp4 +253 0.539182 1.387878 0.069221 0.051916 AFp6 +254 0.713143 1.399582 0.069221 0.051916 AFp8 +255 0.891489 1.749582 0.069221 0.051916 AFp10 +256 -1.388504 1.388504 0.069221 0.051916 AFF9 +257 -1.110721 1.110721 0.069221 0.051916 AFF7 +258 -0.850463 1.046170 0.069221 0.051916 AFF5 +259 -0.574170 1.008058 0.069221 0.051916 AFF3 +260 -0.288981 0.988233 0.069221 0.051916 AFF1 +261 0.000000 0.981739 0.069221 0.051916 AFFz +262 0.288981 0.988233 0.069221 0.051916 AFF2 +263 0.574170 1.008058 0.069221 0.051916 AFF4 +264 0.850463 1.046170 0.069221 0.051916 AFF6 +265 1.110721 1.110721 0.069221 0.051916 AFF8 +266 1.388504 1.388504 0.069221 0.051916 AFF10 +267 -1.749576 0.891591 0.069221 0.051916 FFT9 +268 -1.399582 0.713143 0.069221 0.051916 FFT7 +269 -1.060830 0.648168 0.069221 0.051916 FFC5 +270 -0.711350 0.612390 0.069221 0.051916 FFC3 +271 -0.356750 0.594619 0.069221 0.051916 FFC1 +272 0.000000 0.589085 0.069221 0.051916 FFCz +273 0.356750 0.594619 0.069221 0.051916 FFC2 +274 0.711350 0.612390 
0.069221 0.051916 FFC4 +275 1.060749 0.648119 0.069221 0.051916 FFC6 +276 1.399582 0.713143 0.069221 0.051916 FFT8 +277 1.749576 0.891591 0.069221 0.051916 FFT10 +278 -1.939489 0.307119 0.069221 0.051916 FTT9 +279 -1.551442 0.245824 0.069221 0.051916 FTT7 +280 -1.165132 0.219351 0.069221 0.051916 FCC5 +281 -0.777319 0.205363 0.069221 0.051916 FCC3 +282 -0.388766 0.198515 0.069221 0.051916 FCC1 +283 0.000000 0.196434 0.069221 0.051916 FCCz +284 0.388766 0.198515 0.069221 0.051916 FCC2 +285 0.777319 0.205363 0.069221 0.051916 FCC4 +286 1.165132 0.219351 0.069221 0.051916 FCC6 +287 1.551466 0.245671 0.069221 0.051916 FTT8 +288 1.939489 0.307119 0.069221 0.051916 FTT10 +289 -1.939553 -0.307197 0.069221 0.051916 TTP9 +290 -1.551565 -0.245687 0.069221 0.051916 TTP7 +291 -1.165206 -0.219084 0.069221 0.051916 CCP5 +292 -0.777275 -0.205069 0.069221 0.051916 CCP3 +293 -0.388806 -0.198175 0.069221 0.051916 CCP1 +294 -0.000000 -0.196218 0.069221 0.051916 CCPz +295 0.388801 -0.198275 0.069221 0.051916 CCP2 +296 0.777275 -0.205069 0.069221 0.051916 CCP4 +297 1.165206 -0.219084 0.069221 0.051916 CCP6 +298 1.551565 -0.245687 0.069221 0.051916 TTP8 +299 1.939553 -0.307197 0.069221 0.051916 TTP10 +300 -1.749664 -0.891531 0.069221 0.051916 TPP9 +301 -1.399671 -0.713188 0.069221 0.051916 TPP7 +302 -1.060852 -0.647970 0.069221 0.051916 CPP5 +303 -0.711356 -0.612379 0.069221 0.051916 CPP3 +304 -0.356663 -0.594548 0.069221 0.051916 CPP1 +305 -0.000000 -0.588863 0.069221 0.051916 CPPz +306 0.356778 -0.594448 0.069221 0.051916 CPP2 +307 0.711384 -0.612287 0.069221 0.051916 CPP4 +308 1.060852 -0.647970 0.069221 0.051916 CPP6 +309 1.399671 -0.713188 0.069221 0.051916 TPP8 +310 1.749664 -0.891531 0.069221 0.051916 TPP10 +311 -1.388427 -1.388427 0.069221 0.051916 PPO9 +312 -1.110721 -1.110721 0.069221 0.051916 PPO7 +313 -0.850511 -1.046155 0.069221 0.051916 PPO5 +314 -0.574228 -1.007462 0.069221 0.051916 PPO3 +315 -0.289055 -0.987715 0.069221 0.051916 PPO1 +316 -0.000000 -0.981655 0.069221 0.051916 PPOz +317 0.289055 -0.987715 0.069221 0.051916 PPO2 +318 0.574228 -1.007462 0.069221 0.051916 PPO4 +319 0.850454 -1.046223 0.069221 0.051916 PPO6 +320 1.110721 -1.110721 0.069221 0.051916 PPO8 +321 1.388427 -1.388427 0.069221 0.051916 PPO10 +322 -0.891143 -1.749540 0.069221 0.051916 POO9 +323 -0.713143 -1.399582 0.069221 0.051916 POO7 +324 -0.539360 -1.387717 0.069221 0.051916 POO5 +325 -0.362020 -1.379310 0.069221 0.051916 POO3 +326 -0.181486 -1.375484 0.069221 0.051916 POO1 +327 -0.000000 -1.374422 0.069221 0.051916 POOz +328 0.181626 -1.375468 0.069221 0.051916 POO2 +329 0.362020 -1.379310 0.069221 0.051916 POO4 +330 0.539360 -1.387717 0.069221 0.051916 POO6 +331 0.713143 -1.399582 0.069221 0.051916 POO8 +332 0.891143 -1.749540 0.069221 0.051916 POO10 +333 -0.546073 -1.680586 0.069221 0.051916 OI1 +334 -0.000000 -1.767132 0.069221 0.051916 OIz +335 0.546073 -1.680586 0.069221 0.051916 OI2 +336 -1.963487 1.749684 0.069221 0.051916 COMNT +337 1.963487 1.749684 0.069221 0.051916 SCALE diff --git a/mne/channels/data/layouts/EGI256.lout b/mne/channels/data/layouts/EGI256.lout new file mode 100644 index 0000000..bc9076a --- /dev/null +++ b/mne/channels/data/layouts/EGI256.lout @@ -0,0 +1,259 @@ +-42.19 43.52 -41.70 28.71 +001 0.235020883 0.231411875 0.023840595 0.024283894 EEG 001 +002 0.180062322 0.24066255 0.023840595 0.024283894 EEG 002 +003 0.134498312 0.239722125 0.023840595 0.024283894 EEG 003 +004 0.098183698 0.230899463 0.023840595 0.024283894 EEG 004 +005 0.066117291 0.206774428 0.023840595 0.024283894 EEG 005 +006 
0.038417416 0.175224454 0.023840595 0.024283894 EEG 006 +007 0.019093339 0.142334211 0.023840595 0.024283894 EEG 007 +008 0 0.106825455 0.023840595 0.024283894 EEG 008 +009 -0.017539353 0.062826857 0.023840595 0.024283894 EEG 009 +010 0.181942866 0.296413546 0.023840595 0.024283894 EEG 010 +011 0.13038807 0.293232492 0.023840595 0.024283894 EEG 011 +012 0.084273706 0.277147412 0.023840595 0.024283894 EEG 012 +013 0.050175359 0.251802841 0.023840595 0.024283894 EEG 013 +014 0.021773201 0.21699757 0.023840595 0.024283894 EEG 014 +015 0 0.180469732 0.023840595 0.024283894 EEG 015 +016 -0.019093339 0.142334211 0.023840595 0.024283894 EEG 016 +017 -0.036255497 0.09269913 0.023840595 0.024283894 EEG 017 +018 0.113098849 0.348229946 0.023840595 0.024283894 EEG 018 +019 0.069000992 0.329792276 0.023840595 0.024283894 EEG 019 +020 0.029776066 0.297506089 0.023840595 0.024283894 EEG 020 +021 0 0.258687873 0.023840595 0.024283894 EEG 021 +022 -0.021773201 0.21699757 0.023840595 0.024283894 EEG 022 +023 -0.038417416 0.175224454 0.023840595 0.024283894 EEG 023 +024 -0.055153266 0.126645408 0.023840595 0.024283894 EEG 024 +025 0.036940443 0.37703699 0.023840595 0.024283894 EEG 025 +026 0 0.343720309 0.023840595 0.024283894 EEG 026 +027 -0.029776066 0.297506089 0.023840595 0.024283894 EEG 027 +028 -0.050175359 0.251802841 0.023840595 0.024283894 EEG 028 +029 -0.066117291 0.206774428 0.023840595 0.024283894 EEG 029 +030 -0.079525249 0.158534511 0.023840595 0.024283894 EEG 030 +031 0 0.415202995 0.023840595 0.024283894 EEG 031 +032 -0.036940443 0.37703699 0.023840595 0.024283894 EEG 032 +033 -0.069000992 0.329792276 0.023840595 0.024283894 EEG 033 +034 -0.084273706 0.277147412 0.023840595 0.024283894 EEG 034 +035 -0.098183698 0.230899463 0.023840595 0.024283894 EEG 035 +036 -0.098479668 0.187945851 0.023840595 0.024283894 EEG 036 +037 -0.113098849 0.348229946 0.023840595 0.024283894 EEG 037 +038 -0.13038807 0.293232492 0.023840595 0.024283894 EEG 038 +039 -0.134498312 0.239722125 0.023840595 0.024283894 EEG 039 +040 -0.130890927 0.191286703 0.023840595 0.024283894 EEG 040 +041 -0.116009122 0.150111634 0.023840595 0.024283894 EEG 041 +042 -0.094840856 0.116834626 0.023840595 0.024283894 EEG 042 +043 -0.076990927 0.086006856 0.023840595 0.024283894 EEG 043 +044 -0.055587556 0.053147386 0.023840595 0.024283894 EEG 044 +045 -0.029699902 0.019405615 0.023840595 0.024283894 EEG 045 +046 -0.181942866 0.296413546 0.023840595 0.024283894 EEG 046 +047 -0.180062322 0.24066255 0.023840595 0.024283894 EEG 047 +048 -0.17285275 0.187572361 0.023840595 0.024283894 EEG 048 +049 -0.156410469 0.141423921 0.023840595 0.024283894 EEG 049 +050 -0.132742164 0.104084677 0.023840595 0.024283894 EEG 050 +051 -0.108362109 0.07207399 0.023840595 0.024283894 EEG 051 +052 -0.087032894 0.041560718 0.023840595 0.024283894 EEG 052 +053 -0.057033727 0.006635523 0.023840595 0.024283894 EEG 053 +054 -0.235020883 0.231411875 0.023840595 0.024283894 EEG 054 +055 -0.21721779 0.1735557 0.023840595 0.024283894 EEG 055 +056 -0.196096643 0.121848964 0.023840595 0.024283894 EEG 056 +057 -0.169122926 0.084563661 0.023840595 0.024283894 EEG 057 +058 -0.142622009 0.056366314 0.023840595 0.024283894 EEG 058 +059 -0.11607512 0.026701856 0.023840595 0.024283894 EEG 059 +060 -0.086703907 -0.006962228 0.023840595 0.024283894 EEG 060 +061 -0.271241865 0.131933691 0.023840595 0.024283894 EEG 061 +062 -0.237546771 0.082946276 0.023840595 0.024283894 EEG 062 +063 -0.20434592 0.049982898 0.023840595 0.024283894 EEG 063 +064 -0.175001011 0.027246728 0.023840595 
0.024283894 EEG 064 +065 -0.144183544 0.006552794 0.023840595 0.024283894 EEG 065 +066 -0.117629392 -0.020953359 0.023840595 0.024283894 EEG 066 +067 -0.32017538 0.064356008 0.023840595 0.024283894 EEG 067 +068 -0.277394242 0.035815905 0.023840595 0.024283894 EEG 068 +069 -0.241320281 0.000293927 0.023840595 0.024283894 EEG 069 +070 -0.202988841 -0.017932839 0.023840595 0.024283894 EEG 070 +071 -0.170816713 -0.027588171 0.023840595 0.024283894 EEG 071 +072 -0.142940198 -0.038849379 0.023840595 0.024283894 EEG 072 +073 -0.364333595 -0.009526546 0.023840595 0.024283894 EEG 073 +074 -0.227828247 -0.074709585 0.023840595 0.024283894 EEG 074 +075 -0.186334435 -0.079063391 0.023840595 0.024283894 EEG 075 +076 -0.152612576 -0.080357072 0.023840595 0.024283894 EEG 076 +077 -0.122986168 -0.070147895 0.023840595 0.024283894 EEG 077 +078 -0.092860036 -0.059724481 0.023840595 0.024283894 EEG 078 +079 -0.063373134 -0.044961361 0.023840595 0.024283894 EEG 079 +080 -0.033138055 -0.028518783 0.023840595 0.024283894 EEG 080 +081 0 -0.006448832 0.023840595 0.024283894 EEG 081 +082 -0.384631539 -0.115563191 0.023840595 0.024283894 EEG 082 +083 -0.230231782 -0.157310034 0.023840595 0.024283894 EEG 083 +084 -0.201004697 -0.132397774 0.023840595 0.024283894 EEG 084 +085 -0.158874627 -0.130476761 0.023840595 0.024283894 EEG 085 +086 -0.125435162 -0.117006671 0.023840595 0.024283894 EEG 086 +087 -0.093818787 -0.102184911 0.023840595 0.024283894 EEG 087 +088 -0.063690231 -0.085009427 0.023840595 0.024283894 EEG 088 +089 -0.034226984 -0.069230419 0.023840595 0.024283894 EEG 089 +090 0 -0.043222928 0.023840595 0.024283894 EEG 090 +091 -0.376606255 -0.236283155 0.023840595 0.024283894 EEG 091 +092 -0.320841548 -0.246056831 0.023840595 0.024283894 EEG 092 +093 -0.264511728 -0.247963981 0.023840595 0.024283894 EEG 093 +094 -0.235119884 -0.22133859 0.023840595 0.024283894 EEG 094 +095 -0.200260526 -0.201104991 0.023840595 0.024283894 EEG 095 +096 -0.16089296 -0.182074387 0.023840595 0.024283894 EEG 096 +097 -0.123315473 -0.169463521 0.023840595 0.024283894 EEG 097 +098 -0.093577895 -0.148219199 0.023840595 0.024283894 EEG 098 +099 -0.062757092 -0.127508907 0.023840595 0.024283894 EEG 099 +100 -0.033465994 -0.105718695 0.023840595 0.024283894 EEG 100 +101 0 -0.123212516 0.023840595 0.024283894 EEG 101 +102 -0.309236143 -0.330394078 0.023840595 0.024283894 EEG 102 +103 -0.264402365 -0.317489099 0.023840595 0.024283894 EEG 103 +104 -0.215607267 -0.297916345 0.023840595 0.024283894 EEG 104 +105 -0.194042397 -0.266008675 0.023840595 0.024283894 EEG 105 +106 -0.156365562 -0.241406814 0.023840595 0.024283894 EEG 106 +107 -0.117304936 -0.222733874 0.023840595 0.024283894 EEG 107 +108 -0.08375779 -0.200153314 0.023840595 0.024283894 EEG 108 +109 -0.056791169 -0.173578646 0.023840595 0.024283894 EEG 109 +110 -0.028490371 -0.146436894 0.023840595 0.024283894 EEG 110 +111 -0.235425173 -0.391140875 0.023840595 0.024283894 EEG 111 +112 -0.20031364 -0.367491502 0.023840595 0.024283894 EEG 112 +113 -0.160198907 -0.335751192 0.023840595 0.024283894 EEG 113 +114 -0.148968879 -0.297338854 0.023840595 0.024283894 EEG 114 +115 -0.09913078 -0.279612547 0.023840595 0.024283894 EEG 115 +116 -0.06561825 -0.2506161 0.023840595 0.024283894 EEG 116 +117 -0.036528871 -0.219887692 0.023840595 0.024283894 EEG 117 +118 -0.01914107 -0.187670154 0.023840595 0.024283894 EEG 118 +119 0 -0.159638357 0.023840595 0.024283894 EEG 119 +120 -0.178151028 -0.424680349 0.023840595 0.024283894 EEG 120 +121 -0.142872329 -0.395550026 0.023840595 0.024283894 EEG 121 
+122 -0.106134228 -0.360226213 0.023840595 0.024283894 EEG 122 +123 -0.074015552 -0.317797572 0.023840595 0.024283894 EEG 123 +124 -0.049414286 -0.292978277 0.023840595 0.024283894 EEG 124 +125 -0.020856534 -0.260833466 0.023840595 0.024283894 EEG 125 +126 0 -0.223512279 0.023840595 0.024283894 EEG 126 +127 0.01914107 -0.187670154 0.023840595 0.024283894 EEG 127 +128 0.028490371 -0.146436894 0.023840595 0.024283894 EEG 128 +129 0.033465994 -0.105718695 0.023840595 0.024283894 EEG 129 +130 0.034226984 -0.069230419 0.023840595 0.024283894 EEG 130 +131 0.033138055 -0.028518783 0.023840595 0.024283894 EEG 131 +132 0.029699902 0.019405615 0.023840595 0.024283894 EEG 132 +133 -0.11640639 -0.433892117 0.023840595 0.024283894 EEG 133 +134 -0.085226238 -0.411234759 0.023840595 0.024283894 EEG 134 +135 -0.054701526 -0.36252645 0.023840595 0.024283894 EEG 135 +136 -0.02321088 -0.335534555 0.023840595 0.024283894 EEG 136 +137 0 -0.303018075 0.023840595 0.024283894 EEG 137 +138 0.020856534 -0.260833466 0.023840595 0.024283894 EEG 138 +139 0.036528871 -0.219887692 0.023840595 0.024283894 EEG 139 +140 0.056791169 -0.173578646 0.023840595 0.024283894 EEG 140 +141 0.062757092 -0.127508907 0.023840595 0.024283894 EEG 141 +142 0.063690231 -0.085009427 0.023840595 0.024283894 EEG 142 +143 0.063373134 -0.044961361 0.023840595 0.024283894 EEG 143 +144 0.057033727 0.006635523 0.023840595 0.024283894 EEG 144 +145 -0.061719572 -0.45 0.023840595 0.024283894 EEG 145 +146 -0.032116421 -0.419782634 0.023840595 0.024283894 EEG 146 +147 -9.99E-17 -0.379508917 0.023840595 0.024283894 EEG 147 +148 0.02321088 -0.335534555 0.023840595 0.024283894 EEG 148 +149 0.049414286 -0.292978277 0.023840595 0.024283894 EEG 149 +150 0.06561825 -0.2506161 0.023840595 0.024283894 EEG 150 +151 0.08375779 -0.200153314 0.023840595 0.024283894 EEG 151 +152 0.093577895 -0.148219199 0.023840595 0.024283894 EEG 152 +153 0.093818787 -0.102184911 0.023840595 0.024283894 EEG 153 +154 0.092860036 -0.059724481 0.023840595 0.024283894 EEG 154 +155 0.086703907 -0.006962228 0.023840595 0.024283894 EEG 155 +156 0.032116421 -0.419782634 0.023840595 0.024283894 EEG 156 +157 0.054701526 -0.36252645 0.023840595 0.024283894 EEG 157 +158 0.074015552 -0.317797572 0.023840595 0.024283894 EEG 158 +159 0.09913078 -0.279612547 0.023840595 0.024283894 EEG 159 +160 0.117304936 -0.222733874 0.023840595 0.024283894 EEG 160 +161 0.123315473 -0.169463521 0.023840595 0.024283894 EEG 161 +162 0.125435162 -0.117006671 0.023840595 0.024283894 EEG 162 +163 0.122986168 -0.070147895 0.023840595 0.024283894 EEG 163 +164 0.117629392 -0.020953359 0.023840595 0.024283894 EEG 164 +165 0.061719572 -0.45 0.023840595 0.024283894 EEG 165 +166 0.085226238 -0.411234759 0.023840595 0.024283894 EEG 166 +167 0.106134228 -0.360226213 0.023840595 0.024283894 EEG 167 +168 0.148968879 -0.297338854 0.023840595 0.024283894 EEG 168 +169 0.156365562 -0.241406814 0.023840595 0.024283894 EEG 169 +170 0.16089296 -0.182074387 0.023840595 0.024283894 EEG 170 +171 0.158874627 -0.130476761 0.023840595 0.024283894 EEG 171 +172 0.152612576 -0.080357072 0.023840595 0.024283894 EEG 172 +173 0.142940198 -0.038849379 0.023840595 0.024283894 EEG 173 +174 0.11640639 -0.433892117 0.023840595 0.024283894 EEG 174 +175 0.142872329 -0.395550026 0.023840595 0.024283894 EEG 175 +176 0.160198907 -0.335751192 0.023840595 0.024283894 EEG 176 +177 0.194042397 -0.266008675 0.023840595 0.024283894 EEG 177 +178 0.200260526 -0.201104991 0.023840595 0.024283894 EEG 178 +179 0.201004697 -0.132397774 0.023840595 0.024283894 EEG 179 
+180 0.186334435 -0.079063391 0.023840595 0.024283894 EEG 180 +181 0.170816713 -0.027588171 0.023840595 0.024283894 EEG 181 +182 0.144183544 0.006552794 0.023840595 0.024283894 EEG 182 +183 0.11607512 0.026701856 0.023840595 0.024283894 EEG 183 +184 0.087032894 0.041560718 0.023840595 0.024283894 EEG 184 +185 0.055587556 0.053147386 0.023840595 0.024283894 EEG 185 +186 0.017539353 0.062826857 0.023840595 0.024283894 EEG 186 +187 0.178151028 -0.424680349 0.023840595 0.024283894 EEG 187 +188 0.20031364 -0.367491502 0.023840595 0.024283894 EEG 188 +189 0.215607267 -0.297916345 0.023840595 0.024283894 EEG 189 +190 0.235119884 -0.22133859 0.023840595 0.024283894 EEG 190 +191 0.230231782 -0.157310034 0.023840595 0.024283894 EEG 191 +192 0.227828247 -0.074709585 0.023840595 0.024283894 EEG 192 +193 0.202988841 -0.017932839 0.023840595 0.024283894 EEG 193 +194 0.175001011 0.027246728 0.023840595 0.024283894 EEG 194 +195 0.142622009 0.056366314 0.023840595 0.024283894 EEG 195 +196 0.108362109 0.07207399 0.023840595 0.024283894 EEG 196 +197 0.076990927 0.086006856 0.023840595 0.024283894 EEG 197 +198 0.036255497 0.09269913 0.023840595 0.024283894 EEG 198 +199 0.235425173 -0.391140875 0.023840595 0.024283894 EEG 199 +200 0.264402365 -0.317489099 0.023840595 0.024283894 EEG 200 +201 0.264511728 -0.247963981 0.023840595 0.024283894 EEG 201 +202 0.241320281 0.000293927 0.023840595 0.024283894 EEG 202 +203 0.20434592 0.049982898 0.023840595 0.024283894 EEG 203 +204 0.169122926 0.084563661 0.023840595 0.024283894 EEG 204 +205 0.132742164 0.104084677 0.023840595 0.024283894 EEG 205 +206 0.094840856 0.116834626 0.023840595 0.024283894 EEG 206 +207 0.055153266 0.126645408 0.023840595 0.024283894 EEG 207 +208 0.309236143 -0.330394078 0.023840595 0.024283894 EEG 208 +209 0.320841548 -0.246056831 0.023840595 0.024283894 EEG 209 +210 0.277394242 0.035815905 0.023840595 0.024283894 EEG 210 +211 0.237546771 0.082946276 0.023840595 0.024283894 EEG 211 +212 0.196096643 0.121848964 0.023840595 0.024283894 EEG 212 +213 0.156410469 0.141423921 0.023840595 0.024283894 EEG 213 +214 0.116009122 0.150111634 0.023840595 0.024283894 EEG 214 +215 0.079525249 0.158534511 0.023840595 0.024283894 EEG 215 +216 0.376606255 -0.236283155 0.023840595 0.024283894 EEG 216 +217 0.384631539 -0.115563191 0.023840595 0.024283894 EEG 217 +218 0.364333595 -0.009526546 0.023840595 0.024283894 EEG 218 +219 0.32017538 0.064356008 0.023840595 0.024283894 EEG 219 +220 0.271241865 0.131933691 0.023840595 0.024283894 EEG 220 +221 0.21721779 0.1735557 0.023840595 0.024283894 EEG 221 +222 0.17285275 0.187572361 0.023840595 0.024283894 EEG 222 +223 0.130890927 0.191286703 0.023840595 0.024283894 EEG 223 +224 0.098479668 0.187945851 0.023840595 0.024283894 EEG 224 +225 0.316289645 0.145736715 0.023840595 0.024283894 EEG 225 +226 0.302702771 0.230332844 0.023840595 0.024283894 EEG 226 +227 0.368412876 0.104246485 0.023840595 0.024283894 EEG 227 +228 0.409165374 0.012374488 0.023840595 0.024283894 EEG 228 +229 0.423731189 -0.12797492 0.023840595 0.024283894 EEG 229 +230 0.298254153 0.303894316 0.023840595 0.024283894 EEG 230 +231 0.362100214 0.20909316 0.023840595 0.024283894 EEG 231 +232 0.410199617 0.143137194 0.023840595 0.024283894 EEG 232 +233 0.447869069 0.013249996 0.023840595 0.024283894 EEG 233 +234 0.269381414 0.382730951 0.023840595 0.024283894 EEG 234 +235 0.342518502 0.308483235 0.023840595 0.024283894 EEG 235 +236 0.395968691 0.254174349 0.023840595 0.024283894 EEG 236 +237 0.45 0.157922288 0.023840595 0.024283894 EEG 237 +238 0.2187115 
0.45 0.023840595 0.024283894 EEG 238 +239 0.327880174 0.384827106 0.023840595 0.024283894 EEG 239 +240 0.38583302 0.329449945 0.023840595 0.024283894 EEG 240 +241 -0.2187115 0.45 0.023840595 0.024283894 EEG 241 +242 -0.327880174 0.384827106 0.023840595 0.024283894 EEG 242 +243 -0.38583302 0.329449945 0.023840595 0.024283894 EEG 243 +244 -0.269381414 0.382730951 0.023840595 0.024283894 EEG 244 +245 -0.342518502 0.308483235 0.023840595 0.024283894 EEG 245 +246 -0.395968691 0.254174349 0.023840595 0.024283894 EEG 246 +247 -0.45 0.157922288 0.023840595 0.024283894 EEG 247 +248 -0.298254153 0.303894316 0.023840595 0.024283894 EEG 248 +249 -0.362100214 0.20909316 0.023840595 0.024283894 EEG 249 +250 -0.410199617 0.143137194 0.023840595 0.024283894 EEG 250 +251 -0.447869069 0.013249996 0.023840595 0.024283894 EEG 251 +252 -0.302702771 0.230332844 0.023840595 0.024283894 EEG 252 +253 -0.316289645 0.145736715 0.023840595 0.024283894 EEG 253 +254 -0.368412876 0.104246485 0.023840595 0.024283894 EEG 254 +255 -0.409165374 0.012374488 0.023840595 0.024283894 EEG 255 +256 -0.423731189 -0.12797492 0.023840595 0.024283894 EEG 256 +257 -0.45 -0.45 0.023840595 0.024283894 EEG 257 +258 0.45 -0.45 0.023840595 0.024283894 EEG 258 diff --git a/mne/channels/data/layouts/GeodesicHeadWeb-130.lout b/mne/channels/data/layouts/GeodesicHeadWeb-130.lout new file mode 100644 index 0000000..32358fe --- /dev/null +++ b/mne/channels/data/layouts/GeodesicHeadWeb-130.lout @@ -0,0 +1,132 @@ + -42.19 43.52 -41.70 28.71 +001 0.50 0.74 0.02 0.02 E1 +002 0.53 0.79 0.02 0.02 E2 +003 0.55 0.73 0.02 0.02 E3 +004 0.50 0.69 0.02 0.02 E4 +005 0.45 0.73 0.02 0.02 E5 +006 0.46 0.79 0.02 0.02 E6 +007 0.50 0.85 0.02 0.02 E7 +008 0.58 0.84 0.02 0.02 E8 +009 0.60 0.77 0.02 0.02 E9 +010 0.60 0.70 0.02 0.02 E10 +011 0.55 0.68 0.02 0.02 E11 +012 0.50 0.64 0.02 0.02 E12 +013 0.45 0.68 0.02 0.02 E13 +014 0.40 0.70 0.02 0.02 E14 +015 0.39 0.77 0.02 0.02 E15 +016 0.41 0.84 0.02 0.02 E16 +017 0.44 0.91 0.02 0.02 E17 +018 0.66 0.79 0.02 0.02 E18 +019 0.60 0.64 0.02 0.02 E19 +020 0.45 0.62 0.02 0.02 E20 +021 0.34 0.72 0.02 0.02 E21 +022 0.73 0.87 0.02 0.02 E22 +023 0.76 0.93 0.02 0.02 E23 +024 0.85 0.83 0.02 0.02 E24 +025 0.86 0.74 0.02 0.02 E25 +026 0.80 0.72 0.02 0.02 E26 +027 0.74 0.81 0.02 0.02 E27 +028 0.65 0.88 0.02 0.02 E28 +029 0.55 0.91 0.02 0.02 E29 +030 0.50 0.96 0.02 0.02 E30 +031 0.63 0.98 0.02 0.02 E31 +032 0.83 0.63 0.02 0.02 E32 +033 0.72 0.73 0.02 0.02 E33 +034 0.67 0.62 0.02 0.02 E34 +035 0.72 0.61 0.02 0.02 E35 +036 0.68 0.56 0.02 0.02 E36 +037 0.62 0.59 0.02 0.02 E37 +038 0.63 0.66 0.02 0.02 E38 +039 0.69 0.67 0.02 0.02 E39 +040 0.76 0.64 0.02 0.02 E40 +041 0.77 0.55 0.02 0.02 E41 +042 0.72 0.51 0.02 0.02 E42 +043 0.67 0.49 0.02 0.02 E43 +044 0.63 0.53 0.02 0.02 E44 +045 0.58 0.58 0.02 0.02 E45 +046 0.66 0.72 0.02 0.02 E46 +047 0.90 0.49 0.02 0.02 E47 +048 0.84 0.51 0.02 0.02 E48 +049 0.88 0.23 0.02 0.02 E49 +050 0.79 0.23 0.02 0.02 E50 +051 0.74 0.29 0.02 0.02 E51 +052 0.72 0.37 0.02 0.02 E52 +053 0.76 0.46 0.02 0.02 E53 +054 0.61 0.39 0.02 0.02 E54 +055 0.61 0.33 0.02 0.02 E55 +056 0.56 0.37 0.02 0.02 E56 +057 0.57 0.43 0.02 0.02 E57 +058 0.64 0.44 0.02 0.02 E58 +059 0.65 0.37 0.02 0.02 E59 +060 0.66 0.30 0.02 0.02 E60 +061 0.58 0.26 0.02 0.02 E61 +062 0.54 0.30 0.02 0.02 E62 +063 0.53 0.41 0.02 0.02 E63 +064 0.55 0.47 0.02 0.02 E64 +065 0.61 0.48 0.02 0.02 E65 +066 0.70 0.44 0.02 0.02 E66 +067 0.50 0.12 0.02 0.02 E67 +068 0.45 0.11 0.02 0.02 E68 +069 0.47 0.18 0.02 0.02 E69 +070 0.53 0.18 0.02 0.02 E70 +071 0.55 0.11 0.02 0.02 
E71 +072 0.73 0.08 0.02 0.02 E72 +073 0.50 0.02 0.02 0.02 E73 +074 0.28 0.07 0.02 0.02 E74 +075 0.22 0.22 0.02 0.02 E75 +076 0.35 0.22 0.02 0.02 E76 +077 0.41 0.26 0.02 0.02 E77 +078 0.50 0.25 0.02 0.02 E78 +079 0.65 0.22 0.02 0.02 E79 +080 0.39 0.39 0.02 0.02 E80 +081 0.34 0.36 0.02 0.02 E81 +082 0.36 0.43 0.02 0.02 E82 +083 0.42 0.44 0.02 0.02 E83 +084 0.45 0.37 0.02 0.02 E84 +085 0.40 0.33 0.02 0.02 E85 +086 0.34 0.30 0.02 0.02 E86 +087 0.28 0.37 0.02 0.02 E87 +088 0.30 0.43 0.02 0.02 E88 +089 0.33 0.49 0.02 0.02 E89 +090 0.39 0.48 0.02 0.02 E90 +091 0.45 0.47 0.02 0.02 E91 +092 0.47 0.41 0.02 0.02 E92 +093 0.46 0.30 0.02 0.02 E93 +094 0.47 0.51 0.02 0.02 E94 +095 0.46 0.57 0.02 0.02 E95 +096 0.50 0.60 0.02 0.02 E96 +097 0.54 0.57 0.02 0.02 E97 +098 0.52 0.51 0.02 0.02 E98 +099 0.50 0.46 0.02 0.02 E99 +100 0.42 0.52 0.02 0.02 E100 +101 0.42 0.58 0.02 0.02 E101 +102 0.55 0.62 0.02 0.02 E102 +103 0.58 0.52 0.02 0.02 E103 +104 0.16 0.52 0.02 0.02 E104 +105 0.10 0.49 0.02 0.02 E105 +106 0.09 0.27 0.02 0.02 E106 +107 0.15 0.75 0.02 0.02 E107 +108 0.17 0.63 0.02 0.02 E108 +109 0.22 0.55 0.02 0.02 E109 +110 0.24 0.45 0.02 0.02 E110 +111 0.26 0.29 0.02 0.02 E111 +112 0.33 0.61 0.02 0.02 E112 +113 0.30 0.66 0.02 0.02 E113 +114 0.36 0.66 0.02 0.02 E114 +115 0.37 0.59 0.02 0.02 E115 +116 0.33 0.56 0.02 0.02 E116 +117 0.28 0.60 0.02 0.02 E117 +118 0.24 0.64 0.02 0.02 E118 +119 0.27 0.73 0.02 0.02 E119 +120 0.40 0.64 0.02 0.02 E120 +121 0.37 0.53 0.02 0.02 E121 +122 0.27 0.51 0.02 0.02 E122 +123 0.27 0.88 0.02 0.02 E123 +124 0.26 0.81 0.02 0.02 E124 +125 0.20 0.72 0.02 0.02 E125 +126 0.16 0.83 0.02 0.02 E126 +127 0.25 0.93 0.02 0.02 E127 +128 0.37 0.98 0.02 0.02 E128 +129 0.35 0.88 0.02 0.02 E129 +130 0.33 0.79 0.02 0.02 E130 +131 0.50 0.55 0.02 0.02 E131 diff --git a/mne/channels/data/layouts/GeodesicHeadWeb-280.lout b/mne/channels/data/layouts/GeodesicHeadWeb-280.lout new file mode 100644 index 0000000..7585787 --- /dev/null +++ b/mne/channels/data/layouts/GeodesicHeadWeb-280.lout @@ -0,0 +1,282 @@ + -42.19 43.52 -41.70 28.71 +001 0.49 0.77 0.02 0.02 E1 +002 0.52 0.80 0.02 0.02 E2 +003 0.53 0.76 0.02 0.02 E3 +004 0.49 0.73 0.02 0.02 E4 +005 0.46 0.76 0.02 0.02 E5 +006 0.47 0.80 0.02 0.02 E6 +007 0.49 0.84 0.02 0.02 E7 +008 0.54 0.84 0.02 0.02 E8 +009 0.55 0.80 0.02 0.02 E9 +010 0.57 0.74 0.02 0.02 E10 +011 0.53 0.71 0.02 0.02 E11 +012 0.49 0.68 0.02 0.02 E12 +013 0.46 0.71 0.02 0.02 E13 +014 0.42 0.74 0.02 0.02 E14 +015 0.43 0.79 0.02 0.02 E15 +016 0.45 0.84 0.02 0.02 E16 +017 0.47 0.87 0.02 0.02 E17 +018 0.52 0.87 0.02 0.02 E18 +019 0.57 0.86 0.02 0.02 E19 +020 0.59 0.83 0.02 0.02 E20 +021 0.59 0.79 0.02 0.02 E21 +022 0.60 0.73 0.02 0.02 E22 +023 0.57 0.71 0.02 0.02 E23 +024 0.53 0.67 0.02 0.02 E24 +025 0.49 0.65 0.02 0.02 E25 +026 0.46 0.67 0.02 0.02 E26 +027 0.43 0.70 0.02 0.02 E27 +028 0.39 0.73 0.02 0.02 E28 +029 0.39 0.78 0.02 0.02 E29 +030 0.40 0.83 0.02 0.02 E30 +031 0.41 0.86 0.02 0.02 E31 +032 0.49 0.93 0.02 0.02 E32 +033 0.64 0.81 0.02 0.02 E33 +034 0.56 0.66 0.02 0.02 E34 +035 0.43 0.66 0.02 0.02 E35 +036 0.35 0.80 0.02 0.02 E36 +037 0.73 0.86 0.02 0.02 E37 +038 0.76 0.89 0.02 0.02 E38 +039 0.76 0.84 0.02 0.02 E39 +040 0.72 0.82 0.02 0.02 E40 +041 0.69 0.86 0.02 0.02 E41 +042 0.67 0.99 0.02 0.02 E42 +043 0.79 0.91 0.02 0.02 E43 +044 0.85 0.83 0.02 0.02 E44 +045 0.84 0.77 0.02 0.02 E45 +046 0.82 0.74 0.02 0.02 E46 +047 0.76 0.77 0.02 0.02 E47 +048 0.72 0.76 0.02 0.02 E48 +049 0.68 0.81 0.02 0.02 E49 +050 0.64 0.83 0.02 0.02 E50 +051 0.62 0.85 0.02 0.02 E51 +052 0.58 0.98 0.02 0.02 E52 
+053 0.90 0.82 0.02 0.02 E53 +054 0.80 0.70 0.02 0.02 E54 +055 0.76 0.70 0.02 0.02 E55 +056 0.68 0.64 0.02 0.02 E56 +057 0.69 0.68 0.02 0.02 E57 +058 0.71 0.64 0.02 0.02 E58 +059 0.68 0.60 0.02 0.02 E59 +060 0.65 0.62 0.02 0.02 E60 +061 0.65 0.67 0.02 0.02 E61 +062 0.67 0.71 0.02 0.02 E62 +063 0.71 0.72 0.02 0.02 E63 +064 0.73 0.68 0.02 0.02 E64 +065 0.75 0.63 0.02 0.02 E65 +066 0.72 0.59 0.02 0.02 E66 +067 0.69 0.56 0.02 0.02 E67 +068 0.65 0.58 0.02 0.02 E68 +069 0.61 0.60 0.02 0.02 E69 +070 0.61 0.66 0.02 0.02 E70 +071 0.62 0.70 0.02 0.02 E71 +072 0.64 0.76 0.02 0.02 E72 +073 0.69 0.76 0.02 0.02 E73 +074 0.79 0.61 0.02 0.02 E74 +075 0.76 0.57 0.02 0.02 E75 +076 0.73 0.53 0.02 0.02 E76 +077 0.69 0.49 0.02 0.02 E77 +078 0.65 0.53 0.02 0.02 E78 +079 0.62 0.54 0.02 0.02 E79 +080 0.59 0.57 0.02 0.02 E80 +081 0.59 0.63 0.02 0.02 E81 +082 0.60 0.69 0.02 0.02 E82 +083 0.76 0.48 0.02 0.02 E83 +084 0.61 0.50 0.02 0.02 E84 +085 0.90 0.51 0.02 0.02 E85 +086 0.91 0.63 0.02 0.02 E86 +087 0.94 0.58 0.02 0.02 E87 +088 0.97 0.54 0.02 0.02 E88 +089 0.94 0.51 0.02 0.02 E89 +090 0.91 0.46 0.02 0.02 E90 +091 0.87 0.48 0.02 0.02 E91 +092 0.82 0.49 0.02 0.02 E92 +093 0.91 0.73 0.02 0.02 E93 +094 0.97 0.68 0.02 0.02 E94 +095 0.94 0.33 0.02 0.02 E95 +096 0.88 0.31 0.02 0.02 E96 +097 0.77 0.22 0.02 0.02 E97 +098 0.82 0.34 0.02 0.02 E98 +099 0.79 0.37 0.02 0.02 E99 +100 0.77 0.41 0.02 0.02 E100 +101 0.79 0.50 0.02 0.02 E101 +102 0.83 0.17 0.02 0.02 E102 +103 0.74 0.29 0.02 0.02 E103 +104 0.62 0.36 0.02 0.02 E104 +105 0.64 0.39 0.02 0.02 E105 +106 0.66 0.35 0.02 0.02 E106 +107 0.62 0.32 0.02 0.02 E107 +108 0.59 0.35 0.02 0.02 E108 +109 0.60 0.39 0.02 0.02 E109 +110 0.62 0.43 0.02 0.02 E110 +111 0.67 0.43 0.02 0.02 E111 +112 0.68 0.39 0.02 0.02 E112 +113 0.70 0.33 0.02 0.02 E113 +114 0.66 0.31 0.02 0.02 E114 +115 0.62 0.27 0.02 0.02 E115 +116 0.58 0.31 0.02 0.02 E116 +117 0.55 0.34 0.02 0.02 E117 +118 0.56 0.39 0.02 0.02 E118 +119 0.58 0.43 0.02 0.02 E119 +120 0.59 0.46 0.02 0.02 E120 +121 0.65 0.48 0.02 0.02 E121 +122 0.71 0.44 0.02 0.02 E122 +123 0.73 0.40 0.02 0.02 E123 +124 0.73 0.34 0.02 0.02 E124 +125 0.67 0.27 0.02 0.02 E125 +126 0.60 0.23 0.02 0.02 E126 +127 0.56 0.25 0.02 0.02 E127 +128 0.53 0.28 0.02 0.02 E128 +129 0.49 0.31 0.02 0.02 E129 +130 0.52 0.36 0.02 0.02 E130 +131 0.53 0.41 0.02 0.02 E131 +132 0.55 0.45 0.02 0.02 E132 +133 0.49 0.24 0.02 0.02 E133 +134 0.49 0.40 0.02 0.02 E134 +135 0.49 0.13 0.02 0.02 E135 +136 0.51 0.16 0.02 0.02 E136 +137 0.52 0.11 0.02 0.02 E137 +138 0.49 0.08 0.02 0.02 E138 +139 0.46 0.11 0.02 0.02 E139 +140 0.47 0.16 0.02 0.02 E140 +141 0.49 0.19 0.02 0.02 E141 +142 0.54 0.19 0.02 0.02 E142 +143 0.55 0.14 0.02 0.02 E143 +144 0.57 0.09 0.02 0.02 E144 +145 0.53 0.06 0.02 0.02 E145 +146 0.49 0.03 0.02 0.02 E146 +147 0.45 0.06 0.02 0.02 E147 +148 0.42 0.09 0.02 0.02 E148 +149 0.43 0.15 0.02 0.02 E149 +150 0.45 0.19 0.02 0.02 E150 +151 0.47 0.21 0.02 0.02 E151 +152 0.52 0.21 0.02 0.02 E152 +153 0.62 0.21 0.02 0.02 E153 +154 0.68 0.19 0.02 0.02 E154 +155 0.75 0.15 0.02 0.02 E155 +156 0.68 0.06 0.02 0.02 E156 +157 0.49 0.01 0.02 0.02 E157 +158 0.31 0.05 0.02 0.02 E158 +159 0.28 0.12 0.02 0.02 E159 +160 0.21 0.21 0.02 0.02 E160 +161 0.29 0.20 0.02 0.02 E161 +162 0.34 0.21 0.02 0.02 E162 +163 0.38 0.24 0.02 0.02 E163 +164 0.15 0.16 0.02 0.02 E164 +165 0.25 0.28 0.02 0.02 E165 +166 0.47 0.51 0.02 0.02 E166 +167 0.46 0.55 0.02 0.02 E167 +168 0.49 0.58 0.02 0.02 E168 +169 0.52 0.55 0.02 0.02 E169 +170 0.51 0.52 0.02 0.02 E170 +171 0.46 0.48 0.02 0.02 E171 +172 0.44 0.53 0.02 0.02 E172 
+173 0.43 0.56 0.02 0.02 E173 +174 0.46 0.59 0.02 0.02 E174 +175 0.49 0.61 0.02 0.02 E175 +176 0.52 0.59 0.02 0.02 E176 +177 0.55 0.56 0.02 0.02 E177 +178 0.54 0.53 0.02 0.02 E178 +179 0.53 0.48 0.02 0.02 E179 +180 0.51 0.44 0.02 0.02 E180 +181 0.47 0.44 0.02 0.02 E181 +182 0.43 0.46 0.02 0.02 E182 +183 0.41 0.49 0.02 0.02 E183 +184 0.40 0.53 0.02 0.02 E184 +185 0.41 0.58 0.02 0.02 E185 +186 0.43 0.61 0.02 0.02 E186 +187 0.46 0.64 0.02 0.02 E187 +188 0.53 0.64 0.02 0.02 E188 +189 0.56 0.61 0.02 0.02 E189 +190 0.59 0.53 0.02 0.02 E190 +191 0.57 0.48 0.02 0.02 E191 +192 0.37 0.50 0.02 0.02 E192 +193 0.36 0.36 0.02 0.02 E193 +194 0.39 0.39 0.02 0.02 E194 +195 0.40 0.35 0.02 0.02 E195 +196 0.36 0.32 0.02 0.02 E196 +197 0.33 0.35 0.02 0.02 E197 +198 0.34 0.39 0.02 0.02 E198 +199 0.36 0.44 0.02 0.02 E199 +200 0.41 0.43 0.02 0.02 E200 +201 0.43 0.38 0.02 0.02 E201 +202 0.44 0.33 0.02 0.02 E202 +203 0.40 0.31 0.02 0.02 E203 +204 0.36 0.28 0.02 0.02 E204 +205 0.33 0.31 0.02 0.02 E205 +206 0.29 0.34 0.02 0.02 E206 +207 0.31 0.39 0.02 0.02 E207 +208 0.32 0.43 0.02 0.02 E208 +209 0.35 0.48 0.02 0.02 E209 +210 0.39 0.46 0.02 0.02 E210 +211 0.45 0.41 0.02 0.02 E211 +212 0.47 0.36 0.02 0.02 E212 +213 0.46 0.28 0.02 0.02 E213 +214 0.42 0.25 0.02 0.02 E214 +215 0.31 0.28 0.02 0.02 E215 +216 0.26 0.34 0.02 0.02 E216 +217 0.22 0.44 0.02 0.02 E217 +218 0.26 0.40 0.02 0.02 E218 +219 0.28 0.45 0.02 0.02 E219 +220 0.30 0.49 0.02 0.02 E220 +221 0.23 0.48 0.02 0.02 E221 +222 0.10 0.51 0.02 0.02 E222 +223 0.18 0.49 0.02 0.02 E223 +224 0.13 0.48 0.02 0.02 E224 +225 0.09 0.46 0.02 0.02 E225 +226 0.06 0.51 0.02 0.02 E226 +227 0.03 0.54 0.02 0.02 E227 +228 0.06 0.58 0.02 0.02 E228 +229 0.09 0.63 0.02 0.02 E229 +230 0.18 0.76 0.02 0.02 E230 +231 0.19 0.71 0.02 0.02 E231 +232 0.21 0.64 0.02 0.02 E232 +233 0.20 0.54 0.02 0.02 E233 +234 0.19 0.39 0.02 0.02 E234 +235 0.17 0.33 0.02 0.02 E235 +236 0.10 0.31 0.02 0.02 E236 +237 0.05 0.33 0.02 0.02 E237 +238 0.03 0.67 0.02 0.02 E238 +239 0.09 0.74 0.02 0.02 E239 +240 0.15 0.79 0.02 0.02 E240 +241 0.23 0.78 0.02 0.02 E241 +242 0.08 0.81 0.02 0.02 E242 +243 0.31 0.64 0.02 0.02 E243 +244 0.34 0.67 0.02 0.02 E244 +245 0.34 0.62 0.02 0.02 E245 +246 0.31 0.60 0.02 0.02 E246 +247 0.27 0.63 0.02 0.02 E247 +248 0.29 0.67 0.02 0.02 E248 +249 0.32 0.71 0.02 0.02 E249 +250 0.37 0.70 0.02 0.02 E250 +251 0.37 0.65 0.02 0.02 E251 +252 0.38 0.60 0.02 0.02 E252 +253 0.34 0.58 0.02 0.02 E253 +254 0.30 0.55 0.02 0.02 E254 +255 0.27 0.58 0.02 0.02 E255 +256 0.23 0.62 0.02 0.02 E256 +257 0.25 0.67 0.02 0.02 E257 +258 0.28 0.72 0.02 0.02 E258 +259 0.30 0.76 0.02 0.02 E259 +260 0.35 0.75 0.02 0.02 E260 +261 0.40 0.68 0.02 0.02 E261 +262 0.40 0.64 0.02 0.02 E262 +263 0.37 0.55 0.02 0.02 E263 +264 0.34 0.52 0.02 0.02 E264 +265 0.26 0.53 0.02 0.02 E265 +266 0.23 0.56 0.02 0.02 E266 +267 0.23 0.70 0.02 0.02 E267 +268 0.28 0.77 0.02 0.02 E268 +269 0.32 0.81 0.02 0.02 E269 +270 0.27 0.87 0.02 0.02 E270 +271 0.23 0.89 0.02 0.02 E271 +272 0.24 0.84 0.02 0.02 E272 +273 0.28 0.83 0.02 0.02 E273 +274 0.30 0.87 0.02 0.02 E274 +275 0.32 0.99 0.02 0.02 E275 +276 0.21 0.91 0.02 0.02 E276 +277 0.16 0.87 0.02 0.02 E277 +278 0.34 0.83 0.02 0.02 E278 +279 0.37 0.84 0.02 0.02 E279 +280 0.41 0.98 0.02 0.02 E280 +281 0.49 0.54 0.02 0.02 E281 diff --git a/mne/channels/data/layouts/KIT-125.lout b/mne/channels/data/layouts/KIT-125.lout new file mode 100644 index 0000000..5b1e987 --- /dev/null +++ b/mne/channels/data/layouts/KIT-125.lout @@ -0,0 +1,126 @@ + -0.50 0.50 -0.50 0.50 +001 0.12 -0.10 0.04 0.03 MEG 001 +002 0.15 
-0.06 0.04 0.03 MEG 002 +003 0.03 -0.15 0.04 0.03 MEG 003 +004 -0.22 -0.29 0.04 0.03 MEG 004 +005 -0.28 -0.23 0.04 0.03 MEG 005 +006 -0.33 -0.15 0.04 0.03 MEG 006 +007 -0.07 -0.36 0.04 0.03 MEG 007 +008 0.09 -0.36 0.04 0.03 MEG 008 +009 -0.06 -0.25 0.04 0.03 MEG 009 +010 -0.18 0.18 0.04 0.03 MEG 010 +011 -0.10 0.25 0.04 0.03 MEG 011 +012 -0.15 0.22 0.04 0.03 MEG 012 +013 -0.37 0.13 0.04 0.03 MEG 013 +014 -0.36 -0.06 0.04 0.03 MEG 014 +015 -0.18 -0.41 0.04 0.03 MEG 015 +016 -0.27 -0.35 0.04 0.03 MEG 016 +017 -0.16 -0.19 0.04 0.03 MEG 017 +018 -0.18 -0.10 0.04 0.03 MEG 018 +019 -0.14 -0.14 0.04 0.03 MEG 019 +020 -0.30 -0.04 0.04 0.03 MEG 020 +021 -0.31 0.11 0.04 0.03 MEG 021 +022 -0.37 0.04 0.04 0.03 MEG 022 +023 -0.20 -0.14 0.04 0.03 MEG 023 +024 -0.11 -0.23 0.04 0.03 MEG 024 +025 -0.11 -0.11 0.04 0.03 MEG 025 +026 -0.02 -0.14 0.04 0.03 MEG 026 +027 -0.13 -0.28 0.04 0.03 MEG 027 +028 -0.24 -0.18 0.04 0.03 MEG 028 +029 -0.19 -0.23 0.04 0.03 MEG 029 +030 -0.21 0.01 0.04 0.03 MEG 030 +031 -0.21 0.07 0.04 0.03 MEG 031 +032 -0.20 0.13 0.04 0.03 MEG 032 +033 -0.12 0.08 0.04 0.03 MEG 033 +034 -0.09 -0.07 0.04 0.03 MEG 034 +035 -0.12 0.03 0.04 0.03 MEG 035 +036 -0.25 0.11 0.04 0.03 MEG 036 +037 -0.25 -0.03 0.04 0.03 MEG 037 +038 -0.23 -0.09 0.04 0.03 MEG 038 +039 -0.26 0.04 0.04 0.03 MEG 039 +040 -0.14 -0.06 0.04 0.03 MEG 040 +041 -0.00 0.29 0.04 0.03 MEG 041 +042 0.09 0.26 0.04 0.03 MEG 042 +043 -0.07 -0.00 0.04 0.03 MEG 043 +044 -0.07 0.09 0.04 0.03 MEG 044 +045 -0.34 -0.28 0.04 0.03 MEG 045 +046 -0.43 -0.09 0.04 0.03 MEG 046 +047 -0.45 0.03 0.04 0.03 MEG 047 +048 -0.44 0.14 0.04 0.03 MEG 048 +049 -0.07 0.21 0.04 0.03 MEG 049 +050 -0.15 0.15 0.04 0.03 MEG 050 +051 -0.16 -0.02 0.04 0.03 MEG 051 +052 -0.17 0.04 0.04 0.03 MEG 052 +053 0.07 0.17 0.04 0.03 MEG 053 +054 -0.07 0.17 0.04 0.03 MEG 054 +055 0.00 0.14 0.04 0.03 MEG 055 +056 0.08 0.09 0.04 0.03 MEG 056 +057 0.21 -0.39 0.04 0.03 MEG 057 +058 0.09 -0.06 0.04 0.03 MEG 058 +059 -0.04 -0.09 0.04 0.03 MEG 059 +060 0.05 -0.09 0.04 0.03 MEG 060 +061 0.17 -0.18 0.04 0.03 MEG 061 +062 0.06 -0.19 0.04 0.03 MEG 062 +063 -0.04 -0.19 0.04 0.03 MEG 063 +064 0.01 -0.20 0.04 0.03 MEG 064 +065 0.19 -0.09 0.04 0.03 MEG 065 +066 0.01 -0.31 0.04 0.03 MEG 066 +067 0.14 -0.27 0.04 0.03 MEG 067 +068 0.24 -0.28 0.04 0.03 MEG 068 +069 0.34 -0.13 0.04 0.03 MEG 069 +070 0.29 -0.21 0.04 0.03 MEG 070 +071 0.30 -0.33 0.04 0.03 MEG 071 +072 0.02 -0.45 0.04 0.03 MEG 072 +073 0.21 -0.14 0.04 0.03 MEG 073 +074 0.24 -0.08 0.04 0.03 MEG 074 +075 0.26 0.12 0.04 0.03 MEG 075 +076 0.26 0.05 0.04 0.03 MEG 076 +077 0.07 -0.24 0.04 0.03 MEG 077 +078 0.12 -0.22 0.04 0.03 MEG 078 +079 0.22 0.02 0.04 0.03 MEG 079 +080 0.15 -0.13 0.04 0.03 MEG 080 +081 0.43 0.16 0.04 0.03 MEG 081 +082 0.22 0.09 0.04 0.03 MEG 082 +083 0.18 0.19 0.04 0.03 MEG 083 +084 0.14 0.23 0.04 0.03 MEG 084 +085 0.20 0.14 0.04 0.03 MEG 085 +086 0.36 -0.04 0.04 0.03 MEG 086 +087 0.36 0.15 0.04 0.03 MEG 087 +088 0.26 -0.02 0.04 0.03 MEG 088 +089 0.25 -0.17 0.04 0.03 MEG 089 +090 0.30 0.13 0.04 0.03 MEG 090 +091 0.30 -0.03 0.04 0.03 MEG 091 +092 0.37 0.05 0.04 0.03 MEG 092 +093 0.14 0.15 0.04 0.03 MEG 093 +094 0.17 0.05 0.04 0.03 MEG 094 +095 0.17 -0.01 0.04 0.03 MEG 095 +096 0.45 0.06 0.04 0.03 MEG 096 +097 0.13 0.03 0.04 0.03 MEG 097 +098 -0.04 0.07 0.04 0.03 MEG 098 +099 0.04 0.07 0.04 0.03 MEG 099 +100 -0.04 0.01 0.04 0.03 MEG 100 +101 0.04 0.02 0.04 0.03 MEG 101 +102 0.36 -0.25 0.04 0.03 MEG 102 +103 0.44 -0.05 0.04 0.03 MEG 103 +104 0.20 -0.23 0.04 0.03 MEG 104 +105 0.08 -0.00 0.04 0.03 MEG 105 +106 -0.04 -0.04 
0.04 0.03 MEG 106 +107 0.00 -0.05 0.04 0.03 MEG 107 +108 0.05 -0.04 0.04 0.03 MEG 108 +109 0.02 0.23 0.04 0.03 MEG 109 +110 -0.03 0.23 0.04 0.03 MEG 110 +111 0.07 0.22 0.04 0.03 MEG 111 +112 0.13 0.09 0.04 0.03 MEG 112 +113 0.18 -0.33 0.04 0.03 MEG 113 +114 -0.10 0.13 0.04 0.03 MEG 114 +115 0.11 -0.43 0.04 0.03 MEG 115 +116 0.29 -0.10 0.04 0.03 MEG 116 +117 -0.06 -0.30 0.04 0.03 MEG 117 +118 0.11 0.13 0.04 0.03 MEG 118 +119 0.21 -0.03 0.04 0.03 MEG 119 +120 0.08 -0.30 0.04 0.03 MEG 120 +121 -0.20 -0.05 0.04 0.03 MEG 121 +122 -0.08 -0.44 0.04 0.03 MEG 122 +123 -0.15 -0.34 0.04 0.03 MEG 123 +124 0.02 -0.37 0.04 0.03 MEG 124 +125 -0.28 -0.11 0.04 0.03 MEG 125 diff --git a/mne/channels/data/layouts/KIT-157.lout b/mne/channels/data/layouts/KIT-157.lout new file mode 100644 index 0000000..2cf5637 --- /dev/null +++ b/mne/channels/data/layouts/KIT-157.lout @@ -0,0 +1,158 @@ +-42.19 43.52 -41.7 28.71 +001 9.78 -14.18 4.00 3.00 MEG 001 +002 3.31 -16.56 4.00 3.00 MEG 002 +003 12.02 -19.42 4.00 3.00 MEG 003 +004 8.08 -21.05 4.00 3.00 MEG 004 +005 4.12 -22.01 4.00 3.00 MEG 005 +006 15.80 -16.63 4.00 3.00 MEG 006 +007 10.21 -12.01 4.00 3.00 MEG 007 +008 7.23 -13.67 4.00 3.00 MEG 008 +009 -22.12 -3.07 4.00 3.00 MEG 009 +010 -13.99 -13.09 4.00 3.00 MEG 010 +011 -21.05 -7.51 4.00 3.00 MEG 011 +012 -18.85 -12.06 4.00 3.00 MEG 012 +013 -0.14 -16.77 4.00 3.00 MEG 013 +014 -6.69 -15.41 4.00 3.00 MEG 014 +015 -10.69 -15.56 4.00 3.00 MEG 015 +016 -3.91 -10.00 4.00 3.00 MEG 016 +017 0.80 -6.66 4.00 3.00 MEG 017 +018 3.74 -20.66 4.00 3.00 MEG 018 +019 15.01 -15.63 4.00 3.00 MEG 019 +020 4.16 -14.75 4.00 3.00 MEG 020 +021 16.72 -0.60 4.00 3.00 MEG 021 +022 14.31 -7.30 4.00 3.00 MEG 022 +023 1.27 -13.23 4.00 3.00 MEG 023 +024 9.63 -10.10 4.00 3.00 MEG 024 +025 -1.74 -14.94 4.00 3.00 MEG 025 +026 -4.68 -14.12 4.00 3.00 MEG 026 +027 -1.65 -8.33 4.00 3.00 MEG 027 +028 -6.53 -8.53 4.00 3.00 MEG 028 +029 -8.52 -6.61 4.00 3.00 MEG 029 +030 -10.18 -4.27 4.00 3.00 MEG 030 +031 -11.14 -1.21 4.00 3.00 MEG 031 +032 -4.02 -18.39 4.00 3.00 MEG 032 +033 19.69 0.13 4.00 3.00 MEG 033 +034 4.03 -8.21 4.00 3.00 MEG 034 +035 3.56 0.14 4.00 3.00 MEG 035 +036 4.19 -12.79 4.00 3.00 MEG 036 +037 19.43 -3.03 4.00 3.00 MEG 037 +038 20.99 -9.54 4.00 3.00 MEG 038 +039 15.93 -11.27 4.00 3.00 MEG 039 +040 22.46 -5.52 4.00 3.00 MEG 040 +041 -9.37 -8.82 4.00 3.00 MEG 041 +042 -6.93 -10.92 4.00 3.00 MEG 042 +043 -1.56 -13.07 4.00 3.00 MEG 043 +044 -7.75 -20.89 4.00 3.00 MEG 044 +045 -11.74 -19.07 4.00 3.00 MEG 045 +046 0.31 -22.23 4.00 3.00 MEG 046 +047 -3.75 -21.89 4.00 3.00 MEG 047 +048 -3.89 -5.28 4.00 3.00 MEG 048 +049 23.23 -0.95 4.00 3.00 MEG 049 +050 13.94 -14.13 4.00 3.00 MEG 050 +051 7.41 -17.72 4.00 3.00 MEG 051 +052 19.50 -8.59 4.00 3.00 MEG 052 +053 18.26 -7.47 4.00 3.00 MEG 053 +054 18.19 -2.34 4.00 3.00 MEG 054 +055 14.76 -9.91 4.00 3.00 MEG 055 +056 21.32 -0.18 4.00 3.00 MEG 056 +057 -1.88 -3.98 4.00 3.00 MEG 057 +058 3.56 -3.73 4.00 3.00 MEG 058 +059 -12.57 -8.25 4.00 3.00 MEG 059 +060 -7.56 -12.70 4.00 3.00 MEG 060 +061 -15.02 -1.73 4.00 3.00 MEG 061 +062 -11.53 -17.47 4.00 3.00 MEG 062 +063 -0.18 -18.90 4.00 3.00 MEG 063 +064 -6.61 -0.05 4.00 3.00 MEG 064 +065 6.73 -9.47 4.00 3.00 MEG 065 +066 1.16 -8.63 4.00 3.00 MEG 066 +067 18.43 8.05 4.00 3.00 MEG 067 +068 16.27 12.00 4.00 3.00 MEG 068 +069 19.53 3.47 4.00 3.00 MEG 069 +070 11.49 5.68 4.00 3.00 MEG 070 +071 12.54 -0.07 4.00 3.00 MEG 071 +072 12.40 3.05 4.00 3.00 MEG 072 +073 -15.98 -9.55 4.00 3.00 MEG 073 +074 -18.65 -1.75 4.00 3.00 MEG 074 +075 -17.81 -5.83 4.00 3.00 MEG 075 
+076 -1.09 0.06 4.00 3.00 MEG 076 +077 -1.11 2.07 4.00 3.00 MEG 077 +078 -17.59 -10.78 4.00 3.00 MEG 078 +079 -20.36 -2.47 4.00 3.00 MEG 079 +080 -16.06 10.29 4.00 3.00 MEG 080 +081 10.71 -5.93 4.00 3.00 MEG 081 +082 12.02 -3.35 4.00 3.00 MEG 082 +083 19.99 8.66 4.00 3.00 MEG 083 +084 15.61 15.53 4.00 3.00 MEG 084 +085 5.76 -4.95 4.00 3.00 MEG 085 +086 12.48 13.62 4.00 3.00 MEG 086 +087 18.03 3.69 4.00 3.00 MEG 087 +088 14.69 11.11 4.00 3.00 MEG 088 +089 -19.42 6.89 4.00 3.00 MEG 089 +090 -16.09 14.39 4.00 3.00 MEG 090 +091 -6.70 -5.77 4.00 3.00 MEG 091 +092 -12.37 -11.31 4.00 3.00 MEG 092 +093 -1.72 9.34 4.00 3.00 MEG 093 +094 -4.12 1.65 4.00 3.00 MEG 094 +095 -18.66 2.58 4.00 3.00 MEG 095 +096 -17.76 6.59 4.00 3.00 MEG 096 +097 8.82 -5.11 4.00 3.00 MEG 097 +098 8.79 -7.85 4.00 3.00 MEG 098 +099 15.43 6.10 4.00 3.00 MEG 099 +100 11.93 11.57 4.00 3.00 MEG 100 +101 16.58 7.80 4.00 3.00 MEG 101 +102 8.27 6.69 4.00 3.00 MEG 102 +103 11.62 -8.00 4.00 3.00 MEG 103 +104 13.11 -5.40 4.00 3.00 MEG 104 +105 -13.38 0.11 4.00 3.00 MEG 105 +106 -12.78 -3.22 4.00 3.00 MEG 106 +107 -12.98 3.35 4.00 3.00 MEG 107 +108 -11.84 6.58 4.00 3.00 MEG 108 +109 -10.08 9.11 4.00 3.00 MEG 109 +110 -16.27 -5.03 4.00 3.00 MEG 110 +111 -11.45 -6.21 4.00 3.00 MEG 111 +112 -0.59 5.83 4.00 3.00 MEG 112 +113 14.18 -2.06 4.00 3.00 MEG 113 +114 14.48 1.15 4.00 3.00 MEG 114 +115 12.68 7.37 4.00 3.00 MEG 115 +116 13.93 4.46 4.00 3.00 MEG 116 +117 8.98 11.57 4.00 3.00 MEG 117 +118 6.35 12.95 4.00 3.00 MEG 118 +119 11.01 9.71 4.00 3.00 MEG 119 +120 0.01 16.08 4.00 3.00 MEG 120 +121 -16.87 2.69 4.00 3.00 MEG 121 +122 -16.02 6.38 4.00 3.00 MEG 122 +123 -14.38 9.83 4.00 3.00 MEG 123 +124 -12.23 12.65 4.00 3.00 MEG 124 +125 -10.14 5.19 4.00 3.00 MEG 125 +126 -5.63 12.72 4.00 3.00 MEG 126 +127 -2.90 13.72 4.00 3.00 MEG 127 +128 -7.93 11.11 4.00 3.00 MEG 128 +129 6.83 14.86 4.00 3.00 MEG 129 +130 7.63 3.51 4.00 3.00 MEG 130 +131 8.56 0.40 4.00 3.00 MEG 131 +132 -2.70 7.01 4.00 3.00 MEG 132 +133 3.09 11.73 4.00 3.00 MEG 133 +134 8.14 9.62 4.00 3.00 MEG 134 +135 2.84 2.47 4.00 3.00 MEG 135 +136 4.05 6.89 4.00 3.00 MEG 136 +137 -6.16 14.64 4.00 3.00 MEG 137 +138 -11.02 2.49 4.00 3.00 MEG 138 +139 -6.78 6.65 4.00 3.00 MEG 139 +140 -6.24 3.18 4.00 3.00 MEG 140 +141 -6.83 9.47 4.00 3.00 MEG 141 +142 -2.48 11.64 4.00 3.00 MEG 142 +143 -17.59 14.92 4.00 3.00 MEG 143 +144 -22.23 2.07 4.00 3.00 MEG 144 +145 3.20 13.71 4.00 3.00 MEG 145 +146 2.06 5.84 4.00 3.00 MEG 146 +147 5.76 1.93 4.00 3.00 MEG 147 +148 23.08 3.86 4.00 3.00 MEG 148 +149 21.96 8.34 4.00 3.00 MEG 149 +150 20.00 12.43 4.00 3.00 MEG 150 +151 17.22 16.08 4.00 3.00 MEG 151 +152 3.91 9.37 4.00 3.00 MEG 152 +153 -21.58 6.32 4.00 3.00 MEG 153 +154 -20.17 10.61 4.00 3.00 MEG 154 +155 -11.01 10.95 4.00 3.00 MEG 155 +156 -14.51 5.43 4.00 3.00 MEG 156 +157 1.28 9.74 4.00 3.00 MEG 157 diff --git a/mne/channels/data/layouts/KIT-160.lay b/mne/channels/data/layouts/KIT-160.lay new file mode 100644 index 0000000..1f5780f --- /dev/null +++ b/mne/channels/data/layouts/KIT-160.lay @@ -0,0 +1,162 @@ +001 -0.0758202152 0.3520500341 0.03188472676 0.02713339699 MEG 001 +002 -0.1261117022 0.328933222 0.03188472676 0.02713339699 MEG 002 +003 -0.1696053658 0.2965692769 0.03188472676 0.02713339699 MEG 003 +004 0.1650030446 0.2798950608 0.03188472676 0.02713339699 MEG 004 +005 -0.1114275357 0.2868555816 0.03188472676 0.02713339699 MEG 005 +006 -0.06544380774 0.2622312709 0.03188472676 0.02713339699 MEG 006 +007 -0.1353647314 0.2073255917 0.03188472676 0.02713339699 MEG 007 +008 -0.17422271 0.1205755843 
0.03188472676 0.02713339699 MEG 008 +009 -0.01368858767 0.2301473849 0.03188472676 0.02713339699 MEG 009 +010 -0.06470805562 0.2189319658 0.03188472676 0.02713339699 MEG 010 +011 -0.1247701784 0.1499178411 0.03188472676 0.02713339699 MEG 011 +012 -0.03961772545 0.1793694653 0.03188472676 0.02713339699 MEG 012 +013 -0.0711276654 0.1599000923 0.03188472676 0.02713339699 MEG 013 +014 -0.09668684076 0.1249745081 0.03188472676 0.02713339699 MEG 014 +015 -0.1103655395 0.08862749713 0.03188472676 0.02713339699 MEG 015 +016 -0.03953495363 0.1363424548 0.03188472676 0.02713339699 MEG 016 +017 -0.1781804786 0.01931847664 0.03188472676 0.02713339699 MEG 017 +018 -0.142520225 -0.06752066402 0.03188472676 0.02713339699 MEG 018 +019 -0.08088893708 -0.1241365481 0.03188472676 0.02713339699 MEG 019 +020 -0.04456843369 -0.1415706457 0.03188472676 0.02713339699 MEG 020 +021 -0.1426655535 0.05244256024 0.03188472676 0.02713339699 MEG 021 +022 -0.1340581452 0.000388349131 0.03188472676 0.02713339699 MEG 022 +023 -0.08512707038 -0.07696214533 0.03188472676 0.02713339699 MEG 023 +024 -0.04736054836 -0.09618399923 0.03188472676 0.02713339699 MEG 024 +025 -0.114643504 -0.04085422212 0.03188472676 0.02713339699 MEG 025 +026 -0.1103503321 0.03823179105 0.03188472676 0.02713339699 MEG 026 +027 -0.03958310463 -0.05556958642 0.03188472676 0.02713339699 MEG 027 +028 -0.06993629917 0.01943095503 0.03188472676 0.02713339699 MEG 028 +029 -0.04398320652 -0.01300040853 0.03188472676 0.02713339699 MEG 029 +030 -0.004227454924 -0.01962159408 0.03188472676 0.02713339699 MEG 030 +031 -0.01056467818 0.1062293634 0.03188472676 0.02713339699 MEG 031 +032 -0.04021127484 0.08385147042 0.03188472676 0.02713339699 MEG 032 +033 -0.3500780541 0.3978039282 0.03188472676 0.02713339699 MEG 033 +034 -0.400516673 0.3077821901 0.03188472676 0.02713339699 MEG 034 +035 -0.4325895921 0.2136911051 0.03188472676 0.02713339699 MEG 035 +036 -0.45 0.1074214926 0.03188472676 0.02713339699 MEG 036 +037 -0.3046138565 0.3570489454 0.03188472676 0.02713339699 MEG 037 +038 -0.3775870934 0.1989319321 0.03188472676 0.02713339699 MEG 038 +039 -0.3470032996 0.2860012743 0.03188472676 0.02713339699 MEG 039 +040 -0.2596101607 0.2361677074 0.03188472676 0.02713339699 MEG 040 +041 -0.3370312654 0.1080933205 0.03188472676 0.02713339699 MEG 041 +042 -0.2054494635 0.2529931344 0.03188472676 0.02713339699 MEG 042 +043 -0.2819761985 0.1711718789 0.03188472676 0.02713339699 MEG 043 +044 -0.2293126541 0.2020325726 0.03188472676 0.02713339699 MEG 044 +045 -0.253186216 0.08822084019 0.03188472676 0.02713339699 MEG 045 +046 -0.177239753 0.2148932642 0.03188472676 0.02713339699 MEG 046 +047 -0.1982663002 0.1636997157 0.03188472676 0.02713339699 MEG 047 +048 -0.2443663193 0.1429437606 0.03188472676 0.02713339699 MEG 048 +049 -0.3888843678 0.1059383909 0.03188472676 0.02713339699 MEG 049 +050 -0.4270282413 -0.09637491351 0.03188472676 0.02713339699 MEG 050 +051 -0.2842037041 -0.02907823435 0.03188472676 0.02713339699 MEG 051 +052 -0.3447270537 -0.2595887593 0.03188472676 0.02713339699 MEG 052 +053 -0.3909909615 0.01655882049 0.03188472676 0.02713339699 MEG 053 +054 -0.2988307343 -0.1206055812 0.03188472676 0.02713339699 MEG 054 +055 -0.2625165926 -0.18469877 0.03188472676 0.02713339699 MEG 055 +056 -0.3742205763 -0.06701211297 0.03188472676 0.02713339699 MEG 056 +057 -0.3368815045 0.02914339448 0.03188472676 0.02713339699 MEG 057 +058 -0.2614922293 -0.09693316038 0.03188472676 0.02713339699 MEG 058 +059 -0.2296354398 -0.1520887173 0.03188472676 0.02713339699 MEG 059 +060 
-0.2424341314 -0.03356215166 0.03188472676 0.02713339699 MEG 060 +061 -0.1876464844 -0.1390883676 0.03188472676 0.02713339699 MEG 061 +062 -0.2141382597 0.06121102293 0.03188472676 0.02713339699 MEG 062 +063 -0.210559287 0.002243140577 0.03188472676 0.02713339699 MEG 063 +064 -0.1972138638 -0.04829819556 0.03188472676 0.02713339699 MEG 064 +065 0.1239897025 0.3184822507 0.03188472676 0.02713339699 MEG 065 +066 0.07602269198 0.346841814 0.03188472676 0.02713339699 MEG 066 +067 0.02730949028 0.3618289046 0.03188472676 0.02713339699 MEG 067 +068 -0.02876209065 0.3665275653 0.03188472676 0.02713339699 MEG 068 +069 0.06023566248 0.305037035 0.03188472676 0.02713339699 MEG 069 +070 0.01553893996 0.3208156125 0.03188472676 0.02713339699 MEG 070 +071 0.1455353008 0.1519564037 0.03188472676 0.02713339699 MEG 071 +072 0.09261086754 0.2300225572 0.03188472676 0.02713339699 MEG 072 +073 0.01800727232 0.2722816956 0.03188472676 0.02713339699 MEG 073 +074 0.09471660492 0.1660243591 0.03188472676 0.02713339699 MEG 074 +075 0.02256442482 0.2241822666 0.03188472676 0.02713339699 MEG 075 +076 0.1172275823 0.121354496 0.03188472676 0.02713339699 MEG 076 +077 0.06434989605 0.1443350384 0.03188472676 0.02713339699 MEG 077 +078 0.03192340214 0.1736460766 0.03188472676 0.02713339699 MEG 078 +079 0.002050178715 0.1879975831 0.03188472676 0.02713339699 MEG 079 +080 0.003697062517 0.143421051 0.03188472676 0.02713339699 MEG 080 +081 -0.003899772644 -0.1490601771 0.03188472676 0.02713339699 MEG 081 +082 0.0711394085 -0.1177609441 0.03188472676 0.02713339699 MEG 082 +083 0.1339233002 -0.04641972764 0.03188472676 0.02713339699 MEG 083 +084 0.1624045334 0.04808542023 0.03188472676 0.02713339699 MEG 084 +085 0.03165333222 -0.09469832945 0.03188472676 0.02713339699 MEG 085 +086 0.07137560881 -0.06228631109 0.03188472676 0.02713339699 MEG 086 +087 0.1185350219 0.01900269558 0.03188472676 0.02713339699 MEG 087 +088 0.1266742656 0.06930579768 0.03188472676 0.02713339699 MEG 088 +089 -0.004420218989 -0.06278528823 0.03188472676 0.02713339699 MEG 089 +090 0.06173501644 -0.0229233209 0.03188472676 0.02713339699 MEG 090 +091 0.08381840152 0.01449327322 0.03188472676 0.02713339699 MEG 091 +092 0.02911019425 -0.004626517545 0.03188472676 0.02713339699 MEG 092 +093 0.05293614742 0.03703628974 0.03188472676 0.02713339699 MEG 093 +094 0.08589158435 0.06075797622 0.03188472676 0.02713339699 MEG 094 +095 -0.007916726978 0.01526702488 0.03188472676 0.02713339699 MEG 095 +096 0.02152774438 0.08873530965 0.03188472676 0.02713339699 MEG 096 +097 0.4428288832 0.1535592899 0.03188472676 0.02713339699 MEG 097 +098 0.4162971034 0.267171892 0.03188472676 0.02713339699 MEG 098 +099 0.3668411201 0.3681231645 0.03188472676 0.02713339699 MEG 099 +100 0.3105057204 0.45 0.03188472676 0.02713339699 MEG 100 +101 0.3522874782 0.2435209125 0.03188472676 0.02713339699 MEG 101 +102 0.2611099363 0.4005591579 0.03188472676 0.02713339699 MEG 102 +103 0.3199035742 0.1404206704 0.03188472676 0.02713339699 MEG 103 +104 0.2620111685 0.2979083124 0.03188472676 0.02713339699 MEG 104 +105 0.2183237036 0.3586832133 0.03188472676 0.02713339699 MEG 105 +106 0.253424964 0.2038387029 0.03188472676 0.02713339699 MEG 106 +107 0.1821371767 0.3235092766 0.03188472676 0.02713339699 MEG 107 +108 0.2320142778 0.1055366971 0.03188472676 0.02713339699 MEG 108 +109 0.1969983757 0.2287392657 0.03188472676 0.02713339699 MEG 109 +110 0.1892169645 0.138858437 0.03188472676 0.02713339699 MEG 110 +111 0.1718069319 0.1949879663 0.03188472676 0.02713339699 MEG 111 +112 0.1412427238 
0.2390846129 0.03188472676 0.02713339699 MEG 112 +113 0.3413341674 -0.2749014578 0.03188472676 0.02713339699 MEG 113 +114 0.3957163081 -0.1867381122 0.03188472676 0.02713339699 MEG 114 +115 0.4343284389 -0.0791376981 0.03188472676 0.02713339699 MEG 115 +116 0.45 0.04115848657 0.03188472676 0.02713339699 MEG 116 +117 0.294108122 -0.2248146657 0.03188472676 0.02713339699 MEG 117 +118 0.3408651095 -0.144258791 0.03188472676 0.02713339699 MEG 118 +119 0.3829160873 0.05059902865 0.03188472676 0.02713339699 MEG 119 +120 0.3147171717 -0.02712556599 0.03188472676 0.02713339699 MEG 120 +121 0.2904098027 -0.1079793618 0.03188472676 0.02713339699 MEG 121 +122 0.2489284377 -0.08302604569 0.03188472676 0.02713339699 MEG 122 +123 0.2791450822 0.06065203717 0.03188472676 0.02713339699 MEG 123 +124 0.1699476764 -0.1323891552 0.03188472676 0.02713339699 MEG 124 +125 0.2213406675 -0.02048593019 0.03188472676 0.02713339699 MEG 125 +126 0.1793293141 -0.03692175528 0.03188472676 0.02713339699 MEG 126 +127 0.2340431716 0.04272096725 0.03188472676 0.02713339699 MEG 127 +128 0.1989990214 0.07992534312 0.03188472676 0.02713339699 MEG 128 +129 -0.2893372271 -0.3287872551 0.03188472676 0.02713339699 MEG 129 +130 -0.2271848223 -0.3869397712 0.03188472676 0.02713339699 MEG 130 +131 -0.1541857375 -0.4262216107 0.03188472676 0.02713339699 MEG 131 +132 -0.07352758894 -0.45 0.03188472676 0.02713339699 MEG 132 +133 -0.2537949872 -0.2821367359 0.03188472676 0.02713339699 MEG 133 +134 -0.05789427224 -0.3854591968 0.03188472676 0.02713339699 MEG 134 +135 -0.2180547699 -0.2381599549 0.03188472676 0.02713339699 MEG 135 +136 -0.1654458556 -0.2836078687 0.03188472676 0.02713339699 MEG 136 +137 -0.1282115583 -0.3636295479 0.03188472676 0.02713339699 MEG 137 +138 -0.1869048087 -0.2008317859 0.03188472676 0.02713339699 MEG 138 +139 -0.092414085 -0.2712662931 0.03188472676 0.02713339699 MEG 139 +140 -0.04464737067 -0.3262626614 0.03188472676 0.02713339699 MEG 140 +141 -0.1084802139 -0.2080301215 0.03188472676 0.02713339699 MEG 141 +142 -0.008347885446 -0.2337992621 0.03188472676 0.02713339699 MEG 142 +143 -0.1346335691 -0.1372039628 0.03188472676 0.02713339699 MEG 143 +144 -0.05019800217 -0.1837431338 0.03188472676 0.02713339699 MEG 144 +145 0.08934861049 -0.3726540907 0.03188472676 0.02713339699 MEG 145 +146 0.106172944 -0.4435639299 0.03188472676 0.02713339699 MEG 146 +147 0.2218612721 0.2682623426 0.03188472676 0.02713339699 MEG 147 +148 0.2670027616 -0.3555067924 0.03188472676 0.02713339699 MEG 148 +149 0.01625933145 -0.3877305948 0.03188472676 0.02713339699 MEG 149 +150 0.1626843509 -0.342433819 0.03188472676 0.02713339699 MEG 150 +151 0.07960240502 -0.3134489525 0.03188472676 0.02713339699 MEG 151 +152 0.1456745581 -0.2843789795 0.03188472676 0.02713339699 MEG 152 +153 0.2343957441 -0.2951725192 0.03188472676 0.02713339699 MEG 153 +154 0.01783071962 -0.2738066839 0.03188472676 0.02713339699 MEG 154 +155 0.1274451621 -0.2352949445 0.03188472676 0.02713339699 MEG 155 +156 0.1746647823 -0.1970760538 0.03188472676 0.02713339699 MEG 156 +157 0.0872449245 -0.2076405522 0.03188472676 0.02713339699 MEG 157 +158 0.03888945369 -0.183846741 0.03188472676 0.02713339699 MEG 158 +159 0.08527772847 -0.1590694194 0.03188472676 0.02713339699 MEG 159 +160 0.1230026134 -0.1283845973 0.03188472676 0.02713339699 MEG 160 +161 -0.547000000 -0.5000000000 0.03188472676 0.02713339699 COMNT +162 -0.547000000 -0.5000000000 0.03188472676 0.02713339699 SCALE diff --git a/mne/channels/data/layouts/KIT-AD.lout b/mne/channels/data/layouts/KIT-AD.lout new file 
mode 100644 index 0000000..e06356a --- /dev/null +++ b/mne/channels/data/layouts/KIT-AD.lout @@ -0,0 +1,209 @@ + 0.00 1.00 0.00 1.00 +001 0.61 0.56 0.02 0.04 MEG 001 +002 0.59 0.50 0.02 0.04 MEG 002 +003 0.48 0.42 0.02 0.04 MEG 003 +004 0.52 0.43 0.02 0.04 MEG 004 +005 0.43 0.44 0.02 0.04 MEG 005 +006 0.39 0.48 0.02 0.04 MEG 006 +007 0.52 0.70 0.02 0.04 MEG 007 +008 0.58 0.59 0.02 0.04 MEG 008 +009 0.47 0.71 0.02 0.04 MEG 009 +010 0.53 0.49 0.02 0.04 MEG 010 +011 0.57 0.53 0.02 0.04 MEG 011 +012 0.43 0.50 0.02 0.04 MEG 012 +013 0.40 0.55 0.02 0.04 MEG 013 +014 0.57 0.39 0.02 0.04 MEG 014 +015 0.38 0.41 0.02 0.04 MEG 015 +016 0.48 0.37 0.02 0.04 MEG 016 +017 0.16 0.84 0.02 0.04 MEG 017 +018 0.53 0.63 0.02 0.04 MEG 018 +019 0.48 0.53 0.02 0.04 MEG 019 +020 0.44 0.63 0.02 0.04 MEG 020 +021 0.53 0.56 0.02 0.04 MEG 021 +022 0.44 0.57 0.02 0.04 MEG 022 +023 0.56 0.46 0.02 0.04 MEG 023 +024 0.59 0.68 0.02 0.04 MEG 024 +025 0.34 0.86 0.02 0.04 MEG 025 +026 0.39 0.89 0.02 0.04 MEG 026 +027 0.50 0.91 0.02 0.04 MEG 027 +028 0.61 0.87 0.02 0.04 MEG 028 +029 0.66 0.84 0.02 0.04 MEG 029 +030 0.59 0.76 0.02 0.04 MEG 030 +031 0.39 0.62 0.02 0.04 MEG 031 +032 0.55 0.85 0.02 0.04 MEG 032 +033 0.28 0.39 0.02 0.04 MEG 033 +034 0.37 0.52 0.02 0.04 MEG 034 +035 0.36 0.59 0.02 0.04 MEG 035 +036 0.38 0.70 0.02 0.04 MEG 036 +037 0.07 0.87 0.02 0.04 MEG 037 +038 0.24 0.61 0.02 0.04 MEG 038 +039 0.32 0.68 0.02 0.04 MEG 039 +040 0.30 0.81 0.02 0.04 MEG 040 +041 0.43 0.96 0.02 0.04 MEG 041 +042 0.55 0.95 0.02 0.04 MEG 042 +043 0.42 0.74 0.02 0.04 MEG 043 +044 0.56 0.72 0.02 0.04 MEG 044 +045 0.47 0.76 0.02 0.04 MEG 045 +046 0.52 0.75 0.02 0.04 MEG 046 +047 0.45 0.85 0.02 0.04 MEG 047 +048 0.40 0.79 0.02 0.04 MEG 048 +049 0.24 0.79 0.02 0.04 MEG 049 +050 0.21 0.46 0.02 0.04 MEG 050 +051 0.32 0.76 0.02 0.04 MEG 051 +052 0.20 0.63 0.02 0.04 MEG 052 +053 0.27 0.33 0.02 0.04 MEG 053 +054 0.17 0.74 0.02 0.04 MEG 054 +055 0.05 0.65 0.02 0.04 MEG 055 +056 0.28 0.63 0.02 0.04 MEG 056 +057 0.70 0.62 0.02 0.04 MEG 057 +058 0.94 0.38 0.02 0.04 MEG 058 +059 0.91 0.73 0.02 0.04 MEG 059 +060 0.82 0.93 0.02 0.04 MEG 060 +061 0.93 0.63 0.02 0.04 MEG 061 +062 0.75 0.78 0.02 0.04 MEG 062 +063 0.69 0.78 0.02 0.04 MEG 063 +064 0.43 0.00 0.02 0.04 MEG 064 +065 0.18 0.40 0.02 0.04 MEG 065 +066 0.19 0.29 0.02 0.04 MEG 066 +067 0.15 0.56 0.02 0.04 MEG 067 +068 0.33 0.53 0.02 0.04 MEG 068 +069 0.35 0.47 0.02 0.04 MEG 069 +070 0.25 0.89 0.02 0.04 MEG 070 +071 0.24 0.53 0.02 0.04 MEG 071 +072 0.16 0.95 0.02 0.04 MEG 072 +073 0.67 0.75 0.02 0.04 MEG 073 +074 0.74 0.86 0.02 0.04 MEG 074 +075 0.81 0.71 0.02 0.04 MEG 075 +076 0.78 0.62 0.02 0.04 MEG 076 +077 0.65 0.65 0.02 0.04 MEG 077 +078 0.83 0.81 0.02 0.04 MEG 078 +079 0.82 0.53 0.02 0.04 MEG 079 +080 0.78 0.36 0.02 0.04 MEG 080 +081 0.56 0.65 0.02 0.04 MEG 081 +082 0.35 0.74 0.02 0.04 MEG 082 +083 0.21 0.71 0.02 0.04 MEG 083 +084 0.12 0.75 0.02 0.04 MEG 084 +085 0.11 0.66 0.02 0.04 MEG 085 +086 0.21 0.92 0.02 0.04 MEG 086 +087 0.13 0.96 0.02 0.04 MEG 087 +088 0.03 0.76 0.02 0.04 MEG 088 +089 0.66 0.89 0.02 0.04 MEG 089 +090 0.61 0.93 0.02 0.04 MEG 090 +091 0.63 0.79 0.02 0.04 MEG 091 +092 0.71 0.84 0.02 0.04 MEG 092 +093 0.44 0.91 0.02 0.04 MEG 093 +094 0.56 0.89 0.02 0.04 MEG 094 +095 0.42 0.68 0.02 0.04 MEG 095 +096 0.54 0.79 0.02 0.04 MEG 096 +097 0.11 0.86 0.02 0.04 MEG 097 +098 0.14 0.36 0.02 0.04 MEG 098 +099 0.32 0.60 0.02 0.04 MEG 099 +100 0.25 0.45 0.02 0.04 MEG 100 +101 0.19 0.54 0.02 0.04 MEG 101 +102 0.27 0.85 0.02 0.04 MEG 102 +103 0.27 0.75 0.02 0.04 MEG 103 +104 0.01 0.64 0.02 
0.04 MEG 104 +105 0.69 0.68 0.02 0.04 MEG 105 +106 0.88 0.82 0.02 0.04 MEG 106 +107 0.45 0.80 0.02 0.04 MEG 107 +108 0.50 0.86 0.02 0.04 MEG 108 +109 0.36 0.80 0.02 0.04 MEG 109 +110 0.49 0.96 0.02 0.04 MEG 110 +111 0.37 0.93 0.02 0.04 MEG 111 +112 0.32 0.90 0.02 0.04 MEG 112 +113 0.07 0.42 0.02 0.04 MEG 113 +114 0.73 0.72 0.02 0.04 MEG 114 +115 0.19 0.12 0.02 0.04 MEG 115 +116 0.01 0.51 0.02 0.04 MEG 116 +117 0.07 0.29 0.02 0.04 MEG 117 +118 0.16 0.47 0.02 0.04 MEG 118 +119 0.22 0.33 0.02 0.04 MEG 119 +120 0.10 0.54 0.02 0.04 MEG 120 +121 0.78 0.89 0.02 0.04 MEG 121 +122 0.87 0.63 0.02 0.04 MEG 122 +123 0.86 0.72 0.02 0.04 MEG 123 +124 0.77 0.70 0.02 0.04 MEG 124 +125 0.63 0.71 0.02 0.04 MEG 125 +126 0.89 0.27 0.02 0.04 MEG 126 +127 0.97 0.62 0.02 0.04 MEG 127 +128 0.83 0.62 0.02 0.04 MEG 128 +129 0.77 0.11 0.02 0.04 MEG 129 +130 0.86 0.95 0.02 0.04 MEG 130 +131 0.71 0.42 0.02 0.04 MEG 131 +132 0.78 0.53 0.02 0.04 MEG 132 +133 0.65 0.57 0.02 0.04 MEG 133 +134 0.16 0.67 0.02 0.04 MEG 134 +135 0.29 0.71 0.02 0.04 MEG 135 +136 0.16 0.23 0.02 0.04 MEG 136 +137 0.82 0.34 0.02 0.04 MEG 137 +138 0.87 0.52 0.02 0.04 MEG 138 +139 0.81 0.22 0.02 0.04 MEG 139 +140 0.90 0.40 0.02 0.04 MEG 140 +141 0.97 0.49 0.02 0.04 MEG 141 +142 0.74 0.30 0.02 0.04 MEG 142 +143 0.81 0.44 0.02 0.04 MEG 143 +144 0.95 0.75 0.02 0.04 MEG 144 +145 0.13 0.19 0.02 0.04 MEG 145 +146 0.28 0.56 0.02 0.04 MEG 146 +147 0.74 0.15 0.02 0.04 MEG 147 +148 0.10 0.33 0.02 0.04 MEG 148 +149 0.35 0.02 0.02 0.04 MEG 149 +150 0.03 0.39 0.02 0.04 MEG 150 +151 0.27 0.06 0.02 0.04 MEG 151 +152 0.31 0.43 0.02 0.04 MEG 152 +153 0.77 0.26 0.02 0.04 MEG 153 +154 0.67 0.10 0.02 0.04 MEG 154 +155 0.76 0.44 0.02 0.04 MEG 155 +156 0.83 0.18 0.02 0.04 MEG 156 +157 0.61 0.02 0.02 0.04 MEG 157 +158 0.91 0.86 0.02 0.04 MEG 158 +159 0.92 0.51 0.02 0.04 MEG 159 +160 0.86 0.30 0.02 0.04 MEG 160 +161 0.44 0.12 0.02 0.04 MEG 161 +162 0.37 0.30 0.02 0.04 MEG 162 +163 0.30 0.17 0.02 0.04 MEG 163 +164 0.36 0.25 0.02 0.04 MEG 164 +165 0.41 0.22 0.02 0.04 MEG 165 +166 0.31 0.28 0.02 0.04 MEG 166 +167 0.05 0.53 0.02 0.04 MEG 167 +168 0.08 0.76 0.02 0.04 MEG 168 +169 0.69 0.24 0.02 0.04 MEG 169 +170 0.57 0.18 0.02 0.04 MEG 170 +171 0.50 0.17 0.02 0.04 MEG 171 +172 0.64 0.20 0.02 0.04 MEG 172 +173 0.65 0.42 0.02 0.04 MEG 173 +174 0.69 0.53 0.02 0.04 MEG 174 +175 0.61 0.44 0.02 0.04 MEG 175 +176 0.70 0.32 0.02 0.04 MEG 176 +177 0.44 0.17 0.02 0.04 MEG 177 +178 0.38 0.18 0.02 0.04 MEG 178 +179 0.32 0.22 0.02 0.04 MEG 179 +180 0.44 0.06 0.02 0.04 MEG 180 +181 0.22 0.16 0.02 0.04 MEG 181 +182 0.36 0.07 0.02 0.04 MEG 182 +183 0.28 0.11 0.02 0.04 MEG 183 +184 0.42 0.27 0.02 0.04 MEG 184 +185 0.52 0.32 0.02 0.04 MEG 185 +186 0.57 0.33 0.02 0.04 MEG 186 +187 0.47 0.32 0.02 0.04 MEG 187 +188 0.62 0.37 0.02 0.04 MEG 188 +189 0.73 0.49 0.02 0.04 MEG 189 +190 0.67 0.36 0.02 0.04 MEG 190 +191 0.74 0.57 0.02 0.04 MEG 191 +192 0.64 0.49 0.02 0.04 MEG 192 +193 0.59 0.06 0.02 0.04 MEG 193 +194 0.52 -0.00 0.02 0.04 MEG 194 +195 0.58 0.29 0.02 0.04 MEG 195 +196 0.53 0.27 0.02 0.04 MEG 196 +197 0.47 0.26 0.02 0.04 MEG 197 +198 0.34 0.39 0.02 0.04 MEG 198 +199 0.42 0.33 0.02 0.04 MEG 199 +200 0.38 0.35 0.02 0.04 MEG 200 +201 0.53 0.22 0.02 0.04 MEG 201 +202 0.59 0.24 0.02 0.04 MEG 202 +203 0.65 0.27 0.02 0.04 MEG 203 +204 0.27 0.26 0.02 0.04 MEG 204 +205 0.51 0.11 0.02 0.04 MEG 205 +206 0.65 0.15 0.02 0.04 MEG 206 +207 0.51 0.05 0.02 0.04 MEG 207 +208 0.69 0.05 0.02 0.04 MEG 208 diff --git a/mne/channels/data/layouts/KIT-AS-2008.lout b/mne/channels/data/layouts/KIT-AS-2008.lout new 
file mode 100644 index 0000000..84b135e --- /dev/null +++ b/mne/channels/data/layouts/KIT-AS-2008.lout @@ -0,0 +1,158 @@ + 0.00 0.00 0.04 0.02 +000 0.43 0.98 0.10 0.05 MEG 001 +001 0.38 0.96 0.10 0.05 MEG 002 +002 0.32 0.92 0.10 0.05 MEG 003 +003 0.44 0.93 0.10 0.05 MEG 004 +004 0.39 0.91 0.10 0.05 MEG 005 +005 0.45 0.88 0.10 0.05 MEG 006 +006 0.36 0.82 0.10 0.05 MEG 007 +007 0.32 0.78 0.10 0.05 MEG 008 +008 0.33 0.68 0.10 0.05 MEG 009 +009 0.40 0.79 0.10 0.05 MEG 010 +010 0.36 0.74 0.10 0.05 MEG 011 +011 0.48 0.78 0.10 0.05 MEG 012 +012 0.39 0.71 0.10 0.05 MEG 013 +013 0.37 0.66 0.10 0.05 MEG 014 +014 0.48 0.72 0.10 0.05 MEG 015 +015 0.44 0.69 0.10 0.05 MEG 016 +016 0.28 0.57 0.10 0.05 MEG 017 +017 0.29 0.51 0.10 0.05 MEG 018 +018 0.32 0.45 0.10 0.05 MEG 019 +019 0.40 0.36 0.10 0.05 MEG 020 +020 0.46 0.44 0.10 0.05 MEG 021 +021 0.33 0.60 0.10 0.05 MEG 022 +022 0.34 0.53 0.10 0.05 MEG 023 +023 0.41 0.42 0.10 0.05 MEG 024 +024 0.46 0.51 0.10 0.05 MEG 025 +025 0.38 0.59 0.10 0.05 MEG 026 +026 0.50 0.38 0.10 0.05 MEG 027 +027 0.41 0.48 0.10 0.05 MEG 028 +028 0.42 0.56 0.10 0.05 MEG 029 +029 0.51 0.49 0.10 0.05 MEG 030 +030 0.46 0.58 0.10 0.05 MEG 031 +031 0.47 0.64 0.10 0.05 MEG 032 +032 0.12 0.99 0.10 0.05 MEG 033 +033 0.07 0.90 0.10 0.05 MEG 034 +034 0.11 0.88 0.10 0.05 MEG 035 +035 0.13 0.77 0.10 0.05 MEG 036 +036 0.16 0.97 0.10 0.05 MEG 037 +037 0.07 0.78 0.10 0.05 MEG 038 +038 0.20 0.94 0.10 0.05 MEG 039 +039 0.16 0.86 0.10 0.05 MEG 040 +040 0.10 0.67 0.10 0.05 MEG 041 +041 0.25 0.90 0.10 0.05 MEG 042 +042 0.20 0.83 0.10 0.05 MEG 043 +043 0.17 0.76 0.10 0.05 MEG 044 +044 0.24 0.80 0.10 0.05 MEG 045 +045 0.20 0.65 0.10 0.05 MEG 046 +046 0.29 0.82 0.10 0.05 MEG 047 +047 0.25 0.69 0.10 0.05 MEG 048 +048 0.00 0.52 0.10 0.05 MEG 049 +049 0.02 0.40 0.10 0.05 MEG 050 +050 0.07 0.30 0.10 0.05 MEG 051 +051 0.12 0.20 0.10 0.05 MEG 052 +052 0.05 0.53 0.10 0.05 MEG 053 +053 0.07 0.42 0.10 0.05 MEG 054 +054 0.16 0.24 0.10 0.05 MEG 055 +055 0.10 0.56 0.10 0.05 MEG 056 +056 0.15 0.37 0.10 0.05 MEG 057 +057 0.16 0.56 0.10 0.05 MEG 058 +058 0.17 0.48 0.10 0.05 MEG 059 +059 0.20 0.40 0.10 0.05 MEG 060 +060 0.21 0.48 0.10 0.05 MEG 061 +061 0.28 0.34 0.10 0.05 MEG 062 +062 0.24 0.61 0.10 0.05 MEG 063 +063 0.30 0.39 0.10 0.05 MEG 064 +064 0.67 0.93 0.10 0.05 MEG 065 +065 0.62 0.96 0.10 0.05 MEG 066 +066 0.56 0.98 0.10 0.05 MEG 067 +067 0.50 0.99 0.10 0.05 MEG 068 +068 0.60 0.86 0.10 0.05 MEG 069 +069 0.56 0.93 0.10 0.05 MEG 070 +070 0.71 0.72 0.10 0.05 MEG 071 +071 0.65 0.83 0.10 0.05 MEG 072 +072 0.56 0.88 0.10 0.05 MEG 073 +073 0.65 0.76 0.10 0.05 MEG 074 +074 0.56 0.83 0.10 0.05 MEG 075 +075 0.64 0.66 0.10 0.05 MEG 076 +076 0.62 0.71 0.10 0.05 MEG 077 +077 0.53 0.78 0.10 0.05 MEG 078 +078 0.57 0.68 0.10 0.05 MEG 079 +079 0.53 0.72 0.10 0.05 MEG 080 +080 0.50 0.33 0.10 0.05 MEG 081 +081 0.55 0.34 0.10 0.05 MEG 082 +082 0.60 0.36 0.10 0.05 MEG 083 +083 0.69 0.44 0.10 0.05 MEG 084 +084 0.72 0.57 0.10 0.05 MEG 085 +085 0.61 0.42 0.10 0.05 MEG 086 +086 0.67 0.53 0.10 0.05 MEG 087 +087 0.69 0.61 0.10 0.05 MEG 088 +088 0.56 0.45 0.10 0.05 MEG 089 +089 0.60 0.48 0.10 0.05 MEG 090 +090 0.64 0.59 0.10 0.05 MEG 091 +091 0.56 0.51 0.10 0.05 MEG 092 +092 0.59 0.55 0.10 0.05 MEG 093 +093 0.51 0.55 0.10 0.05 MEG 094 +094 0.54 0.58 0.10 0.05 MEG 095 +095 0.54 0.64 0.10 0.05 MEG 096 +096 1.00 0.69 0.10 0.05 MEG 097 +097 0.97 0.81 0.10 0.05 MEG 098 +098 0.93 0.92 0.10 0.05 MEG 099 +099 0.87 1.00 0.10 0.05 MEG 100 +100 0.93 0.80 0.10 0.05 MEG 101 +101 0.83 0.97 0.10 0.05 MEG 102 +102 0.89 0.68 0.10 0.05 MEG 103 +103 0.84 
0.87 0.10 0.05 MEG 104 +104 0.79 0.94 0.10 0.05 MEG 105 +105 0.85 0.68 0.10 0.05 MEG 106 +106 0.83 0.76 0.10 0.05 MEG 107 +107 0.76 0.91 0.10 0.05 MEG 108 +108 0.74 0.76 0.10 0.05 MEG 109 +109 0.76 0.81 0.10 0.05 MEG 110 +110 0.76 0.69 0.10 0.05 MEG 111 +111 0.71 0.83 0.10 0.05 MEG 112 +112 0.88 0.22 0.10 0.05 MEG 113 +113 0.94 0.32 0.10 0.05 MEG 114 +114 0.98 0.42 0.10 0.05 MEG 115 +115 1.00 0.54 0.10 0.05 MEG 116 +116 0.84 0.26 0.10 0.05 MEG 117 +117 0.93 0.45 0.10 0.05 MEG 118 +118 0.95 0.56 0.10 0.05 MEG 119 +119 0.81 0.30 0.10 0.05 MEG 120 +120 0.85 0.38 0.10 0.05 MEG 121 +121 0.81 0.41 0.10 0.05 MEG 122 +122 0.83 0.49 0.10 0.05 MEG 123 +123 0.85 0.58 0.10 0.05 MEG 124 +124 0.73 0.35 0.10 0.05 MEG 125 +125 0.79 0.49 0.10 0.05 MEG 126 +126 0.74 0.46 0.10 0.05 MEG 127 +127 0.77 0.61 0.10 0.05 MEG 128 +128 0.20 0.12 0.10 0.05 MEG 129 +129 0.37 0.02 0.10 0.05 MEG 130 +130 0.46 0.00 0.10 0.05 MEG 131 +131 0.30 0.11 0.10 0.05 MEG 132 +132 0.47 0.06 0.10 0.05 MEG 133 +133 0.25 0.21 0.10 0.05 MEG 134 +134 0.32 0.17 0.10 0.05 MEG 135 +135 0.39 0.13 0.10 0.05 MEG 136 +136 0.29 0.26 0.10 0.05 MEG 137 +137 0.41 0.19 0.10 0.05 MEG 138 +138 0.47 0.18 0.10 0.05 MEG 139 +139 0.39 0.26 0.10 0.05 MEG 140 +140 0.50 0.22 0.10 0.05 MEG 141 +141 0.33 0.29 0.10 0.05 MEG 142 +142 0.45 0.29 0.10 0.05 MEG 143 +143 0.50 0.28 0.10 0.05 MEG 144 +144 0.65 0.03 0.10 0.05 MEG 145 +145 0.82 0.13 0.10 0.05 MEG 146 +146 0.55 0.06 0.10 0.05 MEG 147 +147 0.71 0.12 0.10 0.05 MEG 148 +148 0.62 0.14 0.10 0.05 MEG 149 +149 0.69 0.18 0.10 0.05 MEG 150 +150 0.76 0.23 0.10 0.05 MEG 151 +151 0.54 0.18 0.10 0.05 MEG 152 +152 0.61 0.20 0.10 0.05 MEG 153 +153 0.73 0.27 0.10 0.05 MEG 154 +154 0.63 0.25 0.10 0.05 MEG 155 +155 0.56 0.28 0.10 0.05 MEG 156 +156 0.67 0.35 0.10 0.05 MEG 157 diff --git a/mne/channels/data/layouts/KIT-UMD-3.lout b/mne/channels/data/layouts/KIT-UMD-3.lout new file mode 100644 index 0000000..72cd69f --- /dev/null +++ b/mne/channels/data/layouts/KIT-UMD-3.lout @@ -0,0 +1,158 @@ + -25.00 28.00 -21.35 23.75 +000 -23.42 20.48 3.20 2.40 MEG 001 +001 -22.32 15.16 3.20 2.40 MEG 002 +002 -24.20 10.24 3.20 2.40 MEG 003 +003 -25.00 5.27 3.20 2.40 MEG 004 +004 -24.75 -0.21 3.20 2.40 MEG 005 +005 -23.41 -5.22 3.20 2.40 MEG 006 +006 -22.35 -11.37 3.20 2.40 MEG 007 +007 -14.06 -15.64 3.20 2.40 MEG 008 +008 -15.12 -18.15 3.20 2.40 MEG 009 +009 -11.26 -20.73 3.20 2.40 MEG 010 +010 -6.28 -20.94 3.20 2.40 MEG 011 +011 -2.04 -21.35 3.20 2.40 MEG 012 +012 2.04 -21.35 3.20 2.40 MEG 013 +013 6.28 -20.94 3.20 2.40 MEG 014 +014 11.26 -20.73 3.20 2.40 MEG 015 +015 15.12 -18.15 3.20 2.40 MEG 016 +016 19.41 -14.06 3.20 2.40 MEG 017 +017 22.35 -11.37 3.20 2.40 MEG 018 +018 24.06 -3.70 3.20 2.40 MEG 019 +019 24.23 1.80 3.20 2.40 MEG 020 +020 24.80 5.19 3.20 2.40 MEG 021 +021 22.03 13.42 3.20 2.40 MEG 022 +022 21.58 16.68 3.20 2.40 MEG 023 +023 23.42 20.48 3.20 2.40 MEG 024 +024 20.15 19.33 3.20 2.40 MEG 025 +025 7.46 -2.58 3.20 2.40 MEG 026 +026 22.86 7.70 3.20 2.40 MEG 027 +027 20.76 2.91 3.20 2.40 MEG 028 +028 19.70 -8.80 3.20 2.40 MEG 029 +029 3.41 -5.91 3.20 2.40 MEG 030 +030 14.06 -15.64 3.20 2.40 MEG 031 +031 0.12 -5.34 3.20 2.40 MEG 032 +032 1.80 -18.87 3.20 2.40 MEG 033 +033 -1.80 -18.87 3.20 2.40 MEG 034 +034 -10.12 -18.16 3.20 2.40 MEG 035 +035 -3.41 -5.91 3.20 2.40 MEG 036 +036 -18.35 -13.97 3.20 2.40 MEG 037 +037 -19.70 -8.80 3.20 2.40 MEG 038 +038 -20.76 2.91 3.20 2.40 MEG 039 +039 -22.86 7.70 3.20 2.40 MEG 040 +040 -7.46 -2.58 3.20 2.40 MEG 041 +041 -20.15 19.33 3.20 2.40 MEG 042 +042 -16.84 18.53 3.20 2.40 MEG 043 +043 
-18.55 14.46 3.20 2.40 MEG 044 +044 -20.31 10.64 3.20 2.40 MEG 045 +045 -10.05 0.17 3.20 2.40 MEG 046 +046 -20.62 -2.66 3.20 2.40 MEG 047 +047 -17.20 -6.26 3.20 2.40 MEG 048 +048 -16.21 -11.50 3.20 2.40 MEG 049 +049 -8.92 -15.60 3.20 2.40 MEG 050 +050 -5.79 -18.42 3.20 2.40 MEG 051 +051 -1.62 -16.14 3.20 2.40 MEG 052 +052 -8.25 6.10 3.20 2.40 MEG 053 +053 5.79 -18.42 3.20 2.40 MEG 054 +054 8.92 -15.60 3.20 2.40 MEG 055 +055 16.21 -11.50 3.20 2.40 MEG 056 +056 17.20 -6.26 3.20 2.40 MEG 057 +057 20.62 -2.66 3.20 2.40 MEG 058 +058 -6.11 13.61 3.20 2.40 MEG 059 +059 20.31 10.64 3.20 2.40 MEG 060 +060 17.58 15.92 3.20 2.40 MEG 061 +061 16.84 18.53 3.20 2.40 MEG 062 +062 13.49 18.47 3.20 2.40 MEG 063 +063 15.28 13.32 3.20 2.40 MEG 064 +064 -4.11 11.13 3.20 2.40 MEG 065 +065 19.39 7.54 3.20 2.40 MEG 066 +066 17.50 3.47 3.20 2.40 MEG 067 +067 -6.54 8.57 3.20 2.40 MEG 068 +068 11.44 -8.04 3.20 2.40 MEG 069 +069 12.41 -13.14 3.20 2.40 MEG 070 +070 8.16 -13.13 3.20 2.40 MEG 071 +071 -7.60 2.77 3.20 2.40 MEG 072 +072 1.62 -16.14 3.20 2.40 MEG 073 +073 -6.80 0.14 3.20 2.40 MEG 074 +074 -5.40 -15.93 3.20 2.40 MEG 075 +075 -8.16 -13.13 3.20 2.40 MEG 076 +076 -12.41 -13.14 3.20 2.40 MEG 077 +077 -14.81 -8.97 3.20 2.40 MEG 078 +078 -3.23 -2.94 3.20 2.40 MEG 079 +079 -17.50 3.47 3.20 2.40 MEG 080 +080 -19.39 7.54 3.20 2.40 MEG 081 +081 4.03 -2.84 3.20 2.40 MEG 082 +082 -15.28 13.32 3.20 2.40 MEG 083 +083 -13.49 18.47 3.20 2.40 MEG 084 +084 -12.29 15.99 3.20 2.40 MEG 085 +085 -16.74 10.63 3.20 2.40 MEG 086 +086 6.80 0.14 3.20 2.40 MEG 087 +087 -17.30 -2.88 3.20 2.40 MEG 088 +088 -13.99 -4.86 3.20 2.40 MEG 089 +089 11.58 6.13 3.20 2.40 MEG 090 +090 -11.44 -8.04 3.20 2.40 MEG 091 +091 -3.30 -13.45 3.20 2.40 MEG 092 +092 6.54 8.57 3.20 2.40 MEG 093 +093 -9.52 -10.67 3.20 2.40 MEG 094 +094 9.52 -10.67 3.20 2.40 MEG 095 +095 4.11 11.13 3.20 2.40 MEG 096 +096 13.99 -4.86 3.20 2.40 MEG 097 +097 18.10 -0.17 3.20 2.40 MEG 098 +098 0.74 11.38 3.20 2.40 MEG 099 +099 16.74 10.63 3.20 2.40 MEG 100 +100 12.29 15.99 3.20 2.40 MEG 101 +101 10.11 18.86 3.20 2.40 MEG 102 +102 6.83 19.80 3.20 2.40 MEG 103 +103 3.48 21.35 3.20 2.40 MEG 104 +104 0.00 21.35 3.20 2.40 MEG 105 +105 -3.48 21.35 3.20 2.40 MEG 106 +106 -6.83 19.80 3.20 2.40 MEG 107 +107 -10.11 18.86 3.20 2.40 MEG 108 +108 -12.03 13.52 3.20 2.40 MEG 109 +109 -1.63 8.64 3.20 2.40 MEG 110 +110 -3.36 18.88 3.20 2.40 MEG 111 +111 -0.02 18.88 3.20 2.40 MEG 112 +112 3.36 18.88 3.20 2.40 MEG 113 +113 1.63 8.64 3.20 2.40 MEG 114 +114 9.01 16.34 3.20 2.40 MEG 115 +115 4.97 5.29 3.20 2.40 MEG 116 +116 13.28 10.76 3.20 2.40 MEG 117 +117 15.78 7.58 3.20 2.40 MEG 118 +118 14.24 3.60 3.20 2.40 MEG 119 +119 14.69 -0.31 3.20 2.40 MEG 120 +120 3.37 -0.21 3.20 2.40 MEG 121 +121 8.20 -8.14 3.20 2.40 MEG 122 +122 6.11 -10.67 3.20 2.40 MEG 123 +123 2.77 -10.98 3.20 2.40 MEG 124 +124 0.10 -13.43 3.20 2.40 MEG 125 +125 0.02 -0.57 3.20 2.40 MEG 126 +126 -2.77 -10.98 3.20 2.40 MEG 127 +127 -8.20 -8.14 3.20 2.40 MEG 128 +128 -3.37 -0.21 3.20 2.40 MEG 129 +129 -14.69 -0.31 3.20 2.40 MEG 130 +130 -14.24 3.60 3.20 2.40 MEG 131 +131 -15.78 7.58 3.20 2.40 MEG 132 +132 -13.28 10.76 3.20 2.40 MEG 133 +133 -4.97 5.29 3.20 2.40 MEG 134 +134 -9.46 11.02 3.20 2.40 MEG 135 +135 -12.21 7.84 3.20 2.40 MEG 136 +136 -10.93 3.58 3.20 2.40 MEG 137 +137 -10.71 -3.82 3.20 2.40 MEG 138 +138 -6.89 -5.51 3.20 2.40 MEG 139 +139 -1.66 5.24 3.20 2.40 MEG 140 +140 -2.40 -8.39 3.20 2.40 MEG 141 +141 2.40 -8.39 3.20 2.40 MEG 142 +142 -4.29 2.66 3.20 2.40 MEG 143 +143 6.89 -5.51 3.20 2.40 MEG 144 +144 10.71 -3.82 3.20 2.40 
MEG 145 +145 10.93 3.58 3.20 2.40 MEG 146 +146 4.29 2.66 3.20 2.40 MEG 147 +147 9.46 11.02 3.20 2.40 MEG 148 +148 5.70 16.39 3.20 2.40 MEG 149 +149 1.66 5.24 3.20 2.40 MEG 150 +150 -2.37 16.38 3.20 2.40 MEG 151 +151 -5.70 16.39 3.20 2.40 MEG 152 +152 8.25 6.10 3.20 2.40 MEG 153 +153 -0.58 13.96 3.20 2.40 MEG 154 +154 2.81 13.89 3.20 2.40 MEG 155 +155 6.11 13.61 3.20 2.40 MEG 156 +156 2.37 16.38 3.20 2.40 MEG 157 diff --git a/mne/channels/data/layouts/Neuromag_122.lout b/mne/channels/data/layouts/Neuromag_122.lout new file mode 100644 index 0000000..c97746a --- /dev/null +++ b/mne/channels/data/layouts/Neuromag_122.lout @@ -0,0 +1,123 @@ +-3 28 -17 15 +1 25.381295 -0.771781 2 1.5 MEG 001 +2 25.381295 0.727697 2 1.5 MEG 002 +3 22.715372 -0.733246 2 1.5 MEG 003 +4 22.715372 0.766753 2 1.5 MEG 004 +5 19.911143 -0.608748 2 1.5 MEG 005 +6 19.911143 0.891252 2 1.5 MEG 006 +7 24.481102 4.347077 2 1.5 MEG 007 +8 24.481102 5.847077 2 1.5 MEG 008 +9 21.9673 3.613717 2 1.5 MEG 009 +10 21.9673 5.113717 2 1.5 MEG 010 +11 19.345958 3.110359 2 1.5 MEG 011 +12 19.345958 4.610058 2 1.5 MEG 012 +13 16.706588 2.875744 2 1.5 MEG 013 +14 16.706588 4.375643 2 1.5 MEG 014 +15 14.09047 2.753697 2 1.5 MEG 015 +16 14.09047 4.253697 2 1.5 MEG 016 +17 19.559995 7.243332 2 1.5 MEG 017 +18 19.559995 8.743163 2 1.5 MEG 018 +19 16.942979 6.237191 2 1.5 MEG 019 +20 16.942979 7.737225 2 1.5 MEG 020 +21 14.204774 5.792745 2 1.5 MEG 021 +22 14.204774 7.292858 2 1.5 MEG 022 +23 11.5 5.70429 2 1.5 MEG 023 +24 11.5 7.204446 2 1.5 MEG 024 +25 16.662514 9.87843 2 1.5 MEG 025 +26 16.662514 11.37843 2 1.5 MEG 026 +27 13.466339 11.859999 2 1.5 MEG 027 +28 13.466339 13.359952 2 1.5 MEG 028 +29 13.450371 8.807222 2 1.5 MEG 029 +30 13.450371 10.307518 2 1.5 MEG 030 +31 9.533661 11.859999 2 1.5 MEG 031 +32 9.533661 13.359952 2 1.5 MEG 032 +33 9.54963 8.807222 2 1.5 MEG 033 +34 9.54963 10.307518 2 1.5 MEG 034 +35 6.3374865 9.87843 2 1.5 MEG 035 +36 6.337486 11.37843 2 1.5 MEG 036 +37 3.440005 7.243332 2 1.5 MEG 037 +38 3.440005 8.743163 2 1.5 MEG 038 +39 6.057021 6.237192 2 1.5 MEG 039 +40 6.057021 7.737225 2 1.5 MEG 040 +41 8.795226 5.792745 2 1.5 MEG 041 +42 8.795226 7.292858 2 1.5 MEG 042 +43 -1.481102 4.347078 2 1.5 MEG 043 +44 -1.481102 5.847078 2 1.5 MEG 044 +45 1.0327 3.613581 2 1.5 MEG 045 +46 1.0327 5.113717 2 1.5 MEG 046 +47 3.654042 3.11036 2 1.5 MEG 047 +48 3.654042 4.610058 2 1.5 MEG 048 +49 6.293412 2.875744 2 1.5 MEG 049 +50 6.293412 4.375643 2 1.5 MEG 050 +51 8.90953 2.753697 2 1.5 MEG 051 +52 8.90953 4.253697 2 1.5 MEG 052 +53 11.5 2.731327 2 1.5 MEG 053 +54 11.5 4.231464 2 1.5 MEG 054 +55 -2.381295 -0.771781 2 1.5 MEG 055 +56 -2.381295 0.727697 2 1.5 MEG 056 +57 0.284628 -0.733246 2 1.5 MEG 057 +58 0.284628 0.766753 2 1.5 MEG 058 +59 3.088857 -0.608748 2 1.5 MEG 059 +60 3.088857 0.891252 2 1.5 MEG 060 +61 5.895393 -0.521429 2 1.5 MEG 061 +62 5.895393 0.978571 2 1.5 MEG 062 +63 8.696664 -0.481488 2 1.5 MEG 063 +64 8.696664 1.018793 2 1.5 MEG 064 +65 11.5 -0.46314 2 1.5 MEG 065 +66 11.5 1.036853 2 1.5 MEG 066 +67 -1.590015 -6.177621 2 1.5 MEG 067 +68 -1.590015 -4.677286 2 1.5 MEG 068 +69 0.893853 -5.313065 2 1.5 MEG 069 +70 0.893853 -3.813065 2 1.5 MEG 070 +71 3.788197 -4.494587 2 1.5 MEG 071 +72 3.788197 -2.994811 2 1.5 MEG 072 +73 6.749538 -3.95458 2 1.5 MEG 073 +74 6.749538 -2.454261 2 1.5 MEG 074 +75 1.096738 -10.894836 2 1.5 MEG 075 +76 1.096738 -9.394836 2 1.5 MEG 076 +77 3.402274 -9.346367 2 1.5 MEG 077 +78 3.402274 -7.846579 2 1.5 MEG 078 +79 6.182132 -8.131419 2 1.5 MEG 079 +80 6.182132 -6.631304 2 1.5 MEG 080 +81 
6.102499 -15.409053 2 1.5 MEG 081 +82 6.102499 -13.908834 2 1.5 MEG 082 +83 6.914234 -12.406122 2 1.5 MEG 083 +84 6.914234 -10.906034 2 1.5 MEG 084 +85 9.307503 -10.644013 2 1.5 MEG 085 +86 9.307503 -9.143762 2 1.5 MEG 086 +87 9.660984 -7.199067 2 1.5 MEG 087 +88 9.660984 -5.699067 2 1.5 MEG 088 +89 9.807536 -3.822648 2 1.5 MEG 089 +90 9.807536 -2.322552 2 1.5 MEG 090 +91 11.5 -16.259918 2 1.5 MEG 091 +92 11.5 -14.759918 2 1.5 MEG 092 +93 11.5 -13.097164 2 1.5 MEG 093 +94 11.5 -11.597439 2 1.5 MEG 094 +95 13.692497 -10.644013 2 1.5 MEG 095 +96 13.692497 -9.143762 2 1.5 MEG 096 +97 13.339016 -7.199067 2 1.5 MEG 097 +98 13.339016 -5.699067 2 1.5 MEG 098 +99 13.192464 -3.822648 2 1.5 MEG 099 +100 13.192464 -2.322552 2 1.5 MEG 100 +101 16.897501 -15.409053 2 1.5 MEG 101 +102 16.897501 -13.908834 2 1.5 MEG 102 +103 16.085766 -12.406122 2 1.5 MEG 103 +104 16.085766 -10.906034 2 1.5 MEG 104 +105 21.903262 -10.894836 2 1.5 MEG 105 +106 21.903262 -9.394836 2 1.5 MEG 106 +107 19.597726 -9.346367 2 1.5 MEG 107 +108 19.597726 -7.846579 2 1.5 MEG 108 +109 16.817868 -8.131419 2 1.5 MEG 109 +110 16.817868 -6.631304 2 1.5 MEG 110 +111 24.590015 -6.177621 2 1.5 MEG 111 +112 24.590015 -4.677286 2 1.5 MEG 112 +113 22.106147 -5.313065 2 1.5 MEG 113 +114 22.106147 -3.813065 2 1.5 MEG 114 +115 19.211802 -4.494588 2 1.5 MEG 115 +116 19.211802 -2.994811 2 1.5 MEG 116 +117 16.250462 -3.95458 2 1.5 MEG 117 +118 16.250462 -2.454261 2 1.5 MEG 118 +119 17.104607 -0.521429 2 1.5 MEG 119 +120 17.104607 0.978571 2 1.5 MEG 120 +121 14.303336 -0.481488 2 1.5 MEG 121 +122 14.303336 1.018792 2 1.5 MEG 122 diff --git a/mne/channels/data/layouts/Vectorview-all.lout b/mne/channels/data/layouts/Vectorview-all.lout new file mode 100644 index 0000000..b6395fb --- /dev/null +++ b/mne/channels/data/layouts/Vectorview-all.lout @@ -0,0 +1,307 @@ +-85.000000 90.000000 -83.000000 75.000000 +113 -73.416206 33.416687 6.000000 5.000000 MEG 0113 +112 -73.416206 38.416687 6.000000 5.000000 MEG 0112 +111 -67.416206 35.916687 6.000000 5.000000 MEG 0111 +122 -59.602242 38.489067 6.000000 5.000000 MEG 0122 +123 -59.602242 43.489067 6.000000 5.000000 MEG 0123 +121 -53.602242 40.989067 6.000000 5.000000 MEG 0121 +132 -68.018288 18.676970 6.000000 5.000000 MEG 0132 +133 -68.018288 23.676970 6.000000 5.000000 MEG 0133 +131 -62.018288 21.176970 6.000000 5.000000 MEG 0131 +143 -80.582848 8.095787 6.000000 5.000000 MEG 0143 +142 -80.582848 13.095787 6.000000 5.000000 MEG 0142 +141 -74.582848 10.595787 6.000000 5.000000 MEG 0141 +213 -56.595154 17.019251 6.000000 5.000000 MEG 0213 +212 -56.595154 22.019251 6.000000 5.000000 MEG 0212 +211 -50.595154 19.519251 6.000000 5.000000 MEG 0211 +222 -44.599728 17.543873 6.000000 5.000000 MEG 0222 +223 -44.599728 22.543873 6.000000 5.000000 MEG 0223 +221 -38.599728 20.043873 6.000000 5.000000 MEG 0221 +232 -47.416420 -0.216784 6.000000 5.000000 MEG 0232 +233 -47.416420 4.783216 6.000000 5.000000 MEG 0233 +231 -41.416420 2.283216 6.000000 5.000000 MEG 0231 +243 -59.280643 -2.761772 6.000000 5.000000 MEG 0243 +242 -59.280643 2.238228 6.000000 5.000000 MEG 0242 +241 -53.280643 -0.261772 6.000000 5.000000 MEG 0241 +313 -39.790501 47.430138 6.000000 5.000000 MEG 0313 +312 -39.790501 52.430138 6.000000 5.000000 MEG 0312 +311 -33.790501 49.930138 6.000000 5.000000 MEG 0311 +322 -38.014336 32.768585 6.000000 5.000000 MEG 0322 +323 -38.014336 37.768585 6.000000 5.000000 MEG 0323 +321 -32.014336 35.268585 6.000000 5.000000 MEG 0321 +333 -27.679966 28.868065 6.000000 5.000000 MEG 0333 +332 -27.679966 33.868065 6.000000 
5.000000 MEG 0332 +331 -21.679966 31.368065 6.000000 5.000000 MEG 0331 +343 -49.684467 34.078434 6.000000 5.000000 MEG 0343 +342 -49.684467 39.078434 6.000000 5.000000 MEG 0342 +341 -43.684467 36.578434 6.000000 5.000000 MEG 0341 +413 -32.997990 15.607347 6.000000 5.000000 MEG 0413 +412 -32.997990 20.607347 6.000000 5.000000 MEG 0412 +411 -26.997990 18.107347 6.000000 5.000000 MEG 0411 +422 -21.084751 13.953575 6.000000 5.000000 MEG 0422 +423 -21.084751 18.953575 6.000000 5.000000 MEG 0423 +421 -15.084751 16.453575 6.000000 5.000000 MEG 0421 +432 -21.930935 -0.085500 6.000000 5.000000 MEG 0432 +433 -21.930935 4.914500 6.000000 5.000000 MEG 0433 +431 -15.930935 2.414500 6.000000 5.000000 MEG 0431 +443 -34.824663 0.362587 6.000000 5.000000 MEG 0443 +442 -34.824663 5.362587 6.000000 5.000000 MEG 0442 +441 -28.824663 2.862587 6.000000 5.000000 MEG 0441 +513 -27.861498 55.439636 6.000000 5.000000 MEG 0513 +512 -27.861498 60.439636 6.000000 5.000000 MEG 0512 +511 -21.861498 57.939636 6.000000 5.000000 MEG 0511 +523 -15.506709 59.619865 6.000000 5.000000 MEG 0523 +522 -15.506709 64.619865 6.000000 5.000000 MEG 0522 +521 -9.506709 62.119865 6.000000 5.000000 MEG 0521 +532 -14.616095 49.308380 6.000000 5.000000 MEG 0532 +533 -14.616095 54.308380 6.000000 5.000000 MEG 0533 +531 -8.616095 51.808380 6.000000 5.000000 MEG 0531 +542 -27.240477 43.863430 6.000000 5.000000 MEG 0542 +543 -27.240477 48.863430 6.000000 5.000000 MEG 0543 +541 -21.240477 46.363430 6.000000 5.000000 MEG 0541 +613 -14.782405 38.147827 6.000000 5.000000 MEG 0613 +612 -14.782405 43.147827 6.000000 5.000000 MEG 0612 +611 -8.782405 40.647827 6.000000 5.000000 MEG 0611 +622 -2.967276 27.260933 6.000000 5.000000 MEG 0622 +623 -2.967276 32.260933 6.000000 5.000000 MEG 0623 +621 3.032724 29.760933 6.000000 5.000000 MEG 0621 +633 -9.094766 14.700909 6.000000 5.000000 MEG 0633 +632 -9.094766 19.700909 6.000000 5.000000 MEG 0632 +631 -3.094766 17.200909 6.000000 5.000000 MEG 0631 +642 -15.199021 26.631405 6.000000 5.000000 MEG 0642 +643 -15.199021 31.631405 6.000000 5.000000 MEG 0643 +641 -9.199021 29.131405 6.000000 5.000000 MEG 0641 +713 -9.246834 1.693846 6.000000 5.000000 MEG 0713 +712 -9.246834 6.693846 6.000000 5.000000 MEG 0712 +711 -3.246834 4.193846 6.000000 5.000000 MEG 0711 +723 3.314525 1.573887 6.000000 5.000000 MEG 0723 +722 3.314525 6.573887 6.000000 5.000000 MEG 0722 +721 9.314525 4.073887 6.000000 5.000000 MEG 0721 +733 3.387173 -10.588106 6.000000 5.000000 MEG 0733 +732 3.387173 -5.588106 6.000000 5.000000 MEG 0732 +731 9.387173 -8.088106 6.000000 5.000000 MEG 0731 +743 -9.422897 -10.519942 6.000000 5.000000 MEG 0743 +742 -9.422897 -5.519942 6.000000 5.000000 MEG 0742 +741 -3.422897 -8.019942 6.000000 5.000000 MEG 0741 +813 -2.962408 61.007698 6.000000 5.000000 MEG 0813 +812 -2.962408 66.007698 6.000000 5.000000 MEG 0812 +811 3.037592 63.507698 6.000000 5.000000 MEG 0811 +822 -2.965545 50.641838 6.000000 5.000000 MEG 0822 +823 -2.965545 55.641838 6.000000 5.000000 MEG 0823 +821 3.034455 53.141838 6.000000 5.000000 MEG 0821 +913 9.504830 59.655254 6.000000 5.000000 MEG 0913 +912 9.504830 64.655254 6.000000 5.000000 MEG 0912 +911 15.504830 62.155254 6.000000 5.000000 MEG 0911 +923 21.967310 55.408710 6.000000 5.000000 MEG 0923 +922 21.967310 60.408710 6.000000 5.000000 MEG 0922 +921 27.967310 57.908710 6.000000 5.000000 MEG 0921 +932 21.254196 43.889683 6.000000 5.000000 MEG 0932 +933 21.254196 48.889683 6.000000 5.000000 MEG 0933 +931 27.254196 46.389683 6.000000 5.000000 MEG 0931 +942 8.661931 49.358044 6.000000 5.000000 
MEG 0942 +943 8.661931 54.358044 6.000000 5.000000 MEG 0943 +941 14.661931 51.858044 6.000000 5.000000 MEG 0941 +1013 -2.967087 39.669956 6.000000 5.000000 MEG 1013 +1012 -2.967087 44.669956 6.000000 5.000000 MEG 1012 +1011 3.032913 42.169956 6.000000 5.000000 MEG 1011 +1023 8.751018 38.154079 6.000000 5.000000 MEG 1023 +1022 8.751018 43.154079 6.000000 5.000000 MEG 1022 +1021 14.751018 40.654079 6.000000 5.000000 MEG 1021 +1032 9.123913 26.648697 6.000000 5.000000 MEG 1032 +1033 9.123913 31.648697 6.000000 5.000000 MEG 1033 +1031 15.123913 29.148697 6.000000 5.000000 MEG 1031 +1043 3.200539 14.795620 6.000000 5.000000 MEG 1043 +1042 3.200539 19.795620 6.000000 5.000000 MEG 1042 +1041 9.200539 17.295620 6.000000 5.000000 MEG 1041 +1112 15.014965 13.912239 6.000000 5.000000 MEG 1112 +1113 15.014965 18.912239 6.000000 5.000000 MEG 1113 +1111 21.014965 16.412239 6.000000 5.000000 MEG 1111 +1123 26.958527 15.562130 6.000000 5.000000 MEG 1123 +1122 26.958527 20.562130 6.000000 5.000000 MEG 1122 +1121 32.958527 18.062130 6.000000 5.000000 MEG 1121 +1133 28.757563 0.227141 6.000000 5.000000 MEG 1133 +1132 28.757563 5.227141 6.000000 5.000000 MEG 1132 +1131 34.757563 2.727141 6.000000 5.000000 MEG 1131 +1142 15.882982 0.037700 6.000000 5.000000 MEG 1142 +1143 15.882982 5.037700 6.000000 5.000000 MEG 1143 +1141 21.882982 2.537700 6.000000 5.000000 MEG 1141 +1213 33.958897 47.388790 6.000000 5.000000 MEG 1213 +1212 33.958897 52.388790 6.000000 5.000000 MEG 1212 +1211 39.958897 49.888790 6.000000 5.000000 MEG 1211 +1223 43.923473 33.914738 6.000000 5.000000 MEG 1223 +1222 43.923473 38.914738 6.000000 5.000000 MEG 1222 +1221 49.923473 36.414738 6.000000 5.000000 MEG 1221 +1232 32.014336 32.768585 6.000000 5.000000 MEG 1232 +1233 32.014336 37.768585 6.000000 5.000000 MEG 1233 +1231 38.014336 35.268585 6.000000 5.000000 MEG 1231 +1243 21.600079 28.898149 6.000000 5.000000 MEG 1243 +1242 21.600079 33.898149 6.000000 5.000000 MEG 1242 +1241 27.600079 31.398149 6.000000 5.000000 MEG 1241 +1312 38.599728 17.543867 6.000000 5.000000 MEG 1312 +1313 38.599728 22.543867 6.000000 5.000000 MEG 1313 +1311 44.599728 20.043867 6.000000 5.000000 MEG 1311 +1323 50.558392 16.887651 6.000000 5.000000 MEG 1323 +1322 50.558392 21.887651 6.000000 5.000000 MEG 1322 +1321 56.558392 19.387651 6.000000 5.000000 MEG 1321 +1333 53.420483 -2.919475 6.000000 5.000000 MEG 1333 +1332 53.420483 2.080525 6.000000 5.000000 MEG 1332 +1331 59.420483 -0.419475 6.000000 5.000000 MEG 1331 +1342 41.371586 -0.216817 6.000000 5.000000 MEG 1342 +1343 41.371586 4.783183 6.000000 5.000000 MEG 1343 +1341 47.371586 2.283183 6.000000 5.000000 MEG 1341 +1412 53.704369 38.563030 6.000000 5.000000 MEG 1412 +1413 53.704369 43.563030 6.000000 5.000000 MEG 1413 +1411 59.704369 41.063030 6.000000 5.000000 MEG 1411 +1423 67.119286 33.843739 6.000000 5.000000 MEG 1423 +1422 67.119286 38.843739 6.000000 5.000000 MEG 1422 +1421 73.119286 36.343739 6.000000 5.000000 MEG 1421 +1433 74.438919 8.335863 6.000000 5.000000 MEG 1433 +1432 74.438919 13.335863 6.000000 5.000000 MEG 1432 +1431 80.438919 10.835863 6.000000 5.000000 MEG 1431 +1442 61.883209 18.562304 6.000000 5.000000 MEG 1442 +1443 61.883209 23.562304 6.000000 5.000000 MEG 1443 +1441 67.883209 21.062304 6.000000 5.000000 MEG 1441 +1512 -71.298943 -4.707253 6.000000 5.000000 MEG 1512 +1513 -71.298943 0.292747 6.000000 5.000000 MEG 1513 +1511 -65.298943 -2.207253 6.000000 5.000000 MEG 1511 +1522 -67.281609 -25.407852 6.000000 5.000000 MEG 1522 +1523 -67.281609 -20.407852 6.000000 5.000000 MEG 1523 +1521 
-61.281609 -22.907852 6.000000 5.000000 MEG 1521 +1533 -71.702820 -40.152336 6.000000 5.000000 MEG 1533 +1532 -71.702820 -35.152336 6.000000 5.000000 MEG 1532 +1531 -65.702820 -37.652336 6.000000 5.000000 MEG 1531 +1543 -79.907913 -17.418098 6.000000 5.000000 MEG 1543 +1542 -79.907913 -12.418098 6.000000 5.000000 MEG 1542 +1541 -73.907913 -14.918098 6.000000 5.000000 MEG 1541 +1613 -56.916454 -20.312164 6.000000 5.000000 MEG 1613 +1612 -56.916454 -15.312164 6.000000 5.000000 MEG 1612 +1611 -50.916454 -17.812164 6.000000 5.000000 MEG 1611 +1622 -45.631779 -16.320436 6.000000 5.000000 MEG 1622 +1623 -45.631779 -11.320436 6.000000 5.000000 MEG 1623 +1621 -39.631779 -13.820436 6.000000 5.000000 MEG 1621 +1632 -37.896103 -30.578358 6.000000 5.000000 MEG 1632 +1633 -37.896103 -25.578358 6.000000 5.000000 MEG 1633 +1631 -31.896103 -28.078358 6.000000 5.000000 MEG 1631 +1643 -48.859089 -36.176094 6.000000 5.000000 MEG 1643 +1642 -48.859089 -31.176094 6.000000 5.000000 MEG 1642 +1641 -42.859089 -33.676094 6.000000 5.000000 MEG 1641 +1713 -56.796040 -59.082275 6.000000 5.000000 MEG 1713 +1712 -56.796040 -54.082275 6.000000 5.000000 MEG 1712 +1711 -50.796040 -56.582275 6.000000 5.000000 MEG 1711 +1722 -57.188797 -44.057373 6.000000 5.000000 MEG 1722 +1723 -57.188797 -39.057373 6.000000 5.000000 MEG 1723 +1721 -51.188797 -41.557373 6.000000 5.000000 MEG 1721 +1732 -41.902962 -58.279526 6.000000 5.000000 MEG 1732 +1733 -41.902962 -53.279526 6.000000 5.000000 MEG 1733 +1731 -35.902962 -55.779526 6.000000 5.000000 MEG 1731 +1743 -37.408134 -72.449036 6.000000 5.000000 MEG 1743 +1742 -37.408134 -67.449036 6.000000 5.000000 MEG 1742 +1741 -31.408134 -69.949036 6.000000 5.000000 MEG 1741 +1813 -33.801163 -13.768716 6.000000 5.000000 MEG 1813 +1812 -33.801163 -8.768716 6.000000 5.000000 MEG 1812 +1811 -27.801163 -11.268716 6.000000 5.000000 MEG 1811 +1822 -21.685101 -12.619589 6.000000 5.000000 MEG 1822 +1823 -21.685101 -7.619589 6.000000 5.000000 MEG 1823 +1821 -15.685101 -10.119589 6.000000 5.000000 MEG 1821 +1832 -9.600111 -22.190945 6.000000 5.000000 MEG 1832 +1833 -9.600111 -17.190945 6.000000 5.000000 MEG 1833 +1831 -3.600111 -19.690945 6.000000 5.000000 MEG 1831 +1843 -24.483526 -26.850609 6.000000 5.000000 MEG 1843 +1842 -24.483526 -21.850609 6.000000 5.000000 MEG 1842 +1841 -18.483526 -24.350609 6.000000 5.000000 MEG 1841 +1912 -25.866816 -40.850040 6.000000 5.000000 MEG 1912 +1913 -25.866816 -35.850040 6.000000 5.000000 MEG 1913 +1911 -19.866816 -38.350040 6.000000 5.000000 MEG 1911 +1923 -20.513481 -56.355225 6.000000 5.000000 MEG 1923 +1922 -20.513481 -51.355225 6.000000 5.000000 MEG 1922 +1921 -14.513481 -53.855225 6.000000 5.000000 MEG 1921 +1932 -23.428471 -67.375893 6.000000 5.000000 MEG 1932 +1933 -23.428471 -62.375893 6.000000 5.000000 MEG 1933 +1931 -17.428471 -64.875893 6.000000 5.000000 MEG 1931 +1943 -36.237587 -48.444530 6.000000 5.000000 MEG 1943 +1942 -36.237587 -43.444530 6.000000 5.000000 MEG 1942 +1941 -30.237587 -45.944530 6.000000 5.000000 MEG 1941 +2013 -10.441930 -34.308243 6.000000 5.000000 MEG 2013 +2012 -10.441930 -29.308243 6.000000 5.000000 MEG 2012 +2011 -4.441930 -31.808243 6.000000 5.000000 MEG 2011 +2023 4.357624 -34.289736 6.000000 5.000000 MEG 2023 +2022 4.357624 -29.289736 6.000000 5.000000 MEG 2022 +2021 10.357624 -31.789736 6.000000 5.000000 MEG 2021 +2032 4.645295 -46.290749 6.000000 5.000000 MEG 2032 +2033 4.645295 -41.290749 6.000000 5.000000 MEG 2033 +2031 10.645295 -43.790749 6.000000 5.000000 MEG 2031 +2042 -10.645079 -46.244335 6.000000 5.000000 MEG 2042 
+2043 -10.645079 -41.244335 6.000000 5.000000 MEG 2043 +2041 -4.645079 -43.744335 6.000000 5.000000 MEG 2041 +2113 -3.052351 -58.889515 6.000000 5.000000 MEG 2113 +2112 -3.052351 -53.889515 6.000000 5.000000 MEG 2112 +2111 2.947649 -56.389515 6.000000 5.000000 MEG 2111 +2122 -2.999999 -70.362061 6.000000 5.000000 MEG 2122 +2123 -2.999999 -65.362061 6.000000 5.000000 MEG 2123 +2121 3.000001 -67.862061 6.000000 5.000000 MEG 2121 +2133 8.918572 -79.441826 6.000000 5.000000 MEG 2133 +2132 8.918572 -74.441826 6.000000 5.000000 MEG 2132 +2131 14.918572 -76.941826 6.000000 5.000000 MEG 2131 +2143 -14.987089 -79.428932 6.000000 5.000000 MEG 2143 +2142 -14.987089 -74.428932 6.000000 5.000000 MEG 2142 +2141 -8.987089 -76.928932 6.000000 5.000000 MEG 2141 +2212 15.641460 -12.579389 6.000000 5.000000 MEG 2212 +2213 15.641460 -7.579389 6.000000 5.000000 MEG 2213 +2211 21.641460 -10.079389 6.000000 5.000000 MEG 2211 +2223 27.786499 -13.669980 6.000000 5.000000 MEG 2223 +2222 27.786499 -8.669980 6.000000 5.000000 MEG 2222 +2221 33.786499 -11.169980 6.000000 5.000000 MEG 2221 +2233 18.501518 -26.949615 6.000000 5.000000 MEG 2233 +2232 18.501518 -21.949615 6.000000 5.000000 MEG 2232 +2231 24.501518 -24.449615 6.000000 5.000000 MEG 2231 +2242 3.641699 -22.206125 6.000000 5.000000 MEG 2242 +2243 3.641699 -17.206125 6.000000 5.000000 MEG 2243 +2241 9.641699 -19.706125 6.000000 5.000000 MEG 2241 +2312 19.852789 -40.871220 6.000000 5.000000 MEG 2312 +2313 19.852789 -35.871220 6.000000 5.000000 MEG 2313 +2311 25.852789 -38.371220 6.000000 5.000000 MEG 2311 +2323 30.078903 -48.474960 6.000000 5.000000 MEG 2323 +2322 30.078903 -43.474960 6.000000 5.000000 MEG 2322 +2321 36.078903 -45.974960 6.000000 5.000000 MEG 2321 +2332 17.363274 -67.365387 6.000000 5.000000 MEG 2332 +2333 17.363274 -62.365387 6.000000 5.000000 MEG 2333 +2331 23.363274 -64.865387 6.000000 5.000000 MEG 2331 +2343 14.329920 -56.380260 6.000000 5.000000 MEG 2343 +2342 14.329920 -51.380260 6.000000 5.000000 MEG 2342 +2341 20.329920 -53.880260 6.000000 5.000000 MEG 2341 +2412 39.644810 -16.175139 6.000000 5.000000 MEG 2412 +2413 39.644810 -11.175139 6.000000 5.000000 MEG 2413 +2411 45.644810 -13.675139 6.000000 5.000000 MEG 2411 +2423 50.812263 -20.401899 6.000000 5.000000 MEG 2423 +2422 50.812263 -15.401899 6.000000 5.000000 MEG 2422 +2421 56.812263 -17.901899 6.000000 5.000000 MEG 2421 +2433 42.694180 -36.278580 6.000000 5.000000 MEG 2433 +2432 42.694180 -31.278580 6.000000 5.000000 MEG 2432 +2431 48.694180 -33.778580 6.000000 5.000000 MEG 2431 +2442 31.896111 -30.578348 6.000000 5.000000 MEG 2442 +2443 31.896111 -25.578348 6.000000 5.000000 MEG 2443 +2441 37.896111 -28.078348 6.000000 5.000000 MEG 2441 +2512 35.812634 -58.300888 6.000000 5.000000 MEG 2512 +2513 35.812634 -53.300888 6.000000 5.000000 MEG 2513 +2511 41.812634 -55.800888 6.000000 5.000000 MEG 2511 +2522 51.171906 -43.981274 6.000000 5.000000 MEG 2522 +2523 51.171906 -38.981274 6.000000 5.000000 MEG 2523 +2521 57.171906 -41.481274 6.000000 5.000000 MEG 2521 +2533 50.704624 -59.132656 6.000000 5.000000 MEG 2533 +2532 50.704624 -54.132656 6.000000 5.000000 MEG 2532 +2531 56.704624 -56.632656 6.000000 5.000000 MEG 2531 +2543 31.320171 -72.484848 6.000000 5.000000 MEG 2543 +2542 31.320171 -67.484848 6.000000 5.000000 MEG 2542 +2541 37.320171 -69.984848 6.000000 5.000000 MEG 2541 +2612 65.137360 -4.702045 6.000000 5.000000 MEG 2612 +2613 65.137360 0.297955 6.000000 5.000000 MEG 2613 +2611 71.137360 -2.202045 6.000000 5.000000 MEG 2611 +2623 73.822243 -17.329140 6.000000 5.000000 MEG 2623 
+2622 73.822243 -12.329140 6.000000 5.000000 MEG 2622 +2621 79.822243 -14.829140 6.000000 5.000000 MEG 2621 +2633 65.490112 -40.332645 6.000000 5.000000 MEG 2633 +2632 65.490112 -35.332645 6.000000 5.000000 MEG 2632 +2631 71.490112 -37.832645 6.000000 5.000000 MEG 2631 +2642 61.220192 -25.385981 6.000000 5.000000 MEG 2642 +2643 61.220192 -20.385981 6.000000 5.000000 MEG 2643 +2641 67.220192 -22.885981 6.000000 5.000000 MEG 2641 diff --git a/mne/channels/data/layouts/Vectorview-grad.lout b/mne/channels/data/layouts/Vectorview-grad.lout new file mode 100644 index 0000000..1f133a1 --- /dev/null +++ b/mne/channels/data/layouts/Vectorview-grad.lout @@ -0,0 +1,205 @@ +-55.000000 55.000000 -65.000000 60.000000 +113 -48.186871 26.886379 6.000000 5.000000 MEG 0113 +112 -48.186871 31.886379 6.000000 5.000000 MEG 0112 +122 -39.322296 31.036510 6.000000 5.000000 MEG 0122 +123 -39.322296 36.036510 6.000000 5.000000 MEG 0123 +132 -44.722965 14.826612 6.000000 5.000000 MEG 0132 +133 -44.722965 19.826612 6.000000 5.000000 MEG 0133 +143 -52.785782 6.169280 6.000000 5.000000 MEG 0143 +142 -52.785782 11.169280 6.000000 5.000000 MEG 0142 +213 -37.392612 13.470296 6.000000 5.000000 MEG 0213 +212 -37.392612 18.470296 6.000000 5.000000 MEG 0212 +222 -29.695013 13.899532 6.000000 5.000000 MEG 0222 +223 -29.695013 18.899532 6.000000 5.000000 MEG 0223 +232 -31.502516 -0.631914 6.000000 5.000000 MEG 0232 +233 -31.502516 4.368086 6.000000 5.000000 MEG 0233 +243 -39.115921 -2.709978 6.000000 5.000000 MEG 0243 +242 -39.115921 2.290022 6.000000 5.000000 MEG 0242 +313 -26.608879 38.351933 6.000000 5.000000 MEG 0313 +312 -26.608879 43.351933 6.000000 5.000000 MEG 0312 +322 -25.469093 26.356115 6.000000 5.000000 MEG 0322 +323 -25.469093 31.356115 6.000000 5.000000 MEG 0323 +333 -18.837411 23.164780 6.000000 5.000000 MEG 0333 +332 -18.837411 28.164780 6.000000 5.000000 MEG 0332 +343 -32.957949 27.427811 6.000000 5.000000 MEG 0343 +342 -32.957949 32.427811 6.000000 5.000000 MEG 0342 +413 -22.250046 12.315103 6.000000 5.000000 MEG 0413 +412 -22.250046 17.315103 6.000000 5.000000 MEG 0412 +422 -14.605187 10.962016 6.000000 5.000000 MEG 0422 +423 -14.605187 15.962016 6.000000 5.000000 MEG 0423 +432 -15.148193 -0.524500 6.000000 5.000000 MEG 0432 +433 -15.148193 4.475500 6.000000 5.000000 MEG 0433 +443 -23.422245 -0.157884 6.000000 5.000000 MEG 0443 +442 -23.422245 4.842116 6.000000 5.000000 MEG 0442 +513 -18.953902 44.905155 6.000000 5.000000 MEG 0513 +512 -18.953902 49.905155 6.000000 5.000000 MEG 0512 +523 -11.025696 48.325344 6.000000 5.000000 MEG 0523 +522 -11.025696 53.325344 6.000000 5.000000 MEG 0522 +532 -10.454178 39.888676 6.000000 5.000000 MEG 0532 +533 -10.454178 44.888676 6.000000 5.000000 MEG 0533 +542 -18.555386 35.433716 6.000000 5.000000 MEG 0542 +543 -18.555386 40.433716 6.000000 5.000000 MEG 0543 +613 -10.560901 30.757313 6.000000 5.000000 MEG 0613 +612 -10.560901 35.757313 6.000000 5.000000 MEG 0612 +622 -2.979000 21.849854 6.000000 5.000000 MEG 0622 +623 -2.979000 26.849854 6.000000 5.000000 MEG 0623 +633 -6.911079 11.573471 6.000000 5.000000 MEG 0633 +632 -6.911079 16.573471 6.000000 5.000000 MEG 0632 +642 -10.828249 21.334785 6.000000 5.000000 MEG 0642 +643 -10.828249 26.334785 6.000000 5.000000 MEG 0643 +713 -7.008664 0.931329 6.000000 5.000000 MEG 0713 +712 -7.008664 5.931329 6.000000 5.000000 MEG 0712 +723 1.052102 0.833180 6.000000 5.000000 MEG 0723 +722 1.052102 5.833180 6.000000 5.000000 MEG 0722 +733 1.098721 -8.987786 6.000000 5.000000 MEG 0733 +732 1.098721 -3.987786 6.000000 5.000000 MEG 0732 
+743 -7.121645 -8.933109 6.000000 5.000000 MEG 0743 +742 -7.121645 -3.933109 6.000000 5.000000 MEG 0742 +813 -2.975877 49.460842 6.000000 5.000000 MEG 0813 +812 -2.975877 54.460842 6.000000 5.000000 MEG 0812 +822 -2.977890 40.979687 6.000000 5.000000 MEG 0822 +823 -2.977890 45.979687 6.000000 5.000000 MEG 0823 +913 5.024490 48.354298 6.000000 5.000000 MEG 0913 +912 5.024490 53.354298 6.000000 5.000000 MEG 0912 +923 13.021803 44.879852 6.000000 5.000000 MEG 0923 +922 13.021803 49.879852 6.000000 5.000000 MEG 0922 +932 12.564190 35.455193 6.000000 5.000000 MEG 0932 +933 12.564190 40.455193 6.000000 5.000000 MEG 0933 +942 4.483593 39.929310 6.000000 5.000000 MEG 0942 +943 4.483593 44.929310 6.000000 5.000000 MEG 0943 +1013 -2.978879 32.002693 6.000000 5.000000 MEG 1013 +1012 -2.978879 37.002693 6.000000 5.000000 MEG 1012 +1023 4.540760 30.762428 6.000000 5.000000 MEG 1023 +1022 4.540760 35.762428 6.000000 5.000000 MEG 1022 +1032 4.780051 21.348934 6.000000 5.000000 MEG 1032 +1033 4.780051 26.348934 6.000000 5.000000 MEG 1033 +1043 0.978956 11.650963 6.000000 5.000000 MEG 1043 +1042 0.978956 16.650963 6.000000 5.000000 MEG 1042 +1112 8.560405 10.928195 6.000000 5.000000 MEG 1112 +1113 8.560405 15.928195 6.000000 5.000000 MEG 1113 +1123 16.224724 12.278107 6.000000 5.000000 MEG 1123 +1122 16.224724 17.278107 6.000000 5.000000 MEG 1122 +1133 17.379185 -0.268703 6.000000 5.000000 MEG 1133 +1132 17.379185 4.731297 6.000000 5.000000 MEG 1132 +1142 9.117422 -0.423700 6.000000 5.000000 MEG 1142 +1143 9.117422 4.576300 6.000000 5.000000 MEG 1143 +1213 20.716938 38.318100 6.000000 5.000000 MEG 1213 +1212 20.716938 43.318100 6.000000 5.000000 MEG 1212 +1223 27.111319 27.293877 6.000000 5.000000 MEG 1223 +1222 27.111319 32.293877 6.000000 5.000000 MEG 1222 +1232 19.469093 26.356115 6.000000 5.000000 MEG 1232 +1233 19.469093 31.356115 6.000000 5.000000 MEG 1233 +1243 12.786146 23.189396 6.000000 5.000000 MEG 1243 +1242 12.786146 28.189396 6.000000 5.000000 MEG 1242 +1312 23.695013 13.899529 6.000000 5.000000 MEG 1312 +1313 23.695013 18.899529 6.000000 5.000000 MEG 1313 +1323 31.369019 13.362624 6.000000 5.000000 MEG 1323 +1322 31.369019 18.362624 6.000000 5.000000 MEG 1322 +1333 33.205658 -2.836478 6.000000 5.000000 MEG 1333 +1332 33.205658 2.163522 6.000000 5.000000 MEG 1332 +1342 25.473745 -0.631941 6.000000 5.000000 MEG 1342 +1343 25.473745 4.368059 6.000000 5.000000 MEG 1343 +1412 33.387833 31.097027 6.000000 5.000000 MEG 1412 +1413 33.387833 36.097027 6.000000 5.000000 MEG 1413 +1423 41.996334 27.235786 6.000000 5.000000 MEG 1423 +1422 41.996334 32.235786 6.000000 5.000000 MEG 1422 +1433 46.693424 6.365705 6.000000 5.000000 MEG 1433 +1432 46.693424 11.365705 6.000000 5.000000 MEG 1432 +1442 38.636284 14.732794 6.000000 5.000000 MEG 1442 +1443 38.636284 19.732794 6.000000 5.000000 MEG 1443 +1512 -46.828197 -4.270524 6.000000 5.000000 MEG 1512 +1513 -46.828197 0.729476 6.000000 5.000000 MEG 1513 +1522 -44.250233 -20.875282 6.000000 5.000000 MEG 1522 +1523 -44.250233 -15.875282 6.000000 5.000000 MEG 1523 +1533 -47.087372 -32.702410 6.000000 5.000000 MEG 1533 +1532 -47.087372 -27.702410 6.000000 5.000000 MEG 1532 +1543 -52.352669 -14.466389 6.000000 5.000000 MEG 1543 +1542 -52.352669 -9.466389 6.000000 5.000000 MEG 1542 +1613 -37.598797 -16.787832 6.000000 5.000000 MEG 1613 +1612 -37.598797 -11.787832 6.000000 5.000000 MEG 1612 +1622 -30.357292 -13.585911 6.000000 5.000000 MEG 1622 +1623 -30.357292 -8.585911 6.000000 5.000000 MEG 1623 +1632 -25.393221 -25.022747 6.000000 5.000000 MEG 1632 +1633 
-25.393221 -20.022747 6.000000 5.000000 MEG 1633 +1643 -32.428291 -29.512911 6.000000 5.000000 MEG 1643 +1642 -32.428291 -24.512911 6.000000 5.000000 MEG 1642 +1713 -37.521523 -47.886852 6.000000 5.000000 MEG 1713 +1712 -37.521523 -42.886852 6.000000 5.000000 MEG 1712 +1722 -37.773560 -35.834789 6.000000 5.000000 MEG 1722 +1723 -37.773560 -30.834789 6.000000 5.000000 MEG 1723 +1732 -27.964468 -47.242935 6.000000 5.000000 MEG 1732 +1733 -27.964468 -42.242935 6.000000 5.000000 MEG 1733 +1743 -25.080088 -58.608849 6.000000 5.000000 MEG 1743 +1742 -25.080088 -53.608849 6.000000 5.000000 MEG 1742 +1813 -22.765453 -11.539077 6.000000 5.000000 MEG 1813 +1812 -22.765453 -6.539077 6.000000 5.000000 MEG 1812 +1822 -14.990439 -10.617317 6.000000 5.000000 MEG 1822 +1823 -14.990439 -5.617317 6.000000 5.000000 MEG 1823 +1832 -7.235366 -18.294876 6.000000 5.000000 MEG 1832 +1833 -7.235366 -13.294876 6.000000 5.000000 MEG 1833 +1843 -16.786220 -22.032574 6.000000 5.000000 MEG 1843 +1842 -16.786220 -17.032574 6.000000 5.000000 MEG 1842 +1912 -17.673892 -33.262066 6.000000 5.000000 MEG 1912 +1913 -17.673892 -28.262066 6.000000 5.000000 MEG 1913 +1923 -14.238597 -45.699379 6.000000 5.000000 MEG 1923 +1922 -14.238597 -40.699379 6.000000 5.000000 MEG 1922 +1932 -16.109179 -54.539486 6.000000 5.000000 MEG 1932 +1933 -16.109179 -49.539486 6.000000 5.000000 MEG 1933 +1943 -24.328934 -39.353901 6.000000 5.000000 MEG 1943 +1942 -24.328934 -34.353901 6.000000 5.000000 MEG 1942 +2013 -7.775570 -28.014633 6.000000 5.000000 MEG 2013 +2012 -7.775570 -23.014633 6.000000 5.000000 MEG 2012 +2023 1.721470 -27.999788 6.000000 5.000000 MEG 2023 +2022 1.721470 -22.999788 6.000000 5.000000 MEG 2022 +2032 1.906072 -37.626270 6.000000 5.000000 MEG 2032 +2033 1.906072 -32.626270 6.000000 5.000000 MEG 2033 +2042 -7.905933 -37.589039 6.000000 5.000000 MEG 2042 +2043 -7.905933 -32.589039 6.000000 5.000000 MEG 2043 +2113 -3.033595 -47.732231 6.000000 5.000000 MEG 2113 +2112 -3.033595 -42.732231 6.000000 5.000000 MEG 2112 +2122 -2.999999 -56.934807 6.000000 5.000000 MEG 2122 +2123 -2.999999 -51.934807 6.000000 5.000000 MEG 2123 +2133 4.648282 -64.218044 6.000000 5.000000 MEG 2133 +2132 4.648282 -59.218044 6.000000 5.000000 MEG 2132 +2143 -10.692250 -64.207703 6.000000 5.000000 MEG 2143 +2142 -10.692250 -59.207703 6.000000 5.000000 MEG 2142 +2212 8.962435 -10.585071 6.000000 5.000000 MEG 2212 +2213 8.962435 -5.585071 6.000000 5.000000 MEG 2213 +2223 16.756042 -11.459877 6.000000 5.000000 MEG 2223 +2222 16.756042 -6.459877 6.000000 5.000000 MEG 2222 +2233 10.797766 -22.111992 6.000000 5.000000 MEG 2233 +2232 10.797766 -17.111992 6.000000 5.000000 MEG 2232 +2242 1.262053 -18.307052 6.000000 5.000000 MEG 2242 +2243 1.262053 -13.307052 6.000000 5.000000 MEG 2243 +2312 11.664891 -33.279053 6.000000 5.000000 MEG 2312 +2313 11.664891 -28.279053 6.000000 5.000000 MEG 2313 +2323 18.227104 -39.378311 6.000000 5.000000 MEG 2323 +2322 18.227104 -34.378311 6.000000 5.000000 MEG 2322 +2332 10.067341 -54.531059 6.000000 5.000000 MEG 2332 +2333 10.067341 -49.531059 6.000000 5.000000 MEG 2333 +2343 8.120804 -45.719460 6.000000 5.000000 MEG 2343 +2342 8.120804 -40.719460 6.000000 5.000000 MEG 2342 +2412 24.365654 -13.469363 6.000000 5.000000 MEG 2412 +2413 24.365654 -8.469363 6.000000 5.000000 MEG 2413 +2423 31.531933 -16.859812 6.000000 5.000000 MEG 2423 +2422 31.531933 -11.859812 6.000000 5.000000 MEG 2422 +2433 26.322470 -29.595119 6.000000 5.000000 MEG 2433 +2432 26.322470 -24.595119 6.000000 5.000000 MEG 2432 +2442 19.393225 -25.022739 6.000000 
5.000000 MEG 2442 +2443 19.393225 -20.022739 6.000000 5.000000 MEG 2443 +2512 21.906504 -47.260071 6.000000 5.000000 MEG 2512 +2513 21.906504 -42.260071 6.000000 5.000000 MEG 2513 +2522 31.762718 -35.773750 6.000000 5.000000 MEG 2522 +2523 31.762718 -30.773750 6.000000 5.000000 MEG 2523 +2533 31.462860 -47.927265 6.000000 5.000000 MEG 2533 +2532 31.462860 -42.927265 6.000000 5.000000 MEG 2532 +2543 19.023640 -58.637577 6.000000 5.000000 MEG 2543 +2542 19.023640 -53.637577 6.000000 5.000000 MEG 2542 +2612 40.724506 -4.266347 6.000000 5.000000 MEG 2612 +2613 40.724506 0.733653 6.000000 5.000000 MEG 2613 +2623 46.297695 -14.395032 6.000000 5.000000 MEG 2623 +2622 46.297695 -9.395032 6.000000 5.000000 MEG 2622 +2633 40.950874 -32.847042 6.000000 5.000000 MEG 2633 +2632 40.950874 -27.847042 6.000000 5.000000 MEG 2632 +2642 38.210819 -20.857738 6.000000 5.000000 MEG 2642 +2643 38.210819 -15.857738 6.000000 5.000000 MEG 2643 diff --git a/mne/channels/data/layouts/Vectorview-grad_norm.lout b/mne/channels/data/layouts/Vectorview-grad_norm.lout new file mode 100644 index 0000000..d06ce01 --- /dev/null +++ b/mne/channels/data/layouts/Vectorview-grad_norm.lout @@ -0,0 +1,103 @@ +-50.000000 50.000000 -50.000000 38.000000 +11 -41.408840 17.090919 6.000000 5.000000 MEG 011X +12 -33.873951 19.857674 6.000000 5.000000 MEG 012X +13 -38.464523 9.051075 6.000000 5.000000 MEG 013X +14 -45.317917 3.279520 6.000000 5.000000 MEG 014X +21 -32.233719 8.146864 6.000000 5.000000 MEG 021X +22 -25.690760 8.433022 6.000000 5.000000 MEG 022X +23 -27.227139 -1.254610 6.000000 5.000000 MEG 023X +24 -33.698534 -2.642785 6.000000 5.000000 MEG 024X +31 -23.067547 24.734621 6.000000 5.000000 MEG 031X +32 -22.098728 16.737410 6.000000 5.000000 MEG 032X +33 -16.461800 14.609854 6.000000 5.000000 MEG 033X +34 -28.464256 17.451874 6.000000 5.000000 MEG 034X +41 -19.362539 7.376735 6.000000 5.000000 MEG 041X +42 -12.864409 6.474677 6.000000 5.000000 MEG 042X +43 -13.325964 -1.183000 6.000000 5.000000 MEG 043X +44 -20.358908 -0.938589 6.000000 5.000000 MEG 044X +51 -16.560817 29.103437 6.000000 5.000000 MEG 051X +52 -9.821842 31.383564 6.000000 5.000000 MEG 052X +53 -9.336051 25.759117 6.000000 5.000000 MEG 053X +54 -16.222077 22.789145 6.000000 5.000000 MEG 054X +61 -9.426766 19.671541 6.000000 5.000000 MEG 061X +62 -2.982150 13.733236 6.000000 5.000000 MEG 062X +63 -6.324418 6.882314 6.000000 5.000000 MEG 063X +64 -9.654012 13.389857 6.000000 5.000000 MEG 064X +71 -6.407364 -0.212448 6.000000 5.000000 MEG 071X +72 0.444286 -0.277880 6.000000 5.000000 MEG 072X +73 0.483912 -6.911695 6.000000 5.000000 MEG 073X +74 -6.503398 -6.874514 6.000000 5.000000 MEG 074X +81 -2.979496 32.140564 6.000000 5.000000 MEG 081X +82 -2.981206 26.486458 6.000000 5.000000 MEG 082X +91 3.820817 31.402866 6.000000 5.000000 MEG 091X +92 10.618533 29.086569 6.000000 5.000000 MEG 092X +93 10.229562 22.803463 6.000000 5.000000 MEG 093X +94 3.361053 25.786205 6.000000 5.000000 MEG 094X +101 -2.982047 20.501795 6.000000 5.000000 MEG 101X +102 3.409646 19.674952 6.000000 5.000000 MEG 102X +103 3.613043 13.399289 6.000000 5.000000 MEG 103X +104 0.382112 6.933975 6.000000 5.000000 MEG 104X +111 6.826344 6.452130 6.000000 5.000000 MEG 111X +112 13.341015 7.352071 6.000000 5.000000 MEG 112X +113 14.322306 -1.012468 6.000000 5.000000 MEG 113X +114 7.299809 -1.115800 6.000000 5.000000 MEG 114X +121 17.159397 24.712067 6.000000 5.000000 MEG 121X +122 22.594622 17.362583 6.000000 5.000000 MEG 122X +123 16.098728 16.737411 6.000000 5.000000 MEG 123X +124 10.418224 
14.626265 6.000000 5.000000 MEG 124X +131 19.690762 8.433019 6.000000 5.000000 MEG 131X +132 26.213667 8.075083 6.000000 5.000000 MEG 132X +133 27.774809 -2.728805 6.000000 5.000000 MEG 133X +134 21.202684 -1.254627 6.000000 5.000000 MEG 134X +141 27.929657 19.898018 6.000000 5.000000 MEG 141X +142 35.246883 17.323858 6.000000 5.000000 MEG 142X +143 39.239410 3.410470 6.000000 5.000000 MEG 143X +144 32.390839 8.988529 6.000000 5.000000 MEG 144X +151 -40.253967 -3.703956 6.000000 5.000000 MEG 151X +152 -38.062698 -14.995193 6.000000 5.000000 MEG 152X +153 -40.474266 -23.037640 6.000000 5.000000 MEG 153X +154 -44.949768 -10.637144 6.000000 5.000000 MEG 154X +161 -32.408976 -12.215726 6.000000 5.000000 MEG 161X +162 -26.253698 -10.038419 6.000000 5.000000 MEG 162X +163 -22.034237 -17.815468 6.000000 5.000000 MEG 163X +164 -28.014048 -20.868780 6.000000 5.000000 MEG 164X +171 -32.343294 -33.363060 6.000000 5.000000 MEG 171X +172 -32.557526 -25.167658 6.000000 5.000000 MEG 172X +173 -24.219797 -32.925196 6.000000 5.000000 MEG 173X +174 -21.768074 -40.654018 6.000000 5.000000 MEG 174X +181 -19.800634 -8.646573 6.000000 5.000000 MEG 181X +182 -13.191874 -8.019776 6.000000 5.000000 MEG 182X +183 -6.600061 -13.240516 6.000000 5.000000 MEG 183X +184 -14.718287 -15.782150 6.000000 5.000000 MEG 184X +191 -15.472808 -23.418205 6.000000 5.000000 MEG 191X +192 -12.552808 -31.875578 6.000000 5.000000 MEG 192X +193 -14.142802 -37.886852 6.000000 5.000000 MEG 193X +194 -21.129593 -27.560652 6.000000 5.000000 MEG 194X +201 -7.059234 -19.849951 6.000000 5.000000 MEG 201X +202 1.013249 -19.839857 6.000000 5.000000 MEG 202X +203 1.170161 -26.385864 6.000000 5.000000 MEG 203X +204 -7.170043 -26.360546 6.000000 5.000000 MEG 204X +211 -3.028555 -33.257917 6.000000 5.000000 MEG 211X +212 -3.000000 -39.515667 6.000000 5.000000 MEG 212X +213 3.501040 -44.468269 6.000000 5.000000 MEG 213X +214 -9.538412 -44.461239 6.000000 5.000000 MEG 214X +221 7.168070 -7.997848 6.000000 5.000000 MEG 221X +222 13.792637 -8.592716 6.000000 5.000000 MEG 222X +223 8.728101 -15.836154 6.000000 5.000000 MEG 223X +224 0.622745 -13.248796 6.000000 5.000000 MEG 224X +231 9.465158 -23.429756 6.000000 5.000000 MEG 231X +232 15.043037 -27.577251 6.000000 5.000000 MEG 232X +233 8.107240 -37.881119 6.000000 5.000000 MEG 233X +234 6.452683 -31.889233 6.000000 5.000000 MEG 234X +241 20.260805 -9.959167 6.000000 5.000000 MEG 241X +242 26.352144 -12.264672 6.000000 5.000000 MEG 242X +243 21.924099 -20.924681 6.000000 5.000000 MEG 243X +244 16.034241 -17.815463 6.000000 5.000000 MEG 244X +251 18.170528 -32.936850 6.000000 5.000000 MEG 251X +252 26.548311 -25.126150 6.000000 5.000000 MEG 252X +253 26.293430 -33.390539 6.000000 5.000000 MEG 253X +254 15.720093 -40.673553 6.000000 5.000000 MEG 254X +261 34.165833 -3.701116 6.000000 5.000000 MEG 261X +262 38.903042 -10.588621 6.000000 5.000000 MEG 262X +263 34.358242 -23.135988 6.000000 5.000000 MEG 263X +264 32.029198 -14.983262 6.000000 5.000000 MEG 264X diff --git a/mne/channels/data/layouts/Vectorview-mag.lout b/mne/channels/data/layouts/Vectorview-mag.lout new file mode 100644 index 0000000..c5f4c60 --- /dev/null +++ b/mne/channels/data/layouts/Vectorview-mag.lout @@ -0,0 +1,103 @@ +-50.000000 50.000000 -50.000000 38.000000 +111 -41.408840 17.090919 6.000000 5.000000 MEG 0111 +121 -33.873951 19.857674 6.000000 5.000000 MEG 0121 +131 -38.464523 9.051075 6.000000 5.000000 MEG 0131 +141 -45.317917 3.279520 6.000000 5.000000 MEG 0141 +211 -32.233719 8.146864 6.000000 5.000000 MEG 0211 +221 -25.690760 
8.433022 6.000000 5.000000 MEG 0221 +231 -27.227139 -1.254610 6.000000 5.000000 MEG 0231 +241 -33.698534 -2.642785 6.000000 5.000000 MEG 0241 +311 -23.067547 24.734621 6.000000 5.000000 MEG 0311 +321 -22.098728 16.737410 6.000000 5.000000 MEG 0321 +331 -16.461800 14.609854 6.000000 5.000000 MEG 0331 +341 -28.464256 17.451874 6.000000 5.000000 MEG 0341 +411 -19.362539 7.376735 6.000000 5.000000 MEG 0411 +421 -12.864409 6.474677 6.000000 5.000000 MEG 0421 +431 -13.325964 -1.183000 6.000000 5.000000 MEG 0431 +441 -20.358908 -0.938589 6.000000 5.000000 MEG 0441 +511 -16.560817 29.103437 6.000000 5.000000 MEG 0511 +521 -9.821842 31.383564 6.000000 5.000000 MEG 0521 +531 -9.336051 25.759117 6.000000 5.000000 MEG 0531 +541 -16.222077 22.789145 6.000000 5.000000 MEG 0541 +611 -9.426766 19.671541 6.000000 5.000000 MEG 0611 +621 -2.982150 13.733236 6.000000 5.000000 MEG 0621 +631 -6.324418 6.882314 6.000000 5.000000 MEG 0631 +641 -9.654012 13.389857 6.000000 5.000000 MEG 0641 +711 -6.407364 -0.212448 6.000000 5.000000 MEG 0711 +721 0.444286 -0.277880 6.000000 5.000000 MEG 0721 +731 0.483912 -6.911695 6.000000 5.000000 MEG 0731 +741 -6.503398 -6.874514 6.000000 5.000000 MEG 0741 +811 -2.979496 32.140564 6.000000 5.000000 MEG 0811 +821 -2.981206 26.486458 6.000000 5.000000 MEG 0821 +911 3.820817 31.402866 6.000000 5.000000 MEG 0911 +921 10.618533 29.086569 6.000000 5.000000 MEG 0921 +931 10.229562 22.803463 6.000000 5.000000 MEG 0931 +941 3.361053 25.786205 6.000000 5.000000 MEG 0941 +1011 -2.982047 20.501795 6.000000 5.000000 MEG 1011 +1021 3.409646 19.674952 6.000000 5.000000 MEG 1021 +1031 3.613043 13.399289 6.000000 5.000000 MEG 1031 +1041 0.382112 6.933975 6.000000 5.000000 MEG 1041 +1111 6.826344 6.452130 6.000000 5.000000 MEG 1111 +1121 13.341015 7.352071 6.000000 5.000000 MEG 1121 +1131 14.322306 -1.012468 6.000000 5.000000 MEG 1131 +1141 7.299809 -1.115800 6.000000 5.000000 MEG 1141 +1211 17.159397 24.712067 6.000000 5.000000 MEG 1211 +1221 22.594622 17.362583 6.000000 5.000000 MEG 1221 +1231 16.098728 16.737411 6.000000 5.000000 MEG 1231 +1241 10.418224 14.626265 6.000000 5.000000 MEG 1241 +1311 19.690762 8.433019 6.000000 5.000000 MEG 1311 +1321 26.213667 8.075083 6.000000 5.000000 MEG 1321 +1331 27.774809 -2.728805 6.000000 5.000000 MEG 1331 +1341 21.202684 -1.254627 6.000000 5.000000 MEG 1341 +1411 27.929657 19.898018 6.000000 5.000000 MEG 1411 +1421 35.246883 17.323858 6.000000 5.000000 MEG 1421 +1431 39.239410 3.410470 6.000000 5.000000 MEG 1431 +1441 32.390839 8.988529 6.000000 5.000000 MEG 1441 +1511 -40.253967 -3.703956 6.000000 5.000000 MEG 1511 +1521 -38.062698 -14.995193 6.000000 5.000000 MEG 1521 +1531 -40.474266 -23.037640 6.000000 5.000000 MEG 1531 +1541 -44.949768 -10.637144 6.000000 5.000000 MEG 1541 +1611 -32.408976 -12.215726 6.000000 5.000000 MEG 1611 +1621 -26.253698 -10.038419 6.000000 5.000000 MEG 1621 +1631 -22.034237 -17.815468 6.000000 5.000000 MEG 1631 +1641 -28.014048 -20.868780 6.000000 5.000000 MEG 1641 +1711 -32.343294 -33.363060 6.000000 5.000000 MEG 1711 +1721 -32.557526 -25.167658 6.000000 5.000000 MEG 1721 +1731 -24.219797 -32.925196 6.000000 5.000000 MEG 1731 +1741 -21.768074 -40.654018 6.000000 5.000000 MEG 1741 +1811 -19.800634 -8.646573 6.000000 5.000000 MEG 1811 +1821 -13.191874 -8.019776 6.000000 5.000000 MEG 1821 +1831 -6.600061 -13.240516 6.000000 5.000000 MEG 1831 +1841 -14.718287 -15.782150 6.000000 5.000000 MEG 1841 +1911 -15.472808 -23.418205 6.000000 5.000000 MEG 1911 +1921 -12.552808 -31.875578 6.000000 5.000000 MEG 1921 +1931 -14.142802 
-37.886852 6.000000 5.000000 MEG 1931 +1941 -21.129593 -27.560652 6.000000 5.000000 MEG 1941 +2011 -7.059234 -19.849951 6.000000 5.000000 MEG 2011 +2021 1.013249 -19.839857 6.000000 5.000000 MEG 2021 +2031 1.170161 -26.385864 6.000000 5.000000 MEG 2031 +2041 -7.170043 -26.360546 6.000000 5.000000 MEG 2041 +2111 -3.028555 -33.257917 6.000000 5.000000 MEG 2111 +2121 -3.000000 -39.515667 6.000000 5.000000 MEG 2121 +2131 3.501040 -44.468269 6.000000 5.000000 MEG 2131 +2141 -9.538412 -44.461239 6.000000 5.000000 MEG 2141 +2211 7.168070 -7.997848 6.000000 5.000000 MEG 2211 +2221 13.792637 -8.592716 6.000000 5.000000 MEG 2221 +2231 8.728101 -15.836154 6.000000 5.000000 MEG 2231 +2241 0.622745 -13.248796 6.000000 5.000000 MEG 2241 +2311 9.465158 -23.429756 6.000000 5.000000 MEG 2311 +2321 15.043037 -27.577251 6.000000 5.000000 MEG 2321 +2331 8.107240 -37.881119 6.000000 5.000000 MEG 2331 +2341 6.452683 -31.889233 6.000000 5.000000 MEG 2341 +2411 20.260805 -9.959167 6.000000 5.000000 MEG 2411 +2421 26.352144 -12.264672 6.000000 5.000000 MEG 2421 +2431 21.924099 -20.924681 6.000000 5.000000 MEG 2431 +2441 16.034241 -17.815463 6.000000 5.000000 MEG 2441 +2511 18.170528 -32.936850 6.000000 5.000000 MEG 2511 +2521 26.548311 -25.126150 6.000000 5.000000 MEG 2521 +2531 26.293430 -33.390539 6.000000 5.000000 MEG 2531 +2541 15.720093 -40.673553 6.000000 5.000000 MEG 2541 +2611 34.165833 -3.701116 6.000000 5.000000 MEG 2611 +2621 38.903042 -10.588621 6.000000 5.000000 MEG 2621 +2631 34.358242 -23.135988 6.000000 5.000000 MEG 2631 +2641 32.029198 -14.983262 6.000000 5.000000 MEG 2641 diff --git a/mne/channels/data/layouts/biosemi.lay b/mne/channels/data/layouts/biosemi.lay new file mode 100644 index 0000000..ca74816 --- /dev/null +++ b/mne/channels/data/layouts/biosemi.lay @@ -0,0 +1,64 @@ +1 -0.496189 1.527114 0.290000 0.230000 Fp1 +2 -0.943808 1.299041 0.290000 0.230000 AF7 +3 -0.545830 1.170536 0.290000 0.230000 AF3 +4 -0.326906 0.809121 0.290000 0.230000 F1 +5 -0.659023 0.813825 0.290000 0.230000 F3 +6 -0.987913 0.858779 0.290000 0.230000 F5 +7 -1.299041 0.943808 0.290000 0.230000 F7 +8 -1.527114 0.496189 0.290000 0.230000 FT7 +9 -1.173172 0.450338 0.290000 0.230000 FC5 +10 -0.770517 0.409691 0.290000 0.230000 FC3 +11 -0.394923 0.394923 0.290000 0.230000 FC1 +12 -0.401426 -0.000000 0.290000 0.230000 C1 +13 -0.802851 -0.000000 0.290000 0.230000 C3 +14 -1.204277 -0.000000 0.290000 0.230000 C5 +15 -1.605703 -0.000000 0.290000 0.230000 T7 +16 -1.527114 -0.496189 0.290000 0.230000 TP7 +17 -1.173172 -0.450338 0.290000 0.230000 CP5 +18 -0.770517 -0.409691 0.290000 0.230000 CP3 +19 -0.394923 -0.394923 0.290000 0.230000 CP1 +20 -0.326906 -0.809121 0.290000 0.230000 P1 +21 -0.659023 -0.813825 0.290000 0.230000 P3 +22 -0.987913 -0.858779 0.290000 0.230000 P5 +23 -1.299041 -0.943808 0.290000 0.230000 P7 +24 -1.537550 -1.290157 0.290000 0.230000 P9 +25 -0.943808 -1.299041 0.290000 0.230000 PO7 +26 -0.545830 -1.170536 0.290000 0.230000 PO3 +27 -0.496189 -1.527114 0.290000 0.230000 O1 +28 0.000000 -2.007129 0.290000 0.230000 Iz +29 0.000000 -1.605703 0.290000 0.230000 Oz +30 0.000000 -1.204277 0.290000 0.230000 POz +31 0.000000 -0.802851 0.290000 0.230000 Pz +32 0.000000 -0.401426 0.290000 0.230000 CPz +33 0.000000 1.605703 0.290000 0.230000 Fpz +34 0.496189 1.527114 0.290000 0.230000 Fp2 +35 0.943808 1.299041 0.290000 0.230000 AF8 +36 0.545830 1.170536 0.290000 0.230000 AF4 +37 0.000000 1.204277 0.290000 0.230000 AFz +38 0.000000 0.802851 0.290000 0.230000 Fz +39 0.326906 0.809121 0.290000 0.230000 F2 +40 0.659023 
0.813825 0.290000 0.230000 F4 +41 0.987913 0.858779 0.290000 0.230000 F6 +42 1.299041 0.943808 0.290000 0.230000 F8 +43 1.527114 0.496189 0.290000 0.230000 FT8 +44 1.173172 0.450338 0.290000 0.230000 FC6 +45 0.770517 0.409691 0.290000 0.230000 FC4 +46 0.394923 0.394923 0.290000 0.230000 FC2 +47 0.000000 0.401426 0.290000 0.230000 FCz +48 0.000000 0.000000 0.290000 0.230000 Cz +49 0.401426 0.000000 0.290000 0.230000 C2 +50 0.802851 0.000000 0.290000 0.230000 C4 +51 1.204277 0.000000 0.290000 0.230000 C6 +52 1.605703 0.000000 0.290000 0.230000 T8 +53 1.527114 -0.496189 0.290000 0.230000 TP8 +54 1.173172 -0.450338 0.290000 0.230000 CP6 +55 0.770517 -0.409691 0.290000 0.230000 CP4 +56 0.394923 -0.394923 0.290000 0.230000 CP2 +57 0.326906 -0.809121 0.290000 0.230000 P2 +58 0.659023 -0.813825 0.290000 0.230000 P4 +59 0.987913 -0.858779 0.290000 0.230000 P6 +60 1.299041 -0.943808 0.290000 0.230000 P8 +61 1.537550 -1.290157 0.290000 0.230000 P10 +62 0.943808 -1.299041 0.290000 0.230000 PO8 +63 0.545830 -1.170536 0.290000 0.230000 PO4 +64 0.496189 -1.527114 0.290000 0.230000 O2 \ No newline at end of file diff --git a/mne/channels/data/layouts/magnesWH3600.lout b/mne/channels/data/layouts/magnesWH3600.lout new file mode 100644 index 0000000..577e953 --- /dev/null +++ b/mne/channels/data/layouts/magnesWH3600.lout @@ -0,0 +1,249 @@ + -42.19 43.52 -41.70 28.71 +001 -1.28 -5.13 4.00 3.00 MEG 001 +002 -1.22 -1.43 4.00 3.00 MEG 002 +003 -1.37 2.53 4.00 3.00 MEG 003 +004 -1.36 5.90 4.00 3.00 MEG 004 +005 -1.45 9.27 4.00 3.00 MEG 005 +006 -4.89 9.36 4.00 3.00 MEG 006 +007 -5.20 5.86 4.00 3.00 MEG 007 +008 -5.26 2.40 4.00 3.00 MEG 008 +009 -5.34 -1.29 4.00 3.00 MEG 009 +010 -5.12 -5.08 4.00 3.00 MEG 010 +011 -4.73 -8.47 4.00 3.00 MEG 011 +012 -1.31 -8.81 4.00 3.00 MEG 012 +013 2.04 -8.49 4.00 3.00 MEG 013 +014 2.54 -5.16 4.00 3.00 MEG 014 +015 2.69 -1.43 4.00 3.00 MEG 015 +016 2.62 2.56 4.00 3.00 MEG 016 +017 2.50 5.89 4.00 3.00 MEG 017 +018 2.10 9.34 4.00 3.00 MEG 018 +019 -1.45 12.55 4.00 3.00 MEG 019 +020 -5.76 12.42 4.00 3.00 MEG 020 +021 -8.30 9.98 4.00 3.00 MEG 021 +022 -9.16 5.97 4.00 3.00 MEG 022 +023 -9.32 2.49 4.00 3.00 MEG 023 +024 -9.42 -1.32 4.00 3.00 MEG 024 +025 -9.13 -5.11 4.00 3.00 MEG 025 +026 -8.43 -9.18 4.00 3.00 MEG 026 +027 -5.45 -12.10 4.00 3.00 MEG 027 +028 -1.40 -12.51 4.00 3.00 MEG 028 +029 2.64 -12.08 4.00 3.00 MEG 029 +030 5.77 -9.29 4.00 3.00 MEG 030 +031 6.50 -5.19 4.00 3.00 MEG 031 +032 6.85 -1.37 4.00 3.00 MEG 032 +033 6.70 2.65 4.00 3.00 MEG 033 +034 6.46 6.18 4.00 3.00 MEG 034 +035 5.61 10.08 4.00 3.00 MEG 035 +036 2.95 12.49 4.00 3.00 MEG 036 +037 -1.47 15.77 4.00 3.00 MEG 037 +038 -5.48 15.52 4.00 3.00 MEG 038 +039 -8.97 13.31 4.00 3.00 MEG 039 +040 -11.91 10.42 4.00 3.00 MEG 040 +041 -12.96 6.84 4.00 3.00 MEG 041 +042 -13.39 3.21 4.00 3.00 MEG 042 +043 -13.58 -0.70 4.00 3.00 MEG 043 +044 -13.08 -4.42 4.00 3.00 MEG 044 +045 -12.52 -8.05 4.00 3.00 MEG 045 +046 -11.13 -11.34 4.00 3.00 MEG 046 +047 -8.45 -14.21 4.00 3.00 MEG 047 +048 -5.08 -15.56 4.00 3.00 MEG 048 +049 -1.60 -16.17 4.00 3.00 MEG 049 +050 2.22 -15.61 4.00 3.00 MEG 050 +051 5.63 -14.28 4.00 3.00 MEG 051 +052 8.38 -11.70 4.00 3.00 MEG 052 +053 9.89 -8.24 4.00 3.00 MEG 053 +054 10.43 -4.42 4.00 3.00 MEG 054 +055 10.94 -0.62 4.00 3.00 MEG 055 +056 10.72 3.35 4.00 3.00 MEG 056 +057 10.22 7.01 4.00 3.00 MEG 057 +058 9.04 10.61 4.00 3.00 MEG 058 +059 6.20 13.42 4.00 3.00 MEG 059 +060 2.52 15.65 4.00 3.00 MEG 060 +061 -1.53 18.91 4.00 3.00 MEG 061 +062 -5.68 18.61 4.00 3.00 MEG 062 +063 -9.46 16.89 4.00 3.00 MEG 063 
+064 -12.95 14.48 4.00 3.00 MEG 064 +065 -15.67 11.24 4.00 3.00 MEG 065 +066 -17.06 7.05 4.00 3.00 MEG 066 +067 -17.65 3.16 4.00 3.00 MEG 067 +068 -17.98 -1.20 4.00 3.00 MEG 068 +069 -17.13 -5.53 4.00 3.00 MEG 069 +070 -16.60 -9.33 4.00 3.00 MEG 070 +071 -14.32 -12.91 4.00 3.00 MEG 071 +072 -11.85 -15.75 4.00 3.00 MEG 072 +073 -8.78 -17.93 4.00 3.00 MEG 073 +074 -5.30 -19.40 4.00 3.00 MEG 074 +075 -1.58 -19.85 4.00 3.00 MEG 075 +076 2.41 -19.42 4.00 3.00 MEG 076 +077 5.94 -18.13 4.00 3.00 MEG 077 +078 9.16 -15.98 4.00 3.00 MEG 078 +079 11.79 -13.08 4.00 3.00 MEG 079 +080 13.62 -9.59 4.00 3.00 MEG 080 +081 14.57 -5.64 4.00 3.00 MEG 081 +082 15.42 -1.35 4.00 3.00 MEG 082 +083 15.05 3.30 4.00 3.00 MEG 083 +084 14.29 7.20 4.00 3.00 MEG 084 +085 12.81 11.43 4.00 3.00 MEG 085 +086 9.96 14.67 4.00 3.00 MEG 086 +087 6.46 17.06 4.00 3.00 MEG 087 +088 2.60 18.73 4.00 3.00 MEG 088 +089 -1.60 22.21 4.00 3.00 MEG 089 +090 -5.83 21.82 4.00 3.00 MEG 090 +091 -9.75 20.43 4.00 3.00 MEG 091 +092 -13.45 18.45 4.00 3.00 MEG 092 +093 -16.67 15.62 4.00 3.00 MEG 093 +094 -19.33 12.13 4.00 3.00 MEG 094 +095 -20.94 7.82 4.00 3.00 MEG 095 +096 -21.81 3.65 4.00 3.00 MEG 096 +097 -22.23 -1.27 4.00 3.00 MEG 097 +098 -21.14 -5.87 4.00 3.00 MEG 098 +099 -20.30 -9.97 4.00 3.00 MEG 099 +100 -18.46 -13.84 4.00 3.00 MEG 100 +101 -16.07 -17.08 4.00 3.00 MEG 101 +102 -12.88 -19.71 4.00 3.00 MEG 102 +103 -9.34 -21.89 4.00 3.00 MEG 103 +104 -5.64 -23.02 4.00 3.00 MEG 104 +105 -1.72 -23.54 4.00 3.00 MEG 105 +106 2.48 -23.24 4.00 3.00 MEG 106 +107 6.42 -22.00 4.00 3.00 MEG 107 +108 9.86 -20.19 4.00 3.00 MEG 108 +109 13.22 -17.32 4.00 3.00 MEG 109 +110 15.75 -14.15 4.00 3.00 MEG 110 +111 17.67 -10.19 4.00 3.00 MEG 111 +112 18.65 -6.08 4.00 3.00 MEG 112 +113 19.69 -1.27 4.00 3.00 MEG 113 +114 19.27 3.70 4.00 3.00 MEG 114 +115 18.30 8.05 4.00 3.00 MEG 115 +116 16.46 12.48 4.00 3.00 MEG 116 +117 13.74 15.93 4.00 3.00 MEG 117 +118 10.41 18.72 4.00 3.00 MEG 118 +119 6.64 20.69 4.00 3.00 MEG 119 +120 2.67 22.02 4.00 3.00 MEG 120 +121 -1.74 25.41 4.00 3.00 MEG 121 +122 -6.59 24.84 4.00 3.00 MEG 122 +123 -11.16 23.37 4.00 3.00 MEG 123 +124 -15.46 21.07 4.00 3.00 MEG 124 +125 -19.25 17.84 4.00 3.00 MEG 125 +126 -22.45 13.89 4.00 3.00 MEG 126 +127 -24.89 8.96 4.00 3.00 MEG 127 +128 -26.13 4.36 4.00 3.00 MEG 128 +129 -26.65 -1.22 4.00 3.00 MEG 129 +130 -25.30 -6.36 4.00 3.00 MEG 130 +131 -24.16 -11.45 4.00 3.00 MEG 131 +132 -21.98 -15.88 4.00 3.00 MEG 132 +133 -18.81 -19.82 4.00 3.00 MEG 133 +134 -15.20 -22.99 4.00 3.00 MEG 134 +135 -11.11 -25.29 4.00 3.00 MEG 135 +136 -6.51 -26.74 4.00 3.00 MEG 136 +137 -1.86 -27.28 4.00 3.00 MEG 137 +138 3.17 -26.90 4.00 3.00 MEG 138 +139 7.79 -25.55 4.00 3.00 MEG 139 +140 12.07 -23.15 4.00 3.00 MEG 140 +141 15.93 -20.09 4.00 3.00 MEG 141 +142 19.04 -16.25 4.00 3.00 MEG 142 +143 21.39 -11.67 4.00 3.00 MEG 143 +144 22.75 -6.58 4.00 3.00 MEG 144 +145 23.99 -1.23 4.00 3.00 MEG 145 +146 23.36 4.49 4.00 3.00 MEG 146 +147 22.02 9.37 4.00 3.00 MEG 147 +148 19.51 14.31 4.00 3.00 MEG 148 +149 16.20 18.23 4.00 3.00 MEG 149 +150 12.16 21.54 4.00 3.00 MEG 150 +151 7.85 23.69 4.00 3.00 MEG 151 +152 3.16 25.01 4.00 3.00 MEG 152 +153 -23.01 18.82 4.00 3.00 MEG 153 +154 -26.06 15.31 4.00 3.00 MEG 154 +155 -28.76 10.18 4.00 3.00 MEG 155 +156 -31.71 3.39 4.00 3.00 MEG 156 +157 -32.05 -2.89 4.00 3.00 MEG 157 +158 -31.42 -8.67 4.00 3.00 MEG 158 +159 -26.22 -15.24 4.00 3.00 MEG 159 +160 -23.31 -19.72 4.00 3.00 MEG 160 +161 -19.33 -23.66 4.00 3.00 MEG 161 +162 -14.75 -26.73 4.00 3.00 MEG 162 +163 -9.92 -28.91 4.00 3.00 MEG 163 
+164 -4.52 -30.10 4.00 3.00 MEG 164 +165 1.25 -30.15 4.00 3.00 MEG 165 +166 6.17 -29.40 4.00 3.00 MEG 166 +167 11.43 -27.39 4.00 3.00 MEG 167 +168 16.20 -24.37 4.00 3.00 MEG 168 +169 20.37 -20.27 4.00 3.00 MEG 169 +170 23.54 -15.56 4.00 3.00 MEG 170 +171 28.66 -8.94 4.00 3.00 MEG 171 +172 29.46 -3.00 4.00 3.00 MEG 172 +173 29.04 3.51 4.00 3.00 MEG 173 +174 25.94 10.77 4.00 3.00 MEG 174 +175 23.08 15.80 4.00 3.00 MEG 175 +176 19.78 19.54 4.00 3.00 MEG 176 +177 -26.70 20.52 4.00 3.00 MEG 177 +178 -29.66 16.81 4.00 3.00 MEG 178 +179 -32.55 11.68 4.00 3.00 MEG 179 +180 -32.47 -13.23 4.00 3.00 MEG 180 +181 -27.63 -19.12 4.00 3.00 MEG 181 +182 -23.75 -23.89 4.00 3.00 MEG 182 +183 -18.94 -27.77 4.00 3.00 MEG 183 +184 -13.64 -30.59 4.00 3.00 MEG 184 +185 -7.93 -32.70 4.00 3.00 MEG 185 +186 -2.12 -33.31 4.00 3.00 MEG 186 +187 4.06 -32.74 4.00 3.00 MEG 187 +188 10.04 -31.14 4.00 3.00 MEG 188 +189 15.57 -28.41 4.00 3.00 MEG 189 +190 20.44 -24.69 4.00 3.00 MEG 190 +191 24.62 -19.81 4.00 3.00 MEG 191 +192 29.49 -13.87 4.00 3.00 MEG 192 +193 29.48 12.54 4.00 3.00 MEG 193 +194 26.49 17.54 4.00 3.00 MEG 194 +195 23.28 21.40 4.00 3.00 MEG 195 +196 -36.84 4.15 4.00 3.00 MEG 196 +197 -37.22 -3.16 4.00 3.00 MEG 197 +198 -36.14 -9.68 4.00 3.00 MEG 198 +199 -28.42 -23.63 4.00 3.00 MEG 199 +200 -23.68 -28.05 4.00 3.00 MEG 200 +201 -18.03 -31.89 4.00 3.00 MEG 201 +202 -11.97 -34.42 4.00 3.00 MEG 202 +203 -5.32 -35.88 4.00 3.00 MEG 203 +204 1.03 -36.08 4.00 3.00 MEG 204 +205 7.92 -35.00 4.00 3.00 MEG 205 +206 13.99 -32.64 4.00 3.00 MEG 206 +207 19.78 -29.06 4.00 3.00 MEG 207 +208 24.79 -24.52 4.00 3.00 MEG 208 +209 33.39 -10.13 4.00 3.00 MEG 209 +210 34.62 -3.11 4.00 3.00 MEG 210 +211 34.23 4.57 4.00 3.00 MEG 211 +212 -32.38 19.14 4.00 3.00 MEG 212 +213 -35.90 13.21 4.00 3.00 MEG 213 +214 -36.70 -14.70 4.00 3.00 MEG 214 +215 -32.93 -22.44 4.00 3.00 MEG 215 +216 -28.17 -28.07 4.00 3.00 MEG 216 +217 -22.65 -32.41 4.00 3.00 MEG 217 +218 -16.53 -35.71 4.00 3.00 MEG 218 +219 -9.52 -37.92 4.00 3.00 MEG 219 +220 -2.58 -38.82 4.00 3.00 MEG 220 +221 4.65 -38.54 4.00 3.00 MEG 221 +222 11.78 -36.65 4.00 3.00 MEG 222 +223 18.43 -33.60 4.00 3.00 MEG 223 +224 24.26 -29.21 4.00 3.00 MEG 224 +225 29.52 -23.44 4.00 3.00 MEG 225 +226 33.73 -15.36 4.00 3.00 MEG 226 +227 33.02 14.20 4.00 3.00 MEG 227 +228 29.24 19.93 4.00 3.00 MEG 228 +229 -36.80 18.24 4.00 3.00 MEG 229 +230 -40.03 12.76 4.00 3.00 MEG 230 +231 -41.35 5.03 4.00 3.00 MEG 231 +232 -41.79 -3.17 4.00 3.00 MEG 232 +233 -40.48 -10.59 4.00 3.00 MEG 233 +234 -32.92 -26.79 4.00 3.00 MEG 234 +235 -27.40 -32.12 4.00 3.00 MEG 235 +236 -20.92 -36.72 4.00 3.00 MEG 236 +237 -14.11 -39.49 4.00 3.00 MEG 237 +238 -6.76 -41.18 4.00 3.00 MEG 238 +239 1.45 -41.40 4.00 3.00 MEG 239 +240 8.96 -40.25 4.00 3.00 MEG 240 +241 16.27 -37.84 4.00 3.00 MEG 241 +242 22.75 -33.68 4.00 3.00 MEG 242 +243 29.08 -28.20 4.00 3.00 MEG 243 +244 37.59 -11.05 4.00 3.00 MEG 244 +245 39.12 -3.16 4.00 3.00 MEG 245 +246 38.59 5.47 4.00 3.00 MEG 246 +247 37.16 13.60 4.00 3.00 MEG 247 +248 33.62 18.93 4.00 3.00 MEG 248 diff --git a/mne/channels/data/montages/EGI_256.csd b/mne/channels/data/montages/EGI_256.csd new file mode 100644 index 0000000..3da8637 --- /dev/null +++ b/mne/channels/data/montages/EGI_256.csd @@ -0,0 +1,258 @@ +// MatLab Sphere coordinates [degrees] Cartesian coordinates +// Label Theta Phi Radius X Y Z off sphere surface + E1 37.700 -14.000 1.000 0.7677 0.5934 -0.2419 -0.00000000000000011 + E2 44.600 -0.880 1.000 0.7119 0.7021 -0.0154 0.00000000000000000 + E3 51.700 11.000 1.000 0.6084 0.7704 
0.1908 0.00000000000000000 + E4 58.200 21.800 1.000 0.4893 0.7891 0.3714 -0.00000000000000011 + E5 64.200 33.600 1.000 0.3625 0.7499 0.5534 0.00000000000000000 + E6 70.800 45.500 1.000 0.2305 0.6619 0.7133 -0.00000000000000022 + E7 77.200 56.700 1.000 0.1216 0.5354 0.8358 0.00000000000000000 + E8 90.000 67.700 1.000 0.0000 0.3795 0.9252 0.00000000000000000 + E9 127.300 78.300 1.000 -0.1229 0.1613 0.9792 0.00000000000000000 + E10 51.200 -9.080 1.000 0.6188 0.7696 -0.1578 0.00000000000000000 + E11 58.800 2.370 1.000 0.5176 0.8546 0.0414 0.00000000000000000 + E12 66.800 14.300 1.000 0.3817 0.8907 0.2470 -0.00000000000000011 + E13 73.800 25.200 1.000 0.2524 0.8689 0.4258 0.00000000000000022 + E14 81.360 36.400 1.000 0.1209 0.7958 0.5934 0.00000000000000022 + E15 90.000 46.900 1.000 0.0000 0.6833 0.7302 0.00000000000000000 + E16 102.800 56.700 1.000 -0.1216 0.5354 0.8358 0.00000000000000000 + E17 128.200 66.600 1.000 -0.2456 0.3121 0.9178 -0.00000000000000011 + E18 66.600 -4.970 1.000 0.3957 0.9143 -0.0866 -0.00000000000000011 + E19 74.000 4.680 1.000 0.2747 0.9581 0.0816 -0.00000000000000011 + E20 81.960 15.700 1.000 0.1346 0.9532 0.2706 0.00000000000000022 + E21 90.000 26.400 1.000 0.0000 0.8957 0.4446 0.00000000000000000 + E22 98.640 36.400 1.000 -0.1209 0.7958 0.5934 0.00000000000000022 + E23 109.200 45.500 1.000 -0.2305 0.6619 0.7133 0.00000000000000000 + E24 127.200 54.200 1.000 -0.3537 0.4659 0.8111 0.00000000000000000 + E25 82.540 -3.260 1.000 0.1296 0.9899 -0.0569 0.00000000000000000 + E26 90.000 5.370 1.000 0.0000 0.9956 0.0936 0.00000000000000000 + E27 98.040 15.700 1.000 -0.1346 0.9532 0.2706 0.00000000000000022 + E28 106.200 25.200 1.000 -0.2524 0.8689 0.4258 0.00000000000000022 + E29 115.800 33.600 1.000 -0.3625 0.7499 0.5534 0.00000000000000000 + E30 128.800 41.200 1.000 -0.4715 0.5864 0.6587 0.00000000000000000 + E31 90.000 -11.000 1.000 0.0000 0.9816 -0.1908 0.00000000000000000 + E32 97.460 -3.260 1.000 -0.1296 0.9899 -0.0569 0.00000000000000000 + E33 106.000 4.680 1.000 -0.2747 0.9581 0.0816 -0.00000000000000011 + E34 113.200 14.300 1.000 -0.3817 0.8907 0.2470 -0.00000000000000022 + E35 121.800 21.800 1.000 -0.4893 0.7891 0.3714 -0.00000000000000011 + E36 128.500 30.200 1.000 -0.5380 0.6764 0.5030 0.00000000000000022 + E37 113.400 -4.970 1.000 -0.3957 0.9143 -0.0866 0.00000000000000000 + E38 121.200 2.370 1.000 -0.5176 0.8546 0.0414 0.00000000000000000 + E39 128.300 11.000 1.000 -0.6084 0.7704 0.1908 0.00000000000000000 + E40 135.300 20.800 1.000 -0.6645 0.6576 0.3551 0.00000000000000000 + E41 140.600 32.000 1.000 -0.6553 0.5383 0.5299 0.00000000000000000 + E42 144.500 44.000 1.000 -0.5856 0.4177 0.6947 0.00000000000000000 + E43 151.000 54.800 1.000 -0.5042 0.2795 0.8171 0.00000000000000000 + E44 163.200 66.400 1.000 -0.3833 0.1157 0.9164 0.00000000000000000 + E45 197.000 77.300 1.000 -0.2102 -0.0643 0.9755 0.00000000000000000 + E46 128.800 -9.080 1.000 -0.6188 0.7696 -0.1578 0.00000000000000000 + E47 135.400 -0.880 1.000 -0.7119 0.7021 -0.0154 -0.00000000000000011 + E48 142.500 8.460 1.000 -0.7847 0.6021 0.1471 0.00000000000000000 + E49 149.200 19.400 1.000 -0.8102 0.4830 0.3322 0.00000000000000000 + E50 155.300 32.200 1.000 -0.7688 0.3536 0.5329 0.00000000000000000 + E51 162.400 44.200 1.000 -0.6834 0.2168 0.6972 0.00000000000000000 + E52 173.500 54.500 1.000 -0.5770 0.0657 0.8141 0.00000000000000000 + E53 197.000 65.600 1.000 -0.3951 -0.1208 0.9107 0.00000000000000000 + E54 142.300 -14.000 1.000 -0.7677 0.5934 -0.2419 0.00000000000000000 + E55 149.100 -4.100 1.000 -0.8559 
0.5122 -0.0715 0.00000000000000000 + E56 156.700 7.130 1.000 -0.9113 0.3925 0.1241 0.00000000000000022 + E57 163.200 19.500 1.000 -0.9024 0.2725 0.3338 0.00000000000000000 + E58 169.700 31.600 1.000 -0.8380 0.1523 0.5240 0.00000000000000000 + E59 179.500 43.000 1.000 -0.7313 0.0064 0.6820 0.00000000000000000 + E60 197.000 53.000 1.000 -0.5755 -0.1760 0.7986 0.00000000000000000 + E61 158.000 -17.200 1.000 -0.8857 0.3579 -0.2957 -0.00000000000000022 + E62 165.100 -5.730 1.000 -0.9615 0.2558 -0.0998 0.00000000000000022 + E63 171.400 6.890 1.000 -0.9816 0.1485 0.1200 0.00000000000000022 + E64 177.200 19.000 1.000 -0.9444 0.0462 0.3256 0.00000000000000000 + E65 184.300 31.100 1.000 -0.8539 -0.0642 0.5165 0.00000000000000000 + E66 196.000 39.900 1.000 -0.7374 -0.2115 0.6414 0.00000000000000000 + E67 167.300 -27.900 1.000 -0.8621 0.1943 -0.4679 0.00000000000000000 + E68 172.300 -17.500 1.000 -0.9451 0.1278 -0.3007 0.00000000000000000 + E69 179.500 -6.970 1.000 -0.9926 0.0087 -0.1213 0.00000000000000000 + E70 185.400 5.990 1.000 -0.9901 -0.0936 0.1044 0.00000000000000022 + E71 191.000 18.700 1.000 -0.9298 -0.1807 0.3206 0.00000000000000000 + E72 197.000 28.500 1.000 -0.8404 -0.2569 0.4772 0.00000000000000000 + E73 174.500 -38.200 1.000 -0.7822 0.0753 -0.6184 0.00000000000000022 + E74 193.000 -6.630 1.000 -0.9679 -0.2234 -0.1155 0.00000000000000000 + E75 199.000 7.590 1.000 -0.9372 -0.3227 0.1321 0.00000000000000000 + E76 205.000 19.800 1.000 -0.8527 -0.3976 0.3387 -0.00000000000000011 + E77 209.000 31.900 1.000 -0.7425 -0.4116 0.5284 0.00000000000000000 + E78 214.000 43.600 1.000 -0.6004 -0.4050 0.6896 0.00000000000000000 + E79 221.000 55.600 1.000 -0.4264 -0.3707 0.8251 -0.00000000000000011 + E80 233.000 67.400 1.000 -0.2313 -0.3069 0.9232 0.00000000000000000 + E81 -90.000 78.400 1.000 0.0000 -0.2011 0.9796 -0.00000000000000011 + E82 183.900 -45.800 1.000 -0.6956 -0.0474 -0.7169 0.00000000000000000 + E83 205.000 -15.000 1.000 -0.8754 -0.4082 -0.2588 0.00000000000000000 + E84 206.000 -3.510 1.000 -0.8971 -0.4375 -0.0612 -0.00000000000000022 + E85 213.000 10.000 1.000 -0.8259 -0.5364 0.1736 -0.00000000000000011 + E86 218.000 22.700 1.000 -0.7270 -0.5680 0.3859 0.00000000000000000 + E87 225.000 35.300 1.000 -0.5771 -0.5771 0.5779 0.00000000000000000 + E88 232.000 46.800 1.000 -0.4214 -0.5394 0.7290 -0.00000000000000011 + E89 245.000 56.900 1.000 -0.2308 -0.4949 0.8377 -0.00000000000000011 + E90 -90.000 67.500 1.000 0.0000 -0.3827 0.9239 0.00000000000000000 + E91 195.000 -50.900 1.000 -0.6092 -0.1632 -0.7760 0.00000000000000000 + E92 203.000 -42.700 1.000 -0.6765 -0.2872 -0.6782 -0.00000000000000022 + E93 211.000 -32.500 1.000 -0.7229 -0.4344 -0.5373 0.00000000000000000 + E94 212.000 -23.100 1.000 -0.7801 -0.4874 -0.3923 0.00000000000000000 + E95 216.000 -12.400 1.000 -0.7901 -0.5741 -0.2147 0.00000000000000000 + E96 221.000 0.666 1.000 -0.7547 -0.6560 0.0116 0.00000000000000000 + E97 228.000 12.900 1.000 -0.6522 -0.7244 0.2233 0.00000000000000022 + E98 233.000 24.900 1.000 -0.5459 -0.7244 0.4210 0.00000000000000000 + E99 241.000 36.400 1.000 -0.3902 -0.7040 0.5934 0.00000000000000000 + E100 251.000 46.900 1.000 -0.2225 -0.6460 0.7302 0.00000000000000000 + E101 -90.000 44.200 1.000 0.0000 -0.7169 0.6972 0.00000000000000000 + E102 211.000 -47.800 1.000 -0.5758 -0.3460 -0.7408 0.00000000000000000 + E103 217.000 -39.900 1.000 -0.6127 -0.4617 -0.6414 0.00000000000000000 + E104 223.000 -29.500 1.000 -0.6365 -0.5936 -0.4924 0.00000000000000000 + E105 224.000 -20.500 1.000 -0.6738 -0.6507 -0.3502 
0.00000000000000000 + E106 228.000 -8.840 1.000 -0.6612 -0.7343 -0.1537 0.00000000000000022 + E107 235.000 2.900 1.000 -0.5728 -0.8181 0.0506 0.00000000000000000 + E108 242.000 14.600 1.000 -0.4543 -0.8544 0.2521 0.00000000000000000 + E109 248.000 25.700 1.000 -0.3375 -0.8355 0.4337 -0.00000000000000011 + E110 257.000 36.000 1.000 -0.1820 -0.7883 0.5878 0.00000000000000000 + E111 226.000 -43.800 1.000 -0.5014 -0.5192 -0.6921 0.00000000000000000 + E112 230.000 -36.300 1.000 -0.5180 -0.6174 -0.5920 0.00000000000000000 + E113 235.000 -25.900 1.000 -0.5160 -0.7369 -0.4368 -0.00000000000000022 + E114 235.000 -17.500 1.000 -0.5470 -0.7812 -0.3007 0.00000000000000000 + E115 244.000 -6.240 1.000 -0.4358 -0.8935 -0.1087 0.00000000000000000 + E116 251.000 4.850 1.000 -0.3244 -0.9421 0.0845 0.00000000000000000 + E117 258.000 15.500 1.000 -0.2004 -0.9426 0.2672 0.00000000000000000 + E118 263.000 25.200 1.000 -0.1103 -0.8981 0.4258 0.00000000000000000 + E119 -90.000 33.400 1.000 0.0000 -0.8348 0.5505 -0.00000000000000011 + E120 237.000 -41.100 1.000 -0.4104 -0.6320 -0.6574 0.00000000000000022 + E121 242.000 -33.400 1.000 -0.3919 -0.7371 -0.5505 -0.00000000000000022 + E122 247.000 -23.400 1.000 -0.3586 -0.8448 -0.3971 0.00000000000000000 + E123 252.000 -11.200 1.000 -0.3031 -0.9329 -0.1942 0.00000000000000000 + E124 257.000 -3.660 1.000 -0.2245 -0.9724 -0.0638 0.00000000000000022 + E125 264.000 5.580 1.000 -0.1040 -0.9898 0.0972 0.00000000000000000 + E126 -90.000 15.400 1.000 0.0000 -0.9641 0.2656 0.00000000000000000 + E127 -83.000 25.200 1.000 0.1103 -0.8981 0.4258 0.00000000000000000 + E128 -77.000 36.000 1.000 0.1820 -0.7883 0.5878 0.00000000000000000 + E129 -71.000 46.900 1.000 0.2225 -0.6460 0.7302 0.00000000000000000 + E130 -65.000 56.900 1.000 0.2308 -0.4949 0.8377 -0.00000000000000011 + E131 -53.000 67.400 1.000 0.2313 -0.3069 0.9232 0.00000000000000000 + E132 -17.000 77.300 1.000 0.2102 -0.0643 0.9755 0.00000000000000000 + E133 248.000 -36.400 1.000 -0.3015 -0.7463 -0.5934 0.00000000000000022 + E134 253.000 -30.700 1.000 -0.2514 -0.8223 -0.5105 -0.00000000000000011 + E135 258.000 -19.400 1.000 -0.1961 -0.9226 -0.3322 -0.00000000000000011 + E136 265.000 -12.900 1.000 -0.0850 -0.9711 -0.2233 0.00000000000000000 + E137 -90.000 -5.280 1.000 0.0000 -0.9958 -0.0920 0.00000000000000000 + E138 -84.000 5.580 1.000 0.1040 -0.9898 0.0972 -0.00000000000000022 + E139 -78.000 15.500 1.000 0.2004 -0.9426 0.2672 -0.00000000000000011 + E140 -68.000 25.700 1.000 0.3375 -0.8355 0.4337 -0.00000000000000011 + E141 -61.000 36.400 1.000 0.3902 -0.7040 0.5934 0.00000000000000000 + E142 -52.000 46.800 1.000 0.4214 -0.5394 0.7290 0.00000000000000000 + E143 -41.000 55.600 1.000 0.4264 -0.3707 0.8251 0.00000000000000000 + E144 -17.000 65.600 1.000 0.3951 -0.1208 0.9107 0.00000000000000000 + E145 258.000 -35.800 1.000 -0.1686 -0.7933 -0.5850 0.00000000000000000 + E146 264.000 -29.600 1.000 -0.0909 -0.8647 -0.4939 0.00000000000000000 + E147 -90.000 -22.100 1.000 0.0000 -0.9265 -0.3762 0.00000000000000000 + E148 -85.000 -12.900 1.000 0.0850 -0.9711 -0.2233 0.00000000000000000 + E149 -77.000 -3.660 1.000 0.2245 -0.9724 -0.0638 0.00000000000000022 + E150 -71.000 4.850 1.000 0.3244 -0.9421 0.0845 -0.00000000000000022 + E151 -62.000 14.600 1.000 0.4543 -0.8544 0.2521 0.00000000000000000 + E152 -53.000 24.900 1.000 0.5459 -0.7244 0.4210 0.00000000000000000 + E153 -45.000 35.300 1.000 0.5771 -0.5771 0.5779 0.00000000000000000 + E154 -34.000 43.600 1.000 0.6004 -0.4050 0.6896 0.00000000000000000 + E155 -17.000 53.000 1.000 0.5755 
-0.1760 0.7986 0.00000000000000000 + E156 -84.000 -29.600 1.000 0.0909 -0.8647 -0.4939 -0.00000000000000011 + E157 -78.000 -19.400 1.000 0.1961 -0.9226 -0.3322 -0.00000000000000022 + E158 -72.000 -11.200 1.000 0.3031 -0.9329 -0.1942 0.00000000000000000 + E159 -64.000 -6.240 1.000 0.4358 -0.8935 -0.1087 0.00000000000000022 + E160 -55.000 2.900 1.000 0.5728 -0.8181 0.0506 0.00000000000000022 + E161 -48.000 12.900 1.000 0.6522 -0.7244 0.2233 0.00000000000000000 + E162 -38.000 22.700 1.000 0.7270 -0.5680 0.3859 0.00000000000000000 + E163 -29.000 31.900 1.000 0.7425 -0.4116 0.5284 0.00000000000000000 + E164 -16.000 39.900 1.000 0.7374 -0.2115 0.6414 0.00000000000000000 + E165 -78.000 -35.800 1.000 0.1686 -0.7933 -0.5850 0.00000000000000000 + E166 -73.000 -30.700 1.000 0.2514 -0.8223 -0.5105 0.00000000000000000 + E167 -67.000 -23.400 1.000 0.3586 -0.8448 -0.3971 0.00000000000000000 + E168 -55.000 -17.500 1.000 0.5470 -0.7812 -0.3007 0.00000000000000000 + E169 -48.000 -8.840 1.000 0.6612 -0.7343 -0.1537 0.00000000000000022 + E170 -41.000 0.666 1.000 0.7547 -0.6560 0.0116 0.00000000000000000 + E171 -33.000 10.000 1.000 0.8259 -0.5364 0.1736 -0.00000000000000011 + E172 -25.000 19.800 1.000 0.8527 -0.3976 0.3387 -0.00000000000000022 + E173 -17.000 28.500 1.000 0.8404 -0.2569 0.4772 0.00000000000000000 + E174 -68.000 -36.400 1.000 0.3015 -0.7463 -0.5934 0.00000000000000000 + E175 -62.000 -33.400 1.000 0.3919 -0.7371 -0.5505 -0.00000000000000011 + E176 -55.000 -25.900 1.000 0.5160 -0.7369 -0.4368 -0.00000000000000022 + E177 -44.000 -20.500 1.000 0.6738 -0.6507 -0.3502 0.00000000000000000 + E178 -36.000 -12.400 1.000 0.7901 -0.5741 -0.2147 0.00000000000000000 + E179 -26.000 -3.510 1.000 0.8971 -0.4375 -0.0612 -0.00000000000000011 + E180 -19.000 7.590 1.000 0.9372 -0.3227 0.1321 0.00000000000000022 + E181 -11.000 18.700 1.000 0.9298 -0.1807 0.3206 0.00000000000000022 + E182 -4.300 31.100 1.000 0.8539 -0.0642 0.5165 0.00000000000000000 + E183 0.500 43.000 1.000 0.7313 0.0064 0.6820 0.00000000000000000 + E184 6.500 54.500 1.000 0.5770 0.0657 0.8141 0.00000000000000000 + E185 16.800 66.400 1.000 0.3833 0.1157 0.9164 0.00000000000000000 + E186 52.700 78.300 1.000 0.1229 0.1613 0.9792 0.00000000000000000 + E187 -57.000 -41.100 1.000 0.4104 -0.6320 -0.6574 0.00000000000000022 + E188 -50.000 -36.300 1.000 0.5180 -0.6174 -0.5920 -0.00000000000000022 + E189 -43.000 -29.500 1.000 0.6365 -0.5936 -0.4924 0.00000000000000000 + E190 -32.000 -23.100 1.000 0.7801 -0.4874 -0.3923 0.00000000000000000 + E191 -25.000 -15.000 1.000 0.8754 -0.4082 -0.2588 0.00000000000000000 + E192 -13.000 -6.630 1.000 0.9679 -0.2234 -0.1155 0.00000000000000000 + E193 -5.400 5.990 1.000 0.9901 -0.0936 0.1044 0.00000000000000022 + E194 2.800 19.000 1.000 0.9444 0.0462 0.3256 0.00000000000000022 + E195 10.300 31.600 1.000 0.8380 0.1523 0.5240 0.00000000000000000 + E196 17.600 44.200 1.000 0.6834 0.2168 0.6972 0.00000000000000000 + E197 29.000 54.800 1.000 0.5042 0.2795 0.8171 0.00000000000000000 + E198 51.800 66.600 1.000 0.2456 0.3121 0.9178 0.00000000000000000 + E199 -46.000 -43.800 1.000 0.5014 -0.5192 -0.6921 0.00000000000000000 + E200 -37.000 -39.900 1.000 0.6127 -0.4617 -0.6414 0.00000000000000000 + E201 -31.000 -32.500 1.000 0.7229 -0.4344 -0.5373 0.00000000000000000 + E202 0.500 -6.970 1.000 0.9926 0.0087 -0.1213 0.00000000000000000 + E203 8.600 6.890 1.000 0.9816 0.1485 0.1200 0.00000000000000044 + E204 16.800 19.500 1.000 0.9024 0.2725 0.3338 0.00000000000000000 + E205 24.700 32.200 1.000 0.7688 0.3536 0.5329 0.00000000000000000 + 
E206 35.500 44.000 1.000 0.5856 0.4177 0.6947 0.00000000000000000 + E207 52.800 54.200 1.000 0.3537 0.4659 0.8111 0.00000000000000000 + E208 -31.000 -47.800 1.000 0.5758 -0.3460 -0.7408 0.00000000000000000 + E209 -23.000 -42.700 1.000 0.6765 -0.2872 -0.6782 0.00000000000000000 + E210 7.700 -17.500 1.000 0.9451 0.1278 -0.3007 0.00000000000000000 + E211 14.900 -5.730 1.000 0.9615 0.2558 -0.0998 -0.00000000000000011 + E212 23.300 7.130 1.000 0.9113 0.3925 0.1241 0.00000000000000022 + E213 30.800 19.400 1.000 0.8102 0.4830 0.3322 0.00000000000000000 + E214 39.400 32.000 1.000 0.6553 0.5383 0.5299 0.00000000000000000 + E215 51.200 41.200 1.000 0.4715 0.5864 0.6587 0.00000000000000000 + E216 -15.000 -50.900 1.000 0.6092 -0.1632 -0.7760 0.00000000000000000 + E217 -3.900 -45.800 1.000 0.6956 -0.0474 -0.7169 0.00000000000000000 + E218 5.500 -38.200 1.000 0.7822 0.0753 -0.6184 0.00000000000000022 + E219 12.700 -27.900 1.000 0.8621 0.1943 -0.4679 0.00000000000000000 + E220 22.000 -17.200 1.000 0.8857 0.3579 -0.2957 -0.00000000000000022 + E221 30.900 -4.100 1.000 0.8559 0.5122 -0.0715 0.00000000000000000 + E222 37.500 8.460 1.000 0.7847 0.6021 0.1471 0.00000000000000000 + E223 44.700 20.800 1.000 0.6645 0.6576 0.3551 0.00000000000000000 + E224 51.500 30.200 1.000 0.5380 0.6764 0.5030 0.00000000000000000 + E225 23.100 -28.000 1.000 0.8122 0.3464 -0.4695 0.00000000000000000 + E226 33.500 -28.800 1.000 0.7307 0.4837 -0.4818 0.00000000000000000 + E227 18.500 -38.200 1.000 0.7452 0.2494 -0.6184 0.00000000000000000 + E228 10.400 -46.300 1.000 0.6795 0.1247 -0.7230 0.00000000000000000 + E229 -1.200 -53.100 1.000 0.6003 -0.0126 -0.7997 0.00000000000000000 + E230 41.600 -32.900 1.000 0.6279 0.5574 -0.5432 0.00000000000000000 + E231 29.900 -39.600 1.000 0.6680 0.3841 -0.6374 0.00000000000000000 + E232 23.600 -46.600 1.000 0.6296 0.2751 -0.7266 0.00000000000000000 + E233 13.200 -53.300 1.000 0.5818 0.1365 -0.8018 0.00000000000000022 + E234 50.800 -35.100 1.000 0.5171 0.6340 -0.5750 -0.00000000000000011 + E235 40.300 -41.300 1.000 0.5730 0.4859 -0.6600 0.00000000000000022 + E236 34.400 -47.800 1.000 0.5542 0.3795 -0.7408 0.00000000000000000 + E237 26.900 -54.600 1.000 0.5166 0.2621 -0.8151 0.00000000000000000 + E238 60.300 -35.600 1.000 0.4029 0.7063 -0.5821 0.00000000000000022 + E239 47.800 -45.000 1.000 0.4750 0.5238 -0.7071 -0.00000000000000011 + E240 41.600 -50.500 1.000 0.4757 0.4223 -0.7716 0.00000000000000000 + E241 119.700 -35.600 1.000 -0.4029 0.7063 -0.5821 0.00000000000000000 + E242 132.200 -45.000 1.000 -0.4750 0.5238 -0.7071 -0.00000000000000011 + E243 138.400 -50.500 1.000 -0.4757 0.4223 -0.7716 0.00000000000000000 + E244 129.200 -35.100 1.000 -0.5171 0.6340 -0.5750 -0.00000000000000011 + E245 139.700 -41.300 1.000 -0.5730 0.4859 -0.6600 0.00000000000000000 + E246 145.600 -47.800 1.000 -0.5542 0.3795 -0.7408 -0.00000000000000011 + E247 153.100 -54.600 1.000 -0.5166 0.2621 -0.8151 0.00000000000000000 + E248 138.400 -32.900 1.000 -0.6279 0.5574 -0.5432 -0.00000000000000022 + E249 150.100 -39.600 1.000 -0.6680 0.3841 -0.6374 0.00000000000000000 + E250 156.400 -46.600 1.000 -0.6296 0.2751 -0.7266 -0.00000000000000011 + E251 166.800 -53.300 1.000 -0.5818 0.1365 -0.8018 0.00000000000000022 + E252 146.500 -28.800 1.000 -0.7307 0.4837 -0.4818 0.00000000000000000 + E253 156.900 -28.000 1.000 -0.8122 0.3464 -0.4695 0.00000000000000000 + E254 161.500 -38.200 1.000 -0.7452 0.2494 -0.6184 0.00000000000000000 + E255 169.600 -46.300 1.000 -0.6795 0.1247 -0.7230 0.00000000000000000 + E256 181.200 -53.100 1.000 
-0.6003 -0.0126 -0.7997 0.00000000000000000 diff --git a/mne/channels/data/montages/GSN-HydroCel-128.sfp b/mne/channels/data/montages/GSN-HydroCel-128.sfp new file mode 100644 index 0000000..56c94f8 --- /dev/null +++ b/mne/channels/data/montages/GSN-HydroCel-128.sfp @@ -0,0 +1,131 @@ +FidNz 0 9.071585155 -2.359754454 +FidT9 -6.711765 0.040402876 -3.251600355 +FidT10 6.711765 0.040402876 -3.251600355 +E1 5.787677636 5.520863216 -2.577468644 +E2 5.291804727 6.709097557 0.307434896 +E3 3.864122447 7.63424051 3.067770143 +E4 2.868837559 7.145708546 4.989564557 +E5 1.479340453 5.68662139 6.812878187 +E6 0 3.806770224 7.891304964 +E7 -1.223800252 1.558864431 8.44043914 +E8 4.221901505 7.998817387 -1.354789681 +E9 2.695405558 8.884820317 1.088308144 +E10 1.830882336 8.708839134 3.18709115 +E11 0 7.96264703 5.044718001 +E12 -1.479340453 5.68662139 6.812878187 +E13 -2.435870762 3.254307219 7.608766206 +E14 1.270447661 9.479016328 -0.947183306 +E15 0 9.087440894 1.333345013 +E16 0 9.076490798 3.105438474 +E17 0 9.271139705 -2.211516434 +E18 -1.830882336 8.708839134 3.18709115 +E19 -2.868837559 7.145708546 4.989564557 +E20 -3.825797111 5.121648995 5.942844877 +E21 -1.270447661 9.479016328 -0.947183306 +E22 -2.695405558 8.884820317 1.088308144 +E23 -3.864122447 7.63424051 3.067770143 +E24 -4.459387187 6.021159964 4.365321482 +E25 -4.221901505 7.998817387 -1.354789681 +E26 -5.291804727 6.709097557 0.307434896 +E27 -5.682547954 5.453384344 2.836565436 +E28 -5.546670402 4.157847823 4.627615703 +E29 -4.762196763 2.697832099 6.297663028 +E30 -3.695490968 0.960411022 7.627828134 +E31 -1.955187826 -0.684381878 8.564858511 +E32 -5.787677636 5.520863216 -2.577468644 +E33 -6.399087198 4.127248875 -0.356852241 +E34 -6.823959684 2.968422112 2.430080351 +E35 -6.414469893 1.490027747 4.741794544 +E36 -5.47913021 0.284948655 6.38332782 +E37 -3.909902609 -1.519049882 7.764134929 +E38 -6.550732888 3.611543152 -3.353155926 +E39 -7.191620108 0.850096251 -0.882936903 +E40 -7.391919265 0.032151584 2.143634599 +E41 -6.905051715 -0.800953972 4.600056501 +E42 -5.956055073 -2.338984312 6.00361353 +E43 -6.518995129 2.417299399 -5.253637073 +E44 -6.840717711 1.278489412 -3.5553823 +E45 -7.304625099 -1.866238006 -0.629182006 +E46 -7.312517928 -2.298574078 2.385298838 +E47 -6.737313764 -3.011819533 4.178390203 +E48 -5.934584124 2.22697797 -7.934360742 +E49 -6.298127313 0.41663451 -6.069156425 +E50 -6.78248072 -4.023512045 -0.232191092 +E51 -6.558030032 -4.667036048 2.749989597 +E52 -5.831241498 -4.494821698 4.955347697 +E53 -4.193518856 -4.037020083 6.982920038 +E54 -2.270752074 -3.414835627 8.204556551 +E55 0 -2.138343513 8.791875902 +E56 -6.174969392 -2.458138877 -5.637380998 +E57 -6.580438308 -3.739554155 -2.991084431 +E58 -6.034746843 -5.755782196 0.051843011 +E59 -5.204501802 -6.437833018 2.984444293 +E60 -4.116929504 -6.061561438 5.365757296 +E61 -2.344914884 -5.481057427 7.057748614 +E62 0 -6.676694032 6.465208258 +E63 -5.333266171 -4.302240169 -5.613509789 +E64 -5.404091392 -5.870302681 -2.891640039 +E65 -4.645302298 -7.280552408 0.130139701 +E66 -3.608293164 -7.665487704 3.129931648 +E67 -1.844644417 -7.354417376 5.224001733 +E68 -3.784983913 -6.401014415 -5.260040689 +E69 -3.528848027 -7.603010836 -2.818037873 +E70 -2.738838019 -8.607966849 0.239368223 +E71 -1.404967401 -8.437486994 3.277284901 +E72 0 -7.829896826 4.687622229 +E73 -1.929652202 -7.497197868 -5.136777648 +E74 -1.125731192 -8.455208629 -2.632832329 +E75 0 -8.996686498 0.487952047 +E76 1.404967401 -8.437486994 3.277284901 +E77 1.844644417 -7.354417376 
5.224001733 +E78 2.344914884 -5.481057427 7.057748614 +E79 2.270752074 -3.414835627 8.204556551 +E80 1.955187826 -0.684381878 8.564858511 +E81 0 -7.85891896 -4.945387489 +E82 1.125731192 -8.455208629 -2.632832329 +E83 2.738838019 -8.607966849 0.239368223 +E84 3.608293164 -7.665487704 3.129931648 +E85 4.116929504 -6.061561438 5.365757296 +E86 4.193518856 -4.037020083 6.982920038 +E87 3.909902609 -1.519049882 7.764134929 +E88 1.929652202 -7.497197868 -5.136777648 +E89 3.528848027 -7.603010836 -2.818037873 +E90 4.645302298 -7.280552408 0.130139701 +E91 5.204501802 -6.437833018 2.984444293 +E92 5.831241498 -4.494821698 4.955347697 +E93 5.956055073 -2.338984312 6.00361353 +E94 3.784983913 -6.401014415 -5.260040689 +E95 5.404091392 -5.870302681 -2.891640039 +E96 6.034746843 -5.755782196 0.051843011 +E97 6.558030032 -4.667036048 2.749989597 +E98 6.737313764 -3.011819533 4.178390203 +E99 5.333266171 -4.302240169 -5.613509789 +E100 6.580438308 -3.739554155 -2.991084431 +E101 6.78248072 -4.023512045 -0.232191092 +E102 7.312517928 -2.298574078 2.385298838 +E103 6.905051715 -0.800953972 4.600056501 +E104 5.47913021 0.284948655 6.38332782 +E105 3.695490968 0.960411022 7.627828134 +E106 1.223800252 1.558864431 8.44043914 +E107 6.174969392 -2.458138877 -5.637380998 +E108 7.304625099 -1.866238006 -0.629182006 +E109 7.391919265 0.032151584 2.143634599 +E110 6.414469893 1.490027747 4.741794544 +E111 4.762196763 2.697832099 6.297663028 +E112 2.435870762 3.254307219 7.608766206 +E113 6.298127313 0.41663451 -6.069156425 +E114 6.840717711 1.278489412 -3.5553823 +E115 7.191620108 0.850096251 -0.882936903 +E116 6.823959684 2.968422112 2.430080351 +E117 5.546670402 4.157847823 4.627615703 +E118 3.825797111 5.121648995 5.942844877 +E119 5.934584124 2.22697797 -7.934360742 +E120 6.518995129 2.417299399 -5.253637073 +E121 6.550732888 3.611543152 -3.353155926 +E122 6.399087198 4.127248875 -0.356852241 +E123 5.682547954 5.453384344 2.836565436 +E124 4.459387187 6.021159964 4.365321482 +E125 6.118458137 4.523870113 -4.409174427 +E126 3.743504949 6.649204911 -6.530243068 +E127 -3.743504949 6.649204911 -6.530243068 +E128 -6.118458137 4.523870113 -4.409174427 diff --git a/mne/channels/data/montages/GSN-HydroCel-129.sfp b/mne/channels/data/montages/GSN-HydroCel-129.sfp new file mode 100644 index 0000000..fb222db --- /dev/null +++ b/mne/channels/data/montages/GSN-HydroCel-129.sfp @@ -0,0 +1,132 @@ +FidNz 0 9.071585155 -2.359754454 +FidT9 -6.711765 0.040402876 -3.251600355 +FidT10 6.711765 0.040402876 -3.251600355 +E1 5.787677636 5.520863216 -2.577468644 +E2 5.291804727 6.709097557 0.307434896 +E3 3.864122447 7.63424051 3.067770143 +E4 2.868837559 7.145708546 4.989564557 +E5 1.479340453 5.68662139 6.812878187 +E6 0 3.806770224 7.891304964 +E7 -1.223800252 1.558864431 8.44043914 +E8 4.221901505 7.998817387 -1.354789681 +E9 2.695405558 8.884820317 1.088308144 +E10 1.830882336 8.708839134 3.18709115 +E11 0 7.96264703 5.044718001 +E12 -1.479340453 5.68662139 6.812878187 +E13 -2.435870762 3.254307219 7.608766206 +E14 1.270447661 9.479016328 -0.947183306 +E15 0 9.087440894 1.333345013 +E16 0 9.076490798 3.105438474 +E17 0 9.271139705 -2.211516434 +E18 -1.830882336 8.708839134 3.18709115 +E19 -2.868837559 7.145708546 4.989564557 +E20 -3.825797111 5.121648995 5.942844877 +E21 -1.270447661 9.479016328 -0.947183306 +E22 -2.695405558 8.884820317 1.088308144 +E23 -3.864122447 7.63424051 3.067770143 +E24 -4.459387187 6.021159964 4.365321482 +E25 -4.221901505 7.998817387 -1.354789681 +E26 -5.291804727 6.709097557 0.307434896 +E27 
-5.682547954 5.453384344 2.836565436 +E28 -5.546670402 4.157847823 4.627615703 +E29 -4.762196763 2.697832099 6.297663028 +E30 -3.695490968 0.960411022 7.627828134 +E31 -1.955187826 -0.684381878 8.564858511 +E32 -5.787677636 5.520863216 -2.577468644 +E33 -6.399087198 4.127248875 -0.356852241 +E34 -6.823959684 2.968422112 2.430080351 +E35 -6.414469893 1.490027747 4.741794544 +E36 -5.47913021 0.284948655 6.38332782 +E37 -3.909902609 -1.519049882 7.764134929 +E38 -6.550732888 3.611543152 -3.353155926 +E39 -7.191620108 0.850096251 -0.882936903 +E40 -7.391919265 0.032151584 2.143634599 +E41 -6.905051715 -0.800953972 4.600056501 +E42 -5.956055073 -2.338984312 6.00361353 +E43 -6.518995129 2.417299399 -5.253637073 +E44 -6.840717711 1.278489412 -3.5553823 +E45 -7.304625099 -1.866238006 -0.629182006 +E46 -7.312517928 -2.298574078 2.385298838 +E47 -6.737313764 -3.011819533 4.178390203 +E48 -5.934584124 2.22697797 -7.934360742 +E49 -6.298127313 0.41663451 -6.069156425 +E50 -6.78248072 -4.023512045 -0.232191092 +E51 -6.558030032 -4.667036048 2.749989597 +E52 -5.831241498 -4.494821698 4.955347697 +E53 -4.193518856 -4.037020083 6.982920038 +E54 -2.270752074 -3.414835627 8.204556551 +E55 0 -2.138343513 8.791875902 +E56 -6.174969392 -2.458138877 -5.637380998 +E57 -6.580438308 -3.739554155 -2.991084431 +E58 -6.034746843 -5.755782196 0.051843011 +E59 -5.204501802 -6.437833018 2.984444293 +E60 -4.116929504 -6.061561438 5.365757296 +E61 -2.344914884 -5.481057427 7.057748614 +E62 0 -6.676694032 6.465208258 +E63 -5.333266171 -4.302240169 -5.613509789 +E64 -5.404091392 -5.870302681 -2.891640039 +E65 -4.645302298 -7.280552408 0.130139701 +E66 -3.608293164 -7.665487704 3.129931648 +E67 -1.844644417 -7.354417376 5.224001733 +E68 -3.784983913 -6.401014415 -5.260040689 +E69 -3.528848027 -7.603010836 -2.818037873 +E70 -2.738838019 -8.607966849 0.239368223 +E71 -1.404967401 -8.437486994 3.277284901 +E72 0 -7.829896826 4.687622229 +E73 -1.929652202 -7.497197868 -5.136777648 +E74 -1.125731192 -8.455208629 -2.632832329 +E75 0 -8.996686498 0.487952047 +E76 1.404967401 -8.437486994 3.277284901 +E77 1.844644417 -7.354417376 5.224001733 +E78 2.344914884 -5.481057427 7.057748614 +E79 2.270752074 -3.414835627 8.204556551 +E80 1.955187826 -0.684381878 8.564858511 +E81 0 -7.85891896 -4.945387489 +E82 1.125731192 -8.455208629 -2.632832329 +E83 2.738838019 -8.607966849 0.239368223 +E84 3.608293164 -7.665487704 3.129931648 +E85 4.116929504 -6.061561438 5.365757296 +E86 4.193518856 -4.037020083 6.982920038 +E87 3.909902609 -1.519049882 7.764134929 +E88 1.929652202 -7.497197868 -5.136777648 +E89 3.528848027 -7.603010836 -2.818037873 +E90 4.645302298 -7.280552408 0.130139701 +E91 5.204501802 -6.437833018 2.984444293 +E92 5.831241498 -4.494821698 4.955347697 +E93 5.956055073 -2.338984312 6.00361353 +E94 3.784983913 -6.401014415 -5.260040689 +E95 5.404091392 -5.870302681 -2.891640039 +E96 6.034746843 -5.755782196 0.051843011 +E97 6.558030032 -4.667036048 2.749989597 +E98 6.737313764 -3.011819533 4.178390203 +E99 5.333266171 -4.302240169 -5.613509789 +E100 6.580438308 -3.739554155 -2.991084431 +E101 6.78248072 -4.023512045 -0.232191092 +E102 7.312517928 -2.298574078 2.385298838 +E103 6.905051715 -0.800953972 4.600056501 +E104 5.47913021 0.284948655 6.38332782 +E105 3.695490968 0.960411022 7.627828134 +E106 1.223800252 1.558864431 8.44043914 +E107 6.174969392 -2.458138877 -5.637380998 +E108 7.304625099 -1.866238006 -0.629182006 +E109 7.391919265 0.032151584 2.143634599 +E110 6.414469893 1.490027747 4.741794544 +E111 4.762196763 2.697832099 
6.297663028 +E112 2.435870762 3.254307219 7.608766206 +E113 6.298127313 0.41663451 -6.069156425 +E114 6.840717711 1.278489412 -3.5553823 +E115 7.191620108 0.850096251 -0.882936903 +E116 6.823959684 2.968422112 2.430080351 +E117 5.546670402 4.157847823 4.627615703 +E118 3.825797111 5.121648995 5.942844877 +E119 5.934584124 2.22697797 -7.934360742 +E120 6.518995129 2.417299399 -5.253637073 +E121 6.550732888 3.611543152 -3.353155926 +E122 6.399087198 4.127248875 -0.356852241 +E123 5.682547954 5.453384344 2.836565436 +E124 4.459387187 6.021159964 4.365321482 +E125 6.118458137 4.523870113 -4.409174427 +E126 3.743504949 6.649204911 -6.530243068 +E127 -3.743504949 6.649204911 -6.530243068 +E128 -6.118458137 4.523870113 -4.409174427 +Cz 0 0 8.899186843 diff --git a/mne/channels/data/montages/GSN-HydroCel-256.sfp b/mne/channels/data/montages/GSN-HydroCel-256.sfp new file mode 100644 index 0000000..2464e89 --- /dev/null +++ b/mne/channels/data/montages/GSN-HydroCel-256.sfp @@ -0,0 +1,259 @@ +FidNz 0.00000 10.56381 -2.05108 +FidT9 -7.82694 0.45386 -3.76056 +FidT10 7.82694 0.45386 -3.76056 +E1 6.96223 5.38242 -2.19061 +E2 6.48414 6.40424 -0.14004 +E3 5.69945 7.20796 1.79088 +E4 4.81093 7.77321 3.65006 +E5 3.61962 7.47782 5.50947 +E6 2.25278 6.46157 6.96317 +E7 1.18879 5.21755 8.13378 +E8 0.00000 3.59608 8.75111 +E9 -1.15339 1.51369 9.19904 +E10 5.94022 7.38337 -1.51513 +E11 5.07624 8.37264 0.40595 +E12 3.87946 9.03611 2.51559 +E13 2.60756 8.97868 4.39107 +E14 1.23344 8.11574 6.06161 +E15 0.00000 6.81181 7.28186 +E16 -1.18879 5.21755 8.13378 +E17 -2.29559 2.91372 8.55810 +E18 4.06489 9.40559 -0.89098 +E19 2.86784 10.01456 0.85212 +E20 1.42153 10.06322 2.84803 +E21 0.00000 9.40339 4.65829 +E22 -1.23344 8.11574 6.06161 +E23 -2.25278 6.46157 6.96317 +E24 -3.34467 4.40891 7.67253 +E25 1.39547 10.65281 -0.61138 +E26 0.00000 10.68996 1.00542 +E27 -1.42153 10.06322 2.84803 +E28 -2.60756 8.97868 4.39107 +E29 -3.61962 7.47782 5.50947 +E30 -4.49828 5.59395 6.28801 +E31 0.00000 10.56381 -2.05108 +E32 -1.39547 10.65281 -0.61138 +E33 -2.86784 10.01456 0.85212 +E34 -3.87946 9.03611 2.51559 +E35 -4.81093 7.77321 3.65006 +E36 -5.10466 6.41586 4.77815 +E37 -4.06489 9.40559 -0.89098 +E38 -5.07624 8.37264 0.40595 +E39 -5.69945 7.20796 1.79088 +E40 -6.16984 6.11292 3.29612 +E41 -6.01447 4.93908 4.85771 +E42 -5.33943 3.80220 6.32664 +E43 -4.64127 2.57224 7.50868 +E44 -3.53746 1.07133 8.47419 +E45 -1.99458 -0.60998 9.28870 +E46 -5.94022 7.38337 -1.51513 +E47 -6.48414 6.40424 -0.14004 +E48 -6.97545 5.35131 1.30741 +E49 -7.10064 4.23342 2.91874 +E50 -6.86564 3.16240 4.76800 +E51 -6.11380 1.94213 6.23844 +E52 -5.31389 0.60081 7.48811 +E53 -3.72368 -1.14573 8.58697 +E54 -6.96223 5.38242 -2.19061 +E55 -7.31613 4.37155 -0.61128 +E56 -7.66385 3.29619 1.04415 +E57 -7.62423 2.30205 2.81799 +E58 -7.36570 1.34368 4.60382 +E59 -6.70292 0.06004 6.23992 +E60 -5.40372 -1.61247 7.47343 +E61 -7.54098 3.05323 -2.51935 +E62 -7.77059 2.06323 -0.80729 +E63 -7.96921 1.20744 0.97332 +E64 -8.06621 0.40109 2.78565 +E65 -7.60767 -0.56840 4.59939 +E66 -6.81554 -1.94522 5.93053 +E67 -7.69315 1.74041 -4.18153 +E68 -7.74468 1.05291 -2.47059 +E69 -7.93758 0.07220 -0.96992 +E70 -7.98893 -0.75212 0.84194 +E71 -8.05947 -1.50296 2.76753 +E72 -7.56445 -2.31141 4.30327 +E73 -7.52646 0.73096 -5.96025 +E74 -7.76752 -1.84131 -0.92719 +E75 -7.79279 -2.73175 1.10033 +E76 -7.46191 -3.49308 2.95937 +E77 -6.86934 -3.79448 4.89401 +E78 -5.65276 -3.84604 6.52108 +E79 -4.12465 -3.54800 7.95405 +E80 -2.23647 -2.95809 8.92461 +E81 0.00000 -1.93834 9.45867 +E82 -7.12806 
-0.49186 -7.34929 +E83 -7.37920 -3.49709 -2.18347 +E84 -7.52183 -3.70044 -0.51432 +E85 -7.15214 -4.71132 1.51762 +E86 -6.48817 -5.15829 3.47294 +E87 -5.53051 -5.46184 5.50189 +E88 -4.03809 -5.23807 7.04455 +E89 -2.29514 -4.87829 8.27223 +E90 0.00000 -3.74195 9.02791 +E91 -6.82585 -1.86426 -8.69399 +E92 -6.74047 -2.84840 -6.74712 +E93 -6.78379 -4.01784 -5.01755 +E94 -7.03346 -4.45090 -3.54895 +E95 -6.99052 -5.01694 -1.88810 +E96 -6.67571 -5.73608 0.10234 +E97 -5.96851 -6.52864 2.03293 +E98 -5.10822 -6.74936 3.92134 +E99 -3.75216 -6.67734 5.63719 +E100 -2.14874 -6.29190 7.11453 +E101 0.00000 -7.15042 6.95434 +E102 -6.36989 -3.82470 -8.20622 +E103 -6.24349 -4.62250 -6.49623 +E104 -6.09726 -5.61090 -4.67894 +E105 -6.31441 -6.01299 -3.25921 +E106 -5.98418 -6.74733 -1.40314 +E107 -5.23709 -7.57398 0.46627 +E108 -4.29098 -8.11323 2.38442 +E109 -3.24277 -8.15293 4.22025 +E110 -1.73181 -7.63850 5.69360 +E111 -5.63580 -5.80367 -7.74857 +E112 -5.38718 -6.45180 -6.16689 +E113 -5.08285 -7.32643 -4.32109 +E114 -5.27282 -7.46584 -2.87485 +E115 -4.13620 -8.61230 -1.04503 +E116 -3.13323 -9.13629 0.81878 +E117 -1.94503 -9.23415 2.62135 +E118 -1.09312 -8.74110 4.13810 +E119 0.00000 -8.09146 5.34087 +E120 -4.70608 -7.21970 -7.52955 +E121 -4.20415 -7.81153 -5.84368 +E122 -3.62234 -8.59338 -4.04243 +E123 -3.02717 -9.45363 -1.95941 +E124 -2.20152 -9.70916 -0.63755 +E125 -1.01682 -9.71656 0.95467 +E126 0.00000 -9.23206 2.54671 +E127 1.09312 -8.74110 4.13810 +E128 1.73181 -7.63850 5.69360 +E129 2.14874 -6.29190 7.11453 +E130 2.29514 -4.87829 8.27223 +E131 2.23647 -2.95809 8.92461 +E132 1.99458 -0.60998 9.28870 +E133 -3.45625 -8.57317 -6.82654 +E134 -2.71528 -8.94646 -5.55376 +E135 -2.03205 -9.56166 -3.44989 +E136 -0.91885 -9.62744 -2.21054 +E137 0.00000 -9.58535 -0.88629 +E138 1.01682 -9.71656 0.95467 +E139 1.94503 -9.23415 2.62135 +E140 3.24277 -8.15293 4.22025 +E141 3.75216 -6.67734 5.63719 +E142 4.03809 -5.23807 7.04455 +E143 4.12465 -3.54800 7.95405 +E144 3.72368 -1.14573 8.58697 +E145 -1.88533 -9.22031 -6.79889 +E146 -1.06111 -9.53369 -5.45325 +E147 0.00000 -9.48329 -3.84204 +E148 0.91885 -9.62744 -2.21054 +E149 2.20152 -9.70916 -0.63755 +E150 3.13323 -9.13629 0.81878 +E151 4.29098 -8.11323 2.38442 +E152 5.10822 -6.74936 3.92134 +E153 5.53051 -5.46184 5.50189 +E154 5.65276 -3.84604 6.52108 +E155 5.40372 -1.61247 7.47343 +E156 1.06111 -9.53369 -5.45325 +E157 2.03205 -9.56166 -3.44989 +E158 3.02717 -9.45363 -1.95941 +E159 4.13620 -8.61230 -1.04503 +E160 5.23709 -7.57398 0.46627 +E161 5.96851 -6.52864 2.03293 +E162 6.48817 -5.15829 3.47294 +E163 6.86934 -3.79448 4.89401 +E164 6.81554 -1.94522 5.93053 +E165 1.88533 -9.22031 -6.79889 +E166 2.71528 -8.94646 -5.55376 +E167 3.62234 -8.59338 -4.04243 +E168 5.27282 -7.46584 -2.87485 +E169 5.98418 -6.74733 -1.40314 +E170 6.67571 -5.73608 0.10234 +E171 7.15214 -4.71132 1.51762 +E172 7.46191 -3.49308 2.95937 +E173 7.56445 -2.31141 4.30327 +E174 3.45625 -8.57317 -6.82654 +E175 4.20415 -7.81153 -5.84368 +E176 5.08285 -7.32643 -4.32109 +E177 6.31441 -6.01299 -3.25921 +E178 6.99052 -5.01694 -1.88810 +E179 7.52183 -3.70044 -0.51432 +E180 7.79279 -2.73175 1.10033 +E181 8.05947 -1.50296 2.76753 +E182 7.60767 -0.56840 4.59939 +E183 6.70292 0.06004 6.23992 +E184 5.31389 0.60081 7.48811 +E185 3.53746 1.07133 8.47419 +E186 1.15339 1.51369 9.19904 +E187 4.70608 -7.21970 -7.52955 +E188 5.38718 -6.45180 -6.16689 +E189 6.09726 -5.61090 -4.67894 +E190 7.03346 -4.45090 -3.54895 +E191 7.37920 -3.49709 -2.18347 +E192 7.76752 -1.84131 -0.92719 +E193 7.98893 -0.75212 0.84194 +E194 8.06621 
0.40109 2.78565 +E195 7.36570 1.34368 4.60382 +E196 6.11380 1.94213 6.23844 +E197 4.64127 2.57224 7.50868 +E198 2.29559 2.91372 8.55810 +E199 5.63580 -5.80367 -7.74857 +E200 6.24349 -4.62250 -6.49623 +E201 6.78379 -4.01784 -5.01755 +E202 7.93758 0.07220 -0.96992 +E203 7.96921 1.20744 0.97332 +E204 7.62423 2.30205 2.81799 +E205 6.86564 3.16240 4.76800 +E206 5.33943 3.80220 6.32664 +E207 3.34467 4.40891 7.67253 +E208 6.36989 -3.82470 -8.20622 +E209 6.74047 -2.84840 -6.74712 +E210 7.74468 1.05291 -2.47059 +E211 7.77059 2.06323 -0.80729 +E212 7.66385 3.29619 1.04415 +E213 7.10064 4.23342 2.91874 +E214 6.01447 4.93908 4.85771 +E215 4.49828 5.59395 6.28801 +E216 6.82585 -1.86426 -8.69399 +E217 7.12806 -0.49186 -7.34929 +E218 7.52646 0.73096 -5.96025 +E219 7.69315 1.74041 -4.18153 +E220 7.54098 3.05323 -2.51935 +E221 7.31613 4.37155 -0.61128 +E222 6.97545 5.35131 1.30741 +E223 6.16984 6.11292 3.29612 +E224 5.10466 6.41586 4.77815 +E225 7.62652 3.24782 -4.40493 +E226 7.24346 4.80120 -4.77214 +E227 7.55603 2.52648 -6.26962 +E228 7.38028 1.35743 -7.84943 +E229 6.86103 -0.14155 -9.14913 +E230 6.74159 5.99080 -5.83258 +E231 7.22458 4.14855 -6.88918 +E232 7.31422 3.19647 -8.44268 +E233 7.09051 1.66694 -9.77213 +E234 5.88750 7.22674 -6.54736 +E235 6.65934 5.64059 -7.65729 +E236 6.75138 4.62427 -9.03070 +E237 6.58044 3.33743 -10.39707 +E238 4.69146 8.22723 -6.78260 +E239 5.81346 6.42065 -8.65026 +E240 6.04363 5.37051 -9.81363 +E241 -4.69146 8.22723 -6.78260 +E242 -5.81346 6.42065 -8.65026 +E243 -6.04363 5.37051 -9.81363 +E244 -5.88750 7.22674 -6.54736 +E245 -6.65934 5.64059 -7.65729 +E246 -6.75138 4.62427 -9.03070 +E247 -6.58044 3.33743 -10.39707 +E248 -6.74159 5.99080 -5.83258 +E249 -7.22458 4.14855 -6.88918 +E250 -7.31422 3.19647 -8.44268 +E251 -7.09051 1.66694 -9.77213 +E252 -7.24346 4.80120 -4.77214 +E253 -7.62652 3.24782 -4.40493 +E254 -7.55603 2.52648 -6.26962 +E255 -7.38028 1.35743 -7.84943 +E256 -6.86103 -0.14155 -9.14913 diff --git a/mne/channels/data/montages/GSN-HydroCel-257.sfp b/mne/channels/data/montages/GSN-HydroCel-257.sfp new file mode 100644 index 0000000..98c6b1a --- /dev/null +++ b/mne/channels/data/montages/GSN-HydroCel-257.sfp @@ -0,0 +1,260 @@ +FidNz 0.00000 10.56381 -2.05108 +FidT9 -7.82694 0.45386 -3.76056 +FidT10 7.82694 0.45386 -3.76056 +E1 6.96223 5.38242 -2.19061 +E2 6.48414 6.40424 -0.14004 +E3 5.69945 7.20796 1.79088 +E4 4.81093 7.77321 3.65006 +E5 3.61962 7.47782 5.50947 +E6 2.25278 6.46157 6.96317 +E7 1.18879 5.21755 8.13378 +E8 0.00000 3.59608 8.75111 +E9 -1.15339 1.51369 9.19904 +E10 5.94022 7.38337 -1.51513 +E11 5.07624 8.37264 0.40595 +E12 3.87946 9.03611 2.51559 +E13 2.60756 8.97868 4.39107 +E14 1.23344 8.11574 6.06161 +E15 0.00000 6.81181 7.28186 +E16 -1.18879 5.21755 8.13378 +E17 -2.29559 2.91372 8.55810 +E18 4.06489 9.40559 -0.89098 +E19 2.86784 10.01456 0.85212 +E20 1.42153 10.06322 2.84803 +E21 0.00000 9.40339 4.65829 +E22 -1.23344 8.11574 6.06161 +E23 -2.25278 6.46157 6.96317 +E24 -3.34467 4.40891 7.67253 +E25 1.39547 10.65281 -0.61138 +E26 0.00000 10.68996 1.00542 +E27 -1.42153 10.06322 2.84803 +E28 -2.60756 8.97868 4.39107 +E29 -3.61962 7.47782 5.50947 +E30 -4.49828 5.59395 6.28801 +E31 0.00000 10.56381 -2.05108 +E32 -1.39547 10.65281 -0.61138 +E33 -2.86784 10.01456 0.85212 +E34 -3.87946 9.03611 2.51559 +E35 -4.81093 7.77321 3.65006 +E36 -5.10466 6.41586 4.77815 +E37 -4.06489 9.40559 -0.89098 +E38 -5.07624 8.37264 0.40595 +E39 -5.69945 7.20796 1.79088 +E40 -6.16984 6.11292 3.29612 +E41 -6.01447 4.93908 4.85771 +E42 -5.33943 3.80220 6.32664 +E43 -4.64127 
2.57224 7.50868 +E44 -3.53746 1.07133 8.47419 +E45 -1.99458 -0.60998 9.28870 +E46 -5.94022 7.38337 -1.51513 +E47 -6.48414 6.40424 -0.14004 +E48 -6.97545 5.35131 1.30741 +E49 -7.10064 4.23342 2.91874 +E50 -6.86564 3.16240 4.76800 +E51 -6.11380 1.94213 6.23844 +E52 -5.31389 0.60081 7.48811 +E53 -3.72368 -1.14573 8.58697 +E54 -6.96223 5.38242 -2.19061 +E55 -7.31613 4.37155 -0.61128 +E56 -7.66385 3.29619 1.04415 +E57 -7.62423 2.30205 2.81799 +E58 -7.36570 1.34368 4.60382 +E59 -6.70292 0.06004 6.23992 +E60 -5.40372 -1.61247 7.47343 +E61 -7.54098 3.05323 -2.51935 +E62 -7.77059 2.06323 -0.80729 +E63 -7.96921 1.20744 0.97332 +E64 -8.06621 0.40109 2.78565 +E65 -7.60767 -0.56840 4.59939 +E66 -6.81554 -1.94522 5.93053 +E67 -7.69315 1.74041 -4.18153 +E68 -7.74468 1.05291 -2.47059 +E69 -7.93758 0.07220 -0.96992 +E70 -7.98893 -0.75212 0.84194 +E71 -8.05947 -1.50296 2.76753 +E72 -7.56445 -2.31141 4.30327 +E73 -7.52646 0.73096 -5.96025 +E74 -7.76752 -1.84131 -0.92719 +E75 -7.79279 -2.73175 1.10033 +E76 -7.46191 -3.49308 2.95937 +E77 -6.86934 -3.79448 4.89401 +E78 -5.65276 -3.84604 6.52108 +E79 -4.12465 -3.54800 7.95405 +E80 -2.23647 -2.95809 8.92461 +E81 0.00000 -1.93834 9.45867 +E82 -7.12806 -0.49186 -7.34929 +E83 -7.37920 -3.49709 -2.18347 +E84 -7.52183 -3.70044 -0.51432 +E85 -7.15214 -4.71132 1.51762 +E86 -6.48817 -5.15829 3.47294 +E87 -5.53051 -5.46184 5.50189 +E88 -4.03809 -5.23807 7.04455 +E89 -2.29514 -4.87829 8.27223 +E90 0.00000 -3.74195 9.02791 +E91 -6.82585 -1.86426 -8.69399 +E92 -6.74047 -2.84840 -6.74712 +E93 -6.78379 -4.01784 -5.01755 +E94 -7.03346 -4.45090 -3.54895 +E95 -6.99052 -5.01694 -1.88810 +E96 -6.67571 -5.73608 0.10234 +E97 -5.96851 -6.52864 2.03293 +E98 -5.10822 -6.74936 3.92134 +E99 -3.75216 -6.67734 5.63719 +E100 -2.14874 -6.29190 7.11453 +E101 0.00000 -7.15042 6.95434 +E102 -6.36989 -3.82470 -8.20622 +E103 -6.24349 -4.62250 -6.49623 +E104 -6.09726 -5.61090 -4.67894 +E105 -6.31441 -6.01299 -3.25921 +E106 -5.98418 -6.74733 -1.40314 +E107 -5.23709 -7.57398 0.46627 +E108 -4.29098 -8.11323 2.38442 +E109 -3.24277 -8.15293 4.22025 +E110 -1.73181 -7.63850 5.69360 +E111 -5.63580 -5.80367 -7.74857 +E112 -5.38718 -6.45180 -6.16689 +E113 -5.08285 -7.32643 -4.32109 +E114 -5.27282 -7.46584 -2.87485 +E115 -4.13620 -8.61230 -1.04503 +E116 -3.13323 -9.13629 0.81878 +E117 -1.94503 -9.23415 2.62135 +E118 -1.09312 -8.74110 4.13810 +E119 0.00000 -8.09146 5.34087 +E120 -4.70608 -7.21970 -7.52955 +E121 -4.20415 -7.81153 -5.84368 +E122 -3.62234 -8.59338 -4.04243 +E123 -3.02717 -9.45363 -1.95941 +E124 -2.20152 -9.70916 -0.63755 +E125 -1.01682 -9.71656 0.95467 +E126 0.00000 -9.23206 2.54671 +E127 1.09312 -8.74110 4.13810 +E128 1.73181 -7.63850 5.69360 +E129 2.14874 -6.29190 7.11453 +E130 2.29514 -4.87829 8.27223 +E131 2.23647 -2.95809 8.92461 +E132 1.99458 -0.60998 9.28870 +E133 -3.45625 -8.57317 -6.82654 +E134 -2.71528 -8.94646 -5.55376 +E135 -2.03205 -9.56166 -3.44989 +E136 -0.91885 -9.62744 -2.21054 +E137 0.00000 -9.58535 -0.88629 +E138 1.01682 -9.71656 0.95467 +E139 1.94503 -9.23415 2.62135 +E140 3.24277 -8.15293 4.22025 +E141 3.75216 -6.67734 5.63719 +E142 4.03809 -5.23807 7.04455 +E143 4.12465 -3.54800 7.95405 +E144 3.72368 -1.14573 8.58697 +E145 -1.88533 -9.22031 -6.79889 +E146 -1.06111 -9.53369 -5.45325 +E147 0.00000 -9.48329 -3.84204 +E148 0.91885 -9.62744 -2.21054 +E149 2.20152 -9.70916 -0.63755 +E150 3.13323 -9.13629 0.81878 +E151 4.29098 -8.11323 2.38442 +E152 5.10822 -6.74936 3.92134 +E153 5.53051 -5.46184 5.50189 +E154 5.65276 -3.84604 6.52108 +E155 5.40372 -1.61247 7.47343 +E156 1.06111 
-9.53369 -5.45325 +E157 2.03205 -9.56166 -3.44989 +E158 3.02717 -9.45363 -1.95941 +E159 4.13620 -8.61230 -1.04503 +E160 5.23709 -7.57398 0.46627 +E161 5.96851 -6.52864 2.03293 +E162 6.48817 -5.15829 3.47294 +E163 6.86934 -3.79448 4.89401 +E164 6.81554 -1.94522 5.93053 +E165 1.88533 -9.22031 -6.79889 +E166 2.71528 -8.94646 -5.55376 +E167 3.62234 -8.59338 -4.04243 +E168 5.27282 -7.46584 -2.87485 +E169 5.98418 -6.74733 -1.40314 +E170 6.67571 -5.73608 0.10234 +E171 7.15214 -4.71132 1.51762 +E172 7.46191 -3.49308 2.95937 +E173 7.56445 -2.31141 4.30327 +E174 3.45625 -8.57317 -6.82654 +E175 4.20415 -7.81153 -5.84368 +E176 5.08285 -7.32643 -4.32109 +E177 6.31441 -6.01299 -3.25921 +E178 6.99052 -5.01694 -1.88810 +E179 7.52183 -3.70044 -0.51432 +E180 7.79279 -2.73175 1.10033 +E181 8.05947 -1.50296 2.76753 +E182 7.60767 -0.56840 4.59939 +E183 6.70292 0.06004 6.23992 +E184 5.31389 0.60081 7.48811 +E185 3.53746 1.07133 8.47419 +E186 1.15339 1.51369 9.19904 +E187 4.70608 -7.21970 -7.52955 +E188 5.38718 -6.45180 -6.16689 +E189 6.09726 -5.61090 -4.67894 +E190 7.03346 -4.45090 -3.54895 +E191 7.37920 -3.49709 -2.18347 +E192 7.76752 -1.84131 -0.92719 +E193 7.98893 -0.75212 0.84194 +E194 8.06621 0.40109 2.78565 +E195 7.36570 1.34368 4.60382 +E196 6.11380 1.94213 6.23844 +E197 4.64127 2.57224 7.50868 +E198 2.29559 2.91372 8.55810 +E199 5.63580 -5.80367 -7.74857 +E200 6.24349 -4.62250 -6.49623 +E201 6.78379 -4.01784 -5.01755 +E202 7.93758 0.07220 -0.96992 +E203 7.96921 1.20744 0.97332 +E204 7.62423 2.30205 2.81799 +E205 6.86564 3.16240 4.76800 +E206 5.33943 3.80220 6.32664 +E207 3.34467 4.40891 7.67253 +E208 6.36989 -3.82470 -8.20622 +E209 6.74047 -2.84840 -6.74712 +E210 7.74468 1.05291 -2.47059 +E211 7.77059 2.06323 -0.80729 +E212 7.66385 3.29619 1.04415 +E213 7.10064 4.23342 2.91874 +E214 6.01447 4.93908 4.85771 +E215 4.49828 5.59395 6.28801 +E216 6.82585 -1.86426 -8.69399 +E217 7.12806 -0.49186 -7.34929 +E218 7.52646 0.73096 -5.96025 +E219 7.69315 1.74041 -4.18153 +E220 7.54098 3.05323 -2.51935 +E221 7.31613 4.37155 -0.61128 +E222 6.97545 5.35131 1.30741 +E223 6.16984 6.11292 3.29612 +E224 5.10466 6.41586 4.77815 +E225 7.62652 3.24782 -4.40493 +E226 7.24346 4.80120 -4.77214 +E227 7.55603 2.52648 -6.26962 +E228 7.38028 1.35743 -7.84943 +E229 6.86103 -0.14155 -9.14913 +E230 6.74159 5.99080 -5.83258 +E231 7.22458 4.14855 -6.88918 +E232 7.31422 3.19647 -8.44268 +E233 7.09051 1.66694 -9.77213 +E234 5.88750 7.22674 -6.54736 +E235 6.65934 5.64059 -7.65729 +E236 6.75138 4.62427 -9.03070 +E237 6.58044 3.33743 -10.39707 +E238 4.69146 8.22723 -6.78260 +E239 5.81346 6.42065 -8.65026 +E240 6.04363 5.37051 -9.81363 +E241 -4.69146 8.22723 -6.78260 +E242 -5.81346 6.42065 -8.65026 +E243 -6.04363 5.37051 -9.81363 +E244 -5.88750 7.22674 -6.54736 +E245 -6.65934 5.64059 -7.65729 +E246 -6.75138 4.62427 -9.03070 +E247 -6.58044 3.33743 -10.39707 +E248 -6.74159 5.99080 -5.83258 +E249 -7.22458 4.14855 -6.88918 +E250 -7.31422 3.19647 -8.44268 +E251 -7.09051 1.66694 -9.77213 +E252 -7.24346 4.80120 -4.77214 +E253 -7.62652 3.24782 -4.40493 +E254 -7.55603 2.52648 -6.26962 +E255 -7.38028 1.35743 -7.84943 +E256 -6.86103 -0.14155 -9.14913 +Cz 0.00000 0.00000 9.68308 diff --git a/mne/channels/data/montages/GSN-HydroCel-32.sfp b/mne/channels/data/montages/GSN-HydroCel-32.sfp new file mode 100644 index 0000000..214fb1b --- /dev/null +++ b/mne/channels/data/montages/GSN-HydroCel-32.sfp @@ -0,0 +1,36 @@ +FidNz 0 9.071585155 -2.359754454 +FidT9 -6.711765 0.040402876 -3.251600355 +FidT10 6.711765 0.040402876 -3.251600355 +E1 -2.695405558 
8.884820317 1.088308144 +E2 2.695405558 8.884820317 1.088308144 +E3 -4.459387187 6.021159964 4.365321482 +E4 4.459387187 6.021159964 4.365321482 +E5 -5.47913021 0.284948655 6.38332782 +E6 5.47913021 0.284948655 6.38332782 +E7 -5.831241498 -4.494821698 4.955347697 +E8 5.831241498 -4.494821698 4.955347697 +E9 -2.738838019 -8.607966849 0.239368223 +E10 2.738838019 -8.607966849 0.239368223 +E11 -6.399087198 4.127248875 -0.356852241 +E12 6.399087198 4.127248875 -0.356852241 +E13 -7.304625099 -1.866238006 -0.629182006 +E14 7.304625099 -1.866238006 -0.629182006 +E15 -6.034746843 -5.755782196 0.051843011 +E16 6.034746843 -5.755782196 0.051843011 +E17 0 7.96264703 5.044718001 +E18 0 9.271139705 -2.211516434 +E19 0 -6.676694032 6.465208258 +E20 0 -8.996686498 0.487952047 +E21 -6.518995129 2.417299399 -5.253637073 +E22 6.518995129 2.417299399 -5.253637073 +E23 -6.174969392 -2.458138877 -5.637380998 +E24 6.174969392 -2.458138877 -5.637380998 +E25 -3.784983913 -6.401014415 -5.260040689 +E26 3.784983913 -6.401014415 -5.260040689 +E27 0 9.087440894 1.333345013 +E28 0 3.806770224 7.891304964 +E29 -3.743504949 6.649204911 -6.530243068 +E30 3.743504949 6.649204911 -6.530243068 +E31 -6.118458137 4.523870113 -4.409174427 +E32 6.118458137 4.523870113 -4.409174427 +Cz 0 0 8.899186843 diff --git a/mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp b/mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp new file mode 100644 index 0000000..004dcb0 --- /dev/null +++ b/mne/channels/data/montages/GSN-HydroCel-64_1.0.sfp @@ -0,0 +1,67 @@ +FidNz 0.00000 10.3556 -2.69376 +FidT9 -7.18083 0.0461216 -3.71184 +FidT10 6.24270 0.0461216 -3.71184 +E1 6.60688 6.30230 -2.94229 +E2 4.41106 8.71481 3.50199 +E3 3.27490 8.15713 5.69580 +E4 0.00000 4.34559 9.00826 +E5 3.07692 10.1424 1.24235 +E6 0.00000 9.08970 5.75876 +E7 -2.78065 3.71493 8.68573 +E8 0.00000 10.3612 3.54499 +E9 -3.2749 8.15713 5.6958 +E10 -3.07692 10.1424 1.24235 +E11 -4.41106 8.71481 3.50199 +E12 -5.09058 6.87341 4.98320 +E13 -6.48687 6.22527 3.23806 +E14 -6.33176 4.74636 5.28262 +E15 -5.43625 3.07969 7.18905 +E16 -4.21856 1.09635 8.70749 +E17 -6.60688 6.30230 -2.94229 +E18 -7.30483 4.71143 -0.407362 +E19 -7.78984 3.38858 2.77404 +E20 -6.25466 0.325281 7.28684 +E21 -4.46332 -1.73406 8.86309 +E22 -7.88241 -0.914323 5.25116 +E23 -7.80897 1.45945 -4.05862 +E24 -8.33854 -2.13039 -0.718238 +E25 -8.34755 -2.62392 2.72292 +E26 -7.69093 -3.43812 4.76981 +E27 -7.48627 -5.32762 3.13923 +E28 -6.65661 -5.13103 5.65674 +E29 -7.51185 -4.26886 -3.41445 +E30 -6.88892 -6.57047 0.0591810 +E31 -4.69965 -6.91953 6.12524 +E32 -6.16900 -6.70120 -3.30093 +E33 -2.10574 -8.39538 5.96342 +E34 0.00000 -4.98271 9.28085 +E35 -3.12650 -9.82636 0.273249 +E36 0.00000 -8.93816 5.35112 +E37 0.00000 -10.2701 0.557018 +E38 2.10574 -8.39538 5.96342 +E39 3.12650 -9.82636 0.273249 +E40 4.69965 -6.91953 6.12524 +E41 4.46332 -1.73406 8.86309 +E42 6.65661 -5.13103 5.65674 +E43 6.16900 -6.70120 -3.30093 +E44 6.88892 -6.57047 0.0591810 +E45 7.48627 -5.32762 3.13923 +E46 7.69093 -3.43812 4.76981 +E47 7.51185 -4.26886 -3.41445 +E48 8.34755 -2.62392 2.72292 +E49 7.88241 -0.914323 5.25116 +E50 6.25466 0.325281 7.28684 +E51 4.21856 1.09635 8.70749 +E52 8.33854 -2.13039 -0.718238 +E53 5.43625 3.07969 7.18905 +E54 2.78065 3.71493 8.68573 +E55 7.80897 1.45945 -4.05862 +E56 7.78984 3.38858 2.77404 +E57 6.33176 4.74636 5.28262 +E58 7.30483 4.71143 -0.407362 +E59 6.48687 6.22527 3.23806 +E60 5.09058 6.87341 4.98320 +E61 6.98448 5.16419 -5.03326 +E62 4.27337 7.59035 -7.45455 +E63 -4.27337 7.59035 -7.45455 +E64 -6.98448 
5.16419 -5.03326 diff --git a/mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp b/mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp new file mode 100644 index 0000000..c1c455d --- /dev/null +++ b/mne/channels/data/montages/GSN-HydroCel-65_1.0.sfp @@ -0,0 +1,68 @@ +FidNz 0.00000 10.3556 -2.69376 +FidT9 -7.18083 0.0461216 -3.71184 +FidT10 6.24270 0.0461216 -3.71184 +E1 6.60688 6.30230 -2.94229 +E2 4.41106 8.71481 3.50199 +E3 3.27490 8.15713 5.69580 +E4 0.00000 4.34559 9.00826 +E5 3.07692 10.1424 1.24235 +E6 0.00000 9.08970 5.75876 +E7 -2.78065 3.71493 8.68573 +E8 0.00000 10.3612 3.54499 +E9 -3.2749 8.15713 5.6958 +E10 -3.07692 10.1424 1.24235 +E11 -4.41106 8.71481 3.50199 +E12 -5.09058 6.87341 4.98320 +E13 -6.48687 6.22527 3.23806 +E14 -6.33176 4.74636 5.28262 +E15 -5.43625 3.07969 7.18905 +E16 -4.21856 1.09635 8.70749 +E17 -6.60688 6.30230 -2.94229 +E18 -7.30483 4.71143 -0.407362 +E19 -7.78984 3.38858 2.77404 +E20 -6.25466 0.325281 7.28684 +E21 -4.46332 -1.73406 8.86309 +E22 -7.88241 -0.914323 5.25116 +E23 -7.80897 1.45945 -4.05862 +E24 -8.33854 -2.13039 -0.718238 +E25 -8.34755 -2.62392 2.72292 +E26 -7.69093 -3.43812 4.76981 +E27 -7.48627 -5.32762 3.13923 +E28 -6.65661 -5.13103 5.65674 +E29 -7.51185 -4.26886 -3.41445 +E30 -6.88892 -6.57047 0.0591810 +E31 -4.69965 -6.91953 6.12524 +E32 -6.16900 -6.70120 -3.30093 +E33 -2.10574 -8.39538 5.96342 +E34 0.00000 -4.98271 9.28085 +E35 -3.12650 -9.82636 0.273249 +E36 0.00000 -8.93816 5.35112 +E37 0.00000 -10.2701 0.557018 +E38 2.10574 -8.39538 5.96342 +E39 3.12650 -9.82636 0.273249 +E40 4.69965 -6.91953 6.12524 +E41 4.46332 -1.73406 8.86309 +E42 6.65661 -5.13103 5.65674 +E43 6.16900 -6.70120 -3.30093 +E44 6.88892 -6.57047 0.0591810 +E45 7.48627 -5.32762 3.13923 +E46 7.69093 -3.43812 4.76981 +E47 7.51185 -4.26886 -3.41445 +E48 8.34755 -2.62392 2.72292 +E49 7.88241 -0.914323 5.25116 +E50 6.25466 0.325281 7.28684 +E51 4.21856 1.09635 8.70749 +E52 8.33854 -2.13039 -0.718238 +E53 5.43625 3.07969 7.18905 +E54 2.78065 3.71493 8.68573 +E55 7.80897 1.45945 -4.05862 +E56 7.78984 3.38858 2.77404 +E57 6.33176 4.74636 5.28262 +E58 7.30483 4.71143 -0.407362 +E59 6.48687 6.22527 3.23806 +E60 5.09058 6.87341 4.98320 +E61 6.98448 5.16419 -5.03326 +E62 4.27337 7.59035 -7.45455 +E63 -4.27337 7.59035 -7.45455 +E64 -6.98448 5.16419 -5.03326 +Cz 0.00000 0.00000 10.1588 diff --git a/mne/channels/data/montages/artinis-brite23.elc b/mne/channels/data/montages/artinis-brite23.elc new file mode 100644 index 0000000..1e14b0a --- /dev/null +++ b/mne/channels/data/montages/artinis-brite23.elc @@ -0,0 +1,48 @@ +# ASA optode file +ReferenceLabel avg +UnitPosition mm +NumberPositions= 21 +Positions +-4.62 82.33 -45.74 +79.66 -18.72 -45.89 +-81.41 -17.18 -45.56 +65.18 27.28 35.31 +48.62 59.71 22.68 +18.95 72.41 38.32 +-3.97 79.74 30.28 +-25.96 72.19 35.16 +-52.51 60.53 14.54 +-66.37 32.04 31.08 +76.10 -0.29 31.24 +65.61 -0.26 56.15 +64.93 42.43 8.29 +43.32 46.36 50.77 +21.58 82.45 1.06 +-2.91 59.57 61.59 +-29.62 79.35 2.38 +-48.13 44.76 49.15 +-67.68 43.26 -3.18 +-65.37 4.89 56.36 +-77.24 5.88 27.58 +Labels +Nz +RPA +LPA +D1 +D2 +D3 +D4 +D5 +D6 +D7 +S1 +S2 +S3 +S4 +S5 +S6 +S7 +S8 +S9 +S10 +S11 diff --git a/mne/channels/data/montages/artinis-octamon.elc b/mne/channels/data/montages/artinis-octamon.elc new file mode 100644 index 0000000..748a19d --- /dev/null +++ b/mne/channels/data/montages/artinis-octamon.elc @@ -0,0 +1,32 @@ +# ASA optode file +ReferenceLabel avg +UnitPosition mm +NumberPositions= 13 +Positions +0.96 83.56 -48.63 +80.25 -19.67 -43.88 +-82.58 -20.09 -43.10 +47.77 
65.28 7.28 +-46.45 67.76 8.81 +63.88 34.84 28.34 +64.96 45.02 -10.31 +22.07 74.86 31.03 +17.84 84.96 -10.84 +-10.81 77.96 32.10 +-15.96 85.24 -7.41 +-61.78 40.78 29.92 +-65.28 48.14 -10.73 +Labels +Nz +RPA +LPA +D1 +D2 +S1 +S2 +S3 +S4 +S5 +S6 +S7 +S8 diff --git a/mne/channels/data/montages/biosemi128.txt b/mne/channels/data/montages/biosemi128.txt new file mode 100644 index 0000000..69739c6 --- /dev/null +++ b/mne/channels/data/montages/biosemi128.txt @@ -0,0 +1,132 @@ +Site Theta Phi +A1 0 0 +A2 11.5 -90 +A3 23 -90 +A4 34.5 -90 +A5 -46 67.5 +A6 -46 45 +A7 -57.5 45 +A8 -69 54 +A9 -80.5 54 +A10 -92 54 +A11 -103.5 54 +A12 -115 54 +A13 -115 72 +A14 -103.5 72 +A15 -92 72 +A16 -80.5 72 +A17 -69 72 +A18 -57.5 67.5 +A19 46 -90 +A20 57.5 -90 +A21 69 -90 +A22 80.5 -90 +A23 92 -90 +A24 103.5 -90 +A25 115 -90 +A26 115 -72 +A27 103.5 -72 +A28 92 -72 +A29 80.5 -72 +A30 69 -72 +A31 57.5 -67.5 +A32 46 -67.5 +B1 11.5 -18 +B2 23 -45 +B3 46 -45 +B4 57.5 -45 +B5 69 -54 +B6 80.5 -54 +B7 92 -54 +B8 103.5 -54 +B9 115 -54 +B10 103.5 -36 +B11 92 -36 +B12 80.5 -36 +B13 69 -36 +B14 92 -18 +B15 80.5 -18 +B16 69 -18 +B17 57.5 -22.5 +B18 46 -22.5 +B19 34.5 -30 +B20 23 0 +B21 34.5 0 +B22 46 0 +B23 57.5 0 +B24 69 0 +B25 80.5 0 +B26 92 0 +B27 92 18 +B28 80.5 18 +B29 69 18 +B30 57.5 22.5 +B31 46 22.5 +B32 34.5 30 +C1 11.5 54 +C2 23 45 +C3 46 45 +C4 57.5 45 +C5 69 36 +C6 80.5 36 +C7 92 36 +C8 92 54 +C9 80.5 54 +C10 69 54 +C11 34.5 60 +C12 46 67.5 +C13 57.5 67.5 +C14 69 72 +C15 80.5 72 +C16 92 72 +C17 92 90 +C18 80.5 90 +C19 69 90 +C20 57.5 90 +C21 46 90 +C22 34.5 90 +C23 23 90 +C24 -34.5 -60 +C25 -46 -67.5 +C26 -57.5 -67.5 +C27 -69 -72 +C28 -80.5 -72 +C29 -92 -72 +C30 -92 -54 +C31 -80.5 -54 +C32 -69 -54 +D1 -11.5 -54 +D2 -23 -45 +D3 -46 -45 +D4 -57.5 -45 +D5 -69 -36 +D6 -80.5 -36 +D7 -92 -36 +D8 -92 -18 +D9 -80.5 -18 +D10 -69 -18 +D11 -57.5 -22.5 +D12 -46 -22.5 +D13 -34.5 -30 +D14 -23 0 +D15 -11.5 18 +D16 -23 45 +D17 -34.5 30 +D18 -34.5 0 +D19 -46 0 +D20 -57.5 0 +D21 -69 0 +D22 -80.5 0 +D23 -92 0 +D24 -92 18 +D25 -80.5 18 +D26 -69 18 +D27 -57.5 22.5 +D28 -46 22.5 +D29 -69 36 +D30 -80.5 36 +D31 -92 36 +D32 -103.5 36 +Nz 115 90 +LPA -115 0 +RPA 115 0 diff --git a/mne/channels/data/montages/biosemi16.txt b/mne/channels/data/montages/biosemi16.txt new file mode 100644 index 0000000..d8a6769 --- /dev/null +++ b/mne/channels/data/montages/biosemi16.txt @@ -0,0 +1,20 @@ +Site Theta Phi +Fp1 -92 -72 +Fp2 92 72 +F4 60 51 +Fz 46 90 +F3 -60 -51 +T7 -92 0 +C3 -46 0 +Cz 0 0 +C4 46 0 +T8 92 0 +P4 60 -51 +Pz 46 -90 +P3 -60 51 +O1 -92 72 +Oz 92 -90 +O2 92 -72 +Nz 115 90 +LPA -115 0 +RPA 115 0 diff --git a/mne/channels/data/montages/biosemi160.txt b/mne/channels/data/montages/biosemi160.txt new file mode 100644 index 0000000..04fefc7 --- /dev/null +++ b/mne/channels/data/montages/biosemi160.txt @@ -0,0 +1,164 @@ +Site Theta Phi +A1 0 0 +A2 11.5 -90 +A3 23 -90 +A4 34.5 -90 +A5 -46 72 +A6 -46 54 +A7 -57.5 54 +A8 -69 60 +A9 -80.5 60 +A10 -92 60 +A11 -103.5 60 +A12 -115 60 +A13 -115 75 +A14 -103.5 75 +A15 -92 75 +A16 -80.5 75 +A17 -69 75 +A18 -57.5 72 +A19 46 -90 +A20 57.5 -90 +A21 69 -90 +A22 80.5 -90 +A23 92 -90 +A24 103.5 -90 +A25 115 -90 +A26 115 -75 +A27 103.5 -75 +A28 92 -75 +A29 80.5 -75 +A30 69 -75 +A31 57.5 -72 +A32 46 -72 +B1 11.5 -18 +B2 23 -60 +B3 46 -54 +B4 57.5 -54 +B5 69 -60 +B6 80.5 -60 +B7 92 -60 +B8 103.5 -60 +B9 115 -60 +B10 115 -45 +B11 103.5 -45 +B12 92 -45 +B13 80.5 -45 +B14 69 -45 +B15 69 -30 +B16 80.5 -30 +B17 92 -30 +B18 103.5 -30 +B19 92 -15 +B20 80.5 -15 +B21 69 -15 +B22 57.5 -36 +B23 46 -36 +B24 34.5 -45 +B25 23 -30 
+B26 34.5 -22.5 +B27 46 -18 +B28 57.5 -18 +B29 57.5 0 +B30 69 0 +B31 80.5 0 +B32 92 0 +C1 11.5 54 +C2 23 30 +C3 23 0 +C4 34.5 0 +C5 34.5 22.5 +C6 46 18 +C7 46 0 +C8 57.5 18 +C9 69 15 +C10 80.5 15 +C11 92 15 +C12 92 30 +C13 80.5 30 +C14 69 30 +C15 69 45 +C16 80.5 45 +C17 92 45 +C18 92 60 +C19 80.5 60 +C20 69 60 +C21 57.5 54 +C22 57.5 36 +C23 46 36 +C24 34.5 45 +C25 23 60 +C26 34.5 67.5 +C27 46 54 +C28 46 72 +C29 57.5 72 +C30 69 75 +C31 80.5 75 +C32 92 75 +D1 -11.5 -54 +D2 23 90 +D3 34.5 90 +D4 46 90 +D5 57.5 90 +D6 69 90 +D7 80.5 90 +D8 92 90 +D9 -92 -75 +D10 -80.5 -75 +D11 -69 -75 +D12 -57.5 -72 +D13 -46 -72 +D14 -34.5 -67.5 +D15 -23 -60 +D16 -23 -30 +D17 -34.5 -45 +D18 -46 -54 +D19 -57.5 -54 +D20 -69 -60 +D21 -80.5 -60 +D22 -92 -60 +D23 -92 -45 +D24 -80.5 -45 +D25 -69 -45 +D26 -57.5 -36 +D27 -46 -36 +D28 -34.5 -22.5 +D29 -46 -18 +D30 -69 -30 +D31 -80.5 -30 +D32 -92 -30 +E1 -11.5 18 +E2 -23 0 +E3 -34.5 0 +E4 -46 0 +E5 -57.5 -18 +E6 -69 -15 +E7 -80.5 -15 +E8 -92 -15 +E9 -92 0 +E10 -80.5 0 +E11 -69 0 +E12 -57.5 0 +E13 -57.5 18 +E14 -69 15 +E15 -80.5 15 +E16 -92 15 +E17 -103.5 30 +E18 -92 30 +E19 -80.5 30 +E20 -69 30 +E21 -46 18 +E22 -34.5 22.5 +E23 -23 30 +E24 -23 60 +E25 -34.5 45 +E26 -46 36 +E27 -57.5 36 +E28 -69 45 +E29 -80.5 45 +E30 -92 45 +E31 -103.5 45 +E32 -115 45 +Nz 115 90 +LPA -115 0 +RPA 115 0 diff --git a/mne/channels/data/montages/biosemi256.txt b/mne/channels/data/montages/biosemi256.txt new file mode 100644 index 0000000..50085a2 --- /dev/null +++ b/mne/channels/data/montages/biosemi256.txt @@ -0,0 +1,260 @@ +Site Theta Phi +A1 0 0 +A2 9.2 -90 +A3 18.4 -90 +A4 27.6 -90 +A5 36.8 -90 +A6 46 -90 +A7 -46 75 +A8 -55.2 75 +A9 -64.4 78 +A10 -73.6 78 +A11 -82.8 78.75 +A12 -92 78.75 +A13 -101.2 78.75 +A14 -110.4 78 +A15 -119.6 78 +A16 119.6 -90 +A17 110.4 -90 +A18 101.2 -90 +A19 92 -90 +A20 82.8 -90 +A21 73.6 -90 +A22 64.4 -90 +A23 55.2 -90 +A24 46 -75 +A25 55.2 -75 +A26 64.4 -78 +A27 73.6 -78 +A28 82.8 -78.75 +A29 92 -78.75 +A30 101.2 -78.75 +A31 110.4 -78 +A32 119.6 -78 +B1 18.4 -54 +B2 27.6 -66 +B3 36.8 -54 +B4 46 -60 +B5 55.2 -60 +B6 64.4 -66 +B7 73.6 -66 +B8 82.8 -67.5 +B9 92 -67.5 +B10 101.2 -67.5 +B11 110.4 -66 +B12 119.6 -66 +B13 110.4 -54 +B14 101.2 -56.25 +B15 92 -56.25 +B16 82.8 -56.25 +B17 73.6 -54 +B18 64.4 -54 +B19 55.2 -45 +B20 46 -45 +B21 27.6 -42 +B22 36.8 -36 +B23 46 -30 +B24 55.2 -30 +B25 64.4 -42 +B26 73.6 -42 +B27 82.8 -45 +B28 92 -45 +B29 101.2 -45 +B30 110.4 -42 +B31 110.4 -30 +B32 101.2 -33.75 +C1 9.2 -18 +C2 18.4 -18 +C3 27.6 -18 +C4 36.8 -18 +C5 46 -15 +C6 55.2 -15 +C7 64.4 -18 +C8 64.4 -30 +C9 73.6 -30 +C10 82.8 -33.75 +C11 92 -33.75 +C12 101.2 -22.5 +C13 92 -22.5 +C14 82.8 -22.5 +C15 73.6 -18 +C16 82.8 -11.25 +C17 92 -11.25 +C18 92 0 +C19 82.8 0 +C20 73.6 -6 +C21 64.4 -6 +C22 55.2 0 +C23 46 0 +C24 36.8 0 +C25 27.6 6 +C26 36.8 18 +C27 46 15 +C28 55.2 15 +C29 64.4 6 +C30 73.6 6 +C31 82.8 11.25 +C32 92 11.25 +D1 9.2 54 +D2 18.4 18 +D3 27.6 30 +D4 36.8 36 +D5 46 30 +D6 64.4 18 +D7 73.6 18 +D8 82.8 22.5 +D9 92 22.5 +D10 101.2 22.5 +D11 101.2 33.75 +D12 92 33.75 +D13 82.8 33.75 +D14 73.6 30 +D15 64.4 30 +D16 55.2 30 +D17 46 45 +D18 55.2 45 +D19 64.4 42 +D20 73.6 42 +D21 82.8 45 +D22 92 45 +D23 101.2 45 +D24 92 56.25 +D25 82.8 56.25 +D26 73.6 54 +D27 64.4 54 +D28 55.2 60 +D29 64.4 66 +D30 73.6 66 +D31 82.8 67.5 +D32 92 67.5 +E1 18.4 90 +E2 18.4 54 +E3 27.6 54 +E4 36.8 54 +E5 46 60 +E6 46 75 +E7 55.2 75 +E8 64.4 78 +E9 73.6 78 +E10 82.8 78.75 +E11 92 78.75 +E12 92 90 +E13 82.8 90 +E14 73.6 90 +E15 64.4 90 +E16 55.2 90 +E17 46 90 +E18 36.8 90 +E19 36.8 72 +E20 27.6 
78 +E21 -27.6 -78 +E22 -36.8 -72 +E23 -46 -75 +E24 -55.2 -75 +E25 -64.4 -78 +E26 -73.6 -78 +E27 -82.8 -78.75 +E28 -92 -78.75 +E29 -92 -67.5 +E30 -82.8 -67.5 +E31 -73.6 -66 +E32 -64.4 -66 +F1 -9.2 -54 +F2 -18.4 -54 +F3 -27.6 -54 +F4 -36.8 -54 +F5 -46 -60 +F6 -55.2 -60 +F7 -64.4 -54 +F8 -73.6 -54 +F9 -82.8 -56.25 +F10 -92 -56.25 +F11 -101.2 -45 +F12 -92 -45 +F13 -82.8 -45 +F14 -73.6 -42 +F15 -64.4 -42 +F16 -55.2 -45 +F17 -46 -45 +F18 -36.8 -36 +F19 -27.6 -30 +F20 -18.4 -18 +F21 -27.6 -6 +F22 -36.8 -18 +F23 -46 -30 +F24 -55.2 -30 +F25 -64.4 -30 +F26 -73.6 -30 +F27 -82.8 -33.75 +F28 -92 -33.75 +F29 -101.2 -33.75 +F30 -101.2 -22.5 +F31 -92 -22.5 +F32 -82.8 -22.5 +G1 -9.2 18 +G2 -18.4 18 +G3 -27.6 18 +G4 -36.8 0 +G5 -46 -15 +G6 -55.2 -15 +G7 -64.4 -18 +G8 -73.6 -18 +G9 -82.8 -11.25 +G10 -92 -11.25 +G11 -92 0 +G12 -82.8 0 +G13 -73.6 -6 +G14 -64.4 -6 +G15 -55.2 0 +G16 -46 0 +G17 -55.2 15 +G18 -64.4 6 +G19 -73.6 6 +G20 -82.8 11.25 +G21 -92 11.25 +G22 -101.2 22.5 +G23 -92 22.5 +G24 -82.8 22.5 +G25 -73.6 18 +G26 -64.4 18 +G27 -64.4 30 +G28 -73.6 30 +G29 -82.8 33.75 +G30 -92 33.75 +G31 -101.2 33.75 +G32 -110.4 30 +H1 -18.4 54 +H2 -27.6 42 +H3 -36.8 36 +H4 -36.8 18 +H5 -46 15 +H6 -46 30 +H7 -55.2 30 +H8 -64.4 42 +H9 -73.6 42 +H10 -82.8 45 +H11 -92 45 +H12 -101.2 45 +H13 -110.4 42 +H14 -110.4 54 +H15 -101.2 56.25 +H16 -92 56.25 +H17 -82.8 56.25 +H18 -73.6 54 +H19 -64.4 54 +H20 -55.2 45 +H21 -46 45 +H22 -36.8 54 +H23 -27.6 66 +H24 -46 60 +H25 -55.2 60 +H26 -64.4 66 +H27 -73.6 66 +H28 -82.8 67.5 +H29 -92 67.5 +H30 -101.2 67.5 +H31 -110.4 66 +H32 -119.6 66 +Nz 115 90 +LPA -115 0 +RPA 115 0 diff --git a/mne/channels/data/montages/biosemi32.txt b/mne/channels/data/montages/biosemi32.txt new file mode 100644 index 0000000..d2e0a14 --- /dev/null +++ b/mne/channels/data/montages/biosemi32.txt @@ -0,0 +1,36 @@ +Site Theta Phi +Fp1 -92 -72 +AF3 -74 -65 +F7 -92 -36 +F3 -60 -51 +FC1 -32 -45 +FC5 -72 -21 +T7 -92 0 +C3 -46 0 +CP1 -32 45 +CP5 -72 21 +P7 -92 36 +P3 -60 51 +Pz 46 -90 +PO3 -74 65 +O1 -92 72 +Oz 92 -90 +O2 92 -72 +PO4 74 -65 +P4 60 -51 +P8 92 -36 +CP6 72 -21 +CP2 32 -45 +C4 46 0 +T8 92 0 +FC6 72 21 +FC2 32 45 +F4 60 51 +F8 92 36 +AF4 74 65 +Fp2 92 72 +Fz 46 90 +Cz 0 0 +Nz 115 90 +LPA -115 0 +RPA 115 0 diff --git a/mne/channels/data/montages/biosemi64.txt b/mne/channels/data/montages/biosemi64.txt new file mode 100644 index 0000000..4071cfb --- /dev/null +++ b/mne/channels/data/montages/biosemi64.txt @@ -0,0 +1,68 @@ +Site Theta Phi +Fp1 -92 -72 +AF7 -92 -54 +AF3 -74 -65 +F1 -50 -68 +F3 -60 -51 +F5 -75 -41 +F7 -92 -36 +FT7 -92 -18 +FC5 -72 -21 +FC3 -50 -28 +FC1 -32 -45 +C1 -23 0 +C3 -46 0 +C5 -69 0 +T7 -92 0 +TP7 -92 18 +CP5 -72 21 +CP3 -50 28 +CP1 -32 45 +P1 -50 68 +P3 -60 51 +P5 -75 41 +P7 -92 36 +P9 -115 36 +PO7 -92 54 +PO3 -74 65 +O1 -92 72 +Iz 115 -90 +Oz 92 -90 +POz 69 -90 +Pz 46 -90 +CPz 23 -90 +Fpz 92 90 +Fp2 92 72 +AF8 92 54 +AF4 74 65 +AFz 69 90 +Fz 46 90 +F2 50 68 +F4 60 51 +F6 75 41 +F8 92 36 +FT8 92 18 +FC6 72 21 +FC4 50 28 +FC2 32 45 +FCz 23 90 +Cz 0 0 +C2 23 0 +C4 46 0 +C6 69 0 +T8 92 0 +TP8 92 -18 +CP6 72 -21 +CP4 50 -28 +CP2 32 -45 +P2 50 -68 +P4 60 -51 +P6 75 -41 +P8 92 -36 +P10 115 -36 +PO8 92 -54 +PO4 74 -65 +O2 92 -72 +Nz 115 90 +LPA -115 0 +RPA 115 0 diff --git a/mne/channels/data/montages/brainproducts-RNP-BA-128.txt b/mne/channels/data/montages/brainproducts-RNP-BA-128.txt new file mode 100644 index 0000000..907eb5a --- /dev/null +++ b/mne/channels/data/montages/brainproducts-RNP-BA-128.txt @@ -0,0 +1,131 @@ +Name Theta Phi +Fp1 -90 -72 +Fz 45 90 +F3 -60 -51 +F7 -90 -36 +F9 -113 -36 
+FC5 -69 -21 +FC1 -31 -46 +C3 -45 0 +T7 -90 0 +CP5 -69 21 +CP1 -31 46 +Pz 45 -90 +P3 -60 51 +P7 -90 36 +P9 -113 36 +O1 -90 72 +Oz 90 -90 +O2 90 -72 +P10 113 -36 +P8 90 -36 +P4 60 -51 +CP2 31 -46 +CP6 69 -21 +T8 90 0 +C4 45 0 +Cz 0 0 +FC2 31 46 +FC6 69 21 +F10 113 36 +F8 90 36 +F4 60 51 +Fp2 90 72 +AF7 -90 -54 +AF3 -74 -68 +AFz 67 90 +F1 -49 -68 +F5 -74 -41 +FT7 -90 -18 +FC3 -49 -29 +C1 -23 0 +C5 -68 0 +TP7 -90 18 +CP3 -49 29 +P1 -49 68 +P5 -74 41 +PO7 -90 54 +PO3 -74 68 +Iz 112 -90 +POz 67 -90 +PO4 74 -68 +PO8 90 -54 +P6 74 -41 +P2 49 -68 +CPz 22 -90 +CP4 49 -29 +TP8 90 -18 +C6 68 0 +C2 23 0 +FC4 49 29 +FT8 90 18 +F6 74 41 +F2 49 68 +AF4 74 68 +AF8 90 54 +AFF3h -62 -67 +FFC1h -35 -73 +FFC5h -62 -35 +FT9 -113 -18 +FTT7h -79 -10 +FCC3h -35 -19 +CCP1h -16 45 +CCP5h -57 12 +TP9 -113 18 +TPP7h -81 29 +CPP3h -46 48 +PPO3h -62 67 +PPO9h -101 45 +POO1 -79 82 +PO9 -113 54 +I1 -112 72 +I2 112 -72 +PO10 113 -54 +POO2 79 -82 +PPO10h 101 -45 +PPO4h 62 -67 +CPP4h 46 -48 +TPP8h 81 -29 +TP10 113 -18 +CCP6h 57 -12 +CCP2h 16 -45 +FCC4h 35 19 +FTT8h 79 10 +FT10 113 18 +FFC6h 62 35 +FFC2h 35 73 +AFF4h 62 67 +AFp1 -79 -82 +AFF1h -57 -82 +AFF5h -72 -55 +FFT7h -81 -29 +FFC3h -46 -48 +FCC1h -16 -45 +FCC5h -57 -12 +TTP7h -79 10 +CCP3h -35 19 +CPP1h -35 73 +CPP5h -62 35 +TPP9h -101 27 +PPO5h -72 55 +PPO1h -57 82 +POO9h -101 63 +OI1h -101 81 +OI2h 101 -81 +POO10h 101 -63 +PPO2h 57 -82 +PPO6h 72 -55 +TPP10h 101 -27 +CPP6h 62 -35 +CPP2h 35 -73 +CCP4h 35 -19 +TTP8h 79 -10 +FCC6h 57 12 +FCC2h 16 45 +FFC4h 46 48 +FFT8h 81 29 +AFF6h 72 55 +AFF2h 57 82 +AFp2 79 82 +FCz 23 90 +Fpz 90 90 diff --git a/mne/channels/data/montages/easycap-M1.txt b/mne/channels/data/montages/easycap-M1.txt new file mode 100644 index 0000000..271dc0f --- /dev/null +++ b/mne/channels/data/montages/easycap-M1.txt @@ -0,0 +1,75 @@ +Site Theta Phi +Fp1 -92 -72 +Fp2 92 72 +F3 -60 -51 +F4 60 51 +C3 -46 0 +C4 46 0 +P3 -60 51 +P4 60 -51 +O1 -92 72 +O2 92 -72 +F7 -92 -36 +F8 92 36 +T7 -92 0 +T8 92 0 +P7 -92 36 +P8 92 -36 +Fz 46 90 +Cz 0 0 +Pz 46 -90 +F1 -50 -68 +F2 50 68 +FC1 -32 -45 +FC2 32 45 +C1 -23 0 +C2 23 0 +CP1 -32 45 +CP2 32 -45 +P1 -50 68 +P2 50 -68 +AF3 -74 -65 +AF4 74 65 +FC3 -53 -33 +FC4 53 33 +CP3 -52 33 +CP4 52 -33 +PO3 -74 65 +PO4 74 -65 +F5 -75 -41 +F6 75 41 +FC5 -72 -21 +FC6 72 21 +C5 -69 0 +C6 69 0 +CP5 -72 21 +CP6 72 -21 +P5 -75 41 +P6 75 -41 +AF7 -92 -54 +AF8 92 54 +FT7 -92 -18 +FT8 92 18 +TP7 -92 18 +TP8 92 -18 +PO7 -92 54 +PO8 92 -54 +F9 -115 -36 +F10 115 36 +FT9 -115 -18 +FT10 115 18 +TP9 -115 18 +TP10 115 -18 +P9 -115 36 +P10 115 -36 +PO9 -115 54 +PO10 115 -54 +O9 -115 72 +O10 115 -72 +Fpz 92 90 +AFz 69 90 +FCz 23 90 +CPz 23 -90 +POz 69 -90 +Oz 92 -90 +Iz 115 -90 diff --git a/mne/channels/data/montages/easycap-M10.txt b/mne/channels/data/montages/easycap-M10.txt new file mode 100644 index 0000000..7019fc6 --- /dev/null +++ b/mne/channels/data/montages/easycap-M10.txt @@ -0,0 +1,62 @@ +Site Theta Phi +1 0 0 +2 23 90 +3 23 30 +4 23 -30 +5 23 -90 +6 -23 30 +7 -23 -30 +8 46 90 +9 46 66 +10 46 33 +11 46 0 +12 46 -33 +13 46 -66 +14 46 -90 +15 -46 66 +16 -46 33 +17 -46 0 +18 -46 -33 +19 -46 -66 +20 69 90 +21 69 66 +22 69 42 +23 69 18 +24 69 -6 +25 69 -30 +26 69 -54 +27 69 -78 +28 -69 78 +29 -69 54 +30 -69 30 +31 -69 6 +32 -69 -18 +33 -69 -42 +34 -69 -66 +35 92 90 +36 92 68 +37 92 45 +38 92 22 +39 92 0 +40 92 -22 +41 92 -45 +42 92 -68 +43 92 -90 +44 -92 68 +45 -92 45 +46 -92 22 +47 -92 0 +48 -92 -22 +49 -92 -45 +50 -92 -68 +51 115 35 +52 115 10 +53 115 -15 +54 115 -40 +55 115 -65 +56 115 -90 +57 -115 65 +58 -115 40 +59 -115 15 +60 -115 -10 
+61 -115 -35 diff --git a/mne/channels/data/montages/easycap-M43.txt b/mne/channels/data/montages/easycap-M43.txt new file mode 100644 index 0000000..47bbad7 --- /dev/null +++ b/mne/channels/data/montages/easycap-M43.txt @@ -0,0 +1,65 @@ +Site Theta Phi +1 23 90 +2 23 30 +3 23 -30 +4 23 -90 +5 -23 30 +6 -23 -30 +7 46 74 +8 46 41 +9 46 8 +10 46 -25 +11 46 -57 +12 46 -90 +13 -46 57 +14 -46 25 +15 -46 -8 +16 -46 -41 +17 -46 -74 +18 69 76 +19 69 49 +20 69 21 +21 69 -7 +22 69 -35 +23 69 -62 +24 69 -90 +25 -69 62 +26 -69 35 +27 -69 7 +28 -69 -21 +29 -69 -49 +30 -69 -76 +31 92 90 +32 92 62 +33 92 34 +34 92 6 +35 92 -21 +36 92 -49 +37 92 -76 +38 -92 76 +39 -92 49 +40 -92 21 +41 -92 -6 +42 -92 -34 +43 -92 -62 +44 115 35 +45 115 10 +46 115 -15 +47 115 -40 +48 115 -65 +49 115 -90 +50 -115 65 +51 -115 40 +52 -115 15 +53 -115 -10 +54 -115 -35 +55 138 23 +56 138 -15 +57 138 -40 +58 138 -65 +59 138 -90 +60 -138 65 +61 -138 40 +62 -138 15 +63 -138 -23 +Ref 0 0 diff --git a/mne/channels/data/montages/mgh60.elc b/mne/channels/data/montages/mgh60.elc new file mode 100644 index 0000000..854ab37 --- /dev/null +++ b/mne/channels/data/montages/mgh60.elc @@ -0,0 +1,132 @@ +# ASA electrode file +ReferenceLabel avg +UnitPosition mm +NumberPositions= 63 +Positions +-86.0761 -19.9897 -47.9860 +85.7939 -20.0093 -48.0310 +0.0083 86.8110 -39.9830 +-29.4367 83.9171 -6.9900 +0.1123 88.2470 -1.7130 +29.8723 84.8959 -7.0800 +-54.8397 68.5722 -10.5900 +-33.7007 76.8371 21.2270 +35.7123 77.7259 21.9560 +55.7433 69.6568 -10.7550 +-70.2629 42.4743 -11.4200 +-64.4658 48.0353 16.9210 +-50.2438 53.1112 42.1920 +-27.4958 56.9311 60.3420 +0.3122 58.5120 66.4620 +29.5142 57.6019 59.5400 +51.8362 54.3048 40.8140 +67.9142 49.8297 16.3670 +73.0431 44.4217 -12.0000 +-84.0759 14.5673 -50.4290 +-80.7750 14.1203 -11.1350 +-77.2149 18.6433 24.4600 +-34.0619 26.0111 79.9870 +34.7841 26.4379 78.8080 +79.5341 19.9357 24.4380 +81.8151 15.4167 -11.3300 +84.1131 14.3647 -50.5380 +-85.8941 -15.8287 -48.2830 +-84.1611 -16.0187 -9.3460 +-80.2801 -13.7597 29.1600 +-65.3581 -11.6317 64.3580 +-36.1580 -9.9839 89.7520 +0.4009 -9.1670 100.2440 +37.6720 -9.6241 88.4120 +67.1179 -10.9003 63.5800 +83.4559 -12.7763 29.2080 +85.0799 -15.0203 -9.4900 +85.5599 -16.3613 -48.2710 +-85.6192 -46.5147 -45.7070 +-84.8302 -46.0217 -7.0560 +-63.5562 -47.0088 65.6240 +-35.5131 -47.2919 91.3150 +38.3838 -47.0731 90.6950 +66.6118 -46.6372 65.5800 +85.5488 -45.5453 -7.1300 +86.1618 -47.0353 -45.8690 +-72.4343 -73.4527 -2.4870 +-67.2723 -76.2907 28.3820 +-53.0073 -78.7878 55.9400 +-28.6203 -80.5249 75.4360 +0.3247 -81.1150 82.6150 +31.9197 -80.4871 76.7160 +55.6667 -78.5602 56.5610 +67.8877 -75.9043 28.0910 +73.0557 -73.0683 -2.5400 +-54.8404 -97.5279 2.7920 +-36.5114 -100.8529 37.1670 +36.7816 -100.8491 36.3970 +55.6666 -97.6251 2.7300 +-29.4134 -112.4490 8.8390 +0.1076 -114.8920 14.6570 +29.8426 -112.1560 8.8000 +0.0045 -118.5650 -23.0780 +Labels +LPA +RPA +Nz +EEG001 +EEG002 +EEG003 +EEG004 +EEG005 +EEG006 +EEG007 +EEG008 +EEG009 +EEG010 +EEG011 +EEG012 +EEG013 +EEG014 +EEG015 +EEG016 +EEG017 +EEG018 +EEG019 +EEG020 +EEG021 +EEG022 +EEG023 +EEG024 +EEG025 +EEG026 +EEG027 +EEG028 +EEG029 +EEG030 +EEG031 +EEG032 +EEG033 +EEG034 +EEG035 +EEG036 +EEG037 +EEG038 +EEG039 +EEG040 +EEG041 +EEG042 +EEG043 +EEG044 +EEG045 +EEG046 +EEG047 +EEG048 +EEG049 +EEG050 +EEG051 +EEG052 +EEG053 +EEG054 +EEG055 +EEG056 +EEG057 +EEG058 +EEG059 +EEG060 \ No newline at end of file diff --git a/mne/channels/data/montages/mgh70.elc b/mne/channels/data/montages/mgh70.elc new file mode 100644 index 
0000000..aba48a4 --- /dev/null +++ b/mne/channels/data/montages/mgh70.elc @@ -0,0 +1,152 @@ +# ASA electrode file +ReferenceLabel avg +UnitPosition mm +NumberPositions= 73 +Positions +-86.0761 -19.9897 -47.9860 +85.7939 -20.0093 -48.0310 +0.0083 86.8110 -39.9830 +-29.4367 83.9171 -6.9900 +0.1123 88.2470 -1.7130 +29.8723 84.8959 -7.0800 +-54.8397 68.5722 -10.5900 +-33.7007 76.8371 21.2270 +0.2313 80.7710 35.4170 +35.7123 77.7259 21.9560 +55.7433 69.6568 -10.7550 +-70.2629 42.4743 -11.4200 +-64.4658 48.0353 16.9210 +-50.2438 53.1112 42.1920 +-27.4958 56.9311 60.3420 +0.3122 58.5120 66.4620 +29.5142 57.6019 59.5400 +51.8362 54.3048 40.8140 +67.9142 49.8297 16.3670 +73.0431 44.4217 -12.0000 +-84.0759 14.5673 -50.4290 +-80.7750 14.1203 -11.1350 +-77.2149 18.6433 24.4600 +-60.1819 22.7162 55.5440 +-34.0619 26.0111 79.9870 +0.3761 27.3900 88.6680 +34.7841 26.4379 78.8080 +62.2931 23.7228 55.6300 +79.5341 19.9357 24.4380 +81.8151 15.4167 -11.3300 +84.1131 14.3647 -50.5380 +-85.8941 -15.8287 -48.2830 +-84.1611 -16.0187 -9.3460 +-80.2801 -13.7597 29.1600 +-65.3581 -11.6317 64.3580 +-36.1580 -9.9839 89.7520 +0.4009 -9.1670 100.2440 +37.6720 -9.6241 88.4120 +67.1179 -10.9003 63.5800 +83.4559 -12.7763 29.2080 +85.0799 -15.0203 -9.4900 +85.5599 -16.3613 -48.2710 +-85.6192 -46.5147 -45.7070 +-84.8302 -46.0217 -7.0560 +-79.5922 -46.5507 30.9490 +-63.5562 -47.0088 65.6240 +-35.5131 -47.2919 91.3150 +0.3858 -47.3180 99.4320 +38.3838 -47.0731 90.6950 +66.6118 -46.6372 65.5800 +83.3218 -46.1013 31.2060 +85.5488 -45.5453 -7.1300 +86.1618 -47.0353 -45.8690 +-73.0093 -73.7657 -40.9980 +-72.4343 -73.4527 -2.4870 +-67.2723 -76.2907 28.3820 +-53.0073 -78.7878 55.9400 +-28.6203 -80.5249 75.4360 +0.3247 -81.1150 82.6150 +31.9197 -80.4871 76.7160 +55.6667 -78.5602 56.5610 +67.8877 -75.9043 28.0910 +73.0557 -73.0683 -2.5400 +73.8947 -74.3903 -41.2200 +-54.8404 -97.5279 2.7920 +-36.5114 -100.8529 37.1670 +0.2156 -102.1780 50.6080 +36.7816 -100.8491 36.3970 +55.6666 -97.6251 2.7300 +-29.4134 -112.4490 8.8390 +0.1076 -114.8920 14.6570 +29.8426 -112.1560 8.8000 +0.0045 -118.5650 -23.0780 +Labels +LPA +RPA +Nz +EEG001 +EEG002 +EEG003 +EEG004 +EEG005 +EEG006 +EEG007 +EEG008 +EEG009 +EEG010 +EEG011 +EEG012 +EEG013 +EEG014 +EEG015 +EEG016 +EEG017 +EEG018 +EEG019 +EEG020 +EEG021 +EEG022 +EEG023 +EEG024 +EEG025 +EEG026 +EEG027 +EEG028 +EEG029 +EEG030 +EEG031 +EEG032 +EEG033 +EEG034 +EEG035 +EEG036 +EEG037 +EEG038 +EEG039 +EEG040 +EEG041 +EEG042 +EEG043 +EEG044 +EEG045 +EEG046 +EEG047 +EEG048 +EEG049 +EEG050 +EEG051 +EEG052 +EEG053 +EEG054 +EEG055 +EEG056 +EEG057 +EEG058 +EEG059 +EEG060 +EEG065 +EEG066 +EEG067 +EEG068 +EEG069 +EEG070 +EEG071 +EEG072 +EEG073 +EEG074 \ No newline at end of file diff --git a/mne/channels/data/montages/standard_1005.elc b/mne/channels/data/montages/standard_1005.elc new file mode 100644 index 0000000..4e69532 --- /dev/null +++ b/mne/channels/data/montages/standard_1005.elc @@ -0,0 +1,698 @@ +# ASA electrode file +ReferenceLabel avg +UnitPosition mm +NumberPositions= 346 +Positions +-86.0761 -19.9897 -47.9860 +85.7939 -20.0093 -48.0310 +0.0083 86.8110 -39.9830 +-29.4367 83.9171 -6.9900 +0.1123 88.2470 -1.7130 +29.8723 84.8959 -7.0800 +-48.9708 64.0872 -47.6830 +-54.8397 68.5722 -10.5900 +-45.4307 72.8622 5.9780 +-33.7007 76.8371 21.2270 +-18.4717 79.9041 32.7520 +0.2313 80.7710 35.4170 +19.8203 80.3019 32.7640 +35.7123 77.7259 21.9560 +46.5843 73.8078 6.0340 +55.7433 69.6568 -10.7550 +50.4352 63.8698 -48.0050 +-70.1019 41.6523 -49.9520 +-70.2629 42.4743 -11.4200 +-64.4658 48.0353 16.9210 +-50.2438 
53.1112 42.1920 +-27.4958 56.9311 60.3420 +0.3122 58.5120 66.4620 +29.5142 57.6019 59.5400 +51.8362 54.3048 40.8140 +67.9142 49.8297 16.3670 +73.0431 44.4217 -12.0000 +72.1141 42.0667 -50.4520 +-84.0759 14.5673 -50.4290 +-80.7750 14.1203 -11.1350 +-77.2149 18.6433 24.4600 +-60.1819 22.7162 55.5440 +-34.0619 26.0111 79.9870 +0.3761 27.3900 88.6680 +34.7841 26.4379 78.8080 +62.2931 23.7228 55.6300 +79.5341 19.9357 24.4380 +81.8151 15.4167 -11.3300 +84.1131 14.3647 -50.5380 +-85.8941 -15.8287 -48.2830 +-84.1611 -16.0187 -9.3460 +-80.2801 -13.7597 29.1600 +-65.3581 -11.6317 64.3580 +-36.1580 -9.9839 89.7520 +0.4009 -9.1670 100.2440 +37.6720 -9.6241 88.4120 +67.1179 -10.9003 63.5800 +83.4559 -12.7763 29.2080 +85.0799 -15.0203 -9.4900 +85.5599 -16.3613 -48.2710 +-85.6192 -46.5147 -45.7070 +-84.8302 -46.0217 -7.0560 +-79.5922 -46.5507 30.9490 +-63.5562 -47.0088 65.6240 +-35.5131 -47.2919 91.3150 +0.3858 -47.3180 99.4320 +38.3838 -47.0731 90.6950 +66.6118 -46.6372 65.5800 +83.3218 -46.1013 31.2060 +85.5488 -45.5453 -7.1300 +86.1618 -47.0353 -45.8690 +-73.0093 -73.7657 -40.9980 +-72.4343 -73.4527 -2.4870 +-67.2723 -76.2907 28.3820 +-53.0073 -78.7878 55.9400 +-28.6203 -80.5249 75.4360 +0.3247 -81.1150 82.6150 +31.9197 -80.4871 76.7160 +55.6667 -78.5602 56.5610 +67.8877 -75.9043 28.0910 +73.0557 -73.0683 -2.5400 +73.8947 -74.3903 -41.2200 +-54.9104 -98.0448 -35.4650 +-54.8404 -97.5279 2.7920 +-48.4244 -99.3408 21.5990 +-36.5114 -100.8529 37.1670 +-18.9724 -101.7680 46.5360 +0.2156 -102.1780 50.6080 +19.8776 -101.7930 46.3930 +36.7816 -100.8491 36.3970 +49.8196 -99.4461 21.7270 +55.6666 -97.6251 2.7300 +54.9876 -98.0911 -35.5410 +-29.4134 -112.4490 8.8390 +0.1076 -114.8920 14.6570 +29.8426 -112.1560 8.8000 +-29.8184 -114.5700 -29.2160 +0.0045 -118.5650 -23.0780 +29.7416 -114.2600 -29.2560 +-43.2897 75.8552 -28.2440 +-38.5517 79.9532 -4.9950 +-27.9857 82.4591 2.7020 +-17.1947 84.8491 10.0270 +-5.9317 86.8780 16.2000 +7.1053 87.0740 16.4690 +18.9233 85.5969 11.4430 +28.6443 82.9759 2.8280 +39.3203 80.6868 -4.7250 +43.8223 76.5418 -28.3070 +-63.2538 53.8573 -30.3160 +-61.3508 58.7992 0.8970 +-50.7998 64.0412 23.0890 +-34.3157 68.3931 41.1880 +-11.4357 70.7561 50.3480 +13.4793 71.2010 51.1750 +36.1833 69.1509 41.2540 +52.3972 65.0708 22.8620 +62.9152 60.0448 0.6300 +64.3342 54.5998 -30.4440 +-79.0669 28.0813 -31.2530 +-74.4999 31.3003 4.8460 +-65.2379 36.4282 36.1440 +-44.4098 40.7622 61.6900 +-15.4238 43.6600 77.6820 +17.5922 44.0540 77.7880 +45.8532 41.6228 60.6470 +67.1281 37.7998 35.2960 +78.0531 32.9817 4.4830 +80.0971 28.5137 -31.3380 +-84.1250 -1.8467 -29.7940 +-82.3550 0.8263 8.5790 +-74.6920 4.3033 45.3070 +-51.0509 7.1772 74.3770 +-18.2190 9.0941 92.5290 +18.7870 9.2479 91.5620 +51.8851 7.7978 73.5070 +77.0020 5.3357 45.3500 +83.8880 1.9457 8.5010 +84.1230 -1.8083 -29.6380 +-86.9731 -32.2157 -27.8480 +-85.5651 -30.6287 11.1530 +-76.4071 -29.7307 49.2170 +-52.9281 -28.9058 80.3040 +-18.3541 -28.3219 98.2200 +20.2199 -28.1481 98.1720 +55.1139 -28.3862 80.4740 +79.0059 -28.9863 49.6280 +85.9999 -29.8203 11.2480 +88.6249 -32.2723 -28.0000 +-78.1602 -60.7567 -23.8240 +-76.6802 -60.8317 12.8800 +-68.1152 -62.9747 47.2520 +-46.9142 -64.6908 75.2960 +-15.8202 -65.5999 91.1640 +19.4198 -65.5950 92.4050 +50.6738 -64.4822 76.1300 +71.0958 -62.6243 47.3280 +78.5198 -60.4323 12.9020 +78.9027 -60.9553 -23.8050 +-64.5973 -87.6558 -19.0140 +-62.9593 -87.5028 12.9520 +-54.0103 -89.8988 37.3320 +-35.8874 -91.6669 55.5040 +-12.0474 -92.6069 65.5080 +13.9226 -92.6940 66.9580 +37.7986 -91.6291 56.7330 +54.6087 
-89.6402 37.0350 +63.1117 -87.2282 12.8560 +65.0137 -87.8062 -18.9520 +-42.8624 -108.0730 -13.1510 +-40.1204 -107.1290 12.0610 +-31.9514 -108.2520 23.0470 +-19.8624 -108.9420 29.7600 +-6.9194 -109.2600 32.7100 +6.8036 -109.1630 31.5820 +20.2936 -108.9140 28.9440 +32.1756 -108.2520 22.2550 +41.0976 -107.2450 12.1380 +43.8946 -109.1270 -13.1700 +-14.8504 -117.9870 -6.9200 +15.0946 -118.0180 -6.9330 +-14.8107 87.2351 -4.4770 +15.1623 88.0910 -4.5510 +-54.8298 66.4132 -29.7040 +-51.1757 70.8362 -1.7550 +-39.6407 74.8671 13.6780 +-27.2187 78.7091 28.3750 +-9.1977 80.6051 35.1330 +10.4823 80.8650 35.3590 +28.5803 79.3029 28.4700 +40.9403 75.7399 13.8600 +52.0293 71.8468 -1.9200 +55.7542 67.1698 -29.8240 +-71.5079 41.1193 -30.8540 +-68.5558 45.2843 3.0020 +-58.4878 50.6722 30.1920 +-39.9798 55.2601 52.6000 +-13.3838 57.9021 64.3320 +15.8342 58.4559 64.9920 +41.7942 56.2259 51.4990 +60.0522 52.0858 28.7080 +71.9592 47.1917 2.4750 +72.7981 41.8218 -31.0260 +-82.9559 13.3203 -30.8080 +-80.1139 16.3903 6.8500 +-71.2099 20.8203 41.3240 +-48.5119 24.5292 69.1360 +-17.3439 27.0241 86.9230 +18.4181 27.2709 86.4370 +49.5481 25.2378 68.4300 +73.2191 22.0067 41.2970 +81.5801 17.6837 6.5640 +83.3711 13.5477 -30.7490 +-85.1321 -17.0557 -28.7310 +-82.9461 -14.8827 10.0090 +-75.2941 -12.6397 47.9040 +-51.5811 -10.7548 78.0350 +-18.2790 -9.4319 97.3560 +19.6780 -9.3041 95.7060 +53.8059 -10.1442 77.7300 +78.1249 -11.7353 47.8400 +85.1369 -13.9063 9.8900 +86.0999 -17.0883 -28.7560 +-84.8102 -47.2457 -26.2200 +-82.7042 -46.2977 11.9740 +-73.3012 -46.7917 49.1090 +-51.0492 -47.1758 80.0160 +-17.3542 -47.3419 97.4100 +20.6798 -47.2321 98.0720 +53.9968 -46.8902 80.0770 +76.5498 -46.3733 49.1400 +85.1998 -45.8073 12.1020 +85.4428 -47.2213 -26.1760 +-72.1773 -74.6277 -21.5360 +-70.1133 -74.8677 12.9990 +-61.7283 -77.6238 43.0280 +-41.6733 -79.7528 66.7150 +-13.9613 -81.0029 81.0030 +17.2977 -80.9810 81.6410 +44.7477 -79.6111 67.6550 +63.6267 -77.3022 43.1190 +72.1037 -74.4993 13.0250 +73.2817 -75.0773 -21.5760 +-54.7754 -98.9768 -16.1930 +-51.9284 -98.4438 12.3040 +-43.3424 -100.1629 30.0090 +-28.0074 -101.3610 42.3790 +-9.5034 -102.0600 49.4180 +10.2356 -102.0290 48.9420 +28.6476 -101.3901 42.1380 +44.2206 -100.2191 29.8080 +52.8386 -98.5360 12.2500 +55.8596 -99.8940 -16.2080 +-14.8054 -115.1000 11.8290 +15.1456 -115.1910 11.8330 +-15.1584 -118.2420 -26.0480 +15.1286 -118.1510 -26.0810 +-36.1247 72.3801 -45.8520 +-43.5117 78.5802 -9.2400 +-33.2847 81.2071 -1.1400 +-22.3517 83.5621 6.0710 +-12.2417 86.1941 14.1880 +0.1703 87.3220 17.4420 +13.6223 86.7579 15.3020 +24.1013 84.3769 7.4330 +33.9133 81.8119 -1.0350 +43.9483 79.2958 -9.3000 +37.7123 72.1679 -46.1970 +-59.3398 52.6802 -48.7700 +-63.2618 55.9922 -11.1730 +-55.8198 61.3962 11.8840 +-43.3817 66.3672 32.8110 +-23.5817 69.9171 47.2930 +0.2763 71.2800 52.0920 +25.5583 70.5559 47.8270 +45.1522 67.2748 32.7310 +58.0002 62.5998 11.9000 +64.6732 57.2738 -11.4600 +60.6012 52.2668 -49.0380 +-78.4839 28.7703 -50.5220 +-76.6149 28.6533 -11.5080 +-71.5059 33.9263 20.9930 +-55.9399 38.7162 49.7880 +-30.6548 42.4151 71.0400 +0.3512 44.0740 79.1410 +32.6451 43.1009 70.7950 +57.5042 39.8518 48.8110 +74.2501 35.4997 20.3800 +79.0341 30.3437 -11.9970 +79.9201 28.9417 -50.9140 +-87.3620 -0.5147 -49.8370 +-82.6680 -0.9417 -10.2840 +-80.1330 2.5853 27.3120 +-64.1610 5.8313 60.8850 +-35.7490 8.3091 85.4590 +0.3911 9.5080 95.5600 +36.0700 8.6519 83.8320 +65.1640 6.6198 60.0520 +81.5440 3.6637 27.2010 +83.1680 0.1817 -10.3640 +85.3930 -0.9523 -49.5200 +-86.6321 -31.2377 -47.1780 
+-85.9331 -31.0927 -8.4740 +-81.5431 -30.1727 30.2730 +-66.1281 -29.2957 65.8980 +-36.9301 -28.5699 91.7340 +0.3959 -28.1630 101.2690 +38.5399 -28.2251 90.9760 +68.8539 -28.6403 66.4100 +84.5529 -29.3783 30.8780 +85.9999 -30.2803 -8.4350 +86.7619 -31.7313 -47.2530 +-80.7152 -60.6457 -43.5940 +-78.5992 -59.7237 -4.7580 +-73.6642 -61.9227 30.3800 +-59.4112 -63.9248 62.6720 +-32.7283 -65.3199 85.9440 +0.3658 -65.7500 94.0580 +35.8918 -65.1381 85.9800 +62.2558 -63.6152 62.7190 +76.6708 -61.5483 30.5430 +79.3188 -59.3033 -4.8400 +81.5598 -61.2153 -43.8000 +-64.5703 -86.4318 -38.3240 +-64.5833 -86.2218 0.0330 +-58.7123 -88.7048 25.1930 +-46.1603 -90.8878 47.4460 +-24.6483 -92.2919 62.0760 +0.2727 -92.7580 67.3420 +26.4367 -92.2951 63.1990 +47.1437 -90.7122 47.6780 +60.8127 -88.5042 25.6620 +65.1517 -85.9432 -0.0090 +65.0377 -86.7182 -38.4480 +-43.1284 -107.5160 -32.3870 +-42.9764 -106.4930 5.7730 +-36.2344 -107.7160 17.7500 +-25.9844 -108.6160 26.5440 +-13.6644 -109.2660 32.8560 +0.1676 -109.2760 32.7900 +13.6506 -109.1060 30.9360 +26.6636 -108.6680 26.4150 +37.7006 -107.8400 18.0690 +43.6696 -106.5990 5.7260 +43.1766 -107.4440 -32.4630 +-29.3914 -114.5110 -10.0200 +0.0525 -119.3430 -3.9360 +29.5526 -113.6360 -10.0510 +-84.1611 -16.0187 -9.3460 +-72.4343 -73.4527 -2.4870 +85.0799 -15.0203 -9.4900 +73.0557 -73.0683 -2.5400 +-86.0761 -44.9897 -67.9860 + 85.7939 -45.0093 -68.0310 +-86.0761 -24.9897 -67.9860 + 85.7939 -25.0093 -68.0310 +Labels +LPA +RPA +Nz +Fp1 +Fpz +Fp2 +AF9 +AF7 +AF5 +AF3 +AF1 +AFz +AF2 +AF4 +AF6 +AF8 +AF10 +F9 +F7 +F5 +F3 +F1 +Fz +F2 +F4 +F6 +F8 +F10 +FT9 +FT7 +FC5 +FC3 +FC1 +FCz +FC2 +FC4 +FC6 +FT8 +FT10 +T9 +T7 +C5 +C3 +C1 +Cz +C2 +C4 +C6 +T8 +T10 +TP9 +TP7 +CP5 +CP3 +CP1 +CPz +CP2 +CP4 +CP6 +TP8 +TP10 +P9 +P7 +P5 +P3 +P1 +Pz +P2 +P4 +P6 +P8 +P10 +PO9 +PO7 +PO5 +PO3 +PO1 +POz +PO2 +PO4 +PO6 +PO8 +PO10 +O1 +Oz +O2 +I1 +Iz +I2 +AFp9h +AFp7h +AFp5h +AFp3h +AFp1h +AFp2h +AFp4h +AFp6h +AFp8h +AFp10h +AFF9h +AFF7h +AFF5h +AFF3h +AFF1h +AFF2h +AFF4h +AFF6h +AFF8h +AFF10h +FFT9h +FFT7h +FFC5h +FFC3h +FFC1h +FFC2h +FFC4h +FFC6h +FFT8h +FFT10h +FTT9h +FTT7h +FCC5h +FCC3h +FCC1h +FCC2h +FCC4h +FCC6h +FTT8h +FTT10h +TTP9h +TTP7h +CCP5h +CCP3h +CCP1h +CCP2h +CCP4h +CCP6h +TTP8h +TTP10h +TPP9h +TPP7h +CPP5h +CPP3h +CPP1h +CPP2h +CPP4h +CPP6h +TPP8h +TPP10h +PPO9h +PPO7h +PPO5h +PPO3h +PPO1h +PPO2h +PPO4h +PPO6h +PPO8h +PPO10h +POO9h +POO7h +POO5h +POO3h +POO1h +POO2h +POO4h +POO6h +POO8h +POO10h +OI1h +OI2h +Fp1h +Fp2h +AF9h +AF7h +AF5h +AF3h +AF1h +AF2h +AF4h +AF6h +AF8h +AF10h +F9h +F7h +F5h +F3h +F1h +F2h +F4h +F6h +F8h +F10h +FT9h +FT7h +FC5h +FC3h +FC1h +FC2h +FC4h +FC6h +FT8h +FT10h +T9h +T7h +C5h +C3h +C1h +C2h +C4h +C6h +T8h +T10h +TP9h +TP7h +CP5h +CP3h +CP1h +CP2h +CP4h +CP6h +TP8h +TP10h +P9h +P7h +P5h +P3h +P1h +P2h +P4h +P6h +P8h +P10h +PO9h +PO7h +PO5h +PO3h +PO1h +PO2h +PO4h +PO6h +PO8h +PO10h +O1h +O2h +I1h +I2h +AFp9 +AFp7 +AFp5 +AFp3 +AFp1 +AFpz +AFp2 +AFp4 +AFp6 +AFp8 +AFp10 +AFF9 +AFF7 +AFF5 +AFF3 +AFF1 +AFFz +AFF2 +AFF4 +AFF6 +AFF8 +AFF10 +FFT9 +FFT7 +FFC5 +FFC3 +FFC1 +FFCz +FFC2 +FFC4 +FFC6 +FFT8 +FFT10 +FTT9 +FTT7 +FCC5 +FCC3 +FCC1 +FCCz +FCC2 +FCC4 +FCC6 +FTT8 +FTT10 +TTP9 +TTP7 +CCP5 +CCP3 +CCP1 +CCPz +CCP2 +CCP4 +CCP6 +TTP8 +TTP10 +TPP9 +TPP7 +CPP5 +CPP3 +CPP1 +CPPz +CPP2 +CPP4 +CPP6 +TPP8 +TPP10 +PPO9 +PPO7 +PPO5 +PPO3 +PPO1 +PPOz +PPO2 +PPO4 +PPO6 +PPO8 +PPO10 +POO9 +POO7 +POO5 +POO3 +POO1 +POOz +POO2 +POO4 +POO6 +POO8 +POO10 +OI1 +OIz +OI2 +T3 +T5 +T4 +T6 +M1 +M2 +A1 +A2 diff --git a/mne/channels/data/montages/standard_1020.elc 
b/mne/channels/data/montages/standard_1020.elc new file mode 100644 index 0000000..2f68b51 --- /dev/null +++ b/mne/channels/data/montages/standard_1020.elc @@ -0,0 +1,200 @@ +# ASA electrode file +ReferenceLabel avg +UnitPosition mm +NumberPositions= 97 +Positions +-86.0761 -19.9897 -47.9860 +85.7939 -20.0093 -48.0310 +0.0083 86.8110 -39.9830 +-29.4367 83.9171 -6.9900 +0.1123 88.2470 -1.7130 +29.8723 84.8959 -7.0800 +-48.9708 64.0872 -47.6830 +-54.8397 68.5722 -10.5900 +-45.4307 72.8622 5.9780 +-33.7007 76.8371 21.2270 +-18.4717 79.9041 32.7520 +0.2313 80.7710 35.4170 +19.8203 80.3019 32.7640 +35.7123 77.7259 21.9560 +46.5843 73.8078 6.0340 +55.7433 69.6568 -10.7550 +50.4352 63.8698 -48.0050 +-70.1019 41.6523 -49.9520 +-70.2629 42.4743 -11.4200 +-64.4658 48.0353 16.9210 +-50.2438 53.1112 42.1920 +-27.4958 56.9311 60.3420 +0.3122 58.5120 66.4620 +29.5142 57.6019 59.5400 +51.8362 54.3048 40.8140 +67.9142 49.8297 16.3670 +73.0431 44.4217 -12.0000 +72.1141 42.0667 -50.4520 +-84.0759 14.5673 -50.4290 +-80.7750 14.1203 -11.1350 +-77.2149 18.6433 24.4600 +-60.1819 22.7162 55.5440 +-34.0619 26.0111 79.9870 +0.3761 27.3900 88.6680 +34.7841 26.4379 78.8080 +62.2931 23.7228 55.6300 +79.5341 19.9357 24.4380 +81.8151 15.4167 -11.3300 +84.1131 14.3647 -50.5380 +-85.8941 -15.8287 -48.2830 +-84.1611 -16.0187 -9.3460 +-80.2801 -13.7597 29.1600 +-65.3581 -11.6317 64.3580 +-36.1580 -9.9839 89.7520 +0.4009 -9.1670 100.2440 +37.6720 -9.6241 88.4120 +67.1179 -10.9003 63.5800 +83.4559 -12.7763 29.2080 +85.0799 -15.0203 -9.4900 +85.5599 -16.3613 -48.2710 +-85.6192 -46.5147 -45.7070 +-84.8302 -46.0217 -7.0560 +-79.5922 -46.5507 30.9490 +-63.5562 -47.0088 65.6240 +-35.5131 -47.2919 91.3150 +0.3858 -47.3180 99.4320 +38.3838 -47.0731 90.6950 +66.6118 -46.6372 65.5800 +83.3218 -46.1013 31.2060 +85.5488 -45.5453 -7.1300 +86.1618 -47.0353 -45.8690 +-73.0093 -73.7657 -40.9980 +-72.4343 -73.4527 -2.4870 +-67.2723 -76.2907 28.3820 +-53.0073 -78.7878 55.9400 +-28.6203 -80.5249 75.4360 +0.3247 -81.1150 82.6150 +31.9197 -80.4871 76.7160 +55.6667 -78.5602 56.5610 +67.8877 -75.9043 28.0910 +73.0557 -73.0683 -2.5400 +73.8947 -74.3903 -41.2200 +-54.9104 -98.0448 -35.4650 +-54.8404 -97.5279 2.7920 +-48.4244 -99.3408 21.5990 +-36.5114 -100.8529 37.1670 +-18.9724 -101.7680 46.5360 +0.2156 -102.1780 50.6080 +19.8776 -101.7930 46.3930 +36.7816 -100.8491 36.3970 +49.8196 -99.4461 21.7270 +55.6666 -97.6251 2.7300 +54.9876 -98.0911 -35.5410 +-29.4134 -112.4490 8.8390 +0.1076 -114.8920 14.6570 +29.8426 -112.1560 8.8000 +-29.8184 -114.5700 -29.2160 +0.0045 -118.5650 -23.0780 +29.7416 -114.2600 -29.2560 +-84.1611 -16.0187 -9.3460 +-72.4343 -73.4527 -2.4870 +85.0799 -15.0203 -9.4900 +73.0557 -73.0683 -2.5400 +-86.0761 -44.9897 -67.9860 + 85.7939 -45.0093 -68.0310 +-86.0761 -24.9897 -67.9860 + 85.7939 -25.0093 -68.0310 +Labels +LPA +RPA +Nz +Fp1 +Fpz +Fp2 +AF9 +AF7 +AF5 +AF3 +AF1 +AFz +AF2 +AF4 +AF6 +AF8 +AF10 +F9 +F7 +F5 +F3 +F1 +Fz +F2 +F4 +F6 +F8 +F10 +FT9 +FT7 +FC5 +FC3 +FC1 +FCz +FC2 +FC4 +FC6 +FT8 +FT10 +T9 +T7 +C5 +C3 +C1 +Cz +C2 +C4 +C6 +T8 +T10 +TP9 +TP7 +CP5 +CP3 +CP1 +CPz +CP2 +CP4 +CP6 +TP8 +TP10 +P9 +P7 +P5 +P3 +P1 +Pz +P2 +P4 +P6 +P8 +P10 +PO9 +PO7 +PO5 +PO3 +PO1 +POz +PO2 +PO4 +PO6 +PO8 +PO10 +O1 +Oz +O2 +O9 +Iz +O10 +T3 +T5 +T4 +T6 +M1 +M2 +A1 +A2 diff --git a/mne/channels/data/montages/standard_alphabetic.elc b/mne/channels/data/montages/standard_alphabetic.elc new file mode 100644 index 0000000..55367e4 --- /dev/null +++ b/mne/channels/data/montages/standard_alphabetic.elc @@ -0,0 +1,142 @@ +# ASA electrode file 
+ReferenceLabel avg +UnitPosition mm +NumberPositions= 68 +Positions +-86.0761 -19.9897 -47.9860 +85.7939 -20.0093 -48.0310 +0.0083 86.8110 -39.9830 +-29.4367 83.9171 -6.9900 +0.1123 88.2470 -1.7130 +29.8723 84.8959 -7.0800 +-54.8397 68.5722 -10.5900 +-33.7007 76.8371 21.2270 +0.2313 80.7710 35.4170 +35.7123 77.7259 21.9560 +55.7433 69.6568 -10.7550 +-70.2629 42.4743 -11.4200 +-64.4658 48.0353 16.9210 +-50.2438 53.1112 42.1920 +-27.4958 56.9311 60.3420 +0.3122 58.5120 66.4620 +29.5142 57.6019 59.5400 +51.8362 54.3048 40.8140 +67.9142 49.8297 16.3670 +73.0431 44.4217 -12.0000 +-80.7750 14.1203 -11.1350 +-77.2149 18.6433 24.4600 +-60.1819 22.7162 55.5440 +-34.0619 26.0111 79.9870 +0.3761 27.3900 88.6680 +34.7841 26.4379 78.8080 +62.2931 23.7228 55.6300 +79.5341 19.9357 24.4380 +81.8151 15.4167 -11.3300 +-84.1611 -16.0187 -9.3460 +-80.2801 -13.7597 29.1600 +-65.3581 -11.6317 64.3580 +-36.1580 -9.9839 89.7520 +0.4009 -9.1670 100.2440 +37.6720 -9.6241 88.4120 +67.1179 -10.9003 63.5800 +83.4559 -12.7763 29.2080 +85.0799 -15.0203 -9.4900 +-84.8302 -46.0217 -7.0560 +-79.5922 -46.5507 30.9490 +-63.5562 -47.0088 65.6240 +-35.5131 -47.2919 91.3150 +0.3858 -47.3180 99.4320 +38.3838 -47.0731 90.6950 +66.6118 -46.6372 65.5800 +83.3218 -46.1013 31.2060 +85.5488 -45.5453 -7.1300 +-72.4343 -73.4527 -2.4870 +-67.2723 -76.2907 28.3820 +-53.0073 -78.7878 55.9400 +-28.6203 -80.5249 75.4360 +0.3247 -81.1150 82.6150 +31.9197 -80.4871 76.7160 +55.6667 -78.5602 56.5610 +67.8877 -75.9043 28.0910 +73.0557 -73.0683 -2.5400 +-54.8404 -97.5279 2.7920 +-36.5114 -100.8529 37.1670 +0.2156 -102.1780 50.6080 +36.7816 -100.8491 36.3970 +55.6666 -97.6251 2.7300 +-29.4134 -112.4490 8.8390 +0.1076 -114.8920 14.6570 +29.8426 -112.1560 8.8000 +-86.0761 -44.9897 -67.9860 + 85.7939 -45.0093 -68.0310 +-86.0761 -24.9897 -67.9860 + 85.7939 -25.0093 -68.0310 +Labels +LPA +RPA +Nz +Fp1 +Fpz +Fp2 +B3 +B1 +Bz +B2 +B4 +F7 +F5 +F3 +F1 +Fz +F2 +F4 +F6 +F8 +D7 +D5 +D3 +D1 +Dz +D2 +D4 +D6 +D8 +T3 +C5 +C3 +C1 +Cz +C2 +C4 +C6 +T4 +E7 +E5 +E3 +E1 +Ez +E2 +E4 +E6 +E8 +T5 +P5 +P3 +P1 +Pz +P2 +P4 +P6 +T6 +H3 +H1 +Hz +H2 +H4 +O1 +Oz +O2 +M1 +M2 +A1 +A2 diff --git a/mne/channels/data/montages/standard_postfixed.elc b/mne/channels/data/montages/standard_postfixed.elc new file mode 100644 index 0000000..3ed4d32 --- /dev/null +++ b/mne/channels/data/montages/standard_postfixed.elc @@ -0,0 +1,212 @@ +# ASA electrode file +ReferenceLabel avg +UnitPosition mm +NumberPositions= 103 +Positions +-86.0761 -19.9897 -47.9860 +85.7939 -20.0093 -48.0310 +0.0083 86.8110 -39.9830 +-29.4367 83.9171 -6.9900 +0.1123 88.2470 -1.7130 +29.8723 84.8959 -7.0800 +-54.8397 68.5722 -10.5900 +-45.4307 72.8622 5.9780 +-33.7007 76.8371 21.2270 +-18.4717 79.9041 32.7520 +0.2313 80.7710 35.4170 +19.8203 80.3019 32.7640 +35.7123 77.7259 21.9560 +46.5843 73.8078 6.0340 +55.7433 69.6568 -10.7550 +-70.2629 42.4743 -11.4200 +-64.4658 48.0353 16.9210 +-50.2438 53.1112 42.1920 +-27.4958 56.9311 60.3420 +0.3122 58.5120 66.4620 +29.5142 57.6019 59.5400 +51.8362 54.3048 40.8140 +67.9142 49.8297 16.3670 +73.0431 44.4217 -12.0000 +-80.7750 14.1203 -11.1350 +-77.2149 18.6433 24.4600 +-60.1819 22.7162 55.5440 +-34.0619 26.0111 79.9870 +0.3761 27.3900 88.6680 +34.7841 26.4379 78.8080 +62.2931 23.7228 55.6300 +79.5341 19.9357 24.4380 +81.8151 15.4167 -11.3300 +-84.0759 14.5673 -50.4290 +-80.7750 14.1203 -11.1350 +-77.2149 18.6433 24.4600 +-60.1819 22.7162 55.5440 +-34.0619 26.0111 79.9870 +0.3761 27.3900 88.6680 +34.7841 26.4379 78.8080 +62.2931 23.7228 55.6300 +79.5341 19.9357 24.4380 +81.8151 
15.4167 -11.3300 +84.1131 14.3647 -50.5380 +-84.1611 -16.0187 -9.3460 +-80.2801 -13.7597 29.1600 +-65.3581 -11.6317 64.3580 +-36.1580 -9.9839 89.7520 +0.4009 -9.1670 100.2440 +37.6720 -9.6241 88.4120 +67.1179 -10.9003 63.5800 +83.4559 -12.7763 29.2080 +85.0799 -15.0203 -9.4900 +-84.8302 -46.0217 -7.0560 +-79.5922 -46.5507 30.9490 +-63.5562 -47.0088 65.6240 +-35.5131 -47.2919 91.3150 +0.3858 -47.3180 99.4320 +38.3838 -47.0731 90.6950 +66.6118 -46.6372 65.5800 +83.3218 -46.1013 31.2060 +85.5488 -45.5453 -7.1300 +-84.8302 -46.0217 -7.0560 +-79.5922 -46.5507 30.9490 +-63.5562 -47.0088 65.6240 +-35.5131 -47.2919 91.3150 +0.3858 -47.3180 99.4320 +38.3838 -47.0731 90.6950 +66.6118 -46.6372 65.5800 +83.3218 -46.1013 31.2060 +85.5488 -45.5453 -7.1300 +-73.0093 -73.7657 -40.9980 +-72.4343 -73.4527 -2.4870 +-67.2723 -76.2907 28.3820 +-53.0073 -78.7878 55.9400 +-28.6203 -80.5249 75.4360 +0.3247 -81.1150 82.6150 +31.9197 -80.4871 76.7160 +55.6667 -78.5602 56.5610 +67.8877 -75.9043 28.0910 +73.0557 -73.0683 -2.5400 +73.8947 -74.3903 -41.2200 +-54.9104 -98.0448 -35.4650 +-54.8404 -97.5279 2.7920 +-48.4244 -99.3408 21.5990 +-36.5114 -100.8529 37.1670 +-18.9724 -101.7680 46.5360 +0.2156 -102.1780 50.6080 +19.8776 -101.7930 46.3930 +36.7816 -100.8491 36.3970 +49.8196 -99.4461 21.7270 +55.6666 -97.6251 2.7300 +54.9876 -98.0911 -35.5410 +-29.4134 -112.4490 8.8390 +0.1076 -114.8920 14.6570 +29.8426 -112.1560 8.8000 +-29.8184 -114.5700 -29.2160 +0.0045 -118.5650 -23.0780 +29.7416 -114.2600 -29.2560 +-86.0761 -44.9897 -67.9860 + 85.7939 -45.0093 -68.0310 +-86.0761 -24.9897 -67.9860 + 85.7939 -25.0093 -68.0310 +Labels +LPA +RPA +Nz +Fp1 +Fpz +Fp2 +F7a +F5a +F3a +F1a +Fza +F2a +F4a +F6a +F8a +F7 +F5 +F3 +F1 +Fz +F2 +F4 +F6 +F8 +F7p +F5p +F3p +F1p +Fzp +F2p +F4p +F6p +F8p +T1 +T3a +C5a +C3a +C1a +Cza +C2a +C4a +C6a +T4a +T2 +T3 +C5 +C3 +C1 +Cz +C2 +C4 +C6 +T4 +T3p +C5p +C3p +C1p +Czp +C2p +C4p +C6p +T4p +T5a +P5a +P3a +P1a +Pza +P2a +P4a +P6a +T6a +Cb1a +T5 +P5 +P3 +P1 +Pz +P2 +P4 +P6 +T6 +Cb2a +Cb1 +O1a +P5p +P3p +P1p +Pzp +P2p +P4p +P6p +O2a +Cb2 +O1 +Oz +O2 +Cb1p +Iz +Cb2p +M1 +M2 +A1 +A2 diff --git a/mne/channels/data/montages/standard_prefixed.elc b/mne/channels/data/montages/standard_prefixed.elc new file mode 100644 index 0000000..67563c0 --- /dev/null +++ b/mne/channels/data/montages/standard_prefixed.elc @@ -0,0 +1,160 @@ +# ASA electrode file +ReferenceLabel avg +UnitPosition mm +NumberPositions= 77 +Positions +-86.0761 -19.9897 -47.9860 +85.7939 -20.0093 -48.0310 +0.0083 86.8110 -39.9830 +-29.4367 83.9171 -6.9900 +0.1123 88.2470 -1.7130 +29.8723 84.8959 -7.0800 +55.7433 69.6568 -10.7550 +-33.7007 76.8371 21.2270 +0.2313 80.7710 35.4170 +35.7123 77.7259 21.9560 +55.7433 69.6568 -10.7550 +-70.2629 42.4743 -11.4200 +-64.4658 48.0353 16.9210 +-50.2438 53.1112 42.1920 +-27.4958 56.9311 60.3420 +0.3122 58.5120 66.4620 +29.5142 57.6019 59.5400 +51.8362 54.3048 40.8140 +67.9142 49.8297 16.3670 +73.0431 44.4217 -12.0000 +-84.0759 14.5673 -50.4290 +-80.7750 14.1203 -11.1350 +-77.2149 18.6433 24.4600 +-60.1819 22.7162 55.5440 +-34.0619 26.0111 79.9870 +0.3761 27.3900 88.6680 +34.7841 26.4379 78.8080 +62.2931 23.7228 55.6300 +79.5341 19.9357 24.4380 +81.8151 15.4167 -11.3300 +84.1131 14.3647 -50.5380 +-85.8941 -15.8287 -48.2830 +-84.1611 -16.0187 -9.3460 +-80.2801 -13.7597 29.1600 +-65.3581 -11.6317 64.3580 +-36.1580 -9.9839 89.7520 +0.4009 -9.1670 100.2440 +37.6720 -9.6241 88.4120 +67.1179 -10.9003 63.5800 +83.4559 -12.7763 29.2080 +85.0799 -15.0203 -9.4900 +85.5599 -16.3613 -48.2710 +-84.8302 -46.0217 -7.0560 
+-79.5922 -46.5507 30.9490 +-63.5562 -47.0088 65.6240 +-35.5131 -47.2919 91.3150 +0.3858 -47.3180 99.4320 +38.3838 -47.0731 90.6950 +66.6118 -46.6372 65.5800 +83.3218 -46.1013 31.2060 +85.5488 -45.5453 -7.1300 +-73.0093 -73.7657 -40.9980 +-72.4343 -73.4527 -2.4870 +-67.2723 -76.2907 28.3820 +-53.0073 -78.7878 55.9400 +-28.6203 -80.5249 75.4360 +0.3247 -81.1150 82.6150 +31.9197 -80.4871 76.7160 +55.6667 -78.5602 56.5610 +67.8877 -75.9043 28.0910 +73.0557 -73.0683 -2.5400 +73.8947 -74.3903 -41.2200 +-54.9104 -98.0448 -35.4650 +-54.8404 -97.5279 2.7920 +-36.5114 -100.8529 37.1670 +0.2156 -102.1780 50.6080 +36.7816 -100.8491 36.3970 +55.6666 -97.6251 2.7300 +54.9876 -98.0911 -35.5410 +-29.4134 -112.4490 8.8390 +0.1076 -114.8920 14.6570 +29.8426 -112.1560 8.8000 +0.0045 -118.5650 -23.0780 +-86.0761 -44.9897 -67.9860 + 85.7939 -45.0093 -68.0310 +-86.0761 -24.9897 -67.9860 + 85.7939 -25.0093 -68.0310 +Labels +LPA +RPA +Nz +Fp1 +Fpz +Fp2 +aF3 +aF1 +aFz +aF2 +aF4 +F7 +F5 +F3 +F1 +Fz +F2 +F4 +F6 +F8 +iT1 +T1 +pF5 +pF3 +pF1 +pFz +pF2 +pF4 +pF6 +T2 +iT2 +iT3 +T3 +C5 +C3 +C1 +Cz +C2 +C4 +C6 +T4 +iT4 +T3A +pC5 +pC3 +pC1 +pCz +pC2 +pC4 +pC6 +T4A +iT5 +T5 +P5 +P3 +P1 +Pz +P2 +P4 +P6 +T6 +iT6 +pO5 +pO3 +pO1 +pOz +pO2 +pO4 +pO6 +O1 +Oz +O2 +Iz +M1 +M2 +A1 +A2 diff --git a/mne/channels/data/montages/standard_primed.elc b/mne/channels/data/montages/standard_primed.elc new file mode 100644 index 0000000..00ec918 --- /dev/null +++ b/mne/channels/data/montages/standard_primed.elc @@ -0,0 +1,212 @@ +# ASA electrode file +ReferenceLabel avg +UnitPosition mm +NumberPositions= 103 +Positions +-86.0761 -19.9897 -47.9860 +85.7939 -20.0093 -48.0310 +0.0083 86.8110 -39.9830 +-29.4367 83.9171 -6.9900 +0.1123 88.2470 -1.7130 +29.8723 84.8959 -7.0800 +-54.8397 68.5722 -10.5900 +-45.4307 72.8622 5.9780 +-33.7007 76.8371 21.2270 +-18.4717 79.9041 32.7520 +0.2313 80.7710 35.4170 +19.8203 80.3019 32.7640 +35.7123 77.7259 21.9560 +46.5843 73.8078 6.0340 +55.7433 69.6568 -10.7550 +-70.2629 42.4743 -11.4200 +-64.4658 48.0353 16.9210 +-50.2438 53.1112 42.1920 +-27.4958 56.9311 60.3420 +0.3122 58.5120 66.4620 +29.5142 57.6019 59.5400 +51.8362 54.3048 40.8140 +67.9142 49.8297 16.3670 +73.0431 44.4217 -12.0000 +-80.7750 14.1203 -11.1350 +-77.2149 18.6433 24.4600 +-60.1819 22.7162 55.5440 +-34.0619 26.0111 79.9870 +0.3761 27.3900 88.6680 +34.7841 26.4379 78.8080 +62.2931 23.7228 55.6300 +79.5341 19.9357 24.4380 +81.8151 15.4167 -11.3300 +-84.0759 14.5673 -50.4290 +-80.7750 14.1203 -11.1350 +-77.2149 18.6433 24.4600 +-60.1819 22.7162 55.5440 +-34.0619 26.0111 79.9870 +0.3761 27.3900 88.6680 +34.7841 26.4379 78.8080 +62.2931 23.7228 55.6300 +79.5341 19.9357 24.4380 +81.8151 15.4167 -11.3300 +84.1131 14.3647 -50.5380 +-84.1611 -16.0187 -9.3460 +-80.2801 -13.7597 29.1600 +-65.3581 -11.6317 64.3580 +-36.1580 -9.9839 89.7520 +0.4009 -9.1670 100.2440 +37.6720 -9.6241 88.4120 +67.1179 -10.9003 63.5800 +83.4559 -12.7763 29.2080 +85.0799 -15.0203 -9.4900 +-84.8302 -46.0217 -7.0560 +-79.5922 -46.5507 30.9490 +-63.5562 -47.0088 65.6240 +-35.5131 -47.2919 91.3150 +0.3858 -47.3180 99.4320 +38.3838 -47.0731 90.6950 +66.6118 -46.6372 65.5800 +83.3218 -46.1013 31.2060 +85.5488 -45.5453 -7.1300 +-84.8302 -46.0217 -7.0560 +-79.5922 -46.5507 30.9490 +-63.5562 -47.0088 65.6240 +-35.5131 -47.2919 91.3150 +0.3858 -47.3180 99.4320 +38.3838 -47.0731 90.6950 +66.6118 -46.6372 65.5800 +83.3218 -46.1013 31.2060 +85.5488 -45.5453 -7.1300 +-73.0093 -73.7657 -40.9980 +-72.4343 -73.4527 -2.4870 +-67.2723 -76.2907 28.3820 +-53.0073 -78.7878 55.9400 +-28.6203 -80.5249 
75.4360 +0.3247 -81.1150 82.6150 +31.9197 -80.4871 76.7160 +55.6667 -78.5602 56.5610 +67.8877 -75.9043 28.0910 +73.0557 -73.0683 -2.5400 +73.8947 -74.3903 -41.2200 +-54.9104 -98.0448 -35.4650 +-54.8404 -97.5279 2.7920 +-48.4244 -99.3408 21.5990 +-36.5114 -100.8529 37.1670 +-18.9724 -101.7680 46.5360 +0.2156 -102.1780 50.6080 +19.8776 -101.7930 46.3930 +36.7816 -100.8491 36.3970 +49.8196 -99.4461 21.7270 +55.6666 -97.6251 2.7300 +54.9876 -98.0911 -35.5410 +-29.4134 -112.4490 8.8390 +0.1076 -114.8920 14.6570 +29.8426 -112.1560 8.8000 +-29.8184 -114.5700 -29.2160 +0.0045 -118.5650 -23.0780 +29.7416 -114.2600 -29.2560 +-86.0761 -44.9897 -67.9860 + 85.7939 -45.0093 -68.0310 +-86.0761 -24.9897 -67.9860 + 85.7939 -25.0093 -68.0310 +Labels +LPA +RPA +Nz +Fp1 +Fpz +Fp2 +F7' +F5' +F3' +F1' +Fz' +F2' +F4' +F6' +F8' +F7 +F5 +F3 +F1 +Fz +F2 +F4 +F6 +F8 +F7'' +F5'' +F3'' +F1'' +Fz'' +F2'' +F4'' +F6'' +F8'' +T1 +T3' +C5' +C3' +C1' +Cz' +C2' +C4' +C6' +T4' +T2 +T3 +C5 +C3 +C1 +Cz +C2 +C4 +C6 +T4 +T3'' +C5'' +C3'' +C1'' +Cz'' +C2'' +C4'' +C6'' +T4'' +T5' +P5' +P3' +P1' +Pz' +P2' +P4' +P6' +T6' +Cb1' +T5 +P5 +P3 +P1 +Pz +P2 +P4 +P6 +T6 +Cb2' +Cb1 +O1' +P5'' +P3'' +P1'' +Pz'' +P2'' +P4'' +P6'' +O2' +Cb2 +O1 +Oz +O2 +Cb1'' +Iz +Cb2'' +M1 +M2 +A1 +A2 diff --git a/mne/channels/data/neighbors/KIT-157_neighb.mat b/mne/channels/data/neighbors/KIT-157_neighb.mat new file mode 100644 index 0000000..1cae3fc Binary files /dev/null and b/mne/channels/data/neighbors/KIT-157_neighb.mat differ diff --git a/mne/channels/data/neighbors/KIT-208_neighb.mat b/mne/channels/data/neighbors/KIT-208_neighb.mat new file mode 100644 index 0000000..81de840 Binary files /dev/null and b/mne/channels/data/neighbors/KIT-208_neighb.mat differ diff --git a/mne/channels/data/neighbors/KIT-NYU-2019_neighb.mat b/mne/channels/data/neighbors/KIT-NYU-2019_neighb.mat new file mode 100644 index 0000000..700d193 Binary files /dev/null and b/mne/channels/data/neighbors/KIT-NYU-2019_neighb.mat differ diff --git a/mne/channels/data/neighbors/KIT-UMD-1_neighb.mat b/mne/channels/data/neighbors/KIT-UMD-1_neighb.mat new file mode 100644 index 0000000..f860666 Binary files /dev/null and b/mne/channels/data/neighbors/KIT-UMD-1_neighb.mat differ diff --git a/mne/channels/data/neighbors/KIT-UMD-2_neighb.mat b/mne/channels/data/neighbors/KIT-UMD-2_neighb.mat new file mode 100644 index 0000000..19ad03c Binary files /dev/null and b/mne/channels/data/neighbors/KIT-UMD-2_neighb.mat differ diff --git a/mne/channels/data/neighbors/KIT-UMD-3_neighb.mat b/mne/channels/data/neighbors/KIT-UMD-3_neighb.mat new file mode 100644 index 0000000..c7ded3d Binary files /dev/null and b/mne/channels/data/neighbors/KIT-UMD-3_neighb.mat differ diff --git a/mne/channels/data/neighbors/KIT-UMD-4_neighb.mat b/mne/channels/data/neighbors/KIT-UMD-4_neighb.mat new file mode 100644 index 0000000..55158e7 Binary files /dev/null and b/mne/channels/data/neighbors/KIT-UMD-4_neighb.mat differ diff --git a/mne/channels/data/neighbors/__init__.py b/mne/channels/data/neighbors/__init__.py new file mode 100644 index 0000000..a07b134 --- /dev/null +++ b/mne/channels/data/neighbors/__init__.py @@ -0,0 +1,13 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Neighbor definitions for clustering permutation analysis.""" +# This is a selection of files from http://fieldtrip.fcdonders.nl/template +# Additional definitions can be obtained through the FieldTrip software. 
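+# These files are read in MNE-Python with mne.channels.read_ch_adjacency, +# which returns a sparse channel adjacency matrix (plus the channel names) +# for use in cluster-based permutation statistics.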
+# For additional information on how these definitions were computed, please +# consider the related fieldtrip documentation: +# http://fieldtrip.fcdonders.nl/template/neighbours. +# +# KIT neighbor files were computed with ft_prepare_neighbours using the +# triangulation method. diff --git a/mne/channels/data/neighbors/biosemi16_neighb.mat b/mne/channels/data/neighbors/biosemi16_neighb.mat new file mode 100644 index 0000000..56b7fb6 Binary files /dev/null and b/mne/channels/data/neighbors/biosemi16_neighb.mat differ diff --git a/mne/channels/data/neighbors/biosemi32_neighb.mat b/mne/channels/data/neighbors/biosemi32_neighb.mat new file mode 100644 index 0000000..1c29040 Binary files /dev/null and b/mne/channels/data/neighbors/biosemi32_neighb.mat differ diff --git a/mne/channels/data/neighbors/biosemi64_neighb.mat b/mne/channels/data/neighbors/biosemi64_neighb.mat new file mode 100644 index 0000000..4afbf6f Binary files /dev/null and b/mne/channels/data/neighbors/biosemi64_neighb.mat differ diff --git a/mne/channels/data/neighbors/bti148_neighb.mat b/mne/channels/data/neighbors/bti148_neighb.mat new file mode 100644 index 0000000..527e435 Binary files /dev/null and b/mne/channels/data/neighbors/bti148_neighb.mat differ diff --git a/mne/channels/data/neighbors/bti248_neighb.mat b/mne/channels/data/neighbors/bti248_neighb.mat new file mode 100644 index 0000000..9bde76b Binary files /dev/null and b/mne/channels/data/neighbors/bti248_neighb.mat differ diff --git a/mne/channels/data/neighbors/bti248grad_neighb.mat b/mne/channels/data/neighbors/bti248grad_neighb.mat new file mode 100644 index 0000000..4e5d620 Binary files /dev/null and b/mne/channels/data/neighbors/bti248grad_neighb.mat differ diff --git a/mne/channels/data/neighbors/ctf151_neighb.mat b/mne/channels/data/neighbors/ctf151_neighb.mat new file mode 100644 index 0000000..611a0bc Binary files /dev/null and b/mne/channels/data/neighbors/ctf151_neighb.mat differ diff --git a/mne/channels/data/neighbors/ctf275_neighb.mat b/mne/channels/data/neighbors/ctf275_neighb.mat new file mode 100644 index 0000000..91cf84e Binary files /dev/null and b/mne/channels/data/neighbors/ctf275_neighb.mat differ diff --git a/mne/channels/data/neighbors/ctf64_neighb.mat b/mne/channels/data/neighbors/ctf64_neighb.mat new file mode 100644 index 0000000..fd001e6 Binary files /dev/null and b/mne/channels/data/neighbors/ctf64_neighb.mat differ diff --git a/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat b/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat new file mode 100644 index 0000000..020392d Binary files /dev/null and b/mne/channels/data/neighbors/easycap128ch-avg_neighb.mat differ diff --git a/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat b/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat new file mode 100644 index 0000000..62c88f0 Binary files /dev/null and b/mne/channels/data/neighbors/easycap32ch-avg_neighb.mat differ diff --git a/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat b/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat new file mode 100644 index 0000000..e59536c Binary files /dev/null and b/mne/channels/data/neighbors/easycap64ch-avg_neighb.mat differ diff --git a/mne/channels/data/neighbors/easycapM11_neighb.mat b/mne/channels/data/neighbors/easycapM11_neighb.mat new file mode 100644 index 0000000..28131e7 Binary files /dev/null and b/mne/channels/data/neighbors/easycapM11_neighb.mat differ diff --git a/mne/channels/data/neighbors/easycapM14_neighb.mat 
b/mne/channels/data/neighbors/easycapM14_neighb.mat new file mode 100644 index 0000000..be2ad3d Binary files /dev/null and b/mne/channels/data/neighbors/easycapM14_neighb.mat differ diff --git a/mne/channels/data/neighbors/easycapM15_neighb.mat b/mne/channels/data/neighbors/easycapM15_neighb.mat new file mode 100644 index 0000000..7dfa554 Binary files /dev/null and b/mne/channels/data/neighbors/easycapM15_neighb.mat differ diff --git a/mne/channels/data/neighbors/easycapM1_neighb.mat b/mne/channels/data/neighbors/easycapM1_neighb.mat new file mode 100644 index 0000000..010337b Binary files /dev/null and b/mne/channels/data/neighbors/easycapM1_neighb.mat differ diff --git a/mne/channels/data/neighbors/ecog256_neighb.mat b/mne/channels/data/neighbors/ecog256_neighb.mat new file mode 100644 index 0000000..a78f40e Binary files /dev/null and b/mne/channels/data/neighbors/ecog256_neighb.mat differ diff --git a/mne/channels/data/neighbors/ecog256bipolar_neighb.mat b/mne/channels/data/neighbors/ecog256bipolar_neighb.mat new file mode 100644 index 0000000..36de619 Binary files /dev/null and b/mne/channels/data/neighbors/ecog256bipolar_neighb.mat differ diff --git a/mne/channels/data/neighbors/eeg1010_neighb.mat b/mne/channels/data/neighbors/eeg1010_neighb.mat new file mode 100644 index 0000000..fa639f4 Binary files /dev/null and b/mne/channels/data/neighbors/eeg1010_neighb.mat differ diff --git a/mne/channels/data/neighbors/elec1005_neighb.mat b/mne/channels/data/neighbors/elec1005_neighb.mat new file mode 100644 index 0000000..a035749 Binary files /dev/null and b/mne/channels/data/neighbors/elec1005_neighb.mat differ diff --git a/mne/channels/data/neighbors/elec1010_neighb.mat b/mne/channels/data/neighbors/elec1010_neighb.mat new file mode 100644 index 0000000..181a795 Binary files /dev/null and b/mne/channels/data/neighbors/elec1010_neighb.mat differ diff --git a/mne/channels/data/neighbors/elec1020_neighb.mat b/mne/channels/data/neighbors/elec1020_neighb.mat new file mode 100644 index 0000000..6ed27bc Binary files /dev/null and b/mne/channels/data/neighbors/elec1020_neighb.mat differ diff --git a/mne/channels/data/neighbors/itab153_neighb.mat b/mne/channels/data/neighbors/itab153_neighb.mat new file mode 100644 index 0000000..7f92401 Binary files /dev/null and b/mne/channels/data/neighbors/itab153_neighb.mat differ diff --git a/mne/channels/data/neighbors/itab28_neighb.mat b/mne/channels/data/neighbors/itab28_neighb.mat new file mode 100644 index 0000000..d7608aa Binary files /dev/null and b/mne/channels/data/neighbors/itab28_neighb.mat differ diff --git a/mne/channels/data/neighbors/language29ch-avg_neighb.mat b/mne/channels/data/neighbors/language29ch-avg_neighb.mat new file mode 100644 index 0000000..69e38e6 Binary files /dev/null and b/mne/channels/data/neighbors/language29ch-avg_neighb.mat differ diff --git a/mne/channels/data/neighbors/mpi_59_channels_neighb.mat b/mne/channels/data/neighbors/mpi_59_channels_neighb.mat new file mode 100644 index 0000000..eb5c226 Binary files /dev/null and b/mne/channels/data/neighbors/mpi_59_channels_neighb.mat differ diff --git a/mne/channels/data/neighbors/neuromag122cmb_neighb.mat b/mne/channels/data/neighbors/neuromag122cmb_neighb.mat new file mode 100644 index 0000000..d4ae1a3 Binary files /dev/null and b/mne/channels/data/neighbors/neuromag122cmb_neighb.mat differ diff --git a/mne/channels/data/neighbors/neuromag306cmb_neighb.mat b/mne/channels/data/neighbors/neuromag306cmb_neighb.mat new file mode 100644 index 0000000..f5a1875 Binary files /dev/null 
and b/mne/channels/data/neighbors/neuromag306cmb_neighb.mat differ diff --git a/mne/channels/data/neighbors/neuromag306mag_neighb.mat b/mne/channels/data/neighbors/neuromag306mag_neighb.mat new file mode 100644 index 0000000..d7ffc98 Binary files /dev/null and b/mne/channels/data/neighbors/neuromag306mag_neighb.mat differ diff --git a/mne/channels/data/neighbors/neuromag306planar_neighb.mat b/mne/channels/data/neighbors/neuromag306planar_neighb.mat new file mode 100644 index 0000000..aa0529e Binary files /dev/null and b/mne/channels/data/neighbors/neuromag306planar_neighb.mat differ diff --git a/mne/channels/data/neighbors/yokogawa160_neighb.mat b/mne/channels/data/neighbors/yokogawa160_neighb.mat new file mode 100644 index 0000000..ac47eab Binary files /dev/null and b/mne/channels/data/neighbors/yokogawa160_neighb.mat differ diff --git a/mne/channels/data/neighbors/yokogawa440_neighb.mat b/mne/channels/data/neighbors/yokogawa440_neighb.mat new file mode 100644 index 0000000..e40ef57 Binary files /dev/null and b/mne/channels/data/neighbors/yokogawa440_neighb.mat differ diff --git a/mne/channels/interpolation.py b/mne/channels/interpolation.py new file mode 100644 index 0000000..7d5d1a9 --- /dev/null +++ b/mne/channels/interpolation.py @@ -0,0 +1,417 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np +from numpy.polynomial.legendre import legval +from scipy.interpolate import RectBivariateSpline +from scipy.linalg import pinv +from scipy.spatial.distance import pdist, squareform + +from .._fiff.meas_info import _simplify_info +from .._fiff.pick import pick_channels, pick_info, pick_types +from ..surface import _normalize_vectors +from ..utils import _validate_type, logger, verbose, warn + + +def _calc_h(cosang, stiffness=4, n_legendre_terms=50): + """Calculate spherical spline h function between points on a sphere. + + Parameters + ---------- + cosang : array-like | float + cosine of angles between pairs of points on a spherical surface. This + is equivalent to the dot product of unit vectors. + stiffness : float + stiffness of the spline. Also referred to as ``m``. + n_legendre_terms : int + number of Legendre terms to evaluate. + """ + factors = [ + (2 * n + 1) / (n ** (stiffness - 1) * (n + 1) ** (stiffness - 1) * 4 * np.pi) + for n in range(1, n_legendre_terms + 1) + ] + return legval(cosang, [0] + factors) + + +def _calc_g(cosang, stiffness=4, n_legendre_terms=50): + """Calculate spherical spline g function between points on a sphere. + + Parameters + ---------- + cosang : array-like of float, shape(n_channels, n_channels) + cosine of angles between pairs of points on a spherical surface. This + is equivalent to the dot product of unit vectors. + stiffness : float + stiffness of the spline. + n_legendre_terms : int + number of Legendre terms to evaluate. + + Returns + ------- + G : np.ndarray of float, shape(n_channels, n_channels) + The G matrix. + """ + factors = [ + (2 * n + 1) / (n**stiffness * (n + 1) ** stiffness * 4 * np.pi) + for n in range(1, n_legendre_terms + 1) + ] + return legval(cosang, [0] + factors) + + +def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5): + """Compute interpolation matrix based on spherical splines. + + Implementation based on [1] + + Parameters + ---------- + pos_from : np.ndarray of float, shape(n_good_sensors, 3) + The positions to interpolate from. + pos_to : np.ndarray of float, shape(n_bad_sensors, 3) + The positions to interpolate.
+ alpha : float + Regularization parameter. Defaults to 1e-5. + + Returns + ------- + interpolation : np.ndarray of float, shape(len(pos_from), len(pos_to)) + The interpolation matrix that maps good signals to the location + of bad signals. + + References + ---------- + [1] Perrin, F., Pernier, J., Bertrand, O. and Echallier, JF. (1989). + Spherical splines for scalp potential and current density mapping. + Electroencephalography Clinical Neurophysiology, Feb; 72(2):184-7. + """ + pos_from = pos_from.copy() + pos_to = pos_to.copy() + n_from = pos_from.shape[0] + n_to = pos_to.shape[0] + + # normalize sensor positions to sphere + _normalize_vectors(pos_from) + _normalize_vectors(pos_to) + + # cosine angles between source positions + cosang_from = pos_from.dot(pos_from.T) + cosang_to_from = pos_to.dot(pos_from.T) + G_from = _calc_g(cosang_from) + G_to_from = _calc_g(cosang_to_from) + assert G_from.shape == (n_from, n_from) + assert G_to_from.shape == (n_to, n_from) + + if alpha is not None: + G_from.flat[:: len(G_from) + 1] += alpha + + C = np.vstack( + [ + np.hstack([G_from, np.ones((n_from, 1))]), + np.hstack([np.ones((1, n_from)), [[0]]]), + ] + ) + C_inv = pinv(C) + + interpolation = np.hstack([G_to_from, np.ones((n_to, 1))]) @ C_inv[:, :-1] + assert interpolation.shape == (n_to, n_from) + return interpolation + + +def _do_interp_dots(inst, interpolation, goods_idx, bads_idx): + """Dot product of channel mapping matrix to channel data.""" + from ..epochs import BaseEpochs + from ..evoked import Evoked + from ..io import BaseRaw + + _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), "inst") + inst._data[..., bads_idx, :] = np.matmul( + interpolation, inst._data[..., goods_idx, :] + ) + + +@verbose +def _interpolate_bads_eeg(inst, origin, exclude=None, ecog=False, verbose=None): + if exclude is None: + exclude = list() + bads_idx = np.zeros(len(inst.ch_names), dtype=bool) + goods_idx = np.zeros(len(inst.ch_names), dtype=bool) + + picks = pick_types(inst.info, meg=False, eeg=not ecog, ecog=ecog, exclude=exclude) + inst.info._check_consistency() + bads_idx[picks] = [inst.ch_names[ch] in inst.info["bads"] for ch in picks] + + if len(picks) == 0 or bads_idx.sum() == 0: + return + + goods_idx[picks] = True + goods_idx[bads_idx] = False + + pos = inst._get_channel_positions(picks) + + # Make sure only EEG are used + bads_idx_pos = bads_idx[picks] + goods_idx_pos = goods_idx[picks] + + # test spherical fit + distance = np.linalg.norm(pos - origin, axis=-1) + distance = np.mean(distance / np.mean(distance)) + if np.abs(1.0 - distance) > 0.1: + warn( + "Your spherical fit is poor, interpolation results are " + "likely to be inaccurate." 
+ ) + + pos_good = pos[goods_idx_pos] - origin + pos_bad = pos[bads_idx_pos] - origin + logger.info(f"Computing interpolation matrix from {len(pos_good)} sensor positions") + interpolation = _make_interpolation_matrix(pos_good, pos_bad) + + logger.info(f"Interpolating {len(pos_bad)} sensors") + _do_interp_dots(inst, interpolation, goods_idx, bads_idx) + + +@verbose +def _interpolate_bads_ecog(inst, origin, exclude=None, verbose=None): + _interpolate_bads_eeg(inst, origin, exclude=exclude, ecog=True, verbose=verbose) + + +def _interpolate_bads_meg( + inst, mode="accurate", origin=(0.0, 0.0, 0.04), verbose=None, ref_meg=False +): + return _interpolate_bads_meeg( + inst, mode, origin, ref_meg=ref_meg, eeg=False, verbose=verbose + ) + + +@verbose +def _interpolate_bads_nan( + inst, + ch_type, + ref_meg=False, + exclude=(), + *, + verbose=None, +): + info = _simplify_info(inst.info) + picks_type = pick_types(info, ref_meg=ref_meg, exclude=exclude, **{ch_type: True}) + use_ch_names = [inst.info["ch_names"][p] for p in picks_type] + bads_type = [ch for ch in inst.info["bads"] if ch in use_ch_names] + if len(bads_type) == 0 or len(picks_type) == 0: + return + # select the bad channels to be interpolated + picks_bad = pick_channels(inst.info["ch_names"], bads_type, exclude=[]) + inst._data[..., picks_bad, :] = np.nan + + +@verbose +def _interpolate_bads_meeg( + inst, + mode="accurate", + origin=(0.0, 0.0, 0.04), + meg=True, + eeg=True, + ref_meg=False, + exclude=(), + *, + method=None, + verbose=None, +): + from ..forward import _map_meg_or_eeg_channels + + if method is None: + method = {"meg": "MNE", "eeg": "MNE"} + bools = dict(meg=meg, eeg=eeg) + info = _simplify_info(inst.info) + for ch_type, do in bools.items(): + if not do: + continue + kw = dict(meg=False, eeg=False) + kw[ch_type] = True + picks_type = pick_types(info, ref_meg=ref_meg, exclude=exclude, **kw) + picks_good = pick_types(info, ref_meg=ref_meg, exclude="bads", **kw) + use_ch_names = [inst.info["ch_names"][p] for p in picks_type] + bads_type = [ch for ch in inst.info["bads"] if ch in use_ch_names] + if len(bads_type) == 0 or len(picks_type) == 0: + continue + # select the bad channels to be interpolated + picks_bad = pick_channels(inst.info["ch_names"], bads_type, exclude=[]) + + # do MNE based interpolation + if ch_type == "eeg": + picks_to = picks_type + bad_sel = np.isin(picks_type, picks_bad) + else: + picks_to = picks_bad + bad_sel = slice(None) + info_from = pick_info(inst.info, picks_good) + info_to = pick_info(inst.info, picks_to) + mapping = _map_meg_or_eeg_channels(info_from, info_to, mode=mode, origin=origin) + mapping = mapping[bad_sel] + _do_interp_dots(inst, mapping, picks_good, picks_bad) + + +@verbose +def _interpolate_bads_nirs(inst, exclude=(), verbose=None): + from mne.preprocessing.nirs import _validate_nirs_info + + if len(pick_types(inst.info, fnirs=True, exclude=())) == 0: + return + + # Returns pick of all nirs and ensures channels are correctly ordered + picks_nirs = _validate_nirs_info(inst.info) + nirs_ch_names = [inst.info["ch_names"][p] for p in picks_nirs] + nirs_ch_names = [ch for ch in nirs_ch_names if ch not in exclude] + bads_nirs = [ch for ch in inst.info["bads"] if ch in nirs_ch_names] + if len(bads_nirs) == 0: + return + picks_bad = pick_channels(inst.info["ch_names"], bads_nirs, exclude=[]) + bads_mask = [p in picks_bad for p in picks_nirs] + + chs = [inst.info["chs"][i] for i in picks_nirs] + locs3d = np.array([ch["loc"][:3] for ch in chs]) + + dist = pdist(locs3d) + dist = squareform(dist) + 
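+    # Fill each bad channel with a verbatim copy of its nearest good channel. +    # Validated fNIRS channels come in wavelength pairs that share a location, +    # so np.argmin returns the first (lower-indexed) member of the nearest +    # good pair and the (bad % 2) offset re-selects the member whose +    # wavelength matches the bad channel.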
+ for bad in picks_bad: + dists_to_bad = dist[bad] + # Ignore distances to self + dists_to_bad[dists_to_bad == 0] = np.inf + # Ignore distances to other bad channels + dists_to_bad[bads_mask] = np.inf + # Find closest remaining channels for same frequency + closest_idx = np.argmin(dists_to_bad) + (bad % 2) + inst._data[bad] = inst._data[closest_idx] + + # TODO: this seems like a bug because it does not respect reset_bads + inst.info["bads"] = [ch for ch in inst.info["bads"] if ch in exclude] + + return inst + + +def _find_seeg_electrode_shaft(pos, tol_shaft=0.002, tol_spacing=1): + # 1) find nearest neighbor to define the electrode shaft line + # 2) find all contacts on the same line + # 3) remove contacts with large distances + + dist = squareform(pdist(pos)) + np.fill_diagonal(dist, np.inf) + + shafts = list() + shaft_ts = list() + for i, n1 in enumerate(pos): + if any([i in shaft for shaft in shafts]): + continue + n2 = pos[np.argmin(dist[i])] # 1 + # https://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html + shaft_dists = np.linalg.norm( + np.cross((pos - n1), (pos - n2)), axis=1 + ) / np.linalg.norm(n2 - n1) + shaft = np.where(shaft_dists < tol_shaft)[0] # 2 + shaft_prev = None + for _ in range(10): # avoid potential cycles + if np.array_equal(shaft, shaft_prev): + break + shaft_prev = shaft + # compute median shaft line + v = np.median( + [ + pos[i] - pos[j] + for idx, i in enumerate(shaft) + for j in shaft[idx + 1 :] + ], + axis=0, + ) + c = np.median(pos[shaft], axis=0) + # recompute distances + shaft_dists = np.linalg.norm( + np.cross((pos - c), (pos - c + v)), axis=1 + ) / np.linalg.norm(v) + shaft = np.where(shaft_dists < tol_shaft)[0] + ts = np.array([np.dot(c - n0, v) / np.linalg.norm(v) ** 2 for n0 in pos[shaft]]) + shaft_order = np.argsort(ts) + shaft = shaft[shaft_order] + ts = ts[shaft_order] + + # only include the largest group with spacing with the error tolerance + # avoid interpolating across spans between contacts + t_diffs = np.diff(ts) + t_diff_med = np.median(t_diffs) + spacing_errors = (t_diffs - t_diff_med) / t_diff_med + groups = list() + group = [shaft[0]] + for j in range(len(shaft) - 1): + if spacing_errors[j] > tol_spacing: + groups.append(group) + group = [shaft[j + 1]] + else: + group.append(shaft[j + 1]) + groups.append(group) + group = [group for group in groups if i in group][0] + ts = ts[np.isin(shaft, group)] + shaft = np.array(group, dtype=int) + + shafts.append(shaft) + shaft_ts.append(ts) + return shafts, shaft_ts + + +@verbose +def _interpolate_bads_seeg( + inst, exclude=None, tol_shaft=0.002, tol_spacing=1, verbose=None +): + if exclude is None: + exclude = list() + picks = pick_types(inst.info, meg=False, seeg=True, exclude=exclude) + inst.info._check_consistency() + bads_idx = np.isin(np.array(inst.ch_names)[picks], inst.info["bads"]) + + if len(picks) == 0 or bads_idx.sum() == 0: + return + + pos = inst._get_channel_positions(picks) + + # Make sure only sEEG are used + bads_idx_pos = bads_idx[picks] + + shafts, shaft_ts = _find_seeg_electrode_shaft( + pos, tol_shaft=tol_shaft, tol_spacing=tol_spacing + ) + + # interpolate the bad contacts + picks_bad = list(np.where(bads_idx_pos)[0]) + for shaft, ts in zip(shafts, shaft_ts): + bads_shaft = np.array([idx for idx in picks_bad if idx in shaft]) + if bads_shaft.size == 0: + continue + goods_shaft = shaft[np.isin(shaft, bads_shaft, invert=True)] + if goods_shaft.size < 4: # cubic spline requires 3 channels + msg = "No shaft" if shaft.size < 4 else "Not enough good channels" + no_shaft_chs 
= " and ".join(np.array(inst.ch_names)[bads_shaft]) + raise RuntimeError( + f"{msg} found in a line with {no_shaft_chs} " + "at least 3 good channels on the same line " + f"are required for interpolation, {goods_shaft.size} found. " + f"Dropping {no_shaft_chs} is recommended." + ) + logger.debug( + f"Interpolating {np.array(inst.ch_names)[bads_shaft]} using " + f"data from {np.array(inst.ch_names)[goods_shaft]}" + ) + bads_shaft_idx = np.where(np.isin(shaft, bads_shaft))[0] + goods_shaft_idx = np.where(~np.isin(shaft, bads_shaft))[0] + + z = inst._data[..., goods_shaft, :] + is_epochs = z.ndim == 3 + if is_epochs: + z = z.swapaxes(0, 1) + z = z.reshape(z.shape[0], -1) + y = np.arange(z.shape[-1]) + out = RectBivariateSpline(x=ts[goods_shaft_idx], y=y, z=z)( + x=ts[bads_shaft_idx], y=y + ) + if is_epochs: + out = out.reshape(bads_shaft.size, inst._data.shape[0], -1) + out = out.swapaxes(0, 1) + inst._data[..., bads_shaft, :] = out diff --git a/mne/channels/layout.py b/mne/channels/layout.py new file mode 100644 index 0000000..31d0650 --- /dev/null +++ b/mne/channels/layout.py @@ -0,0 +1,1276 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import logging +from collections import defaultdict +from copy import deepcopy +from itertools import combinations +from pathlib import Path + +import numpy as np +from scipy.spatial.distance import pdist, squareform + +from .._fiff.constants import FIFF +from .._fiff.meas_info import Info +from .._fiff.pick import _FNIRS_CH_TYPES_SPLIT, _picks_to_idx, pick_types +from ..transforms import _cart_to_sph, _pol_to_cart +from ..utils import ( + _check_ch_locs, + _check_fname, + _check_option, + _check_sphere, + _clean_names, + _ensure_int, + fill_doc, + logger, + verbose, + warn, +) +from ..viz.topomap import plot_layout +from .channels import _get_ch_info + + +class Layout: + """Sensor layouts. + + Layouts are typically loaded from a file using + :func:`~mne.channels.read_layout`. Only use this class directly if you're + constructing a new layout. + + Parameters + ---------- + box : tuple of length 4 + The box dimension (x_min, x_max, y_min, y_max). + pos : array, shape=(n_channels, 4) + The unit-normalized positions of the channels in 2d + (x, y, width, height). + names : list of str + The channel names. + ids : array-like of int + The channel ids. + kind : str + The type of Layout (e.g. 'Vectorview-all'). + """ + + def __init__(self, box, pos, names, ids, kind): + self.box = box + self.pos = pos + self.names = names + self.ids = np.array(ids) + if self.ids.ndim != 1: + raise ValueError("The channel indices should be a 1D array-like.") + self.kind = kind + + def copy(self): + """Return a copy of the layout. + + Returns + ------- + layout : instance of Layout + A deepcopy of the layout. + + Notes + ----- + .. versionadded:: 1.7 + """ + return deepcopy(self) + + def save(self, fname, overwrite=False): + """Save Layout to disk. + + Parameters + ---------- + fname : path-like + The file name (e.g. ``'my_layout.lout'``). + overwrite : bool + If True, overwrites the destination file if it exists. + + See Also + -------- + read_layout + """ + x = self.pos[:, 0] + y = self.pos[:, 1] + width = self.pos[:, 2] + height = self.pos[:, 3] + fname = _check_fname(fname, overwrite=overwrite, name=fname) + if fname.suffix == ".lout": + out_str = "{:8.2f} {:8.2f} {:8.2f} {:8.2f}\n".format(*self.box) + elif fname.suffix == ".lay": + out_str = "" + else: + raise ValueError("Unknown layout type. 
Should be of type .lout or .lay.") + + for ii in range(x.shape[0]): + out_str += ( + f"{self.ids[ii]:03d} {x[ii]:8.2f} {y[ii]:8.2f} " + f"{width[ii]:8.2f} {height[ii]:8.2f} {self.names[ii]}\n" + ) + + f = open(fname, "w") + f.write(out_str) + f.close() + + def __repr__(self): + """Return the string representation.""" + return "<Layout | {} - Channels: {}, ...>".format( + self.kind, + ", ".join(self.names[:3]), + ) + + @fill_doc + def plot(self, picks=None, show_axes=False, show=True): + """Plot the sensor positions. + + Parameters + ---------- + %(picks_nostr)s + show_axes : bool + Show layout axes if True. Defaults to False. + show : bool + Show figure if True. Defaults to True. + + Returns + ------- + fig : instance of matplotlib.figure.Figure + Figure containing the sensor topography. + + Notes + ----- + .. versionadded:: 0.12.0 + """ + return plot_layout(self, picks=picks, show_axes=show_axes, show=show) + + @verbose + def pick(self, picks=None, exclude=(), *, verbose=None): + """Pick a subset of channels. + + Parameters + ---------- + %(picks_layout)s + exclude : str | int | array-like of str or int + Set of channels to exclude, only used when ``picks`` is set to ``'all'`` or + ``None``. Exclude will not drop channels explicitly provided in ``picks``. + %(verbose)s + + Returns + ------- + layout : instance of Layout + The modified layout. + + Notes + ----- + .. versionadded:: 1.7 + """ + # TODO: all the picking functions operate on an 'info' object which is missing + # for a layout, thus we have to do the extra work here. The logic below can be + # replaced when https://github.com/mne-tools/mne-python/issues/11913 is solved. + if (isinstance(picks, str) and picks == "all") or (picks is None): + picks = deepcopy(self.names) + apply_exclude = True + elif isinstance(picks, str): + picks = [picks] + apply_exclude = False + elif isinstance(picks, slice): + try: + picks = np.arange(len(self.names))[picks] + except TypeError: + raise TypeError( + "If a slice is provided, it must be a slice of integers." + ) + apply_exclude = False + else: + try: + picks = [_ensure_int(picks)] + except TypeError: + picks = ( + list(picks) if isinstance(picks, tuple | set) else deepcopy(picks) + ) + apply_exclude = False + if apply_exclude: + if isinstance(exclude, str): + exclude = [exclude] + else: + try: + exclude = [_ensure_int(exclude)] + except TypeError: + exclude = ( + list(exclude) + if isinstance(exclude, tuple | set) + else deepcopy(exclude) + ) + for var, var_name in ((picks, "picks"), (exclude, "exclude")): + if var_name == "exclude" and not apply_exclude: + continue + if not isinstance(var, list | tuple | set | np.ndarray): + raise TypeError( + f"'{var_name}' must be a list, tuple, set or ndarray. " + f"Got {type(var)} instead." + ) + if isinstance(var, np.ndarray) and var.ndim != 1: + raise ValueError( + f"'{var_name}' must be a 1D array-like. Got {var.ndim}D instead." + ) + for k, elt in enumerate(var): + if isinstance(elt, str) and elt in self.names: + var[k] = self.names.index(elt) + continue + elif isinstance(elt, str): + raise ValueError( + f"The channel name {elt} provided in {var_name} does not match " + "any channels from the layout." + ) + try: + var[k] = _ensure_int(elt) + except TypeError: + raise TypeError( + f"All elements in '{var_name}' must be integers or strings." + ) + if not (0 <= var[k] < len(self.names)): + raise ValueError( + f"The value {elt} provided in {var_name} does not match any " + f"channels from the layout. The layout has {len(self.names)} " + "channels."
+ ) + if len(var) != len(set(var)): + warn( + f"The provided '{var_name}' has duplicates which will be ignored.", + RuntimeWarning, + ) + picks = picks.astype(int) if isinstance(picks, np.ndarray) else picks + exclude = exclude.astype(int) if isinstance(exclude, np.ndarray) else exclude + if apply_exclude: + picks = np.array(list(set(picks) - set(exclude)), dtype=int) + if len(picks) == 0: + raise RuntimeError( + "The channel selection yielded no remaining channels. Please edit " + "the arguments 'picks' and 'exclude' to include at least one " + "channel." + ) + else: + picks = np.array(list(set(picks)), dtype=int) + self.pos = self.pos[picks] + self.ids = self.ids[picks] + self.names = [self.names[k] for k in picks] + return self + + +def _read_lout(fname): + """Aux function.""" + with open(fname) as f: + box_line = f.readline() # first line contains box dimension + box = tuple(map(float, box_line.split())) + names, pos, ids = [], [], [] + for line in f: + splits = line.split() + if len(splits) == 7: + cid, x, y, dx, dy, chkind, nb = splits + name = chkind + " " + nb + else: + cid, x, y, dx, dy, name = splits + pos.append(np.array([x, y, dx, dy], dtype=np.float64)) + names.append(name) + ids.append(int(cid)) + + pos = np.array(pos) + + return box, pos, names, ids + + +def _read_lay(fname): + """Aux function.""" + with open(fname) as f: + box = None + names, pos, ids = [], [], [] + for line in f: + splits = line.split() + if len(splits) == 7: + cid, x, y, dx, dy, chkind, nb = splits + name = chkind + " " + nb + else: + cid, x, y, dx, dy, name = splits + pos.append(np.array([x, y, dx, dy], dtype=np.float64)) + names.append(name) + ids.append(int(cid)) + + pos = np.array(pos) + + return box, pos, names, ids + + +def read_layout(fname=None, *, scale=True): + """Read layout from a file. + + Parameters + ---------- + fname : path-like | str + Either the path to a ``.lout`` or ``.lay`` file or the name of a + built-in layout. c.f. Notes for a list of the available built-in + layouts. + scale : bool + Apply useful scaling for out the box plotting using ``layout.pos``. + Defaults to True. + + Returns + ------- + layout : instance of Layout + The layout. + + See Also + -------- + Layout.save + + Notes + ----- + Valid ``fname`` arguments are: + + .. table:: + :widths: auto + + +----------------------+ + | Kind | + +======================+ + | biosemi | + +----------------------+ + | CTF151 | + +----------------------+ + | CTF275 | + +----------------------+ + | CTF-275 | + +----------------------+ + | EEG1005 | + +----------------------+ + | EGI256 | + +----------------------+ + | GeodesicHeadWeb-130 | + +----------------------+ + | GeodesicHeadWeb-280 | + +----------------------+ + | KIT-125 | + +----------------------+ + | KIT-157 | + +----------------------+ + | KIT-160 | + +----------------------+ + | KIT-AD | + +----------------------+ + | KIT-AS-2008 | + +----------------------+ + | KIT-UMD-3 | + +----------------------+ + | magnesWH3600 | + +----------------------+ + | Neuromag_122 | + +----------------------+ + | Vectorview-all | + +----------------------+ + | Vectorview-grad | + +----------------------+ + | Vectorview-grad_norm | + +----------------------+ + | Vectorview-mag | + +----------------------+ + """ + readers = {".lout": _read_lout, ".lay": _read_lay} + + if isinstance(fname, str): + # is it a built-in layout? 
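+ # Built-in names resolve against the bundled layout files: the loop below + # tries the bare name first, then with ".lout", then with ".lay"; e.g. + # read_layout("biosemi") would pick up "biosemi.lout" (assuming that file + # is shipped with the package).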
+ directory = Path(__file__).parent / "data" / "layouts" + for suffix in ("", ".lout", ".lay"): + _fname = (directory / fname).with_suffix(suffix) + if _fname.exists(): + fname = _fname + break + # if not, it must be a valid path provided as str or Path + fname = _check_fname(fname, "read", must_exist=True, name="layout") + # and it must have a valid extension + _check_option("fname extension", fname.suffix, readers) + kind = fname.stem + box, pos, names, ids = readers[fname.suffix](fname) + + if scale: + pos[:, 0] -= np.min(pos[:, 0]) + pos[:, 1] -= np.min(pos[:, 1]) + scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2] + pos /= scaling + pos[:, :2] += 0.03 + pos[:, :2] *= 0.97 / 1.03 + pos[:, 2:] *= 0.94 + + return Layout(box=box, pos=pos, names=names, kind=kind, ids=ids) + + +@fill_doc +def make_eeg_layout( + info, radius=0.5, width=None, height=None, exclude="bads", csd=False +): + """Create .lout file from EEG electrode digitization. + + Parameters + ---------- + %(info_not_none)s + radius : float + Viewport radius as a fraction of main figure height. Defaults to 0.5. + width : float | None + Width of sensor axes as a fraction of main figure height. By default, + this will be the maximum width possible without axes overlapping. + height : float | None + Height of sensor axes as a fraction of main figure height. By default, + this will be the maximum height possible without axes overlapping. + exclude : list of str | str + List of channels to exclude. If empty do not exclude any. + If 'bads', exclude channels in info['bads'] (default). + csd : bool + Whether the channels contain current-source-density-transformed data. + + Returns + ------- + layout : Layout + The generated Layout. + + See Also + -------- + make_grid_layout, generate_2d_layout + """ + if not (0 <= radius <= 0.5): + raise ValueError("The radius parameter should be between 0 and 0.5.") + if width is not None and not (0 <= width <= 1.0): + raise ValueError("The width parameter should be between 0 and 1.") + if height is not None and not (0 <= height <= 1.0): + raise ValueError("The height parameter should be between 0 and 1.") + + pick_kwargs = dict(meg=False, eeg=True, ref_meg=False, exclude=exclude) + if csd: + pick_kwargs.update(csd=True, eeg=False) + picks = pick_types(info, **pick_kwargs) + loc2d = _find_topomap_coords(info, picks) + names = [info["chs"][i]["ch_name"] for i in picks] + + # Scale [x, y] to be in the range [-0.5, 0.5] + # Don't mess with the origin or aspect ratio + scale = np.maximum(-np.min(loc2d, axis=0), np.max(loc2d, axis=0)).max() * 2 + loc2d /= scale + + # If no width or height specified, calculate the maximum value possible + # without axes overlapping. + if width is None or height is None: + width, height = _box_size(loc2d, width, height, padding=0.1) + + # Scale to viewport radius + loc2d *= 2 * radius + + # Some subplot centers will be at the figure edge. Shrink everything so it + # fits in the figure. 
+ scaling = min(1 / (1.0 + width), 1 / (1.0 + height)) + loc2d *= scaling + width *= scaling + height *= scaling + + # Shift to center + loc2d += 0.5 + + n_channels = loc2d.shape[0] + pos = np.c_[ + loc2d[:, 0] - 0.5 * width, + loc2d[:, 1] - 0.5 * height, + width * np.ones(n_channels), + height * np.ones(n_channels), + ] + + box = (0, 1, 0, 1) + ids = 1 + np.arange(n_channels) + layout = Layout(box=box, pos=pos, names=names, kind="EEG", ids=ids) + return layout + + +@fill_doc +def make_grid_layout(info, picks=None, n_col=None): + """Generate .lout file for custom data, i.e., ICA sources. + + Parameters + ---------- + %(info_not_none)s + %(picks_base)s all good misc channels. + n_col : int | None + Number of columns to generate. If None, a square grid will be produced. + + Returns + ------- + layout : Layout + The generated layout. + + See Also + -------- + make_eeg_layout, generate_2d_layout + """ + picks = _picks_to_idx(info, picks, "misc") + + names = [info["chs"][k]["ch_name"] for k in picks] + + if not names: + raise ValueError("No misc data channels found.") + + ids = list(range(len(picks))) + size = len(picks) + + if n_col is None: + # prepare square-like layout + n_row = n_col = np.sqrt(size) # try square + if n_col % 1: + # try n * (n-1) rectangle + n_col, n_row = int(n_col + 1), int(n_row) + + if n_col * n_row < size: # jump to the next full square + n_row += 1 + else: + n_row = int(np.ceil(size / float(n_col))) + + # setup position grid + x, y = np.meshgrid(np.linspace(-0.5, 0.5, n_col), np.linspace(-0.5, 0.5, n_row)) + x, y = x.ravel()[:size], y.ravel()[:size] + width, height = _box_size(np.c_[x, y], padding=0.1) + + # Some axes will be at the figure edge. Shrink everything so it fits in the + # figure. Add 0.01 border around everything + border_x, border_y = (0.01, 0.01) + x_scaling = 1 / (1.0 + width + border_x) + y_scaling = 1 / (1.0 + height + border_y) + x = x * x_scaling + y = y * y_scaling + width *= x_scaling + height *= y_scaling + + # Shift to center + x += 0.5 + y += 0.5 + + # calculate pos + pos = np.c_[ + x - 0.5 * width, y - 0.5 * height, width * np.ones(size), height * np.ones(size) + ] + box = (0, 1, 0, 1) + + layout = Layout(box=box, pos=pos, names=names, kind="grid-misc", ids=ids) + return layout + + +@fill_doc +def find_layout(info, ch_type=None, exclude="bads"): + """Choose a layout based on the channels in the info 'chs' field. + + Parameters + ---------- + %(info_not_none)s + ch_type : {'mag', 'grad', 'meg', 'eeg'} | None + The channel type for selecting single channel layouts. + Defaults to None. Note, this argument will only be considered for + VectorView type layout. Use ``'meg'`` to force using the full layout + in situations where the info does only contain one sensor type. + exclude : list of str | str + List of channels to exclude. If empty do not exclude any. + If 'bads', exclude channels in info['bads'] (default). + + Returns + ------- + layout : Layout instance | None + None if layout not found. 
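+ + Examples + -------- + A minimal usage sketch (the file name here is a placeholder):: + + raw = mne.io.read_raw_fif("sample_raw.fif") + layout = find_layout(raw.info) + layout.plot()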
+ """ + _check_option("ch_type", ch_type, [None, "mag", "grad", "meg", "eeg", "csd"]) + + ( + has_vv_mag, + has_vv_grad, + is_old_vv, + has_4D_mag, + ctf_other_types, + has_CTF_grad, + n_kit_grads, + has_any_meg, + has_eeg_coils, + has_eeg_coils_and_meg, + has_eeg_coils_only, + has_neuromag_122_grad, + has_csd_coils, + ) = _get_ch_info(info) + has_vv_meg = has_vv_mag and has_vv_grad + has_vv_only_mag = has_vv_mag and not has_vv_grad + has_vv_only_grad = has_vv_grad and not has_vv_mag + if ch_type == "meg" and not has_any_meg: + raise RuntimeError("No MEG channels present. Cannot find MEG layout.") + + if ch_type == "eeg" and not has_eeg_coils: + raise RuntimeError("No EEG channels present. Cannot find EEG layout.") + + layout_name = None + if (has_vv_meg and ch_type is None) or ( + any([has_vv_mag, has_vv_grad]) and ch_type == "meg" + ): + layout_name = "Vectorview-all" + elif has_vv_only_mag or (has_vv_meg and ch_type == "mag"): + layout_name = "Vectorview-mag" + elif has_vv_only_grad or (has_vv_meg and ch_type == "grad"): + if info["ch_names"][0].endswith("X"): + layout_name = "Vectorview-grad_norm" + else: + layout_name = "Vectorview-grad" + elif has_neuromag_122_grad: + layout_name = "Neuromag_122" + elif (has_eeg_coils_only and ch_type in [None, "eeg"]) or ( + has_eeg_coils_and_meg and ch_type == "eeg" + ): + if not isinstance(info, dict | Info): + raise RuntimeError( + "Cannot make EEG layout, no measurement info " + "was passed to `find_layout`" + ) + return make_eeg_layout(info, exclude=exclude) + elif has_csd_coils and ch_type in [None, "csd"]: + return make_eeg_layout(info, exclude=exclude, csd=True) + elif has_4D_mag: + layout_name = "magnesWH3600" + elif has_CTF_grad: + layout_name = "CTF-275" + elif n_kit_grads > 0: + layout_name = _find_kit_layout(info, n_kit_grads) + + # If no known layout is found, fall back on automatic layout + if layout_name is None: + picks = _picks_to_idx(info, "data", exclude=(), with_ref_meg=False) + ch_names = [info["ch_names"][pick] for pick in picks] + xy = _find_topomap_coords(info, picks=picks, ignore_overlap=True) + return generate_2d_layout(xy, ch_names=ch_names, name="custom", normalize=True) + + layout = read_layout(fname=layout_name) + if not is_old_vv: + layout.names = _clean_names(layout.names, remove_whitespace=True) + if has_CTF_grad: + layout.names = _clean_names(layout.names, before_dash=True) + + # Apply mask for excluded channels. + if exclude == "bads": + exclude = info["bads"] + idx = [ii for ii, name in enumerate(layout.names) if name not in exclude] + layout.names = [layout.names[ii] for ii in idx] + layout.pos = layout.pos[idx] + layout.ids = layout.ids[idx] + + return layout + + +@fill_doc +def _find_kit_layout(info, n_grads): + """Determine the KIT layout. + + Parameters + ---------- + %(info_not_none)s + n_grads : int + Number of KIT-gradiometers in the info. + + Returns + ------- + kit_layout : str | None + String naming the detected KIT layout or ``None`` if layout is missing. 
+ """ + from ..io.kit.constants import KIT_LAYOUT + + if info["kit_system_id"] is not None: + # avoid circular import + return KIT_LAYOUT.get(info["kit_system_id"]) + elif n_grads == 160: + return "KIT-160" + elif n_grads == 125: + return "KIT-125" + elif n_grads > 157: + return "KIT-AD" + + # channels which are on the left hemisphere for NY and right for UMD + test_chs = ( + "MEG 13", + "MEG 14", + "MEG 15", + "MEG 16", + "MEG 25", + "MEG 26", + "MEG 27", + "MEG 28", + "MEG 29", + "MEG 30", + "MEG 31", + "MEG 32", + "MEG 57", + "MEG 60", + "MEG 61", + "MEG 62", + "MEG 63", + "MEG 64", + "MEG 73", + "MEG 90", + "MEG 93", + "MEG 95", + "MEG 96", + "MEG 105", + "MEG 112", + "MEG 120", + "MEG 121", + "MEG 122", + "MEG 123", + "MEG 124", + "MEG 125", + "MEG 126", + "MEG 142", + "MEG 144", + "MEG 153", + "MEG 154", + "MEG 155", + "MEG 156", + ) + x = [ch["loc"][0] < 0 for ch in info["chs"] if ch["ch_name"] in test_chs] + if np.all(x): + return "KIT-157" # KIT-NY + elif np.all(np.invert(x)): + raise NotImplementedError( + "Guessing sensor layout for legacy UMD " + "files is not implemented. Please convert " + "your files using MNE-Python 0.13 or " + "higher." + ) + else: + raise RuntimeError("KIT system could not be determined for data") + + +def _box_size(points, width=None, height=None, padding=0.0): + """Given a series of points, calculate an appropriate box size. + + Parameters + ---------- + points : array, shape (n_points, 2) + The centers of the axes as a list of (x, y) coordinate pairs. Normally + these are points in the range [0, 1] centered at 0.5. + width : float | None + An optional box width to enforce. When set, only the box height will be + calculated by the function. + height : float | None + An optional box height to enforce. When set, only the box width will be + calculated by the function. + padding : float + Portion of the box to reserve for padding. The value can range between + 0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding). + + Returns + ------- + width : float + Width of the box + height : float + Height of the box + """ + + def xdiff(a, b): + return np.abs(a[0] - b[0]) + + def ydiff(a, b): + return np.abs(a[1] - b[1]) + + points = np.asarray(points) + all_combinations = list(combinations(points, 2)) + + if width is None and height is None: + if len(points) <= 1: + # Trivial case first + width = 1.0 + height = 1.0 + else: + # Find the closest two points A and B. + a, b = all_combinations[np.argmin(pdist(points))] + + # The closest points define either the max width or max height. + w, h = xdiff(a, b), ydiff(a, b) + if w > h: + width = w + else: + height = h + + # At this point, either width or height is known, or both are known. + if height is None: + # Find all axes that could potentially overlap horizontally. + hdist = pdist(points, xdiff) + candidates = [all_combinations[i] for i, d in enumerate(hdist) if d < width] + + if len(candidates) == 0: + # No axes overlap, take all the height you want. + height = 1.0 + else: + # Find an appropriate height so all none of the found axes will + # overlap. + height = np.min([ydiff(*c) for c in candidates]) + + elif width is None: + # Find all axes that could potentially overlap vertically. + vdist = pdist(points, ydiff) + candidates = [all_combinations[i] for i, d in enumerate(vdist) if d < height] + + if len(candidates) == 0: + # No axes overlap, take all the width you want. + width = 1.0 + else: + # Find an appropriate width so all none of the found axes will + # overlap. 
+ width = np.min([xdiff(*c) for c in candidates]) + + # Add a bit of padding between boxes + width *= 1 - padding + height *= 1 - padding + + return width, height + + +@fill_doc +def _find_topomap_coords( + info, picks, layout=None, ignore_overlap=False, to_sphere=True, sphere=None +): + """Guess the E/MEG layout and return appropriate topomap coordinates. + + Parameters + ---------- + %(info_not_none)s + picks : str | list | slice | None + None will choose all channels. + layout : None | instance of Layout + Enforce using a specific layout. With None, a new map is generated + and a layout is chosen based on the channels in the picks + parameter. + sphere : array-like | str + Definition of the head sphere. + + Returns + ------- + coords : array, shape = (n_chs, 2) + 2 dimensional coordinates for each sensor for a topomap plot. + """ + picks = _picks_to_idx(info, picks, "all", exclude=(), allow_empty=False) + + if layout is not None: + chs = [info["chs"][i] for i in picks] + pos = [layout.pos[layout.names.index(ch["ch_name"])] for ch in chs] + pos = np.asarray(pos) + else: + pos = _auto_topomap_coords( + info, + picks, + ignore_overlap=ignore_overlap, + to_sphere=to_sphere, + sphere=sphere, + ) + + return pos + + +@fill_doc +def _auto_topomap_coords(info, picks, ignore_overlap, to_sphere, sphere): + """Make a 2 dimensional sensor map from sensor positions in an info dict. + + The default is to use the electrode locations. The fallback option is to + attempt using digitization points of kind FIFFV_POINT_EEG. This only works + with EEG and requires an equal number of digitization points and sensors. + + Parameters + ---------- + %(info_not_none)s + picks : list | str | slice | None + None will pick all channels. + ignore_overlap : bool + Whether to ignore overlapping positions in the layout. If False and + positions overlap, an error is thrown. + to_sphere : bool + If True, the radial distance of spherical coordinates is ignored, in + effect fitting the xyz-coordinates to a sphere. + sphere : array-like | str + The head sphere definition. + + Returns + ------- + locs : array, shape = (n_sensors, 2) + An array of positions of the 2 dimensional map. + """ + sphere = _check_sphere(sphere, info) + logger.debug(f"Generating coords using: {sphere}") + + picks = _picks_to_idx(info, picks, "all", exclude=(), allow_empty=False) + chs = [info["chs"][i] for i in picks] + + # Use channel locations if available + locs3d = np.array([ch["loc"][:3] for ch in chs]) + + # If electrode locations are not available, use digitization points + if not _check_ch_locs(info=info, picks=picks): + logging.warning( + "Did not find any electrode locations (in the info " + "object), will attempt to use digitization points " + "instead. However, if digitization points do not " + "correspond to the EEG electrodes, this will lead to " + "bad results. Please verify that the sensor locations " + "in the plot are accurate." + ) + + # MEG/EOG/ECG sensors don't have digitization points; all requested + # channels must be EEG + for ch in chs: + if ch["kind"] != FIFF.FIFFV_EEG_CH: + raise ValueError( + "Cannot determine location of MEG/EOG/ECG " + "channels using digitization points."
+ ) + + eeg_ch_names = [ + ch["ch_name"] for ch in info["chs"] if ch["kind"] == FIFF.FIFFV_EEG_CH + ] + + # Get EEG digitization points + if info["dig"] is None or len(info["dig"]) == 0: + raise RuntimeError("No digitization points found.") + + locs3d = np.array( + [ + point["r"] + for point in info["dig"] + if point["kind"] == FIFF.FIFFV_POINT_EEG + ] + ) + + if len(locs3d) == 0: + raise RuntimeError( + "Did not find any digitization points of " + f"kind {FIFF.FIFFV_POINT_EEG} in the info." + ) + + if len(locs3d) != len(eeg_ch_names): + raise ValueError( + f"Number of EEG digitization points ({len(locs3d)}) doesn't match the " + f"number of EEG channels ({len(eeg_ch_names)})" + ) + + # We no longer center digitization points on head origin, as we work + # in head coordinates always + + # Match the digitization points with the requested + # channels. + eeg_ch_locs = dict(zip(eeg_ch_names, locs3d)) + locs3d = np.array([eeg_ch_locs[ch["ch_name"]] for ch in chs]) + + # Sometimes we can get nans + locs3d[~np.isfinite(locs3d)] = 0.0 + + # Duplicate points cause all kinds of trouble during visualization + dist = pdist(locs3d) + if len(locs3d) > 1 and np.min(dist) < 1e-10 and not ignore_overlap: + problematic_electrodes = [ + chs[elec_i]["ch_name"] + for elec_i in squareform(dist < 1e-10).any(axis=0).nonzero()[0] + ] + + raise ValueError( + "The following electrodes have overlapping positions," + " which causes problems during visualization:\n" + + ", ".join(problematic_electrodes) + ) + + if to_sphere: + # translate to sphere origin, transform/flatten Z, translate back + locs3d -= sphere[:3] + # use spherical (theta, pol) as (r, theta) for polar->cartesian + cart_coords = _cart_to_sph(locs3d) + out = _pol_to_cart(cart_coords[:, 1:][:, ::-1]) + # scale from radians to mm + out *= cart_coords[:, [0]] / (np.pi / 2.0) + out += sphere[:2] + else: + out = _pol_to_cart(_cart_to_sph(locs3d)) + return out + + +def _topo_to_sphere(pos, eegs): + """Transform xy-coordinates to sphere. + + Parameters + ---------- + pos : array-like, shape (n_channels, 2) + xy-coordinates to transform. + eegs : list of int + Indices of EEG channels that are included when calculating the sphere. + + Returns + ------- + coords : array, shape (n_channels, 3) + xyz-coordinates. + """ + xs, ys = np.array(pos).T + + sqs = np.max(np.sqrt((xs[eegs] ** 2) + (ys[eegs] ** 2))) + xs /= sqs # Shape to a sphere and normalize + ys /= sqs + + xs += 0.5 - np.mean(xs[eegs]) # Center the points + ys += 0.5 - np.mean(ys[eegs]) + + xs = xs * 2.0 - 1.0 # Values ranging from -1 to 1 + ys = ys * 2.0 - 1.0 + + rs = np.clip(np.sqrt(xs**2 + ys**2), 0.0, 1.0) + alphas = np.arccos(rs) + zs = np.sin(alphas) + return np.column_stack([xs, ys, zs]) + + +@fill_doc +def _pair_grad_sensors( + info, layout=None, topomap_coords=True, exclude="bads", raise_error=True +): + """Find the picks for pairing grad channels. + + Parameters + ---------- + %(info_not_none)s + layout : Layout | None + The layout if available. Defaults to None. + topomap_coords : bool + Return the coordinates for a topomap plot along with the picks. If + False, only picks are returned. Defaults to True. + exclude : list of str | str + List of channels to exclude. If empty, do not exclude any. + If 'bads', exclude channels in info['bads']. Defaults to 'bads'. + raise_error : bool + Whether to raise an error when no pairs are found. If False, raises a + warning. + + Returns + ------- + picks : array of int + Picks for the grad channels, ordered in pairs.
+ coords : array, shape = (n_grad_channels, 3) + Coordinates for a topomap plot (optional, only returned if + topomap_coords == True). + """ + # find all complete pairs of grad channels + pairs = defaultdict(list) + grad_picks = pick_types(info, meg="grad", ref_meg=False, exclude=exclude) + + _, has_vv_grad, *_, has_neuromag_122_grad, _ = _get_ch_info(info) + + for i in grad_picks: + ch = info["chs"][i] + name = ch["ch_name"] + if has_vv_grad and name.startswith("MEG"): + if name.endswith(("2", "3")): + key = name[-4:-1] + pairs[key].append(ch) + if has_neuromag_122_grad and name.startswith("MEG"): + key = (int(name[-3:]) - 1) // 2 + pairs[key].append(ch) + + pairs = [p for p in pairs.values() if len(p) == 2] + if len(pairs) == 0: + if raise_error: + raise ValueError("No 'grad' channel pairs found.") + else: + warn("No 'grad' channel pairs found.") + return list() + + # find the picks corresponding to the grad channels + grad_chs = sum(pairs, []) + ch_names = info["ch_names"] + picks = [ch_names.index(c["ch_name"]) for c in grad_chs] + + if topomap_coords: + shape = (len(pairs), 2, -1) + coords = _find_topomap_coords(info, picks, layout).reshape(shape).mean(axis=1) + return picks, coords + else: + return picks + + +def _merge_ch_data(data, ch_type, names, method="rms"): + """Merge data from channel pairs. + + Parameters + ---------- + data : array, shape = (n_channels, ..., n_times) + Data for channels, ordered in pairs. + ch_type : str + Channel type. + names : list + List of channel names. + method : str + Can be 'rms' or 'mean'. + + Returns + ------- + data : array, shape = (n_channels / 2, ..., n_times) + The root mean square or mean for each pair. + names : list + List of channel names. + """ + if ch_type == "grad": + data = _merge_grad_data(data, method) + else: + assert ch_type in _FNIRS_CH_TYPES_SPLIT + data, names = _merge_nirs_data(data, names) + return data, names + + +def _merge_grad_data(data, method="rms"): + """Merge data from channel pairs using the RMS or mean. + + Parameters + ---------- + data : array, shape = (n_channels, ..., n_times) + Data for channels, ordered in pairs. + method : str + Can be 'rms' or 'mean'. + + Returns + ------- + data : array, shape = (n_channels / 2, ..., n_times) + The root mean square or mean for each pair. + """ + data, orig_shape = data.reshape((len(data) // 2, 2, -1)), data.shape + if method == "mean": + data = np.mean(data, axis=1) + elif method == "rms": + data = np.sqrt(np.sum(data**2, axis=1) / 2) + else: + raise ValueError(f'method must be "rms" or "mean", got {method}.') + return data.reshape(data.shape[:1] + orig_shape[1:]) + + +def _merge_nirs_data(data, merged_names): + """Merge data from multiple NIRS channels using the mean. + + Channel names that have an x in them will be merged. The first channel in + the name is replaced with the mean of all listed channels. The other + channels are removed. + + Parameters + ---------- + data : array, shape = (n_channels, ..., n_times) + Data for channels. + merged_names : list + List of strings containing the channel names. Channels that are to be + merged contain an x between them. + + Returns + ------- + data : array + Data for channels with requested channels merged. Channels used in the + merge are removed from the array.
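+ + For example, merging two hypothetical hbo channels into their mean:: + + data = np.array([[1.0, 1.0], [3.0, 3.0]]) + names = ["S1_D1 hboxS1_D2 hbo", "S1_D2 hbo"] + data, names = _merge_nirs_data(data, names) + # data -> [[2.0, 2.0]], names -> ["S1_D1 hboxS1_D2 hbo"]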
+ """ + to_remove = np.empty(0, dtype=np.int32) + for idx, ch in enumerate(merged_names): + if "x" in ch: + indices = np.empty(0, dtype=np.int32) + channels = ch.split("x") + for sub_ch in channels[1:]: + indices = np.append(indices, merged_names.index(sub_ch)) + data[idx] = np.mean(data[np.append(idx, indices)], axis=0) + to_remove = np.append(to_remove, indices) + to_remove = np.unique(to_remove) + for rem in sorted(to_remove, reverse=True): + del merged_names[rem] + data = np.delete(data, rem, 0) + return data, merged_names + + +def generate_2d_layout( + xy, + w=0.07, + h=0.05, + pad=0.02, + ch_names=None, + ch_indices=None, + name="ecog", + bg_image=None, + normalize=True, +): + """Generate a custom 2D layout from xy points. + + Generates a 2-D layout for plotting with plot_topo methods and + functions. XY points will be normalized between 0 and 1, where + normalization extremes will be either the min/max of xy, or + the width/height of bg_image. + + Parameters + ---------- + xy : ndarray, shape (N, 2) + The xy coordinates of sensor locations. + w : float + The width of each sensor's axis (between 0 and 1). + h : float + The height of each sensor's axis (between 0 and 1). + pad : float + Portion of the box to reserve for padding. The value can range between + 0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding). + ch_names : list + The names of each channel. Must be a list of strings, with one + string per channel. + ch_indices : list + Index of each channel - must be a collection of unique integers, + one index per channel. + name : str + The name of this layout type. + bg_image : path-like | ndarray + The image over which sensor axes will be plotted. Either a path to an + image file, or an array that can be plotted with plt.imshow. If + provided, xy points will be normalized by the width/height of this + image. If not, xy points will be normalized by their own min/max. + normalize : bool + Whether to normalize the coordinates to run from 0 to 1. Defaults to + True. + + Returns + ------- + layout : Layout + A Layout object that can be plotted with plot_topo + functions and methods. + + See Also + -------- + make_eeg_layout, make_grid_layout + + Notes + ----- + .. versionadded:: 0.9.0 + """ + import matplotlib.pyplot as plt + + if ch_indices is None: + ch_indices = np.arange(xy.shape[0]) + if ch_names is None: + ch_names = list(map(str, ch_indices)) + + if len(ch_names) != len(ch_indices): + raise ValueError("# channel names and indices must be equal") + if len(ch_names) != len(xy): + raise ValueError("# channel names and xy vals must be equal") + + x, y = xy.copy().astype(float).T + + # Normalize xy to 0-1 + if bg_image is not None: + # Normalize by image dimensions + img = plt.imread(bg_image) if isinstance(bg_image, str) else bg_image + x /= img.shape[1] + y /= img.shape[0] + elif normalize: + # Normalize x and y by their maxes + for i_dim in [x, y]: + i_dim -= i_dim.min(0) + i_dim /= i_dim.max(0) - i_dim.min(0) + + # Create box and pos variable + box = _box_size(np.vstack([x, y]).T, padding=pad) + box = (0, 0, box[0], box[1]) + w, h = (np.array([i] * x.shape[0]) for i in [w, h]) + loc_params = np.vstack([x, y, w, h]).T + + layout = Layout(box, loc_params, ch_names, ch_indices, name) + return layout diff --git a/mne/channels/montage.py b/mne/channels/montage.py new file mode 100644 index 0000000..a6ded68 --- /dev/null +++ b/mne/channels/montage.py @@ -0,0 +1,1846 @@ +# Authors: The MNE-Python contributors. 
+# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os.path as op +import re +from collections import OrderedDict +from copy import deepcopy +from dataclasses import dataclass + +import numpy as np + +from .._fiff._digitization import ( + _coord_frame_const, + _count_points_by_type, + _ensure_fiducials_head, + _format_dig_points, + _get_data_as_dict_from_dig, + _get_dig_eeg, + _get_fid_coords, + _make_dig_points, + _read_dig_fif, + write_dig, +) +from .._fiff.constants import CHANNEL_LOC_ALIASES, FIFF +from .._fiff.meas_info import create_info +from .._fiff.open import fiff_open +from .._fiff.pick import _picks_to_idx, channel_type, pick_types +from .._freesurfer import get_mni_fiducials +from ..defaults import HEAD_SIZE_DEFAULT +from ..transforms import ( + Transform, + _ensure_trans, + _fit_matched_points, + _frame_to_str, + _quat_to_affine, + _sph_to_cart, + _topo_to_sph, + _verbose_frames, + apply_trans, + get_ras_to_neuromag_trans, +) +from ..utils import ( + _check_fname, + _check_option, + _on_missing, + _pl, + _validate_type, + check_fname, + copy_function_doc_to_method_doc, + fill_doc, + verbose, + warn, +) +from ..utils.docs import docdict +from ..viz import plot_montage +from ._dig_montage_utils import _parse_brainvision_dig_montage, _read_dig_montage_egi + + +@dataclass +class _BuiltinStandardMontage: + name: str + description: str + + +_BUILTIN_STANDARD_MONTAGES = [ + _BuiltinStandardMontage( + name="standard_1005", + description="Electrodes are named and positioned according to the " + "international 10-05 system (343+3 locations)", + ), + _BuiltinStandardMontage( + name="standard_1020", + description="Electrodes are named and positioned according to the " + "international 10-20 system (94+3 locations)", + ), + _BuiltinStandardMontage( + name="standard_alphabetic", + description="Electrodes are named with LETTER-NUMBER combinations " + "(A1, B2, F4, …) (65+3 locations)", + ), + _BuiltinStandardMontage( + name="standard_postfixed", + description="Electrodes are named according to the international " + "10-20 system using postfixes for intermediate positions " + "(100+3 locations)", + ), + _BuiltinStandardMontage( + name="standard_prefixed", + description="Electrodes are named according to the international " + "10-20 system using prefixes for intermediate positions " + "(74+3 locations)", + ), + _BuiltinStandardMontage( + name="standard_primed", + description="Electrodes are named according to the international " + "10-20 system using prime marks (' and '') for " + "intermediate positions (100+3 locations)", + ), + _BuiltinStandardMontage( + name="biosemi16", + description="BioSemi cap with 16 electrodes (16+3 locations)", + ), + _BuiltinStandardMontage( + name="biosemi32", + description="BioSemi cap with 32 electrodes (32+3 locations)", + ), + _BuiltinStandardMontage( + name="biosemi64", + description="BioSemi cap with 64 electrodes (64+3 locations)", + ), + _BuiltinStandardMontage( + name="biosemi128", + description="BioSemi cap with 128 electrodes (128+3 locations)", + ), + _BuiltinStandardMontage( + name="biosemi160", + description="BioSemi cap with 160 electrodes (160+3 locations)", + ), + _BuiltinStandardMontage( + name="biosemi256", + description="BioSemi cap with 256 electrodes (256+3 locations)", + ), + _BuiltinStandardMontage( + name="easycap-M1", + description="EasyCap with 10-05 electrode names (74 locations)", + ), + _BuiltinStandardMontage( + name="easycap-M10", + description="EasyCap with numbered electrodes (61 locations)", + ), + 
_BuiltinStandardMontage( + name="easycap-M43", + description="EasyCap with numbered electrodes (64 locations)", + ), + _BuiltinStandardMontage( + name="EGI_256", + description="Geodesic Sensor Net (256 locations)", + ), + _BuiltinStandardMontage( + name="GSN-HydroCel-32", + description="HydroCel Geodesic Sensor Net and Cz (33+3 locations)", + ), + _BuiltinStandardMontage( + name="GSN-HydroCel-64_1.0", + description="HydroCel Geodesic Sensor Net (64+3 locations)", + ), + _BuiltinStandardMontage( + name="GSN-HydroCel-65_1.0", + description="HydroCel Geodesic Sensor Net and Cz (65+3 locations)", + ), + _BuiltinStandardMontage( + name="GSN-HydroCel-128", + description="HydroCel Geodesic Sensor Net (128+3 locations)", + ), + _BuiltinStandardMontage( + name="GSN-HydroCel-129", + description="HydroCel Geodesic Sensor Net and Cz (129+3 locations)", + ), + _BuiltinStandardMontage( + name="GSN-HydroCel-256", + description="HydroCel Geodesic Sensor Net (256+3 locations)", + ), + _BuiltinStandardMontage( + name="GSN-HydroCel-257", + description="HydroCel Geodesic Sensor Net and Cz (257+3 locations)", + ), + _BuiltinStandardMontage( + name="mgh60", + description="The (older) 60-channel cap used at MGH (60+3 locations)", + ), + _BuiltinStandardMontage( + name="mgh70", + description="The (newer) 70-channel BrainVision cap used at MGH " + "(70+3 locations)", + ), + _BuiltinStandardMontage( + name="artinis-octamon", + description="Artinis OctaMon fNIRS (8 sources, 2 detectors)", + ), + _BuiltinStandardMontage( + name="artinis-brite23", + description="Artinis Brite23 fNIRS (11 sources, 7 detectors)", + ), + _BuiltinStandardMontage( + name="brainproducts-RNP-BA-128", + description="Brain Products with 10-10 electrode names (128 channels)", + ), +] + + +def _check_get_coord_frame(dig): + dig_coord_frames = sorted(set(d["coord_frame"] for d in dig)) + if len(dig_coord_frames) != 1: + raise RuntimeError( + "Only a single coordinate frame in dig is supported, got " + f"{dig_coord_frames}" + ) + return _frame_to_str[dig_coord_frames.pop()] if dig_coord_frames else None + + +def get_builtin_montages(*, descriptions=False): + """Get a list of all standard montages shipping with MNE-Python. + + The names of the montages can be passed to :func:`make_standard_montage`. + + Parameters + ---------- + descriptions : bool + Whether to return not only the montage names, but also their + corresponding descriptions. If ``True``, a list of tuples is returned, + where the first tuple element is the montage name and the second is + the montage description. If ``False`` (default), only the names are + returned. + + .. versionadded:: 1.1 + + Returns + ------- + montages : list of str | list of tuple + If ``descriptions=False``, the names of all builtin montages that can + be used by :func:`make_standard_montage`. + + If ``descriptions=True``, a list of tuples ``(name, description)``. + """ + if descriptions: + return [(m.name, m.description) for m in _BUILTIN_STANDARD_MONTAGES] + else: + return [m.name for m in _BUILTIN_STANDARD_MONTAGES] + + +def make_dig_montage( + ch_pos=None, + nasion=None, + lpa=None, + rpa=None, + hsp=None, + hpi=None, + coord_frame="unknown", +): + r"""Make montage from arrays. + + Parameters + ---------- + ch_pos : dict | None + Dictionary of channel positions. Keys are channel names and values + are 3D coordinates - array of shape (3,) - in native digitizer space + in m. + nasion : None | array, shape (3,) + The position of the nasion fiducial point. 
+ This point is assumed to be in the native digitizer space in m. + lpa : None | array, shape (3,) + The position of the left periauricular fiducial point. + This point is assumed to be in the native digitizer space in m. + rpa : None | array, shape (3,) + The position of the right periauricular fiducial point. + This point is assumed to be in the native digitizer space in m. + hsp : None | array, shape (n_points, 3) + This corresponds to an array of positions of the headshape points in + 3d. These points are assumed to be in the native digitizer space in m. + hpi : None | array, shape (n_hpi, 3) + This corresponds to an array of HPI points in the native digitizer + space. They are only necessary if computation of a ``compute_dev_head_t`` + is True. + coord_frame : str + The coordinate frame of the points. Usually this is ``'unknown'`` + for native digitizer space. + Other valid values are: ``'head'``, ``'meg'``, ``'mri'``, + ``'mri_voxel'``, ``'mni_tal'``, ``'ras'``, ``'fs_tal'``, + ``'ctf_head'``, and ``'ctf_meg'``. + + .. note:: + For custom montages without fiducials, this parameter must be set + to ``'head'``. + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + DigMontage + read_dig_captrak + read_dig_egi + read_dig_fif + read_dig_localite + read_dig_polhemus_isotrak + """ + _validate_type(ch_pos, (dict, None), "ch_pos") + if ch_pos is None: + ch_names = None + else: + ch_names = list(ch_pos) + dig = _make_dig_points( + nasion=nasion, + lpa=lpa, + rpa=rpa, + hpi=hpi, + extra_points=hsp, + dig_ch_pos=ch_pos, + coord_frame=coord_frame, + ) + + return DigMontage(dig=dig, ch_names=ch_names) + + +class DigMontage: + """Montage for digitized electrode and headshape position data. + + .. warning:: Montages are typically created using one of the helper + functions in the ``See Also`` section below instead of + instantiating this class directly. + + Parameters + ---------- + dig : list of dict + The object containing all the dig points. + ch_names : list of str + The names of the EEG channels. + + See Also + -------- + read_dig_captrak + read_dig_dat + read_dig_egi + read_dig_fif + read_dig_hpts + read_dig_localite + read_dig_polhemus_isotrak + make_dig_montage + + Notes + ----- + .. versionadded:: 0.9.0 + """ + + def __init__(self, *, dig=None, ch_names=None): + dig = list() if dig is None else dig + _validate_type(item=dig, types=list, item_name="dig") + ch_names = list() if ch_names is None else ch_names + n_eeg = sum([1 for d in dig if d["kind"] == FIFF.FIFFV_POINT_EEG]) + if n_eeg != len(ch_names): + raise ValueError( + f"The number of EEG channels ({n_eeg}) does not match the number" + f" of channel names provided ({len(ch_names)})" + ) + + self.dig = dig + self.ch_names = ch_names + + def __repr__(self): + """Return string representation.""" + n_points = _count_points_by_type(self.dig) + return ( + "<DigMontage | {extra:d} extras (headshape), {hpi:d} HPIs," + " {fid:d} fiducials, {eeg:d} channels>" + ).format(**n_points) + + @copy_function_doc_to_method_doc(plot_montage) + def plot( + self, + *, + scale=None, + scale_factor=None, + show_names=True, + kind="topomap", + show=True, + sphere=None, + axes=None, + verbose=None, + ): + return plot_montage( + self, + scale=scale, + scale_factor=scale_factor, + show_names=show_names, + kind=kind, + show=show, + sphere=sphere, + axes=axes, + ) + + @fill_doc + def rename_channels(self, mapping, allow_duplicates=False): + """Rename the channels. + + Parameters + ---------- + %(mapping_rename_channels_duplicates)s + + Returns + ------- + inst : instance of DigMontage + The instance. Operates in-place.
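+ + For example, with a hypothetical channel name:: + + montage.rename_channels({"EEG061": "Fz"})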
+ """ + from .channels import rename_channels + + temp_info = create_info(list(self._get_ch_pos()), 1000.0, "eeg") + rename_channels(temp_info, mapping, allow_duplicates) + self.ch_names = temp_info["ch_names"] + + @verbose + def save(self, fname, *, overwrite=False, verbose=None): + """Save digitization points to FIF. + + Parameters + ---------- + fname : path-like + The filename to use. Should end in .fif or .fif.gz. + %(overwrite)s + %(verbose)s + + See Also + -------- + mne.channels.read_dig_fif + + Notes + ----- + .. versionchanged:: 1.9 + Added support for saving the associated channel names. + """ + fname = _check_fname(fname, overwrite=overwrite) + check_fname(fname, "montage", ("-dig.fif", "-dig.fif.gz")) + coord_frame = _check_get_coord_frame(self.dig) + write_dig( + fname, self.dig, coord_frame, overwrite=overwrite, ch_names=self.ch_names + ) + + def __iadd__(self, other): + """Add two DigMontages in place. + + Notes + ----- + Two DigMontages can only be added if there are no duplicated ch_names + and if fiducials are present they should share the same coordinate + system and location values. + """ + + def is_fid_defined(fid): + return not (fid.nasion is None and fid.lpa is None and fid.rpa is None) + + # Check for none duplicated ch_names + ch_names_intersection = set(self.ch_names).intersection(other.ch_names) + if ch_names_intersection: + raise RuntimeError( + ( + "Cannot add two DigMontage objects if they contain duplicated" + " channel names. Duplicated channel(s) found: {}." + ).format(", ".join([f"{v!r}" for v in sorted(ch_names_intersection)])) + ) + + # Check for unique matching fiducials + self_fid, self_coord = _get_fid_coords(self.dig) + other_fid, other_coord = _get_fid_coords(other.dig) + + if is_fid_defined(self_fid) and is_fid_defined(other_fid): + if self_coord != other_coord: + raise RuntimeError( + "Cannot add two DigMontage objects if " + "fiducial locations are not in the same " + "coordinate system." + ) + + for kk in self_fid: + if not np.array_equal(self_fid[kk], other_fid[kk]): + raise RuntimeError( + "Cannot add two DigMontage objects if " + "fiducial locations do not match " + f"({kk})" + ) + + # keep self + self.dig = _format_dig_points( + self.dig + + [d for d in other.dig if d["kind"] != FIFF.FIFFV_POINT_CARDINAL] + ) + else: + self.dig = _format_dig_points(self.dig + other.dig) + + self.ch_names += other.ch_names + return self + + def copy(self): + """Copy the DigMontage object. + + Returns + ------- + dig : instance of DigMontage + The copied DigMontage instance. + """ + return deepcopy(self) + + def __add__(self, other): + """Add two DigMontages.""" + out = self.copy() + out += other + return out + + def __eq__(self, other): + """Compare different DigMontage objects for equality. + + Returns + ------- + Boolean output from comparison of .dig + """ + return self.dig == other.dig and self.ch_names == other.ch_names + + def _get_ch_pos(self): + pos = [d["r"] for d in _get_dig_eeg(self.dig)] + assert len(self.ch_names) == len(pos) + return OrderedDict(zip(self.ch_names, pos)) + + def _get_dig_names(self): + NAMED_KIND = (FIFF.FIFFV_POINT_EEG,) + is_eeg = np.array([d["kind"] in NAMED_KIND for d in self.dig]) + assert len(self.ch_names) == is_eeg.sum() + dig_names = [None] * len(self.dig) + for ch_name_idx, dig_idx in enumerate(np.where(is_eeg)[0]): + dig_names[dig_idx] = self.ch_names[ch_name_idx] + + return dig_names + + def get_positions(self): + """Get all channel and fiducial positions. 
+ + Returns + ------- + positions : dict + A dictionary of the positions for channels (``ch_pos``), + coordinate frame (``coord_frame``), nasion (``nasion``), + left preauricular point (``lpa``), + right preauricular point (``rpa``), + Head Shape Polhemus (``hsp``), and + Head Position Indicator(``hpi``). + E.g.:: + + { + 'ch_pos': {'EEG061': [0, 0, 0]}, + 'nasion': [0, 0, 1], + 'coord_frame': 'mni_tal', + 'lpa': [0, 1, 0], + 'rpa': [1, 0, 0], + 'hsp': None, + 'hpi': None + } + """ + # get channel positions as dict + ch_pos = self._get_ch_pos() + + # get coordframe and fiducial coordinates + montage_bunch = _get_data_as_dict_from_dig(self.dig) + coord_frame = _frame_to_str.get(montage_bunch.coord_frame) + + # return dictionary + positions = dict( + ch_pos=ch_pos, + coord_frame=coord_frame, + nasion=montage_bunch.nasion, + lpa=montage_bunch.lpa, + rpa=montage_bunch.rpa, + hsp=montage_bunch.hsp, + hpi=montage_bunch.hpi, + ) + return positions + + @verbose + def apply_trans(self, trans, verbose=None): + """Apply a transformation matrix to the montage. + + Parameters + ---------- + trans : instance of mne.transforms.Transform + The transformation matrix to be applied. + %(verbose)s + """ + _validate_type(trans, Transform, "trans") + coord_frame = self.get_positions()["coord_frame"] + trans = _ensure_trans(trans, fro=coord_frame, to=trans["to"]) + for d in self.dig: + d["r"] = apply_trans(trans, d["r"]) + d["coord_frame"] = trans["to"] + + @verbose + def add_estimated_fiducials(self, subject, subjects_dir=None, verbose=None): + """Estimate fiducials based on FreeSurfer ``fsaverage`` subject. + + This takes a montage with the ``mri`` coordinate frame, + corresponding to the FreeSurfer RAS (xyz in the volume) T1w + image of the specific subject. It will call + :func:`mne.coreg.get_mni_fiducials` to estimate LPA, RPA and + Nasion fiducial points. + + Parameters + ---------- + %(subject)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + inst : instance of DigMontage + The instance, modified in-place. + + See Also + -------- + :ref:`tut-source-alignment` + + Notes + ----- + Since MNE uses the FIF data structure, it relies on the ``head`` + coordinate frame. Any coordinate frame can be transformed + to ``head`` if the fiducials (i.e. LPA, RPA and Nasion) are + defined. One can use this function to estimate those fiducials + and then use ``mne.channels.compute_native_head_t(montage)`` + to get the head <-> MRI transform. + """ + # get coordframe and fiducial coordinates + montage_bunch = _get_data_as_dict_from_dig(self.dig) + + # get the coordinate frame and check that it's MRI + if montage_bunch.coord_frame != FIFF.FIFFV_COORD_MRI: + raise RuntimeError( + f'Montage should be in the "mri" coordinate frame ' + f"to use `add_estimated_fiducials`. The current coordinate " + f"frame is {montage_bunch.coord_frame}" + ) + + # estimate LPA, nasion, RPA from FreeSurfer fsaverage + fids_mri = list(get_mni_fiducials(subject, subjects_dir)) + + # add those digpoints to front of montage + self.dig = fids_mri + self.dig + return self + + @verbose + def add_mni_fiducials(self, subjects_dir=None, verbose=None): + """Add fiducials to a montage in MNI space. + + Parameters + ---------- + %(subjects_dir)s + %(verbose)s + + Returns + ------- + inst : instance of DigMontage + The instance, modified in-place. + + Notes + ----- + ``fsaverage`` is in MNI space and so its fiducials can be + added to a montage in "mni_tal". 
MNI is an ACPC-aligned + coordinate system (the anterior commissure is the origin) + so since BIDS requires channel locations for ECoG, sEEG and + DBS to be in ACPC space, this function can be used to allow + those coordinates to be transformed to "head" space (origin + between LPA and RPA). + """ + montage_bunch = _get_data_as_dict_from_dig(self.dig) + + # get the coordinate frame and check that it's MNI TAL + if montage_bunch.coord_frame != FIFF.FIFFV_MNE_COORD_MNI_TAL: + raise RuntimeError( + f'Montage should be in the "mni_tal" coordinate frame ' + f"to use `add_mni_fiducials`. The current coordinate " + f"frame is {montage_bunch.coord_frame}" + ) + + fids_mni = get_mni_fiducials("fsaverage", subjects_dir) + for fid in fids_mni: + # "mri" and "mni_tal" are equivalent for fsaverage + assert fid["coord_frame"] == FIFF.FIFFV_COORD_MRI + fid["coord_frame"] = FIFF.FIFFV_MNE_COORD_MNI_TAL + self.dig = fids_mni + self.dig + return self + + @verbose + def remove_fiducials(self, verbose=None): + """Remove the fiducial points from a montage. + + Parameters + ---------- + %(verbose)s + + Returns + ------- + inst : instance of DigMontage + The instance, modified in-place. + + Notes + ----- + MNE will transform a montage to the internal "head" coordinate + frame if the fiducials are present. Under most circumstances, this + is ideal as it standardizes the coordinate frame for things like + plotting. However, in some circumstances, such as saving a ``raw`` + with intracranial data to BIDS format, the coordinate frame + should not be changed by removing fiducials. + """ + for d in self.dig.copy(): + if d["kind"] == FIFF.FIFFV_POINT_CARDINAL: + self.dig.remove(d) + return self + + +VALID_SCALES = dict(mm=1e-3, cm=1e-2, m=1) + + +def _check_unit_and_get_scaling(unit): + _check_option("unit", unit, sorted(VALID_SCALES.keys())) + return VALID_SCALES[unit] + + +def transform_to_head(montage): + """Transform a DigMontage object into head coordinates. + + Parameters + ---------- + montage : instance of DigMontage + The montage. + + Returns + ------- + montage : instance of DigMontage + The montage after transforming the points to head + coordinate system. + + Notes + ----- + This function requires that the LPA, RPA and Nasion fiducial + points are available. If they are not, they will be added by + projecting the fiducials onto a sphere with radius equal to the average + distance of each point to the origin (in the given coordinate frame). + + This function assumes that all fiducial points are in the same coordinate + frame (e.g. 'unknown') and it will convert all the points in this coordinate + system to the Neuromag head coordinate system. + + .. versionchanged:: 1.2 + Fiducial points will be added automatically if the montage does not + have them. + """ + # Get fiducial points and their coord_frame + native_head_t = compute_native_head_t(montage) + montage = montage.copy() # to avoid inplace modification + if native_head_t["from"] != FIFF.FIFFV_COORD_HEAD: + for d in montage.dig: + if d["coord_frame"] == native_head_t["from"]: + d["r"] = apply_trans(native_head_t, d["r"]) + d["coord_frame"] = FIFF.FIFFV_COORD_HEAD + _ensure_fiducials_head(montage.dig) + return montage + + +def read_dig_dat(fname): + r"""Read electrode positions from a ``*.dat`` file. + + .. Warning:: + This function was implemented based on ``*.dat`` files available from + Compumedics and might not work + as expected with novel files. If it does not read your files correctly, + please contact the MNE-Python developers.
+ + Parameters + ---------- + fname : path-like + File from which to read electrode locations. + + Returns + ------- + montage : DigMontage + The montage. + + See Also + -------- + read_dig_captrak + read_dig_dat + read_dig_egi + read_dig_fif + read_dig_hpts + read_dig_localite + read_dig_polhemus_isotrak + make_dig_montage + + Notes + ----- + ``*.dat`` files are plain text files and can be inspected and amended with + a plain text editor. + """ + from ._standard_montage_utils import _check_dupes_odict + + fname = _check_fname(fname, overwrite="read", must_exist=True) + + with open(fname) as fid: + lines = fid.readlines() + + ch_names, poss = list(), list() + nasion = lpa = rpa = None + for i, line in enumerate(lines): + items = line.split() + if not items: + continue + elif len(items) != 5: + raise ValueError( + f"Error reading {fname}, line {i} has unexpected number of entries:\n" + f"{line.rstrip()}" + ) + num = items[1] + if num == "67": + continue # centroid + pos = np.array([float(item) for item in items[2:]]) + if num == "78": + nasion = pos + elif num == "76": + lpa = pos + elif num == "82": + rpa = pos + else: + ch_names.append(items[0]) + poss.append(pos) + electrodes = _check_dupes_odict(ch_names, poss) + return make_dig_montage(electrodes, nasion, lpa, rpa) + + +@verbose +def read_dig_fif(fname, *, verbose=None): + r"""Read digitized points from a .fif file. + + Parameters + ---------- + fname : path-like + FIF file from which to read digitization locations. + %(verbose)s + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + DigMontage + read_dig_dat + read_dig_egi + read_dig_captrak + read_dig_polhemus_isotrak + read_dig_hpts + read_dig_localite + make_dig_montage + + Notes + ----- + .. versionchanged:: 1.9 + Added support for reading the associated channel names, if present. + + In some files, electrode names are not present (e.g., in older files). + For those files, the channel names are defined with the convention from + VectorView systems (EEG001, EEG002, etc.). + """ + check_fname(fname, "montage", ("-dig.fif", "-dig.fif.gz")) + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") + # Load the dig data + f, tree = fiff_open(fname)[:2] + with f as fid: + dig, ch_names = _read_dig_fif(fid, tree, return_ch_names=True) + + if ch_names is None: # backward compat from when we didn't save the names + ch_names = [] + for d in dig: + if d["kind"] == FIFF.FIFFV_POINT_EEG: + ch_names.append(f"EEG{d['ident']:03d}") + + montage = DigMontage(dig=dig, ch_names=ch_names) + return montage + + +def read_dig_hpts(fname, unit="mm"): + """Read historical ``.hpts`` MNE-C files. + + Parameters + ---------- + fname : path-like + The filepath of the .hpts file. + unit : ``'m'`` | ``'cm'`` | ``'mm'`` + Unit of the positions. Defaults to ``'mm'``. + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + DigMontage + read_dig_captrak + read_dig_dat + read_dig_egi + read_dig_fif + read_dig_localite + read_dig_polhemus_isotrak + make_dig_montage + + Notes + ----- + The hpts format digitizer data file may contain comment lines starting + with the pound sign (#) and data lines of the form:: + + <*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*> + + where: + + ``<*category*>`` + defines the type of points.
Allowed categories are: ``hpi``, + ``cardinal`` (fiducial), ``eeg``, and ``extra`` corresponding to + head-position indicator coil locations, cardinal landmarks, EEG + electrode locations, and additional head surface points, + respectively. + + ``<*identifier*>`` + identifies the point. The identifiers are usually sequential + numbers. For cardinal landmarks, 1 = left auricular point, + 2 = nasion, and 3 = right auricular point. For EEG electrodes, + identifier = 0 signifies the reference electrode. + + ``<*x/mm*> , <*y/mm*> , <*z/mm*>`` + Location of the point, usually in the head coordinate system + in millimeters. If your points are in [m] then unit parameter can + be changed. + + For example:: + + cardinal 2 -5.6729 -12.3873 -30.3671 + cardinal 1 -37.6782 -10.4957 91.5228 + cardinal 3 -131.3127 9.3976 -22.2363 + hpi 1 -30.4493 -11.8450 83.3601 + hpi 2 -122.5353 9.2232 -28.6828 + hpi 3 -6.8518 -47.0697 -37.0829 + hpi 4 7.3744 -50.6297 -12.1376 + hpi 5 -33.4264 -43.7352 -57.7756 + eeg FP1 3.8676 -77.0439 -13.0212 + eeg FP2 -31.9297 -70.6852 -57.4881 + eeg F7 -6.1042 -68.2969 45.4939 + ... + """ + from ._standard_montage_utils import _str, _str_names + + fname = _check_fname(fname, overwrite="read", must_exist=True) + _scale = _check_unit_and_get_scaling(unit) + + out = np.genfromtxt(fname, comments="#", dtype=(_str, _str, "f8", "f8", "f8")) + kind, label = _str_names(out["f0"]), _str_names(out["f1"]) + kind = [k.lower() for k in kind] + xyz = np.array([out[f"f{ii}"] for ii in range(2, 5)]).T + xyz *= _scale + del _scale + fid_idx_to_label = {"1": "lpa", "2": "nasion", "3": "rpa"} + fid = { + fid_idx_to_label[label[ii]]: this_xyz + for ii, this_xyz in enumerate(xyz) + if kind[ii] == "cardinal" + } + ch_pos = { + label[ii]: this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "eeg" + } + hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "hpi"]) + hpi.shape = (-1, 3) # in case it's empty + hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz) if kind[ii] == "extra"]) + hsp.shape = (-1, 3) # in case it's empty + return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp) + + +def read_dig_egi(fname): + """Read electrode locations from EGI system. + + Parameters + ---------- + fname : path-like + EGI MFF XML coordinates file from which to read digitization locations. + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + DigMontage + read_dig_captrak + read_dig_dat + read_dig_fif + read_dig_hpts + read_dig_localite + read_dig_polhemus_isotrak + make_dig_montage + """ + _check_fname(fname, overwrite="read", must_exist=True) + + data = _read_dig_montage_egi( + fname=fname, _scaling=1.0, _all_data_kwargs_are_none=True + ) + return make_dig_montage(**data) + + +def read_dig_captrak(fname): + """Read electrode locations from CapTrak Brain Products system. + + Parameters + ---------- + fname : path-like + BrainVision CapTrak coordinates file from which to read EEG electrode + locations. This is typically in XML format with the .bvct extension. + + Returns + ------- + montage : instance of DigMontage + The montage. 
+
+    See Also
+    --------
+    DigMontage
+    read_dig_dat
+    read_dig_egi
+    read_dig_fif
+    read_dig_hpts
+    read_dig_localite
+    read_dig_polhemus_isotrak
+    make_dig_montage
+    """
+    _check_fname(fname, overwrite="read", must_exist=True)
+    data = _parse_brainvision_dig_montage(fname, scale=1e-3)
+
+    return make_dig_montage(**data)
+
+
+def read_dig_localite(fname, nasion=None, lpa=None, rpa=None):
+    """Read Localite .csv file.
+
+    Parameters
+    ----------
+    fname : path-like
+        File name.
+    nasion : str | None
+        Name of nasion fiducial point.
+    lpa : str | None
+        Name of left preauricular fiducial point.
+    rpa : str | None
+        Name of right preauricular fiducial point.
+
+    Returns
+    -------
+    montage : instance of DigMontage
+        The montage.
+
+    See Also
+    --------
+    DigMontage
+    read_dig_captrak
+    read_dig_dat
+    read_dig_egi
+    read_dig_fif
+    read_dig_hpts
+    read_dig_polhemus_isotrak
+    make_dig_montage
+    """
+    ch_pos = {}
+    with open(fname) as f:
+        f.readline()  # skip first row
+        for row in f:
+            _, name, x, y, z = row.split(",")
+            ch_pos[name] = np.array((float(x), float(y), float(z))) / 1000
+
+    if nasion is not None:
+        nasion = ch_pos.pop(nasion)
+    if lpa is not None:
+        lpa = ch_pos.pop(lpa)
+    if rpa is not None:
+        rpa = ch_pos.pop(rpa)
+
+    return make_dig_montage(ch_pos, nasion, lpa, rpa)
+
+
+def _get_montage_in_head(montage):
+    coords = set([d["coord_frame"] for d in montage.dig])
+    montage = montage.copy()
+    if len(coords) == 1 and coords.pop() == FIFF.FIFFV_COORD_HEAD:
+        _ensure_fiducials_head(montage.dig)
+        return montage
+    else:
+        return transform_to_head(montage)
+
+
+def _set_montage_fnirs(info, montage):
+    """Set the montage for fNIRS data.
+
+    This needs to be different from electrodes as each channel has three
+    coordinates that need to be set. For each channel there is a source optode
+    location, a detector optode location, and a channel midpoint that must be
+    stored. This function modifies info['chs'][#]['loc'] and info['dig'] in
+    place.
+    """
+    from ..preprocessing.nirs import _validate_nirs_info
+
+    # Validate that the fNIRS info is correctly formatted
+    picks = _validate_nirs_info(info)
+
+    # Modify info['chs'][#]['loc'] in place
+    num_fiducials = len(montage.dig) - len(montage.ch_names)
+    for ch_idx in picks:
+        ch = info["chs"][ch_idx]["ch_name"]
+        source, detector = ch.split(" ")[0].split("_")
+        source_pos = montage.dig[montage.ch_names.index(source) + num_fiducials]["r"]
+        detector_pos = montage.dig[montage.ch_names.index(detector) + num_fiducials][
+            "r"
+        ]
+
+        info["chs"][ch_idx]["loc"][3:6] = source_pos
+        info["chs"][ch_idx]["loc"][6:9] = detector_pos
+        midpoint = (source_pos + detector_pos) / 2
+        info["chs"][ch_idx]["loc"][:3] = midpoint
+        info["chs"][ch_idx]["coord_frame"] = FIFF.FIFFV_COORD_HEAD
+
+    # Modify info['dig'] in place
+    with info._unlock():
+        info["dig"] = montage.dig
+
+
+@fill_doc
+def _set_montage(info, montage, match_case=True, match_alias=False, on_missing="raise"):
+    """Apply montage to data.
+
+    With a DigMontage, this function will replace the digitizer info with
+    the values specified for the particular montage.
+
+    Usually, a montage is expected to contain the positions of all EEG
+    electrodes and a warning is raised when this is not the case.
+
+    Parameters
+    ----------
+    %(info_not_none)s
+    %(montage)s
+    %(match_case)s
+    %(match_alias)s
+    %(on_missing_montage)s
+
+    Notes
+    -----
+    This function will change the info variable in place.
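+
+    For illustration, a typical (hypothetical) call path from user code
+    would be::
+
+        montage = make_standard_montage("standard_1020")
+        raw.set_montage(montage)  # raw: an mne.io.Raw instance; set_montage
+                                  # dispatches to this function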
+ """ + _validate_type(montage, (DigMontage, None, str), "montage") + if montage is None: + # Next line modifies info['dig'] in place + with info._unlock(): + info["dig"] = None + for ch in info["chs"]: + # Next line modifies info['chs'][#]['loc'] in place + ch["loc"] = np.full(12, np.nan) + return + if isinstance(montage, str): # load builtin montage + _check_option( + parameter="montage", + value=montage, + allowed_values=[m.name for m in _BUILTIN_STANDARD_MONTAGES], + ) + montage = make_standard_montage(montage) + + mnt_head = _get_montage_in_head(montage) + del montage + + def _backcompat_value(pos, ref_pos): + if any(np.isnan(pos)): + return np.full(6, np.nan) + else: + return np.concatenate((pos, ref_pos)) + + # get the channels in the montage in head + ch_pos = mnt_head._get_ch_pos() + + # only get the eeg, seeg, dbs, ecog channels + picks = pick_types( + info, meg=False, eeg=True, seeg=True, dbs=True, ecog=True, exclude=() + ) + non_picks = np.setdiff1d(np.arange(info["nchan"]), picks) + + # get the reference position from the loc[3:6] + chs = [info["chs"][ii] for ii in picks] + non_names = [info["chs"][ii]["ch_name"] for ii in non_picks] + del picks + ref_pos = np.array([ch["loc"][3:6] for ch in chs]) + + # keep reference location from EEG-like channels if they + # already exist and are all the same. + # Note: ref position is an empty list for fieldtrip data + if len(ref_pos) and ref_pos[0].any() and (ref_pos[0] == ref_pos).all(): + eeg_ref_pos = ref_pos[0] + # since we have an EEG reference position, we have + # to add it into the info['dig'] as EEG000 + custom_eeg_ref_dig = True + else: + refs = set(ch_pos) & {"EEG000", "REF"} + assert len(refs) <= 1 + eeg_ref_pos = np.zeros(3) if not refs else ch_pos.pop(refs.pop()) + custom_eeg_ref_dig = False + del ref_pos + + # This raises based on info being subset/superset of montage + info_names = [ch["ch_name"] for ch in chs] + dig_names = mnt_head._get_dig_names() + ref_names = [None, "EEG000", "REF"] + + if match_case: + info_names_use = info_names + dig_names_use = dig_names + non_names_use = non_names + else: + ch_pos_use = OrderedDict((name.lower(), pos) for name, pos in ch_pos.items()) + info_names_use = [name.lower() for name in info_names] + dig_names_use = [ + name.lower() if name is not None else name for name in dig_names + ] + non_names_use = [name.lower() for name in non_names] + ref_names = [name.lower() if name is not None else name for name in ref_names] + n_dup = len(ch_pos) - len(ch_pos_use) + if n_dup: + raise ValueError( + f"Cannot use match_case=False as {n_dup} montage " + "name(s) require case sensitivity" + ) + n_dup = len(info_names_use) - len(set(info_names_use)) + if n_dup: + raise ValueError( + f"Cannot use match_case=False as {n_dup} channel " + "name(s) require case sensitivity" + ) + ch_pos = ch_pos_use + del ch_pos_use + del dig_names + + # use lookup table to match unrecognized channel names to known aliases + if match_alias: + alias_dict = ( + match_alias if isinstance(match_alias, dict) else CHANNEL_LOC_ALIASES + ) + if not match_case: + alias_dict = { + ch_name.lower(): ch_alias.lower() + for ch_name, ch_alias in alias_dict.items() + } + + # excluded ch_alias not in info, to prevent unnecessary mapping and + # warning messages based on aliases. 
+ alias_dict = {ch_name: ch_alias for ch_name, ch_alias in alias_dict.items()} + info_names_use = [ + alias_dict.get(ch_name, ch_name) for ch_name in info_names_use + ] + non_names_use = [alias_dict.get(ch_name, ch_name) for ch_name in non_names_use] + + # warn user if there is not a full overlap of montage with info_chs + missing = np.where([use not in ch_pos for use in info_names_use])[0] + if len(missing): # DigMontage is subset of info + missing_names = [info_names[ii] for ii in missing] + pl = _pl(missing) + are_is = "are" if pl else "is" + missing_coord_msg = ( + f"DigMontage is only a subset of info. There {are_is} " + f"{len(missing)} channel position{pl} not present in the " + f"DigMontage. The channel{pl} missing from the montage {are_is}:" + f"\n\n{missing_names}.\n\nConsider using inst.rename_channels to " + "match the montage nomenclature, or inst.set_channel_types if " + f"{'these' if pl else 'this'} {are_is} not {'' if pl else 'an '}" + f"EEG channel{pl}, or use the on_missing parameter if the channel " + f"position{pl} {are_is} allowed to be unknown in your analyses." + ) + _on_missing(on_missing, missing_coord_msg) + + # set ch coordinates and names from digmontage or nan coords + for ii in missing: + ch_pos[info_names_use[ii]] = [np.nan] * 3 + del info_names + + assert len(non_names_use) == len(non_names) + # There are no issues here with fNIRS being in non_names_use because + # these names are like "D1_S1_760" and the ch_pos for a fNIRS montage + # will have entries "D1" and "S1". + extra = np.where([non in ch_pos for non in non_names_use])[0] + if len(extra): + types = "/".join(sorted(set(channel_type(info, non_picks[ii]) for ii in extra))) + names = [non_names[ii] for ii in extra] + warn( + f"Not setting position{_pl(extra)} of {len(extra)} {types} " + f"channel{_pl(extra)} found in montage:\n{names}\n" + "Consider setting the channel types to be of " + f'{docdict["montage_types"]} ' + "using inst.set_channel_types before calling inst.set_montage, " + "or omit these channels when creating your montage." + ) + + for ch, use in zip(chs, info_names_use): + # Next line modifies info['chs'][#]['loc'] in place + if use in ch_pos: + ch["loc"][:6] = _backcompat_value(ch_pos[use], eeg_ref_pos) + ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD + del ch_pos + + # XXX this is probably wrong as it uses the order from the montage + # rather than the order of our info['ch_names'] ... + digpoints = [ + mnt_head.dig[ii] + for ii, name in enumerate(dig_names_use) + if name in (info_names_use + ref_names) + ] + + # get a copy of the old dig + if info["dig"] is not None: + old_dig = info["dig"].copy() + else: + old_dig = [] + + # determine if needed to add an extra EEG REF DigPoint + if custom_eeg_ref_dig: + # ref_name = 'EEG000' if match_case else 'eeg000' + ref_dig_dict = { + "kind": FIFF.FIFFV_POINT_EEG, + "r": eeg_ref_pos, + "ident": 0, + "coord_frame": info["dig"].pop()["coord_frame"], + } + ref_dig_point = _format_dig_points([ref_dig_dict])[0] + # only append the reference dig point if it was already + # in the old dig + if ref_dig_point in old_dig: + digpoints.append(ref_dig_point) + # Next line modifies info['dig'] in place + with info._unlock(): + info["dig"] = _format_dig_points(digpoints, enforce_order=True) + del digpoints + + # TODO: Ideally we would have a check like this, but read_raw_bids for ECoG + # allows for a montage to be set without any fiducials, then silently the + # info['dig'] can end up in the MNI_TAL frame... 
only because in our + # conversion code, UNKNOWN is treated differently from any other frame + # (e.g., MNI_TAL). We should clean this up at some point... + # missing_fids = sum( + # d['kind'] == FIFF.FIFFV_POINT_CARDINAL for d in info['dig'][:3]) != 3 + # if missing_fids: + # raise RuntimeError( + # 'Could not find all three fiducials in the montage, this should ' + # 'not happen. Please contact MNE-Python developers.') + + # Handle fNIRS with source, detector and channel + fnirs_picks = _picks_to_idx(info, "fnirs", allow_empty=True) + if len(fnirs_picks) > 0: + _set_montage_fnirs(info, mnt_head) + + +def _read_isotrak_elp_points(fname): + """Read Polhemus Isotrak digitizer data from a ``.elp`` file. + + Parameters + ---------- + fname : path-like + The filepath of .elp Polhemus Isotrak file. + + Returns + ------- + out : dict of arrays + The dictionary containing locations for 'nasion', 'lpa', 'rpa' + and 'points'. + """ + value_pattern = r"\-?\d+\.?\d*e?\-?\d*" + coord_pattern = rf"({value_pattern})\s+({value_pattern})\s+({value_pattern})\s*$" + + with open(fname) as fid: + file_str = fid.read() + + points_str = [ + m.groups() for m in re.finditer(coord_pattern, file_str, re.MULTILINE) + ] + points = np.array(points_str, dtype=float) + + return { + "nasion": points[0], + "lpa": points[1], + "rpa": points[2], + "points": points[3:], + } + + +def _read_isotrak_hsp_points(fname): + """Read Polhemus Isotrak digitizer data from a ``.hsp`` file. + + Parameters + ---------- + fname : path-like + The filepath of .hsp Polhemus Isotrak file. + + Returns + ------- + out : dict of arrays + The dictionary containing locations for 'nasion', 'lpa', 'rpa' + and 'points'. + """ + + def get_hsp_fiducial(line): + return np.fromstring(line.replace("%F", ""), dtype=float, sep="\t") + + with open(fname) as ff: + for line in ff: + if "position of fiducials" in line.lower(): + break + + nasion = get_hsp_fiducial(ff.readline()) + lpa = get_hsp_fiducial(ff.readline()) + rpa = get_hsp_fiducial(ff.readline()) + + _ = ff.readline() + line = ff.readline() + if line: + n_points, n_cols = np.fromstring(line, dtype=int, sep="\t") + points = np.fromstring( + string=ff.read(), + dtype=float, + sep="\t", + ).reshape(-1, n_cols) + assert points.shape[0] == n_points + else: + points = np.empty((0, 3)) + + return {"nasion": nasion, "lpa": lpa, "rpa": rpa, "points": points} + + +def read_dig_polhemus_isotrak(fname, ch_names=None, unit="m"): + """Read Polhemus digitizer data from a file. + + Parameters + ---------- + fname : path-like + The filepath of Polhemus ISOTrak formatted file. + File extension is expected to be ``'.hsp'``, ``'.elp'`` or ``'.eeg'``. + ch_names : None | list of str + The names of the points. This will make the points + considered as EEG channels. If None, channels will be assumed + to be HPI if the extension is ``'.elp'``, and extra headshape + points otherwise. + unit : ``'m'`` | ``'cm'`` | ``'mm'`` + Unit of the digitizer file. Polhemus ISOTrak systems data is usually + exported in meters. Defaults to ``'m'``. + + Returns + ------- + montage : instance of DigMontage + The montage. 
+ + See Also + -------- + DigMontage + make_dig_montage + read_polhemus_fastscan + read_dig_captrak + read_dig_dat + read_dig_egi + read_dig_fif + read_dig_localite + """ + VALID_FILE_EXT = (".hsp", ".elp", ".eeg") + fname = str(_check_fname(fname, overwrite="read", must_exist=True)) + _scale = _check_unit_and_get_scaling(unit) + + _, ext = op.splitext(fname) + _check_option("fname", ext, VALID_FILE_EXT) + + if ext == ".elp": + data = _read_isotrak_elp_points(fname) + else: + # Default case we read points as hsp since is the most likely scenario + data = _read_isotrak_hsp_points(fname) + + if _scale != 1: + data = {key: val * _scale for key, val in data.items()} + else: + pass # noqa + + if ch_names is None: + keyword = "hpi" if ext == ".elp" else "hsp" + data[keyword] = data.pop("points") + + else: + points = data.pop("points") + if points.shape[0] == len(ch_names): + data["ch_pos"] = OrderedDict(zip(ch_names, points)) + else: + raise ValueError( + "Length of ``ch_names`` does not match the number of points in " + f"{fname}. Expected ``ch_names`` length {points.shape[0]}, given " + f"{len(ch_names)}" + ) + + return make_dig_montage(**data) + + +def _is_polhemus_fastscan(fname): + header = "" + with open(fname) as fid: + for line in fid: + if not line.startswith("%"): + break + header += line + + return "FastSCAN" in header + + +@verbose +def read_polhemus_fastscan( + fname, unit="mm", on_header_missing="raise", *, verbose=None +): + """Read Polhemus FastSCAN digitizer data from a ``.txt`` file. + + Parameters + ---------- + fname : path-like + The path of ``.txt`` Polhemus FastSCAN file. + unit : ``'m'`` | ``'cm'`` | ``'mm'`` + Unit of the digitizer file. Polhemus FastSCAN systems data is usually + exported in millimeters. Defaults to ``'mm'``. + %(on_header_missing)s + %(verbose)s + + Returns + ------- + points : array, shape (n_points, 3) + The digitization points in digitizer coordinates. + + See Also + -------- + read_dig_polhemus_isotrak + make_dig_montage + """ + VALID_FILE_EXT = [".txt"] + fname = str(_check_fname(fname, overwrite="read", must_exist=True)) + _scale = _check_unit_and_get_scaling(unit) + + _, ext = op.splitext(fname) + _check_option("fname", ext, VALID_FILE_EXT) + + if not _is_polhemus_fastscan(fname): + msg = f"{fname} does not contain a valid Polhemus FastSCAN header" + _on_missing(on_header_missing, msg) + + points = _scale * np.loadtxt(fname, comments="%", ndmin=2) + _check_dig_shape(points) + return points + + +def _read_eeglab_locations(fname): + ch_names = np.genfromtxt(fname, dtype=str, usecols=3).tolist() + topo = np.loadtxt(fname, dtype=float, usecols=[1, 2]) + sph = _topo_to_sph(topo) + pos = _sph_to_cart(sph) + pos[:, [0, 1]] = pos[:, [1, 0]] * [-1, 1] + + return ch_names, pos + + +@verbose +def read_custom_montage( + fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None, *, verbose=None +): + """Read a montage from a file. + + Parameters + ---------- + fname : path-like + File extension is expected to be: + ``'.loc'`` or ``'.locs'`` or ``'.eloc'`` (for EEGLAB files), + ``'.sfp'`` (BESA/EGI files), ``'.csd'``, + ``'.elc'``, ``'.txt'``, ``'.csd'``, ``'.elp'`` (BESA spherical), + ``'.bvef'`` (BrainVision files), + ``'.csv'``, ``'.tsv'``, ``'.xyz'`` (XYZ coordinates). + head_size : float | None + The size of the head (radius, in [m]). If ``None``, returns the values + read from the montage file with no modification. Defaults to 0.095m. + coord_frame : str | None + The coordinate frame of the points. Usually this is ``"unknown"`` + for native digitizer space. 
Defaults to None, which is ``"unknown"`` + for most readers but ``"head"`` for EEGLAB. + + .. versionadded:: 0.20 + %(verbose)s + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + make_dig_montage + make_standard_montage + read_dig_fif + + Notes + ----- + The function is a helper to read electrode positions you may have + in various formats. Most of these format are weakly specified + in terms of units, coordinate systems. It implies that setting + a montage using a DigMontage produced by this function may + be problematic. If you use a standard/template (eg. 10/20, + 10/10 or 10/05) we recommend you use :func:`make_standard_montage`. + If you can have positions in memory you can also use + :func:`make_dig_montage` that takes arrays as input. + """ + from ._standard_montage_utils import ( + _read_brainvision, + _read_csd, + _read_elc, + _read_elp_besa, + _read_sfp, + _read_theta_phi_in_degrees, + _read_xyz, + ) + + SUPPORTED_FILE_EXT = { + "eeglab": ( + ".loc", + ".locs", + ".eloc", + ), + "hydrocel": (".sfp",), + "matlab": (".csd",), + "asa electrode": (".elc",), + "generic (Theta-phi in degrees)": (".txt",), + "standard BESA spherical": (".elp",), # NB: not same as polhemus elp + "brainvision": (".bvef",), + "xyz": (".csv", ".tsv", ".xyz"), + } + + fname = str(_check_fname(fname, overwrite="read", must_exist=True)) + _, ext = op.splitext(fname) + _check_option("fname", ext, list(sum(SUPPORTED_FILE_EXT.values(), ()))) + + if ext in SUPPORTED_FILE_EXT["eeglab"]: + if head_size is None: + raise ValueError(f"``head_size`` cannot be None for '{ext}'") + ch_names, pos = _read_eeglab_locations(fname) + scale = head_size / np.median(np.linalg.norm(pos, axis=-1)) + pos *= scale + + montage = make_dig_montage( + ch_pos=OrderedDict(zip(ch_names, pos)), + coord_frame="head", + ) + + elif ext in SUPPORTED_FILE_EXT["hydrocel"]: + montage = _read_sfp(fname, head_size=head_size) + + elif ext in SUPPORTED_FILE_EXT["matlab"]: + montage = _read_csd(fname, head_size=head_size) + + elif ext in SUPPORTED_FILE_EXT["asa electrode"]: + montage = _read_elc(fname, head_size=head_size) + + elif ext in SUPPORTED_FILE_EXT["generic (Theta-phi in degrees)"]: + if head_size is None: + raise ValueError(f"``head_size`` cannot be None for '{ext}'") + montage = _read_theta_phi_in_degrees( + fname, head_size=head_size, fid_names=("Nz", "LPA", "RPA") + ) + + elif ext in SUPPORTED_FILE_EXT["standard BESA spherical"]: + montage = _read_elp_besa(fname, head_size) + + elif ext in SUPPORTED_FILE_EXT["brainvision"]: + montage = _read_brainvision(fname, head_size) + + elif ext in SUPPORTED_FILE_EXT["xyz"]: + montage = _read_xyz(fname) + + if coord_frame is not None: + coord_frame = _coord_frame_const(coord_frame) + for d in montage.dig: + d["coord_frame"] = coord_frame + + return montage + + +def compute_dev_head_t(montage): + """Compute device to head transform from a DigMontage. + + Parameters + ---------- + montage : DigMontage + The `~mne.channels.DigMontage` must contain the fiducials in head + coordinate system and hpi points in both head and + meg device coordinate system. + + Returns + ------- + dev_head_t : Transform + A Device-to-Head transformation matrix. + """ + _, coord_frame = _get_fid_coords(montage.dig) + if coord_frame != FIFF.FIFFV_COORD_HEAD: + raise ValueError( + "montage should have been set to head coordinate " + "system with transform_to_head function." 
+ ) + + hpi_head = np.array( + [ + d["r"] + for d in montage.dig + if ( + d["kind"] == FIFF.FIFFV_POINT_HPI + and d["coord_frame"] == FIFF.FIFFV_COORD_HEAD + ) + ], + float, + ) + hpi_dev = np.array( + [ + d["r"] + for d in montage.dig + if ( + d["kind"] == FIFF.FIFFV_POINT_HPI + and d["coord_frame"] == FIFF.FIFFV_COORD_DEVICE + ) + ], + float, + ) + + if not (len(hpi_head) == len(hpi_dev) and len(hpi_dev) > 0): + raise ValueError( + "To compute Device-to-Head transformation, the same number of HPI" + f" points in device and head coordinates is required. (Got {len(hpi_dev)}" + f" points in device and {len(hpi_head)} points in head coordinate systems)" + ) + + trans = _quat_to_affine(_fit_matched_points(hpi_dev, hpi_head)[0]) + return Transform(fro="meg", to="head", trans=trans) + + +@verbose +def compute_native_head_t(montage, *, on_missing="warn", verbose=None): + """Compute the native-to-head transformation for a montage. + + This uses the fiducials in the native space to transform to compute the + transform to the head coordinate frame. + + Parameters + ---------- + montage : instance of DigMontage + The montage. + %(on_missing_fiducials)s + + .. versionadded:: 1.2 + %(verbose)s + + Returns + ------- + native_head_t : instance of Transform + A native-to-head transformation matrix. + """ + # Get fiducial points and their coord_frame + fid_coords, coord_frame = _get_fid_coords(montage.dig, raise_error=False) + if coord_frame is None: + coord_frame = FIFF.FIFFV_COORD_UNKNOWN + if coord_frame == FIFF.FIFFV_COORD_HEAD: + native_head_t = np.eye(4) + else: + fid_keys = ("nasion", "lpa", "rpa") + for key in fid_keys: + this_coord = fid_coords[key] + if this_coord is None or np.any(np.isnan(this_coord)): + msg = ( + f"Fiducial point {key} not found, assuming identity " + f"{_verbose_frames[coord_frame]} to head transformation" + ) + _on_missing(on_missing, msg, error_klass=RuntimeError) + native_head_t = np.eye(4) + break + else: + native_head_t = get_ras_to_neuromag_trans( + *[fid_coords[key] for key in fid_keys] + ) + return Transform(coord_frame, "head", native_head_t) + + +def make_standard_montage(kind, head_size="auto"): + """Read a generic (built-in) standard montage that ships with MNE-Python. + + Parameters + ---------- + kind : str + The name of the montage to use. + + .. note:: + You can retrieve the names of all + built-in montages via :func:`mne.channels.get_builtin_montages`. + head_size : float | None | str + The head size (radius, in meters) to use for spherical montages. + Can be None to not scale the read sizes. ``'auto'`` (default) will + use 95mm for all montages except the ``'standard*'``, ``'mgh*'``, and + ``'artinis*'``, which are already in fsaverage's MRI coordinates + (same as MNI). + + Returns + ------- + montage : instance of DigMontage + The montage. + + See Also + -------- + get_builtin_montages + make_dig_montage + read_custom_montage + + Notes + ----- + Individualized (digitized) electrode positions should be read in using + :func:`read_dig_captrak`, :func:`read_dig_dat`, :func:`read_dig_egi`, + :func:`read_dig_fif`, :func:`read_dig_polhemus_isotrak`, + :func:`read_dig_hpts`, or manually made with :func:`make_dig_montage`. + + .. 
versionadded:: 0.19.0 + """ + from ._standard_montage_utils import standard_montage_look_up_table + + _validate_type(kind, str, "kind") + _check_option( + parameter="kind", + value=kind, + allowed_values=[m.name for m in _BUILTIN_STANDARD_MONTAGES], + ) + _validate_type(head_size, ("numeric", str, None), "head_size") + if isinstance(head_size, str): + _check_option("head_size", head_size, ("auto",), extra="when str") + if kind.startswith(("standard", "mgh", "artinis")): + head_size = None + else: + head_size = HEAD_SIZE_DEFAULT + return standard_montage_look_up_table[kind](head_size=head_size) + + +def _check_dig_shape(pts): + _validate_type(pts, np.ndarray, "points") + if pts.ndim != 2 or pts.shape[-1] != 3: + raise ValueError(f"Points must be of shape (n, 3) instead of {pts.shape}") diff --git a/mne/chpi.py b/mne/chpi.py new file mode 100644 index 0000000..d040817 --- /dev/null +++ b/mne/chpi.py @@ -0,0 +1,1608 @@ +"""Functions for fitting head positions with (c)HPI coils. + +``compute_head_pos`` can be used to: + +1. Drop coils whose GOF are below ``gof_limit``. If fewer than 3 coils + remain, abandon fitting for the chunk. +2. Fit dev_head_t quaternion (using ``_fit_chpi_quat_subset``), + iteratively dropping coils (as long as 3 remain) to find the best GOF + (using ``_fit_chpi_quat``). +3. If fewer than 3 coils meet the ``dist_limit`` criteria following + projection of the fitted device coil locations into the head frame, + abandon fitting for the chunk. + +The function ``filter_chpi`` uses the same linear model to filter cHPI +and (optionally) line frequencies from the data. +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import copy +import itertools +from functools import partial + +import numpy as np +from scipy.linalg import orth +from scipy.optimize import fmin_cobyla +from scipy.spatial.distance import cdist + +from ._fiff.constants import FIFF +from ._fiff.meas_info import Info, _simplify_info +from ._fiff.pick import ( + _picks_to_idx, + pick_channels, + pick_channels_regexp, + pick_info, + pick_types, +) +from ._fiff.proj import Projection, setup_proj +from .channels.channels import _get_meg_system +from .cov import compute_whitener, make_ad_hoc_cov +from .dipole import _make_guesses +from .event import find_events +from .fixes import jit +from .forward import _concatenate_coils, _create_meg_coils, _magnetic_dipole_field_vec +from .io import BaseRaw +from .io.ctf.trans import _make_ctf_coord_trans_set +from .io.kit.constants import KIT +from .io.kit.kit import RawKIT as _RawKIT +from .preprocessing.maxwell import ( + _get_mf_picks_fix_mags, + _prep_mf_coils, + _regularize_out, + _sss_basis, +) +from .transforms import ( + _angle_between_quats, + _fit_matched_points, + _quat_to_affine, + als_ras_trans, + apply_trans, + invert_transform, + quat_to_rot, + rot_to_quat, +) +from .utils import ( + ProgressBar, + _check_fname, + _check_option, + _on_missing, + _pl, + _validate_type, + _verbose_safe_false, + logger, + use_log_level, + verbose, + warn, +) + +# Eventually we should add: +# hpicons +# high-passing of data during fits +# parsing cHPI coil information from acq pars, then to PSD if necessary + + +# ############################################################################ +# Reading from text or FIF file + + +def read_head_pos(fname): + """Read MaxFilter-formatted head position parameters. + + Parameters + ---------- + fname : path-like + The filename to read. 
This can be produced by e.g.,
+        ``maxfilter -headpos .pos``.
+
+    Returns
+    -------
+    pos : array, shape (N, 10)
+        The position and quaternion parameters from cHPI fitting.
+
+    See Also
+    --------
+    write_head_pos
+    head_pos_to_trans_rot_t
+
+    Notes
+    -----
+    .. versionadded:: 0.12
+    """
+    _check_fname(fname, must_exist=True, overwrite="read")
+    data = np.loadtxt(fname, skiprows=1)  # first line is header, skip it
+    data.shape = (-1, 10)  # ensure it's the right size even if empty
+    if np.isnan(data).any():  # make sure we didn't do something dumb
+        raise RuntimeError(f"positions could not be read properly from {fname}")
+    return data
+
+
+def write_head_pos(fname, pos):
+    """Write MaxFilter-formatted head position parameters.
+
+    Parameters
+    ----------
+    fname : path-like
+        The filename to write.
+    pos : array, shape (N, 10)
+        The position and quaternion parameters from cHPI fitting.
+
+    See Also
+    --------
+    read_head_pos
+    head_pos_to_trans_rot_t
+
+    Notes
+    -----
+    .. versionadded:: 0.12
+    """
+    _check_fname(fname, overwrite=True)
+    pos = np.array(pos, np.float64)
+    if pos.ndim != 2 or pos.shape[1] != 10:
+        raise ValueError("pos must be a 2D array of shape (N, 10)")
+    with open(fname, "wb") as fid:
+        fid.write(
+            " Time q1 q2 q3 q4 q5 "
+            "q6 g-value error velocity\n".encode("ASCII")
+        )
+        for p in pos:
+            fmts = ["% 9.3f"] + ["% 8.5f"] * 9
+            fid.write(((" " + " ".join(fmts) + "\n") % tuple(p)).encode("ASCII"))
+
+
+def head_pos_to_trans_rot_t(quats):
+    """Convert MaxFilter-formatted head position quaternions.
+
+    Parameters
+    ----------
+    quats : ndarray, shape (N, 10)
+        MaxFilter-formatted position and quaternion parameters.
+
+    Returns
+    -------
+    translation : ndarray, shape (N, 3)
+        Translations at each time point.
+    rotation : ndarray, shape (N, 3, 3)
+        Rotations at each time point.
+    t : ndarray, shape (N,)
+        The time points.
+
+    See Also
+    --------
+    read_head_pos
+    write_head_pos
+    """
+    t = quats[..., 0].copy()
+    rotation = quat_to_rot(quats[..., 1:4])
+    translation = quats[..., 4:7].copy()
+    return translation, rotation, t
+
+
+@verbose
+def extract_chpi_locs_ctf(raw, verbose=None):
+    r"""Extract cHPI locations from CTF data.
+
+    Parameters
+    ----------
+    raw : instance of Raw
+        Raw data with CTF cHPI information.
+    %(verbose)s
+
+    Returns
+    -------
+    %(chpi_locs)s
+
+    Notes
+    -----
+    CTF continuous head monitoring stores the x,y,z location (m) of each cHPI
+    coil as separate channels in the dataset:
+
+    - ``HLC001[123]\\*`` - nasion
+    - ``HLC002[123]\\*`` - lpa
+    - ``HLC003[123]\\*`` - rpa
+
+    This extracts these positions for use with
+    :func:`~mne.chpi.compute_head_pos`.
+
+    .. versionadded:: 0.20
+    """
+    # Pick channels corresponding to the cHPI positions
+    hpi_picks = pick_channels_regexp(raw.info["ch_names"], "HLC00[123][123].*")
+
+    # make sure we get 9 channels
+    if len(hpi_picks) != 9:
+        raise RuntimeError("Could not find all 9 cHPI channels")
+
+    # get indices in alphabetical order
+    sorted_picks = np.array(sorted(hpi_picks, key=lambda k: raw.info["ch_names"][k]))
+
+    # make picks to match order of dig cardinal ident codes.
+ # LPA (HPIC002[123]-*), NAS(HPIC001[123]-*), RPA(HPIC003[123]-*) + hpi_picks = sorted_picks[[3, 4, 5, 0, 1, 2, 6, 7, 8]] + del sorted_picks + + # process the entire run + time_sl = slice(0, len(raw.times)) + chpi_data = raw[hpi_picks, time_sl][0] + + # transforms + tmp_trans = _make_ctf_coord_trans_set(None, None) + ctf_dev_dev_t = tmp_trans["t_ctf_dev_dev"] + del tmp_trans + + # find indices where chpi locations change + indices = [0] + indices.extend(np.where(np.all(np.diff(chpi_data, axis=1), axis=0))[0] + 1) + # data in channels are in ctf device coordinates (cm) + rrs = chpi_data[:, indices].T.reshape(len(indices), 3, 3) # m + # map to mne device coords + rrs = apply_trans(ctf_dev_dev_t, rrs) + gofs = np.ones(rrs.shape[:2]) # not encoded, set all good + moments = np.zeros(rrs.shape) # not encoded, set all zero + times = raw.times[indices] + raw._first_time + return dict(rrs=rrs, gofs=gofs, times=times, moments=moments) + + +@verbose +def extract_chpi_locs_kit(raw, stim_channel="MISC 064", *, verbose=None): + """Extract cHPI locations from KIT data. + + Parameters + ---------- + raw : instance of RawKIT + Raw data with KIT cHPI information. + stim_channel : str + The stimulus channel that encodes HPI measurement intervals. + %(verbose)s + + Returns + ------- + %(chpi_locs)s + + Notes + ----- + .. versionadded:: 0.23 + """ + _validate_type(raw, (_RawKIT,), "raw") + stim_chs = [ + raw.info["ch_names"][pick] + for pick in pick_types(raw.info, stim=True, misc=True, ref_meg=False) + ] + _validate_type(stim_channel, str, "stim_channel") + _check_option("stim_channel", stim_channel, stim_chs) + idx = raw.ch_names.index(stim_channel) + safe_false = _verbose_safe_false() + events_on = find_events( + raw, stim_channel=raw.ch_names[idx], output="onset", verbose=safe_false + )[:, 0] + events_off = find_events( + raw, stim_channel=raw.ch_names[idx], output="offset", verbose=safe_false + )[:, 0] + bad = False + if len(events_on) == 0 or len(events_off) == 0: + bad = True + else: + if events_on[-1] > events_off[-1]: + events_on = events_on[:-1] + if events_on.size != events_off.size or not (events_on < events_off).all(): + bad = True + if bad: + raise RuntimeError( + f"Could not find appropriate cHPI intervals from {stim_channel}" + ) + # use the midpoint for times + times = (events_on + events_off) / (2 * raw.info["sfreq"]) + del events_on, events_off + # XXX remove first two rows. It is unknown currently if there is a way to + # determine from the con file the number of initial pulses that + # indicate the start of reading. The number is shown by opening the con + # file in MEG160, but I couldn't find the value in the .con file, so it + # may just always be 2... 
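+    # Concrete (hypothetical) example of the midpoint times computed above:
+    # events_on=[10, 30, 50] and events_off=[20, 40, 60] at sfreq=1000 Hz
+    # give times=[0.015, 0.035, 0.055], of which the first two are dropped.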
+ times = times[2:] + n_coils = 5 # KIT always has 5 (hard-coded in reader) + header = raw._raw_extras[0]["dirs"][KIT.DIR_INDEX_CHPI_DATA] + dtype = np.dtype([("good", " 0 else None + # grab codes indicating a coil is active + hpi_on = [coil["event_bits"][0] for coil in hpi_sub["hpi_coils"]] + # not all HPI coils will actually be used + hpi_on = np.array([hpi_on[hc["number"] - 1] for hc in hpi_coils]) + # mask for coils that may be active + hpi_mask = np.array([event_bit != 0 for event_bit in hpi_on]) + hpi_on = hpi_on[hpi_mask] + hpi_freqs = hpi_freqs[hpi_mask] + else: + hpi_on = np.zeros(len(hpi_freqs)) + + return hpi_freqs, hpi_pick, hpi_on + + +@verbose +def _get_hpi_initial_fit(info, adjust=False, verbose=None): + """Get HPI fit locations from raw.""" + if info["hpi_results"] is None or len(info["hpi_results"]) == 0: + raise RuntimeError("no initial cHPI head localization performed") + + hpi_result = info["hpi_results"][-1] + hpi_dig = sorted( + [d for d in info["dig"] if d["kind"] == FIFF.FIFFV_POINT_HPI], + key=lambda x: x["ident"], + ) # ascending (dig) order + if len(hpi_dig) == 0: # CTF data, probably + msg = "HPIFIT: No HPI dig points, using hpifit result" + hpi_dig = sorted(hpi_result["dig_points"], key=lambda x: x["ident"]) + if all( + d["coord_frame"] in (FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_UNKNOWN) + for d in hpi_dig + ): + # Do not modify in place! + hpi_dig = copy.deepcopy(hpi_dig) + msg += " transformed to head coords" + for dig in hpi_dig: + dig.update( + r=apply_trans(info["dev_head_t"], dig["r"]), + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + logger.debug(msg) + + # zero-based indexing, dig->info + # CTF does not populate some entries so we use .get here + pos_order = hpi_result.get("order", np.arange(1, len(hpi_dig) + 1)) - 1 + used = hpi_result.get("used", np.arange(len(hpi_dig))) + dist_limit = hpi_result.get("dist_limit", 0.005) + good_limit = hpi_result.get("good_limit", 0.98) + goodness = hpi_result.get("goodness", np.ones(len(hpi_dig))) + + # this shouldn't happen, eventually we could add the transforms + # necessary to put it in head coords + if not all(d["coord_frame"] == FIFF.FIFFV_COORD_HEAD for d in hpi_dig): + raise RuntimeError("cHPI coordinate frame incorrect") + # Give the user some info + logger.info( + f"HPIFIT: {len(pos_order)} coils digitized in order " + f"{' '.join(str(o + 1) for o in pos_order)}" + ) + logger.debug( + f"HPIFIT: {len(used)} coils accepted: {' '.join(str(h) for h in used)}" + ) + hpi_rrs = np.array([d["r"] for d in hpi_dig])[pos_order] + assert len(hpi_rrs) >= 3 + + # Fitting errors + hpi_rrs_fit = sorted( + [d for d in info["hpi_results"][-1]["dig_points"]], key=lambda x: x["ident"] + ) + hpi_rrs_fit = np.array([d["r"] for d in hpi_rrs_fit]) + # hpi_result['dig_points'] are in FIFFV_COORD_UNKNOWN coords, but this + # is probably a misnomer because it should be FIFFV_COORD_DEVICE for this + # to work + assert hpi_result["coord_trans"]["to"] == FIFF.FIFFV_COORD_HEAD + hpi_rrs_fit = apply_trans(hpi_result["coord_trans"]["trans"], hpi_rrs_fit) + if "moments" in hpi_result: + logger.debug(f"Hpi coil moments {hpi_result['moments'].shape[::-1]}:") + for moment in hpi_result["moments"]: + logger.debug(f"{moment[0]:g} {moment[1]:g} {moment[2]:g}") + errors = np.linalg.norm(hpi_rrs - hpi_rrs_fit, axis=1) + logger.debug(f"HPIFIT errors: {', '.join(f'{1000 * e:0.1f}' for e in errors)} mm.") + if errors.sum() < len(errors) * dist_limit: + logger.info("HPI consistency of isotrak and hpifit is OK.") + elif not adjust and (len(used) == 
len(hpi_dig)): + warn("HPI consistency of isotrak and hpifit is poor.") + else: + # adjust HPI coil locations using the hpifit transformation + for hi, (err, r_fit) in enumerate(zip(errors, hpi_rrs_fit)): + # transform to head frame + d = 1000 * err + if not adjust: + if err >= dist_limit: + warn( + f"Discrepancy of HPI coil {hi + 1} isotrak and hpifit is " + f"{d:.1f} mm!" + ) + elif hi + 1 not in used: + if goodness[hi] >= good_limit: + logger.info( + f"Note: HPI coil {hi + 1} isotrak is adjusted by {d:.1f} mm!" + ) + hpi_rrs[hi] = r_fit + else: + warn( + f"Discrepancy of HPI coil {hi + 1} isotrak and hpifit of " + f"{d:.1f} mm was not adjusted!" + ) + logger.debug( + f"HP fitting limits: err = {1000 * dist_limit:.1f} mm, gval = {good_limit:.3f}." + ) + + return hpi_rrs.astype(float) + + +def _magnetic_dipole_objective( + x, B, B2, coils, whitener, too_close, return_moment=False +): + """Project data onto right eigenvectors of whitened forward.""" + fwd = _magnetic_dipole_field_vec(x[np.newaxis], coils, too_close) + out, u, s, one = _magnetic_dipole_delta(fwd, whitener, B, B2) + if return_moment: + one /= s + Q = np.dot(one, u.T) + out = (out, Q) + return out + + +@jit() +def _magnetic_dipole_delta(fwd, whitener, B, B2): + # Here we use .T to get whitener to Fortran order, which speeds things up + fwd = np.dot(fwd, whitener.T) + u, s, v = np.linalg.svd(fwd, full_matrices=False) + one = np.dot(v, B) + Bm2 = np.dot(one, one) + return B2 - Bm2, u, s, one + + +def _magnetic_dipole_delta_multi(whitened_fwd_svd, B, B2): + # Here we use .T to get whitener to Fortran order, which speeds things up + one = np.matmul(whitened_fwd_svd, B) + Bm2 = np.sum(one * one, axis=1) + return B2 - Bm2 + + +def _fit_magnetic_dipole(B_orig, x0, too_close, whitener, coils, guesses): + """Fit a single bit of data (x0 = pos).""" + B = np.dot(whitener, B_orig) + B2 = np.dot(B, B) + objective = partial( + _magnetic_dipole_objective, + B=B, + B2=B2, + coils=coils, + whitener=whitener, + too_close=too_close, + ) + if guesses is not None: + res0 = objective(x0) + res = _magnetic_dipole_delta_multi(guesses["whitened_fwd_svd"], B, B2) + assert res.shape == (guesses["rr"].shape[0],) + idx = np.argmin(res) + if res[idx] < res0: + x0 = guesses["rr"][idx] + x = fmin_cobyla(objective, x0, (), rhobeg=1e-3, rhoend=1e-5, disp=False) + gof, moment = objective(x, return_moment=True) + gof = 1.0 - gof / B2 + return x, gof, moment + + +@jit() +def _chpi_objective(x, coil_dev_rrs, coil_head_rrs): + """Compute objective function.""" + d = np.dot(coil_dev_rrs, quat_to_rot(x[:3]).T) + d += x[3:] + d -= coil_head_rrs + d *= d + return d.sum() + + +def _fit_chpi_quat(coil_dev_rrs, coil_head_rrs): + """Fit rotation and translation (quaternion) parameters for cHPI coils.""" + denom = np.linalg.norm(coil_head_rrs - np.mean(coil_head_rrs, axis=0)) + denom *= denom + # We could try to solve it the analytic way: + # XXX someday we could choose to weight these points by their goodness + # of fit somehow. 
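+    # (the fit below returns gof = 1 - SSE / denom, i.e., one minus the
+    # squared residual relative to the total variance of the head-frame coil
+    # positions about their mean, so gof == 1.0 is a perfect rigid match)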
+    quat = _fit_matched_points(coil_dev_rrs, coil_head_rrs)[0]
+    gof = 1.0 - _chpi_objective(quat, coil_dev_rrs, coil_head_rrs) / denom
+    return quat, gof
+
+
+def _fit_coil_order_dev_head_trans(dev_pnts, head_pnts, bias=True):
+    """Compute Device to Head transform allowing for permutations of points."""
+    id_quat = np.zeros(6)
+    best_order = None
+    best_g = -999
+    best_quat = id_quat
+    for this_order in itertools.permutations(np.arange(len(head_pnts))):
+        head_pnts_tmp = head_pnts[np.array(this_order)]
+        this_quat, g = _fit_chpi_quat(dev_pnts, head_pnts_tmp)
+        assert np.linalg.det(quat_to_rot(this_quat[:3])) > 0.9999
+        if bias:
+            # For symmetrical arrangements, flips can produce roughly
+            # equivalent g values. To avoid this, heavily penalize
+            # large rotations.
+            rotation = _angle_between_quats(this_quat[:3], np.zeros(3))
+            check_g = g * max(1.0 - rotation / np.pi, 0) ** 0.25
+        else:
+            check_g = g
+        if check_g > best_g:
+            out_g = g
+            best_g = check_g
+            best_order = np.array(this_order)
+            best_quat = this_quat
+
+    # Convert quaternion to transform
+    dev_head_t = _quat_to_affine(best_quat)
+    return dev_head_t, best_order, out_g
+
+
+@verbose
+def _setup_hpi_amplitude_fitting(
+    info, t_window, remove_aliased=False, ext_order=1, allow_empty=False, verbose=None
+):
+    """Generate HPI structure for HPI localization."""
+    # grab basic info.
+    on_missing = "raise" if not allow_empty else "ignore"
+    hpi_freqs, hpi_pick, hpi_ons = get_chpi_info(info, on_missing=on_missing)
+
+    # check for maxwell filtering
+    for ent in info["proc_history"]:
+        for key in ("sss_info", "max_st"):
+            if len(ent["max_info"][key]) > 0:
+                warn(
+                    "Fitting cHPI amplitudes after Maxwell filtering may not work, "
+                    "consider fitting on the original data."
+                )
+                break
+
+    _validate_type(t_window, (str, "numeric"), "t_window")
+    if info["line_freq"] is not None:
+        line_freqs = np.arange(
+            info["line_freq"], info["sfreq"] / 3.0, info["line_freq"]
+        )
+    else:
+        line_freqs = np.zeros([0])
+    lfs = " ".join(f"{lf}" for lf in line_freqs)
+    logger.info(f"Line interference frequencies: {lfs} Hz")
+    # worry about resampled/filtered data.
+    # What to do e.g. if Raw has been resampled and some of our
+    # HPI freqs would now be aliased
+    highest = info.get("lowpass")
+    highest = info["sfreq"] / 2.0 if highest is None else highest
+    keepers = hpi_freqs <= highest
+    if remove_aliased:
+        hpi_freqs = hpi_freqs[keepers]
+        hpi_ons = hpi_ons[keepers]
+    elif not keepers.all():
+        raise RuntimeError(
+            f"Found HPI frequencies {hpi_freqs[~keepers].tolist()} above the lowpass ("
+            f"or Nyquist) frequency {highest:0.1f}"
+        )
+    # calculate optimal window length.
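+    # ("auto" picks the longer of five cycles of the lowest cHPI/line
+    # frequency and the reciprocal of the smallest spacing between distinct
+    # frequencies; see the compute_chpi_amplitudes docstring for the rationale)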
+ if isinstance(t_window, str): + _check_option("t_window", t_window, ("auto",), extra="if a string") + if len(hpi_freqs): + all_freqs = np.concatenate((hpi_freqs, line_freqs)) + delta_freqs = np.diff(np.unique(all_freqs)) + t_window = max(5.0 / all_freqs.min(), 1.0 / delta_freqs.min()) + else: + t_window = 0.2 + t_window = float(t_window) + if t_window <= 0: + raise ValueError(f"t_window ({t_window}) must be > 0") + logger.info(f"Using time window: {1000 * t_window:0.1f} ms") + window_nsamp = np.rint(t_window * info["sfreq"]).astype(int) + model = _setup_hpi_glm(hpi_freqs, line_freqs, info["sfreq"], window_nsamp) + inv_model = np.linalg.pinv(model) + inv_model_reord = _reorder_inv_model(inv_model, len(hpi_freqs)) + proj, proj_op, meg_picks = _setup_ext_proj(info, ext_order) + # include mag and grad picks separately, for SNR computations + mag_subpicks = _picks_to_idx(info, "mag", allow_empty=True) + mag_subpicks = np.searchsorted(meg_picks, mag_subpicks) + grad_subpicks = _picks_to_idx(info, "grad", allow_empty=True) + grad_subpicks = np.searchsorted(meg_picks, grad_subpicks) + # Set up magnetic dipole fits + hpi = dict( + meg_picks=meg_picks, + mag_subpicks=mag_subpicks, + grad_subpicks=grad_subpicks, + hpi_pick=hpi_pick, + model=model, + inv_model=inv_model, + t_window=t_window, + inv_model_reord=inv_model_reord, + on=hpi_ons, + n_window=window_nsamp, + proj=proj, + proj_op=proj_op, + freqs=hpi_freqs, + line_freqs=line_freqs, + ) + return hpi + + +def _setup_hpi_glm(hpi_freqs, line_freqs, sfreq, window_nsamp): + """Initialize a general linear model for HPI amplitude estimation.""" + slope = np.linspace(-0.5, 0.5, window_nsamp)[:, np.newaxis] + radians_per_sec = 2 * np.pi * np.arange(window_nsamp, dtype=float) / sfreq + f_t = hpi_freqs[np.newaxis, :] * radians_per_sec[:, np.newaxis] + l_t = line_freqs[np.newaxis, :] * radians_per_sec[:, np.newaxis] + model = [ + np.sin(f_t), + np.cos(f_t), # hpi freqs + np.sin(l_t), + np.cos(l_t), # line freqs + slope, + np.ones_like(slope), + ] # drift, DC + return np.hstack(model) + + +@jit() +def _reorder_inv_model(inv_model, n_freqs): + # Reorder for faster computation + idx = np.arange(2 * n_freqs).reshape(2, n_freqs).T.ravel() + return inv_model[idx] + + +def _setup_ext_proj(info, ext_order): + meg_picks = pick_types(info, meg=True, eeg=False, exclude="bads") + info = pick_info(_simplify_info(info), meg_picks) # makes a copy + _, _, _, _, mag_or_fine = _get_mf_picks_fix_mags( + info, int_order=0, ext_order=ext_order, ignore_ref=True, verbose="error" + ) + mf_coils = _prep_mf_coils(info, verbose="error") + ext = _sss_basis( + dict(origin=(0.0, 0.0, 0.0), int_order=0, ext_order=ext_order), mf_coils + ).T + out_removes = _regularize_out(0, 1, mag_or_fine, []) + ext = ext[~np.isin(np.arange(len(ext)), out_removes)] + ext = orth(ext.T).T + assert ext.shape[1] == len(meg_picks) + proj = Projection( + kind=FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD, + desc="SSS", + active=False, + data=dict( + data=ext, ncol=info["nchan"], col_names=info["ch_names"], nrow=len(ext) + ), + ) + with info._unlock(): + info["projs"] = [proj] + proj_op, _ = setup_proj( + info, add_eeg_ref=False, activate=False, verbose=_verbose_safe_false() + ) + assert proj_op.shape == (len(meg_picks),) * 2 + return proj, proj_op, meg_picks + + +def _time_prefix(fit_time): + """Format log messages.""" + return (f" t={fit_time:0.3f}:").ljust(17) + + +def _fit_chpi_amplitudes(raw, time_sl, hpi, snr=False): + """Fit amplitudes for each channel from each of the N cHPI sinusoids. 
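+
+    In brief: the GLM built by ``_setup_hpi_glm`` (sin/cos at each cHPI and
+    line frequency plus slope and DC terms) is solved with a precomputed
+    pseudoinverse, and the sin/cos pair for each frequency is reduced to a
+    single amplitude vector via a rank-1 SVD (see ``_fast_fit``).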
+ + Returns + ------- + sin_fit : ndarray, shape (n_freqs, n_channels) + The sin amplitudes matching each cHPI frequency. + Will be all nan if this time window should be skipped. + snr : ndarray, shape (n_freqs, 2) + Estimated SNR for this window, separately for mag and grad channels. + """ + # No need to detrend the data because our model has a DC term + with use_log_level(False): + # loads good channels + this_data = raw[hpi["meg_picks"], time_sl][0] + + # which HPI coils to use + if hpi["hpi_pick"] is not None: + with use_log_level(False): + # loads hpi_stim channel + chpi_data = raw[hpi["hpi_pick"], time_sl][0] + + ons = (np.round(chpi_data).astype(np.int64) & hpi["on"][:, np.newaxis]).astype( + bool + ) + n_on = ons.all(axis=-1).sum(axis=0) + if not (n_on >= 3).all(): + return None + if snr: + return _fast_fit_snr( + this_data, + len(hpi["freqs"]), + hpi["model"], + hpi["inv_model"], + hpi["mag_subpicks"], + hpi["grad_subpicks"], + ) + return _fast_fit( + this_data, + hpi["proj_op"], + len(hpi["freqs"]), + hpi["model"], + hpi["inv_model_reord"], + ) + + +@jit() +def _fast_fit(this_data, proj, n_freqs, model, inv_model_reord): + # first or last window + if this_data.shape[1] != model.shape[0]: + model = model[: this_data.shape[1]] + inv_model_reord = _reorder_inv_model(np.linalg.pinv(model), n_freqs) + proj_data = proj @ this_data + X = inv_model_reord @ proj_data.T + + sin_fit = np.zeros((n_freqs, X.shape[1])) + for fi in range(n_freqs): + # use SVD across all sensors to estimate the sinusoid phase + u, s, vt = np.linalg.svd(X[2 * fi : 2 * fi + 2], full_matrices=False) + # the first component holds the predominant phase direction + # (so ignore the second, effectively doing s[1] = 0): + sin_fit[fi] = vt[0] * s[0] + return sin_fit + + +@jit() +def _fast_fit_snr(this_data, n_freqs, model, inv_model, mag_picks, grad_picks): + # first or last window + if this_data.shape[1] != model.shape[0]: + model = model[: this_data.shape[1]] + inv_model = np.linalg.pinv(model) + coefs = np.ascontiguousarray(inv_model) @ np.ascontiguousarray(this_data.T) + # average sin & cos terms (special property of sinusoids: power=A²/2) + hpi_power = (coefs[:n_freqs] ** 2 + coefs[n_freqs : (2 * n_freqs)] ** 2) / 2 + resid = this_data - np.ascontiguousarray((model @ coefs).T) + # can't use np.var(..., axis=1) with Numba, so do it manually: + resid_mean = np.atleast_2d(resid.sum(axis=1) / resid.shape[1]).T + squared_devs = np.abs(resid - resid_mean) ** 2 + resid_var = squared_devs.sum(axis=1) / squared_devs.shape[1] + # output array will be (n_freqs, 3 * n_ch_types). The 3 columns for each + # channel type are the SNR, the mean cHPI power and the residual variance + # (which gets tiled to shape (n_freqs,) because it's a scalar). 
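+    # e.g., with both mag and grad channels present, the columns come out as
+    # [snr_mag, power_mag, resid_mag, snr_grad, power_grad, resid_grad],
+    # which is the layout the grad_offset-based unpacking in
+    # _compute_chpi_amp_or_snr expects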
+ snrs = np.empty((n_freqs, 0)) + # average power & compute residual variance separately for each ch type + for _picks in (mag_picks, grad_picks): + if len(_picks): + avg_power = hpi_power[:, _picks].sum(axis=1) / len(_picks) + avg_resid = resid_var[_picks].mean() * np.ones(n_freqs) + snr = 10 * np.log10(avg_power / avg_resid) + snrs = np.hstack((snrs, np.stack((snr, avg_power, avg_resid), 1))) + return snrs + + +def _check_chpi_param(chpi_, name): + if name == "chpi_locs": + want_ndims = dict(times=1, rrs=3, moments=3, gofs=2) + extra_keys = list() + else: + assert name == "chpi_amplitudes" + want_ndims = dict(times=1, slopes=3) + extra_keys = ["proj"] + + _validate_type(chpi_, dict, name) + want_keys = list(want_ndims.keys()) + extra_keys + if set(want_keys).symmetric_difference(chpi_): + raise ValueError( + f"{name} must be a dict with entries {want_keys}, got " + f"{sorted(chpi_.keys())}" + ) + n_times = None + for key, want_ndim in want_ndims.items(): + key_str = f"{name}[{key}]" + val = chpi_[key] + _validate_type(val, np.ndarray, key_str) + shape = val.shape + if val.ndim != want_ndim: + raise ValueError(f"{key_str} must have ndim={want_ndim}, got {val.ndim}") + if n_times is None and key != "proj": + n_times = shape[0] + if n_times != shape[0] and key != "proj": + raise ValueError( + f"{name} have inconsistent number of time points in {want_keys}" + ) + if name == "chpi_locs": + n_coils = chpi_["rrs"].shape[1] + for key in ("gofs", "moments"): + val = chpi_[key] + if val.shape[1] != n_coils: + raise ValueError( + f'chpi_locs["rrs"] had values for {n_coils} coils but ' + f'chpi_locs["{key}"] had values for {val.shape[1]} coils' + ) + for key in ("rrs", "moments"): + val = chpi_[key] + if val.shape[2] != 3: + raise ValueError( + f'chpi_locs["{key}"].shape[2] must be 3, got shape {shape}' + ) + else: + assert name == "chpi_amplitudes" + slopes, proj = chpi_["slopes"], chpi_["proj"] + _validate_type(proj, Projection, 'chpi_amplitudes["proj"]') + n_ch = len(proj["data"]["col_names"]) + if slopes.shape[0] != n_times or slopes.shape[2] != n_ch: + raise ValueError( + f"slopes must have shape[0]=={n_times} and shape[2]=={n_ch}, got shape " + f"{slopes.shape}" + ) + + +@verbose +def compute_head_pos( + info, chpi_locs, dist_limit=0.005, gof_limit=0.98, adjust_dig=False, verbose=None +): + """Compute time-varying head positions. + + Parameters + ---------- + %(info_not_none)s + %(chpi_locs)s + Typically obtained by :func:`~mne.chpi.compute_chpi_locs` or + :func:`~mne.chpi.extract_chpi_locs_ctf`. + dist_limit : float + Minimum distance (m) to accept for coil position fitting. + gof_limit : float + Minimum goodness of fit to accept for each coil. + %(adjust_dig_chpi)s + %(verbose)s + + Returns + ------- + quats : ndarray, shape (n_pos, 10) + The ``[t, q1, q2, q3, x, y, z, gof, err, v]`` for each fit. + + See Also + -------- + compute_chpi_locs + extract_chpi_locs_ctf + read_head_pos + write_head_pos + + Notes + ----- + .. 
versionadded:: 0.20
+    """
+    _check_chpi_param(chpi_locs, "chpi_locs")
+    _validate_type(info, Info, "info")
+    hpi_dig_head_rrs = _get_hpi_initial_fit(info, adjust=adjust_dig, verbose="error")
+    n_coils = len(hpi_dig_head_rrs)
+    coil_dev_rrs = apply_trans(invert_transform(info["dev_head_t"]), hpi_dig_head_rrs)
+    dev_head_t = info["dev_head_t"]["trans"]
+    pos_0 = dev_head_t[:3, 3]
+    last = dict(
+        quat_fit_time=-0.1,
+        coil_dev_rrs=coil_dev_rrs,
+        quat=np.concatenate([rot_to_quat(dev_head_t[:3, :3]), dev_head_t[:3, 3]]),
+    )
+    del coil_dev_rrs
+    quats = []
+    for fit_time, this_coil_dev_rrs, g_coils in zip(
+        *(chpi_locs[key] for key in ("times", "rrs", "gofs"))
+    ):
+        use_idx = np.where(g_coils >= gof_limit)[0]
+
+        #
+        # 1. Check number of good ones
+        #
+        if len(use_idx) < 3:
+            gofs = ", ".join(f"{g:0.2f}" for g in g_coils)
+            warn(
+                f"{_time_prefix(fit_time)}{len(use_idx)}/{n_coils} "
+                "good HPI fits, cannot determine the transformation "
+                f"({gofs} GOF)!"
+            )
+            continue
+
+        #
+        # 2. Fit the head translation and rotation params (minimize error
+        #    between coil positions and the head coil digitization
+        #    positions) iteratively using different sets of coils.
+        #
+        this_quat, g, use_idx = _fit_chpi_quat_subset(
+            this_coil_dev_rrs, hpi_dig_head_rrs, use_idx
+        )
+
+        #
+        # 3. Stop if < 3 good
+        #
+
+        # Convert quaternion to transform
+        this_dev_head_t = _quat_to_affine(this_quat)
+        est_coil_head_rrs = apply_trans(this_dev_head_t, this_coil_dev_rrs)
+        errs = np.linalg.norm(hpi_dig_head_rrs - est_coil_head_rrs, axis=1)
+        n_good = ((g_coils >= gof_limit) & (errs < dist_limit)).sum()
+        if n_good < 3:
+            warn_str = ", ".join(
+                f"{1000 * e:0.1f}::{g:0.2f}" for e, g in zip(errs, g_coils)
+            )
+            warn(
+                f"{_time_prefix(fit_time)}{n_good}/{n_coils} good HPI fits, cannot "
+                f"determine the transformation ({warn_str} mm/GOF)!"
+ ) + continue + + # velocities, in device coords, of HPI coils + dt = fit_time - last["quat_fit_time"] + vs = tuple( + 1000.0 + * np.linalg.norm(last["coil_dev_rrs"] - this_coil_dev_rrs, axis=1) + / dt + ) + logger.info( + _time_prefix(fit_time) + + ( + "%s/%s good HPI fits, movements [mm/s] = " + + " / ".join(["% 8.1f"] * n_coils) + ) + % ((n_good, n_coils) + vs) + ) + + # Log results + # MaxFilter averages over a 200 ms window for display, but we don't + for ii in range(n_coils): + if ii in use_idx: + start, end = " ", "/" + else: + start, end = "(", ")" + log_str = ( + " " + + start + + "{0:6.1f} {1:6.1f} {2:6.1f} / " + + "{3:6.1f} {4:6.1f} {5:6.1f} / " + + "g = {6:0.3f} err = {7:4.1f} " + + end + ) + vals = np.concatenate( + ( + 1000 * hpi_dig_head_rrs[ii], + 1000 * est_coil_head_rrs[ii], + [g_coils[ii], 1000 * errs[ii]], + ) + ) + if len(use_idx) >= 3: + if ii <= 2: + log_str += "{8:6.3f} {9:6.3f} {10:6.3f}" + vals = np.concatenate((vals, this_dev_head_t[ii, :3])) + elif ii == 3: + log_str += "{8:6.1f} {9:6.1f} {10:6.1f}" + vals = np.concatenate((vals, this_dev_head_t[:3, 3] * 1000.0)) + logger.debug(log_str.format(*vals)) + + # resulting errors in head coil positions + d = np.linalg.norm(last["quat"][3:] - this_quat[3:]) # m + r = _angle_between_quats(last["quat"][:3], this_quat[:3]) / dt + v = d / dt # m/s + d = 100 * np.linalg.norm(this_quat[3:] - pos_0) # dis from 1st + logger.debug( + f" #t = {fit_time:0.3f}, #e = {100 * errs.mean():0.2f} cm, #g = {g:0.3f}" + f", #v = {100 * v:0.2f} cm/s, #r = {r:0.2f} rad/s, #d = {d:0.2f} cm" + ) + q_rep = " ".join(f"{qq:8.5f}" for qq in this_quat) + logger.debug(f" #t = {fit_time:0.3f}, #q = {q_rep}") + + quats.append( + np.concatenate(([fit_time], this_quat, [g], [errs[use_idx].mean()], [v])) + ) + last["quat_fit_time"] = fit_time + last["quat"] = this_quat + last["coil_dev_rrs"] = this_coil_dev_rrs + quats = np.array(quats, np.float64) + quats = np.zeros((0, 10)) if quats.size == 0 else quats + return quats + + +def _fit_chpi_quat_subset(coil_dev_rrs, coil_head_rrs, use_idx): + quat, g = _fit_chpi_quat(coil_dev_rrs[use_idx], coil_head_rrs[use_idx]) + out_idx = use_idx.copy() + if len(use_idx) > 3: # try dropping one (recursively) + for di in range(len(use_idx)): + this_use_idx = list(use_idx[:di]) + list(use_idx[di + 1 :]) + this_quat, this_g, this_use_idx = _fit_chpi_quat_subset( + coil_dev_rrs, coil_head_rrs, this_use_idx + ) + if this_g > g: + quat, g, out_idx = this_quat, this_g, this_use_idx + return quat, g, np.array(out_idx, int) + + +@verbose +def compute_chpi_snr( + raw, t_step_min=0.01, t_window="auto", ext_order=1, tmin=0, tmax=None, verbose=None +): + """Compute time-varying estimates of cHPI SNR. + + Parameters + ---------- + raw : instance of Raw + Raw data with cHPI information. + t_step_min : float + Minimum time step to use. + %(t_window_chpi_t)s + %(ext_order_chpi)s + %(tmin_raw)s + %(tmax_raw)s + %(verbose)s + + Returns + ------- + chpi_snrs : dict + The time-varying cHPI SNR estimates, with entries "times", "freqs", + "snr_mag", "power_mag", and "resid_mag" (and/or "snr_grad", + "power_grad", and "resid_grad", depending on which channel types are + present in ``raw``). + + See Also + -------- + mne.chpi.compute_chpi_locs, mne.chpi.compute_chpi_amplitudes + + Notes + ----- + .. 
versionadded:: 0.24 + """ + return _compute_chpi_amp_or_snr( + raw, t_step_min, t_window, ext_order, tmin, tmax, verbose, snr=True + ) + + +@verbose +def compute_chpi_amplitudes( + raw, t_step_min=0.01, t_window="auto", ext_order=1, tmin=0, tmax=None, verbose=None +): + """Compute time-varying cHPI amplitudes. + + Parameters + ---------- + raw : instance of Raw + Raw data with cHPI information. + t_step_min : float + Minimum time step to use. + %(t_window_chpi_t)s + %(ext_order_chpi)s + %(tmin_raw)s + %(tmax_raw)s + %(verbose)s + + Returns + ------- + %(chpi_amplitudes)s + + See Also + -------- + mne.chpi.compute_chpi_locs, mne.chpi.compute_chpi_snr + + Notes + ----- + This function will: + + 1. Get HPI frequencies, HPI status channel, HPI status bits, + and digitization order using ``_setup_hpi_amplitude_fitting``. + 2. Window data using ``t_window`` (half before and half after ``t``) and + ``t_step_min``. + 3. Use a linear model (DC + linear slope + sin + cos terms) to fit + sinusoidal amplitudes to MEG channels. + It uses SVD to determine the phase/amplitude of the sinusoids. + + In "auto" mode, ``t_window`` will be set to the longer of: + + 1. Five cycles of the lowest HPI or line frequency. + Ensures that the frequency estimate is stable. + 2. The reciprocal of the smallest difference between HPI and line freqs. + Ensures that neighboring frequencies can be disambiguated. + + The output is meant to be used with :func:`~mne.chpi.compute_chpi_locs`. + + .. versionadded:: 0.20 + """ + return _compute_chpi_amp_or_snr( + raw, t_step_min, t_window, ext_order, tmin, tmax, verbose + ) + + +def _compute_chpi_amp_or_snr( + raw, + t_step_min=0.01, + t_window="auto", + ext_order=1, + tmin=0, + tmax=None, + verbose=None, + snr=False, +): + """Compute cHPI amplitude or SNR. + + See compute_chpi_amplitudes for parameter descriptions. One additional + boolean parameter ``snr`` signals whether to return SNR instead of + amplitude. + """ + hpi = _setup_hpi_amplitude_fitting(raw.info, t_window, ext_order=ext_order) + tmin, tmax = raw._tmin_tmax_to_start_stop(tmin, tmax) + tmin = tmin / raw.info["sfreq"] + tmax = tmax / raw.info["sfreq"] + need_win = hpi["t_window"] / 2.0 + fit_idxs = raw.time_as_index( + np.arange(tmin + need_win, tmax, t_step_min), use_rounding=True + ) + logger.info( + f"Fitting {len(hpi['freqs'])} HPI coil locations at up to " + f"{len(fit_idxs)} time points ({tmax - tmin:.1f} s duration)" + ) + del tmin, tmax + sin_fits = dict() + sin_fits["proj"] = hpi["proj"] + sin_fits["times"] = ( + np.round(fit_idxs + raw.first_samp - hpi["n_window"] / 2.0) / raw.info["sfreq"] + ) + n_times = len(sin_fits["times"]) + n_freqs = len(hpi["freqs"]) + n_chans = len(sin_fits["proj"]["data"]["col_names"]) + if snr: + del sin_fits["proj"] + sin_fits["freqs"] = hpi["freqs"] + ch_types = raw.get_channel_types() + grad_offset = 3 if "mag" in ch_types else 0 + for ch_type in ("mag", "grad"): + if ch_type in ch_types: + for key in ("snr", "power", "resid"): + cols = 1 if key == "resid" else n_freqs + sin_fits[f"{ch_type}_{key}"] = np.empty((n_times, cols)) + else: + sin_fits["slopes"] = np.empty((n_times, n_freqs, n_chans)) + message = f"cHPI {'SNRs' if snr else 'amplitudes'}" + for mi, midpt in enumerate(ProgressBar(fit_idxs, mesg=message)): + # + # 0. determine samples to fit. + # + time_sl = midpt - hpi["n_window"] // 2 + time_sl = slice(max(time_sl, 0), min(time_sl + hpi["n_window"], len(raw.times))) + + # + # 1. 
Fit amplitudes for each channel from each of the N sinusoids + # + amps_or_snrs = _fit_chpi_amplitudes(raw, time_sl, hpi, snr) + if snr: + if amps_or_snrs is None: + amps_or_snrs = np.full((n_freqs, grad_offset + 3), np.nan) + # unpack the SNR estimates. mag & grad are returned in one array + # (because of Numba) so take care with which column is which. + # note that mean residual is a scalar (same for all HPI freqs) but + # is returned as a (tiled) vector (again, because Numba) so that's + # why below we take amps_or_snrs[0, 2] instead of [:, 2] + ch_types = raw.get_channel_types() + if "mag" in ch_types: + sin_fits["mag_snr"][mi] = amps_or_snrs[:, 0] # SNR + sin_fits["mag_power"][mi] = amps_or_snrs[:, 1] # mean power + sin_fits["mag_resid"][mi] = amps_or_snrs[0, 2] # mean resid + if "grad" in ch_types: + sin_fits["grad_snr"][mi] = amps_or_snrs[:, grad_offset] + sin_fits["grad_power"][mi] = amps_or_snrs[:, grad_offset + 1] + sin_fits["grad_resid"][mi] = amps_or_snrs[0, grad_offset + 2] + else: + sin_fits["slopes"][mi] = amps_or_snrs + return sin_fits + + +@verbose +def compute_chpi_locs( + info, + chpi_amplitudes, + t_step_max=1.0, + too_close="raise", + adjust_dig=False, + verbose=None, +): + """Compute locations of each cHPI coils over time. + + Parameters + ---------- + %(info_not_none)s + %(chpi_amplitudes)s + Typically obtained by :func:`mne.chpi.compute_chpi_amplitudes`. + t_step_max : float + Maximum time step to use. + too_close : str + How to handle HPI positions too close to the sensors, + can be ``'raise'`` (default), ``'warning'``, or ``'info'``. + %(adjust_dig_chpi)s + %(verbose)s + + Returns + ------- + %(chpi_locs)s + + See Also + -------- + compute_chpi_amplitudes + compute_head_pos + read_head_pos + write_head_pos + extract_chpi_locs_ctf + + Notes + ----- + This function is designed to take the output of + :func:`mne.chpi.compute_chpi_amplitudes` and: + + 1. Get HPI coil locations (as digitized in ``info['dig']``) in head coords. + 2. If the amplitudes are 98%% correlated with last position + (and Δt < t_step_max), skip fitting. + 3. Fit magnetic dipoles using the amplitudes for each coil frequency. + + The number of fitted points ``n_pos`` will depend on the velocity of head + movements as well as ``t_step_max`` (and ``t_step_min`` from + :func:`mne.chpi.compute_chpi_amplitudes`). + + .. 
versionadded:: 0.20 + """ + # Set up magnetic dipole fits + _check_option("too_close", too_close, ["raise", "warning", "info"]) + _check_chpi_param(chpi_amplitudes, "chpi_amplitudes") + _validate_type(info, Info, "info") + sin_fits = chpi_amplitudes # use the old name below + del chpi_amplitudes + proj = sin_fits["proj"] + meg_picks = pick_channels(info["ch_names"], proj["data"]["col_names"], ordered=True) + info = pick_info(info, meg_picks) # makes a copy + with info._unlock(): + info["projs"] = [proj] + del meg_picks, proj + meg_coils = _concatenate_coils(_create_meg_coils(info["chs"], "accurate")) + + # Set up external model for interference suppression + safe_false = _verbose_safe_false() + cov = make_ad_hoc_cov(info, verbose=safe_false) + whitener, _ = compute_whitener(cov, info, verbose=safe_false) + + # Make some location guesses (1 cm grid) + R = np.linalg.norm(meg_coils[0], axis=1).min() + guesses = _make_guesses( + dict(R=R, r0=np.zeros(3)), 0.01, 0.0, 0.005, verbose=safe_false + )[0]["rr"] + logger.info( + f"Computing {len(guesses)} HPI location guesses " + f"(1 cm grid in a {R * 100:.1f} cm sphere)" + ) + fwd = _magnetic_dipole_field_vec(guesses, meg_coils, too_close) + fwd = np.dot(fwd, whitener.T) + fwd.shape = (guesses.shape[0], 3, -1) + fwd = np.linalg.svd(fwd, full_matrices=False)[2] + guesses = dict(rr=guesses, whitened_fwd_svd=fwd) + del fwd, R + + iter_ = list(zip(sin_fits["times"], sin_fits["slopes"])) + chpi_locs = dict(times=[], rrs=[], gofs=[], moments=[]) + # setup last iteration structure + hpi_dig_dev_rrs = apply_trans( + invert_transform(info["dev_head_t"])["trans"], + _get_hpi_initial_fit(info, adjust=adjust_dig), + ) + last = dict( + sin_fit=None, + coil_fit_time=sin_fits["times"][0] - 1, + coil_dev_rrs=hpi_dig_dev_rrs, + ) + n_hpi = len(hpi_dig_dev_rrs) + del hpi_dig_dev_rrs + for fit_time, sin_fit in ProgressBar(iter_, mesg="cHPI locations "): + # skip this window if bad + if not np.isfinite(sin_fit).all(): + continue + + # check if data has sufficiently changed + if last["sin_fit"] is not None: # first iteration + corrs = np.array( + [np.corrcoef(s, lst)[0, 1] for s, lst in zip(sin_fit, last["sin_fit"])] + ) + corrs *= corrs + # check to see if we need to continue + if ( + fit_time - last["coil_fit_time"] <= t_step_max - 1e-7 + and (corrs > 0.98).sum() >= 3 + ): + # don't need to refit data + continue + + # update 'last' sin_fit *before* inplace sign mult + last["sin_fit"] = sin_fit.copy() + + # + # 2. 
Fit magnetic dipole for each coil to obtain coil positions + # in device coordinates + # + coil_fits = [ + _fit_magnetic_dipole(f, x0, too_close, whitener, meg_coils, guesses) + for f, x0 in zip(sin_fit, last["coil_dev_rrs"]) + ] + rrs, gofs, moments = zip(*coil_fits) + chpi_locs["times"].append(fit_time) + chpi_locs["rrs"].append(rrs) + chpi_locs["gofs"].append(gofs) + chpi_locs["moments"].append(moments) + last["coil_fit_time"] = fit_time + last["coil_dev_rrs"] = rrs + n_times = len(chpi_locs["times"]) + shapes = dict( + times=(n_times,), + rrs=(n_times, n_hpi, 3), + gofs=(n_times, n_hpi), + moments=(n_times, n_hpi, 3), + ) + for key, val in chpi_locs.items(): + chpi_locs[key] = np.array(val, float).reshape(shapes[key]) + return chpi_locs + + +def _chpi_locs_to_times_dig(chpi_locs): + """Reformat chpi_locs as list of dig (dict).""" + dig = list() + for rrs, gofs in zip(*(chpi_locs[key] for key in ("rrs", "gofs"))): + dig.append( + [ + { + "r": rr, + "ident": idx, + "gof": gof, + "kind": FIFF.FIFFV_POINT_HPI, + "coord_frame": FIFF.FIFFV_COORD_DEVICE, + } + for idx, (rr, gof) in enumerate(zip(rrs, gofs), 1) + ] + ) + return chpi_locs["times"], dig + + +@verbose +def filter_chpi( + raw, + include_line=True, + t_step=0.01, + t_window="auto", + ext_order=1, + allow_line_only=False, + verbose=None, +): + """Remove cHPI and line noise from data. + + .. note:: This function will only work properly if cHPI was on + during the recording. + + Parameters + ---------- + raw : instance of Raw + Raw data with cHPI information. Must be preloaded. Operates in-place. + include_line : bool + If True, also filter line noise. + t_step : float + Time step to use for estimation, default is 0.01 (10 ms). + %(t_window_chpi_t)s + %(ext_order_chpi)s + allow_line_only : bool + If True, allow filtering line noise only. The default is False, + which only allows the function to run when cHPI information is present. + + .. versionadded:: 0.20 + %(verbose)s + + Returns + ------- + raw : instance of Raw + The raw data. + + Notes + ----- + cHPI signals are in general not stationary, because head movements act + like amplitude modulators on cHPI signals. Thus it is recommended to + use this procedure, which uses an iterative fitting method, to + remove cHPI signals, as opposed to notch filtering. + + .. 
versionadded:: 0.12 + """ + _validate_type(raw, BaseRaw, "raw") + if not raw.preload: + raise RuntimeError("raw data must be preloaded") + t_step = float(t_step) + if t_step <= 0: + raise ValueError(f"t_step ({t_step}) must be > 0") + n_step = int(np.ceil(t_step * raw.info["sfreq"])) + if include_line and raw.info["line_freq"] is None: + raise RuntimeError( + 'include_line=True but raw.info["line_freq"] is ' + "None, consider setting it to the line frequency" + ) + hpi = _setup_hpi_amplitude_fitting( + raw.info, + t_window, + remove_aliased=True, + ext_order=ext_order, + allow_empty=allow_line_only, + verbose=_verbose_safe_false(), + ) + + fit_idxs = np.arange(0, len(raw.times) + hpi["n_window"] // 2, n_step) + n_freqs = len(hpi["freqs"]) + n_remove = 2 * n_freqs + meg_picks = pick_types(raw.info, meg=True, exclude=()) # filter all chs + n_times = len(raw.times) + + msg = f"Removing {n_freqs} cHPI" + if include_line: + n_remove += 2 * len(hpi["line_freqs"]) + msg += f" and {len(hpi['line_freqs'])} line harmonic" + msg += f" frequencies from {len(meg_picks)} MEG channels" + + recon = np.dot(hpi["model"][:, :n_remove], hpi["inv_model"][:n_remove]).T + logger.info(msg) + chunks = list() # the chunks to subtract + last_endpt = 0 + pb = ProgressBar(fit_idxs, mesg="Filtering") + for ii, midpt in enumerate(pb): + left_edge = midpt - hpi["n_window"] // 2 + time_sl = slice( + max(left_edge, 0), min(left_edge + hpi["n_window"], len(raw.times)) + ) + this_len = time_sl.stop - time_sl.start + if this_len == hpi["n_window"]: + this_recon = recon + else: # first or last window + model = hpi["model"][:this_len] + inv_model = np.linalg.pinv(model) + this_recon = np.dot(model[:, :n_remove], inv_model[:n_remove]).T + this_data = raw._data[meg_picks, time_sl] + subt_pt = min(midpt + n_step, n_times) + if last_endpt != subt_pt: + fit_left_edge = left_edge - time_sl.start + hpi["n_window"] // 2 + fit_sl = slice(fit_left_edge, fit_left_edge + (subt_pt - last_endpt)) + chunks.append((subt_pt, np.dot(this_data, this_recon[:, fit_sl]))) + last_endpt = subt_pt + + # Consume (trailing) chunks that are now safe to remove because + # our windows will no longer touch them + if ii < len(fit_idxs) - 1: + next_left_edge = fit_idxs[ii + 1] - hpi["n_window"] // 2 + else: + next_left_edge = np.inf + while len(chunks) > 0 and chunks[0][0] <= next_left_edge: + right_edge, chunk = chunks.pop(0) + raw._data[meg_picks, right_edge - chunk.shape[1] : right_edge] -= chunk + return raw + + +def _compute_good_distances(hpi_coil_dists, new_pos, dist_limit=0.005): + """Compute good coils based on distances.""" + these_dists = cdist(new_pos, new_pos) + these_dists = np.abs(hpi_coil_dists - these_dists) + # there is probably a better algorithm for finding the bad ones... + good = False + use_mask = np.ones(len(hpi_coil_dists), bool) + while not good: + d = these_dists[use_mask][:, use_mask] + d_bad = d > dist_limit + good = not d_bad.any() + if not good: + if use_mask.sum() == 2: + use_mask[:] = False + break # failure + # exclude next worst point + badness = (d * d_bad).sum(axis=0) + exclude_coils = np.where(use_mask)[0][np.argmax(badness)] + use_mask[exclude_coils] = False + return use_mask, these_dists + + +@verbose +def get_active_chpi(raw, *, on_missing="raise", verbose=None): + """Determine how many HPI coils were active for a time point. + + Parameters + ---------- + raw : instance of Raw + Raw data with cHPI information. 
+    %(on_missing_chpi)s
+    %(verbose)s
+
+    Returns
+    -------
+    n_active : array, shape (n_times)
+        The number of active cHPIs for every timepoint in raw.
+
+    Notes
+    -----
+    .. versionadded:: 1.2
+    """
+    # get meg system
+    system, _ = _get_meg_system(raw.info)
+
+    # check whether we have a neuromag system
+    if system not in ["122m", "306m"]:
+        raise NotImplementedError(
+            "Identifying active HPI channels is not implemented for systems "
+            "other than Neuromag."
+        )
+    # extract hpi info
+    chpi_info = get_chpi_info(raw.info, on_missing=on_missing)
+    if (len(chpi_info[2]) == 0) or (chpi_info[1] is None):
+        return np.zeros_like(raw.times)
+
+    # extract hpi time series and infer which one was on
+    chpi_ts = raw[chpi_info[1]][0].astype(int)
+    chpi_active = (chpi_ts & chpi_info[2][:, np.newaxis]).astype(bool)
+    return chpi_active.sum(axis=0)
diff --git a/mne/commands/__init__.py b/mne/commands/__init__.py
new file mode 100644
index 0000000..1c7134e
--- /dev/null
+++ b/mne/commands/__init__.py
@@ -0,0 +1,8 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""Command-line utilities."""
+import lazy_loader as lazy
+
+(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__)
diff --git a/mne/commands/__init__.pyi b/mne/commands/__init__.pyi
new file mode 100644
index 0000000..c0caf04
--- /dev/null
+++ b/mne/commands/__init__.pyi
@@ -0,0 +1,2 @@
+__all__ = ["utils"]
+from . import utils
diff --git a/mne/commands/mne_anonymize.py b/mne/commands/mne_anonymize.py
new file mode 100644
index 0000000..28fae42
--- /dev/null
+++ b/mne/commands/mne_anonymize.py
@@ -0,0 +1,127 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""Anonymize raw fif file.
+
+To anonymize other file types call :func:`mne.io.anonymize_info` on their
+:class:`~mne.Info` objects and resave to disk.
+
+Examples
+--------
+.. code-block:: console
+
+    $ mne anonymize -f sample_audvis_raw.fif
+
+"""
+
+import os.path as op
+import sys
+
+import mne
+
+ANONYMIZE_FILE_PREFIX = "anon"
+
+
+def mne_anonymize(fif_fname, out_fname, keep_his, daysback, overwrite):
+    """Call *anonymize_info* on fif file and save.
+
+    Parameters
+    ----------
+    fif_fname : path-like
+        Raw fif file.
+    out_fname : path-like | None
+        Output file name. Relative paths are saved relative to the parent
+        dir of fif_fname; None saves to the parent dir of fif_fname with
+        the default prefix.
+    daysback : int | None
+        Number of days to subtract from all dates.
+        If None, defaults to moving the date of service to Jan 1, 2000.
+    keep_his : bool
+        If True, the his_id of subject_info will NOT be overwritten;
+ defaults to False + overwrite : bool + Overwrite output file if it already exists + """ + raw = mne.io.read_raw_fif(fif_fname, allow_maxshield=True) + raw.anonymize(daysback=daysback, keep_his=keep_his) + + # determine out_fname + dir_name = op.split(fif_fname)[0] + if out_fname is None: + fif_bname = op.basename(fif_fname) + out_fname = op.join(dir_name, f"{ANONYMIZE_FILE_PREFIX}-{fif_bname}") + elif not op.isabs(out_fname): + out_fname = op.join(dir_name, out_fname) + + raw.save(out_fname, overwrite=overwrite) + + +def run(): + """Run *mne_anonymize* command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option( + "-f", + "--file", + type="string", + dest="file", + help="Name of file to modify.", + metavar="FILE", + default=None, + ) + parser.add_option( + "-o", + "--output", + type="string", + dest="output", + help="Name of anonymized output file." + "`anon-` prefix is added to FILE if not given", + metavar="OUTFILE", + default=None, + ) + parser.add_option( + "--keep_his", + dest="keep_his", + action="store_true", + help="Keep the HIS tag (not advised)", + default=False, + ) + parser.add_option( + "-d", + "--daysback", + type="int", + dest="daysback", + help="Move dates in file backwards by this many days.", + metavar="N_DAYS", + default=None, + ) + parser.add_option( + "--overwrite", + dest="overwrite", + action="store_true", + help="Overwrite input file.", + default=False, + ) + + options, args = parser.parse_args() + if options.file is None: + parser.print_help() + sys.exit(1) + + fname = options.file + out_fname = options.output + keep_his = options.keep_his + daysback = options.daysback + overwrite = options.overwrite + if not fname.endswith(".fif"): + raise ValueError(f"{fname} does not seem to be a .fif file.") + + mne_anonymize(fname, out_fname, keep_his, daysback, overwrite) + + +is_main = __name__ == "__main__" +if is_main: + run() diff --git a/mne/commands/mne_browse_raw.py b/mne/commands/mne_browse_raw.py new file mode 100644 index 0000000..a6db0e2 --- /dev/null +++ b/mne/commands/mne_browse_raw.py @@ -0,0 +1,220 @@ +r"""Browse raw data. + +This uses :func:`mne.io.read_raw` so it supports the same formats +(without keyword arguments). + +Examples +-------- +.. code-block:: console + + $ mne browse_raw sample_audvis_raw.fif \ + --proj sample_audvis_ecg-proj.fif \ + --eve sample_audvis_raw-eve.fif +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
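For orientation, here is a minimal sketch of what this command does through the public Python API. The file name is a hypothetical placeholder, and the keyword values simply mirror the defaults of the options defined below; this is an illustration, not the script itself.

```python
import mne

# Hypothetical input file; mne.io.read_raw() dispatches on the file extension.
raw = mne.io.read_raw("sample_audvis_raw.fif", preload=False)

# Open the interactive browser with this command's default settings.
raw.plot(duration=10.0, n_channels=20, group_by="type", show=True, block=True)
```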
+ +import sys + +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import _add_verbose_flag, get_optparser + from mne.viz import _RAW_CLIP_DEF + + parser = get_optparser(__file__, usage="usage: %prog raw [options]") + + parser.add_option( + "--raw", + dest="raw_in", + help="Input raw FIF file (can also be specified " + "directly as an argument without the --raw prefix)", + metavar="FILE", + ) + parser.add_option( + "--proj", dest="proj_in", help="Projector file", metavar="FILE", default="" + ) + parser.add_option( + "--projoff", + dest="proj_off", + help="Disable all projectors", + default=False, + action="store_true", + ) + parser.add_option( + "--eve", dest="eve_in", help="Events file", metavar="FILE", default="" + ) + parser.add_option( + "-d", + "--duration", + dest="duration", + type="float", + help="Time window for plotting (s)", + default=10.0, + ) + parser.add_option( + "-t", + "--start", + dest="start", + type="float", + help="Initial start time for plotting", + default=0.0, + ) + parser.add_option( + "-n", + "--n_channels", + dest="n_channels", + type="int", + help="Number of channels to plot at a time", + default=20, + ) + parser.add_option( + "-o", + "--order", + dest="group_by", + help="Order to use for grouping during plotting ('type' or 'original')", + default="type", + ) + parser.add_option( + "-p", + "--preload", + dest="preload", + help="Preload raw data (for faster navigation)", + default=False, + action="store_true", + ) + parser.add_option( + "-s", + "--show_options", + dest="show_options", + help="Show projection options dialog", + default=False, + ) + parser.add_option( + "--allowmaxshield", + dest="maxshield", + help="Allow loading MaxShield processed data", + action="store_true", + ) + parser.add_option( + "--highpass", + dest="highpass", + type="float", + help="Display high-pass filter corner frequency", + default=-1, + ) + parser.add_option( + "--lowpass", + dest="lowpass", + type="float", + help="Display low-pass filter corner frequency", + default=-1, + ) + parser.add_option( + "--filtorder", + dest="filtorder", + type="int", + help="Display filtering IIR order (or 0 to use FIR)", + default=4, + ) + parser.add_option( + "--clipping", + dest="clipping", + help="Enable trace clipping mode. 
Can be 'clamp', 'transparent', a float, "
+        "or 'none'.",
+        default=_RAW_CLIP_DEF,
+    )
+    parser.add_option(
+        "--filterchpi",
+        dest="filterchpi",
+        help="Enable filtering cHPI signals.",
+        default=None,
+        action="store_true",
+    )
+    parser.add_option(
+        "--butterfly",
+        dest="butterfly",
+        help="Plot in butterfly mode",
+        default=False,
+        action="store_true",
+    )
+    _add_verbose_flag(parser)
+    options, args = parser.parse_args()
+
+    if len(args):
+        raw_in = args[0]
+    else:
+        raw_in = options.raw_in
+    duration = options.duration
+    start = options.start
+    n_channels = options.n_channels
+    group_by = options.group_by
+    preload = options.preload
+    show_options = options.show_options
+    proj_in = options.proj_in
+    proj_off = options.proj_off
+    eve_in = options.eve_in
+    maxshield = options.maxshield
+    highpass = options.highpass
+    lowpass = options.lowpass
+    filtorder = options.filtorder
+    clipping = options.clipping
+    if isinstance(clipping, str):
+        if clipping.lower() == "none":
+            clipping = None
+        else:
+            try:
+                clipping = float(clipping)  # allow float and convert it
+            except ValueError:
+                pass
+    filterchpi = options.filterchpi
+    verbose = options.verbose
+    butterfly = options.butterfly
+
+    if raw_in is None:
+        parser.print_help()
+        sys.exit(1)
+
+    kwargs = dict(preload=preload)
+    if maxshield:
+        kwargs.update(allow_maxshield="yes")
+    raw = mne.io.read_raw(raw_in, **kwargs)
+    if len(proj_in) > 0:
+        projs = mne.read_proj(proj_in)
+        raw.info["projs"] = projs
+    if len(eve_in) > 0:
+        events = mne.read_events(eve_in)
+    else:
+        events = None
+
+    if filterchpi:
+        if not preload:
+            raise RuntimeError("Raw data must be preloaded for chpi, use --preload")
+        raw = mne.chpi.filter_chpi(raw)
+
+    highpass = None if highpass < 0 or filtorder < 0 else highpass
+    lowpass = None if lowpass < 0 or filtorder < 0 else lowpass
+    raw.plot(
+        duration=duration,
+        start=start,
+        n_channels=n_channels,
+        group_by=group_by,
+        show_options=show_options,
+        events=events,
+        highpass=highpass,
+        lowpass=lowpass,
+        filtorder=filtorder,
+        clipping=clipping,
+        butterfly=butterfly,
+        proj=not proj_off,
+        verbose=verbose,
+        show=True,
+        block=True,
+    )
+
+
+mne.utils.run_command_if_main()
diff --git a/mne/commands/mne_bti2fiff.py b/mne/commands/mne_bti2fiff.py
new file mode 100644
index 0000000..8a06064
--- /dev/null
+++ b/mne/commands/mne_bti2fiff.py
@@ -0,0 +1,122 @@
+"""Import BTi / 4D MagnesWH3600 data to fif file.
+
+Notes
+-----
+1. Currently direct inclusion of reference channel weights
+   is not supported. Please use 'mne_create_comp_data' to include
+   the weights or use the low level functions from this module to
+   include them yourself.
+2. The informed guess for the 4D name is E31 for the ECG channel and
+   E63, E64 for the EOG channels. Please check and adjust if those channels
+   are present in your dataset but 'ECG 01' and 'EOG 01', 'EOG 02' don't
+   appear in the channel names of the raw object.
+
+Examples
+--------
+.. code-block:: console
+
+    $ mne bti2fiff --pdf C,rfDC -o my_raw.fif
+
+"""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
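As a rough sketch of the equivalent Python API call this script wraps (paths are hypothetical; the keyword values mirror the option defaults below):

```python
from mne.io import read_raw_bti

# Hypothetical paths; 'config' and 'hs_file' are the conventional names
# that accompany a 4D pdf file such as 'C,rfDC'.
raw = read_raw_bti(
    pdf_fname="C,rfDC",
    config_fname="config",
    head_shape_fname="hs_file",
    rotation_x=2.0,
    translation=(0.00, 0.02, 0.11),
    ecg_ch="E31",
    eog_ch=("E63", "E64"),
)
raw.save("my_raw.fif")
```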
+
+import sys
+
+import mne
+from mne.io import read_raw_bti
+
+
+def run():
+    """Run command."""
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option(
+        "-p", "--pdf", dest="pdf_fname", help="Input data file name", metavar="FILE"
+    )
+    parser.add_option(
+        "-c",
+        "--config",
+        dest="config_fname",
+        help="Input config file name",
+        metavar="FILE",
+        default="config",
+    )
+    parser.add_option(
+        "--head_shape",
+        dest="head_shape_fname",
+        help="Headshape file name",
+        metavar="FILE",
+        default="hs_file",
+    )
+    parser.add_option(
+        "-o",
+        "--out_fname",
+        dest="out_fname",
+        help="Name of the resulting fiff file",
+        default="as_data_fname",
+    )
+    parser.add_option(
+        "-r",
+        "--rotation_x",
+        dest="rotation_x",
+        type="float",
+        help="Compensatory rotation about Neuromag x axis, deg",
+        default=2.0,
+    )
+    parser.add_option(
+        "-T",
+        "--translation",
+        dest="translation",
+        type="str",
+        help="Default translation, meter",
+        default=(0.00, 0.02, 0.11),
+    )
+    parser.add_option(
+        "--ecg_ch", dest="ecg_ch", type="str", help="4D ECG channel name", default="E31"
+    )
+    parser.add_option(
+        "--eog_ch",
+        dest="eog_ch",
+        type="str",
+        help="4D EOG channel names",
+        default="E63,E64",
+    )
+
+    options, args = parser.parse_args()
+
+    pdf_fname = options.pdf_fname
+    if pdf_fname is None:
+        parser.print_help()
+        sys.exit(1)
+
+    config_fname = options.config_fname
+    head_shape_fname = options.head_shape_fname
+    out_fname = options.out_fname
+    rotation_x = options.rotation_x
+    translation = options.translation
+    ecg_ch = options.ecg_ch
+    eog_ch = options.eog_ch.split(",")
+
+    if out_fname == "as_data_fname":
+        out_fname = pdf_fname + "_raw.fif"
+
+    raw = read_raw_bti(
+        pdf_fname=pdf_fname,
+        config_fname=config_fname,
+        head_shape_fname=head_shape_fname,
+        rotation_x=rotation_x,
+        translation=translation,
+        ecg_ch=ecg_ch,
+        eog_ch=eog_ch,
+    )
+
+    raw.save(out_fname)
+    raw.close()
+
+
+mne.utils.run_command_if_main()
diff --git a/mne/commands/mne_clean_eog_ecg.py b/mne/commands/mne_clean_eog_ecg.py
new file mode 100644
index 0000000..add6e38
--- /dev/null
+++ b/mne/commands/mne_clean_eog_ecg.py
@@ -0,0 +1,232 @@
+"""Clean a raw file from EOG and ECG artifacts with PCA (i.e., SSP).
+
+Examples
+--------
+.. code-block:: console
+
+    $ mne clean_eog_ecg -i in_raw.fif -o clean_raw.fif -e -c
+
+"""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import sys
+
+import mne
+
+
+def clean_ecg_eog(
+    in_fif_fname,
+    out_fif_fname=None,
+    eog=True,
+    ecg=True,
+    ecg_proj_fname=None,
+    eog_proj_fname=None,
+    ecg_event_fname=None,
+    eog_event_fname=None,
+    in_path=".",
+    quiet=False,
+):
+    """Clean ECG and EOG artifacts from a raw fif file.
+
+    Parameters
+    ----------
+    in_fif_fname : path-like
+        Raw fif file.
+    eog_event_fname : str
+        Name of the EOG event file to write.
+    eog : bool
+        Whether to reject EOG artifacts.
+    ecg : bool
+        Whether to reject ECG artifacts.
+    ecg_event_fname : str
+        Name of the ECG event file to write.
+    in_path : str
+        Path where all the files are located.
+ """ + if not eog and not ecg: + raise Exception("EOG and ECG cannot be both disabled") + + # Reading fif File + raw_in = mne.io.read_raw_fif(in_fif_fname) + + if in_fif_fname.endswith("_raw.fif") or in_fif_fname.endswith("-raw.fif"): + prefix = in_fif_fname[:-8] + else: + prefix = in_fif_fname[:-4] + + if out_fif_fname is None: + out_fif_fname = prefix + "_clean_ecg_eog_raw.fif" + if ecg_proj_fname is None: + ecg_proj_fname = prefix + "_ecg-proj.fif" + if eog_proj_fname is None: + eog_proj_fname = prefix + "_eog-proj.fif" + if ecg_event_fname is None: + ecg_event_fname = prefix + "_ecg-eve.fif" + if eog_event_fname is None: + eog_event_fname = prefix + "_eog-eve.fif" + + print("Implementing ECG and EOG artifact rejection on data") + + kwargs = dict() if quiet else dict(stdout=None, stderr=None) + if ecg: + ecg_events, _, _ = mne.preprocessing.find_ecg_events( + raw_in, reject_by_annotation=True + ) + print(f"Writing ECG events in {ecg_event_fname}") + mne.write_events(ecg_event_fname, ecg_events) + print("Computing ECG projector") + command = ( + "mne_process_raw", + "--cd", + in_path, + "--raw", + in_fif_fname, + "--events", + ecg_event_fname, + "--makeproj", + "--projtmin", + "-0.08", + "--projtmax", + "0.08", + "--saveprojtag", + "_ecg-proj", + "--projnmag", + "2", + "--projngrad", + "1", + "--projevent", + "999", + "--highpass", + "5", + "--lowpass", + "35", + "--projmagrej", + "4000", + "--projgradrej", + "3000", + ) + mne.utils.run_subprocess(command, **kwargs) + if eog: + eog_events = mne.preprocessing.find_eog_events(raw_in) + print(f"Writing EOG events in {eog_event_fname}") + mne.write_events(eog_event_fname, eog_events) + print("Computing EOG projector") + command = ( + "mne_process_raw", + "--cd", + in_path, + "--raw", + in_fif_fname, + "--events", + eog_event_fname, + "--makeproj", + "--projtmin", + "-0.15", + "--projtmax", + "0.15", + "--saveprojtag", + "_eog-proj", + "--projnmag", + "2", + "--projngrad", + "2", + "--projevent", + "998", + "--lowpass", + "35", + "--projmagrej", + "4000", + "--projgradrej", + "3000", + ) + mne.utils.run_subprocess(command, **kwargs) + + if out_fif_fname is not None: + # Applying the ECG EOG projector + print("Applying ECG EOG projector") + command = ( + "mne_process_raw", + "--cd", + in_path, + "--raw", + in_fif_fname, + "--proj", + in_fif_fname, + "--projoff", + "--save", + out_fif_fname, + "--filteroff", + "--proj", + ecg_proj_fname, + "--proj", + eog_proj_fname, + ) + mne.utils.run_subprocess(command, **kwargs) + print("Done removing artifacts.") + print(f"Cleaned raw data saved in: {out_fif_fname}") + print("IMPORTANT : Please eye-ball the data !!") + else: + print("Projection not applied to raw data.") + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option( + "-i", "--in", dest="raw_in", help="Input raw FIF file", metavar="FILE" + ) + parser.add_option( + "-o", + "--out", + dest="raw_out", + help="Output raw FIF file", + metavar="FILE", + default=None, + ) + parser.add_option( + "-e", + "--no-eog", + dest="eog", + action="store_false", + help="Remove EOG", + default=True, + ) + parser.add_option( + "-c", + "--no-ecg", + dest="ecg", + action="store_false", + help="Remove ECG", + default=True, + ) + parser.add_option( + "-q", + "--quiet", + dest="quiet", + action="store_true", + help="Suppress mne_process_raw output", + default=False, + ) + + options, args = parser.parse_args() + + if options.raw_in is None: + parser.print_help() + sys.exit(1) + + 
raw_in = options.raw_in + raw_out = options.raw_out + eog = options.eog + ecg = options.ecg + quiet = options.quiet + + clean_ecg_eog(raw_in, raw_out, eog=eog, ecg=ecg, quiet=quiet) + + +mne.utils.run_command_if_main() diff --git a/mne/commands/mne_compare_fiff.py b/mne/commands/mne_compare_fiff.py new file mode 100644 index 0000000..c619aa5 --- /dev/null +++ b/mne/commands/mne_compare_fiff.py @@ -0,0 +1,32 @@ +"""Compare FIFF files. + +Examples +-------- +.. code-block:: console + + $ mne compare_fiff test_raw.fif test_raw_sss.fif + +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import sys + +import mne + + +def run(): + """Run command.""" + parser = mne.commands.utils.get_optparser( + __file__, usage="mne compare_fiff " + ) + options, args = parser.parse_args() + if len(args) != 2: + parser.print_help() + sys.exit(1) + mne.viz.compare_fiff(args[0], args[1]) + + +mne.utils.run_command_if_main() diff --git a/mne/commands/mne_compute_proj_ecg.py b/mne/commands/mne_compute_proj_ecg.py new file mode 100644 index 0000000..42038cf --- /dev/null +++ b/mne/commands/mne_compute_proj_ecg.py @@ -0,0 +1,331 @@ +r"""Compute SSP/PCA projections for ECG artifacts. + +Examples +-------- +.. code-block:: console + + $ mne compute_proj_ecg -i sample_audvis_raw.fif -c "MEG 1531" -a \ + --l-freq 1 --h-freq 100 \ + --rej-grad 3000 --rej-mag 4000 --rej-eeg 100 + +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +import sys + +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option( + "-i", "--in", dest="raw_in", help="Input raw FIF file", metavar="FILE" + ) + parser.add_option( + "--tmin", + dest="tmin", + type="float", + help="Time before event in seconds", + default=-0.2, + ) + parser.add_option( + "--tmax", + dest="tmax", + type="float", + help="Time after event in seconds", + default=0.4, + ) + parser.add_option( + "-g", + "--n-grad", + dest="n_grad", + type="int", + help="Number of SSP vectors for gradiometers", + default=2, + ) + parser.add_option( + "-m", + "--n-mag", + dest="n_mag", + type="int", + help="Number of SSP vectors for magnetometers", + default=2, + ) + parser.add_option( + "-e", + "--n-eeg", + dest="n_eeg", + type="int", + help="Number of SSP vectors for EEG", + default=2, + ) + parser.add_option( + "--l-freq", + dest="l_freq", + type="float", + help="Filter low cut-off frequency in Hz", + default=1, + ) + parser.add_option( + "--h-freq", + dest="h_freq", + type="float", + help="Filter high cut-off frequency in Hz", + default=100, + ) + parser.add_option( + "--ecg-l-freq", + dest="ecg_l_freq", + type="float", + help="Filter low cut-off frequency in Hz used for ECG event detection", + default=5, + ) + parser.add_option( + "--ecg-h-freq", + dest="ecg_h_freq", + type="float", + help="Filter high cut-off frequency in Hz used for ECG event detection", + default=35, + ) + parser.add_option( + "-p", + "--preload", + dest="preload", + help="Temporary file used during computation (to save memory)", + default=True, + ) + parser.add_option( + "-a", + "--average", + dest="average", + action="store_true", + help="Compute SSP after averaging", + default=False, + ) + parser.add_option( + "--proj", dest="proj", help="Use SSP projections from a fif file.", default=None + ) + parser.add_option( + "--filtersize", + dest="filter_length", + type="int", + help="Number 
of taps to use for filtering", + default=2048, + ) + parser.add_option( + "-j", + "--n-jobs", + dest="n_jobs", + type="int", + help="Number of jobs to run in parallel", + default=1, + ) + parser.add_option( + "-c", + "--channel", + dest="ch_name", + help="Channel to use for ECG detection (Required if no ECG found)", + default=None, + ) + parser.add_option( + "--rej-grad", + dest="rej_grad", + type="float", + help="Gradiometers rejection parameter in fT/cm (peak to peak amplitude)", + default=2000, + ) + parser.add_option( + "--rej-mag", + dest="rej_mag", + type="float", + help="Magnetometers rejection parameter in fT (peak to peak amplitude)", + default=3000, + ) + parser.add_option( + "--rej-eeg", + dest="rej_eeg", + type="float", + help="EEG rejection parameter in µV (peak to peak amplitude)", + default=50, + ) + parser.add_option( + "--rej-eog", + dest="rej_eog", + type="float", + help="EOG rejection parameter in µV (peak to peak amplitude)", + default=250, + ) + parser.add_option( + "--avg-ref", + dest="avg_ref", + action="store_true", + help="Add EEG average reference proj", + default=False, + ) + parser.add_option( + "--no-proj", + dest="no_proj", + action="store_true", + help="Exclude the SSP projectors currently in the fiff file", + default=False, + ) + parser.add_option( + "--bad", + dest="bad_fname", + help="Text file containing bad channels list (one per line)", + default=None, + ) + parser.add_option( + "--event-id", + dest="event_id", + type="int", + help="ID to use for events", + default=999, + ) + parser.add_option( + "--event-raw", + dest="raw_event_fname", + help="raw file to use for event detection", + default=None, + ) + parser.add_option( + "--tstart", + dest="tstart", + type="float", + help="Start artifact detection after tstart seconds", + default=0.0, + ) + parser.add_option( + "--qrsthr", + dest="qrs_threshold", + type="string", + help="QRS detection threshold. Between 0 and 1. 
Can " + "also be 'auto' for automatic selection", + default="auto", + ) + + options, args = parser.parse_args() + + raw_in = options.raw_in + + if raw_in is None: + parser.print_help() + sys.exit(1) + + tmin = options.tmin + tmax = options.tmax + n_grad = options.n_grad + n_mag = options.n_mag + n_eeg = options.n_eeg + l_freq = options.l_freq + h_freq = options.h_freq + ecg_l_freq = options.ecg_l_freq + ecg_h_freq = options.ecg_h_freq + average = options.average + preload = options.preload + filter_length = options.filter_length + n_jobs = options.n_jobs + ch_name = options.ch_name + reject = dict( + grad=1e-13 * float(options.rej_grad), + mag=1e-15 * float(options.rej_mag), + eeg=1e-6 * float(options.rej_eeg), + eog=1e-6 * float(options.rej_eog), + ) + avg_ref = options.avg_ref + no_proj = options.no_proj + bad_fname = options.bad_fname + event_id = options.event_id + proj_fname = options.proj + raw_event_fname = options.raw_event_fname + tstart = options.tstart + qrs_threshold = options.qrs_threshold + if qrs_threshold != "auto": + try: + qrs_threshold = float(qrs_threshold) + except ValueError: + raise ValueError('qrsthr must be "auto" or a float') + + if bad_fname is not None: + with open(bad_fname) as fid: + bads = [w.rstrip() for w in fid.readlines()] + print(f"Bad channels read : {bads}") + else: + bads = [] + + if raw_in.endswith("_raw.fif") or raw_in.endswith("-raw.fif"): + prefix = raw_in[:-8] + else: + prefix = raw_in[:-4] + + ecg_event_fname = prefix + "_ecg-eve.fif" + + if average: + ecg_proj_fname = prefix + "_ecg_avg-proj.fif" + else: + ecg_proj_fname = prefix + "_ecg-proj.fif" + + raw = mne.io.read_raw_fif(raw_in, preload=preload) + + if raw_event_fname is not None: + raw_event = mne.io.read_raw_fif(raw_event_fname) + else: + raw_event = raw + + flat = None + projs, events = mne.preprocessing.compute_proj_ecg( + raw, + raw_event, + tmin, + tmax, + n_grad, + n_mag, + n_eeg, + l_freq, + h_freq, + average, + filter_length, + n_jobs, + ch_name, + reject, + flat, + bads, + avg_ref, + no_proj, + event_id, + ecg_l_freq, + ecg_h_freq, + tstart, + qrs_threshold, + copy=False, + ) + + raw.close() + + if raw_event_fname is not None: + raw_event.close() + + if proj_fname is not None: + print(f"Including SSP projections from : {proj_fname}") + # append the ecg projs, so they are last in the list + projs = mne.read_proj(proj_fname) + projs + + if isinstance(preload, str) and os.path.exists(preload): + os.remove(preload) + + print(f"Writing ECG projections in {ecg_proj_fname}") + mne.write_proj(ecg_proj_fname, projs) + + print(f"Writing ECG events in {ecg_event_fname}") + mne.write_events(ecg_event_fname, events) + + +mne.utils.run_command_if_main() diff --git a/mne/commands/mne_compute_proj_eog.py b/mne/commands/mne_compute_proj_eog.py new file mode 100644 index 0000000..a1e2679 --- /dev/null +++ b/mne/commands/mne_compute_proj_eog.py @@ -0,0 +1,329 @@ +r"""Compute SSP/PCA projections for EOG artifacts. + +Examples +-------- +.. code-block:: console + + $ mne compute_proj_eog -i sample_audvis_raw.fif -a \ + --l-freq 1 --h-freq 35 \ + --rej-grad 3000 --rej-mag 4000 --rej-eeg 100 + +or + +.. code-block:: console + + $ mne compute_proj_eog -i sample_audvis_raw.fif -a \ + --l-freq 1 --h-freq 35 \ + --rej-grad 3000 --rej-mag 4000 --rej-eeg 100 \ + --proj sample_audvis_ecg-proj.fif + +to exclude ECG artifacts from projection computation. +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
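For reference, a minimal sketch of the underlying library call this command wraps; the file names are hypothetical placeholders and the keyword values mirror the option defaults below.

```python
import mne

raw = mne.io.read_raw_fif("sample_audvis_raw.fif", preload=True)

# Compute EOG SSP projectors plus the detected EOG events.
projs, events = mne.preprocessing.compute_proj_eog(
    raw, n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0
)

mne.write_proj("sample_audvis_eog-proj.fif", projs, overwrite=True)
mne.write_events("sample_audvis_eog-eve.fif", events, overwrite=True)
```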
+ +import os +import sys + +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + parser.add_option( + "-i", "--in", dest="raw_in", help="Input raw FIF file", metavar="FILE" + ) + parser.add_option( + "--tmin", + dest="tmin", + type="float", + help="Time before event in seconds", + default=-0.2, + ) + parser.add_option( + "--tmax", + dest="tmax", + type="float", + help="Time after event in seconds", + default=0.2, + ) + parser.add_option( + "-g", + "--n-grad", + dest="n_grad", + type="int", + help="Number of SSP vectors for gradiometers", + default=2, + ) + parser.add_option( + "-m", + "--n-mag", + dest="n_mag", + type="int", + help="Number of SSP vectors for magnetometers", + default=2, + ) + parser.add_option( + "-e", + "--n-eeg", + dest="n_eeg", + type="int", + help="Number of SSP vectors for EEG", + default=2, + ) + parser.add_option( + "--l-freq", + dest="l_freq", + type="float", + help="Filter low cut-off frequency in Hz", + default=1, + ) + parser.add_option( + "--h-freq", + dest="h_freq", + type="float", + help="Filter high cut-off frequency in Hz", + default=35, + ) + parser.add_option( + "--eog-l-freq", + dest="eog_l_freq", + type="float", + help="Filter low cut-off frequency in Hz used for EOG event detection", + default=1, + ) + parser.add_option( + "--eog-h-freq", + dest="eog_h_freq", + type="float", + help="Filter high cut-off frequency in Hz used for EOG event detection", + default=10, + ) + parser.add_option( + "-p", + "--preload", + dest="preload", + help="Temporary file used during computation (to save memory)", + default=True, + ) + parser.add_option( + "-a", + "--average", + dest="average", + action="store_true", + help="Compute SSP after averaging", + default=False, + ) + parser.add_option( + "--proj", dest="proj", help="Use SSP projections from a fif file.", default=None + ) + parser.add_option( + "--filtersize", + dest="filter_length", + type="int", + help="Number of taps to use for filtering", + default=2048, + ) + parser.add_option( + "-j", + "--n-jobs", + dest="n_jobs", + type="int", + help="Number of jobs to run in parallel", + default=1, + ) + parser.add_option( + "--rej-grad", + dest="rej_grad", + type="float", + help="Gradiometers rejection parameter in fT/cm (peak to peak amplitude)", + default=2000, + ) + parser.add_option( + "--rej-mag", + dest="rej_mag", + type="float", + help="Magnetometers rejection parameter in fT (peak to peak amplitude)", + default=3000, + ) + parser.add_option( + "--rej-eeg", + dest="rej_eeg", + type="float", + help="EEG rejection parameter in µV (peak to peak amplitude)", + default=50, + ) + parser.add_option( + "--rej-eog", + dest="rej_eog", + type="float", + help="EOG rejection parameter in µV (peak to peak amplitude)", + default=1e9, + ) + parser.add_option( + "--avg-ref", + dest="avg_ref", + action="store_true", + help="Add EEG average reference proj", + default=False, + ) + parser.add_option( + "--no-proj", + dest="no_proj", + action="store_true", + help="Exclude the SSP projectors currently in the fiff file", + default=False, + ) + parser.add_option( + "--bad", + dest="bad_fname", + help="Text file containing bad channels list (one per line)", + default=None, + ) + parser.add_option( + "--event-id", + dest="event_id", + type="int", + help="ID to use for events", + default=998, + ) + parser.add_option( + "--event-raw", + dest="raw_event_fname", + help="raw file to use for event detection", + default=None, + ) + parser.add_option( + "--tstart", + 
dest="tstart", + type="float", + help="Start artifact detection after tstart seconds", + default=0.0, + ) + parser.add_option( + "-c", + "--channel", + dest="ch_name", + type="string", + help="Custom EOG channel(s), comma separated", + default=None, + ) + + options, args = parser.parse_args() + + raw_in = options.raw_in + + if raw_in is None: + parser.print_help() + sys.exit(1) + + tmin = options.tmin + tmax = options.tmax + n_grad = options.n_grad + n_mag = options.n_mag + n_eeg = options.n_eeg + l_freq = options.l_freq + h_freq = options.h_freq + eog_l_freq = options.eog_l_freq + eog_h_freq = options.eog_h_freq + average = options.average + preload = options.preload + filter_length = options.filter_length + n_jobs = options.n_jobs + reject = dict( + grad=1e-13 * float(options.rej_grad), + mag=1e-15 * float(options.rej_mag), + eeg=1e-6 * float(options.rej_eeg), + eog=1e-6 * float(options.rej_eog), + ) + avg_ref = options.avg_ref + no_proj = options.no_proj + bad_fname = options.bad_fname + event_id = options.event_id + proj_fname = options.proj + raw_event_fname = options.raw_event_fname + tstart = options.tstart + ch_name = options.ch_name + + if bad_fname is not None: + with open(bad_fname) as fid: + bads = [w.rstrip() for w in fid.readlines()] + print(f"Bad channels read : {bads}") + else: + bads = [] + + if raw_in.endswith("_raw.fif") or raw_in.endswith("-raw.fif"): + prefix = raw_in[:-8] + else: + prefix = raw_in[:-4] + + eog_event_fname = prefix + "_eog-eve.fif" + + if average: + eog_proj_fname = prefix + "_eog_avg-proj.fif" + else: + eog_proj_fname = prefix + "_eog-proj.fif" + + raw = mne.io.read_raw_fif(raw_in, preload=preload) + + if raw_event_fname is not None: + raw_event = mne.io.read_raw_fif(raw_event_fname) + else: + raw_event = raw + + flat = None + projs, events = mne.preprocessing.compute_proj_eog( + raw=raw, + raw_event=raw_event, + tmin=tmin, + tmax=tmax, + n_grad=n_grad, + n_mag=n_mag, + n_eeg=n_eeg, + l_freq=l_freq, + h_freq=h_freq, + average=average, + filter_length=filter_length, + n_jobs=n_jobs, + reject=reject, + flat=flat, + bads=bads, + avg_ref=avg_ref, + no_proj=no_proj, + event_id=event_id, + eog_l_freq=eog_l_freq, + eog_h_freq=eog_h_freq, + tstart=tstart, + ch_name=ch_name, + copy=False, + ) + + raw.close() + + if raw_event_fname is not None: + raw_event.close() + + if proj_fname is not None: + print(f"Including SSP projections from : {proj_fname}") + # append the eog projs, so they are last in the list + projs = mne.read_proj(proj_fname) + projs + + if isinstance(preload, str) and os.path.exists(preload): + os.remove(preload) + + print(f"Writing EOG projections in {eog_proj_fname}") + mne.write_proj(eog_proj_fname, projs) + + print(f"Writing EOG events in {eog_event_fname}") + mne.write_events(eog_event_fname, events) + + +is_main = __name__ == "__main__" +if is_main: + run() diff --git a/mne/commands/mne_coreg.py b/mne/commands/mne_coreg.py new file mode 100644 index 0000000..d288c7b --- /dev/null +++ b/mne/commands/mne_coreg.py @@ -0,0 +1,116 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Open the coregistration GUI. + +Examples +-------- +.. 
code-block:: console + + $ mne coreg + +""" + +import os.path as op + +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import _add_verbose_flag, get_optparser + + parser = get_optparser(__file__) + + parser.add_option( + "-d", + "--subjects-dir", + dest="subjects_dir", + default=None, + help="Subjects directory", + ) + parser.add_option( + "-s", "--subject", dest="subject", default=None, help="Subject name" + ) + parser.add_option( + "-f", + "--fiff", + dest="inst", + default=None, + help="FIFF file with digitizer data for coregistration", + ) + parser.add_option( + "--head-opacity", + type=float, + default=None, + dest="head_opacity", + help="The opacity of the head surface, in the range [0, 1].", + ) + parser.add_option( + "--high-res-head", + action="store_true", + default=False, + dest="high_res_head", + help="Use a high-resolution head surface.", + ) + parser.add_option( + "--low-res-head", + action="store_true", + default=False, + dest="low_res_head", + help="Use a low-resolution head surface.", + ) + parser.add_option( + "--trans", + dest="trans", + default=None, + help='Head<->MRI transform FIF file ("-trans.fif")', + ) + parser.add_option( + "--interaction", + type=str, + default=None, + dest="interaction", + help='Interaction style to use, can be "trackball" or "terrain".', + ) + _add_verbose_flag(parser) + + options, args = parser.parse_args() + + if options.low_res_head: + if options.high_res_head: + raise ValueError( + "Can't specify --high-res-head and --low-res-head at the same time." + ) + head_high_res = False + elif options.high_res_head: + head_high_res = True + else: + head_high_res = None + + # expanduser allows ~ for --subjects-dir + subjects_dir = options.subjects_dir + if subjects_dir is not None: + subjects_dir = op.expanduser(subjects_dir) + trans = options.trans + if trans is not None: + trans = op.expanduser(trans) + import faulthandler + + faulthandler.enable() + mne.gui.coregistration( + inst=options.inst, + subject=options.subject, + subjects_dir=subjects_dir, + head_opacity=options.head_opacity, + head_high_res=head_high_res, + trans=trans, + interaction=options.interaction, + show=True, + block=True, + verbose=options.verbose, + ) + + +mne.utils.run_command_if_main() diff --git a/mne/commands/mne_flash_bem.py b/mne/commands/mne_flash_bem.py new file mode 100644 index 0000000..b6c7a1b --- /dev/null +++ b/mne/commands/mne_flash_bem.py @@ -0,0 +1,194 @@ +"""Create 3-layer BEM model from Flash MRI images. + +Examples +-------- +.. code-block:: console + + $ mne flash_bem --subject=sample + $ mne flash_bem -s sample -n --registered -5 sample/mri/mef05.mgz -3 sample/mri/mef30.mgz + $ mne flash_bem -s sample -n --registered -5 sample/mri/flash/mef05_*.mgz -3 sample/mri/flash/mef30_*.mgz + +Notes +----- +This program assumes that FreeSurfer and MNE are installed and +sourced properly. + +This function extracts the BEM surfaces (outer skull, inner skull, and +outer skin) from multiecho FLASH MRI data with spin angles of 5 and 30 +degrees. The multiecho FLASH data can be input as .mgz or .nii files. +This function assumes that the Freesurfer segmentation of the subject +has been completed. In particular, the T1.mgz and brain.mgz MRI volumes +should be, as usual, in the subject's mri directory. + +""" # noqa E501 + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
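Under the assumptions above (FreeSurfer recon completed, FLASH volumes in place), a sketch of the equivalent Python calls; the subject name is a hypothetical example:

```python
from mne.bem import convert_flash_mris, make_flash_bem

# Hypothetical FreeSurfer subject; assumes SUBJECTS_DIR is set in the environment.
flash5_img = convert_flash_mris(subject="sample", flash30=True, unwarp=False)

# Extract the inner skull, outer skull, and outer skin surfaces.
make_flash_bem(subject="sample", register=True, show=True, flash5_img=flash5_img)
```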
+
+import mne
+from mne.bem import convert_flash_mris, make_flash_bem
+
+
+def _vararg_callback(option, opt_str, value, parser):
+    assert value is None
+    del opt_str  # required for input but not used
+    value = []
+
+    for arg in parser.rargs:
+        # stop on --foo like options
+        if arg[:2] == "--" and len(arg) > 2:
+            break
+        if arg[:1] == "-" and len(arg) > 1:
+            break
+        value.append(arg)
+
+    del parser.rargs[: len(value)]
+    setattr(parser.values, option.dest, value)
+
+
+def run():
+    """Run command."""
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option(
+        "-s", "--subject", dest="subject", help="Subject name", default=None
+    )
+    parser.add_option(
+        "-d",
+        "--subjects-dir",
+        dest="subjects_dir",
+        help="Subjects directory",
+        default=None,
+    )
+    parser.add_option(
+        "-3",
+        "--flash30",
+        "--noflash30",
+        dest="flash30",
+        action="callback",
+        callback=_vararg_callback,
+        help=(
+            "The 30-degree flip angle data. If passed with no arguments, "
+            "flash30 is not used. If file names are given, they are used as "
+            "the 30-degree flip angle data."
+        ),
+    )
+    parser.add_option(
+        "-5",
+        "--flash5",
+        dest="flash5",
+        action="callback",
+        callback=_vararg_callback,
+        help=("Path to the multiecho flash 5 images. Can be one file or one per echo."),
+    )
+    parser.add_option(
+        "-r",
+        "--registered",
+        dest="registered",
+        action="store_true",
+        default=False,
+        help=(
+            "Set if the Flash MRI images have already "
+            "been registered with the T1.mgz file."
+        ),
+    )
+    parser.add_option(
+        "-n",
+        "--noconvert",
+        dest="noconvert",
+        action="store_true",
+        default=False,
+        help=(
+            "[DEPRECATED] Assume that the Flash MRI images "
+            "have already been converted to mgz files"
+        ),
+    )
+    parser.add_option(
+        "-u",
+        "--unwarp",
+        dest="unwarp",
+        action="store_true",
+        default=False,
+        help=(
+            "Run grad_unwarp with -unwarp <type> "
+            "option on each of the converted data sets"
+        ),
+    )
+    parser.add_option(
+        "-o",
+        "--overwrite",
+        dest="overwrite",
+        action="store_true",
+        default=False,
+        help="Write over existing .surf files in bem folder",
+    )
+    parser.add_option(
+        "-v",
+        "--view",
+        dest="show",
+        action="store_true",
+        help="Show BEM model in 3D for visual inspection",
+        default=False,
+    )
+    parser.add_option(
+        "--copy",
+        dest="copy",
+        help="Use copies instead of symlinks for surfaces",
+        action="store_true",
+    )
+    parser.add_option(
+        "-p",
+        "--flash-path",
+        dest="flash_path",
+        default=None,
+        help="[DEPRECATED] The directory containing flash5.mgz "
+        "files (defaults to "
+        "$SUBJECTS_DIR/$SUBJECT/mri/flash/parameter_maps)",
+    )
+
+    options, _ = parser.parse_args()
+
+    subject = options.subject
+    subjects_dir = options.subjects_dir
+    flash5 = options.flash5
+    if flash5 is None or len(flash5) == 0:
+        flash5 = True
+    flash30 = options.flash30
+    if flash30 is None:
+        flash30 = True
+    elif len(flash30) == 0:
+        flash30 = False
+    register = not options.registered
+    unwarp = options.unwarp
+    overwrite = options.overwrite
+    show = options.show
+    copy = options.copy
+
+    if options.subject is None:
+        parser.print_help()
+        raise RuntimeError("The subject argument must be set")
+
+    flash5_img = convert_flash_mris(
+        subject=subject,
+        subjects_dir=subjects_dir,
+        flash5=flash5,
+        flash30=flash30,
+        unwarp=unwarp,
+        verbose=True,
+    )
+    make_flash_bem(
+        subject=subject,
+        subjects_dir=subjects_dir,
+        overwrite=overwrite,
+        show=show,
+        copy=copy,
+        register=register,
+        flash5_img=flash5_img,
+        verbose=True,
+    )
+
+
+mne.utils.run_command_if_main()
diff --git 
a/mne/commands/mne_freeview_bem_surfaces.py b/mne/commands/mne_freeview_bem_surfaces.py new file mode 100644 index 0000000..4edf146 --- /dev/null +++ b/mne/commands/mne_freeview_bem_surfaces.py @@ -0,0 +1,114 @@ +"""View the 3-Layers BEM model using Freeview. + +Examples +-------- +.. code-block:: console + + $ mne freeview_bem_surfaces -s sample + +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +import os.path as op +import sys + +import mne +from mne.utils import get_subjects_dir, run_subprocess + + +def freeview_bem_surfaces(subject, subjects_dir, method=None): + """View 3-Layers BEM model with Freeview. + + Parameters + ---------- + subject : str + Subject name + subjects_dir : path-like + Directory containing subjects data (Freesurfer SUBJECTS_DIR) + method : str | None + Can be ``'flash'`` or ``'watershed'``, or None to use the ``bem/`` directory + files. + """ + subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) + + if subject is None: + raise ValueError("subject argument is None.") + + subject_dir = op.join(subjects_dir, subject) + + if not op.isdir(subject_dir): + raise ValueError( + f"Wrong path: '{subject_dir}'. Check subjects-dir or subject argument." + ) + + env = os.environ.copy() + env["SUBJECT"] = subject + env["SUBJECTS_DIR"] = subjects_dir + + if "FREESURFER_HOME" not in env: + raise RuntimeError("The FreeSurfer environment needs to be set up.") + + mri_dir = op.join(subject_dir, "mri") + bem_dir = op.join(subject_dir, "bem") + mri = op.join(mri_dir, "T1.mgz") + + if method == "watershed": + bem_dir = op.join(bem_dir, "watershed") + outer_skin = op.join(bem_dir, f"{subject}_outer_skin_surface") + outer_skull = op.join(bem_dir, f"{subject}_outer_skull_surface") + inner_skull = op.join(bem_dir, f"{subject}_inner_skull_surface") + else: + if method == "flash": + bem_dir = op.join(bem_dir, "flash") + outer_skin = op.join(bem_dir, "outer_skin.surf") + outer_skull = op.join(bem_dir, "outer_skull.surf") + inner_skull = op.join(bem_dir, "inner_skull.surf") + + # put together the command + cmd = ["freeview"] + cmd += ["--volume", mri] + cmd += ["--surface", f"{inner_skull}:color=red:edgecolor=red"] + cmd += ["--surface", f"{outer_skull}:color=yellow:edgecolor=yellow"] + cmd += ["--surface", f"{outer_skin}:color=255,170,127:edgecolor=255,170,127"] + + run_subprocess(cmd, env=env, stdout=sys.stdout) + print("[done]") + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__) + + subject = os.environ.get("SUBJECT") + parser.add_option( + "-s", "--subject", dest="subject", help="Subject name", default=subject + ) + parser.add_option( + "-d", + "--subjects-dir", + dest="subjects_dir", + help="Subjects directory", + ) + parser.add_option( + "-m", + "--method", + dest="method", + help="Method used to generate the BEM model. Can be flash or watershed.", + ) + + options, args = parser.parse_args() + + subject = options.subject + subjects_dir = options.subjects_dir + method = options.method + + freeview_bem_surfaces(subject, subjects_dir, method) + + +mne.utils.run_command_if_main() diff --git a/mne/commands/mne_kit2fiff.py b/mne/commands/mne_kit2fiff.py new file mode 100644 index 0000000..a6874fe --- /dev/null +++ b/mne/commands/mne_kit2fiff.py @@ -0,0 +1,114 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Import KIT / NYU data to fif file. 
+
+Examples
+--------
+.. code-block:: console
+
+    $ mne kit2fiff --input input.sqd --output output.fif
+
+Use without arguments to invoke GUI:
+
+.. code-block:: console
+
+    $ mne kit2fiff
+
+"""
+
+import sys
+
+import mne
+from mne.io import read_raw_kit
+
+
+def run():
+    """Run command."""
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option(
+        "--input", dest="input_fname", help="Input data file name", metavar="filename"
+    )
+    parser.add_option(
+        "--mrk", dest="mrk_fname", help="MEG Marker file name", metavar="filename"
+    )
+    parser.add_option(
+        "--elp", dest="elp_fname", help="Headshape points file name", metavar="filename"
+    )
+    parser.add_option(
+        "--hsp", dest="hsp_fname", help="Headshape file name", metavar="filename"
+    )
+    parser.add_option(
+        "--stim",
+        dest="stim",
+        help="Colon-separated stimulus trigger channels",
+        metavar="chs",
+    )
+    parser.add_option("--slope", dest="slope", help="Slope direction", metavar="slope")
+    parser.add_option(
+        "--stimthresh",
+        dest="stimthresh",
+        default=1,
+        help="Threshold value for trigger channels",
+        metavar="value",
+    )
+    parser.add_option(
+        "--output",
+        dest="out_fname",
+        help="Name of the resulting fiff file",
+        metavar="filename",
+    )
+    parser.add_option(
+        "--debug",
+        dest="debug",
+        action="store_true",
+        default=False,
+        help="Set logging level for terminal output to debug",
+    )
+
+    options, args = parser.parse_args()
+
+    if options.debug:
+        mne.set_log_level("debug")
+
+    input_fname = options.input_fname
+    if input_fname is None:
+        try:
+            from mne_kit_gui import kit2fiff  # noqa
+        except ImportError:
+            raise ImportError(
+                "The mne-kit-gui package is required, install it using conda or pip"
+            ) from None
+        kit2fiff()
+        sys.exit(0)
+
+    hsp_fname = options.hsp_fname
+    elp_fname = options.elp_fname
+    mrk_fname = options.mrk_fname
+    stim = options.stim
+    slope = options.slope
+    stimthresh = options.stimthresh
+    out_fname = options.out_fname
+
+    if isinstance(stim, str):
+        stim = map(int, stim.split(":"))
+
+    raw = read_raw_kit(
+        input_fname=input_fname,
+        mrk=mrk_fname,
+        elp=elp_fname,
+        hsp=hsp_fname,
+        stim=stim,
+        slope=slope,
+        stimthresh=stimthresh,
+    )
+
+    raw.save(out_fname)
+    raw.close()
+
+
+mne.utils.run_command_if_main()
diff --git a/mne/commands/mne_make_scalp_surfaces.py b/mne/commands/mne_make_scalp_surfaces.py
new file mode 100644
index 0000000..5b7d020
--- /dev/null
+++ b/mne/commands/mne_make_scalp_surfaces.py
@@ -0,0 +1,97 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""Create high-resolution head surfaces for coordinate alignment.
+
+Examples
+--------
+..
code-block:: console + + $ mne make_scalp_surfaces --overwrite --subject sample + +""" + +import os +import sys + +import mne +from mne.bem import make_scalp_surfaces + + +def run(): + """Run command.""" + from mne.commands.utils import _add_verbose_flag, get_optparser + + parser = get_optparser(__file__) + subjects_dir = mne.get_config("SUBJECTS_DIR") + + parser.add_option( + "-o", + "--overwrite", + dest="overwrite", + action="store_true", + help="Overwrite previously computed surface", + ) + parser.add_option( + "-s", "--subject", dest="subject", help="The name of the subject", type="str" + ) + parser.add_option( + "-m", + "--mri", + dest="mri", + type="str", + default="T1.mgz", + help="The MRI file to process using mkheadsurf.", + ) + parser.add_option( + "-f", + "--force", + dest="force", + action="store_true", + help="Force creation of the surface even if it has " + "some topological defects.", + ) + parser.add_option( + "-t", + "--threshold", + dest="threshold", + type="int", + default=20, + help="Threshold value to use with the MRI.", + ) + parser.add_option( + "-d", + "--subjects-dir", + dest="subjects_dir", + help="Subjects directory", + default=subjects_dir, + ) + parser.add_option( + "-n", + "--no-decimate", + dest="no_decimate", + help="Disable medium and sparse decimations (dense only)", + action="store_true", + ) + _add_verbose_flag(parser) + options, args = parser.parse_args() + + subject = vars(options).get("subject", os.getenv("SUBJECT")) + subjects_dir = options.subjects_dir + if subject is None or subjects_dir is None: + parser.print_help() + sys.exit(1) + make_scalp_surfaces( + subject=subject, + subjects_dir=subjects_dir, + force=options.force, + overwrite=options.overwrite, + no_decimate=options.no_decimate, + threshold=options.threshold, + mri=options.mri, + verbose=options.verbose, + ) + + +mne.utils.run_command_if_main() diff --git a/mne/commands/mne_prepare_bem_model.py b/mne/commands/mne_prepare_bem_model.py new file mode 100644 index 0000000..3a830e5 --- /dev/null +++ b/mne/commands/mne_prepare_bem_model.py @@ -0,0 +1,66 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Create a BEM solution using the linear collocation approach. + +Examples +-------- +.. code-block:: console + + $ mne prepare_bem_model --bem sample-5120-5120-5120-bem.fif + +""" + +import os +import sys + +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import _add_verbose_flag, get_optparser + + parser = get_optparser(__file__) + + parser.add_option( + "--bem", + dest="bem_fname", + help="The name of the file containing the " + "triangulations of the BEM surfaces and the " + "conductivities of the compartments. The standard " + "ending for this file is -bem.fif.", + metavar="FILE", + ) + parser.add_option( + "--sol", + dest="bem_sol_fname", + help="The name of the resulting file containing BEM " + "solution (geometry matrix). It uses the linear " + "collocation approach. 
The file should end with "
+        "-bem-sol.fif.",
+        metavar="FILE",
+        default=None,
+    )
+    _add_verbose_flag(parser)
+
+    options, args = parser.parse_args()
+    bem_fname = options.bem_fname
+    bem_sol_fname = options.bem_sol_fname
+    verbose = True if options.verbose is not None else False
+
+    if bem_fname is None:
+        parser.print_help()
+        sys.exit(1)
+
+    if bem_sol_fname is None:
+        base, _ = os.path.splitext(bem_fname)
+        bem_sol_fname = base + "-sol.fif"
+
+    bem_model = mne.read_bem_surfaces(bem_fname, patch_stats=False, verbose=verbose)
+    bem_solution = mne.make_bem_solution(bem_model, verbose=verbose)
+    mne.write_bem_solution(bem_sol_fname, bem_solution)
+
+
+mne.utils.run_command_if_main()
diff --git a/mne/commands/mne_report.py b/mne/commands/mne_report.py
new file mode 100644
index 0000000..eec37c4
--- /dev/null
+++ b/mne/commands/mne_report.py
@@ -0,0 +1,200 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+r"""Create an MNE report for a folder.
+
+Examples
+--------
+Before getting started with ``mne report``, make sure the files you want to
+render follow the filename conventions defined by MNE:
+
+.. highlight:: console
+
+.. cssclass:: table-bordered
+.. rst-class:: midvalign
+
+============ ==============================================================
+Data object  Filename convention (ends with)
+============ ==============================================================
+raw          -raw.fif(.gz), -raw_sss.fif(.gz), -raw_tsss.fif(.gz),
+             _meg.fif(.gz), _eeg.fif(.gz), _ieeg.fif(.gz)
+events       -eve.fif(.gz)
+epochs       -epo.fif(.gz)
+evoked       -ave.fif(.gz)
+covariance   -cov.fif(.gz)
+trans        -trans.fif(.gz)
+forward      -fwd.fif(.gz)
+inverse      -inv.fif(.gz)
+============ ==============================================================
+
+To generate a barebones report from all the \*.fif files in the sample
+dataset, invoke the following command in a system (e.g., Bash) shell::
+
+    $ mne report --path MNE-sample-data/ --verbose
+
+On successful creation of the report, it will open the HTML in a new tab in
+the browser. To disable this, use the ``--no-browser`` option.
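+
+The figures are embedded as PNG by default; to embed them as SVG instead,
+pass the ``--image-format`` option (defined below, accepting ``png`` or
+``svg``), for example::
+
+    $ mne report --path MNE-sample-data/ --image-format svg --verbose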
+
+To generate a report for a single subject, give the ``SUBJECT`` name and
+the ``SUBJECTS_DIR``; this will generate the MRI slices (with BEM
+contours overlaid on top if available)::
+
+    $ mne report --path MNE-sample-data/ --subject sample --subjects-dir \
+        MNE-sample-data/subjects --verbose
+
+To properly render ``trans`` and ``covariance`` files, add the measurement
+information::
+
+    $ mne report --path MNE-sample-data/ \
+        --info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \
+        --subject sample --subjects-dir MNE-sample-data/subjects --verbose
+
+To render whitened ``evoked`` files with baseline correction, add the noise
+covariance file::
+
+    $ mne report --path MNE-sample-data/ \
+        --info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \
+        --cov MNE-sample-data/MEG/sample/sample_audvis-cov.fif --bmax 0 \
+        --subject sample --subjects-dir MNE-sample-data/subjects --verbose
+
+To generate the report in parallel::
+
+    $ mne report --path MNE-sample-data/ \
+        --info MNE-sample-data/MEG/sample/sample_audvis-ave.fif \
+        --subject sample --subjects-dir MNE-sample-data/subjects \
+        --verbose --jobs 6
+
+For help on all the available options, do::
+
+    $ mne report --help
+"""
+
+import sys
+import time
+
+import mne
+from mne.report import Report
+from mne.utils import logger, verbose
+
+
+@verbose
+def log_elapsed(t, verbose=None):
+    """Log elapsed time."""
+    logger.info(f"Report complete in {round(t, 1)} seconds")
+
+
+def run():
+    """Run command."""
+    from mne.commands.utils import _add_verbose_flag, get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option(
+        "-p",
+        "--path",
+        dest="path",
+        help="Path to the folder for which the MNE report must be created",
+    )
+    parser.add_option(
+        "-i",
+        "--info",
+        dest="info_fname",
+        help="File from which info dictionary is to be read",
+        metavar="FILE",
+    )
+    parser.add_option(
+        "-c",
+        "--cov",
+        dest="cov_fname",
+        help="File from which noise covariance is to be read",
+        metavar="FILE",
+    )
+    parser.add_option(
+        "--bmin",
+        dest="bmin",
+        help="Time at which baseline correction starts for evokeds",
+        default=None,
+    )
+    parser.add_option(
+        "--bmax",
+        dest="bmax",
+        help="Time at which baseline correction stops for evokeds",
+        default=None,
+    )
+    parser.add_option(
+        "-d", "--subjects-dir", dest="subjects_dir", help="The subjects directory"
+    )
+    parser.add_option("-s", "--subject", dest="subject", help="The subject name")
+    parser.add_option(
+        "--no-browser",
+        dest="no_browser",
+        action="store_false",
+        help="Do not open MNE-Report in browser",
+    )
+    parser.add_option(
+        "--overwrite",
+        dest="overwrite",
+        action="store_false",
+        help="Overwrite HTML report if it already exists",
+    )
+    parser.add_option(
+        "-j", "--jobs", dest="n_jobs", help="Number of jobs to run in parallel"
+    )
+    parser.add_option(
+        "-m",
+        "--mri-decim",
+        type="int",
+        dest="mri_decim",
+        default=2,
+        help="Integer factor used to decimate BEM plots",
+    )
+    parser.add_option(
+        "--image-format",
+        type="str",
+        dest="image_format",
+        default="png",
+        help="Image format to use (can be 'png' or 'svg')",
+    )
+    _add_verbose_flag(parser)
+
+    options, args = parser.parse_args()
+    path = options.path
+    if path is None:
+        parser.print_help()
+        sys.exit(1)
+    info_fname = options.info_fname
+    cov_fname = options.cov_fname
+    subjects_dir = options.subjects_dir
+    subject = options.subject
+    image_format = options.image_format
+    mri_decim = int(options.mri_decim)
+    verbose = True if options.verbose is not None else False
+    open_browser = False if options.no_browser is not None else True
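+    # Note: --no-browser and --overwrite are declared above with
+    # action="store_false" and an implicit default of None, so each one is
+    # None unless the user actually passed the flag; the ternaries here and
+    # below just collapse that tri-state into a plain bool.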
+    overwrite = True if options.overwrite is not None else False
+    n_jobs = int(options.n_jobs) if options.n_jobs is not None else 1
+
+    bmin = float(options.bmin) if options.bmin is not None else None
+    bmax = float(options.bmax) if options.bmax is not None else None
+    # XXX: this means (None, None) cannot be specified through command line
+    if bmin is None and bmax is None:
+        baseline = None
+    else:
+        baseline = (bmin, bmax)
+
+    t0 = time.time()
+    report = Report(
+        info_fname,
+        subjects_dir=subjects_dir,
+        subject=subject,
+        baseline=baseline,
+        cov_fname=cov_fname,
+        verbose=verbose,
+        image_format=image_format,
+    )
+    report.parse_folder(path, verbose=verbose, n_jobs=n_jobs, mri_decim=mri_decim)
+    log_elapsed(time.time() - t0, verbose=verbose)
+    report.save(open_browser=open_browser, overwrite=overwrite)
+
+
+mne.utils.run_command_if_main()
diff --git a/mne/commands/mne_setup_forward_model.py b/mne/commands/mne_setup_forward_model.py
new file mode 100644
index 0000000..29e8616
--- /dev/null
+++ b/mne/commands/mne_setup_forward_model.py
@@ -0,0 +1,150 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""Create a BEM model for a subject.
+
+Examples
+--------
+.. code-block:: console
+
+    $ mne setup_forward_model -s 'sample'
+
+"""
+
+import os
+import sys
+
+import mne
+from mne.utils import get_subjects_dir, warn
+
+
+def run():
+    """Run command."""
+    from mne.commands.utils import _add_verbose_flag, get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option(
+        "-s", "--subject", dest="subject", help="Subject name (required)", default=None
+    )
+    parser.add_option(
+        "--model",
+        dest="model",
+        help="Output file name. Use a name <dir>/<name>-bem.fif",
+        default=None,
+        type="string",
+    )
+    parser.add_option(
+        "--ico",
+        dest="ico",
+        help="The surface ico downsampling to use, e.g. "
+        " 5=20484, 4=5120, 3=1280. If None, no subsampling"
+        " is applied.",
+        default=None,
+        type="int",
+    )
+    parser.add_option(
+        "--brainc",
+        dest="brainc",
+        help="Defines the brain compartment conductivity. "
+        "The default value is 0.3 S/m.",
+        default=0.3,
+        type="float",
+    )
+    parser.add_option(
+        "--skullc",
+        dest="skullc",
+        help="Defines the skull compartment conductivity. "
+        "The default value is 0.006 S/m.",
+        default=None,
+        type="float",
+    )
+    parser.add_option(
+        "--scalpc",
+        dest="scalpc",
+        help="Defines the scalp compartment conductivity. "
+        "The default value is 0.3 S/m.",
+        default=None,
+        type="float",
+    )
+    parser.add_option(
+        "--homog",
+        dest="homog",
+        help="Use a single compartment model (brain only) "
+        "instead of a three-layer one (scalp, skull, and brain). "
+        "If this flag is specified, the options "
+        "--skullc and --scalpc are irrelevant.",
+        default=None,
+        action="store_true",
+    )
+    parser.add_option(
+        "-d",
+        "--subjects-dir",
+        dest="subjects_dir",
+        help="Subjects directory",
+        default=None,
+    )
+    _add_verbose_flag(parser)
+    options, args = parser.parse_args()
+
+    if options.subject is None:
+        parser.print_help()
+        sys.exit(1)
+
+    subject = options.subject
+    fname = options.model
+    subjects_dir = options.subjects_dir
+    ico = options.ico
+    brainc = options.brainc
+    skullc = options.skullc
+    scalpc = options.scalpc
+    homog = True if options.homog is not None else False
+    verbose = True if options.verbose is not None else False
+    # Parse conductivity option
+    if homog is True:
+        if skullc is not None:
+            warn(
+                "Trying to set the skull conductivity for a single layer "
+                "model. To use a 3 layer model, do not set the --homog flag."
+            )
+        if scalpc is not None:
+            warn(
+                "Trying to set the scalp conductivity for a single layer "
+                "model. To use a 3 layer model, do not set the --homog flag."
+            )
+        # Single layer
+        conductivity = [brainc]
+    else:
+        if skullc is None:
+            skullc = 0.006
+        if scalpc is None:
+            scalpc = 0.3
+        conductivity = [brainc, skullc, scalpc]
+    # Create the BEM model
+    bem_model = mne.make_bem_model(
+        subject,
+        ico=ico,
+        conductivity=conductivity,
+        subjects_dir=subjects_dir,
+        verbose=verbose,
+    )
+    # Generate filename
+    if fname is None:
+        n_faces = list(str(len(surface["tris"])) for surface in bem_model)
+        fname = subject + "-" + "-".join(n_faces) + "-bem.fif"
+    else:
+        if not (fname.endswith("-bem.fif") or fname.endswith("_bem.fif")):
+            fname = fname + "-bem.fif"
+    # Save to subject's directory
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    fname = subjects_dir / subject / "bem" / fname
+    # Save the BEM surfaces to file
+    mne.write_bem_surfaces(fname, bem_model)
+    # Compute the solution
+    sol_fname = os.path.splitext(str(fname))[0] + "-sol.fif"
+    bem_sol = mne.make_bem_solution(bem_model, verbose=verbose)
+    mne.write_bem_solution(sol_fname, bem_sol)
+
+
+mne.utils.run_command_if_main()
diff --git a/mne/commands/mne_setup_source_space.py b/mne/commands/mne_setup_source_space.py
new file mode 100644
index 0000000..e536a59
--- /dev/null
+++ b/mne/commands/mne_setup_source_space.py
@@ -0,0 +1,183 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""Set up bilateral hemisphere surface-based source space with subsampling.
+
+Examples
+--------
+.. code-block:: console
+
+    $ mne setup_source_space --subject sample
+
+.. note:: Only one of the --ico, --oct, or --spacing options can be set at
+          the same time. Defaults to oct6.
+
+"""
+
+import sys
+
+import mne
+from mne.utils import _check_option
+
+
+def run():
+    """Run command."""
+    from mne.commands.utils import _add_verbose_flag, get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option(
+        "-s", "--subject", dest="subject", help="Subject name (required)", default=None
+    )
+    parser.add_option(
+        "--src",
+        dest="fname",
+        help="Output file name. Use a name <dir>/<name>-src.fif",
+        metavar="FILE",
+        default=None,
+    )
+    parser.add_option(
+        "--morph",
+        dest="subject_to",
+        help="Morph the source space to this subject",
+        default=None,
+    )
+    parser.add_option(
+        "--surf",
+        dest="surface",
+        help="The surface to use (defaults to white)",
+        default="white",
+        type="string",
+    )
+    parser.add_option(
+        "--spacing",
+        dest="spacing",
+        help="Specifies the approximate grid spacing of the "
+        "source space in mm. (defaults to 7 mm)",
+        default=None,
+        type="int",
+    )
+    parser.add_option(
+        "--ico",
+        dest="ico",
+        help="Use the recursively subdivided icosahedron "
+        "to create the source space.",
+        default=None,
+        type="int",
+    )
+    parser.add_option(
+        "--oct",
+        dest="oct",
+        help="Use the recursively subdivided octahedron to create the source space.",
+        default=None,
+        type="int",
+    )
+    parser.add_option(
+        "-d",
+        "--subjects-dir",
+        dest="subjects_dir",
+        help="Subjects directory",
+        default=None,
+    )
+    parser.add_option(
+        "-n",
+        "--n-jobs",
+        dest="n_jobs",
+        help="The number of jobs to run in parallel "
+        "(default 1). Requires the joblib package. "
+        "Will use at most 2 jobs"
+        " (one for each hemisphere).",
+        default=1,
+        type="int",
+    )
+    parser.add_option(
+        "--add-dist",
+        dest="add_dist",
+        help='Add distances. Can be "True", "False", or "patch" '
+        "to only compute cortical patch statistics (like the --cps option in MNE-C)",
+        default="True",
+    )
+    parser.add_option(
+        "-o",
+        "--overwrite",
+        dest="overwrite",
+        help="Write over existing files",
+        default=None,
+        action="store_true",
+    )
+    _add_verbose_flag(parser)
+
+    options, args = parser.parse_args()
+
+    if options.subject is None:
+        parser.print_help()
+        sys.exit(1)
+
+    subject = options.subject
+    subject_to = options.subject_to
+    fname = options.fname
+    subjects_dir = options.subjects_dir
+    spacing = options.spacing
+    ico = options.ico
+    oct_ = options.oct
+    surface = options.surface
+    n_jobs = options.n_jobs
+    add_dist = options.add_dist
+    _check_option("add_dist", add_dist, ("True", "False", "patch"))
+    add_dist = {"True": True, "False": False, "patch": "patch"}[add_dist]
+    verbose = True if options.verbose is not None else False
+    overwrite = True if options.overwrite is not None else False
+
+    # Parse source spacing option
+    spacing_options = [ico, oct_, spacing]
+    n_options = len([x for x in spacing_options if x is not None])
+    use_spacing = "oct6"
+    if n_options > 1:
+        raise ValueError("Only one spacing option can be set at the same time")
+    elif n_options == 0:
+        # Default to oct6
+        pass
+    elif n_options == 1:
+        if ico is not None:
+            use_spacing = "ico" + str(ico)
+        elif oct_ is not None:
+            use_spacing = "oct" + str(oct_)
+        elif spacing is not None:
+            use_spacing = spacing
+    del ico, oct_, spacing
+    # Generate filename
+    if fname is None:
+        if subject_to is None:
+            fname = subject + "-" + str(use_spacing) + "-src.fif"
+        else:
+            fname = subject_to + "-" + subject + "-" + str(use_spacing) + "-src.fif"
+    else:
+        if not (fname.endswith("_src.fif") or fname.endswith("-src.fif")):
+            fname = fname + "-src.fif"
+    # Create source space
+    src = mne.setup_source_space(
+        subject=subject,
+        spacing=use_spacing,
+        surface=surface,
+        subjects_dir=subjects_dir,
+        n_jobs=n_jobs,
+        add_dist=add_dist,
+        verbose=verbose,
+    )
+    # Morph source space if --morph is set
+    if subject_to is not None:
+        src = mne.morph_source_spaces(
+            src,
+            subject_to=subject_to,
+            subjects_dir=subjects_dir,
+            surf=surface,
+            verbose=verbose,
+        )
+
+    # Save source space to file
+    src.save(fname=fname, overwrite=overwrite)
+
+
+mne.utils.run_command_if_main()
diff --git a/mne/commands/mne_show_fiff.py b/mne/commands/mne_show_fiff.py
new file mode 100644
index 0000000..2f9e73d
--- /dev/null
+++ b/mne/commands/mne_show_fiff.py
@@ -0,0 +1,54 @@
+"""Show the contents of a FIFF file.
+
+Examples
+--------
+.. code-block:: console
+
+    $ mne show_fiff test_raw.fif
+
+To see only tag 102:
+
+.. code-block:: console
+
+    $ mne show_fiff test_raw.fif --tag=102
+
+"""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import sys
+
+import mne
+
+
+def run():
+    """Run command."""
+    parser = mne.commands.utils.get_optparser(__file__, usage="mne show_fiff <file>")
+    parser.add_option(
+        "-t",
+        "--tag",
+        dest="tag",
+        help="provide information about this tag",
+        metavar="TAG",
+    )
+    parser.add_option(
+        "-b",
+        "--bytes",
+        dest="show_bytes",
+        help="show the byte offset of each tag",
+        action="store_true",
+    )
+    options, args = parser.parse_args()
+    if len(args) != 1:
+        parser.print_help()
+        sys.exit(1)
+    msg = mne.io.show_fiff(
+        args[0], tag=options.tag, show_bytes=options.show_bytes
+    ).strip()
+    print(msg)
+
+
+mne.utils.run_command_if_main()
diff --git a/mne/commands/mne_show_info.py b/mne/commands/mne_show_info.py
new file mode 100644
index 0000000..b944801
--- /dev/null
+++ b/mne/commands/mne_show_info.py
@@ -0,0 +1,38 @@
+"""Show measurement info from a .fif file.
+
+Examples
+--------
+.. code-block:: console
+
+    $ mne show_info sample_audvis_raw.fif
+
+"""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import sys
+
+import mne
+
+
+def run():
+    """Run command."""
+    parser = mne.commands.utils.get_optparser(__file__, usage="mne show_info <file>")
+    options, args = parser.parse_args()
+    if len(args) != 1:
+        parser.print_help()
+        sys.exit(1)
+
+    fname = args[0]
+
+    if not fname.endswith(".fif"):
+        raise ValueError(f"{fname} does not seem to be a .fif file.")
+
+    info = mne.io.read_info(fname)
+    print(f"File : {fname}")
+    print(info)
+
+
+mne.utils.run_command_if_main()
diff --git a/mne/commands/mne_surf2bem.py b/mne/commands/mne_surf2bem.py
new file mode 100644
index 0000000..5d64269
--- /dev/null
+++ b/mne/commands/mne_surf2bem.py
@@ -0,0 +1,53 @@
+r"""Convert surface to BEM FIF file.
+
+Examples
+--------
+.. code-block:: console
+
+    $ mne surf2bem --surf ${SUBJECTS_DIR}/${SUBJECT}/surf/lh.seghead \
+        --fif ${SUBJECTS_DIR}/${SUBJECT}/bem/${SUBJECT}-head.fif \
+        --id=4
+
+"""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import sys
+
+import mne
+
+
+def run():
+    """Run command."""
+    from mne.commands.utils import get_optparser
+
+    parser = get_optparser(__file__)
+
+    parser.add_option(
+        "-s", "--surf", dest="surf", help="Surface in Freesurfer format", metavar="FILE"
+    )
+    parser.add_option(
+        "-f", "--fif", dest="fif", help="FIF file produced", metavar="FILE"
+    )
+    parser.add_option(
+        "-i",
+        "--id",
+        dest="id",
+        default=4,
+        help=("Surface Id (e.g. 4 for head surface)"),
+    )
+
+    options, args = parser.parse_args()
+
+    if options.surf is None:
+        parser.print_help()
+        sys.exit(1)
+
+    print(f"Converting {options.surf} to BEM FIF file.")
+    surf = mne.bem._surfaces_to_bem([options.surf], [int(options.id)], sigmas=[1])
+    mne.write_bem_surfaces(options.fif, surf)
+
+
+mne.utils.run_command_if_main()
diff --git a/mne/commands/mne_sys_info.py b/mne/commands/mne_sys_info.py
new file mode 100644
index 0000000..70c56e4
--- /dev/null
+++ b/mne/commands/mne_sys_info.py
@@ -0,0 +1,66 @@
+"""Show system information.
+
+Examples
+--------
+.. code-block:: console
+
+    $ mne sys_info
+
+"""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import sys + +import mne + + +def run(): + """Run command.""" + parser = mne.commands.utils.get_optparser(__file__, usage="mne sys_info") + parser.add_option( + "-p", + "--show-paths", + dest="show_paths", + help="Show module paths", + action="store_true", + ) + parser.add_option( + "-d", + "--developer", + dest="developer", + help="Show additional developer module information", + action="store_true", + ) + parser.add_option( + "-a", + "--ascii", + dest="unicode", + help="Use ASCII instead of unicode symbols", + action="store_false", + default=True, + ) + parser.add_option( + "--no-check-version", + dest="check_version", + help="Disable MNE-Python remote version checking.", + action="store_false", + default=True, + ) + options, args = parser.parse_args() + dependencies = "developer" if options.developer else "user" + if len(args) != 0: + parser.print_help() + sys.exit(1) + + mne.sys_info( + show_paths=options.show_paths, + dependencies=dependencies, + unicode=options.unicode, + check_version=options.check_version, + ) + + +mne.utils.run_command_if_main() diff --git a/mne/commands/mne_watershed_bem.py b/mne/commands/mne_watershed_bem.py new file mode 100644 index 0000000..4f872b9 --- /dev/null +++ b/mne/commands/mne_watershed_bem.py @@ -0,0 +1,134 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Create BEM surfaces using the watershed algorithm included with FreeSurfer. + +Examples +-------- +.. code-block:: console + + $ mne watershed_bem -s sample + +""" + +import sys + +import mne +from mne.bem import make_watershed_bem +from mne.utils import _check_option + + +def run(): + """Run command.""" + from mne.commands.utils import _add_verbose_flag, get_optparser + + parser = get_optparser(__file__) + + parser.add_option( + "-s", "--subject", dest="subject", help="Subject name (required)", default=None + ) + parser.add_option( + "-d", + "--subjects-dir", + dest="subjects_dir", + help="Subjects directory", + default=None, + ) + parser.add_option( + "-o", + "--overwrite", + dest="overwrite", + help="Write over existing files", + action="store_true", + ) + parser.add_option( + "-v", "--volume", dest="volume", help="Defaults to T1", default="T1" + ) + parser.add_option( + "-a", + "--atlas", + dest="atlas", + help="Specify the --atlas option for mri_watershed", + default=False, + action="store_true", + ) + parser.add_option( + "-g", + "--gcaatlas", + dest="gcaatlas", + help="Specify the --brain_atlas option for mri_watershed", + default=False, + action="store_true", + ) + parser.add_option( + "-p", + "--preflood", + dest="preflood", + help="Change the preflood height", + default=None, + ) + parser.add_option( + "--copy", + dest="copy", + help="Use copies instead of symlinks for surfaces", + action="store_true", + ) + parser.add_option( + "-t", + "--T1", + dest="T1", + help="Whether or not to pass the -T1 flag " + "(can be true, false, 0, or 1). 
" + "By default it takes the same value as gcaatlas.", + default=None, + ) + parser.add_option( + "-b", + "--brainmask", + dest="brainmask", + help="The filename for the brainmask output file " + "relative to the " + "$SUBJECTS_DIR/$SUBJECT/bem/watershed/ directory.", + default="ws", + ) + _add_verbose_flag(parser) + + options, args = parser.parse_args() + + if options.subject is None: + parser.print_help() + sys.exit(1) + + subject = options.subject + subjects_dir = options.subjects_dir + overwrite = options.overwrite + volume = options.volume + atlas = options.atlas + gcaatlas = options.gcaatlas + preflood = options.preflood + copy = options.copy + brainmask = options.brainmask + T1 = options.T1 + if T1 is not None: + T1 = T1.lower() + _check_option("--T1", T1, ("true", "false", "0", "1")) + T1 = T1 in ("true", "1") + verbose = options.verbose + + make_watershed_bem( + subject=subject, + subjects_dir=subjects_dir, + overwrite=overwrite, + volume=volume, + atlas=atlas, + gcaatlas=gcaatlas, + preflood=preflood, + copy=copy, + T1=T1, + brainmask=brainmask, + verbose=verbose, + ) + + +mne.utils.run_command_if_main() diff --git a/mne/commands/mne_what.py b/mne/commands/mne_what.py new file mode 100644 index 0000000..f6ee168 --- /dev/null +++ b/mne/commands/mne_what.py @@ -0,0 +1,28 @@ +r"""Check type of FIF file. + +Examples +-------- +.. code-block:: console + + $ mne what sample_audvis_raw.fif + raw +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import mne + + +def run(): + """Run command.""" + from mne.commands.utils import get_optparser + + parser = get_optparser(__file__, usage="usage: %prog fname [fname2 ...]") + options, args = parser.parse_args() + for arg in args: + print(mne.what(arg)) + + +mne.utils.run_command_if_main() diff --git a/mne/commands/utils.py b/mne/commands/utils.py new file mode 100644 index 0000000..beb3a4e --- /dev/null +++ b/mne/commands/utils.py @@ -0,0 +1,109 @@ +"""Some utility functions for commands (e.g., for cmdline handling).""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import glob +import importlib +import os +import os.path as op +import sys +from optparse import OptionParser + +import mne + + +def _add_verbose_flag(parser): + parser.add_option( + "--verbose", + dest="verbose", + help="Enable verbose mode (printing of log messages).", + default=None, + action="store_true", + ) + + +def load_module(name, path): + """Load module from .py/.pyc file. + + Parameters + ---------- + name : str + Name of the module. + path : str + Path to .py/.pyc file. + + Returns + ------- + mod : module + Imported module. 
+ + """ + from importlib.util import module_from_spec, spec_from_file_location + + spec = spec_from_file_location(name, path) + mod = module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + +def get_optparser(cmdpath, usage=None, prog_prefix="mne", version=None): + """Create OptionParser with cmd specific settings (e.g., prog value).""" + # Fetch description + mod = load_module("__temp", cmdpath) + if mod.__doc__: + doc, description, epilog = mod.__doc__, None, None + + doc_lines = doc.split("\n") + description = doc_lines[0] + if len(doc_lines) > 1: + epilog = "\n".join(doc_lines[1:]) + + # Get the name of the command + command = os.path.basename(cmdpath) + command, _ = os.path.splitext(command) + command = command[len(prog_prefix) + 1 :] # +1 is for `_` character + + # Set prog + prog = prog_prefix + f" {command}" + + # Set version + if version is None: + version = mne.__version__ + + # monkey patch OptionParser to not wrap epilog + OptionParser.format_epilog = lambda self, formatter: self.epilog + parser = OptionParser( + prog=prog, version=version, description=description, epilog=epilog, usage=usage + ) + + return parser + + +def main(): + """Entrypoint for mne usage.""" + mne_bin_dir = op.dirname(op.dirname(__file__)) + valid_commands = sorted(glob.glob(op.join(mne_bin_dir, "commands", "mne_*.py"))) + valid_commands = [c.split(op.sep)[-1][4:-3] for c in valid_commands] + + def print_help(): # noqa + print("Usage : mne command options\n") + print("Accepted commands :\n") + for c in valid_commands: + print(f"\t- {c}") + print("\nExample : mne browse_raw --raw sample_audvis_raw.fif") + print("\nGetting help example : mne compute_proj_eog -h") + + if len(sys.argv) == 1 or "help" in sys.argv[1] or "-h" in sys.argv[1]: + print_help() + elif sys.argv[1] == "--version": + print(f"MNE {mne.__version__}") + elif sys.argv[1] not in valid_commands: + print(f'Invalid command: "{sys.argv[1]}"\n') + print_help() + else: + cmd = sys.argv[1] + cmd = importlib.import_module(f".mne_{cmd}", "mne.commands") + sys.argv = sys.argv[1:] + cmd.run() diff --git a/mne/conftest.py b/mne/conftest.py new file mode 100644 index 0000000..d18b440 --- /dev/null +++ b/mne/conftest.py @@ -0,0 +1,1239 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +import gc +import inspect +import os +import os.path as op +import shutil +import sys +import warnings +from collections import defaultdict +from contextlib import contextmanager +from pathlib import Path +from textwrap import dedent +from unittest import mock + +import numpy as np +import pytest +from pytest import StashKey + +import mne +from mne import Epochs, pick_types, read_events +from mne.channels import read_layout +from mne.coreg import create_default_subject +from mne.datasets import testing +from mne.fixes import _compare_version, has_numba +from mne.io import read_raw_ctf, read_raw_fif, read_raw_nirx, read_raw_snirf +from mne.stats import cluster_level +from mne.utils import ( + Bunch, + _assert_no_instances, + _check_qt_version, + _pl, + _record_warnings, + _TempDir, + numerics, +) + +# data from sample dataset +from mne.viz._figure import use_browser_backend +from mne.viz.backends._utils import _init_mne_qtapp + +test_path = testing.data_path(download=False) +s_path = op.join(test_path, "MEG", "sample") +fname_evoked = op.join(s_path, "sample_audvis_trunc-ave.fif") +fname_cov = op.join(s_path, "sample_audvis_trunc-cov.fif") +fname_fwd = op.join(s_path, "sample_audvis_trunc-meg-eeg-oct-4-fwd.fif") +fname_fwd_full = op.join(s_path, "sample_audvis_trunc-meg-eeg-oct-6-fwd.fif") +bem_path = op.join(test_path, "subjects", "sample", "bem") +fname_bem = op.join(bem_path, "sample-1280-bem.fif") +fname_aseg = op.join(test_path, "subjects", "sample", "mri", "aseg.mgz") +subjects_dir = op.join(test_path, "subjects") +fname_src = op.join(bem_path, "sample-oct-4-src.fif") +fname_trans = op.join(s_path, "sample_audvis_trunc-trans.fif") + +ctf_dir = op.join(test_path, "CTF") +fname_ctf_continuous = op.join(ctf_dir, "testdata_ctf.ds") + +nirx_path = test_path / "NIRx" +snirf_path = test_path / "SNIRF" +nirsport2 = nirx_path / "nirsport_v2" / "aurora_recording _w_short_and_acc" +nirsport2_snirf = snirf_path / "NIRx" / "NIRSport2" / "1.0.3" / "2021-05-05_001.snirf" +nirsport2_2021_9 = nirx_path / "nirsport_v2" / "aurora_2021_9" +nirsport2_20219_snirf = ( + snirf_path / "NIRx" / "NIRSport2" / "2021.9" / "2021-10-01_002.snirf" +) + +# data from mne.io.tests.data +base_dir = op.join(op.dirname(__file__), "io", "tests", "data") +fname_raw_io = op.join(base_dir, "test_raw.fif") +fname_event_io = op.join(base_dir, "test-eve.fif") +fname_cov_io = op.join(base_dir, "test-cov.fif") +fname_evoked_io = op.join(base_dir, "test-ave.fif") +event_id, tmin, tmax = 1, -0.1, 1.0 +vv_layout = read_layout("Vectorview-all") + +collect_ignore = ["export/_brainvision.py", "export/_eeglab.py", "export/_edf.py"] + + +def pytest_configure(config): + """Configure pytest options.""" + # Markers + for marker in ( + "slowtest", + "ultraslowtest", + "pgtest", + "pvtest", + "allow_unclosed", + ): + config.addinivalue_line("markers", marker) + + # Fixtures + for fixture in ( + "matplotlib_config", + "qt_config", + "protect_config", + ): + config.addinivalue_line("usefixtures", fixture) + + # pytest-qt uses PYTEST_QT_API, but let's make it respect qtpy's QT_API + # if present + if os.getenv("PYTEST_QT_API") is None and os.getenv("QT_API") is not None: + os.environ["PYTEST_QT_API"] = os.environ["QT_API"] + + # suppress: + # Debugger warning: It seems that frozen modules are being used, which may + # make the debugger miss breakpoints. Please pass -Xfrozen_modules=off + # to python to disable frozen modules. 
+ if os.getenv("PYDEVD_DISABLE_FILE_VALIDATION") is None: + os.environ["PYDEVD_DISABLE_FILE_VALIDATION"] = "1" + + # https://numba.readthedocs.io/en/latest/reference/deprecation.html#deprecation-of-old-style-numba-captured-errors # noqa: E501 + if "NUMBA_CAPTURED_ERRORS" not in os.environ: + os.environ["NUMBA_CAPTURED_ERRORS"] = "new_style" + + # Warnings + # - Once SciPy updates not to have non-integer and non-tuple errors (1.2.0) + # we should remove them from here. + # - This list should also be considered alongside reset_warnings in + # doc/conf.py. + if os.getenv("MNE_IGNORE_WARNINGS_IN_TESTS", "") not in ("true", "1"): + first_kind = "error" + else: + first_kind = "always" + warning_lines = f" {first_kind}::" + warning_lines += r""" + # matplotlib->traitlets (notebook) + ignore:Passing unrecognized arguments to super.*:DeprecationWarning + # notebook tests + ignore:There is no current event loop:DeprecationWarning + ignore:unclosed tz + ignore:datetime.datetime.utcfromtimestamp.*:DeprecationWarning + # joblib + ignore:ast\.Num is deprecated.*:DeprecationWarning + ignore:Attribute n is deprecated and will be removed in Python 3\.14.*:DeprecationWarning + # numpydoc + ignore:ast\.NameConstant is deprecated and will be removed in Python 3\.14.*:DeprecationWarning + # pooch + ignore:Python 3\.14 will, by default, filter extracted tar archives.*:DeprecationWarning + # pandas + ignore:\n*Pyarrow will become a required dependency of pandas.*:DeprecationWarning + ignore:np\.find_common_type is deprecated.*:DeprecationWarning + # pyvista <-> NumPy 2.0 + ignore:__array_wrap__ must accept context and return_scalar arguments.*:DeprecationWarning + # nibabel <-> NumPy 2.0 + ignore:__array__ implementation doesn't accept a copy.*:DeprecationWarning + # quantities via neo + ignore:The 'copy' argument in Quantity is deprecated.*: + # debugpy uses deprecated matplotlib API + ignore:The (non_)?interactive_bk attribute was deprecated.*: + # SWIG (via OpenMEEG) + ignore:.*builtin type swigvarlink has no.*:DeprecationWarning + """ # noqa: E501 + for warning_line in warning_lines.split("\n"): + warning_line = warning_line.strip() + if warning_line and not warning_line.startswith("#"): + config.addinivalue_line("filterwarnings", warning_line) + + +# Have to be careful with autouse=True, but this is just an int comparison +# so it shouldn't really add appreciable overhead +@pytest.fixture(autouse=True) +def check_verbose(request): + """Set to the default logging level to ensure it's tested properly.""" + starting_level = mne.utils.logger.level + yield + # ensures that no tests break the global state + try: + assert mne.utils.logger.level == starting_level + except AssertionError: + pytest.fail( + ".".join([request.module.__name__, request.function.__name__]) + + " modifies logger.level" + ) + + +@pytest.fixture(autouse=True) +def close_all(): + """Close all matplotlib plots, regardless of test status.""" + # This adds < 1 µS in local testing, and we have ~2500 tests, so ~2 ms max + import matplotlib.pyplot as plt + + yield + plt.close("all") + + +@pytest.fixture(autouse=True) +def add_mne(doctest_namespace): + """Add mne to the namespace.""" + doctest_namespace["mne"] = mne + + +@pytest.fixture(scope="function") +def verbose_debug(): + """Run a test with debug verbosity.""" + with mne.utils.use_log_level("debug"): + yield + + +@pytest.fixture(scope="session") +def qt_config(): + """Configure the Qt backend for viz tests.""" + os.environ["_MNE_BROWSER_NO_BLOCK"] = "true" + if "_MNE_BROWSER_BACK" not in 
os.environ: + os.environ["_MNE_BROWSER_BACK"] = "true" + + +@pytest.fixture(scope="session") +def matplotlib_config(): + """Configure matplotlib for viz tests.""" + import matplotlib + from matplotlib import cbook + + # Allow for easy interactive debugging with a call like: + # + # $ MNE_MPL_TESTING_BACKEND=Qt5Agg pytest mne/viz/tests/test_raw.py -k annotation -x --pdb # noqa: E501 + # + try: + want = os.environ["MNE_MPL_TESTING_BACKEND"] + except KeyError: + want = "agg" # don't pop up windows + with warnings.catch_warnings(record=True): # ignore warning + warnings.filterwarnings("ignore") + matplotlib.use(want, force=True) + import matplotlib.pyplot as plt + + assert plt.get_backend() == want + # overwrite some params that can horribly slow down tests that + # users might have changed locally (but should not otherwise affect + # functionality) + plt.ioff() + plt.rcParams["figure.dpi"] = 100 + plt.rcParams["figure.raise_window"] = False + + # Make sure that we always reraise exceptions in handlers + orig = cbook.CallbackRegistry + + class CallbackRegistryReraise(orig): + def __init__(self, exception_handler=None, signals=None): + super().__init__(exception_handler) + + cbook.CallbackRegistry = CallbackRegistryReraise + + +@pytest.fixture(scope="session") +def azure_windows(): + """Determine if running on Azure Windows.""" + return os.getenv( + "AZURE_CI_WINDOWS", "false" + ).lower() == "true" and sys.platform.startswith("win") + + +@pytest.fixture(scope="function") +def raw_orig(): + """Get raw data without any change to it from mne.io.tests.data.""" + raw = read_raw_fif(fname_raw_io, preload=True) + return raw + + +@pytest.fixture(scope="function") +def raw(): + """ + Get raw data and pick channels to reduce load for testing. + + (from mne.io.tests.data) + """ + raw = read_raw_fif(fname_raw_io, preload=True) + # Throws a warning about a changed unit. + with pytest.warns(RuntimeWarning, match="unit"): + raw.set_channel_types({raw.ch_names[0]: "ias"}) + raw.pick(raw.ch_names[:9]) + raw.info.normalize_proj() # Fix projectors after subselection + return raw + + +@pytest.fixture(scope="function") +def raw_ctf(): + """Get ctf raw data from mne.io.tests.data.""" + raw_ctf = read_raw_ctf(fname_ctf_continuous, preload=True) + return raw_ctf + + +@pytest.fixture(scope="function") +def raw_spectrum(raw): + """Get raw with power spectral density computed from mne.io.tests.data.""" + return raw.compute_psd() + + +@pytest.fixture(scope="function") +def events(): + """Get events from mne.io.tests.data.""" + return read_events(fname_event_io) + + +def _get_epochs(stop=5, meg=True, eeg=False, n_chan=20): + """Get epochs.""" + raw = read_raw_fif(fname_raw_io) + events = read_events(fname_event_io) + picks = pick_types( + raw.info, meg=meg, eeg=eeg, stim=False, ecg=False, eog=False, exclude="bads" + ) + # Use a subset of channels for plotting speed + picks = np.round(np.linspace(0, len(picks) + 1, n_chan)).astype(int) + with pytest.warns(RuntimeWarning, match="projection"): + epochs = Epochs( + raw, + events[:stop], + event_id, + tmin, + tmax, + picks=picks, + proj=False, + preload=False, + ) + epochs.info.normalize_proj() # avoid warnings + return epochs + + +@pytest.fixture() +def epochs(): + """ + Get minimal, pre-loaded epochs data suitable for most tests. 
+ + (from mne.io.tests.data) + """ + return _get_epochs().load_data() + + +@pytest.fixture() +def epochs_unloaded(): + """Get minimal, unloaded epochs data from mne.io.tests.data.""" + return _get_epochs() + + +@pytest.fixture() +def epochs_full(): + """Get full, preloaded epochs from mne.io.tests.data.""" + return _get_epochs(None).load_data() + + +@pytest.fixture() +def epochs_spectrum(): + """Get epochs with power spectral density computed from mne.io.tests.data.""" + return _get_epochs().load_data().compute_psd() + + +@pytest.fixture() +def epochs_tfr(): + """Get an EpochsTFR computed from mne.io.tests.data.""" + epochs = _get_epochs().load_data() + return epochs.compute_tfr(method="morlet", freqs=np.linspace(20, 40, num=5)) + + +@pytest.fixture() +def average_tfr(epochs_tfr): + """Get an AverageTFR computed by averaging an EpochsTFR (this is small & fast).""" + return epochs_tfr.average() + + +@pytest.fixture() +def full_average_tfr(full_evoked): + """Get an AverageTFR computed from Evoked. + + This is slower than the `average_tfr` fixture, but a few TFR.plot_* tests need it. + """ + return full_evoked.compute_tfr(method="morlet", freqs=np.linspace(20, 40, num=5)) + + +@pytest.fixture() +def raw_tfr(raw): + """Get a RawTFR computed from mne.io.tests.data.""" + return raw.compute_tfr(method="morlet", freqs=np.linspace(20, 40, num=5)) + + +@pytest.fixture() +def epochs_empty(): + """Get empty epochs from mne.io.tests.data.""" + epochs = _get_epochs(meg=True, eeg=True).load_data() + with pytest.warns(RuntimeWarning, match="were dropped"): + epochs.drop_bad(reject={"mag": 1e-20}) + + return epochs + + +@pytest.fixture(scope="session", params=[testing._pytest_param()]) +def _full_evoked(): + # This is session scoped, so be sure not to modify its return value (use + # `full_evoked` fixture instead) + return mne.read_evokeds(fname_evoked, condition="Left Auditory", baseline=(None, 0)) + + +@pytest.fixture(scope="session", params=[testing._pytest_param()]) +def _evoked(_full_evoked): + # This is session scoped, so be sure not to modify its return value (use `evoked` + # fixture instead) + return _full_evoked.copy().crop(0, 0.2) + + +@pytest.fixture() +def evoked(_evoked): + """Get truncated evoked data.""" + return _evoked.copy() + + +@pytest.fixture() +def full_evoked(_full_evoked): + """Get full-duration evoked data (needed for, e.g., testing TFR).""" + return _full_evoked.copy() + + +@pytest.fixture(scope="function", params=[testing._pytest_param()]) +def noise_cov(): + """Get a noise cov from the testing dataset.""" + return mne.read_cov(fname_cov) + + +@pytest.fixture +def noise_cov_io(): + """Get noise-covariance (from mne.io.tests.data).""" + return mne.read_cov(fname_cov_io) + + +@pytest.fixture(scope="function") +def bias_params_free(evoked, noise_cov): + """Provide inputs for free bias functions.""" + fwd = mne.read_forward_solution(fname_fwd) + return _bias_params(evoked, noise_cov, fwd) + + +@pytest.fixture(scope="function") +def bias_params_fixed(evoked, noise_cov): + """Provide inputs for fixed bias functions.""" + fwd = mne.read_forward_solution(fname_fwd) + mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True, copy=False) + return _bias_params(evoked, noise_cov, fwd) + + +def _bias_params(evoked, noise_cov, fwd): + evoked.pick(picks=["meg", "eeg"]) + # restrict to limited set of verts (small src here) and one hemi for speed + vertices = [fwd["src"][0]["vertno"].copy(), []] + stc = mne.SourceEstimate( + np.zeros((sum(len(v) for v in vertices), 1)), vertices, 0, 1 + 
) + fwd = mne.forward.restrict_forward_to_stc(fwd, stc) + assert fwd["sol"]["row_names"] == noise_cov["names"] + assert noise_cov["names"] == evoked.ch_names + evoked = mne.EvokedArray(fwd["sol"]["data"].copy(), evoked.info) + data_cov = noise_cov.copy() + data = fwd["sol"]["data"] @ fwd["sol"]["data"].T + data *= 1e-14 # 100 nAm at each source, effectively (1e-18 would be 1 nAm) + # This is rank-deficient, so let's make it actually positive semidefinite + # by regularizing a tiny bit + data.flat[:: data.shape[0] + 1] += mne.make_ad_hoc_cov(evoked.info)["data"] + # Do our projection + proj, _, _ = mne._fiff.proj.make_projector(data_cov["projs"], data_cov["names"]) + data = proj @ data @ proj.T + data_cov["data"][:] = data + assert data_cov["data"].shape[0] == len(noise_cov["names"]) + want = np.arange(fwd["sol"]["data"].shape[1]) + if not mne.forward.is_fixed_orient(fwd): + want //= 3 + return evoked, fwd, noise_cov, data_cov, want + + +@pytest.fixture +def garbage_collect(): + """Garbage collect on exit.""" + yield + gc.collect() + + +@pytest.fixture +def mpl_backend(garbage_collect): + """Use for epochs/ica when not implemented with pyqtgraph yet.""" + with use_browser_backend("matplotlib") as backend: + yield backend + backend._close_all() + + +# Skip functions or modules for mne-qt-browser < 0.2.0 +pre_2_0_skip_modules = ["mne.viz.tests.test_epochs", "mne.viz.tests.test_ica"] +pre_2_0_skip_funcs = ["test_plot_raw_white", "test_plot_raw_selection"] + + +def _check_pyqtgraph(request): + # Check Qt + qt_version, api = _check_qt_version(return_api=True) + if (not qt_version) or _compare_version(qt_version, "<", "5.12"): + pytest.skip( + f"Qt API {api} has version {qt_version} but pyqtgraph needs >= 5.12!" + ) + try: + import mne_qt_browser # noqa: F401 + + # Check mne-qt-browser version + lower_2_0 = _compare_version(mne_qt_browser.__version__, "<", "0.2.0") + m_name = request.function.__module__ + f_name = request.function.__name__ + if lower_2_0 and m_name in pre_2_0_skip_modules: + pytest.skip( + f'Test-Module "{m_name}" was skipped for mne-qt-browser < 0.2.0' + ) + elif lower_2_0 and f_name in pre_2_0_skip_funcs: + pytest.skip(f'Test "{f_name}" was skipped for mne-qt-browser < 0.2.0') + except Exception: + pytest.skip("Requires mne_qt_browser") + else: + ver = mne_qt_browser.__version__ + if api != "PyQt5" and _compare_version(ver, "<=", "0.2.6"): + pytest.skip(f"mne_qt_browser {ver} requires PyQt5, API is {api}") + + +@pytest.fixture +def pg_backend(request, garbage_collect): + """Use for pyqtgraph-specific test-functions.""" + _check_pyqtgraph(request) + from mne_qt_browser._pg_figure import MNEQtBrowser + + with use_browser_backend("qt") as backend: + backend._close_all() + yield backend + backend._close_all() + # This shouldn't be necessary, but let's make sure nothing is stale + import mne_qt_browser + + mne_qt_browser._browser_instances.clear() + if not _test_passed(request): + return + _assert_no_instances(MNEQtBrowser, f"Closure of {request.node.name}") + + +@pytest.fixture( + params=[ + "matplotlib", + pytest.param("qt", marks=pytest.mark.pgtest), + ] +) +def browser_backend(request, garbage_collect, monkeypatch): + """Parametrizes the name of the browser backend.""" + backend_name = request.param + if backend_name == "qt": + _check_pyqtgraph(request) + with use_browser_backend(backend_name) as backend: + backend._close_all() + monkeypatch.setenv("MNE_BROWSE_RAW_SIZE", "10,10") + yield backend + backend._close_all() + if backend_name == "qt": + # This shouldn't be necessary, 
but let's make sure nothing is stale + import mne_qt_browser + + mne_qt_browser._browser_instances.clear() + + +@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) +def renderer(request, options_3d, garbage_collect): + """Yield the 3D backends.""" + with _use_backend(request.param, interactive=False) as renderer: + yield renderer + + +@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) +def renderer_pyvistaqt(request, options_3d, garbage_collect): + """Yield the PyVista backend.""" + with _use_backend(request.param, interactive=False) as renderer: + yield renderer + + +@pytest.fixture(params=[pytest.param("notebook", marks=pytest.mark.pvtest)]) +def renderer_notebook(request, options_3d): + """Yield the 3D notebook renderer.""" + with _use_backend(request.param, interactive=False) as renderer: + yield renderer + + +@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) +def renderer_interactive_pyvistaqt(request, options_3d, qt_windows_closed): + """Yield the interactive PyVista backend.""" + with _use_backend(request.param, interactive=True) as renderer: + yield renderer + + +@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) +def renderer_interactive(request, options_3d): + """Yield the interactive 3D backends.""" + with _use_backend(request.param, interactive=True) as renderer: + yield renderer + + +@contextmanager +def _use_backend(backend_name, interactive): + from mne.viz.backends.renderer import _use_test_3d_backend + + _check_skip_backend(backend_name) + with _use_test_3d_backend(backend_name, interactive=interactive): + from mne.viz.backends import renderer + + try: + yield renderer + finally: + renderer.backend._close_all() + + +def _check_skip_backend(name): + from mne.viz.backends._utils import _notebook_vtk_works + from mne.viz.backends.tests._utils import ( + has_imageio_ffmpeg, + has_pyvista, + has_pyvistaqt, + ) + + if not has_pyvista(): + pytest.skip("Test skipped, requires pyvista.") + if not has_imageio_ffmpeg(): + pytest.skip("Test skipped, requires imageio-ffmpeg") + if name == "pyvistaqt": + if not _check_qt_version(): + pytest.skip("Test skipped, requires Qt.") + if not has_pyvistaqt(): + pytest.skip("Test skipped, requires pyvistaqt") + else: + assert name == "notebook", name + if not _notebook_vtk_works(): + pytest.skip("Test skipped, requires working notebook vtk") + + +@pytest.fixture(scope="session") +def pixel_ratio(): + """Get the pixel ratio.""" + from mne.viz.backends.tests._utils import has_pyvista + + # _check_qt_version will init an app for us, so no need for us to do it + if not has_pyvista() or not _check_qt_version(): + return 1.0 + from qtpy.QtCore import Qt + from qtpy.QtWidgets import QMainWindow + + app = _init_mne_qtapp() + app.processEvents() + window = QMainWindow() + window.setAttribute(Qt.WA_DeleteOnClose, True) + ratio = float(window.devicePixelRatio()) + window.close() + return ratio + + +@pytest.fixture(scope="function", params=[testing._pytest_param()]) +def subjects_dir_tmp(tmp_path): + """Copy MNE-testing-data subjects_dir to a temp dir for manipulation.""" + for key in ("sample", "fsaverage"): + shutil.copytree(op.join(subjects_dir, key), str(tmp_path / key)) + return str(tmp_path) + + +@pytest.fixture(params=[testing._pytest_param()]) +def subjects_dir_tmp_few(tmp_path): + """Copy fewer files to a tmp_path.""" + subjects_path = tmp_path / "subjects" + os.mkdir(subjects_path) + # add fsaverage + 
create_default_subject(subjects_dir=subjects_path, fs_home=test_path, verbose=True) + # add sample (with few files) + sample_path = subjects_path / "sample" + os.makedirs(sample_path / "bem") + for dirname in ("mri", "surf"): + shutil.copytree( + test_path / "subjects" / "sample" / dirname, sample_path / dirname + ) + return subjects_path + + +# Scoping these as session will make things faster, but need to make sure +# not to modify them in-place in the tests, so keep them private +@pytest.fixture(scope="session", params=[testing._pytest_param()]) +def _evoked_cov_sphere(_evoked): + """Compute a small evoked/cov/sphere combo for use with forwards.""" + evoked = _evoked.copy().pick(picks="meg") + evoked.pick(evoked.ch_names[::4]) + assert len(evoked.ch_names) == 77 + cov = mne.read_cov(fname_cov) + sphere = mne.make_sphere_model("auto", "auto", evoked.info) + return evoked, cov, sphere + + +@pytest.fixture(scope="session") +def _fwd_surf(_evoked_cov_sphere): + """Compute a forward for a surface source space.""" + evoked, cov, sphere = _evoked_cov_sphere + src_surf = mne.read_source_spaces(fname_src) + return mne.make_forward_solution( + evoked.info, fname_trans, src_surf, sphere, mindist=5.0 + ) + + +@pytest.fixture(scope="session") +def _fwd_subvolume(_evoked_cov_sphere): + """Compute a forward for a surface source space.""" + pytest.importorskip("nibabel") + evoked, cov, sphere = _evoked_cov_sphere + volume_labels = ["Left-Cerebellum-Cortex", "right-Cerebellum-Cortex"] + with pytest.raises(ValueError, match=r"Did you mean one of \['Right-Cere"): + mne.setup_volume_source_space( + "sample", pos=20.0, volume_label=volume_labels, subjects_dir=subjects_dir + ) + volume_labels[1] = "R" + volume_labels[1][1:] + src_vol = mne.setup_volume_source_space( + "sample", + pos=20.0, + volume_label=volume_labels, + subjects_dir=subjects_dir, + add_interpolator=False, + ) + return mne.make_forward_solution( + evoked.info, fname_trans, src_vol, sphere, mindist=5.0 + ) + + +@pytest.fixture +def fwd_volume_small(_fwd_subvolume): + """Provide a small volumetric source space.""" + return _fwd_subvolume.copy() + + +@pytest.fixture(scope="session") +def _all_src_types_fwd(_fwd_surf, _fwd_subvolume): + """Create all three forward types (surf, vol, mixed).""" + fwds = dict(surface=_fwd_surf.copy(), volume=_fwd_subvolume.copy()) + with pytest.raises(RuntimeError, match="Invalid source space with kinds"): + fwds["volume"]["src"] + fwds["surface"]["src"] + + # mixed (4) + fwd = fwds["surface"].copy() + f2 = fwds["volume"].copy() + del _fwd_surf, _fwd_subvolume + for keys, axis in [ + (("source_rr",), 0), + (("source_nn",), 0), + (("sol", "data"), 1), + (("_orig_sol",), 1), + ]: + a, b = fwd, f2 + key = keys[0] + if len(keys) > 1: + a, b = a[key], b[key] + key = keys[1] + a[key] = np.concatenate([a[key], b[key]], axis=axis) + fwd["sol"]["ncol"] = fwd["sol"]["data"].shape[1] + fwd["nsource"] = fwd["sol"]["ncol"] // 3 + fwd["src"] = fwd["src"] + f2["src"] + fwds["mixed"] = fwd + + return fwds + + +@pytest.fixture(scope="session") +def _all_src_types_inv_evoked(_evoked_cov_sphere, _all_src_types_fwd): + """Compute inverses for all source types.""" + evoked, cov, _ = _evoked_cov_sphere + invs = dict() + for kind, fwd in _all_src_types_fwd.items(): + assert fwd["src"].kind == kind + with pytest.warns(RuntimeWarning, match="has been reduced"): + invs[kind] = mne.minimum_norm.make_inverse_operator(evoked.info, fwd, cov) + return invs, evoked + + +@pytest.fixture(scope="function") +def 
all_src_types_inv_evoked(_all_src_types_inv_evoked): + """All source types of inverses, allowing for possible modification.""" + invs, evoked = _all_src_types_inv_evoked + invs = {key: val.copy() for key, val in invs.items()} + evoked = evoked.copy() + return invs, evoked + + +@pytest.fixture(scope="function") +def mixed_fwd_cov_evoked(_evoked_cov_sphere, _all_src_types_fwd): + """Compute inverses for all source types.""" + evoked, cov, _ = _evoked_cov_sphere + return _all_src_types_fwd["mixed"].copy(), cov.copy(), evoked.copy() + + +@pytest.fixture(scope="session") +def src_volume_labels(): + """Create a 7mm source space with labels.""" + pytest.importorskip("nibabel") + volume_labels = mne.get_volume_labels_from_aseg(fname_aseg) + with ( + _record_warnings(), + pytest.warns(RuntimeWarning, match="Found no usable.*t-vessel.*"), + ): + src = mne.setup_volume_source_space( + "sample", + 7.0, + mri="aseg.mgz", + volume_label=volume_labels, + add_interpolator=False, + bem=fname_bem, + subjects_dir=subjects_dir, + ) + lut, _ = mne.read_freesurfer_lut() + assert len(volume_labels) == 46 + assert volume_labels[0] == "Unknown" + assert lut["Unknown"] == 0 # it will be excluded during label gen + return src, tuple(volume_labels), lut + + +def _fail(*args, **kwargs): + __tracebackhide__ = True + raise AssertionError("Test should not download") + + +@pytest.fixture(scope="function") +def download_is_error(monkeypatch): + """Prevent downloading by raising an error when it's attempted.""" + import pooch + + monkeypatch.setattr(pooch, "retrieve", _fail) + yield + + +@pytest.fixture() +def fake_retrieve(monkeypatch, download_is_error): + """Monkeypatch pooch.retrieve to avoid downloading (just touch files).""" + import pooch + + my_func = _FakeFetch() + monkeypatch.setattr(pooch, "retrieve", my_func) + monkeypatch.setattr(pooch, "create", my_func) + yield my_func + + +class _FakeFetch: + def __init__(self): + self.call_args_list = list() + + @property + def call_count(self): + return len(self.call_args_list) + + # Wrapper for pooch.retrieve(...) and pooch.create(...) + def __call__(self, *args, **kwargs): + assert "path" in kwargs + if "fname" in kwargs: # pooch.retrieve(...) + self.call_args_list.append((args, kwargs)) + path = Path(kwargs["path"], kwargs["fname"]) + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text("test") + return path + else: # pooch.create(...) 
has been called + self.path = kwargs["path"] + return self + + # Wrappers for Pooch instances (e.g., in eegbci we pooch.create) + def fetch(self, fname): + self(path=self.path, fname=fname) + + def load_registry(self, registry): + assert Path(registry).exists(), registry + + +# We can't use monkeypatch because its scope (function-level) conflicts with +# the requests fixture (module-level), so we live with a module-scoped version +# that uses mock +@pytest.fixture(scope="module") +def options_3d(): + """Disable advanced 3d rendering.""" + with mock.patch.dict( + os.environ, + { + "MNE_3D_OPTION_ANTIALIAS": "false", + "MNE_3D_OPTION_DEPTH_PEELING": "false", + "MNE_3D_OPTION_SMOOTH_SHADING": "false", + }, + ): + yield + + +@pytest.fixture(scope="session") +def protect_config(): + """Protect ~/.mne.""" + temp = _TempDir() + with mock.patch.dict(os.environ, {"_MNE_FAKE_HOME_DIR": temp}): + yield + + +def _test_passed(request): + if _phase_report_key not in request.node.stash: + return True + report = request.node.stash[_phase_report_key] + return "call" in report and report["call"].outcome == "passed" + + +@pytest.fixture() +def brain_gc(request): + """Ensure that brain can be properly garbage collected.""" + keys = ( + "renderer_interactive", + "renderer_interactive_pyvistaqt", + "renderer", + "renderer_pyvistaqt", + "renderer_notebook", + ) + assert set(request.fixturenames) & set(keys) != set() + for key in keys: + if key in request.fixturenames: + is_pv = request.getfixturevalue(key)._get_3d_backend() == "pyvistaqt" + close_func = request.getfixturevalue(key).backend._close_all + break + if not is_pv: + yield + return + from mne.viz import Brain + + ignore = set(id(o) for o in gc.get_objects()) + yield + close_func() + if not _test_passed(request): + return + _assert_no_instances(Brain, "after") + # Check VTK + objs = gc.get_objects() + bad = list() + for o in objs: + try: + name = o.__class__.__name__ + except Exception: # old Python, probably + pass + else: + if name.startswith("vtk") and id(o) not in ignore: + bad.append(name) + del o + del objs, ignore, Brain + assert len(bad) == 0, "VTK objects linger:\n" + "\n".join(bad) + + +_files = list() + + +def pytest_sessionfinish(session, exitstatus): + """Handle the end of the session.""" + n = session.config.option.durations + if n is None: + return + print("\n") + # get the number to print + files = defaultdict(lambda: 0.0) + for item in session.items: + if _phase_report_key not in item.stash: + continue + report = item.stash[_phase_report_key] + dur = sum(x.duration for x in report.values()) + parts = Path(item.nodeid.split(":")[0]).parts + # split mne/tests/test_whatever.py into separate categories since these + # are essentially submodule-level tests. 
Keeping just [:3] works,
+        # except for mne/viz where we want level-4 granularity
+        split_submodules = (("mne", "viz"), ("mne", "preprocessing"))
+        parts = parts[: 4 if parts[:2] in split_submodules else 3]
+        if not parts[-1].endswith(".py"):
+            parts = parts + ("",)
+        file_key = "/".join(parts)
+        files[file_key] += dur
+    files = sorted(list(files.items()), key=lambda x: x[1])[::-1]
+    # print
+    _files[:] = files[:n]
+
+
+def pytest_terminal_summary(terminalreporter, exitstatus, config):
+    """Print the module-level timings."""
+    writer = terminalreporter
+    n = len(_files)
+    if n:
+        writer.line("")  # newline
+        writer.write_sep("=", f"slowest {n} test module{_pl(n)}")
+        names, timings = zip(*_files)
+        timings = [f"{timing:0.2f}s total" for timing in timings]
+        rjust = max(len(timing) for timing in timings)
+        timings = [timing.rjust(rjust) for timing in timings]
+        for name, timing in zip(names, timings):
+            writer.line(f"{timing.ljust(15)}{name}")
+
+
+def pytest_report_header(config, startdir=None):
+    """Add information to the pytest run header."""
+    return f"MNE {mne.__version__} -- {Path(mne.__file__).parent}"
+
+
+@pytest.fixture(scope="function", params=("Numba", "NumPy"))
+def numba_conditional(monkeypatch, request):
+    """Test both code paths on machines that have Numba."""
+    assert request.param in ("Numba", "NumPy")
+    if request.param == "NumPy" and has_numba:
+        monkeypatch.setattr(
+            cluster_level, "_get_buddies", cluster_level._get_buddies_fallback
+        )
+        monkeypatch.setattr(
+            cluster_level, "_get_selves", cluster_level._get_selves_fallback
+        )
+        monkeypatch.setattr(
+            cluster_level, "_where_first", cluster_level._where_first_fallback
+        )
+        monkeypatch.setattr(numerics, "_arange_div", numerics._arange_div_fallback)
+    if request.param == "Numba" and not has_numba:
+        pytest.skip("Numba not installed")
+    yield request.param
+
+
+# Create one nbclient and reuse it
+@pytest.fixture(scope="session")
+def _nbclient():
+    try:
+        import nbformat
+        import trame  # noqa
+        from ipywidgets import Button  # noqa
+        from jupyter_client import AsyncKernelManager
+        from nbclient import NotebookClient
+    except Exception as exc:
+        return pytest.skip(f"Skipping Notebook test: {exc}")
+    km = AsyncKernelManager(config=None)
+    nb = nbformat.reads(
+        """
+{
+  "cells": [
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {},
+      "outputs": [],
+      "source": []
+    }
+  ],
+  "metadata": {
+    "language_info": {
+      "codemirror_mode": {
+        "name": "ipython",
+        "version": 3},
+      "file_extension": ".py",
+      "mimetype": "text/x-python",
+      "name": "python",
+      "nbconvert_exporter": "python",
+      "pygments_lexer": "ipython3",
+      "version": "3.7.5"
+    }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 4
+}""",
+        as_version=4,
+    )
+    client = NotebookClient(nb, km=km)
+    yield client
+    try:
+        client._cleanup_kernel()
+    except Exception:
+        pass
+
+
+@pytest.fixture(scope="function")
+def nbexec(_nbclient):
+    """Execute Python code in a notebook."""
+    # Adapted/simplified from nbclient/client.py (BSD-3-Clause)
+    from nbclient.exceptions import CellExecutionError
+
+    _nbclient._cleanup_kernel()
+
+    def execute(code, reset=False):
+        _nbclient.reset_execution_trackers()
+        with _nbclient.setup_kernel():
+            assert _nbclient.kc is not None
+            cell = Bunch(cell_type="code", metadata={}, source=dedent(code), outputs=[])
+            try:
+                _nbclient.execute_cell(cell, 0, execution_count=0)
+            except CellExecutionError:  # pragma: no cover
+                for kind in ("stdout", "stderr"):
+                    print(
+                        "\n".join(
+                            o["text"] for o in cell.outputs if o.get("name", "") == kind
+                        ),
+                        file=getattr(sys, kind),
+                    )
+                raise
+            _nbclient.set_widgets_metadata()
+
+    yield execute
+
+
+def pytest_runtest_call(item):
+    """Run notebook code written in Python."""
+    if "nbexec" in getattr(item, "fixturenames", ()):
+        nbexec = item.funcargs["nbexec"]
+        code = inspect.getsource(getattr(item.module, item.name.split("[")[0]))
+        code = code.splitlines()
+        ci = 0
+        for ci, c in enumerate(code):
+            if c.startswith(" "):  # actual content
+                break
+        code = "\n".join(code[ci:])
+
+        def run(nbexec=nbexec, code=code):
+            nbexec(code)
+
+        item.runtest = run
+        return
+
+
+@pytest.fixture(
+    params=(
+        [nirsport2, nirsport2_snirf, testing._pytest_param()],
+        [nirsport2_2021_9, nirsport2_20219_snirf, testing._pytest_param()],
+    )
+)
+def nirx_snirf(request):
+    """Return a (raw_nirx, raw_snirf) matched pair."""
+    pytest.importorskip("h5py")
+    skipper = request.param[2].marks[0].mark
+    if skipper.args[0]:  # will skip
+        pytest.skip(skipper.kwargs["reason"])
+    return (
+        read_raw_nirx(request.param[0], preload=True),
+        read_raw_snirf(request.param[1], preload=True),
+    )
+
+
+@pytest.fixture
+def qt_windows_closed(request):
+    """Ensure that no new Qt windows are open after a test."""
+    _check_skip_backend("pyvistaqt")
+    app = _init_mne_qtapp()
+
+    app.processEvents()
+    gc.collect()
+    n_before = len(app.topLevelWidgets())
+    marks = set(mark.name for mark in request.node.iter_markers())
+    yield
+    app.processEvents()
+    gc.collect()
+    if "allow_unclosed" in marks:
+        return
+    # Don't check when the test fails
+    if not _test_passed(request):
+        return
+    widgets = app.topLevelWidgets()
+    n_after = len(widgets)
+    assert n_before == n_after, widgets[-4:]
+
+
+# https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures  # noqa: E501
+_phase_report_key = StashKey()
+
+
+@pytest.hookimpl(tryfirst=True, hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+    """Stash the status of each item."""
+    outcome = yield
+    rep = outcome.get_result()
+    item.stash.setdefault(_phase_report_key, {})[rep.when] = rep
+
+
+@pytest.fixture(scope="function")
+def eyetrack_cal():
+    """Create a toy calibration instance."""
+    screen_size = (0.4, 0.225)  # width, height in meters
+    screen_resolution = (1920, 1080)
+    screen_distance = 0.7  # meters
+    onset = 0
+    model = "HV9"
+    eye = "R"
+    avg_error = 0.5
+    max_error = 1.0
+    positions = np.zeros((9, 2))
+    offsets = np.zeros((9,))
+    gaze = np.zeros((9, 2))
+    cal = mne.preprocessing.eyetracking.Calibration(
+        screen_size=screen_size,
+        screen_distance=screen_distance,
+        screen_resolution=screen_resolution,
+        eye=eye,
+        model=model,
+        positions=positions,
+        offsets=offsets,
+        gaze=gaze,
+        onset=onset,
+        avg_error=avg_error,
+        max_error=max_error,
+    )
+    return cal
+
+
+@pytest.fixture(scope="function")
+def eyetrack_raw():
+    """Create a toy raw instance with eyetracking channels."""
+    # simulate a steady fixation at the center pixel of a 1920x1080 resolution screen
+    shape = (1, 100)  # x or y, time
+    data = np.vstack([np.full(shape, 960), np.full(shape, 540), np.full(shape, 0)])
+
+    info = mne.create_info(
+        ch_names=["xpos", "ypos", "pupil"], sfreq=100, ch_types="eyegaze"
+    )
+    more_info = dict(
+        xpos=("eyegaze", "px", "right", "x"),
+        ypos=("eyegaze", "px", "right", "y"),
+        pupil=("pupil", "au", "right"),
+    )
+    raw = mne.io.RawArray(data, info)
+    raw = mne.preprocessing.eyetracking.set_channel_types_eyetrack(raw, more_info)
+    return raw
diff --git a/mne/coreg.py b/mne/coreg.py
new file mode 100644
index 0000000..f28c614
---
/dev/null +++ b/mne/coreg.py @@ -0,0 +1,2262 @@ +"""Coregistration between different coordinate frames.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import configparser +import fnmatch +import os +import os.path as op +import re +import shutil +import stat +import sys +from functools import reduce +from glob import glob, iglob + +import numpy as np +from scipy.optimize import leastsq +from scipy.spatial.distance import cdist + +from ._fiff._digitization import _get_data_as_dict_from_dig +from ._fiff.constants import FIFF +from ._fiff.meas_info import Info, read_fiducials, read_info, write_fiducials + +# keep get_mni_fiducials for backward compat (no burden to keep in this +# namespace, too) +from ._freesurfer import ( + _read_mri_info, + estimate_head_mri_t, # noqa: F401 + get_mni_fiducials, +) +from .bem import read_bem_surfaces, write_bem_surfaces +from .channels import make_dig_montage +from .label import Label, read_label +from .source_space import ( + add_source_space_distances, + read_source_spaces, # noqa: F401 + write_source_spaces, +) +from .surface import ( + _DistanceQuery, + _normalize_vectors, + complete_surface_info, + decimate_surface, + read_surface, + write_surface, +) +from .transforms import ( + Transform, + _angle_between_quats, + _fit_matched_points, + _quat_to_euler, + _read_fs_xfm, + _write_fs_xfm, + apply_trans, + combine_transforms, + invert_transform, + rot_to_quat, + rotation, + rotation3d, + scaling, + translation, +) +from .utils import ( + _check_option, + _check_subject, + _import_nibabel, + _validate_type, + fill_doc, + get_config, + get_subjects_dir, + logger, + pformat, + verbose, + warn, +) +from .viz._3d import _fiducial_coords + +# some path templates +trans_fname = os.path.join("{raw_dir}", "{subject}-trans.fif") +subject_dirname = os.path.join("{subjects_dir}", "{subject}") +bem_dirname = os.path.join(subject_dirname, "bem") +mri_dirname = os.path.join(subject_dirname, "mri") +mri_transforms_dirname = os.path.join(subject_dirname, "mri", "transforms") +surf_dirname = os.path.join(subject_dirname, "surf") +bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif") +head_bem_fname = pformat(bem_fname, name="head") +head_sparse_fname = pformat(bem_fname, name="head-sparse") +fid_fname = pformat(bem_fname, name="fiducials") +fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif") +src_fname = os.path.join(bem_dirname, "{subject}-{spacing}-src.fif") +_head_fnames = ( + os.path.join(bem_dirname, "outer_skin.surf"), + head_sparse_fname, + head_bem_fname, +) +_high_res_head_fnames = ( + os.path.join(bem_dirname, "{subject}-head-dense.fif"), + os.path.join(surf_dirname, "lh.seghead"), + os.path.join(surf_dirname, "lh.smseghead"), +) + + +def _map_fid_name_to_idx(name: str) -> int: + """Map a fiducial name to its index in the DigMontage.""" + name = name.lower() + + if name == "lpa": + return 0 + elif name == "nasion": + return 1 + else: + assert name == "rpa" + return 2 + + +def _make_writable(fname): + """Make a file writable.""" + os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128) # write + + +def _make_writable_recursive(path): + """Recursively set writable.""" + if sys.platform.startswith("win"): + return # can't safely set perms + for root, dirs, files in os.walk(path, topdown=False): + for f in dirs + files: + _make_writable(os.path.join(root, f)) + + +def _find_head_bem(subject, subjects_dir, high_res=False): + """Find a high resolution head.""" + # 
XXX this should be refactored with mne.surface.get_head_surf ...
+    fnames = _high_res_head_fnames if high_res else _head_fnames
+    for fname in fnames:
+        path = fname.format(subjects_dir=subjects_dir, subject=subject)
+        if os.path.exists(path):
+            return path
+
+
+@fill_doc
+def coregister_fiducials(info, fiducials, tol=0.01):
+    """Create a head-MRI transform by aligning 3 fiducial points.
+
+    Parameters
+    ----------
+    %(info_not_none)s
+    fiducials : path-like | list of dict
+        Fiducials in MRI coordinate space (either path to a ``*-fiducials.fif``
+        file or list of fiducials as returned by :func:`read_fiducials`).
+    tol : float
+        The error tolerance for the fit (passed on to ``fit_matched_points``).
+
+    Returns
+    -------
+    trans : Transform
+        The head-MRI transform.
+
+    .. note:: The :class:`mne.Info` object fiducials must be in the
+              head coordinate space.
+    """
+    if isinstance(info, str):
+        info = read_info(info)
+    if isinstance(fiducials, str):
+        fiducials, coord_frame_to = read_fiducials(fiducials)
+    else:
+        coord_frame_to = FIFF.FIFFV_COORD_MRI
+    frames_from = {d["coord_frame"] for d in info["dig"]}
+    if len(frames_from) > 1:
+        raise ValueError("info contains fiducials from different coordinate frames")
+    else:
+        coord_frame_from = frames_from.pop()
+    coords_from = _fiducial_coords(info["dig"])
+    coords_to = _fiducial_coords(fiducials, coord_frame_to)
+    trans = fit_matched_points(coords_from, coords_to, tol=tol)
+    return Transform(coord_frame_from, coord_frame_to, trans)
+
+
+@verbose
+def create_default_subject(fs_home=None, update=False, subjects_dir=None, verbose=None):
+    """Create an average brain subject for subjects without structural MRI.
+
+    Create a copy of fsaverage from the FreeSurfer directory in subjects_dir
+    and add auxiliary files from the mne package.
+
+    Parameters
+    ----------
+    fs_home : None | str
+        The FreeSurfer home directory (only needed if ``FREESURFER_HOME`` is
+        not specified as environment variable).
+    update : bool
+        In cases where a copy of the fsaverage brain already exists in the
+        subjects_dir, this option allows copying only those files that don't
+        already exist in the fsaverage directory.
+    subjects_dir : None | path-like
+        Override the ``SUBJECTS_DIR`` environment variable
+        (``os.environ['SUBJECTS_DIR']``) as destination for the new subject.
+    %(verbose)s
+
+    Notes
+    -----
+    When no structural MRI is available for a subject, an average brain can be
+    substituted. FreeSurfer comes with such an average brain model, and MNE
+    comes with some auxiliary files which make coregistration easier.
+    :py:func:`create_default_subject` copies the relevant
+    files from FreeSurfer into the current subjects_dir, and also adds the
+    auxiliary files provided by MNE.
+    """
+    subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True))
+    if fs_home is None:
+        fs_home = get_config("FREESURFER_HOME", fs_home)
+        if fs_home is None:
+            raise ValueError(
+                "FREESURFER_HOME environment variable not found. Please "
+                "specify the fs_home parameter in your call to "
+                "create_default_subject()."
+            )
+
+    # make sure FreeSurfer files exist
+    fs_src = os.path.join(fs_home, "subjects", "fsaverage")
+    if not os.path.exists(fs_src):
+        raise OSError(
+            f"fsaverage not found at {fs_src!r}. Is fs_home specified correctly?"
+ ) + for name in ("label", "mri", "surf"): + dirname = os.path.join(fs_src, name) + if not os.path.isdir(dirname): + raise OSError( + "FreeSurfer fsaverage seems to be incomplete: No directory named " + f"{name} found in {fs_src}" + ) + + # make sure destination does not already exist + dest = os.path.join(subjects_dir, "fsaverage") + if dest == fs_src: + raise OSError( + "Your subjects_dir points to the FreeSurfer subjects_dir " + f"({repr(subjects_dir)}). The default subject can not be created in the " + "FreeSurfer installation directory; please specify a different " + "subjects_dir." + ) + elif (not update) and os.path.exists(dest): + raise OSError( + 'Can not create fsaverage because "fsaverage" already exists in ' + f"subjects_dir {repr(subjects_dir)}. Delete or rename the existing " + "fsaverage subject folder." + ) + + # copy fsaverage from FreeSurfer + logger.info("Copying fsaverage subject from FreeSurfer directory...") + if (not update) or not os.path.exists(dest): + shutil.copytree(fs_src, dest) + _make_writable_recursive(dest) + + # copy files from mne + source_fname = os.path.join( + os.path.dirname(__file__), "data", "fsaverage", "fsaverage-%s.fif" + ) + dest_bem = os.path.join(dest, "bem") + if not os.path.exists(dest_bem): + os.mkdir(dest_bem) + logger.info("Copying auxiliary fsaverage files from mne...") + dest_fname = os.path.join(dest_bem, "fsaverage-%s.fif") + _make_writable_recursive(dest_bem) + for name in ("fiducials", "head", "inner_skull-bem", "trans"): + if not os.path.exists(dest_fname % name): + shutil.copy(source_fname % name, dest_bem) + + +def _decimate_points(pts, res=10): + """Decimate the number of points using a voxel grid. + + Create a voxel grid with a specified resolution and retain at most one + point per voxel. For each voxel, the point closest to its center is + retained. + + Parameters + ---------- + pts : array, shape (n_points, 3) + The points making up the head shape. + res : scalar + The resolution of the voxel space (side length of each voxel). + + Returns + ------- + pts : array, shape = (n_points, 3) + The decimated points. + """ + pts = np.asarray(pts) + + # find the bin edges for the voxel space + xmin, ymin, zmin = pts.min(0) - res / 2.0 + xmax, ymax, zmax = pts.max(0) + res + xax = np.arange(xmin, xmax, res) + yax = np.arange(ymin, ymax, res) + zax = np.arange(zmin, zmax, res) + + # find voxels containing one or more point + H, _ = np.histogramdd(pts, bins=(xax, yax, zax), density=False) + xbins, ybins, zbins = np.nonzero(H) + x = xax[xbins] + y = yax[ybins] + z = zax[zbins] + mids = np.c_[x, y, z] + res / 2.0 + + # each point belongs to at most one voxel center, so figure those out + # (KDTree faster than BallTree for these small problems) + tree = _DistanceQuery(mids, method="KDTree") + _, mid_idx = tree.query(pts) + + # then figure out which to actually use based on proximity + # (take advantage of sorting the mid_idx to get our mapping of + # pts to nearest voxel midpoint) + sort_idx = np.argsort(mid_idx) + bounds = np.cumsum(np.concatenate([[0], np.bincount(mid_idx, minlength=len(mids))])) + assert len(bounds) == len(mids) + 1 + out = list() + for mi, mid in enumerate(mids): + # Now we do this: + # + # use_pts = pts[mid_idx == mi] + # + # But it's faster for many points than making a big boolean indexer + # over and over (esp. since each point can only belong to a single + # voxel). 
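+        # A concrete (hypothetical) illustration: with mid_idx = [2, 0, 2, 1]
+        # we get sort_idx = [1, 3, 0, 2] and bounds = [0, 1, 2, 4], so voxel 0
+        # owns pts[1], voxel 1 owns pts[3], and voxel 2 owns pts[0] and pts[2].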
+        use_pts = pts[sort_idx[bounds[mi] : bounds[mi + 1]]]
+        if not len(use_pts):
+            out.append([np.inf] * 3)
+        else:
+            out.append(use_pts[np.argmin(cdist(use_pts, mid[np.newaxis])[:, 0])])
+    out = np.array(out, float).reshape(-1, 3)
+    out = out[np.abs(out - mids).max(axis=1) < res / 2.0]
+
+    return out
+
+
+def _trans_from_params(param_info, params):
+    """Convert transformation parameters into a transformation matrix."""
+    do_rotate, do_translate, do_scale = param_info
+    i = 0
+    trans = []
+
+    if do_rotate:
+        x, y, z = params[:3]
+        trans.append(rotation(x, y, z))
+        i += 3
+
+    if do_translate:
+        x, y, z = params[i : i + 3]
+        trans.insert(0, translation(x, y, z))
+        i += 3
+
+    if do_scale == 1:
+        s = params[i]
+        trans.append(scaling(s, s, s))
+    elif do_scale == 3:
+        x, y, z = params[i : i + 3]
+        trans.append(scaling(x, y, z))
+
+    trans = reduce(np.dot, trans)
+    return trans
+
+
+_ALLOW_ANALITICAL = True
+
+
+# XXX this function should be moved out of coreg as used elsewhere
+def fit_matched_points(
+    src_pts,
+    tgt_pts,
+    rotate=True,
+    translate=True,
+    scale=False,
+    tol=None,
+    x0=None,
+    out="trans",
+    weights=None,
+):
+    """Find a transform between matched sets of points.
+
+    This minimizes the squared distance between two matching sets of points.
+
+    Uses :func:`scipy.optimize.leastsq` to find a transformation involving
+    a combination of rotation, translation, and scaling (in that order).
+
+    Parameters
+    ----------
+    src_pts : array, shape = (n, 3)
+        Points to which the transform should be applied.
+    tgt_pts : array, shape = (n, 3)
+        Points to which src_pts should be fitted. Each point in tgt_pts should
+        correspond to the point in src_pts with the same index.
+    rotate : bool
+        Allow rotation of the ``src_pts``.
+    translate : bool
+        Allow translation of the ``src_pts``.
+    scale : bool | int
+        Number of scaling parameters. With False (or 0), points are not
+        scaled. With True (or 1), points are scaled by the same factor along
+        all axes; with 3, a separate factor is fitted for each axis.
+    tol : scalar | None
+        The error tolerance. If the distance between any of the matched points
+        exceeds this value in the solution, a RuntimeError is raised. With
+        None, no error check is performed.
+    x0 : None | tuple
+        Initial values for the fit parameters.
+    out : 'params' | 'trans'
+        In what format to return the estimate: 'params' returns a tuple with
+        the fit parameters; 'trans' returns a transformation matrix of shape
+        (4, 4).
+    weights : array, shape (n,) | None
+        If not None, weights applied to the point-wise residuals during the
+        fit.
+
+    Returns
+    -------
+    trans : array, shape (4, 4)
+        Transformation that, if applied to src_pts, minimizes the squared
+        distance to tgt_pts. Only returned if out=='trans'.
+    params : array, shape (n_params, )
+        A single tuple containing the rotation, translation, and scaling
+        parameters in that order (as applicable).
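+
+    Examples
+    --------
+    A minimal sketch with synthetic points (illustrative values only),
+    recovering a pure translation::
+
+        >>> import numpy as np
+        >>> src = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
+        >>> tgt = src + [0., 0., 1.]
+        >>> trans = fit_matched_points(src, tgt)  # rotate + translate (default)
+        >>> np.allclose(trans[:3, 3], [0., 0., 1.], atol=1e-6)
+        True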
+ """ + src_pts = np.atleast_2d(src_pts) + tgt_pts = np.atleast_2d(tgt_pts) + if src_pts.shape != tgt_pts.shape: + raise ValueError( + "src_pts and tgt_pts must have same shape " + f"(got {src_pts.shape}, {tgt_pts.shape})" + ) + if weights is not None: + weights = np.asarray(weights, src_pts.dtype) + if weights.ndim != 1 or weights.size not in (src_pts.shape[0], 1): + raise ValueError( + f"weights (shape={weights.shape}) must be None or have shape " + f"({src_pts.shape[0]},)" + ) + weights = weights[:, np.newaxis] + + param_info = (bool(rotate), bool(translate), int(scale)) + del rotate, translate, scale + + # very common use case, rigid transformation (maybe with one scale factor, + # with or without weighted errors) + if param_info in ((True, True, 0), (True, True, 1)) and _ALLOW_ANALITICAL: + src_pts = np.asarray(src_pts, float) + tgt_pts = np.asarray(tgt_pts, float) + if weights is not None: + weights = np.asarray(weights, float) + x, s = _fit_matched_points(src_pts, tgt_pts, weights, bool(param_info[2])) + x[:3] = _quat_to_euler(x[:3]) + x = np.concatenate((x, [s])) if param_info[2] else x + else: + x = _generic_fit(src_pts, tgt_pts, param_info, weights, x0) + + # re-create the final transformation matrix + if (tol is not None) or (out == "trans"): + trans = _trans_from_params(param_info, x) + + # assess the error of the solution + if tol is not None: + src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1)))) + est_pts = np.dot(src_pts, trans.T)[:, :3] + err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1)) + if np.any(err > tol): + raise RuntimeError(f"Error exceeds tolerance. Error = {err!r}") + + if out == "params": + return x + elif out == "trans": + return trans + else: + raise ValueError( + f"Invalid out parameter: {out!r}. Needs to be 'params' or 'trans'." 
+        )
+
+
+def _generic_fit(src_pts, tgt_pts, param_info, weights, x0):
+    if param_info[1]:  # translate
+        src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
+
+    if param_info == (True, False, 0):
+
+        def error(x):
+            rx, ry, rz = x
+            trans = rotation3d(rx, ry, rz)
+            est = np.dot(src_pts, trans.T)
+            d = tgt_pts - est
+            if weights is not None:
+                d *= weights
+            return d.ravel()
+
+        if x0 is None:
+            x0 = (0, 0, 0)
+    elif param_info == (True, True, 0):
+
+        def error(x):
+            rx, ry, rz, tx, ty, tz = x
+            trans = np.dot(translation(tx, ty, tz), rotation(rx, ry, rz))
+            est = np.dot(src_pts, trans.T)[:, :3]
+            d = tgt_pts - est
+            if weights is not None:
+                d *= weights
+            return d.ravel()
+
+        if x0 is None:
+            x0 = (0, 0, 0, 0, 0, 0)
+    elif param_info == (True, True, 1):
+
+        def error(x):
+            rx, ry, rz, tx, ty, tz, s = x
+            trans = reduce(
+                np.dot,
+                (translation(tx, ty, tz), rotation(rx, ry, rz), scaling(s, s, s)),
+            )
+            est = np.dot(src_pts, trans.T)[:, :3]
+            d = tgt_pts - est
+            if weights is not None:
+                d *= weights
+            return d.ravel()
+
+        if x0 is None:
+            x0 = (0, 0, 0, 0, 0, 0, 1)
+    elif param_info == (True, True, 3):
+
+        def error(x):
+            rx, ry, rz, tx, ty, tz, sx, sy, sz = x
+            trans = reduce(
+                np.dot,
+                (translation(tx, ty, tz), rotation(rx, ry, rz), scaling(sx, sy, sz)),
+            )
+            est = np.dot(src_pts, trans.T)[:, :3]
+            d = tgt_pts - est
+            if weights is not None:
+                d *= weights
+            return d.ravel()
+
+        if x0 is None:
+            x0 = (0, 0, 0, 0, 0, 0, 1, 1, 1)
+    else:
+        raise NotImplementedError(
+            "The specified parameter combination is not implemented: "
+            "rotate={!r}, translate={!r}, scale={!r}".format(*param_info)
+        )
+
+    x, _, _, _, _ = leastsq(error, x0, full_output=True)
+    return x
+
+
+def _find_label_paths(subject="fsaverage", pattern=None, subjects_dir=None):
+    """Find paths to label files in a subject's label directory.
+
+    Parameters
+    ----------
+    subject : str
+        Name of the mri subject.
+    pattern : str | None
+        Pattern for finding the labels relative to the label directory in the
+        MRI subject directory (e.g., "aparc/*.label" will find all labels
+        in the "subject/label/aparc" directory). With None, find all labels.
+    subjects_dir : None | path-like
+        Override the SUBJECTS_DIR environment variable
+        (os.environ['SUBJECTS_DIR']).
+
+    Returns
+    -------
+    paths : list
+        List of paths relative to the subject's label directory.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    subject_dir = subjects_dir / subject
+    lbl_dir = subject_dir / "label"
+
+    if pattern is None:
+        paths = []
+        for dirpath, _, filenames in os.walk(lbl_dir):
+            rel_dir = os.path.relpath(dirpath, lbl_dir)
+            for filename in fnmatch.filter(filenames, "*.label"):
+                path = os.path.join(rel_dir, filename)
+                paths.append(path)
+    else:
+        paths = [os.path.relpath(path, lbl_dir) for path in iglob(pattern)]
+
+    return paths
+
+
+def _find_mri_paths(subject, skip_fiducials, subjects_dir):
+    """Find all files of an mri relevant for source transformation.
+
+    Parameters
+    ----------
+    subject : str
+        Name of the mri subject.
+    skip_fiducials : bool
+        Do not scale the MRI fiducials. If False, an OSError will be raised
+        if no fiducials file can be found.
+    subjects_dir : None | path-like
+        Override the SUBJECTS_DIR environment variable
+        (os.environ['SUBJECTS_DIR']).
+
+    Returns
+    -------
+    paths : dict
+        Dictionary whose keys are relevant file type names (str), and whose
+        values are lists of paths.
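+
+        For instance (entries abbreviated, values are unexpanded templates)::
+
+            {'dirs': [...], 'surf': ['{subjects_dir}/{subject}/surf/lh.white', ...],
+             'bem': ['head', ...], 'fid': [...], 'duplicate': [...],
+             'transforms': [...], 'src': [...], 'mri': [...]}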
+ """ + subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) + paths = {} + + # directories to create + paths["dirs"] = [bem_dirname, surf_dirname] + + # surf/ files + paths["surf"] = [] + surf_fname = os.path.join(surf_dirname, "{name}") + surf_names = ( + "inflated", + "white", + "orig", + "orig_avg", + "inflated_avg", + "inflated_pre", + "pial", + "pial_avg", + "smoothwm", + "white_avg", + "seghead", + "smseghead", + ) + if os.getenv("_MNE_FEW_SURFACES", "") == "true": # for testing + surf_names = surf_names[:4] + for surf_name in surf_names: + for hemi in ("lh.", "rh."): + name = hemi + surf_name + path = surf_fname.format( + subjects_dir=subjects_dir, subject=subject, name=name + ) + if os.path.exists(path): + paths["surf"].append(pformat(surf_fname, name=name)) + surf_fname = os.path.join(bem_dirname, "{name}") + surf_names = ("inner_skull.surf", "outer_skull.surf", "outer_skin.surf") + for surf_name in surf_names: + path = surf_fname.format( + subjects_dir=subjects_dir, subject=subject, name=surf_name + ) + if os.path.exists(path): + paths["surf"].append(pformat(surf_fname, name=surf_name)) + del surf_names, surf_name, path, hemi + + # BEM files + paths["bem"] = bem = [] + path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject) + if os.path.exists(path): + bem.append("head") + bem_pattern = pformat( + bem_fname, subjects_dir=subjects_dir, subject=subject, name="*-bem" + ) + re_pattern = pformat( + bem_fname, subjects_dir=subjects_dir, subject=subject, name="(.+)" + ).replace("\\", "\\\\") + for path in iglob(bem_pattern): + match = re.match(re_pattern, path) + name = match.group(1) + bem.append(name) + del bem, path, bem_pattern, re_pattern + + # fiducials + if skip_fiducials: + paths["fid"] = [] + else: + paths["fid"] = _find_fiducials_files(subject, subjects_dir) + # check that we found at least one + if len(paths["fid"]) == 0: + raise OSError( + f"No fiducials file found for {subject}. The fiducials " + "file should be named " + "{subject}/bem/{subject}-fiducials.fif. In " + "order to scale an MRI without fiducials set " + "skip_fiducials=True." 
+ ) + + # duplicate files (curvature and some surfaces) + paths["duplicate"] = [] + path = os.path.join(surf_dirname, "{name}") + surf_fname = os.path.join(surf_dirname, "{name}") + surf_dup_names = ("curv", "sphere", "sphere.reg", "sphere.reg.avg") + for surf_dup_name in surf_dup_names: + for hemi in ("lh.", "rh."): + name = hemi + surf_dup_name + path = surf_fname.format( + subjects_dir=subjects_dir, subject=subject, name=name + ) + if os.path.exists(path): + paths["duplicate"].append(pformat(surf_fname, name=name)) + del surf_dup_name, name, path, hemi + + # transform files (talairach) + paths["transforms"] = [] + transform_fname = os.path.join(mri_transforms_dirname, "talairach.xfm") + path = transform_fname.format(subjects_dir=subjects_dir, subject=subject) + if os.path.exists(path): + paths["transforms"].append(transform_fname) + del transform_fname, path + + # find source space files + paths["src"] = src = [] + bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject) + fnames = fnmatch.filter(os.listdir(bem_dir), "*-src.fif") + prefix = subject + "-" + for fname in fnames: + if fname.startswith(prefix): + fname = f"{{subject}}-{fname[len(prefix) :]}" + path = os.path.join(bem_dirname, fname) + src.append(path) + + # find MRIs + mri_dir = mri_dirname.format(subjects_dir=subjects_dir, subject=subject) + fnames = fnmatch.filter(os.listdir(mri_dir), "*.mgz") + paths["mri"] = [os.path.join(mri_dir, f) for f in fnames] + + return paths + + +def _find_fiducials_files(subject, subjects_dir): + """Find fiducial files.""" + fid = [] + # standard fiducials + if os.path.exists(fid_fname.format(subjects_dir=subjects_dir, subject=subject)): + fid.append(fid_fname) + # fiducials with subject name + pattern = pformat( + fid_fname_general, subjects_dir=subjects_dir, subject=subject, head="*" + ) + regex = pformat( + fid_fname_general, subjects_dir=subjects_dir, subject=subject, head="(.+)" + ).replace("\\", "\\\\") + for path in iglob(pattern): + match = re.match(regex, path) + head = match.group(1).replace(subject, "{subject}") + fid.append(pformat(fid_fname_general, head=head)) + return fid + + +def _is_mri_subject(subject, subjects_dir=None): + """Check whether a directory in subjects_dir is an mri subject directory. + + Parameters + ---------- + subject : str + Name of the potential subject/directory. + subjects_dir : None | path-like + Override the SUBJECTS_DIR environment variable. + + Returns + ------- + is_mri_subject : bool + Whether ``subject`` is an mri subject. + """ + subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) + return bool( + _find_head_bem(subject, subjects_dir) + or _find_head_bem(subject, subjects_dir, high_res=True) + ) + + +def _mri_subject_has_bem(subject, subjects_dir=None): + """Check whether an mri subject has a file matching the bem pattern. + + Parameters + ---------- + subject : str + Name of the subject. + subjects_dir : None | path-like + Override the SUBJECTS_DIR environment variable. + + Returns + ------- + has_bem_file : bool + Whether ``subject`` has a bem file. + """ + subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) + pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject, name="*-bem") + fnames = glob(pattern) + return bool(len(fnames)) + + +def read_mri_cfg(subject, subjects_dir=None): + """Read information from the cfg file of a scaled MRI brain. + + Parameters + ---------- + subject : str + Name of the scaled MRI subject. 
+    subjects_dir : None | path-like
+        Override the ``SUBJECTS_DIR`` environment variable.
+
+    Returns
+    -------
+    cfg : dict
+        Dictionary with entries from the MRI's cfg file.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    fname = subjects_dir / subject / "MRI scaling parameters.cfg"
+
+    if not fname.exists():
+        raise OSError(
+            f"{subject!r} does not seem to be a scaled mri subject: {fname!r} does "
+            "not exist."
+        )
+
+    logger.info(f"Reading MRI cfg file {fname}")
+    config = configparser.RawConfigParser()
+    config.read(fname)
+    n_params = config.getint("MRI Scaling", "n_params")
+    if n_params == 1:
+        scale = config.getfloat("MRI Scaling", "scale")
+    elif n_params == 3:
+        scale_str = config.get("MRI Scaling", "scale")
+        scale = np.array([float(s) for s in scale_str.split()])
+    else:
+        raise ValueError(f"Invalid n_params value in MRI cfg: {n_params}")
+
+    out = {
+        "subject_from": config.get("MRI Scaling", "subject_from"),
+        "n_params": n_params,
+        "scale": scale,
+    }
+    return out
+
+
+def _write_mri_config(fname, subject_from, subject_to, scale):
+    """Write the cfg file describing a scaled MRI subject.
+
+    Parameters
+    ----------
+    fname : path-like
+        Target file.
+    subject_from : str
+        Name of the source MRI subject.
+    subject_to : str
+        Name of the scaled MRI subject.
+    scale : float | array_like, shape = (3,)
+        The scaling parameter.
+    """
+    scale = np.asarray(scale)
+    if np.isscalar(scale) or scale.shape == ():
+        n_params = 1
+    else:
+        n_params = 3
+
+    config = configparser.RawConfigParser()
+    config.add_section("MRI Scaling")
+    config.set("MRI Scaling", "subject_from", subject_from)
+    config.set("MRI Scaling", "subject_to", subject_to)
+    config.set("MRI Scaling", "n_params", str(n_params))
+    if n_params == 1:
+        config.set("MRI Scaling", "scale", str(scale))
+    else:
+        config.set("MRI Scaling", "scale", " ".join([str(s) for s in scale]))
+    config.set("MRI Scaling", "version", "1")
+    with open(fname, "w") as fid:
+        config.write(fid)
+
+
+def _scale_params(subject_to, subject_from, scale, subjects_dir):
+    """Assemble parameters for scaling.
+
+    Returns
+    -------
+    subjects_dir : path-like
+        Subjects directory.
+    subject_from : str
+        Name of the source subject.
+    scale : array
+        Scaling factor, either shape=() for uniform scaling or shape=(3,) for
+        non-uniform scaling.
+    uniform : bool
+        Whether scaling is uniform.
+    """
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    if (subject_from is None) != (scale is None):
+        raise TypeError(
+            "Need to provide either both subject_from and scale "
+            "parameters, or neither."
+        )
+
+    if subject_from is None:
+        cfg = read_mri_cfg(subject_to, subjects_dir)
+        subject_from = cfg["subject_from"]
+        n_params = cfg["n_params"]
+        assert n_params in (1, 3)
+        scale = cfg["scale"]
+    scale = np.atleast_1d(scale)
+    if scale.ndim != 1 or scale.shape[0] not in (1, 3):
+        raise ValueError(
+            "Invalid shape for scale parameter. Need scalar or array of length 3. Got "
+            f"shape {scale.shape}."
+        )
+    n_params = len(scale)
+    return str(subjects_dir), subject_from, scale, n_params == 1
+
+
+@verbose
+def scale_bem(
+    subject_to,
+    bem_name,
+    subject_from=None,
+    scale=None,
+    subjects_dir=None,
+    *,
+    on_defects="raise",
+    verbose=None,
+):
+    """Scale a bem file.
+
+    Parameters
+    ----------
+    subject_to : str
+        Name of the scaled MRI subject (the destination mri subject).
+    bem_name : str
+        Name of the bem file. For example, to scale
+        ``fsaverage-inner_skull-bem.fif``, the bem_name would be
+        "inner_skull-bem".
+ subject_from : None | str + The subject from which to read the source space. If None, subject_from + is read from subject_to's config file. + scale : None | float | array, shape = (3,) + Scaling factor. Has to be specified if subjects_from is specified, + otherwise it is read from subject_to's config file. + subjects_dir : None | str + Override the SUBJECTS_DIR environment variable. + %(on_defects)s + + .. versionadded:: 1.0 + %(verbose)s + """ + subjects_dir, subject_from, scale, uniform = _scale_params( + subject_to, subject_from, scale, subjects_dir + ) + + src = bem_fname.format( + subjects_dir=subjects_dir, subject=subject_from, name=bem_name + ) + dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to, name=bem_name) + + if os.path.exists(dst): + raise OSError(f"File already exists: {dst}") + + surfs = read_bem_surfaces(src, on_defects=on_defects) + for surf in surfs: + surf["rr"] *= scale + if not uniform: + assert len(surf["nn"]) > 0 + surf["nn"] /= scale + _normalize_vectors(surf["nn"]) + write_bem_surfaces(dst, surfs) + + +def scale_labels( + subject_to, + pattern=None, + overwrite=False, + subject_from=None, + scale=None, + subjects_dir=None, +): + r"""Scale labels to match a brain that was previously created by scaling. + + Parameters + ---------- + subject_to : str + Name of the scaled MRI subject (the destination brain). + pattern : str | None + Pattern for finding the labels relative to the label directory in the + MRI subject directory (e.g., "lh.BA3a.label" will scale + "fsaverage/label/lh.BA3a.label"; "aparc/\*.label" will find all labels + in the "fsaverage/label/aparc" directory). With None, scale all labels. + overwrite : bool + Overwrite any label file that already exists for subject_to (otherwise + existing labels are skipped). + subject_from : None | str + Name of the original MRI subject (the brain that was scaled to create + subject_to). If None, the value is read from subject_to's cfg file. + scale : None | float | array_like, shape = (3,) + Scaling parameter. If None, the value is read from subject_to's cfg + file. + subjects_dir : None | path-like + Override the ``SUBJECTS_DIR`` environment variable. + """ + subjects_dir, subject_from, scale, _ = _scale_params( + subject_to, subject_from, scale, subjects_dir + ) + + # find labels + paths = _find_label_paths(subject_from, pattern, subjects_dir) + if not paths: + return + + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + src_root = subjects_dir / subject_from / "label" + dst_root = subjects_dir / subject_to / "label" + + # scale labels + for fname in paths: + dst = dst_root / fname + if not overwrite and dst.exists(): + continue + + if not dst.parent.exists(): + os.makedirs(dst.parent) + + src = src_root / fname + l_old = read_label(src) + pos = l_old.pos * scale + l_new = Label( + l_old.vertices, + pos, + l_old.values, + l_old.hemi, + l_old.comment, + subject=subject_to, + ) + l_new.save(dst) + + +@verbose +def scale_mri( + subject_from, + subject_to, + scale, + overwrite=False, + subjects_dir=None, + skip_fiducials=False, + labels=True, + annot=False, + *, + on_defects="raise", + verbose=None, +): + """Create a scaled copy of an MRI subject. + + Parameters + ---------- + subject_from : str + Name of the subject providing the MRI. + subject_to : str + New subject name for which to save the scaled MRI. + scale : float | array_like, shape = (3,) + The scaling factor (one or 3 parameters). + overwrite : bool + If an MRI already exists for subject_to, overwrite it. 
+ subjects_dir : None | path-like + Override the ``SUBJECTS_DIR`` environment variable. + skip_fiducials : bool + Do not scale the MRI fiducials. If False (default), an OSError will be + raised if no fiducials file can be found. + labels : bool + Also scale all labels (default True). + annot : bool + Copy ``*.annot`` files to the new location (default False). + %(on_defects)s + + .. versionadded:: 1.0 + %(verbose)s + + See Also + -------- + scale_bem : Add a scaled BEM to a scaled MRI. + scale_labels : Add labels to a scaled MRI. + scale_source_space : Add a source space to a scaled MRI. + + Notes + ----- + This function will automatically call :func:`scale_bem`, + :func:`scale_labels`, and :func:`scale_source_space` based on expected + filename patterns in the subject directory. + """ + subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) + paths = _find_mri_paths(subject_from, skip_fiducials, subjects_dir) + scale = np.atleast_1d(scale) + if scale.shape == (3,): + if np.isclose(scale[1], scale[0]) and np.isclose(scale[2], scale[0]): + scale = scale[0] # speed up scaling conditionals using a singleton + elif scale.shape != (1,): + raise ValueError(f"scale must have shape (3,) or (1,), got {scale.shape}") + + # make sure we have an empty target directory + dest = subject_dirname.format(subject=subject_to, subjects_dir=subjects_dir) + if os.path.exists(dest): + if not overwrite: + raise OSError( + f"Subject directory for {subject_to} already exists: {dest!r}" + ) + shutil.rmtree(dest) + + logger.debug("create empty directory structure") + for dirname in paths["dirs"]: + dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir) + os.makedirs(dir_) + + logger.debug("save MRI scaling parameters") + fname = os.path.join(dest, "MRI scaling parameters.cfg") + _write_mri_config(fname, subject_from, subject_to, scale) + + logger.debug("surf files [in mm]") + for fname in paths["surf"]: + src = fname.format(subject=subject_from, subjects_dir=subjects_dir) + src = os.path.realpath(src) + dest = fname.format(subject=subject_to, subjects_dir=subjects_dir) + pts, tri = read_surface(src) + write_surface(dest, pts * scale, tri) + + logger.debug("BEM files [in m]") + for bem_name in paths["bem"]: + scale_bem( + subject_to, + bem_name, + subject_from, + scale, + subjects_dir, + on_defects=on_defects, + verbose=False, + ) + + logger.debug("fiducials [in m]") + for fname in paths["fid"]: + src = fname.format(subject=subject_from, subjects_dir=subjects_dir) + src = os.path.realpath(src) + pts, cframe = read_fiducials(src, verbose=False) + for pt in pts: + pt["r"] = pt["r"] * scale + dest = fname.format(subject=subject_to, subjects_dir=subjects_dir) + write_fiducials(dest, pts, cframe, overwrite=True, verbose=False) + + logger.debug("MRIs [nibabel]") + os.mkdir(mri_dirname.format(subjects_dir=subjects_dir, subject=subject_to)) + for fname in paths["mri"]: + mri_name = os.path.basename(fname) + _scale_mri(subject_to, mri_name, subject_from, scale, subjects_dir) + + logger.debug("Transforms") + for mri_name in paths["mri"]: + if mri_name.endswith("T1.mgz"): + os.mkdir( + mri_transforms_dirname.format( + subjects_dir=subjects_dir, subject=subject_to + ) + ) + for fname in paths["transforms"]: + xfm_name = os.path.basename(fname) + _scale_xfm( + subject_to, xfm_name, mri_name, subject_from, scale, subjects_dir + ) + break + + logger.debug("duplicate files") + for fname in paths["duplicate"]: + src = fname.format(subject=subject_from, subjects_dir=subjects_dir) + dest = 
fname.format(subject=subject_to, subjects_dir=subjects_dir) + shutil.copyfile(src, dest) + + logger.debug("source spaces") + for fname in paths["src"]: + src_name = os.path.basename(fname) + scale_source_space( + subject_to, src_name, subject_from, scale, subjects_dir, verbose=False + ) + + logger.debug("labels [in m]") + os.mkdir(os.path.join(subjects_dir, subject_to, "label")) + if labels: + scale_labels( + subject_to, + subject_from=subject_from, + scale=scale, + subjects_dir=subjects_dir, + ) + + logger.debug("copy *.annot files") + # they don't contain scale-dependent information + if annot: + src_pattern = os.path.join(subjects_dir, subject_from, "label", "*.annot") + dst_dir = os.path.join(subjects_dir, subject_to, "label") + for src_file in iglob(src_pattern): + shutil.copy(src_file, dst_dir) + + +@verbose +def scale_source_space( + subject_to, + src_name, + subject_from=None, + scale=None, + subjects_dir=None, + n_jobs=None, + verbose=None, +): + """Scale a source space for an mri created with scale_mri(). + + Parameters + ---------- + subject_to : str + Name of the scaled MRI subject (the destination mri subject). + src_name : str + Source space name. Can be a spacing parameter (e.g., ``'7'``, + ``'ico4'``, ``'oct6'``) or a file name of a source space file relative + to the bem directory; if the file name contains the subject name, it + should be indicated as "{subject}" in ``src_name`` (e.g., + ``"{subject}-my_source_space-src.fif"``). + subject_from : None | str + The subject from which to read the source space. If None, subject_from + is read from subject_to's config file. + scale : None | float | array, shape = (3,) + Scaling factor. Has to be specified if subjects_from is specified, + otherwise it is read from subject_to's config file. + subjects_dir : None | str + Override the SUBJECTS_DIR environment variable. + n_jobs : int + Number of jobs to run in parallel if recomputing distances (only + applies if scale is an array of length 3, and will not use more cores + than there are source spaces). + %(verbose)s + + Notes + ----- + When scaling volume source spaces, the source (vertex) locations are + scaled, but the reference to the MRI volume is left unchanged. Transforms + are updated so that source estimates can be plotted on the original MRI + volume. 
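+
+    Examples
+    --------
+    A minimal sketch, assuming ``scale_mri`` has already produced a scaled
+    subject named ``'small'`` (hypothetical name) with its cfg file in place,
+    so ``subject_from`` and ``scale`` can be read from that file::
+
+        >>> scale_source_space('small', 'oct-6')  # doctest: +SKIP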
+ """ + subjects_dir, subject_from, scale, uniform = _scale_params( + subject_to, subject_from, scale, subjects_dir + ) + # if n_params==1 scale is a scalar; if n_params==3 scale is a (3,) array + + # find the source space file names + if src_name.isdigit(): + spacing = src_name # spacing in mm + src_pattern = src_fname + else: + match = re.match(r"(oct|ico|vol)-?(\d+)$", src_name) + if match: + spacing = "-".join(match.groups()) + src_pattern = src_fname + else: + spacing = None + src_pattern = os.path.join(bem_dirname, src_name) + + src = src_pattern.format( + subjects_dir=subjects_dir, subject=subject_from, spacing=spacing + ) + dst = src_pattern.format( + subjects_dir=subjects_dir, subject=subject_to, spacing=spacing + ) + + # read and scale the source space [in m] + sss = read_source_spaces(src) + logger.info("scaling source space %s: %s -> %s", spacing, subject_from, subject_to) + logger.info("Scale factor: %s", scale) + add_dist = False + for ss in sss: + ss["subject_his_id"] = subject_to + ss["rr"] *= scale + # additional tags for volume source spaces + for key in ("vox_mri_t", "src_mri_t"): + # maintain transform to original MRI volume ss['mri_volume_name'] + if key in ss: + ss[key]["trans"][:3] *= scale[:, np.newaxis] + # distances and patch info + if uniform: + if ss["dist"] is not None: + ss["dist"] *= scale[0] + # Sometimes this is read-only due to how it's read + ss["nearest_dist"] = ss["nearest_dist"] * scale + ss["dist_limit"] = ss["dist_limit"] * scale + else: # non-uniform scaling + ss["nn"] /= scale + _normalize_vectors(ss["nn"]) + if ss["dist"] is not None: + add_dist = True + dist_limit = float(np.abs(sss[0]["dist_limit"])) + elif ss["nearest"] is not None: + add_dist = True + dist_limit = 0 + + if add_dist: + logger.info("Recomputing distances, this might take a while") + add_source_space_distances(sss, dist_limit, n_jobs) + + write_source_spaces(dst, sss) + + +def _scale_mri(subject_to, mri_fname, subject_from, scale, subjects_dir): + """Scale an MRI by setting its affine.""" + subjects_dir, subject_from, scale, _ = _scale_params( + subject_to, subject_from, scale, subjects_dir + ) + nibabel = _import_nibabel("scale an MRI") + fname_from = op.join( + mri_dirname.format(subjects_dir=subjects_dir, subject=subject_from), mri_fname + ) + fname_to = op.join( + mri_dirname.format(subjects_dir=subjects_dir, subject=subject_to), mri_fname + ) + img = nibabel.load(fname_from) + zooms = np.array(img.header.get_zooms()) + zooms[[0, 2, 1]] *= scale + img.header.set_zooms(zooms) + # Hack to fix nibabel problems, see + # https://github.com/nipy/nibabel/issues/619 + img._affine = img.header.get_affine() # or could use None + nibabel.save(img, fname_to) + + +def _scale_xfm(subject_to, xfm_fname, mri_name, subject_from, scale, subjects_dir): + """Scale a transform.""" + subjects_dir, subject_from, scale, _ = _scale_params( + subject_to, subject_from, scale, subjects_dir + ) + + # The nibabel warning should already be there in MRI step, if applicable, + # as we only get here if T1.mgz is present (and thus a scaling was + # attempted) so we can silently return here. + fname_from = os.path.join( + mri_transforms_dirname.format(subjects_dir=subjects_dir, subject=subject_from), + xfm_fname, + ) + fname_to = op.join( + mri_transforms_dirname.format(subjects_dir=subjects_dir, subject=subject_to), + xfm_fname, + ) + assert op.isfile(fname_from), fname_from + assert op.isdir(op.dirname(fname_to)), op.dirname(fname_to) + # The "talairach.xfm" file stores the ras_mni transform. 
+    #
+    # For "from" subj F, "to" subj T, F->T scaling S, some equivalent vertex
+    # positions F_x and T_x in MRI (FreeSurfer RAS) coords, knowing that
+    # we have T_x = S @ F_x, we want to have the same MNI coords computed
+    # for these vertices:
+    #
+    #              T_mri_mni @ T_x = F_mri_mni @ F_x
+    #
+    # We need to find the correct T_ras_mni (talairach.xfm file) that yields
+    # this. So we derive (where ⁻¹ indicates inversion):
+    #
+    #          T_mri_mni @ S @ F_x = F_mri_mni @ F_x
+    #                T_mri_mni @ S = F_mri_mni
+    #    T_ras_mni @ T_mri_ras @ S = F_ras_mni @ F_mri_ras
+    #        T_ras_mni @ T_mri_ras = F_ras_mni @ F_mri_ras @ S⁻¹
+    #                    T_ras_mni = F_ras_mni @ F_mri_ras @ S⁻¹ @ T_ras_mri
+    #
+
+    # prepare the scale (S) transform
+    scale = np.atleast_1d(scale)
+    scale = np.tile(scale, 3) if len(scale) == 1 else scale
+    S = Transform("mri", "mri", scaling(*scale))  # F_mri->T_mri
+
+    #
+    # Get the necessary transforms of the "from" subject
+    #
+    xfm, kind = _read_fs_xfm(fname_from)
+    assert kind == "MNI Transform File", kind
+    _, _, F_mri_ras, _, _ = _read_mri_info(mri_name, units="mm")
+    F_ras_mni = Transform("ras", "mni_tal", xfm)
+    del xfm
+
+    #
+    # Get the necessary transforms of the "to" subject
+    #
+    mri_name = op.join(
+        mri_dirname.format(subjects_dir=subjects_dir, subject=subject_to),
+        op.basename(mri_name),
+    )
+    _, _, T_mri_ras, _, _ = _read_mri_info(mri_name, units="mm")
+    T_ras_mri = invert_transform(T_mri_ras)
+    del mri_name, T_mri_ras
+
+    # Finally we construct as above:
+    #
+    #    T_ras_mni = F_ras_mni @ F_mri_ras @ S⁻¹ @ T_ras_mri
+    #
+    # By moving right to left through the equation.
+    T_ras_mni = combine_transforms(
+        combine_transforms(
+            combine_transforms(T_ras_mri, invert_transform(S), "ras", "mri"),
+            F_mri_ras,
+            "ras",
+            "ras",
+        ),
+        F_ras_mni,
+        "ras",
+        "mni_tal",
+    )
+    _write_fs_xfm(fname_to, T_ras_mni["trans"], kind)
+
+
+def _read_surface(filename, *, on_defects):
+    bem = dict()
+    if filename is not None and op.exists(filename):
+        if filename.endswith(".fif"):
+            bem = read_bem_surfaces(filename, on_defects=on_defects, verbose=False)[0]
+        else:
+            try:
+                bem = read_surface(filename, return_dict=True)[2]
+                bem["rr"] *= 1e-3
+                complete_surface_info(bem, copy=False)
+            except Exception:
+                raise ValueError(
+                    f"Error loading surface from {filename} (see "
+                    "Terminal for details)."
+                )
+    return bem
+
+
+@fill_doc
+class Coregistration:
+    """Class for MRI<->head coregistration.
+
+    Parameters
+    ----------
+    info : instance of Info | None
+        The measurement info.
+    %(subject)s
+    %(subjects_dir)s
+    %(fiducials)s
+    %(on_defects)s
+
+        .. versionadded:: 1.0
+
+    Attributes
+    ----------
+    fiducials : instance of DigMontage
+        A montage containing the MRI fiducials.
+    trans : instance of Transform
+        MRI<->Head coordinate transformation.
+
+    See Also
+    --------
+    mne.scale_mri
+
+    Notes
+    -----
+    Internally, the computed quantities are expressed in the following units:
+
+    - rotations are in radians
+    - translations are in m
+    - scale factors are dimensionless proportions
+
+    If using a scale mode, :func:`~mne.scale_mri` should be used
+    to create a surrogate MRI subject with the proper scale factors.
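+
+    Examples
+    --------
+    A minimal sketch of the intended workflow (``raw.info``, ``subjects_dir``,
+    and the FreeSurfer subject ``'sample'`` are assumed to exist)::
+
+        >>> coreg = Coregistration(raw.info, 'sample', subjects_dir)  # doctest: +SKIP
+        >>> coreg.fit_fiducials()  # doctest: +SKIP
+        >>> trans = coreg.trans  # MRI<->head transform  # doctest: +SKIP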
+ """ + + def __init__( + self, info, subject, subjects_dir=None, fiducials="auto", *, on_defects="raise" + ): + _validate_type(info, (Info, None), "info") + self._info = info + self._subject = _check_subject(subject, subject) + self._subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) + self._scale_mode = None + self._on_defects = on_defects + + self._default_parameters = np.array( + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0] + ) + + self._rotation = self._default_parameters[:3] + self._translation = self._default_parameters[3:6] + self._scale = self._default_parameters[6:9] + self._icp_angle = 0.2 + self._icp_distance = 0.2 + self._icp_scale = 0.2 + self._icp_fid_matches = ("nearest", "matched") + self._icp_fid_match = self._icp_fid_matches[0] + self._lpa_weight = 1.0 + self._nasion_weight = 10.0 + self._rpa_weight = 1.0 + self._hsp_weight = 1.0 + self._eeg_weight = 1.0 + self._hpi_weight = 1.0 + self._extra_points_filter = None + + self._setup_digs() + self._setup_bem() + + self._fid_filename = None + self._setup_fiducials(fiducials) + self.reset() + + def _setup_digs(self): + if self._info is None: + self._dig_dict = dict( + hpi=np.zeros((1, 3)), + dig_ch_pos_location=np.zeros((1, 3)), + hsp=np.zeros((1, 3)), + rpa=np.zeros((1, 3)), + nasion=np.zeros((1, 3)), + lpa=np.zeros((1, 3)), + ) + else: + self._dig_dict = _get_data_as_dict_from_dig( + dig=self._info["dig"], exclude_ref_channel=False + ) + # adjustments: + # set weights to 0 for None input + # convert fids to float arrays + for k, w_atr in zip( + ["nasion", "lpa", "rpa", "hsp", "hpi"], + [ + "_nasion_weight", + "_lpa_weight", + "_rpa_weight", + "_hsp_weight", + "_hpi_weight", + ], + ): + if self._dig_dict[k] is None: + self._dig_dict[k] = np.zeros((0, 3)) + setattr(self, w_atr, 0) + elif k in ["rpa", "nasion", "lpa"]: + self._dig_dict[k] = np.array([self._dig_dict[k]], float) + + def _setup_bem(self): + # find high-res head model (if possible) + high_res_path = _find_head_bem(self._subject, self._subjects_dir, high_res=True) + low_res_path = _find_head_bem(self._subject, self._subjects_dir, high_res=False) + if high_res_path is None and low_res_path is None: + raise RuntimeError( + "No standard head model was " + f"found for subject {self._subject} in " + f"{self._subjects_dir}" + ) + if high_res_path is not None: + self._bem_high_res = _read_surface( + high_res_path, on_defects=self._on_defects + ) + logger.info(f"Using high resolution head model in {high_res_path}") + else: + self._bem_high_res = _read_surface( + low_res_path, on_defects=self._on_defects + ) + logger.info(f"Using low resolution head model in {low_res_path}") + if low_res_path is None: + # This should be very rare! 
+ warn( + "No low-resolution head found, decimating high resolution " + f"mesh ({len(self._bem_high_res['rr'])} vertices): {high_res_path}" + ) + # Create one from the high res one, which we know we have + rr, tris = decimate_surface( + self._bem_high_res["rr"], self._bem_high_res["tris"], n_triangles=5120 + ) + # directly set the attributes of bem_low_res + self._bem_low_res = complete_surface_info( + dict(rr=rr, tris=tris), copy=False, verbose=False + ) + else: + self._bem_low_res = _read_surface(low_res_path, on_defects=self._on_defects) + + def _setup_fiducials(self, fids): + _validate_type(fids, (str, dict, list)) + # find fiducials file + fid_accurate = None + if fids == "auto": + fid_files = _find_fiducials_files(self._subject, self._subjects_dir) + if len(fid_files) > 0: + # Read fiducials from disk + fid_filename = fid_files[0].format( + subjects_dir=self._subjects_dir, subject=self._subject + ) + logger.info(f"Using fiducials from: {fid_filename}.") + fids, _ = read_fiducials(fid_filename) + fid_accurate = True + self._fid_filename = fid_filename + else: + fids = "estimated" + + if fids == "estimated": + logger.info("Estimating fiducials from fsaverage.") + fid_accurate = False + fids = get_mni_fiducials(self._subject, self._subjects_dir) + + fid_accurate = True if fid_accurate is None else fid_accurate + if isinstance(fids, list): + fid_coords = _fiducial_coords(fids) + else: + assert isinstance(fids, dict) + fid_coords = np.array( + [fids["lpa"], fids["nasion"], fids["rpa"]], dtype=float + ) + + self._fid_points = fid_coords + self._fid_accurate = fid_accurate + + # does not seem to happen by itself ... so hard code it: + self._reset_fiducials() + + def _reset_fiducials(self): + dig_montage = make_dig_montage( + lpa=self._fid_points[0], + nasion=self._fid_points[1], + rpa=self._fid_points[2], + coord_frame="mri", + ) + self.fiducials = dig_montage + + def _update_params(self, rot=None, tra=None, sca=None, force_update=False): + if force_update and tra is None: + tra = self._translation + rot_changed = False + if rot is not None: + rot_changed = True + self._last_rotation = self._rotation.copy() + self._rotation = rot + tra_changed = False + if rot_changed or tra is not None: + if tra is None: + tra = self._translation + tra_changed = True + self._last_translation = self._translation.copy() + self._translation = tra + self._head_mri_t = rotation(*self._rotation).T + self._head_mri_t[:3, 3] = -np.dot(self._head_mri_t[:3, :3], tra) + self._transformed_dig_hpi = apply_trans( + self._head_mri_t, self._dig_dict["hpi"] + ) + self._transformed_dig_eeg = apply_trans( + self._head_mri_t, self._dig_dict["dig_ch_pos_location"] + ) + self._transformed_dig_extra = apply_trans( + self._head_mri_t, self._filtered_extra_points + ) + self._transformed_orig_dig_extra = apply_trans( + self._head_mri_t, self._dig_dict["hsp"] + ) + self._mri_head_t = rotation(*self._rotation) + self._mri_head_t[:3, 3] = np.array(tra) + if tra_changed or sca is not None: + if sca is None: + sca = self._scale + self._last_scale = self._scale.copy() + self._scale = sca + self._mri_trans = np.eye(4) + self._mri_trans[:, :3] *= sca + self._transformed_high_res_mri_points = apply_trans( + self._mri_trans, self._processed_high_res_mri_points + ) + self._update_nearest_calc() + + if tra_changed: + self._nearest_transformed_high_res_mri_idx_orig_hsp = ( + self._nearest_calc.query(self._transformed_orig_dig_extra)[1] + ) + self._nearest_transformed_high_res_mri_idx_hpi = self._nearest_calc.query( + self._transformed_dig_hpi + 
)[1] + self._nearest_transformed_high_res_mri_idx_eeg = self._nearest_calc.query( + self._transformed_dig_eeg + )[1] + self._nearest_transformed_high_res_mri_idx_rpa = self._nearest_calc.query( + apply_trans(self._head_mri_t, self._dig_dict["rpa"]) + )[1] + self._nearest_transformed_high_res_mri_idx_nasion = ( + self._nearest_calc.query( + apply_trans(self._head_mri_t, self._dig_dict["nasion"]) + )[1] + ) + self._nearest_transformed_high_res_mri_idx_lpa = self._nearest_calc.query( + apply_trans(self._head_mri_t, self._dig_dict["lpa"]) + )[1] + + def set_scale_mode(self, scale_mode): + """Select how to fit the scale parameters. + + Parameters + ---------- + scale_mode : None | str + The scale mode can be 'uniform', '3-axis' or disabled. + Defaults to None. + + * 'uniform': 1 scale factor is recovered. + * '3-axis': 3 scale factors are recovered. + * None: do not scale the MRI. + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + self._scale_mode = scale_mode + return self + + def set_grow_hair(self, value): + """Compensate for hair on the digitizer head shape. + + Parameters + ---------- + value : float + Move the back of the MRI head outwards by ``value`` (mm). + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + self._grow_hair = value + self._update_params(force_update=True) + return self + + def set_rotation(self, rot): + """Set the rotation parameter. + + Parameters + ---------- + rot : array, shape (3,) + The rotation parameter (in radians). + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + self._update_params(rot=np.array(rot)) + return self + + def set_translation(self, tra): + """Set the translation parameter. + + Parameters + ---------- + tra : array, shape (3,) + The translation parameter (in m.). + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + self._update_params(tra=np.array(tra)) + return self + + def set_scale(self, sca): + """Set the scale parameter. + + Parameters + ---------- + sca : array, shape (3,) + The scale parameter. + + Returns + ------- + self : Coregistration + The modified Coregistration object. 
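+
+        Examples
+        --------
+        A uniform 5 % enlargement (illustrative value, on an existing
+        ``coreg`` instance)::
+
+            >>> coreg.set_scale([1.05, 1.05, 1.05])  # doctest: +SKIP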
+ """ + self._update_params(sca=np.array(sca)) + return self + + def _update_nearest_calc(self): + self._nearest_calc = _DistanceQuery( + self._processed_high_res_mri_points * self._scale + ) + + @property + def _filtered_extra_points(self): + if self._extra_points_filter is None: + return self._dig_dict["hsp"] + else: + return self._dig_dict["hsp"][self._extra_points_filter] + + @property + def _parameters(self): + return np.concatenate((self._rotation, self._translation, self._scale)) + + @property + def _last_parameters(self): + return np.concatenate( + (self._last_rotation, self._last_translation, self._last_scale) + ) + + @property + def _changes(self): + move = np.linalg.norm(self._last_translation - self._translation) * 1e3 + angle = np.rad2deg( + _angle_between_quats( + rot_to_quat(rotation(*self._rotation)[:3, :3]), + rot_to_quat(rotation(*self._last_rotation)[:3, :3]), + ) + ) + percs = 100 * (self._scale - self._last_scale) / self._last_scale + return move, angle, percs + + @property + def _nearest_transformed_high_res_mri_idx_hsp(self): + return self._nearest_calc.query( + apply_trans(self._head_mri_t, self._filtered_extra_points) + )[1] + + @property + def _has_hsp_data(self): + return ( + self._has_mri_data + and len(self._nearest_transformed_high_res_mri_idx_hsp) > 0 + ) + + @property + def _has_hpi_data(self): + return ( + self._has_mri_data + and len(self._nearest_transformed_high_res_mri_idx_hpi) > 0 + ) + + @property + def _has_eeg_data(self): + return ( + self._has_mri_data + and len(self._nearest_transformed_high_res_mri_idx_eeg) > 0 + ) + + @property + def _has_lpa_data(self): + mri_point = self.fiducials.dig[_map_fid_name_to_idx("lpa")] + assert mri_point["ident"] == FIFF.FIFFV_POINT_LPA + has_mri_data = np.any(mri_point["r"]) + has_head_data = np.any(self._dig_dict["lpa"]) + return has_mri_data and has_head_data + + @property + def _has_nasion_data(self): + mri_point = self.fiducials.dig[_map_fid_name_to_idx("nasion")] + assert mri_point["ident"] == FIFF.FIFFV_POINT_NASION + has_mri_data = np.any(mri_point["r"]) + has_head_data = np.any(self._dig_dict["nasion"]) + return has_mri_data and has_head_data + + @property + def _has_rpa_data(self): + mri_point = self.fiducials.dig[_map_fid_name_to_idx("rpa")] + assert mri_point["ident"] == FIFF.FIFFV_POINT_RPA + has_mri_data = np.any(mri_point["r"]) + has_head_data = np.any(self._dig_dict["rpa"]) + return has_mri_data and has_head_data + + @property + def _processed_high_res_mri_points(self): + return self._get_processed_mri_points("high") + + def _get_processed_mri_points(self, res): + bem = self._bem_low_res if res == "low" else self._bem_high_res + points = bem["rr"].copy() + if self._grow_hair: + assert len(bem["nn"]) # should be guaranteed by _read_surface + scaled_hair_dist = 1e-3 * self._grow_hair / np.array(self._scale) + hair = points[:, 2] > points[:, 1] + points[hair] += bem["nn"][hair] * scaled_hair_dist + return points + + @property + def _has_mri_data(self): + return len(self._transformed_high_res_mri_points) > 0 + + @property + def _has_dig_data(self): + return ( + self._has_mri_data + and len(self._nearest_transformed_high_res_mri_idx_hsp) > 0 + ) + + @property + def _orig_hsp_point_distance(self): + mri_points = self._transformed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_orig_hsp + ] + hsp_points = self._transformed_orig_dig_extra + return np.linalg.norm(mri_points - hsp_points, axis=-1) + + def _log_dig_mri_distance(self, prefix): + errs_nearest = self.compute_dig_mri_distances() + 
logger.info( + f"{prefix} median distance: {np.median(errs_nearest * 1000):6.2f} mm" + ) + + @property + def scale(self): + """Get the current scale factor. + + Returns + ------- + scale : ndarray, shape (3,) + The scale factors. + """ + return self._scale.copy() + + @verbose + def fit_fiducials( + self, lpa_weight=1.0, nasion_weight=10.0, rpa_weight=1.0, verbose=None + ): + """Find rotation and translation to fit all 3 fiducials. + + Parameters + ---------- + lpa_weight : float + Relative weight for LPA. The default value is 1. + nasion_weight : float + Relative weight for nasion. The default value is 10. + rpa_weight : float + Relative weight for RPA. The default value is 1. + %(verbose)s + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + logger.info("Aligning using fiducials") + self._log_dig_mri_distance("Start") + n_scale_params = self._n_scale_params + if n_scale_params == 3: + # enforce 1 even for 3-axis here (3 points is not enough) + logger.info("Enforcing 1 scaling parameter for fit with fiducials.") + n_scale_params = 1 + self._lpa_weight = lpa_weight + self._nasion_weight = nasion_weight + self._rpa_weight = rpa_weight + + head_pts = np.vstack( + (self._dig_dict["lpa"], self._dig_dict["nasion"], self._dig_dict["rpa"]) + ) + mri_pts = np.vstack( + ( + self.fiducials.dig[0]["r"], # LPA + self.fiducials.dig[1]["r"], # Nasion + self.fiducials.dig[2]["r"], + ) # RPA + ) + weights = [lpa_weight, nasion_weight, rpa_weight] + + if n_scale_params == 0: + mri_pts *= self._scale # not done in fit_matched_points + x0 = self._parameters + x0 = x0[: 6 + n_scale_params] + est = fit_matched_points( + mri_pts, + head_pts, + x0=x0, + out="params", + scale=n_scale_params, + weights=weights, + ) + if n_scale_params == 0: + self._update_params(rot=est[:3], tra=est[3:6]) + else: + assert est.size == 7 + est = np.concatenate([est, [est[-1]] * 2]) + assert est.size == 9 + self._update_params(rot=est[:3], tra=est[3:6], sca=est[6:9]) + self._log_dig_mri_distance("End ") + return self + + def _setup_icp(self, n_scale_params): + head_pts = [np.zeros((0, 3))] + mri_pts = [np.zeros((0, 3))] + weights = [np.zeros(0)] + if self._has_dig_data and self._hsp_weight > 0: # should be true + head_pts.append(self._filtered_extra_points) + mri_pts.append( + self._processed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hsp + ] + ) + weights.append(np.full(len(head_pts[-1]), self._hsp_weight)) + for key in ("lpa", "nasion", "rpa"): + if getattr(self, f"_has_{key}_data"): + head_pts.append(self._dig_dict[key]) + if self._icp_fid_match == "matched": + idx = _map_fid_name_to_idx(name=key) + p = self.fiducials.dig[idx]["r"].reshape(1, -1) + mri_pts.append(p) + else: + assert self._icp_fid_match == "nearest" + mri_pts.append( + self._processed_high_res_mri_points[ + getattr( + self, + f"_nearest_transformed_high_res_mri_idx_{key}", + ) + ] + ) + weights.append( + np.full(len(mri_pts[-1]), getattr(self, f"_{key}_weight")) + ) + if self._has_eeg_data and self._eeg_weight > 0: + head_pts.append(self._dig_dict["dig_ch_pos_location"]) + mri_pts.append( + self._processed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_eeg + ] + ) + weights.append(np.full(len(mri_pts[-1]), self._eeg_weight)) + if self._has_hpi_data and self._hpi_weight > 0: + head_pts.append(self._dig_dict["hpi"]) + mri_pts.append( + self._processed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hpi + ] + ) + weights.append(np.full(len(mri_pts[-1]), self._hpi_weight)) 
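+ # stack all enabled point sets (head shape, fiducials, EEG, HPI) and their weights into flat arrays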
+ head_pts = np.concatenate(head_pts) + mri_pts = np.concatenate(mri_pts) + weights = np.concatenate(weights) + if n_scale_params == 0: + mri_pts *= self._scale # not done in fit_matched_points + return head_pts, mri_pts, weights + + def set_fid_match(self, match): + """Set the strategy for fitting anatomical landmark (fiducial) points. + + Parameters + ---------- + match : 'nearest' | 'matched' + Alignment strategy; ``'nearest'`` aligns anatomical landmarks to + any point on the head surface; ``'matched'`` aligns to the fiducial + points in the MRI. + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + _check_option("match", match, self._icp_fid_matches) + self._icp_fid_match = match + return self + + @verbose + def fit_icp( + self, + n_iterations=20, + lpa_weight=1.0, + nasion_weight=10.0, + rpa_weight=1.0, + hsp_weight=1.0, + eeg_weight=1.0, + hpi_weight=1.0, + callback=None, + verbose=None, + ): + """Find MRI scaling, translation, and rotation to match HSP. + + Parameters + ---------- + n_iterations : int + Maximum number of iterations. + lpa_weight : float + Relative weight for LPA. The default value is 1. + nasion_weight : float + Relative weight for nasion. The default value is 10. + rpa_weight : float + Relative weight for RPA. The default value is 1. + hsp_weight : float + Relative weight for HSP. The default value is 1. + eeg_weight : float + Relative weight for EEG. The default value is 1. + hpi_weight : float + Relative weight for HPI. The default value is 1. + callback : callable | None + A function to call on each iteration. Useful for status message + updates. It will be passed the keyword arguments ``iteration`` + and ``n_iterations``. + %(verbose)s + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + logger.info("Aligning using ICP") + self._log_dig_mri_distance("Start ") + n_scale_params = self._n_scale_params + self._lpa_weight = lpa_weight + self._nasion_weight = nasion_weight + self._rpa_weight = rpa_weight + self._hsp_weight = hsp_weight + self._eeg_weight = eeg_weight + self._hpi_weight = hpi_weight + + # Initial guess (current state) + est = self._parameters + est = est[: [6, 7, None, 9][n_scale_params]] + + # Do the fits, assigning and evaluating at each step + for iteration in range(n_iterations): + head_pts, mri_pts, weights = self._setup_icp(n_scale_params) + est = fit_matched_points( + mri_pts, + head_pts, + scale=n_scale_params, + x0=est, + out="params", + weights=weights, + ) + if n_scale_params == 0: + self._update_params(rot=est[:3], tra=est[3:6]) + elif n_scale_params == 1: + est = np.array(list(est) + [est[-1]] * 2) + self._update_params(rot=est[:3], tra=est[3:6], sca=est[6:9]) + else: + self._update_params(rot=est[:3], tra=est[3:6], sca=est[6:9]) + move, angle, scale = self._changes # unpack in the (move, angle, scale) order that _changes returns + self._log_dig_mri_distance(f" ICP {iteration + 1:2d} ") + if callback is not None: + callback(iteration, n_iterations) + if ( + angle <= self._icp_angle + and move <= self._icp_distance + and all(scale <= self._icp_scale) + ): + break + self._log_dig_mri_distance("End ") + return self + + @property + def _n_scale_params(self): + if self._scale_mode is None: + n_scale_params = 0 + elif self._scale_mode == "uniform": + n_scale_params = 1 + else: + n_scale_params = 3 + return n_scale_params + + def omit_head_shape_points(self, distance): + """Exclude head shape points that are far away from the MRI head. 
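+ + Distances are measured from each digitized head shape point to the + nearest vertex of the MRI head surface under the current transform.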
+ + Parameters + ---------- + distance : float + Exclude all points that are further away from the MRI head than + this distance (in m). A value of distance <= 0 excludes nothing. + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + distance = float(distance) + if distance <= 0: + return self + + # find the new filter + mask = self._orig_hsp_point_distance <= distance + n_excluded = np.sum(~mask) + logger.info( + "Coregistration: Excluding %i head shape points with " + "distance > %.3f m.", + n_excluded, + distance, + ) + # set the filter + self._extra_points_filter = mask + self._update_params(force_update=True) + return self + + def compute_dig_mri_distances(self): + """Compute distance between head shape points and MRI skin surface. + + Returns + ------- + dist : array, shape (n_points,) + The distance of the head shape points to the MRI skin surface. + + See Also + -------- + mne.dig_mri_distances + """ + # we don't use `dig_mri_distances` here because it should be much + # faster to use our already-determined nearest points + hsp_points, mri_points, _ = self._setup_icp(0) + hsp_points = apply_trans(self._head_mri_t, hsp_points) + return np.linalg.norm(mri_points - hsp_points, axis=-1) + + @property + def trans(self): + """The head->mri :class:`~mne.transforms.Transform`.""" + return Transform("head", "mri", self._head_mri_t) + + def reset(self): + """Reset all the parameters affecting the coregistration. + + Returns + ------- + self : Coregistration + The modified Coregistration object. + """ + self._grow_hair = 0.0 + self.set_rotation(self._default_parameters[:3]) + self.set_translation(self._default_parameters[3:6]) + self.set_scale(self._default_parameters[6:9]) + self._extra_points_filter = None + self._update_nearest_calc() + return self + + def _get_fiducials_distance(self): + distance = dict() + for key in ("lpa", "nasion", "rpa"): + idx = _map_fid_name_to_idx(name=key) + fid = self.fiducials.dig[idx]["r"].reshape(1, -1) + + transformed_mri = apply_trans(self._mri_trans, fid) + transformed_hsp = apply_trans(self._head_mri_t, self._dig_dict[key]) + distance[key] = np.linalg.norm(np.ravel(transformed_mri - transformed_hsp)) + return np.array(list(distance.values())) * 1e3 + + def _get_fiducials_distance_str(self): + dists = self._get_fiducials_distance() + return f"Fiducials: {dists[0]:.1f}, {dists[1]:.1f}, {dists[2]:.1f} mm" + + def _get_point_distance(self): + mri_points = list() + hsp_points = list() + if self._hsp_weight > 0 and self._has_hsp_data: + mri_points.append( + self._transformed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hsp + ] + ) + hsp_points.append(self._transformed_dig_extra) + assert len(mri_points[-1]) == len(hsp_points[-1]) + if self._eeg_weight > 0 and self._has_eeg_data: + mri_points.append( + self._transformed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_eeg + ] + ) + hsp_points.append(self._transformed_dig_eeg) + assert len(mri_points[-1]) == len(hsp_points[-1]) + if self._hpi_weight > 0 and self._has_hpi_data: + mri_points.append( + self._transformed_high_res_mri_points[ + self._nearest_transformed_high_res_mri_idx_hpi + ] + ) + hsp_points.append(self._transformed_dig_hpi) + assert len(mri_points[-1]) == len(hsp_points[-1]) + if all(len(h) == 0 for h in hsp_points): + return None + mri_points = np.concatenate(mri_points) + hsp_points = np.concatenate(hsp_points) + return np.linalg.norm(mri_points - hsp_points, axis=-1) + + def _get_point_distance_str(self): + point_distance 
= self._get_point_distance() + if point_distance is None: + return "" + dists = 1e3 * point_distance + av_dist = np.mean(dists) + std_dist = np.std(dists) + kinds = [ + kind + for kind, check in ( + ("HSP", self._hsp_weight > 0 and self._has_hsp_data), + ("EEG", self._eeg_weight > 0 and self._has_eeg_data), + ("HPI", self._hpi_weight > 0 and self._has_hpi_data), + ) + if check + ] + kinds = "+".join(kinds) + return f"{len(dists)} {kinds}: {av_dist:.1f} ± {std_dist:.1f} mm" diff --git a/mne/cov.py b/mne/cov.py new file mode 100644 index 0000000..8b86119 --- /dev/null +++ b/mne/cov.py @@ -0,0 +1,2545 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import itertools as itt +from copy import deepcopy +from math import log + +import numpy as np +from scipy.sparse import issparse + +from . import viz +from ._fiff.constants import FIFF +from ._fiff.meas_info import _read_bad_channels, _write_bad_channels, create_info +from ._fiff.pick import ( + _DATA_CH_TYPES_SPLIT, + _pick_data_channels, + _picks_by_type, + _picks_to_idx, + pick_channels, + pick_channels_cov, + pick_info, + pick_types, +) +from ._fiff.proj import ( + _check_projs, + _has_eeg_average_ref_proj, + _needs_eeg_average_ref_proj, + _proj_equal, + _read_proj, + _write_proj, +) +from ._fiff.proj import activate_proj as _activate_proj +from ._fiff.proj import make_projector as _make_projector +from ._fiff.tag import find_tag +from ._fiff.tree import dir_tree_find +from .defaults import ( + _BORDER_DEFAULT, + _EXTRAPOLATE_DEFAULT, + _INTERPOLATION_DEFAULT, + DEFAULTS, + _handle_default, +) +from .epochs import Epochs +from .event import make_fixed_length_events +from .evoked import EvokedArray +from .fixes import ( + EmpiricalCovariance, + _EstimatorMixin, + _logdet, + _safe_svd, + empirical_covariance, + log_likelihood, +) +from .rank import _compute_rank +from .utils import ( + _array_repr, + _check_fname, + _check_on_missing, + _check_option, + _on_missing, + _pl, + _scaled_array, + _time_mask, + _undo_scaling_cov, + _validate_type, + _verbose_safe_false, + check_fname, + check_version, + copy_function_doc_to_method_doc, + eigh, + fill_doc, + logger, + verbose, + warn, +) + + +def _check_covs_algebra(cov1, cov2): + if cov1.ch_names != cov2.ch_names: + raise ValueError("Both Covariance objects must have the same list of channels.") + projs1 = [str(c) for c in cov1["projs"]] + projs2 = [str(c) for c in cov2["projs"]] + if projs1 != projs2: + raise ValueError( + "Both Covariance objects must have the same list of SSP projections." + ) + + +def _get_tslice(epochs, tmin, tmax): + """Get the slice.""" + mask = _time_mask(epochs.times, tmin, tmax, sfreq=epochs.info["sfreq"]) + tstart = np.where(mask)[0][0] if tmin is not None else None + tend = np.where(mask)[0][-1] + 1 if tmax is not None else None + tslice = slice(tstart, tend, None) + return tslice + + +@fill_doc +class Covariance(dict): + """Noise covariance matrix. + + .. note:: + This class should not be instantiated directly via + ``mne.Covariance(...)``. Instead, use one of the functions + listed in the See Also section below. + + Parameters + ---------- + data : array-like + The data. + names : list of str + Channel names. + bads : list of str + Bad channels. + projs : list + Projection vectors. + nfree : int + Degrees of freedom. + eig : array-like | None + Eigenvalues. + eigvec : array-like | None + Eigenvectors. + method : str | None + The method used to compute the covariance. + loglik : float + The log likelihood. 
+ %(verbose)s + + Attributes + ---------- + data : array of shape (n_channels, n_channels) + The covariance. + ch_names : list of str + List of channels' names. + nfree : int + Number of degrees of freedom i.e. number of time points used. + dim : int + The number of channels ``n_channels``. + + See Also + -------- + compute_covariance + compute_raw_covariance + make_ad_hoc_cov + read_cov + """ + + @verbose + def __init__( + self, + data, + names, + bads, + projs, + nfree, + eig=None, + eigvec=None, + method=None, + loglik=None, + *, + verbose=None, + ): + """Init of covariance.""" + diag = data.ndim == 1 + projs = _check_projs(projs) + self.update( + data=data, + dim=len(data), + names=names, + bads=bads, + nfree=nfree, + eig=eig, + eigvec=eigvec, + diag=diag, + projs=projs, + kind=FIFF.FIFFV_MNE_NOISE_COV, + ) + if method is not None: + self["method"] = method + if loglik is not None: + self["loglik"] = loglik + + @property + def data(self): + """Numpy array of Noise covariance matrix.""" + return self["data"] + + @property + def ch_names(self): + """Channel names.""" + return self["names"] + + @property + def nfree(self): + """Number of degrees of freedom.""" + return self["nfree"] + + @verbose + def save(self, fname, *, overwrite=False, verbose=None): + """Save covariance matrix in a FIF file. + + Parameters + ---------- + fname : path-like + Output filename. + %(overwrite)s + + .. versionadded:: 1.0 + %(verbose)s + """ + from ._fiff.write import start_and_end_file + + check_fname( + fname, "covariance", ("-cov.fif", "-cov.fif.gz", "_cov.fif", "_cov.fif.gz") + ) + fname = _check_fname(fname=fname, overwrite=overwrite) + with start_and_end_file(fname) as fid: + _write_cov(fid, self) + + def copy(self): + """Copy the Covariance object. + + Returns + ------- + cov : instance of Covariance + The copied object. + """ + return deepcopy(self) + + def as_diag(self): + """Set covariance to be processed as being diagonal. + + Returns + ------- + cov : dict + The covariance. + + Notes + ----- + This function allows creation of inverse operators + equivalent to using the old "--diagnoise" mne option. + + This function operates in place. 
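+ + Examples + -------- + A minimal usage sketch, assuming ``cov`` is an existing + :class:`Covariance` instance: + + >>> cov = cov.as_diag()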
+ """ + if self["diag"]: + return self + self["diag"] = True + self["data"] = np.diag(self["data"]) + self["eig"] = None + self["eigvec"] = None + return self + + def _as_square(self): + # This is a hack but it works because np.diag() behaves nicely + if self["diag"]: + self["diag"] = False + self.as_diag() + self["diag"] = False + return self + + def _get_square(self): + if self["diag"] != (self.data.ndim == 1): + raise RuntimeError( + "Covariance attributes inconsistent, got data with " + f"dimensionality {self.data.ndim} but diag={self['diag']}" + ) + return np.diag(self.data) if self["diag"] else self.data.copy() + + def __repr__(self): # noqa: D105 + s = "" + return s + + def __add__(self, cov): + """Add Covariance taking into account number of degrees of freedom.""" + _check_covs_algebra(self, cov) + this_cov = cov.copy() + this_cov["data"] = ( + (this_cov["data"] * this_cov["nfree"]) + (self["data"] * self["nfree"]) + ) / (self["nfree"] + this_cov["nfree"]) + this_cov["nfree"] += self["nfree"] + + this_cov["bads"] = list(set(this_cov["bads"]).union(self["bads"])) + + return this_cov + + def __iadd__(self, cov): + """Add Covariance taking into account number of degrees of freedom.""" + _check_covs_algebra(self, cov) + self["data"][:] = ( + (self["data"] * self["nfree"]) + (cov["data"] * cov["nfree"]) + ) / (self["nfree"] + cov["nfree"]) + self["nfree"] += cov["nfree"] + + self["bads"] = list(set(self["bads"]).union(cov["bads"])) + + return self + + @verbose + @copy_function_doc_to_method_doc(viz.plot_cov) + def plot( + self, + info, + exclude=(), + colorbar=True, + proj=False, + show_svd=True, + show=True, + verbose=None, + ): + return viz.plot_cov( + self, info, exclude, colorbar, proj, show_svd, show, verbose + ) + + @verbose + def plot_topomap( + self, + info, + ch_type=None, + *, + scalings=None, + proj=False, + noise_cov=None, + sensors=True, + show_names=False, + mask=None, + mask_params=None, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap=None, + vlim=(None, None), + cnorm=None, + colorbar=True, + cbar_fmt="%3.1f", + units=None, + axes=None, + show=True, + verbose=None, + ): + """Plot a topomap of the covariance diagonal. + + Parameters + ---------- + %(info_not_none)s + %(ch_type_topomap)s + + .. versionadded:: 0.21 + %(scalings_topomap)s + %(proj_plot)s + noise_cov : instance of Covariance | None + If not None, whiten the instance with ``noise_cov`` before + plotting. + %(sensors_topomap)s + %(show_names_topomap)s + %(mask_topomap)s + %(mask_params_topomap)s + %(contours_topomap)s + %(outlines_topomap)s + %(sphere_topomap_auto)s + %(image_interp_topomap)s + %(extrapolate_topomap)s + + .. versionchanged:: 0.21 + + - The default was changed to ``'local'`` for MEG sensors. + - ``'local'`` was changed to use a convex hull mask + - ``'head'`` was changed to extrapolate out to the clipping circle. + %(border_topomap)s + + .. versionadded:: 0.20 + %(res_topomap)s + %(size_topomap)s + %(cmap_topomap)s + %(vlim_plot_topomap)s + + .. versionadded:: 1.2 + %(cnorm)s + + .. versionadded:: 1.2 + %(colorbar_topomap)s + %(cbar_fmt_topomap)s + %(units_topomap_evoked)s + %(axes_cov_plot_topomap)s + %(show)s + %(verbose)s + + Returns + ------- + fig : instance of Figure + The matplotlib figure. + + Notes + ----- + .. 
versionadded:: 0.21 + """ + from .viz.misc import _index_info_cov + + info, C, _, _ = _index_info_cov(info, self, exclude=()) + evoked = EvokedArray(np.diag(C)[:, np.newaxis], info) + if noise_cov is not None: + # need to left and right multiply whitener, which for the diagonal + # entries is the same as multiplying twice + evoked = whiten_evoked(whiten_evoked(evoked, noise_cov), noise_cov) + if units is None: + units = "AU" + if scalings is None: + scalings = 1.0 + if units is None: + units = {k: f"({v})²" for k, v in DEFAULTS["units"].items()} + if scalings is None: + scalings = {k: v * v for k, v in DEFAULTS["scalings"].items()} + return evoked.plot_topomap( + times=[0], + ch_type=ch_type, + vlim=vlim, + cmap=cmap, + sensors=sensors, + cnorm=cnorm, + colorbar=colorbar, + scalings=scalings, + units=units, + res=res, + size=size, + cbar_fmt=cbar_fmt, + proj=proj, + show=show, + show_names=show_names, + mask=mask, + mask_params=mask_params, + outlines=outlines, + contours=contours, + image_interp=image_interp, + axes=axes, + extrapolate=extrapolate, + sphere=sphere, + border=border, + time_format="", + ) + + @verbose + def pick_channels(self, ch_names, ordered=True, *, verbose=None): + """Pick channels from this covariance matrix. + + Parameters + ---------- + ch_names : list of str + List of channels to keep. All other channels are dropped. + %(ordered)s + %(verbose)s + + Returns + ------- + cov : instance of Covariance + The modified covariance matrix. + + Notes + ----- + Operates in-place. + + .. versionadded:: 0.20.0 + """ + return pick_channels_cov( + self, ch_names, exclude=[], ordered=ordered, copy=False + ) + + +############################################################################### +# IO + + +@verbose +def read_cov(fname, verbose=None): + """Read a noise covariance from a FIF file. + + Parameters + ---------- + fname : path-like + Path to the file containing the covariance matrix. It should end + with ``-cov.fif`` or ``-cov.fif.gz``. + %(verbose)s + + Returns + ------- + cov : Covariance + The noise covariance matrix. + + See Also + -------- + write_cov, compute_covariance, compute_raw_covariance + """ + from ._fiff.open import fiff_open + + check_fname( + fname, "covariance", ("-cov.fif", "-cov.fif.gz", "_cov.fif", "_cov.fif.gz") + ) + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") + f, tree, _ = fiff_open(fname) + with f as fid: + return Covariance( + **_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV, limited=True) + ) + + +############################################################################### +# Estimate from data + + +@verbose +def make_ad_hoc_cov(info, std=None, *, verbose=None): + """Create an ad hoc noise covariance. + + Parameters + ---------- + %(info_not_none)s + std : dict of float | None + Standard deviation of the diagonal elements. If dict, keys should be + ``'grad'`` for gradiometers, ``'mag'`` for magnetometers and ``'eeg'`` + for EEG channels. If None, default values will be used (see Notes). + %(verbose)s + + Returns + ------- + cov : instance of Covariance + The ad hoc diagonal noise covariance for the M/EEG data channels. + + Notes + ----- + The default noise values are 5 fT/cm, 20 fT, and 0.2 µV for gradiometers, + magnetometers, and EEG channels respectively. + + .. 
versionadded:: 0.9.0 + """ + picks = pick_types(info, meg=True, eeg=True, exclude=()) + std = _handle_default("noise_std", std) + + data = np.zeros(len(picks)) + for meg, eeg, val in zip( + ("grad", "mag", False), + (False, False, True), + (std["grad"], std["mag"], std["eeg"]), + ): + these_picks = pick_types(info, meg=meg, eeg=eeg) + data[np.searchsorted(picks, these_picks)] = val * val + ch_names = [info["ch_names"][pick] for pick in picks] + return Covariance(data, ch_names, info["bads"], info["projs"], nfree=0) + + +def _check_n_samples(n_samples, n_chan): + """Check to see if there are enough samples for reliable cov calc.""" + n_samples_min = 10 * (n_chan + 1) // 2 + if n_samples <= 0: + raise ValueError("No samples found to compute the covariance matrix") + if n_samples < n_samples_min: + warn( + f"Too few samples (required: {n_samples_min}, got: {n_samples}), " + "covariance estimate may be unreliable" + ) + + +@verbose +def compute_raw_covariance( + raw, + tmin=0, + tmax=None, + tstep=0.2, + reject=None, + flat=None, + picks=None, + method="empirical", + method_params=None, + cv=3, + scalings=None, + n_jobs=None, + return_estimators=False, + reject_by_annotation=True, + rank=None, + verbose=None, +): + """Estimate noise covariance matrix from a continuous segment of raw data. + + It is typically useful to estimate a noise covariance from empty room + data or time intervals before starting the stimulation. + + .. note:: To estimate the noise covariance from epoched data, use + :func:`mne.compute_covariance` instead. + + Parameters + ---------- + raw : instance of Raw + Raw data. + tmin : float + Beginning of time interval in seconds. Defaults to 0. + tmax : float | None (default None) + End of time interval in seconds. If None (default), use the end of the + recording. + tstep : float (default 0.2) + Length of data chunks for artifact rejection in seconds. + Can also be None to use a single epoch of (tmax - tmin) + duration. This can use a lot of memory for large ``Raw`` + instances. + reject : dict | None (default None) + Rejection parameters based on peak-to-peak amplitude. + Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. + If reject is None then no rejection is done. Example:: + + reject = dict(grad=4000e-13, # T / m (gradiometers) + mag=4e-12, # T (magnetometers) + eeg=40e-6, # V (EEG channels) + eog=250e-6 # V (EOG channels) + ) + + flat : dict | None (default None) + Rejection parameters based on flatness of signal. + Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values + are floats that set the minimum acceptable peak-to-peak amplitude. + If flat is None then no rejection is done. + %(picks_good_data_noref)s + method : str | list | None (default 'empirical') + The method used for covariance estimation. + See :func:`mne.compute_covariance`. + + .. versionadded:: 0.12 + method_params : dict | None (default None) + Additional parameters to the estimation procedure. + See :func:`mne.compute_covariance`. + + .. versionadded:: 0.12 + cv : int | sklearn.model_selection object (default 3) + The cross validation method. Defaults to 3, which will + internally trigger by default :class:`sklearn.model_selection.KFold` + with 3 splits. + + .. versionadded:: 0.12 + scalings : dict | None (default None) + Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``. + These defaults will scale magnetometers and gradiometers + to the same unit. + + .. versionadded:: 0.12 + %(n_jobs)s + + .. 
versionadded:: 0.12 + return_estimators : bool (default False) + Whether to return all estimators or the best. Only considered if + method equals 'auto' or is a list of str. Defaults to False. + + .. versionadded:: 0.12 + %(reject_by_annotation_epochs)s + + .. versionadded:: 0.14 + %(rank_none)s + + .. versionadded:: 0.17 + + .. versionadded:: 0.18 + Support for 'info' mode. + %(verbose)s + + Returns + ------- + cov : instance of Covariance | list + The computed covariance. If method equals 'auto' or is a list of str + and return_estimators equals True, a list of covariance estimators is + returned (sorted by log-likelihood, from high to low, i.e. from best + to worst). + + See Also + -------- + compute_covariance : Estimate noise covariance matrix from epoched data. + + Notes + ----- + This function will: + + 1. Partition the data into evenly spaced, equal-length epochs. + 2. Load them into memory. + 3. Subtract the mean across all time points and epochs for each channel. + 4. Process the :class:`Epochs` by :func:`compute_covariance`. + + This will produce a slightly different result compared to using + :func:`make_fixed_length_events`, :class:`Epochs`, and + :func:`compute_covariance` directly, since that would (with the recommended + baseline correction) subtract the mean across time *for each epoch* + (instead of across epochs) for each channel. + """ + tmin = 0.0 if tmin is None else float(tmin) + dt = 1.0 / raw.info["sfreq"] + tmax = raw.times[-1] + dt if tmax is None else float(tmax) + tstep = tmax - tmin if tstep is None else float(tstep) + tstep_m1 = tstep - dt # inclusive! + events = make_fixed_length_events(raw, 1, tmin, tmax, tstep) + logger.info(f"Using up to {len(events)} segment{_pl(events)}") + + # don't exclude any bad channels, inverses expect all channels present + if picks is None: + # Need to include all good channels e.g. 
if eog rejection is to be used + picks = np.arange(raw.info["nchan"]) + pick_mask = np.isin(picks, _pick_data_channels(raw.info, with_ref_meg=False)) + else: + pick_mask = slice(None) + picks = _picks_to_idx(raw.info, picks) + epochs = Epochs( + raw, + events, + 1, + 0, + tstep_m1, + baseline=None, + picks=picks, + reject=reject, + flat=flat, + verbose=_verbose_safe_false(), + preload=False, + proj=False, + reject_by_annotation=reject_by_annotation, + ) + if method is None: + method = "empirical" + if isinstance(method, str) and method == "empirical": + # potentially *much* more memory efficient to do it the iterative way + picks = picks[pick_mask] + data = 0 + n_samples = 0 + mu = 0 + # Read data in chunks + for raw_segment in epochs: + raw_segment = raw_segment[pick_mask] + mu += raw_segment.sum(axis=1) + data += np.dot(raw_segment, raw_segment.T) + n_samples += raw_segment.shape[1] + _check_n_samples(n_samples, len(picks)) + data -= mu[:, None] * (mu[None, :] / n_samples) + data /= n_samples - 1.0 + logger.info("Number of samples used : %d", n_samples) + logger.info("[done]") + ch_names = [raw.info["ch_names"][k] for k in picks] + bads = [b for b in raw.info["bads"] if b in ch_names] + return Covariance(data, ch_names, bads, raw.info["projs"], nfree=n_samples - 1) + del picks, pick_mask + + # This makes it equivalent to what we used to do (and do above for + # empirical mode), treating all epochs as if they were a single long one + epochs.load_data() + ch_means = epochs._data.mean(axis=0).mean(axis=1) + epochs._data -= ch_means[np.newaxis, :, np.newaxis] + # fake this value so there are no complaints from compute_covariance + epochs.baseline = (None, None) + return compute_covariance( + epochs, + keep_sample_mean=True, + method=method, + method_params=method_params, + cv=cv, + scalings=scalings, + n_jobs=n_jobs, + return_estimators=return_estimators, + rank=rank, + ) + + +def _check_method_params( + method, + method_params, + keep_sample_mean=True, + name="method", + allow_auto=True, + rank=None, +): + """Check that method and method_params are usable.""" + accepted_methods = ( + "auto", + "empirical", + "diagonal_fixed", + "ledoit_wolf", + "oas", + "shrunk", + "pca", + "factor_analysis", + "shrinkage", + ) + _method_params = { + "empirical": {"store_precision": False, "assume_centered": True}, + "diagonal_fixed": {"store_precision": False, "assume_centered": True}, + "ledoit_wolf": {"store_precision": False, "assume_centered": True}, + "oas": {"store_precision": False, "assume_centered": True}, + "shrinkage": { + "shrinkage": 0.1, + "store_precision": False, + "assume_centered": True, + }, + "shrunk": { + "shrinkage": np.logspace(-4, 0, 30), + "store_precision": False, + "assume_centered": True, + }, + "pca": {"iter_n_components": None}, + "factor_analysis": {"iter_n_components": None}, + } + + for ch_type in _DATA_CH_TYPES_SPLIT: + _method_params["diagonal_fixed"][ch_type] = 0.1 + + if isinstance(method_params, dict): + for key, values in method_params.items(): + if key not in _method_params: + raise ValueError( + 'key ({}) must be "{}"'.format(key, '" or "'.join(_method_params)) + ) + + _method_params[key].update(method_params[key]) + shrinkage = method_params.get("shrinkage", {}).get("shrinkage", 0.1) + if not 0 <= shrinkage <= 1: + raise ValueError(f"shrinkage must be between 0 and 1, got {shrinkage}") + + was_auto = False + if method is None: + method = ["empirical"] + elif method == "auto" and allow_auto: + was_auto = True + method = ["shrunk", "diagonal_fixed", "empirical", 
"factor_analysis"] + + if not isinstance(method, list | tuple): + method = [method] + + if not all(k in accepted_methods for k in method): + raise ValueError( + f"Invalid {name} ({method}). Accepted values (individually or " + f"in a list) are any of '{accepted_methods}' or None." + ) + if not (isinstance(rank, str) and rank == "full"): + if was_auto: + method.pop(method.index("factor_analysis")) + for method_ in method: + if method_ in ("pca", "factor_analysis"): + raise ValueError( + f'{method_} can so far only be used with rank="full", got rank=' + f"{rank!r}" + ) + if not keep_sample_mean: + if len(method) != 1 or "empirical" not in method: + raise ValueError( + f'`keep_sample_mean=False` is only supported with {name}="empirical"' + ) + for p, v in _method_params.items(): + if v.get("assume_centered", None) is False: + raise ValueError( + "`assume_centered` must be True if `keep_sample_mean` is False" + ) + return method, _method_params + + +@verbose +def compute_covariance( + epochs, + keep_sample_mean=True, + tmin=None, + tmax=None, + projs=None, + method="empirical", + method_params=None, + cv=3, + scalings=None, + n_jobs=None, + return_estimators=False, + on_mismatch="raise", + rank=None, + verbose=None, +): + """Estimate noise covariance matrix from epochs. + + The noise covariance is typically estimated on pre-stimulus periods + when the stimulus onset is defined from events. + + If the covariance is computed for multiple event types (events + with different IDs), the following two options can be used and combined: + + 1. either an Epochs object for each event type is created and + a list of Epochs is passed to this function. + 2. an Epochs object is created for multiple events and passed + to this function. + + .. note:: To estimate the noise covariance from non-epoched raw data, such + as an empty-room recording, use + :func:`mne.compute_raw_covariance` instead. + + Parameters + ---------- + epochs : instance of Epochs, or list of Epochs + The epochs. + keep_sample_mean : bool (default True) + If False, the average response over epochs is computed for + each event type and subtracted during the covariance + computation. This is useful if the evoked response from a + previous stimulus extends into the baseline period of the next. + Note. This option is only implemented for method='empirical'. + tmin : float | None (default None) + Start time for baseline. If None start at first sample. + tmax : float | None (default None) + End time for baseline. If None end at last sample. + projs : list of Projection | None (default None) + List of projectors to use in covariance calculation, or None + to indicate that the projectors from the epochs should be + inherited. If None, then projectors from all epochs must match. + method : str | list | None (default 'empirical') + The method used for covariance estimation. If 'empirical' (default), + the sample covariance will be computed. A list can be passed to + perform estimates using multiple methods. + If 'auto' or a list of methods, the best estimator will be determined + based on log-likelihood and cross-validation on unseen data as + described in :footcite:`EngemannGramfort2015`. Valid methods are + 'empirical', 'diagonal_fixed', 'shrunk', 'oas', 'ledoit_wolf', + 'factor_analysis', 'shrinkage', and 'pca' (see Notes). If ``'auto'``, + it expands to:: + + ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis'] + + ``'factor_analysis'`` is removed when ``rank`` is not 'full'. 
+ The ``'auto'`` mode is not recommended if there are many + segments of data, since computation can take a long time. + + .. versionadded:: 0.9.0 + method_params : dict | None (default None) + Additional parameters to the estimation procedure. Only considered if + method is not None. Keys must correspond to the value(s) of ``method``. + If None (default), expands to the following (with the addition of + ``{'store_precision': False, 'assume_centered': True}`` for all methods + except ``'factor_analysis'`` and ``'pca'``):: + + {'diagonal_fixed': {'grad': 0.1, 'mag': 0.1, 'eeg': 0.1, ...}, + 'shrinkage': {'shrinkage': 0.1}, + 'shrunk': {'shrinkage': np.logspace(-4, 0, 30)}, + 'pca': {'iter_n_components': None}, + 'factor_analysis': {'iter_n_components': None}} + + cv : int | sklearn.model_selection object (default 3) + The cross validation method. Defaults to 3, which will + internally trigger by default :class:`sklearn.model_selection.KFold` + with 3 splits. + scalings : dict | None (default None) + Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``. + These defaults will scale data to roughly the same order of + magnitude. + %(n_jobs)s + return_estimators : bool (default False) + Whether to return all estimators or the best. Only considered if + method equals 'auto' or is a list of str. Defaults to False. + on_mismatch : str + What to do when the MEG<->Head transformations do not match between + epochs. If "raise" (default) an error is raised, if "warn" then a + warning is emitted, if "ignore" then nothing is printed. Having + mismatched transforms can in some cases lead to unexpected or + unstable results in covariance calculation, e.g. when data + have been processed with Maxwell filtering but not transformed + to the same head position. + %(rank_none)s + + .. versionadded:: 0.17 + + .. versionadded:: 0.18 + Support for 'info' mode. + %(verbose)s + + Returns + ------- + cov : instance of Covariance | list + The computed covariance. If method equals ``'auto'`` or is a list of str + and ``return_estimators=True``, a list of covariance estimators is + returned (sorted by log-likelihood, from high to low, i.e. from best + to worst). + + See Also + -------- + compute_raw_covariance : Estimate noise covariance from raw data, such as + empty-room recordings. + + Notes + ----- + Baseline correction or sufficient high-passing should be used + when creating the :class:`Epochs` to ensure that the data are zero mean, + otherwise the computed covariance matrix will be inaccurate. + + Valid ``method`` strings are: + + * ``'empirical'`` + The empirical or sample covariance (default) + * ``'diagonal_fixed'`` + A diagonal regularization based on channel types as in + :func:`mne.cov.regularize`. + * ``'shrinkage'`` + Fixed shrinkage. + + .. versionadded:: 0.16 + * ``'ledoit_wolf'`` + The Ledoit-Wolf estimator, which uses an + empirical formula for the optimal shrinkage value :footcite:`LedoitWolf2004`. + * ``'oas'`` + The OAS estimator :footcite:`ChenEtAl2010`, which uses a different + empirical formula for the optimal shrinkage value. + + .. versionadded:: 0.16 + * ``'shrunk'`` + Like 'ledoit_wolf', but with cross-validation for optimal alpha. + * ``'pca'`` + Probabilistic PCA with low rank :footcite:`TippingBishop1999`. + * ``'factor_analysis'`` + Factor analysis with low rank :footcite:`Barber2012`. 
+ + ``'ledoit_wolf'`` and ``'pca'`` are similar to ``'shrunk'`` and + ``'factor_analysis'``, respectively, except that they use + cross validation (which is useful when samples are correlated, which + is often the case for M/EEG data). The former two are not included in + the ``'auto'`` mode to avoid redundancy. + + For multiple event types, it is also possible to create a + single :class:`Epochs` object with events obtained using + :func:`mne.merge_events`. However, the resulting covariance matrix + will only be correct if ``keep_sample_mean is True``. + + The covariance can be unstable if the number of samples is small. + In that case it is common to regularize the covariance estimate. + The ``method`` parameter allows to regularize the covariance in an + automated way. It also allows to select between different alternative + estimation algorithms which themselves achieve regularization. + Details are described in :footcite:t:`EngemannGramfort2015`. + + For more information on the advanced estimation methods, see + :ref:`the sklearn manual `. + + References + ---------- + .. footbibliography:: + """ + # scale to natural unit for best stability with MEG/EEG + scalings = _check_scalings_user(scalings) + method, _method_params = _check_method_params( + method, method_params, keep_sample_mean, rank=rank + ) + del method_params + + # for multi condition support epochs is required to refer to a list of + # epochs objects + + def _unpack_epochs(epochs): + if len(epochs.event_id) > 1: + epochs = [epochs[k] for k in epochs.event_id] + else: + epochs = [epochs] + return epochs + + if not isinstance(epochs, list): + epochs = _unpack_epochs(epochs) + else: + epochs = sum([_unpack_epochs(epoch) for epoch in epochs], []) + + # check for baseline correction + if any( + epochs_t.baseline is None + and epochs_t.info["highpass"] < 0.5 + and keep_sample_mean + for epochs_t in epochs + ): + warn("Epochs are not baseline corrected, covariance matrix may be inaccurate") + + orig = epochs[0].info["dev_head_t"] + _check_on_missing(on_mismatch, "on_mismatch") + for ei, epoch in enumerate(epochs): + epoch.info._check_consistency() + if (orig is None) != (epoch.info["dev_head_t"] is None) or ( + orig is not None + and not np.allclose(orig["trans"], epoch.info["dev_head_t"]["trans"]) + ): + msg = ( + "MEG<->Head transform mismatch between epochs[0]:\n{}\n\n" + "and epochs[{}]:\n{}".format(orig, ei, epoch.info["dev_head_t"]) + ) + _on_missing(on_mismatch, msg, "on_mismatch") + + bads = epochs[0].info["bads"] + if projs is None: + projs = epochs[0].info["projs"] + # make sure Epochs are compatible + for epochs_t in epochs[1:]: + if epochs_t.proj != epochs[0].proj: + raise ValueError("Epochs must agree on the use of projections") + for proj_a, proj_b in zip(epochs_t.info["projs"], projs): + if not _proj_equal(proj_a, proj_b): + raise ValueError("Epochs must have same projectors") + projs = _check_projs(projs) + ch_names = epochs[0].ch_names + + # make sure Epochs are compatible + for epochs_t in epochs[1:]: + if epochs_t.info["bads"] != bads: + raise ValueError("Epochs must have same bad channels") + if epochs_t.ch_names != ch_names: + raise ValueError("Epochs must have same channel names") + picks_list = _picks_by_type(epochs[0].info) + picks_meeg = np.concatenate([b for _, b in picks_list]) + picks_meeg = np.sort(picks_meeg) + ch_names = [epochs[0].ch_names[k] for k in picks_meeg] + info = epochs[0].info # we will overwrite 'epochs' + + if not keep_sample_mean: + # prepare mean covs + n_epoch_types = len(epochs) + 
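# one running accumulator per event type; filled while iterating over the epochs below +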
data_mean = [0] * n_epoch_types + n_samples = np.zeros(n_epoch_types, dtype=np.int64) + n_epochs = np.zeros(n_epoch_types, dtype=np.int64) + + for ii, epochs_t in enumerate(epochs): + tslice = _get_tslice(epochs_t, tmin, tmax) + for e in epochs_t: + e = e[picks_meeg, tslice] + if not keep_sample_mean: + data_mean[ii] += e + n_samples[ii] += e.shape[1] + n_epochs[ii] += 1 + + n_samples_epoch = n_samples // n_epochs + norm_const = np.sum(n_samples_epoch * (n_epochs - 1)) + data_mean = [ + 1.0 / n_epoch * np.dot(mean, mean.T) + for n_epoch, mean in zip(n_epochs, data_mean) + ] + + info = pick_info(info, picks_meeg) + tslice = _get_tslice(epochs[0], tmin, tmax) + epochs = [ee.get_data(picks=picks_meeg)[..., tslice] for ee in epochs] + picks_meeg = np.arange(len(picks_meeg)) + picks_list = _picks_by_type(info) + + if len(epochs) > 1: + epochs = np.concatenate(epochs, 0) + else: + epochs = epochs[0] + + epochs = np.hstack(epochs) + n_samples_tot = epochs.shape[-1] + _check_n_samples(n_samples_tot, len(picks_meeg)) + + epochs = epochs.T # sklearn | C-order + cov_data = _compute_covariance_auto( + epochs, + method=method, + method_params=_method_params, + info=info, + cv=cv, + n_jobs=n_jobs, + stop_early=True, + picks_list=picks_list, + scalings=scalings, + rank=rank, + ) + + if keep_sample_mean is False: + cov = cov_data["empirical"]["data"] + # undo scaling + cov *= n_samples_tot - 1 + # ... apply pre-computed class-wise normalization + for mean_cov in data_mean: + cov -= mean_cov + cov /= norm_const + + covs = list() + for this_method, data in cov_data.items(): + cov = Covariance( + data.pop("data"), ch_names, info["bads"], projs, nfree=n_samples_tot - 1 + ) + + # add extra info + cov.update(method=this_method, **data) + covs.append(cov) + logger.info("Number of samples used : %d", n_samples_tot) + covs.sort(key=lambda c: c["loglik"], reverse=True) + + if len(covs) > 1: + msg = ["log-likelihood on unseen data (descending order):"] + for c in covs: + msg.append(f"{c['method']}: {c['loglik']:0.3f}") + logger.info("\n ".join(msg)) + if return_estimators: + out = covs + else: + out = covs[0] + logger.info("selecting best estimator: {}".format(out["method"])) + else: + out = covs[0] + logger.info("[done]") + + return out + + +def _check_scalings_user(scalings): + if isinstance(scalings, dict): + for k, v in scalings.items(): + _check_option("the keys in `scalings`", k, ["mag", "grad", "eeg"]) + elif scalings is not None and not isinstance(scalings, np.ndarray): + raise TypeError( + f"scalings must be a dict, ndarray, or None, got {type(scalings)}" + ) + scalings = _handle_default("scalings", scalings) + return scalings + + +def _eigvec_subspace(eig, eigvec, mask): + """Compute the subspace from a subset of eigenvectors.""" + # We do the same thing we do with projectors: + P = np.eye(len(eigvec)) - np.dot(eigvec[~mask].conj().T, eigvec[~mask]) + eig, eigvec = eigh(P) + eigvec = eigvec.conj().T + return eig, eigvec + + +@verbose +def _compute_rank_raw_array( + data, info, rank, scalings, *, log_ch_type=None, verbose=None +): + from .io import RawArray + + return _compute_rank( + RawArray(data, info, copy=None, verbose=_verbose_safe_false()), + rank, + scalings, + info, + log_ch_type=log_ch_type, + ) + + +def _compute_covariance_auto( + data, + method, + info, + method_params, + cv, + scalings, + n_jobs, + stop_early, + picks_list, + rank, + *, + cov_kind="", + log_ch_type=None, + log_rank=True, +): + """Compute covariance auto mode.""" + # rescale to improve numerical stability + orig_rank = rank + 
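# estimate the effective numerical rank first so the data can be projected onto a stable subspace +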
rank = _compute_rank_raw_array( + data.T, + info, + rank=rank, + scalings=scalings, + verbose=_verbose_safe_false(), + ) + with _scaled_array(data.T, picks_list, scalings): + C = np.dot(data.T, data) + _, eigvec, mask = _smart_eigh( + C, + info, + rank, + proj_subspace=True, + do_compute_rank=False, + log_ch_type=log_ch_type, + verbose=None if log_rank else _verbose_safe_false(), + ) + eigvec = eigvec[mask] + data = np.dot(data, eigvec.T) + used = np.where(mask)[0] + sub_picks_list = [ + (key, np.searchsorted(used, picks)) for key, picks in picks_list + ] + sub_info = pick_info(info, used) if len(used) != len(mask) else info + if log_rank: + logger.info(f"Reducing data rank from {len(mask)} -> {eigvec.shape[0]}") + estimator_cov_info = list() + + ok_sklearn = check_version("sklearn") + if not ok_sklearn and (len(method) != 1 or method[0] != "empirical"): + raise ValueError( + 'scikit-learn is not installed, `method` must be "empirical", got ' + f"{repr(method)}" + ) + + for method_ in method: + data_ = data.copy() + name = method_.__name__ if callable(method_) else method_ + logger.info( + f'Estimating {cov_kind + (" " if cov_kind else "")}' + f"covariance using {name.upper()}" + ) + mp = method_params[method_] + _info = {} + + if method_ == "empirical": + est = EmpiricalCovariance(**mp) + est.fit(data_) + estimator_cov_info.append((est, est.covariance_, _info)) + del est + + elif method_ == "diagonal_fixed": + est = _RegCovariance(info=sub_info, **mp) + est.fit(data_) + estimator_cov_info.append((est, est.covariance_, _info)) + del est + + elif method_ == "ledoit_wolf": + from sklearn.covariance import LedoitWolf + + shrinkages = [] + lw = LedoitWolf(**mp) + + for ch_type, picks in sub_picks_list: + lw.fit(data_[:, picks]) + shrinkages.append((ch_type, lw.shrinkage_, picks)) + sc = _ShrunkCovariance(shrinkage=shrinkages, **mp) + sc.fit(data_) + estimator_cov_info.append((sc, sc.covariance_, _info)) + del lw, sc + + elif method_ == "oas": + from sklearn.covariance import OAS + + shrinkages = [] + oas = OAS(**mp) + + for ch_type, picks in sub_picks_list: + oas.fit(data_[:, picks]) + shrinkages.append((ch_type, oas.shrinkage_, picks)) + sc = _ShrunkCovariance(shrinkage=shrinkages, **mp) + sc.fit(data_) + estimator_cov_info.append((sc, sc.covariance_, _info)) + del oas, sc + + elif method_ == "shrinkage": + sc = _ShrunkCovariance(**mp) + sc.fit(data_) + estimator_cov_info.append((sc, sc.covariance_, _info)) + del sc + + elif method_ == "shrunk": + from sklearn.covariance import ShrunkCovariance + from sklearn.model_selection import GridSearchCV + + shrinkage = mp.pop("shrinkage") + tuned_parameters = [{"shrinkage": shrinkage}] + shrinkages = [] + gs = GridSearchCV(ShrunkCovariance(**mp), tuned_parameters, cv=cv) + for ch_type, picks in sub_picks_list: + gs.fit(data_[:, picks]) + shrinkages.append((ch_type, gs.best_estimator_.shrinkage, picks)) + shrinkages = [c[0] for c in zip(shrinkages)] + sc = _ShrunkCovariance(shrinkage=shrinkages, **mp) + sc.fit(data_) + estimator_cov_info.append((sc, sc.covariance_, _info)) + del shrinkage, sc + + elif method_ == "pca": + assert orig_rank == "full" + pca, _info = _auto_low_rank_model( + data_, + method_, + n_jobs=n_jobs, + method_params=mp, + cv=cv, + stop_early=stop_early, + ) + pca.fit(data_) + estimator_cov_info.append((pca, pca.get_covariance(), _info)) + del pca + + elif method_ == "factor_analysis": + assert orig_rank == "full" + fa, _info = _auto_low_rank_model( + data_, + method_, + n_jobs=n_jobs, + method_params=mp, + cv=cv, + 
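# stop the component search early once the CV log-likelihood starts dropping +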
stop_early=stop_early, + ) + fa.fit(data_) + estimator_cov_info.append((fa, fa.get_covariance(), _info)) + del fa + else: + raise ValueError(f"Unknown covariance estimation method: {method_!r}") + logger.info("Done.") + + if len(method) > 1: + logger.info("Using cross-validation to select the best estimator.") + + out = dict() + for ei, (estimator, cov, runtime_info) in enumerate(estimator_cov_info): + if len(method) > 1: + loglik = _cross_val(data, estimator, cv, n_jobs) + else: + loglik = None + # project back + cov = np.dot(eigvec.T, np.dot(cov, eigvec)) + # undo bias + cov *= data.shape[0] / (data.shape[0] - 1) + # undo scaling + _undo_scaling_cov(cov, picks_list, scalings) + method_ = method[ei] + name = method_.__name__ if callable(method_) else method_ + out[name] = dict(loglik=loglik, data=cov, estimator=estimator) + out[name].update(runtime_info) + + return out + + +def _gaussian_loglik_scorer(est, X, y=None): + """Compute the Gaussian log likelihood of X under the model in est.""" + # evaluate the mean per-sample Gaussian log-likelihood of X using the + # estimator's precision (inverse covariance) matrix + precision = est.get_precision() + n_samples, n_features = X.shape + log_like = -0.5 * (X * (np.dot(X, precision))).sum(axis=1) + log_like -= 0.5 * (n_features * log(2.0 * np.pi) - _logdet(precision)) + out = np.mean(log_like) + return out + + +def _cross_val(data, est, cv, n_jobs): + """Compute cross validation.""" + from sklearn.model_selection import cross_val_score + + return np.mean( + cross_val_score( + est, data, cv=cv, n_jobs=n_jobs, scoring=_gaussian_loglik_scorer + ) + ) + + +def _auto_low_rank_model( + data, mode, n_jobs, method_params, cv, stop_early=True, verbose=None +): + """Compute latent variable models.""" + method_params = deepcopy(method_params) + iter_n_components = method_params.pop("iter_n_components") + if iter_n_components is None: + iter_n_components = np.arange(5, data.shape[1], 5) + from sklearn.decomposition import PCA, FactorAnalysis + + if mode == "factor_analysis": + est = FactorAnalysis + else: + assert mode == "pca" + est = PCA + est = est(**method_params) + est.n_components = 1 + scores = np.empty_like(iter_n_components, dtype=np.float64) + scores.fill(np.nan) + + # make sure we don't exhaust the iterable if it's a generator + max_n = max(list(deepcopy(iter_n_components))) + if max_n > data.shape[1]: + warn( + f"You are trying to estimate {max_n} components on matrix " + f"with {data.shape[1]} features." + ) + + for ii, n in enumerate(iter_n_components): + est.n_components = n + try: # this may fail depending on rank and split + score = _cross_val(data=data, est=est, cv=cv, n_jobs=n_jobs) + except ValueError: + score = np.inf + if np.isinf(score) or score > 0: + logger.info("... infinite values encountered. stopping estimation") + break + logger.info("... rank: %i - loglik: %0.3f", n, score) + if score != -np.inf: + scores[ii] = score + + if ii >= 3 and np.all(np.diff(scores[ii - 3 : ii]) < 0) and stop_early: + # early stop search when loglik has been going down 3 times + logger.info("early stopping parameter search.") + break + + # happens if rank is too low right from the beginning + if np.isnan(scores).all(): + raise RuntimeError( + "Oh no! Could not estimate covariance because all " + "scores were NaN. Please contact the MNE-Python " + "developers." + ) + + i_score = np.nanargmax(scores) + best = est.n_components = iter_n_components[i_score] + logger.info("... 
best model at rank = %i", best) + runtime_info = { + "ranks": np.array(iter_n_components), + "scores": scores, + "best": best, + "cv": cv, + } + return est, runtime_info + + +############################################################################### +# Sklearn Estimators + + +class _RegCovariance(_EstimatorMixin): + """Aux class.""" + + def __init__( + self, + info, + grad=0.1, + mag=0.1, + eeg=0.1, + seeg=0.1, + ecog=0.1, + hbo=0.1, + hbr=0.1, + fnirs_cw_amplitude=0.1, + fnirs_fd_ac_amplitude=0.1, + fnirs_fd_phase=0.1, + fnirs_od=0.1, + csd=0.1, + dbs=0.1, + store_precision=False, + assume_centered=False, + ): + self.info = info + # For sklearn compat, these cannot (easily?) be combined into + # a single dictionary + self.grad = grad + self.mag = mag + self.eeg = eeg + self.seeg = seeg + self.dbs = dbs + self.ecog = ecog + self.hbo = hbo + self.hbr = hbr + self.fnirs_cw_amplitude = fnirs_cw_amplitude + self.fnirs_fd_ac_amplitude = fnirs_fd_ac_amplitude + self.fnirs_fd_phase = fnirs_fd_phase + self.fnirs_od = fnirs_od + self.csd = csd + self.store_precision = store_precision + self.assume_centered = assume_centered + + def fit(self, X): + """Fit covariance model with classical diagonal regularization.""" + self.estimator_ = EmpiricalCovariance( + store_precision=self.store_precision, assume_centered=self.assume_centered + ) + + self.covariance_ = self.estimator_.fit(X).covariance_ + self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T) + cov_ = Covariance( + data=self.covariance_, + names=self.info["ch_names"], + bads=self.info["bads"], + projs=self.info["projs"], + nfree=len(self.covariance_), + ) + cov_ = regularize( + cov_, + self.info, + proj=False, + exclude="bads", + grad=self.grad, + mag=self.mag, + eeg=self.eeg, + ecog=self.ecog, + seeg=self.seeg, + dbs=self.dbs, + hbo=self.hbo, + hbr=self.hbr, + rank="full", + ) + self.estimator_.covariance_ = self.covariance_ = cov_.data + return self + + def score(self, X_test, y=None): + """Delegate call to modified EmpiricalCovariance instance.""" + return self.estimator_.score(X_test, y=y) + + def get_precision(self): + """Delegate call to modified EmpiricalCovariance instance.""" + return self.estimator_.get_precision() + + +class _ShrunkCovariance(_EstimatorMixin): + """Aux class.""" + + def __init__(self, store_precision, assume_centered, shrinkage=0.1): + self.store_precision = store_precision + self.assume_centered = assume_centered + self.shrinkage = shrinkage + + def fit(self, X): + """Fit covariance model with oracle shrinkage regularization.""" + from sklearn.covariance import shrunk_covariance + + self.estimator_ = EmpiricalCovariance( + store_precision=self.store_precision, assume_centered=self.assume_centered + ) + + cov = self.estimator_.fit(X).covariance_ + + if not isinstance(self.shrinkage, list | tuple): + shrinkage = [("all", self.shrinkage, np.arange(len(cov)))] + else: + shrinkage = self.shrinkage + + zero_cross_cov = np.zeros_like(cov, dtype=bool) + for a, b in itt.combinations(shrinkage, 2): + picks_i, picks_j = a[2], b[2] + ch_ = a[0], b[0] + if "eeg" in ch_: + zero_cross_cov[np.ix_(picks_i, picks_j)] = True + zero_cross_cov[np.ix_(picks_j, picks_i)] = True + + self.zero_cross_cov_ = zero_cross_cov + + # Apply shrinkage to blocks + for ch_type, c, picks in shrinkage: + sub_cov = cov[np.ix_(picks, picks)] + cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov, shrinkage=c) + + # Apply shrinkage to cross-cov + for a, b in itt.combinations(shrinkage, 2): + shrinkage_i, shrinkage_j = a[1], b[1] + picks_i, 
picks_j = a[2], b[2] + c_ij = np.sqrt((1.0 - shrinkage_i) * (1.0 - shrinkage_j)) + cov[np.ix_(picks_i, picks_j)] *= c_ij + cov[np.ix_(picks_j, picks_i)] *= c_ij + + # Set to zero the necessary cross-cov + if np.any(zero_cross_cov): + cov[zero_cross_cov] = 0.0 + + self.estimator_.covariance_ = self.covariance_ = cov + return self + + def score(self, X_test, y=None): + """Delegate to modified EmpiricalCovariance instance.""" + # compute empirical covariance of the test set + test_cov = empirical_covariance( + X_test - self.estimator_.location_, assume_centered=True + ) + if np.any(self.zero_cross_cov_): + test_cov[self.zero_cross_cov_] = 0.0 + res = log_likelihood(test_cov, self.estimator_.get_precision()) + return res + + def get_precision(self): + """Delegate to modified EmpiricalCovariance instance.""" + return self.estimator_.get_precision() + + +############################################################################### +# Writing + + +@verbose +def write_cov(fname, cov, *, overwrite=False, verbose=None): + """Write a noise covariance matrix. + + Parameters + ---------- + fname : path-like + The name of the file. It should end with ``-cov.fif`` or + ``-cov.fif.gz``. + cov : Covariance + The noise covariance matrix. + %(overwrite)s + + .. versionadded:: 1.0 + %(verbose)s + + See Also + -------- + read_cov + """ + cov.save(fname, overwrite=overwrite, verbose=verbose) + + +############################################################################### +# Prepare for inverse modeling + + +def _unpack_epochs(epochs): + """Aux Function.""" + if len(epochs.event_id) > 1: + epochs = [epochs[k] for k in epochs.event_id] + else: + epochs = [epochs] + + return epochs + + +def _get_ch_whitener(A, pca, ch_type, rank): + """Get whitener params for a set of channels.""" + # whitening operator + eig, eigvec = eigh(A, overwrite_a=True) + eigvec = eigvec.conj().T + mask = np.ones(len(eig), bool) + eig[:-rank] = 0.0 + mask[:-rank] = False + + logger.info( + f" Setting small {ch_type} eigenvalues to zero " + f'({"using" if pca else "without"} PCA)' + ) + if pca: # No PCA case. + # This line will reduce the actual number of variables in data + # and leadfield to the true rank. + eigvec = eigvec[:-rank].copy() + return eig, eigvec, mask + + +@verbose +def prepare_noise_cov( + noise_cov, + info, + ch_names=None, + rank=None, + scalings=None, + on_rank_mismatch="ignore", + verbose=None, +): + """Prepare noise covariance matrix. + + Parameters + ---------- + noise_cov : instance of Covariance + The noise covariance to process. + %(info_not_none)s (Used to get channel types and bad channels). + ch_names : list | None + The channel names to be considered. Can be None to use + ``info['ch_names']``. + %(rank_none)s + + .. versionadded:: 0.18 + Support for 'info' mode. + scalings : dict | None + Data will be rescaled before rank estimation to improve accuracy. + If dict, it will override the following dict (default if None):: + + dict(mag=1e12, grad=1e11, eeg=1e5) + %(on_rank_mismatch)s + %(verbose)s + + Returns + ------- + cov : instance of Covariance + A copy of the covariance with the good channels subselected + and parameters updated. 
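+ + Examples + -------- + A minimal usage sketch, assuming ``noise_cov`` and ``info`` come from the + same recording: + + >>> noise_cov_ready = prepare_noise_cov(noise_cov, info)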
+ """ + # reorder C and info to match ch_names order + noise_cov_idx = list() + missing = list() + ch_names = info["ch_names"] if ch_names is None else ch_names + for c in ch_names: + # this could be try/except ValueError, but it is not the preferred way + if c in noise_cov.ch_names: + noise_cov_idx.append(noise_cov.ch_names.index(c)) + else: + missing.append(c) + if len(missing): + raise RuntimeError(f"Not all channels present in noise covariance:\n{missing}") + C = noise_cov._get_square()[np.ix_(noise_cov_idx, noise_cov_idx)] + info = pick_info(info, pick_channels(info["ch_names"], ch_names, ordered=False)) + projs = info["projs"] + noise_cov["projs"] + noise_cov = Covariance( + data=C, + names=ch_names, + bads=list(noise_cov["bads"]), + projs=deepcopy(noise_cov["projs"]), + nfree=noise_cov["nfree"], + method=noise_cov.get("method", None), + loglik=noise_cov.get("loglik", None), + ) + + eig, eigvec, _ = _smart_eigh( + noise_cov, + info, + rank, + scalings, + projs, + ch_names, + on_rank_mismatch=on_rank_mismatch, + ) + noise_cov.update(eig=eig, eigvec=eigvec) + return noise_cov + + +@verbose +def _smart_eigh( + C, + info, + rank, + scalings=None, + projs=None, + ch_names=None, + proj_subspace=False, + do_compute_rank=True, + on_rank_mismatch="ignore", + *, + log_ch_type=None, + verbose=None, +): + """Compute eigh of C taking into account rank and ch_type scalings.""" + scalings = _handle_default("scalings_cov_rank", scalings) + projs = info["projs"] if projs is None else projs + ch_names = info["ch_names"] if ch_names is None else ch_names + if info["ch_names"] != ch_names: + info = pick_info(info, [info["ch_names"].index(c) for c in ch_names]) + assert info["ch_names"] == ch_names + n_chan = len(ch_names) + + # Create the projection operator + proj, ncomp, _ = _make_projector(projs, ch_names) + + if isinstance(C, Covariance): + C = C["data"] + if ncomp > 0: + logger.info(" Created an SSP operator (subspace dimension = %d)", ncomp) + C = np.dot(proj, np.dot(C, proj.T)) + + noise_cov = Covariance(C, ch_names, [], projs, 0) + if do_compute_rank: # if necessary + rank = _compute_rank( + noise_cov, + rank, + scalings, + info, + on_rank_mismatch=on_rank_mismatch, + log_ch_type=log_ch_type, + ) + assert C.ndim == 2 and C.shape[0] == C.shape[1] + + # time saving short-circuit + if proj_subspace and sum(rank.values()) == C.shape[0]: + return np.ones(n_chan), np.eye(n_chan), np.ones(n_chan, bool) + + dtype = complex if C.dtype == np.complex128 else float + eig = np.zeros(n_chan, dtype) + eigvec = np.zeros((n_chan, n_chan), dtype) + mask = np.zeros(n_chan, bool) + for ch_type, picks in _picks_by_type( + info, meg_combined=True, ref_meg=False, exclude=[] + ): + if len(picks) == 0: + continue + this_C = C[np.ix_(picks, picks)] + + if ch_type not in rank and ch_type in ("mag", "grad"): + this_rank = rank["meg"] # if there is only one or the other + else: + this_rank = rank[ch_type] + + if log_ch_type is not None: + ch_type_ = log_ch_type + else: + ch_type_ = ch_type.upper() + e, ev, m = _get_ch_whitener(this_C, False, ch_type_, this_rank) + if proj_subspace: + # Choose the subspace the same way we do for projections + e, ev = _eigvec_subspace(e, ev, m) + eig[picks], eigvec[np.ix_(picks, picks)], mask[picks] = e, ev, m + largest, smallest = e[-1], e[m][0] + if largest > 1e10 * smallest: + warn( + f"The largest eigenvalue of the {len(picks)}-channel {ch_type} " + f"covariance (rank={this_rank}) is over 10 orders of magnitude " + f"larger than the smallest ({largest:0.3g} > 1e10 * {smallest:0.3g}), " + 
"the resulting whitener will likely be unstable" + ) + + # XXX : also handle ref for sEEG and ECoG + if ( + ch_type == "eeg" + and _needs_eeg_average_ref_proj(info) + and not _has_eeg_average_ref_proj(info, projs=projs) + ): + warn( + 'No average EEG reference present in info["projs"], ' + "covariance may be adversely affected. Consider recomputing " + "covariance using with an average eeg reference projector " + "added." + ) + return eig, eigvec, mask + + +@verbose +def regularize( + cov, + info, + mag=0.1, + grad=0.1, + eeg=0.1, + exclude="bads", + proj=True, + seeg=0.1, + ecog=0.1, + hbo=0.1, + hbr=0.1, + fnirs_cw_amplitude=0.1, + fnirs_fd_ac_amplitude=0.1, + fnirs_fd_phase=0.1, + fnirs_od=0.1, + csd=0.1, + dbs=0.1, + rank=None, + scalings=None, + verbose=None, +): + """Regularize noise covariance matrix. + + This method works by adding a constant to the diagonal for each + channel type separately. Special care is taken to keep the + rank of the data constant. + + .. note:: This function is kept for reasons of backward-compatibility. + Please consider explicitly using the ``method`` parameter in + :func:`mne.compute_covariance` to directly combine estimation + with regularization in a data-driven fashion. See the + :ref:`FAQ ` for more information. + + Parameters + ---------- + cov : Covariance + The noise covariance matrix. + %(info_not_none)s (Used to get channel types and bad channels). + mag : float (default 0.1) + Regularization factor for MEG magnetometers. + grad : float (default 0.1) + Regularization factor for MEG gradiometers. Must be the same as + ``mag`` if data have been processed with SSS. + eeg : float (default 0.1) + Regularization factor for EEG. + exclude : list | 'bads' (default 'bads') + List of channels to mark as bad. If 'bads', bads channels + are extracted from both info['bads'] and cov['bads']. + proj : bool (default True) + Apply projections to keep rank of data. + seeg : float (default 0.1) + Regularization factor for sEEG signals. + ecog : float (default 0.1) + Regularization factor for ECoG signals. + hbo : float (default 0.1) + Regularization factor for HBO signals. + hbr : float (default 0.1) + Regularization factor for HBR signals. + fnirs_cw_amplitude : float (default 0.1) + Regularization factor for fNIRS CW raw signals. + fnirs_fd_ac_amplitude : float (default 0.1) + Regularization factor for fNIRS FD AC raw signals. + fnirs_fd_phase : float (default 0.1) + Regularization factor for fNIRS raw phase signals. + fnirs_od : float (default 0.1) + Regularization factor for fNIRS optical density signals. + csd : float (default 0.1) + Regularization factor for EEG-CSD signals. + dbs : float (default 0.1) + Regularization factor for DBS signals. + %(rank_none)s + + .. versionadded:: 0.17 + + .. versionadded:: 0.18 + Support for 'info' mode. + scalings : dict | None + Data will be rescaled before rank estimation to improve accuracy. + See :func:`mne.compute_covariance`. + + .. versionadded:: 0.17 + %(verbose)s + + Returns + ------- + reg_cov : Covariance + The regularized covariance matrix. 
+ + See Also + -------- + mne.compute_covariance + """ # noqa: E501 + cov = cov.copy() + info._check_consistency() + scalings = _handle_default("scalings_cov_rank", scalings) + regs = dict( + eeg=eeg, + seeg=seeg, + dbs=dbs, + ecog=ecog, + hbo=hbo, + hbr=hbr, + fnirs_cw_amplitude=fnirs_cw_amplitude, + fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude, + fnirs_fd_phase=fnirs_fd_phase, + fnirs_od=fnirs_od, + csd=csd, + ) + + if exclude is None: + raise ValueError('exclude must be a list of strings or "bads"') + + if exclude == "bads": + exclude = info["bads"] + cov["bads"] + + picks_dict = {ch_type: [] for ch_type in _DATA_CH_TYPES_SPLIT} + meg_combined = "auto" if rank != "full" else False + picks_dict.update( + dict( + _picks_by_type( + info, meg_combined=meg_combined, exclude=exclude, ref_meg=False + ) + ) + ) + if len(picks_dict.get("meg", [])) > 0 and rank != "full": # combined + if mag != grad: + raise ValueError( + "On data where magnetometers and gradiometers are dependent (e.g., " + f"SSSed data), mag ({mag}) must equal grad ({grad})" + ) + logger.info("Regularizing MEG channels jointly") + regs["meg"] = mag + else: + regs.update(mag=mag, grad=grad) + if rank != "full": + rank = _compute_rank(cov, rank, scalings, info) + + info_ch_names = info["ch_names"] + ch_names_by_type = dict() + for ch_type, picks_type in picks_dict.items(): + ch_names_by_type[ch_type] = [info_ch_names[i] for i in picks_type] + + # This actually removes bad channels from the cov, which is not backward + # compatible, so let's leave all channels in + cov_good = pick_channels_cov( + cov, include=info_ch_names, exclude=exclude, ordered=False + ) + ch_names = cov_good.ch_names + + # Now get the indices for each channel type in the cov + idx_cov = {ch_type: [] for ch_type in ch_names_by_type} + for i, ch in enumerate(ch_names): + for ch_type in ch_names_by_type: + if ch in ch_names_by_type[ch_type]: + idx_cov[ch_type].append(i) + break + else: + raise Exception(f"channel {ch} is unknown type") + + C = cov_good["data"] + + assert len(C) == sum(map(len, idx_cov.values())) + + if proj: + projs = info["projs"] + cov_good["projs"] + projs = _activate_proj(projs) + + for ch_type in idx_cov: + desc = ch_type.upper() + idx = idx_cov[ch_type] + if len(idx) == 0: + continue + reg = regs[ch_type] + if reg == 0.0: + logger.info(f" {desc} regularization : None") + continue + logger.info(f" {desc} regularization : {reg}") + + this_C = C[np.ix_(idx, idx)] + U = np.eye(this_C.shape[0]) + this_ch_names = [ch_names[k] for k in idx] + if rank == "full": + if proj: + P, ncomp, _ = _make_projector(projs, this_ch_names) + if ncomp > 0: + # This adjustment ends up being redundant if rank is None: + U = _safe_svd(P)[0][:, :-ncomp] + logger.info( + f" Created an SSP operator for {desc} (dimension = {ncomp})" + ) + else: + this_picks = pick_channels(info["ch_names"], this_ch_names) + this_info = pick_info(info, this_picks) + # Here we could use proj_subspace=True, but this should not matter + # since this is already in a loop over channel types + _, eigvec, mask = _smart_eigh(this_C, this_info, rank) + U = eigvec[mask].T + this_C = np.dot(U.T, np.dot(this_C, U)) + + sigma = np.mean(np.diag(this_C)) + this_C.flat[:: len(this_C) + 1] += reg * sigma # modify diag inplace + this_C = np.dot(U, np.dot(this_C, U.T)) + C[np.ix_(idx, idx)] = this_C + + # Put data back in correct locations + idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude, ordered=False) + cov["data"][np.ix_(idx, idx)] = C + + return cov + + +def _regularized_covariance( + 
data, + reg=None, + method_params=None, + info=None, + rank=None, + *, + log_ch_type=None, + log_rank=None, + cov_kind="", +): + """Compute a regularized covariance from data using sklearn. + + This is a convenience wrapper for mne.decoding functions, which + adopted a slightly different covariance API. + + Returns + ------- + cov : ndarray, shape (n_channels, n_channels) + The covariance matrix. + """ + _validate_type(reg, (str, "numeric", None)) + if reg is None: + reg = "empirical" + elif not isinstance(reg, str): + reg = float(reg) + if method_params is not None: + raise ValueError( + "If reg is a float, method_params must be None (got " + f"{type(method_params)})" + ) + method_params = dict( + shrinkage=dict(shrinkage=reg, assume_centered=True, store_precision=False) + ) + reg = "shrinkage" + method, method_params = _check_method_params( + reg, method_params, name="reg", allow_auto=False, rank=rank + ) + # use mag instead of eeg here to avoid the cov EEG projection warning + info = create_info(data.shape[-2], 1000.0, "mag") if info is None else info + picks_list = _picks_by_type(info) + scalings = _handle_default("scalings_cov_rank", None) + cov = _compute_covariance_auto( + data.T, + method=method, + method_params=method_params, + info=info, + cv=None, + n_jobs=None, + stop_early=True, + picks_list=picks_list, + scalings=scalings, + rank=rank, + cov_kind=cov_kind, + log_ch_type=log_ch_type, + log_rank=log_rank, + )[reg]["data"] + return cov + + +@verbose +def compute_whitener( + noise_cov, + info=None, + picks=None, + rank=None, + scalings=None, + return_rank=False, + pca=False, + return_colorer=False, + on_rank_mismatch="warn", + verbose=None, +): + """Compute whitening matrix. + + Parameters + ---------- + noise_cov : Covariance + The noise covariance. + %(info)s Can be None if ``noise_cov`` has already been + prepared with :func:`prepare_noise_cov`. + %(picks_good_data_noref)s + %(rank_none)s + + .. versionadded:: 0.18 + Support for 'info' mode. + scalings : dict | None + The rescaling method to be applied. See documentation of + ``prepare_noise_cov`` for details. + return_rank : bool + If True, return the rank used to compute the whitener. + + .. versionadded:: 0.15 + pca : bool | str + Space to project the data into. Options: + + :data:`python:True` + Whitener will be shape (n_nonzero, n_channels). + ``'white'`` + Whitener will be shape (n_channels, n_channels), potentially rank + deficient, and have the first ``n_channels - n_nonzero`` rows and + columns set to zero. + :data:`python:False` (default) + Whitener will be shape (n_channels, n_channels), potentially rank + deficient, and rotated back to the space of the original data. + + .. versionadded:: 0.18 + return_colorer : bool + If True, return the colorer as well. + %(on_rank_mismatch)s + %(verbose)s + + Returns + ------- + W : ndarray, shape (n_channels, n_channels) or (n_nonzero, n_channels) + The whitening matrix. + ch_names : list + The channel names. + rank : int + Rank reduction of the whitener. Returned only if return_rank is True. + colorer : ndarray, shape (n_channels, n_channels) or (n_channels, n_nonzero) + The coloring matrix. 
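A minimal sketch of the round trip this enables (hypothetical file names): build `W`, multiply it into the data, and, with `return_colorer=True`, the coloring matrix can map whitened data back.

```python
# Sketch only: whiten continuous data with W (paths are hypothetical).
import mne

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)
noise_cov = mne.read_cov("sample-cov.fif")

W, ch_names = mne.cov.compute_whitener(noise_cov, raw.info)
white = W @ raw.get_data(picks=ch_names)  # ~identity covariance in the kept subspace
```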
+ """ # noqa: E501 + _validate_type(pca, (str, bool), "space") + _valid_pcas = (True, "white", False) + if pca not in _valid_pcas: + raise ValueError(f"space must be one of {_valid_pcas}, got {pca}") + if info is None: + if "eig" not in noise_cov: + raise ValueError( + "info can only be None if the noise cov has already been prepared with " + "prepare_noise_cov" + ) + ch_names = deepcopy(noise_cov["names"]) + else: + picks = _picks_to_idx(info, picks, with_ref_meg=False) + ch_names = [info["ch_names"][k] for k in picks] + del picks + noise_cov = prepare_noise_cov( + noise_cov, info, ch_names, rank, scalings, on_rank_mismatch=on_rank_mismatch + ) + + n_chan = len(ch_names) + assert n_chan == len(noise_cov["eig"]) + + # Omit the zeroes due to projection + eig = noise_cov["eig"].copy() + nzero = eig > 0 + eig[~nzero] = 0.0 # get rid of numerical noise (negative) ones + + if noise_cov["eigvec"].dtype.kind == "c": + dtype = np.complex128 + else: + dtype = np.float64 + W = np.zeros((n_chan, 1), dtype) + W[nzero, 0] = 1.0 / np.sqrt(eig[nzero]) + # Rows of eigvec are the eigenvectors + W = W * noise_cov["eigvec"] # C ** -0.5 + C = np.sqrt(eig) * noise_cov["eigvec"].conj().T # C ** 0.5 + n_nzero = nzero.sum() + logger.info( + " Created the whitener using a noise covariance matrix " + "with rank %d (%d small eigenvalues omitted)", + n_nzero, + noise_cov["dim"] - n_nzero, + ) + + # Do the requested projection + if pca is True: + W = W[nzero] + C = C[:, nzero] + elif pca is False: + W = np.dot(noise_cov["eigvec"].conj().T, W) + C = np.dot(C, noise_cov["eigvec"]) + + # Triage return + out = W, ch_names + if return_rank: + out += (n_nzero,) + if return_colorer: + out += (C,) + return out + + +@verbose +def whiten_evoked( + evoked, noise_cov, picks=None, diag=None, rank=None, scalings=None, verbose=None +): + """Whiten evoked data using given noise covariance. + + Parameters + ---------- + evoked : instance of Evoked + The evoked data. + noise_cov : instance of Covariance + The noise covariance. + %(picks_good_data)s + diag : bool (default False) + If True, whiten using only the diagonal of the covariance. + %(rank_none)s + + .. versionadded:: 0.18 + Support for 'info' mode. + scalings : dict | None (default None) + To achieve reliable rank estimation on multiple sensors, + sensors have to be rescaled. This parameter controls the + rescaling. If dict, it will override the + following default dict (default if None): + + dict(mag=1e12, grad=1e11, eeg=1e5) + %(verbose)s + + Returns + ------- + evoked_white : instance of Evoked + The whitened evoked data. 
+ """ + evoked = evoked.copy() + picks = _picks_to_idx(evoked.info, picks) + + if diag: + noise_cov = noise_cov.as_diag() + + W, _ = compute_whitener( + noise_cov, evoked.info, picks=picks, rank=rank, scalings=scalings + ) + + evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks]) + return evoked + + +@verbose +def _read_cov(fid, node, cov_kind, limited=False, verbose=None): + """Read a noise covariance matrix.""" + # Find all covariance matrices + from ._fiff.write import _safe_name_list + + covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV) + if len(covs) == 0: + raise ValueError("No covariance matrices found") + + # Is any of the covariance matrices a noise covariance + for p in range(len(covs)): + tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND) + + if tag is not None and int(tag.data.item()) == cov_kind: + this = covs[p] + + # Find all the necessary data + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM) + if tag is None: + raise ValueError("Covariance matrix dimension not found") + dim = int(tag.data.item()) + + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE) + if tag is None: + nfree = -1 + else: + nfree = int(tag.data.item()) + + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD) + if tag is None: + method = None + else: + method = tag.data + + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_SCORE) + if tag is None: + score = None + else: + score = tag.data[0] + + tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES) + if tag is None: + names = [] + else: + names = _safe_name_list(tag.data, "read", "names") + if len(names) != dim: + raise ValueError( + "Number of names does not match covariance matrix dimension" + ) + + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV) + if tag is None: + tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG) + if tag is None: + raise ValueError("No covariance matrix data found") + else: + # Diagonal is stored + data = tag.data + diag = True + logger.info( + " %d x %d diagonal covariance (kind = " "%d) found.", + dim, + dim, + cov_kind, + ) + + else: + if not issparse(tag.data): + # Lower diagonal is stored + vals = tag.data + data = np.zeros((dim, dim)) + data[np.tril(np.ones((dim, dim))) > 0] = vals + data = data + data.T + data.flat[:: dim + 1] /= 2.0 + diag = False + logger.info( + " %d x %d full covariance (kind = %d) " "found.", + dim, + dim, + cov_kind, + ) + else: + diag = False + data = tag.data + logger.info( + " %d x %d sparse covariance (kind = %d)" " found.", + dim, + dim, + cov_kind, + ) + + # Read the possibly precomputed decomposition + tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES) + tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS) + if tag1 is not None and tag2 is not None: + eig = tag1.data + eigvec = tag2.data + else: + eig = None + eigvec = None + + # Read the projection operator + projs = _read_proj(fid, this) + + # Read the bad channel list + bads = _read_bad_channels(fid, this, None) + + # Put it together + assert dim == len(data) + assert data.ndim == (1 if diag else 2) + cov = dict( + kind=cov_kind, + diag=diag, + dim=dim, + names=names, + data=data, + projs=projs, + bads=bads, + nfree=nfree, + eig=eig, + eigvec=eigvec, + ) + if score is not None: + cov["loglik"] = score + if method is not None: + cov["method"] = method + if limited: + del cov["kind"], cov["dim"], cov["diag"] + + return cov + + logger.info(" Did not find the desired covariance matrix (kind = %d)", cov_kind) + + return None + + +def _write_cov(fid, cov): + """Write a noise covariance matrix.""" + from ._fiff.write import ( + 
end_block, + start_block, + write_double, + write_float_matrix, + write_int, + write_name_list_sanitized, + write_string, + ) + + start_block(fid, FIFF.FIFFB_MNE_COV) + + # Dimensions etc. + write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov["kind"]) + write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov["dim"]) + if cov["nfree"] > 0: + write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov["nfree"]) + + # Channel names + if cov["names"] is not None and len(cov["names"]) > 0: + write_name_list_sanitized( + fid, FIFF.FIFF_MNE_ROW_NAMES, cov["names"], 'cov["names"]' + ) + + # Data + if cov["diag"]: + write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov["data"]) + else: + # Store only lower part of covariance matrix + dim = cov["dim"] + mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0 + vals = cov["data"][mask].ravel() + write_double(fid, FIFF.FIFF_MNE_COV, vals) + + # Eigenvalues and vectors if present + if cov["eig"] is not None and cov["eigvec"] is not None: + write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov["eigvec"]) + write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov["eig"]) + + # Projection operator + if cov["projs"] is not None and len(cov["projs"]) > 0: + _write_proj(fid, cov["projs"]) + + # Bad channels + _write_bad_channels(fid, cov["bads"], None) + + # estimator method + if "method" in cov: + write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov["method"]) + + # negative log-likelihood score + if "loglik" in cov: + write_double(fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov["loglik"])) + + # Done! + end_block(fid, FIFF.FIFFB_MNE_COV) + + +@verbose +def _ensure_cov(cov, name="cov", *, verbose=None): + _validate_type(cov, ("path-like", Covariance), name) + logger.info(f"Noise covariance : {cov}") + if not isinstance(cov, Covariance): + cov = read_cov(cov, verbose=_verbose_safe_false()) + return cov diff --git a/mne/cuda.py b/mne/cuda.py new file mode 100644 index 0000000..f44dc65 --- /dev/null +++ b/mne/cuda.py @@ -0,0 +1,390 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np +from scipy.fft import irfft, rfft + +from .utils import ( + _check_option, + _explain_exception, + fill_doc, + get_config, + logger, + sizeof_fmt, + verbose, + warn, +) + +_cuda_capable = False + + +def get_cuda_memory(kind="available"): + """Get the amount of free memory for CUDA operations. + + Parameters + ---------- + kind : str + Can be ``"available"`` or ``"total"``. + + Returns + ------- + memory : str + The amount of available or total memory as a human-readable string. + """ + if not _cuda_capable: + warn("CUDA not enabled, returning zero for memory") + mem = 0 + else: + import cupy + + mem = cupy.cuda.runtime.memGetInfo()[dict(available=0, total=1)[kind]] + return sizeof_fmt(mem) + + +@verbose +def init_cuda(ignore_config=False, verbose=None): + """Initialize CUDA functionality. + + This function attempts to load the necessary interfaces + (hardware connectivity) to run CUDA-based filtering. This + function should only need to be run once per session. + + If the config var (set via mne.set_config or in ENV) + MNE_USE_CUDA == 'true', this function will be executed when + the first CUDA setup is performed. If this variable is not + set, this function can be manually executed. + + Parameters + ---------- + ignore_config : bool + If True, ignore the config value MNE_USE_CUDA and force init. 
+ %(verbose)s + """ + global _cuda_capable + if _cuda_capable: + return + if not ignore_config and (get_config("MNE_USE_CUDA", "false").lower() != "true"): + logger.info("CUDA not enabled in config, skipping initialization") + return + # Triage possible errors for informative messaging + _cuda_capable = False + try: + import cupy # noqa + except ImportError: + warn("module cupy not found, CUDA not enabled") + return + device_id = int(get_config("MNE_CUDA_DEVICE", "0")) + try: + # Initialize CUDA + _set_cuda_device(device_id, verbose) + except Exception: + warn( + "no CUDA device could be initialized, likely a hardware error, " + f"CUDA not enabled{_explain_exception()}" + ) + return + + _cuda_capable = True + # Figure out limit for CUDA FFT calculations + logger.info(f"Enabling CUDA with {get_cuda_memory()} available memory") + + +@verbose +def set_cuda_device(device_id, verbose=None): + """Set the CUDA device temporarily for the current session. + + Parameters + ---------- + device_id : int + Numeric ID of the CUDA-capable device you want MNE-Python to use. + %(verbose)s + """ + if _cuda_capable: + _set_cuda_device(device_id, verbose) + elif get_config("MNE_USE_CUDA", "false").lower() == "true": + init_cuda() + _set_cuda_device(device_id, verbose) + else: + warn( + "Could not set CUDA device because CUDA is not enabled; either " + "run mne.cuda.init_cuda() first, or set the MNE_USE_CUDA config " + 'variable to "true".' + ) + + +@verbose +def _set_cuda_device(device_id, verbose=None): + """Set the CUDA device.""" + import cupy + + cupy.cuda.Device(device_id).use() + logger.info(f"Now using CUDA device {device_id}") + + +############################################################################### +# Repeated FFT multiplication + + +def _setup_cuda_fft_multiply_repeated(n_jobs, h, n_fft, kind="FFT FIR filtering"): + """Set up repeated CUDA FFT multiplication with a given filter. + + Parameters + ---------- + n_jobs : int | str + If ``n_jobs='cuda'``, the function will attempt to set up for CUDA + FFT multiplication. + h : array + The filtering function that will be used repeatedly. + n_fft : int + The number of points in the FFT. + kind : str + The kind to report to the user. + + Returns + ------- + n_jobs : int + Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise + original n_jobs is passed. + cuda_dict : dict + Dictionary with the following CUDA-related variables: + use_cuda : bool + Whether CUDA should be used. + fft_plan : instance of FFTPlan + FFT plan to use in calculating the FFT. + ifft_plan : instance of FFTPlan + FFT plan to use in calculating the IFFT. + x_fft : instance of gpuarray + Empty allocated GPU space for storing the result of the + frequency-domain multiplication. + x : instance of gpuarray + Empty allocated GPU space for the data to filter. + h_fft : array | instance of gpuarray + This will either be a gpuarray (if CUDA enabled) or ndarray. + + Notes + ----- + This function is designed to be used with fft_multiply_repeated().
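To make the Notes concrete, here is the plain-SciPy version of the operation `_fft_multiply_repeated` performs below: multiply each data block by a kernel FFT that was computed once up front. A self-contained sketch with a toy delay kernel:

```python
# Sketch only: frequency-domain FIR filtering with a precomputed kernel FFT,
# i.e. the CPU path that the CUDA dict above merely accelerates.
import numpy as np
from scipy.fft import rfft, irfft

n_fft = 1024
h = np.zeros(65)
h[32] = 1.0                # toy FIR kernel: a pure 32-sample delay
h_fft = rfft(h, n=n_fft)   # computed once, reused for every block

x = np.random.randn(n_fft)
y = irfft(rfft(x, n_fft) * h_fft, n_fft)
assert np.allclose(y, np.roll(x, 32))  # circular convolution = circular shift
```

At the user level the CUDA path is opted into via config rather than called directly; a usage sketch, assuming a working `cupy` install and an NVIDIA GPU (the raw file name is a hypothetical placeholder):

```python
# Sketch only: opt in to GPU-backed filtering. Requires cupy and CUDA hardware.
import mne

mne.set_config("MNE_USE_CUDA", "true")   # persisted config, read by init_cuda()
mne.cuda.init_cuda(ignore_config=True)   # or initialize explicitly right now

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)  # hypothetical path
raw.filter(1.0, 40.0, n_jobs="cuda")     # routed through the FFT helpers here
```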
+ """ + cuda_dict = dict(n_fft=n_fft, rfft=rfft, irfft=irfft, h_fft=rfft(h, n=n_fft)) + if isinstance(n_jobs, str): + _check_option("n_jobs", n_jobs, ("cuda",)) + n_jobs = 1 + init_cuda() + if _cuda_capable: + import cupy + + try: + # do the IFFT normalization now so we don't have to later + h_fft = cupy.array(cuda_dict["h_fft"]) + logger.info(f"Using CUDA for {kind}") + except Exception as exp: + logger.info( + "CUDA not used, could not instantiate memory (arrays may be too " + f'large: "{exp}"), falling back to n_jobs=None' + ) + cuda_dict.update(h_fft=h_fft, rfft=_cuda_upload_rfft, irfft=_cuda_irfft_get) + else: + logger.info( + "CUDA not used, CUDA could not be initialized, " + "falling back to n_jobs=None" + ) + return n_jobs, cuda_dict + + +def _fft_multiply_repeated(x, cuda_dict): + """Do FFT multiplication by a filter function (possibly using CUDA). + + Parameters + ---------- + h_fft : 1-d array or gpuarray + The filtering array to apply. + x : 1-d array + The array to filter. + n_fft : int + The number of points in the FFT. + cuda_dict : dict + Dictionary constructed using setup_cuda_multiply_repeated(). + + Returns + ------- + x : 1-d array + Filtered version of x. + """ + # do the fourier-domain operations + x_fft = cuda_dict["rfft"](x, cuda_dict["n_fft"]) + x_fft *= cuda_dict["h_fft"] + x = cuda_dict["irfft"](x_fft, cuda_dict["n_fft"]) + return x + + +############################################################################### +# FFT Resampling + + +def _setup_cuda_fft_resample(n_jobs, W, new_len): + """Set up CUDA FFT resampling. + + Parameters + ---------- + n_jobs : int | str + If n_jobs == 'cuda', the function will attempt to set up for CUDA + FFT resampling. + W : array + The filtering function to be used during resampling. + If n_jobs='cuda', this function will be shortened (since CUDA + assumes FFTs of real signals are half the length of the signal) + and turned into a gpuarray. + new_len : int + The size of the array following resampling. + + Returns + ------- + n_jobs : int + Sets n_jobs = 1 if n_jobs == 'cuda' was passed in, otherwise + original n_jobs is passed. + cuda_dict : dict + Dictionary with the following CUDA-related variables: + use_cuda : bool + Whether CUDA should be used. + fft_plan : instance of FFTPlan + FFT plan to use in calculating the FFT. + ifft_plan : instance of FFTPlan + FFT plan to use in calculating the IFFT. + x_fft : instance of gpuarray + Empty allocated GPU space for storing the result of the + frequency-domain multiplication. + x : instance of gpuarray + Empty allocated GPU space for the data to resample. + + Notes + ----- + This function is designed to be used with fft_resample(). 
+ """ + cuda_dict = dict(use_cuda=False, rfft=rfft, irfft=irfft) + rfft_len_x = len(W) // 2 + 1 + # fold the window onto inself (should be symmetric) and truncate + W = W.copy() + W[1:rfft_len_x] = (W[1:rfft_len_x] + W[::-1][: rfft_len_x - 1]) / 2.0 + W = W[:rfft_len_x] + if isinstance(n_jobs, str): + _check_option("n_jobs", n_jobs, ("cuda",)) + n_jobs = 1 + init_cuda() + if _cuda_capable: + try: + import cupy + + # do the IFFT normalization now so we don't have to later + W = cupy.array(W) + logger.info("Using CUDA for FFT resampling") + except Exception: + logger.info( + "CUDA not used, could not instantiate memory " + "(arrays may be too large), falling back to " + "n_jobs=None" + ) + else: + cuda_dict.update( + use_cuda=True, rfft=_cuda_upload_rfft, irfft=_cuda_irfft_get + ) + else: + logger.info( + "CUDA not used, CUDA could not be initialized, " + "falling back to n_jobs=None" + ) + cuda_dict["W"] = W + return n_jobs, cuda_dict + + +def _cuda_upload_rfft(x, n, axis=-1): + """Upload and compute rfft.""" + import cupy + + return cupy.fft.rfft(cupy.array(x), n=n, axis=axis) + + +def _cuda_irfft_get(x, n, axis=-1): + """Compute irfft and get.""" + import cupy + + return cupy.fft.irfft(x, n=n, axis=axis).get() + + +@fill_doc +def _fft_resample(x, new_len, npads, to_removes, cuda_dict=None, pad="reflect_limited"): + """Do FFT resampling with a filter function (possibly using CUDA). + + Parameters + ---------- + x : 1-d array + The array to resample. Will be converted to float64 if necessary. + new_len : int + The size of the output array (before removing padding). + npads : tuple of int + Amount of padding to apply to the start and end of the + signal before resampling. + to_removes : tuple of int + Number of samples to remove after resampling. + cuda_dict : dict + Dictionary constructed using setup_cuda_multiply_repeated(). + %(pad_resample)s + The default is ``'reflect_limited'``. + + .. versionadded:: 0.15 + + Returns + ------- + x : 1-d array + Filtered version of x. 
+ """ + cuda_dict = dict(use_cuda=False) if cuda_dict is None else cuda_dict + # add some padding at beginning and end to make this work a little cleaner + if x.dtype != np.float64: + x = x.astype(np.float64) + x = _smart_pad(x, npads, pad) + old_len = len(x) + shorter = new_len < old_len + use_len = new_len if shorter else old_len + x_fft = cuda_dict["rfft"](x, None) + if use_len % 2 == 0: + nyq = use_len // 2 + x_fft[nyq : nyq + 1] *= 2 if shorter else 0.5 + x_fft *= cuda_dict["W"] + y = cuda_dict["irfft"](x_fft, new_len) + + # now let's trim it back to the correct size (if there was padding) + if (to_removes > 0).any(): + y = y[to_removes[0] : y.shape[0] - to_removes[1]] + + return y + + +############################################################################### +# Misc + + +# this has to go in mne.cuda instead of mne.filter to avoid import errors +def _smart_pad(x, n_pad, pad="reflect_limited"): + """Pad vector x.""" + n_pad = np.asarray(n_pad) + assert n_pad.shape == (2,) + if (n_pad == 0).all(): + return x + elif (n_pad < 0).any(): + raise RuntimeError("n_pad must be non-negative") + if pad == "reflect_limited": + # need to pad with zeros if len(x) <= npad + l_z_pad = np.zeros(max(n_pad[0] - len(x) + 1, 0), dtype=x.dtype) + r_z_pad = np.zeros(max(n_pad[1] - len(x) + 1, 0), dtype=x.dtype) + return np.concatenate( + [ + l_z_pad, + 2 * x[0] - x[n_pad[0] : 0 : -1], + x, + 2 * x[-1] - x[-2 : -n_pad[1] - 2 : -1], + r_z_pad, + ] + ) + else: + return np.pad(x, (tuple(n_pad),), pad) diff --git a/mne/data/FreeSurferColorLUT.txt b/mne/data/FreeSurferColorLUT.txt new file mode 100644 index 0000000..2a7bd23 --- /dev/null +++ b/mne/data/FreeSurferColorLUT.txt @@ -0,0 +1,1397 @@ +#$Id: FreeSurferColorLUT.txt,v 1.70.2.7 2012/08/27 17:20:08 nicks Exp $ + +#No. 
Label Name: R G B A + +0 Unknown 0 0 0 0 +1 Left-Cerebral-Exterior 70 130 180 0 +2 Left-Cerebral-White-Matter 245 245 245 0 +3 Left-Cerebral-Cortex 205 62 78 0 +4 Left-Lateral-Ventricle 120 18 134 0 +5 Left-Inf-Lat-Vent 196 58 250 0 +6 Left-Cerebellum-Exterior 0 148 0 0 +7 Left-Cerebellum-White-Matter 220 248 164 0 +8 Left-Cerebellum-Cortex 230 148 34 0 +9 Left-Thalamus 0 118 14 0 +10 Left-Thalamus-Proper 0 118 14 0 +11 Left-Caudate 122 186 220 0 +12 Left-Putamen 236 13 176 0 +13 Left-Pallidum 12 48 255 0 +14 3rd-Ventricle 204 182 142 0 +15 4th-Ventricle 42 204 164 0 +16 Brain-Stem 119 159 176 0 +17 Left-Hippocampus 220 216 20 0 +18 Left-Amygdala 103 255 255 0 +19 Left-Insula 80 196 98 0 +20 Left-Operculum 60 58 210 0 +21 Line-1 60 58 210 0 +22 Line-2 60 58 210 0 +23 Line-3 60 58 210 0 +24 CSF 60 60 60 0 +25 Left-Lesion 255 165 0 0 +26 Left-Accumbens-area 255 165 0 0 +27 Left-Substancia-Nigra 0 255 127 0 +28 Left-VentralDC 165 42 42 0 +29 Left-undetermined 135 206 235 0 +30 Left-vessel 160 32 240 0 +31 Left-choroid-plexus 0 200 200 0 +32 Left-F3orb 100 50 100 0 +33 Left-lOg 135 50 74 0 +34 Left-aOg 122 135 50 0 +35 Left-mOg 51 50 135 0 +36 Left-pOg 74 155 60 0 +37 Left-Stellate 120 62 43 0 +38 Left-Porg 74 155 60 0 +39 Left-Aorg 122 135 50 0 +40 Right-Cerebral-Exterior 70 130 180 0 +41 Right-Cerebral-White-Matter 0 225 0 0 +42 Right-Cerebral-Cortex 205 62 78 0 +43 Right-Lateral-Ventricle 120 18 134 0 +44 Right-Inf-Lat-Vent 196 58 250 0 +45 Right-Cerebellum-Exterior 0 148 0 0 +46 Right-Cerebellum-White-Matter 220 248 164 0 +47 Right-Cerebellum-Cortex 230 148 34 0 +48 Right-Thalamus 0 118 14 0 +49 Right-Thalamus-Proper 0 118 14 0 +50 Right-Caudate 122 186 220 0 +51 Right-Putamen 236 13 176 0 +52 Right-Pallidum 13 48 255 0 +53 Right-Hippocampus 220 216 20 0 +54 Right-Amygdala 103 255 255 0 +55 Right-Insula 80 196 98 0 +56 Right-Operculum 60 58 210 0 +57 Right-Lesion 255 165 0 0 +58 Right-Accumbens-area 255 165 0 0 +59 Right-Substancia-Nigra 0 255 127 0 +60 Right-VentralDC 165 42 42 0 +61 Right-undetermined 135 206 235 0 +62 Right-vessel 160 32 240 0 +63 Right-choroid-plexus 0 200 221 0 +64 Right-F3orb 100 50 100 0 +65 Right-lOg 135 50 74 0 +66 Right-aOg 122 135 50 0 +67 Right-mOg 51 50 135 0 +68 Right-pOg 74 155 60 0 +69 Right-Stellate 120 62 43 0 +70 Right-Porg 74 155 60 0 +71 Right-Aorg 122 135 50 0 +72 5th-Ventricle 120 190 150 0 +73 Left-Interior 122 135 50 0 +74 Right-Interior 122 135 50 0 +# 75/76 removed. 
duplicates of 4/43 +77 WM-hypointensities 200 70 255 0 +78 Left-WM-hypointensities 255 148 10 0 +79 Right-WM-hypointensities 255 148 10 0 +80 non-WM-hypointensities 164 108 226 0 +81 Left-non-WM-hypointensities 164 108 226 0 +82 Right-non-WM-hypointensities 164 108 226 0 +83 Left-F1 255 218 185 0 +84 Right-F1 255 218 185 0 +85 Optic-Chiasm 234 169 30 0 +192 Corpus_Callosum 250 255 50 0 + +86 Left_future_WMSA 200 120 255 0 +87 Right_future_WMSA 200 121 255 0 +88 future_WMSA 200 122 255 0 + + +96 Left-Amygdala-Anterior 205 10 125 0 +97 Right-Amygdala-Anterior 205 10 125 0 +98 Dura 160 32 240 0 + +100 Left-wm-intensity-abnormality 124 140 178 0 +101 Left-caudate-intensity-abnormality 125 140 178 0 +102 Left-putamen-intensity-abnormality 126 140 178 0 +103 Left-accumbens-intensity-abnormality 127 140 178 0 +104 Left-pallidum-intensity-abnormality 124 141 178 0 +105 Left-amygdala-intensity-abnormality 124 142 178 0 +106 Left-hippocampus-intensity-abnormality 124 143 178 0 +107 Left-thalamus-intensity-abnormality 124 144 178 0 +108 Left-VDC-intensity-abnormality 124 140 179 0 +109 Right-wm-intensity-abnormality 124 140 178 0 +110 Right-caudate-intensity-abnormality 125 140 178 0 +111 Right-putamen-intensity-abnormality 126 140 178 0 +112 Right-accumbens-intensity-abnormality 127 140 178 0 +113 Right-pallidum-intensity-abnormality 124 141 178 0 +114 Right-amygdala-intensity-abnormality 124 142 178 0 +115 Right-hippocampus-intensity-abnormality 124 143 178 0 +116 Right-thalamus-intensity-abnormality 124 144 178 0 +117 Right-VDC-intensity-abnormality 124 140 179 0 + +118 Epidermis 255 20 147 0 +119 Conn-Tissue 205 179 139 0 +120 SC-Fat-Muscle 238 238 209 0 +121 Cranium 200 200 200 0 +122 CSF-SA 74 255 74 0 +123 Muscle 238 0 0 0 +124 Ear 0 0 139 0 +125 Adipose 173 255 47 0 +126 Spinal-Cord 133 203 229 0 +127 Soft-Tissue 26 237 57 0 +128 Nerve 34 139 34 0 +129 Bone 30 144 255 0 +130 Air 147 19 173 0 +131 Orbital-Fat 238 59 59 0 +132 Tongue 221 39 200 0 +133 Nasal-Structures 238 174 238 0 +134 Globe 255 0 0 0 +135 Teeth 72 61 139 0 +136 Left-Caudate-Putamen 21 39 132 0 +137 Right-Caudate-Putamen 21 39 132 0 +138 Left-Claustrum 65 135 20 0 +139 Right-Claustrum 65 135 20 0 +140 Cornea 134 4 160 0 +142 Diploe 221 226 68 0 +143 Vitreous-Humor 255 255 254 0 +144 Lens 52 209 226 0 +145 Aqueous-Humor 239 160 223 0 +146 Outer-Table 70 130 180 0 +147 Inner-Table 70 130 181 0 +148 Periosteum 139 121 94 0 +149 Endosteum 224 224 224 0 +150 R-C-S 255 0 0 0 +151 Iris 205 205 0 0 +152 SC-Adipose-Muscle 238 238 209 0 +153 SC-Tissue 139 121 94 0 +154 Orbital-Adipose 238 59 59 0 + +155 Left-IntCapsule-Ant 238 59 59 0 +156 Right-IntCapsule-Ant 238 59 59 0 +157 Left-IntCapsule-Pos 62 10 205 0 +158 Right-IntCapsule-Pos 62 10 205 0 + +# These labels are for babies/children +159 Left-Cerebral-WM-unmyelinated 0 118 14 0 +160 Right-Cerebral-WM-unmyelinated 0 118 14 0 +161 Left-Cerebral-WM-myelinated 220 216 21 0 +162 Right-Cerebral-WM-myelinated 220 216 21 0 +163 Left-Subcortical-Gray-Matter 122 186 220 0 +164 Right-Subcortical-Gray-Matter 122 186 220 0 +165 Skull 255 165 0 0 +166 Posterior-fossa 14 48 255 0 +167 Scalp 166 42 42 0 +168 Hematoma 121 18 134 0 +169 Left-Basal-Ganglia 236 13 127 0 +176 Right-Basal-Ganglia 236 13 126 0 + +# Label names and colors for Brainstem constituents +# No. 
Label Name: R G B A +170 brainstem 119 159 176 0 +171 DCG 119 0 176 0 +172 Vermis 119 100 176 0 +173 Midbrain 119 200 176 0 +174 Pons 119 159 100 0 +175 Medulla 119 159 200 0 + +#176 Right-Basal-Ganglia found in babies/children section above + +180 Left-Cortical-Dysplasia 73 61 139 0 +181 Right-Cortical-Dysplasia 73 62 139 0 + +#192 Corpus_Callosum listed after #85 above +193 Left-hippocampal_fissure 0 196 255 0 +194 Left-CADG-head 255 164 164 0 +195 Left-subiculum 196 196 0 0 +196 Left-fimbria 0 100 255 0 +197 Right-hippocampal_fissure 128 196 164 0 +198 Right-CADG-head 0 126 75 0 +199 Right-subiculum 128 96 64 0 +200 Right-fimbria 0 50 128 0 +201 alveus 255 204 153 0 +202 perforant_pathway 255 128 128 0 +203 parasubiculum 255 255 0 0 +204 presubiculum 64 0 64 0 +205 subiculum 0 0 255 0 +206 CA1 255 0 0 0 +207 CA2 128 128 255 0 +208 CA3 0 128 0 0 +209 CA4 196 160 128 0 +210 GC-ML-DG 32 200 255 0 +211 HATA 128 255 128 0 +212 fimbria 204 153 204 0 +213 lateral_ventricle 121 17 136 0 +214 molecular_layer_HP 128 0 0 0 +215 hippocampal_fissure 128 32 255 0 +216 entorhinal_cortex 255 204 102 0 +217 molecular_layer_subiculum 128 128 128 0 +218 Amygdala 104 255 255 0 +219 Cerebral_White_Matter 0 226 0 0 +220 Cerebral_Cortex 205 63 78 0 +221 Inf_Lat_Vent 197 58 250 0 +222 Perirhinal 33 150 250 0 +223 Cerebral_White_Matter_Edge 226 0 0 0 +224 Background 100 100 100 0 +225 Ectorhinal 197 150 250 0 +226 HP_tail 170 170 255 0 + +250 Fornix 255 0 0 0 +251 CC_Posterior 0 0 64 0 +252 CC_Mid_Posterior 0 0 112 0 +253 CC_Central 0 0 160 0 +254 CC_Mid_Anterior 0 0 208 0 +255 CC_Anterior 0 0 255 0 + +# This is for keeping track of voxel changes +256 Voxel-Unchanged 0 0 0 0 + +# lymph node and vascular labels +331 Aorta 255 0 0 0 +332 Left-Common-IliacA 255 80 0 0 +333 Right-Common-IliacA 255 160 0 0 +334 Left-External-IliacA 255 255 0 0 +335 Right-External-IliacA 0 255 0 0 +336 Left-Internal-IliacA 255 0 160 0 +337 Right-Internal-IliacA 255 0 255 0 +338 Left-Lateral-SacralA 255 50 80 0 +339 Right-Lateral-SacralA 80 255 50 0 +340 Left-ObturatorA 160 255 50 0 +341 Right-ObturatorA 160 200 255 0 +342 Left-Internal-PudendalA 0 255 160 0 +343 Right-Internal-PudendalA 0 0 255 0 +344 Left-UmbilicalA 80 50 255 0 +345 Right-UmbilicalA 160 0 255 0 +346 Left-Inf-RectalA 255 210 0 0 +347 Right-Inf-RectalA 0 160 255 0 +348 Left-Common-IliacV 255 200 80 0 +349 Right-Common-IliacV 255 200 160 0 +350 Left-External-IliacV 255 80 200 0 +351 Right-External-IliacV 255 160 200 0 +352 Left-Internal-IliacV 30 255 80 0 +353 Right-Internal-IliacV 80 200 255 0 +354 Left-ObturatorV 80 255 200 0 +355 Right-ObturatorV 195 255 200 0 +356 Left-Internal-PudendalV 120 200 20 0 +357 Right-Internal-PudendalV 170 10 200 0 +358 Pos-Lymph 20 130 180 0 +359 Neg-Lymph 20 180 130 0 + +400 V1 206 62 78 0 +401 V2 121 18 134 0 +402 BA44 199 58 250 0 +403 BA45 1 148 0 0 +404 BA4a 221 248 164 0 +405 BA4p 231 148 34 0 +406 BA6 1 118 14 0 +407 BA2 120 118 14 0 +408 BA1_old 123 186 221 0 +409 BAun2 238 13 177 0 +410 BA1 123 186 220 0 +411 BA2b 138 13 206 0 +412 BA3a 238 130 176 0 +413 BA3b 218 230 76 0 +414 MT 38 213 176 0 +415 AIPS_AIP_l 1 225 176 0 +416 AIPS_AIP_r 1 225 176 0 +417 AIPS_VIP_l 200 2 100 0 +418 AIPS_VIP_r 200 2 100 0 +419 IPL_PFcm_l 5 200 90 0 +420 IPL_PFcm_r 5 200 90 0 +421 IPL_PF_l 100 5 200 0 +422 IPL_PFm_l 25 255 100 0 +423 IPL_PFm_r 25 255 100 0 +424 IPL_PFop_l 230 7 100 0 +425 IPL_PFop_r 230 7 100 0 +426 IPL_PF_r 100 5 200 0 +427 IPL_PFt_l 150 10 200 0 +428 IPL_PFt_r 150 10 200 0 +429 IPL_PGa_l 175 10 176 0 +430 IPL_PGa_r 175 10 176 0 
+431 IPL_PGp_l 10 100 255 0 +432 IPL_PGp_r 10 100 255 0 +433 Visual_V3d_l 150 45 70 0 +434 Visual_V3d_r 150 45 70 0 +435 Visual_V4_l 45 200 15 0 +436 Visual_V4_r 45 200 15 0 +437 Visual_V5_b 227 45 100 0 +438 Visual_VP_l 227 45 100 0 +439 Visual_VP_r 227 45 100 0 + +# wm lesions +498 wmsa 143 188 143 0 +499 other_wmsa 255 248 220 0 + +# HiRes Hippocampus labeling +500 right_CA2_3 17 85 136 0 +501 right_alveus 119 187 102 0 +502 right_CA1 204 68 34 0 +503 right_fimbria 204 0 255 0 +504 right_presubiculum 221 187 17 0 +505 right_hippocampal_fissure 153 221 238 0 +506 right_CA4_DG 51 17 17 0 +507 right_subiculum 0 119 85 0 +508 right_fornix 20 100 200 0 + +550 left_CA2_3 17 85 137 0 +551 left_alveus 119 187 103 0 +552 left_CA1 204 68 35 0 +553 left_fimbria 204 0 254 0 +554 left_presubiculum 221 187 16 0 +555 left_hippocampal_fissure 153 221 239 0 +556 left_CA4_DG 51 17 18 0 +557 left_subiculum 0 119 86 0 +558 left_fornix 20 100 201 0 + +600 Tumor 254 254 254 0 + + +# Cerebellar parcellation labels from SUIT (matches labels in cma.h) +#No. Label Name: R G B A +601 Cbm_Left_I_IV 70 130 180 0 +602 Cbm_Right_I_IV 245 245 245 0 +603 Cbm_Left_V 205 62 78 0 +604 Cbm_Right_V 120 18 134 0 +605 Cbm_Left_VI 196 58 250 0 +606 Cbm_Vermis_VI 0 148 0 0 +607 Cbm_Right_VI 220 248 164 0 +608 Cbm_Left_CrusI 230 148 34 0 +609 Cbm_Vermis_CrusI 0 118 14 0 +610 Cbm_Right_CrusI 0 118 14 0 +611 Cbm_Left_CrusII 122 186 220 0 +612 Cbm_Vermis_CrusII 236 13 176 0 +613 Cbm_Right_CrusII 12 48 255 0 +614 Cbm_Left_VIIb 204 182 142 0 +615 Cbm_Vermis_VIIb 42 204 164 0 +616 Cbm_Right_VIIb 119 159 176 0 +617 Cbm_Left_VIIIa 220 216 20 0 +618 Cbm_Vermis_VIIIa 103 255 255 0 +619 Cbm_Right_VIIIa 80 196 98 0 +620 Cbm_Left_VIIIb 60 58 210 0 +621 Cbm_Vermis_VIIIb 60 58 210 0 +622 Cbm_Right_VIIIb 60 58 210 0 +623 Cbm_Left_IX 60 58 210 0 +624 Cbm_Vermis_IX 60 60 60 0 +625 Cbm_Right_IX 255 165 0 0 +626 Cbm_Left_X 255 165 0 0 +627 Cbm_Vermis_X 0 255 127 0 +628 Cbm_Right_X 165 42 42 0 + +# Cerebellar lobule parcellations +640 Cbm_Right_I_V_med 204 0 0 0 +641 Cbm_Right_I_V_mid 255 0 0 0 +642 Cbm_Right_VI_med 0 0 255 0 +643 Cbm_Right_VI_mid 30 144 255 0 +644 Cbm_Right_VI_lat 100 212 237 0 +645 Cbm_Right_CrusI_med 218 165 32 0 +646 Cbm_Right_CrusI_mid 255 215 0 0 +647 Cbm_Right_CrusI_lat 255 255 166 0 +648 Cbm_Right_CrusII_med 153 0 204 0 +649 Cbm_Right_CrusII_mid 153 141 209 0 +650 Cbm_Right_CrusII_lat 204 204 255 0 +651 Cbm_Right_7med 31 212 194 0 +652 Cbm_Right_7mid 3 255 237 0 +653 Cbm_Right_7lat 204 255 255 0 +654 Cbm_Right_8med 86 74 147 0 +655 Cbm_Right_8mid 114 114 190 0 +656 Cbm_Right_8lat 184 178 255 0 +657 Cbm_Right_PUNs 126 138 37 0 +658 Cbm_Right_TONs 189 197 117 0 +659 Cbm_Right_FLOs 240 230 140 0 +660 Cbm_Left_I_V_med 204 0 0 0 +661 Cbm_Left_I_V_mid 255 0 0 0 +662 Cbm_Left_VI_med 0 0 255 0 +663 Cbm_Left_VI_mid 30 144 255 0 +664 Cbm_Left_VI_lat 100 212 237 0 +665 Cbm_Left_CrusI_med 218 165 32 0 +666 Cbm_Left_CrusI_mid 255 215 0 0 +667 Cbm_Left_CrusI_lat 255 255 166 0 +668 Cbm_Left_CrusII_med 153 0 204 0 +669 Cbm_Left_CrusII_mid 153 141 209 0 +670 Cbm_Left_CrusII_lat 204 204 255 0 +671 Cbm_Left_7med 31 212 194 0 +672 Cbm_Left_7mid 3 255 237 0 +673 Cbm_Left_7lat 204 255 255 0 +674 Cbm_Left_8med 86 74 147 0 +675 Cbm_Left_8mid 114 114 190 0 +676 Cbm_Left_8lat 184 178 255 0 +677 Cbm_Left_PUNs 126 138 37 0 +678 Cbm_Left_TONs 189 197 117 0 +679 Cbm_Left_FLOs 240 230 140 0 + +701 CSF-FSL-FAST 120 18 134 0 +702 GrayMatter-FSL-FAST 205 62 78 0 +703 WhiteMatter-FSL-FAST 0 225 0 0 + +999 SUSPICIOUS 255 100 100 0 + +# Below is the color table 
for the cortical labels of the seg volume +# created by mri_aparc2aseg in which the aseg cortex label is replaced +# by the labels in the aparc. It also supports wm labels that will +# eventually be created by mri_aparc2aseg. Otherwise, the aseg labels +# do not change from above. The cortical labels are the same as in +# colortable_desikan_killiany.txt, except that left hemisphere has +# 1000 added to the index and the right has 2000 added. The label +# names are also prepended with ctx-lh or ctx-rh. The white matter +# labels are the same as in colortable_desikan_killiany.txt, except +# that left hemisphere has 3000 added to the index and the right has +# 4000 added. The label names are also prepended with wm-lh or wm-rh. +# Centrum semiovale is also labeled with 5001 (left) and 5002 (right). +# Even further below are the color tables for aparc.a2005s and aparc.a2009s. + +#No. Label Name: R G B A +1000 ctx-lh-unknown 25 5 25 0 +1001 ctx-lh-bankssts 25 100 40 0 +1002 ctx-lh-caudalanteriorcingulate 125 100 160 0 +1003 ctx-lh-caudalmiddlefrontal 100 25 0 0 +1004 ctx-lh-corpuscallosum 120 70 50 0 +1005 ctx-lh-cuneus 220 20 100 0 +1006 ctx-lh-entorhinal 220 20 10 0 +1007 ctx-lh-fusiform 180 220 140 0 +1008 ctx-lh-inferiorparietal 220 60 220 0 +1009 ctx-lh-inferiortemporal 180 40 120 0 +1010 ctx-lh-isthmuscingulate 140 20 140 0 +1011 ctx-lh-lateraloccipital 20 30 140 0 +1012 ctx-lh-lateralorbitofrontal 35 75 50 0 +1013 ctx-lh-lingual 225 140 140 0 +1014 ctx-lh-medialorbitofrontal 200 35 75 0 +1015 ctx-lh-middletemporal 160 100 50 0 +1016 ctx-lh-parahippocampal 20 220 60 0 +1017 ctx-lh-paracentral 60 220 60 0 +1018 ctx-lh-parsopercularis 220 180 140 0 +1019 ctx-lh-parsorbitalis 20 100 50 0 +1020 ctx-lh-parstriangularis 220 60 20 0 +1021 ctx-lh-pericalcarine 120 100 60 0 +1022 ctx-lh-postcentral 220 20 20 0 +1023 ctx-lh-posteriorcingulate 220 180 220 0 +1024 ctx-lh-precentral 60 20 220 0 +1025 ctx-lh-precuneus 160 140 180 0 +1026 ctx-lh-rostralanteriorcingulate 80 20 140 0 +1027 ctx-lh-rostralmiddlefrontal 75 50 125 0 +1028 ctx-lh-superiorfrontal 20 220 160 0 +1029 ctx-lh-superiorparietal 20 180 140 0 +1030 ctx-lh-superiortemporal 140 220 220 0 +1031 ctx-lh-supramarginal 80 160 20 0 +1032 ctx-lh-frontalpole 100 0 100 0 +1033 ctx-lh-temporalpole 70 70 70 0 +1034 ctx-lh-transversetemporal 150 150 200 0 +1035 ctx-lh-insula 255 192 32 0 + +2000 ctx-rh-unknown 25 5 25 0 +2001 ctx-rh-bankssts 25 100 40 0 +2002 ctx-rh-caudalanteriorcingulate 125 100 160 0 +2003 ctx-rh-caudalmiddlefrontal 100 25 0 0 +2004 ctx-rh-corpuscallosum 120 70 50 0 +2005 ctx-rh-cuneus 220 20 100 0 +2006 ctx-rh-entorhinal 220 20 10 0 +2007 ctx-rh-fusiform 180 220 140 0 +2008 ctx-rh-inferiorparietal 220 60 220 0 +2009 ctx-rh-inferiortemporal 180 40 120 0 +2010 ctx-rh-isthmuscingulate 140 20 140 0 +2011 ctx-rh-lateraloccipital 20 30 140 0 +2012 ctx-rh-lateralorbitofrontal 35 75 50 0 +2013 ctx-rh-lingual 225 140 140 0 +2014 ctx-rh-medialorbitofrontal 200 35 75 0 +2015 ctx-rh-middletemporal 160 100 50 0 +2016 ctx-rh-parahippocampal 20 220 60 0 +2017 ctx-rh-paracentral 60 220 60 0 +2018 ctx-rh-parsopercularis 220 180 140 0 +2019 ctx-rh-parsorbitalis 20 100 50 0 +2020 ctx-rh-parstriangularis 220 60 20 0 +2021 ctx-rh-pericalcarine 120 100 60 0 +2022 ctx-rh-postcentral 220 20 20 0 +2023 ctx-rh-posteriorcingulate 220 180 220 0 +2024 ctx-rh-precentral 60 20 220 0 +2025 ctx-rh-precuneus 160 140 180 0 +2026 ctx-rh-rostralanteriorcingulate 80 20 140 0 +2027 ctx-rh-rostralmiddlefrontal 75 50 125 0 +2028 ctx-rh-superiorfrontal 20 220 160 0 +2029 
ctx-rh-superiorparietal 20 180 140 0 +2030 ctx-rh-superiortemporal 140 220 220 0 +2031 ctx-rh-supramarginal 80 160 20 0 +2032 ctx-rh-frontalpole 100 0 100 0 +2033 ctx-rh-temporalpole 70 70 70 0 +2034 ctx-rh-transversetemporal 150 150 200 0 +2035 ctx-rh-insula 255 192 32 0 + +3000 wm-lh-unknown 230 250 230 0 +3001 wm-lh-bankssts 230 155 215 0 +3002 wm-lh-caudalanteriorcingulate 130 155 95 0 +3003 wm-lh-caudalmiddlefrontal 155 230 255 0 +3004 wm-lh-corpuscallosum 135 185 205 0 +3005 wm-lh-cuneus 35 235 155 0 +3006 wm-lh-entorhinal 35 235 245 0 +3007 wm-lh-fusiform 75 35 115 0 +3008 wm-lh-inferiorparietal 35 195 35 0 +3009 wm-lh-inferiortemporal 75 215 135 0 +3010 wm-lh-isthmuscingulate 115 235 115 0 +3011 wm-lh-lateraloccipital 235 225 115 0 +3012 wm-lh-lateralorbitofrontal 220 180 205 0 +3013 wm-lh-lingual 30 115 115 0 +3014 wm-lh-medialorbitofrontal 55 220 180 0 +3015 wm-lh-middletemporal 95 155 205 0 +3016 wm-lh-parahippocampal 235 35 195 0 +3017 wm-lh-paracentral 195 35 195 0 +3018 wm-lh-parsopercularis 35 75 115 0 +3019 wm-lh-parsorbitalis 235 155 205 0 +3020 wm-lh-parstriangularis 35 195 235 0 +3021 wm-lh-pericalcarine 135 155 195 0 +3022 wm-lh-postcentral 35 235 235 0 +3023 wm-lh-posteriorcingulate 35 75 35 0 +3024 wm-lh-precentral 195 235 35 0 +3025 wm-lh-precuneus 95 115 75 0 +3026 wm-lh-rostralanteriorcingulate 175 235 115 0 +3027 wm-lh-rostralmiddlefrontal 180 205 130 0 +3028 wm-lh-superiorfrontal 235 35 95 0 +3029 wm-lh-superiorparietal 235 75 115 0 +3030 wm-lh-superiortemporal 115 35 35 0 +3031 wm-lh-supramarginal 175 95 235 0 +3032 wm-lh-frontalpole 155 255 155 0 +3033 wm-lh-temporalpole 185 185 185 0 +3034 wm-lh-transversetemporal 105 105 55 0 +3035 wm-lh-insula 254 191 31 0 + +4000 wm-rh-unknown 230 250 230 0 +4001 wm-rh-bankssts 230 155 215 0 +4002 wm-rh-caudalanteriorcingulate 130 155 95 0 +4003 wm-rh-caudalmiddlefrontal 155 230 255 0 +4004 wm-rh-corpuscallosum 135 185 205 0 +4005 wm-rh-cuneus 35 235 155 0 +4006 wm-rh-entorhinal 35 235 245 0 +4007 wm-rh-fusiform 75 35 115 0 +4008 wm-rh-inferiorparietal 35 195 35 0 +4009 wm-rh-inferiortemporal 75 215 135 0 +4010 wm-rh-isthmuscingulate 115 235 115 0 +4011 wm-rh-lateraloccipital 235 225 115 0 +4012 wm-rh-lateralorbitofrontal 220 180 205 0 +4013 wm-rh-lingual 30 115 115 0 +4014 wm-rh-medialorbitofrontal 55 220 180 0 +4015 wm-rh-middletemporal 95 155 205 0 +4016 wm-rh-parahippocampal 235 35 195 0 +4017 wm-rh-paracentral 195 35 195 0 +4018 wm-rh-parsopercularis 35 75 115 0 +4019 wm-rh-parsorbitalis 235 155 205 0 +4020 wm-rh-parstriangularis 35 195 235 0 +4021 wm-rh-pericalcarine 135 155 195 0 +4022 wm-rh-postcentral 35 235 235 0 +4023 wm-rh-posteriorcingulate 35 75 35 0 +4024 wm-rh-precentral 195 235 35 0 +4025 wm-rh-precuneus 95 115 75 0 +4026 wm-rh-rostralanteriorcingulate 175 235 115 0 +4027 wm-rh-rostralmiddlefrontal 180 205 130 0 +4028 wm-rh-superiorfrontal 235 35 95 0 +4029 wm-rh-superiorparietal 235 75 115 0 +4030 wm-rh-superiortemporal 115 35 35 0 +4031 wm-rh-supramarginal 175 95 235 0 +4032 wm-rh-frontalpole 155 255 155 0 +4033 wm-rh-temporalpole 185 185 185 0 +4034 wm-rh-transversetemporal 105 105 55 0 +4035 wm-rh-insula 254 191 31 0 + +# Below is the color table for the cortical labels of the seg volume +# created by mri_aparc2aseg (with --a2005s flag) in which the aseg +# cortex label is replaced by the labels in the aparc.a2005s. The +# cortical labels are the same as in Simple_surface_labels2005.txt, +# except that left hemisphere has 1100 added to the index and the +# right has 2100 added. 
The label names are also prepended with +# ctx-lh or ctx-rh. The aparc.a2009s labels are further below + +#No. Label Name: R G B A +1100 ctx-lh-Unknown 0 0 0 0 +1101 ctx-lh-Corpus_callosum 50 50 50 0 +1102 ctx-lh-G_and_S_Insula_ONLY_AVERAGE 180 20 30 0 +1103 ctx-lh-G_cingulate-Isthmus 60 25 25 0 +1104 ctx-lh-G_cingulate-Main_part 25 60 60 0 + +1200 ctx-lh-G_cingulate-caudal_ACC 25 60 61 0 +1201 ctx-lh-G_cingulate-rostral_ACC 25 90 60 0 +1202 ctx-lh-G_cingulate-posterior 25 120 60 0 + +1205 ctx-lh-S_cingulate-caudal_ACC 25 150 60 0 +1206 ctx-lh-S_cingulate-rostral_ACC 25 180 60 0 +1207 ctx-lh-S_cingulate-posterior 25 210 60 0 + +1210 ctx-lh-S_pericallosal-caudal 25 150 90 0 +1211 ctx-lh-S_pericallosal-rostral 25 180 90 0 +1212 ctx-lh-S_pericallosal-posterior 25 210 90 0 + +1105 ctx-lh-G_cuneus 180 20 20 0 +1106 ctx-lh-G_frontal_inf-Opercular_part 220 20 100 0 +1107 ctx-lh-G_frontal_inf-Orbital_part 140 60 60 0 +1108 ctx-lh-G_frontal_inf-Triangular_part 180 220 140 0 +1109 ctx-lh-G_frontal_middle 140 100 180 0 +1110 ctx-lh-G_frontal_superior 180 20 140 0 +1111 ctx-lh-G_frontomarginal 140 20 140 0 +1112 ctx-lh-G_insular_long 21 10 10 0 +1113 ctx-lh-G_insular_short 225 140 140 0 +1114 ctx-lh-G_and_S_occipital_inferior 23 60 180 0 +1115 ctx-lh-G_occipital_middle 180 60 180 0 +1116 ctx-lh-G_occipital_superior 20 220 60 0 +1117 ctx-lh-G_occipit-temp_lat-Or_fusiform 60 20 140 0 +1118 ctx-lh-G_occipit-temp_med-Lingual_part 220 180 140 0 +1119 ctx-lh-G_occipit-temp_med-Parahippocampal_part 65 100 20 0 +1120 ctx-lh-G_orbital 220 60 20 0 +1121 ctx-lh-G_paracentral 60 100 60 0 +1122 ctx-lh-G_parietal_inferior-Angular_part 20 60 220 0 +1123 ctx-lh-G_parietal_inferior-Supramarginal_part 100 100 60 0 +1124 ctx-lh-G_parietal_superior 220 180 220 0 +1125 ctx-lh-G_postcentral 20 180 140 0 +1126 ctx-lh-G_precentral 60 140 180 0 +1127 ctx-lh-G_precuneus 25 20 140 0 +1128 ctx-lh-G_rectus 20 60 100 0 +1129 ctx-lh-G_subcallosal 60 220 20 0 +1130 ctx-lh-G_subcentral 60 20 220 0 +1131 ctx-lh-G_temporal_inferior 220 220 100 0 +1132 ctx-lh-G_temporal_middle 180 60 60 0 +1133 ctx-lh-G_temp_sup-G_temp_transv_and_interm_S 60 60 220 0 +1134 ctx-lh-G_temp_sup-Lateral_aspect 220 60 220 0 +1135 ctx-lh-G_temp_sup-Planum_polare 65 220 60 0 +1136 ctx-lh-G_temp_sup-Planum_tempolare 25 140 20 0 +1137 ctx-lh-G_and_S_transverse_frontopolar 13 0 250 0 +1138 ctx-lh-Lat_Fissure-ant_sgt-ramus_horizontal 61 20 220 0 +1139 ctx-lh-Lat_Fissure-ant_sgt-ramus_vertical 61 20 60 0 +1140 ctx-lh-Lat_Fissure-post_sgt 61 60 100 0 +1141 ctx-lh-Medial_wall 25 25 25 0 +1142 ctx-lh-Pole_occipital 140 20 60 0 +1143 ctx-lh-Pole_temporal 220 180 20 0 +1144 ctx-lh-S_calcarine 63 180 180 0 +1145 ctx-lh-S_central 221 20 10 0 +1146 ctx-lh-S_central_insula 21 220 20 0 +1147 ctx-lh-S_cingulate-Main_part_and_Intracingulate 183 100 20 0 +1148 ctx-lh-S_cingulate-Marginalis_part 221 20 100 0 +1149 ctx-lh-S_circular_insula_anterior 221 60 140 0 +1150 ctx-lh-S_circular_insula_inferior 221 20 220 0 +1151 ctx-lh-S_circular_insula_superior 61 220 220 0 +1152 ctx-lh-S_collateral_transverse_ant 100 200 200 0 +1153 ctx-lh-S_collateral_transverse_post 10 200 200 0 +1154 ctx-lh-S_frontal_inferior 221 220 20 0 +1155 ctx-lh-S_frontal_middle 141 20 100 0 +1156 ctx-lh-S_frontal_superior 61 220 100 0 +1157 ctx-lh-S_frontomarginal 21 220 60 0 +1158 ctx-lh-S_intermedius_primus-Jensen 141 60 20 0 +1159 ctx-lh-S_intraparietal-and_Parietal_transverse 143 20 220 0 +1160 ctx-lh-S_occipital_anterior 61 20 180 0 +1161 ctx-lh-S_occipital_middle_and_Lunatus 101 60 220 0 +1162 
ctx-lh-S_occipital_superior_and_transversalis 21 20 140 0 +1163 ctx-lh-S_occipito-temporal_lateral 221 140 20 0 +1164 ctx-lh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0 +1165 ctx-lh-S_orbital-H_shapped 101 20 20 0 +1166 ctx-lh-S_orbital_lateral 221 100 20 0 +1167 ctx-lh-S_orbital_medial-Or_olfactory 181 200 20 0 +1168 ctx-lh-S_paracentral 21 180 140 0 +1169 ctx-lh-S_parieto_occipital 101 100 180 0 +1170 ctx-lh-S_pericallosal 181 220 20 0 +1171 ctx-lh-S_postcentral 21 140 200 0 +1172 ctx-lh-S_precentral-Inferior-part 21 20 240 0 +1173 ctx-lh-S_precentral-Superior-part 21 20 200 0 +1174 ctx-lh-S_subcentral_ant 61 180 60 0 +1175 ctx-lh-S_subcentral_post 61 180 250 0 +1176 ctx-lh-S_suborbital 21 20 60 0 +1177 ctx-lh-S_subparietal 101 60 60 0 +1178 ctx-lh-S_supracingulate 21 220 220 0 +1179 ctx-lh-S_temporal_inferior 21 180 180 0 +1180 ctx-lh-S_temporal_superior 223 220 60 0 +1181 ctx-lh-S_temporal_transverse 221 60 60 0 + +2100 ctx-rh-Unknown 0 0 0 0 +2101 ctx-rh-Corpus_callosum 50 50 50 0 +2102 ctx-rh-G_and_S_Insula_ONLY_AVERAGE 180 20 30 0 +2103 ctx-rh-G_cingulate-Isthmus 60 25 25 0 +2104 ctx-rh-G_cingulate-Main_part 25 60 60 0 + +2105 ctx-rh-G_cuneus 180 20 20 0 +2106 ctx-rh-G_frontal_inf-Opercular_part 220 20 100 0 +2107 ctx-rh-G_frontal_inf-Orbital_part 140 60 60 0 +2108 ctx-rh-G_frontal_inf-Triangular_part 180 220 140 0 +2109 ctx-rh-G_frontal_middle 140 100 180 0 +2110 ctx-rh-G_frontal_superior 180 20 140 0 +2111 ctx-rh-G_frontomarginal 140 20 140 0 +2112 ctx-rh-G_insular_long 21 10 10 0 +2113 ctx-rh-G_insular_short 225 140 140 0 +2114 ctx-rh-G_and_S_occipital_inferior 23 60 180 0 +2115 ctx-rh-G_occipital_middle 180 60 180 0 +2116 ctx-rh-G_occipital_superior 20 220 60 0 +2117 ctx-rh-G_occipit-temp_lat-Or_fusiform 60 20 140 0 +2118 ctx-rh-G_occipit-temp_med-Lingual_part 220 180 140 0 +2119 ctx-rh-G_occipit-temp_med-Parahippocampal_part 65 100 20 0 +2120 ctx-rh-G_orbital 220 60 20 0 +2121 ctx-rh-G_paracentral 60 100 60 0 +2122 ctx-rh-G_parietal_inferior-Angular_part 20 60 220 0 +2123 ctx-rh-G_parietal_inferior-Supramarginal_part 100 100 60 0 +2124 ctx-rh-G_parietal_superior 220 180 220 0 +2125 ctx-rh-G_postcentral 20 180 140 0 +2126 ctx-rh-G_precentral 60 140 180 0 +2127 ctx-rh-G_precuneus 25 20 140 0 +2128 ctx-rh-G_rectus 20 60 100 0 +2129 ctx-rh-G_subcallosal 60 220 20 0 +2130 ctx-rh-G_subcentral 60 20 220 0 +2131 ctx-rh-G_temporal_inferior 220 220 100 0 +2132 ctx-rh-G_temporal_middle 180 60 60 0 +2133 ctx-rh-G_temp_sup-G_temp_transv_and_interm_S 60 60 220 0 +2134 ctx-rh-G_temp_sup-Lateral_aspect 220 60 220 0 +2135 ctx-rh-G_temp_sup-Planum_polare 65 220 60 0 +2136 ctx-rh-G_temp_sup-Planum_tempolare 25 140 20 0 +2137 ctx-rh-G_and_S_transverse_frontopolar 13 0 250 0 +2138 ctx-rh-Lat_Fissure-ant_sgt-ramus_horizontal 61 20 220 0 +2139 ctx-rh-Lat_Fissure-ant_sgt-ramus_vertical 61 20 60 0 +2140 ctx-rh-Lat_Fissure-post_sgt 61 60 100 0 +2141 ctx-rh-Medial_wall 25 25 25 0 +2142 ctx-rh-Pole_occipital 140 20 60 0 +2143 ctx-rh-Pole_temporal 220 180 20 0 +2144 ctx-rh-S_calcarine 63 180 180 0 +2145 ctx-rh-S_central 221 20 10 0 +2146 ctx-rh-S_central_insula 21 220 20 0 +2147 ctx-rh-S_cingulate-Main_part_and_Intracingulate 183 100 20 0 +2148 ctx-rh-S_cingulate-Marginalis_part 221 20 100 0 +2149 ctx-rh-S_circular_insula_anterior 221 60 140 0 +2150 ctx-rh-S_circular_insula_inferior 221 20 220 0 +2151 ctx-rh-S_circular_insula_superior 61 220 220 0 +2152 ctx-rh-S_collateral_transverse_ant 100 200 200 0 +2153 ctx-rh-S_collateral_transverse_post 10 200 200 0 +2154 ctx-rh-S_frontal_inferior 221 220 
20 0 +2155 ctx-rh-S_frontal_middle 141 20 100 0 +2156 ctx-rh-S_frontal_superior 61 220 100 0 +2157 ctx-rh-S_frontomarginal 21 220 60 0 +2158 ctx-rh-S_intermedius_primus-Jensen 141 60 20 0 +2159 ctx-rh-S_intraparietal-and_Parietal_transverse 143 20 220 0 +2160 ctx-rh-S_occipital_anterior 61 20 180 0 +2161 ctx-rh-S_occipital_middle_and_Lunatus 101 60 220 0 +2162 ctx-rh-S_occipital_superior_and_transversalis 21 20 140 0 +2163 ctx-rh-S_occipito-temporal_lateral 221 140 20 0 +2164 ctx-rh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0 +2165 ctx-rh-S_orbital-H_shapped 101 20 20 0 +2166 ctx-rh-S_orbital_lateral 221 100 20 0 +2167 ctx-rh-S_orbital_medial-Or_olfactory 181 200 20 0 +2168 ctx-rh-S_paracentral 21 180 140 0 +2169 ctx-rh-S_parieto_occipital 101 100 180 0 +2170 ctx-rh-S_pericallosal 181 220 20 0 +2171 ctx-rh-S_postcentral 21 140 200 0 +2172 ctx-rh-S_precentral-Inferior-part 21 20 240 0 +2173 ctx-rh-S_precentral-Superior-part 21 20 200 0 +2174 ctx-rh-S_subcentral_ant 61 180 60 0 +2175 ctx-rh-S_subcentral_post 61 180 250 0 +2176 ctx-rh-S_suborbital 21 20 60 0 +2177 ctx-rh-S_subparietal 101 60 60 0 +2178 ctx-rh-S_supracingulate 21 220 220 0 +2179 ctx-rh-S_temporal_inferior 21 180 180 0 +2180 ctx-rh-S_temporal_superior 223 220 60 0 +2181 ctx-rh-S_temporal_transverse 221 60 60 0 + + +2200 ctx-rh-G_cingulate-caudal_ACC 25 60 61 0 +2201 ctx-rh-G_cingulate-rostral_ACC 25 90 60 0 +2202 ctx-rh-G_cingulate-posterior 25 120 60 0 + +2205 ctx-rh-S_cingulate-caudal_ACC 25 150 60 0 +2206 ctx-rh-S_cingulate-rostral_ACC 25 180 60 0 +2207 ctx-rh-S_cingulate-posterior 25 210 60 0 + +2210 ctx-rh-S_pericallosal-caudal 25 150 90 0 +2211 ctx-rh-S_pericallosal-rostral 25 180 90 0 +2212 ctx-rh-S_pericallosal-posterior 25 210 90 0 + +3100 wm-lh-Unknown 0 0 0 0 +3101 wm-lh-Corpus_callosum 50 50 50 0 +3102 wm-lh-G_and_S_Insula_ONLY_AVERAGE 180 20 30 0 +3103 wm-lh-G_cingulate-Isthmus 60 25 25 0 +3104 wm-lh-G_cingulate-Main_part 25 60 60 0 +3105 wm-lh-G_cuneus 180 20 20 0 +3106 wm-lh-G_frontal_inf-Opercular_part 220 20 100 0 +3107 wm-lh-G_frontal_inf-Orbital_part 140 60 60 0 +3108 wm-lh-G_frontal_inf-Triangular_part 180 220 140 0 +3109 wm-lh-G_frontal_middle 140 100 180 0 +3110 wm-lh-G_frontal_superior 180 20 140 0 +3111 wm-lh-G_frontomarginal 140 20 140 0 +3112 wm-lh-G_insular_long 21 10 10 0 +3113 wm-lh-G_insular_short 225 140 140 0 +3114 wm-lh-G_and_S_occipital_inferior 23 60 180 0 +3115 wm-lh-G_occipital_middle 180 60 180 0 +3116 wm-lh-G_occipital_superior 20 220 60 0 +3117 wm-lh-G_occipit-temp_lat-Or_fusiform 60 20 140 0 +3118 wm-lh-G_occipit-temp_med-Lingual_part 220 180 140 0 +3119 wm-lh-G_occipit-temp_med-Parahippocampal_part 65 100 20 0 +3120 wm-lh-G_orbital 220 60 20 0 +3121 wm-lh-G_paracentral 60 100 60 0 +3122 wm-lh-G_parietal_inferior-Angular_part 20 60 220 0 +3123 wm-lh-G_parietal_inferior-Supramarginal_part 100 100 60 0 +3124 wm-lh-G_parietal_superior 220 180 220 0 +3125 wm-lh-G_postcentral 20 180 140 0 +3126 wm-lh-G_precentral 60 140 180 0 +3127 wm-lh-G_precuneus 25 20 140 0 +3128 wm-lh-G_rectus 20 60 100 0 +3129 wm-lh-G_subcallosal 60 220 20 0 +3130 wm-lh-G_subcentral 60 20 220 0 +3131 wm-lh-G_temporal_inferior 220 220 100 0 +3132 wm-lh-G_temporal_middle 180 60 60 0 +3133 wm-lh-G_temp_sup-G_temp_transv_and_interm_S 60 60 220 0 +3134 wm-lh-G_temp_sup-Lateral_aspect 220 60 220 0 +3135 wm-lh-G_temp_sup-Planum_polare 65 220 60 0 +3136 wm-lh-G_temp_sup-Planum_tempolare 25 140 20 0 +3137 wm-lh-G_and_S_transverse_frontopolar 13 0 250 0 +3138 wm-lh-Lat_Fissure-ant_sgt-ramus_horizontal 61 20 220 0 +3139 
wm-lh-Lat_Fissure-ant_sgt-ramus_vertical 61 20 60 0 +3140 wm-lh-Lat_Fissure-post_sgt 61 60 100 0 +3141 wm-lh-Medial_wall 25 25 25 0 +3142 wm-lh-Pole_occipital 140 20 60 0 +3143 wm-lh-Pole_temporal 220 180 20 0 +3144 wm-lh-S_calcarine 63 180 180 0 +3145 wm-lh-S_central 221 20 10 0 +3146 wm-lh-S_central_insula 21 220 20 0 +3147 wm-lh-S_cingulate-Main_part_and_Intracingulate 183 100 20 0 +3148 wm-lh-S_cingulate-Marginalis_part 221 20 100 0 +3149 wm-lh-S_circular_insula_anterior 221 60 140 0 +3150 wm-lh-S_circular_insula_inferior 221 20 220 0 +3151 wm-lh-S_circular_insula_superior 61 220 220 0 +3152 wm-lh-S_collateral_transverse_ant 100 200 200 0 +3153 wm-lh-S_collateral_transverse_post 10 200 200 0 +3154 wm-lh-S_frontal_inferior 221 220 20 0 +3155 wm-lh-S_frontal_middle 141 20 100 0 +3156 wm-lh-S_frontal_superior 61 220 100 0 +3157 wm-lh-S_frontomarginal 21 220 60 0 +3158 wm-lh-S_intermedius_primus-Jensen 141 60 20 0 +3159 wm-lh-S_intraparietal-and_Parietal_transverse 143 20 220 0 +3160 wm-lh-S_occipital_anterior 61 20 180 0 +3161 wm-lh-S_occipital_middle_and_Lunatus 101 60 220 0 +3162 wm-lh-S_occipital_superior_and_transversalis 21 20 140 0 +3163 wm-lh-S_occipito-temporal_lateral 221 140 20 0 +3164 wm-lh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0 +3165 wm-lh-S_orbital-H_shapped 101 20 20 0 +3166 wm-lh-S_orbital_lateral 221 100 20 0 +3167 wm-lh-S_orbital_medial-Or_olfactory 181 200 20 0 +3168 wm-lh-S_paracentral 21 180 140 0 +3169 wm-lh-S_parieto_occipital 101 100 180 0 +3170 wm-lh-S_pericallosal 181 220 20 0 +3171 wm-lh-S_postcentral 21 140 200 0 +3172 wm-lh-S_precentral-Inferior-part 21 20 240 0 +3173 wm-lh-S_precentral-Superior-part 21 20 200 0 +3174 wm-lh-S_subcentral_ant 61 180 60 0 +3175 wm-lh-S_subcentral_post 61 180 250 0 +3176 wm-lh-S_suborbital 21 20 60 0 +3177 wm-lh-S_subparietal 101 60 60 0 +3178 wm-lh-S_supracingulate 21 220 220 0 +3179 wm-lh-S_temporal_inferior 21 180 180 0 +3180 wm-lh-S_temporal_superior 223 220 60 0 +3181 wm-lh-S_temporal_transverse 221 60 60 0 + +4100 wm-rh-Unknown 0 0 0 0 +4101 wm-rh-Corpus_callosum 50 50 50 0 +4102 wm-rh-G_and_S_Insula_ONLY_AVERAGE 180 20 30 0 +4103 wm-rh-G_cingulate-Isthmus 60 25 25 0 +4104 wm-rh-G_cingulate-Main_part 25 60 60 0 +4105 wm-rh-G_cuneus 180 20 20 0 +4106 wm-rh-G_frontal_inf-Opercular_part 220 20 100 0 +4107 wm-rh-G_frontal_inf-Orbital_part 140 60 60 0 +4108 wm-rh-G_frontal_inf-Triangular_part 180 220 140 0 +4109 wm-rh-G_frontal_middle 140 100 180 0 +4110 wm-rh-G_frontal_superior 180 20 140 0 +4111 wm-rh-G_frontomarginal 140 20 140 0 +4112 wm-rh-G_insular_long 21 10 10 0 +4113 wm-rh-G_insular_short 225 140 140 0 +4114 wm-rh-G_and_S_occipital_inferior 23 60 180 0 +4115 wm-rh-G_occipital_middle 180 60 180 0 +4116 wm-rh-G_occipital_superior 20 220 60 0 +4117 wm-rh-G_occipit-temp_lat-Or_fusiform 60 20 140 0 +4118 wm-rh-G_occipit-temp_med-Lingual_part 220 180 140 0 +4119 wm-rh-G_occipit-temp_med-Parahippocampal_part 65 100 20 0 +4120 wm-rh-G_orbital 220 60 20 0 +4121 wm-rh-G_paracentral 60 100 60 0 +4122 wm-rh-G_parietal_inferior-Angular_part 20 60 220 0 +4123 wm-rh-G_parietal_inferior-Supramarginal_part 100 100 60 0 +4124 wm-rh-G_parietal_superior 220 180 220 0 +4125 wm-rh-G_postcentral 20 180 140 0 +4126 wm-rh-G_precentral 60 140 180 0 +4127 wm-rh-G_precuneus 25 20 140 0 +4128 wm-rh-G_rectus 20 60 100 0 +4129 wm-rh-G_subcallosal 60 220 20 0 +4130 wm-rh-G_subcentral 60 20 220 0 +4131 wm-rh-G_temporal_inferior 220 220 100 0 +4132 wm-rh-G_temporal_middle 180 60 60 0 +4133 wm-rh-G_temp_sup-G_temp_transv_and_interm_S 60 60 
220 0 +4134 wm-rh-G_temp_sup-Lateral_aspect 220 60 220 0 +4135 wm-rh-G_temp_sup-Planum_polare 65 220 60 0 +4136 wm-rh-G_temp_sup-Planum_tempolare 25 140 20 0 +4137 wm-rh-G_and_S_transverse_frontopolar 13 0 250 0 +4138 wm-rh-Lat_Fissure-ant_sgt-ramus_horizontal 61 20 220 0 +4139 wm-rh-Lat_Fissure-ant_sgt-ramus_vertical 61 20 60 0 +4140 wm-rh-Lat_Fissure-post_sgt 61 60 100 0 +4141 wm-rh-Medial_wall 25 25 25 0 +4142 wm-rh-Pole_occipital 140 20 60 0 +4143 wm-rh-Pole_temporal 220 180 20 0 +4144 wm-rh-S_calcarine 63 180 180 0 +4145 wm-rh-S_central 221 20 10 0 +4146 wm-rh-S_central_insula 21 220 20 0 +4147 wm-rh-S_cingulate-Main_part_and_Intracingulate 183 100 20 0 +4148 wm-rh-S_cingulate-Marginalis_part 221 20 100 0 +4149 wm-rh-S_circular_insula_anterior 221 60 140 0 +4150 wm-rh-S_circular_insula_inferior 221 20 220 0 +4151 wm-rh-S_circular_insula_superior 61 220 220 0 +4152 wm-rh-S_collateral_transverse_ant 100 200 200 0 +4153 wm-rh-S_collateral_transverse_post 10 200 200 0 +4154 wm-rh-S_frontal_inferior 221 220 20 0 +4155 wm-rh-S_frontal_middle 141 20 100 0 +4156 wm-rh-S_frontal_superior 61 220 100 0 +4157 wm-rh-S_frontomarginal 21 220 60 0 +4158 wm-rh-S_intermedius_primus-Jensen 141 60 20 0 +4159 wm-rh-S_intraparietal-and_Parietal_transverse 143 20 220 0 +4160 wm-rh-S_occipital_anterior 61 20 180 0 +4161 wm-rh-S_occipital_middle_and_Lunatus 101 60 220 0 +4162 wm-rh-S_occipital_superior_and_transversalis 21 20 140 0 +4163 wm-rh-S_occipito-temporal_lateral 221 140 20 0 +4164 wm-rh-S_occipito-temporal_medial_and_S_Lingual 141 100 220 0 +4165 wm-rh-S_orbital-H_shapped 101 20 20 0 +4166 wm-rh-S_orbital_lateral 221 100 20 0 +4167 wm-rh-S_orbital_medial-Or_olfactory 181 200 20 0 +4168 wm-rh-S_paracentral 21 180 140 0 +4169 wm-rh-S_parieto_occipital 101 100 180 0 +4170 wm-rh-S_pericallosal 181 220 20 0 +4171 wm-rh-S_postcentral 21 140 200 0 +4172 wm-rh-S_precentral-Inferior-part 21 20 240 0 +4173 wm-rh-S_precentral-Superior-part 21 20 200 0 +4174 wm-rh-S_subcentral_ant 61 180 60 0 +4175 wm-rh-S_subcentral_post 61 180 250 0 +4176 wm-rh-S_suborbital 21 20 60 0 +4177 wm-rh-S_subparietal 101 60 60 0 +4178 wm-rh-S_supracingulate 21 220 220 0 +4179 wm-rh-S_temporal_inferior 21 180 180 0 +4180 wm-rh-S_temporal_superior 223 220 60 0 +4181 wm-rh-S_temporal_transverse 221 60 60 0 + +5001 Left-UnsegmentedWhiteMatter 20 30 40 0 +5002 Right-UnsegmentedWhiteMatter 20 30 40 0 + +# Below is the color table for white-matter pathways produced by dmri_paths + +#No. 
Label Name: R G B A +# +5100 fmajor 204 102 102 0 +5101 fminor 204 102 102 0 +# +5102 lh.atr 255 255 102 0 +5103 lh.cab 153 204 0 0 +5104 lh.ccg 0 153 153 0 +5105 lh.cst 204 153 255 0 +5106 lh.ilf 255 153 51 0 +5107 lh.slfp 204 204 204 0 +5108 lh.slft 153 255 255 0 +5109 lh.unc 102 153 255 0 +# +5110 rh.atr 255 255 102 0 +5111 rh.cab 153 204 0 0 +5112 rh.ccg 0 153 153 0 +5113 rh.cst 204 153 255 0 +5114 rh.ilf 255 153 51 0 +5115 rh.slfp 204 204 204 0 +5116 rh.slft 153 255 255 0 +5117 rh.unc 102 153 255 0 + +# These are the same tracula labels as above in human-readable form +5200 CC-ForcepsMajor 204 102 102 0 +5201 CC-ForcepsMinor 204 102 102 0 +5202 LAntThalRadiation 255 255 102 0 +5203 LCingulumAngBundle 153 204 0 0 +5204 LCingulumCingGyrus 0 153 153 0 +5205 LCorticospinalTract 204 153 255 0 +5206 LInfLongFas 255 153 51 0 +5207 LSupLongFasParietal 204 204 204 0 +5208 LSupLongFasTemporal 153 255 255 0 +5209 LUncinateFas 102 153 255 0 +5210 RAntThalRadiation 255 255 102 0 +5211 RCingulumAngBundle 153 204 0 0 +5212 RCingulumCingGyrus 0 153 153 0 +5213 RCorticospinalTract 204 153 255 0 +5214 RInfLongFas 255 153 51 0 +5215 RSupLongFasParietal 204 204 204 0 +5216 RSupLongFasTemporal 153 255 255 0 +5217 RUncinateFas 102 153 255 0 + +######################################## + +6000 CST-orig 0 255 0 0 +6001 CST-hammer 255 255 0 0 +6002 CST-CVS 0 255 255 0 +6003 CST-flirt 0 0 255 0 + +6010 Left-SLF1 236 16 231 0 +6020 Right-SLF1 237 18 232 0 + +6030 Left-SLF3 236 13 227 0 +6040 Right-SLF3 236 17 228 0 + +6050 Left-CST 1 255 1 0 +6060 Right-CST 2 255 1 0 + +6070 Left-SLF2 236 14 230 0 +6080 Right-SLF2 237 14 230 0 + +#No. Label Name: R G B A + +7001 Lateral-nucleus 72 132 181 0 +7002 Basolateral-nucleus 243 243 243 0 +7003 Basal-nucleus 207 63 79 0 +7004 Centromedial-nucleus 121 20 135 0 +7005 Central-nucleus 197 60 248 0 +7006 Medial-nucleus 2 149 2 0 +7007 Cortical-nucleus 221 249 166 0 +7008 Accessory-Basal-nucleus 232 146 35 0 +7009 Corticoamygdaloid-transitio 20 60 120 0 +7010 Anterior-amygdaloid-area-AAA 250 250 0 0 +7011 Fusion-amygdala-HP-FAH 122 187 222 0 +7012 Hippocampal-amygdala-transition-HATA 237 12 177 0 +7013 Endopiriform-nucleus 10 49 255 0 +7014 Lateral-nucleus-olfactory-tract 205 184 144 0 +7015 Paralaminar-nucleus 45 205 165 0 +7016 Intercalated-nucleus 117 160 175 0 +7017 Prepiriform-cortex 221 217 21 0 +7018 Periamygdaloid-cortex 20 60 120 0 +7019 Envelope-Amygdala 141 21 100 0 +7020 Extranuclear-Amydala 225 140 141 0 + +7100 Brainstem-inferior-colliculus 42 201 168 0 +7101 Brainstem-cochlear-nucleus 168 104 162 0 + +8001 Thalamus-Anterior 74 130 181 0 +8002 Thalamus-Ventral-anterior 242 241 240 0 +8003 Thalamus-Lateral-dorsal 206 65 78 0 +8004 Thalamus-Lateral-posterior 120 21 133 0 +8005 Thalamus-Ventral-lateral 195 61 246 0 +8006 Thalamus-Ventral-posterior-medial 3 147 6 0 +8007 Thalamus-Ventral-posterior-lateral 220 251 163 0 +8008 Thalamus-intralaminar 232 146 33 0 +8009 Thalamus-centromedian 4 114 14 0 +8010 Thalamus-mediodorsal 121 184 220 0 +8011 Thalamus-medial 235 11 175 0 +8012 Thalamus-pulvinar 12 46 250 0 +8013 Thalamus-lateral-geniculate 203 182 143 0 +8014 Thalamus-medial-geniculate 42 204 167 0 + +# +# Labels for thalamus parcellation using probabilistic tractography. See: +# Functional--Anatomical Validation and Individual Variation of Diffusion +# Tractography-based Segmentation of the Human Thalamus; Cerebral Cortex +# January 2005;15:31--39, doi:10.1093/cercor/bhh105, Advance Access +# publication July 6, 2004 +# + +#No. 
Label Name: R G B A +9000 ctx-lh-prefrontal 30 5 30 0 +9001 ctx-lh-primary-motor 30 100 45 0 +9002 ctx-lh-premotor 130 100 165 0 +9003 ctx-lh-temporal 105 25 5 0 +9004 ctx-lh-posterior-parietal 125 70 55 0 +9005 ctx-lh-prim-sec-somatosensory 225 20 105 0 +9006 ctx-lh-occipital 225 20 15 0 + +9500 ctx-rh-prefrontal 30 55 30 0 +9501 ctx-rh-primary-motor 30 150 45 0 +9502 ctx-rh-premotor 130 150 165 0 +9503 ctx-rh-temporal 105 75 5 0 +9504 ctx-rh-posterior-parietal 125 120 55 0 +9505 ctx-rh-prim-sec-somatosensory 225 70 105 0 +9506 ctx-rh-occipital 225 70 15 0 + +# Below is the color table for the cortical labels of the seg volume +# created by mri_aparc2aseg (with --a2009s flag) in which the aseg +# cortex label is replaced by the labels in the aparc.a2009s. The +# cortical labels are the same as in Simple_surface_labels2009.txt, +# except that left hemisphere has 11100 added to the index and the +# right has 12100 added. The label names are also prepended with +# ctx_lh_, ctx_rh_, wm_lh_ and wm_rh_ (note usage of _ instead of - +# to differentiate from a2005s labels). + +#No. Label Name: R G B A +11100 ctx_lh_Unknown 0 0 0 0 +11101 ctx_lh_G_and_S_frontomargin 23 220 60 0 +11102 ctx_lh_G_and_S_occipital_inf 23 60 180 0 +11103 ctx_lh_G_and_S_paracentral 63 100 60 0 +11104 ctx_lh_G_and_S_subcentral 63 20 220 0 +11105 ctx_lh_G_and_S_transv_frontopol 13 0 250 0 +11106 ctx_lh_G_and_S_cingul-Ant 26 60 0 0 +11107 ctx_lh_G_and_S_cingul-Mid-Ant 26 60 75 0 +11108 ctx_lh_G_and_S_cingul-Mid-Post 26 60 150 0 +11109 ctx_lh_G_cingul-Post-dorsal 25 60 250 0 +11110 ctx_lh_G_cingul-Post-ventral 60 25 25 0 +11111 ctx_lh_G_cuneus 180 20 20 0 +11112 ctx_lh_G_front_inf-Opercular 220 20 100 0 +11113 ctx_lh_G_front_inf-Orbital 140 60 60 0 +11114 ctx_lh_G_front_inf-Triangul 180 220 140 0 +11115 ctx_lh_G_front_middle 140 100 180 0 +11116 ctx_lh_G_front_sup 180 20 140 0 +11117 ctx_lh_G_Ins_lg_and_S_cent_ins 23 10 10 0 +11118 ctx_lh_G_insular_short 225 140 140 0 +11119 ctx_lh_G_occipital_middle 180 60 180 0 +11120 ctx_lh_G_occipital_sup 20 220 60 0 +11121 ctx_lh_G_oc-temp_lat-fusifor 60 20 140 0 +11122 ctx_lh_G_oc-temp_med-Lingual 220 180 140 0 +11123 ctx_lh_G_oc-temp_med-Parahip 65 100 20 0 +11124 ctx_lh_G_orbital 220 60 20 0 +11125 ctx_lh_G_pariet_inf-Angular 20 60 220 0 +11126 ctx_lh_G_pariet_inf-Supramar 100 100 60 0 +11127 ctx_lh_G_parietal_sup 220 180 220 0 +11128 ctx_lh_G_postcentral 20 180 140 0 +11129 ctx_lh_G_precentral 60 140 180 0 +11130 ctx_lh_G_precuneus 25 20 140 0 +11131 ctx_lh_G_rectus 20 60 100 0 +11132 ctx_lh_G_subcallosal 60 220 20 0 +11133 ctx_lh_G_temp_sup-G_T_transv 60 60 220 0 +11134 ctx_lh_G_temp_sup-Lateral 220 60 220 0 +11135 ctx_lh_G_temp_sup-Plan_polar 65 220 60 0 +11136 ctx_lh_G_temp_sup-Plan_tempo 25 140 20 0 +11137 ctx_lh_G_temporal_inf 220 220 100 0 +11138 ctx_lh_G_temporal_middle 180 60 60 0 +11139 ctx_lh_Lat_Fis-ant-Horizont 61 20 220 0 +11140 ctx_lh_Lat_Fis-ant-Vertical 61 20 60 0 +11141 ctx_lh_Lat_Fis-post 61 60 100 0 +11142 ctx_lh_Medial_wall 25 25 25 0 +11143 ctx_lh_Pole_occipital 140 20 60 0 +11144 ctx_lh_Pole_temporal 220 180 20 0 +11145 ctx_lh_S_calcarine 63 180 180 0 +11146 ctx_lh_S_central 221 20 10 0 +11147 ctx_lh_S_cingul-Marginalis 221 20 100 0 +11148 ctx_lh_S_circular_insula_ant 221 60 140 0 +11149 ctx_lh_S_circular_insula_inf 221 20 220 0 +11150 ctx_lh_S_circular_insula_sup 61 220 220 0 +11151 ctx_lh_S_collat_transv_ant 100 200 200 0 +11152 ctx_lh_S_collat_transv_post 10 200 200 0 +11153 ctx_lh_S_front_inf 221 220 20 0 +11154 ctx_lh_S_front_middle 141 20 100 0 +11155 
ctx_lh_S_front_sup 61 220 100 0 +11156 ctx_lh_S_interm_prim-Jensen 141 60 20 0 +11157 ctx_lh_S_intrapariet_and_P_trans 143 20 220 0 +11158 ctx_lh_S_oc_middle_and_Lunatus 101 60 220 0 +11159 ctx_lh_S_oc_sup_and_transversal 21 20 140 0 +11160 ctx_lh_S_occipital_ant 61 20 180 0 +11161 ctx_lh_S_oc-temp_lat 221 140 20 0 +11162 ctx_lh_S_oc-temp_med_and_Lingual 141 100 220 0 +11163 ctx_lh_S_orbital_lateral 221 100 20 0 +11164 ctx_lh_S_orbital_med-olfact 181 200 20 0 +11165 ctx_lh_S_orbital-H_Shaped 101 20 20 0 +11166 ctx_lh_S_parieto_occipital 101 100 180 0 +11167 ctx_lh_S_pericallosal 181 220 20 0 +11168 ctx_lh_S_postcentral 21 140 200 0 +11169 ctx_lh_S_precentral-inf-part 21 20 240 0 +11170 ctx_lh_S_precentral-sup-part 21 20 200 0 +11171 ctx_lh_S_suborbital 21 20 60 0 +11172 ctx_lh_S_subparietal 101 60 60 0 +11173 ctx_lh_S_temporal_inf 21 180 180 0 +11174 ctx_lh_S_temporal_sup 223 220 60 0 +11175 ctx_lh_S_temporal_transverse 221 60 60 0 + +12100 ctx_rh_Unknown 0 0 0 0 +12101 ctx_rh_G_and_S_frontomargin 23 220 60 0 +12102 ctx_rh_G_and_S_occipital_inf 23 60 180 0 +12103 ctx_rh_G_and_S_paracentral 63 100 60 0 +12104 ctx_rh_G_and_S_subcentral 63 20 220 0 +12105 ctx_rh_G_and_S_transv_frontopol 13 0 250 0 +12106 ctx_rh_G_and_S_cingul-Ant 26 60 0 0 +12107 ctx_rh_G_and_S_cingul-Mid-Ant 26 60 75 0 +12108 ctx_rh_G_and_S_cingul-Mid-Post 26 60 150 0 +12109 ctx_rh_G_cingul-Post-dorsal 25 60 250 0 +12110 ctx_rh_G_cingul-Post-ventral 60 25 25 0 +12111 ctx_rh_G_cuneus 180 20 20 0 +12112 ctx_rh_G_front_inf-Opercular 220 20 100 0 +12113 ctx_rh_G_front_inf-Orbital 140 60 60 0 +12114 ctx_rh_G_front_inf-Triangul 180 220 140 0 +12115 ctx_rh_G_front_middle 140 100 180 0 +12116 ctx_rh_G_front_sup 180 20 140 0 +12117 ctx_rh_G_Ins_lg_and_S_cent_ins 23 10 10 0 +12118 ctx_rh_G_insular_short 225 140 140 0 +12119 ctx_rh_G_occipital_middle 180 60 180 0 +12120 ctx_rh_G_occipital_sup 20 220 60 0 +12121 ctx_rh_G_oc-temp_lat-fusifor 60 20 140 0 +12122 ctx_rh_G_oc-temp_med-Lingual 220 180 140 0 +12123 ctx_rh_G_oc-temp_med-Parahip 65 100 20 0 +12124 ctx_rh_G_orbital 220 60 20 0 +12125 ctx_rh_G_pariet_inf-Angular 20 60 220 0 +12126 ctx_rh_G_pariet_inf-Supramar 100 100 60 0 +12127 ctx_rh_G_parietal_sup 220 180 220 0 +12128 ctx_rh_G_postcentral 20 180 140 0 +12129 ctx_rh_G_precentral 60 140 180 0 +12130 ctx_rh_G_precuneus 25 20 140 0 +12131 ctx_rh_G_rectus 20 60 100 0 +12132 ctx_rh_G_subcallosal 60 220 20 0 +12133 ctx_rh_G_temp_sup-G_T_transv 60 60 220 0 +12134 ctx_rh_G_temp_sup-Lateral 220 60 220 0 +12135 ctx_rh_G_temp_sup-Plan_polar 65 220 60 0 +12136 ctx_rh_G_temp_sup-Plan_tempo 25 140 20 0 +12137 ctx_rh_G_temporal_inf 220 220 100 0 +12138 ctx_rh_G_temporal_middle 180 60 60 0 +12139 ctx_rh_Lat_Fis-ant-Horizont 61 20 220 0 +12140 ctx_rh_Lat_Fis-ant-Vertical 61 20 60 0 +12141 ctx_rh_Lat_Fis-post 61 60 100 0 +12142 ctx_rh_Medial_wall 25 25 25 0 +12143 ctx_rh_Pole_occipital 140 20 60 0 +12144 ctx_rh_Pole_temporal 220 180 20 0 +12145 ctx_rh_S_calcarine 63 180 180 0 +12146 ctx_rh_S_central 221 20 10 0 +12147 ctx_rh_S_cingul-Marginalis 221 20 100 0 +12148 ctx_rh_S_circular_insula_ant 221 60 140 0 +12149 ctx_rh_S_circular_insula_inf 221 20 220 0 +12150 ctx_rh_S_circular_insula_sup 61 220 220 0 +12151 ctx_rh_S_collat_transv_ant 100 200 200 0 +12152 ctx_rh_S_collat_transv_post 10 200 200 0 +12153 ctx_rh_S_front_inf 221 220 20 0 +12154 ctx_rh_S_front_middle 141 20 100 0 +12155 ctx_rh_S_front_sup 61 220 100 0 +12156 ctx_rh_S_interm_prim-Jensen 141 60 20 0 +12157 ctx_rh_S_intrapariet_and_P_trans 143 20 220 0 +12158 
ctx_rh_S_oc_middle_and_Lunatus 101 60 220 0 +12159 ctx_rh_S_oc_sup_and_transversal 21 20 140 0 +12160 ctx_rh_S_occipital_ant 61 20 180 0 +12161 ctx_rh_S_oc-temp_lat 221 140 20 0 +12162 ctx_rh_S_oc-temp_med_and_Lingual 141 100 220 0 +12163 ctx_rh_S_orbital_lateral 221 100 20 0 +12164 ctx_rh_S_orbital_med-olfact 181 200 20 0 +12165 ctx_rh_S_orbital-H_Shaped 101 20 20 0 +12166 ctx_rh_S_parieto_occipital 101 100 180 0 +12167 ctx_rh_S_pericallosal 181 220 20 0 +12168 ctx_rh_S_postcentral 21 140 200 0 +12169 ctx_rh_S_precentral-inf-part 21 20 240 0 +12170 ctx_rh_S_precentral-sup-part 21 20 200 0 +12171 ctx_rh_S_suborbital 21 20 60 0 +12172 ctx_rh_S_subparietal 101 60 60 0 +12173 ctx_rh_S_temporal_inf 21 180 180 0 +12174 ctx_rh_S_temporal_sup 223 220 60 0 +12175 ctx_rh_S_temporal_transverse 221 60 60 0 + +#No. Label Name: R G B A +13100 wm_lh_Unknown 0 0 0 0 +13101 wm_lh_G_and_S_frontomargin 23 220 60 0 +13102 wm_lh_G_and_S_occipital_inf 23 60 180 0 +13103 wm_lh_G_and_S_paracentral 63 100 60 0 +13104 wm_lh_G_and_S_subcentral 63 20 220 0 +13105 wm_lh_G_and_S_transv_frontopol 13 0 250 0 +13106 wm_lh_G_and_S_cingul-Ant 26 60 0 0 +13107 wm_lh_G_and_S_cingul-Mid-Ant 26 60 75 0 +13108 wm_lh_G_and_S_cingul-Mid-Post 26 60 150 0 +13109 wm_lh_G_cingul-Post-dorsal 25 60 250 0 +13110 wm_lh_G_cingul-Post-ventral 60 25 25 0 +13111 wm_lh_G_cuneus 180 20 20 0 +13112 wm_lh_G_front_inf-Opercular 220 20 100 0 +13113 wm_lh_G_front_inf-Orbital 140 60 60 0 +13114 wm_lh_G_front_inf-Triangul 180 220 140 0 +13115 wm_lh_G_front_middle 140 100 180 0 +13116 wm_lh_G_front_sup 180 20 140 0 +13117 wm_lh_G_Ins_lg_and_S_cent_ins 23 10 10 0 +13118 wm_lh_G_insular_short 225 140 140 0 +13119 wm_lh_G_occipital_middle 180 60 180 0 +13120 wm_lh_G_occipital_sup 20 220 60 0 +13121 wm_lh_G_oc-temp_lat-fusifor 60 20 140 0 +13122 wm_lh_G_oc-temp_med-Lingual 220 180 140 0 +13123 wm_lh_G_oc-temp_med-Parahip 65 100 20 0 +13124 wm_lh_G_orbital 220 60 20 0 +13125 wm_lh_G_pariet_inf-Angular 20 60 220 0 +13126 wm_lh_G_pariet_inf-Supramar 100 100 60 0 +13127 wm_lh_G_parietal_sup 220 180 220 0 +13128 wm_lh_G_postcentral 20 180 140 0 +13129 wm_lh_G_precentral 60 140 180 0 +13130 wm_lh_G_precuneus 25 20 140 0 +13131 wm_lh_G_rectus 20 60 100 0 +13132 wm_lh_G_subcallosal 60 220 20 0 +13133 wm_lh_G_temp_sup-G_T_transv 60 60 220 0 +13134 wm_lh_G_temp_sup-Lateral 220 60 220 0 +13135 wm_lh_G_temp_sup-Plan_polar 65 220 60 0 +13136 wm_lh_G_temp_sup-Plan_tempo 25 140 20 0 +13137 wm_lh_G_temporal_inf 220 220 100 0 +13138 wm_lh_G_temporal_middle 180 60 60 0 +13139 wm_lh_Lat_Fis-ant-Horizont 61 20 220 0 +13140 wm_lh_Lat_Fis-ant-Vertical 61 20 60 0 +13141 wm_lh_Lat_Fis-post 61 60 100 0 +13142 wm_lh_Medial_wall 25 25 25 0 +13143 wm_lh_Pole_occipital 140 20 60 0 +13144 wm_lh_Pole_temporal 220 180 20 0 +13145 wm_lh_S_calcarine 63 180 180 0 +13146 wm_lh_S_central 221 20 10 0 +13147 wm_lh_S_cingul-Marginalis 221 20 100 0 +13148 wm_lh_S_circular_insula_ant 221 60 140 0 +13149 wm_lh_S_circular_insula_inf 221 20 220 0 +13150 wm_lh_S_circular_insula_sup 61 220 220 0 +13151 wm_lh_S_collat_transv_ant 100 200 200 0 +13152 wm_lh_S_collat_transv_post 10 200 200 0 +13153 wm_lh_S_front_inf 221 220 20 0 +13154 wm_lh_S_front_middle 141 20 100 0 +13155 wm_lh_S_front_sup 61 220 100 0 +13156 wm_lh_S_interm_prim-Jensen 141 60 20 0 +13157 wm_lh_S_intrapariet_and_P_trans 143 20 220 0 +13158 wm_lh_S_oc_middle_and_Lunatus 101 60 220 0 +13159 wm_lh_S_oc_sup_and_transversal 21 20 140 0 +13160 wm_lh_S_occipital_ant 61 20 180 0 +13161 wm_lh_S_oc-temp_lat 221 140 20 0 +13162 
wm_lh_S_oc-temp_med_and_Lingual 141 100 220 0 +13163 wm_lh_S_orbital_lateral 221 100 20 0 +13164 wm_lh_S_orbital_med-olfact 181 200 20 0 +13165 wm_lh_S_orbital-H_Shaped 101 20 20 0 +13166 wm_lh_S_parieto_occipital 101 100 180 0 +13167 wm_lh_S_pericallosal 181 220 20 0 +13168 wm_lh_S_postcentral 21 140 200 0 +13169 wm_lh_S_precentral-inf-part 21 20 240 0 +13170 wm_lh_S_precentral-sup-part 21 20 200 0 +13171 wm_lh_S_suborbital 21 20 60 0 +13172 wm_lh_S_subparietal 101 60 60 0 +13173 wm_lh_S_temporal_inf 21 180 180 0 +13174 wm_lh_S_temporal_sup 223 220 60 0 +13175 wm_lh_S_temporal_transverse 221 60 60 0 + +14100 wm_rh_Unknown 0 0 0 0 +14101 wm_rh_G_and_S_frontomargin 23 220 60 0 +14102 wm_rh_G_and_S_occipital_inf 23 60 180 0 +14103 wm_rh_G_and_S_paracentral 63 100 60 0 +14104 wm_rh_G_and_S_subcentral 63 20 220 0 +14105 wm_rh_G_and_S_transv_frontopol 13 0 250 0 +14106 wm_rh_G_and_S_cingul-Ant 26 60 0 0 +14107 wm_rh_G_and_S_cingul-Mid-Ant 26 60 75 0 +14108 wm_rh_G_and_S_cingul-Mid-Post 26 60 150 0 +14109 wm_rh_G_cingul-Post-dorsal 25 60 250 0 +14110 wm_rh_G_cingul-Post-ventral 60 25 25 0 +14111 wm_rh_G_cuneus 180 20 20 0 +14112 wm_rh_G_front_inf-Opercular 220 20 100 0 +14113 wm_rh_G_front_inf-Orbital 140 60 60 0 +14114 wm_rh_G_front_inf-Triangul 180 220 140 0 +14115 wm_rh_G_front_middle 140 100 180 0 +14116 wm_rh_G_front_sup 180 20 140 0 +14117 wm_rh_G_Ins_lg_and_S_cent_ins 23 10 10 0 +14118 wm_rh_G_insular_short 225 140 140 0 +14119 wm_rh_G_occipital_middle 180 60 180 0 +14120 wm_rh_G_occipital_sup 20 220 60 0 +14121 wm_rh_G_oc-temp_lat-fusifor 60 20 140 0 +14122 wm_rh_G_oc-temp_med-Lingual 220 180 140 0 +14123 wm_rh_G_oc-temp_med-Parahip 65 100 20 0 +14124 wm_rh_G_orbital 220 60 20 0 +14125 wm_rh_G_pariet_inf-Angular 20 60 220 0 +14126 wm_rh_G_pariet_inf-Supramar 100 100 60 0 +14127 wm_rh_G_parietal_sup 220 180 220 0 +14128 wm_rh_G_postcentral 20 180 140 0 +14129 wm_rh_G_precentral 60 140 180 0 +14130 wm_rh_G_precuneus 25 20 140 0 +14131 wm_rh_G_rectus 20 60 100 0 +14132 wm_rh_G_subcallosal 60 220 20 0 +14133 wm_rh_G_temp_sup-G_T_transv 60 60 220 0 +14134 wm_rh_G_temp_sup-Lateral 220 60 220 0 +14135 wm_rh_G_temp_sup-Plan_polar 65 220 60 0 +14136 wm_rh_G_temp_sup-Plan_tempo 25 140 20 0 +14137 wm_rh_G_temporal_inf 220 220 100 0 +14138 wm_rh_G_temporal_middle 180 60 60 0 +14139 wm_rh_Lat_Fis-ant-Horizont 61 20 220 0 +14140 wm_rh_Lat_Fis-ant-Vertical 61 20 60 0 +14141 wm_rh_Lat_Fis-post 61 60 100 0 +14142 wm_rh_Medial_wall 25 25 25 0 +14143 wm_rh_Pole_occipital 140 20 60 0 +14144 wm_rh_Pole_temporal 220 180 20 0 +14145 wm_rh_S_calcarine 63 180 180 0 +14146 wm_rh_S_central 221 20 10 0 +14147 wm_rh_S_cingul-Marginalis 221 20 100 0 +14148 wm_rh_S_circular_insula_ant 221 60 140 0 +14149 wm_rh_S_circular_insula_inf 221 20 220 0 +14150 wm_rh_S_circular_insula_sup 61 220 220 0 +14151 wm_rh_S_collat_transv_ant 100 200 200 0 +14152 wm_rh_S_collat_transv_post 10 200 200 0 +14153 wm_rh_S_front_inf 221 220 20 0 +14154 wm_rh_S_front_middle 141 20 100 0 +14155 wm_rh_S_front_sup 61 220 100 0 +14156 wm_rh_S_interm_prim-Jensen 141 60 20 0 +14157 wm_rh_S_intrapariet_and_P_trans 143 20 220 0 +14158 wm_rh_S_oc_middle_and_Lunatus 101 60 220 0 +14159 wm_rh_S_oc_sup_and_transversal 21 20 140 0 +14160 wm_rh_S_occipital_ant 61 20 180 0 +14161 wm_rh_S_oc-temp_lat 221 140 20 0 +14162 wm_rh_S_oc-temp_med_and_Lingual 141 100 220 0 +14163 wm_rh_S_orbital_lateral 221 100 20 0 +14164 wm_rh_S_orbital_med-olfact 181 200 20 0 +14165 wm_rh_S_orbital-H_Shaped 101 20 20 0 +14166 wm_rh_S_parieto_occipital 101 100 180 0 +14167 
wm_rh_S_pericallosal 181 220 20 0 +14168 wm_rh_S_postcentral 21 140 200 0 +14169 wm_rh_S_precentral-inf-part 21 20 240 0 +14170 wm_rh_S_precentral-sup-part 21 20 200 0 +14171 wm_rh_S_suborbital 21 20 60 0 +14172 wm_rh_S_subparietal 101 60 60 0 +14173 wm_rh_S_temporal_inf 21 180 180 0 +14174 wm_rh_S_temporal_sup 223 220 60 0 +14175 wm_rh_S_temporal_transverse 221 60 60 0 + diff --git a/mne/data/__init__.py b/mne/data/__init__.py new file mode 100644 index 0000000..a48c2d6 --- /dev/null +++ b/mne/data/__init__.py @@ -0,0 +1,5 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""MNE-Python data.""" diff --git a/mne/data/coil_def.dat b/mne/data/coil_def.dat new file mode 100644 index 0000000..e3f7ebc --- /dev/null +++ b/mne/data/coil_def.dat @@ -0,0 +1,776 @@ +# +# MEG coil definition file +# +# Copyright 2005 - 2019 +# +# Matti Hamalainen +# Athinoula A. Martinos Center for Biomedical Imaging +# Charlestown, MA, USA +# +# +# "" +# +# struct class id accuracy num_points size baseline description +# format '%d %d %d %d %e %e %s' +# +# +# +# struct w x y z nx ny nz +# format '%f %e %e %e %e %e %e' +# +# .... +# +# +# +# 1 magnetometer +# 2 axial gradiometer +# 3 planar gradiometer +# 4 axial second-order gradiometer +# +# 0 point approximation +# 1 normal +# 2 accurate +# +# Produced with: +# +# mne_list_coil_def version 1.14 compiled at May 15 2021 07:58:54 +# +3 2 0 2 2.789e-02 1.620e-02 "Neuromag-122 planar gradiometer size = 27.89 mm base = 16.20 mm" + 61.7284 8.100e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +-61.7284 -8.100e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +3 2 1 2 2.789e-02 1.620e-02 "Neuromag-122 planar gradiometer size = 27.89 mm base = 16.20 mm" + 61.7284 8.100e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +-61.7284 -8.100e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +3 2 2 8 2.789e-02 1.620e-02 "Neuromag-122 planar gradiometer size = 27.89 mm base = 16.20 mm" + 15.1057 1.111e-02 7.680e-03 0.000e+00 0.000 0.000 1.000 + 15.1057 5.440e-03 7.680e-03 0.000e+00 0.000 0.000 1.000 + 15.1057 5.440e-03 -7.680e-03 0.000e+00 0.000 0.000 1.000 + 15.1057 1.111e-02 -7.680e-03 0.000e+00 0.000 0.000 1.000 +-15.1057 -1.111e-02 7.680e-03 0.000e+00 0.000 0.000 1.000 +-15.1057 -5.440e-03 7.680e-03 0.000e+00 0.000 0.000 1.000 +-15.1057 -5.440e-03 -7.680e-03 0.000e+00 0.000 0.000 1.000 +-15.1057 -1.111e-02 -7.680e-03 0.000e+00 0.000 0.000 1.000 +1 2000 0 1 0.000e+00 0.000e+00 "Point magnetometer" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 2000 1 1 0.000e+00 0.000e+00 "Point magnetometer" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 2000 2 1 0.000e+00 0.000e+00 "Point magnetometer" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +3 3012 0 2 2.639e-02 1.680e-02 "Vectorview planar gradiometer T1 size = 26.39 mm base = 16.80 mm" + 59.5238 8.400e-03 0.000e+00 3.000e-04 0.000 0.000 1.000 +-59.5238 -8.400e-03 0.000e+00 3.000e-04 0.000 0.000 1.000 +3 3012 1 4 2.639e-02 1.680e-02 "Vectorview planar gradiometer T1 size = 26.39 mm base = 16.80 mm" + 29.7619 8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 29.7619 8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3012 2 8 2.639e-02 1.680e-02 "Vectorview planar gradiometer T1 size = 26.39 mm base = 16.80 mm" + 14.9858 1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 
-6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3013 0 2 2.639e-02 1.680e-02 "Vectorview planar gradiometer T2 size = 26.39 mm base = 16.80 mm" + 59.5238 8.400e-03 0.000e+00 3.000e-04 0.000 0.000 1.000 +-59.5238 -8.400e-03 0.000e+00 3.000e-04 0.000 0.000 1.000 +3 3013 1 4 2.639e-02 1.680e-02 "Vectorview planar gradiometer T2 size = 26.39 mm base = 16.80 mm" + 29.7619 8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 29.7619 8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3013 2 8 2.639e-02 1.680e-02 "Vectorview planar gradiometer T2 size = 26.39 mm base = 16.80 mm" + 14.9858 1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3014 0 2 2.639e-02 1.680e-02 "Vectorview planar gradiometer T3 size = 26.39 mm base = 16.80 mm" + 59.5238 8.400e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +-59.5238 -8.400e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +3 3014 1 4 2.639e-02 1.680e-02 "Vectorview planar gradiometer T3 size = 26.39 mm base = 16.80 mm" + 29.7619 8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 29.7619 8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3014 2 8 2.639e-02 1.680e-02 "Vectorview planar gradiometer T3 size = 26.39 mm base = 16.80 mm" + 14.9858 1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3015 0 2 2.639e-02 1.680e-02 "Vectorview planar gradiometer T4 (MEG-MRI) size = 26.39 mm base = 16.80 mm" + 59.5238 8.400e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +-59.5238 -8.400e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +3 3015 1 4 2.639e-02 1.680e-02 "Vectorview planar gradiometer T4 (MEG-MRI) size = 26.39 mm base = 16.80 mm" + 29.7619 8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 29.7619 8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-29.7619 -8.400e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +3 3015 2 8 2.639e-02 1.680e-02 "Vectorview planar gradiometer T4 (MEG-MRI) size = 26.39 mm base = 16.80 mm" + 14.9858 1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 + 14.9858 1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 6.713e-03 3.000e-04 0.000 0.000 1.000 
+-14.9858 -5.891e-03 6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -5.891e-03 -6.713e-03 3.000e-04 0.000 0.000 1.000 +-14.9858 -1.079e-02 -6.713e-03 3.000e-04 0.000 0.000 1.000 +1 3022 0 1 2.580e-02 0.000e+00 "Vectorview magnetometer T1 size = 25.80 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 3022 1 4 2.580e-02 0.000e+00 "Vectorview magnetometer T1 size = 25.80 mm" + 0.2500 -6.450e-03 -6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 -6.450e-03 6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 6.450e-03 -6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 6.450e-03 6.450e-03 3.000e-04 0.000 0.000 1.000 +1 3022 2 16 2.580e-02 0.000e+00 "Vectorview magnetometer T1 size = 25.80 mm" + 0.0625 -9.675e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 +1 3023 0 1 2.580e-02 0.000e+00 "Vectorview magnetometer T2 size = 25.80 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 3023 1 4 2.580e-02 0.000e+00 "Vectorview magnetometer T2 size = 25.80 mm" + 0.2500 -6.450e-03 -6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 -6.450e-03 6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 6.450e-03 -6.450e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 6.450e-03 6.450e-03 3.000e-04 0.000 0.000 1.000 +1 3023 2 16 2.580e-02 0.000e+00 "Vectorview magnetometer T2 size = 25.80 mm" + 0.0625 -9.675e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -9.675e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.225e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.225e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 -9.675e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 -3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 3.225e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 9.675e-03 9.675e-03 3.000e-04 0.000 0.000 1.000 +1 3024 0 1 2.100e-02 0.000e+00 "Vectorview magnetometer T3 size = 21.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 3024 1 4 2.100e-02 0.000e+00 "Vectorview magnetometer T3 size = 21.00 mm" + 0.2500 -5.250e-03 -5.250e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 -5.250e-03 5.250e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 5.250e-03 -5.250e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 5.250e-03 5.250e-03 3.000e-04 0.000 0.000 1.000 +1 3024 2 16 2.100e-02 0.000e+00 
"Vectorview magnetometer T3 size = 21.00 mm" + 0.0625 -7.875e-03 -7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -7.875e-03 -2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -7.875e-03 2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -7.875e-03 7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -2.625e-03 -7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -2.625e-03 -2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -2.625e-03 2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -2.625e-03 7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 2.625e-03 -7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 2.625e-03 -2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 2.625e-03 2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 2.625e-03 7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 7.875e-03 -7.875e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 7.875e-03 -2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 7.875e-03 2.625e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 7.875e-03 7.875e-03 3.000e-04 0.000 0.000 1.000 +1 3025 0 1 2.800e-02 0.000e+00 "Vectorview magnetometer T4 (MEG-MRI) size = 28.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 3025 1 4 2.800e-02 0.000e+00 "Vectorview magnetometer T4 (MEG-MRI) size = 28.00 mm" + 0.2500 -7.000e-03 -7.000e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 -7.000e-03 7.000e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 7.000e-03 -7.000e-03 3.000e-04 0.000 0.000 1.000 + 0.2500 7.000e-03 7.000e-03 3.000e-04 0.000 0.000 1.000 +1 3025 2 16 2.800e-02 0.000e+00 "Vectorview magnetometer T4 (MEG-MRI) size = 28.00 mm" + 0.0625 -1.050e-02 -1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 -1.050e-02 -3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -1.050e-02 3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -1.050e-02 1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.500e-03 -1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.500e-03 -3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.500e-03 3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 -3.500e-03 1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 3.500e-03 -1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 3.500e-03 -3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.500e-03 3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 3.500e-03 1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 1.050e-02 -1.050e-02 3.000e-04 0.000 0.000 1.000 + 0.0625 1.050e-02 -3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 1.050e-02 3.500e-03 3.000e-04 0.000 0.000 1.000 + 0.0625 1.050e-02 1.050e-02 3.000e-04 0.000 0.000 1.000 +1 4001 0 1 2.300e-02 0.000e+00 "Magnes WH2500 magnetometer size = 23.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 4001 1 4 2.300e-02 0.000e+00 "Magnes WH2500 magnetometer size = 23.00 mm" + 0.2500 5.750e-03 5.750e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.750e-03 5.750e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.750e-03 -5.750e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 5.750e-03 -5.750e-03 0.000e+00 0.000 0.000 1.000 +1 4001 2 7 2.300e-02 0.000e+00 "Magnes WH2500 magnetometer size = 23.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 9.390e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -9.390e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.695e-03 8.132e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.695e-03 -8.132e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.695e-03 8.132e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.695e-03 -8.132e-03 0.000e+00 0.000 0.000 1.000 +2 4002 0 2 1.800e-02 5.000e-02 "Magnes WH3600 gradiometer size = 18.00 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 
5.000e-02 0.000 0.000 1.000 +2 4002 1 8 1.800e-02 5.000e-02 "Magnes WH3600 gradiometer size = 18.00 mm base = 50.00 mm" + 0.2500 4.500e-03 4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.500e-03 4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.500e-03 -4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.500e-03 -4.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 4.500e-03 4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -4.500e-03 4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -4.500e-03 -4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 4.500e-03 -4.500e-03 5.000e-02 0.000 0.000 1.000 +2 4002 2 14 1.800e-02 5.000e-02 "Magnes WH3600 gradiometer size = 18.00 mm base = 50.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 7.348e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -7.348e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 3.674e-03 6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 3.674e-03 -6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.674e-03 6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.674e-03 -6.364e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 7.348e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 -7.348e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 3.674e-03 6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 3.674e-03 -6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.674e-03 6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.674e-03 -6.364e-03 5.000e-02 0.000 0.000 1.000 +1 4003 0 1 3.000e-02 0.000e+00 "Magnes reference magnetometer size = 30.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 4003 1 4 3.000e-02 0.000e+00 "Magnes reference magnetometer size = 30.00 mm" + 0.2500 7.500e-03 7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -7.500e-03 7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -7.500e-03 -7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 7.500e-03 -7.500e-03 0.000e+00 0.000 0.000 1.000 +1 4003 2 4 3.000e-02 0.000e+00 "Magnes reference magnetometer size = 30.00 mm" + 0.2500 7.500e-03 7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -7.500e-03 7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -7.500e-03 -7.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 7.500e-03 -7.500e-03 0.000e+00 0.000 0.000 1.000 +2 4004 0 2 8.000e-02 1.350e-01 "Magnes reference gradiometer (diag) size = 80.00 mm base = 135.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 1.350e-01 0.000 0.000 1.000 +2 4004 1 8 8.000e-02 1.350e-01 "Magnes reference gradiometer (diag) size = 80.00 mm base = 135.00 mm" + 0.2500 2.000e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.000e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.000e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 2.000e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 2.000e-02 2.000e-02 1.350e-01 0.000 0.000 1.000 + -0.2500 -2.000e-02 2.000e-02 1.350e-01 0.000 0.000 1.000 + -0.2500 -2.000e-02 -2.000e-02 1.350e-01 0.000 0.000 1.000 + -0.2500 2.000e-02 -2.000e-02 1.350e-01 0.000 0.000 1.000 +2 4004 2 8 8.000e-02 1.350e-01 "Magnes reference gradiometer (diag) size = 80.00 mm base = 135.00 mm" + 0.2500 2.000e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.000e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.000e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 2.000e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 2.000e-02 2.000e-02 1.350e-01 0.000 0.000 1.000 + -0.2500 -2.000e-02 2.000e-02 1.350e-01 0.000 0.000 1.000 + -0.2500 -2.000e-02 -2.000e-02 1.350e-01 0.000 0.000 
1.000 + -0.2500 2.000e-02 -2.000e-02 1.350e-01 0.000 0.000 1.000 +2 4005 0 2 8.000e-02 1.350e-01 "Magnes reference gradiometer (offdiag) size = 80.00 mm base = 135.00 mm" + 1.0000 6.750e-02 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 -6.750e-02 0.000e+00 0.000e+00 0.000 0.000 1.000 +2 4005 1 8 8.000e-02 1.350e-01 "Magnes reference gradiometer (offdiag) size = 80.00 mm base = 135.00 mm" + 0.2500 8.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 4.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 4.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 8.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -8.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -8.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 +2 4005 2 8 8.000e-02 1.350e-01 "Magnes reference gradiometer (offdiag) size = 80.00 mm base = 135.00 mm" + 0.2500 8.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 4.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 4.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + 0.2500 8.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -8.750e-02 2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -8.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.750e-02 -2.000e-02 0.000e+00 0.000 0.000 1.000 +2 5001 0 2 1.800e-02 5.000e-02 "CTF axial gradiometer size = 18.00 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 5001 1 8 1.800e-02 5.000e-02 "CTF axial gradiometer size = 18.00 mm base = 50.00 mm" + 0.2500 4.500e-03 4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.500e-03 4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.500e-03 -4.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.500e-03 -4.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 4.500e-03 4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -4.500e-03 4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -4.500e-03 -4.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 4.500e-03 -4.500e-03 5.000e-02 0.000 0.000 1.000 +2 5001 2 14 1.800e-02 5.000e-02 "CTF axial gradiometer size = 18.00 mm base = 50.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 7.348e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -7.348e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 3.674e-03 6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 3.674e-03 -6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.674e-03 6.364e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.674e-03 -6.364e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 7.348e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 -7.348e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 3.674e-03 6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 3.674e-03 -6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.674e-03 6.364e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.674e-03 -6.364e-03 5.000e-02 0.000 0.000 1.000 +1 5002 0 1 1.600e-02 0.000e+00 "CTF reference magnetometer size = 16.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 5002 1 4 1.600e-02 0.000e+00 "CTF reference magnetometer size = 16.00 mm" + 0.2500 4.000e-03 4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.000e-03 4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.000e-03 -4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.000e-03 -4.000e-03 0.000e+00 0.000 0.000 1.000 +1 5002 2 4 
1.600e-02 0.000e+00 "CTF reference magnetometer size = 16.00 mm" + 0.2500 4.000e-03 4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.000e-03 4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -4.000e-03 -4.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.000e-03 -4.000e-03 0.000e+00 0.000 0.000 1.000 +2 5003 0 2 3.440e-02 7.860e-02 "CTF reference gradiometer (diag) size = 34.40 mm base = 78.60 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 7.860e-02 0.000 0.000 1.000 +2 5003 1 8 3.440e-02 7.860e-02 "CTF reference gradiometer (diag) size = 34.40 mm base = 78.60 mm" + 0.2500 8.600e-03 8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -8.600e-03 8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -8.600e-03 -8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 8.600e-03 -8.600e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 8.600e-03 8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 -8.600e-03 8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 -8.600e-03 -8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 8.600e-03 -8.600e-03 7.860e-02 0.000 0.000 1.000 +2 5003 2 8 3.440e-02 7.860e-02 "CTF reference gradiometer (diag) size = 34.40 mm base = 78.60 mm" + 0.2500 8.600e-03 8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -8.600e-03 8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -8.600e-03 -8.600e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 8.600e-03 -8.600e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 8.600e-03 8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 -8.600e-03 8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 -8.600e-03 -8.600e-03 7.860e-02 0.000 0.000 1.000 + -0.2500 8.600e-03 -8.600e-03 7.860e-02 0.000 0.000 1.000 +2 5004 0 2 3.440e-02 7.860e-02 "CTF reference gradiometer (offdiag) size = 34.40 mm base = 78.60 mm" + 1.0000 3.930e-02 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 -3.930e-02 0.000e+00 0.000e+00 0.000 0.000 1.000 +2 5004 1 8 3.440e-02 7.860e-02 "CTF reference gradiometer (offdiag) size = 34.40 mm base = 78.60 mm" + 0.2500 4.780e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.080e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.080e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.780e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -3.080e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.780e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.780e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -3.080e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 +2 5004 2 8 3.440e-02 7.860e-02 "CTF reference gradiometer (offdiag) size = 34.40 mm base = 78.60 mm" + 0.2500 4.780e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.080e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.080e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 4.780e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -3.080e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.780e-02 8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -4.780e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 -3.080e-02 -8.500e-03 0.000e+00 0.000 0.000 1.000 +2 6001 0 2 1.550e-02 5.000e-02 "MIT KIT system gradiometer size = 15.50 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 6001 1 8 1.550e-02 5.000e-02 "MIT KIT system gradiometer size = 15.50 mm base = 50.00 mm" + 0.2500 3.875e-03 3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.875e-03 3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.875e-03 -3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.875e-03 -3.875e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 
3.875e-03 3.875e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -3.875e-03 3.875e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -3.875e-03 -3.875e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 3.875e-03 -3.875e-03 5.000e-02 0.000 0.000 1.000 +2 6001 2 14 1.550e-02 5.000e-02 "MIT KIT system gradiometer size = 15.50 mm base = 50.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 6.328e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -6.328e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 3.164e-03 5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 3.164e-03 -5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.164e-03 5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.164e-03 -5.480e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 6.328e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 -6.328e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 3.164e-03 5.480e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 3.164e-03 -5.480e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.164e-03 5.480e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -3.164e-03 -5.480e-03 5.000e-02 0.000 0.000 1.000 +1 6002 0 1 1.550e-02 0.000e+00 "MIT KIT system reference magnetometer size = 15.50 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 6002 1 4 1.550e-02 0.000e+00 "MIT KIT system reference magnetometer size = 15.50 mm" + 0.2500 3.875e-03 3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.875e-03 3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.875e-03 -3.875e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.875e-03 -3.875e-03 0.000e+00 0.000 0.000 1.000 +1 6002 2 7 1.550e-02 0.000e+00 "MIT KIT system reference magnetometer size = 15.50 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 6.328e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -6.328e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 3.164e-03 5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 3.164e-03 -5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.164e-03 5.480e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.164e-03 -5.480e-03 0.000e+00 0.000 0.000 1.000 +2 7001 0 2 6.000e-03 5.000e-02 "BabySQUID system gradiometer size = 6.00 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 7001 1 2 6.000e-03 5.000e-02 "BabySQUID system gradiometer size = 6.00 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 7001 2 8 6.000e-03 5.000e-02 "BabySQUID system gradiometer size = 6.00 mm base = 50.00 mm" + 0.2500 1.500e-03 1.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -1.500e-03 1.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -1.500e-03 -1.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 1.500e-03 -1.500e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 1.500e-03 1.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -1.500e-03 1.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -1.500e-03 -1.500e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 1.500e-03 -1.500e-03 5.000e-02 0.000 0.000 1.000 +1 7002 0 1 1.000e-02 0.000e+00 "BabyMEG system magnetometer size = 10.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 7002 1 4 1.000e-02 0.000e+00 "BabyMEG system magnetometer size = 10.00 mm" + 0.2500 2.500e-03 2.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.500e-03 2.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -2.500e-03 -2.500e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 2.500e-03 -2.500e-03 0.000e+00 0.000 0.000 1.000 +1 7002 2 7 1.000e-02 0.000e+00 
"BabyMEG system magnetometer size = 10.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 2.041e-03 3.536e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 2.041e-03 -3.536e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -2.041e-03 3.536e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -2.041e-03 -3.536e-03 0.000e+00 0.000 0.000 1.000 +1 7003 0 1 2.000e-02 0.000e+00 "BabyMEG system compensation magnetometer size = 20.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 7003 1 4 2.000e-02 0.000e+00 "BabyMEG system compensation magnetometer size = 20.00 mm" + 0.2500 5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 +1 7003 2 7 2.000e-02 0.000e+00 "BabyMEG system compensation magnetometer size = 20.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 -7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 -7.071e-03 0.000e+00 0.000 0.000 1.000 +1 7004 0 1 2.000e-02 0.000e+00 "BabyMEG system reference magnetometer size = 20.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 7004 1 4 2.000e-02 0.000e+00 "BabyMEG system reference magnetometer size = 20.00 mm" + 0.2500 5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 +1 7004 2 7 2.000e-02 0.000e+00 "BabyMEG system reference magnetometer size = 20.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 -7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 -7.071e-03 0.000e+00 0.000 0.000 1.000 +2 9001 0 2 2.000e-02 5.000e-02 "KRISS system gradiometer size = 20.00 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 9001 1 8 2.000e-02 5.000e-02 "KRISS system gradiometer size = 20.00 mm base = 50.00 mm" + 0.2500 5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 5.000e-03 -5.000e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 5.000e-03 5.000e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -5.000e-03 5.000e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -5.000e-03 -5.000e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 5.000e-03 -5.000e-03 5.000e-02 0.000 0.000 1.000 +2 9001 2 14 2.000e-02 5.000e-02 "KRISS system gradiometer size = 20.00 mm base = 50.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -8.165e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.082e-03 -7.071e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 7.071e-03 
0.000e+00 0.000 0.000 1.000 + 0.1250 -4.082e-03 -7.071e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 8.165e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 -8.165e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 4.082e-03 7.071e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 4.082e-03 -7.071e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -4.082e-03 7.071e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -4.082e-03 -7.071e-03 5.000e-02 0.000 0.000 1.000 +2 7501 0 2 1.486e-02 5.740e-02 "Artemis system gradiometer size = 14.86 mm base = 57.40 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.740e-02 0.000 0.000 1.000 +2 7501 1 8 1.486e-02 5.740e-02 "Artemis system gradiometer size = 14.86 mm base = 57.40 mm" + 0.2500 3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 3.715e-03 3.715e-03 5.740e-02 0.000 0.000 1.000 + -0.2500 -3.715e-03 3.715e-03 5.740e-02 0.000 0.000 1.000 + -0.2500 -3.715e-03 -3.715e-03 5.740e-02 0.000 0.000 1.000 + -0.2500 3.715e-03 -3.715e-03 5.740e-02 0.000 0.000 1.000 +2 7501 2 14 1.486e-02 5.740e-02 "Artemis system gradiometer size = 14.86 mm base = 57.40 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 6.067e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -6.067e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 3.033e-03 5.254e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 3.033e-03 -5.254e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.033e-03 5.254e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -3.033e-03 -5.254e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.740e-02 0.000 0.000 1.000 + -0.1250 6.067e-03 0.000e+00 5.740e-02 0.000 0.000 1.000 + -0.1250 -6.067e-03 0.000e+00 5.740e-02 0.000 0.000 1.000 + -0.1250 3.033e-03 5.254e-03 5.740e-02 0.000 0.000 1.000 + -0.1250 3.033e-03 -5.254e-03 5.740e-02 0.000 0.000 1.000 + -0.1250 -3.033e-03 5.254e-03 5.740e-02 0.000 0.000 1.000 + -0.1250 -3.033e-03 -5.254e-03 5.740e-02 0.000 0.000 1.000 +1 7502 0 1 1.485e-02 0.000e+00 "Artemis system reference magnetometer size = 14.85 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 7502 1 4 1.485e-02 0.000e+00 "Artemis system reference magnetometer size = 14.85 mm" + 0.2500 3.712e-03 3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.712e-03 3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.712e-03 -3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.712e-03 -3.712e-03 0.000e+00 0.000 0.000 1.000 +1 7502 2 4 1.485e-02 0.000e+00 "Artemis system reference magnetometer size = 14.85 mm" + 0.2500 3.712e-03 3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.712e-03 3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.712e-03 -3.712e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.712e-03 -3.712e-03 0.000e+00 0.000 0.000 1.000 +2 7503 0 2 1.486e-02 3.000e-02 "Artemis system reference gradiometer size = 14.86 mm base = 30.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 3.000e-02 0.000 0.000 1.000 +2 7503 1 8 1.486e-02 3.000e-02 "Artemis system reference gradiometer size = 14.86 mm base = 30.00 mm" + 0.2500 3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 3.715e-03 3.715e-03 3.000e-02 
0.000 0.000 1.000 + -0.2500 -3.715e-03 3.715e-03 3.000e-02 0.000 0.000 1.000 + -0.2500 -3.715e-03 -3.715e-03 3.000e-02 0.000 0.000 1.000 + -0.2500 3.715e-03 -3.715e-03 3.000e-02 0.000 0.000 1.000 +2 7503 2 8 1.486e-02 3.000e-02 "Artemis system reference gradiometer size = 14.86 mm base = 30.00 mm" + 0.2500 3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 3.715e-03 -3.715e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 3.715e-03 3.715e-03 3.000e-02 0.000 0.000 1.000 + -0.2500 -3.715e-03 3.715e-03 3.000e-02 0.000 0.000 1.000 + -0.2500 -3.715e-03 -3.715e-03 3.000e-02 0.000 0.000 1.000 + -0.2500 3.715e-03 -3.715e-03 3.000e-02 0.000 0.000 1.000 +2 9101 0 2 2.050e-02 5.000e-02 "Compumedics adult gradiometer size = 20.50 mm base = 50.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 +2 9101 1 8 2.050e-02 5.000e-02 "Compumedics adult gradiometer size = 20.50 mm base = 50.00 mm" + 0.2500 5.125e-03 5.125e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.125e-03 5.125e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 -5.125e-03 -5.125e-03 0.000e+00 0.000 0.000 1.000 + 0.2500 5.125e-03 -5.125e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 5.125e-03 5.125e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -5.125e-03 5.125e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 -5.125e-03 -5.125e-03 5.000e-02 0.000 0.000 1.000 + -0.2500 5.125e-03 -5.125e-03 5.000e-02 0.000 0.000 1.000 +2 9101 2 14 2.050e-02 5.000e-02 "Compumedics adult gradiometer size = 20.50 mm base = 50.00 mm" + 0.2500 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 8.369e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 -8.369e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1250 4.185e-03 7.248e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.185e-03 -7.248e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.185e-03 7.248e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.185e-03 -7.248e-03 0.000e+00 0.000 0.000 1.000 + -0.2500 0.000e+00 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 8.369e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 -8.369e-03 0.000e+00 5.000e-02 0.000 0.000 1.000 + -0.1250 4.185e-03 7.248e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 4.185e-03 -7.248e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -4.185e-03 7.248e-03 5.000e-02 0.000 0.000 1.000 + -0.1250 -4.185e-03 -7.248e-03 5.000e-02 0.000 0.000 1.000 +2 9102 0 2 1.660e-02 4.700e-02 "Compumedics pediatric gradiometer size = 16.60 mm base = 47.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + -1.0000 0.000e+00 0.000e+00 4.700e-02 0.000 0.000 1.000 +2 9102 1 16 1.660e-02 4.700e-02 "Compumedics pediatric gradiometer size = 16.60 mm base = 47.00 mm" + 0.1250 4.150e-03 4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.150e-03 4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.150e-03 -4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.150e-03 -4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.150e-03 4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 -4.150e-03 4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 -4.150e-03 -4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 4.150e-03 -4.150e-03 1.500e-03 0.000 0.000 1.000 + -0.1250 4.150e-03 4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 -4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 -4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 
4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 -4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 -4.150e-03 4.850e-02 0.000 0.000 1.000 +2 9102 2 16 1.660e-02 4.700e-02 "Compumedics pediatric gradiometer size = 16.60 mm base = 47.00 mm" + 0.1250 4.150e-03 4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.150e-03 4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 -4.150e-03 -4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.150e-03 -4.150e-03 0.000e+00 0.000 0.000 1.000 + 0.1250 4.150e-03 4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 -4.150e-03 4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 -4.150e-03 -4.150e-03 1.500e-03 0.000 0.000 1.000 + 0.1250 4.150e-03 -4.150e-03 1.500e-03 0.000 0.000 1.000 + -0.1250 4.150e-03 4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 -4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 -4.150e-03 4.700e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 -4.150e-03 -4.150e-03 4.850e-02 0.000 0.000 1.000 + -0.1250 4.150e-03 -4.150e-03 4.850e-02 0.000 0.000 1.000 +1 8001 0 1 7.000e-04 0.000e+00 "QSpin OPM sensor Gen1 size = 0.70 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8001 1 6 7.000e-04 0.000e+00 "QSpin OPM sensor Gen1 size = 0.70 mm" + 0.1667 -1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -2.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 2.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8001 2 24 7.000e-04 0.000e+00 "QSpin OPM sensor Gen1 size = 0.70 mm" + 0.0417 -1.250e-03 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 1.750e-04 -1.750e-04 0.000 0.000 1.000 +1 8002 0 1 7.000e-04 0.000e+00 "QSpin OPM sensor Gen2 size = 0.70 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8002 1 6 7.000e-04 0.000e+00 "QSpin OPM sensor Gen2 size = 0.70 mm" + 0.1667 -1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -2.500e-04 
0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 2.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8002 2 24 7.000e-04 0.000e+00 "QSpin OPM sensor Gen2 size = 0.70 mm" + 0.0417 -1.250e-03 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 -1.750e-04 1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 -1.750e-04 -1.750e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 1.750e-04 -1.750e-04 0.000 0.000 1.000 +1 8101 0 1 2.000e-03 0.000e+00 "FieldLine OPM sensor Gen1 size = 2.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8101 1 6 2.000e-03 0.000e+00 "FieldLine OPM sensor Gen1 size = 2.00 mm" + 0.1667 -1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 -2.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 2.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 7.500e-04 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1667 1.250e-03 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8101 2 24 2.000e-03 0.000e+00 "FieldLine OPM sensor Gen1 size = 2.00 mm" + 0.0417 -1.250e-03 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 -1.250e-03 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 -7.500e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 -2.500e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 2.500e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 7.500e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 
1.250e-03 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.0417 1.250e-03 5.000e-04 -5.000e-04 0.000 0.000 1.000 +1 8201 0 1 2.000e-03 0.000e+00 "Kernel OPM sensor Gen1 size = 2.00 mm" + 1.0000 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 +1 8201 1 9 2.000e-03 0.000e+00 "Kernel OPM sensor Gen1 size = 2.00 mm" + 0.1111 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1111 -5.000e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 +1 8201 2 9 2.000e-03 0.000e+00 "Kernel OPM sensor Gen1 size = 2.00 mm" + 0.1111 0.000e+00 0.000e+00 0.000e+00 0.000 0.000 1.000 + 0.1111 -5.000e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 -5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 5.000e-04 -5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 -5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 -5.000e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 + 0.1111 5.000e-04 5.000e-04 5.000e-04 0.000 0.000 1.000 diff --git a/mne/data/coil_def_Elekta.dat b/mne/data/coil_def_Elekta.dat new file mode 100644 index 0000000..4bca22d --- /dev/null +++ b/mne/data/coil_def_Elekta.dat @@ -0,0 +1,70 @@ +# +# MEG coil definition file for Maxwell Filtering +# +# These coil definitions make use of integration points according to the last +# formula in section 25.4.62 in the "Handbook of Mathematical Functions: +# With Formulas, Graphs, and Mathematical Tables" edited by Abramowitz and Stegun. +# +# These coil definitions were used by Samu Taulu in the Spherical Space +# Separation work, which was subsequently used by Elekta in Maxfilter. The only +# difference is that the local z-coordinate was set to zero in Taulu's original +# formulation. Source of small z-coordinate offset (0.0003m) is due to manufacturing bug. +# +# Issues left to be sorted out. +# 1) Discrepancy between gradiometer base size. 16.69 in Elekta, 16.80 in MNE +# +# "" +# <per coil information> +# struct class id accuracy num_points size baseline description +# format '%d %d %d %d %e %e %s' +# +# +# <per integration point information> +# struct w x y z nx ny nz +# format '%f %e %e %e %e %e %e' +# +# .... <repeats num_points times>
+# +# +# <class> +# 1 magnetometer +# 2 axial gradiometer +# 3 planar gradiometer +# 4 axial second-order gradiometer +# <accuracy> +# 0 point approximation +# 1 normal +# 2 accurate +# +# +1 2000 2 1 0.000e+00 0.000e+00 "Point magnetometer, z-normal" + 1.0000000000e+00 0.0000000000e+00 0.0000000000e+00 0.0000000000e+00 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +3 3012 2 8 2.639e-02 1.669e-02 "Vectorview planar gradiometer T1 size = 26.39 mm base = 16.69 mm" +1.4979029359e+01 1.0800000000e-02 6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.4979029359e+01 5.8900000000e-03 6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.4979029359e+01 5.8900000000e-03 -6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.4979029359e+01 1.0800000000e-02 -6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +-1.4979029359e+01 -1.0800000000e-02 6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +-1.4979029359e+01 -5.8900000000e-03 6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +-1.4979029359e+01 -5.8900000000e-03 -6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +-1.4979029359e+01 -1.0800000000e-02 -6.7100000000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1 3022 2 9 2.580e-02 0.000e+00 "Vectorview magnetometer T1 size = 25.80 mm" +7.7160493800e-02 -9.9922970000e-03 9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 0.0000000000e+00 9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 9.9922970000e-03 9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 -9.9922970000e-03 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.9753086420e-01 0.0000000000e+00 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 9.9922970000e-03 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 -9.9922970000e-03 -9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 0.0000000000e+00 -9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 9.9922970000e-03 -9.9922970000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1 3024 2 9 2.100e-02 0.000e+00 "Vectorview magnetometer T3 size = 21.00 mm" +7.7160493800e-02 -8.1332650000e-03 8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 0.0000000000e+00 8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 8.1332650000e-03 8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 -8.1332650000e-03 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.9753086420e-01 0.0000000000e+00 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 8.1332650000e-03 0.0000000000e+00 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 -8.1332650000e-03 -8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +1.2345679010e-01 0.0000000000e+00
-8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 +7.7160493800e-02 8.1332650000e-03 -8.1332650000e-03 3.0000000000e-04 0.0000000000e+00 0.0000000000e+00 1.0000000000e+00 diff --git a/mne/data/eegbci_checksums.txt b/mne/data/eegbci_checksums.txt new file mode 100644 index 0000000..1702429 --- /dev/null +++ b/mne/data/eegbci_checksums.txt @@ -0,0 +1,3058 @@ +S008/S008R06.edf.event 149997f77af08c9d6ad150aad5198f91c6c964c07e4d639baa770eac01012cfc +S008/S008R05.edf.event 51f07832e9b1d3d8c667f73dde4aa38f9d3e45cf2a4c2baf8e47ea328c860420 +S008/S008R02.edf dcd82e2a2477c52ca4a3dc784d9c04a55f935f2ed9ff10cfe0ec880d56c60edc +S008/S008R09.edf.event 0db4656c1041f6626ac6fd54117fb1e02890492bb86525e197e9ed116a0fe6c7 +S008/S008R03.edf ab3ea90d829e1e2e10bb6e551e828e66ac6262ec7bc24e2e17db6e1e350088fe +S008/S008R08.edf.event 918e61da68f21c26600383b29f33fec7665f2f3fae232e8413bc9266cc617863 +S008/S008R13.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S008/S008R03.edf.event 8d9c5bb3c83f5f447b4f8159b1454d55332838299c0e4a8e2dc62f413b08cea6 +S008/S008R05.edf 4a1005e7d877efe17e6ab6849665304f21aae138ef72759e5e2e0b96e444e447 +S008/S008R07.edf 9e05bf83ad067538667ab853081165b854b3d13ee334c4185ee5f40aa6b76a7e +S008/S008R12.edf ddced4ba4dc801313554039823fa1826d0dd52648f87a5ee5ada8e9cdd0678c8 +S008/S008R07.edf.event 017d78426c2e9f2b1807cc35ed07405ab8ff157014df0701b8524f965539b319 +S008/S008R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S008/S008R14.edf 19c943fb32f7749b7e37d8765f84a3bbf76c4ac7ea48ff29fa074322ebcad885 +S008/S008R12.edf.event 82c4504914809d88b1ce13e11caa12c3a659e1a2edfab31c2e894283b14daba8 +S008/S008R09.edf a13ed84381df5a71e3e0049a8cdc33f8f04f5f4e3f3af8881ade8b5c57064713 +S008/S008R11.edf.event 0bddeca4a8cd621dca3130a2d5affe793bfed62c379600613bb61a6e554ec750 +S008/S008R01.edf 678e47541d9903c300ba7811554ad1f8bfbe2bff086407cb4ff489d2d0e507bc +S008/S008R14.edf.event e9aa79af3e48ec970083b6f911002eac68ffb799057d4805cd5fde8f16d76b97 +S008/S008R08.edf 534c25b65a4fa68afe29a5c0272a686ac474e638c86521b177660d888401f374 +S008/S008R10.edf 6a7934c18466078caf899f724cf13b665d98e41fac9d978d9521f89021e0377c +S008/S008R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S008/S008R11.edf ef072b260a1d92c45d5d43a2a38c2affc8db37bc2da3df2d0962c44cdc449131 +S008/S008R04.edf 034a26131e1425e6374a459e5887b1f831f7bfdb101a3658d2cd07620cf2c06b +S008/S008R13.edf 9707099fbc3c9bb8cd9655c9ac491b5c6eb0383ea2a17972602f0e5c68ee6741 +S008/S008R10.edf.event f338197f5dd0ca078ea8eee22145e57e694f7dce6a2bbd55f5f05346ce3b3f17 +S008/S008R04.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S008/S008R06.edf c2afadf0b5cbc8764825fbe26ae358df677c46ce44c6e8622e4fa3d47d6abb14 +S024/S024R06.edf f4553ab40c7b8334a61f1f880d9cad635af87210fb59de6ab5b84ec79af0c296 +S024/S024R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S024/S024R11.edf.event 6e13e4f708f4b769dde500a1db747206aa87951c3914cdd436fb7f07e9aa5052 +S024/S024R12.edf f6287534159e552ed194cd1e3d44719e5ac162691779fd1a6fa3ae535cc036df +S024/S024R14.edf 774fb0c7d2bd28ab25364a1761d042725081a1ccb77f7dbd80d6b32be18252bd +S024/S024R09.edf 660928b6113112c7aaca4e2ed77188fcb66a12b75577f670202eb070b6fb351c +S024/S024R13.edf bf02f4f15115f6cb760f5f0f37437b930e648a85a43840b0382a83c6aa0e7144 +S024/S024R13.edf.event 472673d75763793097b36d5b0883addc030cb3790ee84a18c36eb33ef80d009b +S024/S024R11.edf 
9ff78f7743d2b47590d5ae93f97d1afc5d266d41d086f6706cdc87ec32de31d4 +S024/S024R03.edf add2fbc9ed9bac885c6c192f7dc4cbfee1872da48da7423e2b06955bb200e0da +S024/S024R04.edf.event 3e83abe294ec7b278fb4495c4fbc171e9413e9d805ad8464dc6ea5a4875e7625 +S024/S024R01.edf abd9b141d8e2853e30a126a891929bf99f22661e27f571f02d5f6a415b40841a +S024/S024R07.edf.event 6ca290c8f2ab5d2a3d0c42a123bd24341d790899e901dc5991dd66614dfb1842 +S024/S024R05.edf f0de0cc7eb55154e2378911413c3a2ce353a2b4e68a9d298b2778ac4c0c7e587 +S024/S024R03.edf.event 3e83abe294ec7b278fb4495c4fbc171e9413e9d805ad8464dc6ea5a4875e7625 +S024/S024R08.edf 863899a80ad93a9ed8871be3bb775f8e74bf777dbf7a2c3da50e1433ceb3b50c +S024/S024R04.edf fe32b65b90079a307b05c42602aa66b3c79e09726ce9457e1d95242b102c6ba1 +S024/S024R02.edf 23634177bf16de6e243a1c7f0902043ccef5befca66b0504950cd45e08cc6e6f +S024/S024R12.edf.event 732398d864936092994f6aa5c7a3d554d1c1c35ff4fd7a90dd0c06a61260c68b +S024/S024R06.edf.event 04192a55fa7728d34533e76a1b831fe5c94d79c3bc9f1028f45644c51892a744 +S024/S024R10.edf ef34765f73a46e40ee2c59acc9a15d308765da781da11ce01197ee6e21f41c0f +S024/S024R09.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S024/S024R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S024/S024R07.edf cbf04619f8a980286067dd7fb943a3c5f1bce9928e27fdf3910ac1715d50f420 +S024/S024R05.edf.event a8ce17ff32c2c3aa820817e047ec3ff9976bd33355d14877f0ff811df5a288a1 +S024/S024R14.edf.event ecb3c28bfbaf7c670aa5547fa414949828cb36fcb3d84e0389aa669e01381627 +S024/S024R08.edf.event b9568e8466c8f90e1fe1f9aab8ddb73ea16c008b7b67cbbe5863f04f2ec408f0 +S024/S024R10.edf.event 9b116dac2d8fff59f5bf4cc8d45636716575c73d6e6b2287709cbfdf0ce5f55e +S076/S076R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S076/S076R09.edf.event 0acfeb483b2cb94304c9eb67c0f7c62d40e5219d810874aa1100d47751846b50 +S076/S076R13.edf 5a1a2283844cf3b493f92ffcbe31f2f3f74f09ecef18181edf644d10bcabd041 +S076/S076R06.edf 77739b23f5c793eddec3f39523c08c0dd1794cbee1886bfe34fc32c94700f557 +S076/S076R09.edf 074de623e475352ea6278d66895b15699bbd7213b561f7ec23791ab3a86e3629 +S076/S076R13.edf.event 26ab1ab393fc9047a9dcc0795bfb1c27444f58002c53a85545ba90395b1b54a2 +S076/S076R10.edf 64735a5159983af2f17f6405284ce59abb176211178e9ffa6bf2142bebd1055a +S076/S076R05.edf.event da9621a04c94f97b7529e748da5c8ea934cf17ff04b8b25167bbefb402b05574 +S076/S076R05.edf 9ad75a6b4e666c5f1db7afadf553b3d6f7f0eb46cbfe9980ef83f2ca4c351bca +S076/S076R03.edf.event 816b9e94c71aa492a890b3ee1209e4e3978a2bc1a055bb8d5b29d2aa438e7519 +S076/S076R07.edf.event 25f3656128a87915adc8cadfaac6dcf8a8c1c6f327617318054f010301d6b6c6 +S076/S076R08.edf.event df69a0d4526ab4c42f8d35b328874aafaeda087bb95ee7310d4f3654498f5746 +S076/S076R02.edf 092bbc018ef8bbfe21ff2ebf10e9587c00a1836a922c391e632b829e38aef05a +S076/S076R14.edf.event 246045b94ade6decbe7fab2d5ffd7ca93aada9d955f2d3ad771d86993cebd407 +S076/S076R10.edf.event f902df8b382a804cc4355668d708d0c1fc4103aa929df7db535c913a6352a463 +S076/S076R08.edf b7177ac5c852f96bc54093baafb7fcf0036cabd1263b8dfc2ab594105df3ead8 +S076/S076R11.edf.event 3cb2bc9973bfc45d63eacff0d4dbc31c925dfbadc12a78a18ff82731d7cdf19e +S076/S076R01.edf 86659701591d9e676b453556fd006a7501530d9f328050f2860060150b9ee75b +S076/S076R04.edf 48efdb5438f403e40a95262b12bcfe0147d581610c57ae33ed71c639068a04e8 +S076/S076R11.edf 2a5e9b3db7c0a2d6e030a25131a20e62bb5c4995cf9c001baf605fd0b7f8f254 +S076/S076R12.edf.event 879d902507387beb62ba2852ca4d85abb3e0bf9ee2913b3a553338a93590fe26 +S076/S076R01.edf.event 
98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S076/S076R04.edf.event 994f60d74757d686b44b6a76e2d8e5946d5c8bec058f00a2264bae2e66ef7f62 +S076/S076R12.edf 6e9353b6b73166048fe7477a7f76b25a768d56bb0490188f5c8186506e96c47a +S076/S076R07.edf f45b4bb5193806594236d25ce2e6d3664d560aba813b92259e688f791a8d3552 +S076/S076R03.edf 413e4c44629550309789e4d0bcb33b6c5d9018731b497fc5189634e077636860 +S076/S076R14.edf 51114d952fde1e67627aa2abe33ffbdaf29653097cdc81c559f16d08226c0aad +S076/S076R06.edf.event 4e2eb91af71f5afce0e8a62b4a4ca68badcc99f5cad95fefc14d81d3b8b942ba +S102/S102R09.edf 507e52473e0378f094eb7622edb4ebc669772dcaf28961055790d17adfe64ea2 +S102/S102R01.edf 8eb9f4a85bd7854b545d00504f59d00ae4ba301011bcf3b562fb8d09ecb37d1d +S102/S102R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S102/S102R04.edf a66ad16f27f36c7fd126e15d47eae464e52c906eb5777c9188f392d0db97169e +S102/S102R07.edf.event 67b710bfbdaea5c65257f5bee9fa64cd171dd8b9c8a41d1686b0a14b0d997c51 +S102/S102R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S102/S102R09.edf.event 2d2b8dff802b702fe4eb03f1ac14632c8e7ac45acf9ff5566d798e576f1458d7 +S102/S102R06.edf 2275d9b0af1689c652d0846747eef993479998941fe7dbd3fc1206e39922b67d +S102/S102R04.edf.event 9fd975ed76b006ff20105ebf0fd1ae6dd127f008e06a75cad4484483eaad568d +S102/S102R05.edf.event f89b8b851132f6a167fb816989ff4c56e00ee3cea0b5a0531d0b31636ad86634 +S102/S102R12.edf bdfca4e013a18c8f43c424fd3acf604c231b1405e12c86dc7dd902b427d235a0 +S102/S102R07.edf f25c51950d872a5bfd169cb79263fda5e7c49d4c14d9cdfede7698b242099bde +S102/S102R10.edf 0dd5cabe19f4a7be3e907a69b752c41d459da617fe1b4110ed4bf8400a912666 +S102/S102R11.edf 8debe5134c1bec5e986ce2c025cd133bf1de8486837c363910b79349ebd6e889 +S102/S102R02.edf dedc3e5421cfd02d5505738c92bfe77ad7afc5478cc5c63f1ee07598a66af255 +S102/S102R14.edf 9c983d89d2df07f4477a73b17c5226166f859498f1f859fa64ca92ea3b144427 +S102/S102R14.edf.event 28f75c7ca5b9a2497e2ac9802a60d98795fd30929d3a9999fdab4252b01f28a3 +S102/S102R11.edf.event 82111548c0e7fdffe9a81852fe0ca1917a0898ce34e4d5af5bec8826a50ae766 +S102/S102R08.edf.event 94aaf9017009cf1af17ca3bf9b16aa30c750bd60048f18af249b106afdde2c63 +S102/S102R03.edf f4e0d946b27185e80ed8326d2b0985a72e8c1b25aba86496ba3c91797fd8a6b3 +S102/S102R10.edf.event 761750058b75b97c1177655c9399a4de55d2f053dd7397c6d95c96c7718ded98 +S102/S102R12.edf.event 8eec54f234ee4835606a193713d02283c66ac6177147ea2d90c6cb07814fb12f +S102/S102R13.edf.event d107d48c57429e3818a39fec732023db2d15060d55e82d27b578baa3875b6025 +S102/S102R05.edf 7d0b0c5d517978af5f1ff0386f14eb81af3f9d3402986bbb02e7afd47a7c98b6 +S102/S102R08.edf 6155ab874f65352641325fa1a308cca891820821572a28cae81a87740a046883 +S102/S102R06.edf.event 2de1f3543fab2d52b1e97e2b52f84bee5719ac28f3e613357cc69f70ce2f1e7b +S102/S102R03.edf.event 6c457793161b0b2dafe7d78bf4d750a570530145b98ce0bb007627422152b0f2 +S102/S102R13.edf 9ba36fa3b8377e35c9e666cfa48715e3f2b07513739f7dbb841c45d541673cef +S105/S105R14.edf cd55636e49e04b863df13ab0aa09bc5294eef100f8bc055cbc528f316c415aa6 +S105/S105R10.edf.event 353c44c45eb89b709590af77f11106666cfa3680e5470b61c607b80304933399 +S105/S105R03.edf b04097ea13899d9139a25b8a109d83b78f45aebdd7145610d1abb74ab1302f4a +S105/S105R11.edf.event d9d89addd8fca4d057ce27c16b349184b9dc3b13193561b7c99ffa9414e86138 +S105/S105R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S105/S105R06.edf.event 61280c9a206447732db06a6480d23654d272250513af280790c631d46fd6de1f +S105/S105R08.edf.event 
537ad705e53a339bd1d130f3331df882a0416fb7e95c4f565d283142dcd120f4 +S105/S105R04.edf.event 25af506ba70b68343c97ccca1ad0f235958a8dc9bfdd84c6f7155937e7bc7efa +S105/S105R14.edf.event 3fef219961dd488cd267ad63abdc2bd5db054783d67c836968d1266e4a9f8560 +S105/S105R11.edf 1275ff3237fa4bfc006b5e296d6a77a2d65c9b1045451b1485f2837d5ba189d3 +S105/S105R10.edf 3367bdaef3d66d056ec64c553ecb2fecf7fb19677b0d310e4fa6c7501052b7c5 +S105/S105R05.edf.event fb7055ba8adb05cadd3009cfa9adefacc5298e01d2b7156815f6424ce1d4f3f6 +S105/S105R07.edf.event 26fabc186c9b04bd70469a5964b2648cb7a2115fb0a397d51de147fc640d8d83 +S105/S105R02.edf 8816d8070f58a9ddee89d1d6f9923c9df3d73707a0a204953a128b6c357d237c +S105/S105R09.edf.event 9787b103ae02f348ebe837cf8e545e3e630609a320868cfa229f0f82a0734ddd +S105/S105R09.edf f3b966181f93016d52127e1bcc049bf5c4e866bc0540fc7697f369618f78741a +S105/S105R01.edf 712c423a028c67f2fa328bb427cf140fbbeca2fee90bec6c17fb54ed01ffec85 +S105/S105R04.edf 218e3e8ba2bc171a3c6dd5891e6cd604c8fec4f32fe2d4c274732ef880264180 +S105/S105R03.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S105/S105R13.edf.event e4c9fdcbbe3469b81dd48d30396ee921a23d45fb900a0dd3b7eb4ceaf04936a6 +S105/S105R12.edf.event bf636aa7d9551082dd6cb4265143a22c283dfa2e23b0fee221d83a0b0a57742c +S105/S105R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S105/S105R13.edf 0942c77de3e2167d8f6d2ef7cc5b62fbfc9248ea92985c0c484726a0b893bb51 +S105/S105R07.edf dd475e1a162d6665cca22cfe293d45874b84a017007ca845300fb205ace24424 +S105/S105R05.edf a50dfac654a6f0e0d730cd2475e086f78122aac22d288792c44756e646ccfed1 +S105/S105R12.edf 926fe3afca0077495884cbd67a09761d0a7cbb956c37dc7c140165798b510891 +S105/S105R06.edf f80b5f1ae34d833fc370e17a8e369609240a43f2dcad050eed9b393a32cc4475 +S105/S105R08.edf 52bca66d6e03db2c45e6cf560f54d13abe113ad3fca5396ac4c6fda7796e36c1 +S090/S090R11.edf.event 9e1cfe676de301ff52ef7ef70ad2cdd8b8c9562c01e98d8eae191a5c8c28aa46 +S090/S090R04.edf.event e3fab15583152a967f41c771dfbae2769f9b7aa683947093588c32e8521f7150 +S090/S090R07.edf 4dcb675b5b73c8d79b7e780e310ef91e5060c5965b6aa7e0bdeeed00862e2621 +S090/S090R08.edf.event 4c374f58a91c1ff71894cacb50bfb798d20cbef67ee65a1ce8a2d5826349e390 +S090/S090R05.edf 0f8919f65642a8abae2baa57f5c4550ebfc3c5320eb97d1c9da11b4fe082a926 +S090/S090R06.edf.event 296bafcd473fd6051b9cf19b1af83d7f0a4ba4eb310f7fbf462aa7ac3ebb8120 +S090/S090R12.edf.event 8a4b5822b251050d46b816bf54fbb47530d91210b6d01cc2e6acbe662193553d +S090/S090R10.edf 53184bb4b5b4785bb80819ee6779f6684b5b17b7bdc0be4bfa1113c2c723adb1 +S090/S090R09.edf.event 9aaec3ff6fcd8c6d47bbf0c154dac87cdccac4bfb099e5995ee69afc7b34257b +S090/S090R04.edf 693e109389bc37367ab66ff558171a2fdbcf74bdcace569303e7da8c0f9f8a58 +S090/S090R03.edf aa2696005ede093289712cbff6c1d9ca7fb72c3f127e8e29593b87c160a2c277 +S090/S090R07.edf.event 132c4ce227ebb4e694982a37f3f7e9289511d75fdc0079dc876ccb6d9cf1a81d +S090/S090R01.edf ef8e84de758cebfad15ee86d49bd134578ac5ff24154ece98e9e853c91356e34 +S090/S090R12.edf 8cc1a80be7f14683b645e7443854c6cd938e31049be92344adc58a9e4c586554 +S090/S090R14.edf.event b9997b11f88beac0859d8b671d263d82a6687a271c7dccc0c874b0cb51d57af4 +S090/S090R14.edf 2b900c0724f97e41416ffabc40ff8de4bc2e3333f808169bd2e5c9b4be5c9f76 +S090/S090R05.edf.event 5bb35bd49434a9630e941b5646d6d89f7907531ef3e44464334b78943d4b0237 +S090/S090R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S090/S090R06.edf 07c41b448f75aa807e63018fe370a017e00f6d82b8d23be59ed8969e4091492a +S090/S090R13.edf.event 
c2f5111be300abf5d209c1908e46d378a7a94c2f8043fe6acf88665aab8efb02 +S090/S090R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S090/S090R03.edf.event a6b508d78111c06b1a185c7a70db08a6126b40c12b7f09d110a17945d19cfdbf +S090/S090R09.edf 6c3614bce81b0e38d7b41c9bb18c77899f1d71be7fa8d135c1f81b210b71d950 +S090/S090R08.edf 9585057959bbaae8a4b6ef81546a88a3ae1d6e95bbc7dfb1dd6a846d46af0637 +S090/S090R10.edf.event 48bfdaa247bad1b393cff54386a6d30257090bf0e7327f31b2b320921da53270 +S090/S090R11.edf d2bf6493d1968c5e0ad3c90f0a5b807b87e321ba5e516d2184db3c18a54443bb +S090/S090R13.edf 4aab298b8aeb892039c948102e8dec14d8824c876dd27332d147f2da11e64a97 +S090/S090R02.edf 19877620e881df6cf52aa25b6383dd55d9097e4425b16b9451f99d68a67c51f7 +S039/S039R03.edf 966a6c64ac3e79dcd455917fdb1b298b812bd8164b77089f560ab6fe0a93d941 +S039/S039R10.edf 4e1fe26c20c3990f2a197c6f4090613de90eab0699c4606daa9bfca9eaefdd77 +S039/S039R08.edf 4f5f011e61932f4f5998692bd6324c70b2286191dca137e340afbd4f9b7b93ae +S039/S039R11.edf 46dc22efd58d1179f521a3827b62d5ab4081fc333bb65025e940aeccf34e684a +S039/S039R12.edf 19078a959cce7c535e92c6fdc557dbd01c162549784a409af79929d9c4f20e16 +S039/S039R11.edf.event 1f21e6a28cc1b59fdc3667207c5b1029902f6d0018bb978cecbb5905b868cb10 +S039/S039R14.edf.event 0d4308120129d0f92d2b8779374ae232f98d1357e6c6427983fed655d9611af0 +S039/S039R09.edf 1d86d0dd505aa15e6e819feb0d9a36f43f783f25861514aaf703b8f93ed774b1 +S039/S039R09.edf.event e1b29e90b20be6fe74d2526e8f695d3e9dfe8bdd7f08c801c24f523ac253462a +S039/S039R05.edf.event 01b8244e62aea11c106c000350e5b2ebec864736b68aa1525ea371fb86a3b0b2 +S039/S039R05.edf 127aa579b8333abc584461ad7efba5ba8b8b9da701fe111677a812dc5d73d622 +S039/S039R03.edf.event b7ad55e8ce55c5743b2d7e417e77c8d7efdeba7b6cbbddb02fe11361879eb9a4 +S039/S039R02.edf e54936d509c572066383ae4250e28f41e9990bad464c26eaa10e248667464483 +S039/S039R04.edf 57302f5f9cd1714693e52b7785a839666da1560ee88b00779fe7f18f56899324 +S039/S039R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S039/S039R10.edf.event a50926e15d8027e4167d22e5479fe3751780003aa245b90b2eecb0b4f474fde2 +S039/S039R13.edf 9bef82ea3f69eff3f3c9f47779593a278a10c9ea379b35d5741b063a4a22c66f +S039/S039R12.edf.event 61878a566785fc86672b3e061fc0eefd0fb9735a4239d8f13ceb0d08ff22f60d +S039/S039R08.edf.event aaa5cf263ed5e1e7bf38b3d540f4e2d286107c553f7904af5c9cee9bfe715f19 +S039/S039R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S039/S039R14.edf 29a2f92b3a9ddad0d55ff5440eea81b69fcf6b21e62e33c3dbfdb42fc1398190 +S039/S039R04.edf.event 09d9d4cbfb435c34d0498269502687afe1e41cb410faf772b5fd4389e435ad7d +S039/S039R06.edf.event 8fb228960cd397f6100e75cca49e1045159bf62e6184dd5267ea608be1d540b6 +S039/S039R07.edf c0c5867355e92bfcd0da79ac30584aa8a97558aaa9900af35f43f5acdbc82378 +S039/S039R06.edf 8512ad15c7d2f497fd1d48753b10f81d6dfde4a1c27434499cf1f50e4e066b32 +S039/S039R01.edf b1c5ca3e506c3dc0d7dc855f3908b5d4ac3b1839e9fac8e7dd316eaaa50f953e +S039/S039R13.edf.event 25af506ba70b68343c97ccca1ad0f235958a8dc9bfdd84c6f7155937e7bc7efa +S039/S039R07.edf.event e1b29e90b20be6fe74d2526e8f695d3e9dfe8bdd7f08c801c24f523ac253462a +S098/S098R12.edf 7523f3be8c00b0ffaf151c4e5f4b3407a88d499b65aeb8b37e6c37bdbed5bb01 +S098/S098R14.edf.event 9b116dac2d8fff59f5bf4cc8d45636716575c73d6e6b2287709cbfdf0ce5f55e +S098/S098R06.edf d07ce4a4f2d6eab3e8f0b3b45285302c9551a6562cb31a247f160d5d702e6b73 +S098/S098R13.edf.event e129d841c2e51546d5ad32cb6c2a8303e4c0bfcd90dc9e3e821c0c40a9a049bc +S098/S098R07.edf 
9e6287b11b514ababb4185c98955086fbc53547c2758e2876e23c63bbd7ff766 +S098/S098R09.edf.event 96cef3a9a9e2cd7f438d53008e256b198c48c6e0d2f0b2d70e0d296180cd58b0 +S098/S098R12.edf.event cdf1a70ff91abbdc6f14a7ca794a7b5932276819e6f7c68ee344c3d21e9b73f0 +S098/S098R10.edf e7f29d50b356234f59f36e3f2e239864e40696bafa7f856f295900064ed8519a +S098/S098R07.edf.event b0c7884218a114ab4fc2b8cb09b2c8f1bd0ddbaf69aac65191618c0a230f65d3 +S098/S098R04.edf 9bae31bf3add196cc4588f279b3a372112331b5b385eec4490f09a23c29a9468 +S098/S098R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S098/S098R05.edf.event 1cecdedf3d7f8b7931b4cd84b48bcd356337c0ee32518d737ce0ee8f0d428d8f +S098/S098R05.edf 0e27c55698a030660f39462f0f948da1b1637b4381c3babb432b3963483b8044 +S098/S098R13.edf 95d6338578f27881bf3cc14b09ee3137a5e5be8e7ce598e5c7f562bc1715d93c +S098/S098R08.edf.event efa5d828429e8df393cf3e5b452a00dc9f05f3ad9eeeeb119ad18eafd1cc4e2a +S098/S098R11.edf 55c06e652ae9e6862e19c9e6e0709add991fda1d314df27982d3dfc13ea93a72 +S098/S098R09.edf d0c14bf014bb521244332d7044a8e01763266175d676323a82a92a0164632430 +S098/S098R01.edf e908b862df484c6ffa822ccbb5658894cd3459fadd6c9d02b26e30bb9c392552 +S098/S098R08.edf e357cf8d5236aa3b63e212bccd663e8f07d8b98952d3ae301eca861a63736c19 +S098/S098R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S098/S098R06.edf.event e8abea1fb06a5ad73e91ea5c97aa84247e1c165ee2bc724e9b9c7ae6f6c017cc +S098/S098R02.edf 2d710c6eac0d9bf911512a5cd3ef10f168592af7b2d4f76aa7378c77a9b83bb5 +S098/S098R14.edf cd8fe3074a59818a00e8749cd0a3b9ada624955b56acf7cc81bc20824a7370bd +S098/S098R03.edf 5f37a275e82a5911f1f467fee237bd97dac7496277db7025864bbeb0e8e52b80 +S098/S098R10.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S098/S098R03.edf.event c4fe35467d7d0b21a1a13fdeae18b7f036dce640d9e06acb7a946289a7fd4f44 +S098/S098R04.edf.event 4e057b134c286502a0bd4e491daa0ebb3c4bfdc21b738b2ba2453c8ffc558218 +S098/S098R11.edf.event a6c871f2eb69757aec91f71c7de20007524b3b628f251799a3dd57bbb181fd84 +S020/S020R05.edf b78e35ca2354a77d8d44af4b50205f09bbf866d8f295d5047cca111f60eae213 +S020/S020R14.edf.event bd6dfaccdd7deb73743f500e0a1fa9d0ff333e94d59c6d153e2e9d2dc8a4795f +S020/S020R09.edf.event 26fabc186c9b04bd70469a5964b2648cb7a2115fb0a397d51de147fc640d8d83 +S020/S020R08.edf 05314b692cd4c103071ed616e36f7dec96116f2b5251e0912a35d65782ae9205 +S020/S020R12.edf 63ef2b1452fe4a93a84c501f10fc35a234d87eece20f0b563b373229b9b8ae7d +S020/S020R11.edf.event a5608231854f8f457c107c9acdfead912cd6d55fc459b7abc2f11d77bb166129 +S020/S020R06.edf 6e127745b2b1031bb274f72063b954a79b584163e17c964aa8e859c878498145 +S020/S020R04.edf 3ff2fd30d246987573897d429b8a6c51cca3f1ea4cfcf8815c49e1f6f6512013 +S020/S020R12.edf.event 611df9f780acf887245656c2987fb77e486d2bc016936d00eea0a55d2f5c3028 +S020/S020R02.edf 0fce787b1715bb3799b110bbdf72198fbb5d46af9b25d1c0f55014f245afdfb7 +S020/S020R14.edf 1d9a26f8c38886fa4d700deb86100e56df958b2507875f45adbac7c98de15ed8 +S020/S020R10.edf.event e2b79b09413cc4065a25ea2956bc7726f4bdedc8730e89a7b86a8617c79876c8 +S020/S020R10.edf aef44481a6fe1a8e732450ef594b3167bca2fbd01bb23a29d7ec79a670f0a658 +S020/S020R09.edf 92c11a238de78b5f58402760ca9e157391ca8501705dfd4a4a172fd7e2a7bf05 +S020/S020R13.edf 8fe70e65b1e8f3a61b748d12a01a3d3844071b32bc591a8996b9a4fe1d824b66 +S020/S020R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S020/S020R05.edf.event feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S020/S020R07.edf 
7e1038d12397731e36039b6ba26504383d9f700d4b2e0252421d072d41cb3ce3 +S020/S020R13.edf.event a5608231854f8f457c107c9acdfead912cd6d55fc459b7abc2f11d77bb166129 +S020/S020R03.edf.event 6ad812d50b44ed49ee87e09d1cf28b68a99855b6d266be8b9017d8056de057b4 +S020/S020R07.edf.event 151aa0e52269f6759e2bed18339cad06a9761f4b713071e665a50681af66afc2 +S020/S020R04.edf.event e2b79b09413cc4065a25ea2956bc7726f4bdedc8730e89a7b86a8617c79876c8 +S020/S020R06.edf.event e7ef0a3d66a5c814c6e917ac8f9bf83d6056810f78dd0c9296b5445289483fa5 +S020/S020R08.edf.event 146180adc42ba38b30989a7a30f8dc33c397aeed2db797d266b9ddc607fbddec +S020/S020R11.edf 80014e2cfda81a19647e9f2a16f66082693a591b78a57aee5d220b3ef2a0241f +S020/S020R03.edf 4660edd9962577ceac6e35db1a5c0756fbdfa954a77f479dc8aa3e3b3819f453 +S020/S020R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S020/S020R01.edf da48b7d4ed805218e12c70c775b47a3a111670901e06e7de843916c579278693 +S042/S042R14.edf.event 212052daaa4c0fbc66f91923236861d93fc52f73403f6b4d988205287f1f9ece +S042/S042R12.edf e327ec08604c1a835bd0082c79c2b4f44ec22c1e0b39d12ca7be6c80d1acc8d9 +S042/S042R10.edf a414c5017fa91f1f099174fa99bd13b3391b68fa1473cb0b6642debf264365f1 +S042/S042R13.edf.event a8ce17ff32c2c3aa820817e047ec3ff9976bd33355d14877f0ff811df5a288a1 +S042/S042R05.edf 423d7daa4453176ed054da2ac90d2951c45c48f4324a49cde4846160af810e52 +S042/S042R05.edf.event 09d9d4cbfb435c34d0498269502687afe1e41cb410faf772b5fd4389e435ad7d +S042/S042R11.edf.event 49dac749bf850e47e5d13a8edef5121c75a7baa7f50917a5c17c1dda52d3013e +S042/S042R09.edf 621acaf37bc58d96395bf0a6d8c242e2784768822aa8515251a3cfa149ca5fcd +S042/S042R03.edf 27158284aabae182fcc060a945e8070bf0e9a40bbe737f43a1c4baba5988d34c +S042/S042R11.edf b9eb76af792f6ea5992359b14ad57ece1d5ac3b3d233cc46ebfc6afd07bfd697 +S042/S042R06.edf.event aaa5cf263ed5e1e7bf38b3d540f4e2d286107c553f7904af5c9cee9bfe715f19 +S042/S042R08.edf b277ee780d7ed612958ca36ea7db2be9797001d5afcaf1dc20cf554dd774d18f +S042/S042R07.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S042/S042R06.edf d4bcaf4ed157911a326c61770386a2d9c95dd8e60d8b3d035efc8266dea1292b +S042/S042R03.edf.event 39c9864c57efec906759ab97dba0ab26a900fa25ad8fc3c48b0d97ea83c3a893 +S042/S042R08.edf.event e16d907d8f296edaf98d1ab54138ee16bae85a4bd81d90a487ccfece5b611fd7 +S042/S042R04.edf 278f745b79a507e87c4a4fe97542416b7723ee66e6960a459d43995486cf60bf +S042/S042R10.edf.event 11c7903b0dd963937962aeddd4ffb77feeec607958b581d996b712009c71f910 +S042/S042R02.edf 34bdf9da7d2a4845db721022179717d6ab5b9e7acb47e2c8f96be20a4c6fc30f +S042/S042R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S042/S042R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S042/S042R09.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S042/S042R07.edf bacd80ce7c09dd4442384e04babdb34259e79f708c2b093a613b4478f305c2e9 +S042/S042R13.edf 676232fb801e81ade568c6ea9c5e2f98f262283168c92358a0251c50c6fc0565 +S042/S042R14.edf c5292099a382f2a0279cfc29f3a86322e8b607f5329f2295524faa528b7f23fb +S042/S042R04.edf.event b0dc1155da666ef5240efa8026cec5f4f69b5fb6f1d21226a512de3f6b62002d +S042/S042R12.edf.event a716ed693834141bb2b56720ce6124df46e42e86652ec02bc96746644a79378e +S042/S042R01.edf 34f1adbcb0155953eb68874d9a23cc1a8d7ce57a507bc6ceea9189b410d38439 +S031/S031R11.edf 77ae7fd0a38e2bd23eaab19f8c612d144cece3d581fc0ce6740b9eec4e8dfc49 +S031/S031R06.edf ac5b92e882911bc3d4c5d1410a9baca5282536cbe1d183f6333b8fbf9ce8ab54 +S031/S031R10.edf.event 
57636037048fc90c882a690b692342a1f4fd070d9f2a219d527ef6c0f7ea7dc1 +S031/S031R03.edf.event 985f9701733fcf2689c3a35bdb9e4b0058b6137ded5bc0f1a2a3b84431edb435 +S031/S031R06.edf.event 0bddeca4a8cd621dca3130a2d5affe793bfed62c379600613bb61a6e554ec750 +S031/S031R05.edf 92e138d4764bc8709af2170722b28d816655ff80287f418346b2886387f25a67 +S031/S031R10.edf 0967dba784038babf1459f8afb6a1b6ae57e51e591a8d84896d5c81736fd4a18 +S031/S031R07.edf ec5602c9eca8e29c1aa51f225504e97189589a18848d1ad968320dcdbf834daf +S031/S031R11.edf.event a931510ec25c8e6b0352576ca7f98b414a922451f0a6ee6aab03d3409a677c66 +S031/S031R14.edf bf60fcc7c476f9e7b0b23710f0c62edd6ed3364698db305fd76925f052c27f5a +S031/S031R01.edf 55d1e8e8145b8eb6778468b55020662275d152d34f33e18c2893b70605175fa3 +S031/S031R04.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S031/S031R13.edf.event 2da7955eedd5ec220793d3b4b3224c26256f1427c213729179eecd79fd4321c2 +S031/S031R05.edf.event 596ad948f8aa9dfdc6cd3125a3d02e115d1cd6a505023467bf5cd721125ee7fb +S031/S031R02.edf e1f18d1810f486b34075e9b2ba9f028e303b5f548c2477d1340ed4ce0f578bf6 +S031/S031R12.edf 6bfe914504368ce54822eac53598d9d53b251b39b17e9a7090dd64619fcee982 +S031/S031R14.edf.event 5b8eb2e2184e2816cb3f7104149139af83c25f381a47b42e81c9c6aa1723fb5a +S031/S031R07.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S031/S031R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S031/S031R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S031/S031R04.edf 514a2b800313549f00e927f8f322eac9a381b715b8728b835f0986fa4f7d5096 +S031/S031R08.edf b4cc77950c0f364cceb1ab55559a40c3ffe7a5b2fd5146a8ffa0243a35897e6a +S031/S031R08.edf.event 927b59b2e19d89df8169362057a329c5b70fd65d1d2d0a77e56546927bc02281 +S031/S031R09.edf 3217f543f6f1db0ef3b8e7b268e87e0e1dd69bfc7fdbd33e0ad8ba295341161e +S031/S031R03.edf f9d75722d2b3412a9b9da296dd28185d54f6b5471082f7952c2cfa51212f6845 +S031/S031R12.edf.event f1d83aaf535b3be7098ef9960def69c712f4dadb4f334e40434e155e9088b299 +S031/S031R09.edf.event 918e61da68f21c26600383b29f33fec7665f2f3fae232e8413bc9266cc617863 +S031/S031R13.edf f0fefd12e9a037ba9d46750508f834c99d1331f75024be889bf05c826e55b2c6 +RECORDS 02d1de9c00511d3b7548a6b59bdf209c819db025518ddcc52b21fd3b2b9de4a8 +S004/S004R05.edf 03d587f2b60f6fc1cb8105ea06219b560345e354f4e6623cb1840cea5d0e7138 +S004/S004R12.edf c44b7be0464d86be4d460ec66432869b7b3e8dcaa2067af02d6e772abf5c11de +S004/S004R10.edf 8f8034251a5bc4bf8dd8ecc1869da6ffd9e61bb9d9f8a4ae0df9003d72d40e9f +S004/S004R12.edf.event 6e9a969133a5a862400b62cb84f763eda38a0967078b1ebbfea1ca2ce8635b48 +S004/S004R14.edf.event cdf1a70ff91abbdc6f14a7ca794a7b5932276819e6f7c68ee344c3d21e9b73f0 +S004/S004R09.edf 2b5b61af7ebad040f57ab761e7b2bfb83bd7488801bfcf35805399c076b5e1d3 +S004/S004R10.edf.event 082acf669cebc89d5f5f4bf29769c743f3b1e14290b8f0f34415e285db553b3a +S004/S004R09.edf.event 9825c80a69d06961cbce2e12cf8d0aa56d063e5f4598312c4e5b78693a0219c1 +S004/S004R07.edf.event 83ec130ac6a664e0d88923e1496dc0806008967b51e6158521a6beb0515b2eb8 +S004/S004R05.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S004/S004R11.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S004/S004R13.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S004/S004R06.edf 2d9afd10462b0dc93c07e605ce6dd49ddf42c856c843a18a8236cbf08c9af7fa +S004/S004R03.edf.event a716ed693834141bb2b56720ce6124df46e42e86652ec02bc96746644a79378e +S004/S004R07.edf 
eb00bf6a816a6ded6f93e0b96d2a1c2ae8f6d13a59e39d475bf94d57fb842643 +S004/S004R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S004/S004R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S004/S004R08.edf.event e2b79b09413cc4065a25ea2956bc7726f4bdedc8730e89a7b86a8617c79876c8 +S004/S004R14.edf 06a3c88276f1db76214f8b1068878add45b22836327fe6b95d0231d415cce752 +S004/S004R04.edf c9ddf0294961f0b877192a2479802b9c6c88403682a17b5ad679fd6485aa6f59 +S004/S004R04.edf.event e14dc31d476c11841d45564cac660be9776fccedd3dec71dcee4c5232f00f25c +S004/S004R02.edf 32134f3ce7056fd6487855552636b8f93ea2d67631ef1da6f6474eda6040162f +S004/S004R13.edf 136e362b79e95407025f6ff390631e301e75c996538435ca5e8fee80b4c08682 +S004/S004R06.edf.event 4e057b134c286502a0bd4e491daa0ebb3c4bfdc21b738b2ba2453c8ffc558218 +S004/S004R01.edf 23e9645f492488d2376ac3a606d177043efc6e5fb00281c2d6648e4b06a6ce62 +S004/S004R11.edf a8b96b32782329f12accdc222331545707be4cd497f97f158de4c06e87235f8c +S004/S004R08.edf 1ab5909b183413283a7ea76e35311a68a4688558f0044b9faab3987291cbdb92 +S004/S004R03.edf b24e3ae9d302935be06c0905d1e925ad68a94b4a5b20dc659cbc63bb8481d503 +S002/S002R11.edf 694bd9fbee1305dbc212ea4eecb8930750f5e08f8cc8ea45e2b94c92ac5f5a7d +S002/S002R14.edf 21e20c72ae3f52cc95f6fd6d4b5b958e28fc85bc0d3886f494de97a82c2aa24d +S002/S002R05.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S002/S002R07.edf.event 350042ca5c37cad9cabe8c469aead2bd074d996b13c69cafba8d24cd8fd825fb +S002/S002R05.edf 16ad84f17851599da5e199106b9b29086cc5793d78197a46b36efd49602b35e1 +S002/S002R11.edf.event efa5d828429e8df393cf3e5b452a00dc9f05f3ad9eeeeb119ad18eafd1cc4e2a +S002/S002R04.edf fcd37831378c411d50c223d97ffbc00949be2271d093f1d8e56bbe7c02bd1539 +S002/S002R09.edf.event f2f8656ee521f666124ee80cf26440ad4cb3e88315a64306e592a3424ebb8ee5 +S002/S002R06.edf.event 5e6dd7d9983b10c75f267d25fb4f039777b8f17f9d64869cb39446d1e9306505 +S002/S002R04.edf.event ecb3c28bfbaf7c670aa5547fa414949828cb36fcb3d84e0389aa669e01381627 +S002/S002R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S002/S002R13.edf 6c018440a9b52980fa40e4b9b9baeeaec7c94fc6d1c0b88c7fb32f1dae3a5a45 +S002/S002R03.edf.event e7ef0a3d66a5c814c6e917ac8f9bf83d6056810f78dd0c9296b5445289483fa5 +S002/S002R06.edf 08b07a8495a51ddca66a91fcc1275651f2d3e6b0a7a56711f06769b4ecbb8d53 +S002/S002R07.edf cdba64ad60574903248aed651d393c148df3c611eebdc9694717a04e2e2deef3 +S002/S002R13.edf.event 533737c87655572a20223c364be6c8be7a035cd8cf3dc42ed8c418e46e174661 +S002/S002R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S002/S002R12.edf a3d166b23375942a5ccf352924f7766f0ca9cfd1bac7951175e710d978f5239f +S002/S002R14.edf.event 5b005cafab5d6d8968aafc95da4c7b50e36cdf3bf2d71b98cd18431017101fcf +S002/S002R02.edf 666a9299341ece77522df050b1ddd128179a540548d82642ba975a65b4f6d84e +S002/S002R08.edf 02d64941f6bcd1635bc7dd187a9553331b73933e9771f4e7c59249dfc5632c5a +S002/S002R10.edf.event 8e9579e89d1ec6576609e05f5e6215b4322a4c5375265379e3acd7952866ac83 +S002/S002R03.edf cbabe29620b19978454bc429f59976f6ee8f32f6392e4fcdf7e463981248072c +S002/S002R12.edf.event e19fc47bcf5b585ab27ad756884c0c4cd37d239bb04c8bf15e2f33fe9fe05e18 +S002/S002R10.edf 817961f28a7acfebc45c664f1a9e40dcf4a8e1e1e51dc089062d7e3e2cef44e9 +S002/S002R01.edf d542689b31c977838f20b1a2062865b98ecf10a7e9702f56f101000d47f2ec68 +S002/S002R09.edf bc439584841a5b637596485e1979c89d872a90f2270a363a039a31587954ec4b +S002/S002R08.edf.event 
d45b05b6c6897898bdd688c12fad776011d39250bd1bd8de7e7745d939b60e3e +S093/S093R08.edf.event e129d841c2e51546d5ad32cb6c2a8303e4c0bfcd90dc9e3e821c0c40a9a049bc +S093/S093R13.edf e9e7a6f1676cce8250555d109df575bf434049a2be4919faff324defe427fb54 +S093/S093R06.edf fe0a178f7fcfcb2f60d9981e7d5c57caf1cffbac74b73e248b1f4ddcda8cd07f +S093/S093R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S093/S093R06.edf.event 98da35a4768842863f8f05d63e959a36f12126a205e0370d8519299fc06e56ec +S093/S093R09.edf.event 3593f38db6b9b0b72284e6ea58a9169bb2459a37f75643fd634363b665a636d9 +S093/S093R05.edf 08c4b631fb815382a0ca5a3b76309308d589317888cc78d9254113cc71b9806c +S093/S093R01.edf 2d1bca83825b37f2186af5dbdb0c4d7c48d0a971903761a84c385d7dfeb3f54c +S093/S093R05.edf.event 386c51194ef175e56c4d94b5e327133f39929095caa7c3ac86e89628f6fc0610 +S093/S093R02.edf 35ccae312fc9b7a87d3b485bd8cd0721c579d722a78e5e0802d2ba65f07a2e66 +S093/S093R07.edf.event 818acb17a86d3ee6425a512fab58b363d01b6ccc783417cdad466d1ecddc506f +S093/S093R11.edf d312b4f5b5faa38dbc90b0ce863a4dd8181f4a32fb535487fc049b2cc9a523a7 +S093/S093R10.edf 6f0f8189513bf06f9a0d7e694ad12b7efe948195f4b077a57706936b79dc6e6a +S093/S093R14.edf.event f2f8656ee521f666124ee80cf26440ad4cb3e88315a64306e592a3424ebb8ee5 +S093/S093R07.edf 1c493c8dc14e1e1e01c02e0cbe585d1798be82e1503dfdcfe7066db93387157f +S093/S093R12.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S093/S093R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S093/S093R03.edf 20c0e4a51b3eac6047fe26e1f3aef29b4f74caf5e335db090f682f1f7610afb4 +S093/S093R13.edf.event 2d33f0e6820d6c85c1b096ad02de0f117c60e1202edb58aad42f3ec835f3162b +S093/S093R04.edf.event 2d33f0e6820d6c85c1b096ad02de0f117c60e1202edb58aad42f3ec835f3162b +S093/S093R10.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S093/S093R14.edf d061f438e28cebbadad78943f0d70cefc79b82b20ffc8d9421239ed5cf912aa5 +S093/S093R09.edf 5cc82efff1b3669dcd61697c00596eb850a7117b55817100ff100849888b5fb6 +S093/S093R12.edf 3fea038b8ee2551bfa67f17a6dd668a30e70817996102b7b30dd7f8d7451bd47 +S093/S093R04.edf cab05a2b51874a488cb968bb6c76afb883a7057e384c0af1b7a766e34f4a1de2 +S093/S093R08.edf 54d9a7c0997e29abaa6ffec4a48e32555fff894d83e778c5e974452665835d4d +S093/S093R03.edf.event 6adfbec29ec794c0e3c78a211cdc8485ffa3b00bd6e1c3dab9a7fe1bab88aa88 +S093/S093R11.edf.event 0724cecedb70811bcaf06ec5a41151edaf5209f968315c4da2e3a25bbf0fb9ec +S006/S006R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S006/S006R09.edf cb3af5f176ea1f5be8e493d80f62eb374adf0a92259beabf254a6b423711cd64 +S006/S006R02.edf f114ee8be3445ff48c78422d4cc7b6b68ab83d79f06cb47634b1710796aeda4f +S006/S006R05.edf.event a38580800381d773ef251e941cbd4e02611c4c4e4efab3c383f14300a3394f1e +S006/S006R01.edf bfe32b925fdd469c90ce6913f7409cf85e4492b40d36c77b83511592d6e4c647 +S006/S006R14.edf 4fb5946ed7a29268af9b1770db80d4c6a3dbeacab4be573b6da9c095b8a5e68b +S006/S006R11.edf ce5ef60e07d2b1db8492739786eec2bcc6bb9e85b05fa0c9f2851e3a0f3e9907 +S006/S006R06.edf fc5e1cc21df8b1de6a63fe1a063db27d14ac882d6105057c9d4f924365730a4d +S006/S006R09.edf.event 825d019e301e14c4cc2a396252dd43ba79dd75489c263fe4af3e18e46c5dab2d +S006/S006R07.edf 56b8c705b9c1406546ce90bee6373172f43ec8bb5437aa9772dc2512405e6a17 +S006/S006R13.edf d4ebe044bd1f71c2a653bd8ada69b343b79fe18d9002a24ba96917533ecabe87 +S006/S006R13.edf.event 537ad705e53a339bd1d130f3331df882a0416fb7e95c4f565d283142dcd120f4 +S006/S006R08.edf.event 
88e1ea56ed0459a99b011a3ea2f25f947016dd359b2d4fd683f838ac00d993d5 +S006/S006R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S006/S006R12.edf 296b5cb14549098dc55ee047cc9ff63faccf480e432b94797ff73cc7209b7353 +S006/S006R06.edf.event feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S006/S006R03.edf 1bedb6634f28af44942c14901aa82f420858bc367fd5830482b4e8cf7a28bbf1 +S006/S006R12.edf.event 2b0cd50273254147215db0f2d93c0e409b2279fe37afd6cc7d86edb7df57486d +S006/S006R14.edf.event 0bddeca4a8cd621dca3130a2d5affe793bfed62c379600613bb61a6e554ec750 +S006/S006R11.edf.event 69bc42ecdda8587b1f3dcaeb49f434eb51439670272d896577812c1a8c0b14aa +S006/S006R04.edf 4bd532bb728e1f2ed2afc0a5c162830a07465b6a4a58e105d1316c6ff1921f3b +S006/S006R10.edf.event 48ad6251760a77acc67b82ce0b201f4244cd1385a6181c8236487af76fe1636b +S006/S006R04.edf.event c6fd76cba5a912b92a280d7d54b9158146ba5522f81d9192baecc014a6c9149c +S006/S006R08.edf 006e55ec16590f5f5b9230ac53a2f8fd0660960fa7b4b512ff61f8f7e2f8714e +S006/S006R10.edf d4a1d432d4c703decce7f7ce0a6d7c05a030fd19f562fe2be52d4c581ca24365 +S006/S006R03.edf.event f500286a76884018ad149ef34cc8871332593723b30ad3d79f8f39236a5cc25f +S006/S006R07.edf.event 190f359cc14939d921985886ad1c9081e5e2059b38ae9d130845e8dad044d790 +S006/S006R05.edf 2b793afa3378f56fb546da8265f3efad685d1201b26bc35ea5b0f660a15d87aa +S096/S096R12.edf a55f541160ec1f29086e8ecc7e54ee60fd5d0ec9edbd40313c30bd4c7695c81d +S096/S096R14.edf ad6c2bb5130d2c4379d08069f45be5bd6e16e2cf52d3d46c480a211ad4fc0c0c +S096/S096R02.edf 2a8c6c0c88ba142d17c19c902dce4befd1a2cc497bd43e112c14bfeee792ee18 +S096/S096R11.edf f4893d08eeb086de8eb889dd3d36ec3fe8ffbc56c93ade7a08ebf1af6c731877 +S096/S096R07.edf 1d4575fea624049f78bd4139006308fb88cea1da31e51690f0aef14c5063cb06 +S096/S096R04.edf c39a9977ef6b916ec4183e678a0eedb12695ce9084e2a43fade39a6038cc907f +S096/S096R13.edf ea7994757aa12872aac2fa8bc6aee5b48fae69f74d69e9c060c3f95bed4fb98a +S096/S096R04.edf.event 3d40baf6066941639ef493e2ee821cfa6bd1a236dff48c0659131c0e88fef481 +S096/S096R03.edf d84c57263849e5980aaa0ce9c4b40590ffa912cc45ddb9231de7f6336b8af234 +S096/S096R12.edf.event 37cc966752b10d3890cb16ee78927420f7facf9e9edaf86af928442f1f9df3b4 +S096/S096R14.edf.event 94d1d7007146c80e4f45e873986a770b4b807a34cfeca23f6a7d9054865154c1 +S096/S096R08.edf 6dfcfa5182fc32ca2aa8bf74722f43cd992b2bce64cb7fc15a4ba13a9533fe10 +S096/S096R03.edf.event cfdc7957f5c28cd1f437cdb649750034f1340084be6ccc484dad902fff9127ed +S096/S096R05.edf.event 1d29aa2dd90032d41f7c1c6386db9b2b26b7c29b87234d56e63f65c958acaa3c +S096/S096R09.edf 77c04af955fa1af9dcacfe325d01bb99da92f48d30a041f68a6f8e9ae3e2358b +S096/S096R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S096/S096R05.edf fa817e1acdefa929755a9e0e5b8dfcf85ebd08369ab5d9be92287ecaf172827b +S096/S096R09.edf.event 37784e11407ce31ee513275216bd40c96a4580f080500f2331a58f58a3ca826b +S096/S096R11.edf.event 2e361d8b420d6db7c1d4372f2f615a9f9290ab6956917406863d12a1fdec8f5f +S096/S096R06.edf f69902e69dc0894d9bbb2aa64a154571cd60d867c885578a35bb312ef6f9b831 +S096/S096R10.edf b5e8594fb191adf76d6f29a1607cde6435b25616b0fab99bc18f24adedb17dc3 +S096/S096R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S096/S096R01.edf 005e6ffa9938042c262035797bcbab224dd6039236872416dbe0ec42cac91e49 +S096/S096R06.edf.event 14a453fc6552b2a6d7b55329ca3533853a89a83368101bee6b522e4a67d37283 +S096/S096R10.edf.event f345df3e4e6ca2a51df43688b6036dc5af9a89117b2005dd4a45a7e3886d6bee +S096/S096R08.edf.event 
7bc37a046920a5e8c90ae816bf99bac5ca23a1a0ad7d98c1d3c2c5d698871986 +S096/S096R07.edf.event 3c11674692ca50df8414eed1ec2d7308aec7f8ee62785db4f654ee632e18d29d +S096/S096R13.edf.event 405867bf01702626e40e74e21dcc164b7e87d59de89a4e6fd2c4ce561f2e6c1f +S088/S088R12.edf 930e7f40798227e75d28c35e9d79b6c58c78d742daa1daad95f967f633ee02b3 +S088/S088R12.edf.event ccb7718e9ad0d8edd7de8d12553d98a89bcb191c436a6067b3e688a579d4abc9 +S088/S088R09.edf 06f8ec3746f7fe6f2fd6e80465bc8d94471cedd91bbc7f9ac0f1b8dcc00fe65c +S088/S088R05.edf cecedd6c8a8f4320a038a2b32034acbe54bdfbc9f2ec8ec5e04f00eae4e15ece +S088/S088R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S088/S088R04.edf ce6b97a3e00de1b844694fc889ec0ac568fb5c0129130d8e701761fdf0e2711c +S088/S088R08.edf cc57c5b3d363247feacb04b4ef37a30b930ef7a202449874f3773a24af5332f2 +S088/S088R04.edf.event 01f74ce2c10cdf9ed7fe03895dd7f42bb5a5c5b9b847f0f0d5143ef0f1d2211b +S088/S088R13.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S088/S088R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S088/S088R06.edf e4bb1819e8c496f228e80d60335964c5ab8e4e1fd1084bc1238ad573aecf748c +S088/S088R05.edf.event 0930979151ea3e22809e2b68d6fad0bb672dea8e58caeb269c20f5bbed49f1a3 +S088/S088R07.edf e7eed7f6d17a3d7e11ef52de0237ba2d93508990406cfc5fba40c91347b366b6 +S088/S088R13.edf 291d09d8e7c444f9dfa93db24cf69f7c64f6db817c6c7a717871d98edd7f8549 +S088/S088R11.edf a7fa83aedd07dacc07408fa53026d4626aa9680968946930a6f369bfdada8778 +S088/S088R10.edf 1c49b9ba7cc30f59443c4292aeaf85dfe81f37e09e4e24db3cf66c7e45a01d3b +S088/S088R03.edf 95c2618922b253bb9faf4a565dc64bbd3b213347764aaf470661b5ea3755c241 +S088/S088R01.edf d7ec3aea4cb49618ec70a1c223c578afac19338e4142807ce9128e78aea49618 +S088/S088R06.edf.event 01f74ce2c10cdf9ed7fe03895dd7f42bb5a5c5b9b847f0f0d5143ef0f1d2211b +S088/S088R08.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S088/S088R02.edf 52a17441943433ca8965ce7a21262166ab3ac6e8609e5bed3c39e2ca12cc049c +S088/S088R14.edf.event e88110dc871719db682717f3a256188ebe916d76e5a1a09b0cea908778ae4424 +S088/S088R07.edf.event 0930979151ea3e22809e2b68d6fad0bb672dea8e58caeb269c20f5bbed49f1a3 +S088/S088R14.edf 8a912f35e39b002fe61206bb8661bd783a7b7ba23338f00b8e14ffa746938d0e +S088/S088R09.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S088/S088R11.edf.event 82e7ace9f055649957ab04324ffce9101db68bcfcceac1df6786e304f9de8669 +S088/S088R10.edf.event 682de6811acbabf1882380fe24d80b87fda2ac6a48fbdd6eab47970ea1190440 +S088/S088R03.edf.event a27a31471bf1b8bc6ff9b144eb110e9eb0711b24514aaf47ed1cd94395c10b7e +S029/S029R14.edf de0767217b8a403b9670e09ae75d301bfca1e31c4c08cef8678091a038ed9fc2 +S029/S029R03.edf 8e65ac9c88f3dc61adafbdc6809c1bcb9a46bc9ec911239fc8fd7726416e2b5b +S029/S029R07.edf.event 5207e825d15ed81ce802718b5134141adfbb78f1e5f1ca62f6083c6fc9cdefb9 +S029/S029R05.edf a6b3b806a7e4f79ad706ea4e65aeb1109b37730a87919d46460aa8682433eaf4 +S029/S029R09.edf d3acd3558084062eab5a28643879f51b03b8b6377e1cb31d8d45a93517342262 +S029/S029R11.edf 7c6f7cce142ba49222a0f4dc65f0094a7f1a0b8690ff043b834f043cc4286408 +S029/S029R04.edf 4674f7e44919af804dfd99a38004f6b9ae62b21eacd61ca389df55aa18a00f01 +S029/S029R13.edf.event 5d2f63f6f7525c2a049a2a72e3cce5df4e2f537e88220d558ea3f39afc316bb5 +S029/S029R12.edf.event 8d1471f501a31820d94d776160244282b4f4d8c8faec4fcedc3041360481c5c0 +S029/S029R01.edf 48641d0db6e810c96273d8c26d52510132ae503badf74471558b7c12306265b2 +S029/S029R08.edf.event 
e79dd10c21416b18d7df92203956af2ed5ee19efd50147912adc49744cc31c00 +S029/S029R08.edf a77d299e99458745c7010d023698208f6b0d56cd5f2a74628688d08a600df781 +S029/S029R06.edf.event 32d42684dec82d19ed241af99bffa1b231a1379487eeab2ca6ecf4e960d72495 +S029/S029R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S029/S029R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S029/S029R04.edf.event 8968db92630d7873955e9e9f9858bc44efed375ca81e0e18966b9e0e060b589c +S029/S029R03.edf.event 064ef0b07a868c81ecd78eaf44c28915f919810c3799f26c5fa5040d9de0a71f +S029/S029R11.edf.event 321c4a867784fc95b428c11912af5a17c303ae8df6cd8f6e4124a31ccbfea8e9 +S029/S029R05.edf.event 9ae43452d01b3a55cbcf82776804cae07420bf106c0e5d7010ce1d987637c83e +S029/S029R13.edf 5c1e9299ec2f5c58455b4f9c42ca729dd5013c898ffcd4b33e9114b7d030509c +S029/S029R09.edf.event 6ca3ff62b8d0191c28d6529edb07636e822a980b4ba1b463fd26b1ad8a09c5d8 +S029/S029R10.edf 86340bf35f707ee4194c4440a5bbdc1938b885f2ae0bb425cd3f37a82d352133 +S029/S029R10.edf.event cb58aa4b6d937e71ea9ca4f60e2e4c78b06334d7bd11091e5a5927fa712064b1 +S029/S029R12.edf 9d01ce9be2230b87a47050e0fcdbbfde4ff10144b9466fd286b618e59a81473a +S029/S029R06.edf 0ca802caa9950b5277730fad8b5e8afb0363b0df99dfebe2a7c8a0229e6d2760 +S029/S029R07.edf b99f383631ea61de2fa1b36d28c490a72009bf5bb192294fc1ebaaea254b070c +S029/S029R02.edf 7eff9783f20f4d3521ae877f26d7b2c87c609a920e16d12e5479072c2cb65671 +S029/S029R14.edf.event 5a42bfc69bfc027aed4436c7be8edf69672b70d1ad5a4354e11a92ebe527fe53 +S047/S047R12.edf.event 020a012ee89a9fe2c7bcf34bf02c0d2d78b688185ed74043d21a3d53053e3882 +S047/S047R04.edf.event 99dc24a5732bc2857f1d3fba3d1a5b942b288ef56950e45354e03fb10183bbe6 +S047/S047R07.edf.event 296bafcd473fd6051b9cf19b1af83d7f0a4ba4eb310f7fbf462aa7ac3ebb8120 +S047/S047R06.edf.event 1a93c91ef79032df8664905730df14e57174a6aab494378ea976c6845e662faa +S047/S047R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S047/S047R10.edf.event 017d78426c2e9f2b1807cc35ed07405ab8ff157014df0701b8524f965539b319 +S047/S047R11.edf.event 7ee6977e5ff9c282ca6370406d7a9871d162328940b104573ac9f1d5151c4b96 +S047/S047R10.edf 65fa985cd59fb7c1d5e2cebea93e23608c59ebce4e4e7f69e4cc23542290dda8 +S047/S047R09.edf.event 96cef3a9a9e2cd7f438d53008e256b198c48c6e0d2f0b2d70e0d296180cd58b0 +S047/S047R13.edf.event a716ed693834141bb2b56720ce6124df46e42e86652ec02bc96746644a79378e +S047/S047R01.edf e9e5061be6fea706fdbf03bbc9d0d8de48476d036553a06ff0117c98b60303c2 +S047/S047R03.edf.event 472673d75763793097b36d5b0883addc030cb3790ee84a18c36eb33ef80d009b +S047/S047R08.edf.event 386c51194ef175e56c4d94b5e327133f39929095caa7c3ac86e89628f6fc0610 +S047/S047R12.edf d457e75b37b82724af1b7024194e5c4858b31402b01f8b2691bc9d4bd6ea112c +S047/S047R11.edf 15b68b4f9959a4239152f3e33f0e9f526cc8e8a5d20195f0910724c3f24170dc +S047/S047R06.edf 49d91dcc440c3579b89a557d2aadf39c3fd80f1ae219e91c6d4fc8c36c880562 +S047/S047R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S047/S047R04.edf 67a0ab9dbf9e0164fbe16036ec578b89056fdfe8efdb1eb4aa441502a23b5a5a +S047/S047R14.edf 225c6e69e9f83c33c7529bc91bbd5a446df75589abb2c9d67b34f6b819561720 +S047/S047R07.edf 19b9ec59b71b23bc0228076fd0250cc3f942a14c39bf8abf3ea756e3727b81d0 +S047/S047R02.edf bf7069f688dd19df47c0f9f9b0fcdd6c01fba48a170d5c0dae8551abec383388 +S047/S047R05.edf.event 8ad7739d7d370e087ebe5db6ceadbc03ee38d96703e78cb5c91f2b714f07a25e +S047/S047R14.edf.event 5b8eb2e2184e2816cb3f7104149139af83c25f381a47b42e81c9c6aa1723fb5a +S047/S047R13.edf 
4c8e71039e5368b6e6a9acb68b81a4f3bc3c9bb5911d925f0064eceec6b92a9a +S047/S047R05.edf 04dd463e35a6e7099508e49eb2a92dc4cd9bd2b893f194bf332a705232d2b8e6 +S047/S047R08.edf 4de761cc5ffcb89ae9a1d4e206a0397742e7a6e34e458bdf8e50883a3f7f6328 +S047/S047R03.edf 9f43c92bd5cbbc1eaea5b1decf8e66b8629874ed3b81055551b7a59574dfe28e +S047/S047R09.edf 988d7b5518596909926ef6038fd4d6dea34310e874e3b681331bd7a5c8de2f86 +S070/S070R07.edf.event ed0acb0c9635a838cea852cecffa253bbee6e084a3404ea9ac9900188570c878 +S070/S070R06.edf 94aaeafdd41b40b187a9517590531be5559cd86d21bb729bf797d32d4de584d1 +S070/S070R04.edf ac0f01d6df57bd65645c2c6a5b6685d38b11727bf41927fdca90ef7e2d601482 +S070/S070R01.edf 5dbb88fd5e39ce0aae257321765c2b9fd67ee9248993c55fdcccf79605442f23 +S070/S070R02.edf 26e7f3e389e357fe2070157a1d4fa0f504cb8e9cefbfcbb2f1cc5997e71a0e7c +S070/S070R10.edf a0235c84275a4066a7905b0b3322d604213c00c803d59ed9b55b912d9cdd89b6 +S070/S070R08.edf 8c3737b379124d86fe35ccbe37164773c97926d6c7731007dbf54722848e685f +S070/S070R07.edf cd366575453680fb6b0af26f93bd220b17c5da1d66ef4a594a7c489c0f1d6907 +S070/S070R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S070/S070R04.edf.event 9825c80a69d06961cbce2e12cf8d0aa56d063e5f4598312c4e5b78693a0219c1 +S070/S070R08.edf.event 0028edf9b5fcc76311706f809ca44f884f227acaa02c56031cf87a7937c1d9a5 +S070/S070R05.edf d28e20d96170e384b92c71cd7a1ea6393e7f28f779eaea3c7fb7142f5d90dc83 +S070/S070R09.edf.event 72f39ff0119f687cb1efa8861280ba991bf463a2eeac59557c3bb3ea57e09aab +S070/S070R12.edf.event 951ec5064433ae4ed0e83f00905ef394c5250b33e1122f41b10ea8917d13afd7 +S070/S070R05.edf.event 7ee6977e5ff9c282ca6370406d7a9871d162328940b104573ac9f1d5151c4b96 +S070/S070R14.edf 3661f01ba04678982e2efdd3823c1ac1f9b3bc254b1fbc7982b28c5a7ddeeb4d +S070/S070R11.edf.event 190f359cc14939d921985886ad1c9081e5e2059b38ae9d130845e8dad044d790 +S070/S070R13.edf f724ddf4f4037b8e7beef57d55a76e7641fb9ec8bf98bd0a50ff61e2d9433129 +S070/S070R09.edf 7c69a40d9f5aa95bbe259e4cfd8a2664934a1719cc66a6289fae7d1fddf2aa2f +S070/S070R10.edf.event 8aa5136ec7d40284d8eb37993d435226d73bc8621186eae0d173bf0e65165054 +S070/S070R13.edf.event 1f21e6a28cc1b59fdc3667207c5b1029902f6d0018bb978cecbb5905b868cb10 +S070/S070R03.edf.event 5bca2030ba3ea66c594c376bfc5a701d3f25c2f85ea7d72b8abb2b2bbc5644ba +S070/S070R14.edf.event b50d31cc4a2ec520a336774ea70761d08ebeef4930f053a00ed66803060bddef +S070/S070R06.edf.event fb76d885a6c380c62ce2da054f5ec78c9f0178752694a36e4b78183ec8850830 +S070/S070R11.edf d54f1b715e23bb3a48a6f904ae2eb50d109447fe4c483eadffe7e965339c34de +S070/S070R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S070/S070R12.edf 9519f622bb5de5cd8f24eb6a765888a78a28bd17446fc9a41ec13ba84b75a0db +S070/S070R03.edf 9b04b49209c33999d70119b985f0ed5ea5dbe3fd4363dde555e742872ed7f5c5 +S072/S072R08.edf.event 7234785f889c4885f0b35192d9919da14670dae4f01dc951f9099ca59d5c7bd4 +S072/S072R06.edf 1b615047f9d42fd7e0da4f649348ebf2b1d929c6b81852898189dfbaeac47901 +S072/S072R05.edf 135ea7898b7c7454535a4be865cb286b6196f20f2dbbd4ae7fc8a7919e86d44d +S072/S072R04.edf 854bc325b4913798858d5ad099c953c6a018cbbb73a1dbf465882ea2f965d5f9 +S072/S072R08.edf 94bfc11c4720ca4a52dd683f74dba027c47a3e00a634ec15c816f7ff3801ba8f +S072/S072R10.edf.event daacde82696d6cf6075cf81c698fad407304dd3c18071f700a24850cf39d5427 +S072/S072R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S072/S072R09.edf 5d1653c82bbea3fa795dfd7f97e535bdbf17587bde003ba45cc5ea4101f965cb +S072/S072R12.edf.event 
10b6367f469a7259d23674e3f8fc88a24dbdb41460385095daa9a795a3b47010 +S072/S072R12.edf 8d4b7877af0a9f04c281ce539c65085ab41d21cdb97b28282f55ab26f5dd6afc +S072/S072R10.edf 29700aafb0ceb72983447355ff62479f7ac371db4fbee7c35e2b9427412f647a +S072/S072R11.edf.event 2c7eac05570280238d6e75f11c28223cf3c6d9c9bbc4a888e05c2b99df0d8b15 +S072/S072R13.edf.event 90ddccb5a9607fc09dfb6e835c770a03f7ef1f0e65eef2306953bf4894302136 +S072/S072R14.edf 45cf02bea1f4db899aec41ab64a7eae44b4d6e2fc2fbac0109400e84706a6602 +S072/S072R09.edf.event 357bcc1ef517dab5a2c819b99716cb28db0a13240c332e1c37196b27021825a9 +S072/S072R07.edf ad44b64c22a62b119afba8ca57a9dd1e632da8e422abd2dbfe92b9088df96250 +S072/S072R03.edf 65e1b35a0098d9eb6a64f618ff8b106662dc14a6624a392e7c5bcbf675a843fa +S072/S072R07.edf.event 880bc36e6f5d82ca838cf6169f989c0e959e8c2d5970a205282cc1e8b8a7c068 +S072/S072R13.edf 84a5a50facd102fbebd09f7dff4ec4459beeaa0ac2c79d16a165de0aa37fd215 +S072/S072R04.edf.event ebbd9bc0cd84bebd02e71f43989cfd362624adbb968d778a6e759a70c66956a0 +S072/S072R06.edf.event 352673445bcc538539b47f676cd9701d31de8178217f7cf903d3b878f64b294b +S072/S072R14.edf.event b4317c81c19626393ab603e338ee0d39f9b6b23f4bcd2dd8ada827ee5a14093a +S072/S072R03.edf.event bb4d81b1643558abae8dda4829a909d44c1d41b95f317fd8470886f9c19a27da +S072/S072R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S072/S072R02.edf 4d9044d0e5c9c0d71becc31e5f5c1eb5739303905c4523566bf63de7497ff5a4 +S072/S072R01.edf 7742f6c605fd73b04ca4ddb7dbc1db4a11cae8497107d3720cae64bcf1fa32a3 +S072/S072R05.edf.event 76db7f6afb0264b534d087d90c2136b0f5e75f69d744c33be087337e64d4a360 +S072/S072R11.edf 3a2d6e7ba690f2ca53a716cbb6eccbb6bd47d1ad1021a819a60c1e9f68333b45 +S017/S017R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S017/S017R10.edf 1cb81c247ff4d726beca43e0bd37d6ff1aacf0d0c63068c6d389af327a86484b +S017/S017R09.edf.event 8ad7739d7d370e087ebe5db6ceadbc03ee38d96703e78cb5c91f2b714f07a25e +S017/S017R12.edf.event 3ea2fd67495fde74ca6a29f7b4f660c7de2dc5720c3a99a762e365ca4d09ab34 +S017/S017R11.edf.event f1d83aaf535b3be7098ef9960def69c712f4dadb4f334e40434e155e9088b299 +S017/S017R10.edf.event 950cf97301dd495a2a6a7a19a93d67d2ee9ee03b2675e300582e19305f5a9287 +S017/S017R13.edf.event 0487cd7ee2fef7813667623ecbe113ebbdd5400e7dff28fd7f0f7b1e00d585a6 +S017/S017R05.edf 76f7e14a4d6a3fe0914722ba2ffdfd8e67f3b878b530872e1d6c1d6fd0840217 +S017/S017R08.edf 902e30ad95dd3786fa3d3e0532743edf049850536cf773974fb9fbe3f4475da5 +S017/S017R13.edf 11045212225733576e7ca2a908205f0c9780e25d3db13ba9c7b3de8f990aa076 +S017/S017R01.edf 051e38a4601ef46f4e5be488f06e3a6c0cf53263d6166075dd94cbddec38624c +S017/S017R03.edf 19911c4a9a693eaed4addcee0ffd78589dc3fe35724079081079ad29e56c911b +S017/S017R09.edf ecb7b472281ac86ff5ca02240d785c0fe94e910f4983c3beaba7998caaadff46 +S017/S017R14.edf f95cb81454f050fc58c4d2bcb7698526e5a6cef17b157e601ec1d4abb96eb3ea +S017/S017R06.edf.event e14dc31d476c11841d45564cac660be9776fccedd3dec71dcee4c5232f00f25c +S017/S017R06.edf 0e32de926757e1a493c3df6d85955c30e0b45b0caa7f1ddac2a4cb483257891e +S017/S017R04.edf 9d13376473bae5b868c07fefde7d4ec465573b393a4831b0142a26812e5b6bf7 +S017/S017R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S017/S017R05.edf.event b0c7884218a114ab4fc2b8cb09b2c8f1bd0ddbaf69aac65191618c0a230f65d3 +S017/S017R04.edf.event 037c923028aca24716dbc8d307c618e19d4e94bb1e759e5ddb8a9d3b6b205a86 +S017/S017R08.edf.event 48eea52bb9946ed46ef7e1b621c8558d41739ffc9aa7ed8fc25eaebd171ac223 +S017/S017R07.edf 
850cbc48702c9a180b6fc2408762554fb21d1ad9a6e57fcc33963055de46b517 +S017/S017R02.edf b0a760d49747671b62f7ec39cc147cdf4a3f1a6a7e2f898cddc4f5a0c8b73c5b +S017/S017R07.edf.event 04192a55fa7728d34533e76a1b831fe5c94d79c3bc9f1028f45644c51892a744 +S017/S017R14.edf.event 2b0cd50273254147215db0f2d93c0e409b2279fe37afd6cc7d86edb7df57486d +S017/S017R11.edf 864a47610ad9fe92a8dfe0b667ef81aff31f65ce988f608642572c4a7d7ee859 +S017/S017R03.edf.event fb7055ba8adb05cadd3009cfa9adefacc5298e01d2b7156815f6424ce1d4f3f6 +S017/S017R12.edf ffb8ee6f01d5b12337052ee1eb9e1caccade52937b6999d34925e90634d5b82e +S103/S103R09.edf.event 3b5e25a187d5d82fb5b851ecbebf095c41a5496c5a40492c7a2c638245bbed51 +S103/S103R06.edf 1f716bfca1542c8a270fa5ec298ac2bc5d2fb47590d2d9ef1209c3ba2cde95bc +S103/S103R10.edf ecc2b6bf5823c8aee52aeb1093804a42139993110ad49cf584a78e87b6c63c93 +S103/S103R13.edf 5ddb5493a032511dc890488bc246ed22260b40d55633b8d26a2005e989100e08 +S103/S103R04.edf.event 8a76f461c0b920a909de21383cb5135f496cf1aa992529755f784ebf12db55c1 +S103/S103R09.edf 4ad22209206ad0dac641f659b404ef1895c6446ea11a82f387d0442fd92cd76a +S103/S103R08.edf 9a594869f6cfff74f5d448abd2268870fa98c57381bdfb3eeb7bf7372daabf2d +S103/S103R03.edf.event f8a465b9ebddc2704252299afc352d87e33c523fa8f80ca82a96fe0b268727cd +S103/S103R12.edf.event a4198dd583f6157a8688c48cddf8a62d6829703a2a057ea11e115214c9cd151a +S103/S103R07.edf.event 0e11701069314a60a51b64f11d88913bbbeadf584e9322f073cea02c7d91cbeb +S103/S103R13.edf.event 8496271f8f27f048601fbb18304e61a0950c33536381baa6e6d211de6443cc67 +S103/S103R01.edf 48dbfa8c73308311e3f7c7137b25cbf4a060775c1a460fb8496c50723b8fb231 +S103/S103R11.edf 0d6426287419f30577f49cd759d77dfb2720c84007f5942b0bc9e0f12367323f +S103/S103R02.edf 62217247b15b987831cddf456d3e45904d06d470926e6df2e4ae8e4c23b1b08e +S103/S103R05.edf.event 5ac7e9c852d23361d66baa9d38cfacc10651dcd9582bc391cc58f08094c06941 +S103/S103R05.edf 65a86688d81296f935a4825fd74a189587f02300b66654ba0a837d294c94ca70 +S103/S103R10.edf.event 14bdd1b94d8b8ccdce55e581601a4a304c1444e030aee15596722a70be24c5c4 +S103/S103R14.edf 0b97a59662e0f0cd76ee688f4080ccf71a93bb5db06d0dadc82c970c4aa62f92 +S103/S103R11.edf.event 9688994b1285c6e0afb366716a7cfc380bd4340c8701275dc9340f0713deee95 +S103/S103R03.edf a4d0c0209751833078eeb3d953e161d0fcaa02c4af5e93e93094d2add8106b65 +S103/S103R12.edf 96ea0eb56fb4470b1be09516ccd0561b857748abed9c6afd5302b24c32430aca +S103/S103R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S103/S103R14.edf.event b77225fdf7eecf463ffe5af19336030526137aef78111785b6605cdf4c15c95b +S103/S103R08.edf.event 30b760c52698fc58e43824eb7302010b60c8374cc35c21494035da15da835fdf +S103/S103R07.edf e40a112d21a463232a84e08c88496fdec8e7cf2e12fde7654c891ded54640cc8 +S103/S103R04.edf a4c707cbd94c08bb37718f9c7d03c7259389e4e43e9418f3887de35acfb1125f +S103/S103R06.edf.event 43583e190556326d49c6693ecacd19aa4b29002fcaba93157b7803fca2f71842 +S103/S103R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S061/S061R08.edf.event dbd0435f98476653a27f53b54a6757c6e4596f6d9318a14067e4dd50bb37a888 +S061/S061R03.edf 27558de88191bea04c87f3f079f778217fcd016362f350e8a2c0eac6a182dc67 +S061/S061R13.edf 194e5626af80b437a223c8f28bb908647d7cef050cde4aee91224dbe3e26bee7 +S061/S061R02.edf e6c090a982e016f10248e16123afd230f8725c5a8bfd56774cb659e94efc3783 +S061/S061R13.edf.event 405867bf01702626e40e74e21dcc164b7e87d59de89a4e6fd2c4ce561f2e6c1f +S061/S061R11.edf 20a555d403226f3ab61a341b768a32f4230d0613b7b7a2edb616bbff67e9f39f +S061/S061R01.edf 
b0728cf10eb1885b0d5cbb9fadc43dc1625dcd892b043f05f7a55a375ad1a3eb +S061/S061R08.edf 9cd27a3563ba90dd3ca373b0da2be78b64c7e4858cbc9954cf4f503f39809aac +S061/S061R12.edf e16007dff642ce2bd6275e806525a0d66d0829f39b6b7e7a8fe18cb3fcea498a +S061/S061R14.edf 5a6711078c8d3114c4708f012b4ad5a9f60f615ea9fd1e9874f348e5ee4d6926 +S061/S061R10.edf 9502b545c2e86b5403dcfcb98c32f7738c6918d9e21c893b61ee0f5f12a0773a +S061/S061R03.edf.event 7fbc239687b3b3100e651edd3c5804afc9493f15f29a15a7c7c523f9f42b5b1b +S061/S061R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S061/S061R05.edf.event e3378f798c2ae2109571b901374ec3f6e67c17b45e2f63b458ca5b1db30f1ea0 +S061/S061R11.edf.event 109d77619d968e04ce3d80665c461d8412a594700916e31d523f31604e109b24 +S061/S061R06.edf.event e697ba3ef839244357e1c111d7dfa9afb60310a0bd8b7935ea5a426a1711194c +S061/S061R05.edf a559920a213923175fd8d90bbeb55840ddbc0851e4cdf661476b19f1c5d8fa31 +S061/S061R07.edf b7a91ef5bdde5cdc5255ad9d8482d8b6f44d89a402d67041e05441dcdf37fd71 +S061/S061R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S061/S061R07.edf.event 3a98d78e3ff8d947a073981e739716fc7bd37c89605bf37bf0addaa16779530a +S061/S061R14.edf.event c6742adf7ce83d034f6e3cecede733919ea0d2fe7854dc4a86c12cf32c5c5aa2 +S061/S061R04.edf.event ea56fcb5af6dc22a09e6c52c6c50be06f5bf4722cba178188f67732c6ecd0395 +S061/S061R12.edf.event 4b81a244be0ef71bc8b32a5a151ca3746b282903e6c7a28b644879e8bae159b9 +S061/S061R09.edf 93ab6309318f1c2f9866e4b158badd8d1eca684ce936eb8d8d8b5ab80ecf8b33 +S061/S061R09.edf.event 82f0f5ea19a20ed8cc8994d29456053772eff3af83ffacc51907fa043cc0ef48 +S061/S061R10.edf.event 22a6b841d94ebe84eeaabd93fb3e0f00da65ddf3bc8de6d5a79394e3a1394567 +S061/S061R04.edf 788c7f7d524dc1e483e49e81f36920a65c464ae54dece6c96ce48b7d168a22ae +S061/S061R06.edf 24dbe6c1d0465c3bd447413a5b28920ea0dd241c05762fef4f19b5bae3a7cc78 +S057/S057R07.edf b46f328990c5607512f28a229769746f0322d8b50e9bc76eb2499abccd0aae27 +S057/S057R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S057/S057R10.edf c7c581aa6f0b7d21e348034fc4f22c3ce66ca81533a3b30252c6b8be10f0d5e8 +S057/S057R06.edf.event d704e59168ad57b1fa2751e18c013c84b7169c08d874e72c7388b74c93006559 +S057/S057R09.edf b6e2cfa63bf3ee3815fb384bbab18aff1c34254e30c1b0d0d4778f5bbec2e40e +S057/S057R07.edf.event 018a676bc733cbc27dffcbbf3767758e526d1ae08a5dbd0785d28e267132a3aa +S057/S057R12.edf.event bd6d931c82981d9463af509515eeb55f7b6499310a828316c8f4a3ea35ecc7bf +S057/S057R13.edf.event 0513fd04977ef5a66b77e72c59699e4e54ff57c226456d0796b1c58c38fb4d59 +S057/S057R14.edf.event cef39c5c7fed8cace25154d7385aa5c8666f28db51ba7c7224ce1650f9388915 +S057/S057R13.edf fdbbaa81ee07af8968795bd8fcdd52ea0a2b74ff686ab03666db07757e0b63d1 +S057/S057R12.edf a893824591d10cd67a04b1273ed436bff4522e14c1d6b6745f2e75348267d7b1 +S057/S057R05.edf 3c6aa834c303ebe0e89e012a0cb7b541c020dc40d028d54dba51ba16c3537998 +S057/S057R01.edf 0b5b499f95a86f7c04d63cef9489f04a81a335eac0c80ec8897eace504c0205a +S057/S057R08.edf.event e96a6caea4f9685ebdd4b9c4f79dee271ab2ca8fa30cd6fcc49d330a6a0c770c +S057/S057R04.edf 53d8515a722509864bd45b148d02730770f540ef56f44cb1e161fdec6978a0a3 +S057/S057R02.edf e38e0435c54470b41f2c2fde3685d58dddaa127e989474eb30955a5d14009969 +S057/S057R03.edf.event 211c6aa0a079d9cba1e8d336802ff8e126829175da37deb01fb3234c2fd81d9d +S057/S057R11.edf.event 6a0785ec7d7cd8c80087db9605dd320790daf7db8cdc6c9fbe68fad0470ed604 +S057/S057R11.edf f0195299b3289d582c4aa4fe027fb937a1c2339def7afabccd065887e8039220 +S057/S057R09.edf.event 
dea4ebd6eb695d78f9821d3bfa3d4a325dfc5d7ff6f514c6a3f205dfee689e4a +S057/S057R10.edf.event 8162d74d19617d3dc613cc4d3505a1e143edecd3fb9c7901e255495cf94de0d1 +S057/S057R03.edf c14f828bbab8801708392702b0af3536aa23ce706a18b212ff0599a566375876 +S057/S057R14.edf 0994c41b6b5b114b14d85e8af5c4a4cd9d54cee8dacfb780ec922d8af2c59b25 +S057/S057R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S057/S057R08.edf bb2842570aa4a89021f710397914cd810dac010cac428b75173adcc2fbf30a28 +S057/S057R04.edf.event 837f0145b9da4dcc73e14962769b9c68f3eebad462eebe9d8796bc8b099af925 +S057/S057R05.edf.event 1442094c7e4c8cf80c2b917010d3a5835f895b48a64150371ef6f1f36d5eb9dc +S057/S057R06.edf 4f03601e2259a409efad3ade660609c937960ef4dfa564c70deab294f5700c9b +S083/S083R07.edf.event 1db920b489aacf8fb0632d9e919efb2b41c903f8721a96cda16479fdb668ec36 +S083/S083R03.edf 05e2dd9755eba090bd9445c850a7d7d3ea85f1945f36b84787ee434f25ba9c80 +S083/S083R09.edf 0e3ec56ae46f1497d6b427940d6648257beccbf16c779903eb8f51ba66de16ea +S083/S083R10.edf 8772357764f7bb97617443ab303c47d851e0a1f0ba30c9f485507aaa0e0b48fd +S083/S083R12.edf.event a79f747a01fa0ee3d769d3c8e6d6c47bfd1e464df2bb7748eb057537007ded6c +S083/S083R05.edf eed3d080081210253834894b06568f427a996c40e3e11ee28a7acd7133684050 +S083/S083R05.edf.event c81d77b099878d1d392e93aa7a18a46b936b690bad605aa84a652b2bd9cbff1d +S083/S083R13.edf 02a1be3f1868f8f70c83bf073259c01185cbd2762bf96f488d0d140843ddc5e9 +S083/S083R14.edf.event 15eac883e797e576d72c57d60ca80b477563711c2f4f8dd16cadc5a529d40f03 +S083/S083R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S083/S083R04.edf.event 3048bcb4e0fbb0a4db3d630b323c27c5e88b2f2b6aedecde86b33b1d4f135461 +S083/S083R10.edf.event 3d40baf6066941639ef493e2ee821cfa6bd1a236dff48c0659131c0e88fef481 +S083/S083R07.edf 30da924b540a596f91e0964cc993821f29f3ec4ddea82aea78fd60706c6ac32f +S083/S083R11.edf.event e3378f798c2ae2109571b901374ec3f6e67c17b45e2f63b458ca5b1db30f1ea0 +S083/S083R08.edf.event a8e304ecabaf8d9f1d4997f8bb05c9613cb7743d7dc73f6af7fb82cea10f7792 +S083/S083R01.edf 5f6e11cd2acd6244310fefb52a9754242e5fd67e9d1e9fbc148c40ba6c660815 +S083/S083R08.edf cb1c0b233d3a0e618c4ef16f80cc5c2cfd400162aed06847c6e24b1d6cd2add6 +S083/S083R11.edf 2fbf5cfee173730d5a816355c31442044ed73fb20683f376ed90115786f0f21b +S083/S083R13.edf.event 8e32d162c32c431dc9eedfd3b87e45cd3f4466027dc3ebde41c1840612c6f52a +S083/S083R09.edf.event cf74c26a450e66b1953f1cafaa4b5e1beed2c2e16627f1466e08f1d0e4ca0653 +S083/S083R06.edf.event e8e41113aee5fb4f165be2a6037f6a4368faa2c5f90a63ec9b60f4466a2ba6f7 +S083/S083R06.edf 4a715e03ec3c310eb02d9bf03b2e16f65cf3a892a033956fbd60210c202fe751 +S083/S083R03.edf.event 7bc37a046920a5e8c90ae816bf99bac5ca23a1a0ad7d98c1d3c2c5d698871986 +S083/S083R12.edf 7fa028930f6c461c054e1d22e6026f954688b99817232e88f13892c4d7e2dd91 +S083/S083R04.edf 8e28c29d9d6203a35237b30878a1678b677e762561ee6f55854661284d9b3a21 +S083/S083R14.edf b94a229fa28185fc61737b30c0df46a3b40712d6f16f10bbfa369789b9010cd7 +S083/S083R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S083/S083R02.edf 27108699388b469a3d7c75e75d9156b168715f9cfec764c11ea45d8b188dab1d +S019/S019R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S019/S019R11.edf 0bbc66e7c1fbf22e6e8f43aadc075508d94ad0567809f0257d7b65d2b7c9ac8f +S019/S019R10.edf.event 533737c87655572a20223c364be6c8be7a035cd8cf3dc42ed8c418e46e174661 +S019/S019R09.edf 9640606d791c5eeb183b385ae2da0c187cce28b7491378fe9624c777cf063b47 +S019/S019R12.edf 
a3c4942a316f2cc431aaa358288b3c8f55fffa7b832ebca26705f68f007983ea +S019/S019R14.edf f1c4268a5d230b83417494012193d3511dd8d4de5930a9ebbb03ad2e26a75dd2 +S019/S019R08.edf ae16abeb147d185bf9bd6e1ceff37d95f73fa2b3d03561225a35447d72cc8020 +S019/S019R07.edf.event 43bc660a507d479ebfc1c782945c8504dc21abb3e55cd48acc2faf4db673e854 +S019/S019R04.edf.event 6e4a2f2b413bd53735575eeda52d35059687bb58d072e4de1626bc16d7a28d24 +S019/S019R05.edf.event a5608231854f8f457c107c9acdfead912cd6d55fc459b7abc2f11d77bb166129 +S019/S019R03.edf.event a38580800381d773ef251e941cbd4e02611c4c4e4efab3c383f14300a3394f1e +S019/S019R08.edf.event a7dd5d461dbdafc68a647202ccb3decd039a172ca33d925065fad2be2efab6ab +S019/S019R02.edf 0e82a0cc44fd65c15b9cfaac7143a9915c1712d6286feb81396911e7d4c06925 +S019/S019R11.edf.event e4d125d65ed410f2e37eb6c7f7075c585662cb4e5931bd8436cbd1b59c474f77 +S019/S019R09.edf.event 0c671e4e4ff7b21e4f75cb8796305c57d6ee3fc48e74337e26c3b9f5d49408ee +S019/S019R06.edf 1b0ad7d85228488fe95410bcb88108d41cd8bc2f9b078cf8c0641ed1e8656f42 +S019/S019R04.edf 97eda5477ba1fa369a3718b332a0d5ce6a71e37b5153d6472d65b430103a3d06 +S019/S019R12.edf.event 9e1cfe676de301ff52ef7ef70ad2cdd8b8c9562c01e98d8eae191a5c8c28aa46 +S019/S019R10.edf 809a866319f3dca5bc549d4ed8bb36a891ba270d4aee23950cb5e85c2819fdbe +S019/S019R07.edf 0a97151bc1c64abd2d943c3de22f64d769cdef62ebc5ba93a502b50f82a26f2f +S019/S019R13.edf.event 212052daaa4c0fbc66f91923236861d93fc52f73403f6b4d988205287f1f9ece +S019/S019R14.edf.event a190f5e706b1ba961aeaf8f8f44e40328c3825ee96b16903b64992c144c93a60 +S019/S019R13.edf 661f5140802887726cee2dd2f53657bc61025610686ba187796a67c58c691be9 +S019/S019R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S019/S019R05.edf ca4f301dbdf4a3a6d2a92e747938e249a0b14636ced35d1049c98165090fb233 +S019/S019R01.edf 2c542b11398dfe863ffdfa3e94aa8ee3fe6414b5ca384c7a242b018dd9e9df12 +S019/S019R06.edf.event f2f8656ee521f666124ee80cf26440ad4cb3e88315a64306e592a3424ebb8ee5 +S019/S019R03.edf 27c074fa068c8e6c8b38479d83f99ae810ca06f2d60ed76339f30c3da111f385 +64_channel_sharbrough.pdf 258b24e77052a735ab66ac99692fc7b815f1c79b6e3adb9c71111ecbb1636cd7 +S027/S027R06.edf ca8bce6ff67b1815190481926ed96c73dbf0dbde4a2cab4f1545be725d04d685 +S027/S027R03.edf.event 57e9107b34629563ac9d22f509b6f40e40ebedb8afaca03c2199613287fc06ad +S027/S027R08.edf.event 8fbb43d322f1567ddece82c464cdf460a9df3070b5684895a7bfa7febb8e9950 +S027/S027R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S027/S027R14.edf 3035250ffd4e397cf1475df74263c66af7e2e9e04bad3319de9f92ebde448745 +S027/S027R07.edf ed97a6b4e01e5d5590da6bda8ab79197ed47a998932d391c2c950518aa8c17b7 +S027/S027R11.edf 9f361a90bac8599e7ff0132bd7524b6e12472d6bbd858ec2b75c70b6c4c70bc9 +S027/S027R09.edf 08e6788c429964dd22318fbbf5375a48047ee8e5b2b7a4fcab369ef3ba7019fa +S027/S027R07.edf.event 74fe6694983b37f44ede4efdbd00b6344db3ee7dfce3f27d06f5ec67ac6fc65e +S027/S027R13.edf 62d5ccdfb702cb9050a3243d3baea6466cf591c47c3544128daaadf33f736809 +S027/S027R05.edf.event 60706e2b67a15786207bcb87a2548eb3631153f455476ae03ec52b135003a858 +S027/S027R12.edf.event 5b005cafab5d6d8968aafc95da4c7b50e36cdf3bf2d71b98cd18431017101fcf +S027/S027R09.edf.event 11c7903b0dd963937962aeddd4ffb77feeec607958b581d996b712009c71f910 +S027/S027R12.edf d782cd20eb82862f4bb6f74f4fbd507ff2610480f581ccbc6ab885a60be94ac6 +S027/S027R08.edf bb50d3febb694639ff3d90cedae79d6f9531f4d6824919ebcc843254acc8cd96 +S027/S027R04.edf.event 710672ce0f425a207ef543d23f608683e1863d7423702d511235d71ccbd289cf +S027/S027R01.edf 
16ae12914a9c301b652d64f1d6337e6e31edbc4f64f9c222c3670dd7157ecf21 +S027/S027R05.edf 7fdc9ad49431d5f713f883578426385d776b50787a08bff7e0a3a853952b4d62 +S027/S027R13.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S027/S027R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S027/S027R14.edf.event 91a58fdc6ac826e1cc60d1f6d7a8f3c395a374bee6c3fedb7e90d360410f8f97 +S027/S027R06.edf.event 37c09b028d07b5b1954199394e59b7c78fd1325c5fada30ce1411ada2513eb23 +S027/S027R10.edf.event 23b6fc268df2ca2b48b0a505a6eed055a62dad293041c180f6f03136d0f4b36e +S027/S027R03.edf 59493f105a7531b67e6cc52c1ace3c2fc248800077015c4dcb417b7c6ca6f3fb +S027/S027R10.edf 9fab75ae533d35878ef06a2e8ec6c623fcad251511a8c2dc81df8c332603b707 +S027/S027R02.edf c0ef16b97e2fff2692e400825728ca3c102b359634ac34a08fd2307f14b05d96 +S027/S027R11.edf.event 09d9d4cbfb435c34d0498269502687afe1e41cb410faf772b5fd4389e435ad7d +S027/S027R04.edf 4089acd89834f5aae1ea194c2d42b469b3dff95700554dbb4f0da68e91d94d48 +S040/S040R01.edf 45423b0cfc992bee8cede102265968d424d3731a6a7f2c8fb4a714f4ba3dfe61 +S040/S040R12.edf.event 27db1e140fad9d7a3cbfb7c99bf32f74d60de45fbaac18d08e05970e1ff5b49f +S040/S040R11.edf 693e9ccc55339199be31a7bc2099a26509855447ba1dd94cc8a32760ae9d67bf +S040/S040R11.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S040/S040R14.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S040/S040R07.edf a9dbe8be9c65ff1bfea01eb7a87c68a8fcc59b0bf3a5e78d8b3361687c0ff0d0 +S040/S040R14.edf e439a81cf9c84fb35c9764ba5e760806fa638bb99fe0c222f9fca85e948cb72c +S040/S040R03.edf 0ad4d13fe0bdf89731358eb2ef45bfd1e3d6ccb5704ec601a5b548a18395beb5 +S040/S040R05.edf d3b0fff7acb44c5c07900ab3c3b86a63c188a86ad4c0506bafaf3303930c1dd1 +S040/S040R02.edf 3f9cb083e7c2677528a1f4d23243a062c3ea3498fde899803ae752fd29f9c2db +S040/S040R05.edf.event 3e83abe294ec7b278fb4495c4fbc171e9413e9d805ad8464dc6ea5a4875e7625 +S040/S040R08.edf.event 57636037048fc90c882a690b692342a1f4fd070d9f2a219d527ef6c0f7ea7dc1 +S040/S040R07.edf.event 212052daaa4c0fbc66f91923236861d93fc52f73403f6b4d988205287f1f9ece +S040/S040R04.edf 7e57eaba05275c3238d5fc60b1e696ffab146d578449abe50113f8b0dc388c12 +S040/S040R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S040/S040R09.edf 6b58a26d5f7914661d5f82c9e4e83fd1453af9b9b9452350d82ce5332f01d0d3 +S040/S040R12.edf 4039c3d1a84f9cf689592549332130bfcd37e6739d8e978046f1427af22b10d5 +S040/S040R04.edf.event ee9a506277766c8d64377864252176694f8cbdaac82a329d5cce5e9c11e2e529 +S040/S040R10.edf.event 037c923028aca24716dbc8d307c618e19d4e94bb1e759e5ddb8a9d3b6b205a86 +S040/S040R10.edf 5d6089202962ef26fa68fec40221916256c07e0fb7017fed0dd63ad9a9fe3fe6 +S040/S040R08.edf 941d55ce92107bf7cab012df55566d5dedb0f7b0f13a408dd0a4feeae2d16da8 +S040/S040R13.edf.event ed0acb0c9635a838cea852cecffa253bbee6e084a3404ea9ac9900188570c878 +S040/S040R13.edf 6bfe46c52dd085d36216c28f1dca46071de404bd33d630687ced49d948d1aa52 +S040/S040R06.edf 82f87f29942e399a8d0f996ee8132b8f0028f159826bf31416e1b98a5676017f +S040/S040R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S040/S040R06.edf.event 25eca6452bd9e487db46059b497ec8df5b6c353e97a2ac76ba8344fcc0797c79 +S040/S040R09.edf.event 07fe70f3f8a3bdbc31f5b573f0a7411d64d34385995e5b88892dc178ef898e16 +S040/S040R03.edf.event 1c886979e539efbac81a65fd3868773f73efb803324a60ebab5a203d75cdb97a +S043/S043R14.edf 60940854f70ceaf6fd2438544df485283deb3e6dbb2037b48a8f981e9d8e0fc8 +S043/S043R07.edf.event 
8612fac39dad16048db3bea4e27be45fe67ced9a22781462eb9c6e435fe28d6d +S043/S043R03.edf.event e1c00064d3aa1fb0636aaf0dedd65aa66d02a8afcf3dd70b9a5fa4df4d4ebd47 +S043/S043R10.edf 050765b97761f0943cfd1f95fdb707a2b7cf52b26b8a2899a140bcaf502d3a20 +S043/S043R06.edf d58bdfae3036681d8f041ebd230537fe396023f8c42ed6f54003627b50c0dcd6 +S043/S043R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S043/S043R01.edf 49151f1f618da69d7de8bea138e21c0d1f7f9833bdc8b23a9a98c95dbf0120db +S043/S043R04.edf e841bfd7c22f200ed1999fd56466b9c13b05c5bfaf0c1ee30a2685827a137b85 +S043/S043R05.edf 649d2daf74d6bb33e922dea594322c90b35c8bbd9bcb75963fc170892ee4fdf6 +S043/S043R05.edf.event feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S043/S043R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S043/S043R04.edf.event 01b8244e62aea11c106c000350e5b2ebec864736b68aa1525ea371fb86a3b0b2 +S043/S043R13.edf f51789ff14d721b809f205eefdca14e6bd57d6d63202a7624e19c1d6793e16aa +S043/S043R12.edf.event 08cefdab90cc2c4f915e80340ff4030bb291ede00b87eaa742fbe6a306942d05 +S043/S043R13.edf.event b9568e8466c8f90e1fe1f9aab8ddb73ea16c008b7b67cbbe5863f04f2ec408f0 +S043/S043R10.edf.event 9aaec3ff6fcd8c6d47bbf0c154dac87cdccac4bfb099e5995ee69afc7b34257b +S043/S043R02.edf 51f6d82c5ce4c53a4bc5477d3fa8decd78cc6e3656209b8324df9be0030f27b2 +S043/S043R07.edf e554299d7a61ac63e3c193c9cf968b0c921b20f644673417c767ade5f254e391 +S043/S043R09.edf 32f3a9cdad8dc71123ba983b7b30905dce563cf330d061374d69187d696d23f0 +S043/S043R09.edf.event 25eca6452bd9e487db46059b497ec8df5b6c353e97a2ac76ba8344fcc0797c79 +S043/S043R14.edf.event 8b26d9be3cec072b0ba8e7e1b1aa9f46dbf8f50992131d413440192fc40ccc5f +S043/S043R06.edf.event a788bd6a825d960534053c6559cb4f24cb7ee7a3164017d99c1dbc55f3b22215 +S043/S043R11.edf.event 43bc660a507d479ebfc1c782945c8504dc21abb3e55cd48acc2faf4db673e854 +S043/S043R03.edf 3bb31e415a5cc1b889c80b16473bd18ee5fb6cc8b03cbc2e57255a55e5564047 +S043/S043R12.edf 514e5dd961671fbc4e1e23f852ac6e82320be1ef8b65383602c368f6de946909 +S043/S043R08.edf 9359ced76df3828782fe5ab696efdef57b54e87be5363f2f3f07f8d4b3ac0adb +S043/S043R08.edf.event ea6f6003ddc3676cda270c185ba40fd7915630a24511085aeb31217c9a1e8852 +S043/S043R11.edf 6a70abdb1e96f21d02777643cd36113b950e99ffcd114ea4fbba8889f1114285 +S048/S048R07.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S048/S048R12.edf.event 50f12f26efafddcd994732349e4117055595d324e4dcff8fa56160baad5d5533 +S048/S048R13.edf cca277275339fcc530dd047960b75284bfc0c7af32ab695162c79189ee46abb3 +S048/S048R08.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S048/S048R11.edf.event 00eb5e22d01c3eaeb1626660a99e0c6fbf2f4423eed750224cc549752b7b6761 +S048/S048R11.edf d369c128d89f0d8a8e151eb2e335ff858836fad04c432c51db041807992d9844 +S048/S048R02.edf dba6cfa10d8bf65a0ecf681c3d1f7112bfc96e50eaa9c2782a52dc90fb81ef00 +S048/S048R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S048/S048R10.edf.event 20d3d791a72acbcdd619cb968567a104858a5889afc628a6fae4776c90c0780d +S048/S048R04.edf.event b7fc6043070236adccd2c6d2a291a12804c8a08c7d7b2194d31b1f6996080655 +S048/S048R01.edf c614f7a42eee90e494006d410917f21c7cc14e04f6cf86e0728437c636286171 +S048/S048R12.edf 9dbd643ce614d3b1c9e07190375fb04222e065c057d468f36b1141572e1490b2 +S048/S048R10.edf cb8c1bb4774306aacc0e3211444f9470dd0b89d73f5422b4b5c63450b495063d +S048/S048R05.edf ec79c9a93f997629556f37c47f408941813181d4bb8f02a191f0fe3bdf958afc +S048/S048R13.edf.event 
a6c871f2eb69757aec91f71c7de20007524b3b628f251799a3dd57bbb181fd84 +S048/S048R09.edf.event 6e9a969133a5a862400b62cb84f763eda38a0967078b1ebbfea1ca2ce8635b48 +S048/S048R05.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S048/S048R14.edf.event 82c4504914809d88b1ce13e11caa12c3a659e1a2edfab31c2e894283b14daba8 +S048/S048R04.edf 0916751e1b738588b93540043b78ea96f033b83ed87141a7cde6351409020d1c +S048/S048R03.edf 9c7ba6a229783276d59f9893fcf26a56d498bd4f231bbbdf1530315782b3775e +S048/S048R03.edf.event 48ad6251760a77acc67b82ce0b201f4244cd1385a6181c8236487af76fe1636b +S048/S048R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S048/S048R06.edf.event d6641564c0a8724abac8f919ad99064213b0fafe2c25f0d37d1cc67d83bda19c +S048/S048R07.edf 4b01ca005dd50ba06f1efe2bea065d30bff04204f5af10f18539747f7025e93a +S048/S048R09.edf 9d0481b4338cbc3d3070e53a9198e5d8d889fb692334ed32f8453d480dc0a3d8 +S048/S048R08.edf 5f519c4a10d3e18c4be0eb840be7be1022471594c46c451c3614712896feb669 +S048/S048R14.edf 234b83adaeb3c24523dc12a3c74a5271887cbc85ebd770095644dd373c27329b +S048/S048R06.edf 195f272f2e56fdd9e6b3151982c59bc5bdac42cbcf26cf75846872378ec317d9 +S028/S028R13.edf 780092ab1c7826d7d1fad8329cf8fcbb39c2b6a078314d018f64abc24b5fe47f +S028/S028R09.edf 27fce7d1a64a75ac16d6717161703a402cfaefe4756d27635df2635620e0cf47 +S028/S028R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S028/S028R08.edf.event 48bfdaa247bad1b393cff54386a6d30257090bf0e7327f31b2b320921da53270 +S028/S028R12.edf ce460f08c9ece9756e66c7e413e565d106676c865bd9a4fe6aa19097ff693bf7 +S028/S028R04.edf deee0e2e89467831f8d6d6ad4b142bbe5ffcf79a1e4dc993e1a52b5face5604a +S028/S028R07.edf c4059eefa097feea4b52bdf3aaa2684b2011c6f207668836ae461cf3ad4ef2d3 +S028/S028R11.edf.event 029131148bb6c782573739be6ec79dc67ade89f0b101169912d9c8201871bcd0 +S028/S028R10.edf d6eca0c406b75892a351c65d108546d27ed2d51fd31c0967427fa6115f6097a1 +S028/S028R05.edf.event c2f5111be300abf5d209c1908e46d378a7a94c2f8043fe6acf88665aab8efb02 +S028/S028R07.edf.event a6c871f2eb69757aec91f71c7de20007524b3b628f251799a3dd57bbb181fd84 +S028/S028R03.edf.event 7ee25d4d164dab7d909181d9ed3898053c69dd75d8affe3f277cb9ceac75184a +S028/S028R12.edf.event 985f9701733fcf2689c3a35bdb9e4b0058b6137ded5bc0f1a2a3b84431edb435 +S028/S028R01.edf 7aefa2fd7d92490654beb7deb417dd5e04c9d7c924716c16a48ee3d73ad6c3ce +S028/S028R09.edf.event 386c51194ef175e56c4d94b5e327133f39929095caa7c3ac86e89628f6fc0610 +S028/S028R10.edf.event 149997f77af08c9d6ad150aad5198f91c6c964c07e4d639baa770eac01012cfc +S028/S028R06.edf 04ad2d3a29a0f76a5f8840940bf87fbdcf6acb5d95c677e5bd69b2d259053d3d +S028/S028R03.edf 2137c12dbba064b5181dc83d7d385047ba7f33874679be51bc17bef2a566bdd6 +S028/S028R06.edf.event c2f5111be300abf5d209c1908e46d378a7a94c2f8043fe6acf88665aab8efb02 +S028/S028R11.edf 15e3a1a2c0f28b5856901f6ee4338cd65f98c3b3870b29ee7b5967d2fdc17e55 +S028/S028R04.edf.event c117ba4c66b5467903fddc4ed77a580e09639381683c611dd1f02cb0d311a4b5 +S028/S028R02.edf 3663ccafe904bbf31e929a7999a03e8c387aa645b5a90c8d11b0a8bf751f3d7d +S028/S028R14.edf.event 82c4504914809d88b1ce13e11caa12c3a659e1a2edfab31c2e894283b14daba8 +S028/S028R14.edf 3a2732e2965c9d8a56c1244bba7640b688177b6d2a13e7da4e3d934d25e00db9 +S028/S028R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S028/S028R08.edf f4faed4d95f419e54b95e5232155fa77af1a1a0a739bdc12b3bdfc6edb5f0a79 +S028/S028R05.edf e784563aaecbb0a085924511ecda4ac320a564f9e0dae2e70ba044dede5f8928 +S028/S028R13.edf.event 
611df9f780acf887245656c2987fb77e486d2bc016936d00eea0a55d2f5c3028 +S045/S045R07.edf.event e19fc47bcf5b585ab27ad756884c0c4cd37d239bb04c8bf15e2f33fe9fe05e18 +S045/S045R12.edf.event 533737c87655572a20223c364be6c8be7a035cd8cf3dc42ed8c418e46e174661 +S045/S045R03.edf 14e948d21adce6478e71ef21dd9dea8bc3f1555518cf8927b2c33ac6feef4a13 +S045/S045R13.edf.event 91a58fdc6ac826e1cc60d1f6d7a8f3c395a374bee6c3fedb7e90d360410f8f97 +S045/S045R09.edf.event 732398d864936092994f6aa5c7a3d554d1c1c35ff4fd7a90dd0c06a61260c68b +S045/S045R05.edf.event d229cfdfcd562a5cfb40ea306452dab12d3ca82a70a465289b4c69c299fb0258 +S045/S045R06.edf 0a2348d5e52f55836b1154606d89d530f0fc914885cc334ec430c304f7a0627a +S045/S045R14.edf.event bd6dfaccdd7deb73743f500e0a1fa9d0ff333e94d59c6d153e2e9d2dc8a4795f +S045/S045R14.edf b7cb236b51fc8acf948277e703c2bfb04fe5515e3036a7a1d1c8df5a5333a837 +S045/S045R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S045/S045R08.edf.event 1c886979e539efbac81a65fd3868773f73efb803324a60ebab5a203d75cdb97a +S045/S045R03.edf.event 3764472ec04047763aeff3c1680cbc45cec3a88ed5f483d80cfbb31b50a12ac9 +S045/S045R07.edf c37669371b627ee9f63de41f8f276867f8defeff59489873eab0217a852ddf85 +S045/S045R02.edf 91c60dc17ea3b0c8a636b727f5d2caa9d84d6813bbc2a63b40a80d6186e8b75a +S045/S045R04.edf.event d45b05b6c6897898bdd688c12fad776011d39250bd1bd8de7e7745d939b60e3e +S045/S045R13.edf 5eebb9e7c81bc73766bd32d167005ac7dcb256155052e2bb9ab750f104c6efe2 +S045/S045R04.edf 655d2602c087d8f9efa32722bb32aceed76cab3c72570516df12e41458a25482 +S045/S045R10.edf 309184404b1ef8d649d7e8c6177b48a75fb4eaa655eb49d96a797f92b79d3b67 +S045/S045R10.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S045/S045R05.edf 3bc2887a902ebfa42449cd0d7bb5488d2f6fe6615072b2059639d119d62d76b8 +S045/S045R01.edf 99ed18a81e6d86970dcef27b3bf1ad6d4adf01477b4560aa5e198673de2992bf +S045/S045R12.edf 171f3487729fbbea555c923c74b8ce1e0f408aab10205cc95d2f498ba3220b2b +S045/S045R09.edf 2c5c79713d5127bf59e824029a0e5cba9a46badeb0ae94b0cd729d8dd1bfa26c +S045/S045R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S045/S045R11.edf 1517b91e4166540a416bd233b4ed77be080f740de5c25df6c52f60d61916b68e +S045/S045R08.edf 8e3ac9f836029bfdca587290743f5b7a190f2470f55fcdccbecf9eb7bb3aa40a +S045/S045R11.edf.event 3e83abe294ec7b278fb4495c4fbc171e9413e9d805ad8464dc6ea5a4875e7625 +S045/S045R06.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S109/S109R02.edf 006fc0cfb18c2084a453578484b2252b6c1dbe5608d73e20717a39b557a1d429 +S109/S109R03.edf e2adb0517ec78e1ea79e6dbc7aba14f16b56243b8dafe630ad57deec29b79d34 +S109/S109R09.edf.event 8a4b5822b251050d46b816bf54fbb47530d91210b6d01cc2e6acbe662193553d +S109/S109R05.edf 033bd271a8054992c658ccf79575e61d3e25d820188cca4f3bd563f899f05bf4 +S109/S109R06.edf.event c777015bef40a19f68f8ed8c37572f501bc00d9f061933697ce2f238c9ad3f9b +S109/S109R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S109/S109R01.edf 4547299bf4645e4afb08006d03b419e1273d4a3e853b8aa73d569e5a96dce8d0 +S109/S109R04.edf c267b8aa26c70924fc9cb9fc8596b351a83894490bab95325f738d68829aab3e +S109/S109R10.edf.event 85ffa4ceaf93483bfdb010c4404fa88a39a260f5371d86a551e206b015abd33d +S109/S109R12.edf.event a7dd5d461dbdafc68a647202ccb3decd039a172ca33d925065fad2be2efab6ab +S109/S109R07.edf 2dde459f8e061be6d581880e6fdcd6cfc1109ff7151729d82097fa8ad66ca50b +S109/S109R04.edf.event 48bfdaa247bad1b393cff54386a6d30257090bf0e7327f31b2b320921da53270 +S109/S109R12.edf 
b9f0f477c70ce519595b74c887af790c78c77a7f67279fabc7fba2989ee59782 +S109/S109R05.edf.event 6ca290c8f2ab5d2a3d0c42a123bd24341d790899e901dc5991dd66614dfb1842 +S109/S109R10.edf b147611b45da6fd50f0df50f78be9b4782f85a1d5b43edfc0d428ec519bf5012 +S109/S109R07.edf.event 4c374f58a91c1ff71894cacb50bfb798d20cbef67ee65a1ce8a2d5826349e390 +S109/S109R08.edf 879f8c8cda430e95bcda881f9cb1ede5e0693c9b83849d659e60082dd3e8706d +S109/S109R14.edf.event 1ccc28680a9b24af49105d307967ceb434f5229a29e70fbfde089b2d092db7c3 +S109/S109R13.edf.event e4c9fdcbbe3469b81dd48d30396ee921a23d45fb900a0dd3b7eb4ceaf04936a6 +S109/S109R06.edf 5a685c86cf1e3d85e6c7aa58d7d9eabf048d0e1192951777f30f97dfb675bf3e +S109/S109R09.edf 07fce275dae8835e1c4458ff66dd9e87805593e4169faeb025b60c55fbc637a7 +S109/S109R03.edf.event 6ca290c8f2ab5d2a3d0c42a123bd24341d790899e901dc5991dd66614dfb1842 +S109/S109R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S109/S109R08.edf.event 8e9579e89d1ec6576609e05f5e6215b4322a4c5375265379e3acd7952866ac83 +S109/S109R11.edf 7b5859aece6cd7b8d8a732ba372645d6885af1bf29da24a0a27a366dcc92021b +S109/S109R11.edf.event 0bddeca4a8cd621dca3130a2d5affe793bfed62c379600613bb61a6e554ec750 +S109/S109R14.edf 40ff8875daf98e4614854d1bb6bdf97b85b386b9caddc0dc944414fec0d049fe +S109/S109R13.edf aa94b64cd18a1b106471367ca9f2b3a16ca9c5669e637b8328d620df60a96a0c +S068/S068R11.edf 9993b459a0a3667c0b2810476632a71f9c6601f12516ce94c3e127fc8685687d +S068/S068R14.edf d5cba484c5cb43dc0bf4cdf4cb0ff098803b9d4b1f3fd8dd6c3f79006b58ba9d +S068/S068R14.edf.event 2eb503fa48ef7e13b77098d860c716de2d0eb55a3a0580117eb34aedd472a728 +S068/S068R08.edf d49374089836e11880347909fb5a358d6834303153e074f230be6caf0c66aa16 +S068/S068R10.edf.event e7ef0a3d66a5c814c6e917ac8f9bf83d6056810f78dd0c9296b5445289483fa5 +S068/S068R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S068/S068R07.edf.event 819461ff12613675eb0a26eec47ae680594adb9c6696c5e02f8e4b0d3731d56b +S068/S068R09.edf 171dbab1442c48f774cdab95e154d52e2dc9b8c3a20b01933ccaee264871dbbe +S068/S068R09.edf.event 01b8244e62aea11c106c000350e5b2ebec864736b68aa1525ea371fb86a3b0b2 +S068/S068R01.edf 7f528d62c8d4b342ff2f958ec6571d425f7d3926f0832ece121fa162efccab1c +S068/S068R04.edf.event 1b02f94c97b31c7b11129bd2f0db6eb71ffb209c8dc379625185f7a3726d3aff +S068/S068R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S068/S068R12.edf.event 3a8202a5a33eb8ffe91e329a5cb7a69f0647af677bc441cdc0bd0e11b3631c64 +S068/S068R06.edf 81a71170f71fd313c373612f474c6bc051481d5a831cba3f37e7b06c1d9935f4 +S068/S068R04.edf 7009d509684ab0b13b0253178ebf2b715a7ced2dee2b409969f088c3094f272f +S068/S068R13.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S068/S068R11.edf.event 2553cdc46c8017dc83d793a7cce6a36ddf99a043e777612539831e67f1aebc23 +S068/S068R06.edf.event 2da7955eedd5ec220793d3b4b3224c26256f1427c213729179eecd79fd4321c2 +S068/S068R13.edf bea2097a6ee72e37f7039b38cbd57af051dba76797ee83629d4c333c66a3d934 +S068/S068R05.edf 9f5307cda4c4be652743ce6fca4275d2614ec81c42968c1f9c46a22fdcd8a376 +S068/S068R05.edf.event af9bed6667136366cf88d2cd7f0f5fd9dda3fa51cc4cf73d3afe4d57dc8b7502 +S068/S068R08.edf.event 9825c80a69d06961cbce2e12cf8d0aa56d063e5f4598312c4e5b78693a0219c1 +S068/S068R07.edf bc4403c8c2186589008953fac143b4556e05b294fff29bae1de2151f9a7ad36b +S068/S068R02.edf b8207c54c2f123032ba589c1d69ebe39503cbd4f539e96935da842fdf9dcfd4d +S068/S068R03.edf.event d090cffefb3b3a6b7c514daaeff9edd2dc1c358aaa5ba0a069b62e257f59e09c +S068/S068R03.edf 
2ce7f50663087ed5f90aa5a75583994c11b86e6556c2395d8cc327cd04599f4a +S068/S068R12.edf 966c9fe51563d5dd048482211d01eb1005130ebc14b5bc6ac89097b96ba9a5ab +S068/S068R10.edf 12250b352069cdef00d68d872a5c110a1098e5e7be6a77c0658f95ebea52a6a4 +S085/S085R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S085/S085R08.edf.event 8a4b5822b251050d46b816bf54fbb47530d91210b6d01cc2e6acbe662193553d +S085/S085R07.edf 67e6d9307b088dee4ab676419451931e4215372843e55c99f55629375fa1d61d +S085/S085R11.edf d2dbe9fbdf9e0a050f08811c18b25bc69a4a87abbca6c819ef96515b72147fe1 +S085/S085R07.edf.event f686071dd83e0610c6463f51dda1c01de1abacf998f19f9b7d3910c06492fc30 +S085/S085R13.edf 253e4d192802423c11f3d9ddb6ffa35c41a5fec990c71f04fe79d86f362836be +S085/S085R14.edf.event 04192a55fa7728d34533e76a1b831fe5c94d79c3bc9f1028f45644c51892a744 +S085/S085R03.edf 3723d3c37f3fc3b8ca7246de882e8ad2edea370284e4878f8a706d6bef0e2f8b +S085/S085R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S085/S085R12.edf bd78d9b98a9ba10ac7ba51cc213970da2f2c7a7f3238714b362017306d53983f +S085/S085R04.edf.event 74fe6694983b37f44ede4efdbd00b6344db3ee7dfce3f27d06f5ec67ac6fc65e +S085/S085R03.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S085/S085R09.edf b2f2fc7f0bb5a83d8876b40ff654d495a9882a4c046f4e7c9317a524e55a55c3 +S085/S085R08.edf 7da6a14c5b2722ddfa3962cb0b5580e3a5207c77154a6bac05c73d7f75e4409e +S085/S085R04.edf da0fbebf6dd7074cb3b71769f2a32259249b360e8ff03d832e99a02ae7a55090 +S085/S085R11.edf.event ecb3c28bfbaf7c670aa5547fa414949828cb36fcb3d84e0389aa669e01381627 +S085/S085R01.edf 378774575493af619ad20aa110604dc98be465894a7f82ce8ef815e3e58b51ec +S085/S085R10.edf 9d9fb1173e754a9b1ed6cd430c2f4b5418e52ae89878ad51c7f79d7a10244d60 +S085/S085R09.edf.event d090cffefb3b3a6b7c514daaeff9edd2dc1c358aaa5ba0a069b62e257f59e09c +S085/S085R13.edf.event 88e1ea56ed0459a99b011a3ea2f25f947016dd359b2d4fd683f838ac00d993d5 +S085/S085R05.edf 0caeea47c39fc638c03524b356f2fe749fff9ffad8fb3248c476c47c15766ca7 +S085/S085R12.edf.event 6546377ca20368b45bfd1f06c72e153809df57db54b8731ecceb6c8ec813a774 +S085/S085R10.edf.event 57636037048fc90c882a690b692342a1f4fd070d9f2a219d527ef6c0f7ea7dc1 +S085/S085R06.edf b5a691952339b9159eeda0ff49d33a1a05869254398761e4da70a5aca445e1a3 +S085/S085R02.edf a89c675ea2e6e0211aaea83cc2fe0d154bb1984c1ed836868f283f0f61b6c943 +S085/S085R14.edf 77a596bc1306b966accaf331af8501c3cc6dbdf66b0213993a11b189afc01a9f +S085/S085R06.edf.event 7925ab0749163820c00af5a617adba24f1ce1c711886e12def08eb72594e10cc +S085/S085R05.edf.event 2fa0b9392dbafefb266a1c269022274e4f4638513c85370c47ce925b54b7d5d6 +S050/S050R04.edf.event cfb6d9316552151b2cc7a4fe7b3c98b0ffddca0cea554883679586887149feba +S050/S050R01.edf 8407057efedd6e133b5e7b13caba82c90654d2697288d35b27b7fc33aeb4f522 +S050/S050R14.edf 4a11efcc5be7792041af8429b25cf7344e4c9c0adfa75d86bb06bbf8e870ff01 +S050/S050R08.edf.event b7fc6043070236adccd2c6d2a291a12804c8a08c7d7b2194d31b1f6996080655 +S050/S050R05.edf 59950256e03b968a6686c8d102efa843d7ed824e458597084be1a9a535a7d6d8 +S050/S050R10.edf 72174e570bfa31ece2dae5dad0ac8c242044784755bb9d6481a194a4a2b7e311 +S050/S050R06.edf 4b0d2ba3215359fe1b2a963c8ba0c94e0b10c08f33a0eb1c1da0e3bcb8c2617e +S050/S050R09.edf.event 6e4a2f2b413bd53735575eeda52d35059687bb58d072e4de1626bc16d7a28d24 +S050/S050R11.edf.event 08d1f10d75e3c7f02ed234fdfe64948be5fa14e26fb27441a377ebf76532dc50 +S050/S050R06.edf.event a18bb793691db61833ea9b3b10da118a6687892d063d72ae795055c9209ba8ef +S050/S050R04.edf 
d2068d794befef727cb907835cf04cbc357e16ef90766ca22917dd4b4649df85 +S050/S050R07.edf.event 08d1f10d75e3c7f02ed234fdfe64948be5fa14e26fb27441a377ebf76532dc50 +S050/S050R02.edf e4d2e4444e5dd830417f8729a549daba168d49e2f2db6467f44aa75ab9554474 +S050/S050R09.edf 15c17c5266b4c46e49e454b455dd4fc9cba0cc2ed670b64f47440c5e96e814d4 +S050/S050R03.edf.event e14dc31d476c11841d45564cac660be9776fccedd3dec71dcee4c5232f00f25c +S050/S050R07.edf 0b0435ff445199521d9d11589e69bb0940f7c4878f6546dcb7d65d0dc52fdfea +S050/S050R05.edf.event efa5d828429e8df393cf3e5b452a00dc9f05f3ad9eeeeb119ad18eafd1cc4e2a +S050/S050R08.edf 211a70a072988cc85fbab6e28b0e0550c18d22225dda82a9471a980447eafcf9 +S050/S050R13.edf 2a586af9a1e31ef0bce58d53c92c55c0c1ad670b301711c669daafa2fc452dfd +S050/S050R14.edf.event 1f581da8e4856c63e25c541381ff8a370d89525260484c85de24ca995a24a984 +S050/S050R13.edf.event 9787b103ae02f348ebe837cf8e545e3e630609a320868cfa229f0f82a0734ddd +S050/S050R03.edf 114ffd39341cdf2375d4762190ee4d8b893c0468e02f4c8a413b183432fd44b5 +S050/S050R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S050/S050R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S050/S050R12.edf.event 2b0cd50273254147215db0f2d93c0e409b2279fe37afd6cc7d86edb7df57486d +S050/S050R12.edf ae1fa0a96ddbcf2f2e55f1df26fb90c4bfe439bf0bacb89c03579c0308ea1180 +S050/S050R11.edf 16d0f476a75c5ce8c0dd1e99bbfc8ac1cd0ad72804b9dd76e6803ccc6b4f3b1c +S050/S050R10.edf.event 7ee25d4d164dab7d909181d9ed3898053c69dd75d8affe3f277cb9ceac75184a +S049/S049R07.edf.event de55d737a8555e174e0cd004746337287c294a01d42348abe972b98f07739b4c +S049/S049R14.edf.event 07fe70f3f8a3bdbc31f5b573f0a7411d64d34385995e5b88892dc178ef898e16 +S049/S049R10.edf.event 6ad812d50b44ed49ee87e09d1cf28b68a99855b6d266be8b9017d8056de057b4 +S049/S049R09.edf.event 6558be2dc3366ecd25198b4561d2d6a49bf257ea06005bb7804aa7c5f9bb7c8b +S049/S049R03.edf faff4a3b9f549956039d5fc56f6a3a69666a722767b52672d10057c3af3bd32c +S049/S049R13.edf.event 43bc660a507d479ebfc1c782945c8504dc21abb3e55cd48acc2faf4db673e854 +S049/S049R06.edf 0d6e568a62a15ada1889fbced43069e0c2d5ca1dbb5fede5484a954af40bd079 +S049/S049R08.edf.event 9a154a517e2cf402786cffa7d164d0656aa7a8bb30af51266fc6403fcd9d3d00 +S049/S049R02.edf 1a717c22907a799d191aa1e34b4702d5429359f0d10419ae64aa3baf6d4a9241 +S049/S049R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S049/S049R11.edf b90f50b60a229cc4426527b09711ce720932165fa23d427cee871f13e3b372bd +S049/S049R13.edf dd9504e0120ce5876437f1fbd0ffc150bb6e713550ac818fa7f1b165571b0b47 +S049/S049R07.edf 7a290ed431af3fc57e9e8dd41e383ec8a734fc59240eeced59872f32ca117723 +S049/S049R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S049/S049R01.edf bdfcb91b7c914a621617d59cd6f132c9f3229f64f22084e57904232e84c2eaf9 +S049/S049R11.edf.event 0d4308120129d0f92d2b8779374ae232f98d1357e6c6427983fed655d9611af0 +S049/S049R14.edf 22d3b7e7e6efeae214e237437447f3928214f5d174413f03a113613bb8840771 +S049/S049R05.edf b6eeb2d843cdaacd5316dbe64a15a8332397ce454a657e887fa9ffeb9c335984 +S049/S049R12.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S049/S049R03.edf.event 596ad948f8aa9dfdc6cd3125a3d02e115d1cd6a505023467bf5cd721125ee7fb +S049/S049R08.edf 449bc4afe04a06d018e6a4385144253bdbea91334af3be4f59165883be2c46a5 +S049/S049R10.edf 901196f4f5fd3d9cfec28a04d581bb0e91b43bd6c1815655f112755f7536efed +S049/S049R04.edf.event cd91d83b7ed2080d3d803a7381c00951232c99bd8107647556298a104a0ce8c0 +S049/S049R04.edf 
+[SHA-256 checksum manifest, fused and truncated in extraction: one `<path> <sha256>` entry per file, covering per-subject EDF recordings and their `.edf.event` companions (subject folders S003, S005, S007, S009, S013, S025, S026, S030, S032, S034, S037, S038, S049, S053, S055, S058, S060, S063, S064, S065, S069, S073, S077, S078, S084, S089, S094, S097, S099, S100, S104, S108, ...), plus `ANNOTATORS`, `64_channel_sharbrough.png`, and `64_channel_sharbrough-old.png`. Individual hash entries are not reproduced here.]
330c6c6384fb9a7cf686ed9f230d180cb70a863ec824fccf1a55e047b1696aab +S064/S064R13.edf.event 45f5eb2c092354aee3ed9873f05fbb270ea898808952a8e8d89f53a58d171ca5 +S064/S064R07.edf.event 0c18fa49d469703f30b80a748450ef0688aa72103e201d690fb064dd55c7e540 +S064/S064R07.edf 93149a39154aa81119b69e6cf2c849b214541ce7d16f48a09091efc8f54426d3 +S064/S064R04.edf c4fd5e8940ff3c9809023886c770dc3ca374adcfa93b85784d749241918ee5e1 +S064/S064R08.edf.event 25b9b610fa5d47c04ecf80ff5afc8dc8ae41908a3571f3bdc98cd14a55c003e8 +S064/S064R09.edf 28410cf933254c2c1657a0c89e0177d5ddf25512988dd414a2efd303e3f1bde4 +S064/S064R05.edf a6611a3362f3dc1b41205d9fc5270880b3bea097a7b5d820bbb8dd4dbe236e51 +S064/S064R11.edf bde8d57b66d9904d666702e0824f209ba0ebef5a1b3e5d01d03ab7807e901671 +S064/S064R02.edf ba4aac54d62892710166b5038da3485a1158d1e03cca99fa9710922623a35bd6 +S064/S064R12.edf.event 1071a29bc96a7a8302b3670dbd4e05d2ac47cbf397e29b74a32e6f4029d8a52f +S064/S064R05.edf.event ce9aa6f7710315564bedec57181565954b52788105428ed69c693ca407a546a9 +S064/S064R09.edf.event 7c4bd521fbb97676e1187543f70b99a542e6a60682c3ca4455c69b001e97baf2 +S064/S064R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S064/S064R01.edf e208a4c403cb3597ee704d7b59918e86faf195fdaa28587b84efc86708109219 +S064/S064R10.edf.event be8b4e0108640ed6cf994ccaa0ea908d53eeaee5d7d1fe5059a06c982f4b08b1 +S064/S064R08.edf c0abcac7f314a6a8e921f3a3e100feeb7454f3fffaee325a89930660c5f0bb6e +S064/S064R10.edf c1e0330c970bb6b82a6501027da9de5b62ced4fe5b1fb85db11a3d6c3a8a5a41 +S064/S064R03.edf.event 57cbb289c3aecc1dd1481829d7cbb9f0c68e99192017986dc9107d7cad5f6506 +S064/S064R14.edf.event cce8c6ada8b4f46efbe5930fa6779f8d1e0c787058b6dd8c7b3841254bbdaf35 +S064/S064R06.edf 95acb614f05f3ec49fa426fd474ad02bf26b8f093e837584961472559ae1ffd6 +S064/S064R04.edf.event c0e03fc052196f399444ee8e817cb9226c53c8b1d27137831cbcd0d283821560 +S064/S064R11.edf.event 549c8a3be85848123efbb6bdfc0cc6622bf8ab61ed733497407936b0086b9805 +S064/S064R03.edf b3001738030c26d58536d233a18956c35e12640a4b226c663e05a3c674914645 +S064/S064R06.edf.event 45f381c89be373b8eb7565ebc5489e673811cfc334165888408d2e99864e484a +S064/S064R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S064/S064R12.edf d3f5426e082f27bf037e9ad16336a763b95b00578ef426cb1389830cf40058dd +S064/S064R13.edf 466b835133c3edaaabbcbc612e90cb438f3b216d922ced5419d8ef189eb63423 +S005/S005R14.edf.event e19fc47bcf5b585ab27ad756884c0c4cd37d239bb04c8bf15e2f33fe9fe05e18 +S005/S005R08.edf.event 9825c80a69d06961cbce2e12cf8d0aa56d063e5f4598312c4e5b78693a0219c1 +S005/S005R13.edf 2012101a68c9abcf8021eca5587bf3305ef36ac983486b3396a3c012227b3c47 +S005/S005R06.edf.event bed28cb624951271916f88bc556ff204ccc63699ebc4523ed8043baa9724625a +S005/S005R04.edf 956d0857b17b040955fee9b2384f818f85bfb02248c387d9aa7930c42934ca0c +S005/S005R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S005/S005R11.edf.event 710672ce0f425a207ef543d23f608683e1863d7423702d511235d71ccbd289cf +S005/S005R09.edf bdc007224d5b82152476140b18d2e54b9abdadfeb509c06e1a0c18f62bb0a60f +S005/S005R13.edf.event 11c7903b0dd963937962aeddd4ffb77feeec607958b581d996b712009c71f910 +S005/S005R01.edf f84f58d7d96bce1700af8b2c9e333d66e4b2183e34b3ed5634840fff37845af0 +S005/S005R07.edf.event 6e13e4f708f4b769dde500a1db747206aa87951c3914cdd436fb7f07e9aa5052 +S005/S005R08.edf eeed6c328ee256dd2c20d1808e05819bed4ce56b8de1c57914ded00565b6b7bc +S005/S005R05.edf 40ab4f0bff456a6b55cd55f6a80449a1865fc9a63a202f597a5d679b14978380 +S005/S005R06.edf 
07260f0bc56394b88fc506823779ebaf9e0b6ab6286608b010900155a4d206dc +S005/S005R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S005/S005R10.edf.event 020a012ee89a9fe2c7bcf34bf02c0d2d78b688185ed74043d21a3d53053e3882 +S005/S005R12.edf.event ea6f6003ddc3676cda270c185ba40fd7915630a24511085aeb31217c9a1e8852 +S005/S005R11.edf a4d4111c206de3c094d23c9fbaf2b3fece74eff7a68a11073f29dad9800653d4 +S005/S005R03.edf.event 48bfdaa247bad1b393cff54386a6d30257090bf0e7327f31b2b320921da53270 +S005/S005R02.edf bdb498c613cd048a889899282e8f5491c23a76731f3c2be11800362b7bbcb836 +S005/S005R05.edf.event 8b26d9be3cec072b0ba8e7e1b1aa9f46dbf8f50992131d413440192fc40ccc5f +S005/S005R07.edf 630fbaf27a7a962a4a8bd61396b6a1c4f56b44e85fa5e9400016ca19085ad1f9 +S005/S005R10.edf a5721f5a8229799b6323e26f5f2fa149c121ff87d4ba73b30fe9280602aee140 +S005/S005R04.edf.event f1d7c85c7c4b298aa795662274613ada05d95ec81ad8b21ce4c8ddca5a11ecdc +S005/S005R03.edf 3bf2695b66661a274bb099dd093c8f2eb4ede8a7068ec4bfd0a1fe9f4bfc0e18 +S005/S005R09.edf.event c843292c2d927d69501ccd581dd6688cd971a162df8d02a7eb20e6a3c1aa2d83 +S005/S005R12.edf dfaf07c8ecc583b07363485596258e66a75fb33169496c62918c6dd0803814ce +S005/S005R14.edf 77d5b9b1f03074e96c0e42234f57363272fa90501661cbe038bd387f3ce922e0 +S015/S015R09.edf.event 20d3d791a72acbcdd619cb968567a104858a5889afc628a6fae4776c90c0780d +S015/S015R08.edf 70e4dcfa5bb501007b4a502fb1ac6975128fb316de67eb825f8eee00cba9377f +S015/S015R11.edf 5403967a07422bcce09d1d96dd78d0eef6f29344d8e0d388975b483b93d47341 +S015/S015R05.edf.event 3764472ec04047763aeff3c1680cbc45cec3a88ed5f483d80cfbb31b50a12ac9 +S015/S015R12.edf.event a7dd5d461dbdafc68a647202ccb3decd039a172ca33d925065fad2be2efab6ab +S015/S015R03.edf.event 2eb503fa48ef7e13b77098d860c716de2d0eb55a3a0580117eb34aedd472a728 +S015/S015R13.edf.event a4381c73837f1ba112959670ca4c474004b1d78b82daefb4ea220692bfe23c8f +S015/S015R05.edf 9119fdc1ff6ac9f67eb34d14803c8d1279a1d4c013a77f05b99abe2e334dd55a +S015/S015R08.edf.event a7dd5d461dbdafc68a647202ccb3decd039a172ca33d925065fad2be2efab6ab +S015/S015R06.edf 705c63ec04c0f9096cc6b4f32eec0bcea9a0bfa8d01aed7494b67a487e014afe +S015/S015R14.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S015/S015R04.edf.event 4ca96a0e24908b831c877816b86e3dbd1eee397d9d9851bd1e890d67134c57a0 +S015/S015R12.edf a5e56adba75340447849c46eb049b39799ee98d68133357741c6980f8f60c54a +S015/S015R02.edf a4aae371744dcfe5febf36d2605ede4d704524f9971f5501c4316a077d91562a +S015/S015R14.edf 3f57b464e13807f30aacbfc2f6780bc02bf8faecea883725683177416503c875 +S015/S015R03.edf 9de915e373fc372b372cbe9c59692c7c961dc9d2ff12cbffcdc99190a1bac99d +S015/S015R07.edf 914f9c987bf66fdbc3183531bbc5ca97ba8afaabd8864811c438d8ddb38dfc3a +S015/S015R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S015/S015R13.edf e154f143697251a1b3401fa5792fe47c7d9891a2cf63edd1cc67234752b428e3 +S015/S015R06.edf.event 23b6fc268df2ca2b48b0a505a6eed055a62dad293041c180f6f03136d0f4b36e +S015/S015R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S015/S015R10.edf 67233630176aa19faa4133355be20476cc8f571d9327943b9d35a73ecb96f519 +S015/S015R09.edf 628b83cdf791ab95545606d1b879edd13449da7e143627f931a35680de2b312c +S015/S015R10.edf.event 48ad6251760a77acc67b82ce0b201f4244cd1385a6181c8236487af76fe1636b +S015/S015R01.edf 79564c46036cd6c3d1371811f2cc426c9ac14d09d7da3dcf935a65d1a633c9c6 +S015/S015R11.edf.event 950cf97301dd495a2a6a7a19a93d67d2ee9ee03b2675e300582e19305f5a9287 +S015/S015R04.edf 
1721d4a066e0c28221877d0f9e7da2402bdd924a532f5f19261c429def061273 +S015/S015R07.edf.event feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S081/S081R04.edf.event a8ce17ff32c2c3aa820817e047ec3ff9976bd33355d14877f0ff811df5a288a1 +S081/S081R14.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S081/S081R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S081/S081R10.edf a8cf1995bc0c09a2a50bf889759ab4c1076735bf6a6144bd73a9ff3e845d885a +S081/S081R06.edf 726507264648754d688db166ebec9836b7be70025eebcbb8ede1ca9e47f74e53 +S081/S081R03.edf.event fb7055ba8adb05cadd3009cfa9adefacc5298e01d2b7156815f6424ce1d4f3f6 +S081/S081R12.edf 48e0a4562e2355a3737b412fc54259c7174269f5e6012448fed56253854950a8 +S081/S081R09.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S081/S081R11.edf.event 48bfdaa247bad1b393cff54386a6d30257090bf0e7327f31b2b320921da53270 +S081/S081R09.edf 2662fbeb19e4154447cdce6f73b767e972cc5abddc9c7c19b1a65acd24166847 +S081/S081R10.edf.event 7925ab0749163820c00af5a617adba24f1ce1c711886e12def08eb72594e10cc +S081/S081R08.edf 7faa1f739438962a64487e2132aabdc5c9528430a5f8e374f4b5a9106cc70fa6 +S081/S081R01.edf e5aa41fe4d8447c05d0e3efb752f1312210db47de6a670da2c7c57b3cdb1bad8 +S081/S081R05.edf 9f5bc4ef1c7fcfee0211319ca64aa04ff7a3b2c01c0c5dc460eaa3d17d3b24ff +S081/S081R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S081/S081R08.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S081/S081R12.edf.event 472673d75763793097b36d5b0883addc030cb3790ee84a18c36eb33ef80d009b +S081/S081R05.edf.event 1cecdedf3d7f8b7931b4cd84b48bcd356337c0ee32518d737ce0ee8f0d428d8f +S081/S081R07.edf.event cdf1a70ff91abbdc6f14a7ca794a7b5932276819e6f7c68ee344c3d21e9b73f0 +S081/S081R02.edf feca30506294768d8b87d959f757ea1c95e17dd5749627a6d7cb4488b314e230 +S081/S081R06.edf.event 931266ffca879dea17f50ad227bae49a0d891b282f30fa3ee4b13ab8623dc5eb +S081/S081R07.edf 3eebdead7b378306bf45d32035f06493de11b6e0f6e7d127ea1f6281371efce7 +S081/S081R13.edf.event 0aaac0e6493abaa7abc2bbcabb2ba2ab9cf91a665d7b07e95e181cb6e7a7d207 +S081/S081R13.edf 163ad2dab963d61eeb59a57e4df45b33c7183bde1209396ccdf3bb74a9c2008a +S081/S081R04.edf abb5db7c9c42a09fe10644c0a071d8511b1a31b0014258e0b076cd9195b0ed2f +S081/S081R14.edf 1ff930fd50f5b1710395ee4e4680ba050f01c776b3d6605609cf2446793159fd +S081/S081R11.edf 98120ee5954f8e394f6405ec853f66cdf656c24c456b6fc004b66e2117135c98 +S081/S081R03.edf c493f4eca660b386bcac76cec638928f5b98cc88eea16ff97b67993b6405ea5c +S018/S018R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S018/S018R10.edf.event 3cbb6086e0e7d8748f07bafa5905b530a6f1a43c1ebb1a06c6f846a82247c451 +S018/S018R08.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S018/S018R10.edf de1322a24dc7d95c29d64d91488cb1cf3ea30aa5953c7fbc80b1fa42d4ab44f2 +S018/S018R12.edf.event d090cffefb3b3a6b7c514daaeff9edd2dc1c358aaa5ba0a069b62e257f59e09c +S018/S018R06.edf.event 0487cd7ee2fef7813667623ecbe113ebbdd5400e7dff28fd7f0f7b1e00d585a6 +S018/S018R06.edf c63f23d980060ac721c705567e65603808dffd0af32391c6e2528072bdaf3cec +S018/S018R03.edf 6538d4a1fe7df53296897dc6efb2274e78429346086c6383db9c58af16c46251 +S018/S018R09.edf 75c9cd943edd18b43cc73f8b3702e3c7c2fe58dd35119b147041d4ba40e358cd +S018/S018R13.edf f7886c46a154ad73254422b68b56d81e0b986274ab7d1db177de4ec656c88292 +S018/S018R07.edf 1bfa4c8da5c4f30a71797cb27f2c77cc02c66133d3cb83430918cb2950e3d7d1 +S018/S018R04.edf.event 
cfb6d9316552151b2cc7a4fe7b3c98b0ffddca0cea554883679586887149feba +S018/S018R07.edf.event 6546377ca20368b45bfd1f06c72e153809df57db54b8731ecceb6c8ec813a774 +S018/S018R05.edf 86d9fbc0881a182960ba9486a456f2f91710e94cf2ad755247dd37d352b40e19 +S018/S018R11.edf.event 61878a566785fc86672b3e061fc0eefd0fb9735a4239d8f13ceb0d08ff22f60d +S018/S018R12.edf 50b4e74957541c9f1f2161ec3665b58b247727b50ed9284f1d173b9b77d1feef +S018/S018R13.edf.event efa5d828429e8df393cf3e5b452a00dc9f05f3ad9eeeeb119ad18eafd1cc4e2a +S018/S018R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S018/S018R03.edf.event ffbdc0e528335775cb88a14baab7b7f1fff4fa1647a473bcd640feefcbfab5a3 +S018/S018R02.edf 61c88fcc56859ba2299afb7b1f72a288062826ca7ececf6898224dc9b52c7a70 +S018/S018R01.edf 664012489cbc59928d2bbd665e4f3d557421e9ecec7a8ff6f46faa64ea9e7028 +S018/S018R08.edf b417c65a63c4b70f7eedc81ad11f8b5c067be5143d343a250d9397050ec5e27e +S018/S018R09.edf.event 9aaec3ff6fcd8c6d47bbf0c154dac87cdccac4bfb099e5995ee69afc7b34257b +S018/S018R04.edf 0b726bfd4ce56c9fa7417f84f52759c40eb7cd326f06a25374034c199f5fd8bf +S018/S018R11.edf a08b1554878eedfad123f7a86e034acd22cd9052992e5967b693e13c94e14fff +S018/S018R14.edf 33e7d70d1b51cb549dbe2785e85414e3c82b039c4dbfb71be40c2bcd48136dcc +S018/S018R14.edf.event 09d9d4cbfb435c34d0498269502687afe1e41cb410faf772b5fd4389e435ad7d +S018/S018R05.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S056/S056R02.edf 061c293fe7d9d36359e77b59c26693809fdf02a4629fee46e9c857b98aa9ad47 +S056/S056R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S056/S056R01.edf 29f8a7a25c02b872dbbf0cb52475ed55b45fd3b5a90e2fc5025691ff389b04b3 +S056/S056R04.edf.event 6546377ca20368b45bfd1f06c72e153809df57db54b8731ecceb6c8ec813a774 +S056/S056R10.edf 3d4f919505b19d346f4d81a8af960119113de7d69ca04effd0d6756ba7e926bd +S056/S056R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S056/S056R07.edf f404688039c70c11a61caeceff3c9e02c214d83d3a404a78947c1dbdc0e4d41b +S056/S056R09.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S056/S056R05.edf f0a6de4bfe5925d98cf3c0bfcef21a9e61cfb8165c3caf40fb6228c8a507a29a +S056/S056R06.edf 6ce8cd28af985d32ab874784e311f06ae0c2fbf0b9ef4817bc0c561bc4abbd0b +S056/S056R12.edf c47457c3baec40f4b7205cd1fdd878ab5855cfe0d32fe00655639101efddd69c +S056/S056R11.edf.event 7f2596bafc4dd481e36c47d0e562fb6c5f9b7e91c2a915f19f66844b55b75410 +S056/S056R13.edf d04270980331cd9888307b60cb0241f83cd4f4f6a2f840d0350523275776a3bd +S056/S056R06.edf.event 1ccc28680a9b24af49105d307967ceb434f5229a29e70fbfde089b2d092db7c3 +S056/S056R14.edf.event 1c886979e539efbac81a65fd3868773f73efb803324a60ebab5a203d75cdb97a +S056/S056R10.edf.event 5e6dd7d9983b10c75f267d25fb4f039777b8f17f9d64869cb39446d1e9306505 +S056/S056R07.edf.event 88e1ea56ed0459a99b011a3ea2f25f947016dd359b2d4fd683f838ac00d993d5 +S056/S056R03.edf 5714d4361308d21f12c5fd37fda0f2513fc0a2d4a1ef7520b5f20ee302847705 +S056/S056R09.edf 9df278a8cd6331dbbbaa7c79aaeaa70411906fc23b8fea9f3e44a1fa85cdf62b +S056/S056R12.edf.event 440457b385c6d53d2340acbe5512027de7800fb1514858b055b126c10e58b404 +S056/S056R14.edf 281285940d65a262d8544c4ce035bd24720fc5bac685d5e557654aa0470afe2a +S056/S056R05.edf.event d0280a6531ef96d2b622c2d562c05bf53a0d00439a4819213365b6e52e54abd1 +S056/S056R08.edf 0fa0554aafcc78c38f2e7d65f5941c528d26741194a1eeea8801225c44799db8 +S056/S056R13.edf.event 020a012ee89a9fe2c7bcf34bf02c0d2d78b688185ed74043d21a3d53053e3882 +S056/S056R08.edf.event 
feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S056/S056R04.edf a618b25fc7570d8fe836949761a53e9a9a61cad59e99ac53e09bccca78155c14 +S056/S056R11.edf d68892e7f775eee5630a37cef9b0357671546f0d05272cb2b5c272cf1c533765 +S056/S056R03.edf.event 25eca6452bd9e487db46059b497ec8df5b6c353e97a2ac76ba8344fcc0797c79 +S095/S095R08.edf e3a48b8a9582fc7286a014902b73cdfec56c3cb1b4376822b5275178ef7d363d +S095/S095R14.edf e38842403578f1438e2946e72257fa3ba5d8d422cc0303f35b50d3f2471c8aa5 +S095/S095R05.edf 0bf1b8fce6528eea18ab08b0d479ae5ecff4befb1d18f707b0637251197f000d +S095/S095R14.edf.event 60a42ff3c5acecb10ff34635fb3d1ffa125153f262355d4bdd58ab9b9345bcae +S095/S095R12.edf.event 9ff78ae1debf975d7ce67d0ae65ebc35a183ca2e2a87fec3f877fda244e34487 +S095/S095R09.edf 98d149a2f0832ef787ea016d2e215ef57bbe944ce4c7768e3bfc8e4d9a8eb888 +S095/S095R09.edf.event 8f01ffccfcd90b3dc7f8bcf530f915ede4d3a7359039c3d68ac2b897a331031d +S095/S095R12.edf ec1661503c6628bda69ba9aff83004bd9528e282707e83f243dc906bcd64caec +S095/S095R01.edf dfc6a2fa8305ddc93e46704abac01c6c9138bb8cd69fd86ea6b29868940d0466 +S095/S095R07.edf 9dbd5a02ae1fecdb713909528ab6803c5a4fbe027dde1da453b71ed3420a3315 +S095/S095R10.edf.event cae710b11a021b7affcf41c5a3937e4c6bed000e6bdc1690cb997e03ef6db7af +S095/S095R06.edf.event 7309915dfa710afbbadf32d03ef14d92ea4f3831f5edb707fd8ba858d5360684 +S095/S095R10.edf d3456f0afc83bf51ea717fdc7f379df64555397b3cd52951b8f4e0ba8bf3b220 +S095/S095R06.edf 8f6847f949aba7a3aa2b5b2394e847a1ef5473f4dfe426792c7c8e75c13afe1b +S095/S095R13.edf 96ed237df16addebaec9ac6d19ba880e0818866c1e7be4bd00d6d69c7bea608e +S095/S095R13.edf.event 954333c8a5a38083d1d58167e4f7c3abbbf8949bb46067df41825e1156c3b517 +S095/S095R07.edf.event 6ae700f98a136e3a38418f1e7ad1db6ec534a13167c4f31e5d88b04564d47fc2 +S095/S095R11.edf.event 178339a2095d7eaf759407927ba3657ec8e96b153a8e085f8a593674f3697b42 +S095/S095R02.edf 1b71278872c1d079892ce8593c714b5dfe0d8a8f17c0133db8ad0bb4eff7cadf +S095/S095R11.edf 677157a867afe196b3c9eed6e156da65323055506f3bee130034ea7381ccf14a +S095/S095R05.edf.event ea56fcb5af6dc22a09e6c52c6c50be06f5bf4722cba178188f67732c6ecd0395 +S095/S095R03.edf fc0385bf90bb072c71c20602106599dfc212cd92c7675697d857db811e3ec2bc +S095/S095R04.edf d6e647c36b24df0524c32d896328b876a079ae75be41aa665c5fb0871ace1430 +S095/S095R03.edf.event cef39c5c7fed8cace25154d7385aa5c8666f28db51ba7c7224ce1650f9388915 +S095/S095R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S095/S095R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S095/S095R04.edf.event 8d0bdc46ca76b86f6ed45d1ca23522221fb5a9e165c0de588616c0b9c11a6077 +S095/S095R08.edf.event c6742adf7ce83d034f6e3cecede733919ea0d2fe7854dc4a86c12cf32c5c5aa2 +S091/S091R11.edf 1fdc4fa050d6249b5daa14099225882eae6c48a99b7993016b536b34ba51fd2e +S091/S091R03.edf.event 0487cd7ee2fef7813667623ecbe113ebbdd5400e7dff28fd7f0f7b1e00d585a6 +S091/S091R01.edf 98db88036017237d6b096d1307927fcbb21aedd5412a55ec641912a9a8dc1cf4 +S091/S091R06.edf b7d61422c998c20a1321c1dc8a1c4f8f33e47b31f8021f4f396ee3fce010f672 +S091/S091R05.edf 7425b2930d298ad1040c2e1ad27d85413fda9de0aa4a09482007be2ef5736cef +S091/S091R11.edf.event fbeefca3ec1354b1a7a1326279d804c2c564c4b25f2475ccda7b8e94ba4cd68d +S091/S091R14.edf 1182c440b3bbee1cec62695b426e6d6021e86f2b40902c9579e2385bd9c6d53d +S091/S091R07.edf 1f4f23a9a08c83685a165f25386bfc5aefd90ba5031337bf0775d790f7fd8e9f +S091/S091R08.edf d38c2dff0d43f699c7c2bde4387d52745f5073a827c2d41200e0aef9371015af +S091/S091R09.edf 
df600737e8cd8e20fabd7d2e1cab7f1814e555483a254bbdce0c8c5da1857152 +S091/S091R12.edf.event 9825c80a69d06961cbce2e12cf8d0aa56d063e5f4598312c4e5b78693a0219c1 +S091/S091R05.edf.event 82c4504914809d88b1ce13e11caa12c3a659e1a2edfab31c2e894283b14daba8 +S091/S091R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S091/S091R12.edf 3c17be3adc32ebdf36b4addb89fb78b0b0d0aed352497795aedd8f17cea30b4e +S091/S091R08.edf.event 082acf669cebc89d5f5f4bf29769c743f3b1e14290b8f0f34415e285db553b3a +S091/S091R09.edf.event 1cfd45342f8ef617862da12ca0e446e681027d6001054fe41ac2728751d3b2d1 +S091/S091R14.edf.event 132c4ce227ebb4e694982a37f3f7e9289511d75fdc0079dc876ccb6d9cf1a81d +S091/S091R06.edf.event 4e057b134c286502a0bd4e491daa0ebb3c4bfdc21b738b2ba2453c8ffc558218 +S091/S091R02.edf a7a4ae35661c7ca877f353762e1717f6441dea1fa345fc8e4908381197cd25aa +S091/S091R13.edf.event 985f9701733fcf2689c3a35bdb9e4b0058b6137ded5bc0f1a2a3b84431edb435 +S091/S091R10.edf.event b0dc1155da666ef5240efa8026cec5f4f69b5fb6f1d21226a512de3f6b62002d +S091/S091R04.edf.event 710672ce0f425a207ef543d23f608683e1863d7423702d511235d71ccbd289cf +S091/S091R07.edf.event 9b116dac2d8fff59f5bf4cc8d45636716575c73d6e6b2287709cbfdf0ce5f55e +S091/S091R03.edf 0bed1c3dded92a145b61d90c8df087b5502f72b54e5f027855297253944a1f82 +S091/S091R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S091/S091R13.edf 0dce3749db17c971b2d67c7aa1f0cf6db9745714d596f28808fb4595caa8855d +S091/S091R04.edf 285b7884315a91e2d565aa74f208b1c13a9386af477cc7c6dc8253efcd5d1f25 +S091/S091R10.edf 1acf6fe5f95741ceb0db5a4bb741bdd8dd45e99530dd2e159c11ace48707a703 +wfdbcal 76c15f6af371fd682ba78553d974a8c2d5f12c2f84f0bbc3f98d02f9757d426e +S036/S036R07.edf.event f39b724630cf39de64096da55e80ef4acde232132367bdb459b9a4b872fdabb8 +S036/S036R10.edf 7a675ef450988c75998d3ddca96f3773e611ba79d6aee5c09d51dc63e9a1a246 +S036/S036R14.edf.event 0028edf9b5fcc76311706f809ca44f884f227acaa02c56031cf87a7937c1d9a5 +S036/S036R11.edf 3cb485b341bacf8ade8226b2d2bc25ae42ff3d5dddfe7b500c558f3d6febd6b4 +S036/S036R05.edf f43f1de6a8790fd42aeda8cc107dcbae9577598842da1a7f46f2b6020c6d713d +S036/S036R01.edf dbdda796236e1262d7dd1758d7b4c80762b1865047b8bd2b28d59d273fc4f789 +S036/S036R04.edf c4646d8128095e80fbebfd03dd6c77ebb3491fad49f9824c74f3e4861d9a9d04 +S036/S036R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S036/S036R09.edf.event 132c4ce227ebb4e694982a37f3f7e9289511d75fdc0079dc876ccb6d9cf1a81d +S036/S036R14.edf 84c2ffe0f52ec16ab7b188b2713173b635a3a8beeecf575d77b118b795e6738d +S036/S036R08.edf.event 5bb35bd49434a9630e941b5646d6d89f7907531ef3e44464334b78943d4b0237 +S036/S036R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S036/S036R06.edf.event a4381c73837f1ba112959670ca4c474004b1d78b82daefb4ea220692bfe23c8f +S036/S036R11.edf.event d229cfdfcd562a5cfb40ea306452dab12d3ca82a70a465289b4c69c299fb0258 +S036/S036R05.edf.event ea6f6003ddc3676cda270c185ba40fd7915630a24511085aeb31217c9a1e8852 +S036/S036R04.edf.event a18bb793691db61833ea9b3b10da118a6687892d063d72ae795055c9209ba8ef +S036/S036R10.edf.event a71674e1fd86a57270a24706f4e05755887534f04ecc35f98e56f000312402a9 +S036/S036R09.edf c0db65c2fbaf526b62270a476cacc1200f437060fdebcc2528bc85891575e090 +S036/S036R06.edf 116ffdcab798c901b6fb13763417dba611275d4f22c405248f84144cebd33b43 +S036/S036R12.edf.event e19fc47bcf5b585ab27ad756884c0c4cd37d239bb04c8bf15e2f33fe9fe05e18 +S036/S036R02.edf 0dcc07f8b1362772a26db20e0e8ce9426c463973741bf6ef0079f897771ded0c +S036/S036R12.edf 
ce79efbd62a81e00a6b7e98bbb80e2d8c0c4bf5c23ab1c1a036884044d5978df +S036/S036R03.edf.event 48eea52bb9946ed46ef7e1b621c8558d41739ffc9aa7ed8fc25eaebd171ac223 +S036/S036R13.edf 6b2c188626213b2dbb06c07d9906be0f982fda71c9149821532cffa3855fd782 +S036/S036R08.edf 60ab89e8d0ecba687a40908643caaa4c7f4bf6ef92abb8d5c3c31201315eab2b +S036/S036R13.edf.event e7ef0a3d66a5c814c6e917ac8f9bf83d6056810f78dd0c9296b5445289483fa5 +S036/S036R07.edf 1b978c2ef1ed07eb355b270c32d59e9170e16280d6247578ec806886da798d0f +S036/S036R03.edf e9ec8c8a0fba4c7bf9de6db237bb6bd1ac4a3d5d6d273f9e37a0253ff4d431a7 +S106/S106R12.edf 1ea2b9ed0d8f02557b21bee5b70514b641e68321b3363cc3325b942832005a99 +S106/S106R08.edf e75bafd408f19d7c04d86479a414c0cc29be39150f20b9e676dfb40843f34792 +S106/S106R10.edf 42a957d437ba3b040aba7f84da7d0d30989268f5dffd9e9824436ac99eead107 +S106/S106R11.edf b3819655e3af2c063091ee1df642fef4205268cbe1692d64f91a37393a8e6e56 +S106/S106R07.edf 4cf953b4b5eff90f64667556cf5bf5c7185bb9c013cc8f00a0222fbab852faa8 +S106/S106R04.edf.event b1fa69a82433c5887997fcbcc3cd7d906b4b49e77e25114de9d45087c9b126dd +S106/S106R13.edf 7137ab5a7fb1fa9ae84d4172a80338ab124824d219b52725490854192d5a8596 +S106/S106R09.edf.event 4b81a244be0ef71bc8b32a5a151ca3746b282903e6c7a28b644879e8bae159b9 +S106/S106R03.edf.event e318e6bd044ead383499ec22af114e14b3a59def9653470287d411ce26ee7335 +S106/S106R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S106/S106R11.edf.event 6ae700f98a136e3a38418f1e7ad1db6ec534a13167c4f31e5d88b04564d47fc2 +S106/S106R07.edf.event 6004c1ac954f2a17e948f3273c849a0e322cb8bd539c483c363378dc50ae2099 +S106/S106R06.edf.event ee1750c58b0ffdf35fc0b7841091977647634e837de14a0b9f891ff04dd2f5d9 +S106/S106R12.edf.event ccca38b5a6381c8bccf9729b6a2e1d1cef2d3880619313ab656bd58ab7a0df0d +S106/S106R09.edf b7b4983a788dcd3e801873d215a4d9279ad10166da96e0f47fdf5844ad58c8b9 +S106/S106R06.edf d36e4c2a47438a8c4bd92caae50c73825e7927b4011f44bef010a43982a99022 +S106/S106R05.edf 85fb5d6ca6f88320f8245627bf418b098cdfeb00bcb2f909dc68c7d813aef14b +S106/S106R10.edf.event bd6d931c82981d9463af509515eeb55f7b6499310a828316c8f4a3ea35ecc7bf +S106/S106R05.edf.event e580516304935711e290073cc13e75cf23b577826c9814c6b0676828f9976222 +S106/S106R01.edf 605ac5d4ea9ca2170fa10ff85119297a77a7ccb7054b107294a903b911788c37 +S106/S106R08.edf.event 30b760c52698fc58e43824eb7302010b60c8374cc35c21494035da15da835fdf +S106/S106R14.edf.event ac9692706f0387f2683b9a9fd24b4d8ed371ffd2c916c2e3049ee3f63a0c5c9d +S106/S106R04.edf 7d7599596aacb4635049041a26635e1e14b6b96437a3304701a2a3d7f7cc8c05 +S106/S106R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S106/S106R14.edf 27e090ed4774109e03a7b986ebf7f8f617eef959906987a8f98e4599e285b653 +S106/S106R02.edf fec905ea2aa23a83ee57ec8cfa2dc7babdb169a3ddc3d71623f350ff4a54b340 +S106/S106R13.edf.event 59a87fb5bdf07fa6133a8304b27cefbaef11d6edf13e912fe47b9c6752721e60 +S106/S106R03.edf 603e54496f1e7f21a673e23dc91e304320f7d49d31139369f1232892c4c2ea89 +S079/S079R06.edf.event 15eac883e797e576d72c57d60ca80b477563711c2f4f8dd16cadc5a529d40f03 +S079/S079R10.edf.event 3b5e25a187d5d82fb5b851ecbebf095c41a5496c5a40492c7a2c638245bbed51 +S079/S079R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S079/S079R03.edf.event 220ce5d60602a369af8dd5bc9c722c30b74d0950101a84f6dcc0ab8dcb0c2025 +S079/S079R05.edf a66c18316867709634bcdbfea16f81c3612758d26f0bd252c088242e56281845 +S079/S079R01.edf 5dbffbbbeb71b073533cd280510875199ac7b00e5c6225f95c1b88dcf2fbed1a +S079/S079R11.edf.event 
8c5f1f6af23dab21ca4abe1e798a979ad9ab71892604c84a00379c39bd269aed +S079/S079R02.edf 1a43e3f707804eaeec05bda7513e811ada8701131b93650de93131e2e1befb88 +S079/S079R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S079/S079R04.edf.event b1fa69a82433c5887997fcbcc3cd7d906b4b49e77e25114de9d45087c9b126dd +S079/S079R08.edf.event a6cacf979bad39813ad2028620db2781eea6e16e029b8bf69686928d2958fede +S079/S079R14.edf.event 86e7714efe0f24263f2567d7a273935782e2b0153d9e1db329e96ab5cb995a8e +S079/S079R14.edf f3f3119d4f31164076e5d6668b931ddf1730961a7a9f7e198106ed0232adc217 +S079/S079R13.edf.event cfccf092791e4f541cc50e2a8c3317df4e3eaf04f099301c7f827e7dffd9c354 +S079/S079R06.edf 1d7a8f7c0bb7f17ddb08cfac1bd77bbe79acd1973cd920dd34fcfdb684cdd6a6 +S079/S079R05.edf.event 4b408c3796f7bf8dcd27259a0ae6508128a2f1069560a5cff7d3eba4508a6768 +S079/S079R03.edf 72bc07225a4c4cf79335f39e6dc62690bd4bce2789064e53d61af13029ae1416 +S079/S079R09.edf 9e224ee24274de2211dbc80327460af713fd708ca65242eae9119d860299b914 +S079/S079R12.edf 0350def02e230173edb8d69c7e66fb22155d00530fb7ce9e945921e0ab6abf95 +S079/S079R13.edf bf4a8cc1b524063cbcf3e4a7dffc1085be8c02a44e9f8fc5568b63a9d6d45c4e +S079/S079R07.edf.event 86f35381c7eb24cc6094567ca1b734e53a81bd8184a6364c79b7f00b9a5ece02 +S079/S079R04.edf 5538f0fbeb647ea7577e448856433edd3c3884516ef6bc5bf10d601cc62824ac +S079/S079R07.edf 74184070e26635730190095b4e81558916e897b97a0b09326facfbf3412ac6c9 +S079/S079R09.edf.event 4720e082121115ddaf3c9fdf14c4fdd2b2cd4b97a259d705f45872624ca24718 +S079/S079R11.edf 446fea2a6a8885b3230d3fea22cfa4aaaf2ea3f8ff43a413ddbb4e532b41aa70 +S079/S079R12.edf.event ee1750c58b0ffdf35fc0b7841091977647634e837de14a0b9f891ff04dd2f5d9 +S079/S079R10.edf 51ab713a7c33dd19f805f91cc03c62525d0d4c4b746cedaa3a26f2ea67f49471 +S079/S079R08.edf e65556abef6b586d84ebbb2e0a42caeb493f58af9c3d62ee4d41dbf43ceff973 +S023/S023R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S023/S023R03.edf.event 2553cdc46c8017dc83d793a7cce6a36ddf99a043e777612539831e67f1aebc23 +S023/S023R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S023/S023R10.edf 402074b10f0e8689441f60a53e8439a80c43ba456f0cfe245a84d1fc27c3fe1f +S023/S023R11.edf e5be72fc22fb3420b4a4936c87ec9c003fe377c6c5f7bd6fa39242496a0ba2d7 +S023/S023R07.edf 10d59b860cead78e224b2f6633a9cf54a5b735804157f5c8f820e7c560e05f42 +S023/S023R09.edf a787f36a42c2c1b1aaca4c71bfc26b1e0ce60fa90089763e86e4de36a6f476d2 +S023/S023R01.edf 3dc49252e5969854b3c0b9126058e3a9f28efcddd61a6f570702ee3043fec0fb +S023/S023R08.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S023/S023R05.edf 37405690b9dcb16205a9877bce43f81dc50efaa85422a29493e7a521a8e997d0 +S023/S023R04.edf 16ebfe43b2510115dbf1b15a00a429cfbd6b3a1443c2bed08e6755282e06f3c2 +S023/S023R13.edf.event f686071dd83e0610c6463f51dda1c01de1abacf998f19f9b7d3910c06492fc30 +S023/S023R04.edf.event 43bc660a507d479ebfc1c782945c8504dc21abb3e55cd48acc2faf4db673e854 +S023/S023R11.edf.event 9b116dac2d8fff59f5bf4cc8d45636716575c73d6e6b2287709cbfdf0ce5f55e +S023/S023R03.edf a2f2a6ed190a2381db55e49223fda8526bd0a603bc339f862046604352cceefb +S023/S023R12.edf 366faf95b5e6142e8c6cd9b55da2005533b199983ef0599d73d1535179e7ca5b +S023/S023R12.edf.event 7c83be0506163b52a89548c7af6d4893472403c2bab3a455c7d8c92bf3550ad8 +S023/S023R13.edf b316cc43e36336e6b535f0c8c065db474d057de54b42a95471b2d5ee342860fb +S023/S023R07.edf.event 1cecdedf3d7f8b7931b4cd84b48bcd356337c0ee32518d737ce0ee8f0d428d8f +S023/S023R09.edf.event 
af9bed6667136366cf88d2cd7f0f5fd9dda3fa51cc4cf73d3afe4d57dc8b7502 +S023/S023R05.edf.event 33a34db8c83e4bcf4288f2413aa75b7d7c03a7bccfa172b157568ba29ee42d1a +S023/S023R14.edf.event e14dc31d476c11841d45564cac660be9776fccedd3dec71dcee4c5232f00f25c +S023/S023R08.edf 70f7e8d4af5500e2114eea7db8c50d7c4b822fbd7c92c8907018e3aa094f4ffc +S023/S023R06.edf 2b903a9f5bd0ce741321e887274e5114d81a4138fd61a7baabacd83b630b4dd6 +S023/S023R06.edf.event a8ce17ff32c2c3aa820817e047ec3ff9976bd33355d14877f0ff811df5a288a1 +S023/S023R02.edf e75a898d302ec8aae307d84835922dae6c298607e9a09a95fbaa9d90452831ec +S023/S023R10.edf.event 99dc24a5732bc2857f1d3fba3d1a5b942b288ef56950e45354e03fb10183bbe6 +S023/S023R14.edf 6d8ac157a1c9559c29aaefdead7748e0853a3eacc54e7b855b97e17653a4c53c +S052/S052R13.edf 1997e7d693fa8a6f2f1f344ba3b57e1ff1ef2aeb330b44afb3151b05ac5dcd69 +S052/S052R02.edf 2ca319811ed895cb132183e3fdcbae6e493bad2759c806833a234c0e7d65fb7e +S052/S052R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S052/S052R06.edf b8e9e0add922295c01a91cc2b88c540dbf8c3076f77d97ffe726fa952442db03 +S052/S052R05.edf 8b90b20f8191b826f42192575c888496f7eeaafdb8e58aa5d3e8e523d48fe677 +S052/S052R10.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S052/S052R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S052/S052R04.edf.event 5e6dd7d9983b10c75f267d25fb4f039777b8f17f9d64869cb39446d1e9306505 +S052/S052R03.edf a81f9ff3a2af1126e4a1c65491ad03805e04c6895f74bb7c4edcbb630f144eaa +S052/S052R12.edf bd07d4c56f8d31a0fef2a31926a98c11104d21e9b127f84b02743cacc9e6eb40 +S052/S052R10.edf 4d8eb059c2fe2823ffde762b986df2ac941d7ad761b8006fff1719fd863dac22 +S052/S052R12.edf.event 48eea52bb9946ed46ef7e1b621c8558d41739ffc9aa7ed8fc25eaebd171ac223 +S052/S052R11.edf f9b1baab5e1efe90df754dbc91f79d6dfcc145947ea1518272caa713f9da2f25 +S052/S052R08.edf.event 6e4a2f2b413bd53735575eeda52d35059687bb58d072e4de1626bc16d7a28d24 +S052/S052R03.edf.event 98da35a4768842863f8f05d63e959a36f12126a205e0370d8519299fc06e56ec +S052/S052R07.edf 04cf399950b747941a2f78f7de5fbe7bc38c57064fa1b70603ac3879c44a4ad6 +S052/S052R01.edf 8346bfe5bae53f3a82438211538a6482d4e1c885b5b70953b862a4d30fc0bc26 +S052/S052R07.edf.event cdf1a70ff91abbdc6f14a7ca794a7b5932276819e6f7c68ee344c3d21e9b73f0 +S052/S052R13.edf.event 78e5b5619faf1f99b53251a9e743644a731ec9cc5b6e60533a3695f33fca35fb +S052/S052R09.edf fe58ed0bef28cb86f78bfa82dcdc94020a0281b05749b2c7828b580dd8eaab39 +S052/S052R05.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S052/S052R04.edf a519ee3e292f1d8317b69f8bf39e3c6ace26dd5084baf5824f8954b42fc2c8c4 +S052/S052R06.edf.event d0280a6531ef96d2b622c2d562c05bf53a0d00439a4819213365b6e52e54abd1 +S052/S052R08.edf b9583eccd78acef6f7bbb65318f6b1d3fec1ca88ab6e470cd5f97722635379e8 +S052/S052R14.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S052/S052R11.edf.event 13968dcd0074afe70c79ea250f7148e28870c5b0140104f078a501fd3b51ed69 +S052/S052R09.edf.event 0ff4d5f572885db930cfd469a0c25535d26e1712145e5a489a5e31e8a2020961 +S052/S052R14.edf 55a5eec629950b7d895830a3643d3692d47e8509426b2660aaad8798b905a038 +S107/S107R11.edf 8af14cd3527fa03c48ac9093a61310dcc562af7366867b65f4fbd1d75d53d520 +S107/S107R03.edf.event 272aa3698bab2b05544eceafbf5b26dafc58d0eb50a508a47c26572576853ebd +S107/S107R05.edf.event 5d8061cd48a1d74cf68aa1abca145987d71fda1c779f4e067d7120a42e8532ce +S107/S107R11.edf.event a7ffbae85e6a87d36d8a1790de88fe9163b4487f124a5e2cbad1abd1527eb341 +S107/S107R12.edf 
1f61ae883dcb0973b8aae174b588eb352e3d55cd32006cd3a083bf019ece19a9 +S107/S107R14.edf 1a9669549bca2d45e36cd14ad90df230987fadb9eba18af366054390599025f9 +S107/S107R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S107/S107R07.edf 1e0c4ce1cc9b901c3478bffa5bfd97010755ab3c61a598e193fe44364de61dd0 +S107/S107R10.edf 447972429a3a7ee6e6bbabd66b2dc3182b2ee8afa60b8dc354dd9b90837cd269 +S107/S107R12.edf.event f94fa47bd56896d1b35682e2e14d8d1fdb308a058c4a48b7360c7276bb8cd922 +S107/S107R03.edf 90826bec34861d168306758314750e1ff80a09684b25f2b15f393840e8558b8c +S107/S107R10.edf.event 018a676bc733cbc27dffcbbf3767758e526d1ae08a5dbd0785d28e267132a3aa +S107/S107R09.edf.event e318e6bd044ead383499ec22af114e14b3a59def9653470287d411ce26ee7335 +S107/S107R13.edf.event 6f21f5809578073ed2c858eefa6348294beb3ca2936579fcd1c562953dfd18ba +S107/S107R04.edf.event c10d573a5a983f2f3315ff36b74aac2bb21199e85afc6827d4bd4acf2a3c46e9 +S107/S107R09.edf 42b7bf6ddeef1eceb30b2e166d401edd2638d0db97b3c3c7561415b83cfc1313 +S107/S107R14.edf.event 1442094c7e4c8cf80c2b917010d3a5835f895b48a64150371ef6f1f36d5eb9dc +S107/S107R13.edf 83524393a1922213c548d24a014cac7ac9db87425a33cc2bab8612409fae5c52 +S107/S107R08.edf.event 53726fe905bea0abe513d5e84fd629ed577ff03739fbd56c6a12198e7cca5cc0 +S107/S107R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S107/S107R04.edf 7a4eb02e295694a920cc1db2b57bce8024b131dbbd1960310f4c5ac78d614371 +S107/S107R06.edf f164b842f5743e05ae7c67e4a190527b6f53cd7c4d6b8e7e34ee8c4f494c137e +S107/S107R06.edf.event 954333c8a5a38083d1d58167e4f7c3abbbf8949bb46067df41825e1156c3b517 +S107/S107R01.edf 08f3946647a37c1a9dff23a2821c23d5fc1421eaed08bd5b1e14291bf9166363 +S107/S107R02.edf ee27e5b4641392d705cb7ebf76cc42355edd23aabff64aa34495e5cbe9436d79 +S107/S107R05.edf 11a0cdbc2881cdc6bb1c8df54f8468eb0ef77dda94df5c1dad3de09fedccca6d +S107/S107R08.edf 0561cb71fa613df859d632d315cd694a686a0d871ee6661d645bc7e9355a6f0b +S107/S107R07.edf.event 6f21f5809578073ed2c858eefa6348294beb3ca2936579fcd1c562953dfd18ba +S041/S041R08.edf.event e1ea33ab1840970a45f9494f5d70cd8cabbc689566eca11d6942253b2395c368 +S041/S041R09.edf 43b6031d29f744456ec1b586735168e6cf08633d1f34d7353528e61059752f97 +S041/S041R06.edf.event df69a0d4526ab4c42f8d35b328874aafaeda087bb95ee7310d4f3654498f5746 +S041/S041R11.edf 29dadfc9c0a7046ad6fca3ff3123a8de9825e6ea46407d0ea0397f0f1243371b +S041/S041R05.edf e3b18470da33e7377931d2f8932bae6b2abdf12bbc5a4715f59b9d331d9ad5af +S041/S041R05.edf.event f8969447e196cd3b85aca233f8197d3642b3a82ffb49458332044d86c5df05d7 +S041/S041R09.edf.event 67b710bfbdaea5c65257f5bee9fa64cd171dd8b9c8a41d1686b0a14b0d997c51 +S041/S041R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S041/S041R14.edf dd94f1e4d2b83ccf6b984922886bcbdb9a2923ae8ea0756699114c50526399e8 +S041/S041R12.edf.event f148c5d5feebd73d0187005d90c8f91681f338bbb40cb34a407743227184144d +S041/S041R10.edf.event f148c5d5feebd73d0187005d90c8f91681f338bbb40cb34a407743227184144d +S041/S041R08.edf daba914c30b37217f9ed61f4f79157f9e23372ed083260c47edcbda4359c97f1 +S041/S041R02.edf 3c67750762b3ccdc5d76f81e13bd10b996a7d7f6b7a3de4f16985a21484d08d0 +S041/S041R07.edf.event 67b710bfbdaea5c65257f5bee9fa64cd171dd8b9c8a41d1686b0a14b0d997c51 +S041/S041R01.edf 5fec76892825c885cb231c16424e497fa7b0bc5d255f15117c2f896da85a27bb +S041/S041R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S041/S041R10.edf 4c75369bddd2fc10cbf7777bbb1c7c289547ab6f10bf05ebe4a9bbd37d9fedc4 +S041/S041R04.edf.event 
4583d527bb9a02f3b0e943a0fc2ce2201582f8b83cdbc0f712b57fac2679359c +S041/S041R12.edf 99f13ffe9ca9d9a3d680360b91555e73f190d91948c80ab8415b901b42350bc0 +S041/S041R06.edf 171ccfca39f773142019df61be511b23536720e385ad4e38e92c4c9a24a8870e +S041/S041R11.edf.event 9fd975ed76b006ff20105ebf0fd1ae6dd127f008e06a75cad4484483eaad568d +S041/S041R14.edf.event 1e4f0638e7419908f6581021a41fbbb2b368f8e087db11c71f96241ae6ccb506 +S041/S041R13.edf ec6f193a78c29024ae8e67e69b942ee30171cb36b99d82ef96d776f2fb1b7e4a +S041/S041R13.edf.event 5eab54e7aecee45049c9cf3e99655056d386829d66b77ed83e678bfb70ef0bfe +S041/S041R03.edf.event 89a83f0e623d00fceb1de134620c85fbe897d37c7fbb2b8d4c97de688059a2b0 +S041/S041R07.edf 980f0e64820ab06e8c913511d92c799b2ae1abe1e48bacf9b9f29b12fd2c93b6 +S041/S041R03.edf 568218fd5956a8c3c8e7cdac30a1e8ea0bd5371fa7ebb96dc9b17a120ad566a8 +S041/S041R04.edf d814dd4d8d3d1e3853051e48cd3ac5e4761c3ed33a509d3c06ec946b9394c534 +S101/S101R10.edf e90eeb58bf74f072e1a7382befc414f5f4c6a88cfd0c46e67b59bb59c09e404e +S101/S101R09.edf 265ae1a34c6bf36478bf3646aa3bced1df8d073090656f2d3b15993ba88e7532 +S101/S101R03.edf 6bcc5fc3330b946a93fc50f2d4ffae97612a78c6f592a443b46b9cba7a06df6c +S101/S101R06.edf 1a4573d0f58b9431883c95b5f4d7695d352632f7d0d40698f4cc82081561e23f +S101/S101R13.edf 13779bb09ca42f247f6fc8dffe19f02a70afb3f5baa49f902e2706fdb9671c49 +S101/S101R14.edf.event f804d1f19243344c8d8b505a1f229c8626e7f81cf4397b94650b0e6959d27437 +S101/S101R03.edf.event 1d29aa2dd90032d41f7c1c6386db9b2b26b7c29b87234d56e63f65c958acaa3c +S101/S101R01.edf 111b902998ad14ea66860661023c2e3132c8da06961afe0de02f71ddc4ee383c +S101/S101R05.edf.event 6110dd47d25df5fec4b10a72fb28e306cc9addec318d4703b53d4b40b49f1930 +S101/S101R12.edf 89538acbaec07991b51b64bb1dd2b7e3927a4436f02028410de95d06e85f683c +S101/S101R07.edf.event 3b5e25a187d5d82fb5b851ecbebf095c41a5496c5a40492c7a2c638245bbed51 +S101/S101R13.edf.event 273c4fa451c5781d94b0c8204068736b90665f96073084c33770108605bd302e +S101/S101R10.edf.event 0513fd04977ef5a66b77e72c59699e4e54ff57c226456d0796b1c58c38fb4d59 +S101/S101R08.edf.event a09f7e11a8c8dae371fa8ecbb2a00172679cc16c6776199906f532a7130b3e33 +S101/S101R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S101/S101R04.edf.event 41090322343b92e918aeb527b6dbeed57fa35198578161fe2d243fbc66dcf876 +S101/S101R14.edf ea43cff7ba5813903ce1695cbdac51c5ffecac98e09c974e3e63645dec45f9d1 +S101/S101R12.edf.event 3e0305869314baf38d1ecf15532e8069bab3bec83bfc99e199b9f5c49899ec56 +S101/S101R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S101/S101R06.edf.event bd6d931c82981d9463af509515eeb55f7b6499310a828316c8f4a3ea35ecc7bf +S101/S101R11.edf.event 007c6ac586d5de80642bcf571fae31808a22c5f6b8fff35fdd8f99f1b334f97b +S101/S101R02.edf 0b999b572a22c300088a4d0f7288a9adc7d09c554a8216b1231124a73468dfee +S101/S101R04.edf 6618bca949e60277dc7a6a23e9fcc5e1d582165550fa31b391a132b2b30b2ace +S101/S101R05.edf f09ad4dba652c6cca39cd9f6d2e030990a564413b30ae57ccfb68b770f111d4a +S101/S101R11.edf 0cac663bde79ffa8674e322b8d7590621568306cbd411618c4370843b3eadb6b +S101/S101R07.edf 4159deabe1ee6e896e8b3604c4260f227c93c43284c99d5c39db63fd3928cac0 +S101/S101R09.edf.event 9a1f9c37f0137ac6d7341c2a9dbef1bc161b35fcb6bd9e0db6cb95f8c7096c5b +S101/S101R08.edf 6e3be34d8b83eac9aca9438049e9de0a8026d6d861e1a0cde5a2df3b4c225331 +S067/S067R03.edf.event 82c4504914809d88b1ce13e11caa12c3a659e1a2edfab31c2e894283b14daba8 +S067/S067R02.edf 814eb9ee675c39178536aec7223f88869ce3dc0a02592fc422995bf8c9a8f192 +S067/S067R05.edf 
3fbb87ec6c2b0e60be7dab51c5f5dc4078a89d116b292740bc49904ea2c3bcdc +S067/S067R13.edf.event 69bc42ecdda8587b1f3dcaeb49f434eb51439670272d896577812c1a8c0b14aa +S067/S067R09.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S067/S067R12.edf 88607f444d64af64f30578bfd1019ff837a0139ad7985235a6401b4edcc1f388 +S067/S067R07.edf.event 49dac749bf850e47e5d13a8edef5121c75a7baa7f50917a5c17c1dda52d3013e +S067/S067R10.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S067/S067R04.edf dbdadbd4a88eb8fda3fd90835ac8a0257101c3b58dd08fc272892d707a560fbf +S067/S067R08.edf.event a730605f9838282a7ec09530538d4f4dc6f2f5cf73c8f2e85944d5e8297c441e +S067/S067R11.edf.event 2fa0b9392dbafefb266a1c269022274e4f4638513c85370c47ce925b54b7d5d6 +S067/S067R14.edf 2b5dd5b0a424ebb04c1d89b0f97532cfda4c4af1bc1c4c1c441b9ba05e3f0d15 +S067/S067R01.edf 7a869d87fe690e94b63ae466a2f8a888d8baf3a028c1be35b6d7fc2b80cb5eb9 +S067/S067R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S067/S067R06.edf 88c56ace75081e57c1b66f781831e4c82403aecd472e2883ea2cd696acdde5f7 +S067/S067R08.edf 838faf306afb01abe27febd1a0992f9ee46bf522073aa94bd625eef578f98480 +S067/S067R11.edf 41d47be97d322c350d4869bcc5fe702162f15f5e2ff8640aa87836904208bba2 +S067/S067R07.edf 5a0c99dc1266dfb64219b872d2ce8aa6536e37f3cccadb0a330794e467120477 +S067/S067R09.edf 06fe2f2805cb210ee9120ef76d997788545a71b52d18c53e58fb2550603bff8a +S067/S067R14.edf.event 99dc24a5732bc2857f1d3fba3d1a5b942b288ef56950e45354e03fb10183bbe6 +S067/S067R13.edf 267c23b4048f8214cad4b3903515bdc36f4aeec9f61e74c3be3d0f791df12b63 +S067/S067R06.edf.event 0c671e4e4ff7b21e4f75cb8796305c57d6ee3fc48e74337e26c3b9f5d49408ee +S067/S067R05.edf.event 8fbb43d322f1567ddece82c464cdf460a9df3070b5684895a7bfa7febb8e9950 +S067/S067R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S067/S067R12.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S067/S067R10.edf 6cdfae4ec7f0ff4b034296a7bc15c92cd6eb6db0536bf21dbb83520b604cc9a9 +S067/S067R03.edf b4b57a88b18afa8e2d40a0d05c53f5868879664d1c6363d4ecc0b38c9af8815b +S067/S067R04.edf.event 732398d864936092994f6aa5c7a3d554d1c1c35ff4fd7a90dd0c06a61260c68b +S001/S001R09.edf.event e438579df1a078f089a8a64cd82e2e11cb94abbd724605c4705658e9cc2458fd +S001/S001R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S001/S001R12.edf.event 482ba145b4cf10d7d6dc91bf708cc14230ec2a86592562a3f6c187be5c9f3e9b +S001/S001R08.edf 358fb5189220725141968ae285fbe9e3f36210b834ffba71d940af308e3aca68 +S001/S001R05.edf 8828f4b97931100af136cebf27838248b82f69cf359f7e6c3146a1970a8fb8d9 +S001/S001R03.edf.event ba934fef5794a278b361c657b30c7a254a5c2d10754a37f484373d779d37bf66 +S001/S001R10.edf.event 8162d74d19617d3dc613cc4d3505a1e143edecd3fb9c7901e255495cf94de0d1 +S001/S001R08.edf.event ccd5cf82de626903ece23e339121ff7919b3c3788cf647b575cf08dbb0198ebd +S001/S001R06.edf.event ea56fcb5af6dc22a09e6c52c6c50be06f5bf4722cba178188f67732c6ecd0395 +S001/S001R05.edf.event 53f4af70d71eafab6e0d5fabc2c8e8b73a48b4f21df8be00e2543cc640c12549 +S001/S001R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S001/S001R14.edf.event 7e61f9359aba654d84b2ebabe218ba21c6b3f8c33a13e5b6c9c71df1e91cd112 +S001/S001R13.edf.event 1513629f6e8e700394421b828869afdbd4e2ccdf8bdbed2127f75a42b2db3ff4 +S001/S001R01.edf 4743b736131a7e147c150e8b37711029b6cda5e356c4b3e8261a03cdcaaf8b0c +S001/S001R10.edf 20de1c7746c2349d16bda5e9f1b0ac7b7ad1581102a2e30dd2ac422696f62fb1 +S001/S001R11.edf 
d5296b9232b0ad88b7022155cbcde618df44d4b0db046ce3bec54f8f8644207a +S001/S001R03.edf 3427c8d01bff1380bc9ab9f27a35ece2af5dfadf3e291bbc05eb66e4dadbfe2e +S001/S001R07.edf 6320a941815eb7a0bc632e32c07c88b6e2281a0e2f177e8f49e2d0a16231145c +S001/S001R04.edf.event c81d77b099878d1d392e93aa7a18a46b936b690bad605aa84a652b2bd9cbff1d +S001/S001R11.edf.event 8162d74d19617d3dc613cc4d3505a1e143edecd3fb9c7901e255495cf94de0d1 +S001/S001R04.edf 3d161f88e1c00632585287d2ce584c2bc0f08862438eb255ea8723e00fac693d +S001/S001R06.edf 5369364f2c4e81ca141679d6dd2ba6ece61c7eb53d7fae31241b308876e1b6b3 +S001/S001R13.edf dde646236a13d846ca68ee71440f1fd38d818bc50a2f4804a29bcf0f773ad167 +S001/S001R12.edf 2b281c9b687b4c4176e83251d74743721f2d6ebd76656a972a3b9c44d9d88cd5 +S001/S001R14.edf 2110c48e3106898e3dbca47e39b330637afd3d3b8bc2da3ba1e44f4ac1118137 +S001/S001R02.edf 31a95e0a880e6c3d89960d9d62c144f24cc4e9f5d7e93c7f864ef61cd49e847e +S001/S001R07.edf.event 92b3e8a6b67a6846154b1244f9044558257134f17b25840c7f71206ad195584e +S001/S001R09.edf 1b642457807be572c31e8bb56a936cbdb554507d66e25e242c9bd1b0c557c53c +S016/S016R07.edf.event 8fbb43d322f1567ddece82c464cdf460a9df3070b5684895a7bfa7febb8e9950 +S016/S016R14.edf.event 6e9a969133a5a862400b62cb84f763eda38a0967078b1ebbfea1ca2ce8635b48 +S016/S016R10.edf dd359a54e0811d4a08e82284cc251e1ce193f365079578f18d006cf7f1e3919c +S016/S016R14.edf 80dafefa87cba873be315c21c9bc805c9f3c57b50cb9d0efa9a5b8b71037b1c7 +S016/S016R03.edf.event a40b48032f894d3401affa4d6ccfd4defb4c639cf235451c5fbe2296febae7a1 +S016/S016R01.edf a0396eaa7765e822a9520e78321044f7296888442e6258f3e33c550228d84576 +S016/S016R08.edf c7d59291f3d037e25c672da1bf0c202f63b7913a0f72401cb73f27deddb5ac24 +S016/S016R09.edf.event 0aaac0e6493abaa7abc2bbcabb2ba2ab9cf91a665d7b07e95e181cb6e7a7d207 +S016/S016R06.edf 8ff4a1d19384bffce8524194507bdb42946fa06da83bdb75518c9a23f9677876 +S016/S016R06.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S016/S016R10.edf.event 9a16113810c1d4f4c6d4bb0e9fa5ea774628a0b8f3e1764e93d71da831cb206e +S016/S016R13.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S016/S016R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S016/S016R04.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S016/S016R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S016/S016R08.edf.event 820bcb0b8aa75c06572fb3677af8b965e06ca92c0ff5f4eabd0d347c7b141680 +S016/S016R03.edf 976bcc77f002affd1833ecf5adbe73eca86d1d1b9e2ccaa0d8712095498cea4a +S016/S016R02.edf 129dccd14aab4f3c5761f1e301a9f75c9c0e408333fa4736771834dd0eab5d4a +S016/S016R05.edf 67d864417c5739713aa2defaf1dc9a88add841049adb829f61fd4c344c61f089 +S016/S016R05.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S016/S016R11.edf.event 0d4308120129d0f92d2b8779374ae232f98d1357e6c6427983fed655d9611af0 +S016/S016R04.edf 501fdb3cc2fb504d2948e7ce665c81e828612cf1af80a0b964b34a13803a4d6b +S016/S016R12.edf.event a7a73578d2f76bccb3e355d9d1c30ebbb1c7c5f4f6abd7b7289b88c138f56b17 +S016/S016R13.edf fd037b68ef03566fa39b6318b4b768cd0255f9e255b137cc3214a1d1175e952f +S016/S016R09.edf c1e5591ed4ea70ebe7fc300b52614affec8df35279aac42bce0ba79f4ccf6d1b +S016/S016R11.edf 067acc3474bca1ec4e4e59bf5c0e4d8402a9067fe4f1231d5e50808b64b8781f +S016/S016R07.edf 4fcbf7f4b5bae4290078934484ba3bfb2052b3cde700c9e2d0c1fb0654368f55 +S016/S016R12.edf f4e5942da16ca0992db7884a42d0b2a16bad648fc4bbd483f2954a93ca48a931 +S051/S051R05.edf 
64036f696368f79d9caf39094a99727e919d33e6e1573236b6fa7f8dfaf15b61 +S051/S051R08.edf.event f15f8724ee430d797697caab42986f25c432c24a06e9476b217deb6aed7450e2 +S051/S051R02.edf e4d456647129cdfb07b277b2c9a2c761d2e6fe92ff273bd6a15456aa5118b4c9 +S051/S051R11.edf 783e75586ce3cf5c00121b3b419215cb20dddd9c8394c22fc02a82becd411b1b +S051/S051R09.edf.event cf1d9fc8033ee0c3ff02b28ba22ccb2f38d4189988404e0aa8611090eca3aba6 +S051/S051R04.edf f1560c8e23d36805035cbd36496630af7fe2a16d7676b91df7ec8f2d039b2cd8 +S051/S051R10.edf 2f1d5ca3ba1b7a9b3d5193db62a36586b29905a2ea42633219befab290cf552a +S051/S051R03.edf a7e916278599c36b9a3a58060b44a3dd0b56c12b546a86918fdcd6711f4201e1 +S051/S051R14.edf.event ad5857b78d92955c6aca208cf731246cfd8288233693e3f4a523f0703f49f73f +S051/S051R12.edf a89b20cfa7435430bd00d59a884e1c9afbd5190636c63ad9d65de5917922d986 +S051/S051R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S051/S051R06.edf.event 478b8ccd4857cac2290deccf56d25d39325800222eebfce18860bdaad7d56c20 +S051/S051R07.edf cd6fbae8dee9aefa9fbbb8fa30cc10d532bd7dcfc021e466759a42e08416a71a +S051/S051R08.edf 548006fe57af6638963b64e36efff127e1581c9f27ac932c7d4f318fcd0008ee +S051/S051R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S051/S051R13.edf f68f9dba7c2141b4fba3d861c23ae7b5e46393a3293fb3f8087ba2efc4c1165f +S051/S051R10.edf.event 23eb3f298cf113a6057c8c3099fb82c94065a1da3246d3720329fb470a09b5d7 +S051/S051R05.edf.event 906c14054fb3c295684bcbdada9fce2c7ee5c3189024361cee275c892fdcd324 +S051/S051R09.edf 501c0bb8594da4fcd1bde7bc864fdf8fd91f8c5e86a269c847b8155e2b979697 +S051/S051R14.edf 1a97ab81c5f3e4ab30c8fc11f6509b81975aa726b214a65f7f17829e25b91b10 +S051/S051R03.edf.event 93831ffc4037fe5777b156102f8c0af39014f5cb1afa9207470c2bf4bb5af867 +S051/S051R06.edf 40860d44894450c46d5dc9fe2afaf7bac8415cc5345532b7d1c121cb6bdd4cd2 +S051/S051R13.edf.event 8e39b81d7164017e1d67672dbc17ff18d31922b3f6365e9e1961814c475b2210 +S051/S051R04.edf.event 910fb8c1ca895e78ccb614a06814156c6ae67b42893086f2694aaacc81a199cb +S051/S051R11.edf.event 80df586db524a77e67f11eb275b00d505b0ba9212ee984d6f721958f2b100b4b +S051/S051R01.edf 47fed7512d9d7d87c2f85deae9f5c8e80c9734b44352ade28655e8a27ba0ecca +S051/S051R07.edf.event 1dc2bc1afb05cb56cadc05fbdf2f7ee0aa7f1bfa84e10e335f6618bbb761763e +S051/S051R12.edf.event c0e03fc052196f399444ee8e817cb9226c53c8b1d27137831cbcd0d283821560 +S066/S066R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S066/S066R04.edf c92f6dbe450f79af5e76a61188c012ce7140899ef23a4adc5ea74ffcd1c36fec +S066/S066R07.edf b35cc97a9bdda9f56f4a85a19d13ca7f3abf1b1dbeb93139a1d26b788643d95f +S066/S066R13.edf f455678ce77fb04fb42c6f4dc40e0abcb0b009f2b2bc9dfe59250d08a0d8ca34 +S066/S066R14.edf 788e35ffccd4ad43f92c6a42465aba7b781109aec6e6a6fbc4851b96e68e929f +S066/S066R09.edf.event 257122cacc7c40238756b11f4e144c0736d7ac0f933a02a368163a2db8e83122 +S066/S066R09.edf 831a2876dfc28239666c53d7aa18f1add6beaa33032fd96f5854a37bd8d30ea7 +S066/S066R12.edf.event ea1eeabac130c6b0553b7253953303c231f74a93e902a501d7882ce6c58f2f2d +S066/S066R01.edf 6511edb550c0b24d34f0fdd8ce50bd61202130524099c1e087aa92e258bdacb9 +S066/S066R03.edf.event a4955229baef6ae4fbf6af78608901c020d3c47fee4b7280c8c617c752a1865e +S066/S066R02.edf 6ac66d3775bf65c163f944648a5a608248071ae401b45acfa7adca1fe98bd23e +S066/S066R08.edf 60bf15a0629829bc2e0c615639c0f288e9b1e963a086293ee95da8a4d70f3b24 +S066/S066R10.edf.event aa88e9dc85f46564702f7964b37b2058d00e5e0b93d498e32bee49aaf8e7c745 +S066/S066R04.edf.event 
8a76f461c0b920a909de21383cb5135f496cf1aa992529755f784ebf12db55c1 +S066/S066R13.edf.event 834d050efe55560305b0e19f49629260d26f65d715e477894ee2b099b0f5b1fd +S066/S066R06.edf 545722e46baeb21596eacd15e46e6236a925daf0283c23804950973900a4c22a +S066/S066R14.edf.event 8c03a670d4980d94f17b5f418cb3ae868421643e376bcfc41d654e67cab2a9f6 +S066/S066R11.edf b06a5d3fc3538be7edacf428a6c2080c011c3241996714172a456528daa2b9eb +S066/S066R07.edf.event 2e361d8b420d6db7c1d4372f2f615a9f9290ab6956917406863d12a1fdec8f5f +S066/S066R05.edf de03c1802e5f55b5de86e87dea506fe7c45c643fdfbcd6f5ffd95e7af8c8b463 +S066/S066R03.edf a760e737deaa26ac36f6c330bdcae1ed775eb6d2fc702a729eab4c0d0c7d1b93 +S066/S066R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S066/S066R10.edf 9705bb7c458b9220de25f6ecea44c046c25558c62bc975e7a3acf2b2354bf54c +S066/S066R06.edf.event 3cc4e538745fefccad07dbe59abc1ac855e1b6121abf89d9f5027ac9337e7c8f +S066/S066R08.edf.event b6e7436289258e26595ceb80330f103bbdd26d9f045dd5fe5e5bb46498a69180 +S066/S066R11.edf.event c8694215a3c53fb8dbf9397e9d5cb12ad4bf06f22ed39533939d10b8ebcacab2 +S066/S066R12.edf a3e8b2021df910995ad9792dfc2ec025ce1b858cba8946cab2bcab9899b51e2a +S066/S066R05.edf.event b1551573cd91101be666afa2abcfdb421f144c3fa966b0a62b6251bba942a18e +S044/S044R07.edf.event 2553cdc46c8017dc83d793a7cce6a36ddf99a043e777612539831e67f1aebc23 +S044/S044R09.edf.event a8ce17ff32c2c3aa820817e047ec3ff9976bd33355d14877f0ff811df5a288a1 +S044/S044R10.edf 0d7bf36388fd847667152db2d79177868cd25981779cd66edbccdbb5646d7c58 +S044/S044R06.edf.event af55de06274e9f057a8c2b10bb8a4665c70ab950b43d066ea9e3d1de5f188b79 +S044/S044R08.edf 5f85b54550e4f648712fbd56a6edda9df6d93f0169b057dcd79ffc0a20224988 +S044/S044R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S044/S044R05.edf 85675c31570d00c43ffaa607609f7e699add2f044f0051aac9ee0b1dd88f256c +S044/S044R09.edf 9dd68765db03a4e42839ff923d246b88e53ba314489c553c6c9c0740bed073b2 +S044/S044R10.edf.event 8ad7739d7d370e087ebe5db6ceadbc03ee38d96703e78cb5c91f2b714f07a25e +S044/S044R01.edf ca0199eeca73ce7b30843e2a06baacca5cfcdc4750cd01a1e237af6feec5dccf +S044/S044R13.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S044/S044R13.edf 64219a96357fe674f8926e8042f9fd2f8210dc8e653f038a1f26671cc2c8bd0b +S044/S044R14.edf.event f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S044/S044R08.edf.event 48ad6251760a77acc67b82ce0b201f4244cd1385a6181c8236487af76fe1636b +S044/S044R05.edf.event 0724cecedb70811bcaf06ec5a41151edaf5209f968315c4da2e3a25bbf0fb9ec +S044/S044R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S044/S044R04.edf 5da0315dd8b4e33769cbaf69b91e88845cd460f406fc0c72b793bc1f6b2229d2 +S044/S044R02.edf 52dd4f7c19b57e0f61aef7db45993dd60678657045b7541e390defb5602724f4 +S044/S044R04.edf.event 51f07832e9b1d3d8c667f73dde4aa38f9d3e45cf2a4c2baf8e47ea328c860420 +S044/S044R12.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S044/S044R03.edf df43d9106188f5a38d0f518a5809dc454d5c6d66dcc6b3bfa3edc42041edced8 +S044/S044R12.edf 84d09ee270206ab1f93595cb00d28b51af2cee24f4dc44c23a38464db833d463 +S044/S044R06.edf d16e19dd5d5ecb9b871c11ca624ad8471c3088a3500a65314ec04f3056551e44 +S044/S044R11.edf.event 39b46b55fa02f8503f1d8726fb4de22eb131c815725552e89309f9db71c825ea +S044/S044R14.edf 717ec3207574cd4152200d6f57d531bcb9c699b4f321d34754d055407bc0a542 +S044/S044R07.edf 5d0b03dbf8b1973dcd0a77ee284ecc4cf6fa13d7113ec6b3d0f7c1cf6090ca03 +S044/S044R11.edf 
9c104974be9015b57253abae61eb0c3e391314cfae8fbc94bae03c1ce0cac4b6 +S044/S044R03.edf.event efa5d828429e8df393cf3e5b452a00dc9f05f3ad9eeeeb119ad18eafd1cc4e2a +S092/S092R14.edf 99b147c5ea0bca55544ed7163d3747174a5a06a6707d057100d124062f1af173 +S092/S092R03.edf 7131a3198f80a3a770ee2396e8792f644fe9ee520edb632edcec921fc9a57c69 +S092/S092R06.edf.event 682de6811acbabf1882380fe24d80b87fda2ac6a48fbdd6eab47970ea1190440 +S092/S092R04.edf 73ca4043e4c8d38722fc6482a0e231a8505ef6b760cb26bbbb8206f481e683c1 +S092/S092R07.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S092/S092R11.edf d6952ffae0a847e93296a6a5b66c9ef66d5e685b08fceb730e84fb36793e806a +S092/S092R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S092/S092R02.edf aabf2489637c7c71e664cb697665bfdb134d5d4776b1ed95cce47bb7c02efada +S092/S092R09.edf 9ad876cd642aaa1e8607da4e838331b5254526c21940423742f224bcbcd820cf +S092/S092R06.edf 8d8c4a918a1e4081bde09019c0449531dec960908dadca7ad25ce9beb87d900a +S092/S092R10.edf.event 0930979151ea3e22809e2b68d6fad0bb672dea8e58caeb269c20f5bbed49f1a3 +S092/S092R07.edf 0067ca60ff80f5f27bd234bdc23414e0b99b0adf5cc8a989aa2c34afe6de2204 +S092/S092R12.edf.event ccb7718e9ad0d8edd7de8d12553d98a89bcb191c436a6067b3e688a579d4abc9 +S092/S092R08.edf 38c8c9e7261c6882b4fc0db19c4ccf905e5249611c6fe9d3c34ed7da098c6481 +S092/S092R09.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S092/S092R05.edf 777b5e451f9dda96881d311ef5f6c9b913a738aaee0c1ac613347dcc6936e3b4 +S092/S092R14.edf.event 0930979151ea3e22809e2b68d6fad0bb672dea8e58caeb269c20f5bbed49f1a3 +S092/S092R03.edf.event 87602b626ec0e48829f47bf9d0c1e958c80a92ea1d57745168a2630ed1101f98 +S092/S092R01.edf ca68794592fe3ce3f0217fda4729b2ff16cc83a3bb447302236fb0d83052edfb +S092/S092R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S092/S092R12.edf 5e84bc4b49bd530383b8c28c9f0296d7847012e49b276e916278cea2d9b4ff9d +S092/S092R04.edf.event 82e7ace9f055649957ab04324ffce9101db68bcfcceac1df6786e304f9de8669 +S092/S092R10.edf fb3c55539a8144ffcb8318461c5f8c8967ae27e84a56e8aebcfd6fc0a21df933 +S092/S092R08.edf.event a27a31471bf1b8bc6ff9b144eb110e9eb0711b24514aaf47ed1cd94395c10b7e +S092/S092R11.edf.event 682de6811acbabf1882380fe24d80b87fda2ac6a48fbdd6eab47970ea1190440 +S092/S092R05.edf.event 682de6811acbabf1882380fe24d80b87fda2ac6a48fbdd6eab47970ea1190440 +S092/S092R13.edf be01e0d6d7fc971cb579f030389178b37159e6bacf90dfb41422d48bb5ad02c1 +S092/S092R13.edf.event e88110dc871719db682717f3a256188ebe916d76e5a1a09b0cea908778ae4424 +S035/S035R08.edf ef619d2dc6be970660c2a7ecbff9d084633c3fb023627604283b9606ea7fb668 +S035/S035R12.edf.event 96801f1d89a25da6122f3b69f7160e12f563f1b8380dce64f2d4b18d00121dc5 +S035/S035R12.edf 2358c7e90652d3c687fbcda65ae4ad4a0faca39dbd736c06ea1d8e20173671dd +S035/S035R14.edf 4192067f79a42c47fc73338bca078246f2325f88f5627893efb1a1b5550bcffb +S035/S035R06.edf.event a07306d470013adf2a41bea413a8bac37a03938f3358cb519e480a3b753330d9 +S035/S035R09.edf.event 4b408c3796f7bf8dcd27259a0ae6508128a2f1069560a5cff7d3eba4508a6768 +S035/S035R14.edf.event 3048bcb4e0fbb0a4db3d630b323c27c5e88b2f2b6aedecde86b33b1d4f135461 +S035/S035R07.edf d27bdac7d879d570fd9ba141bdb8cc1e9b94a1628b4a0f430e5e1964dbd90941 +S035/S035R03.edf 030e8222c98a22dac34b41a08b60208dd1b314ff3b805d0a7e102b455cd279fa +S035/S035R13.edf.event 14a453fc6552b2a6d7b55329ca3533853a89a83368101bee6b522e4a67d37283 +S035/S035R05.edf 09e053c055aaa3fd01b024aea74f20957498f72215850510184cb3484cfec3fc +S035/S035R10.edf 
dd4df070d8b01f68173410835b3d932f4f9a8779191c0518c0933c38bbadcc94 +S035/S035R04.edf a8f13980108a70cc3d410ab231588bf76928e51ef9234650be443ae9cb4bfab3 +S035/S035R06.edf 5324d829c96aca9fe7a0662090dff6b541693e00b782fb3bd07136896a3ddb9f +S035/S035R08.edf.event 944e95e6448926cfca766f748af6a71320282fbbea4cc8f227c8d8f4401f5bc4 +S035/S035R03.edf.event 969cbf2f95e5e05eee52395b936c0acb835c1d39af20b3327802965209d3513a +S035/S035R05.edf.event dbd0435f98476653a27f53b54a6757c6e4596f6d9318a14067e4dd50bb37a888 +S035/S035R04.edf.event dea4ebd6eb695d78f9821d3bfa3d4a325dfc5d7ff6f514c6a3f205dfee689e4a +S035/S035R11.edf 1959e6d5a8130d0f13be346a63880029c949526ff68285713d790107389f00c2 +S035/S035R02.edf 877f3cbaee32e10af7a724be0d709a3a9dc4121a4068b7e4ae418300830990c9 +S035/S035R13.edf cbb0c1b7d31129ac29c52e20ed1b01eb9322e8c72591da0bffc3ce51135cd9e9 +S035/S035R09.edf 5f930fdb0975acb9c936448d93c9e454cf3ee73ef3dae7eecc54d1a92662058c +S035/S035R07.edf.event 273c4fa451c5781d94b0c8204068736b90665f96073084c33770108605bd302e +S035/S035R01.edf c3f65f266df7240a795efc209424b75cd86a30823f63795b51a62deb095f4700 +S035/S035R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S035/S035R11.edf.event e9223514a9ad07601113bf25a3c8e05f3728fcbb5a15e7cce3b90c71df93b940 +S035/S035R10.edf.event 21ef6252cae53bae58d9207d35feda75f1e4a419273f78801e618eccfbbfee77 +S035/S035R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S046/S046R12.edf.event 3048bcb4e0fbb0a4db3d630b323c27c5e88b2f2b6aedecde86b33b1d4f135461 +S046/S046R14.edf 1583e16bc35be30f73901d64a976852fdf89fbf11cefb7b71473875497a9110e +S046/S046R11.edf.event 271170f0da25b9bc7523c8eccfc0cb14ca8d86085b0706c07a42357eb742bdb5 +S046/S046R14.edf.event 23fbb5b9f1007e8652ada0247eb06163f5b93eb5899bab7b719341fbeaf1345b +S046/S046R11.edf fd4300ba279b330343ee16ed3e53e09b84123f9df78ddb15726c5dc77d747d42 +S046/S046R01.edf 49b18f6779e3fdb549c80551379d76ab5fa7f9faa429e171bf4cd38f121ae5d7 +S046/S046R06.edf.event 2a8cf80126be415d578e94fa0061ffee1677829d53c6df1ba1aaede8c845f31e +S046/S046R10.edf.event 7eb3d99d1a46e50d5ec14297be174789c9a514ecb48636ff9ba19d90bf2ed9c3 +S046/S046R08.edf 30e3df4eb6380358330a30b9d9906286a110c46a5eae2a991e83ced4a2abe3b4 +S046/S046R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S046/S046R02.edf 73f41c42ba32cc7acb2d2312bd33308378310e3a4be74490589b95e91fb64d45 +S046/S046R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S046/S046R12.edf 3238e8feb6b1faeb6130cc813b0c147eff986e9332cbffe7b0dd9c5a87975c1b +S046/S046R04.edf ed0b219b3b7f3a340b504a801044b8a6c9c33095a59a3dfab535c6a06abd0670 +S046/S046R07.edf 6e18290e6aa7e774e61d3d5f1903b3c59680b56a9afa3807d381d267fc8f8d9a +S046/S046R08.edf.event c8e92376627dec774f2017745ed32f94ddef2c19ff70ce24fa9679133dc0e7d7 +S046/S046R07.edf.event 13687e378e5618583bff478cd8ad8d7c8c39f230597cfb455142d8199d52bd8a +S046/S046R05.edf c7887aeddeedc3aa7a7439bfb778fd8f8028eca8f53375cc99c3b5aefe2f9bf3 +S046/S046R09.edf.event 58db360bc4c16d775b7e2c4797c2215d9f1405c25061a2237ec95b2ac264f964 +S046/S046R03.edf.event e3378f798c2ae2109571b901374ec3f6e67c17b45e2f63b458ca5b1db30f1ea0 +S046/S046R09.edf 49c058fd06ce3f5fb70d637aed9953ca168d6cb2ca0ac698a8061101ba2c3054 +S046/S046R06.edf a7e2bb4b01b80d2a367d92755a106f908a09716da557f527876d6feef610482b +S046/S046R13.edf.event a6cacf979bad39813ad2028620db2781eea6e16e029b8bf69686928d2958fede +S046/S046R03.edf 23321c49c441693d32b7b85b5e77840dc94a90f2cd66c7df24d5d3e6fd01345b +S046/S046R13.edf 
d17bee540747f86c9a72c8e8ed054d2a85c419cca8c9daa77e6cbe0e312d8c3a +S046/S046R04.edf.event 3048bcb4e0fbb0a4db3d630b323c27c5e88b2f2b6aedecde86b33b1d4f135461 +S046/S046R05.edf.event f642bffc792f9e20fa7a76eaa7a0776154e7b8e8be7138c7683483a28a2ccc74 +S046/S046R10.edf 884de1d68843591d3e82f99a1b9c88e1bd9293ce0dede387c14a66446c6f74bb +S071/S071R11.edf.event c65a4f3a39476047a743af02cee3029310ece7de2dce5aba866f5d620c729555 +S071/S071R12.edf 54ab787be535020ba05284d323656d201776261b78e2d9f903a8b4b573dc6be2 +S071/S071R09.edf b5f606ac18c89c1a9ca45e2541d18dcb0fb0cb337bba89cb91dc36c45c5f45a4 +S071/S071R10.edf 363b6fcf3c4c82b8a7c7e4af8a4a68e4861004c3838f15c7e80b34cf2649ea10 +S071/S071R08.edf 0fe7edfcff87102410fde01bbb9a4770b20ac6359f3bd77dbbc3551cd2ce35cc +S071/S071R04.edf.event 4c61f07321b475d0f07c3a93b262cc94a5bcc7e8c59d05610786071ce45cc544 +S071/S071R08.edf.event f73e764c2a5687f81917e5c4eaa8b964ecb99e4c58ff33aa7e02e5fe1f655a98 +S071/S071R12.edf.event 954333c8a5a38083d1d58167e4f7c3abbbf8949bb46067df41825e1156c3b517 +S071/S071R09.edf.event 14bdd1b94d8b8ccdce55e581601a4a304c1444e030aee15596722a70be24c5c4 +S071/S071R05.edf 751711bc497af9317c44ba30c77fdfa1333a743541b8197f2d7a341536cbe941 +S071/S071R06.edf e3ccdfd753bde27a09107acfd381fe97d3efb253826d211373559051bae81199 +S071/S071R14.edf.event e9223514a9ad07601113bf25a3c8e05f3728fcbb5a15e7cce3b90c71df93b940 +S071/S071R05.edf.event 061a2814f7c0e21b94dd0e714f51ae50ebe28a4f57d1c4761b554a77f13694a3 +S071/S071R14.edf 6e682c11303810dbddbc47aea87f7e5f9c2d800e1cc28147daf7215488251c5f +S071/S071R03.edf.event 178339a2095d7eaf759407927ba3657ec8e96b153a8e085f8a593674f3697b42 +S071/S071R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S071/S071R02.edf d2ba66c8ec637b4fc6304b0562cbce0874ed0faea3464a72221840390717d7d6 +S071/S071R04.edf 746cf8cb98ec73c251059675f4c5b956c9e8db11bd0de7b7785950e53ebb4647 +S071/S071R03.edf 2a196695568ea8b1f977ddcc95e4e8211d55c087e04d539ec64f65537d438e58 +S071/S071R13.edf.event a3a021aa3014366fda2210569cdbdc257724ba6d0d11b1e6c924103e837c7294 +S071/S071R10.edf.event 837f0145b9da4dcc73e14962769b9c68f3eebad462eebe9d8796bc8b099af925 +S071/S071R01.edf 55ee9a28e1496f5b730ccbeaf859063b2dcc9f11039905e6fbe04fd7fbeb38d2 +S071/S071R06.edf.event 582fbf3e4f5bbd51cc3d858954988e7800ce943626d6f081f659cdd9b863fc0a +S071/S071R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S071/S071R13.edf e47c2ff64522b900eb7c950722b3cf31740747543a512b65b11a233282c8938b +S071/S071R07.edf.event e697ba3ef839244357e1c111d7dfa9afb60310a0bd8b7935ea5a426a1711194c +S071/S071R11.edf 6c798e4af0e540fcf6f026ed4f10498ebc2da3075b3d30a564822228d67af808 +S071/S071R07.edf 0e734c92705376f7e26989101d0d6bc4adff69d8eca4a8f98a17b50700cfef27 +S086/S086R08.edf.event d192ea57ff85ecd8427faf400415c2002cb41aa1189199e0c6ed62a7ecd048c1 +S086/S086R09.edf 907868e30923356a1d675361fecd1a60cd4db3e2fa52d308a5da2a10f2079331 +S086/S086R11.edf 54c35f32beb832e3549fd73264eeb3092601b2ba29b2b11c9c7be42a70795e31 +S086/S086R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S086/S086R08.edf 221dca79488392aa00b3462fcfa6a21b185e5814cf6534ca35b24780aa1da799 +S086/S086R05.edf 46d4fc4f3986d58f220d2c5db58475cdaf6c2960eab7c8e811d2206bf576130f +S086/S086R14.edf.event 23fbb5b9f1007e8652ada0247eb06163f5b93eb5899bab7b719341fbeaf1345b +S086/S086R04.edf 606f9e327b219cbd980f4cb5d83497052a07d927415d1769b039bf0ed7eda693 +S086/S086R10.edf.event 3b5e25a187d5d82fb5b851ecbebf095c41a5496c5a40492c7a2c638245bbed51 +S086/S086R11.edf.event 
9b428c26e9e449e90bf57c3b9f5322d9c5bc5f4a65f709a1ccc3c22566292dbe +S086/S086R04.edf.event 272aa3698bab2b05544eceafbf5b26dafc58d0eb50a508a47c26572576853ebd +S086/S086R06.edf.event f4b63d6f50dd4c8695c739b04c7adbcdae610db216040f9c2b732bd361dc9121 +S086/S086R09.edf.event 7771fdd0442286733558fade28a9d92e116695d1dfda6da3cf3487c4467925a6 +S086/S086R03.edf b9d675c4eea24f641af5ca3bc5eb08edd0b8441100c6729a4b3652bf0997447c +S086/S086R12.edf.event 9b428c26e9e449e90bf57c3b9f5322d9c5bc5f4a65f709a1ccc3c22566292dbe +S086/S086R10.edf 2ebdf1e349101b21c3e9e9c5a24f8b6546bc7c30691c6f7cb599087554a5fc69 +S086/S086R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S086/S086R13.edf.event 54016a6260b12c8b99943951ccdc7d5425efbf9ab503ef1abeb93deafb262790 +S086/S086R07.edf 012d5e9c8d6e50fed9e25a748a638168b18fd18153a2ef69a4e1b8cf5836250b +S086/S086R02.edf 4a9237d42c10627cb84d023b5b7056000f3575ee82c10918e4cee3960f27e0e8 +S086/S086R03.edf.event 600983ef19711fe2016d742d1857fcdfa4f0ddc7c5b8c88773db0019f92315d1 +S086/S086R06.edf 370494eb367603a1bb5e16ae5a41c0c85f359c2c0d727bed2a2658ed3dd2b612 +S086/S086R14.edf 712b7758f35dbec80272ac5671e567eb1a61a898a8a05c77a47286955c8e6074 +S086/S086R01.edf 32f1b040a3f8bbc6f3393a48b81ed911095fb0cd78859a129e918f675b3c4167 +S086/S086R12.edf 966b7d92a7e7d3981d56dbf0e159fbfb3fbaf2d955e3573e384918b24431284d +S086/S086R13.edf 6fa20a1e6197c689ec1e4092bb26e75de63e5b2f755b434652eeb11a8b49bb11 +S086/S086R05.edf.event c2ef16690608bdb9ca84fdc930f584191923370f4568e2e42e5808557fef6d8e +S086/S086R07.edf.event fa68bd707dddb5ac8734d2427c1812d242ef23f1aa95b9b002fb67ce4bf47ea9 +S012/S012R08.edf.event f39b724630cf39de64096da55e80ef4acde232132367bdb459b9a4b872fdabb8 +S012/S012R10.edf f70b4292ae24574f7f4835ae89e7d5fc6350c26d0d8da86b347ea0bf4956a17a +S012/S012R14.edf e1b65406fcdf9107d1cb5ec813b7dac9a074a212172d1f79fc403304125900ff +S012/S012R13.edf.event ffbdc0e528335775cb88a14baab7b7f1fff4fa1647a473bcd640feefcbfab5a3 +S012/S012R12.edf 2b76c97a6cbc894a85a4f54385997b6aa07d3e9ec040ec1adf61a310b26f5caa +S012/S012R04.edf.event 1ccc28680a9b24af49105d307967ceb434f5229a29e70fbfde089b2d092db7c3 +S012/S012R05.edf.event bed28cb624951271916f88bc556ff204ccc63699ebc4523ed8043baa9724625a +S012/S012R09.edf 4255d6b2a406f8a8e9cded98032950cd00d97d8d40a558ea1eb34993b3a9c7c0 +S012/S012R02.edf b4f7e8b1b083835899b7b6cbb58c582fd8290311135b3e0148c5b6ffee4f6b1a +S012/S012R03.edf.event 25af506ba70b68343c97ccca1ad0f235958a8dc9bfdd84c6f7155937e7bc7efa +S012/S012R11.edf fd7bda82802bb91b4e04848ce1a8a9a3e5bee822a811d0bedcf812ea4acd273e +S012/S012R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S012/S012R10.edf.event 151aa0e52269f6759e2bed18339cad06a9761f4b713071e665a50681af66afc2 +S012/S012R07.edf.event af9bed6667136366cf88d2cd7f0f5fd9dda3fa51cc4cf73d3afe4d57dc8b7502 +S012/S012R13.edf 892c9975b683852675a14d64c24730756c189133c01aa974eeeb41a7291e3d09 +S012/S012R08.edf 5d7d13ad211f615a21db64c3a4dbc8ff4ef10dc3f777b8e6441d8942e8d40336 +S012/S012R06.edf fe617707aa63e902238e4de12cd8ec22c55822d05d09f60bccab9d08ac53055a +S012/S012R09.edf.event e9aa79af3e48ec970083b6f911002eac68ffb799057d4805cd5fde8f16d76b97 +S012/S012R04.edf 974d6c7558c8d8ee48449bf9a1f40cda596febb6365e69e8e65c2d952644017e +S012/S012R14.edf.event 37b92549c1fe5d740cc394805738f2d228f8fb948bb4c3aa4817c4dd4b04b9e5 +S012/S012R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S012/S012R11.edf.event 04192a55fa7728d34533e76a1b831fe5c94d79c3bc9f1028f45644c51892a744 +S012/S012R12.edf.event 
151aa0e52269f6759e2bed18339cad06a9761f4b713071e665a50681af66afc2 +S012/S012R07.edf 47e446122ad05c46d8c0e605c84641ab8a93ba2b8647f0eeceb86ee26ee9d9ab +S012/S012R05.edf e216fb3bfd30a0b9e8c94e1a99e2be1eacdca620872e8d152c129f30517bfd25 +S012/S012R01.edf ad02ce8943d45f07bcf6f69714a61005e570f3fd26f0e8e1bac38c7ad80fcc73 +S012/S012R03.edf d2af7bd93c37e741a77f8f0f611abe3b15baf1a3db65eced1bbd1011f3b193aa +S012/S012R06.edf.event de55d737a8555e174e0cd004746337287c294a01d42348abe972b98f07739b4c +S080/S080R03.edf 021c4dbe3656eeb9e95ee4ad422fd0d0acaca6ef06ab3607e373ac1bde810a6f +S080/S080R11.edf.event a18bb793691db61833ea9b3b10da118a6687892d063d72ae795055c9209ba8ef +S080/S080R04.edf.event 49dac749bf850e47e5d13a8edef5121c75a7baa7f50917a5c17c1dda52d3013e +S080/S080R14.edf.event 9a154a517e2cf402786cffa7d164d0656aa7a8bb30af51266fc6403fcd9d3d00 +S080/S080R11.edf 3f143f67b084e6b46bd62ceea2fb3de4b68445b319355239694dd037ae85264f +S080/S080R13.edf 85a42c5f160296eb8c67c85ccc341c70bdee43def24421b69f88aaa8200a317f +S080/S080R04.edf 39a8fcd0b3e2e7f7e33c54e867036d5bf43350e6525278d42c39cf6e3f642796 +S080/S080R08.edf b0f8d997cf95924f44e7a6fe44e525d22f66c4760587cd7589d4430d210c2d0b +S080/S080R07.edf.event 0aaac0e6493abaa7abc2bbcabb2ba2ab9cf91a665d7b07e95e181cb6e7a7d207 +S080/S080R14.edf 3fe341ba3fc26f7646e874235bd651eee3b490546c07a4d06421694d357c6a91 +S080/S080R13.edf.event af55de06274e9f057a8c2b10bb8a4665c70ab950b43d066ea9e3d1de5f188b79 +S080/S080R12.edf.event 88e1ea56ed0459a99b011a3ea2f25f947016dd359b2d4fd683f838ac00d993d5 +S080/S080R02.edf d4bca47e46e37452771dd39b66a98cffaff0bf9235a8c03efbe3c8edac3d3978 +S080/S080R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S080/S080R09.edf 1b3751f1ae69e24ceaaa4eba361b97df0cefef1d2896c36817502585196d37b6 +S080/S080R07.edf 42d1be730e0f57bd2f6974856ada684ded850b1d11e7a58754209892f2e1dc5f +S080/S080R01.edf e02ada53d55ff4abf99928b1a5da1773023170f345d2c223a3a73b0362683d62 +S080/S080R03.edf.event 39c9864c57efec906759ab97dba0ab26a900fa25ad8fc3c48b0d97ea83c3a893 +S080/S080R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S080/S080R12.edf a0b99813248b36ff650076e0fe38f205e84a6f26e18bce0a43c5510a4cd933f7 +S080/S080R05.edf.event 8612fac39dad16048db3bea4e27be45fe67ced9a22781462eb9c6e435fe28d6d +S080/S080R09.edf.event 7f2596bafc4dd481e36c47d0e562fb6c5f9b7e91c2a915f19f66844b55b75410 +S080/S080R08.edf.event ffbdc0e528335775cb88a14baab7b7f1fff4fa1647a473bcd640feefcbfab5a3 +S080/S080R10.edf.event af55de06274e9f057a8c2b10bb8a4665c70ab950b43d066ea9e3d1de5f188b79 +S080/S080R06.edf aa562fdab20e7bb423562f3e5d61c0b821e3912bb29353fbd1032df7d78ac34e +S080/S080R05.edf d97fb389b6a1c7e6d1a6c5e6f41e91d791f963bc798e793b0c494dc452bb809b +S080/S080R10.edf 8171d40610e3f67b030e5e8f99b39e2751e559398acce78a339962bb43fc1801 +S080/S080R06.edf.event e9aa79af3e48ec970083b6f911002eac68ffb799057d4805cd5fde8f16d76b97 +S054/S054R03.edf 342edbf15c5e37d6b788767bff954a2c0c8b4fcde334d733ba67cbe2dfdf3077 +S054/S054R11.edf 0801cbd50fb6bf1a9afee61b5b424a1f0025025f6fd3d8e580c2b5a76bb641f1 +S054/S054R04.edf c2caf79d559efa0dbade6c67290c2840a75d9b9266b242286aab64ea048facfc +S054/S054R05.edf.event 9e1cfe676de301ff52ef7ef70ad2cdd8b8c9562c01e98d8eae191a5c8c28aa46 +S054/S054R14.edf.event 8121b5c61470f12aea6d6cad9727a5b33c741c9f15d24ee389ddc52970df76af +S054/S054R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S054/S054R08.edf 8fccdcfa00d100148b350b772557a39b8252ceec048da5e0da93e1273cdc8c88 +S054/S054R10.edf 
17828d840d9f134ad5129aa66f8842e267a5c5945126dde28fe4c621d0b61f87 +S054/S054R11.edf.event b7fc6043070236adccd2c6d2a291a12804c8a08c7d7b2194d31b1f6996080655 +S054/S054R06.edf b2ec60edb8b8c1a6cea288d328a13b5ebedc3a6d3e9df4ca97f9b2d9de0f5fab +S054/S054R14.edf 6c3ea5618b7e8ac1289f042ff45f55cf017f2e76e0512e7fbda0d2c33c3c5e3f +S054/S054R04.edf.event aaa5cf263ed5e1e7bf38b3d540f4e2d286107c553f7904af5c9cee9bfe715f19 +S054/S054R12.edf 2e1807664cfff1512a8a9ef5ff105e3a19a6c7ebf30dcc6a321244a46f943cf8 +S054/S054R06.edf.event d0280a6531ef96d2b622c2d562c05bf53a0d00439a4819213365b6e52e54abd1 +S054/S054R12.edf.event 950cf97301dd495a2a6a7a19a93d67d2ee9ee03b2675e300582e19305f5a9287 +S054/S054R13.edf b7f29036b47a2daa493f819cb3419a348d8281d39e116a06e89342da6c54e4c3 +S054/S054R10.edf.event 48eea52bb9946ed46ef7e1b621c8558d41739ffc9aa7ed8fc25eaebd171ac223 +S054/S054R09.edf ef124916d0134c53ed3dff058aa6ffbfa1275932751b98b977c55c4fc174a2b3 +S054/S054R09.edf.event 8cd8c690df55a4167dd5e136be1c424a77655375599a9d3350d5460cabab539d +S054/S054R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S054/S054R03.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S054/S054R02.edf 55442382a62972894ca7ffcb353b8aa4048043d805da35fff567ad37b47a85e8 +S054/S054R08.edf.event 7c83be0506163b52a89548c7af6d4893472403c2bab3a455c7d8c92bf3550ad8 +S054/S054R13.edf.event 26fabc186c9b04bd70469a5964b2648cb7a2115fb0a397d51de147fc640d8d83 +S054/S054R07.edf 38ab1a1b03ec75552208467a02ad50c8b29be99e587b31b1f0b21281683861c1 +S054/S054R07.edf.event 68922fc617d2a7f851f538c226fc5491b6f57526417f33c57ada180e97746c59 +S054/S054R05.edf 499cfc020d673829f77de992d6014a5946968fe26a39adbd9efcae257ac580c1 +S054/S054R01.edf 329625cd03103c4162f6de3c409f3664c68481979263f56e50b8dae272e27f7f +S033/S033R13.edf.event 815500806b68247c0c383804bb4774dc90e7f96d61e9e2b246fc5d33fcb5aafa +S033/S033R03.edf 7b062b51f8af082de4aea5b514db3ec97db2dad908166f4781566218458443c4 +S033/S033R08.edf 0ee2a5a00463bbc650852de790943bc5af90547bc325bbc65be19e9b16eaf0a1 +S033/S033R04.edf.event 25af506ba70b68343c97ccca1ad0f235958a8dc9bfdd84c6f7155937e7bc7efa +S033/S033R14.edf eb372e2cb331dbc63087439c5dd18da0610f4309e68c2297949c897c6a0d6a98 +S033/S033R03.edf.event c843292c2d927d69501ccd581dd6688cd971a162df8d02a7eb20e6a3c1aa2d83 +S033/S033R10.edf 3e4a4eea41ce75b89fd2027c06115b30e6ff36ba14906dc6915441faecd4869f +S033/S033R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S033/S033R13.edf 64ef4744b1b276a462b60627dfef69b157b3fe908634eadc17ca157b083a987a +S033/S033R11.edf.event 57636037048fc90c882a690b692342a1f4fd070d9f2a219d527ef6c0f7ea7dc1 +S033/S033R04.edf d4520b8cafbbf03ce8d2159bb727f0f6032c1e9533bbc275f1fa4ddcb9cf135d +S033/S033R06.edf 772144edd7d06ab31cc649a6c460e43125e9eb13801064276529899f4b51cf01 +S033/S033R07.edf 6aeeed7165812bdf3bc7380539f53f0b0712a74392a8f37f38c4747c96260db1 +S033/S033R12.edf a2968d5f651e72700096a652c3dd2fd34f51e9c381bef25c329dad3a3e2b319f +S033/S033R09.edf e0f43954e8632d987be2a52f116febffc93f9d1dc62311d4f3831249427cec9c +S033/S033R10.edf.event 96cef3a9a9e2cd7f438d53008e256b198c48c6e0d2f0b2d70e0d296180cd58b0 +S033/S033R06.edf.event 0aaac0e6493abaa7abc2bbcabb2ba2ab9cf91a665d7b07e95e181cb6e7a7d207 +S033/S033R05.edf.event 83f18e3a845e17346dc10fd176b61c6f306078227a59bb51610834234d1454aa +S033/S033R08.edf.event 440457b385c6d53d2340acbe5512027de7800fb1514858b055b126c10e58b404 +S033/S033R01.edf 5801c97826b681eddd76641707c479385742b0fa35e5fae0935a81fd139fc646 +S033/S033R01.edf.event 
98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S033/S033R12.edf.event 57e9107b34629563ac9d22f509b6f40e40ebedb8afaca03c2199613287fc06ad +S033/S033R07.edf.event 8d9c5bb3c83f5f447b4f8159b1454d55332838299c0e4a8e2dc62f413b08cea6 +S033/S033R11.edf e70abe84015480fac548205192edf6e472ba392086504118425bf80777b80984 +S033/S033R05.edf 962ba5f5aa3fbd2440ba34ef22a9d3852ae01f9f7cd93475cdb13e00782ebbd8 +S033/S033R02.edf 9a4872573c4ba67267f49d8c2d286776792fdbfe76dbd10c696950cdb34f6e90 +S033/S033R09.edf.event cfb6d9316552151b2cc7a4fe7b3c98b0ffddca0cea554883679586887149feba +S033/S033R14.edf.event 9b116dac2d8fff59f5bf4cc8d45636716575c73d6e6b2287709cbfdf0ce5f55e +S062/S062R07.edf.event 596ad948f8aa9dfdc6cd3125a3d02e115d1cd6a505023467bf5cd721125ee7fb +S062/S062R12.edf f95f21cde281182535aac5a41be11bcd12c1939a485535bf551e6da3f3ffe6f3 +S062/S062R03.edf.event 350042ca5c37cad9cabe8c469aead2bd074d996b13c69cafba8d24cd8fd825fb +S062/S062R14.edf.event f500286a76884018ad149ef34cc8871332593723b30ad3d79f8f39236a5cc25f +S062/S062R09.edf a614ede1529dc1cc60dd7ac3af1a4ae2214050ddc6c378eb5e2f8a59edb8ae68 +S062/S062R06.edf bb609b4819f7e10634350762ccb52d9b04df6746baa95b0c73b9d4f36c857a15 +S062/S062R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S062/S062R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S062/S062R05.edf.event 710672ce0f425a207ef543d23f608683e1863d7423702d511235d71ccbd289cf +S062/S062R11.edf.event 950cf97301dd495a2a6a7a19a93d67d2ee9ee03b2675e300582e19305f5a9287 +S062/S062R06.edf.event a18bb793691db61833ea9b3b10da118a6687892d063d72ae795055c9209ba8ef +S062/S062R12.edf.event 8ad7739d7d370e087ebe5db6ceadbc03ee38d96703e78cb5c91f2b714f07a25e +S062/S062R01.edf 3a2d4ed688479495bf9312be25dcd3ffbf393ba786f185700b611e9b95dbd4ef +S062/S062R03.edf 94b594c87e91c1e1a932bbda2022760641d3cab621132f8cbcdada1e919f5cec +S062/S062R14.edf 40d5eeb86b99c22b7fefc5a08d638d9be9bb5b8240b661451a71574b2c6d21ef +S062/S062R07.edf ba192adc3503f736ef9df7d07796f5b15b0413c3a5e05d5e7d3523970474ee11 +S062/S062R02.edf 5ce5b78bebf8a5e7cb5ed88064f0ef25f3c7cea58ffbf791453d5aa2fbdad831 +S062/S062R13.edf feb14f0a8de34b68017e66be8b19f702d3872e7102f63f6dd8fb17b1e4de4a6e +S062/S062R05.edf 2257a462ca24563b2f157d4f6618f5313933ddf7fc66986ffeac5df2de258794 +S062/S062R08.edf.event 8fb228960cd397f6100e75cca49e1045159bf62e6184dd5267ea608be1d540b6 +S062/S062R13.edf.event e7ef0a3d66a5c814c6e917ac8f9bf83d6056810f78dd0c9296b5445289483fa5 +S062/S062R10.edf.event 815500806b68247c0c383804bb4774dc90e7f96d61e9e2b246fc5d33fcb5aafa +S062/S062R09.edf.event de55d737a8555e174e0cd004746337287c294a01d42348abe972b98f07739b4c +S062/S062R04.edf a054c47a0c59c1a375061a59144160ada314be8ad42ad7bb3971c490572c0324 +S062/S062R04.edf.event 918e61da68f21c26600383b29f33fec7665f2f3fae232e8413bc9266cc617863 +S062/S062R08.edf 0653afdbaee66604e8f9f9161e5cfbb24fda28d9dfbd48b8a343a73a544c5e91 +S062/S062R10.edf 1d1f19e928275c373dd8611a5fba6af15fca64768101ff35b443e47cc7df8b6f +S062/S062R11.edf 1b48ec4778eb975463f126ca0d2efd093978b900f6ea0275ee32ff953d6a4339 +S021/S021R02.edf cbb27fbbb61a98a9e7edd1f0aefdc5a94d964eea0ca16fe3b752aba6b38ec399 +S021/S021R05.edf.event 99a46eb7d9a4fa08a856556abfd134cec7b55f86ee94c1eedc9b0b5214911db9 +S021/S021R08.edf.event 8bbb904a2259011a3e81d620c25b30347e4e7839d6d80c3b648c53545b89f24d +S021/S021R12.edf.event f8a465b9ebddc2704252299afc352d87e33c523fa8f80ca82a96fe0b268727cd +S021/S021R03.edf.event 0a0620d55b56f9b27d4ae912d41ca29efc65875b00d662d149ff508493487b74 +S021/S021R10.edf.event 
24945e04ff21b52347d9ac969d09c4af21811576baba1bfaf35d81960e94008f +S021/S021R01.edf 983781fb39c3a747051a04f23484164de485b97ba43a7fd63fd8fe3d7f6f6f3c +S021/S021R03.edf 3404a07e58a4054c33bb554133d7988bc19fa5f2dff5ed74e1669862c44869a4 +S021/S021R04.edf.event 4df88ea43362416385ab85f64107559b8c8cae7e4f9feb2b48eb9ae5bbeea3b2 +S021/S021R10.edf 6e5ea8c53bba188385f86ae619dfe3282679deba4e651ca46ee1c91c06b2c9b1 +S021/S021R07.edf 26e5c7f56d59c4e0188f7479c8af38ee973d789f26d38b80a8e421d62b542a4d +S021/S021R13.edf.event 9a1f9c37f0137ac6d7341c2a9dbef1bc161b35fcb6bd9e0db6cb95f8c7096c5b +S021/S021R06.edf 6b27c8a6782a213bc37bd0ef13c156699a8087107b6bb8893f24c526cba684c2 +S021/S021R09.edf.event 9a1f9c37f0137ac6d7341c2a9dbef1bc161b35fcb6bd9e0db6cb95f8c7096c5b +S021/S021R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S021/S021R04.edf 9481518d6a0b086d782ba17e78fe12575500ce3c7f25071f29adc5eae093c863 +S021/S021R05.edf a03d4e4f066acaac766cb935d3a40eb11886254d82e3a38a6b06714f81df9c98 +S021/S021R11.edf.event f804d1f19243344c8d8b505a1f229c8626e7f81cf4397b94650b0e6959d27437 +S021/S021R14.edf.event 30b760c52698fc58e43824eb7302010b60c8374cc35c21494035da15da835fdf +S021/S021R14.edf 76c2f56b68c08ee48dd151faf5d10d432421ecb33a938ca9b440adeed546015d +S021/S021R07.edf.event 574e54bb07934f5ad1177a8834c912e24b6537ad2eb83ff2f84b43af7024211c +S021/S021R13.edf fde2f1b885036d11a81f403bfef81cda7aad4ffd9b81cf9ff9d725cd988a38ee +S021/S021R11.edf 7b4fc08ae9e199aef5e077ae14a912c790a6d2a0fbf8565aa2c80690800aa911 +S021/S021R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S021/S021R09.edf f5b4c7deeb1c581e455c6d5d91e2d5a403233f373c0ea36f34ce3087f92cb3a3 +S021/S021R08.edf 8abd01e9b2a9c563469beac4d632727d9b341438b93aea76258ea180de2ac6e5 +S021/S021R06.edf.event 60e820216cd891875b6597a2a69eefb270be91108dd94e6c6d72fc69bff372f6 +S021/S021R12.edf 326023193bc901b74d1fcd5e7a88f064643a41842fef121fa031d9f628cbf3ea +S022/S022R04.edf.event 1513629f6e8e700394421b828869afdbd4e2ccdf8bdbed2127f75a42b2db3ff4 +S022/S022R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S022/S022R03.edf 2c7f904749e97f1acf45a98079c515ff40f15234d0463917d5f99dfd4763797e +S022/S022R03.edf.event 23fbb5b9f1007e8652ada0247eb06163f5b93eb5899bab7b719341fbeaf1345b +S022/S022R01.edf 496fc1ca74a265ea0f888b9334ac8b16168ab201dfec3eb7d932e68a5348edc5 +S022/S022R14.edf.event ac2e8337cc4fe1a692a1f25efffa62d821b6b3bb37188c793591c0d07fbac0c0 +S022/S022R06.edf 0133a61ecfdfbd3dbb8c0d678076d9f67836b6fa90c34e35369cce7d4854914a +S022/S022R05.edf e76a89c457b5deaef2fcf393d755af60c4b91fdf804ab5c97a0e461d5421f17d +S022/S022R13.edf.event e8da7819eee7d2c77d74fa9e6aec49532a6b299f30b13b3938ad7ce0357cfb02 +S022/S022R07.edf b2b28d8b0db957a13721997f5a4ccb6ebcc7bf770c7acf0fa710a08f13f1421f +S022/S022R13.edf f032660b52f6da0047f057da1a95bb03412014abc61ff1ebdbaa526e668e63ec +S022/S022R10.edf 4162c067a5d3b1bacd9614234baa0af9a75c269926de688c50227c39567ea49e +S022/S022R06.edf.event 58db360bc4c16d775b7e2c4797c2215d9f1405c25061a2237ec95b2ac264f964 +S022/S022R12.edf 46f0835897374fdc2dbc634bc4c16a61cd03a4ca25506cf923b922629b07674d +S022/S022R08.edf.event 007c6ac586d5de80642bcf571fae31808a22c5f6b8fff35fdd8f99f1b334f97b +S022/S022R08.edf 8f890909113d536152b987caff0d70c73468b0332dca478783e665598697367a +S022/S022R09.edf 735e4a3caffbbc78077baf7f0bf0d23ec707c3926e3857995b90acdc04eb8593 +S022/S022R09.edf.event aa719b9aea445a02c8c5c6a6de32bcb12238842c8d4ae16493f623bf0d226c1d +S022/S022R02.edf.event 
98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S022/S022R05.edf.event 71b0870f3a5490c969240bb5653ec1eaf656e40942ab93a71deb32dbff919601 +S022/S022R07.edf.event 22a6b841d94ebe84eeaabd93fb3e0f00da65ddf3bc8de6d5a79394e3a1394567 +S022/S022R02.edf 1e302df13c28a0cf311c54b205a42155198f2c580e2d3d4e00f6ac66dd5436e1 +S022/S022R12.edf.event 93367a70cc359570029f435dd94fb546895eb7fa6c629c6678544071cd4bdc2c +S022/S022R11.edf 514f1cc57f2a93c418ed81fd3ecbb3a1cc4e4c9a3360587dcd2badb019b443b0 +S022/S022R10.edf.event 9a1f9c37f0137ac6d7341c2a9dbef1bc161b35fcb6bd9e0db6cb95f8c7096c5b +S022/S022R11.edf.event f94fa47bd56896d1b35682e2e14d8d1fdb308a058c4a48b7360c7276bb8cd922 +S022/S022R14.edf 115ee66ec70617c149079edb9a5e823b1b218a885f5a80b56f754ad745a632b0 +S022/S022R04.edf d2f48578f30b4edea2e138e74854a09e36887f8833a3e0f48cb2346ead82b201 +S011/S011R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S011/S011R04.edf 072b75496dd935be460bc8ac9c3bf2d1fdcdb62f32ffb552955319abcb24cfe9 +S011/S011R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S011/S011R07.edf ee5542027039bf28eb7fbb95826235f618e32c74b28857ad51df388256e8fda2 +S011/S011R08.edf.event 533737c87655572a20223c364be6c8be7a035cd8cf3dc42ed8c418e46e174661 +S011/S011R04.edf.event e14dc31d476c11841d45564cac660be9776fccedd3dec71dcee4c5232f00f25c +S011/S011R03.edf 369bc902a59bf9a9d480c2bad2a4b165b83716c7cba5760c2ce0e6500f120443 +S011/S011R10.edf.event fd77510c7ae392a42ed020605a40066a43ea3dcf80082d17cfd95141d71549ae +S011/S011R10.edf f3941502e7692576978f24c14c837b07e760fb94cdc65f2358790375d5537d87 +S011/S011R02.edf 3d64573c6da6183ec5b8d230e38609d4d7f365a2aecbcc8a310c94d13c986f7e +S011/S011R12.edf.event 11c7903b0dd963937962aeddd4ffb77feeec607958b581d996b712009c71f910 +S011/S011R09.edf 8e0a6cb4cfcf916fb73ca5d93839ef79131a6d76874b8307081eb03b6947c23a +S011/S011R12.edf 9034d085bc01c1c738d80a48fdc43059f88c13d587b5de1c59d4a85ecd194ccb +S011/S011R14.edf ba4007cd1ab12e9f87fb8a0d158b55e9b236cf29190db21709482290a05adbb9 +S011/S011R14.edf.event 0db4656c1041f6626ac6fd54117fb1e02890492bb86525e197e9ed116a0fe6c7 +S011/S011R05.edf e02118b61831ef02eb7268afa90ebceb136543fee310dbea1279c21bf842c285 +S011/S011R06.edf 05e960ac5207e35af7dc1fe492b78c7f473340aa88a6639a8e9d711d27c80270 +S011/S011R03.edf.event e1c00064d3aa1fb0636aaf0dedd65aa66d02a8afcf3dd70b9a5fa4df4d4ebd47 +S011/S011R11.edf 4a6c375686a70b4d0d09c789f309d8e41b76bc660d4a20f2f136fedaac2ff1a4 +S011/S011R13.edf.event 5b005cafab5d6d8968aafc95da4c7b50e36cdf3bf2d71b98cd18431017101fcf +S011/S011R01.edf 374dfa06c6df18e3bd324f4c8aefa462bacb6bf9dcbfe8d58ae6e84ed2bbc443 +S011/S011R13.edf a1a02f3cddd77bdf2eef2778bb2fe7dd76030d378d1b84850593ab3dfcb8ea3f +S011/S011R08.edf 70b05e696e9faff6566cabda58b9144e1cd8ca3b16e04178309b22ef7bb612b7 +S011/S011R11.edf.event 2553cdc46c8017dc83d793a7cce6a36ddf99a043e777612539831e67f1aebc23 +S011/S011R06.edf.event 296bafcd473fd6051b9cf19b1af83d7f0a4ba4eb310f7fbf462aa7ac3ebb8120 +S011/S011R09.edf.event f39b724630cf39de64096da55e80ef4acde232132367bdb459b9a4b872fdabb8 +S011/S011R07.edf.event c4fe35467d7d0b21a1a13fdeae18b7f036dce640d9e06acb7a946289a7fd4f44 +S011/S011R05.edf.event 017d78426c2e9f2b1807cc35ed07405ab8ff157014df0701b8524f965539b319 +S082/S082R07.edf 2e76d2510e31a569d934e5e0c79d80ad5d5b717210edfef603da9ed469a0ebe0 +S082/S082R04.edf b3e4c74b4cbbf1aaec1af0c3613fc9d2a27d6d96ac62c7907551a87fa9bf0bbd +S082/S082R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S082/S082R10.edf.event 
732398d864936092994f6aa5c7a3d554d1c1c35ff4fd7a90dd0c06a61260c68b +S082/S082R06.edf 1fc2e82fe3d7cac5dc6d1b36f7e427f82dd881d8795e7c1ffaaad4309df741f5 +S082/S082R02.edf 5a60133110089cd88b358a949f2e1d584fa14f90ff4c4016554f4e3a827aed5c +S082/S082R13.edf.event c117ba4c66b5467903fddc4ed77a580e09639381683c611dd1f02cb0d311a4b5 +S082/S082R05.edf 9e318d00b9ab3f34936e0f1a63e36996213b48e72b6cef4e22f3835c58411853 +S082/S082R12.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S082/S082R12.edf a389f918554ba206ee4330deaafe1e21750fb44317401628841ee6e2e9b83a8a +S082/S082R03.edf.event 8612fac39dad16048db3bea4e27be45fe67ced9a22781462eb9c6e435fe28d6d +S082/S082R04.edf.event 23b6fc268df2ca2b48b0a505a6eed055a62dad293041c180f6f03136d0f4b36e +S082/S082R05.edf.event af9bed6667136366cf88d2cd7f0f5fd9dda3fa51cc4cf73d3afe4d57dc8b7502 +S082/S082R11.edf 7b7a5f1608517ddeed1dc7e4e349c346f90ef9d1aaabddce3dde87b65efe2120 +S082/S082R01.edf c36d3ce061cef168595ebbf3995891c60fcefa82dc1534f6dbc4fc6312d09ebe +S082/S082R11.edf.event 61878a566785fc86672b3e061fc0eefd0fb9735a4239d8f13ceb0d08ff22f60d +S082/S082R06.edf.event ffbdc0e528335775cb88a14baab7b7f1fff4fa1647a473bcd640feefcbfab5a3 +S082/S082R03.edf 1df043bd8c67a867ffafc493d40cc183b9c15f46b4a45609b27b386a6b056280 +S082/S082R13.edf 4d97f79b10606441620868d7fab83054e1a3961fc2e056f6082f75f636be17a5 +S082/S082R10.edf c7fc55b18d9be4cdc9faa3b16cfb2814139a7d3c8264e9cdb2b6a5ded49298d2 +S082/S082R14.edf 09c317d8cc56eb914443ad400d1167b60c10ab73766894ddf2934223e30e4aee +S082/S082R07.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S082/S082R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S082/S082R09.edf ba6d90669cf26f7346353afa0fd1a496ae333c8f72a5b4d7b35d0da5607b8cb9 +S082/S082R08.edf.event 00eb5e22d01c3eaeb1626660a99e0c6fbf2f4423eed750224cc549752b7b6761 +S082/S082R08.edf 47ba5b30c2427a7dd4117f26ac5491a0bdf9a2e5a619bfc418ee59b3941042c9 +S082/S082R14.edf.event 6e13e4f708f4b769dde500a1db747206aa87951c3914cdd436fb7f07e9aa5052 +S082/S082R09.edf.event af55de06274e9f057a8c2b10bb8a4665c70ab950b43d066ea9e3d1de5f188b79 +S087/S087R11.edf.event 029131148bb6c782573739be6ec79dc67ade89f0b101169912d9c8201871bcd0 +S087/S087R04.edf.event a38580800381d773ef251e941cbd4e02611c4c4e4efab3c383f14300a3394f1e +S087/S087R10.edf 83d8cc5669b446e85209bc1d3cbcfdd915dbbb910634d096122ef2581b2e3d6f +S087/S087R14.edf.event f45bf8e0fbc90d9616344960adaf0ce8357cfa7171b36030a957f85c93267870 +S087/S087R11.edf ef3af1f11e1c90c305bb5caaa47d3ca98076a3b47378c1a81b6a85b3f2eec5ef +S087/S087R02.edf 869950862ef4df60bc0a4dde3316a8db3780262e29e65a1fd4d98ab20c9859f4 +S087/S087R13.edf.event 7f2596bafc4dd481e36c47d0e562fb6c5f9b7e91c2a915f19f66844b55b75410 +S087/S087R01.edf 5876a24fa07f5580ee8df69f00a069aeeeea2a22e08f3c6ba3678a3978f4a73a +S087/S087R05.edf 9acd4c1585c623ccf52ca1d8c5793168a6d736c528af283fb68116c415cecb48 +S087/S087R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S087/S087R07.edf.event 7ee6977e5ff9c282ca6370406d7a9871d162328940b104573ac9f1d5151c4b96 +S087/S087R08.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S087/S087R09.edf 6797f700e9a9b26efa58d863349724bf7cb330f62d3653e5f87a6e8a2a9d22b8 +S087/S087R03.edf.event 0724cecedb70811bcaf06ec5a41151edaf5209f968315c4da2e3a25bbf0fb9ec +S087/S087R06.edf.event 8612fac39dad16048db3bea4e27be45fe67ced9a22781462eb9c6e435fe28d6d +S087/S087R05.edf.event b0dc1155da666ef5240efa8026cec5f4f69b5fb6f1d21226a512de3f6b62002d +S087/S087R14.edf 
203089c991c61a6701bfbb1e413bbd0bbb5e0a103179852422eab0640e232a14 +S087/S087R12.edf 1fc744ced6882cc083447d4228f2868ad8cd3708506491adaf6b949e1e1aefc8 +S087/S087R12.edf.event a716ed693834141bb2b56720ce6124df46e42e86652ec02bc96746644a79378e +S087/S087R04.edf 4f1599825e6aae94add2c0ebcf8bcb23a65bed258cfb89a73a06a6861fc586c3 +S087/S087R13.edf e07def80de0aad137aeb40c1ec6cc9fe1affb9e53fba10ea09a721ddbdec630f +S087/S087R06.edf bce28b9526a4dfbc28502e3b2d2586a133900fa946ae1b2d7e19752a23d08ebe +S087/S087R08.edf 58ec622ce0deaebeb43334c3f255e81b01e612751cb0b13d2471b2ce98fb67bb +S087/S087R07.edf 70be34572544b1dbb96739d53b694fd0d983a91810f9e05addf1eced8b013b3d +S087/S087R10.edf.event 0d4308120129d0f92d2b8779374ae232f98d1357e6c6427983fed655d9611af0 +S087/S087R03.edf dc60f233daf4d2a62453582210f0186514e02e4d3967af1ef8881361cc55da5b +S087/S087R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S087/S087R09.edf.event 8553d29b6f3410b4ee7ebd3f65050ee16a492ed893d0bc9a14cf35cfa904d586 +S014/S014R06.edf.event efd977506cc195b985e542f3333bc334c4793652f9b52580c96ef8a5948f4db9 +S014/S014R07.edf.event de55d737a8555e174e0cd004746337287c294a01d42348abe972b98f07739b4c +S014/S014R02.edf 6294928380c0ad63219a2713b9f5488d570be1c0f1da72662668c1b4f5bc906d +S014/S014R13.edf.event e8abea1fb06a5ad73e91ea5c97aa84247e1c165ee2bc724e9b9c7ae6f6c017cc +S014/S014R13.edf 28ef92f5bad0bcfabde422238a74fc13e58b9e31882cd9c4ea421f806a923af9 +S014/S014R10.edf c0fdc474a78421cc6048a9c8dfc8cafa8424dd4379135b60d68a77c4ee0b2b6a +S014/S014R10.edf.event 8e9579e89d1ec6576609e05f5e6215b4322a4c5375265379e3acd7952866ac83 +S014/S014R09.edf.event b1046f2c937e8d05f2c3e45473ca880864a5d33222d740a0bee41b055662ea91 +S014/S014R01.edf 9ff971d9225e764428182bf4833ce5a60e620b7b157df9798ea6266070442433 +S014/S014R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S014/S014R03.edf.event c4fe35467d7d0b21a1a13fdeae18b7f036dce640d9e06acb7a946289a7fd4f44 +S014/S014R09.edf 3214586a275de7cfaaa94b5b6ff39c4f8fdaf04ecb3adfddde30ae5327d5e8b8 +S014/S014R05.edf b675b2ff333e7661944d6f37edf44d0c104c0fe9d6c04661c96885fbc0c2edfa +S014/S014R11.edf.event 11c7903b0dd963937962aeddd4ffb77feeec607958b581d996b712009c71f910 +S014/S014R04.edf.event 07fe70f3f8a3bdbc31f5b573f0a7411d64d34385995e5b88892dc178ef898e16 +S014/S014R06.edf 2effc21654d198aa9b98432717d66111358ee5795b929bb4c433d5d9ab066e97 +S014/S014R14.edf.event a376b452fa5f7c300d71ec31ce58f97920510a057f4ca6c506f4cef51faa7315 +S014/S014R12.edf e6ef184a364734f2cc0fce314c93ffa545660a89567475f56ddd979a2d0fbc39 +S014/S014R07.edf 4fc8a7f723e8577d5f0a851ee416ccdd7511aed74ae3da6bcc4bd19a2dda0d1c +S014/S014R11.edf d1d6a99dc4439690c820469616b6bc71d80be65340615246741c0d67efde59c4 +S014/S014R04.edf 7e443b4a4bb000506137ffd79eae9fd915a282f4c6c5aac71ad8b8612072227d +S014/S014R03.edf 670c197481bd1761f1393c3f33e7c286f5966f483493cb6d2e05ff23de93c6a9 +S014/S014R08.edf a63bc896da7b0fa302c3c3b0ad53a4c54f1a35fd1f66bebb2cedc81cbabc9320 +S014/S014R08.edf.event a730605f9838282a7ec09530538d4f4dc6f2f5cf73c8f2e85944d5e8297c441e +S014/S014R14.edf dc0139288de0668975cfde05cce9663a186ba1b2ccb3fce71fbd4bbbb206b5ba +S014/S014R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S014/S014R12.edf.event a38580800381d773ef251e941cbd4e02611c4c4e4efab3c383f14300a3394f1e +S014/S014R05.edf.event a5608231854f8f457c107c9acdfead912cd6d55fc459b7abc2f11d77bb166129 +S075/S075R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S075/S075R12.edf.event 
818acb17a86d3ee6425a512fab58b363d01b6ccc783417cdad466d1ecddc506f +S075/S075R05.edf.event aaa5cf263ed5e1e7bf38b3d540f4e2d286107c553f7904af5c9cee9bfe715f19 +S075/S075R02.edf 712ea0f0fd69d3b108cde439012068e81b4cae645d170a93d52c1ead4dd45877 +S075/S075R10.edf.event 8553d29b6f3410b4ee7ebd3f65050ee16a492ed893d0bc9a14cf35cfa904d586 +S075/S075R08.edf 12c3c5d354210080fff50cd4bb475cf89df420c18b0a1e675dee817a79b380ba +S075/S075R10.edf 5d683e7fcc9f0d168f8ba02b226ba4bfe0b00bd26216310522e24c139ef71314 +S075/S075R11.edf 8d55cffc5f065f1de6c5007d2f42393f0f4fa2d40a01de17060d03cd515d3b23 +S075/S075R04.edf.event fd77510c7ae392a42ed020605a40066a43ea3dcf80082d17cfd95141d71549ae +S075/S075R14.edf 4805ec2a2ecefc95562a0f2178e2fd3aba647559674d13fd5c46a2f2dc27ef8e +S075/S075R06.edf b07163bf94520e29c548541fd33ab783ae9953009f4357e6aa57dd6f1d81af88 +S075/S075R03.edf.event 3764472ec04047763aeff3c1680cbc45cec3a88ed5f483d80cfbb31b50a12ac9 +S075/S075R07.edf.event f338197f5dd0ca078ea8eee22145e57e694f7dce6a2bbd55f5f05346ce3b3f17 +S075/S075R12.edf 6c33faf3fad9d274e8e7718c6dc2df6e8d54e5478638684530b7f854961b0eec +S075/S075R03.edf 3c7d10331971c041abdc9fc02041ab2c80384e3b84ff3f58d9d54c7572590ca1 +S075/S075R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S075/S075R09.edf.event 61878a566785fc86672b3e061fc0eefd0fb9735a4239d8f13ceb0d08ff22f60d +S075/S075R01.edf 79c65d9990c6606d45d9c3be18ae7226cc97e4757955fd8675f313ff8f91b238 +S075/S075R14.edf.event 8cd8c690df55a4167dd5e136be1c424a77655375599a9d3350d5460cabab539d +S075/S075R04.edf 87714f913602a939c24e3ce672a689ce20f966c5d19d5f699f898a94cca4c086 +S075/S075R08.edf.event dea45ba8a8662cd82aeb9d6c523756ce7079f3020c2ad1b23d6dca2f63b5bf82 +S075/S075R11.edf.event 596ad948f8aa9dfdc6cd3125a3d02e115d1cd6a505023467bf5cd721125ee7fb +S075/S075R13.edf 10e9456acf73b9c355684b37aaf102a2d78389c47fdf0c026c39a32e08c95c71 +S075/S075R07.edf d933c3ead467e6c662fc45bdba943f20177079470d1d06ac4cdc989d6dfadf5a +S075/S075R06.edf.event 734d5da22686d9f9052ce819f7fba86e4f5225f2d3638eb1e3795e9ebd018fe1 +S075/S075R13.edf.event 83ec130ac6a664e0d88923e1496dc0806008967b51e6158521a6beb0515b2eb8 +S075/S075R09.edf b17c615c3a63e91e8e26a4b9299c1c5bcc022c2d50932520a382c713e8f1f708 +S075/S075R05.edf aa220a279d288581e32dcb10fd8e44688aacf89de7f0dece4765f0b3c40a4229 +S010/S010R09.edf 91bcc4d9068115e0d022a6a9ebb1fe05cfcba27cc1bf025beb8a3eebb72b0e7c +S010/S010R09.edf.event a788bd6a825d960534053c6559cb4f24cb7ee7a3164017d99c1dbc55f3b22215 +S010/S010R08.edf b468e77d0c8a73377b4510220c6be95bdefd572f6ed5c4b5f539c9dc0bdef485 +S010/S010R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S010/S010R12.edf.event 820bcb0b8aa75c06572fb3677af8b965e06ca92c0ff5f4eabd0d347c7b141680 +S010/S010R06.edf 5f5c213f1f7db4bdb52d8d54e8074d7f5e73655b1e040d41a7916d3d0a00b666 +S010/S010R14.edf 138893d950405102e1536365290ac15255688eaab181170afbd5178e6714ae2e +S010/S010R14.edf.event 99dc24a5732bc2857f1d3fba3d1a5b942b288ef56950e45354e03fb10183bbe6 +S010/S010R13.edf.event 212052daaa4c0fbc66f91923236861d93fc52f73403f6b4d988205287f1f9ece +S010/S010R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S010/S010R10.edf.event 8a4b5822b251050d46b816bf54fbb47530d91210b6d01cc2e6acbe662193553d +S010/S010R07.edf.event e11c72f75777baaef94ad75881c37d283df8f4f917ca8c1c823cece56a31f215 +S010/S010R05.edf.event 50f12f26efafddcd994732349e4117055595d324e4dcff8fa56160baad5d5533 +S010/S010R13.edf 10e642cfacf5d55a4a90b73f8968e2dc4da6db4b8458388d87641c21e1e00c6a +S010/S010R06.edf.event 
f39e6f6c43f557d6a85493c1722b39ca7055c4ba399525941217678a10b78568 +S010/S010R03.edf.event 296bafcd473fd6051b9cf19b1af83d7f0a4ba4eb310f7fbf462aa7ac3ebb8120 +S010/S010R01.edf 0d694b503e5ea0f6088a790f09390efb1be2688ad92213f5e82fa2f343cd18c4 +S010/S010R03.edf 49016ed40a1c17164b7b56031d567ee96b00b0b3755fab072c1d78db9ccdab73 +S010/S010R11.edf 4e8a3c50558c9dce96ab060d4883d19fbfe5e859989d7c633bf9f88a6a65d467 +S010/S010R12.edf 0983609a12e6fd9b3ae99fd6968938ec5a3b012948894602233a65b720ac3975 +S010/S010R04.edf 8e68b22936cbcb7f84ed8ff037cb4a99f01064589d181c8056dbef06c1c7159b +S010/S010R02.edf d44f5f6c774771bc2b6cdd6d29c7f558e11ad11e6198cf4603f473edd4b2074a +S010/S010R05.edf e83d0bcd64dc403e430be79f865e92576b62d42ebbc07583fb1bd964238127bc +S010/S010R04.edf.event 3e275ced710e3ac50d345aab942c9fb009e11f5447566d67dfaa0345a0d96840 +S010/S010R10.edf 693b0d9240095c01eb8ec5b2d0b3887cfbb8fbfeb85a04076b79c297b5b7d42c +S010/S010R11.edf.event 710672ce0f425a207ef543d23f608683e1863d7423702d511235d71ccbd289cf +S010/S010R08.edf.event 0028edf9b5fcc76311706f809ca44f884f227acaa02c56031cf87a7937c1d9a5 +S010/S010R07.edf cf59750f6f9576e106875f257673940cbaf2b0d62e1d3b1a45260dbc46c1981b +S074/S074R02.edf 0135aacd143f05dcde96a748fb925d60823187e0b4a910051d81c91a85b72bed +S074/S074R14.edf.event 046534c84c8b0ff5bc1e42577bfd07477507c70bf5fa72787065271450df27e8 +S074/S074R07.edf d51407f00e5f51714f155025b59165e1629a6f77e1205f0f693917400489198a +S074/S074R12.edf.event ef90708131efeff70f834269381bdac1901f295f238b806a3e7faa6295beae60 +S074/S074R04.edf ea84e05235eb45067eb84f19aa09c4c5d429af1e9d865ee5eae1b2aaab46a081 +S074/S074R05.edf.event c2cda6658840270926f0e5923526e91f0b5c3b025e5e480e8128411d202279f7 +S074/S074R04.edf.event 41a37ed42798630028522e1dddfb80e81b973b74f6159b9cfbb856b13dd30d4b +S074/S074R12.edf f6ea6103b34721b8d2d8e1175d305491303cf5f028879791f14443da810d1c7a +S074/S074R06.edf.event 5bbce0aecf4877ae15055ab9b31e4f2c644ddde0790031ab0902e5c6d913e97d +S074/S074R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S074/S074R01.edf 5a9fed173c09eb34df7bfb6e79d6c15b616f2cf95c65bbed6b7b392acfa2ac3a +S074/S074R05.edf 10c0b403ec5d30a1bfdd118218814ad6a3ab807ba2f25c767aea3374b71de285 +S074/S074R13.edf.event 21d57c80128ab83ad495fc6a3f948d954b23ed08be42eabdf2d058a81fa1ebeb +S074/S074R07.edf.event 33d88435be58ca0eaa6dbf2eedec24ab55aec24d6e067979c6aa5f2549ce02bc +S074/S074R08.edf e594ad62e81f3948d52e1bf5b896598543e4c739e06df596160dd379af59b99f +S074/S074R14.edf a6b678a0c9f505a58fe9b6c769276a84a4785743a3866529febf5f489e41d6d9 +S074/S074R11.edf c7ab21c448f8059e8674b21f9bdd96311c83086c6a7faf95aec5d75c319bc01b +S074/S074R11.edf.event e6e3fae8bbcbc00e9d7d959eb1e3c2c0ab93eb9e896a0adc93c685329e429982 +S074/S074R09.edf e8d8787476b0bf24a5caa9fbc450296cc86bb92fbf25c22dfde96f893bf20c4d +S074/S074R03.edf b93be723755be84e63dd7ad2a09e396392d71a4a6c156b085a7577a1ff6fce9d +S074/S074R09.edf.event 413f7dd132c171c2c14d9a7cdebe9c5e6ea047f267c8e9fbd8669a3ad0fda05b +S074/S074R08.edf.event 6a98c81dedf4cfc225c5d5260ce569ed2e2c48b1be5698ab3cba2089b491e545 +S074/S074R03.edf.event 0c18fa49d469703f30b80a748450ef0688aa72103e201d690fb064dd55c7e540 +S074/S074R10.edf 25261ebba19c4f04b7fe9597287a49a97e91268941c9fd3609ec0a7f53fcd5e6 +S074/S074R13.edf a2816222018a1b9b734f200ea774b9e43371a8c835c729649955ec1b6ad2335b +S074/S074R10.edf.event 772fcde48d228db4376dd7486ad68145a7a40f0180612741f47a1886d5be50f4 +S074/S074R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S074/S074R06.edf 
c7213e39ce5e307c6619762169d0c07823db7fade9cd2eaa6696fa0ac658c008 +S059/S059R05.edf 80b7c06b3187db15ae83e290d7f82af308e44dcdcb6eef1a0162ec2d9760ad4c +S059/S059R04.edf 904e79d95c362eab86e46e0a1addbe3b80ba802b98b372c63b8872537fcd9bbe +S059/S059R05.edf.event d0677488ead680ce24ed6d8a77105b8da3f0bd8cad38fff43732482c17bc4510 +S059/S059R03.edf ceeae042f3be7a4d705c8b13b507e58d901cdb5e7bd4e367b571f85fe5fb6070 +S059/S059R13.edf 72f0530ffb191f575b29f72964e51fa108ae57f495db19dc11de1e7f3c35262f +S059/S059R09.edf 10491ecfa52bcc63056974652c8931ea19a0adb0d3ed331a1a4d58e5bdedb849 +S059/S059R07.edf d425bba7b0f3019f725f3b43c2a63d703b3524f177b6341beaefc1d479c9561b +S059/S059R09.edf.event feeb9bf47f8f6dd0e083c64236055fd7ca30a06709747978b6a7bbc0b632e21c +S059/S059R13.edf.event d6641564c0a8724abac8f919ad99064213b0fafe2c25f0d37d1cc67d83bda19c +S059/S059R06.edf c7ccb0ab2969a4f1d5c522e66299c08d8db2c39c9ad977a9be7613132b4bb46d +S059/S059R08.edf.event e1ad9bc40c1368a9f08ea6dd90c3750fa2947ff48bcebe59b7e89441a2db68b8 +S059/S059R02.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S059/S059R10.edf 20becd179b2aff43d00c5c5a11faf39a820cb59c8d43f2668d9458dddda9ddd8 +S059/S059R06.edf.event 23b6fc268df2ca2b48b0a505a6eed055a62dad293041c180f6f03136d0f4b36e +S059/S059R01.edf b8b6001fbd9853ab9cf3cc7dc2c40fe0547fd8904044d4bb5b654a083d6f1584 +S059/S059R14.edf.event ea6f6003ddc3676cda270c185ba40fd7915630a24511085aeb31217c9a1e8852 +S059/S059R02.edf 0135175166b23559b8ff05f4e3a4fa0ba0fa24a65b411e0bc1e20dfda9d89074 +S059/S059R01.edf.event 98cfbe863896ed90aa76ff95ad4151ed3b02157854cfa6a974121fc123c109c0 +S059/S059R10.edf.event 927b59b2e19d89df8169362057a329c5b70fd65d1d2d0a77e56546927bc02281 +S059/S059R11.edf 6841c44bb69451d341157e05f64d00cc51ddc1d9076bf6903c5aaaff5aeed129 +S059/S059R08.edf 9c96400ddc6cddc13df50bc49a564365e8f3e24fd9fac3b6fa494cb939c9c627 +S059/S059R03.edf.event 60cee9d6dd15c5919b0d3d4f0618922c82d19e6490ea88b6fcd09fc7631fac71 +S059/S059R12.edf ed9b66b031f545783a98ccc77915ee74b1f9b0471a6edb601408b63b7320121f +S059/S059R12.edf.event aaa5cf263ed5e1e7bf38b3d540f4e2d286107c553f7904af5c9cee9bfe715f19 +S059/S059R07.edf.event 037c923028aca24716dbc8d307c618e19d4e94bb1e759e5ddb8a9d3b6b205a86 +S059/S059R11.edf.event b9568e8466c8f90e1fe1f9aab8ddb73ea16c008b7b67cbbe5863f04f2ec408f0 +S059/S059R14.edf 341c60903abc18408b921bef74bfaa5dbed75d886f13c55b25090afa23690c16 +S059/S059R04.edf.event 950cf97301dd495a2a6a7a19a93d67d2ee9ee03b2675e300582e19305f5a9287 \ No newline at end of file diff --git a/mne/data/extinction_coef.mat b/mne/data/extinction_coef.mat new file mode 100644 index 0000000..0ea91c9 Binary files /dev/null and b/mne/data/extinction_coef.mat differ diff --git a/mne/data/fsaverage/fsaverage-fiducials.fif b/mne/data/fsaverage/fsaverage-fiducials.fif new file mode 100644 index 0000000..faaae97 Binary files /dev/null and b/mne/data/fsaverage/fsaverage-fiducials.fif differ diff --git a/mne/data/fsaverage/fsaverage-head.fif b/mne/data/fsaverage/fsaverage-head.fif new file mode 100644 index 0000000..b66d16a Binary files /dev/null and b/mne/data/fsaverage/fsaverage-head.fif differ diff --git a/mne/data/fsaverage/fsaverage-inner_skull-bem.fif b/mne/data/fsaverage/fsaverage-inner_skull-bem.fif new file mode 100644 index 0000000..6f23294 Binary files /dev/null and b/mne/data/fsaverage/fsaverage-inner_skull-bem.fif differ diff --git a/mne/data/fsaverage/fsaverage-trans.fif b/mne/data/fsaverage/fsaverage-trans.fif new file mode 100644 index 0000000..92b5573 Binary files /dev/null and 
b/mne/data/fsaverage/fsaverage-trans.fif differ diff --git a/mne/data/helmets/122m.fif.gz b/mne/data/helmets/122m.fif.gz new file mode 100644 index 0000000..79d1773 Binary files /dev/null and b/mne/data/helmets/122m.fif.gz differ diff --git a/mne/data/helmets/306m.fif.gz b/mne/data/helmets/306m.fif.gz new file mode 100644 index 0000000..e57e840 Binary files /dev/null and b/mne/data/helmets/306m.fif.gz differ diff --git a/mne/data/helmets/306m_rt.fif.gz b/mne/data/helmets/306m_rt.fif.gz new file mode 100644 index 0000000..60d025b Binary files /dev/null and b/mne/data/helmets/306m_rt.fif.gz differ diff --git a/mne/data/helmets/BabySQUID.fif.gz b/mne/data/helmets/BabySQUID.fif.gz new file mode 100644 index 0000000..3269ffb Binary files /dev/null and b/mne/data/helmets/BabySQUID.fif.gz differ diff --git a/mne/data/helmets/CTF_275.fif.gz b/mne/data/helmets/CTF_275.fif.gz new file mode 100644 index 0000000..4b36411 Binary files /dev/null and b/mne/data/helmets/CTF_275.fif.gz differ diff --git a/mne/data/helmets/KIT.fif.gz b/mne/data/helmets/KIT.fif.gz new file mode 100644 index 0000000..b508585 Binary files /dev/null and b/mne/data/helmets/KIT.fif.gz differ diff --git a/mne/data/helmets/Kernel_Flux.fif.gz b/mne/data/helmets/Kernel_Flux.fif.gz new file mode 100644 index 0000000..66504d7 Binary files /dev/null and b/mne/data/helmets/Kernel_Flux.fif.gz differ diff --git a/mne/data/helmets/Kernel_Flux_ch_pos.txt b/mne/data/helmets/Kernel_Flux_ch_pos.txt new file mode 100644 index 0000000..8f9807c --- /dev/null +++ b/mne/data/helmets/Kernel_Flux_ch_pos.txt @@ -0,0 +1,202 @@ +{ + "MA1": [ + -0.040249, + 0.092195, + 0.024061 + ], + "MA2": [ + 0.0, + 0.10462, + 0.016906 + ], + "MA3": [ + 0.040249, + 0.092195, + 0.024061 + ], + "MA4": [ + 0.0, + 0.083089, + 0.053953 + ], + "MB1": [ + -0.040251, + 0.063041, + 0.05933 + ], + "MB2": [ + 0.0, + 0.045194, + 0.077115 + ], + "MB3": [ + 0.040251, + 0.063041, + 0.05933 + ], + "MB4": [ + -0.036861, + 0.026016, + 0.076163 + ], + "MB5": [ + 0.036861, + 0.026016, + 0.076163 + ], + "MC1": [ + -0.035716, + -0.017727, + 0.077709 + ], + "MC2": [ + 0.0, + 0.00196, + 0.085517 + ], + "MC3": [ + 0.035716, + -0.017727, + 0.077709 + ], + "MC4": [ + -0.034502, + -0.056871, + 0.064752 + ], + "MC5": [ + 0.0, + -0.037943, + 0.080668 + ], + "MC6": [ + 0.034502, + -0.056871, + 0.064752 + ], + "MD1": [ + -0.036596, + -0.084035, + 0.027244 + ], + "MD2": [ + 0, + -0.082423, + 0.048749 + ], + "MD3": [ + 0.036596, + -0.084035, + 0.027244 + ], + "MD4": [ + 0, + -0.095211, + 0.008834 + ], + "ME1": [ + -0.038329, + -0.084588, + -0.017313 + ], + "ME2": [ + 0.0, + -0.086368, + -0.035705 + ], + "ME3": [ + 0.038329, + -0.084588, + -0.017313 + ], + "ME4": [ + -0.028545, + -0.071814, + -0.06008 + ], + "ME5": [ + 0.028545, + -0.071814, + -0.06008 + ], + "RA1": [ + 0.06916, + 0.059735, + 0.008274 + ], + "RA2": [ + 0.070053, + 0.032925, + 0.039006 + ], + "RB1": [ + 0.069394, + -0.009905, + 0.045791 + ], + "RB2": [ + 0.06596, + -0.048253, + 0.029541 + ], + "RC1": [ + 0.07757, + 0.023536, + -0.014638 + ], + "RC2": [ + 0.077612, + -0.013307, + 0.002181 + ], + "RC3": [ + 0.068927, + -0.049354, + -0.013998 + ], + "RC4": [ + 0.06444, + -0.040455, + -0.053382 + ], + "LA1": [ + -0.06916, + 0.059735, + 0.008274 + ], + "LA2": [ + -0.070053, + 0.032925, + 0.039006 + ], + "LB1": [ + -0.069394, + -0.009905, + 0.045791 + ], + "LB2": [ + -0.06596, + -0.048253, + 0.029541 + ], + "LC1": [ + -0.07757, + 0.023536, + -0.014638 + ], + "LC2": [ + -0.077612, + -0.013307, + 0.002181 + ], + "LC3": [ + -0.068927, + 
-0.049354, + -0.013998 + ], + "LC4": [ + -0.06444, + -0.040455, + -0.053382 + ] +} \ No newline at end of file diff --git a/mne/data/helmets/Magnes_2500wh.fif.gz b/mne/data/helmets/Magnes_2500wh.fif.gz new file mode 100644 index 0000000..27275ae Binary files /dev/null and b/mne/data/helmets/Magnes_2500wh.fif.gz differ diff --git a/mne/data/helmets/Magnes_3600wh.fif.gz b/mne/data/helmets/Magnes_3600wh.fif.gz new file mode 100644 index 0000000..c665595 Binary files /dev/null and b/mne/data/helmets/Magnes_3600wh.fif.gz differ diff --git a/mne/data/icos.fif.gz b/mne/data/icos.fif.gz new file mode 100644 index 0000000..99e526b Binary files /dev/null and b/mne/data/icos.fif.gz differ diff --git a/mne/data/mne_analyze.sel b/mne/data/mne_analyze.sel new file mode 100644 index 0000000..ae4bf34 --- /dev/null +++ b/mne/data/mne_analyze.sel @@ -0,0 +1,19 @@ +# +# All channels +# +Vertex:MEG 0633|MEG 0632|MEG 0423|MEG 0422|MEG 0712|MEG 0713|MEG 0433|MEG 0432|MEG 0742|MEG 0743|MEG 1822|MEG 1823|MEG 1043|MEG 1042|MEG 1112|MEG 1113|MEG 0722|MEG 0723|MEG 1142|MEG 1143|MEG 0732|MEG 0733|MEG 2212|MEG 2213|MEG 0631|MEG 0431|MEG 0711|MEG 0431|MEG 0741|MEG 1821|MEG 1041|MEG 1111|MEG 0721|MEG 1141|MEG 0731|MEG 2211 +Left-temporal:MEG 0223|MEG 0222|MEG 0212|MEG 0213|MEG 0133|MEG 0132|MEG 0112|MEG 0113|MEG 0233|MEG 0232|MEG 0243|MEG 0242|MEG 1512|MEG 1513|MEG 0143|MEG 0142|MEG 1623|MEG 1622|MEG 1613|MEG 1612|MEG 1523|MEG 1522|MEG 1543|MEG 1542|MEG 1533|MEG 1532|MEG 0221|MEG 0211|MEG 0131|MEG 0111|MEG 0231|MEG 0241|MEG 1511|MEG 0141|MEG 1621|MEG 1611|MEG 1521|MEG 1541|MEG 1531 +Right-temporal:MEG 1312|MEG 1313|MEG 1323|MEG 1322|MEG 1442|MEG 1443|MEG 1423|MEG 1422|MEG 1342|MEG 1343|MEG 1333|MEG 1332|MEG 2612|MEG 2613|MEG 1433|MEG 1432|MEG 2413|MEG 2412|MEG 2422|MEG 2423|MEG 2642|MEG 2643|MEG 2623|MEG 2622|MEG 2633|MEG 2632|MEG 1311|MEG 1321|MEG 1441|MEG 1421|MEG 1341|MEG 1331|MEG 2611|MEG 1431|MEG 2411|MEG 2421|MEG 2641|MEG 2621|MEG 2631 +Left-parietal:MEG 0633|MEG 0632|MEG 0423|MEG 0422|MEG 0412|MEG 0413|MEG 0712|MEG 0713|MEG 0433|MEG 0432|MEG 0442|MEG 0443|MEG 0742|MEG 0743|MEG 1822|MEG 1823|MEG 1813|MEG 1812|MEG 1832|MEG 1833|MEG 1843|MEG 1842|MEG 1632|MEG 1633|MEG 2013|MEG 2012|MEG 0631|MEG 0421|MEG 0411|MEG 0711|MEG 0431|MEG 0441|MEG 0741|MEG 1821|MEG 1811|MEG 1831|MEG 1841|MEG 1631|MEG 2011 +Right-parietal:MEG 1043|MEG 1042|MEG 1112|MEG 1113|MEG 1123|MEG 1122|MEG 0722|MEG 0723|MEG 1142|MEG 1143|MEG 1133|MEG 1132|MEG 0732|MEG 0733|MEG 2212|MEG 2213|MEG 2223|MEG 2222|MEG 2242|MEG 2243|MEG 2232|MEG 2233|MEG 2442|MEG 2443|MEG 2023|MEG 2022|MEG 1041|MEG 1111|MEG 1121|MEG 0721|MEG 1141|MEG 1131|MEG 0731|MEG 2211|MEG 2221|MEG 2241|MEG 2231|MEG 2441|MEG 2021 +Left-occipital:MEG 2042|MEG 2043|MEG 1913|MEG 1912|MEG 2113|MEG 2112|MEG 1922|MEG 1923|MEG 1942|MEG 1943|MEG 1642|MEG 1643|MEG 1933|MEG 1932|MEG 1733|MEG 1732|MEG 1723|MEG 1722|MEG 2143|MEG 2142|MEG 1742|MEG 1743|MEG 1712|MEG 1713|MEG 2041|MEG 1911|MEG 2111|MEG 1921|MEG 1941|MEG 1641|MEG 1931|MEG 1731|MEG 1721|MEG 2141|MEG 1741|MEG 1711 +Right-occipital:MEG 2032|MEG 2033|MEG 2313|MEG 2312|MEG 2342|MEG 2343|MEG 2322|MEG 2323|MEG 2433|MEG 2432|MEG 2122|MEG 2123|MEG 2333|MEG 2332|MEG 2513|MEG 2512|MEG 2523|MEG 2522|MEG 2133|MEG 2132|MEG 2542|MEG 2543|MEG 2532|MEG 2533|MEG 2031|MEG 2311|MEG 2341|MEG 2321|MEG 2431|MEG 2121|MEG 2331|MEG 2511|MEG 2521|MEG 2131|MEG 2541|MEG 2531 +Left-frontal:MEG 0522|MEG 0523|MEG 0512|MEG 0513|MEG 0312|MEG 0313|MEG 0342|MEG 0343|MEG 0122|MEG 0123|MEG 0822|MEG 0823|MEG 0533|MEG 0532|MEG 0543|MEG 0542|MEG 0322|MEG 0323|MEG 0612|MEG 
0613|MEG 0333|MEG 0332|MEG 0622|MEG 0623|MEG 0643|MEG 0642|MEG 0521|MEG 0511|MEG 0311|MEG 0341|MEG 0121|MEG 0821|MEG 0531|MEG 0541|MEG 0321|MEG 0611|MEG 0331|MEG 0621|MEG 0641 +Right-frontal:MEG 0813|MEG 0812|MEG 0912|MEG 0913|MEG 0922|MEG 0923|MEG 1212|MEG 1213|MEG 1223|MEG 1222|MEG 1412|MEG 1413|MEG 0943|MEG 0942|MEG 0933|MEG 0932|MEG 1232|MEG 1233|MEG 1012|MEG 1013|MEG 1022|MEG 1023|MEG 1243|MEG 1242|MEG 1033|MEG 1032|MEG 0811|MEG 0911|MEG 0921|MEG 1211|MEG 1221|MEG 1411|MEG 0941|MEG 0931|MEG 1231|MEG 1011|MEG 1021|MEG 1241|MEG 1031 +# +# EEG in groups of 32 channels +# +EEG 1-32:EEG 001|EEG 002|EEG 003|EEG 004|EEG 005|EEG 006|EEG 007|EEG 008|EEG 009|EEG 010|EEG 011|EEG 012|EEG 013|EEG 014|EEG 015|EEG 016|EEG 017|EEG 018|EEG 019|EEG 020|EEG 021|EEG 022|EEG 023|EEG 024|EEG 025|EEG 026|EEG 027|EEG 028|EEG 029|EEG 030|EEG 031|EEG 032 +EEG 33-64:EEG 033|EEG 034|EEG 035|EEG 036|EEG 037|EEG 038|EEG 039|EEG 040|EEG 041|EEG 042|EEG 043|EEG 044|EEG 045|EEG 046|EEG 047|EEG 048|EEG 049|EEG 050|EEG 051|EEG 052|EEG 053|EEG 054|EEG 055|EEG 056|EEG 057|EEG 058|EEG 059|EEG 060|EEG 061|EEG 062|EEG 063|EEG 064 +EEG 65-96:EEG 065|EEG 066|EEG 067|EEG 068|EEG 069|EEG 070|EEG 071|EEG 072|EEG 073|EEG 074|EEG 075|EEG 076|EEG 077|EEG 078|EEG 079|EEG 080|EEG 081|EEG 082|EEG 083|EEG 084|EEG 085|EEG 086|EEG 087|EEG 088|EEG 089|EEG 090|EEG 091|EEG 092|EEG 093|EEG 094|EEG 095|EEG 096 +EEG 97-128:EEG 097|EEG 098|EEG 099|EEG 100|EEG 101|EEG 102|EEG 103|EEG 104|EEG 105|EEG 106|EEG 107|EEG 108|EEG 109|EEG 110|EEG 111|EEG 112|EEG 113|EEG 114|EEG 115|EEG 116|EEG 117|EEG 118|EEG 119|EEG 120|EEG 121|EEG 122|EEG 123|EEG 124|EEG 125|EEG 126|EEG 127|EEG 128 \ No newline at end of file diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py new file mode 100644 index 0000000..700dab0 --- /dev/null +++ b/mne/datasets/__init__.py @@ -0,0 +1,12 @@ +"""Functions for fetching remote datasets. + +See :ref:`datasets` for more information. +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/datasets/__init__.pyi b/mne/datasets/__init__.pyi new file mode 100644 index 0000000..44cee84 --- /dev/null +++ b/mne/datasets/__init__.pyi @@ -0,0 +1,76 @@ +__all__ = [ + "_download_all_example_data", + "_fake", + "brainstorm", + "eegbci", + "epilepsy_ecog", + "erp_core", + "eyelink", + "fetch_aparc_sub_parcellation", + "fetch_dataset", + "fetch_fsaverage", + "fetch_hcp_mmp_parcellation", + "fetch_infant_template", + "fetch_phantom", + "fieldtrip_cmc", + "fnirs_motor", + "has_dataset", + "hf_sef", + "kiloword", + "limo", + "misc", + "mtrf", + "multimodal", + "opm", + "phantom_4dbti", + "phantom_kernel", + "phantom_kit", + "refmeg_noise", + "sample", + "sleep_physionet", + "somato", + "spm_face", + "ssvep", + "testing", + "ucl_opm_auditory", + "visual_92_categories", +] +from . 
import ( + _fake, + brainstorm, + eegbci, + epilepsy_ecog, + erp_core, + eyelink, + fieldtrip_cmc, + fnirs_motor, + hf_sef, + kiloword, + limo, + misc, + mtrf, + multimodal, + opm, + phantom_4dbti, + phantom_kernel, + phantom_kit, + refmeg_noise, + sample, + sleep_physionet, + somato, + spm_face, + ssvep, + testing, + ucl_opm_auditory, + visual_92_categories, +) +from ._fetch import fetch_dataset +from ._fsaverage.base import fetch_fsaverage +from ._infant import fetch_infant_template +from ._phantom.base import fetch_phantom +from .utils import ( + _download_all_example_data, + fetch_aparc_sub_parcellation, + fetch_hcp_mmp_parcellation, + has_dataset, +) diff --git a/mne/datasets/_fake/__init__.py b/mne/datasets/_fake/__init__.py new file mode 100644 index 0000000..fb11651 --- /dev/null +++ b/mne/datasets/_fake/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Fake dataset for testing.""" + +from ._fake import data_path, get_version diff --git a/mne/datasets/_fake/_fake.py b/mne/datasets/_fake/_fake.py new file mode 100644 index 0000000..e983153 --- /dev/null +++ b/mne/datasets/_fake/_fake.py @@ -0,0 +1,30 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=False, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="fake", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format(name="fake", conf="MNE_DATASETS_FAKE_PATH") + + +def get_version(): # noqa: D103 + return _get_version("fake") + + +get_version.__doc__ = _version_doc.format(name="fake") diff --git a/mne/datasets/_fetch.py b/mne/datasets/_fetch.py new file mode 100644 index 0000000..1e38606 --- /dev/null +++ b/mne/datasets/_fetch.py @@ -0,0 +1,307 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from __future__ import annotations # only needed for Python ≤ 3.9 + +import os +import os.path as op +import sys +import time +from pathlib import Path +from shutil import rmtree + +from .. import __version__ as mne_version +from ..fixes import _compare_version +from ..utils import _safe_input, logger, warn +from .config import ( + MISC_VERSIONED, + RELEASES, + TESTING_VERSIONED, + _bst_license_text, +) +from .utils import ( + _dataset_version, + _do_path_update, + _downloader_params, + _get_path, + _log_time_size, +) + +_FAKE_VERSION = None # used for monkeypatching while testing versioning + + +def fetch_dataset( + dataset_params, + processor=None, + path=None, + force_update=False, + update_path=True, + download=True, + check_version=False, + return_version=False, + accept=False, + auth=None, + token=None, +) -> Path | tuple[Path, str]: + """Fetch an MNE-compatible dataset using pooch. + + Parameters + ---------- + dataset_params : list of dict | dict + The dataset name(s) and corresponding parameters to download the + dataset(s). The dataset parameters contain the following keys: + ``archive_name``, ``url``, ``folder_name``, ``hash``, + ``config_key`` (optional). See Notes. 
+    processor : None | "unzip" | "untar" | instance of pooch.Unzip | instance of pooch.Untar
+        What to do after downloading the file. ``"unzip"`` and ``"untar"`` will
+        decompress the downloaded file in place; for custom extraction (e.g.,
+        only extracting certain files from the archive) pass an instance of
+        ``pooch.Unzip`` or ``pooch.Untar``. If ``None`` (the
+        default), the files are left as-is.
+    path : None | str
+        Directory in which to put the dataset. If ``None``, the dataset
+        location is determined by first checking whether
+        ``dataset_params['config_key']`` is defined, and if so, whether that
+        config key exists in the MNE-Python config file. If so, the configured
+        path is used; if not, the location is set to the value of the
+        ``MNE_DATA`` config key (if it exists), or ``~/mne_data`` otherwise.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+        Default is False.
+    update_path : bool | None
+        If True (default), set the mne-python config to the given
+        path. If None, the user is prompted.
+    download : bool
+        If False and the dataset has not been downloaded yet, it will not be
+        downloaded and the path will be returned as ``''`` (empty string). This
+        is mostly used for testing purposes and can be safely ignored by most
+        users.
+    check_version : bool
+        Whether to check the version of the dataset. Each version
+        of the dataset is stored in the root with a ``version.txt`` file.
+    return_version : bool
+        Whether to also return the version of the dataset.
+        Defaults to False.
+    accept : bool
+        Some MNE-supplied datasets require acceptance of an additional license.
+        Default is ``False``.
+    auth : tuple | None
+        Optional authentication tuple containing the username and
+        password/token, passed to ``pooch.HTTPDownloader`` (e.g.,
+        ``auth=('foo', '012345')``).
+    token : str | None
+        Optional authentication token passed to ``pooch.HTTPDownloader``.
+
+    Returns
+    -------
+    data_path : instance of Path
+        The path to the fetched dataset.
+    version : str
+        Only returned if ``return_version`` is True.
+
+    See Also
+    --------
+    mne.get_config
+    mne.set_config
+    mne.datasets.has_dataset
+
+    Notes
+    -----
+    The ``dataset_params`` argument must contain the following keys:
+
+    - ``dataset_name``: the name of the dataset
+    - ``archive_name``: The name of the (possibly compressed) file to download
+    - ``url``: URL from which the file can be downloaded
+    - ``folder_name``: the subfolder within the ``MNE_DATA`` folder in which to
+      save and uncompress (if needed) the file(s)
+    - ``hash``: the cryptographic hash type of the file followed by a colon and
+      then the hash value (examples: "sha256:19uheid...", "md5:upodh2io...")
+    - ``config_key`` (optional): key passed to :func:`mne.set_config` to store
+      the on-disk location of the downloaded dataset (e.g.,
+      ``"MNE_DATASETS_EEGBCI_PATH"``). This will only work for the provided
+      datasets listed :ref:`here <datasets>`; do not use for user-defined
+      datasets.
+
+    An example would look like::
+
+        {'dataset_name': 'sample',
+         'archive_name': 'MNE-sample-data-processed.tar.gz',
+         'hash': 'md5:12b75d1cb7df9dfb4ad73ed82f61094f',
+         'url': 'https://osf.io/86qa2/download?version=5',
+         'folder_name': 'MNE-sample-data',
+         'config_key': 'MNE_DATASETS_SAMPLE_PATH'}
+
+    For datasets where a single (possibly compressed) file must be downloaded,
+    pass a single :class:`dict` as ``dataset_params``. For datasets where
+    multiple files must be downloaded and (optionally) uncompressed separately,
+    pass a list of dicts.
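+
+    Examples
+    --------
+    A minimal sketch of fetching a single archive; the dataset name, URL,
+    and hash below are placeholders for illustration, not a real
+    MNE-supplied dataset::
+
+        from mne.datasets import fetch_dataset
+
+        dataset_params = dict(
+            dataset_name="example",  # placeholder name
+            archive_name="example-data.tar.gz",
+            hash="md5:00000000000000000000000000000000",  # placeholder hash
+            url="https://example.com/example-data.tar.gz",  # placeholder URL
+            folder_name="example-data",
+        )
+        data_path = fetch_dataset(dataset_params, processor="untar")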
+ """ # noqa E501 + import pooch + + t0 = time.time() + + if auth is not None: + if len(auth) != 2: + raise RuntimeError( + "auth should be a 2-tuple consisting " + "of a username and password/token." + ) + + # processor to uncompress files + if processor == "untar": + processor = pooch.Untar(extract_dir=path) + elif processor == "unzip": + processor = pooch.Unzip(extract_dir=path) + + if isinstance(dataset_params, dict): + dataset_params = [dataset_params] + + # extract configuration parameters + names = [params["dataset_name"] for params in dataset_params] + name = names[0] + dataset_dict = dataset_params[0] + config_key = dataset_dict.get("config_key", None) + folder_name = dataset_dict["folder_name"] + + # get download path for specific dataset + path = _get_path(path=path, key=config_key, name=name) + + # get the actual path to each dataset folder name + final_path = op.join(path, folder_name) + + # handle BrainStorm datasets with nested folders for datasets + if name.startswith("bst_"): + final_path = op.join(final_path, name) + + final_path = Path(final_path) + + # additional condition: check for version.txt and parse it + # check if testing or misc data is outdated; if so, redownload it + want_version = RELEASES.get(name, None) + want_version = _FAKE_VERSION if name == "fake" else want_version + + # get the version of the dataset and then check if the version is outdated + data_version = _dataset_version(final_path, name) + outdated = want_version is not None and _compare_version( + want_version, ">", data_version + ) + + if outdated: + logger.info( + f"Dataset {name} version {data_version} out of date, " + f"latest version is {want_version}" + ) + empty = Path("") + + # return empty string if outdated dataset and we don't want to download + if (not force_update) and outdated and not download: + logger.info( + "Dataset out of date but force_update=False and download=False, " + "returning empty data_path" + ) + return (empty, data_version) if return_version else empty + + # reasons to bail early (hf_sef has separate code for this): + if (not force_update) and (not outdated) and (not name.startswith("hf_sef_")): + # ...if target folder exists (otherwise pooch downloads every + # time because we don't save the archive files after unpacking, so + # pooch can't check its checksum) + if op.isdir(final_path): + if config_key is not None: + _do_path_update(path, update_path, config_key, name) + return (final_path, data_version) if return_version else final_path + # ...if download=False (useful for debugging) + elif not download: + return (empty, data_version) if return_version else empty + # ...if user didn't accept the license + elif name.startswith("bst_"): + if accept or "--accept-brainstorm-license" in sys.argv: + answer = "y" + else: + # If they don't have stdin, just accept the license + # https://github.com/mne-tools/mne-python/issues/8513#issuecomment-726823724 # noqa: E501 + answer = _safe_input(f"{_bst_license_text}Agree (y/[n])? 
", use="y") + if answer.lower() != "y": + raise RuntimeError("You must agree to the license to use this dataset") + # downloader & processors + download_params = _downloader_params(auth=auth, token=token) + if name == "fake": + download_params["progressbar"] = False + downloader = pooch.HTTPDownloader(**download_params) + + # make mappings from archive names to urls and to checksums + urls = dict() + registry = dict() + for idx, this_name in enumerate(names): + this_dataset = dataset_params[idx] + archive_name = this_dataset["archive_name"] + dataset_url = this_dataset["url"] + dataset_hash = this_dataset["hash"] + urls[archive_name] = dataset_url + registry[archive_name] = dataset_hash + + # create the download manager + use_path = final_path if processor is None else Path(path) + fetcher = pooch.create( + path=str(use_path), + base_url="", # Full URLs are given in the `urls` dict. + version=None, # Data versioning is decoupled from MNE-Python version. + urls=urls, + registry=registry, + retry_if_failed=2, # 2 retries = 3 total attempts + ) + + # use our logger level for pooch's logger too + pooch.get_logger().setLevel(logger.getEffectiveLevel()) + sz = 0 + + for idx in range(len(names)): + # fetch and unpack the data + archive_name = dataset_params[idx]["archive_name"] + try: + fetcher.fetch( + fname=archive_name, downloader=downloader, processor=processor + ) + except ValueError as err: + err = str(err) + if "hash of downloaded file" in str(err): + raise ValueError( + f"{err} Consider using force_update=True to force " + "the dataset to be downloaded again." + ) from None + else: + raise + fname = use_path / archive_name + sz += fname.stat().st_size + # after unpacking, remove the archive file + if processor is not None: + fname.unlink() + + # remove version number from "misc" and "testing" datasets folder names + if name == "misc": + rmtree(final_path, ignore_errors=True) + os.replace(op.join(path, MISC_VERSIONED), final_path) + elif name == "testing": + rmtree(final_path, ignore_errors=True) + os.replace(op.join(path, TESTING_VERSIONED), final_path) + + # maybe update the config + if config_key is not None: + old_name = "brainstorm" if name.startswith("bst_") else name + _do_path_update(path, update_path, config_key, old_name) + + # compare the version of the dataset and mne + data_version = _dataset_version(path, name) + # 0.7 < 0.7.git should be False, therefore strip + if check_version and ( + _compare_version(data_version, "<", mne_version.strip(".git")) + ): + # OK to `nosec` because it's false positive (misidentified as SQL) + warn( + f"The {name} dataset (version {data_version}) is older than " + f"mne-python (version {mne_version}). If the examples fail, " + f"you may need to update the {name} dataset by using " + f"mne.datasets.{name}.data_path(force_update=True)" # nosec B608 + ) + _log_time_size(t0, sz) + return (final_path, data_version) if return_version else final_path diff --git a/mne/datasets/_fsaverage/__init__.py b/mne/datasets/_fsaverage/__init__.py new file mode 100644 index 0000000..04c673b --- /dev/null +++ b/mne/datasets/_fsaverage/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/datasets/_fsaverage/base.py b/mne/datasets/_fsaverage/base.py new file mode 100644 index 0000000..b15a8c7 --- /dev/null +++ b/mne/datasets/_fsaverage/base.py @@ -0,0 +1,103 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +from pathlib import Path + +from ...utils import get_subjects_dir, set_config, verbose +from ..utils import _get_path, _manifest_check_download + +FSAVERAGE_MANIFEST_PATH = Path(__file__).parent + + +@verbose +def fetch_fsaverage(subjects_dir=None, *, verbose=None): + """Fetch and update fsaverage. + + Parameters + ---------- + subjects_dir : str | None + The path to use as the subjects directory in the MNE-Python + config file. None will use the existing config variable (i.e., + will not change anything), and if it does not exist, will use + ``~/mne_data/MNE-fsaverage-data``. + %(verbose)s + + Returns + ------- + fs_dir : Path + The fsaverage directory. + (essentially ``subjects_dir / 'fsaverage'``). + + .. versionchanged:: 1.8 + A :class:`pathlib.Path` object is returned instead of a string. + + Notes + ----- + This function is designed to provide + + 1. All modern (Freesurfer 6) fsaverage subject files + 2. All MNE fsaverage parcellations + 3. fsaverage head surface, fiducials, head<->MRI trans, 1- and 3-layer + BEMs (and surfaces) + + This function will compare the contents of ``subjects_dir/fsaverage`` + to the ones provided in the remote zip file. If any are missing, + the zip file is downloaded and files are updated. No files will + be overwritten. + + .. versionadded:: 0.18 + """ + # Code used to create the BEM (other files taken from MNE-sample-data): + # + # $ mne watershed_bem -s fsaverage -d $PWD --verbose info --copy + # $ python + # >>> bem = mne.make_bem_model('fsaverage', subjects_dir='.', verbose=True) + # >>> mne.write_bem_surfaces( + # ... 'fsaverage/bem/fsaverage-5120-5120-5120-bem.fif', bem) + # >>> sol = mne.make_bem_solution(bem, verbose=True) + # >>> mne.write_bem_solution( + # ... 'fsaverage/bem/fsaverage-5120-5120-5120-bem-sol.fif', sol) + # >>> import os + # >>> import os.path as op + # >>> names = sorted(op.join(r, f) + # ... for r, d, files in os.walk('fsaverage') + # ... 
for f in files) + # with open('fsaverage.txt', 'w') as fid: + # fid.write('\n'.join(names)) + # + subjects_dir = _set_montage_coreg_path(subjects_dir) + subjects_dir = subjects_dir.expanduser().absolute() + fs_dir = subjects_dir / "fsaverage" + fs_dir.mkdir(parents=True, exist_ok=True) + _manifest_check_download( + manifest_path=FSAVERAGE_MANIFEST_PATH / "root.txt", + destination=subjects_dir, + url="https://osf.io/3bxqt/download?version=2", + hash_="5133fe92b7b8f03ae19219d5f46e4177", + ) + _manifest_check_download( + manifest_path=FSAVERAGE_MANIFEST_PATH / "bem.txt", + destination=subjects_dir / "fsaverage", + url="https://osf.io/7ve8g/download?version=4", + hash_="b31509cdcf7908af6a83dc5ee8f49fb1", + ) + return fs_dir + + +def _get_create_subjects_dir(subjects_dir): + subjects_dir = get_subjects_dir(subjects_dir, raise_error=False) + if subjects_dir is None: + subjects_dir = _get_path(None, "MNE_DATA", "montage coregistration") + subjects_dir = subjects_dir / "MNE-fsaverage-data" + subjects_dir.mkdir(parents=True, exist_ok=True) + return subjects_dir + + +def _set_montage_coreg_path(subjects_dir=None): + """Set a subject directory suitable for montage(-only) coregistration.""" + subjects_dir = _get_create_subjects_dir(subjects_dir) + old_subjects_dir = get_subjects_dir(None, raise_error=False) + if old_subjects_dir is None: + set_config("SUBJECTS_DIR", subjects_dir) + return subjects_dir diff --git a/mne/datasets/_fsaverage/bem.txt b/mne/datasets/_fsaverage/bem.txt new file mode 100644 index 0000000..0f4ade5 --- /dev/null +++ b/mne/datasets/_fsaverage/bem.txt @@ -0,0 +1,12 @@ +bem/fsaverage-fiducials.fif +bem/fsaverage-5120-5120-5120-bem.fif +bem/fsaverage-head.fif +bem/outer_skin.surf +bem/brain.surf +bem/fsaverage-trans.fif +bem/fsaverage-ico-5-src.fif +bem/fsaverage-vol-5-src.fif +bem/outer_skull.surf +bem/inner_skull.surf +bem/fsaverage-5120-5120-5120-bem-sol.fif +bem/fsaverage-inner_skull-bem.fif diff --git a/mne/datasets/_fsaverage/root.txt b/mne/datasets/_fsaverage/root.txt new file mode 100644 index 0000000..a6d3281 --- /dev/null +++ b/mne/datasets/_fsaverage/root.txt @@ -0,0 +1,179 @@ +fsaverage/bem/fsaverage-head-dense.fif +fsaverage/bem/fsaverage-head-medium.fif +fsaverage/bem/fsaverage-head.fif +fsaverage/bem/fsaverage-ico-5-src.fif +fsaverage/label/lh.BA1.label +fsaverage/label/lh.BA2.label +fsaverage/label/lh.BA3a.label +fsaverage/label/lh.BA3b.label +fsaverage/label/lh.BA44.label +fsaverage/label/lh.BA45.label +fsaverage/label/lh.BA4a.label +fsaverage/label/lh.BA4p.label +fsaverage/label/lh.BA6.label +fsaverage/label/lh.HCPMMP1.annot +fsaverage/label/lh.HCPMMP1_combined.annot +fsaverage/label/lh.MT.label +fsaverage/label/lh.Medial_wall.label +fsaverage/label/lh.PALS_B12.labels.gii +fsaverage/label/lh.PALS_B12_Brodmann.annot +fsaverage/label/lh.PALS_B12_Lobes.annot +fsaverage/label/lh.PALS_B12_OrbitoFrontal.annot +fsaverage/label/lh.PALS_B12_Visuotopic.annot +fsaverage/label/lh.V1.label +fsaverage/label/lh.V2.label +fsaverage/label/lh.Yeo2011_17Networks_N1000.annot +fsaverage/label/lh.Yeo2011_7Networks_N1000.annot +fsaverage/label/lh.aparc.a2005s.annot +fsaverage/label/lh.aparc.a2009s.annot +fsaverage/label/lh.aparc.annot +fsaverage/label/lh.aparc.label +fsaverage/label/lh.aparc_sub.annot +fsaverage/label/lh.cortex.label +fsaverage/label/lh.entorhinal.label +fsaverage/label/lh.oasis.chubs.annot +fsaverage/label/rh.BA1.label +fsaverage/label/rh.BA2.label +fsaverage/label/rh.BA3a.label +fsaverage/label/rh.BA3b.label +fsaverage/label/rh.BA44.label 
+fsaverage/label/rh.BA45.label +fsaverage/label/rh.BA4a.label +fsaverage/label/rh.BA4p.label +fsaverage/label/rh.BA6.label +fsaverage/label/rh.HCPMMP1.annot +fsaverage/label/rh.HCPMMP1_combined.annot +fsaverage/label/rh.MT.label +fsaverage/label/rh.Medial_wall.label +fsaverage/label/rh.PALS_B12.labels.gii +fsaverage/label/rh.PALS_B12_Brodmann.annot +fsaverage/label/rh.PALS_B12_Lobes.annot +fsaverage/label/rh.PALS_B12_OrbitoFrontal.annot +fsaverage/label/rh.PALS_B12_Visuotopic.annot +fsaverage/label/rh.V1.label +fsaverage/label/rh.V2.label +fsaverage/label/rh.Yeo2011_17Networks_N1000.annot +fsaverage/label/rh.Yeo2011_7Networks_N1000.annot +fsaverage/label/rh.aparc.a2005s.annot +fsaverage/label/rh.aparc.a2009s.annot +fsaverage/label/rh.aparc.annot +fsaverage/label/rh.aparc.label +fsaverage/label/rh.aparc_sub.annot +fsaverage/label/rh.cortex.label +fsaverage/label/rh.entorhinal.label +fsaverage/label/rh.oasis.chubs.annot +fsaverage/mri.2mm/README +fsaverage/mri.2mm/T1.mgz +fsaverage/mri.2mm/aseg.mgz +fsaverage/mri.2mm/brain.mgz +fsaverage/mri.2mm/brainmask.mgz +fsaverage/mri.2mm/mni305.cor.mgz +fsaverage/mri.2mm/orig.mgz +fsaverage/mri.2mm/reg.2mm.dat +fsaverage/mri.2mm/reg.2mm.mni152.dat +fsaverage/mri.2mm/subcort.mask.mgz +fsaverage/mri.2mm/subcort.prob.mgz +fsaverage/mri/T1.mgz +fsaverage/mri/aparc+aseg.mgz +fsaverage/mri/aparc.a2005s+aseg.mgz +fsaverage/mri/aparc.a2009s+aseg.mgz +fsaverage/mri/aseg.mgz +fsaverage/mri/brain.mgz +fsaverage/mri/brainmask.mgz +fsaverage/mri/lh.ribbon.mgz +fsaverage/mri/mni305.cor.mgz +fsaverage/mri/orig.mgz +fsaverage/mri/p.aseg.mgz +fsaverage/mri/rh.ribbon.mgz +fsaverage/mri/ribbon.mgz +fsaverage/mri/seghead.mgz +fsaverage/mri/subcort.prob.log +fsaverage/mri/subcort.prob.mgz +fsaverage/mri/transforms/reg.mni152.2mm.dat +fsaverage/mri/transforms/talairach.xfm +fsaverage/scripts/build-stamp.txt +fsaverage/scripts/csurfdir +fsaverage/scripts/make_average_surface.log +fsaverage/scripts/make_average_volume.log +fsaverage/scripts/mkheadsurf.log +fsaverage/scripts/mris_inflate.log +fsaverage/scripts/mris_inflate_lh.log +fsaverage/scripts/mris_inflate_rh.log +fsaverage/scripts/recon-all-status.log +fsaverage/scripts/recon-all.cmd +fsaverage/scripts/recon-all.done +fsaverage/scripts/recon-all.env +fsaverage/scripts/recon-all.env.bak +fsaverage/scripts/recon-all.local-copy +fsaverage/scripts/recon-all.log +fsaverage/surf/lh.area +fsaverage/surf/lh.area.seghead +fsaverage/surf/lh.avg_curv +fsaverage/surf/lh.avg_sulc +fsaverage/surf/lh.avg_thickness +fsaverage/surf/lh.cortex.patch.3d +fsaverage/surf/lh.cortex.patch.flat +fsaverage/surf/lh.curv +fsaverage/surf/lh.curv.seghead +fsaverage/surf/lh.fsaverage_sym.sphere.reg +fsaverage/surf/lh.inflated +fsaverage/surf/lh.inflated.H +fsaverage/surf/lh.inflated.K +fsaverage/surf/lh.inflated_avg +fsaverage/surf/lh.inflated_pre +fsaverage/surf/lh.orig +fsaverage/surf/lh.orig.avg.area.mgh +fsaverage/surf/lh.orig_avg +fsaverage/surf/lh.pial +fsaverage/surf/lh.pial.avg.area.mgh +fsaverage/surf/lh.pial_avg +fsaverage/surf/lh.pial_semi_inflated +fsaverage/surf/lh.seghead +fsaverage/surf/lh.seghead.inflated +fsaverage/surf/lh.smoothwm +fsaverage/surf/lh.sphere +fsaverage/surf/lh.sphere.left_right +fsaverage/surf/lh.sphere.reg +fsaverage/surf/lh.sphere.reg.avg +fsaverage/surf/lh.sulc +fsaverage/surf/lh.sulc.seghead +fsaverage/surf/lh.thickness +fsaverage/surf/lh.white +fsaverage/surf/lh.white.avg.area.mgh +fsaverage/surf/lh.white_avg +fsaverage/surf/lh.white_avg.H +fsaverage/surf/lh.white_avg.K +fsaverage/surf/mris_preproc.surface.lh.log 
+fsaverage/surf/mris_preproc.surface.rh.log +fsaverage/surf/rh.area +fsaverage/surf/rh.avg_curv +fsaverage/surf/rh.avg_sulc +fsaverage/surf/rh.avg_thickness +fsaverage/surf/rh.cortex.patch.3d +fsaverage/surf/rh.cortex.patch.flat +fsaverage/surf/rh.curv +fsaverage/surf/rh.fsaverage_sym.sphere.reg +fsaverage/surf/rh.inflated +fsaverage/surf/rh.inflated.H +fsaverage/surf/rh.inflated.K +fsaverage/surf/rh.inflated_avg +fsaverage/surf/rh.inflated_pre +fsaverage/surf/rh.orig +fsaverage/surf/rh.orig.avg.area.mgh +fsaverage/surf/rh.orig_avg +fsaverage/surf/rh.pial +fsaverage/surf/rh.pial.avg.area.mgh +fsaverage/surf/rh.pial_avg +fsaverage/surf/rh.pial_semi_inflated +fsaverage/surf/rh.smoothwm +fsaverage/surf/rh.sphere +fsaverage/surf/rh.sphere.left_right +fsaverage/surf/rh.sphere.reg +fsaverage/surf/rh.sphere.reg.avg +fsaverage/surf/rh.sulc +fsaverage/surf/rh.thickness +fsaverage/surf/rh.white +fsaverage/surf/rh.white.avg.area.mgh +fsaverage/surf/rh.white_avg +fsaverage/surf/rh.white_avg.H +fsaverage/surf/rh.white_avg.K diff --git a/mne/datasets/_infant/ANTS1-0Months3T.txt b/mne/datasets/_infant/ANTS1-0Months3T.txt new file mode 100644 index 0000000..fc77ace --- /dev/null +++ b/mne/datasets/_infant/ANTS1-0Months3T.txt @@ -0,0 +1,117 @@ +bem/ANTS1-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS1-0Months3T-5120-5120-5120-bem.fif +bem/ANTS1-0Months3T-fiducials.fif +bem/ANTS1-0Months3T-head.fif +bem/ANTS1-0Months3T-oct-6-src.fif +bem/ANTS1-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input 
+surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS10-5Months3T.txt b/mne/datasets/_infant/ANTS10-5Months3T.txt new file mode 100644 index 0000000..cec0a3e --- /dev/null +++ b/mne/datasets/_infant/ANTS10-5Months3T.txt @@ -0,0 +1,115 @@ +bem/ANTS10-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS10-5Months3T-5120-5120-5120-bem.fif +bem/ANTS10-5Months3T-fiducials.fif +bem/ANTS10-5Months3T-head.fif +bem/ANTS10-5Months3T-oct-6-src.fif +bem/ANTS10-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS12-0Months3T.txt b/mne/datasets/_infant/ANTS12-0Months3T.txt new file mode 100644 index 0000000..d1fdbbc --- /dev/null +++ b/mne/datasets/_infant/ANTS12-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz 
+atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS12-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS12-0Months3T-5120-5120-5120-bem.fif +bem/ANTS12-0Months3T-fiducials.fif +bem/ANTS12-0Months3T-head.fif +bem/ANTS12-0Months3T-oct-6-src.fif +bem/ANTS12-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS15-0Months3T.txt b/mne/datasets/_infant/ANTS15-0Months3T.txt new file mode 100644 index 0000000..50487c0 --- /dev/null +++ b/mne/datasets/_infant/ANTS15-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS15-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS15-0Months3T-5120-5120-5120-bem.fif +bem/ANTS15-0Months3T-fiducials.fif +bem/ANTS15-0Months3T-head.fif +bem/ANTS15-0Months3T-oct-6-src.fif +bem/ANTS15-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab 
+label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS18-0Months3T.txt b/mne/datasets/_infant/ANTS18-0Months3T.txt new file mode 100644 index 0000000..8f386c8 --- /dev/null +++ b/mne/datasets/_infant/ANTS18-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS18-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS18-0Months3T-5120-5120-5120-bem.fif +bem/ANTS18-0Months3T-fiducials.fif +bem/ANTS18-0Months3T-head.fif +bem/ANTS18-0Months3T-oct-6-src.fif +bem/ANTS18-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz 
+mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS2-0Months3T.txt b/mne/datasets/_infant/ANTS2-0Months3T.txt new file mode 100644 index 0000000..2a6b9c2 --- /dev/null +++ b/mne/datasets/_infant/ANTS2-0Months3T.txt @@ -0,0 +1,117 @@ +bem/ANTS2-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS2-0Months3T-5120-5120-5120-bem.fif +bem/ANTS2-0Months3T-fiducials.fif +bem/ANTS2-0Months3T-head.fif +bem/ANTS2-0Months3T-oct-6-src.fif +bem/ANTS2-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf 
+surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS2-0Weeks3T.txt b/mne/datasets/_infant/ANTS2-0Weeks3T.txt new file mode 100644 index 0000000..e940f24 --- /dev/null +++ b/mne/datasets/_infant/ANTS2-0Weeks3T.txt @@ -0,0 +1,117 @@ +bem/ANTS2-0Weeks3T-5120-5120-5120-bem-sol.fif +bem/ANTS2-0Weeks3T-5120-5120-5120-bem.fif +bem/ANTS2-0Weeks3T-fiducials.fif +bem/ANTS2-0Weeks3T-head.fif +bem/ANTS2-0Weeks3T-oct-6-src.fif +bem/ANTS2-0Weeks3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated 
+surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS2-0Years3T.txt b/mne/datasets/_infant/ANTS2-0Years3T.txt new file mode 100644 index 0000000..7763969 --- /dev/null +++ b/mne/datasets/_infant/ANTS2-0Years3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS2-0Years3T-5120-5120-5120-bem-sol.fif +bem/ANTS2-0Years3T-5120-5120-5120-bem.fif +bem/ANTS2-0Years3T-fiducials.fif +bem/ANTS2-0Years3T-head.fif +bem/ANTS2-0Years3T-oct-6-src.fif +bem/ANTS2-0Years3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS3-0Months3T.txt b/mne/datasets/_infant/ANTS3-0Months3T.txt new file mode 
100644 index 0000000..29a7148 --- /dev/null +++ b/mne/datasets/_infant/ANTS3-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS3-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS3-0Months3T-5120-5120-5120-bem.fif +bem/ANTS3-0Months3T-fiducials.fif +bem/ANTS3-0Months3T-head.fif +bem/ANTS3-0Months3T-oct-6-src.fif +bem/ANTS3-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS4-5Months3T.txt b/mne/datasets/_infant/ANTS4-5Months3T.txt new file mode 100644 index 0000000..b918849 --- /dev/null +++ b/mne/datasets/_infant/ANTS4-5Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS4-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS4-5Months3T-5120-5120-5120-bem.fif +bem/ANTS4-5Months3T-fiducials.fif +bem/ANTS4-5Months3T-head.fif +bem/ANTS4-5Months3T-oct-6-src.fif 
+bem/ANTS4-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS6-0Months3T.txt b/mne/datasets/_infant/ANTS6-0Months3T.txt new file mode 100644 index 0000000..3235de4 --- /dev/null +++ b/mne/datasets/_infant/ANTS6-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS6-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS6-0Months3T-5120-5120-5120-bem.fif +bem/ANTS6-0Months3T-fiducials.fif +bem/ANTS6-0Months3T-head.fif +bem/ANTS6-0Months3T-oct-6-src.fif +bem/ANTS6-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv 
+montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS7-5Months3T.txt b/mne/datasets/_infant/ANTS7-5Months3T.txt new file mode 100644 index 0000000..8b38563 --- /dev/null +++ b/mne/datasets/_infant/ANTS7-5Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS7-5Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS7-5Months3T-5120-5120-5120-bem.fif +bem/ANTS7-5Months3T-fiducials.fif +bem/ANTS7-5Months3T-head.fif +bem/ANTS7-5Months3T-oct-6-src.fif +bem/ANTS7-5Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz 
+mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf +surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/ANTS9-0Months3T.txt b/mne/datasets/_infant/ANTS9-0Months3T.txt new file mode 100644 index 0000000..8d37f25 --- /dev/null +++ b/mne/datasets/_infant/ANTS9-0Months3T.txt @@ -0,0 +1,121 @@ +atlases/brain_ANTS_IXI_atlas_head_space.nii.gz +atlases/brain_ANTS_LPBA40_atlas_head_space.nii.gz +atlases/brain_IXI_atlas_head_space.nii.gz +atlases/brain_LPBA40_atlas_head_space.nii.gz +bem/ANTS9-0Months3T-5120-5120-5120-bem-sol.fif +bem/ANTS9-0Months3T-5120-5120-5120-bem.fif +bem/ANTS9-0Months3T-fiducials.fif +bem/ANTS9-0Months3T-head.fif +bem/ANTS9-0Months3T-oct-6-src.fif +bem/ANTS9-0Months3T-vol-5-src.fif +bem/inner_skull.surf +bem/inner_skull_large.surf +bem/outer_skin.surf +bem/outer_skin_large.surf +bem/outer_skull.surf +bem/outer_skull_large.surf +flirt_trans_mat.txt +label/aparc.annot.ctab +label/lh.aparc.a2009s.annot +label/lh.aparc.annot +label/lh.cortex.label +label/rh.aparc.a2009s.annot +label/rh.aparc.annot +label/rh.cortex.label +montages/10-10-montage.fif +montages/10-10_electrodes.tsv +montages/10-20-montage.fif +montages/10-20_electrodes.tsv +montages/10-5-montage.fif +montages/10-5_electrodes.tsv +montages/HGSN128-montage.fif +montages/HGSN128_electrodes.tsv +montages/HGSN129-montage.fif +montages/HGSN129_electrodes.tsv +mprage.nii.gz +mri/T1.mgz +mri/aparc+aseg.mgz +mri/aseg.count.txt +mri/aseg.mgz +mri/aseg.nii.gz +mri/aseg.presurf.mgz +mri/brain.finalsurfs.mgz +mri/brain.mgz +mri/brain.nii.gz +mri/brainmask.mgz +mri/brainmask.nii.gz +mri/filled.mgz +mri/lh.dpial.ribbon.mgz +mri/lh.dwhite.ribbon.mgz +mri/lh.ribbon.mgz +mri/norm.mgz +mri/norm.nii.gz +mri/rh.dpial.ribbon.mgz +mri/rh.dwhite.ribbon.mgz +mri/rh.ribbon.mgz +mri/ribbon.mgz +mri/transforms/niftyreg_affine.lta +mri/transforms/niftyreg_affine.nii.gz +mri/transforms/niftyreg_affine.txt +mri/transforms/niftyreg_affine.xfm +mri/transforms/talairach.auto.xfm +mri/transforms/talairach.xfm +mri/wm.mgz +surf/10-10-montage.obj +surf/10-10-montage.surf +surf/10-20-montage.obj +surf/10-20-montage.surf +surf/10-5-montage.obj +surf/10-5-montage.surf 
+surf/HGSN128-montage.obj +surf/HGSN128-montage.surf +surf/fiducials.obj +surf/fiducials.surf +surf/lh.area +surf/lh.curv +surf/lh.defects +surf/lh.inflated +surf/lh.inflated.H +surf/lh.inflated.K +surf/lh.input +surf/lh.orig +surf/lh.orig.euler.txt +surf/lh.orig_corrected +surf/lh.pial +surf/lh.qsphere +surf/lh.qsphere.nofix +surf/lh.smoothwm +surf/lh.smoothwm1 +surf/lh.smoothwm2 +surf/lh.smoothwm3 +surf/lh.smoothwm4 +surf/lh.smoothwm5 +surf/lh.sphere +surf/lh.sphere.reg +surf/lh.sulc +surf/lh.thickness +surf/lh.white +surf/rh.area +surf/rh.curv +surf/rh.defects +surf/rh.inflated +surf/rh.inflated.H +surf/rh.inflated.K +surf/rh.input +surf/rh.orig +surf/rh.orig.euler.txt +surf/rh.orig_corrected +surf/rh.pial +surf/rh.qsphere +surf/rh.qsphere.nofix +surf/rh.smoothwm +surf/rh.smoothwm1 +surf/rh.smoothwm2 +surf/rh.smoothwm3 +surf/rh.smoothwm4 +surf/rh.smoothwm5 +surf/rh.sphere +surf/rh.sphere.reg +surf/rh.sulc +surf/rh.thickness +surf/rh.white \ No newline at end of file diff --git a/mne/datasets/_infant/__init__.py b/mne/datasets/_infant/__init__.py new file mode 100644 index 0000000..d787cc6 --- /dev/null +++ b/mne/datasets/_infant/__init__.py @@ -0,0 +1,5 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .base import fetch_infant_template diff --git a/mne/datasets/_infant/base.py b/mne/datasets/_infant/base.py new file mode 100644 index 0000000..6c4d038 --- /dev/null +++ b/mne/datasets/_infant/base.py @@ -0,0 +1,94 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from pathlib import Path + +from ...utils import _check_option, _validate_type, get_subjects_dir, verbose +from ..utils import _manifest_check_download + +_AGES = "2wk 1mo 2mo 3mo 4.5mo 6mo 7.5mo 9mo 10.5mo 12mo 15mo 18mo 2yr" +# https://github.com/christian-oreilly/infant_template_paper/releases +_ORIGINAL_URL = "https://github.com/christian-oreilly/infant_template_paper/releases/download/v0.1-alpha/{subject}.zip" # noqa: E501 +# Formatted the same way as md5sum *.zip on Ubuntu: +_ORIGINAL_HASHES = """ +851737d5f8f246883f2aef9819c6ec29 ANTS10-5Months3T.zip +32ab6d025f4311433a82e81374f1a045 ANTS1-0Months3T.zip +48ef349e7cc542fdf63ff36d7958ab57 ANTS12-0Months3T.zip +bba22c95aa97988c6e8892d6169ed317 ANTS15-0Months3T.zip +e1bfe5e3ef380592822ced446a4008c7 ANTS18-0Months3T.zip +fa7bee6c0985b9cd15ba53820cd72ccd ANTS2-0Months3T.zip +2ad90540cdf42837c09f8ce829458a35 ANTS2-0Weeks3T.zip +73e6a8b2579b7959a96f7d294ffb7393 ANTS2-0Years3T.zip +cb7b9752894e16a4938ddfe220f6286a ANTS3-0Months3T.zip +16b2a6804c7d5443cfba2ad6f7d4ac6a ANTS4-5Months3T.zip +dbdf2a9976121f2b106da96775690da3 ANTS6-0Months3T.zip +75fe37a1bc80ed6793a8abb47681d5ab ANTS7-5Months3T.zip +790f7dba0a264262e6c1c2dfdf216215 ANTS9-0Months3T.zip +""" +_MANIFEST_PATH = Path(__file__).parent + + +@verbose +def fetch_infant_template(age, subjects_dir=None, *, verbose=None): + """Fetch and update an infant MRI template. + + Parameters + ---------- + age : str + Age to download. Can be one of ``{'2wk', '1mo', '2mo', '3mo', '4.5mo', + '6mo', '7.5mo', '9mo', '10.5mo', '12mo', '15mo', '18mo', '2yr'}``. + subjects_dir : str | None + The path to download the template data to. + %(verbose)s + + Returns + ------- + subject : str + The standard subject name, e.g. ``ANTS4-5Month3T``. + + Notes + ----- + If you use these templates in your work, please cite + :footcite:`OReillyEtAl2021` and :footcite:`RichardsEtAl2016`. + + .. 
versionadded:: 0.23 + + References + ---------- + .. footbibliography:: + """ + # Code used to create the lists: + # + # $ for name in 2-0Weeks 1-0Months 2-0Months 3-0Months 4-5Months 6-0Months 7-5Months 9-0Months 10-5Months 12-0Months 15-0Months 18-0Months 2-0Years; do wget https://github.com/christian-oreilly/infant_template_paper/releases/download/v0.1-alpha/ANTS${name}3T.zip; done # noqa: E501 + # $ md5sum ANTS*.zip + # $ python + # >>> import os.path as op + # >>> import zipfile + # >>> names = [f'ANTS{name}3T' for name in '2-0Weeks 1-0Months 2-0Months 3-0Months 4-5Months 6-0Months 7-5Months 9-0Months 10-5Months 12-0Months 15-0Months 18-0Months 2-0Years'.split()] # noqa: E501 + # >>> for name in names: + # ... with zipfile.ZipFile(f'{name}.zip', 'r') as zip: + # ... names = sorted(name for name in zip.namelist() if not zipfile.Path(zip, name).is_dir()) # noqa: E501 + # ... with open(f'{name}.txt', 'w') as fid: + # ... fid.write('\n'.join(names)) + _validate_type(age, str, "age") + _check_option("age", age, _AGES.split()) + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + unit = dict(wk="Weeks", mo="Months", yr="Years")[age[-2:]] + first = age[:-2].split(".")[0] + dash = "-5" if ".5" in age else "-0" + subject = f"ANTS{first}{dash}{unit}3T" + # Actually get and create the files + subject_dir = subjects_dir / subject + subject_dir.mkdir(parents=True, exist_ok=True) + # .zip -> hash mapping + orig_hashes = dict( + line.strip().split()[::-1] for line in _ORIGINAL_HASHES.strip().splitlines() + ) + _manifest_check_download( + manifest_path=_MANIFEST_PATH / f"{subject}.txt", + destination=subject_dir, + url=_ORIGINAL_URL.format(subject=subject), + hash_=orig_hashes[f"{subject}.zip"], + ) + return subject diff --git a/mne/datasets/_phantom/__init__.py b/mne/datasets/_phantom/__init__.py new file mode 100644 index 0000000..04c673b --- /dev/null +++ b/mne/datasets/_phantom/__init__.py @@ -0,0 +1,3 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. diff --git a/mne/datasets/_phantom/base.py b/mne/datasets/_phantom/base.py new file mode 100644 index 0000000..52fa6bf --- /dev/null +++ b/mne/datasets/_phantom/base.py @@ -0,0 +1,63 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from pathlib import Path + +from ...utils import _check_option, _validate_type, get_subjects_dir, verbose +from ..utils import _manifest_check_download + +PHANTOM_MANIFEST_PATH = Path(__file__).parent + + +@verbose +def fetch_phantom(kind, subjects_dir=None, *, verbose=None): + """Fetch and update a phantom subject. + + Parameters + ---------- + kind : str + The kind of phantom to fetch. Can only be ``'otaniemi'`` (default). + %(subjects_dir)s + %(verbose)s + + Returns + ------- + subject_dir : pathlib.Path + The resulting phantom subject directory. + + See Also + -------- + mne.dipole.get_phantom_dipoles + + Notes + ----- + This function is designed to provide a head surface and T1.mgz for + the 32-dipole Otaniemi phantom. The VectorView/TRIUX phantom has the same + basic outside geometry, but different internal dipole positions. + + Unlike most FreeSurfer subjects, the Otaniemi phantom scan was aligned + to the "head" coordinate frame, so an identity head<->MRI :term:`trans` + is appropriate. + + .. 
versionadded:: 0.24 + """ + phantoms = dict( + otaniemi=dict( + url="https://osf.io/j5czy/download?version=1", + hash="42d17db5b1db3e30327ffb4cf2649de8", + ), + ) + _validate_type(kind, str, "kind") + _check_option("kind", kind, list(phantoms)) + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + subject = f"phantom_{kind}" + subject_dir = subjects_dir / subject + subject_dir.mkdir(parents=True, exist_ok=True) + _manifest_check_download( + manifest_path=PHANTOM_MANIFEST_PATH / f"{subject}.txt", + destination=subjects_dir, + url=phantoms[kind]["url"], + hash_=phantoms[kind]["hash"], + ) + return subject_dir diff --git a/mne/datasets/_phantom/phantom_otaniemi.txt b/mne/datasets/_phantom/phantom_otaniemi.txt new file mode 100644 index 0000000..84f8302 --- /dev/null +++ b/mne/datasets/_phantom/phantom_otaniemi.txt @@ -0,0 +1,3 @@ +phantom_otaniemi/bem/phantom_otaniemi-fiducials.fif +phantom_otaniemi/mri/T1.mgz +phantom_otaniemi/surf/lh.seghead diff --git a/mne/datasets/brainstorm/__init__.py b/mne/datasets/brainstorm/__init__.py new file mode 100644 index 0000000..90a0eaa --- /dev/null +++ b/mne/datasets/brainstorm/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Brainstorm datasets.""" + +from . import bst_raw, bst_resting, bst_auditory, bst_phantom_ctf, bst_phantom_elekta diff --git a/mne/datasets/brainstorm/bst_auditory.py b/mne/datasets/brainstorm/bst_auditory.py new file mode 100644 index 0000000..230fd67 --- /dev/null +++ b/mne/datasets/brainstorm/bst_auditory.py @@ -0,0 +1,69 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import ( + _data_path_doc_accept, + _download_mne_dataset, + _get_version, + _version_doc, +) + +_description = """ +URL: http://neuroimage.usc.edu/brainstorm/DatasetAuditory + - One subject, two acquisition runs of 6 minutes each + - Subject stimulated binaurally with intra-aural earphones + (air tubes+transducers) + - Each run contains: + - 200 regular beeps (440Hz) + - 40 easy deviant beeps (554.4Hz, 4 semitones higher) + - Random inter-stimulus interval: between 0.7s and 1.7s seconds, uniformly + distributed + - The subject presses a button when detecting a deviant with the right + index finger + - Auditory stimuli generated with the Matlab Psychophysics toolbox +""" + + +@verbose +def data_path( + path=None, + force_update=False, + update_path=True, + download=True, + accept=False, + *, + verbose=None, +): # noqa: D103 + return _download_mne_dataset( + name="bst_auditory", + processor="nested_untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + accept=accept, + ) + + +_data_path_doc = _data_path_doc_accept.format( + name="brainstorm", conf="MNE_DATASETS_BRAINSTORM_DATA_PATH" +) +_data_path_doc = _data_path_doc.replace( + "brainstorm dataset", "brainstorm (bst_auditory) dataset" +) +data_path.__doc__ = _data_path_doc + + +def get_version(): # noqa: D103 + return _get_version("bst_auditory") + + +get_version.__doc__ = _version_doc.format(name="brainstorm") + + +def description(): + """Get description of brainstorm (bst_auditory) dataset.""" + for desc in _description.splitlines(): + print(desc) diff --git a/mne/datasets/brainstorm/bst_phantom_ctf.py b/mne/datasets/brainstorm/bst_phantom_ctf.py new file mode 100644 index 0000000..328a774 --- /dev/null +++ b/mne/datasets/brainstorm/bst_phantom_ctf.py @@ -0,0 
+1,58 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import ( + _data_path_doc_accept, + _download_mne_dataset, + _get_version, + _version_doc, +) + +_description = """ +URL: http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomCtf +""" + + +@verbose +def data_path( + path=None, + force_update=False, + update_path=True, + download=True, + accept=False, + *, + verbose=None, +): # noqa: D103 + return _download_mne_dataset( + name="bst_phantom_ctf", + processor="nested_untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + accept=accept, + ) + + +_data_path_doc = _data_path_doc_accept.format( + name="brainstorm", conf="MNE_DATASETS_BRAINSTORM_DATA_PATH" +) +_data_path_doc = _data_path_doc.replace( + "brainstorm dataset", "brainstorm (bst_phantom_ctf) dataset" +) +data_path.__doc__ = _data_path_doc + + +def get_version(): # noqa: D103 + return _get_version("bst_phantom_ctf") + + +get_version.__doc__ = _version_doc.format(name="brainstorm") + + +def description(): + """Get description of brainstorm (bst_phantom_ctf) dataset.""" + for desc in _description.splitlines(): + print(desc) diff --git a/mne/datasets/brainstorm/bst_phantom_elekta.py b/mne/datasets/brainstorm/bst_phantom_elekta.py new file mode 100644 index 0000000..1a7e3ac --- /dev/null +++ b/mne/datasets/brainstorm/bst_phantom_elekta.py @@ -0,0 +1,58 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import ( + _data_path_doc_accept, + _download_mne_dataset, + _get_version, + _version_doc, +) + +_description = """ +URL: http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomElekta +""" + + +@verbose +def data_path( + path=None, + force_update=False, + update_path=True, + download=True, + accept=False, + *, + verbose=None, +): # noqa: D103 + return _download_mne_dataset( + name="bst_phantom_elekta", + processor="nested_untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + accept=accept, + ) + + +_data_path_doc = _data_path_doc_accept.format( + name="brainstorm", conf="MNE_DATASETS_BRAINSTORM_DATA_PATH" +) +_data_path_doc = _data_path_doc.replace( + "brainstorm dataset", "brainstorm (bst_phantom_elekta) dataset" +) +data_path.__doc__ = _data_path_doc + + +def get_version(): # noqa: D103 + return _get_version("bst_phantom_elekta") + + +get_version.__doc__ = _version_doc.format(name="brainstorm") + + +def description(): + """Get description of brainstorm (bst_phantom_elekta) dataset.""" + for desc in _description.splitlines(): + print(desc) diff --git a/mne/datasets/brainstorm/bst_raw.py b/mne/datasets/brainstorm/bst_raw.py new file mode 100644 index 0000000..13a4aa3 --- /dev/null +++ b/mne/datasets/brainstorm/bst_raw.py @@ -0,0 +1,88 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
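Each of the Brainstorm modules above (bst_auditory, bst_phantom_ctf, bst_phantom_elekta, and bst_raw just below) follows the same wrapper pattern: `data_path` delegates to `_download_mne_dataset` with `processor="nested_untar"`, and `description` prints the module-level `_description` text. A minimal sketch of how one of these wrappers is called, assuming the Brainstorm license is accepted up front (the first call triggers a download):

```python
# Minimal sketch: calling one of the Brainstorm dataset wrappers shown above.
# accept=True acknowledges the Brainstorm license text non-interactively;
# the first call downloads and untars the archive under the MNE data folder.
from mne.datasets.brainstorm import bst_phantom_elekta

bst_phantom_elekta.description()                  # prints the _description block
path = bst_phantom_elekta.data_path(accept=True)  # local MNE-brainstorm-data path
print(bst_phantom_elekta.get_version(), path)
```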
+ +from functools import partial + +from ...utils import get_config, verbose +from ..utils import ( + _data_path_doc_accept, + _download_mne_dataset, + _get_version, + _version_doc, + has_dataset, +) + +has_brainstorm_data = partial(has_dataset, name="bst_raw") + +_description = """ +URL: http://neuroimage.usc.edu/brainstorm/DatasetMedianNerveCtf + - One subject, one acquisition run of 6 minutes + - Subject stimulated using Digitimer Constant Current Stimulator + (model DS7A) + - The run contains 200 electric stimulations randomly distributed between + left and right: + - 102 stimulations of the left hand + - 98 stimulations of the right hand + - Inter-stimulus interval: jittered between [1500, 2000]ms + - Stimuli generated using PsychToolBox on Windows PC (TTL pulse generated + with the parallel port connected to the Digitimer via the rear panel BNC) +""" + + +@verbose +def data_path( + path=None, + force_update=False, + update_path=True, + download=True, + accept=False, + *, + verbose=None, +): # noqa: D103 + return _download_mne_dataset( + name="bst_raw", + processor="nested_untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + accept=accept, + ) + + +_data_path_doc = _data_path_doc_accept.format( + name="brainstorm", conf="MNE_DATASETS_BRAINSTORM_DATA_PATH" +) +_data_path_doc = _data_path_doc.replace( + "brainstorm dataset", "brainstorm (bst_raw) dataset" +) +data_path.__doc__ = _data_path_doc + + +def get_version(): # noqa: D103 + return _get_version("bst_raw") + + +get_version.__doc__ = _version_doc.format(name="brainstorm") + + +def description(): # noqa: D103 + """Get description of brainstorm (bst_raw) dataset.""" + for desc in _description.splitlines(): + print(desc) + + +def _skip_bstraw_data(): + skip_testing = get_config("MNE_SKIP_TESTING_DATASET_TESTS", "false") == "true" + skip = skip_testing or not has_brainstorm_data() + return skip + + +def requires_bstraw_data(func): + """Skip testing data test.""" + import pytest + + return pytest.mark.skipif( + _skip_bstraw_data(), reason="Requires brainstorm dataset" + )(func) diff --git a/mne/datasets/brainstorm/bst_resting.py b/mne/datasets/brainstorm/bst_resting.py new file mode 100644 index 0000000..d740cf1 --- /dev/null +++ b/mne/datasets/brainstorm/bst_resting.py @@ -0,0 +1,61 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
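The `requires_bstraw_data` decorator at the end of bst_raw.py above is a pytest guard rather than part of the download machinery. A sketch of its intended use, assuming pytest is installed; the test module and test name here are hypothetical:

```python
# Hypothetical test illustrating the requires_bstraw_data guard above.
# The test is skipped when MNE_SKIP_TESTING_DATASET_TESTS is "true" or when
# the bst_raw dataset has not been downloaded locally.
from mne.datasets.brainstorm.bst_raw import data_path, requires_bstraw_data


@requires_bstraw_data
def test_bst_raw_available():
    # download=False: the guard already ensured a local copy exists,
    # so this only resolves the path without touching the network.
    path = data_path(accept=True, download=False)
    assert path
```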
+ +from ...utils import verbose +from ..utils import ( + _data_path_doc_accept, + _download_mne_dataset, + _get_version, + _version_doc, +) + +_description = """ +URL: http://neuroimage.usc.edu/brainstorm/DatasetResting + - One subject + - Two runs of 10 min of resting state recordings + - Eyes open +""" + + +@verbose +def data_path( + path=None, + force_update=False, + update_path=True, + download=True, + accept=False, + *, + verbose=None, +): # noqa: D103 + return _download_mne_dataset( + name="bst_resting", + processor="nested_untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + accept=accept, + ) + + +_data_path_doc = _data_path_doc_accept.format( + name="brainstorm", conf="MNE_DATASETS_BRAINSTORM_DATA_PATH" +) +_data_path_doc = _data_path_doc.replace( + "brainstorm dataset", "brainstorm (bst_resting) dataset" +) +data_path.__doc__ = _data_path_doc + + +def get_version(): # noqa: D103 + return _get_version("bst_resting") + + +get_version.__doc__ = _version_doc.format(name="brainstorm") + + +def description(): + """Get description of brainstorm (bst_resting) dataset.""" + for desc in _description.splitlines(): + print(desc) diff --git a/mne/datasets/config.py b/mne/datasets/config.py new file mode 100644 index 0000000..ccd4bab --- /dev/null +++ b/mne/datasets/config.py @@ -0,0 +1,371 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + + +_bst_license_text = """ +License +------- +This tutorial dataset (EEG and MRI data) remains a property of the MEG Lab, +McConnell Brain Imaging Center, Montreal Neurological Institute, +McGill University, Canada. Its use and transfer outside the Brainstorm +tutorial, e.g. for research purposes, is prohibited without written consent +from the MEG Lab. + +If you reference this dataset in your publications, please: + + 1) acknowledge its authors: Elizabeth Bock, Esther Florin, Francois Tadel + and Sylvain Baillet, and + 2) cite Brainstorm as indicated on the website: + http://neuroimage.usc.edu/brainstorm + +For questions, please contact Francois Tadel (francois.tadel@mcgill.ca). +""" + +_hcp_mmp_license_text = """ +License +------- +I request access to data collected by the Washington University - University +of Minnesota Consortium of the Human Connectome Project (WU-Minn HCP), and +I agree to the following: + +1. I will not attempt to establish the identity of or attempt to contact any + of the included human subjects. + +2. I understand that under no circumstances will the code that would link + these data to Protected Health Information be given to me, nor will any + additional information about individual human subjects be released to me + under these Open Access Data Use Terms. + +3. I will comply with all relevant rules and regulations imposed by my + institution. This may mean that I need my research to be approved or + declared exempt by a committee that oversees research on human subjects, + e.g. my IRB or Ethics Committee. The released HCP data are not considered + de-identified, insofar as certain combinations of HCP Restricted Data + (available through a separate process) might allow identification of + individuals. Different committees operate under different national, state + and local laws and may interpret regulations differently, so it is + important to ask about this. If needed and upon request, the HCP will + provide a certificate stating that you have accepted the HCP Open Access + Data Use Terms. + +4. 
I may redistribute original WU-Minn HCP Open Access data and any derived + data as long as the data are redistributed under these same Data Use Terms. + +5. I will acknowledge the use of WU-Minn HCP data and data derived from + WU-Minn HCP data when publicly presenting any results or algorithms + that benefitted from their use. + + 1. Papers, book chapters, books, posters, oral presentations, and all + other printed and digital presentations of results derived from HCP + data should contain the following wording in the acknowledgments + section: "Data were provided [in part] by the Human Connectome + Project, WU-Minn Consortium (Principal Investigators: David Van Essen + and Kamil Ugurbil; 1U54MH091657) funded by the 16 NIH Institutes and + Centers that support the NIH Blueprint for Neuroscience Research; and + by the McDonnell Center for Systems Neuroscience at Washington + University." + + 2. Authors of publications or presentations using WU-Minn HCP data + should cite relevant publications describing the methods used by the + HCP to acquire and process the data. The specific publications that + are appropriate to cite in any given study will depend on what HCP + data were used and for what purposes. An annotated and appropriately + up-to-date list of publications that may warrant consideration is + available at http://www.humanconnectome.org/about/acknowledgehcp.html + + 3. The WU-Minn HCP Consortium as a whole should not be included as an + author of publications or presentations if this authorship would be + based solely on the use of WU-Minn HCP data. + +6. Failure to abide by these guidelines will result in termination of my + privileges to access WU-Minn HCP data. +""" + +# To update the `testing` or `misc` datasets, push or merge commits to their +# respective repos, and make a new release of the dataset on GitHub. Then +# update the checksum in the MNE_DATASETS dict below, and change version +# here: ↓↓↓↓↓↓↓↓ +RELEASES = dict( + testing="0.156", + misc="0.27", + phantom_kit="0.2", + ucl_opm_auditory="0.2", +) +TESTING_VERSIONED = f'mne-testing-data-{RELEASES["testing"]}' +MISC_VERSIONED = f'mne-misc-data-{RELEASES["misc"]}' + +# To update any other dataset besides `testing` or `misc`, upload the new +# version of the data archive itself (e.g., to https://osf.io or wherever) and +# then update the corresponding checksum in the MNE_DATASETS dict entry below. +MNE_DATASETS = dict() + +# MANDATORY KEYS: +# - archive_name : the name of the compressed file that is downloaded +# - hash : the checksum type followed by a colon and then the checksum value +# (examples: "sha256:19uheid...", "md5:upodh2io...") +# - url : URL from which the file can be downloaded +# - folder_name : the subfolder within the MNE data folder in which to save and +# uncompress (if needed) the file(s) +# +# OPTIONAL KEYS: +# - config_key : key to use with `mne.set_config` to store the on-disk location +# of the downloaded dataset (ex: "MNE_DATASETS_EEGBCI_PATH"). + +# Testing and misc are at the top as they're updated most often +MNE_DATASETS["testing"] = dict( + archive_name=f"{TESTING_VERSIONED}.tar.gz", + hash="md5:d94fe9f3abe949a507eaeb865fb84a3f", + url=( + "https://codeload.github.com/mne-tools/mne-testing-data/" + f'tar.gz/{RELEASES["testing"]}' + ), + # In case we ever have to resort to osf.io again... 
+ # archive_name='mne-testing-data.tar.gz', + # hash='md5:c805a5fed8ca46f723e7eec828d90824', + # url='https://osf.io/dqfgy/download?version=1', # 0.136 + folder_name="MNE-testing-data", + config_key="MNE_DATASETS_TESTING_PATH", +) +MNE_DATASETS["misc"] = dict( + archive_name=f"{MISC_VERSIONED}.tar.gz", # 'mne-misc-data', + hash="md5:e343d3a00cb49f8a2f719d14f4758afe", + url=( + "https://codeload.github.com/mne-tools/mne-misc-data/tar.gz/" + f'{RELEASES["misc"]}' + ), + folder_name="MNE-misc-data", + config_key="MNE_DATASETS_MISC_PATH", +) + +MNE_DATASETS["fnirs_motor"] = dict( + archive_name="MNE-fNIRS-motor-data.tgz", + hash="md5:c4935d19ddab35422a69f3326a01fef8", + url="https://osf.io/dj3eh/download?version=1", + folder_name="MNE-fNIRS-motor-data", + config_key="MNE_DATASETS_FNIRS_MOTOR_PATH", +) + +MNE_DATASETS["ucl_opm_auditory"] = dict( + archive_name="auditory_OPM_stationary.zip", + hash="md5:b2d69aa2d656b960bd0c18968dc1a14d", + url="https://osf.io/download/tp324/?version=1", # original is mwrt3 + folder_name="auditory_OPM_stationary", + config_key="MNE_DATASETS_UCL_OPM_AUDITORY_PATH", +) + +MNE_DATASETS["kiloword"] = dict( + archive_name="MNE-kiloword-data.tar.gz", + hash="md5:3a124170795abbd2e48aae8727e719a8", + url="https://osf.io/qkvf9/download?version=1", + folder_name="MNE-kiloword-data", + config_key="MNE_DATASETS_KILOWORD_PATH", +) + +MNE_DATASETS["multimodal"] = dict( + archive_name="MNE-multimodal-data.tar.gz", + hash="md5:26ec847ae9ab80f58f204d09e2c08367", + url="https://ndownloader.figshare.com/files/5999598", + folder_name="MNE-multimodal-data", + config_key="MNE_DATASETS_MULTIMODAL_PATH", +) + +MNE_DATASETS["opm"] = dict( + archive_name="MNE-OPM-data.tar.gz", + hash="md5:370ad1dcfd5c47e029e692c85358a374", + url="https://osf.io/p6ae7/download?version=2", + folder_name="MNE-OPM-data", + config_key="MNE_DATASETS_OPM_PATH", +) + +MNE_DATASETS["phantom_kit"] = dict( + archive_name="MNE-phantom-KIT-data.tar.gz", + hash="md5:7bfdf40bbeaf17a66c99c695640e0740", + url="https://osf.io/fb6ya/download?version=1", + folder_name="MNE-phantom-KIT-data", + config_key="MNE_DATASETS_PHANTOM_KIT_PATH", +) + +MNE_DATASETS["phantom_4dbti"] = dict( + archive_name="MNE-phantom-4DBTi.zip", + hash="md5:938a601440f3ffa780d20a17bae039ff", + url="https://osf.io/v2brw/download?version=2", + folder_name="MNE-phantom-4DBTi", + config_key="MNE_DATASETS_PHANTOM_4DBTI_PATH", +) + +MNE_DATASETS["phantom_kernel"] = dict( + archive_name="MNE-phantom-kernel.tar.gz", + hash="md5:4e2ad987dac1a20f95bae8ffeb2d41d6", + url="https://osf.io/dj7wz/download?version=1", + folder_name="MNE-phantom-kernel-data", + config_key="MNE_DATASETS_PHANTOM_KERNEL_PATH", +) + +MNE_DATASETS["sample"] = dict( + archive_name="MNE-sample-data-processed.tar.gz", + hash="md5:e8f30c4516abdc12a0c08e6bae57409c", + url="https://osf.io/86qa2/download?version=6", + folder_name="MNE-sample-data", + config_key="MNE_DATASETS_SAMPLE_PATH", +) + +MNE_DATASETS["somato"] = dict( + archive_name="MNE-somato-data.tar.gz", + hash="md5:32fd2f6c8c7eb0784a1de6435273c48b", + url="https://osf.io/tp4sg/download?version=7", + folder_name="MNE-somato-data", + config_key="MNE_DATASETS_SOMATO_PATH", +) + +MNE_DATASETS["spm"] = dict( + archive_name="MNE-spm-face.tar.gz", + hash="md5:9f43f67150e3b694b523a21eb929ea75", + url="https://osf.io/je4s8/download?version=2", + folder_name="MNE-spm-face", + config_key="MNE_DATASETS_SPM_FACE_PATH", +) + +# Visual 92 categories has the dataset split into 2 files. 
+# We define a dictionary holding the items with the same +# value across both files: folder name and configuration key. +MNE_DATASETS["visual_92_categories"] = dict( + folder_name="MNE-visual_92_categories-data", + config_key="MNE_DATASETS_VISUAL_92_CATEGORIES_PATH", +) +MNE_DATASETS["visual_92_categories_1"] = dict( + archive_name="MNE-visual_92_categories-data-part1.tar.gz", + hash="md5:74f50bbeb65740903eadc229c9fa759f", + url="https://osf.io/8ejrs/download?version=1", + folder_name="MNE-visual_92_categories-data", + config_key="MNE_DATASETS_VISUAL_92_CATEGORIES_PATH", +) +MNE_DATASETS["visual_92_categories_2"] = dict( + archive_name="MNE-visual_92_categories-data-part2.tar.gz", + hash="md5:203410a98afc9df9ae8ba9f933370e20", + url="https://osf.io/t4yjp/download?version=1", + folder_name="MNE-visual_92_categories-data", + config_key="MNE_DATASETS_VISUAL_92_CATEGORIES_PATH", +) + +MNE_DATASETS["mtrf"] = dict( + archive_name="mTRF_1.5.zip", + hash="md5:273a390ebbc48da2c3184b01a82e4636", + url="https://osf.io/h85s2/download?version=1", + folder_name="mTRF_1.5", + config_key="MNE_DATASETS_MTRF_PATH", +) +MNE_DATASETS["refmeg_noise"] = dict( + archive_name="sample_reference_MEG_noise-raw.zip", + hash="md5:779fecd890d98b73a4832e717d7c7c45", + url="https://osf.io/drt6v/download?version=1", + folder_name="MNE-refmeg-noise-data", + config_key="MNE_DATASETS_REFMEG_NOISE_PATH", +) + +MNE_DATASETS["ssvep"] = dict( + archive_name="ssvep_example_data.zip", + hash="md5:af866bbc0f921114ac9d683494fe87d6", + url="https://osf.io/z8h6k/download?version=5", + folder_name="ssvep-example-data", + config_key="MNE_DATASETS_SSVEP_PATH", +) + +MNE_DATASETS["erp_core"] = dict( + archive_name="MNE-ERP-CORE-data.tar.gz", + hash="md5:5866c0d6213bd7ac97f254c776f6c4b1", + url="https://osf.io/rzgba/download?version=1", + folder_name="MNE-ERP-CORE-data", + config_key="MNE_DATASETS_ERP_CORE_PATH", +) + +MNE_DATASETS["epilepsy_ecog"] = dict( + archive_name="MNE-epilepsy-ecog-data.tar.gz", + hash="md5:ffb139174afa0f71ec98adbbb1729dea", + url="https://osf.io/z4epq/download?version=1", + folder_name="MNE-epilepsy-ecog-data", + config_key="MNE_DATASETS_EPILEPSY_ECOG_PATH", +) + +# Fieldtrip CMC dataset +MNE_DATASETS["fieldtrip_cmc"] = dict( + archive_name="SubjectCMC.zip", + hash="md5:6f9fd6520f9a66e20994423808d2528c", + url="https://osf.io/j9b6s/download?version=1", + folder_name="MNE-fieldtrip_cmc-data", + config_key="MNE_DATASETS_FIELDTRIP_CMC_PATH", +) + +# brainstorm datasets: +MNE_DATASETS["bst_auditory"] = dict( + archive_name="bst_auditory.tar.gz", + hash="md5:fa371a889a5688258896bfa29dd1700b", + url="https://osf.io/5t9n8/download?version=1", + folder_name="MNE-brainstorm-data", + config_key="MNE_DATASETS_BRAINSTORM_PATH", +) +MNE_DATASETS["bst_phantom_ctf"] = dict( + archive_name="bst_phantom_ctf.tar.gz", + hash="md5:80819cb7f5b92d1a5289db3fb6acb33c", + url="https://osf.io/sxr8y/download?version=1", + folder_name="MNE-brainstorm-data", + config_key="MNE_DATASETS_BRAINSTORM_PATH", +) +MNE_DATASETS["bst_phantom_elekta"] = dict( + archive_name="bst_phantom_elekta.tar.gz", + hash="md5:1badccbe17998d18cc373526e86a7aaf", + url="https://osf.io/dpcku/download?version=1", + folder_name="MNE-brainstorm-data", + config_key="MNE_DATASETS_BRAINSTORM_PATH", +) +MNE_DATASETS["bst_raw"] = dict( + archive_name="bst_raw.tar.gz", + hash="md5:fa2efaaec3f3d462b319bc24898f440c", + url="https://osf.io/9675n/download?version=2", + folder_name="MNE-brainstorm-data", + config_key="MNE_DATASETS_BRAINSTORM_PATH", +) +MNE_DATASETS["bst_resting"] = 
dict( + archive_name="bst_resting.tar.gz", + hash="md5:70fc7bf9c3b97c4f2eab6260ee4a0430", + url="https://osf.io/m7bd3/download?version=3", + folder_name="MNE-brainstorm-data", + config_key="MNE_DATASETS_BRAINSTORM_PATH", +) + +# HF-SEF +MNE_DATASETS["hf_sef_raw"] = dict( + archive_name="hf_sef_raw.tar.gz", + hash="md5:33934351e558542bafa9b262ac071168", + url="https://zenodo.org/record/889296/files/hf_sef_raw.tar.gz", + folder_name="hf_sef", + config_key="MNE_DATASETS_HF_SEF_PATH", +) +MNE_DATASETS["hf_sef_evoked"] = dict( + archive_name="hf_sef_evoked.tar.gz", + hash="md5:13d34cb5db584e00868677d8fb0aab2b", + # Zenodo can be slow, so we use the OSF mirror + # url=('https://zenodo.org/record/3523071/files/' + # 'hf_sef_evoked.tar.gz'), + url="https://osf.io/25f8d/download?version=2", + folder_name="hf_sef", + config_key="MNE_DATASETS_HF_SEF_PATH", +) + +# "fake" dataset (for testing) +MNE_DATASETS["fake"] = dict( + archive_name="foo.tgz", + hash="md5:3194e9f7b46039bb050a74f3e1ae9908", + url="https://github.com/mne-tools/mne-testing-data/raw/master/datasets/foo.tgz", + folder_name="foo", + config_key="MNE_DATASETS_FAKE_PATH", +) + +# eyelink dataset +MNE_DATASETS["eyelink"] = dict( + archive_name="MNE-eyelink-data.zip", + hash="md5:68a6323ef17d655f1a659c3290ee1c3f", + url=("https://osf.io/xsu4g/download?version=1"), + folder_name="MNE-eyelink-data", + config_key="MNE_DATASETS_EYELINK_PATH", +) diff --git a/mne/datasets/eegbci/__init__.py b/mne/datasets/eegbci/__init__.py new file mode 100644 index 0000000..ac2a717 --- /dev/null +++ b/mne/datasets/eegbci/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""EEG Motor Movement/Imagery Dataset.""" + +from .eegbci import data_path, load_data, standardize diff --git a/mne/datasets/eegbci/eegbci.py b/mne/datasets/eegbci/eegbci.py new file mode 100644 index 0000000..91d78f5 --- /dev/null +++ b/mne/datasets/eegbci/eegbci.py @@ -0,0 +1,268 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +import re +import time +from importlib.resources import files +from os import path as op +from pathlib import Path + +from ...utils import _url_to_local_path, logger, verbose, warn +from ..utils import _do_path_update, _downloader_params, _get_path, _log_time_size + +EEGMI_URL = "https://physionet.org/files/eegmmidb/1.0.0/" + + +@verbose +def data_path(url, path=None, force_update=False, update_path=None, *, verbose=None): + """Get path to local copy of EEGMMI dataset URL. + + This is a low-level function useful for getting a local copy of a remote EEGBCI + dataset :footcite:`SchalkEtAl2004`, which is also available at PhysioNet + :footcite:`GoldbergerEtAl2000`. Metadata, such as the meaning of event markers + may be obtained from the + `PhysioNet documentation page `_. + + Parameters + ---------- + url : str + The dataset to use. + path : None | path-like + Location of where to look for the EEGBCI data. If ``None``, the environment + variable or config parameter ``MNE_DATASETS_EEGBCI_PATH`` is used. If neither + exists, the ``~/mne_data`` directory is used. If the EEGBCI dataset is not found + under the given path, the data will be automatically downloaded to the specified + folder. + force_update : bool + Force update of the dataset even if a local copy exists. + update_path : bool | None + If ``True``, set ``MNE_DATASETS_EEGBCI_PATH`` in the configuration to the given + path. 
If ``None``, the user is prompted. + %(verbose)s + + Returns + ------- + path : list of Path + Local path to the given data file. This path is contained inside a list of + length one for compatibility. + + Notes + ----- + For example, one could do: + + >>> from mne.datasets import eegbci + >>> url = "http://www.physionet.org/physiobank/database/eegmmidb/" + >>> eegbci.data_path(url, "~/datasets") # doctest:+SKIP + + This would download the given EEGBCI data file to the ``~/datasets`` folder and + prompt the user to store this path in the config (if it does not already exist). + + References + ---------- + .. footbibliography:: + """ + import pooch + + key = "MNE_DATASETS_EEGBCI_PATH" + name = "EEGBCI" + path = _get_path(path, key, name) + fname = "MNE-eegbci-data" + destination = _url_to_local_path(url, op.join(path, fname)) + destinations = [destination] + + # fetch the file + downloader = pooch.HTTPDownloader(**_downloader_params()) + if not op.isfile(destination) or force_update: + if op.isfile(destination): + os.remove(destination) + if not op.isdir(op.dirname(destination)): + os.makedirs(op.dirname(destination)) + pooch.retrieve( + url=url, + path=destination, + downloader=downloader, + fname=fname, + ) + + # offer to update the path + _do_path_update(path, update_path, key, name) + destinations = [Path(dest) for dest in destinations] + return destinations + + +@verbose +def load_data( + subjects=None, + runs=None, + *, + subject=None, + path=None, + force_update=False, + update_path=None, + base_url=EEGMI_URL, + verbose=None, +): # noqa: D301 + """Get paths to local copies of EEGBCI dataset files. + + This will fetch data for the EEGBCI dataset :footcite:`SchalkEtAl2004`, which is + also available at PhysioNet :footcite:`GoldbergerEtAl2000`. Metadata, such as the + meaning of event markers may be obtained from the + `PhysioNet documentation page `_. + + Parameters + ---------- + subjects : int | list of int + The subjects to use. Can be in the range of 1-109 (inclusive). + runs : int | list of int + The runs to use (see Notes for details). + subject : int + This parameter is deprecated and will be removed in mne version 1.9. + Please use ``subjects`` instead. + path : None | path-like + Location of where to look for the EEGBCI data. If ``None``, the environment + variable or config parameter ``MNE_DATASETS_EEGBCI_PATH`` is used. If neither + exists, the ``~/mne_data`` directory is used. If the EEGBCI dataset is not found + under the given path, the data will be automatically downloaded to the specified + folder. + force_update : bool + Force update of the dataset even if a local copy exists. + update_path : bool | None + If ``True``, set ``MNE_DATASETS_EEGBCI_PATH`` in the configuration to the given + path. If ``None``, the user is prompted. + base_url : str + The URL root for the data. + %(verbose)s + + Returns + ------- + paths : list + List of local data paths of the given type. 
+ + Notes + ----- + The run numbers correspond to: + + ========= =================================== + run task + ========= =================================== + 1 Baseline, eyes open + 2 Baseline, eyes closed + 3, 7, 11 Motor execution: left vs right hand + 4, 8, 12 Motor imagery: left vs right hand + 5, 9, 13 Motor execution: hands vs feet + 6, 10, 14 Motor imagery: hands vs feet + ========= =================================== + + For example, one could do:: + + >>> from mne.datasets import eegbci + >>> eegbci.load_data([1, 2], [6, 10, 14], "~/datasets") # doctest:+SKIP + + This would download runs 6, 10, and 14 (hand/foot motor imagery) runs from subjects + 1 and 2 in the EEGBCI dataset to "~/datasets" and prompt the user to store this path + in the config (if it does not already exist). + + References + ---------- + .. footbibliography:: + """ + import pooch + + # XXX: Remove this with mne 1.9 ↓↓↓ + # Also remove the subject parameter at that point. + # Also remove the `None` default for subjects and runs params at that point. + if subject is not None: + subjects = subject + warn( + "The ``subject`` parameter is deprecated and will be removed in version " + "1.9. Use the ``subjects`` parameter (note the `s`) to suppress this " + "warning.", + FutureWarning, + ) + del subject + if subjects is None or runs is None: + raise ValueError("You must pass the parameters ``subjects`` and ``runs``.") + # ↑↑↑ + + t0 = time.time() + + if not hasattr(subjects, "__iter__"): + subjects = [subjects] + + if not hasattr(runs, "__iter__"): + runs = [runs] + + # get local storage path + config_key = "MNE_DATASETS_EEGBCI_PATH" + folder = "MNE-eegbci-data" + name = "EEGBCI" + path = _get_path(path, config_key, name) + + # extract path parts + pattern = r"(?:https?://.*)(files)/(eegmmidb)/(\d+\.\d+\.\d+)/?" + match = re.compile(pattern).match(base_url) + if match is None: + raise ValueError( + "base_url does not match the expected EEGMI folder " + "structure. Please notify MNE-Python developers." + ) + base_path = op.join(path, folder, *match.groups()) + + # create the download manager + fetcher = pooch.create( + path=base_path, + base_url=base_url, + version=None, # data versioning is decoupled from MNE-Python version + registry=None, # registry is loaded from file (below) + retry_if_failed=2, # 2 retries = 3 total attempts + ) + + # load the checksum registry + registry = files("mne").joinpath("data", "eegbci_checksums.txt") + fetcher.load_registry(registry) + + # fetch the file(s) + data_paths = [] + sz = 0 + for subject in subjects: + for run in runs: + file_part = f"S{subject:03d}/S{subject:03d}R{run:02d}.edf" + destination = Path(base_path, file_part) + data_paths.append(destination) + if destination.exists(): + if force_update: + destination.unlink() + else: + continue + if sz == 0: # log once + logger.info("Downloading EEGBCI data") + fetcher.fetch(file_part) + # update path in config if desired + sz += destination.stat().st_size + + _do_path_update(path, update_path, config_key, name) + if sz > 0: + _log_time_size(t0, sz) + return data_paths + + +def standardize(raw): + """Standardize channel positions and names. + + Parameters + ---------- + raw : instance of Raw + The raw data to standardize. Operates in-place. 
+ """ + rename = dict() + for name in raw.ch_names: + std_name = name.strip(".") + std_name = std_name.upper() + if std_name.endswith("Z"): + std_name = std_name[:-1] + "z" + if std_name.startswith("FP"): + std_name = "Fp" + std_name[2:] + rename[name] = std_name + raw.rename_channels(rename) diff --git a/mne/datasets/epilepsy_ecog/__init__.py b/mne/datasets/epilepsy_ecog/__init__.py new file mode 100644 index 0000000..6ecb337 --- /dev/null +++ b/mne/datasets/epilepsy_ecog/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Clinical epilepsy datasets.""" + +from ._data import data_path, get_version diff --git a/mne/datasets/epilepsy_ecog/_data.py b/mne/datasets/epilepsy_ecog/_data.py new file mode 100644 index 0000000..20abb9f --- /dev/null +++ b/mne/datasets/epilepsy_ecog/_data.py @@ -0,0 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="epilepsy_ecog", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format( + name="epilepsy_ecog", conf="MNE_DATASETS_EPILEPSY_ECOG_PATH" +) + + +def get_version(): # noqa: D103 + return _get_version("epilepsy_ecog") + + +get_version.__doc__ = _version_doc.format(name="epilepsy_ecog") diff --git a/mne/datasets/erp_core/__init__.py b/mne/datasets/erp_core/__init__.py new file mode 100644 index 0000000..b16989a --- /dev/null +++ b/mne/datasets/erp_core/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""ERP-CORE EEG dataset.""" + +from .erp_core import data_path, get_version diff --git a/mne/datasets/erp_core/erp_core.py b/mne/datasets/erp_core/erp_core.py new file mode 100644 index 0000000..2771b17 --- /dev/null +++ b/mne/datasets/erp_core/erp_core.py @@ -0,0 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="erp_core", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format( + name="erp_core", conf="MNE_DATASETS_ERP_CORE_PATH" +) + + +def get_version(): # noqa: D103 + return _get_version("erp_core") + + +get_version.__doc__ = _version_doc.format(name="erp_core") diff --git a/mne/datasets/eyelink/__init__.py b/mne/datasets/eyelink/__init__.py new file mode 100644 index 0000000..db3ea54 --- /dev/null +++ b/mne/datasets/eyelink/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +"""Eyelink test dataset.""" + +from .eyelink import data_path, get_version diff --git a/mne/datasets/eyelink/eyelink.py b/mne/datasets/eyelink/eyelink.py new file mode 100644 index 0000000..918ac86 --- /dev/null +++ b/mne/datasets/eyelink/eyelink.py @@ -0,0 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="eyelink", + processor="unzip", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format( + name="eyelink", conf="MNE_DATASETS_EYELINK_PATH" +) + + +def get_version(): # noqa: D103 + return _get_version("eyelink") + + +get_version.__doc__ = _version_doc.format(name="eyelink") diff --git a/mne/datasets/fieldtrip_cmc/__init__.py b/mne/datasets/fieldtrip_cmc/__init__.py new file mode 100644 index 0000000..0ed7446 --- /dev/null +++ b/mne/datasets/fieldtrip_cmc/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""fieldtrip Cortico-Muscular Coherence (CMC) Dataset.""" + +from .fieldtrip_cmc import data_path, get_version diff --git a/mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py b/mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py new file mode 100644 index 0000000..b61b1d7 --- /dev/null +++ b/mne/datasets/fieldtrip_cmc/fieldtrip_cmc.py @@ -0,0 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="fieldtrip_cmc", + processor="nested_unzip", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format( + name="fieldtrip_cmc", conf="MNE_DATASETS_FIELDTRIP_CMC_PATH" +) + + +def get_version(): # noqa: D103 + return _get_version("fieldtrip_cmc") + + +get_version.__doc__ = _version_doc.format(name="fieldtrip_cmc") diff --git a/mne/datasets/fnirs_motor/__init__.py b/mne/datasets/fnirs_motor/__init__.py new file mode 100644 index 0000000..61808bb --- /dev/null +++ b/mne/datasets/fnirs_motor/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""fNIRS motor dataset.""" + +from .fnirs_motor import data_path, get_version diff --git a/mne/datasets/fnirs_motor/fnirs_motor.py b/mne/datasets/fnirs_motor/fnirs_motor.py new file mode 100644 index 0000000..43a4ffc --- /dev/null +++ b/mne/datasets/fnirs_motor/fnirs_motor.py @@ -0,0 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="fnirs_motor", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format( + name="fnirs_motor", conf="MNE_DATASETS_FNIRS_MOTOR_PATH" +) + + +def get_version(): # noqa: D103 + return _get_version("fnirs_motor") + + +get_version.__doc__ = _version_doc.format(name="fnirs_motor") diff --git a/mne/datasets/hf_sef/__init__.py b/mne/datasets/hf_sef/__init__.py new file mode 100644 index 0000000..3834a12 --- /dev/null +++ b/mne/datasets/hf_sef/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""HF-SEF dataset.""" + +from .hf_sef import data_path diff --git a/mne/datasets/hf_sef/hf_sef.py b/mne/datasets/hf_sef/hf_sef.py new file mode 100644 index 0000000..aff1831 --- /dev/null +++ b/mne/datasets/hf_sef/hf_sef.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python2 + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + + +import os +import os.path as op + +from ...utils import _check_option, verbose +from ..config import MNE_DATASETS +from ..utils import _do_path_update, _download_mne_dataset, _get_path + + +@verbose +def data_path( + dataset="evoked", path=None, force_update=False, update_path=True, *, verbose=None +): + """Get path to local copy of the high frequency SEF dataset. + + Gets a local copy of the high frequency SEF MEG dataset + :footcite:`NurminenEtAl2017`. + + Parameters + ---------- + dataset : 'evoked' | 'raw' + Whether to get the main dataset (evoked, structural and the rest) or + the separate dataset containing raw MEG data only. + path : None | str + Where to look for the HF-SEF data storing location. + If None, the environment variable or config parameter + ``MNE_DATASETS_HF_SEF_PATH`` is used. If it doesn't exist, the + "~/mne_data" directory is used. If the HF-SEF dataset + is not found under the given path, the data + will be automatically downloaded to the specified folder. + force_update : bool + Force update of the dataset even if a local copy exists. + update_path : bool | None + If True, set the MNE_DATASETS_HF_SEF_PATH in mne-python + config to the given path. If None, the user is prompted. + %(verbose)s + + Returns + ------- + path : str + Local path to the directory where the HF-SEF data is stored. + + References + ---------- + .. 
footbibliography:: + """ + _check_option("dataset", dataset, ("evoked", "raw")) + if dataset == "raw": + data_dict = MNE_DATASETS["hf_sef_raw"] + data_dict["dataset_name"] = "hf_sef_raw" + else: + data_dict = MNE_DATASETS["hf_sef_evoked"] + data_dict["dataset_name"] = "hf_sef_evoked" + config_key = data_dict["config_key"] + folder_name = data_dict["folder_name"] + + # get download path for specific dataset + path = _get_path(path=path, key=config_key, name=folder_name) + final_path = op.join(path, folder_name) + megdir = op.join(final_path, "MEG", "subject_a") + has_raw = ( + dataset == "raw" + and op.isdir(megdir) + and any("raw" in filename for filename in os.listdir(megdir)) + ) + has_evoked = dataset == "evoked" and op.isdir(op.join(final_path, "subjects")) + # data not there, or force_update requested: + if has_raw or has_evoked and not force_update: + _do_path_update(path, update_path, config_key, folder_name) + return final_path + + # instantiate processor that unzips file + data_path = _download_mne_dataset( + name=data_dict["dataset_name"], + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=True, + ) + return data_path diff --git a/mne/datasets/kiloword/__init__.py b/mne/datasets/kiloword/__init__.py new file mode 100644 index 0000000..17b54ad --- /dev/null +++ b/mne/datasets/kiloword/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""MNE visual_92_categories dataset.""" + +from .kiloword import data_path, get_version diff --git a/mne/datasets/kiloword/kiloword.py b/mne/datasets/kiloword/kiloword.py new file mode 100644 index 0000000..37fd3f1 --- /dev/null +++ b/mne/datasets/kiloword/kiloword.py @@ -0,0 +1,64 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): + """Get path to local copy of the kiloword dataset. + + This is the dataset from :footcite:`DufauEtAl2015`. + + Parameters + ---------- + path : None | str + Location of where to look for the kiloword data storing + location. If None, the environment variable or config parameter + MNE_DATASETS_KILOWORD_PATH is used. If it doesn't exist, + the "mne-python/examples" directory is used. If the + kiloword dataset is not found under the given path (e.g., + as "mne-python/examples/MNE-kiloword-data"), the data + will be automatically downloaded to the specified folder. + force_update : bool + Force update of the dataset even if a local copy exists. + update_path : bool | None + If True, set the MNE_DATASETS_KILOWORD_PATH in mne-python + config to the given path. If None, the user is prompted. + download : bool + If False and the kiloword dataset has not been downloaded yet, + it will not be downloaded and the path will be returned as + '' (empty string). This is mostly used for debugging purposes + and can be safely ignored by most users. + %(verbose)s + + Returns + ------- + path : list of Path + Local path to the given data file. This path is contained inside a list + of length one, for compatibility. + + References + ---------- + .. 
footbibliography:: + """ + return _download_mne_dataset( + name="kiloword", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +def get_version(): + """Get dataset version.""" + return _get_version("kiloword") + + +get_version.__doc__ = _version_doc.format(name="kiloword") diff --git a/mne/datasets/limo/__init__.py b/mne/datasets/limo/__init__.py new file mode 100644 index 0000000..b5622d9 --- /dev/null +++ b/mne/datasets/limo/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""LIMO Dataset.""" + +from .limo import data_path, load_data diff --git a/mne/datasets/limo/limo.py b/mne/datasets/limo/limo.py new file mode 100644 index 0000000..f0696a7 --- /dev/null +++ b/mne/datasets/limo/limo.py @@ -0,0 +1,372 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os.path as op +import time +from pathlib import Path + +import numpy as np +from scipy.io import loadmat + +from ..._fiff.meas_info import create_info +from ...channels import make_standard_montage +from ...epochs import EpochsArray +from ...utils import _check_pandas_installed, logger, verbose +from ..utils import _do_path_update, _downloader_params, _get_path, _log_time_size + +# root url for LIMO files +root_url = "https://files.de-1.osf.io/v1/resources/52rea/providers/osfstorage/" + + +@verbose +def data_path( + subject, path=None, force_update=False, update_path=None, *, verbose=None +): + """Get path to local copy of LIMO dataset URL. + + This is a low-level function useful for getting a local copy of the + remote LIMO dataset :footcite:`Rousselet2016`. The complete dataset is + available at datashare.is.ed.ac.uk/. + + Parameters + ---------- + subject : int + Subject to download. Must be of :class:`ìnt` in the range from 1 + to 18 (inclusive). + path : None | str + Location of where to look for the LIMO data storing directory. + If None, the environment variable or config parameter + ``MNE_DATASETS_LIMO_PATH`` is used. If it doesn't exist, the + "~/mne_data" directory is used. If the LIMO dataset + is not found under the given path, the data + will be automatically downloaded to the specified folder. + force_update : bool + Force update of the dataset even if a local copy exists. + update_path : bool | None + If True, set the MNE_DATASETS_LIMO_PATH in mne-python + config to the given path. If None, the user is prompted. + %(verbose)s + + Returns + ------- + path : str + Local path to the given data file. + + Notes + ----- + For example, one could do: + + >>> from mne.datasets import limo + >>> limo.data_path(subject=1, path=os.getenv('HOME') + '/datasets') # doctest:+SKIP + + This would download the LIMO data file to the 'datasets' folder, + and prompt the user to save the 'datasets' path to the mne-python config, + if it isn't there already. + + References + ---------- + .. 
footbibliography:: + """ # noqa: E501 + import pooch + + t0 = time.time() + + downloader = pooch.HTTPDownloader(**_downloader_params()) + + # local storage patch + config_key = "MNE_DATASETS_LIMO_PATH" + name = "LIMO" + subj = f"S{subject}" + path = _get_path(path, config_key, name) + base_path = op.join(path, "MNE-limo-data") + subject_path = op.join(base_path, subj) + # the remote URLs are in the form of UUIDs: + urls = dict( + S18={ + "Yr.mat": "5cf839833a4d9500178a6ff8", + "LIMO.mat": "5cf83907e650a2001ad592e4", + }, + S17={ + "Yr.mat": "5cf838e83a4d9500168aeb76", + "LIMO.mat": "5cf83867a542b80019c87602", + }, + S16={ + "Yr.mat": "5cf83857e650a20019d5778f", + "LIMO.mat": "5cf837dc3a4d9500188a64fe", + }, + S15={ + "Yr.mat": "5cf837cce650a2001ad591e8", + "LIMO.mat": "5cf83758a542b8001ac7d11d", + }, + S14={ + "Yr.mat": "5cf837493a4d9500198a938f", + "LIMO.mat": "5cf836e4a542b8001bc7cc53", + }, + S13={ + "Yr.mat": "5cf836d23a4d9500178a6df7", + "LIMO.mat": "5cf836543a4d9500168ae7cb", + }, + S12={ + "Yr.mat": "5cf83643d4c7d700193e5954", + "LIMO.mat": "5cf835193a4d9500178a6c92", + }, + S11={ + "Yr.mat": "5cf8356ea542b8001cc81517", + "LIMO.mat": "5cf834f7d4c7d700163daab8", + }, + S10={ + "Yr.mat": "5cf833b0e650a20019d57454", + "LIMO.mat": "5cf83204e650a20018d59eb2", + }, + S9={ + "Yr.mat": "5cf83201a542b8001cc811cf", + "LIMO.mat": "5cf8316c3a4d9500168ae13b", + }, + S8={ + "Yr.mat": "5cf8326ce650a20017d60373", + "LIMO.mat": "5cf8316d3a4d9500198a8dc5", + }, + S7={ + "Yr.mat": "5cf834a03a4d9500168ae59b", + "LIMO.mat": "5cf83069e650a20017d600d7", + }, + S6={ + "Yr.mat": "5cf830e6a542b80019c86a70", + "LIMO.mat": "5cf83057a542b80019c869ca", + }, + S5={ + "Yr.mat": "5cf8115be650a20018d58041", + "LIMO.mat": "5cf80c0bd4c7d700193e213c", + }, + S4={ + "Yr.mat": "5cf810c9a542b80019c8450a", + "LIMO.mat": "5cf80bf83a4d9500198a6eb4", + }, + S3={ + "Yr.mat": "5cf80c55d4c7d700163d8f52", + "LIMO.mat": "5cf80bdea542b80019c83cab", + }, + S2={ + "Yr.mat": "5cde827123fec40019e01300", + "LIMO.mat": "5cde82682a50c4001677c259", + }, + S1={ + "Yr.mat": "5d6d3071536cf5001a8b0c78", + "LIMO.mat": "5d6d305f6f41fc001a3151d8", + }, + ) + # these can't be in the registry file (mne/data/dataset_checksums.txt) + # because of filename duplication + hashes = dict( + S18={ + "Yr.mat": "md5:87f883d442737971a80fc0a35d057e51", + "LIMO.mat": "md5:8b4879646f65d7876fa4adf2e40162c5", + }, + S17={ + "Yr.mat": "md5:7b667ec9eefd7a9996f61ae270e295ee", + "LIMO.mat": "md5:22eaca4e6fad54431fd61b307fc426b8", + }, + S16={ + "Yr.mat": "md5:c877afdb4897426421577e863a45921a", + "LIMO.mat": "md5:86672d7afbea1e8c39305bc3f852c8c2", + }, + S15={ + "Yr.mat": "md5:eea9e0140af598fefc08c886a6f05de5", + "LIMO.mat": "md5:aed5cb71ddbfd27c6a3ac7d3e613d07f", + }, + S14={ + "Yr.mat": "md5:8bd842cfd8588bd5d32e72fdbe70b66e", + "LIMO.mat": "md5:1e07d1f36f2eefad435a77530daf2680", + }, + S13={ + "Yr.mat": "md5:d7925d2af7288b8a5186dfb5dbb63d34", + "LIMO.mat": "md5:ba891015d2f9e447955fffa9833404ca", + }, + S12={ + "Yr.mat": "md5:0e1d05beaa4bf2726e0d0671b78fe41e", + "LIMO.mat": "md5:423fd479d71097995b6614ecb11df9ad", + }, + S11={ + "Yr.mat": "md5:1b0016fb9832e43b71f79c1992fcbbb1", + "LIMO.mat": "md5:1a281348c2a41ee899f42731d30cda70", + }, + S10={ + "Yr.mat": "md5:13c66f60e241b9a9cc576eaf1b55a417", + "LIMO.mat": "md5:3c4b41e221eb352a21bbef1a7e006f06", + }, + S9={ + "Yr.mat": "md5:3ae1d9c3a1d9325deea2f2dddd1ab507", + "LIMO.mat": "md5:5e204e2a4bcfe4f535b4b1af469b37f7", + }, + S8={ + "Yr.mat": "md5:7e9adbca4e03d8d7ce8ea07ccecdc8fd", + "LIMO.mat": 
"md5:88313c21d34428863590e586b2bc3408", + }, + S7={ + "Yr.mat": "md5:6b5290a6725ecebf1022d5d2789b186d", + "LIMO.mat": "md5:8c769219ebc14ce3f595063e84bfc0a9", + }, + S6={ + "Yr.mat": "md5:420c858a8340bf7c28910b7b0425dc5d", + "LIMO.mat": "md5:9cf4e1a405366d6bd0cc6d996e32fd63", + }, + S5={ + "Yr.mat": "md5:946436cfb474c8debae56ffb1685ecf3", + "LIMO.mat": "md5:241fac95d3a79d2cea081391fb7078bd", + }, + S4={ + "Yr.mat": "md5:c8216af78ac87b739e86e57b345cafdd", + "LIMO.mat": "md5:8e10ef36c2e075edc2f787581ba33459", + }, + S3={ + "Yr.mat": "md5:ff02e885b65b7b807146f259a30b1b5e", + "LIMO.mat": "md5:59b5fb3a9749003133608b5871309e2c", + }, + S2={ + "Yr.mat": "md5:a4329022e57fd07ceceb7d1735fd2718", + "LIMO.mat": "md5:98b284b567f2dd395c936366e404f2c6", + }, + S1={ + "Yr.mat": "md5:076c0ae78fb71d43409c1877707df30e", + "LIMO.mat": "md5:136c8cf89f8f111a11f531bd9fa6ae69", + }, + ) + # create the download manager + fetcher = pooch.create( + path=subject_path, + base_url="", + version=None, # Data versioning is decoupled from MNE-Python version. + registry=hashes[subj], + urls={key: f"{root_url}{uuid}" for key, uuid in urls[subj].items()}, + retry_if_failed=2, # 2 retries = 3 total attempts + ) + # use our logger level for pooch's logger too + pooch.get_logger().setLevel(logger.getEffectiveLevel()) + # fetch the data + sz = 0 + for fname in ("LIMO.mat", "Yr.mat"): + destination = Path(subject_path, fname) + if destination.exists(): + if force_update: + destination.unlink() + else: + continue + if sz == 0: # log once + logger.info("Downloading LIMO data") + # fetch the remote file (if local file missing or has hash mismatch) + fetcher.fetch(fname=fname, downloader=downloader) + sz += destination.stat().st_size + # update path in config if desired + _do_path_update(path, update_path, config_key, name) + if sz > 0: + _log_time_size(t0, sz) + return base_path + + +@verbose +def load_data(subject, path=None, force_update=False, update_path=None, verbose=None): + """Fetch subjects epochs data for the LIMO data set. + + Parameters + ---------- + subject : int + Subject to use. Must be of class ìnt in the range from 1 to 18. + path : str + Location of where to look for the LIMO data. + If None, the environment variable or config parameter + ``MNE_DATASETS_LIMO_PATH`` is used. If it doesn't exist, the + "~/mne_data" directory is used. + force_update : bool + Force update of the dataset even if a local copy exists. + update_path : bool | None + If True, set the MNE_DATASETS_LIMO_PATH in mne-python + config to the given path. If None, the user is prompted. + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The epochs. 
+ """ # noqa: E501 + pd = _check_pandas_installed() + # subject in question + if isinstance(subject, int) and 1 <= subject <= 18: + subj = f"S{subject}" + else: + raise ValueError("subject must be an int in the range from 1 to 18") + + # set limo path, download and decompress files if not found + limo_path = data_path(subject, path, force_update, update_path) + + # -- 1) import .mat files + # epochs info + fname_info = op.join(limo_path, subj, "LIMO.mat") + data_info = loadmat(fname_info) + # number of epochs per condition + design = data_info["LIMO"]["design"][0][0]["X"][0][0] + data_info = data_info["LIMO"]["data"][0][0][0][0] + # epochs data + fname_eeg = op.join(limo_path, subj, "Yr.mat") + data = loadmat(fname_eeg) + + # -- 2) get epochs information from structure + # sampling rate + sfreq = data_info["sampling_rate"][0][0] + # tmin and tmax + tmin = data_info["start"][0][0] + # create events matrix + sample = np.arange(len(design)) + prev_id = np.zeros(len(design)) + ev_id = design[:, 1] + events = np.array([sample, prev_id, ev_id]).astype(int).T + # event ids, such that Face B == 1 + event_id = {"Face/A": 0, "Face/B": 1} + + # -- 3) extract channel labels from LIMO structure + # get individual labels + labels = data_info["chanlocs"]["labels"] + labels = [label for label, *_ in labels[0]] + # get montage + montage = make_standard_montage("biosemi128") + # add external electrodes (e.g., eogs) + ch_names = montage.ch_names + ["EXG1", "EXG2", "EXG3", "EXG4"] + # match individual labels to labels in montage + found_inds = [ind for ind, name in enumerate(ch_names) if name in labels] + missing_chans = [name for name in ch_names if name not in labels] + assert labels == [ch_names[ind] for ind in found_inds] + + # -- 4) extract data from subjects Yr structure + # data is stored as channels x time points x epochs + # data['Yr'].shape # <-- see here + # transpose to epochs x channels time points + data = np.transpose(data["Yr"], (2, 0, 1)) + # initialize data in expected order + temp_data = np.empty((data.shape[0], len(ch_names), data.shape[2])) + # copy over the non-missing data + for source, target in enumerate(found_inds): + # avoid copy when fancy indexing + temp_data[:, target, :] = data[:, source, :] + # data to V (to match MNE's format) + data = temp_data / 1e6 + # create list containing channel types + types = ["eog" if ch.startswith("EXG") else "eeg" for ch in ch_names] + + # -- 5) Create custom info for mne epochs structure + # create info + info = create_info(ch_names, sfreq, types).set_montage(montage) + # get faces and noise variables from design matrix + event_list = list(events[:, 2]) + faces = ["B" if event else "A" for event in event_list] + noise = list(design[:, 2]) + # create epochs metadata + metadata = {"face": faces, "phase-coherence": noise} + metadata = pd.DataFrame(metadata) + + # -- 6) Create custom epochs array + epochs = EpochsArray( + data, info, events, tmin, event_id, metadata=metadata, verbose=False + ) + epochs.info["bads"] = missing_chans # missing channels are marked as bad. + + return epochs diff --git a/mne/datasets/misc/__init__.py b/mne/datasets/misc/__init__.py new file mode 100644 index 0000000..25377ca --- /dev/null +++ b/mne/datasets/misc/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +"""MNE misc dataset.""" + +from ._misc import data_path, _pytest_mark diff --git a/mne/datasets/misc/_misc.py b/mne/datasets/misc/_misc.py new file mode 100644 index 0000000..ba7fd23 --- /dev/null +++ b/mne/datasets/misc/_misc.py @@ -0,0 +1,31 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, has_dataset + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="misc", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +def _pytest_mark(): + import pytest + + return pytest.mark.skipif( + not has_dataset(name="misc"), reason="Requires misc dataset" + ) + + +data_path.__doc__ = _data_path_doc.format(name="misc", conf="MNE_DATASETS_MISC_PATH") diff --git a/mne/datasets/mtrf/__init__.py b/mne/datasets/mtrf/__init__.py new file mode 100644 index 0000000..13828c0 --- /dev/null +++ b/mne/datasets/mtrf/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""mTRF Dataset.""" + +from .mtrf import data_path, get_version diff --git a/mne/datasets/mtrf/mtrf.py b/mne/datasets/mtrf/mtrf.py new file mode 100644 index 0000000..78c5002 --- /dev/null +++ b/mne/datasets/mtrf/mtrf.py @@ -0,0 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + +data_name = "mtrf" + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name=data_name, + processor="unzip", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format(name=data_name, conf="MNE_DATASETS_MTRF_PATH") + + +def get_version(): # noqa: D103 + return _get_version(data_name) + + +get_version.__doc__ = _version_doc.format(name=data_name) diff --git a/mne/datasets/multimodal/__init__.py b/mne/datasets/multimodal/__init__.py new file mode 100644 index 0000000..7bc3e55 --- /dev/null +++ b/mne/datasets/multimodal/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Multimodal dataset.""" + +from .multimodal import data_path, get_version diff --git a/mne/datasets/multimodal/multimodal.py b/mne/datasets/multimodal/multimodal.py new file mode 100644 index 0000000..60aa7d2 --- /dev/null +++ b/mne/datasets/multimodal/multimodal.py @@ -0,0 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+
+from ...utils import verbose
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc
+
+
+@verbose
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
+    return _download_mne_dataset(
+        name="multimodal",
+        processor="untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )
+
+
+data_path.__doc__ = _data_path_doc.format(
+    name="multimodal", conf="MNE_DATASETS_MULTIMODAL_PATH"
+)
+
+
+def get_version():  # noqa: D103
+    return _get_version("multimodal")
+
+
+get_version.__doc__ = _version_doc.format(name="multimodal")
diff --git a/mne/datasets/opm/__init__.py b/mne/datasets/opm/__init__.py
new file mode 100644
index 0000000..a587c5f
--- /dev/null
+++ b/mne/datasets/opm/__init__.py
@@ -0,0 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""OPM dataset."""
+
+from .opm import data_path, get_version
diff --git a/mne/datasets/opm/opm.py b/mne/datasets/opm/opm.py
new file mode 100644
index 0000000..a5245ea
--- /dev/null
+++ b/mne/datasets/opm/opm.py
@@ -0,0 +1,30 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from ...utils import verbose
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc
+
+
+@verbose
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
+    return _download_mne_dataset(
+        name="opm",
+        processor="untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )
+
+
+data_path.__doc__ = _data_path_doc.format(name="opm", conf="MNE_DATASETS_OPM_PATH")
+
+
+def get_version():  # noqa: D103
+    return _get_version("opm")
+
+
+get_version.__doc__ = _version_doc.format(name="opm")
diff --git a/mne/datasets/phantom_4dbti/__init__.py b/mne/datasets/phantom_4dbti/__init__.py
new file mode 100644
index 0000000..c542617
--- /dev/null
+++ b/mne/datasets/phantom_4dbti/__init__.py
@@ -0,0 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""4D-BTi phantom dataset."""
+
+from .phantom_4dbti import data_path, get_version
diff --git a/mne/datasets/phantom_4dbti/phantom_4dbti.py b/mne/datasets/phantom_4dbti/phantom_4dbti.py
new file mode 100644
index 0000000..59906a9
--- /dev/null
+++ b/mne/datasets/phantom_4dbti/phantom_4dbti.py
@@ -0,0 +1,32 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
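Note that the only real per-dataset variation in these modules is the dataset `name`, the config key, and the archive `processor` (`"untar"`, `"unzip"`, or `"nested_untar"`, depending on how the archive is packaged). A usage sketch for the `opm` fetcher above; with `download=False`, `data_path` only resolves the local path and fetches nothing if the data are absent:

```python
# Usage sketch: resolve a dataset path without forcing a download.
import mne

path = mne.datasets.opm.data_path(download=False)  # no fetch if absent
print(path, mne.datasets.opm.get_version())
```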
+
+from ...utils import verbose
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc
+
+
+@verbose
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
+    return _download_mne_dataset(
+        name="phantom_4dbti",
+        processor="unzip",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )
+
+
+data_path.__doc__ = _data_path_doc.format(
+    name="phantom_4dbti", conf="MNE_DATASETS_PHANTOM_4DBTI_PATH"
+)
+
+
+def get_version():  # noqa: D103
+    return _get_version("phantom_4dbti")
+
+
+get_version.__doc__ = _version_doc.format(name="phantom_4dbti")
diff --git a/mne/datasets/phantom_kernel/__init__.py b/mne/datasets/phantom_kernel/__init__.py
new file mode 100644
index 0000000..ad4aecf
--- /dev/null
+++ b/mne/datasets/phantom_kernel/__init__.py
@@ -0,0 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""Kernel phantom dataset."""
+
+from .phantom_kernel import data_path, get_version
diff --git a/mne/datasets/phantom_kernel/phantom_kernel.py b/mne/datasets/phantom_kernel/phantom_kernel.py
new file mode 100644
index 0000000..ed44a78
--- /dev/null
+++ b/mne/datasets/phantom_kernel/phantom_kernel.py
@@ -0,0 +1,32 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from ...utils import verbose
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc
+
+
+@verbose
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
+    return _download_mne_dataset(
+        name="phantom_kernel",
+        processor="nested_untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )
+
+
+data_path.__doc__ = _data_path_doc.format(
+    name="phantom_kernel", conf="MNE_DATASETS_PHANTOM_KERNEL_PATH"
+)
+
+
+def get_version():  # noqa: D103
+    return _get_version("phantom_kernel")
+
+
+get_version.__doc__ = _version_doc.format(name="phantom_kernel")
diff --git a/mne/datasets/phantom_kit/__init__.py b/mne/datasets/phantom_kit/__init__.py
new file mode 100644
index 0000000..7fcc361
--- /dev/null
+++ b/mne/datasets/phantom_kit/__init__.py
@@ -0,0 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""KIT phantom dataset."""
+
+from .phantom_kit import data_path, get_version
diff --git a/mne/datasets/phantom_kit/phantom_kit.py b/mne/datasets/phantom_kit/phantom_kit.py
new file mode 100644
index 0000000..150e08d
--- /dev/null
+++ b/mne/datasets/phantom_kit/phantom_kit.py
@@ -0,0 +1,32 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
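All of these fetchers ultimately delegate the download, integrity check, and unpacking to pooch. A rough sketch of that underlying mechanism; the URL, hash, and cache directory here are dummies, not real dataset endpoints:

```python
# Sketch of the pooch call these fetchers rely on (dummy URL and hash).
import pooch

paths = pooch.retrieve(
    url="https://example.com/data/archive.tar.gz",  # dummy URL
    known_hash=None,  # or e.g. "sha1:..." / "md5:..." to verify integrity
    path="mne_data",  # hypothetical cache directory
    fname="archive.tar.gz",
    processor=pooch.Untar(),  # unpack after download, cf. processor="untar"
)  # with a processor, returns the list of extracted file paths
```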
+ +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="phantom_kit", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format( + name="phantom_kit", conf="MNE_DATASETS_PHANTOM_KIT_PATH" +) + + +def get_version(): # noqa: D103 + return _get_version("phantom_kit") + + +get_version.__doc__ = _version_doc.format(name="phantom_kit") diff --git a/mne/datasets/refmeg_noise/__init__.py b/mne/datasets/refmeg_noise/__init__.py new file mode 100644 index 0000000..7437aaa --- /dev/null +++ b/mne/datasets/refmeg_noise/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""MEG reference-noise data set.""" + +from .refmeg_noise import data_path, get_version diff --git a/mne/datasets/refmeg_noise/refmeg_noise.py b/mne/datasets/refmeg_noise/refmeg_noise.py new file mode 100644 index 0000000..c6c24ff --- /dev/null +++ b/mne/datasets/refmeg_noise/refmeg_noise.py @@ -0,0 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="refmeg_noise", + processor="unzip", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format( + name="refmeg_noise", conf="MNE_DATASETS_REFMEG_NOISE_PATH" +) + + +def get_version(): # noqa: D103 + return _get_version("refmeg_noise") + + +get_version.__doc__ = _version_doc.format(name="refmeg_noise") diff --git a/mne/datasets/sample/__init__.py b/mne/datasets/sample/__init__.py new file mode 100644 index 0000000..2c465ad --- /dev/null +++ b/mne/datasets/sample/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""MNE sample dataset.""" + +from .sample import data_path, get_version diff --git a/mne/datasets/sample/sample.py b/mne/datasets/sample/sample.py new file mode 100644 index 0000000..8dde8c6 --- /dev/null +++ b/mne/datasets/sample/sample.py @@ -0,0 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
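A few lines below, a `SHA1SUMS` registry pins every PhysioNet EDF file to a checksum; `_fetch_one` (defined further down in `sleep_physionet/_utils.py`) passes these to pooch as `known_hash="sha1:..."`. A sketch of verifying one file by hand, using the first digest from that list:

```python
# Sketch: verify a downloaded EDF against its SHA1SUMS entry by hand.
import hashlib


def sha1_of(path, chunk=1 << 20):
    h = hashlib.sha1()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()


expected = "adabd3b01fc7bb75c523a974f38ee3ae4e57b40f"  # SC4001E0-PSG.edf
assert sha1_of("SC4001E0-PSG.edf") == expected
```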
+ +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="sample", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format( + name="sample", conf="MNE_DATASETS_SAMPLE_PATH" +) + + +def get_version(): # noqa: D103 + return _get_version("sample") + + +get_version.__doc__ = _version_doc.format(name="sample") diff --git a/mne/datasets/sleep_physionet/SHA1SUMS b/mne/datasets/sleep_physionet/SHA1SUMS new file mode 100644 index 0000000..1edcecc --- /dev/null +++ b/mne/datasets/sleep_physionet/SHA1SUMS @@ -0,0 +1,394 @@ +adabd3b01fc7bb75c523a974f38ee3ae4e57b40f SC4001E0-PSG.edf +21c998eadc8b1e3ea6727d3585186b8f76e7e70b SC4001EC-Hypnogram.edf +c6b6d7a8605cc7e7602b6028ee77f6fbf5f7581d SC4002E0-PSG.edf +386230188a3552b1fc90bba0fb7476ceaca174b6 SC4002EC-Hypnogram.edf +4d17451f7847355bcab17584de05e7e1df58c660 SC4011E0-PSG.edf +d582a3cbe2db481a362af890bc5a2f5ca7c878dc SC4011EH-Hypnogram.edf +a47d525f5147904b6890231e2ad338359c7ab94c SC4012E0-PSG.edf +fa99f60d7f54617cdd1128aff4f21c4daed763c7 SC4012EC-Hypnogram.edf +8b135afa7fb93bb5f1998fda50355944777c245e SC4021E0-PSG.edf +91043cfe46695088b17b6a02937b25efd674c3fb SC4021EH-Hypnogram.edf +d739e142b3b328c71b4752149901805dcd6d7e19 SC4022E0-PSG.edf +0c46a03699dd00e8f92a7edff99ebc4642cb3d48 SC4022EJ-Hypnogram.edf +85e58dc1e3303537dade8c5827ab58328239c384 SC4031E0-PSG.edf +6363d8b0fdc48cf396c9abf054bb4a9696d38bdb SC4031EC-Hypnogram.edf +43963d300642b3aa840e8c468f321b8162601772 SC4032E0-PSG.edf +7925514bc8d2ef3f1103130f08f7b3afd2136b88 SC4032EP-Hypnogram.edf +04d2b88d25f2ae4a65ba44cd9145bd12800a0e20 SC4041E0-PSG.edf +f148821669bd3588187b3b430bd79adf569f86d1 SC4041EC-Hypnogram.edf +76253d964d7797540ffd791e6e136023ed67a485 SC4042E0-PSG.edf +9873df429f971f8a4b720a454f6c0472b8a25ebb SC4042EC-Hypnogram.edf +ea073451b65ce8a6f1a02a8cc2b89d1a162ca0ae SC4051E0-PSG.edf +4159ef8a3e119d6dcc1bede806f6fbc017b27a0f SC4051EC-Hypnogram.edf +5a2efbd21be9b745fd534394eb2503caca7dc53f SC4052E0-PSG.edf +0e96482d44762df4da65dc4fdb970b342264d22a SC4052EC-Hypnogram.edf +1736736e585807c14f1ae8bc87a94cae222c5170 SC4061E0-PSG.edf +4bf99622c67c281b25ceccd35e7050328a2946e8 SC4061EC-Hypnogram.edf +763c7ac059f1771a0165e5cb351b176afb1cfe15 SC4062E0-PSG.edf +14f07411cd04d3b4b522d37c129334955287ff5f SC4062EC-Hypnogram.edf +1374b34f6139b6ff7e865d8243eef39ba334ef50 SC4071E0-PSG.edf +608024fd19a140ad233a4680e07c2495a74b69c2 SC4071EC-Hypnogram.edf +1c570644243d79396df612fa2b9bc027b24430e4 SC4072E0-PSG.edf +a8da6c20b9b48189f05ab537886b59dd141374d2 SC4072EH-Hypnogram.edf +0e1cc2c4e1da14ab94515e3e7e75e8ad30ec99cb SC4081E0-PSG.edf +9ec663ffa5c17afcaca59d7829d77b9165102237 SC4081EC-Hypnogram.edf +d57d4aa7cbc5045f611a3a3e342b501e086ea426 SC4082E0-PSG.edf +d43c785dba43063d7baa332671c6bac9c832b5b7 SC4082EP-Hypnogram.edf +b3502e0bd54683e973182c791aa962b804e79633 SC4091E0-PSG.edf +7aa63b408c769a4a983a908b6ba41d87dd743c6e SC4091EC-Hypnogram.edf +246e35852119b33d197db2f7bcfb1b46a5270a03 SC4092E0-PSG.edf +9d85766a83231b1c6076cb293367ccc354c57eeb SC4092EC-Hypnogram.edf +3ae168ff2c9c0c56f51205fdb10f05a4c6b2064e SC4101E0-PSG.edf +60d9c3913881e11b06ad99e9870bd1ca4d93c952 SC4101EC-Hypnogram.edf +86f307190961eaab0214fdc0213f8fe05812c7a5 SC4102E0-PSG.edf 
+8072e2d52bc6c19b45fbd921550e5243bc5a1de7 SC4102EC-Hypnogram.edf +e490956b4dce01c46ba88a2b847f091bb54ea16e SC4111E0-PSG.edf +12db1920e2f6083c8ab1f2c24fe35dfa03715e4a SC4111EC-Hypnogram.edf +ca24dc464df61144627588b29d35a85fcc7ac984 SC4112E0-PSG.edf +54dbc39015b0a445b51189987a00e08cc27d8f0c SC4112EC-Hypnogram.edf +33c72025a7a215ea5e255f4254cb0f93b1313369 SC4121E0-PSG.edf +daa57ece807cb5325c6d1ce059f0e8a8d1c85391 SC4121EC-Hypnogram.edf +34f5145ab62dcc5a53ba18735519e5bb2b13841a SC4122E0-PSG.edf +b7af1a32d8ca15e8185e4c94213ffc18ad7f6e8a SC4122EV-Hypnogram.edf +42ff97035aae6dd34ca9437857c48ac6f2ab97df SC4131E0-PSG.edf +5beef85170bdbb5cf2eea24a79f0f5c2c3975c4b SC4131EC-Hypnogram.edf +83493e1c32d441c9e5ee3de6a024bfb5e7ab9f5f SC4141E0-PSG.edf +511d398f22b9b2b304de27c40740a41584ff6af2 SC4141EU-Hypnogram.edf +63d13828b7ebe0d2ed7f491d2b5520e928b9b55d SC4142E0-PSG.edf +6f123e6fdc90a01b83e694d9744a6d27f3c87b25 SC4142EU-Hypnogram.edf +5a92d49699d4de369d66d9462e91b0dcb3312649 SC4151E0-PSG.edf +37dcbd339c95322d028b3a5466812697041cc373 SC4151EC-Hypnogram.edf +778626489bc4fe2c9137d2d361876d97dce97e5e SC4152E0-PSG.edf +294cdc47cd3d165031f7041c17f18dd013d216cb SC4152EC-Hypnogram.edf +e56ff3aa366fe9a04a0fdfdd4cd862e77e8ac807 SC4161E0-PSG.edf +56711b1bfed292032491f5cce57494629286a131 SC4161EC-Hypnogram.edf +722692f9940f3a1bccb9b4488c4477edf7fb128f SC4162E0-PSG.edf +c85647fb4bc1f382fe46bf9aaf579dc483115885 SC4162EC-Hypnogram.edf +f1a65522cb7d6c71ac47742535a12c88e2019dad SC4171E0-PSG.edf +dd257c8d922f08c2c8ca5236c9bf54da887c68e5 SC4171EU-Hypnogram.edf +572b81bc24c2c9482e6fc7ba9202a7bf253655e1 SC4172E0-PSG.edf +c9a3b590748d7d6c7ad97c62222bd53d8ebaf630 SC4172EC-Hypnogram.edf +23674d20572853eb6d988d24378c52123f66500c SC4181E0-PSG.edf +51fc3df2df7d4da654f3e18ed1b233d0c60cfa80 SC4181EC-Hypnogram.edf +83e8cbe882ba863da9fd3c11393c95b6fec5b7a5 SC4182E0-PSG.edf +43c487955edddb4ee2f60193a097c68c25c5dd4d SC4182EC-Hypnogram.edf +d6da621dbb20dec3494a38c7d2a0363793ac5ebe SC4191E0-PSG.edf +defc7b9368c2d3c4ab4a294757843825a83cdb5d SC4191EP-Hypnogram.edf +941353118732321d0246a1d58d72e903bd2f0d8f SC4192E0-PSG.edf +97b91b3067c5ecde766042fc2cff9e22f8023371 SC4192EV-Hypnogram.edf +38a0be6e45ddd9b1f17d09964a32e005dc5a6519 SC4201E0-PSG.edf +83822f9970d3959ad2e0613492ae39bd0fae6068 SC4201EC-Hypnogram.edf +aa69f5bd47c2ae03c9d38bfe6d0e58408744b885 SC4202E0-PSG.edf +5c5c63016b43421a523d1efcb34247e90aa6318b SC4202EC-Hypnogram.edf +c106ad072dbc975a3742f7eff151219870f0c794 SC4211E0-PSG.edf +9126937ea8a414d6ae9bc4a4194d841a891fa8a8 SC4211EC-Hypnogram.edf +a06ecb3f0a7b2c306f5ae4dbd83685f877cd945b SC4212E0-PSG.edf +a85f178b69a1cda47d11dd1e5394dfdcb58de1d4 SC4212EC-Hypnogram.edf +8733ea022d3778259a436507156cf3360ad8be06 SC4221E0-PSG.edf +b158eda4f81772095c129be77f8e60ec9d81b884 SC4221EJ-Hypnogram.edf +211410fab6381da0dfaef4134d5a05eec935a4ec SC4222E0-PSG.edf +1488fbfbc149499dafa8dafff4f7504053af429f SC4222EC-Hypnogram.edf +d96f1f35b2f77c7de706036c6e4114139e07b307 SC4231E0-PSG.edf +9f6df70676d6cddcf069ceb7f408a7989af99ce2 SC4231EJ-Hypnogram.edf +6b493fa424c1329ea1c13543d08ba82a9f1e85b6 SC4232E0-PSG.edf +d8ca7d694b3c48ab9d983b9cf67e17744c6b50fb SC4232EV-Hypnogram.edf +58719e53fe18d2fc4cb1776ab5d43306beb1325d SC4241E0-PSG.edf +fb1432e303a8f99a2256ce682db95d88772c479f SC4241EC-Hypnogram.edf +5a6277972c5f03572ed99d9ff63fb637945be778 SC4242E0-PSG.edf +bbbf097f4cc6560fc20c903fba2c7055e1549f85 SC4242EA-Hypnogram.edf +7dbc0289707ff70662d367d65de7bec188484d1b SC4251E0-PSG.edf +e38be8134e4a36eb418ca1f06a1fe02b52d0ebf1 SC4251EP-Hypnogram.edf 
+cb3922910ea03d06c1fc5c8f15b71339dc26bc9d SC4252E0-PSG.edf +4cb7a383736e09125a82ef7e4f17b41130c7ac00 SC4252EU-Hypnogram.edf +b81c9bd1875b33713b5eb56b58f1e120841b507f SC4261F0-PSG.edf +501eda59557bb99d530d01bdad3579f1e1158991 SC4261FM-Hypnogram.edf +c9f9ad7cd751d5be91396886a2b64a7c1de564ee SC4262F0-PSG.edf +7ccd12803c5fc602ac1929ff3afd914b894b9143 SC4262FC-Hypnogram.edf +20994715d34edb26113180ee330ce287dbf57b60 SC4271F0-PSG.edf +26c5c7f3a5c350d3505af2857835ce81252c5990 SC4271FC-Hypnogram.edf +9e79eb465e34b7eb6fe27ae3ce35d28d6693d44b SC4272F0-PSG.edf +956fe4b45d29a8999faf280a6168e332afab6abc SC4272FM-Hypnogram.edf +51811913d7854f95c319076e670d988687ca667c SC4281G0-PSG.edf +d188150831e912081dbeda2695231177200c39f9 SC4281GC-Hypnogram.edf +e9f080a766a9b7a247f228e44e9c4ec67e571c95 SC4282G0-PSG.edf +12d777787dd1975eef9015329fd774b2bfa1d53a SC4282GC-Hypnogram.edf +f81c7574a5e5829e006d0b705bf5208a3349c9c7 SC4291G0-PSG.edf +577c1345f6d070d975db5016048722f78b1b414e SC4291GA-Hypnogram.edf +7416f44a3b149b4ca1fc3e53d546a093a7333bb5 SC4292G0-PSG.edf +6e111a15160a31609761f742315df800b1311b3b SC4292GC-Hypnogram.edf +7818e5a02afa89e913111d91ecd651aa3e786e5d SC4301E0-PSG.edf +d49df84bfea28bb241c09b922cd2dc64f57c5ae5 SC4301EC-Hypnogram.edf +d52859ba6a7ded3364b0d8ef2b722e1d3edda060 SC4302E0-PSG.edf +b3d6f687831ee32f6df1da59f2d568c13f9c09d0 SC4302EV-Hypnogram.edf +b62f5104bddf452f4700c85997e51bec17f0243b SC4311E0-PSG.edf +812c34844e834b97949019741fa7f835d973725d SC4311EC-Hypnogram.edf +b0a9b4922665734773abbaba06e7aab32010b862 SC4312E0-PSG.edf +fca1935a8974eac27803e3125cea177995deca11 SC4312EM-Hypnogram.edf +335381ae310e9f1f053c37763eeee74d7d873471 SC4321E0-PSG.edf +67ba7d3b97354deb31db095e748ea3a4014fae2c SC4321EC-Hypnogram.edf +c9fdcfcce7e603b3289b7417891987fd67f6d921 SC4322E0-PSG.edf +40cf9a6397a52c7deda693ca596e928cc2b9f4e9 SC4322EC-Hypnogram.edf +f37cb4df27286e38c604cae943169ff29b1473fc SC4331F0-PSG.edf +ca943e2b73c6404f929c372ebd817b7b3b71b4dd SC4331FV-Hypnogram.edf +5bce6ea9b2d6c9bfb41065e92bf9cc05a11b5b75 SC4332F0-PSG.edf +e4595b0313d5320b0bffefa43260485e19977e3c SC4332FC-Hypnogram.edf +17de25c8f023fe632aa403a6d9525c1cde8eaef5 SC4341F0-PSG.edf +81ba3c0d8320c9ee306f678b4bc9e6e266165886 SC4341FA-Hypnogram.edf +b659037447a1871f4ba72bbe496cfbe507330530 SC4342F0-PSG.edf +e8e74c0905e89a59022ce0814ca9a050748ec9ae SC4342FA-Hypnogram.edf +631900bef36d359a0f5807a7e1b202f80b0427ac SC4351F0-PSG.edf +a15cdf3973b77198d8276dc505dbb35cb39a9b4a SC4351FA-Hypnogram.edf +325423a85890dcc921253bde7c7027d66f14033e SC4352F0-PSG.edf +1e0583b2a58432c964506ff44752d597753658c9 SC4352FV-Hypnogram.edf +30b90aaf965938d569ea362f66e2afa0c08c7017 SC4362F0-PSG.edf +fb870d50ce3f4d961d8b061a83d21e5467e4ae6c SC4362FC-Hypnogram.edf +0dc56fce13b6317f197d0b17c04f5be4af1c964f SC4371F0-PSG.edf +c19b6cbfdf3a33169ce9b4a5dc94f93b696a21ba SC4371FA-Hypnogram.edf +c024c491dd836ed0169300e7171c276fd14b1c44 SC4372F0-PSG.edf +97b2915a8a343efc7b785998c0532beaea2fbe91 SC4372FC-Hypnogram.edf +6098d2b501b82ca0ddc8893547c6990e204e8ba6 SC4381F0-PSG.edf +fdbf653a4a675843c97d0a76ef5e4cebf5d2dbcb SC4381FC-Hypnogram.edf +40ce0168d5f546fcd445996ab614f43823a7c2b1 SC4382F0-PSG.edf +796f8507254c2d8d345171c077dbd855e112eb47 SC4382FW-Hypnogram.edf +28fd8ad1aee307847e2eb579763ebca18e56f540 SC4401E0-PSG.edf +65b5671a89871351ee3da7ea800aad276a445b2a SC4401EC-Hypnogram.edf +3d4bafa57933cfb20c342e8cc54c15916a621454 SC4402E0-PSG.edf +037efea0fc8a6dfa8f85fa1f2fa6fd9a19f2c830 SC4402EW-Hypnogram.edf +30a533b67fdb2adac6a4e83088a07fe1bbaddb6c SC4411E0-PSG.edf 
+5df1bf20d4f29b95a2bdde853b2a157dd9530a8a SC4411EJ-Hypnogram.edf +bc8e6ea829f14da5396a4b250394c1b72d6631c3 SC4412E0-PSG.edf +f46b1dcfe4f4e3c9d4d4c8516dab9759f9c1224e SC4412EM-Hypnogram.edf +e8a5d9e0f160ae7bd0b35d75d77b4c872daa30f8 SC4421E0-PSG.edf +d2e34f9bcaac7af23da4448f742ac6ea3c895ed9 SC4421EA-Hypnogram.edf +80f246adffb92a3785f91368a77b0250aa040462 SC4422E0-PSG.edf +709251cc7ae6556544c153caf9dac7f82bba113b SC4422EA-Hypnogram.edf +194ae942cf80764e81b4cdabeed9e5a57916aab3 SC4431E0-PSG.edf +497ad7e671edab6e7adc9d35a6aa45b7fd9a706b SC4431EM-Hypnogram.edf +c45a66d27ea03bf448903fe30f17838e9a0fa0de SC4432E0-PSG.edf +10fe276e215f9406c0ddedaa48651cf480892476 SC4432EM-Hypnogram.edf +e3a09d832cb79b0095d7a311ef1b6ed7c569b79d SC4441E0-PSG.edf +68d4e44ad54069701972df66d8a81b4ca434bf2f SC4441EC-Hypnogram.edf +fe51d45e9f3e64a61fa8a5e5274b2e4951a9de43 SC4442E0-PSG.edf +efc2b86bb796b0143f61667402612dfbb85cbb78 SC4442EV-Hypnogram.edf +315db0f9d91988ddc2b198f89cc22f96190eff71 SC4451F0-PSG.edf +bc1f755c3367e378091c44481948a72fc7a928e5 SC4451FY-Hypnogram.edf +a06350e1c85b61c30c3d7d5dc640121b416fe30d SC4452F0-PSG.edf +0286d52cdf898ed8e3b17bb26b9c50ef512daf4d SC4452FW-Hypnogram.edf +e4295014c6d4474d8f7f7792c2ea088eb9e43e9f SC4461F0-PSG.edf +8980e770e58e5704bd36124f6b6bd8d5e3506e12 SC4461FA-Hypnogram.edf +53b69cb41339bc69144eaa5a5a42c2937f237fc9 SC4462F0-PSG.edf +0c6d3974e140c1e62ed2cadaed395781575af042 SC4462FJ-Hypnogram.edf +05d71b55de4c86791195391b1cec8b35e447922d SC4471F0-PSG.edf +ee235454dbfe947432f3f813c9a6384f6e42d36a SC4471FA-Hypnogram.edf +7a12c0d6f3005998472b128e06dd645a8619dae7 SC4472F0-PSG.edf +d234d5d6c396bf7ef0a2106a59ee8204429aa3c5 SC4472FA-Hypnogram.edf +c15f6a0e1802dcf74ecec41745677a4932375faf SC4481F0-PSG.edf +50fce6396aceaf35d9d7e16175053a3b78f214d0 SC4481FV-Hypnogram.edf +34d71530fd1da925ba20b4c48a07f7b18153e0c7 SC4482F0-PSG.edf +e3c48563e63eed27b071d4a7b37c45a0f9dc7eef SC4482FJ-Hypnogram.edf +23ea1f5f299c6cd99d434f014d7490621dbbc854 SC4491G0-PSG.edf +36c6c8112524c7bc9553db37601b38984946209b SC4491GJ-Hypnogram.edf +02c975bfc0773928095239b80d00ac5a7ea5880f SC4492G0-PSG.edf +3673eaad8396ef0ec36cb4299541c30653b72e1f SC4492GJ-Hypnogram.edf +1c31fc02412029bc7369979b8c9f5956420748f5 SC4501E0-PSG.edf +eb2621c1670a42eb38dfa86a9bc3326818365f3d SC4501EW-Hypnogram.edf +ff9eae25afa73115e2b184a68e3a72a39efd37e6 SC4502E0-PSG.edf +7605a1893701925ea0fdd047926bbd6c7c043875 SC4502EM-Hypnogram.edf +e12eb259c2894d45b8d0b2f0e75810c2de02237d SC4511E0-PSG.edf +e549275e9182b9e36ade5abb721098e235ecb164 SC4511EJ-Hypnogram.edf +53c5d982139d248736f6dd7ff3f97f635647eacd SC4512E0-PSG.edf +e22966c263f6ae7444704881f5249f6fb5dee0c1 SC4512EW-Hypnogram.edf +af70ffdbd3012615923f6a4901e7c0dd3a0fd8ca SC4522E0-PSG.edf +57af3eaed541229dcb2478c6050f0582e020f878 SC4522EM-Hypnogram.edf +71222ac5b7784ed1d3a79ee3e9036431d6eba9bd SC4531E0-PSG.edf +934dbfeb29f4f4db4b61e36fb8ddab4ddbf4ff94 SC4531EM-Hypnogram.edf +2d472fb64da5d05a546f780da876b90ad26208f9 SC4532E0-PSG.edf +708b43e7d43a6f5719f48c11bd6a81b037aabfc4 SC4532EV-Hypnogram.edf +4d3ec2f85149bb10fed1013831c3aa1f58049229 SC4541F0-PSG.edf +a301385e6fbde02c83f2545f17cdf75d594d37ce SC4541FA-Hypnogram.edf +2909f5b0d3fdb89e19d42b406798e9cbb4615bb6 SC4542F0-PSG.edf +9548ed641fb961fa46706339891a9453b731369f SC4542FW-Hypnogram.edf +0bf97e463cbcefb7df48bca712f29dcc74223330 SC4551F0-PSG.edf +e50b44e6b049baaeb528c31563642b2a2b933834 SC4551FC-Hypnogram.edf +dfa0adaae50110bdd0077483c31d57956020fcb9 SC4552F0-PSG.edf +7380403f8d72fa4c30013cd026cc1dad23ac2b3e SC4552FW-Hypnogram.edf 
+1a9baf1b072ca9d2784a404292169ff3177ea83f SC4561F0-PSG.edf +b31a2dfe652508df46f6afe03ab904c333f7b818 SC4561FJ-Hypnogram.edf +4c7081edf572cadee51d30174cd65aa6c658f5a9 SC4562F0-PSG.edf +676ab92dbc6532f67d672f80337c71f817fd3a6d SC4562FJ-Hypnogram.edf +e67f3bd381ddfb96d584f6c6d6f6762087d6553d SC4571F0-PSG.edf +08ee39eb94d819968512297ca883f9bca046de9c SC4571FV-Hypnogram.edf +deb2aef7a6a4b502c819345a7151ffc2529d4ba7 SC4572F0-PSG.edf +7a38cbe581167dfec27a15935e6d386b228616fa SC4572FC-Hypnogram.edf +16a1edbd6a089386fd7de72aef802182d0a2959d SC4581G0-PSG.edf +bfc729575cfdf5f409be2de47dad4e00d43195bf SC4581GM-Hypnogram.edf +9da93f4c2459dd4fe2e5ee6a171904d4f604cd6e SC4582G0-PSG.edf +acbade13cfae4fc5fbda2d0766feea83d114aa23 SC4582GP-Hypnogram.edf +017793b040df8a860df0e43e3e0a496e2cb3f9c1 SC4591G0-PSG.edf +f3bb949a7f82acb7fd3d8f35e92efee1402a383f SC4591GY-Hypnogram.edf +1e284bddd7952862327c83092db21805e6ab6c38 SC4592G0-PSG.edf +58d1678e9ec9f49c9c6a15031dee26d802026851 SC4592GY-Hypnogram.edf +ece6d6ce09fac6fc521cf3f1b536f1ea2a8a1778 SC4601E0-PSG.edf +8f77b05fe58f43cdfdcdba7cc3d27abcac7d37f2 SC4601EC-Hypnogram.edf +0e50df304ced29651267f43689ce49e063f808d6 SC4602E0-PSG.edf +1c52de92668fe4c89cd5e270e17017ef47880991 SC4602EJ-Hypnogram.edf +2cc6e418c0b7af472aa34d2bbd5ece85bdb6a879 SC4611E0-PSG.edf +f5715ab48f24221c28c1d5c45508c8bb58c912ec SC4611EG-Hypnogram.edf +6593e1af07101fa4c5bce8984296858be17e7d4f SC4612E0-PSG.edf +cedb61bbe7a273b12f45579963d5a84f2ab21811 SC4612EA-Hypnogram.edf +31cd2cae56977c6b872311f2a6e60827748b973d SC4621E0-PSG.edf +7acc5296b33ca4eee8d6577064c8c651ee96e527 SC4621EV-Hypnogram.edf +7a7e226d47dccd959305e3f633686335c8e66557 SC4622E0-PSG.edf +9957c9c9e0c705aac0f7125f411b2531a722601c SC4622EJ-Hypnogram.edf +6dfb32aa4c94968a52d61b90a38573d178669bfb SC4631E0-PSG.edf +48e28f93fc71ffc539776196f9d9d1365415e0b4 SC4631EM-Hypnogram.edf +3baa8081b30cc3dfece9d550289dfc94812530d5 SC4632E0-PSG.edf +cd2765ebdabc66cb4ac2320d02e3b7ab0340ede4 SC4632EA-Hypnogram.edf +0e5d109a929490cbecf59573577a97df07a05cd0 SC4641E0-PSG.edf +7b896dc5b34d71381d8462001dc3e05b145cf48c SC4641EP-Hypnogram.edf +03169b7ee9de83b2e17e9bd0d6274965e9518b37 SC4642E0-PSG.edf +d8a870d26e468a643eaebe3275e5e2912690c0d8 SC4642EP-Hypnogram.edf +f2134a2ad001bc146f3e2d9d76cb7f00f03bbe52 SC4651E0-PSG.edf +fad4311c7e11a9aa9a73a8e48d6fa966db61e71d SC4651EP-Hypnogram.edf +aa66553cb0132634d7d11ffe7fab80aa5119b3d7 SC4652E0-PSG.edf +6ed9c4f66c03e56f86730ddd8986f3600c040d4a SC4652EG-Hypnogram.edf +c6057505d2acf7b08371e266cf0fca1bfeb1e4e1 SC4661E0-PSG.edf +06474e72126d2a00c1968e70730e1deac060f94e SC4661EJ-Hypnogram.edf +24d278194360dc78ebd0cfe940fb4d5f7f93ccbc SC4662E0-PSG.edf +07ca0fbfb6030289a089f84e50d7bbfd043f31ad SC4662EJ-Hypnogram.edf +4357aa9fedf0b53896d41e5dccd7b525f7212177 SC4671G0-PSG.edf +459889157743c434933194446af5168cb145dfcb SC4671GJ-Hypnogram.edf +fd86b31a5c22176e1887e2fac460edce42bd2fdf SC4672G0-PSG.edf +dedb182b8c063cefabf1763eb19cd26d0608017f SC4672GV-Hypnogram.edf +3f60b5ad5e1092e90c38f2072b3c041bd7313550 SC4701E0-PSG.edf +196a388f60ee4aecfa982f89e2db03ff91e906e7 SC4701EC-Hypnogram.edf +a6853fee26b1541f85be7ddc3f42f06ccfe2fcfc SC4702E0-PSG.edf +464f7382ec11703b5bc6512930fdfbb1ab6d030a SC4702EA-Hypnogram.edf +e97d691bfecf770ca4e47289b846886c16ef19fb SC4711E0-PSG.edf +81ec5d0288f36c4368e5f06f21980f99774bf533 SC4711EC-Hypnogram.edf +9b99be6cb45af22bdbead7ea01f1375631c9b365 SC4712E0-PSG.edf +66b121441a45ae19852b7002fd78c2caf236631a SC4712EA-Hypnogram.edf +5c9caa01cc1f8065f87195c9f2dc2aeebf83c03d SC4721E0-PSG.edf 
+efe62b1e8bac1ea08dbf12374ca6812a6f271d5e SC4721EC-Hypnogram.edf +a473f32a6075e9ed830a8e9a246129e05959e8b7 SC4722E0-PSG.edf +efb2358de27da4219f64f7bfb37912dc9efb0281 SC4722EM-Hypnogram.edf +b03e4a2df4d086778f3426ed7b6c5bf800cbfe92 SC4731E0-PSG.edf +eb3dc65d7184d676a6678a70b18730d11a414588 SC4731EM-Hypnogram.edf +574ff5c0634137f7d5c51eb5f7626b451f1f9b9d SC4732E0-PSG.edf +77a523ca9ef4698885b681bf4e27d28dc5c58424 SC4732EJ-Hypnogram.edf +e6ff7462f4ce401e9aff9b3d9c93f0710bc37678 SC4741E0-PSG.edf +bda4d1ab190f4160ec7a3f4420e30d718f02369e SC4741EA-Hypnogram.edf +2b09f78a2f276061c8758a55585fae7355b38111 SC4742E0-PSG.edf +d4bb4266859c2f92ae8ba96111d59d8ab467f6a0 SC4742EC-Hypnogram.edf +17c356a283b026e507331209512453573bcfebe5 SC4751E0-PSG.edf +d35737e86979127ea01b95dcecea018dd2e44f45 SC4751EC-Hypnogram.edf +b650a49d6e3bb81971e4689c720ee079404857e6 SC4752E0-PSG.edf +3d1c86d8d7ecb6ff79ee12cb950690e929394161 SC4752EM-Hypnogram.edf +8bde3f0d5ab6a592f229dfd7886341b3f800bdb3 SC4761E0-PSG.edf +3dbf15f28a293ac89dcf458d844a8c6443aaf1e6 SC4761EP-Hypnogram.edf +7bdc8eacf1a6502c8f007b08556b7e8b52180d44 SC4762E0-PSG.edf +f6ae10f082a10ead671bfd5fdc50f62c42b9f10d SC4762EG-Hypnogram.edf +ac8c2be9175cb02e00cccb5d5df2acfaf05971cc SC4771G0-PSG.edf +09e80b973502d89368d7823ad4aec7417b735f6e SC4771GC-Hypnogram.edf +eea8671791936358037e5d096491865069989a85 SC4772G0-PSG.edf +25a3b8859091a70ca0cff9ebb777879aa156689e SC4772GC-Hypnogram.edf +0ce00a144dd9bc1b0e20cd30e6501a3852e4dbef SC4801G0-PSG.edf +f82d2b8e45723f2a69f8c30286cc68486b0792a6 SC4801GC-Hypnogram.edf +8959ada929c07945757bd6c9ef0267e7c9427a66 SC4802G0-PSG.edf +41ff2d1118425f5828342c07aa58b9d346755b1a SC4802GV-Hypnogram.edf +dcae3307af54ccf5349945e2fa493464de0a5da2 SC4811G0-PSG.edf +2406ce37b86fc3c7492a3ebe89ae58d15686b33d SC4811GG-Hypnogram.edf +fd93757cf6bcf45854fca960a067612352e05547 SC4812G0-PSG.edf +244b3bbb4987db0a9cef85950d14899ab9a3aec4 SC4812GV-Hypnogram.edf +9008c6ffc917fb90a3d399e768fe3c563a144a2f SC4821G0-PSG.edf +59534244c603cd5c3c27db26ae2f014983ec6c9b SC4821GC-Hypnogram.edf +84f9a60f6b0e7ac33388d8f6492096bcfa60bc18 SC4822G0-PSG.edf +8d14c371bc290658469729addee4461866bb67e2 SC4822GC-Hypnogram.edf +b9d11484126ebff1884034396d6a20c62c0ef48d ST7011J0-PSG.edf +ff28e5e01296cefed49ae0c27cfb3ebc42e710bf ST7011JP-Hypnogram.edf +b97c67d2ec40721349fd6faea32ea7155a11940a ST7012J0-PSG.edf +7a98a0ebba9e5e8fc4aac9ab82849385570d7789 ST7012JP-Hypnogram.edf +552e579d96e6c4ae083c7e1422e11b945ebcdabd ST7021J0-PSG.edf +635b07240047ade50649ff0f72ccde792f464f09 ST7021JM-Hypnogram.edf +ebabfa224599201d9baf91311f78f6410971810f ST7022J0-PSG.edf +228c608743abcc28f8c4946e8394ecf8e6ada89c ST7022JM-Hypnogram.edf +41f8e344b9872d93c8c2f2da283252231584b08f ST7041J0-PSG.edf +422655bae4525d121bd45fead048207be9b34c4b ST7041JO-Hypnogram.edf +229ee3bb4d060332c219c3dc1153732ab5499d57 ST7042J0-PSG.edf +eff297358a0c9d175109ba692ac3f9f4cd2c08ed ST7042JO-Hypnogram.edf +17b186214e8944667571f52098564e377b32d695 ST7051J0-PSG.edf +d7696bd1b891dd85e96e20ea727dcebe49ab6dfd ST7051JA-Hypnogram.edf +489fcb38c07688192d9c0eae5455d95241028ad8 ST7052J0-PSG.edf +64f2718c004e64ab598979da139b90452febc9bf ST7052JA-Hypnogram.edf +9fb2b4ed47a6d4b2f0b60a354123e491e8738b19 ST7061J0-PSG.edf +fd9214d026453fce71efa2975ea732e1c1458f69 ST7061JR-Hypnogram.edf +afc5599194648da5568dafa1a811818e77df4842 ST7062J0-PSG.edf +c2a4abe15f08f230b734a328494ab0d2ae9dc786 ST7062JR-Hypnogram.edf +010a65ad86b79d19c372a421f0e7c975e56278c8 ST7071J0-PSG.edf +bc08c797bb7aaf92de1c869d46c6dd4590939996 ST7071JA-Hypnogram.edf 
+15c5aa5591e35d60ba25044cdd4b3d748d3c0cfc ST7072J0-PSG.edf +1a7813b7a2389c0346e3844835590b9cb2f40f56 ST7072JA-Hypnogram.edf +cb66a0493d90d0d1204936e3e7c944ed536265e3 ST7081J0-PSG.edf +8259b52c62203b85268d23b3a2d87605fdcfa2a6 ST7081JW-Hypnogram.edf +b1cb29c7a7321b7e628d04a477338c4f62f0c093 ST7082J0-PSG.edf +bc33c3aba61c0fa937ef56d4ce7b1468c80663b5 ST7082JW-Hypnogram.edf +b046dd63d92339914eca0489d8a4c566b69e7723 ST7091J0-PSG.edf +af845641a8118d004bcfa6b597f23517e3a752e9 ST7091JE-Hypnogram.edf +2986f4d64f5118c5e356a2abe6bf86521ffde339 ST7092J0-PSG.edf +ec89bb908ff70e123ffa94bc2c11bb1ce54bcb6a ST7092JE-Hypnogram.edf +5662b560f095b8397303cced87e43d407a0d18f7 ST7101J0-PSG.edf +5919542c566d882fbf947c66f4858ad17199103a ST7101JE-Hypnogram.edf +f697a140f18d1005107fcbb7c81d85a5e8cb6ec6 ST7102J0-PSG.edf +1f05e92c9ca076350f981d0ec75ad720606bacbc ST7102JE-Hypnogram.edf +e2bf9db482f230a56372603d23fb12f5c56062f7 ST7111J0-PSG.edf +5964553fe07cbca302526b2153a2507f7d02fab8 ST7111JE-Hypnogram.edf +d3c7907b9b1e4f087f31bd655548b8673b6ec735 ST7112J0-PSG.edf +e4d8406eaca361d2c5d9953b3c67ed1098dd5925 ST7112JE-Hypnogram.edf +6e90bac48e48f71e5572944a364009eab6ea818d ST7121J0-PSG.edf +a991ed3d8be6d55ee563545077f3d280466a4989 ST7121JE-Hypnogram.edf +ae7426c464296ec0a839ccaa9763e3f2c57f41f1 ST7122J0-PSG.edf +b6c2c21e3cf17b371b31af78c64f28aa5811e36f ST7122JE-Hypnogram.edf +d0d6c83b76f627b067e0daac3c181e3666f8ab08 ST7131J0-PSG.edf +91ee1bd29b156b33e03cb8c324a8fac15ec06674 ST7131JR-Hypnogram.edf +54a50dcc40e3d6677b80c629b2f908339d9a7c3e ST7132J0-PSG.edf +028a5c4ed911d67a17b45f12966b32c46949d374 ST7132JR-Hypnogram.edf +6bf8feeabc2259d15f1f535abda90caacc8d4a86 ST7141J0-PSG.edf +203e78e02a92a9f85f07790398f64c66f248e5cc ST7141JE-Hypnogram.edf +b42eb28089bbdcbf3244dead53fd01d5f5ac3ddf ST7142J0-PSG.edf +1f7cc3a1923dd6a3504c82d76f820555ad0b6a1b ST7142JE-Hypnogram.edf +c0df1253b6509c4b4ed9e1283f26cf206a8c725c ST7151J0-PSG.edf +cfcb0089e22244bc5047f61e72a39735cbdc36cf ST7151JA-Hypnogram.edf +faefa07a1ca180861d6f26d5f35285c009dca21e ST7152J0-PSG.edf +27e9b4527eea33ded9072db3c6626f94a966da58 ST7152JA-Hypnogram.edf +8a4f1c44a17b5d665cc30f1141d003043274ac2b ST7161J0-PSG.edf +5a1ef1d375b01f83264e84db4af58acded68f15e ST7161JM-Hypnogram.edf +66925c8fa9f6da18f8590dcf2a6174cfe46e912d ST7162J0-PSG.edf +18b3d7eb9685ec8131fc0a8f81ba6205122595dc ST7162JM-Hypnogram.edf +67c47cb92de8806c60303a4baa87ca6cf52a2245 ST7171J0-PSG.edf +13c371fc4384751cc4bdd3044c6a0813ea12816e ST7171JA-Hypnogram.edf +a46118a5ca9cfaa62ca11c6a8b079e82877305ef ST7172J0-PSG.edf +8de0f3f59dd27d07f5f6a74216814ced08f104b5 ST7172JA-Hypnogram.edf +501f2f9d9ebe15e6dfc86fda6e90f9a54a39660a ST7181J0-PSG.edf +483aa0b448393d61043c98c204c93d4c60abb6bd ST7181JR-Hypnogram.edf +0eab40d3687a2cf708e48137eab26c0c43b75773 ST7182J0-PSG.edf +50efc607882659f8229db773703f5b973b471ed4 ST7182JR-Hypnogram.edf +b1b10cd45a7c0f91286c6fc3f755e59af483bac1 ST7191J0-PSG.edf +e7fcb89cf0f1484ab114bf40dcf2bf4cd413696b ST7191JR-Hypnogram.edf +e80de913aa41b987a43d94cf8f0106d61e4e883b ST7192J0-PSG.edf +def09a7d469984005b0c8414b7995ae8e269fd15 ST7192JR-Hypnogram.edf +454233ae9e6a948848030c5f4d9e60dfcb0facde ST7201J0-PSG.edf +17a0e8aebb885a960a74343bace57d2ab0b6296a ST7201JO-Hypnogram.edf +1e97e392968415da67432842c952344b6d3cdc8c ST7202J0-PSG.edf +ed26efdb6b2d9e815f2a725970262cb9c15c7b98 ST7202JO-Hypnogram.edf +c6582cfa8fcf6542a688fa8842011a93d86f2c60 ST7211J0-PSG.edf +b8756397056f623674c3b03db808b2c8c64b0a0a ST7211JJ-Hypnogram.edf +389f3920b39b4b9ad4fba6f91198299b7c6f6676 ST7212J0-PSG.edf 
+e25e47adf0c0f09df542ef061272ed9569fb80ea ST7212JJ-Hypnogram.edf
+58315bec82d381dec56bf96924a94014462bb608 ST7221J0-PSG.edf
+7656827835362b7b44b296bad83ff6001e14f489 ST7221JA-Hypnogram.edf
+4961a08b87416246b8b8186190eca0e96da6a50d ST7222J0-PSG.edf
+da840db60086e43a2429fb1322ede5e5976b3cda ST7222JA-Hypnogram.edf
+7a850ce4bc6bd14ea072f3a45b002f8015cf2f14 ST7241J0-PSG.edf
+bbaac4f2c2f330f70583eb179d855fcf42b4fbff ST7241JO-Hypnogram.edf
+5c8bd182bfc9609929094769718b2835fe1099ad ST7242J0-PSG.edf
+f70b3dfce2c14f01221a66a4acb522df1affffdb ST7242JO-Hypnogram.edf
diff --git a/mne/datasets/sleep_physionet/__init__.py b/mne/datasets/sleep_physionet/__init__.py
new file mode 100644
index 0000000..aff3f92
--- /dev/null
+++ b/mne/datasets/sleep_physionet/__init__.py
@@ -0,0 +1,5 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from . import age, temazepam, _utils
diff --git a/mne/datasets/sleep_physionet/_utils.py b/mne/datasets/sleep_physionet/_utils.py
new file mode 100644
index 0000000..b97d061
--- /dev/null
+++ b/mne/datasets/sleep_physionet/_utils.py
@@ -0,0 +1,244 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import os
+import os.path as op
+
+import numpy as np
+
+from ...utils import _check_pandas_installed, _on_missing, _TempDir, verbose
+from ..utils import _downloader_params, _get_path
+
+AGE_SLEEP_RECORDS = op.join(op.dirname(__file__), "age_records.csv")
+TEMAZEPAM_SLEEP_RECORDS = op.join(op.dirname(__file__), "temazepam_records.csv")
+
+TEMAZEPAM_RECORDS_URL = (
+    "https://physionet.org/physiobank/database/sleep-edfx/ST-subjects.xls"  # noqa: E501
+)
+TEMAZEPAM_RECORDS_URL_SHA1 = "f52fffe5c18826a2bd4c5d5cb375bb4a9008c885"
+
+AGE_RECORDS_URL = "https://physionet.org/physiobank/database/sleep-edfx/SC-subjects.xls"
+AGE_RECORDS_URL_SHA1 = "0ba6650892c5d33a8e2b3f62ce1cc9f30438c54f"
+
+sha1sums_fname = op.join(op.dirname(__file__), "SHA1SUMS")
+
+
+def _fetch_one(fname, hashsum, path, force_update, base_url):
+    import pooch
+
+    # Fetch the file
+    url = base_url + "/" + fname
+    destination = op.join(path, fname)
+    if op.isfile(destination) and not force_update:
+        return destination, False
+    if op.isfile(destination):
+        os.remove(destination)
+    if not op.isdir(op.dirname(destination)):
+        os.makedirs(op.dirname(destination))
+    downloader = pooch.HTTPDownloader(**_downloader_params())
+    pooch.retrieve(
+        url=url,
+        known_hash=f"sha1:{hashsum}",
+        path=path,
+        downloader=downloader,
+        fname=fname,
+    )
+    return destination, True
+
+
+@verbose
+def _data_path(path=None, verbose=None):
+    """Get path to local copy of the PhysioNet Polysomnography dataset.
+
+    This is a low-level function useful for getting a local copy of a
+    remote Polysomnography dataset :footcite:`KempEtAl2000` which is available
+    at PhysioNet :footcite:`GoldbergerEtAl2000`.
+
+    Parameters
+    ----------
+    path : None | str
+        Location of where to look for the data storing location.
+        If None, the environment variable or config parameter
+        ``PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist, the "~/mne_data"
+        directory is used. If the dataset is not found under the given path,
+        the data will be automatically downloaded to the specified folder.
+    %(verbose)s
+
+    Returns
+    -------
+    path : str
+        Local path to the "physionet-sleep-data" directory where the data
+        are stored.
+
+    References
+    ----------
+    .. footbibliography::
+    """  # noqa: E501
+    key = "PHYSIONET_SLEEP_PATH"
+    name = "PHYSIONET_SLEEP"
+    path = _get_path(path, key, name)
+    return op.join(path, "physionet-sleep-data")
+
+
+def _update_sleep_temazepam_records(fname=TEMAZEPAM_SLEEP_RECORDS):
+    """Helper function to download PhysioNet's temazepam dataset records."""
+    import pooch
+
+    pd = _check_pandas_installed()
+    tmp = _TempDir()
+
+    # Download subjects info.
+    subjects_fname = op.join(tmp, "ST-subjects.xls")
+    downloader = pooch.HTTPDownloader(**_downloader_params())
+    pooch.retrieve(
+        url=TEMAZEPAM_RECORDS_URL,
+        known_hash=f"sha1:{TEMAZEPAM_RECORDS_URL_SHA1}",
+        path=tmp,
+        downloader=downloader,
+        fname=op.basename(subjects_fname),
+    )
+
+    # Load and massage the checksums.
+    sha1_df = pd.read_csv(
+        sha1sums_fname, sep=" ", header=None, names=["sha", "fname"], engine="python"
+    )
+    select_age_records = sha1_df.fname.str.startswith(
+        "ST"
+    ) & sha1_df.fname.str.endswith("edf")
+    sha1_df = sha1_df[select_age_records]
+    sha1_df["id"] = [name[:6] for name in sha1_df.fname]
+
+    # Load and massage the data.
+    data = pd.read_excel(subjects_fname, header=[0, 1])
+    data = data.set_index(("Subject - age - sex", "Nr"))
+    data.index.name = "subject"
+    data.columns.names = [None, None]
+    data = (
+        data.set_index(
+            [("Subject - age - sex", "Age"), ("Subject - age - sex", "M1/F2")],
+            append=True,
+        )
+        .stack(level=0)
+        .reset_index()
+    )
+
+    data = data.rename(
+        columns={
+            ("Subject - age - sex", "Age"): "age",
+            ("Subject - age - sex", "M1/F2"): "sex",
+            "level_3": "drug",
+        }
+    )
+    data["id"] = [f"ST7{s:02d}{n:1d}" for s, n in zip(data.subject, data["night nr"])]
+
+    data = pd.merge(sha1_df, data, how="outer", on="id")
+    data["record type"] = (
+        data.fname.str.split("-", expand=True)[1]
+        .str.split(".", expand=True)[0]
+        .astype("category")
+    )
+
+    data = data.set_index(
+        ["id", "subject", "age", "sex", "drug", "lights off", "night nr", "record type"]
+    ).unstack()
+    data.columns = [l1 + "_" + l2 for l1, l2 in data.columns]
+    data = data.reset_index().drop(columns=["id"])
+
+    data["sex"] = data.sex.astype("category").cat.rename_categories(
+        {1: "male", 2: "female"}
+    )
+
+    data["drug"] = data["drug"].str.split(expand=True)[0]
+    data["subject_orig"] = data["subject"]
+    data["subject"] = data.index // 2  # to make sure index is from 0 to 21
+
+    # Save the data.
+    data.to_csv(fname, index=False)
+
+
+def _update_sleep_age_records(fname=AGE_SLEEP_RECORDS):
+    """Helper function to download PhysioNet's age dataset records."""
+    import pooch
+
+    pd = _check_pandas_installed()
+    tmp = _TempDir()
+
+    # Download subjects info.
+    subjects_fname = op.join(tmp, "SC-subjects.xls")
+    downloader = pooch.HTTPDownloader(**_downloader_params())
+    pooch.retrieve(
+        url=AGE_RECORDS_URL,
+        known_hash=f"sha1:{AGE_RECORDS_URL_SHA1}",
+        path=tmp,
+        downloader=downloader,
+        fname=op.basename(subjects_fname),
+    )
+
+    # Load and massage the checksums.
+    sha1_df = pd.read_csv(
+        sha1sums_fname, sep=" ", header=None, names=["sha", "fname"], engine="python"
+    )
+    select_age_records = sha1_df.fname.str.startswith(
+        "SC"
+    ) & sha1_df.fname.str.endswith("edf")
+    sha1_df = sha1_df[select_age_records]
+    sha1_df["id"] = [name[:6] for name in sha1_df.fname]
+
+    # Load and massage the data.
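+    # The SC-subjects.xls sheet has one row per (subject, night); the steps
+    # below normalize the column names, recode sex (1=female, 2=male in this
+    # sheet), build the "SC4ssN" id that prefixes each EDF filename, and join
+    # in the SHA1 checksums loaded above.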
+    data = pd.read_excel(subjects_fname)
+    data = data.rename(
+        index=str, columns={"sex (F=1)": "sex", "LightsOff": "lights off"}
+    )
+    data["sex"] = data.sex.astype("category").cat.rename_categories(
+        {1: "female", 2: "male"}
+    )
+
+    data["id"] = [f"SC4{s:02d}{n:1d}" for s, n in zip(data.subject, data.night)]
+
+    data = data.set_index("id").join(sha1_df.set_index("id")).dropna()
+
+    data["record type"] = (
+        data.fname.str.split("-", expand=True)[1]
+        .str.split(".", expand=True)[0]
+        .astype("category")
+    )
+
+    data = data.reset_index().drop(columns=["id"])
+    data = data[
+        ["subject", "night", "record type", "age", "sex", "lights off", "sha", "fname"]
+    ]
+
+    # Save the data.
+    data.to_csv(fname, index=False)
+
+
+def _check_subjects(subjects, n_subjects, missing=None, on_missing="raise"):
+    """Check whether subjects are available.
+
+    Parameters
+    ----------
+    subjects : list
+        Subject numbers to be checked.
+    n_subjects : int
+        Number of subjects available.
+    missing : list | None
+        Subject numbers that are missing.
+    on_missing : 'raise' | 'warn' | 'ignore'
+        What to do if one or several subjects are not available. Valid keys
+        are 'raise' | 'warn' | 'ignore'. Default is 'raise'. If on_missing
+        is 'warn' it will proceed but warn, if 'ignore' it will proceed
+        silently.
+    """
+    valid_subjects = np.arange(n_subjects)
+    if missing is not None:
+        valid_subjects = np.setdiff1d(valid_subjects, missing)
+    unknown_subjects = np.setdiff1d(subjects, valid_subjects)
+    if unknown_subjects.size > 0:
+        subjects_list = ", ".join([str(s) for s in unknown_subjects])
+        msg = (
+            f"This dataset contains subjects 0 to {n_subjects - 1} with "
+            f"missing subjects {missing}. Unknown subjects: "
+            f"{subjects_list}."
+        )
+        _on_missing(on_missing, msg)
diff --git a/mne/datasets/sleep_physionet/age.py b/mne/datasets/sleep_physionet/age.py
new file mode 100644
index 0000000..c14282e
--- /dev/null
+++ b/mne/datasets/sleep_physionet/age.py
@@ -0,0 +1,153 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import os
+import time
+
+import numpy as np
+
+from ...utils import verbose
+from ..utils import _log_time_size
+from ._utils import (
+    AGE_SLEEP_RECORDS,
+    _check_subjects,
+    _data_path,
+    _fetch_one,
+    _on_missing,
+)
+
+data_path = _data_path  # expose _data_path(..) as data_path(..)
+
+BASE_URL = "https://physionet.org/physiobank/database/sleep-edfx/sleep-cassette/"
+
+
+@verbose
+def fetch_data(
+    subjects,
+    recording=(1, 2),
+    path=None,
+    force_update=False,
+    base_url=BASE_URL,
+    on_missing="raise",
+    *,
+    verbose=None,
+):  # noqa: D301, E501
+    """Get paths to local copies of PhysioNet Polysomnography dataset files.
+
+    This will fetch data from the publicly available subjects from PhysioNet's
+    study of age effects on sleep in healthy subjects
+    :footcite:`MourtazaevEtAl1995,GoldbergerEtAl2000`. This
+    corresponds to a subset of 153 recordings from 37 males and 41 females that
+    were 25-101 years old at the time of the recordings. There are two night
+    recordings per subject except for subjects 13, 36 and 52, which each have
+    one record missing due to missing recording hardware.
+
+    See more details on the
+    `PhysioNet sleep-edfx page <https://physionet.org/physiobank/database/sleep-edfx/>`_.
+
+    Parameters
+    ----------
+    subjects : list of int
+        The subjects to use. Can be in the range of 0-82 (inclusive), however
+        the following subjects are not available: 39, 68, 69, 78 and 79.
+    recording : list of int
+        The night recording indices. Valid values are: [1], [2], or [1, 2].
+        The following recordings are not available: recording 1 for subjects 36
+        and 52, and recording 2 for subject 13.
+    path : None | str
+        Location of where to look for the PhysioNet data storing location.
+        If None, the environment variable or config parameter
+        ``PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist, the "~/mne_data"
+        directory is used. If the Polysomnography dataset is not found under
+        the given path, the data will be automatically downloaded to the
+        specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    base_url : str
+        The URL root.
+    on_missing : 'raise' | 'warn' | 'ignore'
+        What to do if one or several recordings are not available. Valid keys
+        are 'raise' | 'warn' | 'ignore'. Default is 'raise'. If on_missing
+        is 'warn' it will proceed but warn, if 'ignore' it will proceed
+        silently.
+    %(verbose)s
+
+    Returns
+    -------
+    paths : list
+        List of local data paths of the given type.
+
+    See Also
+    --------
+    mne.datasets.sleep_physionet.temazepam.fetch_data
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> from mne.datasets import sleep_physionet
+        >>> sleep_physionet.age.fetch_data(subjects=[0])  # doctest: +SKIP
+
+    This would download data for subject 0 if it isn't there already.
+
+    References
+    ----------
+    .. footbibliography::
+    """  # noqa: E501
+    t0 = time.time()
+    records = np.loadtxt(
+        AGE_SLEEP_RECORDS,
+        skiprows=1,
+        delimiter=",",
+        usecols=(0, 1, 2, 6, 7),
+        dtype={
+            "names": ("subject", "record", "type", "sha", "fname"),
+            "formats": ("<i2", "i1", "<S9", "S40", "<S22"),
+        },
+    )
+    psg_records = records[np.where(records["type"] == b"PSG")]
+    hyp_records = records[np.where(records["type"] == b"Hypnogram")]
+
+    path = data_path(path=path)
+    params = [path, force_update, base_url]
+
+    _check_subjects(subjects, 83, missing=[39, 68, 69, 78, 79], on_missing=on_missing)
+
+    # warn/raise if a requested recording is known to be missing (see docstring)
+    if 1 in recording:
+        for subj in sorted(set(subjects) & {36, 52}):
+            _on_missing(on_missing, f"Recording 1 for subject {subj} is not available.")
+    if 2 in recording and 13 in subjects:
+        _on_missing(on_missing, "Recording 2 for subject 13 is not available.")
+
+    fnames = []
+    sz = 0
+    for subject in subjects:
+        for idx in np.where(psg_records["subject"] == subject)[0]:
+            if psg_records["record"][idx] in recording:
+                psg_fname, pdl = _fetch_one(
+                    psg_records["fname"][idx].decode(),
+                    psg_records["sha"][idx].decode(),
+                    *params,
+                )
+                hyp_fname, hdl = _fetch_one(
+                    hyp_records["fname"][idx].decode(),
+                    hyp_records["sha"][idx].decode(),
+                    *params,
+                )
+                fnames.append([psg_fname, hyp_fname])
+                if pdl:
+                    sz += os.path.getsize(psg_fname)
+                if hdl:
+                    sz += os.path.getsize(hyp_fname)
+    if sz > 0:
+        _log_time_size(t0, sz)
+    return fnames
diff --git a/mne/datasets/sleep_physionet/age_records.csv b/mne/datasets/sleep_physionet/age_records.csv
new file mode 100644
index 0000000..e172b6b
--- /dev/null
+++ b/mne/datasets/sleep_physionet/age_records.csv
@@ -0,0 +1,307 @@
+subject,night,record type,age,sex,lights off,sha,fname
+0,1,PSG,33,female,00:38:00,adabd3b01fc7bb75c523a974f38ee3ae4e57b40f,SC4001E0-PSG.edf
+0,1,Hypnogram,33,female,00:38:00,21c998eadc8b1e3ea6727d3585186b8f76e7e70b,SC4001EC-Hypnogram.edf
+0,2,PSG,33,female,21:57:00,c6b6d7a8605cc7e7602b6028ee77f6fbf5f7581d,SC4002E0-PSG.edf
+0,2,Hypnogram,33,female,21:57:00,386230188a3552b1fc90bba0fb7476ceaca174b6,SC4002EC-Hypnogram.edf
+1,1,PSG,33,female,22:44:00,4d17451f7847355bcab17584de05e7e1df58c660,SC4011E0-PSG.edf
+1,1,Hypnogram,33,female,22:44:00,d582a3cbe2db481a362af890bc5a2f5ca7c878dc,SC4011EH-Hypnogram.edf
+1,2,PSG,33,female,22:15:00,a47d525f5147904b6890231e2ad338359c7ab94c,SC4012E0-PSG.edf
+1,2,Hypnogram,33,female,22:15:00,fa99f60d7f54617cdd1128aff4f21c4daed763c7,SC4012EC-Hypnogram.edf
+2,1,PSG,26,female,22:50:00,8b135afa7fb93bb5f1998fda50355944777c245e,SC4021E0-PSG.edf
+2,1,Hypnogram,26,female,22:50:00,91043cfe46695088b17b6a02937b25efd674c3fb,SC4021EH-Hypnogram.edf
+2,2,PSG,26,female,22:57:00,d739e142b3b328c71b4752149901805dcd6d7e19,SC4022E0-PSG.edf
+2,2,Hypnogram,26,female,22:57:00,0c46a03699dd00e8f92a7edff99ebc4642cb3d48,SC4022EJ-Hypnogram.edf
+3,1,PSG,26,female,00:02:00,85e58dc1e3303537dade8c5827ab58328239c384,SC4031E0-PSG.edf
+3,1,Hypnogram,26,female,00:02:00,6363d8b0fdc48cf396c9abf054bb4a9696d38bdb,SC4031EC-Hypnogram.edf
+3,2,PSG,26,female,00:24:00,43963d300642b3aa840e8c468f321b8162601772,SC4032E0-PSG.edf
+3,2,Hypnogram,26,female,00:24:00,7925514bc8d2ef3f1103130f08f7b3afd2136b88,SC4032EP-Hypnogram.edf
+4,1,PSG,34,female,23:12:00,04d2b88d25f2ae4a65ba44cd9145bd12800a0e20,SC4041E0-PSG.edf
+4,1,Hypnogram,34,female,23:12:00,f148821669bd3588187b3b430bd79adf569f86d1,SC4041EC-Hypnogram.edf
+4,2,PSG,34,female,23:35:00,76253d964d7797540ffd791e6e136023ed67a485,SC4042E0-PSG.edf +4,2,Hypnogram,34,female,23:35:00,9873df429f971f8a4b720a454f6c0472b8a25ebb,SC4042EC-Hypnogram.edf +5,1,PSG,28,female,01:22:00,ea073451b65ce8a6f1a02a8cc2b89d1a162ca0ae,SC4051E0-PSG.edf +5,1,Hypnogram,28,female,01:22:00,4159ef8a3e119d6dcc1bede806f6fbc017b27a0f,SC4051EC-Hypnogram.edf +5,2,PSG,28,female,00:35:00,5a2efbd21be9b745fd534394eb2503caca7dc53f,SC4052E0-PSG.edf +5,2,Hypnogram,28,female,00:35:00,0e96482d44762df4da65dc4fdb970b342264d22a,SC4052EC-Hypnogram.edf +6,1,PSG,31,female,00:16:00,1736736e585807c14f1ae8bc87a94cae222c5170,SC4061E0-PSG.edf +6,1,Hypnogram,31,female,00:16:00,4bf99622c67c281b25ceccd35e7050328a2946e8,SC4061EC-Hypnogram.edf +6,2,PSG,31,female,22:44:00,763c7ac059f1771a0165e5cb351b176afb1cfe15,SC4062E0-PSG.edf +6,2,Hypnogram,31,female,22:44:00,14f07411cd04d3b4b522d37c129334955287ff5f,SC4062EC-Hypnogram.edf +7,1,PSG,30,female,00:36:00,1374b34f6139b6ff7e865d8243eef39ba334ef50,SC4071E0-PSG.edf +7,1,Hypnogram,30,female,00:36:00,608024fd19a140ad233a4680e07c2495a74b69c2,SC4071EC-Hypnogram.edf +7,2,PSG,30,female,00:41:00,1c570644243d79396df612fa2b9bc027b24430e4,SC4072E0-PSG.edf +7,2,Hypnogram,30,female,00:41:00,a8da6c20b9b48189f05ab537886b59dd141374d2,SC4072EH-Hypnogram.edf +8,1,PSG,25,female,23:35:00,0e1cc2c4e1da14ab94515e3e7e75e8ad30ec99cb,SC4081E0-PSG.edf +8,1,Hypnogram,25,female,23:35:00,9ec663ffa5c17afcaca59d7829d77b9165102237,SC4081EC-Hypnogram.edf +8,2,PSG,25,female,23:37:00,d57d4aa7cbc5045f611a3a3e342b501e086ea426,SC4082E0-PSG.edf +8,2,Hypnogram,25,female,23:37:00,d43c785dba43063d7baa332671c6bac9c832b5b7,SC4082EP-Hypnogram.edf +9,1,PSG,25,female,23:02:00,b3502e0bd54683e973182c791aa962b804e79633,SC4091E0-PSG.edf +9,1,Hypnogram,25,female,23:02:00,7aa63b408c769a4a983a908b6ba41d87dd743c6e,SC4091EC-Hypnogram.edf +9,2,PSG,25,female,23:01:00,246e35852119b33d197db2f7bcfb1b46a5270a03,SC4092E0-PSG.edf +9,2,Hypnogram,25,female,23:01:00,9d85766a83231b1c6076cb293367ccc354c57eeb,SC4092EC-Hypnogram.edf +10,1,PSG,26,male,22:59:00,3ae168ff2c9c0c56f51205fdb10f05a4c6b2064e,SC4101E0-PSG.edf +10,1,Hypnogram,26,male,22:59:00,60d9c3913881e11b06ad99e9870bd1ca4d93c952,SC4101EC-Hypnogram.edf +10,2,PSG,26,male,23:07:00,86f307190961eaab0214fdc0213f8fe05812c7a5,SC4102E0-PSG.edf +10,2,Hypnogram,26,male,23:07:00,8072e2d52bc6c19b45fbd921550e5243bc5a1de7,SC4102EC-Hypnogram.edf +11,1,PSG,26,male,23:00:00,e490956b4dce01c46ba88a2b847f091bb54ea16e,SC4111E0-PSG.edf +11,1,Hypnogram,26,male,23:00:00,12db1920e2f6083c8ab1f2c24fe35dfa03715e4a,SC4111EC-Hypnogram.edf +11,2,PSG,26,male,01:14:00,ca24dc464df61144627588b29d35a85fcc7ac984,SC4112E0-PSG.edf +11,2,Hypnogram,26,male,01:14:00,54dbc39015b0a445b51189987a00e08cc27d8f0c,SC4112EC-Hypnogram.edf +12,1,PSG,26,male,00:50:00,33c72025a7a215ea5e255f4254cb0f93b1313369,SC4121E0-PSG.edf +12,1,Hypnogram,26,male,00:50:00,daa57ece807cb5325c6d1ce059f0e8a8d1c85391,SC4121EC-Hypnogram.edf +12,2,PSG,26,male,01:03:00,34f5145ab62dcc5a53ba18735519e5bb2b13841a,SC4122E0-PSG.edf +12,2,Hypnogram,26,male,01:03:00,b7af1a32d8ca15e8185e4c94213ffc18ad7f6e8a,SC4122EV-Hypnogram.edf +13,1,PSG,27,male,00:14:00,42ff97035aae6dd34ca9437857c48ac6f2ab97df,SC4131E0-PSG.edf +13,1,Hypnogram,27,male,00:14:00,5beef85170bdbb5cf2eea24a79f0f5c2c3975c4b,SC4131EC-Hypnogram.edf +14,1,PSG,27,male,22:55:00,83493e1c32d441c9e5ee3de6a024bfb5e7ab9f5f,SC4141E0-PSG.edf +14,1,Hypnogram,27,male,22:55:00,511d398f22b9b2b304de27c40740a41584ff6af2,SC4141EU-Hypnogram.edf 
+14,2,PSG,27,male,23:22:00,63d13828b7ebe0d2ed7f491d2b5520e928b9b55d,SC4142E0-PSG.edf +14,2,Hypnogram,27,male,23:22:00,6f123e6fdc90a01b83e694d9744a6d27f3c87b25,SC4142EU-Hypnogram.edf +15,1,PSG,31,male,23:56:00,5a92d49699d4de369d66d9462e91b0dcb3312649,SC4151E0-PSG.edf +15,1,Hypnogram,31,male,23:56:00,37dcbd339c95322d028b3a5466812697041cc373,SC4151EC-Hypnogram.edf +15,2,PSG,31,male,23:38:00,778626489bc4fe2c9137d2d361876d97dce97e5e,SC4152E0-PSG.edf +15,2,Hypnogram,31,male,23:38:00,294cdc47cd3d165031f7041c17f18dd013d216cb,SC4152EC-Hypnogram.edf +16,1,PSG,32,male,22:16:00,e56ff3aa366fe9a04a0fdfdd4cd862e77e8ac807,SC4161E0-PSG.edf +16,1,Hypnogram,32,male,22:16:00,56711b1bfed292032491f5cce57494629286a131,SC4161EC-Hypnogram.edf +16,2,PSG,32,male,23:16:00,722692f9940f3a1bccb9b4488c4477edf7fb128f,SC4162E0-PSG.edf +16,2,Hypnogram,32,male,23:16:00,c85647fb4bc1f382fe46bf9aaf579dc483115885,SC4162EC-Hypnogram.edf +17,1,PSG,31,male,23:58:00,f1a65522cb7d6c71ac47742535a12c88e2019dad,SC4171E0-PSG.edf +17,1,Hypnogram,31,male,23:58:00,dd257c8d922f08c2c8ca5236c9bf54da887c68e5,SC4171EU-Hypnogram.edf +17,2,PSG,31,male,00:37:00,572b81bc24c2c9482e6fc7ba9202a7bf253655e1,SC4172E0-PSG.edf +17,2,Hypnogram,31,male,00:37:00,c9a3b590748d7d6c7ad97c62222bd53d8ebaf630,SC4172EC-Hypnogram.edf +18,1,PSG,28,male,23:25:00,23674d20572853eb6d988d24378c52123f66500c,SC4181E0-PSG.edf +18,1,Hypnogram,28,male,23:25:00,51fc3df2df7d4da654f3e18ed1b233d0c60cfa80,SC4181EC-Hypnogram.edf +18,2,PSG,28,male,23:45:00,83e8cbe882ba863da9fd3c11393c95b6fec5b7a5,SC4182E0-PSG.edf +18,2,Hypnogram,28,male,23:45:00,43c487955edddb4ee2f60193a097c68c25c5dd4d,SC4182EC-Hypnogram.edf +19,1,PSG,28,male,01:50:00,d6da621dbb20dec3494a38c7d2a0363793ac5ebe,SC4191E0-PSG.edf +19,1,Hypnogram,28,male,01:50:00,defc7b9368c2d3c4ab4a294757843825a83cdb5d,SC4191EP-Hypnogram.edf +19,2,PSG,28,male,00:57:00,941353118732321d0246a1d58d72e903bd2f0d8f,SC4192E0-PSG.edf +19,2,Hypnogram,28,male,00:57:00,97b91b3067c5ecde766042fc2cff9e22f8023371,SC4192EV-Hypnogram.edf +20,1,PSG,51,female,23:10:00,38a0be6e45ddd9b1f17d09964a32e005dc5a6519,SC4201E0-PSG.edf +20,1,Hypnogram,51,female,23:10:00,83822f9970d3959ad2e0613492ae39bd0fae6068,SC4201EC-Hypnogram.edf +20,2,PSG,51,female,23:15:00,aa69f5bd47c2ae03c9d38bfe6d0e58408744b885,SC4202E0-PSG.edf +20,2,Hypnogram,51,female,23:15:00,5c5c63016b43421a523d1efcb34247e90aa6318b,SC4202EC-Hypnogram.edf +21,1,PSG,51,female,23:28:00,c106ad072dbc975a3742f7eff151219870f0c794,SC4211E0-PSG.edf +21,1,Hypnogram,51,female,23:28:00,9126937ea8a414d6ae9bc4a4194d841a891fa8a8,SC4211EC-Hypnogram.edf +21,2,PSG,51,female,23:59:00,a06ecb3f0a7b2c306f5ae4dbd83685f877cd945b,SC4212E0-PSG.edf +21,2,Hypnogram,51,female,23:59:00,a85f178b69a1cda47d11dd1e5394dfdcb58de1d4,SC4212EC-Hypnogram.edf +22,1,PSG,56,female,23:47:00,8733ea022d3778259a436507156cf3360ad8be06,SC4221E0-PSG.edf +22,1,Hypnogram,56,female,23:47:00,b158eda4f81772095c129be77f8e60ec9d81b884,SC4221EJ-Hypnogram.edf +22,2,PSG,56,female,23:14:00,211410fab6381da0dfaef4134d5a05eec935a4ec,SC4222E0-PSG.edf +22,2,Hypnogram,56,female,23:14:00,1488fbfbc149499dafa8dafff4f7504053af429f,SC4222EC-Hypnogram.edf +23,1,PSG,50,female,00:51:00,d96f1f35b2f77c7de706036c6e4114139e07b307,SC4231E0-PSG.edf +23,1,Hypnogram,50,female,00:51:00,9f6df70676d6cddcf069ceb7f408a7989af99ce2,SC4231EJ-Hypnogram.edf +23,2,PSG,50,female,00:32:00,6b493fa424c1329ea1c13543d08ba82a9f1e85b6,SC4232E0-PSG.edf +23,2,Hypnogram,50,female,00:32:00,d8ca7d694b3c48ab9d983b9cf67e17744c6b50fb,SC4232EV-Hypnogram.edf 
+24,1,PSG,54,female,23:22:00,58719e53fe18d2fc4cb1776ab5d43306beb1325d,SC4241E0-PSG.edf +24,1,Hypnogram,54,female,23:22:00,fb1432e303a8f99a2256ce682db95d88772c479f,SC4241EC-Hypnogram.edf +24,2,PSG,54,female,22:50:00,5a6277972c5f03572ed99d9ff63fb637945be778,SC4242E0-PSG.edf +24,2,Hypnogram,54,female,22:50:00,bbbf097f4cc6560fc20c903fba2c7055e1549f85,SC4242EA-Hypnogram.edf +25,1,PSG,56,female,00:32:00,7dbc0289707ff70662d367d65de7bec188484d1b,SC4251E0-PSG.edf +25,1,Hypnogram,56,female,00:32:00,e38be8134e4a36eb418ca1f06a1fe02b52d0ebf1,SC4251EP-Hypnogram.edf +25,2,PSG,56,female,23:49:00,cb3922910ea03d06c1fc5c8f15b71339dc26bc9d,SC4252E0-PSG.edf +25,2,Hypnogram,56,female,23:49:00,4cb7a383736e09125a82ef7e4f17b41130c7ac00,SC4252EU-Hypnogram.edf +26,1,PSG,51,female,23:39:00,b81c9bd1875b33713b5eb56b58f1e120841b507f,SC4261F0-PSG.edf +26,1,Hypnogram,51,female,23:39:00,501eda59557bb99d530d01bdad3579f1e1158991,SC4261FM-Hypnogram.edf +26,2,PSG,51,female,00:20:00,c9f9ad7cd751d5be91396886a2b64a7c1de564ee,SC4262F0-PSG.edf +26,2,Hypnogram,51,female,00:20:00,7ccd12803c5fc602ac1929ff3afd914b894b9143,SC4262FC-Hypnogram.edf +27,1,PSG,54,female,23:41:00,20994715d34edb26113180ee330ce287dbf57b60,SC4271F0-PSG.edf +27,1,Hypnogram,54,female,23:41:00,26c5c7f3a5c350d3505af2857835ce81252c5990,SC4271FC-Hypnogram.edf +27,2,PSG,54,female,22:58:00,9e79eb465e34b7eb6fe27ae3ce35d28d6693d44b,SC4272F0-PSG.edf +27,2,Hypnogram,54,female,22:58:00,956fe4b45d29a8999faf280a6168e332afab6abc,SC4272FM-Hypnogram.edf +28,1,PSG,56,female,23:55:00,51811913d7854f95c319076e670d988687ca667c,SC4281G0-PSG.edf +28,1,Hypnogram,56,female,23:55:00,d188150831e912081dbeda2695231177200c39f9,SC4281GC-Hypnogram.edf +28,2,PSG,56,female,00:13:00,e9f080a766a9b7a247f228e44e9c4ec67e571c95,SC4282G0-PSG.edf +28,2,Hypnogram,56,female,00:13:00,12d777787dd1975eef9015329fd774b2bfa1d53a,SC4282GC-Hypnogram.edf +29,1,PSG,51,female,22:38:00,f81c7574a5e5829e006d0b705bf5208a3349c9c7,SC4291G0-PSG.edf +29,1,Hypnogram,51,female,22:38:00,577c1345f6d070d975db5016048722f78b1b414e,SC4291GA-Hypnogram.edf +29,2,PSG,51,female,23:04:00,7416f44a3b149b4ca1fc3e53d546a093a7333bb5,SC4292G0-PSG.edf +29,2,Hypnogram,51,female,23:04:00,6e111a15160a31609761f742315df800b1311b3b,SC4292GC-Hypnogram.edf +30,1,PSG,50,male,00:09:00,7818e5a02afa89e913111d91ecd651aa3e786e5d,SC4301E0-PSG.edf +30,1,Hypnogram,50,male,00:09:00,d49df84bfea28bb241c09b922cd2dc64f57c5ae5,SC4301EC-Hypnogram.edf +30,2,PSG,50,male,00:20:00,d52859ba6a7ded3364b0d8ef2b722e1d3edda060,SC4302E0-PSG.edf +30,2,Hypnogram,50,male,00:20:00,b3d6f687831ee32f6df1da59f2d568c13f9c09d0,SC4302EV-Hypnogram.edf +31,1,PSG,54,male,23:44:00,b62f5104bddf452f4700c85997e51bec17f0243b,SC4311E0-PSG.edf +31,1,Hypnogram,54,male,23:44:00,812c34844e834b97949019741fa7f835d973725d,SC4311EC-Hypnogram.edf +31,2,PSG,54,male,23:14:00,b0a9b4922665734773abbaba06e7aab32010b862,SC4312E0-PSG.edf +31,2,Hypnogram,54,male,23:14:00,fca1935a8974eac27803e3125cea177995deca11,SC4312EM-Hypnogram.edf +32,1,PSG,57,male,00:48:00,335381ae310e9f1f053c37763eeee74d7d873471,SC4321E0-PSG.edf +32,1,Hypnogram,57,male,00:48:00,67ba7d3b97354deb31db095e748ea3a4014fae2c,SC4321EC-Hypnogram.edf +32,2,PSG,57,male,00:15:00,c9fdcfcce7e603b3289b7417891987fd67f6d921,SC4322E0-PSG.edf +32,2,Hypnogram,57,male,00:15:00,40cf9a6397a52c7deda693ca596e928cc2b9f4e9,SC4322EC-Hypnogram.edf +33,1,PSG,60,male,22:58:00,f37cb4df27286e38c604cae943169ff29b1473fc,SC4331F0-PSG.edf +33,1,Hypnogram,60,male,22:58:00,ca943e2b73c6404f929c372ebd817b7b3b71b4dd,SC4331FV-Hypnogram.edf 
+33,2,PSG,60,male,22:55:00,5bce6ea9b2d6c9bfb41065e92bf9cc05a11b5b75,SC4332F0-PSG.edf +33,2,Hypnogram,60,male,22:55:00,e4595b0313d5320b0bffefa43260485e19977e3c,SC4332FC-Hypnogram.edf +34,1,PSG,54,male,23:03:00,17de25c8f023fe632aa403a6d9525c1cde8eaef5,SC4341F0-PSG.edf +34,1,Hypnogram,54,male,23:03:00,81ba3c0d8320c9ee306f678b4bc9e6e266165886,SC4341FA-Hypnogram.edf +34,2,PSG,54,male,22:30:00,b659037447a1871f4ba72bbe496cfbe507330530,SC4342F0-PSG.edf +34,2,Hypnogram,54,male,22:30:00,e8e74c0905e89a59022ce0814ca9a050748ec9ae,SC4342FA-Hypnogram.edf +35,1,PSG,57,male,00:02:00,631900bef36d359a0f5807a7e1b202f80b0427ac,SC4351F0-PSG.edf +35,1,Hypnogram,57,male,00:02:00,a15cdf3973b77198d8276dc505dbb35cb39a9b4a,SC4351FA-Hypnogram.edf +35,2,PSG,57,male,23:30:00,325423a85890dcc921253bde7c7027d66f14033e,SC4352F0-PSG.edf +35,2,Hypnogram,57,male,23:30:00,1e0583b2a58432c964506ff44752d597753658c9,SC4352FV-Hypnogram.edf +36,2,PSG,51,male,23:59:00,30b90aaf965938d569ea362f66e2afa0c08c7017,SC4362F0-PSG.edf +36,2,Hypnogram,51,male,23:59:00,fb870d50ce3f4d961d8b061a83d21e5467e4ae6c,SC4362FC-Hypnogram.edf +37,1,PSG,52,male,23:03:00,0dc56fce13b6317f197d0b17c04f5be4af1c964f,SC4371F0-PSG.edf +37,1,Hypnogram,52,male,23:03:00,c19b6cbfdf3a33169ce9b4a5dc94f93b696a21ba,SC4371FA-Hypnogram.edf +37,2,PSG,52,male,23:05:00,c024c491dd836ed0169300e7171c276fd14b1c44,SC4372F0-PSG.edf +37,2,Hypnogram,52,male,23:05:00,97b2915a8a343efc7b785998c0532beaea2fbe91,SC4372FC-Hypnogram.edf +38,1,PSG,51,male,23:12:00,6098d2b501b82ca0ddc8893547c6990e204e8ba6,SC4381F0-PSG.edf +38,1,Hypnogram,51,male,23:12:00,fdbf653a4a675843c97d0a76ef5e4cebf5d2dbcb,SC4381FC-Hypnogram.edf +38,2,PSG,51,male,23:47:00,40ce0168d5f546fcd445996ab614f43823a7c2b1,SC4382F0-PSG.edf +38,2,Hypnogram,51,male,23:47:00,796f8507254c2d8d345171c077dbd855e112eb47,SC4382FW-Hypnogram.edf +40,1,PSG,67,female,23:30:00,28fd8ad1aee307847e2eb579763ebca18e56f540,SC4401E0-PSG.edf +40,1,Hypnogram,67,female,23:30:00,65b5671a89871351ee3da7ea800aad276a445b2a,SC4401EC-Hypnogram.edf +40,2,PSG,67,female,23:55:00,3d4bafa57933cfb20c342e8cc54c15916a621454,SC4402E0-PSG.edf +40,2,Hypnogram,67,female,23:55:00,037efea0fc8a6dfa8f85fa1f2fa6fd9a19f2c830,SC4402EW-Hypnogram.edf +41,1,PSG,66,female,23:28:00,30a533b67fdb2adac6a4e83088a07fe1bbaddb6c,SC4411E0-PSG.edf +41,1,Hypnogram,66,female,23:28:00,5df1bf20d4f29b95a2bdde853b2a157dd9530a8a,SC4411EJ-Hypnogram.edf +41,2,PSG,66,female,23:30:00,bc8e6ea829f14da5396a4b250394c1b72d6631c3,SC4412E0-PSG.edf +41,2,Hypnogram,66,female,23:30:00,f46b1dcfe4f4e3c9d4d4c8516dab9759f9c1224e,SC4412EM-Hypnogram.edf +42,1,PSG,69,female,01:30:00,e8a5d9e0f160ae7bd0b35d75d77b4c872daa30f8,SC4421E0-PSG.edf +42,1,Hypnogram,69,female,01:30:00,d2e34f9bcaac7af23da4448f742ac6ea3c895ed9,SC4421EA-Hypnogram.edf +42,2,PSG,69,female,00:22:00,80f246adffb92a3785f91368a77b0250aa040462,SC4422E0-PSG.edf +42,2,Hypnogram,69,female,00:22:00,709251cc7ae6556544c153caf9dac7f82bba113b,SC4422EA-Hypnogram.edf +43,1,PSG,73,female,01:30:00,194ae942cf80764e81b4cdabeed9e5a57916aab3,SC4431E0-PSG.edf +43,1,Hypnogram,73,female,01:30:00,497ad7e671edab6e7adc9d35a6aa45b7fd9a706b,SC4431EM-Hypnogram.edf +43,2,PSG,73,female,00:47:00,c45a66d27ea03bf448903fe30f17838e9a0fa0de,SC4432E0-PSG.edf +43,2,Hypnogram,73,female,00:47:00,10fe276e215f9406c0ddedaa48651cf480892476,SC4432EM-Hypnogram.edf +44,1,PSG,74,female,00:18:00,e3a09d832cb79b0095d7a311ef1b6ed7c569b79d,SC4441E0-PSG.edf +44,1,Hypnogram,74,female,00:18:00,68d4e44ad54069701972df66d8a81b4ca434bf2f,SC4441EC-Hypnogram.edf 
+44,2,PSG,74,female,23:55:00,fe51d45e9f3e64a61fa8a5e5274b2e4951a9de43,SC4442E0-PSG.edf +44,2,Hypnogram,74,female,23:55:00,efc2b86bb796b0143f61667402612dfbb85cbb78,SC4442EV-Hypnogram.edf +45,1,PSG,66,female,00:26:00,315db0f9d91988ddc2b198f89cc22f96190eff71,SC4451F0-PSG.edf +45,1,Hypnogram,66,female,00:26:00,bc1f755c3367e378091c44481948a72fc7a928e5,SC4451FY-Hypnogram.edf +45,2,PSG,66,female,00:34:00,a06350e1c85b61c30c3d7d5dc640121b416fe30d,SC4452F0-PSG.edf +45,2,Hypnogram,66,female,00:34:00,0286d52cdf898ed8e3b17bb26b9c50ef512daf4d,SC4452FW-Hypnogram.edf +46,1,PSG,66,female,00:30:00,e4295014c6d4474d8f7f7792c2ea088eb9e43e9f,SC4461F0-PSG.edf +46,1,Hypnogram,66,female,00:30:00,8980e770e58e5704bd36124f6b6bd8d5e3506e12,SC4461FA-Hypnogram.edf +46,2,PSG,66,female,00:38:00,53b69cb41339bc69144eaa5a5a42c2937f237fc9,SC4462F0-PSG.edf +46,2,Hypnogram,66,female,00:38:00,0c6d3974e140c1e62ed2cadaed395781575af042,SC4462FJ-Hypnogram.edf +47,1,PSG,73,female,22:36:00,05d71b55de4c86791195391b1cec8b35e447922d,SC4471F0-PSG.edf +47,1,Hypnogram,73,female,22:36:00,ee235454dbfe947432f3f813c9a6384f6e42d36a,SC4471FA-Hypnogram.edf +47,2,PSG,73,female,22:25:00,7a12c0d6f3005998472b128e06dd645a8619dae7,SC4472F0-PSG.edf +47,2,Hypnogram,73,female,22:25:00,d234d5d6c396bf7ef0a2106a59ee8204429aa3c5,SC4472FA-Hypnogram.edf +48,1,PSG,67,female,23:04:00,c15f6a0e1802dcf74ecec41745677a4932375faf,SC4481F0-PSG.edf +48,1,Hypnogram,67,female,23:04:00,50fce6396aceaf35d9d7e16175053a3b78f214d0,SC4481FV-Hypnogram.edf +48,2,PSG,67,female,23:57:00,34d71530fd1da925ba20b4c48a07f7b18153e0c7,SC4482F0-PSG.edf +48,2,Hypnogram,67,female,23:57:00,e3c48563e63eed27b071d4a7b37c45a0f9dc7eef,SC4482FJ-Hypnogram.edf +49,1,PSG,67,female,23:26:00,23ea1f5f299c6cd99d434f014d7490621dbbc854,SC4491G0-PSG.edf +49,1,Hypnogram,67,female,23:26:00,36c6c8112524c7bc9553db37601b38984946209b,SC4491GJ-Hypnogram.edf +49,2,PSG,67,female,00:13:00,02c975bfc0773928095239b80d00ac5a7ea5880f,SC4492G0-PSG.edf +49,2,Hypnogram,67,female,00:13:00,3673eaad8396ef0ec36cb4299541c30653b72e1f,SC4492GJ-Hypnogram.edf +50,1,PSG,71,male,22:07:00,1c31fc02412029bc7369979b8c9f5956420748f5,SC4501E0-PSG.edf +50,1,Hypnogram,71,male,22:07:00,eb2621c1670a42eb38dfa86a9bc3326818365f3d,SC4501EW-Hypnogram.edf +50,2,PSG,71,male,23:40:00,ff9eae25afa73115e2b184a68e3a72a39efd37e6,SC4502E0-PSG.edf +50,2,Hypnogram,71,male,23:40:00,7605a1893701925ea0fdd047926bbd6c7c043875,SC4502EM-Hypnogram.edf +51,1,PSG,70,male,23:10:00,e12eb259c2894d45b8d0b2f0e75810c2de02237d,SC4511E0-PSG.edf +51,1,Hypnogram,70,male,23:10:00,e549275e9182b9e36ade5abb721098e235ecb164,SC4511EJ-Hypnogram.edf +51,2,PSG,70,male,00:03:00,53c5d982139d248736f6dd7ff3f97f635647eacd,SC4512E0-PSG.edf +51,2,Hypnogram,70,male,00:03:00,e22966c263f6ae7444704881f5249f6fb5dee0c1,SC4512EW-Hypnogram.edf +52,2,PSG,69,male,23:53:00,af70ffdbd3012615923f6a4901e7c0dd3a0fd8ca,SC4522E0-PSG.edf +52,2,Hypnogram,69,male,23:53:00,57af3eaed541229dcb2478c6050f0582e020f878,SC4522EM-Hypnogram.edf +53,1,PSG,67,male,23:49:00,71222ac5b7784ed1d3a79ee3e9036431d6eba9bd,SC4531E0-PSG.edf +53,1,Hypnogram,67,male,23:49:00,934dbfeb29f4f4db4b61e36fb8ddab4ddbf4ff94,SC4531EM-Hypnogram.edf +53,2,PSG,67,male,23:53:00,2d472fb64da5d05a546f780da876b90ad26208f9,SC4532E0-PSG.edf +53,2,Hypnogram,67,male,23:53:00,708b43e7d43a6f5719f48c11bd6a81b037aabfc4,SC4532EV-Hypnogram.edf +54,1,PSG,73,male,23:00:00,4d3ec2f85149bb10fed1013831c3aa1f58049229,SC4541F0-PSG.edf +54,1,Hypnogram,73,male,23:00:00,a301385e6fbde02c83f2545f17cdf75d594d37ce,SC4541FA-Hypnogram.edf 
+54,2,PSG,73,male,23:30:00,2909f5b0d3fdb89e19d42b406798e9cbb4615bb6,SC4542F0-PSG.edf +54,2,Hypnogram,73,male,23:30:00,9548ed641fb961fa46706339891a9453b731369f,SC4542FW-Hypnogram.edf +55,1,PSG,71,male,22:40:00,0bf97e463cbcefb7df48bca712f29dcc74223330,SC4551F0-PSG.edf +55,1,Hypnogram,71,male,22:40:00,e50b44e6b049baaeb528c31563642b2a2b933834,SC4551FC-Hypnogram.edf +55,2,PSG,71,male,22:29:00,dfa0adaae50110bdd0077483c31d57956020fcb9,SC4552F0-PSG.edf +55,2,Hypnogram,71,male,22:29:00,7380403f8d72fa4c30013cd026cc1dad23ac2b3e,SC4552FW-Hypnogram.edf +56,1,PSG,72,male,23:14:00,1a9baf1b072ca9d2784a404292169ff3177ea83f,SC4561F0-PSG.edf +56,1,Hypnogram,72,male,23:14:00,b31a2dfe652508df46f6afe03ab904c333f7b818,SC4561FJ-Hypnogram.edf +56,2,PSG,72,male,23:22:00,4c7081edf572cadee51d30174cd65aa6c658f5a9,SC4562F0-PSG.edf +56,2,Hypnogram,72,male,23:22:00,676ab92dbc6532f67d672f80337c71f817fd3a6d,SC4562FJ-Hypnogram.edf +57,1,PSG,66,male,23:02:00,e67f3bd381ddfb96d584f6c6d6f6762087d6553d,SC4571F0-PSG.edf +57,1,Hypnogram,66,male,23:02:00,08ee39eb94d819968512297ca883f9bca046de9c,SC4571FV-Hypnogram.edf +57,2,PSG,66,male,23:51:00,deb2aef7a6a4b502c819345a7151ffc2529d4ba7,SC4572F0-PSG.edf +57,2,Hypnogram,66,male,23:51:00,7a38cbe581167dfec27a15935e6d386b228616fa,SC4572FC-Hypnogram.edf +58,1,PSG,67,male,22:36:00,16a1edbd6a089386fd7de72aef802182d0a2959d,SC4581G0-PSG.edf +58,1,Hypnogram,67,male,22:36:00,bfc729575cfdf5f409be2de47dad4e00d43195bf,SC4581GM-Hypnogram.edf +58,2,PSG,67,male,23:04:00,9da93f4c2459dd4fe2e5ee6a171904d4f604cd6e,SC4582G0-PSG.edf +58,2,Hypnogram,67,male,23:04:00,acbade13cfae4fc5fbda2d0766feea83d114aa23,SC4582GP-Hypnogram.edf +59,1,PSG,67,male,23:25:00,017793b040df8a860df0e43e3e0a496e2cb3f9c1,SC4591G0-PSG.edf +59,1,Hypnogram,67,male,23:25:00,f3bb949a7f82acb7fd3d8f35e92efee1402a383f,SC4591GY-Hypnogram.edf +59,2,PSG,67,male,00:14:00,1e284bddd7952862327c83092db21805e6ab6c38,SC4592G0-PSG.edf +59,2,Hypnogram,67,male,00:14:00,58d1678e9ec9f49c9c6a15031dee26d802026851,SC4592GY-Hypnogram.edf +60,1,PSG,89,female,21:35:00,ece6d6ce09fac6fc521cf3f1b536f1ea2a8a1778,SC4601E0-PSG.edf +60,1,Hypnogram,89,female,21:35:00,8f77b05fe58f43cdfdcdba7cc3d27abcac7d37f2,SC4601EC-Hypnogram.edf +60,2,PSG,89,female,23:00:00,0e50df304ced29651267f43689ce49e063f808d6,SC4602E0-PSG.edf +60,2,Hypnogram,89,female,23:00:00,1c52de92668fe4c89cd5e270e17017ef47880991,SC4602EJ-Hypnogram.edf +61,1,PSG,101,female,00:20:00,2cc6e418c0b7af472aa34d2bbd5ece85bdb6a879,SC4611E0-PSG.edf +61,1,Hypnogram,101,female,00:20:00,f5715ab48f24221c28c1d5c45508c8bb58c912ec,SC4611EG-Hypnogram.edf +61,2,PSG,101,female,01:00:00,6593e1af07101fa4c5bce8984296858be17e7d4f,SC4612E0-PSG.edf +61,2,Hypnogram,101,female,01:00:00,cedb61bbe7a273b12f45579963d5a84f2ab21811,SC4612EA-Hypnogram.edf +62,1,PSG,95,female,21:00:00,31cd2cae56977c6b872311f2a6e60827748b973d,SC4621E0-PSG.edf +62,1,Hypnogram,95,female,21:00:00,7acc5296b33ca4eee8d6577064c8c651ee96e527,SC4621EV-Hypnogram.edf +62,2,PSG,95,female,21:00:00,7a7e226d47dccd959305e3f633686335c8e66557,SC4622E0-PSG.edf +62,2,Hypnogram,95,female,21:00:00,9957c9c9e0c705aac0f7125f411b2531a722601c,SC4622EJ-Hypnogram.edf +63,1,PSG,91,female,00:15:00,6dfb32aa4c94968a52d61b90a38573d178669bfb,SC4631E0-PSG.edf +63,1,Hypnogram,91,female,00:15:00,48e28f93fc71ffc539776196f9d9d1365415e0b4,SC4631EM-Hypnogram.edf +63,2,PSG,91,female,23:39:00,3baa8081b30cc3dfece9d550289dfc94812530d5,SC4632E0-PSG.edf +63,2,Hypnogram,91,female,23:39:00,cd2765ebdabc66cb4ac2320d02e3b7ab0340ede4,SC4632EA-Hypnogram.edf 
+64,1,PSG,85,female,22:30:00,0e5d109a929490cbecf59573577a97df07a05cd0,SC4641E0-PSG.edf +64,1,Hypnogram,85,female,22:30:00,7b896dc5b34d71381d8462001dc3e05b145cf48c,SC4641EP-Hypnogram.edf +64,2,PSG,85,female,22:17:00,03169b7ee9de83b2e17e9bd0d6274965e9518b37,SC4642E0-PSG.edf +64,2,Hypnogram,85,female,22:17:00,d8a870d26e468a643eaebe3275e5e2912690c0d8,SC4642EP-Hypnogram.edf +65,1,PSG,88,female,23:10:00,f2134a2ad001bc146f3e2d9d76cb7f00f03bbe52,SC4651E0-PSG.edf +65,1,Hypnogram,88,female,23:10:00,fad4311c7e11a9aa9a73a8e48d6fa966db61e71d,SC4651EP-Hypnogram.edf +65,2,PSG,88,female,23:00:00,aa66553cb0132634d7d11ffe7fab80aa5119b3d7,SC4652E0-PSG.edf +65,2,Hypnogram,88,female,23:00:00,6ed9c4f66c03e56f86730ddd8986f3600c040d4a,SC4652EG-Hypnogram.edf +66,1,PSG,88,female,21:52:00,c6057505d2acf7b08371e266cf0fca1bfeb1e4e1,SC4661E0-PSG.edf +66,1,Hypnogram,88,female,21:52:00,06474e72126d2a00c1968e70730e1deac060f94e,SC4661EJ-Hypnogram.edf +66,2,PSG,88,female,21:56:00,24d278194360dc78ebd0cfe940fb4d5f7f93ccbc,SC4662E0-PSG.edf +66,2,Hypnogram,88,female,21:56:00,07ca0fbfb6030289a089f84e50d7bbfd043f31ad,SC4662EJ-Hypnogram.edf +67,1,PSG,87,female,22:49:00,4357aa9fedf0b53896d41e5dccd7b525f7212177,SC4671G0-PSG.edf +67,1,Hypnogram,87,female,22:49:00,459889157743c434933194446af5168cb145dfcb,SC4671GJ-Hypnogram.edf +67,2,PSG,87,female,23:52:00,fd86b31a5c22176e1887e2fac460edce42bd2fdf,SC4672G0-PSG.edf +67,2,Hypnogram,87,female,23:52:00,dedb182b8c063cefabf1763eb19cd26d0608017f,SC4672GV-Hypnogram.edf +70,1,PSG,89,male,21:40:00,3f60b5ad5e1092e90c38f2072b3c041bd7313550,SC4701E0-PSG.edf +70,1,Hypnogram,89,male,21:40:00,196a388f60ee4aecfa982f89e2db03ff91e906e7,SC4701EC-Hypnogram.edf +70,2,PSG,89,male,21:39:00,a6853fee26b1541f85be7ddc3f42f06ccfe2fcfc,SC4702E0-PSG.edf +70,2,Hypnogram,89,male,21:39:00,464f7382ec11703b5bc6512930fdfbb1ab6d030a,SC4702EA-Hypnogram.edf +71,1,PSG,88,male,21:15:00,e97d691bfecf770ca4e47289b846886c16ef19fb,SC4711E0-PSG.edf +71,1,Hypnogram,88,male,21:15:00,81ec5d0288f36c4368e5f06f21980f99774bf533,SC4711EC-Hypnogram.edf +71,2,PSG,88,male,23:18:00,9b99be6cb45af22bdbead7ea01f1375631c9b365,SC4712E0-PSG.edf +71,2,Hypnogram,88,male,23:18:00,66b121441a45ae19852b7002fd78c2caf236631a,SC4712EA-Hypnogram.edf +72,1,PSG,88,male,23:04:00,5c9caa01cc1f8065f87195c9f2dc2aeebf83c03d,SC4721E0-PSG.edf +72,1,Hypnogram,88,male,23:04:00,efe62b1e8bac1ea08dbf12374ca6812a6f271d5e,SC4721EC-Hypnogram.edf +72,2,PSG,88,male,23:09:00,a473f32a6075e9ed830a8e9a246129e05959e8b7,SC4722E0-PSG.edf +72,2,Hypnogram,88,male,23:09:00,efb2358de27da4219f64f7bfb37912dc9efb0281,SC4722EM-Hypnogram.edf +73,1,PSG,97,male,22:30:00,b03e4a2df4d086778f3426ed7b6c5bf800cbfe92,SC4731E0-PSG.edf +73,1,Hypnogram,97,male,22:30:00,eb3dc65d7184d676a6678a70b18730d11a414588,SC4731EM-Hypnogram.edf +73,2,PSG,97,male,22:00:00,574ff5c0634137f7d5c51eb5f7626b451f1f9b9d,SC4732E0-PSG.edf +73,2,Hypnogram,97,male,22:00:00,77a523ca9ef4698885b681bf4e27d28dc5c58424,SC4732EJ-Hypnogram.edf +74,1,PSG,92,male,23:14:00,e6ff7462f4ce401e9aff9b3d9c93f0710bc37678,SC4741E0-PSG.edf +74,1,Hypnogram,92,male,23:14:00,bda4d1ab190f4160ec7a3f4420e30d718f02369e,SC4741EA-Hypnogram.edf +74,2,PSG,92,male,23:06:00,2b09f78a2f276061c8758a55585fae7355b38111,SC4742E0-PSG.edf +74,2,Hypnogram,92,male,23:06:00,d4bb4266859c2f92ae8ba96111d59d8ab467f6a0,SC4742EC-Hypnogram.edf +75,1,PSG,96,male,22:58:00,17c356a283b026e507331209512453573bcfebe5,SC4751E0-PSG.edf +75,1,Hypnogram,96,male,22:58:00,d35737e86979127ea01b95dcecea018dd2e44f45,SC4751EC-Hypnogram.edf 
+75,2,PSG,96,male,23:00:00,b650a49d6e3bb81971e4689c720ee079404857e6,SC4752E0-PSG.edf
+75,2,Hypnogram,96,male,23:00:00,3d1c86d8d7ecb6ff79ee12cb950690e929394161,SC4752EM-Hypnogram.edf
+76,1,PSG,90,male,23:28:00,8bde3f0d5ab6a592f229dfd7886341b3f800bdb3,SC4761E0-PSG.edf
+76,1,Hypnogram,90,male,23:28:00,3dbf15f28a293ac89dcf458d844a8c6443aaf1e6,SC4761EP-Hypnogram.edf
+76,2,PSG,90,male,01:29:00,7bdc8eacf1a6502c8f007b08556b7e8b52180d44,SC4762E0-PSG.edf
+76,2,Hypnogram,90,male,01:29:00,f6ae10f082a10ead671bfd5fdc50f62c42b9f10d,SC4762EG-Hypnogram.edf
+77,1,PSG,85,male,23:23:00,ac8c2be9175cb02e00cccb5d5df2acfaf05971cc,SC4771G0-PSG.edf
+77,1,Hypnogram,85,male,23:23:00,09e80b973502d89368d7823ad4aec7417b735f6e,SC4771GC-Hypnogram.edf
+77,2,PSG,85,male,00:10:00,eea8671791936358037e5d096491865069989a85,SC4772G0-PSG.edf
+77,2,Hypnogram,85,male,00:10:00,25a3b8859091a70ca0cff9ebb777879aa156689e,SC4772GC-Hypnogram.edf
+80,1,PSG,54,female,23:05:00,0ce00a144dd9bc1b0e20cd30e6501a3852e4dbef,SC4801G0-PSG.edf
+80,1,Hypnogram,54,female,23:05:00,f82d2b8e45723f2a69f8c30286cc68486b0792a6,SC4801GC-Hypnogram.edf
+80,2,PSG,54,female,23:18:00,8959ada929c07945757bd6c9ef0267e7c9427a66,SC4802G0-PSG.edf
+80,2,Hypnogram,54,female,23:18:00,41ff2d1118425f5828342c07aa58b9d346755b1a,SC4802GV-Hypnogram.edf
+81,1,PSG,57,female,22:00:00,dcae3307af54ccf5349945e2fa493464de0a5da2,SC4811G0-PSG.edf
+81,1,Hypnogram,57,female,22:00:00,2406ce37b86fc3c7492a3ebe89ae58d15686b33d,SC4811GG-Hypnogram.edf
+81,2,PSG,57,female,21:56:00,fd93757cf6bcf45854fca960a067612352e05547,SC4812G0-PSG.edf
+81,2,Hypnogram,57,female,21:56:00,244b3bbb4987db0a9cef85950d14899ab9a3aec4,SC4812GV-Hypnogram.edf
+82,1,PSG,56,female,23:59:00,9008c6ffc917fb90a3d399e768fe3c563a144a2f,SC4821G0-PSG.edf
+82,1,Hypnogram,56,female,23:59:00,59534244c603cd5c3c27db26ae2f014983ec6c9b,SC4821GC-Hypnogram.edf
+82,2,PSG,56,female,00:05:00,84f9a60f6b0e7ac33388d8f6492096bcfa60bc18,SC4822G0-PSG.edf
+82,2,Hypnogram,56,female,00:05:00,8d14c371bc290658469729addee4461866bb67e2,SC4822GC-Hypnogram.edf
diff --git a/mne/datasets/sleep_physionet/temazepam.py b/mne/datasets/sleep_physionet/temazepam.py
new file mode 100644
index 0000000..443b405
--- /dev/null
+++ b/mne/datasets/sleep_physionet/temazepam.py
@@ -0,0 +1,119 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import os
+import time
+
+import numpy as np
+
+from ...utils import verbose
+from ..utils import _log_time_size
+from ._utils import TEMAZEPAM_SLEEP_RECORDS, _check_subjects, _data_path, _fetch_one
+
+data_path = _data_path  # expose _data_path(..) as data_path(..)
+
+BASE_URL = "https://physionet.org/physiobank/database/sleep-edfx/sleep-telemetry/"  # noqa: E501
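The fetcher defined below returns local file paths rather than loaded data. As a usage illustration (not part of the diff), here is a minimal sketch of consuming those paths with MNE-Python; it assumes network access on the first call and that each returned entry is a ``[PSG, Hypnogram]`` path pair, mirroring the age fetcher above.

```python
# Hypothetical usage sketch: fetch one subject's two study nights and attach
# the hypnogram's sleep-stage annotations to the PSG recording.
import mne
from mne.datasets.sleep_physionet import temazepam

files = temazepam.fetch_data(subjects=[0])  # one [psg, hyp] pair per night
for psg_path, hyp_path in files:
    raw = mne.io.read_raw_edf(psg_path, preload=False)
    annotations = mne.read_annotations(hyp_path)  # sleep stages
    raw.set_annotations(annotations, emit_warning=False)
    print(psg_path, raw.info["sfreq"], "Hz")
```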
+
+
+@verbose
+def fetch_data(
+    subjects, path=None, force_update=False, base_url=BASE_URL, *, verbose=None
+):
+    """Get paths to local copies of PhysioNet Polysomnography dataset files.
+
+    This will fetch data from the publicly available subjects from PhysioNet's
+    study of Temazepam effects on sleep :footcite:`KempEtAl2000`. This
+    corresponds to a set of 22 subjects. Subjects had mild difficulty falling
+    asleep but were otherwise healthy.
+
+    See more details on the `PhysioNet website
+    <https://physionet.org/physiobank/database/sleep-edfx/>`_
+    :footcite:`GoldbergerEtAl2000`.
+
+    Parameters
+    ----------
+    subjects : list of int
+        The subjects to use. Can be in the range of 0-21 (inclusive).
+    path : None | str
+        Path to the directory where the PhysioNet data is stored. If None,
+        the environment variable or config parameter
+        ``PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist, the
+        "~/mne_data" directory is used. If the Polysomnography dataset is
+        not found under the given path, the data will be automatically
+        downloaded to the specified folder.
+    force_update : bool
+        Force update of the dataset even if a local copy exists.
+    base_url : str
+        The base URL to download from.
+    %(verbose)s
+
+    Returns
+    -------
+    paths : list
+        List of local data paths of the given type.
+
+    See Also
+    --------
+    mne.datasets.sleep_physionet.age.fetch_data
+
+    Notes
+    -----
+    For example, one could do:
+
+        >>> from mne.datasets import sleep_physionet
+        >>> sleep_physionet.temazepam.fetch_data(subjects=[1])  # doctest: +SKIP
+
+    This would download data for subject 1 if it isn't there already.
+
+    References
+    ----------
+    .. footbibliography::
+    """
+    t0 = time.time()
+    records = np.loadtxt(
+        TEMAZEPAM_SLEEP_RECORDS,
+        skiprows=1,
+        delimiter=",",
+        usecols=(0, 3, 6, 7, 8, 9),
+        dtype={
+            "names": (
+                "subject",
+                "record",
+                "hyp sha",
+                "psg sha",
+                "hyp fname",
+                "psg fname",
+            ),
+            "formats": ("<i2", "<S9", "S40", "S40", "<S22", "<S22"),
+        },
+    )
+    ...
+    if sz > 0:
+        _log_time_size(t0, sz)
+    return fnames
diff --git a/mne/datasets/sleep_physionet/temazepam_records.csv b/mne/datasets/sleep_physionet/temazepam_records.csv
new file mode 100644
index 0000000..3822219
--- /dev/null
+++ b/mne/datasets/sleep_physionet/temazepam_records.csv
@@ -0,0 +1,45 @@
+subject,age,sex,drug,lights off,night nr,sha_Hypnogram,sha_PSG,fname_Hypnogram,fname_PSG,subject_orig
+0,60,male,Placebo,23:01:00,1,ff28e5e01296cefed49ae0c27cfb3ebc42e710bf,b9d11484126ebff1884034396d6a20c62c0ef48d,ST7011JP-Hypnogram.edf,ST7011J0-PSG.edf,1
+0,60,male,Temazepam,23:48:00,2,7a98a0ebba9e5e8fc4aac9ab82849385570d7789,b97c67d2ec40721349fd6faea32ea7155a11940a,ST7012JP-Hypnogram.edf,ST7012J0-PSG.edf,1
+1,35,female,Temazepam,00:00:00,1,635b07240047ade50649ff0f72ccde792f464f09,552e579d96e6c4ae083c7e1422e11b945ebcdabd,ST7021JM-Hypnogram.edf,ST7021J0-PSG.edf,2
+1,35,female,Placebo,23:27:00,2,228c608743abcc28f8c4946e8394ecf8e6ada89c,ebabfa224599201d9baf91311f78f6410971810f,ST7022JM-Hypnogram.edf,ST7022J0-PSG.edf,2
+2,18,female,Placebo,23:53:00,1,422655bae4525d121bd45fead048207be9b34c4b,41f8e344b9872d93c8c2f2da283252231584b08f,ST7041JO-Hypnogram.edf,ST7041J0-PSG.edf,4
+2,18,female,Temazepam,22:37:00,2,eff297358a0c9d175109ba692ac3f9f4cd2c08ed,229ee3bb4d060332c219c3dc1153732ab5499d57,ST7042JO-Hypnogram.edf,ST7042J0-PSG.edf,4
+3,32,female,Temazepam,23:34:00,1,d7696bd1b891dd85e96e20ea727dcebe49ab6dfd,17b186214e8944667571f52098564e377b32d695,ST7051JA-Hypnogram.edf,ST7051J0-PSG.edf,5
+3,32,female,Placebo,23:23:00,2,64f2718c004e64ab598979da139b90452febc9bf,489fcb38c07688192d9c0eae5455d95241028ad8,ST7052JA-Hypnogram.edf,ST7052J0-PSG.edf,5
+4,35,female,Placebo,23:28:00,1,fd9214d026453fce71efa2975ea732e1c1458f69,9fb2b4ed47a6d4b2f0b60a354123e491e8738b19,ST7061JR-Hypnogram.edf,ST7061J0-PSG.edf,6
+4,35,female,Temazepam,23:26:00,2,c2a4abe15f08f230b734a328494ab0d2ae9dc786,afc5599194648da5568dafa1a811818e77df4842,ST7062JR-Hypnogram.edf,ST7062J0-PSG.edf,6
+5,51,female,Placebo,00:02:00,1,bc08c797bb7aaf92de1c869d46c6dd4590939996,010a65ad86b79d19c372a421f0e7c975e56278c8,ST7071JA-Hypnogram.edf,ST7071J0-PSG.edf,7
+5,51,female,Temazepam,23:24:00,2,1a7813b7a2389c0346e3844835590b9cb2f40f56,15c5aa5591e35d60ba25044cdd4b3d748d3c0cfc,ST7072JA-Hypnogram.edf,ST7072J0-PSG.edf,7
+6,66,female,Temazepam,23:53:00,1,8259b52c62203b85268d23b3a2d87605fdcfa2a6,cb66a0493d90d0d1204936e3e7c944ed536265e3,ST7081JW-Hypnogram.edf,ST7081J0-PSG.edf,8 +6,66,female,Placebo,23:20:00,2,bc33c3aba61c0fa937ef56d4ce7b1468c80663b5,b1cb29c7a7321b7e628d04a477338c4f62f0c093,ST7082JW-Hypnogram.edf,ST7082J0-PSG.edf,8 +7,47,male,Temazepam,23:42:00,1,af845641a8118d004bcfa6b597f23517e3a752e9,b046dd63d92339914eca0489d8a4c566b69e7723,ST7091JE-Hypnogram.edf,ST7091J0-PSG.edf,9 +7,47,male,Placebo,00:30:00,2,ec89bb908ff70e123ffa94bc2c11bb1ce54bcb6a,2986f4d64f5118c5e356a2abe6bf86521ffde339,ST7092JE-Hypnogram.edf,ST7092J0-PSG.edf,9 +8,20,female,Placebo,23:21:00,1,5919542c566d882fbf947c66f4858ad17199103a,5662b560f095b8397303cced87e43d407a0d18f7,ST7101JE-Hypnogram.edf,ST7101J0-PSG.edf,10 +8,20,female,Temazepam,23:28:00,2,1f05e92c9ca076350f981d0ec75ad720606bacbc,f697a140f18d1005107fcbb7c81d85a5e8cb6ec6,ST7102JE-Hypnogram.edf,ST7102J0-PSG.edf,10 +9,21,female,Temazepam,23:38:00,1,5964553fe07cbca302526b2153a2507f7d02fab8,e2bf9db482f230a56372603d23fb12f5c56062f7,ST7111JE-Hypnogram.edf,ST7111J0-PSG.edf,11 +9,21,female,Placebo,23:52:00,2,e4d8406eaca361d2c5d9953b3c67ed1098dd5925,d3c7907b9b1e4f087f31bd655548b8673b6ec735,ST7112JE-Hypnogram.edf,ST7112J0-PSG.edf,11 +10,21,male,Placebo,23:46:00,1,a991ed3d8be6d55ee563545077f3d280466a4989,6e90bac48e48f71e5572944a364009eab6ea818d,ST7121JE-Hypnogram.edf,ST7121J0-PSG.edf,12 +10,21,male,Temazepam,23:56:00,2,b6c2c21e3cf17b371b31af78c64f28aa5811e36f,ae7426c464296ec0a839ccaa9763e3f2c57f41f1,ST7122JE-Hypnogram.edf,ST7122J0-PSG.edf,12 +11,22,male,Temazepam,00:38:00,1,91ee1bd29b156b33e03cb8c324a8fac15ec06674,d0d6c83b76f627b067e0daac3c181e3666f8ab08,ST7131JR-Hypnogram.edf,ST7131J0-PSG.edf,13 +11,22,male,Placebo,00:31:00,2,028a5c4ed911d67a17b45f12966b32c46949d374,54a50dcc40e3d6677b80c629b2f908339d9a7c3e,ST7132JR-Hypnogram.edf,ST7132J0-PSG.edf,13 +12,20,male,Placebo,00:40:00,1,203e78e02a92a9f85f07790398f64c66f248e5cc,6bf8feeabc2259d15f1f535abda90caacc8d4a86,ST7141JE-Hypnogram.edf,ST7141J0-PSG.edf,14 +12,20,male,Temazepam,00:53:00,2,1f7cc3a1923dd6a3504c82d76f820555ad0b6a1b,b42eb28089bbdcbf3244dead53fd01d5f5ac3ddf,ST7142JE-Hypnogram.edf,ST7142J0-PSG.edf,14 +13,66,female,Placebo,23:42:00,1,cfcb0089e22244bc5047f61e72a39735cbdc36cf,c0df1253b6509c4b4ed9e1283f26cf206a8c725c,ST7151JA-Hypnogram.edf,ST7151J0-PSG.edf,15 +13,66,female,Temazepam,23:33:00,2,27e9b4527eea33ded9072db3c6626f94a966da58,faefa07a1ca180861d6f26d5f35285c009dca21e,ST7152JA-Hypnogram.edf,ST7152J0-PSG.edf,15 +14,79,female,Temazepam,23:18:00,1,5a1ef1d375b01f83264e84db4af58acded68f15e,8a4f1c44a17b5d665cc30f1141d003043274ac2b,ST7161JM-Hypnogram.edf,ST7161J0-PSG.edf,16 +14,79,female,Placebo,23:21:00,2,18b3d7eb9685ec8131fc0a8f81ba6205122595dc,66925c8fa9f6da18f8590dcf2a6174cfe46e912d,ST7162JM-Hypnogram.edf,ST7162J0-PSG.edf,16 +15,48,female,Placebo,23:40:00,1,13c371fc4384751cc4bdd3044c6a0813ea12816e,67c47cb92de8806c60303a4baa87ca6cf52a2245,ST7171JA-Hypnogram.edf,ST7171J0-PSG.edf,17 +15,48,female,Temazepam,23:48:00,2,8de0f3f59dd27d07f5f6a74216814ced08f104b5,a46118a5ca9cfaa62ca11c6a8b079e82877305ef,ST7172JA-Hypnogram.edf,ST7172J0-PSG.edf,17 +16,53,female,Temazepam,23:24:00,1,483aa0b448393d61043c98c204c93d4c60abb6bd,501f2f9d9ebe15e6dfc86fda6e90f9a54a39660a,ST7181JR-Hypnogram.edf,ST7181J0-PSG.edf,18 +16,53,female,Placebo,23:38:00,2,50efc607882659f8229db773703f5b973b471ed4,0eab40d3687a2cf708e48137eab26c0c43b75773,ST7182JR-Hypnogram.edf,ST7182J0-PSG.edf,18 
+17,28,female,Temazepam,23:44:00,1,e7fcb89cf0f1484ab114bf40dcf2bf4cd413696b,b1b10cd45a7c0f91286c6fc3f755e59af483bac1,ST7191JR-Hypnogram.edf,ST7191J0-PSG.edf,19 +17,28,female,Placebo,23:22:00,2,def09a7d469984005b0c8414b7995ae8e269fd15,e80de913aa41b987a43d94cf8f0106d61e4e883b,ST7192JR-Hypnogram.edf,ST7192J0-PSG.edf,19 +18,24,male,Placebo,23:47:00,1,17a0e8aebb885a960a74343bace57d2ab0b6296a,454233ae9e6a948848030c5f4d9e60dfcb0facde,ST7201JO-Hypnogram.edf,ST7201J0-PSG.edf,20 +18,24,male,Temazepam,00:01:00,2,ed26efdb6b2d9e815f2a725970262cb9c15c7b98,1e97e392968415da67432842c952344b6d3cdc8c,ST7202JO-Hypnogram.edf,ST7202J0-PSG.edf,20 +19,34,female,Temazepam,23:10:00,1,b8756397056f623674c3b03db808b2c8c64b0a0a,c6582cfa8fcf6542a688fa8842011a93d86f2c60,ST7211JJ-Hypnogram.edf,ST7211J0-PSG.edf,21 +19,34,female,Placebo,23:44:00,2,e25e47adf0c0f09df542ef061272ed9569fb80ea,389f3920b39b4b9ad4fba6f91198299b7c6f6676,ST7212JJ-Hypnogram.edf,ST7212J0-PSG.edf,21 +20,56,male,Placebo,23:22:00,1,7656827835362b7b44b296bad83ff6001e14f489,58315bec82d381dec56bf96924a94014462bb608,ST7221JA-Hypnogram.edf,ST7221J0-PSG.edf,22 +20,56,male,Temazepam,23:44:00,2,da840db60086e43a2429fb1322ede5e5976b3cda,4961a08b87416246b8b8186190eca0e96da6a50d,ST7222JA-Hypnogram.edf,ST7222J0-PSG.edf,22 +21,48,female,Placebo,23:27:00,1,bbaac4f2c2f330f70583eb179d855fcf42b4fbff,7a850ce4bc6bd14ea072f3a45b002f8015cf2f14,ST7241JO-Hypnogram.edf,ST7241J0-PSG.edf,24 +21,48,female,Temazepam,23:36:00,2,f70b3dfce2c14f01221a66a4acb522df1affffdb,5c8bd182bfc9609929094769718b2835fe1099ad,ST7242JO-Hypnogram.edf,ST7242J0-PSG.edf,24 diff --git a/mne/datasets/somato/__init__.py b/mne/datasets/somato/__init__.py new file mode 100644 index 0000000..24ad452 --- /dev/null +++ b/mne/datasets/somato/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Somatosensory dataset.""" + +from .somato import data_path, get_version diff --git a/mne/datasets/somato/somato.py b/mne/datasets/somato/somato.py new file mode 100644 index 0000000..177a642 --- /dev/null +++ b/mne/datasets/somato/somato.py @@ -0,0 +1,32 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="somato", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format( + name="somato", conf="MNE_DATASETS_SOMATO_PATH" +) + + +def get_version(): # noqa: D103 + return _get_version("somato") + + +get_version.__doc__ = _version_doc.format(name="somato") diff --git a/mne/datasets/spm_face/__init__.py b/mne/datasets/spm_face/__init__.py new file mode 100644 index 0000000..a49e343 --- /dev/null +++ b/mne/datasets/spm_face/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""SPM face dataset.""" + +from .spm_data import data_path, has_spm_data, get_version, requires_spm_data diff --git a/mne/datasets/spm_face/spm_data.py b/mne/datasets/spm_face/spm_data.py new file mode 100644 index 0000000..6653796 --- /dev/null +++ b/mne/datasets/spm_face/spm_data.py @@ -0,0 +1,53 @@ +# Authors: The MNE-Python contributors. 
+# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from functools import partial + +from ...utils import get_config, verbose +from ..utils import ( + _data_path_doc, + _download_mne_dataset, + _get_version, + _version_doc, + has_dataset, +) + +has_spm_data = partial(has_dataset, name="spm") + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="spm", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format(name="spm", conf="MNE_DATASETS_SPM_DATA_PATH") + + +def get_version(): # noqa: D103 + return _get_version("spm") + + +get_version.__doc__ = _version_doc.format(name="spm") + + +def _skip_spm_data(): + skip_testing = get_config("MNE_SKIP_TESTING_DATASET_TESTS", "false") == "true" + skip = skip_testing or not has_spm_data() + return skip + + +def requires_spm_data(func): + """Skip testing data test.""" + import pytest + + return pytest.mark.skipif(_skip_spm_data(), reason="Requires spm dataset")(func) diff --git a/mne/datasets/ssvep/__init__.py b/mne/datasets/ssvep/__init__.py new file mode 100644 index 0000000..18229cf --- /dev/null +++ b/mne/datasets/ssvep/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""SSVEP dataset.""" + +from .ssvep import data_path, get_version diff --git a/mne/datasets/ssvep/ssvep.py b/mne/datasets/ssvep/ssvep.py new file mode 100644 index 0000000..9f13ace --- /dev/null +++ b/mne/datasets/ssvep/ssvep.py @@ -0,0 +1,30 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): # noqa: D103 + return _download_mne_dataset( + name="ssvep", + processor="unzip", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format(name="ssvep", conf="MNE_DATASETS_SSVEP_PATH") + + +def get_version(): # noqa: D103 + return _get_version("ssvep") + + +get_version.__doc__ = _version_doc.format(name="ssvep") diff --git a/mne/datasets/testing/__init__.py b/mne/datasets/testing/__init__.py new file mode 100644 index 0000000..07ad876 --- /dev/null +++ b/mne/datasets/testing/__init__.py @@ -0,0 +1,13 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""MNE testing dataset.""" + +from ._testing import ( + data_path, + requires_testing_data, + get_version, + _pytest_param, + _pytest_mark, +) diff --git a/mne/datasets/testing/_testing.py b/mne/datasets/testing/_testing.py new file mode 100644 index 0000000..61d4548 --- /dev/null +++ b/mne/datasets/testing/_testing.py @@ -0,0 +1,77 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
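``spm_face`` above and ``testing`` below share the same pattern: a cheap ``has_dataset`` probe wrapped into a pytest ``skipif`` decorator, so test modules can declare a dataset dependency without downloading anything at collection time. A hypothetical test using the decorator defined above (an illustration, not part of the diff):

```python
# Sketch of a test module guarded by requires_spm_data. The skipif mark is
# computed when the decorator is applied, from the on-disk check
# has_dataset(name="spm") and the MNE_SKIP_TESTING_DATASET_TESTS switch.
from mne.datasets.spm_face import data_path, requires_spm_data


@requires_spm_data
def test_spm_face_present():  # hypothetical test name
    # Runs only when the SPM face dataset is already downloaded, so
    # download=False cannot return an empty path here.
    path = data_path(download=False)
    assert str(path) != ""
```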
+
+from functools import partial
+
+from ...utils import get_config, verbose
+from ..utils import (
+    _data_path_doc,
+    _download_mne_dataset,
+    _get_version,
+    _version_doc,
+    has_dataset,
+)
+
+has_testing_data = partial(has_dataset, name="testing")
+
+
+@verbose
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
+    # Make sure we don't do something stupid
+    if download and get_config("MNE_SKIP_TESTING_DATASET_TESTS", "false") == "true":
+        raise RuntimeError("Cannot download data if skipping is forced")
+
+    return _download_mne_dataset(
+        name="testing",
+        processor="untar",
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )
+
+
+data_path.__doc__ = _data_path_doc.format(
+    name="testing", conf="MNE_DATASETS_TESTING_PATH"
+)
+
+
+def get_version():  # noqa: D103
+    return _get_version("testing")
+
+
+get_version.__doc__ = _version_doc.format(name="testing")
+
+
+# Allow forcing of testing dataset skip (for Debian tests) using:
+# `make test-no-testing-data`
+def _skip_testing_data():
+    skip_testing = get_config("MNE_SKIP_TESTING_DATASET_TESTS", "false") == "true"
+    skip = skip_testing or not has_testing_data()
+    return skip
+
+
+def requires_testing_data(func):
+    """Skip testing data test."""
+    return _pytest_mark()(func)
+
+
+def _pytest_param(*args, **kwargs):
+    if len(args) == 0:
+        args = ("testing_data",)
+    import pytest
+
+    # turn anything that uses testing data into an auto-skipper by
+    # setting params=[testing._pytest_param()], or by parametrizing functions
+    # with testing._pytest_param(whatever)
+    kwargs["marks"] = kwargs.get("marks", list()) + [_pytest_mark()]
+    return pytest.param(*args, **kwargs)
+
+
+def _pytest_mark():
+    import pytest
+
+    return pytest.mark.skipif(_skip_testing_data(), reason="Requires testing dataset")
diff --git a/mne/datasets/ucl_opm_auditory/__init__.py b/mne/datasets/ucl_opm_auditory/__init__.py
new file mode 100644
index 0000000..94f389a
--- /dev/null
+++ b/mne/datasets/ucl_opm_auditory/__init__.py
@@ -0,0 +1,7 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+"""UCL OPM auditory dataset."""
+
+from .ucl_opm_auditory import data_path, get_version
diff --git a/mne/datasets/ucl_opm_auditory/ucl_opm_auditory.py b/mne/datasets/ucl_opm_auditory/ucl_opm_auditory.py
new file mode 100644
index 0000000..a9f6f41
--- /dev/null
+++ b/mne/datasets/ucl_opm_auditory/ucl_opm_auditory.py
@@ -0,0 +1,36 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from ...utils import verbose
+from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc
+
+_NAME = "ucl_opm_auditory"
+_PROCESSOR = "unzip"
+
+
+@verbose
+def data_path(
+    path=None, force_update=False, update_path=True, download=True, *, verbose=None
+):  # noqa: D103
+    return _download_mne_dataset(
+        name=_NAME,
+        processor=_PROCESSOR,
+        path=path,
+        force_update=force_update,
+        update_path=update_path,
+        download=download,
+    )
+
+
+data_path.__doc__ = _data_path_doc.format(
+    name=_NAME,
+    conf=f"MNE_DATASETS_{_NAME.upper()}_PATH",
+)
+
+
+def get_version():  # noqa: D103
+    return _get_version(_NAME)
+
+
+get_version.__doc__ = _version_doc.format(name=_NAME)
diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py
new file mode 100644
index 0000000..452e42c
--- /dev/null
+++ b/mne/datasets/utils.py
@@ -0,0 +1,827 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import importlib
+import inspect
+import logging
+import os
+import os.path as op
+import sys
+import tempfile
+import time
+import zipfile
+from collections import OrderedDict
+from pathlib import Path
+from typing import cast
+
+import numpy as np
+
+from ..label import Label, read_labels_from_annot, write_labels_to_annot
+from ..utils import (
+    _pl,
+    _safe_input,
+    _validate_type,
+    get_config,
+    get_subjects_dir,
+    logger,
+    set_config,
+    verbose,
+)
+from ..utils.docs import _docformat, docdict
+from .config import MNE_DATASETS, _hcp_mmp_license_text
+
+_data_path_doc = """Get path to local copy of {name} dataset.
+
+    Parameters
+    ----------
+    path : None | str
+        Location of where to look for the {name} dataset.
+        If None, the environment variable or config parameter
+        ``{conf}`` is used. If it doesn't exist, the
+        "~/mne_data" directory is used. If the {name} dataset
+        is not found under the given path, the data
+        will be automatically downloaded to the specified folder.
+    force_update : bool
+        Force update of the {name} dataset even if a local copy exists.
+        Default is False.
+    update_path : bool | None
+        If True (default), set the ``{conf}`` in mne-python
+        config to the given path. If None, the user is prompted.
+    download : bool
+        If False and the {name} dataset has not been downloaded yet,
+        it will not be downloaded and the path will be returned as
+        '' (empty string). This is mostly used for debugging purposes
+        and can be safely ignored by most users.
+    %(verbose)s
+
+    Returns
+    -------
+    path : instance of Path
+        Path to {name} dataset directory.
+"""
+_data_path_doc_accept = _data_path_doc.split("%(verbose)s")
+_data_path_doc_accept[-1] = "%(verbose)s" + _data_path_doc_accept[-1]
+_data_path_doc_accept.insert(1, "    %(accept)s")
+_data_path_doc_accept = "".join(_data_path_doc_accept)
+_data_path_doc = _docformat(_data_path_doc, docdict)
+_data_path_doc_accept = _docformat(_data_path_doc_accept, docdict)
+
+_version_doc = """Get version of the local {name} dataset.
+
+    Returns
+    -------
+    version : str | None
+        Version of the {name} local dataset, or None if the dataset
+        does not exist locally.
+"""
+
+
+def _dataset_version(path, name):
+    """Get the version of the dataset."""
+    ver_fname = op.join(path, "version.txt")
+    if op.exists(ver_fname):
+        with open(ver_fname) as fid:
+            version = fid.readline().strip()  # version is on first line
+    else:
+        logger.debug(f"Version file missing: {ver_fname}")
+        # Sample dataset versioning was introduced after 0.3
+        # SPM dataset was introduced with 0.7
+        versions = dict(sample="0.3", spm="0.7")
+        version = versions.get(name, "0.0")
+    return version
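The ``_get_path`` helper that follows resolves a dataset directory with a fixed precedence: an explicit ``path`` argument, then the dataset-specific config key, then ``MNE_DATA``, then ``~/mne_data``. A small illustration of that order (not part of the diff; ``_get_path`` is a private helper and the config key shown is just an example):

```python
from mne.datasets.utils import _get_path

# 1. An explicit argument wins outright.
p = _get_path("/data/mne", key="MNE_DATASETS_TESTING_PATH", name="testing")

# 2./3. With path=None, the config key and then MNE_DATA are consulted via
# get_config(); a configured directory that does not exist raises
# FileNotFoundError instead of being silently created.
# 4. With nothing configured, ~/mne_data is created (if needed) and returned.
p = _get_path(None, key="MNE_DATASETS_TESTING_PATH", name="testing")
print(p)
```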
+
+
+def _get_path(path, key, name):
+    """Get a dataset path."""
+    # 1. Input
+    _validate_type(path, ("path-like", None), path)
+    if path is not None:
+        return Path(path).expanduser()
+    # 2. get_config(key) — unless key is None or "" (special get_config values)
+    # 3. get_config('MNE_DATA')
+    path = get_config(key or "MNE_DATA", get_config("MNE_DATA"))
+    if path is not None:
+        path = Path(path).expanduser()
+        if not path.exists():
+            msg = (
+                f"Download location {path} as specified by MNE_DATA does "
+                f"not exist. Either create this directory manually and try "
+                f"again, or set MNE_DATA to an existing directory."
+            )
+            raise FileNotFoundError(msg)
+        return path
+    # 4. ~/mne_data (but use a fake home during testing so we don't
+    #    unnecessarily create ~/mne_data)
+    logger.info(f"Using default location ~/mne_data for {name}...")
+    path = Path(os.getenv("_MNE_FAKE_HOME_DIR", "~")).expanduser() / "mne_data"
+    if not path.is_dir():
+        logger.info(f"Creating {path}")
+        try:
+            path.mkdir()
+        except OSError:
+            raise OSError(
+                "User does not have write permissions "
+                f"at '{path}', try giving the path as an "
+                "argument to data_path() where the user has "
+                "write permissions, e.g., data_path"
+                "('/home/xyz/me2/')"
+            )
+    return path
+
+
+def _do_path_update(path, update_path, key, name):
+    """Update path."""
+    path = op.abspath(path)
+    identical = get_config(key, "", use_env=False) == path
+    if not identical:
+        if update_path is None:
+            update_path = True
+            if "--update-dataset-path" in sys.argv:
+                answer = "y"
+            else:
+                msg = (
+                    f"Do you want to set the path:\n    {path}\nas the default {name} "
+                    "dataset path in the mne-python config [y]/n? "
+                )
+                answer = _safe_input(msg, alt="pass update_path=True")
+            if answer.lower() == "n":
+                update_path = False
+
+        if update_path:
+            set_config(key, str(path), set_env=False)
+    return path
+
+
+# This is meant to be semi-public: let packages like mne-bids use it to make
+# sure they don't accidentally set download=True in their tests, too
+_MODULES_TO_ENSURE_DOWNLOAD_IS_FALSE_IN_TESTS = ("mne",)
+
+
+def _check_in_testing_and_raise(name, download):
+    """Check if we're in an MNE test and raise an error if download!=False."""
+    root_dirs = [
+        importlib.import_module(ns)
+        for ns in _MODULES_TO_ENSURE_DOWNLOAD_IS_FALSE_IN_TESTS
+    ]
+    root_dirs = [str(Path(ns.__file__).parent) for ns in root_dirs]
+    check = False
+    func = None
+    frame = inspect.currentframe()
+    try:
+        # First, traverse out of the data_path() call
+        while frame:
+            if frame.f_code.co_name in ("data_path", "load_data"):
+                func = frame.f_code.co_name
+                frame = frame.f_back.f_back  # out of verbose decorator
+                break
+            frame = frame.f_back
+        # Next, see what the caller was
+        while frame:
+            fname = frame.f_code.co_filename
+            if fname is not None:
+                fname = Path(fname)
+                # in mne namespace, and
+                # (can't use is_relative_to here until 3.9)
+                if any(str(fname).startswith(rd) for rd in root_dirs) and (
+                    # in tests/*.py
+                    fname.parent.stem == "tests"
+                    or
+                    # or in a conftest.py
+                    fname.stem == "conftest.py"
+                ):
+                    check = True
+                    break
+            frame = frame.f_back
+    finally:
+        del frame
+    if check and download is not False:
+        raise RuntimeError(
+            f"Do not download dataset {repr(name)} in tests, pass "
+            f"{func}(download=False) to prevent accidental downloads"
+        )
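The frame inspection above is purely a safety net: any ``data_path()`` or ``load_data()`` call reached from a file under ``tests/`` or from a ``conftest.py`` must explicitly opt out of downloading. A sketch of the behavior it enforces (illustration only; the module path is hypothetical):

```python
# Inside a hypothetical test file such as mne/io/tests/test_foo.py:
from mne.datasets import testing


def test_something():
    # OK: resolves the local path (or '') without touching the network.
    path = testing.data_path(download=False)

    # Not OK: download defaults to True, so _check_in_testing_and_raise
    # would raise RuntimeError("Do not download dataset ...") here.
    # path = testing.data_path()
```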
+
+
+def _download_mne_dataset(
+    name, processor, path, force_update, update_path, download, accept=False
+) -> Path:
+    """Aux function for downloading internal MNE datasets."""
+    import pooch
+
+    from mne.datasets._fetch import fetch_dataset
+
+    _check_in_testing_and_raise(name, download)
+
+    # import pooch library for handling the dataset downloading
+    dataset_params = MNE_DATASETS[name]
+    dataset_params["dataset_name"] = name
+    config_key = MNE_DATASETS[name]["config_key"]
+    folder_name = MNE_DATASETS[name]["folder_name"]
+
+    # get download path for specific dataset
+    path = _get_path(path=path, key=config_key, name=name)
+
+    # instantiate processor that unzips file
+    if processor == "nested_untar":
+        processor_ = pooch.Untar(extract_dir=op.join(path, folder_name))
+    elif processor == "nested_unzip":
+        processor_ = pooch.Unzip(extract_dir=op.join(path, folder_name))
+    else:
+        processor_ = processor
+
+    # handle case of multiple sub-datasets with different urls
+    if name == "visual_92_categories":
+        dataset_params = []
+        for name in ["visual_92_categories_1", "visual_92_categories_2"]:
+            this_dataset = MNE_DATASETS[name]
+            this_dataset["dataset_name"] = name
+            dataset_params.append(this_dataset)
+
+    return cast(
+        Path,
+        fetch_dataset(
+            dataset_params=dataset_params,
+            processor=processor_,
+            path=path,
+            force_update=force_update,
+            update_path=update_path,
+            download=download,
+            accept=accept,
+        ),
+    )
+
+
+def _get_version(name):
+    """Get a dataset version."""
+    from mne.datasets._fetch import fetch_dataset
+
+    if not has_dataset(name):
+        return None
+    dataset_params = MNE_DATASETS[name]
+    dataset_params["dataset_name"] = name
+    config_key = MNE_DATASETS[name]["config_key"]
+
+    # get download path for specific dataset
+    path = _get_path(path=None, key=config_key, name=name)
+
+    return fetch_dataset(dataset_params, path=path, return_version=True)[1]
+
+
+def has_dataset(name):
+    """Check for presence of a dataset.
+
+    Parameters
+    ----------
+    name : str | dict
+        The dataset to check. Strings refer to one of the supported datasets
+        listed :ref:`here <datasets>`. A :class:`dict` can be used to check for
+        user-defined datasets (see the Notes section of :func:`fetch_dataset`),
+        and must contain keys ``dataset_name``, ``archive_name``, ``url``,
+        ``folder_name``, ``hash``.
+
+    Returns
+    -------
+    has : bool
+        True if the dataset is present.
+    """
+    from mne.datasets._fetch import fetch_dataset
+
+    if isinstance(name, dict):
+        dataset_name = name["dataset_name"]
+        dataset_params = name
+    else:
+        dataset_name = "spm" if name == "spm_face" else name
+        dataset_params = MNE_DATASETS[dataset_name]
+        dataset_params["dataset_name"] = dataset_name
+
+    config_key = dataset_params["config_key"]
+
+    # get download path for specific dataset
+    path = _get_path(path=None, key=config_key, name=dataset_name)
+
+    dp = fetch_dataset(dataset_params, path=path, download=False, check_version=False)
+    if dataset_name.startswith("bst_"):
+        check = dataset_name
+    else:
+        check = MNE_DATASETS[dataset_name]["folder_name"]
+    return str(dp).endswith(check)
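Because ``has_dataset`` calls ``fetch_dataset`` with ``download=False``, it is safe to use as a cheap presence probe. A small illustration (not part of the diff); note that for the dict form the implementation above also reads a ``config_key`` entry on top of the five keys listed in the docstring:

```python
# Probing for built-in datasets without triggering any download.
from mne.datasets import has_dataset

if has_dataset("sample"):
    print("sample dataset already on disk")
print(has_dataset("spm_face"))  # alias: resolved above via name="spm"
```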
+
+
+@verbose
+def _download_all_example_data(verbose=True):
+    """Download all datasets used in examples and tutorials."""
+    # This function is designed primarily to be used by CircleCI, to:
+    #
+    # 1. Streamline data downloading
+    # 2. Make CircleCI fail early (rather than later) if some necessary data
+    #    cannot be retrieved.
+    # 3. Avoid download statuses and timing biases in rendered examples.
+    #
+    # verbose=True by default so we get nice status messages.
+    # Consider adding datasets from here to CircleCI for PR-auto-build
+    paths = dict()
+    for kind in (
+        "sample testing misc spm_face somato hf_sef multimodal "
+        "fnirs_motor opm mtrf fieldtrip_cmc kiloword phantom_kit phantom_4dbti "
+        "refmeg_noise ssvep epilepsy_ecog ucl_opm_auditory eyelink "
+        "erp_core brainstorm.bst_raw brainstorm.bst_auditory "
+        "brainstorm.bst_resting brainstorm.bst_phantom_ctf "
+        "brainstorm.bst_phantom_elekta phantom_kernel"
+    ).split():
+        mod = importlib.import_module(f"mne.datasets.{kind}")
+        data_path_func = getattr(mod, "data_path")
+        kwargs = dict()
+        if "accept" in inspect.getfullargspec(data_path_func).args:
+            kwargs["accept"] = True
+        paths[kind] = data_path_func(**kwargs)
+        logger.info(f"[done {kind}]")
+
+    # Now for the exceptions:
+    from . import (
+        eegbci,
+        fetch_fsaverage,
+        fetch_hcp_mmp_parcellation,
+        fetch_infant_template,
+        fetch_phantom,
+        limo,
+        sleep_physionet,
+    )
+
+    eegbci.load_data(subjects=1, runs=[6, 10, 14], update_path=True)
+    eegbci.load_data(subjects=range(1, 5), runs=[3], update_path=True)
+    logger.info("[done eegbci]")
+
+    sleep_physionet.age.fetch_data(subjects=[0, 1], recording=[1])
+    logger.info("[done sleep_physionet]")
+
+    # If the user has SUBJECTS_DIR, respect it, if not, set it to the EEG one
+    # (probably on CircleCI, or otherwise advanced user)
+    fetch_fsaverage(subjects_dir=None)
+    logger.info("[done fsaverage]")
+
+    # Now also update the sample dataset path, if not already SUBJECTS_DIR
+    # (some tutorials make use of these files)
+    fetch_fsaverage(subjects_dir=paths["sample"] / "subjects")
+
+    fetch_infant_template("6mo")
+    logger.info("[done infant_template]")
+
+    fetch_hcp_mmp_parcellation(subjects_dir=paths["sample"] / "subjects", accept=True)
+    logger.info("[done hcp_mmp_parcellation]")
+
+    fetch_phantom("otaniemi", subjects_dir=paths["brainstorm.bst_phantom_elekta"])
+    logger.info("[done phantom]")
+
+    limo.load_data(subject=1, update_path=True)
+    logger.info("[done limo]")
+
+
+@verbose
+def fetch_aparc_sub_parcellation(subjects_dir=None, verbose=None):
+    """Fetch the modified subdivided aparc parcellation.
+
+    This will download and install the subdivided aparc parcellation
+    :footcite:`KhanEtAl2018` files for
+    FreeSurfer's fsaverage to the specified directory.
+
+    Parameters
+    ----------
+    subjects_dir : path-like | None
+        The subjects directory to use. The file will be placed in
+        ``subjects_dir + '/fsaverage/label'``.
+    %(verbose)s
+
+    References
+    ----------
+    .. footbibliography::
+    """
+    import pooch
+
+    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
+    destination = subjects_dir / "fsaverage" / "label"
+    urls = dict(lh="https://osf.io/p92yb/download", rh="https://osf.io/4kxny/download")
+    hashes = dict(
+        lh="9e4d8d6b90242b7e4b0145353436ef77", rh="dd6464db8e7762d969fc1d8087cd211b"
+    )
+    downloader = pooch.HTTPDownloader(**_downloader_params())
+    for hemi in ("lh", "rh"):
+        fname = f"{hemi}.aparc_sub.annot"
+        fpath = destination / fname
+        if not fpath.is_file():
+            pooch.retrieve(
+                url=urls[hemi],
+                known_hash=f"md5:{hashes[hemi]}",
+                path=destination,
+                downloader=downloader,
+                fname=fname,
+            )
+
+
+@verbose
+def fetch_hcp_mmp_parcellation(
+    subjects_dir=None, combine=True, *, accept=False, verbose=None
+):
+    """Fetch the HCP-MMP parcellation.
+
+    This will download and install the HCP-MMP parcellation
+    :footcite:`GlasserEtAl2016` files for FreeSurfer's fsaverage
+    :footcite:`Mills2016` to the specified directory.
+
+    Parameters
+    ----------
+    subjects_dir : path-like | None
+        The subjects directory to use. The file will be placed in
+        ``subjects_dir + '/fsaverage/label'``.
+    combine : bool
+        If True, also produce the combined/reduced set of 23 labels per
+        hemisphere as ``HCPMMP1_combined.annot``
+        :footcite:`GlasserEtAl2016supp`.
+    %(accept)s
+    %(verbose)s
+
+    Notes
+    -----
+    Use of this parcellation is subject to terms of use on the
+    `HCP-MMP webpage <https://balsa.wustl.edu/WN56>`_.
+
+    References
+    ----------
+    .. 
footbibliography:: + """ + import pooch + + subjects_dir = get_subjects_dir(subjects_dir, raise_error=True) + destination = subjects_dir / "fsaverage" / "label" + fnames = [destination / f"{hemi}.HCPMMP1.annot" for hemi in ("lh", "rh")] + urls = dict( + lh="https://ndownloader.figshare.com/files/5528816", + rh="https://ndownloader.figshare.com/files/5528819", + ) + hashes = dict( + lh="46a102b59b2fb1bb4bd62d51bf02e975", rh="75e96b331940227bbcb07c1c791c2463" + ) + if not all(fname.exists() for fname in fnames): + if accept or "--accept-hcpmmp-license" in sys.argv: + answer = "y" + else: + answer = _safe_input(f"{_hcp_mmp_license_text}\nAgree (y/[n])? ") + if answer.lower() != "y": + raise RuntimeError("You must agree to the license to use this dataset") + downloader = pooch.HTTPDownloader(**_downloader_params()) + for hemi, fpath in zip(("lh", "rh"), fnames): + if not op.isfile(fpath): + fname = fpath.name + pooch.retrieve( + url=urls[hemi], + known_hash=f"md5:{hashes[hemi]}", + path=destination, + downloader=downloader, + fname=fname, + ) + + if combine: + fnames = [ + op.join(destination, f"{hemi}.HCPMMP1_combined.annot") + for hemi in ("lh", "rh") + ] + if all(op.isfile(fname) for fname in fnames): + return + # otherwise, let's make them + logger.info("Creating combined labels") + groups = OrderedDict( + [ + ("Primary Visual Cortex (V1)", ("V1",)), + ("Early Visual Cortex", ("V2", "V3", "V4")), + ( + "Dorsal Stream Visual Cortex", + ("V3A", "V3B", "V6", "V6A", "V7", "IPS1"), + ), + ( + "Ventral Stream Visual Cortex", + ("V8", "VVC", "PIT", "FFC", "VMV1", "VMV2", "VMV3"), + ), + ( + "MT+ Complex and Neighboring Visual Areas", + ("V3CD", "LO1", "LO2", "LO3", "V4t", "FST", "MT", "MST", "PH"), + ), + ("Somatosensory and Motor Cortex", ("4", "3a", "3b", "1", "2")), + ( + "Paracentral Lobular and Mid Cingulate Cortex", + ( + "24dd", + "24dv", + "6mp", + "6ma", + "SCEF", + "5m", + "5L", + "5mv", + ), + ), + ("Premotor Cortex", ("55b", "6d", "6a", "FEF", "6v", "6r", "PEF")), + ( + "Posterior Opercular Cortex", + ("43", "FOP1", "OP4", "OP1", "OP2-3", "PFcm"), + ), + ("Early Auditory Cortex", ("A1", "LBelt", "MBelt", "PBelt", "RI")), + ( + "Auditory Association Cortex", + ( + "A4", + "A5", + "STSdp", + "STSda", + "STSvp", + "STSva", + "STGa", + "TA2", + ), + ), + ( + "Insular and Frontal Opercular Cortex", + ( + "52", + "PI", + "Ig", + "PoI1", + "PoI2", + "FOP2", + "FOP3", + "MI", + "AVI", + "AAIC", + "Pir", + "FOP4", + "FOP5", + ), + ), + ( + "Medial Temporal Cortex", + ( + "H", + "PreS", + "EC", + "PeEc", + "PHA1", + "PHA2", + "PHA3", + ), + ), + ( + "Lateral Temporal Cortex", + ( + "PHT", + "TE1p", + "TE1m", + "TE1a", + "TE2p", + "TE2a", + "TGv", + "TGd", + "TF", + ), + ), + ( + "Temporo-Parieto-Occipital Junction", + ( + "TPOJ1", + "TPOJ2", + "TPOJ3", + "STV", + "PSL", + ), + ), + ( + "Superior Parietal Cortex", + ( + "LIPv", + "LIPd", + "VIP", + "AIP", + "MIP", + "7PC", + "7AL", + "7Am", + "7PL", + "7Pm", + ), + ), + ( + "Inferior Parietal Cortex", + ( + "PGp", + "PGs", + "PGi", + "PFm", + "PF", + "PFt", + "PFop", + "IP0", + "IP1", + "IP2", + ), + ), + ( + "Posterior Cingulate Cortex", + ( + "DVT", + "ProS", + "POS1", + "POS2", + "RSC", + "v23ab", + "d23ab", + "31pv", + "31pd", + "31a", + "23d", + "23c", + "PCV", + "7m", + ), + ), + ( + "Anterior Cingulate and Medial Prefrontal Cortex", + ( + "33pr", + "p24pr", + "a24pr", + "p24", + "a24", + "p32pr", + "a32pr", + "d32", + "p32", + "s32", + "8BM", + "9m", + "10v", + "10r", + "25", + ), + ), + ( + "Orbital and Polar Frontal Cortex", + ( + 
"47s", + "47m", + "a47r", + "11l", + "13l", + "a10p", + "p10p", + "10pp", + "10d", + "OFC", + "pOFC", + ), + ), + ( + "Inferior Frontal Cortex", + ( + "44", + "45", + "IFJp", + "IFJa", + "IFSp", + "IFSa", + "47l", + "p47r", + ), + ), + ( + "DorsoLateral Prefrontal Cortex", + ( + "8C", + "8Av", + "i6-8", + "s6-8", + "SFL", + "8BL", + "9p", + "9a", + "8Ad", + "p9-46v", + "a9-46v", + "46", + "9-46d", + ), + ), + ("???", ("???",)), + ] + ) + assert len(groups) == 23 + labels_out = list() + + for hemi in ("lh", "rh"): + labels = read_labels_from_annot( + "fsaverage", "HCPMMP1", hemi=hemi, subjects_dir=subjects_dir, sort=False + ) + label_names = [ + "???" if label.name.startswith("???") else label.name.split("_")[1] + for label in labels + ] + used = np.zeros(len(labels), bool) + for key, want in groups.items(): + assert "\t" not in key + these_labels = [ + li + for li, label_name in enumerate(label_names) + if label_name in want + ] + assert not used[these_labels].any() + assert len(these_labels) == len(want) + used[these_labels] = True + these_labels = [labels[li] for li in these_labels] + # take a weighted average to get the color + # (here color == task activation) + w = np.array([len(label.vertices) for label in these_labels]) + w = w / float(w.sum()) + color = np.dot(w, [label.color for label in these_labels]) + these_labels = sum( + these_labels, Label([], subject="fsaverage", hemi=hemi) + ) + these_labels.name = key + these_labels.color = color + labels_out.append(these_labels) + assert used.all() + assert len(labels_out) == 46 + for hemi, side in (("lh", "left"), ("rh", "right")): + table_name = f"./{side}.fsaverage164.label.gii" + write_labels_to_annot( + labels_out, + "fsaverage", + "HCPMMP1_combined", + hemi=hemi, + subjects_dir=subjects_dir, + sort=False, + table_name=table_name, + ) + + +def _manifest_check_download(manifest_path, destination, url, hash_): + import pooch + + with open(manifest_path) as fid: + names = [name.strip() for name in fid.readlines()] + need = list() + for name in names: + if not (destination / name).is_file(): + need.append(name) + logger.info( + "%d file%s missing from %s in %s", + len(need), + _pl(need), + manifest_path.name, + destination, + ) + if len(need) > 0: + downloader = pooch.HTTPDownloader(**_downloader_params()) + with tempfile.TemporaryDirectory() as path: + logger.info("Downloading missing files remotely") + + path = Path(path) + fname_path = path / "temp.zip" + pooch.retrieve( + url=url, + known_hash=f"md5:{hash_}", + path=path, + downloader=downloader, + fname=fname_path.name, + ) + + logger.info(f"Extracting missing file{_pl(need)}") + with zipfile.ZipFile(fname_path, "r") as ff: + members = set(f for f in ff.namelist() if not f.endswith("/")) + missing = sorted(members.symmetric_difference(set(names))) + if len(missing): + raise RuntimeError( + "Zip file did not have correct names:\n{'\n'.join(missing)}" + ) + for name in need: + ff.extract(name, path=destination) + logger.info(f"Successfully extracted {len(need)} file{_pl(need)}") + + +def _log_time_size(t0, sz): + t = time.time() - t0 + fmt = "%Ss" + if t > 60: + fmt = f"%Mm{fmt}" + if t > 3600: + fmt = f"%Hh{fmt}" + sz = sz / 1048576 # 1024 ** 2 + t = time.strftime(fmt, time.gmtime(t)) + logger.info(f"Download complete in {t} ({sz:.1f} MB)") + + +def _downloader_params(*, auth=None, token=None): + params = dict(timeout=15) + params["progressbar"] = ( + logger.level <= logging.INFO and get_config("MNE_TQDM", "tqdm.auto") != "off" + ) + if auth is not None: + params["auth"] = auth + if 
token is not None: + params["headers"] = {"Authorization": f"token {token}"} + return params diff --git a/mne/datasets/visual_92_categories/__init__.py b/mne/datasets/visual_92_categories/__init__.py new file mode 100644 index 0000000..598795b --- /dev/null +++ b/mne/datasets/visual_92_categories/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""MNE visual_92_categories dataset.""" + +from .visual_92_categories import data_path, get_version diff --git a/mne/datasets/visual_92_categories/visual_92_categories.py b/mne/datasets/visual_92_categories/visual_92_categories.py new file mode 100644 index 0000000..b0ec1e6 --- /dev/null +++ b/mne/datasets/visual_92_categories/visual_92_categories.py @@ -0,0 +1,67 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import verbose +from ..utils import _data_path_doc, _download_mne_dataset, _get_version, _version_doc + + +@verbose +def data_path( + path=None, force_update=False, update_path=True, download=True, *, verbose=None +): + """ + Get path to local copy of visual_92_categories dataset. + + .. note:: The dataset contains four fif-files, the trigger files and the T1 + mri image. This dataset is rather big in size (more than 5 GB). + + Parameters + ---------- + path : None | str + Location of where to look for the visual_92_categories data storing + location. If None, the environment variable or config parameter + MNE_DATASETS_VISUAL_92_CATEGORIES_PATH is used. If it doesn't exist, + the "mne-python/examples" directory is used. If the + visual_92_categories dataset is not found under the given path (e.g., + as "mne-python/examples/MNE-visual_92_categories-data"), the data + will be automatically downloaded to the specified folder. + force_update : bool + Force update of the dataset even if a local copy exists. + update_path : bool | None + If True, set the MNE_DATASETS_VISUAL_92_CATEGORIES_PATH in mne-python + config to the given path. If None, the user is prompted. + %(verbose)s + + Returns + ------- + path : instance of Path + Local path to the given data file. + + Notes + ----- + The visual_92_categories dataset is documented in the following publication + Radoslaw M. Cichy, Dimitrios Pantazis, Aude Oliva (2014) Resolving + human object recognition in space and time. doi: 10.1038/NN.3635 + """ + return _download_mne_dataset( + name="visual_92_categories", + processor="untar", + path=path, + force_update=force_update, + update_path=update_path, + download=download, + ) + + +data_path.__doc__ = _data_path_doc.format( + name="visual_92_categories", conf="MNE_DATASETS_VISUAL_92_CATEGORIES_PATH" +) + + +def get_version(): + """Get dataset version.""" + return _get_version("visual_92_categories") + + +get_version.__doc__ = _version_doc.format(name="visual_92_categories") diff --git a/mne/decoding/__init__.py b/mne/decoding/__init__.py new file mode 100644 index 0000000..b0dc90e --- /dev/null +++ b/mne/decoding/__init__.py @@ -0,0 +1,8 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
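The `mne/decoding/__init__.py` added below exposes its public API through `lazy_loader`, deferring the heavy submodule imports until a name is first accessed (the adjacent `.pyi` stub declares which names exist). As a hedged sketch, `lazy.attach_stub(__name__, __file__)` behaves like the explicit `lazy.attach` form; the submodule/name mapping here is an illustrative subset, not the stub's full list:

```python
import lazy_loader as lazy

# Explicit equivalent of attach_stub(): resolve names on first attribute
# access instead of importing every submodule at package import time.
__getattr__, __dir__, __all__ = lazy.attach(
    __name__,
    submod_attrs={
        "csp": ["CSP", "SPoC"],  # illustrative subset only
        "ems": ["EMS", "compute_ems"],
        "receptive_field": ["ReceptiveField"],
    },
)
```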
+ +"""Decoding and encoding, including machine learning and receptive fields.""" +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/decoding/__init__.pyi b/mne/decoding/__init__.pyi new file mode 100644 index 0000000..2b6c89b --- /dev/null +++ b/mne/decoding/__init__.pyi @@ -0,0 +1,45 @@ +__all__ = [ + "BaseEstimator", + "CSP", + "EMS", + "FilterEstimator", + "GeneralizingEstimator", + "LinearModel", + "PSDEstimator", + "ReceptiveField", + "SPoC", + "SSD", + "Scaler", + "SlidingEstimator", + "TemporalFilter", + "TimeDelayingRidge", + "TimeFrequency", + "TransformerMixin", + "UnsupervisedSpatialFilter", + "Vectorizer", + "compute_ems", + "cross_val_multiscore", + "get_coef", +] +from .base import ( + BaseEstimator, + LinearModel, + TransformerMixin, + cross_val_multiscore, + get_coef, +) +from .csp import CSP, SPoC +from .ems import EMS, compute_ems +from .receptive_field import ReceptiveField +from .search_light import GeneralizingEstimator, SlidingEstimator +from .ssd import SSD +from .time_delaying_ridge import TimeDelayingRidge +from .time_frequency import TimeFrequency +from .transformer import ( + FilterEstimator, + PSDEstimator, + Scaler, + TemporalFilter, + UnsupervisedSpatialFilter, + Vectorizer, +) diff --git a/mne/decoding/base.py b/mne/decoding/base.py new file mode 100644 index 0000000..85ed102 --- /dev/null +++ b/mne/decoding/base.py @@ -0,0 +1,528 @@ +"""Base class copy from sklearn.base.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import datetime as dt +import numbers + +import numpy as np +from sklearn import model_selection as models +from sklearn.base import ( # noqa: F401 + BaseEstimator, + MetaEstimatorMixin, + TransformerMixin, + clone, + is_classifier, +) +from sklearn.linear_model import LogisticRegression +from sklearn.metrics import check_scoring +from sklearn.model_selection import KFold, StratifiedKFold, check_cv +from sklearn.utils import check_array, indexable + +from ..parallel import parallel_func +from ..utils import _pl, logger, verbose, warn + + +class LinearModel(MetaEstimatorMixin, BaseEstimator): + """Compute and store patterns from linear models. + + The linear model coefficients (filters) are used to extract discriminant + neural sources from the measured data. This class computes the + corresponding patterns of these linear filters to make them more + interpretable :footcite:`HaufeEtAl2014`. + + Parameters + ---------- + model : object | None + A linear model from scikit-learn with a fit method + that updates a ``coef_`` attribute. + If None the model will be LogisticRegression. + + Attributes + ---------- + filters_ : ndarray, shape ([n_targets], n_features) + If fit, the filters used to decompose the data. + patterns_ : ndarray, shape ([n_targets], n_features) + If fit, the patterns used to restore M/EEG signals. + + See Also + -------- + CSP + mne.preprocessing.ICA + mne.preprocessing.Xdawn + + Notes + ----- + .. versionadded:: 0.10 + + References + ---------- + .. 
footbibliography:: + """ + + # TODO: Properly refactor this using + # https://github.com/scikit-learn/scikit-learn/issues/30237#issuecomment-2465572885 + _model_attr_wrap = ( + "transform", + "predict", + "predict_proba", + "_estimator_type", + "__tags__", + "decision_function", + "score", + "classes_", + ) + + def __init__(self, model=None): + if model is None: + model = LogisticRegression(solver="liblinear") + + self.model = model + + def __sklearn_tags__(self): + """Get sklearn tags.""" + from sklearn.utils import get_tags # added in 1.6 + + return get_tags(self.model) + + def __getattr__(self, attr): + """Wrap to model for some attributes.""" + if attr in LinearModel._model_attr_wrap: + return getattr(self.model, attr) + elif attr == "fit_transform" and hasattr(self.model, "fit_transform"): + return super().__getattr__(self, "_fit_transform") + return super().__getattr__(self, attr) + + def _fit_transform(self, X, y): + return self.fit(X, y).transform(X) + + def fit(self, X, y, **fit_params): + """Estimate the coefficients of the linear model. + + Save the coefficients in the attribute ``filters_`` and + computes the attribute ``patterns_``. + + Parameters + ---------- + X : array, shape (n_samples, n_features) + The training input samples to estimate the linear coefficients. + y : array, shape (n_samples, [n_targets]) + The target values. + **fit_params : dict of string -> object + Parameters to pass to the fit method of the estimator. + + Returns + ------- + self : instance of LinearModel + Returns the modified instance. + """ + X = check_array(X, input_name="X") + if y is not None: + y = check_array(y, dtype=None, ensure_2d=False, input_name="y") + if y.ndim > 2: + raise ValueError( + f"LinearModel only accepts up to 2-dimensional y, got {y.shape} " + "instead." + ) + + # fit the Model + self.model.fit(X, y, **fit_params) + + # Computes patterns using Haufe's trick: A = Cov_X . W . 
Precision_Y + + inv_Y = 1.0 + X = X - X.mean(0, keepdims=True) + if y.ndim == 2 and y.shape[1] != 1: + y = y - y.mean(0, keepdims=True) + inv_Y = np.linalg.pinv(np.cov(y.T)) + self.patterns_ = np.cov(X.T).dot(self.filters_.T.dot(inv_Y)).T + + return self + + @property + def filters_(self): + if hasattr(self.model, "coef_"): + # Standard Linear Model + filters = self.model.coef_ + elif hasattr(self.model.best_estimator_, "coef_"): + # Linear Model with GridSearchCV + filters = self.model.best_estimator_.coef_ + else: + raise ValueError("model does not have a `coef_` attribute.") + if filters.ndim == 2 and filters.shape[0] == 1: + filters = filters[0] + return filters + + +def _set_cv(cv, estimator=None, X=None, y=None): + """Set the default CV depending on whether clf is classifier/regressor.""" + # Detect whether classification or regression + + if estimator in ["classifier", "regressor"]: + est_is_classifier = estimator == "classifier" + else: + est_is_classifier = is_classifier(estimator) + # Setup CV + if isinstance(cv, int | np.int64): + XFold = StratifiedKFold if est_is_classifier else KFold + cv = XFold(n_splits=cv) + elif isinstance(cv, str): + if not hasattr(models, cv): + raise ValueError("Unknown cross-validation") + cv = getattr(models, cv) + cv = cv() + cv = check_cv(cv=cv, y=y, classifier=est_is_classifier) + + # Extract train and test set to retrieve them at predict time + cv_splits = [(train, test) for train, test in cv.split(X=np.zeros_like(y), y=y)] + + if not np.all([len(train) for train, _ in cv_splits]): + raise ValueError("Some folds do not have any train epochs.") + + return cv, cv_splits + + +def _check_estimator(estimator, get_params=True): + """Check whether an object has the methods required by sklearn.""" + valid_methods = ("predict", "transform", "predict_proba", "decision_function") + if (not hasattr(estimator, "fit")) or ( + not any(hasattr(estimator, method) for method in valid_methods) + ): + raise ValueError( + "estimator must be a scikit-learn transformer or " + "an estimator with the fit and a predict-like (e.g. " + "predict_proba) or a transform method." + ) + + if get_params and not hasattr(estimator, "get_params"): + raise ValueError( + "estimator must be a scikit-learn transformer or an " + "estimator with the get_params method that allows " + "cloning." + ) + + +def _get_inverse_funcs(estimator, terminal=True): + """Retrieve the inverse functions of a pipeline or an estimator.""" + inverse_func = list() + estimators = list() + if hasattr(estimator, "steps"): + # if pipeline, retrieve all steps by nesting + for _, est in estimator.steps: + inverse_func.extend(_get_inverse_funcs(est, terminal=False)) + estimators.append(est.__class__.__name__) + elif hasattr(estimator, "inverse_transform"): + # if not a pipeline, attempt to retrieve the inverse function + inverse_func.append(estimator.inverse_transform) + estimators.append(estimator.__class__.__name__) + else: + inverse_func.append(False) + estimators.append("Unknown") + + # If terminal node, check that the last estimator is a classifier, + # and remove it from the transformers.
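+ # Roughly: for a pipeline of invertible transformers ending in a classifier, + # this keeps the transformers' inverse_transforms and drops the final + # estimator, which has no inverse; if an earlier step is non-invertible + # too, the chain is treated as non-invertible and an empty list is + # returned along with a warning naming the offending steps.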
+ if terminal: + last_is_estimator = inverse_func[-1] is False + logger.debug(f" Last estimator is an estimator: {last_is_estimator}") + non_invertible = np.where( + [inv_func is False for inv_func in inverse_func[:-1]] + )[0] + if last_is_estimator and len(non_invertible) == 0: + # keep all inverse transformation and remove last estimation + logger.debug(" Removing inverse transformation from inverse list.") + inverse_func = inverse_func[:-1] + else: + if len(non_invertible): + bad = ", ".join(estimators[ni] for ni in non_invertible) + warn( + f"Cannot inverse transform non-invertible " + f"estimator{_pl(non_invertible)}: {bad}." + ) + inverse_func = list() + + return inverse_func + + +@verbose +def get_coef(estimator, attr="filters_", inverse_transform=False, *, verbose=None): + """Retrieve the coefficients of an estimator ending with a Linear Model. + + This is typically useful to retrieve "spatial filters" or "spatial + patterns" of decoding models :footcite:`HaufeEtAl2014`. + + Parameters + ---------- + estimator : object | None + An estimator from scikit-learn. + attr : str + The name of the coefficient attribute to retrieve, typically + ``'filters_'`` (default) or ``'patterns_'``. + inverse_transform : bool + If True, returns the coefficients after inverse transforming them with + the transformer steps of the estimator. + %(verbose)s + + Returns + ------- + coef : array + The coefficients. + + References + ---------- + .. footbibliography:: + """ + # Get the coefficients of the last estimator in case of nested pipeline + est = estimator + logger.debug(f"Getting coefficients from estimator: {est.__class__.__name__}") + while hasattr(est, "steps"): + est = est.steps[-1][1] + + squeeze_first_dim = False + + # If SlidingEstimator, loop across estimators + if hasattr(est, "estimators_"): + coef = list() + for ei, this_est in enumerate(est.estimators_): + if ei == 0: + logger.debug(" Extracting coefficients from SlidingEstimator.") + coef.append(get_coef(this_est, attr, inverse_transform)) + coef = np.transpose(coef) + coef = coef[np.newaxis] # fake a sample dimension + squeeze_first_dim = True + elif not hasattr(est, attr): + raise ValueError(f"This estimator does not have a {attr} attribute:\n{est}") + else: + coef = getattr(est, attr) + + if coef.ndim == 1: + coef = coef[np.newaxis] + squeeze_first_dim = True + + # inverse pattern e.g. to get back physical units + if inverse_transform: + if not hasattr(estimator, "steps") and not hasattr(est, "estimators_"): + raise ValueError( + "inverse_transform can only be applied onto pipeline estimators." + ) + # The inverse_transform parameter will call this method on any + # estimator contained in the pipeline, in reverse order. + for inverse_func in _get_inverse_funcs(estimator)[::-1]: + logger.debug(f" Applying inverse transformation: {inverse_func}.") + coef = inverse_func(coef) + + if squeeze_first_dim: + logger.debug(" Squeezing first dimension of coefficients.") + coef = coef[0] + + return coef + + +@verbose +def cross_val_multiscore( + estimator, + X, + y=None, + groups=None, + scoring=None, + cv=None, + n_jobs=None, + verbose=None, + fit_params=None, + pre_dispatch="2*n_jobs", +): + """Evaluate a score by cross-validation. + + Parameters + ---------- + estimator : instance of sklearn.base.BaseEstimator + The object to use to fit the data. + Must implement the 'fit' method. + X : array-like, shape (n_samples, n_dimensional_features,) + The data to fit. Can be, for example a list, or an array at least 2d. 
+ y : array-like, shape (n_samples, n_targets,) + The target variable to try to predict in the case of + supervised learning. + groups : array-like, with shape (n_samples,) + Group labels for the samples used while splitting the dataset into + train/test set. + scoring : str, callable | None + A string (see model evaluation documentation) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. + Note that when using an estimator which inherently returns + multidimensional output - in particular, SlidingEstimator + or GeneralizingEstimator - you should set the scorer + there, not here. + cv : int, cross-validation generator | iterable + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross validation, + - integer, to specify the number of folds in a ``(Stratified)KFold``, + - An object to be used as a cross-validation generator. + - An iterable yielding train, test splits. + + For integer/None inputs, if the estimator is a classifier and ``y`` is + either binary or multiclass, + :class:`sklearn.model_selection.StratifiedKFold` is used. In all + other cases, :class:`sklearn.model_selection.KFold` is used. + %(n_jobs)s + %(verbose)s + fit_params : dict, optional + Parameters to pass to the fit method of the estimator. + pre_dispatch : int, or str, optional + Controls the number of jobs that get dispatched during parallel + execution. Reducing this number can be useful to avoid an + explosion of memory consumption when more jobs get dispatched + than CPUs can process. This parameter can be: + + - None, in which case all the jobs are immediately + created and spawned. Use this for lightweight and + fast-running jobs, to avoid delays due to on-demand + spawning of the jobs + - An int, giving the exact number of total jobs that are + spawned + - A string, giving an expression as a function of n_jobs, + as in '2*n_jobs' + + Returns + ------- + scores : array of float, shape (n_splits,) | shape (n_splits, n_scores) + Array of scores of the estimator for each run of the cross validation. + """ + # This code is copied from sklearn + X, y, groups = indexable(X, y, groups) + + cv = check_cv(cv, y, classifier=is_classifier(estimator)) + cv_iter = list(cv.split(X, y, groups)) + scorer = check_scoring(estimator, scoring=scoring) + # We clone the estimator to make sure that all the folds are + # independent, and that it is pickle-able. + # Note: this parallelization is implemented using MNE Parallel + parallel, p_func, n_jobs = parallel_func( + _fit_and_score, n_jobs, pre_dispatch=pre_dispatch + ) + position = hasattr(estimator, "position") + scores = parallel( + p_func( + estimator=clone(estimator), + X=X, + y=y, + scorer=scorer, + train=train, + test=test, + fit_params=fit_params, + verbose=verbose, + parameters=dict(position=ii % n_jobs) if position else None, + ) + for ii, (train, test) in enumerate(cv_iter) + ) + return np.array(scores)[:, 0, ...] # flatten over joblib output. 
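Since `cross_val_multiscore` above is the main public entry point of this file, a hedged usage sketch may help; the shapes, random data, and 5-fold CV below are placeholder assumptions, not values from this diff:

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

from mne.decoding import SlidingEstimator, cross_val_multiscore

rng = np.random.default_rng(0)
X = rng.standard_normal((40, 8, 50))  # epochs x channels x time points
y = rng.integers(0, 2, size=40)       # binary labels (placeholder)

# Fit one classifier independently at every time point; the scorer is set
# on the SlidingEstimator, as the docstring above recommends.
clf = make_pipeline(StandardScaler(), LogisticRegression())
time_decoder = SlidingEstimator(clf, scoring="roc_auc")

scores = cross_val_multiscore(time_decoder, X, y, cv=5)  # shape (5, 50)
print(scores.mean(axis=0))  # mean AUC across folds, per time point
```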
+ + +# This verbose is necessary to properly set the verbosity level +# during parallelization +@verbose +def _fit_and_score( + estimator, + X, + y, + scorer, + train, + test, + parameters, + fit_params, + return_train_score=False, + return_parameters=False, + return_n_test_samples=False, + return_times=False, + error_score="raise", + *, + verbose=None, + position=0, +): + """Fit estimator and compute scores for a given dataset split.""" + # This code is adapted from sklearn + from sklearn.model_selection import _validation + from sklearn.utils.metaestimators import _safe_split + from sklearn.utils.validation import _num_samples + + # Adjust length of sample weights + + fit_params = fit_params if fit_params is not None else {} + fit_params = { + k: _validation._index_param_value(X, v, train) for k, v in fit_params.items() + } + + if parameters is not None: + estimator.set_params(**parameters) + + start_time = dt.datetime.now() + + X_train, y_train = _safe_split(estimator, X, y, train) + X_test, y_test = _safe_split(estimator, X, y, test, train) + + try: + if y_train is None: + estimator.fit(X_train, **fit_params) + else: + estimator.fit(X_train, y_train, **fit_params) + + except Exception as e: + # Note fit time as time until error + fit_duration = dt.datetime.now() - start_time + score_duration = dt.timedelta(0) + if error_score == "raise": + raise + elif isinstance(error_score, numbers.Number): + test_score = error_score + if return_train_score: + train_score = error_score + warn( + "Classifier fit failed. The score on this train-test partition for " + f"these parameters will be set to {error_score}. Details: \n{e!r}" + ) + else: + raise ValueError( + "error_score must be the string 'raise' or a numeric value. (Hint: if " + "using 'raise', please make sure that it has been spelled correctly.)" + ) + + else: + fit_duration = dt.datetime.now() - start_time + test_score = _score(estimator, X_test, y_test, scorer) + score_duration = dt.datetime.now() - start_time - fit_duration + if return_train_score: + train_score = _score(estimator, X_train, y_train, scorer) + + ret = [train_score, test_score] if return_train_score else [test_score] + + if return_n_test_samples: + ret.append(_num_samples(X_test)) + if return_times: + ret.extend([fit_duration.total_seconds(), score_duration.total_seconds()]) + if return_parameters: + ret.append(parameters) + return ret + + +def _score(estimator, X_test, y_test, scorer): + """Compute the score of an estimator on a given test set. + + This code is the same as sklearn.model_selection._validation._score + but accepts to output arrays instead of floats. + """ + if y_test is None: + score = scorer(estimator, X_test) + else: + score = scorer(estimator, X_test, y_test) + if hasattr(score, "item"): + try: + # e.g. unwrap memmapped scalars + score = score.item() + except ValueError: + # non-scalar? + pass + return score diff --git a/mne/decoding/csp.py b/mne/decoding/csp.py new file mode 100644 index 0000000..1261ca8 --- /dev/null +++ b/mne/decoding/csp.py @@ -0,0 +1,1003 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
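Before the full `CSP` class that follows, a minimal sketch of its two-class core may help: CSP boils down to a generalized eigendecomposition of the class covariances, with components ranked by how far their eigenvalues sit from 0.5 (the two-class `mutual_info` ordering used below). The function and variable names here are illustrative only, not part of this diff:

```python
import numpy as np
from scipy.linalg import eigh


def csp_filters(X_a, X_b, n_components=4):
    """Two-class CSP sketch; X_a, X_b: (n_epochs, n_channels, n_times)."""

    def class_cov(X):
        # Concatenate epochs along time, then channel-by-channel covariance.
        return np.cov(X.transpose(1, 0, 2).reshape(X.shape[1], -1))

    cov_a, cov_b = class_cov(X_a), class_cov(X_b)
    # Generalized eigenproblem: cov_a @ w = lam * (cov_a + cov_b) @ w.
    evals, evecs = eigh(cov_a, cov_a + cov_b)
    # Eigenvalues near 0 or 1 (far from 0.5) are the most discriminative.
    order = np.argsort(np.abs(evals - 0.5))[::-1]
    return evecs[:, order[:n_components]].T  # filters, (n_components, n_channels)
```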
+ +import copy as cp + +import numpy as np +from scipy.linalg import eigh +from sklearn.base import BaseEstimator, TransformerMixin + +from .._fiff.meas_info import create_info +from ..cov import _compute_rank_raw_array, _regularized_covariance, _smart_eigh +from ..defaults import _BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT, _INTERPOLATION_DEFAULT +from ..evoked import EvokedArray +from ..utils import ( + _check_option, + _validate_type, + _verbose_safe_false, + fill_doc, + pinv, + warn, +) + + +@fill_doc +class CSP(TransformerMixin, BaseEstimator): + """M/EEG signal decomposition using the Common Spatial Patterns (CSP). + + This class can be used as a supervised decomposition to estimate spatial + filters for feature extraction. CSP in the context of EEG was first + described in :footcite:`KolesEtAl1990`; a comprehensive tutorial on CSP can + be found in :footcite:`BlankertzEtAl2008`. Multi-class solving is + implemented from :footcite:`Grosse-WentrupBuss2008`. + + Parameters + ---------- + n_components : int (default 4) + The number of components to decompose M/EEG signals. This number should + be set by cross-validation. + reg : float | str | None (default None) + If not None (same as ``'empirical'``, default), allow regularization + for covariance estimation. If float (between 0 and 1), shrinkage is + used. For str values, ``reg`` will be passed as ``method`` to + :func:`mne.compute_covariance`. + log : None | bool (default None) + If ``transform_into`` equals ``'average_power'`` and ``log`` is None or + True, then apply a log transform to standardize features, else features + are z-scored. If ``transform_into`` is ``'csp_space'``, ``log`` must be + None. + cov_est : 'concat' | 'epoch' (default 'concat') + If ``'concat'``, covariance matrices are estimated on concatenated + epochs for each class. If ``'epoch'``, covariance matrices are + estimated on each epoch separately and then averaged over each class. + transform_into : 'average_power' | 'csp_space' (default 'average_power') + If 'average_power' then ``self.transform`` will return the average + power of each spatial filter. If ``'csp_space'``, ``self.transform`` + will return the data in CSP space. + norm_trace : bool (default False) + Normalize class covariance by its trace. Trace normalization is a step + of the original CSP algorithm :footcite:`KolesEtAl1990` to eliminate + magnitude variations in the EEG between individuals. It is not applied + in more recent work :footcite:`BlankertzEtAl2008`, + :footcite:`Grosse-WentrupBuss2008` and can have a negative impact on + pattern order. + cov_method_params : dict | None + Parameters to pass to :func:`mne.compute_covariance`. + + .. versionadded:: 0.16 + %(rank_none)s + + .. versionadded:: 0.17 + component_order : 'mutual_info' | 'alternate' (default 'mutual_info') + If ``'mutual_info'`` order components by decreasing mutual information + (in the two-class case this uses a simplification which orders + components by decreasing absolute deviation of the eigenvalues from 0.5 + :footcite:`BarachantEtAl2010`). For the two-class case, ``'alternate'`` + orders components by starting with the largest eigenvalue, followed by + the smallest, the second-to-largest, the second-to-smallest, and so on + :footcite:`BlankertzEtAl2008`. + + .. versionadded:: 0.21 + + Attributes + ---------- + filters_ : ndarray, shape (n_channels, n_channels) + If fit, the CSP components used to decompose the data, else None. 
+ patterns_ : ndarray, shape (n_channels, n_channels) + If fit, the CSP patterns used to restore M/EEG signals, else None. + mean_ : ndarray, shape (n_components,) + If fit, the mean squared power for each component. + std_ : ndarray, shape (n_components,) + If fit, the std squared power for each component. + + See Also + -------- + mne.preprocessing.Xdawn, SPoC + + References + ---------- + .. footbibliography:: + """ + + def __init__( + self, + n_components=4, + reg=None, + log=None, + cov_est="concat", + transform_into="average_power", + norm_trace=False, + cov_method_params=None, + rank=None, + component_order="mutual_info", + ): + # Init default CSP + if not isinstance(n_components, int): + raise ValueError("n_components must be an integer.") + self.n_components = n_components + self.rank = rank + self.reg = reg + + # Init default cov_est + if not (cov_est == "concat" or cov_est == "epoch"): + raise ValueError("unknown covariance estimation method") + self.cov_est = cov_est + + # Init default transform_into + self.transform_into = _check_option( + "transform_into", transform_into, ["average_power", "csp_space"] + ) + + # Init default log + if transform_into == "average_power": + if log is not None and not isinstance(log, bool): + raise ValueError( + 'log must be a boolean if transform_into == "average_power".' + ) + else: + if log is not None: + raise ValueError('log must be a None if transform_into == "csp_space".') + self.log = log + + _validate_type(norm_trace, bool, "norm_trace") + self.norm_trace = norm_trace + self.cov_method_params = cov_method_params + self.component_order = _check_option( + "component_order", component_order, ("mutual_info", "alternate") + ) + + def _check_Xy(self, X, y=None): + """Check input data.""" + if not isinstance(X, np.ndarray): + raise ValueError(f"X should be of type ndarray (got {type(X)}).") + if y is not None: + if len(X) != len(y) or len(y) < 1: + raise ValueError("X and y must have the same length.") + if X.ndim < 3: + raise ValueError("X must have at least 3 dimensions.") + + def fit(self, X, y): + """Estimate the CSP decomposition on epochs. + + Parameters + ---------- + X : ndarray, shape (n_epochs, n_channels, n_times) + The data on which to estimate the CSP. + y : array, shape (n_epochs,) + The class for each epoch. + + Returns + ------- + self : instance of CSP + Returns the modified instance. + """ + self._check_Xy(X, y) + + self._classes = np.unique(y) + n_classes = len(self._classes) + if n_classes < 2: + raise ValueError("n_classes must be >= 2.") + if n_classes > 2 and self.component_order == "alternate": + raise ValueError( + "component_order='alternate' requires two classes, but data contains " + f"{n_classes} classes; use component_order='mutual_info' instead." 
+ ) + + # Convert rank to one that will run + _validate_type(self.rank, (dict, None, str), "rank") + + covs, sample_weights = self._compute_covariance_matrices(X, y) + eigen_vectors, eigen_values = self._decompose_covs(covs, sample_weights) + ix = self._order_components( + covs, sample_weights, eigen_vectors, eigen_values, self.component_order + ) + + eigen_vectors = eigen_vectors[:, ix] + + self.filters_ = eigen_vectors.T + self.patterns_ = pinv(eigen_vectors) + + pick_filters = self.filters_[: self.n_components] + X = np.asarray([np.dot(pick_filters, epoch) for epoch in X]) + + # compute features (mean power) + X = (X**2).mean(axis=2) + + # To standardize features + self.mean_ = X.mean(axis=0) + self.std_ = X.std(axis=0) + + return self + + def transform(self, X): + """Estimate epochs sources given the CSP filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data. + + Returns + ------- + X : ndarray + If self.transform_into == 'average_power' then returns the power of + CSP features averaged over time and shape (n_epochs, n_components) + If self.transform_into == 'csp_space' then returns the data in CSP + space and shape is (n_epochs, n_components, n_times). + """ + if not isinstance(X, np.ndarray): + raise ValueError(f"X should be of type ndarray (got {type(X)}).") + if self.filters_ is None: + raise RuntimeError( + "No filters available. Please first fit CSP decomposition." + ) + + pick_filters = self.filters_[: self.n_components] + X = np.asarray([np.dot(pick_filters, epoch) for epoch in X]) + + # compute features (mean band power) + if self.transform_into == "average_power": + X = (X**2).mean(axis=2) + log = True if self.log is None else self.log + if log: + X = np.log(X) + else: + X -= self.mean_ + X /= self.std_ + return X + + def inverse_transform(self, X): + """Project CSP features back to sensor space. + + Parameters + ---------- + X : array, shape (n_epochs, n_components) + The data in CSP power space. + + Returns + ------- + X : ndarray + The data in sensor space and shape (n_epochs, n_channels, n_components). + """ + if self.transform_into != "average_power": + raise NotImplementedError( + "Can only inverse transform CSP features when transform_into is " + "'average_power'." + ) + if not (X.ndim == 2 and X.shape[1] == self.n_components): + raise ValueError( + f"X must be 2D with X[1]={self.n_components}, got {X.shape=}" + ) + return X[:, np.newaxis, :] * self.patterns_[: self.n_components].T + + def fit_transform(self, X, y=None, **fit_params): + """Fit CSP to data, then transform it. + + Fits transformer to ``X`` and ``y`` with optional parameters ``fit_params``, and + returns a transformed version of ``X``. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data on which to estimate the CSP. + y : array, shape (n_epochs,) + The class for each epoch. + **fit_params : dict + Additional fitting parameters passed to the :meth:`mne.decoding.CSP.fit` + method. Not used for this class. + + Returns + ------- + X_csp : array, shape (n_epochs, n_components[, n_times]) + If ``self.transform_into == 'average_power'`` then returns the power of CSP + features averaged over time and shape is ``(n_epochs, n_components)``. If + ``self.transform_into == 'csp_space'`` then returns the data in CSP space + and shape is ``(n_epochs, n_components, n_times)``. 
+ """ + # use parent TransformerMixin method but with custom docstring + return super().fit_transform(X, y=y, **fit_params) + + @fill_doc + def plot_patterns( + self, + info, + components=None, + *, + average=None, + ch_type=None, + scalings=None, + sensors=True, + show_names=False, + mask=None, + mask_params=None, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap="RdBu_r", + vlim=(None, None), + cnorm=None, + colorbar=True, + cbar_fmt="%3.1f", + units=None, + axes=None, + name_format="CSP%01d", + nrows=1, + ncols="auto", + show=True, + ): + """Plot topographic patterns of components. + + The patterns explain how the measured data was generated from the + neural sources (a.k.a. the forward model). + + Parameters + ---------- + %(info_not_none)s Used for fitting. If not available, consider using + :func:`mne.create_info`. + components : float | array of float | None + The patterns to plot. If ``None``, all components will be shown. + %(average_plot_evoked_topomap)s + %(ch_type_topomap)s + scalings : dict | float | None + The scalings of the channel types to be applied for plotting. + If None, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``. + %(sensors_topomap)s + %(show_names_topomap)s + %(mask_patterns_topomap)s + %(mask_params_topomap)s + %(contours_topomap)s + %(outlines_topomap)s + %(sphere_topomap_auto)s + %(image_interp_topomap)s + %(extrapolate_topomap)s + + .. versionadded:: 1.3 + %(border_topomap)s + + .. versionadded:: 1.3 + %(res_topomap)s + %(size_topomap)s + %(cmap_topomap)s + %(vlim_plot_topomap)s + + .. versionadded:: 1.3 + %(cnorm)s + + .. versionadded:: 1.3 + %(colorbar_topomap)s + %(cbar_fmt_topomap)s + %(units_topomap)s + %(axes_evoked_plot_topomap)s + name_format : str + String format for topomap values. Defaults to "CSP%%01d". + %(nrows_ncols_topomap)s + + .. versionadded:: 1.3 + %(show)s + + Returns + ------- + fig : instance of matplotlib.figure.Figure + The figure. 
+ """ + if units is None: + units = "AU" + if components is None: + components = np.arange(self.n_components) + + if average is not None: + warn("`average` is deprecated and will be removed in 1.10.", FutureWarning) + + # set sampling frequency to have 1 component per time point + info = cp.deepcopy(info) + with info._unlock(): + info["sfreq"] = 1.0 + # create an evoked + patterns = EvokedArray(self.patterns_.T, info, tmin=0) + # the call plot_topomap + fig = patterns.plot_topomap( + times=components, + average=average, + ch_type=ch_type, + scalings=scalings, + sensors=sensors, + show_names=show_names, + mask=mask, + mask_params=mask_params, + contours=contours, + outlines=outlines, + sphere=sphere, + image_interp=image_interp, + extrapolate=extrapolate, + border=border, + res=res, + size=size, + cmap=cmap, + vlim=vlim, + cnorm=cnorm, + colorbar=colorbar, + cbar_fmt=cbar_fmt, + units=units, + axes=axes, + time_format=name_format, + nrows=nrows, + ncols=ncols, + show=show, + ) + return fig + + @fill_doc + def plot_filters( + self, + info, + components=None, + *, + average=None, + ch_type=None, + scalings=None, + sensors=True, + show_names=False, + mask=None, + mask_params=None, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap="RdBu_r", + vlim=(None, None), + cnorm=None, + colorbar=True, + cbar_fmt="%3.1f", + units=None, + axes=None, + name_format="CSP%01d", + nrows=1, + ncols="auto", + show=True, + ): + """Plot topographic filters of components. + + The filters are used to extract discriminant neural sources from + the measured data (a.k.a. the backward model). + + Parameters + ---------- + %(info_not_none)s Used for fitting. If not available, consider using + :func:`mne.create_info`. + components : float | array of float | None + The patterns to plot. If ``None``, all components will be shown. + %(average_plot_evoked_topomap)s + %(ch_type_topomap)s + scalings : dict | float | None + The scalings of the channel types to be applied for plotting. + If None, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``. + %(sensors_topomap)s + %(show_names_topomap)s + %(mask_patterns_topomap)s + %(mask_params_topomap)s + %(contours_topomap)s + %(outlines_topomap)s + %(sphere_topomap_auto)s + %(image_interp_topomap)s + %(extrapolate_topomap)s + + .. versionadded:: 1.3 + %(border_topomap)s + + .. versionadded:: 1.3 + %(res_topomap)s + %(size_topomap)s + %(cmap_topomap)s + %(vlim_plot_topomap_psd)s + + .. versionadded:: 1.3 + %(cnorm)s + + .. versionadded:: 1.3 + %(colorbar_topomap)s + %(cbar_fmt_topomap)s + %(units_topomap)s + %(axes_evoked_plot_topomap)s + name_format : str + String format for topomap values. Defaults to "CSP%%01d". + %(nrows_ncols_topomap)s + + .. versionadded:: 1.3 + %(show)s + + Returns + ------- + fig : instance of matplotlib.figure.Figure + The figure. 
+ """ + if units is None: + units = "AU" + if components is None: + components = np.arange(self.n_components) + + if average is not None: + warn("`average` is deprecated and will be removed in 1.10.", FutureWarning) + + # set sampling frequency to have 1 component per time point + info = cp.deepcopy(info) + with info._unlock(): + info["sfreq"] = 1.0 + # create an evoked + filters = EvokedArray(self.filters_.T, info, tmin=0) + # the call plot_topomap + fig = filters.plot_topomap( + times=components, + average=average, + ch_type=ch_type, + scalings=scalings, + sensors=sensors, + show_names=show_names, + mask=mask, + mask_params=mask_params, + contours=contours, + outlines=outlines, + sphere=sphere, + image_interp=image_interp, + extrapolate=extrapolate, + border=border, + res=res, + size=size, + cmap=cmap, + vlim=vlim, + cnorm=cnorm, + colorbar=colorbar, + cbar_fmt=cbar_fmt, + units=units, + axes=axes, + time_format=name_format, + nrows=nrows, + ncols=ncols, + show=show, + ) + return fig + + def _compute_covariance_matrices(self, X, y): + _, n_channels, _ = X.shape + + if self.cov_est == "concat": + cov_estimator = self._concat_cov + elif self.cov_est == "epoch": + cov_estimator = self._epoch_cov + + # Someday we could allow the user to pass this, then we wouldn't need to convert + # but in the meantime they can use a pipeline with a scaler + self._info = create_info(n_channels, 1000.0, "mag") + if isinstance(self.rank, dict): + self._rank = {"mag": sum(self.rank.values())} + else: + self._rank = _compute_rank_raw_array( + X.transpose(1, 0, 2).reshape(X.shape[1], -1), + self._info, + rank=self.rank, + scalings=None, + log_ch_type="data", + ) + + covs = [] + sample_weights = [] + for ci, this_class in enumerate(self._classes): + cov, weight = cov_estimator( + X[y == this_class], + cov_kind=f"class={this_class}", + log_rank=ci == 0, + ) + + if self.norm_trace: + cov /= np.trace(cov) + + covs.append(cov) + sample_weights.append(weight) + + return np.stack(covs), np.array(sample_weights) + + def _concat_cov(self, x_class, *, cov_kind, log_rank): + """Concatenate epochs before computing the covariance.""" + _, n_channels, _ = x_class.shape + + x_class = x_class.transpose(1, 0, 2).reshape(n_channels, -1) + cov = _regularized_covariance( + x_class, + reg=self.reg, + method_params=self.cov_method_params, + rank=self._rank, + info=self._info, + cov_kind=cov_kind, + log_rank=log_rank, + log_ch_type="data", + ) + weight = x_class.shape[0] + + return cov, weight + + def _epoch_cov(self, x_class, *, cov_kind, log_rank): + """Mean of per-epoch covariances.""" + cov = sum( + _regularized_covariance( + this_X, + reg=self.reg, + method_params=self.cov_method_params, + rank=self._rank, + info=self._info, + cov_kind=cov_kind, + log_rank=log_rank and ii == 0, + log_ch_type="data", + ) + for ii, this_X in enumerate(x_class) + ) + cov /= len(x_class) + weight = len(x_class) + + return cov, weight + + def _decompose_covs(self, covs, sample_weights): + n_classes = len(covs) + n_channels = covs[0].shape[0] + assert self._rank is not None # should happen in _compute_covariance_matrices + _, sub_vec, mask = _smart_eigh( + covs.mean(0), + self._info, + self._rank, + proj_subspace=True, + do_compute_rank=False, + log_ch_type="data", + verbose=_verbose_safe_false(), + ) + sub_vec = sub_vec[mask] + covs = np.array([sub_vec @ cov @ sub_vec.T for cov in covs], float) + assert covs[0].shape == (mask.sum(),) * 2 + if n_classes == 2: + eigen_values, eigen_vectors = eigh(covs[0], covs.sum(0)) + else: + # The multiclass case is 
adapted from + # http://github.com/alexandrebarachant/pyRiemann + eigen_vectors, D = _ajd_pham(covs) + eigen_vectors = self._normalize_eigenvectors( + eigen_vectors.T, covs, sample_weights + ) + eigen_values = None + # project back + eigen_vectors = sub_vec.T @ eigen_vectors + assert eigen_vectors.shape == (n_channels, mask.sum()) + return eigen_vectors, eigen_values + + def _compute_mutual_info(self, covs, sample_weights, eigen_vectors): + class_probas = sample_weights / sample_weights.sum() + + mutual_info = [] + for jj in range(eigen_vectors.shape[1]): + aa, bb = 0, 0 + for cov, prob in zip(covs, class_probas): + tmp = np.dot(np.dot(eigen_vectors[:, jj].T, cov), eigen_vectors[:, jj]) + aa += prob * np.log(np.sqrt(tmp)) + bb += prob * (tmp**2 - 1) + mi = -(aa + (3.0 / 16) * (bb**2)) + mutual_info.append(mi) + + return mutual_info + + def _normalize_eigenvectors(self, eigen_vectors, covs, sample_weights): + # Here we apply an euclidean mean. See pyRiemann for other metrics + mean_cov = np.average(covs, axis=0, weights=sample_weights) + + for ii in range(eigen_vectors.shape[1]): + tmp = np.dot(np.dot(eigen_vectors[:, ii].T, mean_cov), eigen_vectors[:, ii]) + eigen_vectors[:, ii] /= np.sqrt(tmp) + return eigen_vectors + + def _order_components( + self, covs, sample_weights, eigen_vectors, eigen_values, component_order + ): + n_classes = len(self._classes) + if component_order == "mutual_info" and n_classes > 2: + mutual_info = self._compute_mutual_info(covs, sample_weights, eigen_vectors) + ix = np.argsort(mutual_info)[::-1] + elif component_order == "mutual_info" and n_classes == 2: + ix = np.argsort(np.abs(eigen_values - 0.5))[::-1] + elif component_order == "alternate" and n_classes == 2: + i = np.argsort(eigen_values) + ix = np.empty_like(i) + ix[1::2] = i[: len(i) // 2] + ix[0::2] = i[len(i) // 2 :][::-1] + return ix + + +def _ajd_pham(X, eps=1e-6, max_iter=15): + """Approximate joint diagonalization based on Pham's algorithm. + + This is a direct implementation of the PHAM's AJD algorithm [1]. + + Parameters + ---------- + X : ndarray, shape (n_epochs, n_channels, n_channels) + A set of covariance matrices to diagonalize. + eps : float, default 1e-6 + The tolerance for stopping criterion. + max_iter : int, default 1000 + The maximum number of iteration to reach convergence. + + Returns + ------- + V : ndarray, shape (n_channels, n_channels) + The diagonalizer. + D : ndarray, shape (n_epochs, n_channels, n_channels) + The set of quasi diagonal matrices. + + References + ---------- + .. [1] Pham, Dinh Tuan. "Joint approximate diagonalization of positive + definite Hermitian matrices." SIAM Journal on Matrix Analysis and + Applications 22, no. 4 (2001): 1136-1152. 
+ + """ + # Adapted from http://github.com/alexandrebarachant/pyRiemann + n_epochs = X.shape[0] + + # Reshape input matrix + A = np.concatenate(X, axis=0).T + + # Init variables + n_times, n_m = A.shape + V = np.eye(n_times) + epsilon = n_times * (n_times - 1) * eps + + for it in range(max_iter): + decr = 0 + for ii in range(1, n_times): + for jj in range(ii): + Ii = np.arange(ii, n_m, n_times) + Ij = np.arange(jj, n_m, n_times) + + c1 = A[ii, Ii] + c2 = A[jj, Ij] + + g12 = np.mean(A[ii, Ij] / c1) + g21 = np.mean(A[ii, Ij] / c2) + + omega21 = np.mean(c1 / c2) + omega12 = np.mean(c2 / c1) + omega = np.sqrt(omega12 * omega21) + + tmp = np.sqrt(omega21 / omega12) + tmp1 = (tmp * g12 + g21) / (omega + 1) + tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9) + + h12 = tmp1 + tmp2 + h21 = np.conj((tmp1 - tmp2) / tmp) + + decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0 + + tmp = 1 + 1.0j * 0.5 * np.imag(h12 * h21) + tmp = np.real(tmp + np.sqrt(tmp**2 - h12 * h21)) + tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]]) + + A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :]) + tmp = np.c_[A[:, Ii], A[:, Ij]] + tmp = np.reshape(tmp, (n_times * n_epochs, 2), order="F") + tmp = np.dot(tmp, tau.T) + + tmp = np.reshape(tmp, (n_times, n_epochs * 2), order="F") + A[:, Ii] = tmp[:, :n_epochs] + A[:, Ij] = tmp[:, n_epochs:] + V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :]) + if decr < epsilon: + break + D = np.reshape(A, (n_times, -1, n_times)).transpose(1, 0, 2) + return V, D + + +@fill_doc +class SPoC(CSP): + """Implementation of the SPoC spatial filtering. + + Source Power Comodulation (SPoC) :footcite:`DahneEtAl2014` allows to + extract spatial filters and + patterns by using a target (continuous) variable in the decomposition + process in order to give preference to components whose power correlates + with the target variable. + + SPoC can be seen as an extension of the CSP driven by a continuous + variable rather than a discrete variable. Typical applications include + extraction of motor patterns using EMG power or audio patterns using sound + envelope. + + Parameters + ---------- + n_components : int + The number of components to decompose M/EEG signals. + reg : float | str | None (default None) + If not None (same as ``'empirical'``, default), allow + regularization for covariance estimation. + If float, shrinkage is used (0 <= shrinkage <= 1). + For str options, ``reg`` will be passed to ``method`` to + :func:`mne.compute_covariance`. + log : None | bool (default None) + If transform_into == 'average_power' and log is None or True, then + applies a log transform to standardize the features, else the features + are z-scored. If transform_into == 'csp_space', then log must be None. + transform_into : {'average_power', 'csp_space'} + If 'average_power' then self.transform will return the average power of + each spatial filter. If 'csp_space' self.transform will return the data + in CSP space. Defaults to 'average_power'. + cov_method_params : dict | None + Parameters to pass to :func:`mne.compute_covariance`. + + .. versionadded:: 0.16 + %(rank_none)s + + .. versionadded:: 0.17 + + Attributes + ---------- + filters_ : ndarray, shape (n_channels, n_channels) + If fit, the SPoC spatial filters, else None. + patterns_ : ndarray, shape (n_channels, n_channels) + If fit, the SPoC spatial patterns, else None. + mean_ : ndarray, shape (n_components,) + If fit, the mean squared power for each component. + std_ : ndarray, shape (n_components,) + If fit, the std squared power for each component. 
+ + See Also + -------- + mne.preprocessing.Xdawn, CSP + + References + ---------- + .. footbibliography:: + """ + + def __init__( + self, + n_components=4, + reg=None, + log=None, + transform_into="average_power", + cov_method_params=None, + rank=None, + ): + """Init of SPoC.""" + super().__init__( + n_components=n_components, + reg=reg, + log=log, + cov_est="epoch", + norm_trace=False, + transform_into=transform_into, + rank=rank, + cov_method_params=cov_method_params, + ) + # Covariance estimation have to be done on the single epoch level, + # unlike CSP where covariance estimation can also be achieved through + # concatenation of all epochs from the same class. + delattr(self, "cov_est") + delattr(self, "norm_trace") + + def fit(self, X, y): + """Estimate the SPoC decomposition on epochs. + + Parameters + ---------- + X : ndarray, shape (n_epochs, n_channels, n_times) + The data on which to estimate the SPoC. + y : array, shape (n_epochs,) + The class for each epoch. + + Returns + ------- + self : instance of SPoC + Returns the modified instance. + """ + self._check_Xy(X, y) + + if len(np.unique(y)) < 2: + raise ValueError("y must have at least two distinct values.") + + # The following code is directly copied from pyRiemann + + # Normalize target variable + target = y.astype(np.float64) + target -= target.mean() + target /= target.std() + + n_epochs, n_channels = X.shape[:2] + + # Estimate single trial covariance + covs = np.empty((n_epochs, n_channels, n_channels)) + for ii, epoch in enumerate(X): + covs[ii] = _regularized_covariance( + epoch, + reg=self.reg, + method_params=self.cov_method_params, + rank=self.rank, + log_ch_type="data", + log_rank=ii == 0, + ) + + C = covs.mean(0) + Cz = np.mean(covs * target[:, np.newaxis, np.newaxis], axis=0) + + # solve eigenvalue decomposition + evals, evecs = eigh(Cz, C) + evals = evals.real + evecs = evecs.real + # sort vectors + ix = np.argsort(np.abs(evals))[::-1] + + # sort eigenvectors + evecs = evecs[:, ix].T + + # spatial patterns + self.patterns_ = pinv(evecs).T # n_channels x n_channels + self.filters_ = evecs # n_channels x n_channels + + pick_filters = self.filters_[: self.n_components] + X = np.asarray([np.dot(pick_filters, epoch) for epoch in X]) + + # compute features (mean band power) + X = (X**2).mean(axis=-1) + + # To standardize features + self.mean_ = X.mean(axis=0) + self.std_ = X.std(axis=0) + + return self + + def transform(self, X): + """Estimate epochs sources given the SPoC filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data. + + Returns + ------- + X : ndarray + If self.transform_into == 'average_power' then returns the power of + CSP features averaged over time and shape (n_epochs, n_components) + If self.transform_into == 'csp_space' then returns the data in CSP + space and shape is (n_epochs, n_components, n_times). + """ + return super().transform(X) + + def fit_transform(self, X, y=None, **fit_params): + """Fit SPoC to data, then transform it. + + Fits transformer to ``X`` and ``y`` with optional parameters ``fit_params``, and + returns a transformed version of ``X``. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data on which to estimate the SPoC. + y : array, shape (n_epochs,) + The class for each epoch. + **fit_params : dict + Additional fitting parameters passed to the :meth:`mne.decoding.CSP.fit` + method. Not used for this class. 
+ + Returns + ------- + X : array, shape (n_epochs, n_components[, n_times]) + If ``self.transform_into == 'average_power'`` then returns the power of CSP + features averaged over time and shape is ``(n_epochs, n_components)``. If + ``self.transform_into == 'csp_space'`` then returns the data in CSP space + and shape is ``(n_epochs, n_components, n_times)``. + """ + # use parent TransformerMixin method but with custom docstring + return super().fit_transform(X, y=y, **fit_params) diff --git a/mne/decoding/ems.py b/mne/decoding/ems.py new file mode 100644 index 0000000..911b25e --- /dev/null +++ b/mne/decoding/ems.py @@ -0,0 +1,221 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from collections import Counter + +import numpy as np +from sklearn.base import BaseEstimator, TransformerMixin + +from .._fiff.pick import _picks_to_idx, pick_info, pick_types +from ..parallel import parallel_func +from ..utils import logger, verbose +from .base import _set_cv + + +class EMS(TransformerMixin, BaseEstimator): + """Transformer to compute event-matched spatial filters. + + This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire + time course. No time + window needs to be specified. The result is a spatial filter at each + time point and a corresponding time course. Intuitively, the result + gives the similarity between the filter at each time point and the + data vector (sensors) at that time point. + + .. note:: EMS only works for binary classification. + + Attributes + ---------- + filters_ : ndarray, shape (n_channels, n_times) + The set of spatial filters. + classes_ : ndarray, shape (n_classes,) + The target classes. + + References + ---------- + .. footbibliography:: + """ + + def __repr__(self): # noqa: D105 + if hasattr(self, "filters_"): + return ( + f"<EMS: fitted with {len(self.filters_)} filters " + f"on {len(self.classes_)} classes.>" + ) + else: + return "<EMS: not fitted.>" + + def fit(self, X, y): + """Fit the spatial filters. + + .. note : EMS is fitted on data normalized by channel type before the + fitting of the spatial filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The training data. + y : array of int, shape (n_epochs) + The target classes. + + Returns + ------- + self : instance of EMS + Returns self. + """ + classes = np.unique(y) + if len(classes) != 2: + raise ValueError("EMS only works for binary classification.") + self.classes_ = classes + filters = X[y == classes[0]].mean(0) - X[y == classes[1]].mean(0) + filters /= np.linalg.norm(filters, axis=0)[None, :] + self.filters_ = filters + return self + + def transform(self, X): + """Transform the data by the spatial filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The input data. + + Returns + ------- + X : array, shape (n_epochs, n_times) + The input data transformed by the spatial filters. + """ + Xt = np.sum(X * self.filters_, axis=1) + return Xt + + +@verbose +def compute_ems( + epochs, conditions=None, picks=None, n_jobs=None, cv=None, verbose=None +): + """Compute event-matched spatial filter on epochs. + + This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire + time course. No time + window needs to be specified. The result is a spatial filter at each + time point and a corresponding time course. Intuitively, the result + gives the similarity between the filter at each time point and the + data vector (sensors) at that time point. + + ..
note : The present function applies a leave-one-out cross-validation, + following Schurger et al's paper. However, we recommend using + a stratified k-fold cross-validation. Indeed, leave-one-out tends + to overfit and cannot be used to estimate the variance of the + prediction within a given fold. + + .. note : Because of the leave-one-out, this function needs an equal + number of epochs in each of the two conditions. + + Parameters + ---------- + epochs : instance of mne.Epochs + The epochs. + conditions : list of str | None, default None + If a list of strings, strings must match the epochs.event_id's key as + well as the number of conditions supported by the objective_function. + If None keys in epochs.event_id are used. + %(picks_good_data)s + %(n_jobs)s + cv : cross-validation object | str | None, default LeaveOneOut + The cross-validation scheme. + %(verbose)s + + Returns + ------- + surrogate_trials : ndarray, shape (n_trials // 2, n_times) + The trial surrogates. + mean_spatial_filter : ndarray, shape (n_channels, n_times) + The set of spatial filters. + conditions : ndarray, shape (n_classes,) + The conditions used. Values correspond to original event ids. + + References + ---------- + .. footbibliography:: + """ + logger.info("...computing surrogate time series. This can take some time") + + # Default to leave-one-out cv + cv = "LeaveOneOut" if cv is None else cv + picks = _picks_to_idx(epochs.info, picks) + + if not len(set(Counter(epochs.events[:, 2]).values())) == 1: + raise ValueError( + "The same number of epochs is required by " + "this function. Please consider " + "`epochs.equalize_event_counts`" + ) + + if conditions is None: + conditions = epochs.event_id.keys() + epochs = epochs.copy() + else: + epochs = epochs[conditions] + + epochs.drop_bad() + + if len(conditions) != 2: + raise ValueError( + "Currently this function expects exactly 2 " + f"conditions but you gave me {len(conditions)}" + ) + + ev = epochs.events[:, 2] + # Special care to avoid path dependent mappings and orders + conditions = list(sorted(conditions)) + cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions] + + info = pick_info(epochs.info, picks) + data = epochs.get_data(picks=picks) + + # Scale (z-score) the data by channel type + # XXX the z-scoring is applied outside the CV, which is not standard. + for ch_type in ["mag", "grad", "eeg"]: + if ch_type in epochs: + # FIXME should be applied to all sort of data channels + if ch_type == "eeg": + this_picks = pick_types(info, meg=False, eeg=True) + else: + this_picks = pick_types(info, meg=ch_type, eeg=False) + data[:, this_picks] /= np.std(data[:, this_picks]) + + # Setup cross-validation. Need to use _set_cv to deal with sklearn + # deprecation of cv objects. + y = epochs.events[:, 2] + _, cv_splits = _set_cv(cv, "classifier", X=y, y=y) + + parallel, p_func, n_jobs = parallel_func(_run_ems, n_jobs=n_jobs) + # FIXME this parallelization should be removed. + # 1) it's numpy computation so it's already efficient, + # 2) it duplicates the data in RAM, + # 3) the computation is already super fast. 
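+ # Per fold: _ems_diff computes a difference-of-means filter from the + # training trials of the two conditions, _run_ems L2-normalizes it across + # channels at each time point and projects the held-out trial onto it, + # giving one surrogate time course per trial; the per-fold filters are + # averaged below into the returned mean spatial filter.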
+ out = parallel( + p_func(_ems_diff, data, cond_idx, train, test) for train, test in cv_splits + ) + + surrogate_trials, spatial_filter = zip(*out) + surrogate_trials = np.array(surrogate_trials) + spatial_filter = np.mean(spatial_filter, axis=0) + + return surrogate_trials, spatial_filter, epochs.events[:, 2] + + +def _ems_diff(data0, data1): + """Compute the default diff objective function.""" + return np.mean(data0, axis=0) - np.mean(data1, axis=0) + + +def _run_ems(objective_function, data, cond_idx, train, test): + """Run EMS.""" + d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx)) + d /= np.sqrt(np.sum(d**2, axis=0))[None, :] + # compute surrogates + return np.sum(data[test[0]] * d, axis=0), d diff --git a/mne/decoding/receptive_field.py b/mne/decoding/receptive_field.py new file mode 100644 index 0000000..99412cf --- /dev/null +++ b/mne/decoding/receptive_field.py @@ -0,0 +1,521 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numbers + +import numpy as np +from scipy.stats import pearsonr +from sklearn.base import ( + BaseEstimator, + MetaEstimatorMixin, + clone, + is_regressor, +) +from sklearn.exceptions import NotFittedError +from sklearn.metrics import r2_score + +from ..utils import _validate_type, fill_doc, pinv +from .base import _check_estimator, get_coef +from .time_delaying_ridge import TimeDelayingRidge + + +@fill_doc +class ReceptiveField(MetaEstimatorMixin, BaseEstimator): + """Fit a receptive field model. + + This allows you to fit an encoding model (stimulus to brain) or a decoding + model (brain to stimulus) using time-lagged input features (for example, a + spectro- or spatio-temporal receptive field, or STRF) + :footcite:`TheunissenEtAl2001,WillmoreSmyth2003,CrosseEtAl2016,HoldgrafEtAl2016`. + + Parameters + ---------- + tmin : float + The starting lag, in seconds (or samples if ``sfreq`` == 1). + tmax : float + The ending lag, in seconds (or samples if ``sfreq`` == 1). + Must be >= tmin. + sfreq : float + The sampling frequency used to convert times into samples. + feature_names : array, shape (n_features,) | None + Names for input features to the model. If None, feature names will + be auto-generated from the shape of input data after running `fit`. + estimator : instance of sklearn.base.BaseEstimator | float | None + The model used in fitting inputs and outputs. This can be any + scikit-learn-style model that contains a fit and predict method. If a + float is passed, it will be interpreted as the ``alpha`` parameter + to be passed to a Ridge regression model. If `None`, then a Ridge + regression model with an alpha of 0 will be used. + fit_intercept : bool | None + If True (default), the sample mean is removed before fitting. + If ``estimator`` is a :class:`sklearn.base.BaseEstimator`, + this must be None or match ``estimator.fit_intercept``. + scoring : ['r2', 'corrcoef'] + Defines how predictions will be scored. Currently must be one of + 'r2' (coefficient of determination) or 'corrcoef' (the correlation + coefficient). + patterns : bool + If True, inverse coefficients will be computed upon fitting using the + covariance matrix of the inputs, and the cross-covariance of the + inputs/outputs, according to :footcite:`HaufeEtAl2014`. Defaults to + False. + n_jobs : int | str + Number of jobs to run in parallel. Can be 'cuda' if CuPy + is installed properly and ``estimator is None``. + + .. 
versionadded:: 0.18
+    edge_correction : bool
+        If True (default), correct the autocorrelation coefficients for
+        non-zero delays for the fact that fewer samples are available.
+        Disabling this speeds up performance at the cost of accuracy
+        depending on the relationship between epoch length and model
+        duration. Only used if ``estimator`` is float or None.
+
+        .. versionadded:: 0.18
+
+    Attributes
+    ----------
+    coef_ : array, shape ([n_outputs, ]n_features, n_delays)
+        The coefficients from the model fit, reshaped for easy visualization.
+        During :meth:`mne.decoding.ReceptiveField.fit`, if ``y`` has one
+        dimension (time), the ``n_outputs`` dimension here is omitted.
+    patterns_ : array, shape ([n_outputs, ]n_features, n_delays)
+        If fit, the inverted coefficients from the model.
+    delays_ : array, shape (n_delays,), dtype int
+        The delays used to fit the model, in indices. To return the delays
+        in seconds, use ``self.delays_ / self.sfreq``.
+    valid_samples_ : slice
+        The rows to keep during model fitting after removing rows with
+        missing values due to time delaying. This can be used to get an
+        output equivalent to using :func:`numpy.convolve` or
+        :func:`numpy.correlate` with ``mode='valid'``.
+
+    See Also
+    --------
+    mne.decoding.TimeDelayingRidge
+
+    Notes
+    -----
+    For a causal system, the encoding model will have significant
+    non-zero values only at positive lags. In other words, lags point
+    backward in time relative to the input, so positive lags correspond
+    to previous input time samples, while negative lags correspond to
+    future input time samples.
+
+    References
+    ----------
+    .. footbibliography::
+    """  # noqa E501
+
+    def __init__(
+        self,
+        tmin,
+        tmax,
+        sfreq,
+        feature_names=None,
+        estimator=None,
+        fit_intercept=None,
+        scoring="r2",
+        patterns=False,
+        n_jobs=None,
+        edge_correction=True,
+    ):
+        self.tmin = tmin
+        self.tmax = tmax
+        self.sfreq = sfreq
+        self.feature_names = feature_names
+        self.estimator = 0.0 if estimator is None else estimator
+        self.fit_intercept = fit_intercept
+        self.scoring = scoring
+        self.patterns = patterns
+        self.n_jobs = n_jobs
+        self.edge_correction = edge_correction
+
+    def __repr__(self):  # noqa: D105
+        s = f"tmin, tmax : ({self.tmin:.3f}, {self.tmax:.3f}), "
+        estimator = self.estimator
+        if not isinstance(estimator, str):
+            estimator = type(self.estimator)
+        s += f"estimator : {estimator}, "
+        if hasattr(self, "coef_"):
+            if self.feature_names is not None:
+                feats = self.feature_names
+                if len(feats) == 1:
+                    s += f"feature: {feats[0]}, "
+                else:
+                    s += f"features : [{feats[0]}, ..., {feats[-1]}], "
+            s += "fit: True"
+        else:
+            s += "fit: False"
+        if hasattr(self, "scores_"):
+            s += f"scored ({self.scoring})"
+        return f"<ReceptiveField | {s}>"
+
+    def _delay_and_reshape(self, X, y=None):
+        """Delay and reshape the variables."""
+        if not isinstance(self.estimator_, TimeDelayingRidge):
+            # X is now shape (n_times, n_epochs, n_feats, n_delays)
+            X = _delay_time_series(
+                X,
+                self.tmin,
+                self.tmax,
+                self.sfreq_,
+                fill_mean=self.fit_intercept_,
+            )
+            X = _reshape_for_est(X)
+            # Concat times + epochs
+            if y is not None:
+                y = y.reshape(-1, y.shape[-1], order="F")
+        return X, y
+
+    def fit(self, X, y):
+        """Fit a receptive field model.
+
+        Parameters
+        ----------
+        X : array, shape (n_times[, n_epochs], n_features)
+            The input features for the model.
+        y : array, shape (n_times[, n_epochs][, n_outputs])
+            The output features for the model.
+
+        Returns
+        -------
+        self : instance
+            The instance so you can chain operations.
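+
+        Examples
+        --------
+        A minimal sketch on synthetic data; the shapes, not the values, are
+        the point (illustrative only, not from a real dataset):
+
+        >>> import numpy as np
+        >>> rng = np.random.default_rng(0)
+        >>> X = rng.standard_normal((1000, 3))  # (n_times, n_features)
+        >>> y = rng.standard_normal(1000)  # (n_times,)
+        >>> rf = ReceptiveField(tmin=-0.1, tmax=0.2, sfreq=10.0)
+        >>> rf = rf.fit(X, y)  # doctest: +SKIP
+        >>> rf.coef_.shape  # 3 features x 4 delays  # doctest: +SKIP
+        (3, 4)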
+        """
+        if self.scoring not in _SCORERS.keys():
+            raise ValueError(
+                f"scoring must be one of {sorted(_SCORERS.keys())}, got {self.scoring}"
+            )
+        self.sfreq_ = float(self.sfreq)
+        X, y, _, self._y_dim = self._check_dimensions(X, y)
+
+        if self.tmin > self.tmax:
+            raise ValueError(f"tmin ({self.tmin}) must be at most tmax ({self.tmax})")
+        # Initialize delays
+        self.delays_ = _times_to_delays(self.tmin, self.tmax, self.sfreq_)
+
+        # Define the slice that we should use in the middle
+        self.valid_samples_ = _delays_to_slice(self.delays_)
+
+        if isinstance(self.estimator, numbers.Real):
+            if self.fit_intercept is None:
+                self.fit_intercept_ = True
+            else:
+                self.fit_intercept_ = self.fit_intercept
+            estimator = TimeDelayingRidge(
+                self.tmin,
+                self.tmax,
+                self.sfreq_,
+                alpha=self.estimator,
+                fit_intercept=self.fit_intercept_,
+                n_jobs=self.n_jobs,
+                edge_correction=self.edge_correction,
+            )
+        elif is_regressor(self.estimator):
+            estimator = clone(self.estimator)
+            if (
+                self.fit_intercept is not None
+                and estimator.fit_intercept != self.fit_intercept
+            ):
+                raise ValueError(
+                    f"Estimator fit_intercept ({estimator.fit_intercept}) != "
+                    f"initialization fit_intercept ({self.fit_intercept}), initialize "
+                    "ReceptiveField with the same fit_intercept value or use "
+                    "fit_intercept=None"
+                )
+            self.fit_intercept_ = estimator.fit_intercept
+        else:
+            raise ValueError(
+                "`estimator` must be a float or an instance of `BaseEstimator`, got "
+                f"type {type(self.estimator)}."
+            )
+        self.estimator_ = estimator
+        del estimator
+        _check_estimator(self.estimator_)
+
+        # Create input features
+        n_times, n_epochs, n_feats = X.shape
+        n_outputs = y.shape[-1]
+        n_delays = len(self.delays_)
+
+        # Check that any provided feature names match the input dimension
+        if (self.feature_names is not None) and (len(self.feature_names) != n_feats):
+            raise ValueError(
+                f"n_features in X does not match feature names ({n_feats} != "
+                f"{len(self.feature_names)})"
+            )
+
+        # Create input features
+        X, y = self._delay_and_reshape(X, y)
+
+        self.estimator_.fit(X, y)
+        coef = get_coef(self.estimator_, "coef_")  # (n_targets, n_features)
+        shape = [n_feats, n_delays]
+        if self._y_dim > 1:
+            shape.insert(0, -1)
+        self.coef_ = coef.reshape(shape)
+
+        # Inverse-transform model weights
+        if self.patterns:
+            if isinstance(self.estimator_, TimeDelayingRidge):
+                cov_ = self.estimator_.cov_ / float(n_times * n_epochs - 1)
+                y = y.reshape(-1, y.shape[-1], order="F")
+            else:
+                X = X - X.mean(0, keepdims=True)
+                cov_ = np.cov(X.T)
+            del X
+
+            # Inverse output covariance
+            if y.ndim == 2 and y.shape[1] != 1:
+                y = y - y.mean(0, keepdims=True)
+                inv_Y = pinv(np.cov(y.T))
+            else:
+                inv_Y = 1.0 / float(n_times * n_epochs - 1)
+            del y
+
+            # Inverse coef according to Haufe's method
+            # patterns has shape (n_feats * n_delays, n_outputs)
+            coef = np.reshape(self.coef_, (n_feats * n_delays, n_outputs))
+            patterns = cov_.dot(coef.dot(inv_Y))
+            self.patterns_ = patterns.reshape(shape)
+
+        return self
+
+    def predict(self, X):
+        """Generate predictions with a receptive field.
+
+        Parameters
+        ----------
+        X : array, shape (n_times[, n_epochs], n_channels)
+            The input features for the model.
+
+        Returns
+        -------
+        y_pred : array, shape (n_times[, n_epochs][, n_outputs])
+            The output predictions. Note that valid samples (those
+            unaffected by edge artifacts during the time delaying step) can
+            be obtained using ``y_pred[rf.valid_samples_]``.
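+
+        Examples
+        --------
+        Continuing the hypothetical sketch from ``fit`` above, predictions
+        restricted to samples unaffected by edge artifacts:
+
+        >>> y_pred = rf.predict(X)  # doctest: +SKIP
+        >>> y_valid = y_pred[rf.valid_samples_]  # doctest: +SKIP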
+ """ + if not hasattr(self, "delays_"): + raise NotFittedError("Estimator has not been fit yet.") + X, _, X_dim = self._check_dimensions(X, None, predict=True)[:3] + del _ + # convert to sklearn and back + pred_shape = X.shape[:-1] + if self._y_dim > 1: + pred_shape = pred_shape + (self.coef_.shape[0],) + X, _ = self._delay_and_reshape(X) + y_pred = self.estimator_.predict(X) + y_pred = y_pred.reshape(pred_shape, order="F") + shape = list(y_pred.shape) + if X_dim <= 2: + shape.pop(1) # epochs + extra = 0 + else: + extra = 1 + shape = shape[: self._y_dim + extra] + y_pred.shape = shape + return y_pred + + def score(self, X, y): + """Score predictions generated with a receptive field. + + This calls ``self.predict``, then masks the output of this + and ``y` with ``self.valid_samples_``. Finally, it passes + this to a :mod:`sklearn.metrics` scorer. + + Parameters + ---------- + X : array, shape (n_times[, n_epochs], n_channels) + The input features for the model. + y : array, shape (n_times[, n_epochs][, n_outputs]) + Used for scikit-learn compatibility. + + Returns + ------- + scores : list of float, shape (n_outputs,) + The scores estimated by the model for each output (e.g. mean + R2 of ``predict(X)``). + """ + # Create our scoring object + scorer_ = _SCORERS[self.scoring] + + # Generate predictions, then reshape so we can mask time + X, y = self._check_dimensions(X, y, predict=True)[:2] + n_times, n_epochs, n_outputs = y.shape + y_pred = self.predict(X) + y_pred = y_pred[self.valid_samples_] + y = y[self.valid_samples_] + + # Re-vectorize and call scorer + y = y.reshape([-1, n_outputs], order="F") + y_pred = y_pred.reshape([-1, n_outputs], order="F") + assert y.shape == y_pred.shape + scores = scorer_(y, y_pred, multioutput="raw_values") + return scores + + def _check_dimensions(self, X, y, predict=False): + _validate_type(X, "array-like", "X") + _validate_type(y, ("array-like", None), "y") + X_dim = X.ndim + y_dim = y.ndim if y is not None else 0 + if X_dim == 2: + # Ensure we have a 3D input by adding singleton epochs dimension + X = X[:, np.newaxis, :] + if y is not None: + if y_dim == 1: + y = y[:, np.newaxis, np.newaxis] # epochs, outputs + elif y_dim == 2: + y = y[:, np.newaxis, :] # epochs + else: + raise ValueError( + "y must be shape (n_times[, n_epochs][,n_outputs], got " + f"{y.shape}" + ) + elif X.ndim == 3: + if y is not None: + if y.ndim == 2: + y = y[:, :, np.newaxis] # Add an outputs dim + elif y.ndim != 3: + raise ValueError( + "If X has 3 dimensions, y must have 2 or 3 dimensions" + ) + else: + raise ValueError( + f"X must be shape (n_times[, n_epochs], n_features), got {X.shape}" + ) + if y is not None: + if X.shape[0] != y.shape[0]: + raise ValueError( + f"X and y do not have the same n_times\n{X.shape[0]} != " + f"{y.shape[0]}" + ) + if X.shape[1] != y.shape[1]: + raise ValueError( + f"X and y do not have the same n_epochs\n{X.shape[1]} != " + f"{y.shape[1]}" + ) + if predict and y.shape[-1] not in (len(self.estimator_.coef_), 1): + raise ValueError( + "Number of outputs does not match estimator coefficients dimensions" + ) + return X, y, X_dim, y_dim + + +def _delay_time_series(X, tmin, tmax, sfreq, fill_mean=False): + """Return a time-lagged input time series. + + Parameters + ---------- + X : array, shape (n_times[, n_epochs], n_features) + The time series to delay. Must be 2D or 3D. + tmin : int | float + The starting lag. + tmax : int | float + The ending lag. + Must be >= tmin. + sfreq : int | float + The sampling frequency of the series. Defaults to 1.0. 
+    fill_mean : bool
+        If True, the fill value will be the mean along the time dimension
+        of the feature, and each cropped and delayed segment of data
+        will be shifted to have the same mean value (ensuring that mean
+        subtraction works properly). If False, the fill value will be zero.
+
+    Returns
+    -------
+    delayed : array, shape (n_times[, n_epochs][, n_features], n_delays)
+        The delayed data. It has the same shape as X, with an extra dimension
+        appended to the end.
+
+    Examples
+    --------
+    >>> tmin, tmax = -0.1, 0.2
+    >>> sfreq = 10.
+    >>> x = np.arange(1, 6)
+    >>> x_del = _delay_time_series(x, tmin, tmax, sfreq)
+    >>> print(x_del)  # doctest:+SKIP
+    [[2. 1. 0. 0.]
+     [3. 2. 1. 0.]
+     [4. 3. 2. 1.]
+     [5. 4. 3. 2.]
+     [0. 5. 4. 3.]]
+    """
+    _check_delayer_params(tmin, tmax, sfreq)
+    delays = _times_to_delays(tmin, tmax, sfreq)
+    # Iterate through indices and append
+    delayed = np.zeros(X.shape + (len(delays),))
+    if fill_mean:
+        mean_value = X.mean(axis=0)
+        if X.ndim == 3:
+            mean_value = np.mean(mean_value, axis=0)
+        delayed[:] = mean_value[:, np.newaxis]
+    for ii, ix_delay in enumerate(delays):
+        # Create zeros to populate w/ delays
+        if ix_delay < 0:
+            out = delayed[:ix_delay, ..., ii]
+            use_X = X[-ix_delay:]
+        elif ix_delay > 0:
+            out = delayed[ix_delay:, ..., ii]
+            use_X = X[:-ix_delay]
+        else:  # == 0
+            out = delayed[..., ii]
+            use_X = X
+        out[:] = use_X
+        if fill_mean:
+            out[:] += mean_value - use_X.mean(axis=0)
+    return delayed
+
+
+def _times_to_delays(tmin, tmax, sfreq):
+    """Convert a tmin/tmax in seconds to delays."""
+    # Convert seconds to samples
+    delays = np.arange(int(np.round(tmin * sfreq)), int(np.round(tmax * sfreq) + 1))
+    return delays
+
+
+def _delays_to_slice(delays):
+    """Find the slice to be taken in order to remove missing values."""
+    # A positive maximum delay == cut off rows at the beginning
+    min_delay = None if delays[-1] <= 0 else delays[-1]
+    # A negative minimum delay == cut off rows at the end
+    max_delay = None if delays[0] >= 0 else delays[0]
+    return slice(min_delay, max_delay)
+
+
+def _check_delayer_params(tmin, tmax, sfreq):
+    """Check delayer input parameters. For future custom delay support."""
+    _validate_type(sfreq, "numeric", "`sfreq`")
+
+    for tlim in (tmin, tmax):
+        _validate_type(tlim, "numeric", "tmin/tmax")
+    if not tmin <= tmax:
+        raise ValueError("tmin must be <= tmax")
+
+
+def _reshape_for_est(X_del):
+    """Convert X_del to a sklearn-compatible shape."""
+    n_times, n_epochs, n_feats, n_delays = X_del.shape
+    X_del = X_del.reshape(n_times, n_epochs, -1)  # concatenate feats
+    X_del = X_del.reshape(n_times * n_epochs, -1, order="F")
+    return X_del
+
+
+# Create a correlation scikit-learn-style scorer
+def _corr_score(y_true, y, multioutput=None):
+    assert multioutput == "raw_values"
+    for this_y in (y_true, y):
+        if this_y.ndim != 2:
+            raise ValueError(
+                f"inputs must be shape (samples, outputs), got {this_y.shape}"
+            )
+    return np.array([pearsonr(y_true[:, ii], y[:, ii])[0] for ii in range(y.shape[-1])])
+
+
+def _r2_score(y_true, y, multioutput=None):
+    return r2_score(y_true, y, multioutput=multioutput)
+
+
+_SCORERS = {"r2": _r2_score, "corrcoef": _corr_score}
diff --git a/mne/decoding/search_light.py b/mne/decoding/search_light.py
new file mode 100644
index 0000000..e3059a3
--- /dev/null
+++ b/mne/decoding/search_light.py
@@ -0,0 +1,759 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+ +import logging + +import numpy as np +from sklearn.base import BaseEstimator, MetaEstimatorMixin, TransformerMixin, clone +from sklearn.metrics import check_scoring +from sklearn.preprocessing import LabelEncoder +from sklearn.utils import check_array + +from ..parallel import parallel_func +from ..utils import ProgressBar, _parse_verbose, array_split_idx, fill_doc, verbose +from .base import _check_estimator + + +@fill_doc +class SlidingEstimator(MetaEstimatorMixin, TransformerMixin, BaseEstimator): + """Search Light. + + Fit, predict and score a series of models to each subset of the dataset + along the last dimension. Each entry in the last dimension is referred + to as a task. + + Parameters + ---------- + %(base_estimator)s + %(scoring)s + %(n_jobs)s + %(position)s + %(allow_2d)s + %(verbose)s + + Attributes + ---------- + estimators_ : array-like, shape (n_tasks,) + List of fitted scikit-learn estimators (one per task). + """ + + @verbose + def __init__( + self, + base_estimator, + scoring=None, + n_jobs=None, + *, + position=0, + allow_2d=False, + verbose=None, + ): + _check_estimator(base_estimator) + self.base_estimator = base_estimator + self.n_jobs = n_jobs + self.scoring = scoring + self.position = position + self.allow_2d = allow_2d + self.verbose = verbose + + @property + def _estimator_type(self): + return getattr(self.base_estimator, "_estimator_type", None) + + def __sklearn_tags__(self): + """Get sklearn tags.""" + from sklearn.utils import get_tags + + tags = super().__sklearn_tags__() + sub_tags = get_tags(self.base_estimator) + tags.estimator_type = sub_tags.estimator_type + for kind in ("classifier", "regressor", "transformer"): + if tags.estimator_type == kind: + attr = f"{kind}_tags" + setattr(tags, attr, getattr(sub_tags, attr)) + break + return tags + + def __repr__(self): # noqa: D105 + repr_str = "<" + super().__repr__() + if hasattr(self, "estimators_"): + repr_str = repr_str[:-1] + repr_str += f", fitted with {len(self.estimators_)} estimators" + return repr_str + ">" + + def fit(self, X, y, **fit_params): + """Fit a series of independent estimators to the dataset. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The training input samples. For each data slice, a clone estimator + is fitted independently. The feature dimension can be + multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_tasks). + y : array, shape (n_samples,) | (n_samples, n_targets) + The target values. + **fit_params : dict of string -> object + Parameters to pass to the fit method of the estimator. + + Returns + ------- + self : object + Return self. + """ + X = self._check_Xy(X, y) + parallel, p_func, n_jobs = parallel_func( + _sl_fit, self.n_jobs, max_jobs=X.shape[-1], verbose=False + ) + self.estimators_ = list() + self.fit_params_ = fit_params + + # For fitting, the parallelization is across estimators. 
+ context = _create_progressbar_context(self, X, "Fitting") + with context as pb: + estimators = parallel( + p_func(self.base_estimator, split, y, pb.subset(pb_idx), **fit_params) + for pb_idx, split in array_split_idx(X, n_jobs, axis=-1) + ) + + # Each parallel job can have a different number of training estimators + # We can't directly concatenate them because of sklearn's Bagging API + # (see scikit-learn #9720) + self.estimators_ = np.empty(X.shape[-1], dtype=object) + idx = 0 + for job_estimators in estimators: + for est in job_estimators: + self.estimators_[idx] = est + idx += 1 + return self + + def fit_transform(self, X, y, **fit_params): + """Fit and transform a series of independent estimators to the dataset. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The training input samples. For each task, a clone estimator + is fitted independently. The feature dimension can be + multidimensional, e.g.:: + + X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + y : array, shape (n_samples,) | (n_samples, n_targets) + The target values. + **fit_params : dict of string -> object + Parameters to pass to the fit method of the estimator. + + Returns + ------- + y_pred : array, shape (n_samples, n_tasks) | (n_samples, n_tasks, n_targets) + The predicted values for each estimator. + """ # noqa: E501 + return self.fit(X, y, **fit_params).transform(X) + + def _transform(self, X, method): + """Aux. function to make parallel predictions/transformation.""" + X = self._check_Xy(X) + method = _check_method(self.base_estimator, method) + if X.shape[-1] != len(self.estimators_): + raise ValueError("The number of estimators does not match X.shape[-1]") + # For predictions/transforms the parallelization is across the data and + # not across the estimators to avoid memory load. + parallel, p_func, n_jobs = parallel_func( + _sl_transform, self.n_jobs, max_jobs=X.shape[-1], verbose=False + ) + + X_splits = np.array_split(X, n_jobs, axis=-1) + idx, est_splits = zip(*array_split_idx(self.estimators_, n_jobs)) + + context = _create_progressbar_context(self, X, "Transforming") + with context as pb: + y_pred = parallel( + p_func(est, x, method, pb.subset(pb_idx)) + for pb_idx, est, x in zip(idx, est_splits, X_splits) + ) + + y_pred = np.concatenate(y_pred, axis=1) + return y_pred + + def transform(self, X): + """Transform each data slice/task with a series of independent estimators. + + The number of tasks in X should match the number of tasks/estimators + given at fit time. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The input samples. For each data slice/task, the corresponding + estimator makes a transformation of the data, e.g. + ``[estimators[ii].transform(X[..., ii]) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_tasks). + + Returns + ------- + Xt : array, shape (n_samples, n_estimators) + The transformed values generated by each estimator. + """ # noqa: E501 + return self._transform(X, "transform").astype(X.dtype) + + def predict(self, X): + """Predict each data slice/task with a series of independent estimators. + + The number of tasks in X should match the number of tasks/estimators + given at fit time. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The input samples. 
For each data slice, the corresponding estimator + makes the sample predictions, e.g.: + ``[estimators[ii].predict(X[..., ii]) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_tasks). + + Returns + ------- + y_pred : array, shape (n_samples, n_estimators) | (n_samples, n_tasks, n_targets) + Predicted values for each estimator/data slice. + """ # noqa: E501 + return self._transform(X, "predict") + + def predict_proba(self, X): + """Predict each data slice with a series of independent estimators. + + The number of tasks in X should match the number of tasks/estimators + given at fit time. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The input samples. For each data slice, the corresponding estimator + makes the sample probabilistic predictions, e.g.: + ``[estimators[ii].predict_proba(X[..., ii]) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_tasks). + + Returns + ------- + y_pred : array, shape (n_samples, n_tasks, n_classes) + Predicted probabilities for each estimator/data slice/task. + """ # noqa: E501 + return self._transform(X, "predict_proba") + + def decision_function(self, X): + """Estimate distances of each data slice to the hyperplanes. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The input samples. For each data slice, the corresponding estimator + outputs the distance to the hyperplane, e.g.: + ``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_estimators). + + Returns + ------- + y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2) + Predicted distances for each estimator/data slice. + + Notes + ----- + This requires base_estimator to have a ``decision_function`` method. + """ # noqa: E501 + return self._transform(X, "decision_function") + + def _check_Xy(self, X, y=None): + """Aux. function to check input data.""" + # Once we require sklearn 1.1+ we should do something like: + X = check_array(X, ensure_2d=False, allow_nd=True, input_name="X") + if y is not None: + y = check_array(y, dtype=None, ensure_2d=False, input_name="y") + if len(X) != len(y) or len(y) < 1: + raise ValueError("X and y must have the same length.") + if X.ndim < 3: + err = None + if not self.allow_2d: + err = 3 + elif X.ndim < 2: + err = 2 + if err: + raise ValueError(f"X must have at least {err} dimensions.") + X = X[..., np.newaxis] + return X + + def score(self, X, y): + """Score each estimator on each task. + + The number of tasks in X should match the number of tasks/estimators + given at fit time, i.e. we need + ``X.shape[-1] == len(self.estimators_)``. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_tasks) + The input samples. For each data slice, the corresponding estimator + scores the prediction, e.g.: + ``[estimators[ii].score(X[..., ii], y) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_tasks). + y : array, shape (n_samples,) | (n_samples, n_targets) + The target values. + + Returns + ------- + score : array, shape (n_samples, n_estimators) + Score for each estimator/task. 
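+
+        Examples
+        --------
+        A hypothetical sketch: one logistic regression per time point,
+        fit and scored on synthetic data (illustrative only):
+
+        >>> import numpy as np
+        >>> from sklearn.linear_model import LogisticRegression
+        >>> rng = np.random.default_rng(0)
+        >>> X = rng.standard_normal((40, 5, 10))  # epochs, channels, times
+        >>> y = rng.integers(0, 2, 40)
+        >>> sl = SlidingEstimator(LogisticRegression(), scoring="accuracy")
+        >>> scores = sl.fit(X, y).score(X, y)  # doctest: +SKIP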
+ """ # noqa: E501 + X = self._check_Xy(X, y) + if X.shape[-1] != len(self.estimators_): + raise ValueError("The number of estimators does not match X.shape[-1]") + + scoring = check_scoring(self.base_estimator, self.scoring) + y = _fix_auc(scoring, y) + + # For predictions/transforms the parallelization is across the data and + # not across the estimators to avoid memory load. + parallel, p_func, n_jobs = parallel_func( + _sl_score, self.n_jobs, max_jobs=X.shape[-1], verbose=False + ) + X_splits = np.array_split(X, n_jobs, axis=-1) + est_splits = np.array_split(self.estimators_, n_jobs) + score = parallel( + p_func(est, scoring, x, y) for (est, x) in zip(est_splits, X_splits) + ) + + score = np.concatenate(score, axis=0) + return score + + @property + def classes_(self): + if not hasattr(self.estimators_[0], "classes_"): + raise AttributeError( + "classes_ attribute available only if base_estimator has it, and " + f"estimator {self.estimators_[0]} does not" + ) + return self.estimators_[0].classes_ + + +@fill_doc +def _sl_fit(estimator, X, y, pb, **fit_params): + """Aux. function to fit SlidingEstimator in parallel. + + Fit a clone estimator to each slice of data. + + Parameters + ---------- + %(base_estimator)s + X : array, shape (n_samples, nd_features, n_estimators) + The target data. The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + y : array, shape (n_sample, ) + The target values. + pb : instance of ProgressBar + The progress bar to update. + fit_params : dict | None + Parameters to pass to the fit method of the estimator. + + Returns + ------- + estimators_ : list of estimators + The fitted estimators. + """ + estimators_ = list() + for ii in range(X.shape[-1]): + est = clone(estimator) + est.fit(X[..., ii], y, **fit_params) + estimators_.append(est) + + pb.update(ii + 1) + return estimators_ + + +def _sl_transform(estimators, X, method, pb): + """Aux. function to transform SlidingEstimator in parallel. + + Applies transform/predict/decision_function etc for each slice of data. + + Parameters + ---------- + estimators : list of estimators + The fitted estimators. + X : array, shape (n_samples, nd_features, n_estimators) + The target data. The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + method : str + The estimator method to use (e.g. 'predict', 'transform'). + pb : instance of ProgressBar + The progress bar to update. + + Returns + ------- + y_pred : array, shape (n_samples, n_estimators, n_classes * (n_classes-1) // 2) + The transformations for each slice of data. + """ # noqa: E501 + for ii, est in enumerate(estimators): + transform = getattr(est, method) + _y_pred = transform(X[..., ii]) + # Initialize array of predictions on the first transform iteration + if ii == 0: + y_pred = _sl_init_pred(_y_pred, X) + y_pred[:, ii, ...] = _y_pred + + pb.update(ii + 1) + return y_pred + + +def _sl_init_pred(y_pred, X): + """Aux. function to SlidingEstimator to initialize y_pred.""" + n_sample, n_tasks = X.shape[0], X.shape[-1] + y_pred = np.zeros((n_sample, n_tasks) + y_pred.shape[1:], y_pred.dtype) + return y_pred + + +def _sl_score(estimators, scoring, X, y): + """Aux. function to score SlidingEstimator in parallel. + + Predict and score each slice of data. + + Parameters + ---------- + estimators : list, shape (n_tasks,) + The fitted estimators. + X : array, shape (n_samples, nd_features, n_tasks) + The target data. 
The feature dimension can be multidimensional e.g.
+        X.shape = (n_samples, n_features_1, n_features_2, n_tasks)
+    scoring : callable, str or None
+        If scoring is None (default), the predictions are internally
+        generated by estimator.score(). Else, we must first get the
+        predictions to pass them to ad-hoc scorer.
+    y : array, shape (n_samples,) | (n_samples, n_targets)
+        The target values.
+
+    Returns
+    -------
+    score : array, shape (n_tasks,)
+        The score for each task / slice of data.
+    """
+    n_tasks = X.shape[-1]
+    score = np.zeros(n_tasks)
+    for ii, est in enumerate(estimators):
+        score[ii] = scoring(est, X[..., ii], y)
+    return score
+
+
+def _check_method(estimator, method):
+    """Check that an estimator has the method attribute.
+
+    If method == 'transform' and estimator does not have 'transform', use
+    'predict' instead.
+    """
+    if method == "transform" and not hasattr(estimator, "transform"):
+        method = "predict"
+    if not hasattr(estimator, method):
+        raise ValueError(f"base_estimator does not have `{method}` method.")
+    return method
+
+
+@fill_doc
+class GeneralizingEstimator(SlidingEstimator):
+    """Generalization Light.
+
+    Fit a search-light along the last dimension and use them to apply a
+    systematic cross-task generalization.
+
+    Parameters
+    ----------
+    %(base_estimator)s
+    %(scoring)s
+    %(n_jobs)s
+    %(position)s
+    %(allow_2d)s
+    %(verbose)s
+    """
+
+    def __repr__(self):  # noqa: D105
+        repr_str = super().__repr__()
+        if hasattr(self, "estimators_"):
+            repr_str = repr_str[:-1]
+            repr_str += f", fitted with {len(self.estimators_)} estimators>"
+        return repr_str
+
+    def _transform(self, X, method):
+        """Aux. function to make parallel predictions/transformation."""
+        X = self._check_Xy(X)
+        method = _check_method(self.base_estimator, method)
+
+        parallel, p_func, n_jobs = parallel_func(
+            _gl_transform, self.n_jobs, max_jobs=X.shape[-1], verbose=False
+        )
+
+        context = _create_progressbar_context(self, X, "Transforming")
+        with context as pb:
+            y_pred = parallel(
+                p_func(self.estimators_, x_split, method, pb.subset(pb_idx))
+                for pb_idx, x_split in array_split_idx(
+                    X, n_jobs, axis=-1, n_per_split=len(self.estimators_)
+                )
+            )
+
+        y_pred = np.concatenate(y_pred, axis=2)
+        return y_pred
+
+    def transform(self, X):
+        """Transform each data slice with all possible estimators.
+
+        Parameters
+        ----------
+        X : array, shape (n_samples, nd_features, n_slices)
+            The input samples. For each estimator, the corresponding data
+            slice is used to make a transformation. The feature dimension
+            can be multidimensional e.g.
+            X.shape = (n_samples, n_features_1, n_features_2, n_estimators).
+
+        Returns
+        -------
+        Xt : array, shape (n_samples, n_estimators, n_slices)
+            The transformed values generated by each estimator.
+        """
+        return self._transform(X, "transform")
+
+    def predict(self, X):
+        """Predict each data slice with all possible estimators.
+
+        Parameters
+        ----------
+        X : array, shape (n_samples, nd_features, n_slices)
+            The training input samples. For each data slice, a fitted estimator
+            predicts each slice of the data independently. The feature
+            dimension can be multidimensional e.g.
+            X.shape = (n_samples, n_features_1, n_features_2, n_estimators).
+
+        Returns
+        -------
+        y_pred : array, shape (n_samples, n_estimators, n_slices) | (n_samples, n_estimators, n_slices, n_targets)
+            The predicted values for each estimator.
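+
+        Examples
+        --------
+        A hypothetical sketch of temporal generalization on synthetic data
+        (the shapes are the point here, not the values):
+
+        >>> import numpy as np
+        >>> from sklearn.linear_model import LogisticRegression
+        >>> rng = np.random.default_rng(0)
+        >>> X = rng.standard_normal((40, 5, 10))  # epochs, channels, times
+        >>> y = rng.integers(0, 2, 40)
+        >>> ge = GeneralizingEstimator(LogisticRegression())
+        >>> y_pred = ge.fit(X, y).predict(X)  # doctest: +SKIP
+        >>> y_pred.shape  # (n_samples, n_train_times, n_test_times)  # doctest: +SKIP
+        (40, 10, 10)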
+ """ # noqa: E501 + return self._transform(X, "predict") + + def predict_proba(self, X): + """Estimate probabilistic estimates of each data slice with all possible estimators. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_slices) + The training input samples. For each data slice, a fitted estimator + predicts a slice of the data. The feature dimension can be + multidimensional e.g. + ``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``. + + Returns + ------- + y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes) + The predicted values for each estimator. + + Notes + ----- + This requires ``base_estimator`` to have a ``predict_proba`` method. + """ # noqa: E501 + return self._transform(X, "predict_proba") + + def decision_function(self, X): + """Estimate distances of each data slice to all hyperplanes. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_slices) + The training input samples. Each estimator outputs the distance to + its hyperplane, e.g.: + ``[estimators[ii].decision_function(X[..., ii]) for ii in range(n_estimators)]``. + The feature dimension can be multidimensional e.g. + ``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``. + + Returns + ------- + y_pred : array, shape (n_samples, n_estimators, n_slices, n_classes * (n_classes-1) // 2) + The predicted values for each estimator. + + Notes + ----- + This requires ``base_estimator`` to have a ``decision_function`` + method. + """ # noqa: E501 + return self._transform(X, "decision_function") + + def score(self, X, y): + """Score each of the estimators on the tested dimensions. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_slices) + The input samples. For each data slice, the corresponding estimator + scores the prediction, e.g.: + ``[estimators[ii].score(X[..., ii], y) for ii in range(n_slices)]``. + The feature dimension can be multidimensional e.g. + ``X.shape = (n_samples, n_features_1, n_features_2, n_estimators)``. + y : array, shape (n_samples,) | (n_samples, n_targets) + The target values. + + Returns + ------- + score : array, shape (n_samples, n_estimators, n_slices) + Score for each estimator / data slice couple. + """ # noqa: E501 + X = self._check_Xy(X, y) + # For predictions/transforms the parallelization is across the data and + # not across the estimators to avoid memory load. + parallel, p_func, n_jobs = parallel_func( + _gl_score, self.n_jobs, max_jobs=X.shape[-1], verbose=False + ) + scoring = check_scoring(self.base_estimator, self.scoring) + y = _fix_auc(scoring, y) + + context = _create_progressbar_context(self, X, "Scoring") + with context as pb: + score = parallel( + p_func(self.estimators_, scoring, x, y, pb.subset(pb_idx)) + for pb_idx, x in array_split_idx( + X, n_jobs, axis=-1, n_per_split=len(self.estimators_) + ) + ) + + score = np.concatenate(score, axis=1) + return score + + +def _gl_transform(estimators, X, method, pb): + """Transform the dataset. + + This will apply each estimator to all slices of the data. + + Parameters + ---------- + X : array, shape (n_samples, nd_features, n_slices) + The training input samples. For each data slice, a clone estimator + is fitted independently. The feature dimension can be multidimensional + e.g. X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + method : str + The method to call for each estimator. + pb : instance of ProgressBar + The progress bar to update. 
+ + Returns + ------- + Xt : array, shape (n_samples, n_slices) + The transformed values generated by each estimator. + """ + n_sample, n_iter = X.shape[0], X.shape[-1] + for ii, est in enumerate(estimators): + # stack generalized data for faster prediction + X_stack = X.transpose(np.r_[0, X.ndim - 1, range(1, X.ndim - 1)]) + X_stack = X_stack.reshape(np.r_[n_sample * n_iter, X_stack.shape[2:]]) + transform = getattr(est, method) + _y_pred = transform(X_stack) + # unstack generalizations + if _y_pred.ndim == 2: + _y_pred = np.reshape(_y_pred, [n_sample, n_iter, _y_pred.shape[1]]) + else: + shape = np.r_[n_sample, n_iter, _y_pred.shape[1:]].astype(int) + _y_pred = np.reshape(_y_pred, shape) + # Initialize array of predictions on the first transform iteration + if ii == 0: + y_pred = _gl_init_pred(_y_pred, X, len(estimators)) + y_pred[:, ii, ...] = _y_pred + + pb.update((ii + 1) * n_iter) + return y_pred + + +def _gl_init_pred(y_pred, X, n_train): + """Aux. function to GeneralizingEstimator to initialize y_pred.""" + n_sample, n_iter = X.shape[0], X.shape[-1] + if y_pred.ndim == 3: + y_pred = np.zeros((n_sample, n_train, n_iter, y_pred.shape[-1]), y_pred.dtype) + else: + y_pred = np.zeros((n_sample, n_train, n_iter), y_pred.dtype) + return y_pred + + +def _gl_score(estimators, scoring, X, y, pb): + """Score GeneralizingEstimator in parallel. + + Predict and score each slice of data. + + Parameters + ---------- + estimators : list of estimators + The fitted estimators. + scoring : callable, string or None + If scoring is None (default), the predictions are internally + generated by estimator.score(). Else, we must first get the + predictions to pass them to ad-hoc scorer. + X : array, shape (n_samples, nd_features, n_slices) + The target data. The feature dimension can be multidimensional e.g. + X.shape = (n_samples, n_features_1, n_features_2, n_estimators) + y : array, shape (n_samples,) | (n_samples, n_targets) + The target values. + pb : instance of ProgressBar + The progress bar to update. + + Returns + ------- + score : array, shape (n_estimators, n_slices) + The score for each slice of data. + """ + # FIXME: The level parallelization may be a bit high, and might be memory + # consuming. Perhaps need to lower it down to the loop across X slices. + score_shape = [len(estimators), X.shape[-1]] + for jj in range(X.shape[-1]): + for ii, est in enumerate(estimators): + _score = scoring(est, X[..., jj], y) + # Initialize array of predictions on the first score iteration + if (ii == 0) and (jj == 0): + dtype = type(_score) + score = np.zeros(score_shape, dtype) + score[ii, jj, ...] = _score + + pb.update(jj * len(estimators) + ii + 1) + return score + + +def _fix_auc(scoring, y): + # This fixes sklearn's inability to compute roc_auc when y not in [0, 1] + # scikit-learn/scikit-learn#6874 + if scoring is not None: + score_func = getattr(scoring, "_score_func", None) + kwargs = getattr(scoring, "_kwargs", {}) + if ( + getattr(score_func, "__name__", "") == "roc_auc_score" + and kwargs.get("multi_class", "raise") == "raise" + ): + if np.ndim(y) != 1 or len(set(y)) != 2: + raise ValueError( + "roc_auc scoring can only be computed for two-class problems." 
+ ) + y = LabelEncoder().fit_transform(y) + return y + + +def _create_progressbar_context(inst, X, message): + """Create a progress bar taking into account ``inst.verbose``.""" + multiply = len(inst.estimators_) if isinstance(inst, GeneralizingEstimator) else 1 + n_steps = X.shape[-1] * max(1, multiply) + mesg = f"{message} {inst.__class__.__name__}" + + which_tqdm = "off" if not _check_verbose(inst.verbose) else None + context = ProgressBar( + n_steps, mesg=mesg, position=inst.position, which_tqdm=which_tqdm + ) + + return context + + +def _check_verbose(verbose): + """Check if verbose is above or equal 'INFO' level.""" + logging_level = _parse_verbose(verbose) + bool_verbose = logging_level <= logging.INFO + return bool_verbose diff --git a/mne/decoding/ssd.py b/mne/decoding/ssd.py new file mode 100644 index 0000000..8bc0036 --- /dev/null +++ b/mne/decoding/ssd.py @@ -0,0 +1,419 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np +from scipy.linalg import eigh +from sklearn.base import BaseEstimator, TransformerMixin + +from .._fiff.pick import _picks_to_idx +from ..cov import Covariance, _regularized_covariance +from ..defaults import _handle_default +from ..filter import filter_data +from ..rank import compute_rank +from ..time_frequency import psd_array_welch +from ..utils import ( + _check_option, + _time_mask, + _validate_type, + _verbose_safe_false, + fill_doc, + logger, +) + + +@fill_doc +class SSD(TransformerMixin, BaseEstimator): + """ + Signal decomposition using the Spatio-Spectral Decomposition (SSD). + + SSD seeks to maximize the power at a frequency band of interest while + simultaneously minimizing it at the flanking (surrounding) frequency bins + (considered noise). It extremizes the covariance matrices associated with + signal and noise :footcite:`NikulinEtAl2011`. + + SSD can either be used as a dimensionality reduction method or a + ‘denoised’ low rank factorization method :footcite:`HaufeEtAl2014b`. + + Parameters + ---------- + %(info_not_none)s Must match the input data. + filt_params_signal : dict + Filtering for the frequencies of interest. + filt_params_noise : dict + Filtering for the frequencies of non-interest. + reg : float | str | None (default) + Which covariance estimator to use. + If not None (same as 'empirical'), allow regularization for covariance + estimation. If float, shrinkage is used (0 <= shrinkage <= 1). For str + options, reg will be passed to method :func:`mne.compute_covariance`. + n_components : int | None (default None) + The number of components to extract from the signal. + If None, the number of components equal to the rank of the data are + returned (see ``rank``). + picks : array of int | None (default None) + The indices of good channels. + sort_by_spectral_ratio : bool (default True) + If set to True, the components are sorted according to the spectral + ratio. + See Eq. (24) in :footcite:`NikulinEtAl2011`. + return_filtered : bool (default False) + If return_filtered is True, data is bandpassed and projected onto the + SSD components. + n_fft : int (default None) + If sort_by_spectral_ratio is set to True, then the SSD sources will be + sorted according to their spectral ratio which is calculated based on + :func:`mne.time_frequency.psd_array_welch`. The n_fft parameter sets the + length of FFT used. + See :func:`mne.time_frequency.psd_array_welch` for more information. 
+ cov_method_params : dict | None (default None) + As in :class:`mne.decoding.SPoC` + The default is None. + rank : None | dict | ‘info’ | ‘full’ + As in :class:`mne.decoding.SPoC` + This controls the rank computation that can be read from the + measurement info or estimated from the data, which determines the + maximum possible number of components. + See Notes of :func:`mne.compute_rank` for details. + We recommend to use 'full' when working with epoched data. + + Attributes + ---------- + filters_ : array, shape (n_channels, n_components) + The spatial filters to be multiplied with the signal. + patterns_ : array, shape (n_components, n_channels) + The patterns for reconstructing the signal from the filtered data. + + References + ---------- + .. footbibliography:: + """ + + def __init__( + self, + info, + filt_params_signal, + filt_params_noise, + reg=None, + n_components=None, + picks=None, + sort_by_spectral_ratio=True, + return_filtered=False, + n_fft=None, + cov_method_params=None, + rank=None, + ): + """Initialize instance.""" + dicts = {"signal": filt_params_signal, "noise": filt_params_noise} + for param, dd in [("l", 0), ("h", 0), ("l", 1), ("h", 1)]: + key = ("signal", "noise")[dd] + if param + "_freq" not in dicts[key]: + raise ValueError( + f"{param + '_freq'} must be defined in filter parameters for {key}" + ) + val = dicts[key][param + "_freq"] + if not isinstance(val, int | float): + _validate_type(val, ("numeric",), f"{key} {param}_freq") + # check freq bands + if ( + filt_params_noise["l_freq"] > filt_params_signal["l_freq"] + or filt_params_signal["h_freq"] > filt_params_noise["h_freq"] + ): + raise ValueError( + "Wrongly specified frequency bands!\n" + "The signal band-pass must be within the noise " + "band-pass!" + ) + self.picks = picks + del picks + self.info = info + self.freqs_signal = (filt_params_signal["l_freq"], filt_params_signal["h_freq"]) + self.freqs_noise = (filt_params_noise["l_freq"], filt_params_noise["h_freq"]) + self.filt_params_signal = filt_params_signal + self.filt_params_noise = filt_params_noise + # check if boolean + if not isinstance(sort_by_spectral_ratio, (bool)): + raise ValueError("sort_by_spectral_ratio must be boolean") + self.sort_by_spectral_ratio = sort_by_spectral_ratio + if n_fft is None: + self.n_fft = int(self.info["sfreq"]) + else: + self.n_fft = int(n_fft) + # check if boolean + if not isinstance(return_filtered, (bool)): + raise ValueError("return_filtered must be boolean") + self.return_filtered = return_filtered + self.reg = reg + self.n_components = n_components + self.rank = rank + self.cov_method_params = cov_method_params + + def _check_X(self, X): + """Check input data.""" + _validate_type(X, np.ndarray, "X") + _check_option("X.ndim", X.ndim, (2, 3)) + n_chan = X.shape[-2] + if n_chan != self.info["nchan"]: + raise ValueError( + "Info must match the input data." + f"Found {n_chan} channels but expected {self.info['nchan']}." + ) + + def fit(self, X, y=None): + """Estimate the SSD decomposition on raw or epoched data. + + Parameters + ---------- + X : array, shape ([n_epochs, ]n_channels, n_times) + The input data from which to estimate the SSD. Either 2D array + obtained from continuous data or 3D array obtained from epoched + data. + y : None + Ignored; exists for compatibility with scikit-learn pipelines. + + Returns + ------- + self : instance of SSD + Returns the modified instance. 
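+
+        Examples
+        --------
+        A hypothetical sketch on synthetic data; the channel count, sampling
+        rate, and filter bands below are illustrative, not recommendations:
+
+        >>> import numpy as np
+        >>> import mne
+        >>> info = mne.create_info(4, 250.0, "eeg")
+        >>> ssd = SSD(info, dict(l_freq=9.0, h_freq=11.0),
+        ...           dict(l_freq=8.0, h_freq=12.0), n_components=2)
+        >>> X = np.random.default_rng(0).standard_normal((4, 5000))
+        >>> ssd = ssd.fit(X)  # doctest: +SKIP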
+ """ + ch_types = self.info.get_channel_types(picks=self.picks, unique=True) + if len(ch_types) > 1: + raise ValueError( + "At this point SSD only supports fitting " + f"single channel types. Your info has {len(ch_types)} types." + ) + self.picks_ = _picks_to_idx(self.info, self.picks, none="data", exclude="bads") + self._check_X(X) + X_aux = X[..., self.picks_, :] + + X_signal = filter_data(X_aux, self.info["sfreq"], **self.filt_params_signal) + X_noise = filter_data(X_aux, self.info["sfreq"], **self.filt_params_noise) + X_noise -= X_signal + if X.ndim == 3: + X_signal = np.hstack(X_signal) + X_noise = np.hstack(X_noise) + + # prevent rank change when computing cov with rank='full' + cov_signal = _regularized_covariance( + X_signal, + reg=self.reg, + method_params=self.cov_method_params, + rank="full", + info=self.info, + ) + cov_noise = _regularized_covariance( + X_noise, + reg=self.reg, + method_params=self.cov_method_params, + rank="full", + info=self.info, + ) + + # project cov to rank subspace + cov_signal, cov_noise, rank_proj = _dimensionality_reduction( + cov_signal, cov_noise, self.info, self.rank + ) + + eigvals_, eigvects_ = eigh(cov_signal, cov_noise) + # sort in descending order + ix = np.argsort(eigvals_)[::-1] + self.eigvals_ = eigvals_[ix] + # project back to sensor space + self.filters_ = np.matmul(rank_proj, eigvects_[:, ix]) + self.patterns_ = np.linalg.pinv(self.filters_) + + # We assume that ordering by spectral ratio is more important + # than the initial ordering. This ordering should be also learned when + # fitting. + X_ssd = self.filters_.T @ X[..., self.picks_, :] + sorter_spec = Ellipsis + if self.sort_by_spectral_ratio: + _, sorter_spec = self.get_spectral_ratio(ssd_sources=X_ssd) + self.sorter_spec = sorter_spec + logger.info("Done.") + return self + + def transform(self, X): + """Estimate epochs sources given the SSD filters. + + Parameters + ---------- + X : array, shape ([n_epochs, ]n_channels, n_times) + The input data from which to estimate the SSD. Either 2D array + obtained from continuous data or 3D array obtained from epoched + data. + + Returns + ------- + X_ssd : array, shape ([n_epochs, ]n_components, n_times) + The processed data. + """ + self._check_X(X) + if self.filters_ is None: + raise RuntimeError("No filters available. Please first call fit") + if self.return_filtered: + X_aux = X[..., self.picks_, :] + X = filter_data(X_aux, self.info["sfreq"], **self.filt_params_signal) + X_ssd = self.filters_.T @ X[..., self.picks_, :] + if X.ndim == 2: + X_ssd = X_ssd[self.sorter_spec][: self.n_components] + else: + X_ssd = X_ssd[:, self.sorter_spec, :][:, : self.n_components, :] + return X_ssd + + def fit_transform(self, X, y=None, **fit_params): + """Fit SSD to data, then transform it. + + Fits transformer to ``X`` and ``y`` with optional parameters ``fit_params``, and + returns a transformed version of ``X``. + + Parameters + ---------- + X : array, shape ([n_epochs, ]n_channels, n_times) + The input data from which to estimate the SSD. Either 2D array obtained from + continuous data or 3D array obtained from epoched data. + y : None + Ignored; exists for compatibility with scikit-learn pipelines. + **fit_params : dict + Additional fitting parameters passed to the :meth:`mne.decoding.SSD.fit` + method. Not used for this class. + + Returns + ------- + X_ssd : array, shape ([n_epochs, ]n_components, n_times) + The processed data. 
+        """
+        # use parent TransformerMixin method but with custom docstring
+        return super().fit_transform(X, y=y, **fit_params)
+
+    def get_spectral_ratio(self, ssd_sources):
+        """Get the spectral signal-to-noise ratio for each spatial filter.
+
+        Spectral ratio measure for best n_components selection.
+        See :footcite:`NikulinEtAl2011`, Eq. (24).
+
+        Parameters
+        ----------
+        ssd_sources : array
+            Data projected to SSD space.
+
+        Returns
+        -------
+        spec_ratio : array, shape (n_channels,)
+            Array with the spectral ratio value for each component.
+        sorter_spec : array, shape (n_channels,)
+            Array of indices for sorting spec_ratio.
+
+        References
+        ----------
+        .. footbibliography::
+        """
+        psd, freqs = psd_array_welch(
+            ssd_sources, sfreq=self.info["sfreq"], n_fft=self.n_fft
+        )
+        sig_idx = _time_mask(freqs, *self.freqs_signal)
+        noise_idx = _time_mask(freqs, *self.freqs_noise)
+        if psd.ndim == 3:
+            mean_sig = psd[:, :, sig_idx].mean(axis=2).mean(axis=0)
+            mean_noise = psd[:, :, noise_idx].mean(axis=2).mean(axis=0)
+            spec_ratio = mean_sig / mean_noise
+        else:
+            mean_sig = psd[:, sig_idx].mean(axis=1)
+            mean_noise = psd[:, noise_idx].mean(axis=1)
+            spec_ratio = mean_sig / mean_noise
+        sorter_spec = spec_ratio.argsort()[::-1]
+        return spec_ratio, sorter_spec
+
+    def inverse_transform(self):
+        """Not implemented yet."""
+        raise NotImplementedError("inverse_transform is not yet available.")
+
+    def apply(self, X):
+        """Remove selected components from the signal.
+
+        This procedure will reconstruct M/EEG signals from which the dynamics
+        described by the excluded components is subtracted
+        (denoised by low-rank factorization).
+        See :footcite:`HaufeEtAl2014b` for more information.
+
+        .. note:: Unlike in other classes with an apply method,
+           only NumPy arrays are supported (not instances of MNE objects).
+
+        Parameters
+        ----------
+        X : array, shape ([n_epochs, ]n_channels, n_times)
+            The input data from which to estimate the SSD. Either 2D array
+            obtained from continuous data or 3D array obtained from epoched
+            data.
+
+        Returns
+        -------
+        X : array, shape ([n_epochs, ]n_channels, n_times)
+            The processed data.
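+
+        Examples
+        --------
+        A hypothetical sketch, continuing from an ``ssd`` fitted with
+        ``n_components=2`` on data ``X``:
+
+        >>> X_denoised = ssd.apply(X)  # same shape as X  # doctest: +SKIP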
+ """ + X_ssd = self.transform(X) + pick_patterns = self.patterns_[self.sorter_spec][: self.n_components].T + X = pick_patterns @ X_ssd + return X + + +def _dimensionality_reduction(cov_signal, cov_noise, info, rank): + """Perform dimensionality reduction on the covariance matrices.""" + n_channels = cov_signal.shape[0] + + # find ranks of covariance matrices + rank_signal = list( + compute_rank( + Covariance( + cov_signal, + info.ch_names, + list(), + list(), + 0, + verbose=_verbose_safe_false(), + ), + rank, + _handle_default("scalings_cov_rank", None), + info, + ).values() + )[0] + rank_noise = list( + compute_rank( + Covariance( + cov_noise, + info.ch_names, + list(), + list(), + 0, + verbose=_verbose_safe_false(), + ), + rank, + _handle_default("scalings_cov_rank", None), + info, + ).values() + )[0] + rank = np.min([rank_signal, rank_noise]) # should be identical + + if rank < n_channels: + eigvals, eigvects = eigh(cov_signal) + # sort in descending order + ix = np.argsort(eigvals)[::-1] + eigvals = eigvals[ix] + eigvects = eigvects[:, ix] + # compute rank subspace projection matrix + rank_proj = np.matmul( + eigvects[:, :rank], np.eye(rank) * (eigvals[:rank] ** -0.5) + ) + logger.info( + "Projecting covariance of %i channels to %i rank subspace", + n_channels, + rank, + ) + else: + rank_proj = np.eye(n_channels) + logger.info("Preserving covariance rank (%i)", rank) + + # project covariance matrices to rank subspace + cov_signal = np.matmul(rank_proj.T, np.matmul(cov_signal, rank_proj)) + cov_noise = np.matmul(rank_proj.T, np.matmul(cov_noise, rank_proj)) + return cov_signal, cov_noise, rank_proj diff --git a/mne/decoding/time_delaying_ridge.py b/mne/decoding/time_delaying_ridge.py new file mode 100644 index 0000000..b08f997 --- /dev/null +++ b/mne/decoding/time_delaying_ridge.py @@ -0,0 +1,395 @@ +"""TimeDelayingRidge class.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np +from scipy import linalg +from scipy.signal import fftconvolve +from scipy.sparse.csgraph import laplacian +from sklearn.base import BaseEstimator, RegressorMixin + +from ..cuda import _setup_cuda_fft_multiply_repeated +from ..filter import next_fast_len +from ..fixes import jit +from ..utils import ProgressBar, _check_option, _validate_type, logger, warn + + +def _compute_corrs( + X, y, smin, smax, n_jobs=None, fit_intercept=False, edge_correction=True +): + """Compute auto- and cross-correlations.""" + if fit_intercept: + # We could do this in the Fourier domain, too, but it should + # be a bit cleaner numerically to do it here. 
+ X_offset = np.mean(X, axis=0) + y_offset = np.mean(y, axis=0) + if X.ndim == 3: + X_offset = X_offset.mean(axis=0) + y_offset = np.mean(y_offset, axis=0) + X = X - X_offset + y = y - y_offset + else: + X_offset = y_offset = 0.0 + if X.ndim == 2: + assert y.ndim == 2 + X = X[:, np.newaxis, :] + y = y[:, np.newaxis, :] + assert X.shape[:2] == y.shape[:2] + len_trf = smax - smin + len_x, n_epochs, n_ch_x = X.shape + len_y, n_epochs_y, n_ch_y = y.shape + assert len_x == len_y + assert n_epochs == n_epochs_y + + n_fft = next_fast_len(2 * X.shape[0] - 1) + + _, cuda_dict = _setup_cuda_fft_multiply_repeated( + n_jobs, [1.0], n_fft, "correlation calculations" + ) + del n_jobs # only used to set as CUDA + + # create our Toeplitz indexer + ij = np.empty((len_trf, len_trf), int) + for ii in range(len_trf): + ij[ii, ii:] = np.arange(len_trf - ii) + x = np.arange(n_fft - 1, n_fft - len_trf + ii, -1) + ij[ii + 1 :, ii] = x + + x_xt = np.zeros([n_ch_x * len_trf] * 2) + x_y = np.zeros((len_trf, n_ch_x, n_ch_y), order="F") + n = n_epochs * (n_ch_x * (n_ch_x + 1) // 2 + n_ch_x) + logger.info(f"Fitting {n_epochs} epochs, {n_ch_x} channels") + pb = ProgressBar(n, mesg="Sample") + count = 0 + pb.update(count) + for ei in range(n_epochs): + this_X = X[:, ei, :] + # XXX maybe this is what we should parallelize over CPUs at some point + X_fft = cuda_dict["rfft"](this_X, n=n_fft, axis=0) + X_fft_conj = X_fft.conj() + y_fft = cuda_dict["rfft"](y[:, ei, :], n=n_fft, axis=0) + + for ch0 in range(n_ch_x): + for oi, ch1 in enumerate(range(ch0, n_ch_x)): + this_result = cuda_dict["irfft"]( + X_fft[:, ch0] * X_fft_conj[:, ch1], n=n_fft, axis=0 + ) + # Our autocorrelation structure is a Toeplitz matrix, but + # it's faster to create the Toeplitz ourselves than use + # linalg.toeplitz. + this_result = this_result[ij] + # However, we need to adjust for coeffs that are cut off, + # i.e. the non-zero delays should not have the same AC value + # as the zero-delay ones (because they actually have fewer + # coefficients). + # + # These adjustments also follow a Toeplitz structure, so we + # construct a matrix of what has been left off, compute their + # inner products, and remove them. 
+ if edge_correction: + _edge_correct(this_result, this_X, smax, smin, ch0, ch1) + + # Store the results in our output matrix + x_xt[ + ch0 * len_trf : (ch0 + 1) * len_trf, + ch1 * len_trf : (ch1 + 1) * len_trf, + ] += this_result + if ch0 != ch1: + x_xt[ + ch1 * len_trf : (ch1 + 1) * len_trf, + ch0 * len_trf : (ch0 + 1) * len_trf, + ] += this_result.T + count += 1 + pb.update(count) + + # compute the crosscorrelations + cc_temp = cuda_dict["irfft"]( + y_fft * X_fft_conj[:, slice(ch0, ch0 + 1)], n=n_fft, axis=0 + ) + if smin < 0 and smax >= 0: + x_y[:-smin, ch0] += cc_temp[smin:] + x_y[len_trf - smax :, ch0] += cc_temp[:smax] + else: + x_y[:, ch0] += cc_temp[smin:smax] + count += 1 + pb.update(count) + + x_y = np.reshape(x_y, (n_ch_x * len_trf, n_ch_y), order="F") + return x_xt, x_y, n_ch_x, X_offset, y_offset + + +@jit() +def _edge_correct(this_result, this_X, smax, smin, ch0, ch1): + if smax > 0: + tail = _toeplitz_dot(this_X[-1:-smax:-1, ch0], this_X[-1:-smax:-1, ch1]) + if smin > 0: + tail = tail[smin - 1 :, smin - 1 :] + this_result[max(-smin + 1, 0) :, max(-smin + 1, 0) :] -= tail + if smin < 0: + head = _toeplitz_dot(this_X[:-smin, ch0], this_X[:-smin, ch1])[::-1, ::-1] + if smax < 0: + head = head[:smax, :smax] + this_result[:-smin, :-smin] -= head + + +@jit() +def _toeplitz_dot(a, b): + """Create upper triangular Toeplitz matrices & compute the dot product.""" + # This is equivalent to: + # a = linalg.toeplitz(a) + # b = linalg.toeplitz(b) + # a[np.triu_indices(len(a), 1)] = 0 + # b[np.triu_indices(len(a), 1)] = 0 + # out = np.dot(a.T, b) + assert a.shape == b.shape and a.ndim == 1 + out = np.outer(a, b) + for ii in range(1, len(a)): + out[ii, ii:] += out[ii - 1, ii - 1 : -1] + out[ii + 1 :, ii] += out[ii:-1, ii - 1] + return out + + +def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method="direct", normed=False): + """Compute regularization parameter from neighbors.""" + known_types = ("ridge", "laplacian") + if isinstance(reg_type, str): + reg_type = (reg_type,) * 2 + if len(reg_type) != 2: + raise ValueError(f"reg_type must have two elements, got {len(reg_type)}") + for r in reg_type: + if r not in known_types: + raise ValueError(f"reg_type entries must be one of {known_types}, got {r}") + reg_time = reg_type[0] == "laplacian" and n_delays > 1 + reg_chs = reg_type[1] == "laplacian" and n_ch_x > 1 + if not reg_time and not reg_chs: + return np.eye(n_ch_x * n_delays) + # regularize time + if reg_time: + reg = np.eye(n_delays) + stride = n_delays + 1 + reg.flat[1::stride] += -1 + reg.flat[n_delays::stride] += -1 + reg.flat[n_delays + 1 : -n_delays - 1 : stride] += 1 + args = [reg] * n_ch_x + reg = linalg.block_diag(*args) + else: + reg = np.zeros((n_delays * n_ch_x,) * 2) + + # regularize features + if reg_chs: + block = n_delays * n_delays + row_offset = block * n_ch_x + stride = n_delays * n_ch_x + 1 + reg.flat[n_delays:-row_offset:stride] += -1 + reg.flat[n_delays + row_offset :: stride] += 1 + reg.flat[row_offset:-n_delays:stride] += -1 + reg.flat[: -(n_delays + row_offset) : stride] += 1 + assert np.array_equal(reg[::-1, ::-1], reg) + + if method == "direct": + if normed: + norm = np.sqrt(np.diag(reg)) + reg /= norm + reg /= norm[:, np.newaxis] + return reg + else: + # Use csgraph. Note that our -1's above are really the neighbors! + # If we ever want to allow arbitrary adjacency matrices, this is how + # we'd want to do it. 
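+        # For example, for a chain of three delays the adjacency implied by
+        # the -1 entries gives laplacian(-reg) ==
+        # [[1, -1, 0], [-1, 2, -1], [0, -1, 1]], matching the "direct"
+        # construction above.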
+ reg = laplacian(-reg, normed=normed) + return reg + + +def _fit_corrs(x_xt, x_y, n_ch_x, reg_type, alpha, n_ch_in): + """Fit the model using correlation matrices.""" + # do the regularized solving + n_ch_out = x_y.shape[1] + assert x_y.shape[0] % n_ch_x == 0 + n_delays = x_y.shape[0] // n_ch_x + reg = _compute_reg_neighbors(n_ch_x, n_delays, reg_type) + mat = x_xt + alpha * reg + # From sklearn + try: + # Note: we must use overwrite_a=False in order to be able to + # use the fall-back solution below in case a LinAlgError + # is raised + w = linalg.solve(mat, x_y, overwrite_a=False, assume_a="pos") + except np.linalg.LinAlgError: + warn( + "Singular matrix in solving dual problem. Using " + "least-squares solution instead." + ) + w = linalg.lstsq(mat, x_y, lapack_driver="gelsy")[0] + w = w.T.reshape([n_ch_out, n_ch_in, n_delays]) + return w + + +class TimeDelayingRidge(RegressorMixin, BaseEstimator): + """Ridge regression of data with time delays. + + Parameters + ---------- + tmin : int | float + The starting lag, in seconds (or samples if ``sfreq`` == 1). + Negative values correspond to times in the past. + tmax : int | float + The ending lag, in seconds (or samples if ``sfreq`` == 1). + Positive values correspond to times in the future. + Must be >= tmin. + sfreq : float + The sampling frequency used to convert times into samples. + alpha : float + The ridge (or laplacian) regularization factor. + reg_type : str | list + Can be ``"ridge"`` (default) or ``"laplacian"``. + Can also be a 2-element list specifying how to regularize in time + and across adjacent features. + fit_intercept : bool + If True (default), the sample mean is removed before fitting. + n_jobs : int | str + The number of jobs to use. Can be an int (default 1) or ``'cuda'``. + + .. versionadded:: 0.18 + edge_correction : bool + If True (default), correct the autocorrelation coefficients for + non-zero delays for the fact that fewer samples are available. + Disabling this speeds up performance at the cost of accuracy + depending on the relationship between epoch length and model + duration. Only used if ``estimator`` is float or None. + + .. versionadded:: 0.18 + + See Also + -------- + mne.decoding.ReceptiveField + + Notes + ----- + This class is meant to be used with :class:`mne.decoding.ReceptiveField` + by only implicitly doing the time delaying. For reasonable receptive + field and input signal sizes, it should be more CPU and memory + efficient by using frequency-domain methods (FFTs) to compute the + auto- and cross-correlations. + """ + + _estimator_type = "regressor" + + def __init__( + self, + tmin, + tmax, + sfreq, + alpha=0.0, + reg_type="ridge", + fit_intercept=True, + n_jobs=None, + edge_correction=True, + ): + self.tmin = tmin + self.tmax = tmax + self.sfreq = sfreq + self.alpha = alpha + self.reg_type = reg_type + self.fit_intercept = fit_intercept + self.edge_correction = edge_correction + self.n_jobs = n_jobs + + @property + def _smin(self): + return int(round(self.tmin_ * self.sfreq_)) + + @property + def _smax(self): + return int(round(self.tmax_ * self.sfreq_)) + 1 + + def fit(self, X, y): + """Estimate the coefficients of the linear model. + + Parameters + ---------- + X : array, shape (n_samples[, n_epochs], n_features) + The training input samples to estimate the linear coefficients. + y : array, shape (n_samples[, n_epochs], n_outputs) + The target values. + + Returns + ------- + self : instance of TimeDelayingRidge + Returns the modified instance. 
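+
+        Examples
+        --------
+        A minimal sketch with synthetic data (names and shapes here are
+        illustrative only)::
+
+            >>> import numpy as np
+            >>> rng = np.random.default_rng(0)
+            >>> X = rng.standard_normal((1000, 3))  # (n_samples, n_features)
+            >>> y = rng.standard_normal((1000, 1))  # (n_samples, n_outputs)
+            >>> tdr = TimeDelayingRidge(tmin=-0.1, tmax=0.4, sfreq=100.0, alpha=1.0)
+            >>> tdr.fit(X, y).coef_.shape  # doctest: +SKIP
+            (1, 3, 51)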
+ """ + _validate_type(X, "array-like", "X") + _validate_type(y, "array-like", "y") + self.tmin_ = float(self.tmin) + self.tmax_ = float(self.tmax) + self.sfreq_ = float(self.sfreq) + self.alpha_ = float(self.alpha) + if self.tmin_ > self.tmax_: + raise ValueError(f"tmin must be <= tmax, got {self.tmin_} and {self.tmax_}") + X = np.asarray(X, dtype=float) + y = np.asarray(y, dtype=float) + if X.ndim == 3: + assert y.ndim == 3 + assert X.shape[:2] == y.shape[:2] + else: + if X.ndim == 1: + X = X[:, np.newaxis] + if y.ndim == 1: + y = y[:, np.newaxis] + assert X.ndim == 2 + assert y.ndim == 2 + _check_option("y.shape[0]", y.shape[0], (X.shape[0],)) + # These are split into two functions because it's possible that we + # might want to allow people to do them separately (e.g., to test + # different regularization parameters). + self.cov_, x_y_, n_ch_x, X_offset, y_offset = _compute_corrs( + X, + y, + self._smin, + self._smax, + self.n_jobs, + self.fit_intercept, + self.edge_correction, + ) + self.coef_ = _fit_corrs( + self.cov_, x_y_, n_ch_x, self.reg_type, self.alpha_, n_ch_x + ) + # This is the sklearn formula from LinearModel (will be 0. for no fit) + if self.fit_intercept: + self.intercept_ = y_offset - np.dot(X_offset, self.coef_.sum(-1).T) + else: + self.intercept_ = 0.0 + return self + + def predict(self, X): + """Predict the output. + + Parameters + ---------- + X : array, shape (n_samples[, n_epochs], n_features) + The data. + + Returns + ------- + X : ndarray + The predicted response. + """ + if X.ndim == 2: + X = X[:, np.newaxis, :] + singleton = True + else: + singleton = False + out = np.zeros(X.shape[:2] + (self.coef_.shape[0],)) + smin = self._smin + offset = max(smin, 0) + for ei in range(X.shape[1]): + for oi in range(self.coef_.shape[0]): + for fi in range(self.coef_.shape[1]): + temp = fftconvolve(X[:, ei, fi], self.coef_[oi, fi]) + temp = temp[max(-smin, 0) :][: len(out) - offset] + out[offset : len(temp) + offset, ei, oi] += temp + out += self.intercept_ + if singleton: + out = out[:, 0, :] + return out diff --git a/mne/decoding/time_frequency.py b/mne/decoding/time_frequency.py new file mode 100644 index 0000000..de6ec52 --- /dev/null +++ b/mne/decoding/time_frequency.py @@ -0,0 +1,168 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np +from sklearn.base import BaseEstimator, TransformerMixin + +from ..time_frequency.tfr import _compute_tfr +from ..utils import _check_option, fill_doc, verbose + + +@fill_doc +class TimeFrequency(TransformerMixin, BaseEstimator): + """Time frequency transformer. + + Time-frequency transform of times series along the last axis. + + Parameters + ---------- + freqs : array-like of float, shape (n_freqs,) + The frequencies. + sfreq : float | int, default 1.0 + Sampling frequency of the data. + method : 'multitaper' | 'morlet', default 'morlet' + The time-frequency method. 'morlet' convolves a Morlet wavelet. + 'multitaper' uses Morlet wavelets windowed with multiple DPSS + multitapers. + n_cycles : float | array of float, default 7.0 + Number of cycles in the Morlet wavelet. Fixed number + or one per frequency. + time_bandwidth : float, default None + If None and method=multitaper, will be set to 4.0 (3 tapers). + Time x (Full) Bandwidth product. Only applies if + method == 'multitaper'. The number of good tapers (low-bias) is + chosen automatically based on this to equal floor(time_bandwidth - 1). 
+ use_fft : bool, default True + Use the FFT for convolutions or not. + decim : int | slice, default 1 + To reduce memory usage, decimation factor after time-frequency + decomposition. + If `int`, returns tfr[..., ::decim]. + If `slice`, returns tfr[..., decim]. + + .. note:: Decimation may create aliasing artifacts, yet decimation + is done after the convolutions. + + output : str, default 'complex' + * 'complex' : single trial complex. + * 'power' : single trial power. + * 'phase' : single trial phase. + %(n_jobs)s + The number of epochs to process at the same time. The parallelization + is implemented across channels. + %(verbose)s + + See Also + -------- + mne.time_frequency.tfr_morlet + mne.time_frequency.tfr_multitaper + """ + + @verbose + def __init__( + self, + freqs, + sfreq=1.0, + method="morlet", + n_cycles=7.0, + time_bandwidth=None, + use_fft=True, + decim=1, + output="complex", + n_jobs=1, + verbose=None, + ): + """Init TimeFrequency transformer.""" + # Check non-average output + output = _check_option("output", output, ["complex", "power", "phase"]) + + self.freqs = freqs + self.sfreq = sfreq + self.method = method + self.n_cycles = n_cycles + self.time_bandwidth = time_bandwidth + self.use_fft = use_fft + self.decim = decim + # Check that output is not an average metric (e.g. ITC) + self.output = output + self.n_jobs = n_jobs + self.verbose = verbose + + def fit_transform(self, X, y=None): + """Time-frequency transform of times series along the last axis. + + Parameters + ---------- + X : array, shape (n_samples, n_channels, n_times) + The training data samples. The channel dimension can be zero- or + 1-dimensional. + y : None + For scikit-learn compatibility purposes. + + Returns + ------- + Xt : array, shape (n_samples, n_channels, n_freqs, n_times) + The time-frequency transform of the data, where n_channels can be + zero- or 1-dimensional. + """ + return self.fit(X, y).transform(X) + + def fit(self, X, y=None): # noqa: D401 + """Do nothing (for scikit-learn compatibility purposes). + + Parameters + ---------- + X : array, shape (n_samples, n_channels, n_times) + The training data. + y : array | None + The target values. + + Returns + ------- + self : object + Return self. + """ + return self + + def transform(self, X): + """Time-frequency transform of times series along the last axis. + + Parameters + ---------- + X : array, shape (n_samples, n_channels, n_times) + The training data samples. The channel dimension can be zero- or + 1-dimensional. + + Returns + ------- + Xt : array, shape (n_samples, n_channels, n_freqs, n_times) + The time-frequency transform of the data, where n_channels can be + zero- or 1-dimensional. + """ + # Ensure 3-dimensional X + shape = X.shape[1:-1] + if not shape: + X = X[:, np.newaxis, :] + + # Compute time-frequency + Xt = _compute_tfr( + X, + freqs=self.freqs, + sfreq=self.sfreq, + method=self.method, + n_cycles=self.n_cycles, + zero_mean=True, + time_bandwidth=self.time_bandwidth, + use_fft=self.use_fft, + decim=self.decim, + output=self.output, + n_jobs=self.n_jobs, + verbose=self.verbose, + ) + + # Back to original shape + if not shape: + Xt = Xt[:, 0, :] + + return Xt diff --git a/mne/decoding/transformer.py b/mne/decoding/transformer.py new file mode 100644 index 0000000..8eb2dcc --- /dev/null +++ b/mne/decoding/transformer.py @@ -0,0 +1,920 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
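+
+# The transformers in this module are thin adapters that make MNE
+# epochs-shaped arrays usable inside scikit-learn pipelines, e.g.
+# (illustrative only, ``epochs`` being any mne.Epochs instance):
+#
+#     make_pipeline(Scaler(epochs.info), Vectorizer(), LogisticRegression())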
+
+import numpy as np
+from sklearn.base import BaseEstimator, TransformerMixin
+
+from .._fiff.pick import (
+    _pick_data_channels,
+    _picks_by_type,
+    _picks_to_idx,
+    pick_info,
+    pick_types,
+)
+from ..cov import _check_scalings_user
+from ..filter import filter_data
+from ..time_frequency import psd_array_multitaper
+from ..utils import _check_option, _validate_type, fill_doc, verbose
+
+
+class _ConstantScaler:
+    """Scale channel types using constant values."""
+
+    def __init__(self, info, scalings, do_scaling=True):
+        self._scalings = scalings
+        self._info = info
+        self._do_scaling = do_scaling
+
+    def fit(self, X, y=None):
+        scalings = _check_scalings_user(self._scalings)
+        picks_by_type = _picks_by_type(
+            pick_info(self._info, _pick_data_channels(self._info, exclude=()))
+        )
+        std = np.ones(sum(len(p[1]) for p in picks_by_type))
+        if X.shape[1] != len(std):
+            raise ValueError(
+                f"info had {len(std)} data channels but X has {X.shape[1]} channels"
+            )
+        if self._do_scaling:  # this is silly, but necessary for completeness
+            for kind, picks in picks_by_type:
+                std[picks] = 1.0 / scalings[kind]
+        self.std_ = std
+        self.mean_ = np.zeros_like(std)
+        return self
+
+    def transform(self, X):
+        return X / self.std_
+
+    def inverse_transform(self, X, y=None):
+        return X * self.std_
+
+    def fit_transform(self, X, y=None):
+        return self.fit(X, y).transform(X)
+
+
+def _sklearn_reshape_apply(func, return_result, X, *args, **kwargs):
+    """Reshape epochs and apply function."""
+    if not isinstance(X, np.ndarray):
+        raise ValueError(f"data should be an np.ndarray, got {type(X)}.")
+    orig_shape = X.shape
+    X = np.reshape(X.transpose(0, 2, 1), (-1, orig_shape[1]))
+    X = func(X, *args, **kwargs)
+    if return_result:
+        X.shape = (orig_shape[0], orig_shape[2], orig_shape[1])
+        X = X.transpose(0, 2, 1)
+    return X
+
+
+@fill_doc
+class Scaler(TransformerMixin, BaseEstimator):
+    """Standardize channel data.
+
+    This class scales data for each channel. It differs from scikit-learn
+    classes (e.g., :class:`sklearn.preprocessing.StandardScaler`) in that
+    it scales each *channel* by estimating μ and σ using data from all
+    time points and epochs, as opposed to standardizing each *feature*
+    (i.e., each time point for each channel) by estimating μ and σ using
+    data from all epochs.
+
+    Parameters
+    ----------
+    %(info)s Only necessary if ``scalings`` is a dict or None.
+    scalings : dict, str, default None
+        Scaling method to be applied to data channel wise.
+
+        * if scalings is None (default), scales mag by 1e15, grad by 1e13,
+          and eeg by 1e6.
+        * if scalings is :class:`dict`, keys are channel types and values
+          are scale factors.
+        * if ``scalings=='median'``,
+          :class:`sklearn.preprocessing.RobustScaler`
+          is used (requires sklearn version 0.17+).
+        * if ``scalings=='mean'``,
+          :class:`sklearn.preprocessing.StandardScaler`
+          is used.
+
+    with_mean : bool, default True
+        If True, center the data using mean (or median) before scaling.
+        Ignored for channel-type scaling.
+    with_std : bool, default True
+        If True, scale the data to unit variance (``scalings='mean'``),
+        quantile range (``scalings='median'``), or by channel type
+        if ``scalings`` is a dict or None.
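+
+    Examples
+    --------
+    A minimal sketch with random data (the ``info`` here is illustrative)::
+
+        >>> import numpy as np
+        >>> import mne
+        >>> info = mne.create_info(["EEG 001", "EEG 002"], 100.0, "eeg")
+        >>> X = np.random.randn(5, 2, 100)  # (n_epochs, n_channels, n_times)
+        >>> Scaler(info).fit_transform(X).shape  # scales EEG by 1e6
+        (5, 2, 100)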
+ """ + + def __init__(self, info=None, scalings=None, with_mean=True, with_std=True): + self.info = info + self.with_mean = with_mean + self.with_std = with_std + self.scalings = scalings + + if not (scalings is None or isinstance(scalings, dict | str)): + raise ValueError( + f"scalings type should be dict, str, or None, got {type(scalings)}" + ) + if isinstance(scalings, str): + _check_option("scalings", scalings, ["mean", "median"]) + if scalings is None or isinstance(scalings, dict): + if info is None: + raise ValueError( + f'Need to specify "info" if scalings is {type(scalings)}' + ) + self._scaler = _ConstantScaler(info, scalings, self.with_std) + elif scalings == "mean": + from sklearn.preprocessing import StandardScaler + + self._scaler = StandardScaler( + with_mean=self.with_mean, with_std=self.with_std + ) + else: # scalings == 'median': + from sklearn.preprocessing import RobustScaler + + self._scaler = RobustScaler( + with_centering=self.with_mean, with_scaling=self.with_std + ) + + def fit(self, epochs_data, y=None): + """Standardize data across channels. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data to concatenate channels. + y : array, shape (n_epochs,) + The label for each epoch. + + Returns + ------- + self : instance of Scaler + The modified instance. + """ + _validate_type(epochs_data, np.ndarray, "epochs_data") + if epochs_data.ndim == 2: + epochs_data = epochs_data[..., np.newaxis] + assert epochs_data.ndim == 3, epochs_data.shape + _sklearn_reshape_apply(self._scaler.fit, False, epochs_data, y=y) + return self + + def transform(self, epochs_data): + """Standardize data across channels. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels[, n_times]) + The data. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The data concatenated over channels. + + Notes + ----- + This function makes a copy of the data before the operations and the + memory usage may be large with big data. + """ + _validate_type(epochs_data, np.ndarray, "epochs_data") + if epochs_data.ndim == 2: # can happen with SlidingEstimator + if self.info is not None: + assert len(self.info["ch_names"]) == epochs_data.shape[1] + epochs_data = epochs_data[..., np.newaxis] + assert epochs_data.ndim == 3, epochs_data.shape + return _sklearn_reshape_apply(self._scaler.transform, True, epochs_data) + + def fit_transform(self, epochs_data, y=None): + """Fit to data, then transform it. + + Fits transformer to epochs_data and y and returns a transformed version + of epochs_data. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data. + y : None | array, shape (n_epochs,) + The label for each epoch. + Defaults to None. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The data concatenated over channels. + + Notes + ----- + This function makes a copy of the data before the operations and the + memory usage may be large with big data. + """ + return self.fit(epochs_data, y).transform(epochs_data) + + def inverse_transform(self, epochs_data): + """Invert standardization of data across channels. + + Parameters + ---------- + epochs_data : array, shape ([n_epochs, ]n_channels, n_times) + The data. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The data concatenated over channels. + + Notes + ----- + This function makes a copy of the data before the operations and the + memory usage may be large with big data. 
+ """ + squeeze = False + # Can happen with CSP + if epochs_data.ndim == 2: + squeeze = True + epochs_data = epochs_data[..., np.newaxis] + assert epochs_data.ndim == 3, epochs_data.shape + out = _sklearn_reshape_apply(self._scaler.inverse_transform, True, epochs_data) + if squeeze: + out = out[..., 0] + return out + + +class Vectorizer(TransformerMixin): + """Transform n-dimensional array into 2D array of n_samples by n_features. + + This class reshapes an n-dimensional array into an n_samples * n_features + array, usable by the estimators and transformers of scikit-learn. + + Attributes + ---------- + features_shape_ : tuple + Stores the original shape of data. + + Examples + -------- + >>> from sklearn.linear_model import LogisticRegression + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> clf = make_pipeline(Vectorizer(), StandardScaler(), LogisticRegression()) + """ + + def fit(self, X, y=None): + """Store the shape of the features of X. + + Parameters + ---------- + X : array-like + The data to fit. Can be, for example a list, or an array of at + least 2d. The first dimension must be of length n_samples, where + samples are the independent samples used by the estimator + (e.g. n_epochs for epoched data). + y : None | array, shape (n_samples,) + Used for scikit-learn compatibility. + + Returns + ------- + self : instance of Vectorizer + Return the modified instance. + """ + X = np.asarray(X) + self.features_shape_ = X.shape[1:] + return self + + def transform(self, X): + """Convert given array into two dimensions. + + Parameters + ---------- + X : array-like + The data to fit. Can be, for example a list, or an array of at + least 2d. The first dimension must be of length n_samples, where + samples are the independent samples used by the estimator + (e.g. n_epochs for epoched data). + + Returns + ------- + X : array, shape (n_samples, n_features) + The transformed data. + """ + X = np.asarray(X) + if X.shape[1:] != self.features_shape_: + raise ValueError("Shape of X used in fit and transform must be same") + return X.reshape(len(X), -1) + + def fit_transform(self, X, y=None): + """Fit the data, then transform in one step. + + Parameters + ---------- + X : array-like + The data to fit. Can be, for example a list, or an array of at + least 2d. The first dimension must be of length n_samples, where + samples are the independent samples used by the estimator + (e.g. n_epochs for epoched data). + y : None | array, shape (n_samples,) + Used for scikit-learn compatibility. + + Returns + ------- + X : array, shape (n_samples, -1) + The transformed data. + """ + return self.fit(X).transform(X) + + def inverse_transform(self, X): + """Transform 2D data back to its original feature shape. + + Parameters + ---------- + X : array-like, shape (n_samples, n_features) + Data to be transformed back to original shape. + + Returns + ------- + X : array + The data transformed into shape as used in fit. The first + dimension is of length n_samples. + """ + X = np.asarray(X) + if X.ndim not in (2, 3): + raise ValueError( + f"X should be of 2 or 3 dimensions but has shape {X.shape}" + ) + return X.reshape(X.shape[:-1] + self.features_shape_) + + +@fill_doc +class PSDEstimator(TransformerMixin): + """Compute power spectral density (PSD) using a multi-taper method. + + Parameters + ---------- + sfreq : float + The sampling frequency. + fmin : float + The lower frequency of interest. + fmax : float + The upper frequency of interest. 
+ bandwidth : float + The bandwidth of the multi taper windowing function in Hz. + adaptive : bool + Use adaptive weights to combine the tapered spectra into PSD + (slow, use n_jobs >> 1 to speed up computation). + low_bias : bool + Only use tapers with more than 90%% spectral concentration within + bandwidth. + n_jobs : int + Number of parallel jobs to use (only used if adaptive=True). + %(normalization)s + %(verbose)s + + See Also + -------- + mne.time_frequency.psd_array_multitaper + mne.io.Raw.compute_psd + mne.Epochs.compute_psd + mne.Evoked.compute_psd + """ + + @verbose + def __init__( + self, + sfreq=2 * np.pi, + fmin=0, + fmax=np.inf, + bandwidth=None, + adaptive=False, + low_bias=True, + n_jobs=None, + normalization="length", + *, + verbose=None, + ): + self.sfreq = sfreq + self.fmin = fmin + self.fmax = fmax + self.bandwidth = bandwidth + self.adaptive = adaptive + self.low_bias = low_bias + self.n_jobs = n_jobs + self.normalization = normalization + + def fit(self, epochs_data, y): + """Compute power spectral density (PSD) using a multi-taper method. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data. + y : array, shape (n_epochs,) + The label for each epoch. + + Returns + ------- + self : instance of PSDEstimator + The modified instance. + """ + if not isinstance(epochs_data, np.ndarray): + raise ValueError( + f"epochs_data should be of type ndarray (got {type(epochs_data)})." + ) + + return self + + def transform(self, epochs_data): + """Compute power spectral density (PSD) using a multi-taper method. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data. + + Returns + ------- + psd : array, shape (n_signals, n_freqs) or (n_freqs,) + The computed PSD. + """ + if not isinstance(epochs_data, np.ndarray): + raise ValueError( + f"epochs_data should be of type ndarray (got {type(epochs_data)})." + ) + psd, _ = psd_array_multitaper( + epochs_data, + sfreq=self.sfreq, + fmin=self.fmin, + fmax=self.fmax, + bandwidth=self.bandwidth, + adaptive=self.adaptive, + low_bias=self.low_bias, + normalization=self.normalization, + n_jobs=self.n_jobs, + ) + return psd + + +@fill_doc +class FilterEstimator(TransformerMixin): + """Estimator to filter RtEpochs. + + Applies a zero-phase low-pass, high-pass, band-pass, or band-stop + filter to the channels selected by "picks". + + l_freq and h_freq are the frequencies below which and above which, + respectively, to filter out of the data. Thus the uses are: + + - l_freq < h_freq: band-pass filter + - l_freq > h_freq: band-stop filter + - l_freq is not None, h_freq is None: low-pass filter + - l_freq is None, h_freq is not None: high-pass filter + + If n_jobs > 1, more memory is required as "len(picks) * n_times" + additional time points need to be temporarily stored in memory. + + Parameters + ---------- + %(info_not_none)s + %(l_freq)s + %(h_freq)s + %(picks_good_data)s + %(filter_length)s + %(l_trans_bandwidth)s + %(h_trans_bandwidth)s + n_jobs : int | str + Number of jobs to run in parallel. + Can be 'cuda' if ``cupy`` is installed properly and method='fir'. + method : str + 'fir' will use overlap-add FIR filtering, 'iir' will use IIR filtering. + iir_params : dict | None + Dictionary of parameters to use for IIR filtering. + See mne.filter.construct_iir_filter for details. If iir_params + is None and method="iir", 4th order Butterworth will be used. 
+ %(fir_design)s + %(verbose)s + + See Also + -------- + TemporalFilter + + Notes + ----- + This is primarily meant for use in realtime applications. + In general it is not recommended in a normal processing pipeline as it may result + in edge artifacts. Use with caution. + """ + + def __init__( + self, + info, + l_freq, + h_freq, + picks=None, + filter_length="auto", + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + n_jobs=None, + method="fir", + iir_params=None, + fir_design="firwin", + *, + verbose=None, + ): + self.info = info + self.l_freq = l_freq + self.h_freq = h_freq + self.picks = _picks_to_idx(info, picks) + self.filter_length = filter_length + self.l_trans_bandwidth = l_trans_bandwidth + self.h_trans_bandwidth = h_trans_bandwidth + self.n_jobs = n_jobs + self.method = method + self.iir_params = iir_params + self.fir_design = fir_design + + def fit(self, epochs_data, y): + """Filter data. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data. + y : array, shape (n_epochs,) + The label for each epoch. + + Returns + ------- + self : instance of FilterEstimator + The modified instance. + """ + if not isinstance(epochs_data, np.ndarray): + raise ValueError( + f"epochs_data should be of type ndarray (got {type(epochs_data)})." + ) + + if self.picks is None: + self.picks = pick_types( + self.info, meg=True, eeg=True, ref_meg=False, exclude=[] + ) + + if self.l_freq == 0: + self.l_freq = None + if self.h_freq is not None and self.h_freq > (self.info["sfreq"] / 2.0): + self.h_freq = None + if self.l_freq is not None and not isinstance(self.l_freq, float): + self.l_freq = float(self.l_freq) + if self.h_freq is not None and not isinstance(self.h_freq, float): + self.h_freq = float(self.h_freq) + + if self.info["lowpass"] is None or ( + self.h_freq is not None + and (self.l_freq is None or self.l_freq < self.h_freq) + and self.h_freq < self.info["lowpass"] + ): + with self.info._unlock(): + self.info["lowpass"] = self.h_freq + + if self.info["highpass"] is None or ( + self.l_freq is not None + and (self.h_freq is None or self.l_freq < self.h_freq) + and self.l_freq > self.info["highpass"] + ): + with self.info._unlock(): + self.info["highpass"] = self.l_freq + + return self + + def transform(self, epochs_data): + """Filter data. + + Parameters + ---------- + epochs_data : array, shape (n_epochs, n_channels, n_times) + The data. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The data after filtering. + """ + if not isinstance(epochs_data, np.ndarray): + raise ValueError( + f"epochs_data should be of type ndarray (got {type(epochs_data)})." + ) + epochs_data = np.atleast_3d(epochs_data) + return filter_data( + epochs_data, + self.info["sfreq"], + self.l_freq, + self.h_freq, + self.picks, + self.filter_length, + self.l_trans_bandwidth, + self.h_trans_bandwidth, + method=self.method, + iir_params=self.iir_params, + n_jobs=self.n_jobs, + copy=False, + fir_design=self.fir_design, + verbose=False, + ) + + +class UnsupervisedSpatialFilter(TransformerMixin, BaseEstimator): + """Use unsupervised spatial filtering across time and samples. + + Parameters + ---------- + estimator : instance of sklearn.base.BaseEstimator + Estimator using some decomposition algorithm. + average : bool, default False + If True, the estimator is fitted on the average across samples + (e.g. epochs). 
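+
+    Examples
+    --------
+    A minimal sketch using :class:`sklearn.decomposition.PCA` as the
+    unsupervised estimator (shapes are illustrative)::
+
+        >>> import numpy as np
+        >>> from sklearn.decomposition import PCA
+        >>> X = np.random.randn(10, 5, 100)  # (n_epochs, n_channels, n_times)
+        >>> usf = UnsupervisedSpatialFilter(PCA(n_components=2))
+        >>> usf.fit_transform(X).shape
+        (10, 2, 100)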
+ """ + + def __init__(self, estimator, average=False): + # XXX: Use _check_estimator #3381 + for attr in ("fit", "transform", "fit_transform"): + if not hasattr(estimator, attr): + raise ValueError( + "estimator must be a scikit-learn " + f"transformer, missing {attr} method" + ) + + if not isinstance(average, bool): + raise ValueError( + f"average parameter must be of bool type, got {type(bool)} instead" + ) + + self.estimator = estimator + self.average = average + + def fit(self, X, y=None): + """Fit the spatial filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data to be filtered. + y : None | array, shape (n_samples,) + Used for scikit-learn compatibility. + + Returns + ------- + self : instance of UnsupervisedSpatialFilter + Return the modified instance. + """ + if self.average: + X = np.mean(X, axis=0).T + else: + n_epochs, n_channels, n_times = X.shape + # trial as time samples + X = np.transpose(X, (1, 0, 2)).reshape((n_channels, n_epochs * n_times)).T + self.estimator.fit(X) + return self + + def fit_transform(self, X, y=None): + """Transform the data to its filtered components after fitting. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data to be filtered. + y : None | array, shape (n_samples,) + Used for scikit-learn compatibility. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The transformed data. + """ + return self.fit(X).transform(X) + + def transform(self, X): + """Transform the data to its spatial filters. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) + The data to be filtered. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The transformed data. + """ + return self._apply_method(X, "transform") + + def inverse_transform(self, X): + """Inverse transform the data to its original space. + + Parameters + ---------- + X : array, shape (n_epochs, n_components, n_times) + The data to be inverted. + + Returns + ------- + X : array, shape (n_epochs, n_channels, n_times) + The transformed data. + """ + return self._apply_method(X, "inverse_transform") + + def _apply_method(self, X, method): + """Vectorize time samples as trials, apply method and reshape back. + + Parameters + ---------- + X : array, shape (n_epochs, n_dims, n_times) + The data to be inverted. + + Returns + ------- + X : array, shape (n_epochs, n_dims, n_times) + The transformed data. + """ + n_epochs, n_channels, n_times = X.shape + # trial as time samples + X = np.transpose(X, [1, 0, 2]) + X = np.reshape(X, [n_channels, n_epochs * n_times]).T + # apply method + method = getattr(self.estimator, method) + X = method(X) + # put it back to n_epochs, n_dimensions + X = np.reshape(X.T, [-1, n_epochs, n_times]).transpose([1, 0, 2]) + return X + + +@fill_doc +class TemporalFilter(TransformerMixin): + """Estimator to filter data array along the last dimension. + + Applies a zero-phase low-pass, high-pass, band-pass, or band-stop + filter to the channels. + + l_freq and h_freq are the frequencies below which and above which, + respectively, to filter out of the data. Thus the uses are: + + - l_freq < h_freq: band-pass filter + - l_freq > h_freq: band-stop filter + - l_freq is not None, h_freq is None: low-pass filter + - l_freq is None, h_freq is not None: high-pass filter + + See :func:`mne.filter.filter_data`. + + Parameters + ---------- + l_freq : float | None + Low cut-off frequency in Hz. If None the data are only low-passed. 
+    h_freq : float | None
+        High cut-off frequency in Hz. If None the data are only
+        high-passed.
+    sfreq : float, default 1.0
+        Sampling frequency in Hz.
+    filter_length : str | int, default 'auto'
+        Length of the FIR filter to use (if applicable):
+
+        * int: specified length in samples.
+        * 'auto' (default in 0.14): the filter length is chosen based
+          on the size of the transition regions (7 times the reciprocal
+          of the shortest transition band).
+        * str: (default in 0.13 is "10s") a human-readable time in
+          units of "s" or "ms" (e.g., "10s" or "5500ms") will be
+          converted to that number of samples if ``phase="zero"``, or
+          the shortest power-of-two length at least that duration for
+          ``phase="zero-double"``.
+
+    l_trans_bandwidth : float | str
+        Width of the transition band at the low cut-off frequency in Hz
+        (high pass or cutoff 1 in bandpass). Can be "auto"
+        (default in 0.14) to use a multiple of ``l_freq``::
+
+            min(max(l_freq * 0.25, 2), l_freq)
+
+        Only used for ``method='fir'``.
+    h_trans_bandwidth : float | str
+        Width of the transition band at the high cut-off frequency in Hz
+        (low pass or cutoff 2 in bandpass). Can be "auto"
+        (default in 0.14) to use a multiple of ``h_freq``::
+
+            min(max(h_freq * 0.25, 2.), info['sfreq'] / 2. - h_freq)
+
+        Only used for ``method='fir'``.
+    n_jobs : int | str, default None
+        Number of jobs to run in parallel.
+        Can be 'cuda' if ``cupy`` is installed properly and method='fir'.
+    method : str, default 'fir'
+        'fir' will use overlap-add FIR filtering, 'iir' will use IIR
+        forward-backward filtering (via filtfilt).
+    iir_params : dict | None, default None
+        Dictionary of parameters to use for IIR filtering.
+        See mne.filter.construct_iir_filter for details. If iir_params
+        is None and method="iir", 4th order Butterworth will be used.
+    fir_window : str, default 'hamming'
+        The window to use in FIR design, can be "hamming", "hann",
+        or "blackman".
+    fir_design : str
+        Can be "firwin" (default) to use :func:`scipy.signal.firwin`,
+        or "firwin2" to use :func:`scipy.signal.firwin2`. "firwin" uses
+        a time-domain design technique that generally gives improved
+        attenuation using fewer samples than "firwin2".
+
+        .. versionadded:: 0.15
+    %(verbose)s
+
+    See Also
+    --------
+    FilterEstimator
+    Vectorizer
+    mne.filter.filter_data
+    """
+
+    @verbose
+    def __init__(
+        self,
+        l_freq=None,
+        h_freq=None,
+        sfreq=1.0,
+        filter_length="auto",
+        l_trans_bandwidth="auto",
+        h_trans_bandwidth="auto",
+        n_jobs=None,
+        method="fir",
+        iir_params=None,
+        fir_window="hamming",
+        fir_design="firwin",
+        *,
+        verbose=None,
+    ):
+        self.l_freq = l_freq
+        self.h_freq = h_freq
+        self.sfreq = sfreq
+        self.filter_length = filter_length
+        self.l_trans_bandwidth = l_trans_bandwidth
+        self.h_trans_bandwidth = h_trans_bandwidth
+        self.n_jobs = n_jobs
+        self.method = method
+        self.iir_params = iir_params
+        self.fir_window = fir_window
+        self.fir_design = fir_design
+
+        if not isinstance(self.n_jobs, int) and self.n_jobs not in (None, "cuda"):
+            raise ValueError(
+                f'n_jobs must be int or "cuda", got {type(self.n_jobs)} instead.'
+            )
+
+    def fit(self, X, y=None):
+        """Do nothing (for scikit-learn compatibility purposes).
+
+        Parameters
+        ----------
+        X : array, shape (n_epochs, n_channels, n_times) or shape (n_channels, n_times)
+            The data to be filtered over the last dimension. The channels
+            dimension can be zero when passing a 2D array.
+        y : None
+            Not used, for scikit-learn compatibility purposes.
+
+        Returns
+        -------
+        self : instance of TemporalFilter
+            The modified instance.
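+
+        Examples
+        --------
+        ``fit`` is a no-op, so the transformer is typically chained directly
+        into ``transform`` (a sketch with illustrative shapes)::
+
+            >>> import numpy as np
+            >>> X = np.random.randn(4, 2, 1000)  # (n_epochs, n_channels, n_times)
+            >>> tf = TemporalFilter(l_freq=1.0, h_freq=40.0, sfreq=250.0)
+            >>> tf.fit(X).transform(X).shape  # doctest: +SKIP
+            (4, 2, 1000)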
+ """ # noqa: E501 + return self + + def transform(self, X): + """Filter data along the last dimension. + + Parameters + ---------- + X : array, shape (n_epochs, n_channels, n_times) or shape (n_channels, n_times) + The data to be filtered over the last dimension. The channels + dimension can be zero when passing a 2D array. + + Returns + ------- + X : array + The data after filtering. + """ # noqa: E501 + X = np.atleast_2d(X) + + if X.ndim > 3: + raise ValueError( + "Array must be of at max 3 dimensions instead " + f"got {X.ndim} dimensional matrix" + ) + + shape = X.shape + X = X.reshape(-1, shape[-1]) + X = filter_data( + X, + self.sfreq, + self.l_freq, + self.h_freq, + filter_length=self.filter_length, + l_trans_bandwidth=self.l_trans_bandwidth, + h_trans_bandwidth=self.h_trans_bandwidth, + n_jobs=self.n_jobs, + method=self.method, + iir_params=self.iir_params, + copy=False, + fir_window=self.fir_window, + fir_design=self.fir_design, + ) + return X.reshape(shape) diff --git a/mne/defaults.py b/mne/defaults.py new file mode 100644 index 0000000..d5aab1a --- /dev/null +++ b/mne/defaults.py @@ -0,0 +1,379 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from copy import deepcopy + +DEFAULTS = dict( + color=dict( + mag="darkblue", + grad="b", + eeg="k", + eog="k", + ecg="m", + emg="k", + ref_meg="steelblue", + misc="k", + stim="k", + resp="k", + chpi="k", + exci="k", + ias="k", + syst="k", + seeg="saddlebrown", + dbs="seagreen", + dipole="k", + gof="k", + bio="k", + ecog="k", + hbo="#AA3377", + hbr="b", + fnirs_cw_amplitude="k", + fnirs_fd_ac_amplitude="k", + fnirs_fd_phase="k", + fnirs_od="k", + csd="k", + whitened="k", + gsr="#666633", + temperature="#663333", + eyegaze="k", + pupil="k", + ), + si_units=dict( + mag="T", + grad="T/m", + eeg="V", + eog="V", + ecg="V", + emg="V", + misc="AU", + seeg="V", + dbs="V", + dipole="Am", + gof="GOF", + bio="V", + ecog="V", + hbo="M", + hbr="M", + ref_meg="T", + fnirs_cw_amplitude="V", + fnirs_fd_ac_amplitude="V", + fnirs_fd_phase="rad", + fnirs_od="V", + csd="V/m²", + whitened="Z", + gsr="S", + temperature="C", + eyegaze="rad", + pupil="m", + ), + units=dict( + mag="fT", + grad="fT/cm", + eeg="µV", + eog="µV", + ecg="µV", + emg="µV", + misc="AU", + seeg="mV", + dbs="µV", + dipole="nAm", + gof="GOF", + bio="µV", + ecog="µV", + hbo="µM", + hbr="µM", + ref_meg="fT", + fnirs_cw_amplitude="V", + fnirs_fd_ac_amplitude="V", + fnirs_fd_phase="rad", + fnirs_od="V", + csd="mV/m²", + whitened="Z", + gsr="S", + temperature="C", + eyegaze="rad", + pupil="µm", + ), + # scalings for the units + scalings=dict( + mag=1e15, + grad=1e13, + eeg=1e6, + eog=1e6, + emg=1e6, + ecg=1e6, + misc=1.0, + seeg=1e3, + dbs=1e6, + ecog=1e6, + dipole=1e9, + gof=1.0, + bio=1e6, + hbo=1e6, + hbr=1e6, + ref_meg=1e15, + fnirs_cw_amplitude=1.0, + fnirs_fd_ac_amplitude=1.0, + fnirs_fd_phase=1.0, + fnirs_od=1.0, + csd=1e3, + whitened=1.0, + gsr=1.0, + temperature=1.0, + eyegaze=1.0, + pupil=1e6, + ), + # rough guess for a good plot + scalings_plot_raw=dict( + mag=1e-12, + grad=4e-11, + eeg=20e-6, + eog=150e-6, + ecg=5e-4, + emg=1e-3, + ref_meg=1e-12, + misc="auto", + stim=1, + resp=1, + chpi=1e-4, + exci=1, + ias=1, + syst=1, + seeg=1e-4, + dbs=1e-4, + bio=1e-6, + ecog=1e-4, + hbo=10e-6, + hbr=10e-6, + whitened=10.0, + fnirs_cw_amplitude=2e-2, + fnirs_fd_ac_amplitude=2e-2, + fnirs_fd_phase=2e-1, + fnirs_od=2e-2, + csd=200e-4, + dipole=1e-7, + gof=1e2, + gsr=1.0, + temperature=0.1, + eyegaze=2e-1, + pupil=1e-2, + ), + 
scalings_cov_rank=dict( + mag=1e12, + grad=1e11, + eeg=1e5, # ~100x scalings + seeg=1e1, + dbs=1e4, + ecog=1e4, + hbo=1e4, + hbr=1e4, + ), + ylim=dict( + mag=(-600.0, 600.0), + grad=(-200.0, 200.0), + eeg=(-200.0, 200.0), + misc=(-5.0, 5.0), + seeg=(-20.0, 20.0), + dbs=(-200.0, 200.0), + dipole=(-100.0, 100.0), + gof=(0.0, 1.0), + bio=(-500.0, 500.0), + ecog=(-200.0, 200.0), + hbo=(0, 20), + hbr=(0, 20), + csd=(-50.0, 50.0), + eyegaze=(-1, 1), + pupil=(-1.0, 1.0), + ), + titles=dict( + mag="Magnetometers", + grad="Gradiometers", + eeg="EEG", + eog="EOG", + ecg="ECG", + emg="EMG", + misc="misc", + seeg="sEEG", + dbs="DBS", + bio="BIO", + dipole="Dipole", + ecog="ECoG", + hbo="Oxyhemoglobin", + ref_meg="Reference Magnetometers", + fnirs_cw_amplitude="fNIRS (CW amplitude)", + fnirs_fd_ac_amplitude="fNIRS (FD AC amplitude)", + fnirs_fd_phase="fNIRS (FD phase)", + fnirs_od="fNIRS (OD)", + hbr="Deoxyhemoglobin", + gof="Goodness of fit", + csd="Current source density", + stim="Stimulus", + gsr="Galvanic skin response", + temperature="Temperature", + eyegaze="Eye-tracking (Gaze position)", + pupil="Eye-tracking (Pupil size)", + resp="Respiration monitoring channel", + chpi="Continuous head position indicator (HPI) coil channels", + exci="Flux excitation channel", + ias="Internal Active Shielding data (Triux systems)", + syst="System status channel information (Triux systems)", + whitened="Whitened data", + ), + mask_params=dict( + marker="o", + markerfacecolor="w", + markeredgecolor="k", + linewidth=0, + markeredgewidth=1, + markersize=4, + ), + coreg=dict( + mri_fid_opacity=1.0, + dig_fid_opacity=1.0, + # go from unit scaling (e.g., unit-radius sphere) to meters + mri_fid_scale=5e-3, + dig_fid_scale=8e-3, + extra_scale=4e-3, + eeg_scale=4e-3, + eegp_scale=20e-3, + eegp_height=0.1, + ecog_scale=2e-3, + seeg_scale=2e-3, + meg_scale=1.0, # sensors are already in SI units + ref_meg_scale=1.0, + dbs_scale=5e-3, + fnirs_scale=5e-3, + source_scale=5e-3, + detector_scale=5e-3, + hpi_scale=4e-3, + head_color=(0.988, 0.89, 0.74), + hpi_color=(1.0, 0.0, 1.0), + extra_color=(1.0, 1.0, 1.0), + meg_color=(0.0, 0.25, 0.5), + ref_meg_color=(0.5, 0.5, 0.5), + helmet_color=(0.0, 0.0, 0.6), + eeg_color=(1.0, 0.596, 0.588), + eegp_color=(0.839, 0.15, 0.16), + ecog_color=(1.0, 1.0, 1.0), + dbs_color=(0.82, 0.455, 0.659), + seeg_color=(1.0, 1.0, 0.3), + fnirs_color=(1.0, 0.647, 0.0), + source_color=(1.0, 0.05, 0.0), + detector_color=(0.3, 0.15, 0.15), + lpa_color=(1.0, 0.0, 0.0), + nasion_color=(0.0, 1.0, 0.0), + rpa_color=(0.0, 0.0, 1.0), + ), + noise_std=dict(grad=5e-13, mag=20e-15, eeg=0.2e-6), + eloreta_options=dict(eps=1e-6, max_iter=20, force_equal=False), + depth_mne=dict( + exp=0.8, + limit=10.0, + limit_depth_chs=True, + combine_xyz="spectral", + allow_fixed_depth=False, + ), + depth_sparse=dict( + exp=0.8, + limit=None, + limit_depth_chs="whiten", + combine_xyz="fro", + allow_fixed_depth=True, + ), + interpolation_method=dict( + eeg="spline", meg="MNE", fnirs="nearest", ecog="spline", seeg="spline" + ), + volume_options=dict( + alpha=None, + resolution=1.0, + surface_alpha=None, + blending="mip", + silhouette_alpha=None, + silhouette_linewidth=2.0, + ), + prefixes={ + "k": 1e-3, + "h": 1e-2, + "": 1e0, + "d": 1e1, + "c": 1e2, + "m": 1e3, + "µ": 1e6, + "u": 1e6, + "n": 1e9, + "p": 1e12, + "f": 1e15, + }, + transform_zooms=dict(translation=None, rigid=None, affine=None, sdr=None), + transform_niter=dict( + translation=(10000, 1000, 100), + rigid=(10000, 1000, 100), + affine=(10000, 1000, 100), + sdr=(10, 10, 
5), + ), + volume_label_indices=( + # Left and middle + 4, # Left-Lateral-Ventricle + 5, # Left-Inf-Lat-Vent + 8, # Left-Cerebellum-Cortex + 10, # Left-Thalamus-Proper + 11, # Left-Caudate + 12, # Left-Putamen + 13, # Left-Pallidum + 14, # 3rd-Ventricle + 15, # 4th-Ventricle + 16, # Brain-Stem + 17, # Left-Hippocampus + 18, # Left-Amygdala + 26, # Left-Accumbens-area + 28, # Left-VentralDC + # Right + 43, # Right-Lateral-Ventricle + 44, # Right-Inf-Lat-Vent + 47, # Right-Cerebellum-Cortex + 49, # Right-Thalamus-Proper + 50, # Right-Caudate + 51, # Right-Putamen + 52, # Right-Pallidum + 53, # Right-Hippocampus + 54, # Right-Amygdala + 58, # Right-Accumbens-area + 60, # Right-VentralDC + ), + report_stc_plot_kwargs=dict( + views=("lateral", "medial"), + hemi="split", + backend="pyvistaqt", + time_viewer=False, + show_traces=False, + size=(450, 450), + background="white", + time_label=None, + add_data_kwargs={"colorbar_kwargs": {"label_font_size": 12, "n_labels": 5}}, + ), +) + + +def _handle_default(k, v=None): + """Avoid dicts as default keyword arguments. + + Use this function instead to resolve default dict values. Example usage:: + + scalings = _handle_default('scalings', scalings) + + """ + this_mapping = deepcopy(DEFAULTS[k]) + if v is not None: + if isinstance(v, dict): + this_mapping.update(v) + else: + for key in this_mapping: + this_mapping[key] = v + return this_mapping + + +HEAD_SIZE_DEFAULT = 0.095 # in [m] +_BORDER_DEFAULT = "mean" +_INTERPOLATION_DEFAULT = "cubic" +_EXTRAPOLATE_DEFAULT = "auto" diff --git a/mne/dipole.py b/mne/dipole.py new file mode 100644 index 0000000..a40e970 --- /dev/null +++ b/mne/dipole.py @@ -0,0 +1,1919 @@ +"""Single-dipole functions and classes.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import functools +import re +from copy import deepcopy +from functools import partial + +import numpy as np +from scipy.linalg import eigh +from scipy.optimize import fmin_cobyla + +from ._fiff.constants import FIFF +from ._fiff.pick import pick_types +from ._fiff.proj import _needs_eeg_average_ref_proj, make_projector +from ._freesurfer import _get_aseg, head_to_mni, head_to_mri, read_freesurfer_lut +from .bem import _bem_find_surface, _bem_surf_name, _fit_sphere +from .cov import _ensure_cov, compute_whitener +from .evoked import _aspect_rev, _read_evoked, _write_evokeds +from .fixes import _safe_svd +from .forward._compute_forward import _compute_forwards_meeg, _prep_field_computation +from .forward._make_forward import ( + _get_trans, + _prep_eeg_channels, + _prep_meg_channels, + _setup_bem, +) +from .parallel import parallel_func +from .source_space._source_space import SourceSpaces, _make_volume_source_space +from .surface import _compute_nearest, _points_outside_surface, transform_surface_to +from .transforms import _coord_frame_name, _print_coord_trans, apply_trans +from .utils import ( + ExtendedTimeMixin, + TimeMixin, + _check_fname, + _check_option, + _get_blas_funcs, + _pl, + _repeated_svd, + _svd_lwork, + _time_mask, + _validate_type, + _verbose_safe_false, + check_fname, + copy_function_doc_to_method_doc, + fill_doc, + logger, + pinvh, + verbose, + warn, +) +from .viz import plot_dipole_amplitudes, plot_dipole_locations +from .viz.evoked import _plot_evoked + + +@fill_doc +class Dipole(TimeMixin): + """Dipole class for sequential dipole fits. + + .. note:: + This class should usually not be instantiated directly via + ``mne.Dipole(...)``. 
Instead, use one of the functions
+        listed in the See Also section below.
+
+    Used to store positions, orientations, amplitudes, times, goodness of fit
+    of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
+    or certain inverse solvers. Note that dipole position vectors are given in
+    the head coordinate frame.
+
+    Parameters
+    ----------
+    times : array, shape (n_dipoles,)
+        The time instants at which each dipole was fitted (s).
+    pos : array, shape (n_dipoles, 3)
+        The dipoles positions (m) in head coordinates.
+    amplitude : array, shape (n_dipoles,)
+        The amplitude of the dipoles (Am).
+    ori : array, shape (n_dipoles, 3)
+        The dipole orientations (normalized to unit length).
+    gof : array, shape (n_dipoles,)
+        The goodness of fit.
+    name : str | None
+        Name of the dipole.
+    conf : dict
+        Confidence limits in dipole orientation for "vol" in m^3 (volume),
+        "depth" in m (along the depth axis), "long" in m (longitudinal axis),
+        "trans" in m (transverse axis), "qlong" in Am, and "qtrans" in Am
+        (currents). The current confidence limit in the depth direction is
+        assumed to be zero (although it can be non-zero when a BEM is used).
+
+        .. versionadded:: 0.15
+    khi2 : array, shape (n_dipoles,)
+        The χ^2 values for the fits.
+
+        .. versionadded:: 0.15
+    nfree : array, shape (n_dipoles,)
+        The number of free parameters for each fit.
+
+        .. versionadded:: 0.15
+    %(verbose)s
+
+    See Also
+    --------
+    fit_dipole
+    DipoleFixed
+    read_dipole
+
+    Notes
+    -----
+    This class is for sequential dipole fits, where the position
+    changes as a function of time. For fixed dipole fits, where the
+    position is fixed as a function of time, use :class:`mne.DipoleFixed`.
+    """
+
+    @verbose
+    def __init__(
+        self,
+        times,
+        pos,
+        amplitude,
+        ori,
+        gof,
+        name=None,
+        conf=None,
+        khi2=None,
+        nfree=None,
+        *,
+        verbose=None,
+    ):
+        self._set_times(np.array(times))
+        self.pos = np.array(pos)
+        self.amplitude = np.array(amplitude)
+        self.ori = np.array(ori)
+        self.gof = np.array(gof)
+        self.name = name
+        self.conf = dict()
+        if conf is not None:
+            for key, value in conf.items():
+                self.conf[key] = np.array(value)
+        self.khi2 = np.array(khi2) if khi2 is not None else None
+        self.nfree = np.array(nfree) if nfree is not None else None
+
+    def __repr__(self):  # noqa: D105
+        s = f"n_times : {len(self.times)}"
+        s += f", tmin : {np.min(self.times):0.3f}"
+        s += f", tmax : {np.max(self.times):0.3f}"
+        return f"<Dipole | {s}>"
+
+    @verbose
+    def save(self, fname, overwrite=False, *, verbose=None):
+        """Save dipole in a ``.dip`` or ``.bdip`` file.
+
+        Parameters
+        ----------
+        fname : path-like
+            The name of the ``.dip`` or ``.bdip`` file.
+        %(overwrite)s
+
+            .. versionadded:: 0.20
+        %(verbose)s
+
+        Notes
+        -----
+        .. versionchanged:: 0.20
+            Support for writing bdip (Xfit binary) files.
+        """
+        # obligatory fields
+        fname = _check_fname(fname, overwrite=overwrite)
+        if fname.suffix == ".bdip":
+            _write_dipole_bdip(fname, self)
+        else:
+            _write_dipole_text(fname, self)
+
+    @verbose
+    def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None):
+        """Crop data to a given time interval.
+
+        Parameters
+        ----------
+        tmin : float | None
+            Start time of selection in seconds.
+        tmax : float | None
+            End time of selection in seconds.
+        %(include_tmax)s
+        %(verbose)s
+
+        Returns
+        -------
+        self : instance of Dipole
+            The cropped instance.
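+
+        Examples
+        --------
+        An illustrative crop to the first 100 ms (``dip`` being any
+        existing Dipole instance)::
+
+            >>> dip_early = dip.copy().crop(tmin=0.0, tmax=0.1)  # doctest: +SKIP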
+ """ + sfreq = None + if len(self.times) > 1: + sfreq = 1.0 / np.median(np.diff(self.times)) + mask = _time_mask( + self.times, tmin, tmax, sfreq=sfreq, include_tmax=include_tmax + ) + self._set_times(self.times[mask]) + for attr in ("pos", "gof", "amplitude", "ori", "khi2", "nfree"): + if getattr(self, attr) is not None: + setattr(self, attr, getattr(self, attr)[mask]) + for key in self.conf.keys(): + self.conf[key] = self.conf[key][mask] + return self + + def copy(self): + """Copy the Dipoles object. + + Returns + ------- + dip : instance of Dipole + The copied dipole instance. + """ + return deepcopy(self) + + @verbose + @copy_function_doc_to_method_doc(plot_dipole_locations) + def plot_locations( + self, + trans, + subject, + subjects_dir=None, + mode="orthoview", + coord_frame="mri", + idx="gof", + show_all=True, + ax=None, + block=False, + show=True, + scale=None, + color=None, + *, + highlight_color="r", + fig=None, + title=None, + head_source="seghead", + surf="pial", + width=None, + verbose=None, + ): + return plot_dipole_locations( + self, + trans, + subject, + subjects_dir, + mode, + coord_frame, + idx, + show_all, + ax, + block, + show, + scale=scale, + color=color, + highlight_color=highlight_color, + fig=fig, + title=title, + head_source=head_source, + surf=surf, + width=width, + ) + + @verbose + def to_mni(self, subject, trans, subjects_dir=None, verbose=None): + """Convert dipole location from head to MNI coordinates. + + Parameters + ---------- + %(subject)s + %(trans_not_none)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + pos_mni : array, shape (n_pos, 3) + The MNI coordinates (in mm) of pos. + """ + mri_head_t, trans = _get_trans(trans) + return head_to_mni( + self.pos, subject, mri_head_t, subjects_dir=subjects_dir, verbose=verbose + ) + + @verbose + def to_mri(self, subject, trans, subjects_dir=None, verbose=None): + """Convert dipole location from head to MRI surface RAS coordinates. + + Parameters + ---------- + %(subject)s + %(trans_not_none)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + pos_mri : array, shape (n_pos, 3) + The Freesurfer surface RAS coordinates (in mm) of pos. + """ + mri_head_t, trans = _get_trans(trans) + return head_to_mri( + self.pos, + subject, + mri_head_t, + subjects_dir=subjects_dir, + verbose=verbose, + kind="mri", + ) + + @verbose + def to_volume_labels( + self, + trans, + subject="fsaverage", + aseg="aparc+aseg", + subjects_dir=None, + verbose=None, + ): + """Find an ROI in atlas for the dipole positions. + + Parameters + ---------- + %(trans)s + + .. versionchanged:: 0.19 + Support for 'fsaverage' argument. + %(subject)s + %(aseg)s + %(subjects_dir)s + %(verbose)s + + Returns + ------- + labels : list + List of anatomical region names from anatomical segmentation atlas. + + Notes + ----- + .. versionadded:: 0.24 + """ + aseg_img, aseg_data = _get_aseg(aseg, subject, subjects_dir) + mri_vox_t = np.linalg.inv(aseg_img.header.get_vox2ras_tkr()) + + # Load freesurface atlas LUT + lut_inv = read_freesurfer_lut()[0] + lut = {v: k for k, v in lut_inv.items()} + + # transform to voxel space from head space + pos = self.to_mri(subject, trans, subjects_dir=subjects_dir, verbose=verbose) + pos = apply_trans(mri_vox_t, pos) + pos = np.rint(pos).astype(int) + + # Get voxel value and label from LUT + labels = [lut.get(aseg_data[tuple(coord)], "Unknown") for coord in pos] + return labels + + def plot_amplitudes(self, color="k", show=True): + """Plot the dipole amplitudes as a function of time. 
+
+        Parameters
+        ----------
+        color : matplotlib color
+            Color to use for the trace.
+        show : bool
+            Show figure if True.
+
+        Returns
+        -------
+        fig : matplotlib.figure.Figure
+            The figure object containing the plot.
+        """
+        return plot_dipole_amplitudes([self], [color], show)
+
+    def __getitem__(self, item):
+        """Get a time slice.
+
+        Parameters
+        ----------
+        item : array-like or slice
+            The slice of time points to use.
+
+        Returns
+        -------
+        dip : instance of Dipole
+            The sliced dipole.
+        """
+        if isinstance(item, int):  # make sure attributes stay 2d
+            item = [item]
+
+        selected_times = self.times[item].copy()
+        selected_pos = self.pos[item, :].copy()
+        selected_amplitude = self.amplitude[item].copy()
+        selected_ori = self.ori[item, :].copy()
+        selected_gof = self.gof[item].copy()
+        selected_name = self.name
+        selected_conf = dict()
+        for key in self.conf.keys():
+            selected_conf[key] = self.conf[key][item]
+        selected_khi2 = self.khi2[item] if self.khi2 is not None else None
+        selected_nfree = self.nfree[item] if self.nfree is not None else None
+        return Dipole(
+            selected_times,
+            selected_pos,
+            selected_amplitude,
+            selected_ori,
+            selected_gof,
+            selected_name,
+            selected_conf,
+            selected_khi2,
+            selected_nfree,
+        )
+
+    def __len__(self):
+        """Return the number of dipoles.
+
+        Returns
+        -------
+        len : int
+            The number of dipoles.
+
+        Examples
+        --------
+        This can be used as::
+
+            >>> len(dipoles)  # doctest: +SKIP
+            10
+        """
+        return self.pos.shape[0]
+
+
+def _read_dipole_fixed(fname):
+    """Read a fixed dipole FIF file."""
+    logger.info(f"Reading {fname} ...")
+    info, nave, aspect_kind, comment, times, data, _ = _read_evoked(fname)
+    return DipoleFixed(info, data, times, nave, aspect_kind, comment=comment)
+
+
+@fill_doc
+class DipoleFixed(ExtendedTimeMixin):
+    """Dipole class for fixed-position dipole fits.
+
+    .. note::
+        This class should usually not be instantiated directly
+        via ``mne.DipoleFixed(...)``. Instead, use one of the functions
+        listed in the See Also section below.
+
+    Parameters
+    ----------
+    %(info_not_none)s
+    data : array, shape (n_channels, n_times)
+        The dipole data.
+    times : array, shape (n_times,)
+        The time points.
+    nave : int
+        Number of averages.
+    aspect_kind : int
+        The kind of data.
+    comment : str
+        The dipole comment.
+    %(verbose)s
+
+    See Also
+    --------
+    read_dipole
+    Dipole
+    fit_dipole
+
+    Notes
+    -----
+    This class is for fixed-position dipole fits, where the position
+    (and maybe orientation) is static over time. For sequential dipole fits,
+    where the position can change as a function of time, use
+    :class:`mne.Dipole`.
+
+    .. versionadded:: 0.12
+    """
+
+    @verbose
+    def __init__(
+        self, info, data, times, nave, aspect_kind, comment="", *, verbose=None
+    ):
+        self.info = info
+        self.nave = nave
+        self._aspect_kind = aspect_kind
+        self.kind = _aspect_rev.get(aspect_kind, "unknown")
+        self.comment = comment
+        self._set_times(np.array(times))
+        self.data = data
+        self.preload = True
+        self._update_first_last()
+
+    def __repr__(self):  # noqa: D105
+        s = f"n_times : {len(self.times)}"
+        s += f", tmin : {np.min(self.times)}"
+        s += f", tmax : {np.max(self.times)}"
+        return f"<DipoleFixed | {s}>"
+
+    def copy(self):
+        """Copy the DipoleFixed object.
+
+        Returns
+        -------
+        inst : instance of DipoleFixed
+            The copy.
+
+        Notes
+        -----
+        .. versionadded:: 0.16
+        """
+        return deepcopy(self)
+
+    @property
+    def ch_names(self):
+        """Channel names."""
+        return self.info["ch_names"]
+
+    @verbose
+    def save(self, fname, verbose=None):
+        """Save dipole in a .fif file.
+ + Parameters + ---------- + fname : path-like + The name of the .fif file. Must end with ``'.fif'`` or + ``'.fif.gz'`` to make it explicit that the file contains + dipole information in FIF format. + %(verbose)s + """ + check_fname( + fname, + "DipoleFixed", + ( + "-dip.fif", + "-dip.fif.gz", + "_dip.fif", + "_dip.fif.gz", + ), + (".fif", ".fif.gz"), + ) + _write_evokeds(fname, self, check=False) + + def plot(self, show=True, time_unit="s"): + """Plot dipole data. + + Parameters + ---------- + show : bool + Call pyplot.show() at the end or not. + time_unit : str + The units for the time axis, can be "ms" or "s" (default). + + .. versionadded:: 0.16 + + Returns + ------- + fig : instance of matplotlib.figure.Figure + The figure containing the time courses. + """ + return _plot_evoked( + self, + picks=None, + exclude=(), + unit=True, + show=show, + ylim=None, + xlim="tight", + proj=False, + hline=None, + units=None, + scalings=None, + titles=None, + axes=None, + gfp=False, + window_title=None, + spatial_colors=False, + plot_type="butterfly", + selectable=False, + time_unit=time_unit, + ) + + +# ############################################################################# +# IO +@verbose +def read_dipole(fname, verbose=None): + """Read ``.dip`` file from Neuromag/xfit or MNE. + + Parameters + ---------- + fname : path-like + The name of the ``.dip`` or ``.fif`` file. + %(verbose)s + + Returns + ------- + %(dipole)s + + See Also + -------- + Dipole + DipoleFixed + fit_dipole + + Notes + ----- + .. versionchanged:: 0.20 + Support for reading bdip (Xfit binary) format. + """ + fname = _check_fname(fname, overwrite="read", must_exist=True) + if fname.suffix == ".fif" or fname.name.endswith(".fif.gz"): + return _read_dipole_fixed(fname) + elif fname.suffix == ".bdip": + return _read_dipole_bdip(fname) + else: + return _read_dipole_text(fname) + + +def _read_dipole_text(fname): + """Read a dipole text file.""" + # Figure out the special fields + need_header = True + def_line = name = None + # There is a bug in older np.loadtxt regarding skipping fields, + # so just read the data ourselves (need to get name and header anyway) + data = list() + with open(fname) as fid: + for line in fid: + if not (line.startswith("%") or line.startswith("#")): + need_header = False + data.append(line.strip().split()) + else: + if need_header: + def_line = line + if line.startswith("##") or line.startswith("%%"): + m = re.search('Name "(.*) dipoles"', line) + if m: + name = m.group(1) + del line + data = np.atleast_2d(np.array(data, float)) + if def_line is None: + raise OSError( + "Dipole text file is missing field definition comment, cannot parse " + f"{fname}" + ) + # actually parse the fields + def_line = def_line.lstrip("%").lstrip("#").strip() + # MNE writes it out differently than Elekta, let's standardize them... + fields = re.sub( + r"([X|Y|Z] )\(mm\)", # "X (mm)", etc. + lambda match: match.group(1).strip() + "/mm", + def_line, + ) + fields = re.sub( + r"\((.*?)\)", + lambda match: "/" + match.group(1), + fields, # "Q(nAm)", etc. + ) + fields = re.sub( + "(begin|end) ", # "begin" and "end" with no units + lambda match: match.group(1) + "/ms", + fields, + ) + fields = fields.lower().split() + required_fields = ( + "begin/ms", + "x/mm", + "y/mm", + "z/mm", + "q/nam", + "qx/nam", + "qy/nam", + "qz/nam", + "g/%", + ) + optional_fields = ( + "khi^2", + "free", # standard ones + # now the confidence fields (up to 5!) 
+ "vol/mm^3", + "depth/mm", + "long/mm", + "trans/mm", + "qlong/nam", + "qtrans/nam", + ) + conf_scales = [1e-9, 1e-3, 1e-3, 1e-3, 1e-9, 1e-9] + missing_fields = sorted(set(required_fields) - set(fields)) + if len(missing_fields) > 0: + raise RuntimeError( + f"Could not find necessary fields in header: {missing_fields}" + ) + handled_fields = set(required_fields) | set(optional_fields) + assert len(handled_fields) == len(required_fields) + len(optional_fields) + ignored_fields = sorted(set(fields) - set(handled_fields) - {"end/ms"}) + if len(ignored_fields) > 0: + warn(f"Ignoring extra fields in dipole file: {ignored_fields}") + if len(fields) != data.shape[1]: + raise OSError( + f"More data fields ({len(fields)}) found than data columns ({data.shape[1]}" + f"): {fields}" + ) + + logger.info(f"{len(data)} dipole(s) found") + + if "end/ms" in fields: + if np.diff( + data[:, [fields.index("begin/ms"), fields.index("end/ms")]], 1, -1 + ).any(): + warn( + "begin and end fields differed, but only begin will be used " + "to store time values" + ) + + # Find the correct column in our data array, then scale to proper units + idx = [fields.index(field) for field in required_fields] + assert len(idx) >= 9 + times = data[:, idx[0]] / 1000.0 + pos = 1e-3 * data[:, idx[1:4]] # put data in meters + amplitude = data[:, idx[4]] + norm = amplitude.copy() + amplitude /= 1e9 + norm[norm == 0] = 1 + ori = data[:, idx[5:8]] / norm[:, np.newaxis] + gof = data[:, idx[8]] + # Deal with optional fields + optional = [None] * 2 + for fi, field in enumerate(optional_fields[:2]): + if field in fields: + optional[fi] = data[:, fields.index(field)] + khi2, nfree = optional + conf = dict() + for field, scale in zip(optional_fields[2:], conf_scales): # confidence + if field in fields: + conf[field.split("/")[0]] = scale * data[:, fields.index(field)] + return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree) + + +def _write_dipole_text(fname, dip): + fmt = " %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.2f" + header = ( + "# begin end X (mm) Y (mm) Z (mm)" + " Q(nAm) Qx(nAm) Qy(nAm) Qz(nAm) g/%" + ) + t = dip.times[:, np.newaxis] * 1000.0 + gof = dip.gof[:, np.newaxis] + amp = 1e9 * dip.amplitude[:, np.newaxis] + out = (t, t, dip.pos / 1e-3, amp, dip.ori * amp, gof) + + # optional fields + fmts = dict( + khi2=(" khi^2", " %8.1f", 1.0), + nfree=(" free", " %5d", 1), + vol=(" vol/mm^3", " %9.3f", 1e9), + depth=(" depth/mm", " %9.3f", 1e3), + long=(" long/mm", " %8.3f", 1e3), + trans=(" trans/mm", " %9.3f", 1e3), + qlong=(" Qlong/nAm", " %10.3f", 1e9), + qtrans=(" Qtrans/nAm", " %11.3f", 1e9), + ) + for key in ("khi2", "nfree"): + data = getattr(dip, key) + if data is not None: + header += fmts[key][0] + fmt += fmts[key][1] + out += (data[:, np.newaxis] * fmts[key][2],) + for key in ("vol", "depth", "long", "trans", "qlong", "qtrans"): + data = dip.conf.get(key) + if data is not None: + header += fmts[key][0] + fmt += fmts[key][1] + out += (data[:, np.newaxis] * fmts[key][2],) + out = np.concatenate(out, axis=-1) + + # NB CoordinateSystem is hard-coded as Head here + with open(fname, "wb") as fid: + fid.write(b'# CoordinateSystem "Head"\n') + fid.write((header + "\n").encode("utf-8")) + np.savetxt(fid, out, fmt=fmt) + if dip.name is not None: + fid.write((f'## Name "{dip.name} dipoles" Style "Dipoles"').encode()) + + +_BDIP_ERROR_KEYS = ("depth", "long", "trans", "qlong", "qtrans") + + +def _read_dipole_bdip(fname): + name = None + nfree = None + with open(fname, "rb") as fid: + # Which dipole in a 
multi-dipole set + times = list() + pos = list() + amplitude = list() + ori = list() + gof = list() + conf = dict(vol=list()) + khi2 = list() + has_errors = None + while True: + num = np.frombuffer(fid.read(4), ">i4") + if len(num) == 0: + break + times.append(np.frombuffer(fid.read(4), ">f4")[0]) + fid.read(4) # end + fid.read(12) # r0 + pos.append(np.frombuffer(fid.read(12), ">f4")) + Q = np.frombuffer(fid.read(12), ">f4") + amplitude.append(np.linalg.norm(Q)) + ori.append(Q / amplitude[-1]) + gof.append(100 * np.frombuffer(fid.read(4), ">f4")[0]) + this_has_errors = bool(np.frombuffer(fid.read(4), ">i4")[0]) + if has_errors is None: + has_errors = this_has_errors + for key in _BDIP_ERROR_KEYS: + conf[key] = list() + assert has_errors == this_has_errors + fid.read(4) # Noise level used for error computations + limits = np.frombuffer(fid.read(20), ">f4") # error limits + for key, lim in zip(_BDIP_ERROR_KEYS, limits): + conf[key].append(lim) + fid.read(100) # (5, 5) fully describes the conf. ellipsoid + conf["vol"].append(np.frombuffer(fid.read(4), ">f4")[0]) + khi2.append(np.frombuffer(fid.read(4), ">f4")[0]) + fid.read(4) # prob + fid.read(4) # total noise estimate + return Dipole(times, pos, amplitude, ori, gof, name, conf, khi2, nfree) + + +def _write_dipole_bdip(fname, dip): + with open(fname, "wb+") as fid: + for ti, t in enumerate(dip.times): + fid.write(np.zeros(1, ">i4").tobytes()) # int dipole + fid.write(np.array([t, 0]).astype(">f4").tobytes()) + fid.write(np.zeros(3, ">f4").tobytes()) # r0 + fid.write(dip.pos[ti].astype(">f4").tobytes()) # pos + Q = dip.amplitude[ti] * dip.ori[ti] + fid.write(Q.astype(">f4").tobytes()) + fid.write(np.array(dip.gof[ti] / 100.0, ">f4").tobytes()) + has_errors = int(bool(len(dip.conf))) + fid.write(np.array(has_errors, ">i4").tobytes()) # has_errors + fid.write(np.zeros(1, ">f4").tobytes()) # noise level + for key in _BDIP_ERROR_KEYS: + val = dip.conf[key][ti] if key in dip.conf else 0.0 + assert val.shape == () + fid.write(np.array(val, ">f4").tobytes()) + fid.write(np.zeros(25, ">f4").tobytes()) + conf = dip.conf["vol"][ti] if "vol" in dip.conf else 0.0 + fid.write(np.array(conf, ">f4").tobytes()) + khi2 = dip.khi2[ti] if dip.khi2 is not None else 0 + fid.write(np.array(khi2, ">f4").tobytes()) + fid.write(np.zeros(1, ">f4").tobytes()) # prob + fid.write(np.zeros(1, ">f4").tobytes()) # total noise est + + +# ############################################################################# +# Fitting + + +def _dipole_forwards(*, sensors, fwd_data, whitener, rr, n_jobs=None): + """Compute the forward solution and do other nice stuff.""" + B = _compute_forwards_meeg( + rr, sensors=sensors, fwd_data=fwd_data, n_jobs=n_jobs, silent=True + ) + B = np.concatenate(list(B.values()), axis=1) + assert np.isfinite(B).all() + B_orig = B.copy() + + # Apply projection and whiten (cov has projections already) + _, _, dgemm = _get_ddot_dgemv_dgemm() + B = dgemm(1.0, B, whitener.T) + + # column normalization doesn't affect our fitting, so skip for now + # S = np.sum(B * B, axis=1) # across channels + # scales = np.repeat(3. 
/ np.sqrt(np.sum(np.reshape(S, (len(rr), 3)), + # axis=1)), 3) + # B *= scales[:, np.newaxis] + scales = np.ones(3) + return B, B_orig, scales + + +@verbose +def _make_guesses(surf, grid, exclude, mindist, n_jobs=None, verbose=None): + """Make a guess space inside a sphere or BEM surface.""" + if "rr" in surf: + logger.info( + "Guess surface ({}) is in {} coordinates".format( + _bem_surf_name[surf["id"]], _coord_frame_name(surf["coord_frame"]) + ) + ) + else: + logger.info( + "Making a spherical guess space with radius {:7.1f} mm...".format( + 1000 * surf["R"] + ) + ) + logger.info("Filtering (grid = %6.f mm)..." % (1000 * grid)) + src = _make_volume_source_space( + surf, grid, exclude, 1000 * mindist, do_neighbors=False, n_jobs=n_jobs + )[0] + assert "vertno" in src + # simplify the result to make things easier later + src = dict( + rr=src["rr"][src["vertno"]], + nn=src["nn"][src["vertno"]], + nuse=src["nuse"], + coord_frame=src["coord_frame"], + vertno=np.arange(src["nuse"]), + type="discrete", + ) + return SourceSpaces([src]) + + +def _fit_eval(rd, B, B2, *, sensors, fwd_data, whitener, lwork, fwd_svd): + """Calculate the residual sum of squares.""" + if fwd_svd is None: + assert sensors is not None + fwd = _dipole_forwards( + sensors=sensors, fwd_data=fwd_data, whitener=whitener, rr=rd[np.newaxis, :] + )[0] + uu, sing, vv = _repeated_svd(fwd, lwork, overwrite_a=True) + else: + uu, sing, vv = fwd_svd + gof = _dipole_gof(uu, sing, vv, B, B2)[0] + # mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version + return 1.0 - gof + + +@functools.lru_cache(None) +def _get_ddot_dgemv_dgemm(): + return _get_blas_funcs(np.float64, ("dot", "gemv", "gemm")) + + +def _dipole_gof(uu, sing, vv, B, B2): + """Calculate the goodness of fit from the forward SVD.""" + ddot, dgemv, _ = _get_ddot_dgemv_dgemm() + ncomp = 3 if sing[2] / (sing[0] if sing[0] > 0 else 1.0) > 0.2 else 2 + one = dgemv(1.0, vv[:ncomp], B) # np.dot(vv[:ncomp], B) + Bm2 = ddot(one, one) # np.sum(one * one) + gof = Bm2 / B2 + return gof, one + + +def _fit_Q(*, sensors, fwd_data, whitener, B, B2, B_orig, rd, ori=None): + """Fit the dipole moment once the location is known.""" + if "fwd" in fwd_data: + # should be a single precomputed "guess" (i.e., fixed position) + assert rd is None + fwd = fwd_data["fwd"] + assert fwd.shape[0] == 3 + fwd_orig = fwd_data["fwd_orig"] + assert fwd_orig.shape[0] == 3 + scales = fwd_data["scales"] + assert scales.shape == (3,) + fwd_svd = fwd_data["fwd_svd"][0] + else: + fwd, fwd_orig, scales = _dipole_forwards( + sensors=sensors, fwd_data=fwd_data, whitener=whitener, rr=rd[np.newaxis, :] + ) + fwd_svd = None + if ori is None: + if fwd_svd is None: + fwd_svd = _safe_svd(fwd, full_matrices=False) + uu, sing, vv = fwd_svd + gof, one = _dipole_gof(uu, sing, vv, B, B2) + ncomp = len(one) + one /= sing[:ncomp] + Q = np.dot(one, uu.T[:ncomp]) + else: + fwd = np.dot(ori[np.newaxis], fwd) + sing = np.linalg.norm(fwd) + one = np.dot(fwd / sing, B) + gof = (one * one)[0] / B2 + Q = ori * np.sum(one / sing) + ncomp = 3 + # Counteract the effect of column normalization + Q *= scales[0] + B_residual_noproj = B_orig - np.dot(fwd_orig.T, Q) + return Q, gof, B_residual_noproj, ncomp + + +def _fit_dipoles( + fun, + min_dist_to_inner_skull, + data, + times, + guess_rrs, + guess_data, + *, + sensors, + fwd_data, + whitener, + ori, + n_jobs, + rank, + rhoend, +): + """Fit a single dipole to the given whitened, projected data.""" + parallel, p_fun, n_jobs = parallel_func(fun, n_jobs) + # parallel over time points 
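+    #
+    # Hedged illustration of the ``parallel_func`` pattern used just below
+    # (``my_fun`` and ``work`` are placeholders for this sketch, not names
+    # defined in this module):
+    #
+    #     parallel, p_fun, n_jobs = parallel_func(my_fun, n_jobs)
+    #     results = parallel(p_fun(item) for item in work)
+    #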
+    res = parallel(
+        p_fun(
+            min_dist_to_inner_skull,
+            B,
+            t,
+            guess_rrs,
+            guess_data,
+            sensors=sensors,
+            fwd_data=fwd_data,
+            whitener=whitener,
+            fmin_cobyla=fmin_cobyla,
+            ori=ori,
+            rank=rank,
+            rhoend=rhoend,
+        )
+        for B, t in zip(data.T, times)
+    )
+    pos = np.array([r[0] for r in res])
+    amp = np.array([r[1] for r in res])
+    ori = np.array([r[2] for r in res])
+    gof = np.array([r[3] for r in res]) * 100  # convert to percentage
+    conf = None
+    if res[0][4] is not None:
+        conf = np.array([r[4] for r in res])
+        keys = ["vol", "depth", "long", "trans", "qlong", "qtrans"]
+        conf = {key: conf[:, ki] for ki, key in enumerate(keys)}
+    khi2 = np.array([r[5] for r in res])
+    nfree = np.array([r[6] for r in res])
+    residual_noproj = np.array([r[7] for r in res]).T
+
+    return pos, amp, ori, gof, conf, khi2, nfree, residual_noproj
+
+
+'''Simplex code in case we ever want/need it for testing
+
+def _make_tetra_simplex():
+    """Make the initial tetrahedron"""
+    #
+    # For this definition of a regular tetrahedron, see
+    #
+    #   http://mathworld.wolfram.com/Tetrahedron.html
+    #
+    x = np.sqrt(3.0) / 3.0
+    r = np.sqrt(6.0) / 12.0
+    R = 3 * r
+    d = x / 2.0
+    simplex = 1e-2 * np.array([[x, 0.0, -r],
+                               [-d, 0.5, -r],
+                               [-d, -0.5, -r],
+                               [0., 0., R]])
+    return simplex
+
+
+def try_(p, y, psum, ndim, fun, ihi, neval, fac):
+    """Helper to try a value"""
+    ptry = np.empty(ndim)
+    fac1 = (1.0 - fac) / ndim
+    fac2 = fac1 - fac
+    ptry = psum * fac1 - p[ihi] * fac2
+    ytry = fun(ptry)
+    neval += 1
+    if ytry < y[ihi]:
+        y[ihi] = ytry
+        psum[:] += ptry - p[ihi]
+        p[ihi] = ptry
+    return ytry, neval
+
+
+def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
+    """Minimization with the simplex algorithm
+
+    Modified from Numerical recipes"""
+    y = np.array([fun(s) for s in p])
+    ndim = p.shape[1]
+    assert p.shape[0] == ndim + 1
+    mpts = ndim + 1
+    neval = 0
+    psum = p.sum(axis=0)
+
+    loop = 1
+    while(True):
+        ilo = 1
+        if y[1] > y[2]:
+            ihi = 1
+            inhi = 2
+        else:
+            ihi = 2
+            inhi = 1
+        for i in range(mpts):
+            if y[i] < y[ilo]:
+                ilo = i
+            if y[i] > y[ihi]:
+                inhi = ihi
+                ihi = i
+            elif y[i] > y[inhi]:
+                if i != ihi:
+                    inhi = i
+
+        rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
+        if rtol < ftol:
+            break
+        if neval >= max_eval:
+            raise RuntimeError('Maximum number of evaluations exceeded.')
+        if stol > 0:  # Has the simplex collapsed?
+            dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
+            if loop > 5 and dsum < stol:
+                break
+
+        ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
+        if ytry <= y[ilo]:
+            ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
+        elif ytry >= y[inhi]:
+            ysave = y[ihi]
+            ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
+            if ytry >= ysave:
+                for i in range(mpts):
+                    if i != ilo:
+                        psum[:] = 0.5 * (p[i] + p[ilo])
+                        p[i] = psum
+                        y[i] = fun(psum)
+                neval += ndim
+                psum = p.sum(axis=0)
+        loop += 1
+'''
+
+
+def _fit_confidence(*, rd, Q, ori, whitener, fwd_data, sensors):
+    # As described in the Xfit manual, confidence intervals can be calculated
+    # by examining a linearization of the model at the best-fitting location,
+    # i.e. taking the Jacobian and using the whitener:
+    #
+    #     J = [∂b/∂x ∂b/∂y ∂b/∂z ∂b/∂Qx ∂b/∂Qy ∂b/∂Qz]
+    #     C = (J.T C^-1 J)^-1
+    #
+    # And then the confidence interval is the diagonal of C, scaled by 1.96
+    # (for 95% confidence).
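+    #
+    # A hedged, standalone sketch of the same idea (the names below are
+    # illustrative only, not part of this module): given a whitened Jacobian
+    # ``J`` of shape (n_channels, 6), the 95% half-widths would be roughly
+    #
+    #     C = np.linalg.pinv(J.T @ J)
+    #     conf = 1.96 * np.sqrt(np.diag(C))
+    #
+    # which is what the unit-normalized, pinvh-based computation below does.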
+    direction = np.empty((3, 3))
+    # The coordinate system has the x axis aligned with the dipole orientation,
+    direction[0] = ori
+    # the z axis through the origin of the sphere model
+    rvec = rd - fwd_data["inner_skull"]["r0"]
+    direction[2] = rvec - ori * np.dot(ori, rvec)  # orthogonalize
+    direction[2] /= np.linalg.norm(direction[2])
+    # and the y axis perpendicular to these, forming a right-handed system.
+    direction[1] = np.cross(direction[2], direction[0])
+    assert np.allclose(np.dot(direction, direction.T), np.eye(3))
+    # Get spatial deltas in dipole coordinate directions
+    deltas = (-1e-4, 1e-4)
+    J = np.empty((whitener.shape[0], 6))
+    for ii in range(3):
+        fwds = []
+        for delta in deltas:
+            this_r = rd[np.newaxis] + delta * direction[ii]
+            fwds.append(
+                np.dot(
+                    Q,
+                    _dipole_forwards(
+                        sensors=sensors, fwd_data=fwd_data, whitener=whitener, rr=this_r
+                    )[0],
+                )
+            )
+        J[:, ii] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
+    # Get current (Q) deltas in the dipole directions
+    deltas = np.array([-0.01, 0.01]) * np.linalg.norm(Q)
+    this_fwd = _dipole_forwards(
+        sensors=sensors, fwd_data=fwd_data, whitener=whitener, rr=rd[np.newaxis]
+    )[0]
+    for ii in range(3):
+        fwds = []
+        for delta in deltas:
+            fwds.append(np.dot(Q + delta * direction[ii], this_fwd))
+        J[:, ii + 3] = np.diff(fwds, axis=0)[0] / np.diff(deltas)[0]
+    # J is already whitened, so we don't need to do np.dot(whitener, J).
+    # However, the units in the Jacobian are potentially quite different,
+    # so we need to do some normalization during inversion, then revert.
+    direction_norm = np.linalg.norm(J[:, :3])
+    Q_norm = np.linalg.norm(J[:, 3:5])  # omit possible zero Z
+    norm = np.array([direction_norm] * 3 + [Q_norm] * 3)
+    J /= norm
+    J = np.dot(J.T, J)
+    C = pinvh(J, rtol=1e-14)
+    C /= norm
+    C /= norm[:, np.newaxis]
+    conf = 1.96 * np.sqrt(np.diag(C))
+    # The confidence volume of the dipole location is obtained by
+    # taking the eigenvalues of the upper left submatrix and computing
+    # v = 4π/3 √(c^3 λ1 λ2 λ3) with c = 7.81, or:
+    vol_conf = (
+        4
+        * np.pi
+        / 3.0
+        * np.sqrt(476.379541 * np.prod(eigh(C[:3, :3], eigvals_only=True)))
+    )
+    conf = np.concatenate([conf, [vol_conf]])
+    # Now we reorder and subselect the proper columns:
+    # vol, depth, long, trans, Qlong, Qtrans (discard Qdepth, assumed zero)
+    conf = conf[[6, 2, 0, 1, 3, 4]]
+    return conf
+
+
+def _surface_constraint(rd, surf, min_dist_to_inner_skull):
+    """Surface fitting constraint."""
+    dist = _compute_nearest(surf["rr"], rd[np.newaxis, :], return_dists=True)[1][0]
+    if _points_outside_surface(rd[np.newaxis, :], surf, 1)[0]:
+        dist *= -1.0
+    # Once we know the dipole is below the inner skull,
+    # let's check if its distance to the inner skull is at least
+    # min_dist_to_inner_skull. This can be enforced by adding a
+    # constraint proportional to its distance.
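+    # Worked sign check (illustrative numbers, not from real data): a dipole
+    # 8 mm inside the surface with min_dist_to_inner_skull = 0.005 returns
+    # 0.008 - 0.005 = 0.003 > 0 (feasible); a dipole outside the surface has
+    # its distance negated above, so the constraint goes negative.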
+ dist -= min_dist_to_inner_skull + return dist + + +def _sphere_constraint(rd, r0, R_adj): + """Sphere fitting constraint.""" + return R_adj - np.sqrt(np.sum((rd - r0) ** 2)) + + +def _fit_dipole( + min_dist_to_inner_skull, + B_orig, + t, + guess_rrs, + guess_data, + *, + sensors, + fwd_data, + whitener, + fmin_cobyla, + ori, + rank, + rhoend, +): + """Fit a single bit of data.""" + B = np.dot(whitener, B_orig) + + # make constraint function to keep the solver within the inner skull + if "rr" in fwd_data["inner_skull"]: # bem + surf = fwd_data["inner_skull"] + constraint = partial( + _surface_constraint, + surf=surf, + min_dist_to_inner_skull=min_dist_to_inner_skull, + ) + else: # sphere + surf = None + constraint = partial( + _sphere_constraint, + r0=fwd_data["inner_skull"]["r0"], + R_adj=fwd_data["inner_skull"]["R"] - min_dist_to_inner_skull, + ) + + # Find a good starting point (find_best_guess in C) + B2 = np.dot(B, B) + if B2 == 0: + warn(f"Zero field found for time {t}") + return np.zeros(3), 0, np.zeros(3), 0, B + + idx = np.argmin( + [ + _fit_eval( + guess_rrs[[fi], :], + B, + B2, + fwd_svd=fwd_svd, + fwd_data=None, + sensors=None, + whitener=None, + lwork=None, + ) + for fi, fwd_svd in enumerate(guess_data["fwd_svd"]) + ] + ) + x0 = guess_rrs[idx] + lwork = _svd_lwork((3, B.shape[0])) + fun = partial( + _fit_eval, + B=B, + B2=B2, + fwd_data=fwd_data, + whitener=whitener, + lwork=lwork, + sensors=sensors, + fwd_svd=None, + ) + + # Tested minimizers: + # Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC + # Several were similar, but COBYLA won for having a handy constraint + # function we can use to ensure we stay inside the inner skull / + # smallest sphere + rd_final = fmin_cobyla( + fun, x0, (constraint,), consargs=(), rhobeg=5e-2, rhoend=rhoend, disp=False + ) + + # simplex = _make_tetra_simplex() + x0 + # _simplex_minimize(simplex, 1e-4, 2e-4, fun) + # rd_final = simplex[0] + + # Compute the dipole moment at the final point + Q, gof, residual_noproj, n_comp = _fit_Q( + sensors=sensors, + fwd_data=fwd_data, + whitener=whitener, + B=B, + B2=B2, + B_orig=B_orig, + rd=rd_final, + ori=ori, + ) + khi2 = (1 - gof) * B2 + nfree = rank - n_comp + amp = np.sqrt(np.dot(Q, Q)) + norm = 1.0 if amp == 0.0 else amp + ori = Q / norm + + conf = _fit_confidence( + sensors=sensors, rd=rd_final, Q=Q, ori=ori, whitener=whitener, fwd_data=fwd_data + ) + + msg = "---- Fitted : %7.1f ms" % (1000.0 * t) + if surf is not None: + dist_to_inner_skull = _compute_nearest( + surf["rr"], rd_final[np.newaxis, :], return_dists=True + )[1][0] + msg += ", distance to inner skull : %2.4f mm" % (dist_to_inner_skull * 1000.0) + + logger.info(msg) + return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj + + +def _fit_dipole_fixed( + min_dist_to_inner_skull, + B_orig, + t, + guess_rrs, + guess_data, + *, + sensors, + fwd_data, + whitener, + fmin_cobyla, + ori, + rank, + rhoend, +): + """Fit a data using a fixed position.""" + B = np.dot(whitener, B_orig) + B2 = np.dot(B, B) + if B2 == 0: + warn(f"Zero field found for time {t}") + return np.zeros(3), 0, np.zeros(3), 0, np.zeros(6) + # Compute the dipole moment + Q, gof, residual_noproj = _fit_Q( + fwd_data=guess_data, + whitener=whitener, + B=B, + B2=B2, + B_orig=B_orig, + sensors=sensors, + rd=None, + ori=ori, + )[:3] + if ori is None: + amp = np.sqrt(np.dot(Q, Q)) + norm = 1.0 if amp == 0.0 else amp + ori = Q / norm + else: + amp = np.dot(Q, ori) + rd_final = guess_rrs[0] + # This will be slow, and we don't use it anyway, so omit it for now: + # 
conf = _fit_confidence(rd_final, Q, ori, whitener, fwd_data) + conf = khi2 = nfree = None + # No corresponding 'logger' message here because it should go *very* fast + return rd_final, amp, ori, gof, conf, khi2, nfree, residual_noproj + + +@verbose +def fit_dipole( + evoked, + cov, + bem, + trans=None, + min_dist=5.0, + n_jobs=None, + pos=None, + ori=None, + rank=None, + accuracy="normal", + tol=5e-5, + verbose=None, +): + """Fit a dipole. + + Parameters + ---------- + evoked : instance of Evoked + The dataset to fit. + cov : str | instance of Covariance + The noise covariance. + bem : path-like | instance of ConductorModel + The BEM filename (str) or conductor model. + trans : path-like | None + The head<->MRI transform filename. Must be provided unless BEM + is a sphere model. + min_dist : float + Minimum distance (in millimeters) from the dipole to the inner skull. + Must be positive. Note that because this is a constraint passed to + a solver it is not strict but close, i.e. for a ``min_dist=5.`` the + fits could be 4.9 mm from the inner skull. + %(n_jobs)s + It is used in field computation and fitting. + pos : ndarray, shape (3,) | None + Position of the dipole to use. If None (default), sequential + fitting (different position and orientation for each time instance) + is performed. If a position (in head coords) is given as an array, + the position is fixed during fitting. + + .. versionadded:: 0.12 + ori : ndarray, shape (3,) | None + Orientation of the dipole to use. If None (default), the + orientation is free to change as a function of time. If an + orientation (in head coordinates) is given as an array, ``pos`` + must also be provided, and the routine computes the amplitude and + goodness of fit of the dipole at the given position and orientation + for each time instant. + + .. versionadded:: 0.12 + %(rank_none)s + + .. versionadded:: 0.20 + accuracy : str + Can be ``"normal"`` (default) or ``"accurate"``, which gives the most + accurate coil definition but is typically not necessary for real-world + data. + + .. versionadded:: 0.24 + tol : float + Final accuracy of the optimization (see ``rhoend`` argument of + :func:`scipy.optimize.fmin_cobyla`). + + .. versionadded:: 0.24 + %(verbose)s + + Returns + ------- + dip : instance of Dipole or DipoleFixed + The dipole fits. A :class:`mne.DipoleFixed` is returned if + ``pos`` and ``ori`` are both not None, otherwise a + :class:`mne.Dipole` is returned. + residual : instance of Evoked + The M-EEG data channels with the fitted dipolar activity removed. + + See Also + -------- + mne.beamformer.rap_music + Dipole + DipoleFixed + read_dipole + + Notes + ----- + .. versionadded:: 0.9.0 + """ + # This could eventually be adapted to work with other inputs, these + # are what is needed: + + evoked = evoked.copy() + _validate_type(accuracy, str, "accuracy") + _check_option("accuracy", accuracy, ("accurate", "normal")) + + # Determine if a list of projectors has an average EEG ref + if _needs_eeg_average_ref_proj(evoked.info): + raise ValueError("EEG average reference is mandatory for dipole fitting.") + if min_dist < 0: + raise ValueError(f"min_dist should be positive. 
Got {min_dist}") + if ori is not None and pos is None: + raise ValueError("pos must be provided if ori is not None") + + data = evoked.data + if not np.isfinite(data).all(): + raise ValueError("Evoked data must be finite") + info = evoked.info + times = evoked.times.copy() + comment = evoked.comment + + # Convert the min_dist to meters + min_dist_to_inner_skull = min_dist / 1000.0 + del min_dist + + # Figure out our inputs + neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False, exclude=[])) + if isinstance(bem, str): + bem_extra = bem + else: + bem_extra = repr(bem) + logger.info(f"BEM : {bem_extra}") + mri_head_t, trans = _get_trans(trans) + logger.info(f"MRI transform : {trans}") + safe_false = _verbose_safe_false() + bem = _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=safe_false) + if not bem["is_sphere"]: + # Find the best-fitting sphere + inner_skull = _bem_find_surface(bem, "inner_skull") + inner_skull = inner_skull.copy() + R, r0 = _fit_sphere(inner_skull["rr"], disp=False) + # r0 back to head frame for logging + r0 = apply_trans(mri_head_t["trans"], r0[np.newaxis, :])[0] + inner_skull["r0"] = r0 + logger.info( + f"Head origin : {1000 * r0[0]:6.1f} {1000 * r0[1]:6.1f} " + f"{1000 * r0[2]:6.1f} mm rad = {1000 * R:6.1f} mm." + ) + del R, r0 + else: + r0 = bem["r0"] + if len(bem.get("layers", [])) > 0: + R = bem["layers"][0]["rad"] + kind = "rad" + else: # MEG-only + # Use the minimum distance to the MEG sensors as the radius then + R = np.dot( + np.linalg.inv(info["dev_head_t"]["trans"]), np.hstack([r0, [1.0]]) + )[:3] # r0 -> device + R = R - [ + info["chs"][pick]["loc"][:3] + for pick in pick_types(info, meg=True, exclude=[]) + ] + if len(R) == 0: + raise RuntimeError( + "No MEG channels found, but MEG-only sphere model used" + ) + R = np.min(np.sqrt(np.sum(R * R, axis=1))) # use dist to sensors + kind = "max_rad" + logger.info( + f"Sphere model : origin at ({1000 * r0[0]: 7.2f} {1000 * r0[1]: 7.2f} " + f"{1000 * r0[2]: 7.2f}) mm, {kind} = {R:6.1f} mm" + ) + inner_skull = dict(R=R, r0=r0) # NB sphere model defined in head frame + del R, r0 + + # Deal with DipoleFixed cases here + if pos is not None: + fixed_position = True + pos = np.array(pos, float) + if pos.shape != (3,): + raise ValueError(f"pos must be None or a 3-element array-like, got {pos}") + logger.info( + "Fixed position : {:6.1f} {:6.1f} {:6.1f} mm".format(*tuple(1000 * pos)) + ) + if ori is not None: + ori = np.array(ori, float) + if ori.shape != (3,): + raise ValueError( + f"oris must be None or a 3-element array-like, got {ori}" + ) + norm = np.sqrt(np.sum(ori * ori)) + if not np.isclose(norm, 1): + raise ValueError(f"ori must be a unit vector, got length {norm}") + logger.info( + "Fixed orientation : {:6.4f} {:6.4f} {:6.4f} mm".format(*tuple(ori)) + ) + else: + logger.info("Free orientation : ") + fit_n_jobs = 1 # only use 1 job to do the guess fitting + else: + fixed_position = False + # Eventually these could be parameters, but they are just used for + # the initial grid anyway + guess_grid = 0.02 # MNE-C uses 0.01, but this is faster w/similar perf + guess_mindist = max(0.005, min_dist_to_inner_skull) + guess_exclude = 0.02 + + logger.info(f"Guess grid : {1000 * guess_grid:6.1f} mm") + if guess_mindist > 0.0: + logger.info(f"Guess mindist : {1000 * guess_mindist:6.1f} mm") + if guess_exclude > 0: + logger.info(f"Guess exclude : {1000 * guess_exclude:6.1f} mm") + logger.info(f"Using {accuracy} MEG coil definitions.") + fit_n_jobs = n_jobs + cov = _ensure_cov(cov) + logger.info("") + + 
_print_coord_trans(mri_head_t) + _print_coord_trans(info["dev_head_t"]) + logger.info(f"{len(info['bads'])} bad channels total") + + # Forward model setup (setup_forward_model from setup.c) + ch_types = evoked.get_channel_types() + + sensors = dict() + if "grad" in ch_types or "mag" in ch_types: + sensors["meg"] = _prep_meg_channels( + info, exclude="bads", accuracy=accuracy, verbose=verbose + ) + if "eeg" in ch_types: + sensors["eeg"] = _prep_eeg_channels(info, exclude="bads", verbose=verbose) + + # Ensure that MEG and/or EEG channels are present + if len(sensors) == 0: + raise RuntimeError("No MEG or EEG channels found.") + + # Whitener for the data + logger.info("Decomposing the sensor noise covariance matrix...") + picks = pick_types(info, meg=True, eeg=True, ref_meg=False) + + # In case we want to more closely match MNE-C for debugging: + # from ._fiff.pick import pick_info + # from .cov import prepare_noise_cov + # info_nb = pick_info(info, picks) + # cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False) + # nzero = (cov['eig'] > 0) + # n_chan = len(info_nb['ch_names']) + # whitener = np.zeros((n_chan, n_chan), dtype=np.float64) + # whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero]) + # whitener = np.dot(whitener, cov['eigvec']) + + whitener, _, rank = compute_whitener( + cov, info, picks=picks, rank=rank, return_rank=True + ) + + # Proceed to computing the fits (make_guess_data) + if fixed_position: + guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True])) + logger.info("Compute forward for dipole location...") + else: + logger.info("\n---- Computing the forward solution for the guesses...") + guess_src = _make_guesses( + inner_skull, guess_grid, guess_exclude, guess_mindist, n_jobs=n_jobs + )[0] + # grid coordinates go from mri to head frame + transform_surface_to(guess_src, "head", mri_head_t) + logger.info("Go through all guess source locations...") + + # inner_skull goes from mri to head frame + if "rr" in inner_skull: + transform_surface_to(inner_skull, "head", mri_head_t) + if fixed_position: + if "rr" in inner_skull: + check = _surface_constraint(pos, inner_skull, min_dist_to_inner_skull) + else: + check = _sphere_constraint( + pos, inner_skull["r0"], R_adj=inner_skull["R"] - min_dist_to_inner_skull + ) + if check <= 0: + raise ValueError( + f"fixed position is {-1000 * check:0.1f}mm outside the inner skull " + "boundary" + ) + + # C code computes guesses w/sphere model for speed, don't bother here + fwd_data = _prep_field_computation( + guess_src["rr"], sensors=sensors, bem=bem, n_jobs=n_jobs, verbose=safe_false + ) + fwd_data["inner_skull"] = inner_skull + guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards( + sensors=sensors, + fwd_data=fwd_data, + whitener=whitener, + rr=guess_src["rr"], + n_jobs=fit_n_jobs, + ) + # decompose ahead of time + guess_fwd_svd = [ + _safe_svd(fwd, full_matrices=False) + for fwd in np.array_split(guess_fwd, len(guess_src["rr"])) + ] + guess_data = dict( + fwd=guess_fwd, + fwd_svd=guess_fwd_svd, + fwd_orig=guess_fwd_orig, + scales=guess_fwd_scales, + ) + del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales # destroyed + logger.info("[done %d source%s]", guess_src["nuse"], _pl(guess_src["nuse"])) + + # Do actual fits + data = data[picks] + ch_names = [info["ch_names"][p] for p in picks] + proj_op = make_projector(info["projs"], ch_names, info["bads"])[0] + fun = _fit_dipole_fixed if fixed_position else _fit_dipole + out = _fit_dipoles( + fun, + min_dist_to_inner_skull, + data, + times, 
+ guess_src["rr"], + guess_data, + sensors=sensors, + fwd_data=fwd_data, + whitener=whitener, + ori=ori, + n_jobs=n_jobs, + rank=rank, + rhoend=tol, + ) + assert len(out) == 8 + if fixed_position and ori is not None: + # DipoleFixed + data = np.array([out[1], out[3]]) + out_info = deepcopy(info) + loc = np.concatenate([pos, ori, np.zeros(6)]) + out_info._unlocked = True + out_info["chs"] = [ + dict( + ch_name="dip 01", + loc=loc, + kind=FIFF.FIFFV_DIPOLE_WAVE, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + unit=FIFF.FIFF_UNIT_AM, + coil_type=FIFF.FIFFV_COIL_DIPOLE, + unit_mul=0, + range=1, + cal=1.0, + scanno=1, + logno=1, + ), + dict( + ch_name="goodness", + loc=np.full(12, np.nan), + kind=FIFF.FIFFV_GOODNESS_FIT, + unit=FIFF.FIFF_UNIT_AM, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + coil_type=FIFF.FIFFV_COIL_NONE, + unit_mul=0, + range=1.0, + cal=1.0, + scanno=2, + logno=100, + ), + ] + for key in ["hpi_meas", "hpi_results", "projs"]: + out_info[key] = list() + for key in [ + "acq_pars", + "acq_stim", + "description", + "dig", + "experimenter", + "hpi_subsystem", + "proj_id", + "proj_name", + "subject_info", + ]: + out_info[key] = None + out_info._unlocked = False + out_info["bads"] = [] + out_info._update_redundant() + out_info._check_consistency() + dipoles = DipoleFixed( + out_info, data, times, evoked.nave, evoked._aspect_kind, comment=comment + ) + else: + dipoles = Dipole( + times, out[0], out[1], out[2], out[3], comment, out[4], out[5], out[6] + ) + residual = evoked.copy().apply_proj() # set the projs active + residual.data[picks] = np.dot(proj_op, out[-1]) + logger.info("%d time points fitted", len(dipoles.times)) + return dipoles, residual + + +# Every other row of Table 3 from OyamaEtAl2015 +_OYAMA = """ +0.00 56.29 -27.50 +32.50 56.29 5.00 +0.00 65.00 5.00 +-32.50 56.29 5.00 +0.00 56.29 37.50 +0.00 32.50 61.29 +-56.29 0.00 -27.50 +-56.29 32.50 5.00 +-65.00 0.00 5.00 +-56.29 -32.50 5.00 +-56.29 0.00 37.50 +-32.50 0.00 61.29 +0.00 -56.29 -27.50 +-32.50 -56.29 5.00 +0.00 -65.00 5.00 +32.50 -56.29 5.00 +0.00 -56.29 37.50 +0.00 -32.50 61.29 +56.29 0.00 -27.50 +56.29 -32.50 5.00 +65.00 0.00 5.00 +56.29 32.50 5.00 +56.29 0.00 37.50 +32.50 0.00 61.29 +0.00 0.00 70.00 +""" + + +def get_phantom_dipoles(kind="vectorview"): + """Get standard phantom dipole locations and orientations. + + Parameters + ---------- + kind : str + Get the information for the given system: + + ``vectorview`` (default) + The Neuromag VectorView phantom. + ``otaniemi`` + The older Neuromag phantom used at Otaniemi. + ``oyama`` + The phantom from :footcite:`OyamaEtAl2015`. + + .. versionchanged:: 1.6 + Support added for ``'oyama'``. + + Returns + ------- + pos : ndarray, shape (n_dipoles, 3) + The dipole positions. + ori : ndarray, shape (n_dipoles, 3) + The dipole orientations. + + See Also + -------- + mne.datasets.fetch_phantom + + Notes + ----- + The Elekta phantoms have a radius of 79.5mm, and HPI coil locations + in the XY-plane at the axis extrema (e.g., (79.5, 0), (0, -79.5), ...). + + References + ---------- + .. 
footbibliography::
+    """
+    _validate_type(kind, str, "kind")
+    _check_option("kind", kind, ["vectorview", "otaniemi", "oyama"])
+    if kind == "vectorview":
+        # these values were pulled from a scanned image provided by
+        # Elekta folks
+        a = np.array([59.7, 48.6, 35.8, 24.8, 37.2, 27.5, 15.8, 7.9])
+        b = np.array([46.1, 41.9, 38.3, 31.5, 13.9, 16.2, 20.0, 19.3])
+        x = np.concatenate((a, [0] * 8, -b, [0] * 8))
+        y = np.concatenate(([0] * 8, -a, [0] * 8, b))
+        c = [22.9, 23.5, 25.5, 23.1, 52.0, 46.4, 41.0, 33.0]
+        d = [44.4, 34.0, 21.6, 12.7, 62.4, 51.5, 39.1, 27.9]
+        z = np.concatenate((c, c, d, d))
+        signs = ([1, -1] * 4 + [-1, 1] * 4) * 2
+    elif kind == "otaniemi":
+        # these values were pulled from a Neuromag manual
+        # (NM20456A, 13.7.1999, p.65)
+        a = np.array([56.3, 47.6, 39.0, 30.3])
+        b = np.array([32.5, 27.5, 22.5, 17.5])
+        c = np.zeros(4)
+        x = np.concatenate((a, b, c, c, -a, -b, c, c))
+        y = np.concatenate((c, c, -a, -b, c, c, b, a))
+        z = np.concatenate((b, a, b, a, b, a, a, b))
+        signs = [-1] * 8 + [1] * 16 + [-1] * 8
+    else:
+        assert kind == "oyama"
+        xyz = np.fromstring(_OYAMA.strip().replace("\n", " "), sep=" ").reshape(25, 3)
+        xyz = np.repeat(xyz, 2, axis=0)
+        x, y, z = xyz.T
+        signs = [1] * 50
+    pos = np.vstack((x, y, z)).T / 1000.0
+    # For Neuromag-style phantoms,
+    # Locs are always in XZ or YZ, and so are the oris. The oris are
+    # also in the same plane and tangential, so it's easy to determine
+    # the orientation.
+    # For Oyama, vectors are orthogonal to the position vector and oriented with one
+    # pointed toward the north pole (except for the topmost points, which are just xy).
+    ori = list()
+    for pi, this_pos in enumerate(pos):
+        this_ori = np.zeros(3)
+        idx = np.where(this_pos == 0)[0]
+        # assert len(idx) == 1
+        if len(idx) == 0:  # oyama
+            idx = [np.argmin(this_pos)]
+        idx = np.setdiff1d(np.arange(3), idx[0])
+        this_ori[idx] = (this_pos[idx][::-1] / np.linalg.norm(this_pos[idx])) * [1, -1]
+        if kind == "oyama":
+            # Ensure it's orthogonal to the position vector
+            pos_unit = this_pos / np.linalg.norm(this_pos)
+            this_ori -= pos_unit * np.dot(this_ori, pos_unit)
+            this_ori /= np.linalg.norm(this_ori)
+            # This was empirically determined by looking at the dipole fits
+            if np.abs(this_ori[2]) >= 1e-6:  # if it's not in the XY plane
+                this_ori *= -1 * np.sign(this_ori[2])  # point downward
+            elif np.abs(this_ori[0]) < 1e-6:  # in the XY plane (at the north pole)
+                this_ori *= -1 * np.sign(this_ori[1])  # point backward
+            # Odd ones create a RH coordinate system with their ori
+            if pi % 2:
+                this_ori = np.cross(pos_unit, this_ori)
+        else:
+            this_ori *= signs[pi]
+        # Now we have this quality, which we could uncomment to
+        # double-check:
+        # np.testing.assert_allclose(np.dot(this_ori, this_pos) /
+        #                            np.linalg.norm(this_pos), 0,
+        #                            atol=1e-15)
+        ori.append(this_ori)
+    ori = np.array(ori)
+    return pos, ori
+
+
+def _concatenate_dipoles(dipoles):
+    """Concatenate a list of dipoles."""
+    times, pos, amplitude, ori, gof = [], [], [], [], []
+    for dipole in dipoles:
+        times.append(dipole.times)
+        pos.append(dipole.pos)
+        amplitude.append(dipole.amplitude)
+        ori.append(dipole.ori)
+        gof.append(dipole.gof)
+
+    return Dipole(
+        np.concatenate(times),
+        np.concatenate(pos),
+        np.concatenate(amplitude),
+        np.concatenate(ori),
+        np.concatenate(gof),
+        name=None,
+    )
diff --git a/mne/epochs.py b/mne/epochs.py
new file mode 100644
index 0000000..04b1a28
--- /dev/null
+++ b/mne/epochs.py
@@ -0,0 +1,4990 @@
+"""Tools for working with epoched data."""
+
+# Authors: The MNE-Python
contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import json +import operator +import os.path as op +from collections import Counter +from copy import deepcopy +from functools import partial +from inspect import getfullargspec +from pathlib import Path + +import numpy as np +from scipy.interpolate import interp1d + +from ._fiff.constants import FIFF +from ._fiff.meas_info import ( + ContainsMixin, + SetChannelsMixin, + _ensure_infos_match, + read_meas_info, + write_meas_info, +) +from ._fiff.open import _get_next_fname, fiff_open +from ._fiff.pick import ( + _DATA_CH_TYPES_SPLIT, + _pick_data_channels, + _picks_to_idx, + channel_indices_by_type, + channel_type, + pick_channels, + pick_info, +) +from ._fiff.proj import ProjMixin, setup_proj +from ._fiff.tag import _read_tag_header, read_tag +from ._fiff.tree import dir_tree_find +from ._fiff.utils import _make_split_fnames +from ._fiff.write import ( + _NEXT_FILE_BUFFER, + INT32_MAX, + _get_split_size, + end_block, + start_and_end_file, + start_block, + write_complex_double_matrix, + write_complex_float_matrix, + write_double_matrix, + write_float, + write_float_matrix, + write_id, + write_int, + write_string, +) +from .annotations import ( + EpochAnnotationsMixin, + _read_annotations_fif, + _write_annotations, + events_from_annotations, +) +from .baseline import _check_baseline, _log_rescale, rescale +from .bem import _check_origin +from .channels.channels import InterpolationMixin, ReferenceMixin, UpdateChannelsMixin +from .event import _read_events_fif, make_fixed_length_events, match_event_names +from .evoked import EvokedArray +from .filter import FilterMixin, _check_fun, detrend +from .fixes import rng_uniform +from .html_templates import _get_html_template +from .parallel import parallel_func +from .time_frequency.spectrum import EpochsSpectrum, SpectrumMixin, _validate_method +from .time_frequency.tfr import AverageTFR, EpochsTFR +from .utils import ( + ExtendedTimeMixin, + GetEpochsMixin, + SizeMixin, + _build_data_frame, + _check_combine, + _check_event_id, + _check_fname, + _check_option, + _check_pandas_index_arguments, + _check_pandas_installed, + _check_preload, + _check_time_format, + _convert_times, + _ensure_events, + _gen_events, + _on_missing, + _path_like, + _pl, + _prepare_read_metadata, + _prepare_write_metadata, + _scale_dataframe_data, + _validate_type, + check_fname, + check_random_state, + copy_function_doc_to_method_doc, + logger, + object_size, + repr_html, + sizeof_fmt, + verbose, + warn, +) +from .utils.docs import fill_doc +from .viz import plot_drop_log, plot_epochs, plot_epochs_image, plot_topo_image_epochs + + +def _pack_reject_params(epochs): + reject_params = dict() + for key in ("reject", "flat", "reject_tmin", "reject_tmax"): + val = getattr(epochs, key, None) + if val is not None: + reject_params[key] = val + return reject_params + + +def _save_split(epochs, split_fnames, part_idx, n_parts, fmt, overwrite): + """Split epochs. + + Anything new added to this function also needs to be added to + BaseEpochs.save to account for new file sizes. 
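+
+    A hedged driving sketch using only the signature defined below (the
+    ``epochs`` object and the file names here are placeholders)::
+
+        split_fnames = [Path("sub-01_epo.fif"), Path("sub-01_epo-1.fif")]
+        for part_idx in range(2):
+            _save_split(epochs, split_fnames, part_idx, 2, "single", True)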
+ """ + # insert index in filename + this_fname = split_fnames[part_idx] + _check_fname(this_fname, overwrite=overwrite) + + next_fname, next_idx = None, None + if part_idx < n_parts - 1: + next_idx = part_idx + 1 + next_fname = split_fnames[next_idx] + + with start_and_end_file(this_fname) as fid: + _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx) + + +def _save_part(fid, epochs, fmt, n_parts, next_fname, next_idx): + info = epochs.info + meas_id = info["meas_id"] + + start_block(fid, FIFF.FIFFB_MEAS) + write_id(fid, FIFF.FIFF_BLOCK_ID) + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info["meas_id"]) + + # Write measurement info + write_meas_info(fid, info) + + # One or more evoked data sets + start_block(fid, FIFF.FIFFB_PROCESSED_DATA) + start_block(fid, FIFF.FIFFB_MNE_EPOCHS) + + # write events out after getting data to ensure bad events are dropped + data = epochs.get_data(copy=False) + + _check_option("fmt", fmt, ["single", "double"]) + + if np.iscomplexobj(data): + if fmt == "single": + write_function = write_complex_float_matrix + elif fmt == "double": + write_function = write_complex_double_matrix + else: + if fmt == "single": + write_function = write_float_matrix + elif fmt == "double": + write_function = write_double_matrix + + # Epoch annotations are written if there are any + annotations = getattr(epochs, "annotations", []) + if annotations is not None and len(annotations): + _write_annotations(fid, annotations) + + # write Epoch event windows + start_block(fid, FIFF.FIFFB_MNE_EVENTS) + write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, epochs.events.T) + write_string(fid, FIFF.FIFF_DESCRIPTION, _event_id_string(epochs.event_id)) + end_block(fid, FIFF.FIFFB_MNE_EVENTS) + + # Metadata + if epochs.metadata is not None: + start_block(fid, FIFF.FIFFB_MNE_METADATA) + metadata = _prepare_write_metadata(epochs.metadata) + write_string(fid, FIFF.FIFF_DESCRIPTION, metadata) + end_block(fid, FIFF.FIFFB_MNE_METADATA) + + # First and last sample + first = int(round(epochs.tmin * info["sfreq"])) # round just to be safe + last = first + len(epochs.times) - 1 + write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first) + write_int(fid, FIFF.FIFF_LAST_SAMPLE, last) + + # write raw original sampling rate + write_float(fid, FIFF.FIFF_MNE_EPOCHS_RAW_SFREQ, epochs._raw_sfreq) + + # save baseline + if epochs.baseline is not None: + bmin, bmax = epochs.baseline + write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin) + write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax) + + # The epochs itself + decal = np.empty(info["nchan"]) + for k in range(info["nchan"]): + decal[k] = 1.0 / (info["chs"][k]["cal"] * info["chs"][k].get("scale", 1.0)) + + data *= decal[np.newaxis, :, np.newaxis] + + write_function(fid, FIFF.FIFF_EPOCH, data) + + # undo modifications to data + data /= decal[np.newaxis, :, np.newaxis] + + write_string(fid, FIFF.FIFF_MNE_EPOCHS_DROP_LOG, json.dumps(epochs.drop_log)) + + reject_params = _pack_reject_params(epochs) + if reject_params: + write_string(fid, FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT, json.dumps(reject_params)) + + write_int(fid, FIFF.FIFF_MNE_EPOCHS_SELECTION, epochs.selection) + + # And now write the next file info in case epochs are split on disk + if next_fname is not None and n_parts > 1: + start_block(fid, FIFF.FIFFB_REF) + write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE) + write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname)) + if meas_id is not None: + write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id) + write_int(fid, FIFF.FIFF_REF_FILE_NUM, 
next_idx)
+        end_block(fid, FIFF.FIFFB_REF)
+
+    end_block(fid, FIFF.FIFFB_MNE_EPOCHS)
+    end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
+    end_block(fid, FIFF.FIFFB_MEAS)
+
+
+def _event_id_string(event_id):
+    return ";".join([k + ":" + str(v) for k, v in event_id.items()])
+
+
+def _merge_events(events, event_id, selection):
+    """Merge repeated events."""
+    event_id = event_id.copy()
+    new_events = events.copy()
+    event_idxs_to_delete = list()
+    unique_events, counts = np.unique(events[:, 0], return_counts=True)
+    for ev in unique_events[counts > 1]:
+        # indices at which the non-unique events happened
+        idxs = (events[:, 0] == ev).nonzero()[0]
+
+        # Figure out new value for events[:, 1]. Set to 0, if mixed vals exist
+        unique_priors = np.unique(events[idxs, 1])
+        new_prior = unique_priors[0] if len(unique_priors) == 1 else 0
+
+        # If duplicate time samples have same event val, "merge" == "drop"
+        # and no new event_id key will be created
+        ev_vals = np.unique(events[idxs, 2])
+        if len(ev_vals) <= 1:
+            new_event_val = ev_vals[0]
+
+        # Else, make a new event_id for the merged event
+        else:
+            # Find all event_id keys involved in duplicated events. These
+            # keys will be merged to become a new entry in "event_id"
+            event_id_keys = list(event_id.keys())
+            event_id_vals = list(event_id.values())
+            new_key_comps = [
+                event_id_keys[event_id_vals.index(value)] for value in ev_vals
+            ]
+
+            # Check if we already have an entry for merged keys of duplicate
+            # events ... if yes, reuse it
+            for key in event_id:
+                if set(key.split("/")) == set(new_key_comps):
+                    new_event_val = event_id[key]
+                    break
+
+            # Else, find an unused value for the new key and make an entry into
+            # the event_id dict
+            else:
+                ev_vals = np.unique(
+                    np.concatenate(
+                        (list(event_id.values()), events[:, 1:].flatten()), axis=0
+                    )
+                )
+                if ev_vals[0] > 1:
+                    new_event_val = 1
+                else:
+                    diffs = np.diff(ev_vals)
+                    idx = np.where(diffs > 1)[0]
+                    idx = -1 if len(idx) == 0 else idx[0]
+                    new_event_val = ev_vals[idx] + 1
+
+            new_event_id_key = "/".join(sorted(new_key_comps))
+            event_id[new_event_id_key] = int(new_event_val)
+
+        # Replace duplicate event times with merged event and remember which
+        # duplicate indices to delete later
+        new_events[idxs[0], 1] = new_prior
+        new_events[idxs[0], 2] = new_event_val
+        event_idxs_to_delete.extend(idxs[1:])
+
+    # Delete duplicate event idxs
+    new_events = np.delete(new_events, event_idxs_to_delete, 0)
+    new_selection = np.delete(selection, event_idxs_to_delete, 0)
+
+    return new_events, event_id, new_selection
+
+
+def _handle_event_repeated(events, event_id, event_repeated, selection, drop_log):
+    """Handle repeated events.
+
+    Note that drop_log will be modified inplace
+    """
+    assert len(events) == len(selection)
+    selection = np.asarray(selection)
+
+    unique_events, u_ev_idxs = np.unique(events[:, 0], return_index=True)
+
+    # Return early if no duplicates
+    if len(unique_events) == len(events):
+        return events, event_id, selection, drop_log
+
+    # Else, we have duplicates. Triage ...
+    _check_option("event_repeated", event_repeated, ["error", "drop", "merge"])
+    drop_log = list(drop_log)
+    if event_repeated == "error":
+        raise RuntimeError(
+            "Event time samples were not unique. Consider "
+            "setting the `event_repeated` parameter."
+        )
+
+    elif event_repeated == "drop":
+        logger.info(
+            "Multiple event values for single event times found. "
+            "Keeping the first occurrence and dropping all others."
+        )
+        new_events = events[u_ev_idxs]
+        new_selection = selection[u_ev_idxs]
+        drop_ev_idxs = np.setdiff1d(selection, new_selection)
+        for idx in drop_ev_idxs:
+            drop_log[idx] = drop_log[idx] + ("DROP DUPLICATE",)
+        selection = new_selection
+    elif event_repeated == "merge":
+        logger.info(
+            "Multiple event values for single event times found. "
+            "Creating new event value to reflect simultaneous events."
+        )
+        new_events, event_id, new_selection = _merge_events(events, event_id, selection)
+        drop_ev_idxs = np.setdiff1d(selection, new_selection)
+        for idx in drop_ev_idxs:
+            drop_log[idx] = drop_log[idx] + ("MERGE DUPLICATE",)
+        selection = new_selection
+    drop_log = tuple(drop_log)
+
+    # Remove obsolete kv-pairs from event_id after handling
+    keys = new_events[:, 1:].flatten()
+    event_id = {k: v for k, v in event_id.items() if v in keys}
+
+    return new_events, event_id, selection, drop_log
+
+
+@fill_doc
+class BaseEpochs(
+    ProjMixin,
+    ContainsMixin,
+    UpdateChannelsMixin,
+    ReferenceMixin,
+    SetChannelsMixin,
+    InterpolationMixin,
+    FilterMixin,
+    ExtendedTimeMixin,
+    SizeMixin,
+    GetEpochsMixin,
+    EpochAnnotationsMixin,
+    SpectrumMixin,
+):
+    """Abstract base class for `~mne.Epochs`-type classes.
+
+    .. note::
+        This class should not be instantiated directly via
+        ``mne.BaseEpochs(...)``. Instead, use one of the functions listed in
+        the See Also section below.
+
+    Parameters
+    ----------
+    %(info_not_none)s
+    data : ndarray | None
+        If ``None``, data will be read from the Raw object. If ndarray, must be
+        of shape (n_epochs, n_channels, n_times).
+    %(events_epochs)s
+    %(event_id)s
+    %(epochs_tmin_tmax)s
+    %(baseline_epochs)s
+        Defaults to ``(None, 0)``, i.e. beginning of the data until
+        time point zero.
+    %(raw_epochs)s
+    %(picks_all)s
+    %(reject_epochs)s
+    %(flat)s
+    %(decim)s
+    %(epochs_reject_tmin_tmax)s
+    %(detrend_epochs)s
+    %(proj_epochs)s
+    %(on_missing_epochs)s
+    preload_at_end : bool
+        %(epochs_preload)s
+    %(selection)s
+
+        .. versionadded:: 0.16
+    %(drop_log)s
+    filename : Path | None
+        The filename (if the epochs are read from disk).
+    %(metadata_epochs)s
+
+        .. versionadded:: 0.16
+    %(event_repeated_epochs)s
+    %(raw_sfreq)s
+    annotations : instance of mne.Annotations | None
+        Annotations to set.
+    %(verbose)s
+
+    See Also
+    --------
+    Epochs
+    EpochsArray
+    make_fixed_length_epochs
+
+    Notes
+    -----
+    The ``BaseEpochs`` class is public to allow for stable type-checking in
+    user code (i.e., ``isinstance(my_epochs, BaseEpochs)``) but should not be
+    used as a constructor for Epochs objects (use instead :class:`mne.Epochs`).
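+
+    A minimal, hedged construction sketch via the public subclass (``raw``
+    and ``events`` here are placeholders, not defined in this module)::
+
+        epochs = mne.Epochs(raw, events, event_id={"stim": 1},
+                            tmin=-0.2, tmax=0.5, baseline=(None, 0))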
+ """ + + @verbose + def __init__( + self, + info, + data, + events, + event_id=None, + tmin=-0.2, + tmax=0.5, + baseline=(None, 0), + raw=None, + picks=None, + reject=None, + flat=None, + decim=1, + reject_tmin=None, + reject_tmax=None, + detrend=None, + proj=True, + on_missing="raise", + preload_at_end=False, + selection=None, + drop_log=None, + filename=None, + metadata=None, + event_repeated="error", + *, + raw_sfreq=None, + annotations=None, + verbose=None, + ): + if events is not None: # RtEpochs can have events=None + events = _ensure_events(events) + # Allow reading empty epochs (ToDo: Maybe not anymore in the future) + if len(events) == 0: + self._allow_empty = True + selection = None + else: + self._allow_empty = False + events_max = events.max() + if events_max > INT32_MAX: + raise ValueError( + f"events array values must not exceed {INT32_MAX}, " + f"got {events_max}" + ) + event_id = _check_event_id(event_id, events) + self.event_id = event_id + del event_id + + if events is not None: # RtEpochs can have events=None + for key, val in self.event_id.items(): + if val not in events[:, 2]: + msg = f"No matching events found for {key} (event id {val})" + _on_missing(on_missing, msg) + + # ensure metadata matches original events size + self.selection = np.arange(len(events)) + self.events = events + + # same as self.metadata = metadata, but suppress log in favor + # of logging below (after setting self.selection) + GetEpochsMixin.metadata.fset(self, metadata, verbose=False) + del events + + values = list(self.event_id.values()) + selected = np.where(np.isin(self.events[:, 2], values))[0] + if selection is None: + selection = selected + else: + selection = np.array(selection, int) + if selection.shape != (len(selected),): + raise ValueError( + f"selection must be shape {selected.shape} got shape " + f"{selection.shape}" + ) + self.selection = selection + if drop_log is None: + self.drop_log = tuple( + () if k in self.selection else ("IGNORED",) + for k in range(max(len(self.events), max(self.selection) + 1)) + ) + else: + self.drop_log = drop_log + + self.events = self.events[selected] + + ( + self.events, + self.event_id, + self.selection, + self.drop_log, + ) = _handle_event_repeated( + self.events, + self.event_id, + event_repeated, + self.selection, + self.drop_log, + ) + + # then subselect + sub = np.where(np.isin(selection, self.selection))[0] + if isinstance(metadata, list): + metadata = [metadata[s] for s in sub] + elif metadata is not None: + metadata = metadata.iloc[sub] + + # Remove temporarily set metadata from above, and set + # again to get the correct log ("adding metadata", instead of + # "replacing existing metadata") + GetEpochsMixin.metadata.fset(self, None, verbose=False) + self.metadata = metadata + del metadata + + n_events = len(self.events) + if n_events > 1: + if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0: + warn( + "The events passed to the Epochs constructor are not " + "chronologically ordered.", + RuntimeWarning, + ) + + if n_events > 0: + logger.info(f"{n_events} matching events found") + else: + # Allow reading empty epochs (ToDo: Maybe not anymore in the future) + if not self._allow_empty: + raise ValueError("No desired events found.") + else: + self.drop_log = tuple() + self.selection = np.array([], int) + self.metadata = metadata + # do not set self.events here, let subclass do it + + if (detrend not in [None, 0, 1]) or isinstance(detrend, bool): + raise ValueError("detrend must be None, 0, or 1") + self.detrend = detrend + + self._raw = 
raw
+        info._check_consistency()
+        self.picks = _picks_to_idx(
+            info, picks, none="all", exclude=(), allow_empty=False
+        )
+        self.info = pick_info(info, self.picks)
+        del info
+        self._current = 0
+
+        if data is None:
+            self.preload = False
+            self._data = None
+            self._do_baseline = True
+        else:
+            assert decim == 1
+            if (
+                data.ndim != 3
+                or data.shape[2] != round((tmax - tmin) * self.info["sfreq"]) + 1
+            ):
+                raise RuntimeError("bad data shape")
+            if data.shape[0] != len(self.events):
+                raise ValueError(
+                    "The number of epochs and the number of events must match"
+                )
+            self.preload = True
+            self._data = data
+            self._do_baseline = False
+        self._offset = None
+
+        if tmin > tmax:
+            raise ValueError("tmin has to be less than or equal to tmax")
+
+        # Handle times
+        sfreq = float(self.info["sfreq"])
+        start_idx = int(round(tmin * sfreq))
+        self._raw_times = np.arange(start_idx, int(round(tmax * sfreq)) + 1) / sfreq
+        self._set_times(self._raw_times)
+
+        # check reject_tmin and reject_tmax
+        if reject_tmin is not None:
+            if np.isclose(reject_tmin, tmin):
+                # adjust for potential small deviations due to sampling freq
+                reject_tmin = self.tmin
+            elif reject_tmin < tmin:
+                raise ValueError(
+                    f"reject_tmin needs to be None or >= tmin (got {reject_tmin})"
+                )
+
+        if reject_tmax is not None:
+            if np.isclose(reject_tmax, tmax):
+                # adjust for potential small deviations due to sampling freq
+                reject_tmax = self.tmax
+            elif reject_tmax > tmax:
+                raise ValueError(
+                    f"reject_tmax needs to be None or <= tmax (got {reject_tmax})"
+                )
+
+        if (reject_tmin is not None) and (reject_tmax is not None):
+            if reject_tmin >= reject_tmax:
+                raise ValueError(
+                    f"reject_tmin ({reject_tmin}) needs to be "
+                    f" < reject_tmax ({reject_tmax})"
+                )
+
+        self.reject_tmin = reject_tmin
+        self.reject_tmax = reject_tmax
+
+        # decimation
+        self._decim = 1
+        self.decimate(decim)
+
+        # baseline correction: replace `None` tuple elements with actual times
+        self.baseline = _check_baseline(
+            baseline, times=self.times, sfreq=self.info["sfreq"]
+        )
+        if self.baseline is not None and self.baseline != baseline:
+            logger.info(
+                f"Setting baseline interval to "
+                f"[{self.baseline[0]}, {self.baseline[1]}] s"
+            )
+
+        logger.info(_log_rescale(self.baseline))
+
+        # setup epoch rejection
+        self.reject = None
+        self.flat = None
+        self._reject_setup(reject, flat)
+
+        # do the rest
+        valid_proj = [True, "delayed", False]
+        if proj not in valid_proj:
+            raise ValueError(f'"proj" must be one of {valid_proj}, not {proj}')
+        if proj == "delayed":
+            self._do_delayed_proj = True
+            logger.info("Entering delayed SSP mode.")
+        else:
+            self._do_delayed_proj = False
+        activate = False if self._do_delayed_proj else proj
+        self._projector, self.info = setup_proj(self.info, False, activate=activate)
+        if preload_at_end:
+            assert self._data is None
+            assert self.preload is False
+            self.load_data()  # this will do the projection
+        elif proj is True and self._projector is not None and data is not None:
+            # let's make sure we project if data was provided and proj
+            # requested
+            # we could do this with np.einsum, but iteration should be
+            # more memory safe in most instances
+            for ii, epoch in enumerate(self._data):
+                self._data[ii] = np.dot(self._projector, epoch)
+        self.filename = filename
+        if raw_sfreq is None:
+            raw_sfreq = self.info["sfreq"]
+        self._raw_sfreq = raw_sfreq
+        self._check_consistency()
+        self.set_annotations(annotations, on_missing="ignore")
+
+    def _check_consistency(self):
+        """Check invariants of
+        if hasattr(self, "events"):
+            assert len(self.selection) == len(self.events)
+            assert len(self.drop_log) >= len(self.events)
+            assert len(self.selection) == sum(len(dl) == 0 for dl in self.drop_log)
+        assert hasattr(self, "_times_readonly")
+        assert not self.times.flags["WRITEABLE"]
+        assert isinstance(self.drop_log, tuple)
+        assert all(isinstance(log, tuple) for log in self.drop_log)
+        assert all(isinstance(s, str) for log in self.drop_log for s in log)
+
+    def reset_drop_log_selection(self):
+        """Reset the drop_log and selection entries.
+
+        This method will simplify ``self.drop_log`` and ``self.selection``
+        so that they are meaningless (tuple of empty tuples and increasing
+        integers, respectively). This can be useful when concatenating
+        many Epochs instances, as ``drop_log`` can accumulate many entries
+        which can become problematic when saving.
+        """
+        self.selection = np.arange(len(self.events))
+        self.drop_log = (tuple(),) * len(self.events)
+        self._check_consistency()
+
+    def load_data(self):
+        """Load the data if not already preloaded.
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The epochs object.
+
+        Notes
+        -----
+        This function operates in-place.
+
+        .. versionadded:: 0.10.0
+        """
+        if self.preload:
+            return self
+        self._data = self._get_data()
+        self.preload = True
+        self._do_baseline = False
+        self._decim_slice = slice(None, None, None)
+        self._decim = 1
+        self._raw_times = self.times
+        assert self._data.shape[-1] == len(self.times)
+        self._raw = None  # shouldn't need it anymore
+        return self
+
+    @verbose
+    def apply_baseline(self, baseline=(None, 0), *, verbose=None):
+        """Baseline correct epochs.
+
+        Parameters
+        ----------
+        %(baseline_epochs)s
+            Defaults to ``(None, 0)``, i.e. beginning of the data until
+            time point zero.
+        %(verbose)s
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The baseline-corrected Epochs object.
+
+        Notes
+        -----
+        Baseline correction can be done multiple times, but can never be
+        reverted once the data has been loaded.
+
+        .. versionadded:: 0.10.0
+        """
+        baseline = _check_baseline(baseline, times=self.times, sfreq=self.info["sfreq"])
+
+        if self.preload:
+            if self.baseline is not None and baseline is None:
+                raise RuntimeError(
+                    "You cannot remove baseline correction "
+                    "from preloaded data once it has been "
+                    "applied."
+                )
+            self._do_baseline = True
+            picks = self._detrend_picks
+            rescale(self._data, self.times, baseline, copy=False, picks=picks)
+            self._do_baseline = False
+        else:  # logging happens in "rescale" in "if" branch
+            logger.info(_log_rescale(baseline))
+            # For EpochsArray and Epochs, this is already True:
+            # assert self._do_baseline is True
+            # ...
but for EpochsFIF it's not, so let's set it explicitly + self._do_baseline = True + self.baseline = baseline + return self + + def _reject_setup(self, reject, flat, *, allow_callable=False): + """Set self._reject_time and self._channel_type_idx.""" + idx = channel_indices_by_type(self.info) + reject = deepcopy(reject) if reject is not None else dict() + flat = deepcopy(flat) if flat is not None else dict() + for rej, kind in zip((reject, flat), ("reject", "flat")): + _validate_type(rej, dict, kind) + bads = set(rej.keys()) - set(idx.keys()) + if len(bads) > 0: + raise KeyError(f"Unknown channel types found in {kind}: {bads}") + + for key in idx.keys(): + # don't throw an error if rejection/flat would do nothing + if len(idx[key]) == 0 and ( + np.isfinite(reject.get(key, np.inf)) or flat.get(key, -1) >= 0 + ): + # This is where we could eventually add e.g. + # self.allow_missing_reject_keys check to allow users to + # provide keys that don't exist in data + raise ValueError( + f"No {key.upper()} channel found. Cannot reject based on " + f"{key.upper()}." + ) + + # check for invalid values + for rej, kind in zip((reject, flat), ("Rejection", "Flat")): + for key, val in rej.items(): + name = f"{kind} dict value for {key}" + if callable(val) and allow_callable: + continue + extra_str = "" + if allow_callable: + extra_str = "or callable" + _validate_type(val, "numeric", name, extra=extra_str) + if val is None or val < 0: + raise ValueError( + f"If using numerical {name} criteria, the value " + f"must be >= 0, not {repr(val)}" + ) + + # now check to see if our rejection and flat are getting more + # restrictive + old_reject = self.reject if self.reject is not None else dict() + old_flat = self.flat if self.flat is not None else dict() + bad_msg = ( + '{kind}["{key}"] == {new} {op} {old} (old value), new ' + "{kind} values must be at least as stringent as " + "previous ones" + ) + + # copy thresholds for channel types that were used previously, but not + # passed this time + for key in set(old_reject) - set(reject): + reject[key] = old_reject[key] + # make sure new thresholds are at least as stringent as the old ones + for key in reject: + # Skip this check if old_reject and reject are callables + if callable(reject[key]) and allow_callable: + continue + if key in old_reject and reject[key] > old_reject[key]: + raise ValueError( + bad_msg.format( + kind="reject", + key=key, + new=reject[key], + old=old_reject[key], + op=">", + ) + ) + + # same for flat thresholds + for key in set(old_flat) - set(flat): + flat[key] = old_flat[key] + for key in flat: + if callable(flat[key]) and allow_callable: + continue + if key in old_flat and flat[key] < old_flat[key]: + raise ValueError( + bad_msg.format( + kind="flat", key=key, new=flat[key], old=old_flat[key], op="<" + ) + ) + + # after validation, set parameters + self._bad_dropped = False + self._channel_type_idx = idx + self.reject = reject if len(reject) > 0 else None + self.flat = flat if len(flat) > 0 else None + + if (self.reject_tmin is None) and (self.reject_tmax is None): + self._reject_time = None + else: + if self.reject_tmin is None: + reject_imin = None + else: + idxs = np.nonzero(self.times >= self.reject_tmin)[0] + reject_imin = idxs[0] + if self.reject_tmax is None: + reject_imax = None + else: + idxs = np.nonzero(self.times <= self.reject_tmax)[0] + reject_imax = idxs[-1] + self._reject_time = slice(reject_imin, reject_imax) + + @verbose # verbose is used by mne-realtime + def _is_good_epoch(self, data, verbose=None): + """Determine if 
epoch is good."""
+        if isinstance(data, str):
+            return False, (data,)
+        if data is None:
+            return False, ("NO_DATA",)
+        n_times = len(self.times)
+        if data.shape[1] < n_times:
+            # epoch is too short, i.e., at the end of the data
+            return False, ("TOO_SHORT",)
+        if self.reject is None and self.flat is None:
+            return True, None
+        else:
+            if self._reject_time is not None:
+                data = data[:, self._reject_time]
+
+            return _is_good(
+                data,
+                self.ch_names,
+                self._channel_type_idx,
+                self.reject,
+                self.flat,
+                full_report=True,
+                ignore_chs=self.info["bads"],
+            )
+
+    @verbose
+    def _detrend_offset_decim(self, epoch, picks, verbose=None):
+        """Aux Function: detrend, baseline correct, offset, decim.
+
+        Note: operates inplace
+        """
+        if (epoch is None) or isinstance(epoch, str):
+            return epoch
+
+        # Detrend
+        if self.detrend is not None:
+            # We explicitly detrend just data channels (not EMG, ECG, EOG which
+            # are processed by baseline correction)
+            use_picks = _pick_data_channels(self.info, exclude=())
+            epoch[use_picks] = detrend(epoch[use_picks], self.detrend, axis=1)
+
+        # Baseline correct
+        if self._do_baseline:
+            rescale(
+                epoch,
+                self._raw_times,
+                self.baseline,
+                picks=picks,
+                copy=False,
+                verbose=False,
+            )
+
+        # Decimate if necessary (i.e., epoch not preloaded)
+        epoch = epoch[:, self._decim_slice]
+
+        # handle offset
+        if self._offset is not None:
+            epoch += self._offset
+
+        return epoch
+
+    def iter_evoked(self, copy=False):
+        """Iterate over epochs as a sequence of Evoked objects.
+
+        The Evoked objects yielded will each contain a single epoch (i.e., no
+        averaging is performed).
+
+        This method resets the object iteration state to the first epoch.
+
+        Parameters
+        ----------
+        copy : bool
+            If False, copies of data and measurement info will be omitted
+            to save time.
+        """
+        self.__iter__()
+
+        while True:
+            try:
+                out = self.__next__(True)
+            except StopIteration:
+                break
+            data, event_id = out
+            tmin = self.times[0]
+            info = self.info
+            if copy:
+                info = deepcopy(self.info)
+                data = data.copy()
+
+            yield EvokedArray(data, info, tmin, comment=str(event_id))
+
+    def subtract_evoked(self, evoked=None):
+        """Subtract an evoked response from each epoch.
+
+        Can be used to exclude the evoked response when analyzing induced
+        activity, see e.g. [1]_.
+
+        Parameters
+        ----------
+        evoked : instance of Evoked | None
+            The evoked response to subtract. If None, the evoked response
+            is computed from Epochs itself.
+
+        Returns
+        -------
+        self : instance of Epochs
+            The modified instance (instance is also modified inplace).
+
+        References
+        ----------
+        .. [1] David et al. "Mechanisms of evoked and induced responses in
+           MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006.
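+
+        Examples
+        --------
+        A minimal sketch, assuming ``epochs`` is an existing, preloaded
+        :class:`mne.Epochs` instance (the variable names are illustrative
+        only):
+
+        >>> evoked = epochs.average()  # doctest:+SKIP
+        >>> induced = epochs.copy().subtract_evoked(evoked)  # doctest:+SKIP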
+ """ + logger.info("Subtracting Evoked from Epochs") + if evoked is None: + picks = _pick_data_channels(self.info, exclude=[]) + evoked = self.average(picks) + + # find the indices of the channels to use + picks = pick_channels(evoked.ch_names, include=self.ch_names, ordered=False) + + # make sure the omitted channels are not data channels + if len(picks) < len(self.ch_names): + sel_ch = [evoked.ch_names[ii] for ii in picks] + diff_ch = list(set(self.ch_names).difference(sel_ch)) + diff_idx = [self.ch_names.index(ch) for ch in diff_ch] + diff_types = [channel_type(self.info, idx) for idx in diff_idx] + bad_idx = [ + diff_types.index(t) for t in diff_types if t in _DATA_CH_TYPES_SPLIT + ] + if len(bad_idx) > 0: + bad_str = ", ".join([diff_ch[ii] for ii in bad_idx]) + raise ValueError( + "The following data channels are missing " + f"in the evoked response: {bad_str}" + ) + logger.info( + " The following channels are not included in the subtraction: " + + ", ".join(diff_ch) + ) + + # make sure the times match + if ( + len(self.times) != len(evoked.times) + or np.max(np.abs(self.times - evoked.times)) >= 1e-7 + ): + raise ValueError( + "Epochs and Evoked object do not contain the same time points." + ) + + # handle SSPs + if not self.proj and evoked.proj: + warn("Evoked has SSP applied while Epochs has not.") + if self.proj and not evoked.proj: + evoked = evoked.copy().apply_proj() + + # find the indices of the channels to use in Epochs + ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks] + + # do the subtraction + if self.preload: + self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :] + else: + if self._offset is None: + self._offset = np.zeros( + (len(self.ch_names), len(self.times)), dtype=np.float64 + ) + self._offset[ep_picks] -= evoked.data[picks] + logger.info("[done]") + + return self + + @fill_doc + def average(self, picks=None, method="mean", by_event_type=False): + """Compute an average over epochs. + + Parameters + ---------- + %(picks_all_data)s + method : str | callable + How to combine the data. If "mean"/"median", the mean/median + are returned. + Otherwise, must be a callable which, when passed an array of shape + (n_epochs, n_channels, n_time) returns an array of shape + (n_channels, n_time). + Note that due to file type limitations, the kind for all + these will be "average". + %(by_event_type)s + + Returns + ------- + %(evoked_by_event_type_returns)s + + Notes + ----- + Computes an average of all epochs in the instance, even if + they correspond to different conditions. To average by condition, + do ``epochs[condition].average()`` for each condition separately. + + When picks is None and epochs contain only ICA channels, no channels + are selected, resulting in an error. This is because ICA channels + are not considered data channels (they are of misc type) and only data + channels are selected when picks is None. + + The ``method`` parameter allows e.g. robust averaging. + For example, one could do: + + >>> from scipy.stats import trim_mean # doctest:+SKIP + >>> trim = lambda x: trim_mean(x, 0.1, axis=0) # doctest:+SKIP + >>> epochs.average(method=trim) # doctest:+SKIP + + This would compute the trimmed mean. 
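+
+        Similarly, a sketch of per-condition averaging, assuming the instance
+        was constructed with an event type named ``"auditory"`` (the name is
+        illustrative only):
+
+        >>> evokeds = epochs.average(by_event_type=True)  # doctest:+SKIP
+        >>> aud = epochs["auditory"].average()  # doctest:+SKIP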
+        """
+        self._handle_empty("raise", "average")
+        if by_event_type:
+            evokeds = list()
+            for event_type in self.event_id.keys():
+                ev = self[event_type]._compute_aggregate(picks=picks, mode=method)
+                ev.comment = event_type
+                evokeds.append(ev)
+        else:
+            evokeds = self._compute_aggregate(picks=picks, mode=method)
+        return evokeds
+
+    @fill_doc
+    def standard_error(self, picks=None, by_event_type=False):
+        """Compute standard error over epochs.
+
+        Parameters
+        ----------
+        %(picks_all_data)s
+        %(by_event_type)s
+
+        Returns
+        -------
+        %(std_err_by_event_type_returns)s
+        """
+        return self.average(picks=picks, method="std", by_event_type=by_event_type)
+
+    def _compute_aggregate(self, picks, mode="mean"):
+        """Compute the mean, median, or std over epochs and return Evoked."""
+        # if instance contains ICA channels they won't be included unless picks
+        # is specified
+        if picks is None:
+            check_ICA = [x.startswith("ICA") for x in self.ch_names]
+            if np.all(check_ICA):
+                raise TypeError(
+                    "picks must be specified (i.e. not None) for ICA channel data"
+                )
+            elif np.any(check_ICA):
+                warn(
+                    "ICA channels will not be included unless explicitly "
+                    "selected in picks"
+                )
+
+        n_channels = len(self.ch_names)
+        n_times = len(self.times)
+
+        if self.preload:
+            n_events = len(self.events)
+            fun = _check_combine(mode, valid=("mean", "median", "std"))
+            data = fun(self._data)
+            assert len(self.events) == len(self._data)
+            if data.shape != self._data.shape[1:]:
+                raise RuntimeError(
+                    f"You passed a function that resulted in data of shape "
+                    f"{data.shape}, but it should be {self._data.shape[1:]}."
+                )
+        else:
+            if mode not in {"mean", "std"}:
+                raise ValueError(
+                    "If data are not preloaded, can only compute "
+                    "mean or standard deviation."
+                )
+            data = np.zeros((n_channels, n_times))
+            n_events = 0
+            for e in self:
+                if np.iscomplexobj(e):
+                    data = data.astype(np.complex128)
+                data += e
+                n_events += 1
+
+            if n_events > 0:
+                data /= n_events
+            else:
+                data.fill(np.nan)
+
+            # convert to stderr if requested, could do in one pass but do in
+            # two (slower) in case there are large numbers
+            if mode == "std":
+                data_mean = data.copy()
+                data.fill(0.0)
+                for e in self:
+                    data += (e - data_mean) ** 2
+                data = np.sqrt(data / n_events)
+
+        if mode == "std":
+            kind = "standard_error"
+            data /= np.sqrt(n_events)
+        else:
+            kind = "average"
+
+        return self._evoked_from_epoch_data(
+            data, self.info, picks, n_events, kind, self._name
+        )
+
+    @property
+    def _name(self):
+        """Give a nice string representation based on event ids."""
+        return self._get_name()
+
+    def _get_name(self, count="frac", ms="×", sep="+"):
+        """Generate human-readable name for epochs and evokeds from event_id.
+
+        Parameters
+        ----------
+        count : 'frac' | 'total'
+            Whether to include the fraction or total number of epochs that each
+            event type contributes to the number of all epochs.
+            Ignored if only one event type is present.
+        ms : str | None
+            The multiplication sign to use. Pass ``None`` to omit the sign.
+            Ignored if only one event type is present.
+        sep : str
+            How to separate the different event names. Ignored if only one
+            event type is present.
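+
+        For example, with 30 epochs of type ``"a"`` and 10 of type ``"b"``,
+        the defaults would yield ``"0.75 × a + 0.25 × b"``, while
+        ``count="total"`` would yield ``"30 × a + 10 × b"``.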
+ """ + _check_option("count", value=count, allowed_values=["frac", "total"]) + + if len(self.event_id) == 1: + comment = next(iter(self.event_id.keys())) + else: + counter = Counter(self.events[:, 2]) + comments = list() + + # Take care of padding + if ms is None: + ms = " " + else: + ms = f" {ms} " + + for event_name, event_code in self.event_id.items(): + if count == "frac": + frac = float(counter[event_code]) / len(self.events) + comment = f"{frac:.2f}{ms}{event_name}" + else: # 'total' + comment = f"{counter[event_code]}{ms}{event_name}" + comments.append(comment) + + comment = f" {sep} ".join(comments) + return comment + + def _evoked_from_epoch_data(self, data, info, picks, n_events, kind, comment): + """Create an evoked object from epoch data.""" + info = deepcopy(info) + # don't apply baseline correction; we'll set evoked.baseline manually + evoked = EvokedArray( + data, + info, + tmin=self.times[0], + comment=comment, + nave=n_events, + kind=kind, + baseline=None, + ) + evoked.baseline = self.baseline + + # the above constructor doesn't recreate the times object precisely + # due to numerical precision issues + evoked._set_times(self.times.copy()) + + # pick channels + picks = _picks_to_idx(self.info, picks, "data_or_ica", ()) + ch_names = [evoked.ch_names[p] for p in picks] + evoked.pick(ch_names) + + if len(evoked.info["ch_names"]) == 0: + raise ValueError("No data channel found when averaging.") + + if evoked.nave < 1: + warn("evoked object is empty (based on less than 1 epoch)") + + return evoked + + @property + def ch_names(self): + """Channel names.""" + return self.info["ch_names"] + + @copy_function_doc_to_method_doc(plot_epochs) + def plot( + self, + picks=None, + scalings=None, + n_epochs=20, + n_channels=20, + title=None, + events=False, + event_color=None, + order=None, + show=True, + block=False, + decim="auto", + noise_cov=None, + butterfly=False, + show_scrollbars=True, + show_scalebars=True, + epoch_colors=None, + event_id=None, + group_by="type", + precompute=None, + use_opengl=None, + *, + theme=None, + overview_mode=None, + splash=True, + ): + return plot_epochs( + self, + picks=picks, + scalings=scalings, + n_epochs=n_epochs, + n_channels=n_channels, + title=title, + events=events, + event_color=event_color, + order=order, + show=show, + block=block, + decim=decim, + noise_cov=noise_cov, + butterfly=butterfly, + show_scrollbars=show_scrollbars, + show_scalebars=show_scalebars, + epoch_colors=epoch_colors, + event_id=event_id, + group_by=group_by, + precompute=precompute, + use_opengl=use_opengl, + theme=theme, + overview_mode=overview_mode, + splash=splash, + ) + + @copy_function_doc_to_method_doc(plot_topo_image_epochs) + def plot_topo_image( + self, + layout=None, + sigma=0.0, + vmin=None, + vmax=None, + colorbar=None, + order=None, + cmap="RdBu_r", + layout_scale=0.95, + title=None, + scalings=None, + border="none", + fig_facecolor="k", + fig_background=None, + font_color="w", + show=True, + ): + return plot_topo_image_epochs( + self, + layout=layout, + sigma=sigma, + vmin=vmin, + vmax=vmax, + colorbar=colorbar, + order=order, + cmap=cmap, + layout_scale=layout_scale, + title=title, + scalings=scalings, + border=border, + fig_facecolor=fig_facecolor, + fig_background=fig_background, + font_color=font_color, + show=show, + ) + + @verbose + def drop_bad(self, reject="existing", flat="existing", verbose=None): + """Drop bad epochs without retaining the epochs data. + + Should be used before slicing operations. + + .. 
warning:: This operation is slow since all epochs have to be read
+                     from disk. To avoid reading epochs from disk multiple
+                     times, use :meth:`mne.Epochs.load_data()`.
+
+        .. note:: To constrain the time period used for estimation of signal
+                  quality, set ``epochs.reject_tmin`` and
+                  ``epochs.reject_tmax``, respectively.
+
+        Parameters
+        ----------
+        %(reject_drop_bad)s
+        %(flat_drop_bad)s
+        %(verbose)s
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The epochs with bad epochs dropped. Operates in-place.
+
+        Notes
+        -----
+        Dropping bad epochs can be done multiple times with different
+        ``reject`` and ``flat`` parameters. However, once an epoch is
+        dropped, it is dropped forever, so if more lenient thresholds may
+        subsequently be applied, :meth:`epochs.copy <mne.Epochs.copy>` should
+        be used.
+        """
+        if reject == "existing":
+            if flat == "existing" and self._bad_dropped:
+                return
+            reject = self.reject
+        if flat == "existing":
+            flat = self.flat
+        if any(isinstance(rej, str) and rej != "existing" for rej in (reject, flat)):
+            raise ValueError('reject and flat, if strings, must be "existing"')
+        self._reject_setup(reject, flat, allow_callable=True)
+        self._get_data(out=False, verbose=verbose)
+        return self
+
+    def drop_log_stats(self, ignore=("IGNORED",)):
+        """Compute the channel stats based on a drop_log from Epochs.
+
+        Parameters
+        ----------
+        ignore : list
+            The drop reasons to ignore.
+
+        Returns
+        -------
+        perc : float
+            Total percentage of epochs dropped.
+
+        See Also
+        --------
+        plot_drop_log
+        """
+        return _drop_log_stats(self.drop_log, ignore)
+
+    @copy_function_doc_to_method_doc(plot_drop_log)
+    def plot_drop_log(
+        self,
+        threshold=0,
+        n_max_plot=20,
+        subject=None,
+        color=(0.9, 0.9, 0.9),
+        width=0.8,
+        ignore=("IGNORED",),
+        show=True,
+    ):
+        if not self._bad_dropped:
+            raise ValueError(
+                "You cannot use plot_drop_log since bad "
+                "epochs have not yet been dropped. "
+                "Use epochs.drop_bad()."
+            )
+        return plot_drop_log(
+            self.drop_log,
+            threshold,
+            n_max_plot,
+            subject,
+            color=color,
+            width=width,
+            ignore=ignore,
+            show=show,
+        )
+
+    @copy_function_doc_to_method_doc(plot_epochs_image)
+    def plot_image(
+        self,
+        picks=None,
+        sigma=0.0,
+        vmin=None,
+        vmax=None,
+        colorbar=True,
+        order=None,
+        show=True,
+        units=None,
+        scalings=None,
+        cmap=None,
+        fig=None,
+        axes=None,
+        overlay_times=None,
+        combine=None,
+        group_by=None,
+        evoked=True,
+        ts_args=None,
+        title=None,
+        clear=False,
+    ):
+        return plot_epochs_image(
+            self,
+            picks=picks,
+            sigma=sigma,
+            vmin=vmin,
+            vmax=vmax,
+            colorbar=colorbar,
+            order=order,
+            show=show,
+            units=units,
+            scalings=scalings,
+            cmap=cmap,
+            fig=fig,
+            axes=axes,
+            overlay_times=overlay_times,
+            combine=combine,
+            group_by=group_by,
+            evoked=evoked,
+            ts_args=ts_args,
+            title=title,
+            clear=clear,
+        )
+
+    @verbose
+    def drop(self, indices, reason="USER", verbose=None):
+        """Drop epochs based on indices or boolean mask.
+
+        .. note:: The indices refer to the current set of undropped epochs
+                  rather than the complete set of dropped and undropped epochs.
+                  They are therefore not necessarily consistent with any
+                  external indices (e.g., behavioral logs). To drop epochs
+                  based on external criteria, do not use the ``preload=True``
+                  flag when constructing an Epochs object, and call this
+                  method before calling the :meth:`mne.Epochs.drop_bad` or
+                  :meth:`mne.Epochs.load_data` methods.
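+
+        For example (illustrative numbers only): if two of ten epochs have
+        already been dropped, ``drop([0])`` removes the first of the eight
+        *remaining* epochs, which is not necessarily the first epoch of the
+        original recording.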
+ + Parameters + ---------- + indices : array of int or bool + Set epochs to remove by specifying indices to remove or a boolean + mask to apply (where True values get removed). Events are + correspondingly modified. + reason : list | tuple | str + Reason(s) for dropping the epochs ('ECG', 'timeout', 'blink' etc). + Reason(s) are applied to all indices specified. + Default: 'USER'. + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The epochs with indices dropped. Operates in-place. + """ + indices = np.atleast_1d(indices) + + if indices.ndim > 1: + raise TypeError("indices must be a scalar or a 1-d array") + # Check if indices and reasons are of the same length + # if using collection to drop epochs + + if indices.dtype == np.dtype(bool): + indices = np.where(indices)[0] + try_idx = np.where(indices < 0, indices + len(self.events), indices) + + out_of_bounds = (try_idx < 0) | (try_idx >= len(self.events)) + if out_of_bounds.any(): + first = indices[out_of_bounds][0] + raise IndexError(f"Epoch index {first} is out of bounds") + keep = np.setdiff1d(np.arange(len(self.events)), try_idx) + self._getitem(keep, reason, copy=False, drop_event_id=False) + count = len(try_idx) + logger.info( + "Dropped %d epoch%s: %s", + count, + _pl(count), + ", ".join(map(str, np.sort(try_idx))), + ) + + return self + + def _get_epoch_from_raw(self, idx, verbose=None): + """Get a given epoch from disk.""" + raise NotImplementedError + + def _project_epoch(self, epoch): + """Process a raw epoch based on the delayed param.""" + # whenever requested, the first epoch is being projected. + if (epoch is None) or isinstance(epoch, str): + # can happen if t < 0 or reject based on annotations + return epoch + proj = self._do_delayed_proj or self.proj + if self._projector is not None and proj is True: + epoch = np.dot(self._projector, epoch) + return epoch + + def _handle_empty(self, on_empty, meth): + if len(self.events) == 0: + msg = ( + f"epochs.{meth}() can't run because this Epochs-object is empty. " + f"You might want to check Epochs.drop_log or Epochs.plot_drop_log()" + f" to see why epochs were dropped." + ) + _on_missing(on_empty, msg, error_klass=RuntimeError) + + @verbose + def _get_data( + self, + out=True, + picks=None, + item=None, + *, + units=None, + tmin=None, + tmax=None, + copy=False, + on_empty="warn", + verbose=None, + ): + """Load all data, dropping bad epochs along the way. + + Parameters + ---------- + out : bool + Return the data. Setting this to False is used to reject bad + epochs without caching all the data, which saves memory. + %(picks_all)s + item : slice | array-like | str | list | None + See docstring of get_data method. + %(units)s + tmin : int | float | None + Start time of data to get in seconds. + tmax : int | float | None + End time of data to get in seconds. 
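+        copy : bool
+            Whether to return a copy of the data rather than (when possible)
+            a view.
+        on_empty : 'raise' | 'warn' | 'ignore'
+            What to do if no epochs are available, following the usual MNE
+            ``on_missing`` semantics.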
+ %(verbose)s + """ + from .io.base import _get_ch_factors + + if copy is not None: + _validate_type(copy, bool, "copy") + + # Handle empty epochs + self._handle_empty(on_empty, "_get_data") + # if called with 'out=False', the call came from 'drop_bad()' + # if no reasons to drop, just declare epochs as good and return + if not out: + # make sure first and last epoch not out of bounds of raw + in_bounds = self.preload or ( + self._get_epoch_from_raw(idx=0) is not None + and self._get_epoch_from_raw(idx=-1) is not None + ) + # might be BaseEpochs or Epochs, only the latter has the attribute + reject_by_annotation = getattr(self, "reject_by_annotation", False) + if ( + self.reject is None + and self.flat is None + and in_bounds + and self._reject_time is None + and not reject_by_annotation + ): + logger.debug("_get_data is a noop, returning") + self._bad_dropped = True + return None + start, stop = self._handle_tmin_tmax(tmin, tmax) + + if item is None: + item = slice(None) + elif not self._bad_dropped: + raise ValueError( + "item must be None in epochs.get_data() unless bads have been " + "dropped. Consider using epochs.drop_bad()." + ) + select = self._item_to_select(item) # indices or slice + use_idx = np.arange(len(self.events))[select] + n_events = len(use_idx) + # in case there are no good events + if self.preload: + # we will store our result in our existing array + data = self._data + else: + # we start out with an empty array, allocate only if necessary + data = np.empty((0, len(self.info["ch_names"]), len(self.times))) + msg = ( + f"for {n_events} events and {len(self._raw_times)} " + "original time points" + ) + if self._decim > 1: + msg += " (prior to decimation)" + if getattr(self._raw, "preload", False): + logger.info(f"Using data from preloaded Raw {msg} ...") + else: + logger.info(f"Loading data {msg} ...") + + orig_picks = picks + if orig_picks is None: + picks = _picks_to_idx(self.info, picks, "all", exclude=()) + else: + picks = _picks_to_idx(self.info, picks) + + # handle units param only if we are going to return data (out==True) + if (units is not None) and out: + ch_factors = _get_ch_factors(self, units, picks) + else: + ch_factors = None + + if self._bad_dropped: + if not out: + return + if self.preload: + return self._data_sel_copy_scale( + data, + select=select, + orig_picks=orig_picks, + picks=picks, + ch_factors=ch_factors, + start=start, + stop=stop, + copy=copy, + ) + + # we need to load from disk, drop, and return data + detrend_picks = self._detrend_picks + for ii, idx in enumerate(use_idx): + # faster to pre-allocate memory here + epoch_noproj = self._get_epoch_from_raw(idx) + epoch_noproj = self._detrend_offset_decim(epoch_noproj, detrend_picks) + if self._do_delayed_proj: + epoch_out = epoch_noproj + else: + epoch_out = self._project_epoch(epoch_noproj) + if ii == 0: + data = np.empty( + (n_events, len(self.ch_names), len(self.times)), + dtype=epoch_out.dtype, + ) + data[ii] = epoch_out + else: + # bads need to be dropped, this might occur after a preload + # e.g., when calling drop_bad w/new params + good_idx = [] + n_out = 0 + drop_log = list(self.drop_log) + assert n_events == len(self.selection) + if not self.preload: + detrend_picks = self._detrend_picks + for idx, sel in enumerate(self.selection): + if self.preload: # from memory + if self._do_delayed_proj: + epoch_noproj = self._data[idx] + epoch = self._project_epoch(epoch_noproj) + else: + epoch_noproj = None + epoch = self._data[idx] + else: # from disk + epoch_noproj = 
self._get_epoch_from_raw(idx)
+                    epoch_noproj = self._detrend_offset_decim(
+                        epoch_noproj, detrend_picks
+                    )
+                    epoch = self._project_epoch(epoch_noproj)
+
+                epoch_out = epoch_noproj if self._do_delayed_proj else epoch
+                is_good, bad_tuple = self._is_good_epoch(epoch, verbose=verbose)
+                if not is_good:
+                    assert isinstance(bad_tuple, tuple)
+                    assert all(isinstance(x, str) for x in bad_tuple)
+                    drop_log[sel] = drop_log[sel] + bad_tuple
+                    continue
+                good_idx.append(idx)
+
+                # store the epoch if there is a reason to (output or update)
+                if out or self.preload:
+                    # faster to pre-allocate, then trim as necessary
+                    if n_out == 0 and not self.preload:
+                        data = np.empty(
+                            (n_events, epoch_out.shape[0], epoch_out.shape[1]),
+                            dtype=epoch_out.dtype,
+                            order="C",
+                        )
+                    data[n_out] = epoch_out
+                    n_out += 1
+            self.drop_log = tuple(drop_log)
+            del drop_log
+
+            self._bad_dropped = True
+            n_bads_dropped = n_events - len(good_idx)
+            logger.info(f"{n_bads_dropped} bad epochs dropped")
+
+            if n_bads_dropped == n_events:
+                warn(
+                    "All epochs were dropped!\n"
+                    "You might need to alter reject/flat-criteria "
+                    "or drop bad channels to avoid this. "
+                    "You can use Epochs.plot_drop_log() to see which "
+                    "channels are responsible for the dropping of epochs."
+                )
+
+            # adjust the data size if there is a reason to (output or update)
+            if out or self.preload:
+                if data.flags["OWNDATA"] and data.flags["C_CONTIGUOUS"]:
+                    data.resize((n_out,) + data.shape[1:], refcheck=False)
+                else:
+                    data = data[:n_out]
+                if self.preload:
+                    self._data = data
+
+            # Now update our properties (except data, which is already fixed)
+            self._getitem(
+                good_idx, None, copy=False, drop_event_id=False, select_data=False
+            )
+
+        if not out:
+            return
+        return self._data_sel_copy_scale(
+            data,
+            select=slice(None),
+            orig_picks=orig_picks,
+            picks=picks,
+            ch_factors=ch_factors,
+            start=start,
+            stop=stop,
+            copy=copy,
+        )
+
+    def _data_sel_copy_scale(
+        self, data, *, select, orig_picks, picks, ch_factors, start, stop, copy
+    ):
+        # data arg starts out as self._data when data is preloaded
+        data_is_self_data = bool(self.preload)
+        logger.debug(f"Data is self data: {data_is_self_data}")
+        # only two types of epoch subselection allowed
+        assert isinstance(select, slice | np.ndarray), type(select)
+        if not isinstance(select, slice):
+            logger.debug(" Copying, fancy indexed epochs")
+            data_is_self_data = False  # copy (fancy indexing)
+        elif select != slice(None):
+            logger.debug(" Slicing epochs")
+        if orig_picks is not None:
+            logger.debug(" Copying, fancy indexed picks")
+            assert isinstance(picks, np.ndarray), type(picks)
+            data_is_self_data = False  # copy (fancy indexing)
+        else:
+            picks = slice(None)
+        if not all(isinstance(x, slice) and x == slice(None) for x in (select, picks)):
+            data = data[select][:, picks]
+        del picks
+        if start != 0 or stop != self.times.size:
+            logger.debug(" Slicing time")
+            data = data[..., start:stop]  # view (slice)
+        if ch_factors is not None:
+            if data_is_self_data:
+                logger.debug(" Copying, scale factors applied")
+                data = data.copy()
+                data_is_self_data = False
+            data *= ch_factors[:, np.newaxis]
+        if not data_is_self_data:
+            return data
+        if copy:
+            logger.debug(" Copying, copy=True")
+            data = data.copy()
+        return data
+
+    @property
+    def _detrend_picks(self):
+        if self._do_baseline:
+            return _pick_data_channels(
+                self.info, with_ref_meg=True, with_aux=True, exclude=()
+            )
+        else:
+            return []
+
+    @verbose
+    def get_data(
+        self,
+        picks=None,
+        item=None,
+        units=None,
+        tmin=None,
+        tmax=None,
+        *,
+        copy=True,
+        verbose=None,
+    ):
+        """Get all epochs as a 3D array.
+
+        Parameters
+        ----------
+        %(picks_all)s
+        item : slice | array-like | str | list | None
+            The items to get. See :meth:`mne.Epochs.__getitem__` for
+            a description of valid options. This can be substantially faster
+            for obtaining an ndarray than :meth:`~mne.Epochs.__getitem__`
+            for repeated access on large Epochs objects.
+            None (default) is an alias for ``slice(None)``.
+
+            .. versionadded:: 0.20
+        %(units)s
+
+            .. versionadded:: 0.24
+        tmin : int | float | None
+            Start time of data to get in seconds.
+
+            .. versionadded:: 0.24.0
+        tmax : int | float | None
+            End time of data to get in seconds.
+
+            .. versionadded:: 0.24.0
+        copy : bool
+            Whether to return a copy of the object's data, or (if possible) a view.
+            See :ref:`the NumPy docs <numpy:basics.copies-and-views>` for an
+            explanation. Default is ``True`` (since 1.7; in 1.6 the default was
+            ``False``). A view is only possible when ``item is None``,
+            ``picks is None``, ``units is None``, and data are preloaded.
+
+            .. warning::
+               Using ``copy=False`` and then modifying the returned ``data`` will in
+               turn modify the Epochs object. Use with caution!
+
+            .. versionchanged:: 1.7
+               The default changed from ``False`` to ``True``.
+
+            .. versionadded:: 1.6
+        %(verbose)s
+
+        Returns
+        -------
+        data : array of shape (n_epochs, n_channels, n_times)
+            The epochs data. Will be a copy when ``copy=True`` and will be a view
+            when possible when ``copy=False``.
+        """
+        return self._get_data(
+            picks=picks, item=item, units=units, tmin=tmin, tmax=tmax, copy=copy
+        )
+
+    @verbose
+    def apply_function(
+        self,
+        fun,
+        picks=None,
+        dtype=None,
+        n_jobs=None,
+        channel_wise=True,
+        verbose=None,
+        **kwargs,
+    ):
+        """Apply a function to a subset of channels.
+
+        %(applyfun_summary_epochs)s
+
+        Parameters
+        ----------
+        %(fun_applyfun)s
+        %(picks_all_data_noref)s
+        %(dtype_applyfun)s
+        %(n_jobs)s Ignored if ``channel_wise=False`` as the workload
+            is split across channels.
+        %(channel_wise_applyfun_epo)s
+        %(verbose)s
+        %(kwargs_fun)s
+
+        Returns
+        -------
+        self : instance of Epochs
+            The epochs object with transformed data.
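+
+        Examples
+        --------
+        A minimal sketch, assuming ``epochs`` is an existing instance (the
+        scaling factor is illustrative only):
+
+        >>> epochs.load_data().apply_function(lambda x: x * 1e6)  # doctest:+SKIP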
+ """ + _check_preload(self, "epochs.apply_function") + picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) + + if not callable(fun): + raise ValueError("fun needs to be a function") + + data_in = self._data + if dtype is not None and dtype != self._data.dtype: + self._data = self._data.astype(dtype) + + args = getfullargspec(fun).args + getfullargspec(fun).kwonlyargs + if channel_wise is False: + if ("ch_idx" in args) or ("ch_name" in args): + raise ValueError( + "apply_function cannot access ch_idx or ch_name " + "when channel_wise=False" + ) + if "ch_idx" in args: + logger.info("apply_function requested to access ch_idx") + if "ch_name" in args: + logger.info("apply_function requested to access ch_name") + + if channel_wise: + parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs) + if n_jobs == 1: + _fun = partial(_check_fun, fun) + # modify data inplace to save memory + for ch_idx in picks: + if "ch_idx" in args: + kwargs.update(ch_idx=ch_idx) + if "ch_name" in args: + kwargs.update(ch_name=self.info["ch_names"][ch_idx]) + self._data[:, ch_idx, :] = np.apply_along_axis( + _fun, -1, data_in[:, ch_idx, :], **kwargs + ) + else: + # use parallel function + _fun = partial(np.apply_along_axis, fun, -1) + data_picks_new = parallel( + p_fun( + _fun, + data_in[:, ch_idx, :], + **kwargs, + **{ + k: v + for k, v in [ + ("ch_name", self.info["ch_names"][ch_idx]), + ("ch_idx", ch_idx), + ] + if k in args + }, + ) + for ch_idx in picks + ) + for run_idx, ch_idx in enumerate(picks): + self._data[:, ch_idx, :] = data_picks_new[run_idx] + else: + self._data = _check_fun(fun, data_in, **kwargs) + + return self + + @property + def filename(self) -> Path | None: + """The filename if the epochs are loaded from disk. + + :type: :class:`pathlib.Path` | ``None`` + """ + return self._filename + + @filename.setter + def filename(self, value): + if value is not None: + value = _check_fname(value, overwrite="read", must_exist=True) + self._filename = value + + def __repr__(self): + """Build string representation.""" + s = f"{len(self.events)} events " + s += "(all good)" if self._bad_dropped else "(good & bad)" + s += f", {self.tmin:.3f}".rstrip("0").rstrip(".") + s += f" – {self.tmax:.3f}".rstrip("0").rstrip(".") + s += " s (baseline " + if self.baseline is None: + s += "off" + else: + s += f"{self.baseline[0]:.3f}".rstrip("0").rstrip(".") + s += f" – {self.baseline[1]:.3f}".rstrip("0").rstrip(".") + s += " s" + if self.baseline != _check_baseline( + self.baseline, + times=self.times, + sfreq=self.info["sfreq"], + on_baseline_outside_data="adjust", + ): + s += " (baseline period was cropped after baseline correction)" + + s += f"), ~{sizeof_fmt(self._size)}" + s += f", data{'' if self.preload else ' not'} loaded" + s += ", with metadata" if self.metadata is not None else "" + max_events = 10 + counts = [ + f"{k!r}: {sum(self.events[:, 2] == v)}" + for k, v in list(self.event_id.items())[:max_events] + ] + if len(self.event_id) > 0: + s += "," + "\n ".join([""] + counts) + if len(self.event_id) > max_events: + not_shown_events = len(self.event_id) - max_events + s += f"\n and {not_shown_events} more events ..." 
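+        # report the public class name ("Epochs" rather than the internal
+        # "BaseEpochs")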
+ class_name = self.__class__.__name__ + class_name = "Epochs" if class_name == "BaseEpochs" else class_name + return f"<{class_name} | {s}>" + + @repr_html + def _repr_html_(self): + if isinstance(self.event_id, dict): + event_strings = [] + for k, v in sorted(self.event_id.items()): + n_events = sum(self.events[:, 2] == v) + event_strings.append(f"{k}: {n_events}") + elif isinstance(self.event_id, list): + event_strings = [] + for k in self.event_id: + n_events = sum(self.events[:, 2] == k) + event_strings.append(f"{k}: {n_events}") + elif isinstance(self.event_id, int): + n_events = len(self.events[:, 2]) + event_strings = [f"{self.event_id}: {n_events}"] + else: + event_strings = None + + t = _get_html_template("repr", "epochs.html.jinja") + t = t.render( + inst=self, + filenames=( + [Path(self.filename).name] + if getattr(self, "filename", None) is not None + else None + ), + event_counts=event_strings, + ) + return t + + @verbose + def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None): + """Crop a time interval from the epochs. + + Parameters + ---------- + tmin : float | None + Start time of selection in seconds. + tmax : float | None + End time of selection in seconds. + %(include_tmax)s + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The cropped epochs object, modified in-place. + + Notes + ----- + %(notes_tmax_included_by_default)s + """ + # XXX this could be made to work on non-preloaded data... + _check_preload(self, "Modifying data of epochs") + + super().crop(tmin=tmin, tmax=tmax, include_tmax=include_tmax) + + # Adjust rejection period + if self.reject_tmin is not None and self.reject_tmin < self.tmin: + logger.info( + f"reject_tmin is not in epochs time interval. " + f"Setting reject_tmin to epochs.tmin ({self.tmin} s)" + ) + self.reject_tmin = self.tmin + if self.reject_tmax is not None and self.reject_tmax > self.tmax: + logger.info( + f"reject_tmax is not in epochs time interval. " + f"Setting reject_tmax to epochs.tmax ({self.tmax} s)" + ) + self.reject_tmax = self.tmax + return self + + def copy(self): + """Return copy of Epochs instance. + + Returns + ------- + epochs : instance of Epochs + A copy of the object. + """ + return deepcopy(self) + + def __deepcopy__(self, memodict): + """Make a deepcopy.""" + cls = self.__class__ + result = cls.__new__(cls) + for k, v in self.__dict__.items(): + # drop_log is immutable and _raw is private (and problematic to + # deepcopy) + if k in ("drop_log", "_raw", "_times_readonly"): + memodict[id(v)] = v + else: + v = deepcopy(v, memodict) + result.__dict__[k] = v + return result + + @verbose + def save( + self, + fname, + split_size="2GB", + fmt="single", + overwrite=False, + split_naming="neuromag", + verbose=None, + ): + """Save epochs in a fif file. + + Parameters + ---------- + fname : path-like + The name of the file, which should end with ``-epo.fif`` or + ``-epo.fif.gz``. + split_size : str | int + Large raw files are automatically split into multiple pieces. This + parameter specifies the maximum size of each piece. If the + parameter is an integer, it specifies the size in Bytes. It is + also possible to pass a human-readable string, e.g., 100MB. + Note: Due to FIFF file limitations, the maximum split size is 2GB. + + .. versionadded:: 0.10.0 + fmt : str + Format to save data. Valid options are 'double' or + 'single' for 64- or 32-bit float, or for 128- or + 64-bit complex numbers respectively. Note: Data are processed with + double precision. 
Choosing single-precision, the saved data
+            will slightly differ due to the reduction in precision.
+
+            .. versionadded:: 0.17
+        %(overwrite)s
+            To overwrite original file (the same one that was loaded),
+            data must be preloaded upon reading. This defaults to True in 0.18
+            but will change to False in 0.19.
+
+            .. versionadded:: 0.18
+        %(split_naming)s
+
+            .. versionadded:: 0.24
+        %(verbose)s
+
+        Returns
+        -------
+        fnames : list of path-like
+            List of path-like objects containing the path to each file split.
+
+            .. versionadded:: 1.9
+
+        Notes
+        -----
+        Bad epochs will be dropped before saving the epochs to disk.
+        """
+        check_fname(
+            fname, "epochs", ("-epo.fif", "-epo.fif.gz", "_epo.fif", "_epo.fif.gz")
+        )
+
+        # check for file existence and expand `~` if present
+        fname = str(
+            _check_fname(
+                fname=fname,
+                overwrite=overwrite,
+                check_bids_split=True,
+                name="fname",
+            )
+        )
+
+        split_size_bytes = _get_split_size(split_size)
+
+        _check_option("fmt", fmt, ["single", "double"])
+
+        # to know the length accurately. The get_data() call would drop
+        # bad epochs anyway
+        self.drop_bad()
+        # total_size tracks sizes that get split
+        # over_size tracks overhead (tags, things that get written to each)
+        if len(self) == 0:
+            warn("Saving epochs with no data")
+            total_size = 0
+        else:
+            d = self[0].get_data(copy=False)
+            # this should be guaranteed by subclasses
+            assert d.dtype in (">f8", ">c16", "<f8", "<c16")
+            total_size = d.nbytes * len(self)
+        self._check_consistency()
+        # ... (n_epochs, drop_size, and the number of file splits n_parts are
+        # derived here from total_size and split_size_bytes) ...
+        assert n_parts >= 1, n_parts
+        if n_parts > 1:
+            logger.info(f"Splitting into {n_parts} parts")
+            if n_parts > 100:  # This must be an error
+                raise ValueError(
+                    f"Split size {split_size} would result in writing "
+                    f"{n_parts} files"
+                )
+
+        if len(self.drop_log) > 100000:
+            warn(
+                f"epochs.drop_log contains {len(self.drop_log)} entries "
+                f"which will incur up to a {sizeof_fmt(drop_size)} writing "
+                f"overhead (per split file), consider using "
+                f"epochs.reset_drop_log_selection() prior to writing"
+            )
+
+        epoch_idxs = np.array_split(np.arange(n_epochs), n_parts)
+
+        _check_option("split_naming", split_naming, ("neuromag", "bids"))
+        split_fnames = _make_split_fnames(fname, n_parts, split_naming)
+        for part_idx, epoch_idx in enumerate(epoch_idxs):
+            this_epochs = self[epoch_idx] if n_parts > 1 else self
+            # avoid missing event_ids in splits
+            this_epochs.event_id = self.event_id
+
+            _save_split(this_epochs, split_fnames, part_idx, n_parts, fmt, overwrite)
+        return split_fnames
+
+    @verbose
+    def export(self, fname, fmt="auto", *, overwrite=False, verbose=None):
+        """Export Epochs to external formats.
+
+        %(export_fmt_support_epochs)s
+
+        %(export_warning)s
+
+        Parameters
+        ----------
+        %(fname_export_params)s
+        %(export_fmt_params_epochs)s
+        %(overwrite)s
+
+            .. versionadded:: 0.24.1
+        %(verbose)s
+
+        Notes
+        -----
+        .. versionadded:: 0.24
+
+        %(export_warning_note_epochs)s
+        %(export_eeglab_note)s
+        """
+        from .export import export_epochs
+
+        export_epochs(fname, self, fmt, overwrite=overwrite, verbose=verbose)
+
+    @fill_doc
+    def equalize_event_counts(
+        self, event_ids=None, method="mintime", *, random_state=None
+    ):
+        """Equalize the number of trials in each condition.
+
+        It tries to make the remaining epochs occur as close as possible in
+        time. This method works based on the idea that if there happened to be
+        some time-varying (like on the scale of minutes) noise characteristics
+        during a recording, they could be compensated for (to some extent) in
+        the equalization process.
+        This method thus seeks to reduce any of
+        those effects by minimizing the differences in the times of the events
+        within a `~mne.Epochs` instance. For example, if one event type
+        occurred at time points ``[1, 2, 3, 4, 120, 121]`` and another one
+        at ``[3.5, 4.5, 120.5, 121.5]``, this method would remove the events at
+        times ``[1, 2]`` for the first event type – and not the events at times
+        ``[120, 121]``.
+
+        Parameters
+        ----------
+        event_ids : None | list | dict
+            The event types to equalize.
+
+            If ``None`` (default), equalize the counts of **all** event types
+            present in the `~mne.Epochs` instance.
+
+            If a list, each element can either be a string (event name) or a
+            list of strings. In the case where one of the entries is a list of
+            strings, event types in that list will be grouped together before
+            equalizing trial counts across conditions.
+
+            If a dictionary, the keys are considered as the event names whose
+            counts to equalize, i.e., passing ``dict(A=1, B=2)`` will have the
+            same effect as passing ``['A', 'B']``. This is useful if you intend
+            to pass an ``event_id`` dictionary that was used when creating
+            `~mne.Epochs`.
+
+            In the case where partial matching is used (using ``/`` in
+            the event names), the event types will be matched according to the
+            provided tags, that is, processing works as if the ``event_ids``
+            matched by the provided tags had been supplied instead.
+            The ``event_ids`` must identify non-overlapping subsets of the
+            epochs.
+        %(equalize_events_method)s
+        %(random_state)s Used only if ``method='random'``.
+
+        Returns
+        -------
+        epochs : instance of Epochs
+            The modified instance. It is modified in-place.
+        indices : array of int
+            Indices from the original events list that were dropped.
+
+        Notes
+        -----
+        For example (if ``epochs.event_id`` was ``{'Left': 1, 'Right': 2,
+        'Nonspatial': 3}``)::
+
+            epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])
+
+        would equalize the number of trials in the ``'Nonspatial'`` condition
+        with the total number of trials in the ``'Left'`` and ``'Right'``
+        conditions combined.
+
+        If multiple indices are provided (e.g. ``'Left'`` and ``'Right'`` in
+        the example above), it is not guaranteed that after equalization the
+        conditions will contribute equally. E.g., it is possible to end up
+        with 70 ``'Nonspatial'`` epochs, 69 ``'Left'`` and 1 ``'Right'``.
+
+        .. versionchanged:: 0.23
+           Default to equalizing all events in the passed instance if no
+           event names were specified explicitly.
+        """
+        from collections.abc import Iterable
+
+        _validate_type(
+            event_ids,
+            types=(Iterable, None),
+            item_name="event_ids",
+            type_name="list-like or None",
+        )
+        if isinstance(event_ids, str):
+            raise TypeError(
+                f"event_ids must be list-like or None, but "
+                f"received a string: {event_ids}"
+            )
+
+        if event_ids is None:
+            event_ids = list(self.event_id)
+        elif not event_ids:
+            raise ValueError("event_ids must have at least one element")
+
+        if not self._bad_dropped:
+            self.drop_bad()
+        # figure out how to equalize
+        eq_inds = list()
+
+        # deal with hierarchical tags
+        ids = self.event_id
+        orig_ids = list(event_ids)
+        tagging = False
+        if "/" in "".join(ids):
+            # make string inputs a list of length 1
+            event_ids = [[x] if isinstance(x, str) else x for x in event_ids]
+            for ids_ in event_ids:  # check if tagging is attempted
+                if any([id_ not in ids for id_ in ids_]):
+                    tagging = True
+            # 1. treat everything that's not in event_id as a tag
+            # 2a. for tags, find all the event_ids matched by the tags
+            # 2b.
for non-tag ids, just pass them directly + # 3. do this for every input + event_ids = [ + [ + k for k in ids if all(tag in k.split("/") for tag in id_) + ] # ids matching all tags + if all(id__ not in ids for id__ in id_) + else id_ # straight pass for non-tag inputs + for id_ in event_ids + ] + for ii, id_ in enumerate(event_ids): + if len(id_) == 0: + raise KeyError( + f"{orig_ids[ii]} not found in the epoch object's event_id." + ) + elif len({sub_id in ids for sub_id in id_}) != 1: + err = ( + "Don't mix hierarchical and regular event_ids" + f" like in '{', '.join(id_)}'." + ) + raise ValueError(err) + + # raise for non-orthogonal tags + if tagging is True: + events_ = [set(self[x].events[:, 0]) for x in event_ids] + doubles = events_[0].intersection(events_[1]) + if len(doubles): + raise ValueError( + "The two sets of epochs are " + "overlapping. Provide an " + "orthogonal selection." + ) + + for eq in event_ids: + eq_inds.append(self._keys_to_idx(eq)) + + sample_nums = [self.events[e, 0] for e in eq_inds] + indices = _get_drop_indices(sample_nums, method, random_state) + # need to re-index indices + indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)]) + self.drop(indices, reason="EQUALIZED_COUNT") + # actually remove the indices + return self, indices + + @verbose + def compute_psd( + self, + method="multitaper", + fmin=0, + fmax=np.inf, + tmin=None, + tmax=None, + picks=None, + proj=False, + remove_dc=True, + exclude=(), + *, + n_jobs=1, + verbose=None, + **method_kw, + ): + """Perform spectral analysis on sensor data. + + Parameters + ---------- + %(method_psd)s + Default is ``'multitaper'``. + %(fmin_fmax_psd)s + %(tmin_tmax_psd)s + %(picks_good_data_noref)s + %(proj_psd)s + %(remove_dc)s + %(exclude_psd)s + %(n_jobs)s + %(verbose)s + %(method_kw_psd)s + + Returns + ------- + spectrum : instance of EpochsSpectrum + The spectral representation of each epoch. + + Notes + ----- + .. versionadded:: 1.2 + + References + ---------- + .. footbibliography:: + """ + method = _validate_method(method, type(self).__name__) + self._set_legacy_nfft_default(tmin, tmax, method, method_kw) + + return EpochsSpectrum( + self, + method=method, + fmin=fmin, + fmax=fmax, + tmin=tmin, + tmax=tmax, + picks=picks, + exclude=exclude, + proj=proj, + remove_dc=remove_dc, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + + @verbose + def compute_tfr( + self, + method, + freqs, + *, + tmin=None, + tmax=None, + picks=None, + proj=False, + output="power", + average=False, + return_itc=False, + decim=1, + n_jobs=None, + verbose=None, + **method_kw, + ): + """Compute a time-frequency representation of epoched data. + + Parameters + ---------- + %(method_tfr_epochs)s + %(freqs_tfr_epochs)s + %(tmin_tmax_psd)s + %(picks_good_data_noref)s + %(proj_psd)s + %(output_compute_tfr)s + average : bool + Whether to return average power across epochs (instead of single-trial + power). ``average=True`` is not compatible with ``output="complex"`` or + ``output="phase"``. Ignored if ``method="stockwell"`` (Stockwell method + *requires* averaging). Default is ``False``. + return_itc : bool + Whether to return inter-trial coherence (ITC) as well as power estimates. + If ``True`` then must specify ``average=True`` (or ``method="stockwell", + average="auto"``). Default is ``False``. + %(decim_tfr)s + %(n_jobs)s + %(verbose)s + %(method_kw_epochs_tfr)s + + Returns + ------- + tfr : instance of EpochsTFR or AverageTFR + The time-frequency-resolved power estimates. 
+ itc : instance of AverageTFR + The inter-trial coherence (ITC). Only returned if ``return_itc=True``. + + Notes + ----- + If ``average=True`` (or ``method="stockwell", average="auto"``) the result will + be an :class:`~mne.time_frequency.AverageTFR` instead of an + :class:`~mne.time_frequency.EpochsTFR`. + + .. versionadded:: 1.7 + + References + ---------- + .. footbibliography:: + """ + if method == "stockwell" and not average: # stockwell method *must* average + logger.info( + 'Requested `method="stockwell"` so ignoring parameter `average=False`.' + ) + average = True + if average: + # augment `output` value for use by tfr_array_* functions + _check_option("output", output, ("power",), extra=" when average=True") + method_kw["output"] = "avg_power_itc" if return_itc else "avg_power" + else: + msg = ( + "compute_tfr() got incompatible parameters `average=False` and `{}` " + "({} requires averaging over epochs)." + ) + if return_itc: + raise ValueError(msg.format("return_itc=True", "computing ITC")) + if method == "stockwell": + raise ValueError(msg.format('method="stockwell"', "Stockwell method")) + # `average` and `return_itc` both False, so "phase" and "complex" are OK + _check_option("output", output, ("power", "phase", "complex")) + method_kw["output"] = output + + if method == "stockwell": + method_kw["return_itc"] = return_itc + method_kw.pop("output") + if isinstance(freqs, str): + _check_option("freqs", freqs, "auto") + else: + _validate_type(freqs, "array-like") + _check_option( + "freqs", np.array(freqs).shape, ((2,),), extra=" (wrong shape)." + ) + if average: + out = AverageTFR( + inst=self, + method=method, + freqs=freqs, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + decim=decim, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + # tfr_array_stockwell always returns ITC (but sometimes it's None) + if hasattr(out, "_itc"): + if out._itc is not None: + state = out.__getstate__() + state["data"] = out._itc + state["data_type"] = "Inter-trial coherence" + itc = AverageTFR(inst=state) + del out._itc + return out, itc + del out._itc + return out + # now handle average=False + return EpochsTFR( + inst=self, + method=method, + freqs=freqs, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + decim=decim, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + + @verbose + def plot_psd( + self, + fmin=0, + fmax=np.inf, + tmin=None, + tmax=None, + picks=None, + proj=False, + *, + method="auto", + average=False, + dB=True, + estimate="power", + xscale="linear", + area_mode="std", + area_alpha=0.33, + color="black", + line_alpha=None, + spatial_colors=True, + sphere=None, + exclude="bads", + ax=None, + show=True, + n_jobs=1, + verbose=None, + **method_kw, + ): + """%(plot_psd_doc)s. + + Parameters + ---------- + %(fmin_fmax_psd)s + %(tmin_tmax_psd)s + %(picks_good_data_noref)s + %(proj_psd)s + %(method_plot_psd_auto)s + %(average_plot_psd)s + %(dB_plot_psd)s + %(estimate_plot_psd)s + %(xscale_plot_psd)s + %(area_mode_plot_psd)s + %(area_alpha_plot_psd)s + %(color_plot_psd)s + %(line_alpha_plot_psd)s + %(spatial_colors_psd)s + %(sphere_topomap_auto)s + + .. versionadded:: 0.22.0 + exclude : list of str | 'bads' + Channels names to exclude from being shown. If 'bads', the bad + channels are excluded. Pass an empty list to plot all channels + (including channels marked "bad", if any). + + .. 
versionadded:: 0.24.0 + %(ax_plot_psd)s + %(show)s + %(n_jobs)s + %(verbose)s + %(method_kw_psd)s + + Returns + ------- + fig : instance of Figure + Figure with frequency spectra of the data channels. + + Notes + ----- + %(notes_plot_psd_meth)s + """ + return super().plot_psd( + fmin=fmin, + fmax=fmax, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + reject_by_annotation=False, + method=method, + average=average, + dB=dB, + estimate=estimate, + xscale=xscale, + area_mode=area_mode, + area_alpha=area_alpha, + color=color, + line_alpha=line_alpha, + spatial_colors=spatial_colors, + sphere=sphere, + exclude=exclude, + ax=ax, + show=show, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + + @verbose + def to_data_frame( + self, + picks=None, + index=None, + scalings=None, + copy=True, + long_format=False, + time_format=None, + *, + verbose=None, + ): + """Export data in tabular structure as a pandas DataFrame. + + Channels are converted to columns in the DataFrame. By default, + additional columns "time", "epoch" (epoch number), and "condition" + (epoch event description) are added, unless ``index`` is not ``None`` + (in which case the columns specified in ``index`` will be used to form + the DataFrame's index instead). + + Parameters + ---------- + %(picks_all)s + %(index_df_epo)s + Valid string values are 'time', 'epoch', and 'condition'. + Defaults to ``None``. + %(scalings_df)s + %(copy_df)s + %(long_format_df_epo)s + %(time_format_df)s + + .. versionadded:: 0.20 + %(verbose)s + + Returns + ------- + %(df_return)s + """ + # check pandas once here, instead of in each private utils function + pd = _check_pandas_installed() # noqa + # arg checking + valid_index_args = ["time", "epoch", "condition"] + valid_time_formats = ["ms", "timedelta"] + index = _check_pandas_index_arguments(index, valid_index_args) + time_format = _check_time_format(time_format, valid_time_formats) + # get data + picks = _picks_to_idx(self.info, picks, "all", exclude=()) + data = self._get_data(on_empty="raise")[:, picks, :] + times = self.times + n_epochs, n_picks, n_times = data.shape + data = np.hstack(data).T # (time*epochs) x signals + if copy: + data = data.copy() + data = _scale_dataframe_data(self, data, picks, scalings) + # prepare extra columns / multiindex + mindex = list() + times = np.tile(times, n_epochs) + times = _convert_times(times, time_format, self.info["meas_date"]) + mindex.append(("time", times)) + rev_event_id = {v: k for k, v in self.event_id.items()} + conditions = [rev_event_id[k] for k in self.events[:, 2]] + mindex.append(("condition", np.repeat(conditions, n_times))) + mindex.append(("epoch", np.repeat(self.selection, n_times))) + assert all(len(mdx) == len(mindex[0]) for mdx in mindex) + # build DataFrame + df = _build_data_frame( + self, + data, + picks, + long_format, + mindex, + index, + default_index=["condition", "epoch", "time"], + ) + return df + + def as_type(self, ch_type="grad", mode="fast"): + """Compute virtual epochs using interpolated fields. + + .. Warning:: Using virtual epochs to compute inverse can yield + unexpected results. The virtual channels have ``'_v'`` appended + at the end of the names to emphasize that the data contained in + them are interpolated. + + Parameters + ---------- + ch_type : str + The destination channel type. It can be 'mag' or 'grad'. + mode : str + Either ``'accurate'`` or ``'fast'``, determines the quality of the + Legendre polynomial expansion used. ``'fast'`` should be sufficient + for most applications. 
+ + Returns + ------- + epochs : instance of mne.EpochsArray + The transformed epochs object containing only virtual channels. + + Notes + ----- + This method returns a copy and does not modify the data it + operates on. It also returns an EpochsArray instance. + + .. versionadded:: 0.20.0 + """ + from .forward import _as_meg_type_inst + + self._handle_empty("raise", "as_type") + return _as_meg_type_inst(self, ch_type=ch_type, mode=mode) + + +def _drop_log_stats(drop_log, ignore=("IGNORED",)): + """Compute drop log stats. + + Parameters + ---------- + drop_log : tuple of tuple of str + Epoch drop log from Epochs.drop_log. + ignore : list + The drop reasons to ignore. + + Returns + ------- + perc : float + Total percentage of epochs dropped. + """ + if ( + not isinstance(drop_log, tuple) + or not all(isinstance(d, tuple) for d in drop_log) + or not all(isinstance(s, str) for d in drop_log for s in d) + ): + raise TypeError("drop_log must be a tuple of tuple of str") + perc = 100 * np.mean( + [len(d) > 0 for d in drop_log if not any(r in ignore for r in d)] + ) + return perc + + +def make_metadata( + events, + event_id, + tmin, + tmax, + sfreq, + row_events=None, + keep_first=None, + keep_last=None, +): + """Automatically generate metadata for use with `mne.Epochs` from events. + + This function mimics the epoching process (it constructs time windows + around time-locked "events of interest") and collates information about + any other events that occurred within those time windows. The information + is returned as a :class:`pandas.DataFrame`, suitable for use as + `~mne.Epochs` metadata: one row per time-locked event, and columns + indicating presence or absence and latency of each ancillary event type. + + The function will also return a new ``events`` array and ``event_id`` + dictionary that correspond to the generated metadata, which together can then be + readily fed into `~mne.Epochs`. + + Parameters + ---------- + events : array, shape (m, 3) + The :term:`events array <events>`. By default, the returned metadata + :class:`~pandas.DataFrame` will have as many rows as the events array. + To create rows for only a subset of events, pass the ``row_events`` + parameter. + event_id : dict + A mapping from event names (keys) to event IDs (values). The event + names will be incorporated as columns of the returned metadata + :class:`~pandas.DataFrame`. + tmin, tmax : float | str | list of str | None + If float, start and end of the time interval for metadata generation in seconds, + relative to the time-locked event of the respective time window (the "row + events"). + + .. note:: + If you are planning to attach the generated metadata to + `~mne.Epochs` and intend to include only events that fall inside + your epoch's time interval, pass the same ``tmin`` and ``tmax`` + values here as you use for your epochs. + + If ``None``, the time window used for metadata generation is bounded by the + ``row_events``. This can be particularly practical if trial duration varies + greatly, but each trial starts with a known event (e.g., a visual cue or + fixation). + + .. note:: + If ``tmin=None``, the first time window for metadata generation starts with + the first row event. If ``tmax=None``, the last time window for metadata + generation ends with the last event in ``events``. + + If a string or a list of strings, the events bounding the metadata around each + "row event". 
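`_drop_log_stats` above reduces a drop log to a single percentage. The same figure can be computed from a public `Epochs` instance, as in this sketch (assumes an existing, already-epoched `epochs` object):

```python
import numpy as np

epochs.drop_bad()  # finalize the drop log before counting
ignore = ("IGNORED",)
perc = 100 * np.mean(
    [len(d) > 0 for d in epochs.drop_log if not any(r in ignore for r in d)]
)
print(f"{perc:.1f}% of epochs dropped")
```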
For ``tmin``, the events are assumed to occur **before** the row + event, and for ``tmax``, the events are assumed to occur **after** – unless + ``tmin`` or ``tmax`` are equal to a row event, in which case the row event + serves as the bound. + + .. versionchanged:: 1.6.0 + Added support for ``None``. + + .. versionadded:: 1.7.0 + Added support for strings. + sfreq : float + The sampling frequency of the data from which the events array was + extracted. + row_events : list of str | str | None + Event types around which to create the time windows. For each of these + time-locked events, we will create a **row** in the returned metadata + :class:`pandas.DataFrame`. If provided, the string(s) must be keys of + ``event_id``. If ``None`` (default), rows are created for **all** event types + present in ``event_id``. + keep_first : str | list of str | None + Specify subsets of :term:`hierarchical event descriptors` (HEDs, + inspired by :footcite:`BigdelyShamloEtAl2013`) matching events of which + the **first occurrence** within each time window shall be stored in + addition to the original events. + + .. note:: + There is currently no way to retain **all** occurrences of a + repeated event. The ``keep_first`` parameter can be used to specify + subsets of HEDs, effectively creating a new event type that is the + union of all event types described by the matching HED pattern. + Only the very first event of this set will be kept. + + For example, you might have two response event types, + ``response/left`` and ``response/right``; and in trials with both + responses occurring, you want to keep only the first response. In this + case, you can pass ``keep_first='response'``. This will add two new + columns to the metadata: ``response``, indicating at what **time** the + event occurred, relative to the time-locked event; and + ``first_response``, stating which **type** (``'left'`` or ``'right'``) + of event occurred. + To match specific subsets of HEDs describing different sets of events, + pass a list of these subsets, e.g. + ``keep_first=['response', 'stimulus']``. If ``None`` (default), no + event aggregation will take place and no new columns will be created. + + .. note:: + By default, this function will always retain the first instance + of any event in each time window. For example, if a time window + contains two ``'response'`` events, the generated ``response`` + column will automatically refer to the first of the two events. In + this specific case, it is therefore **not** necessary to make use of + the ``keep_first`` parameter – unless you need to differentiate + between two types of responses, like in the example above. + + keep_last : list of str | None + Same as ``keep_first``, but for keeping only the **last** occurrence + of matching events. The column indicating the **type** of an event + ``myevent`` will be named ``last_myevent``. 
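To make the `keep_first` behaviour just described concrete, here is a usage sketch with the hypothetical stimulus/response event names from the docstring; `events` and `raw` are assumed to exist:

```python
import mne

event_id = {"stimulus": 1, "response/left": 2, "response/right": 3}
metadata, new_events, new_event_id = mne.epochs.make_metadata(
    events=events,              # (m, 3) events array, assumed available
    event_id=event_id,
    tmin=-0.2,
    tmax=1.5,
    sfreq=raw.info["sfreq"],
    row_events="stimulus",      # one metadata row per stimulus
    keep_first="response",      # adds "response" and "first_response" columns
)
epochs = mne.Epochs(raw, new_events, new_event_id, metadata=metadata)
```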
+ + Returns + ------- + metadata : pandas.DataFrame + Metadata for each row event, with the following columns: + + - ``event_name``, with strings indicating the name of the time-locked + event ("row event") for that specific time window + + - one column per event type in ``event_id``, with the same name; floats + indicating the latency of the event in seconds, relative to the + time-locked event + + - if applicable, additional columns named after the ``keep_first`` and + ``keep_last`` event types; floats indicating the latency of the + event in seconds, relative to the time-locked event + + - if applicable, additional columns ``first_{event_type}`` and + ``last_{event_type}`` for ``keep_first`` and ``keep_last`` event + types, respectively; the values will be strings indicating which event + types were matched by the provided HED patterns + + events : array, shape (n, 3) + The events corresponding to the generated metadata, i.e. one + time-locked event per row. + event_id : dict + The event dictionary corresponding to the new events array. This will + be identical to the input dictionary unless ``row_events`` is supplied, + in which case it will only contain the events provided there. + + Notes + ----- + The time window used for metadata generation need not correspond to the + time window used to create the `~mne.Epochs`, to which the metadata will + be attached; it may well be much shorter or longer, or not overlap at all, + if desired. This can be useful, for example, to include events that + occurred before or after an epoch, e.g. during the inter-trial interval. + If either ``tmin``, ``tmax``, or both are ``None``, or a string referring e.g. to a + response event, the time window will typically vary, too. + + .. versionadded:: 0.23 + + References + ---------- + .. footbibliography:: + """ + pd = _check_pandas_installed() + + _validate_type(events, types=("array-like",), item_name="events") + _validate_type(event_id, types=(dict,), item_name="event_id") + _validate_type(sfreq, types=("numeric",), item_name="sfreq") + _validate_type(tmin, types=("numeric", str, "array-like", None), item_name="tmin") + _validate_type(tmax, types=("numeric", str, "array-like", None), item_name="tmax") + _validate_type(row_events, types=(None, str, "array-like"), item_name="row_events") + _validate_type(keep_first, types=(None, str, "array-like"), item_name="keep_first") + _validate_type(keep_last, types=(None, str, "array-like"), item_name="keep_last") + + if not event_id: + raise ValueError("event_id dictionary must contain at least one entry") + + def _ensure_list(x): + if x is None: + return [] + elif isinstance(x, str): + return [x] + else: + return list(x) + + row_events = _ensure_list(row_events) + keep_first = _ensure_list(keep_first) + keep_last = _ensure_list(keep_last) + + # Turn tmin, tmax into a list if they're strings or arrays of strings + try: + _validate_type(tmin, types=(str, "array-like"), item_name="tmin") + tmin = _ensure_list(tmin) + except TypeError: + pass + + try: + _validate_type(tmax, types=(str, "array-like"), item_name="tmax") + tmax = _ensure_list(tmax) + except TypeError: + pass + + keep_first_and_last = set(keep_first) & set(keep_last) + if keep_first_and_last: + raise ValueError( + f"The event names in keep_first and keep_last must " + f"be mutually exclusive. 
Specified in both: " + f'{", ".join(sorted(keep_first_and_last))}' + ) + del keep_first_and_last + + for param_name, values in dict(keep_first=keep_first, keep_last=keep_last).items(): + for first_last_event_name in values: + try: + match_event_names(event_id, [first_last_event_name]) + except KeyError: + raise ValueError( + f'Event "{first_last_event_name}", specified in ' + f"{param_name}, cannot be found in event_id dictionary" + ) + + # If tmin, tmax are strings, ensure these event names are present in event_id + def _diff_input_strings_vs_event_id(input_strings, input_name, event_id): + event_name_diff = sorted(set(input_strings) - set(event_id.keys())) + if event_name_diff: + raise ValueError( + f"Present in {input_name}, but missing from event_id: " + f'{", ".join(event_name_diff)}' + ) + + _diff_input_strings_vs_event_id( + input_strings=row_events, input_name="row_events", event_id=event_id + ) + if isinstance(tmin, list): + _diff_input_strings_vs_event_id( + input_strings=tmin, input_name="tmin", event_id=event_id + ) + if isinstance(tmax, list): + _diff_input_strings_vs_event_id( + input_strings=tmax, input_name="tmax", event_id=event_id + ) + + # First and last sample of each epoch, relative to the time-locked event + # This follows the approach taken in mne.Epochs + # For strings and None, we don't know the start and stop samples in advance as the + # time window can vary. + if isinstance(tmin, type(None) | list): + start_sample = None + else: + start_sample = int(round(tmin * sfreq)) + + if isinstance(tmax, type(None) | list): + stop_sample = None + else: + stop_sample = int(round(tmax * sfreq)) + 1 + + # Make indexing easier + # We create the DataFrame before subsetting the events so we end up with + # indices corresponding to the original event indices. Not used for now, + # but might come in handy sometime later + events_df = pd.DataFrame(events, columns=("sample", "prev_id", "id")) + id_to_name_map = {v: k for k, v in event_id.items()} + + # Only keep events that are of interest + events = events[np.isin(events[:, 2], list(event_id.values()))] + events_df = events_df.loc[events_df["id"].isin(event_id.values()), :] + + # Prepare & condition the metadata DataFrame + + # Avoid column name duplications if the exact same event name appears in + # event_id.keys() and keep_first / keep_last simultaneously + keep_first_cols = [col for col in keep_first if col not in event_id] + keep_last_cols = [col for col in keep_last if col not in event_id] + first_cols = [f"first_{col}" for col in keep_first_cols] + last_cols = [f"last_{col}" for col in keep_last_cols] + + columns = [ + "event_name", + *event_id.keys(), + *keep_first_cols, + *keep_last_cols, + *first_cols, + *last_cols, + ] + + data = np.empty((len(events_df), len(columns)), float) + metadata = pd.DataFrame(data=data, columns=columns, index=events_df.index) + + # Event names + metadata["event_name"] = "" + + # Event times + start_idx = 1 + stop_idx = start_idx + len(event_id.keys()) + len(keep_first_cols + keep_last_cols) + metadata.iloc[:, start_idx:stop_idx] = np.nan + + # keep_first and keep_last names + start_idx = stop_idx + metadata[columns[start_idx:]] = None + + # We're all set, let's iterate over all events and fill in the + # respective cells in the metadata. 
We will subset this to include only + # `row_events` later + for row_event in events_df.itertuples(name="RowEvent"): + row_idx = row_event.Index + metadata.loc[row_idx, "event_name"] = id_to_name_map[row_event.id] + + # Determine which events fall into the current time window + if start_sample is None and isinstance(tmin, list): + # Lower bound is the current or the closest previous event with a name + # in "tmin"; if there is no such event (e.g., the beginning of the + # recording is being approached), the lower bound becomes the current + # row event. + prev_matching_events = events_df.loc[ + (events_df["sample"] <= row_event.sample) + & (events_df["id"].isin([event_id[name] for name in tmin])), + :, + ] + if prev_matching_events.size == 0: + # No earlier matching event. Use the current one as the beginning of the + # time window. This may occur at the beginning of a recording. + window_start_sample = row_event.sample + else: + # At least one earlier matching event. Use the closest one. + window_start_sample = prev_matching_events.iloc[-1]["sample"] + elif start_sample is None: + # Lower bound is the current event. + window_start_sample = row_event.sample + else: + # Lower bound is determined by tmin. + window_start_sample = row_event.sample + start_sample + + if stop_sample is None and isinstance(tmax, list): + # Upper bound is the current or the closest following event with a name + # in "tmax"; if there is no such event (e.g., end of the recording is being + # approached), the upper bound becomes the last event in the recording. + next_matching_events = events_df.loc[ + (events_df["sample"] >= row_event.sample) + & (events_df["id"].isin([event_id[name] for name in tmax])), + :, + ] + if next_matching_events.size == 0: + # No matching event after the current one; use the end of the recording + # as upper bound. This may occur at the end of a recording. + window_stop_sample = events_df["sample"].iloc[-1] + else: + # At least one matching later event. Use the closest one. + window_stop_sample = next_matching_events.iloc[0]["sample"] + elif stop_sample is None: + # Upper bound: next event of the same type, or the last event (of + # any type) if no later event of the same type can be found. + next_events = events_df.loc[ + (events_df["sample"] > row_event.sample), + :, + ] + if next_events.size == 0: + # We've reached the last event in the recording. + window_stop_sample = row_event.sample + elif next_events.loc[next_events["id"] == row_event.id, :].size > 0: + # There's still an event of the same type appearing after the + # current event. Stop one sample short, we don't want to include that + # last event here, but in the next iteration. + window_stop_sample = ( + next_events.loc[next_events["id"] == row_event.id, :].iloc[0][ + "sample" + ] + - 1 + ) + else: + # There are still events after the current one, but not of the + # same type. + window_stop_sample = next_events.iloc[-1]["sample"] + else: + # Upper bound is determined by tmax. 
+ window_stop_sample = row_event.sample + stop_sample + + events_in_window = events_df.loc[ + (events_df["sample"] >= window_start_sample) + & (events_df["sample"] <= window_stop_sample), + :, + ] + + assert not events_in_window.empty + + # Store the metadata + for event in events_in_window.itertuples(name="Event"): + event_sample = event.sample - row_event.sample + event_time = event_sample / sfreq + event_time = 0 if np.isclose(event_time, 0) else event_time + event_name = id_to_name_map[event.id] + + if not np.isnan(metadata.loc[row_idx, event_name]): + # Event already exists in current time window! + assert metadata.loc[row_idx, event_name] <= event_time + + if event_name not in keep_last: + continue + + metadata.loc[row_idx, event_name] = event_time + + # Handle keep_first and keep_last event aggregation + for event_group_name in keep_first + keep_last: + if event_name not in match_event_names(event_id, [event_group_name]): + continue + + if event_group_name in keep_first: + first_last_col = f"first_{event_group_name}" + else: + first_last_col = f"last_{event_group_name}" + + old_time = metadata.loc[row_idx, event_group_name] + if not np.isnan(old_time): + if (event_group_name in keep_first and old_time <= event_time) or ( + event_group_name in keep_last and old_time >= event_time + ): + continue + + if event_group_name not in event_id: + # This is an HED. Strip redundant information from the + # event name + name = ( + event_name.replace(event_group_name, "") + .replace("//", "/") + .strip("/") + ) + metadata.loc[row_idx, first_last_col] = name + del name + + metadata.loc[row_idx, event_group_name] = event_time + + # Only keep rows of interest + if row_events: + event_id_timelocked = { + name: val for name, val in event_id.items() if name in row_events + } + events = events[np.isin(events[:, 2], list(event_id_timelocked.values()))] + metadata = metadata.loc[metadata["event_name"].isin(event_id_timelocked)] + assert len(events) == len(metadata) + event_id = event_id_timelocked + + return metadata, events, event_id + + +def _events_from_annotations(raw, events, event_id, annotations, on_missing): + """Generate events and event_ids from annotations.""" + events, event_id_tmp = events_from_annotations(raw) + if events.size == 0: + raise RuntimeError( + "No usable annotations found in the raw object. " + "Either `events` must be provided or the raw " + "object must have annotations to construct epochs" + ) + if any(raw.annotations.duration > 0): + logger.info( + "Ignoring annotation durations and creating fixed-duration epochs " + "around annotation onsets." + ) + if event_id is None: + event_id = event_id_tmp + # if event_id is the names of events, map to events integers + if isinstance(event_id, str): + event_id = [event_id] + if isinstance(event_id, list | tuple | set): + if not set(event_id).issubset(set(event_id_tmp)): + msg = ( + "No matching annotations found for event_id(s) " + f"{set(event_id) - set(event_id_tmp)}" + ) + _on_missing(on_missing, msg) + # remove extras if on_missing not error + event_id = set(event_id) & set(event_id_tmp) + event_id = {my_id: event_id_tmp[my_id] for my_id in event_id} + # remove any non-selected annotations + annotations.delete(~np.isin(raw.annotations.description, list(event_id))) + return events, event_id, annotations + + +@fill_doc +class Epochs(BaseEpochs): + """Epochs extracted from a Raw instance. + + Parameters + ---------- + %(raw_epochs)s + + .. 
note:: + If ``raw`` contains annotations, ``Epochs`` can be constructed around + ``raw.annotations.onset``, but note that the durations of the annotations + are ignored in this case. + %(events_epochs)s + + .. versionchanged:: 1.7 + Allow ``events=None`` to use ``raw.annotations.onset`` as the source of + epoch times. + %(event_id)s + %(epochs_tmin_tmax)s + %(baseline_epochs)s + Defaults to ``(None, 0)``, i.e. beginning of the data until + time point zero. + %(picks_all)s + preload : bool + %(epochs_preload)s + %(reject_epochs)s + %(flat)s + %(proj_epochs)s + %(decim)s + %(epochs_reject_tmin_tmax)s + %(detrend_epochs)s + %(on_missing_epochs)s + %(reject_by_annotation_epochs)s + %(metadata_epochs)s + + .. versionadded:: 0.16 + %(event_repeated_epochs)s + %(verbose)s + + Attributes + ---------- + %(info_not_none)s + %(event_id_attr)s + ch_names : list of string + List of channel names. + %(selection_attr)s + preload : bool + Indicates whether epochs are in memory. + drop_log : tuple of tuple + A tuple of the same length as the event array used to initialize the + Epochs object. If the i-th original event is still part of the + selection, drop_log[i] will be an empty tuple; otherwise it will be + a tuple of the reasons the event is no longer in the selection, e.g.: + + - 'IGNORED' + If it isn't part of the current subset defined by the user + - 'NO_DATA' or 'TOO_SHORT' + If the epoch did not contain enough data + - 'EQUALIZED_COUNTS' + See :meth:`~mne.Epochs.equalize_event_counts` + - 'USER' + For user-defined reasons (see :meth:`~mne.Epochs.drop`). + + When dropping based on flat or reject parameters the tuple of + reasons contains a tuple of channels that satisfied the rejection + criteria. + filename : str + The filename of the object. + times : ndarray + Time vector in seconds. Goes from ``tmin`` to ``tmax``. Time interval + between consecutive time samples is equal to the inverse of the + sampling frequency. + + See Also + -------- + mne.epochs.combine_event_ids + mne.Epochs.equalize_event_counts + + Notes + ----- + When accessing data, Epochs are detrended, baseline-corrected, and + decimated, then projectors are (optionally) applied. + + For indexing and slicing using ``epochs[...]``, see + :meth:`mne.Epochs.__getitem__`. + + All methods for iteration over objects (using :meth:`mne.Epochs.__iter__`, + :meth:`mne.Epochs.iter_evoked` or :meth:`mne.Epochs.next`) use the same + internal state. + + If ``event_repeated`` is set to ``'merge'``, the coinciding events + (duplicates) will be merged into a single event_id and assigned a new + id_number as:: + + event_id['{event_id_1}/{event_id_2}/...'] = new_id_number + + For example with the event_id ``{'aud': 1, 'vis': 2}`` and the events + ``[[0, 0, 1], [0, 0, 2]]``, the "merge" behavior will update both event_id + and events to be: ``{'aud/vis': 3}`` and ``[[0, 0, 3]]`` respectively. + + There is limited support for :class:`~mne.Annotations` in the + :class:`~mne.Epochs` class. Currently annotations that are present in the + :class:`~mne.io.Raw` object will be preserved in the resulting + :class:`~mne.Epochs` object, but: + + 1. It is not yet possible to add annotations + to the Epochs object programmatically (via code) or interactively + (through the plot window) + 2. Concatenating :class:`~mne.Epochs` objects + that contain annotations is not supported, and any annotations will + be dropped when concatenating. + 3. Annotations will be lost on save. 
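A minimal usage sketch for the constructor documented above, assuming an existing `raw` with a stim channel; the event codes and rejection threshold are illustrative:

```python
import mne

events = mne.find_events(raw)      # or pass events=None to use annotations
epochs = mne.Epochs(
    raw,
    events=events,
    event_id={"aud": 1, "vis": 2},
    tmin=-0.2,
    tmax=0.5,
    baseline=(None, 0),            # default: start of data to time zero
    reject=dict(eeg=150e-6),       # peak-to-peak threshold, in volts
    preload=True,
)
```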
+ """ + + @verbose + def __init__( + self, + raw, + events=None, + event_id=None, + tmin=-0.2, + tmax=0.5, + baseline=(None, 0), + picks=None, + preload=False, + reject=None, + flat=None, + proj=True, + decim=1, + reject_tmin=None, + reject_tmax=None, + detrend=None, + on_missing="raise", + reject_by_annotation=True, + metadata=None, + event_repeated="error", + verbose=None, + ): + from .io import BaseRaw + + if not isinstance(raw, BaseRaw): + raise ValueError( + "The first argument to `Epochs` must be an " + "instance of mne.io.BaseRaw" + ) + info = deepcopy(raw.info) + annotations = raw.annotations.copy() + + # proj is on when applied in Raw + proj = proj or raw.proj + + self.reject_by_annotation = reject_by_annotation + + # keep track of original sfreq (needed for annotations) + raw_sfreq = raw.info["sfreq"] + + # get events from annotations if no events given + if events is None: + events, event_id, annotations = _events_from_annotations( + raw, events, event_id, annotations, on_missing + ) + + # call BaseEpochs constructor + super().__init__( + info, + None, + events, + event_id, + tmin, + tmax, + metadata=metadata, + baseline=baseline, + raw=raw, + picks=picks, + reject=reject, + flat=flat, + decim=decim, + reject_tmin=reject_tmin, + reject_tmax=reject_tmax, + detrend=detrend, + proj=proj, + on_missing=on_missing, + preload_at_end=preload, + event_repeated=event_repeated, + verbose=verbose, + raw_sfreq=raw_sfreq, + annotations=annotations, + ) + + @verbose + def _get_epoch_from_raw(self, idx, verbose=None): + """Load one epoch from disk. + + Returns + ------- + data : array | str | None + If string, it's details on rejection reason. + If array, it's the data in the desired range (good segment) + If None, it means no data is available. + """ + if self._raw is None: + # This should never happen, as raw=None only if preload=True + raise ValueError( + "An error has occurred, no valid raw file found. " + "Please report this to the mne-python " + "developers." + ) + sfreq = self._raw.info["sfreq"] + event_samp = self.events[idx, 0] + # Read a data segment from "start" to "stop" in samples + first_samp = self._raw.first_samp + start = int(round(event_samp + self._raw_times[0] * sfreq)) + start -= first_samp + stop = start + len(self._raw_times) + + # reject_tmin, and reject_tmax need to be converted to samples to + # check the reject_by_annotation boundaries: reject_start, reject_stop + reject_tmin = self.reject_tmin + if reject_tmin is None: + reject_tmin = self._raw_times[0] + reject_start = int(round(event_samp + reject_tmin * sfreq)) + reject_start -= first_samp + + reject_tmax = self.reject_tmax + if reject_tmax is None: + reject_tmax = self._raw_times[-1] + diff = int(round((self._raw_times[-1] - reject_tmax) * sfreq)) + reject_stop = stop - diff + + logger.debug(f" Getting epoch for {start}-{stop}") + data = self._raw._check_bad_segment( + start, + stop, + self.picks, + reject_start, + reject_stop, + self.reject_by_annotation, + ) + return data + + +@fill_doc +class EpochsArray(BaseEpochs): + """Epochs object from numpy array. + + Parameters + ---------- + data : array, shape (n_epochs, n_channels, n_times) + The channels' time series for each epoch. See notes for proper units of + measure. + %(info_not_none)s Consider using :func:`mne.create_info` to populate this + structure. + %(events_epochs)s + %(tmin_epochs)s + %(event_id)s + %(reject_epochs)s + %(flat)s + %(epochs_reject_tmin_tmax)s + %(baseline_epochs)s + Defaults to ``None``, i.e. no baseline correction. 
+ %(proj_epochs)s + %(on_missing_epochs)s + %(metadata_epochs)s + + .. versionadded:: 0.16 + %(selection)s + %(drop_log)s + + .. versionadded:: 1.3 + %(raw_sfreq)s + + .. versionadded:: 1.3 + %(verbose)s + + See Also + -------- + create_info + EvokedArray + io.RawArray + + Notes + ----- + Proper units of measure: + + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog + * T: mag + * T/m: grad + * M: hbo, hbr + * Am: dipole + * AU: misc + + EpochsArray does not set `Annotations`. If you would like to create + simulated data with Annotations that are then preserved in the Epochs + object, you would use `mne.io.RawArray` first and then create an + `mne.Epochs` object. + """ + + @verbose + def __init__( + self, + data, + info, + events=None, + tmin=0.0, + event_id=None, + reject=None, + flat=None, + reject_tmin=None, + reject_tmax=None, + baseline=None, + proj=True, + on_missing="raise", + metadata=None, + selection=None, + *, + drop_log=None, + raw_sfreq=None, + verbose=None, + ): + dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64 + data = np.asanyarray(data, dtype=dtype) + if data.ndim != 3: + raise ValueError( + "Data must be a 3D array of shape (n_epochs, n_channels, n_samples)" + ) + + if len(info["ch_names"]) != data.shape[1]: + raise ValueError("Info and data must have same number of channels.") + if events is None: + n_epochs = len(data) + events = _gen_events(n_epochs) + info = info.copy() # do not modify original info + tmax = (data.shape[2] - 1) / info["sfreq"] + tmin + + super().__init__( + info, + data, + events, + event_id, + tmin, + tmax, + baseline, + reject=reject, + flat=flat, + reject_tmin=reject_tmin, + reject_tmax=reject_tmax, + decim=1, + metadata=metadata, + selection=selection, + proj=proj, + on_missing=on_missing, + drop_log=drop_log, + raw_sfreq=raw_sfreq, + verbose=verbose, + ) + if self.baseline is not None: + self._do_baseline = True + if ( + len(events) + != np.isin(self.events[:, 2], list(self.event_id.values())).sum() + ): + raise ValueError("The events must only contain event numbers from event_id") + detrend_picks = self._detrend_picks + for e in self._data: + # This is safe without assignment b/c there is no decim + self._detrend_offset_decim(e, detrend_picks) + self.drop_bad() + + +def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True): + """Collapse event_ids from an epochs instance into a new event_id. + + Parameters + ---------- + epochs : instance of Epochs + The epochs to operate on. + old_event_ids : str, or list + Conditions to collapse together. + new_event_id : dict, or int + A one-element dict (or a single integer) for the new + condition. Note that for safety, this cannot be any + existing id (in epochs.event_id.values()). + copy : bool + Whether to return a new instance or modify in place. + + Returns + ------- + epochs : instance of Epochs + The modified epochs. + + Notes + ----- + For example, if ``epochs.event_id`` was ``{'Left': 1, 'Right': 2}``:: + + combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12}) + + would create a 'Directional' entry in epochs.event_id replacing + 'Left' and 'Right' (combining their trials). 
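A self-contained sketch of `EpochsArray` with synthetic data, following the unit conventions listed above (volts for EEG); all values are illustrative:

```python
import numpy as np
import mne

rng = np.random.default_rng(0)
data = rng.standard_normal((10, 3, 50)) * 1e-6   # 10 epochs, 3 channels, 50 samples
info = mne.create_info(
    ["EEG 001", "EEG 002", "EEG 003"], sfreq=100.0, ch_types="eeg"
)
epochs = mne.EpochsArray(data, info, tmin=-0.1)  # events auto-generated if omitted
```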
+ """ + epochs = epochs.copy() if copy else epochs + old_event_ids = np.asanyarray(old_event_ids) + if isinstance(new_event_id, int): + new_event_id = {str(new_event_id): new_event_id} + else: + if not isinstance(new_event_id, dict): + raise ValueError("new_event_id must be a dict or int") + if not len(list(new_event_id.keys())) == 1: + raise ValueError("new_event_id dict must have one entry") + new_event_num = list(new_event_id.values())[0] + new_event_num = operator.index(new_event_num) + if new_event_num in epochs.event_id.values(): + raise ValueError("new_event_id value must not already exist") + # could use .pop() here, but if a latter one doesn't exist, we're + # in trouble, so run them all here and pop() later + old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids]) + # find the ones to replace + inds = np.any( + epochs.events[:, 2][:, np.newaxis] == old_event_nums[np.newaxis, :], axis=1 + ) + # replace the event numbers in the events list + epochs.events[inds, 2] = new_event_num + # delete old entries + for key in old_event_ids: + epochs.event_id.pop(key) + # add the new entry + epochs.event_id.update(new_event_id) + return epochs + + +@fill_doc +def equalize_epoch_counts(epochs_list, method="mintime", *, random_state=None): + """Equalize the number of trials in multiple Epochs or EpochsTFR instances. + + Parameters + ---------- + epochs_list : list of Epochs instances + The Epochs instances to equalize trial counts for. + %(equalize_events_method)s + %(random_state)s Used only if ``method='random'``. + + Notes + ----- + The method ``'mintime'`` tries to make the remaining epochs occurring as close as + possible in time. This method is motivated by the possibility that if there happened + to be some time-varying (like on the scale of minutes) noise characteristics during + a recording, they could be compensated for (to some extent) in the + equalization process. This method thus seeks to reduce any of those effects + by minimizing the differences in the times of the events in the two sets of + epochs. For example, if one had event times [1, 2, 3, 4, 120, 121] and the + other one had [3.5, 4.5, 120.5, 121.5], it would remove events at times + [1, 2] in the first epochs and not [120, 121]. 
+ + Examples + -------- + >>> equalize_epoch_counts([epochs1, epochs2]) # doctest: +SKIP + """ + if not all(isinstance(epoch, BaseEpochs | EpochsTFR) for epoch in epochs_list): + raise ValueError("All inputs must be Epochs instances") + # make sure bad epochs are dropped + for epoch in epochs_list: + if not epoch._bad_dropped: + epoch.drop_bad() + sample_nums = [epoch.events[:, 0] for epoch in epochs_list] + indices = _get_drop_indices(sample_nums, method, random_state) + for epoch, inds in zip(epochs_list, indices): + epoch.drop(inds, reason="EQUALIZED_COUNT") + + +def _get_drop_indices(sample_nums, method, random_state): + """Get indices to drop from multiple event timing lists.""" + small_idx = np.argmin([e.size for e in sample_nums]) + small_epoch_indices = sample_nums[small_idx] + _check_option("method", method, ["mintime", "truncate", "random"]) + indices = list() + for event in sample_nums: + if method == "mintime": + mask = _minimize_time_diff(small_epoch_indices, event) + elif method == "truncate": + mask = np.ones(event.size, dtype=bool) + mask[small_epoch_indices.size :] = False + elif method == "random": + rng = check_random_state(random_state) + mask = np.zeros(event.size, dtype=bool) + idx = rng.choice( + np.arange(event.size), size=small_epoch_indices.size, replace=False + ) + mask[idx] = True + indices.append(np.where(np.logical_not(mask))[0]) + return indices + + +def _minimize_time_diff(t_shorter, t_longer): + """Find a boolean mask to minimize timing differences.""" + keep = np.ones((len(t_longer)), dtype=bool) + # special case: length zero or one + if len(t_shorter) < 2: # interp1d won't work + keep.fill(False) + if len(t_shorter) == 1: + idx = np.argmin(np.abs(t_longer - t_shorter)) + keep[idx] = True + return keep + scores = np.ones(len(t_longer)) + x1 = np.arange(len(t_shorter)) + # The first set of keep masks to test + kwargs = dict(copy=False, bounds_error=False, assume_sorted=True) + shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1], **kwargs) + for ii in range(len(t_longer) - len(t_shorter)): + scores.fill(np.inf) + # set up the keep masks to test, eliminating any rows that are already + # gone + keep_mask = ~np.eye(len(t_longer), dtype=bool)[keep] + keep_mask[:, ~keep] = False + # Check every possible removal to see if it minimizes + x2 = np.arange(len(t_longer) - ii - 1) + t_keeps = np.array([t_longer[km] for km in keep_mask]) + longer_interp = interp1d( + x2, t_keeps, axis=1, fill_value=t_keeps[:, -1], **kwargs + ) + d1 = longer_interp(x1) - t_shorter + d2 = shorter_interp(x2) - t_keeps + scores[keep] = np.abs(d1, d1).sum(axis=1) + np.abs(d2, d2).sum(axis=1) + keep[np.argmin(scores)] = False + return keep + + +@verbose +def _is_good( + e, + ch_names, + channel_type_idx, + reject, + flat, + full_report=False, + ignore_chs=(), + verbose=None, +): + """Test if data segment e is good according to reject and flat. + + The reject and flat parameters can accept functions as values. + + If full_report=True, it will give True/False as well as a list of all + offending channels. 
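As the `_is_good` docstring above notes, `reject` and `flat` values may be callables; per the validation in the function body that follows, a callable must return a `(bool, reason)` tuple, with the reason given as a str, list, or tuple. A sketch of such a criterion with an illustrative 200 µV threshold (assumes `raw`, `events`, and `event_id` exist):

```python
import numpy as np
import mne

def huge_ptp(epoch_data):
    """Flag the epoch if any channel exceeds 200 uV peak-to-peak."""
    ptp = np.ptp(epoch_data, axis=1)
    return bool((ptp > 200e-6).any()), "ptp > 200e-6"

epochs = mne.Epochs(
    raw, events, event_id, reject=dict(eeg=huge_ptp), preload=True
)
```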
+ """ + bad_tuple = tuple() + has_printed = False + checkable = np.ones(len(ch_names), dtype=bool) + checkable[np.array([c in ignore_chs for c in ch_names], dtype=bool)] = False + + for refl, f, t in zip([reject, flat], [np.greater, np.less], ["", "flat"]): + if refl is not None: + for key, refl in refl.items(): + criterion = refl + idx = channel_type_idx[key] + name = key.upper() + if len(idx) > 0: + e_idx = e[idx] + checkable_idx = checkable[idx] + # Check if criterion is a function and apply it + if callable(criterion): + result = criterion(e_idx) + _validate_type(result, tuple, "reject/flat output") + if len(result) != 2: + raise TypeError( + "Function criterion must return a tuple of length 2" + ) + cri_truth, reasons = result + _validate_type(cri_truth, (bool, np.bool_), cri_truth, "bool") + _validate_type( + reasons, (str, list, tuple), reasons, "str, list, or tuple" + ) + idx_deltas = np.where(np.logical_and(cri_truth, checkable_idx))[ + 0 + ] + else: + deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1) + idx_deltas = np.where( + np.logical_and(f(deltas, criterion), checkable_idx) + )[0] + + if len(idx_deltas) > 0: + # Check to verify that refl is a callable that returns + # (bool, reason). Reason must be a str/list/tuple. + # If using tuple + if callable(refl): + if isinstance(reasons, str): + reasons = (reasons,) + for idx, reason in enumerate(reasons): + _validate_type(reason, str, reason) + bad_tuple += tuple(reasons) + else: + bad_names = [ch_names[idx[i]] for i in idx_deltas] + if not has_printed: + logger.info( + f" Rejecting {t} epoch based on {name} : " + f"{bad_names}" + ) + has_printed = True + if not full_report: + return False + else: + bad_tuple += tuple(bad_names) + + if not full_report: + return True + else: + if bad_tuple == (): + return True, None + else: + return False, bad_tuple + + +def _read_one_epoch_file(f, tree, preload): + """Read a single FIF file.""" + with f as fid: + # Read the measurement info + info, meas = read_meas_info(fid, tree, clean_bads=True) + + # read in the Annotations if they exist + annotations = _read_annotations_fif(fid, tree) + try: + events, mappings = _read_events_fif(fid, tree) + except ValueError as e: + # Allow reading empty epochs (ToDo: Maybe not anymore in the future) + if str(e) == "Could not find any events": + events = np.empty((0, 3), dtype=np.int32) + mappings = dict() + else: + raise + # Metadata + metadata = None + metadata_tree = dir_tree_find(tree, FIFF.FIFFB_MNE_METADATA) + if len(metadata_tree) > 0: + for dd in metadata_tree[0]["directory"]: + kind = dd.kind + pos = dd.pos + if kind == FIFF.FIFF_DESCRIPTION: + metadata = read_tag(fid, pos).data + metadata = _prepare_read_metadata(metadata) + break + + # Locate the data of interest + processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA) + del meas + if len(processed) == 0: + raise ValueError("Could not find processed data") + + epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS) + if len(epochs_node) == 0: + # before version 0.11 we errantly saved with this tag instead of + # an MNE tag + epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS) + if len(epochs_node) == 0: + epochs_node = dir_tree_find(tree, 122) # 122 used before v0.11 + if len(epochs_node) == 0: + raise ValueError("Could not find epochs data") + + my_epochs = epochs_node[0] + + # Now find the data in the block + data = None + data_tag = None + bmin, bmax = None, None + baseline = None + selection = None + drop_log = None + raw_sfreq = None + reject_params = {} + for k in 
range(my_epochs["nent"]): + kind = my_epochs["directory"][k].kind + pos = my_epochs["directory"][k].pos + if kind == FIFF.FIFF_FIRST_SAMPLE: + tag = read_tag(fid, pos) + first = int(tag.data.item()) + elif kind == FIFF.FIFF_LAST_SAMPLE: + tag = read_tag(fid, pos) + last = int(tag.data.item()) + elif kind == FIFF.FIFF_EPOCH: + # delay reading until later + fid.seek(pos, 0) + data_tag = _read_tag_header(fid, pos) + data_tag.type = data_tag.type ^ (1 << 30) + elif kind in [FIFF.FIFF_MNE_BASELINE_MIN, 304]: + # Constant 304 was used before v0.11 + tag = read_tag(fid, pos) + bmin = float(tag.data.item()) + elif kind in [FIFF.FIFF_MNE_BASELINE_MAX, 305]: + # Constant 305 was used before v0.11 + tag = read_tag(fid, pos) + bmax = float(tag.data.item()) + elif kind == FIFF.FIFF_MNE_EPOCHS_SELECTION: + tag = read_tag(fid, pos) + selection = np.array(tag.data) + elif kind == FIFF.FIFF_MNE_EPOCHS_DROP_LOG: + tag = read_tag(fid, pos) + drop_log = tag.data + drop_log = json.loads(drop_log) + drop_log = tuple(tuple(x) for x in drop_log) + elif kind == FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT: + tag = read_tag(fid, pos) + reject_params = json.loads(tag.data) + elif kind == FIFF.FIFF_MNE_EPOCHS_RAW_SFREQ: + tag = read_tag(fid, pos) + raw_sfreq = tag.data + + if bmin is not None or bmax is not None: + baseline = (bmin, bmax) + + n_samp = last - first + 1 + logger.info(" Found the data of interest:") + logger.info( + f" t = {1000 * first / info['sfreq']:10.2f} ... " + f"{1000 * last / info['sfreq']:10.2f} ms" + ) + if info["comps"] is not None: + logger.info( + f" {len(info['comps'])} CTF compensation matrices available" + ) + + # Inspect the data + if data_tag is None: + raise ValueError("Epochs data not found") + epoch_shape = (len(info["ch_names"]), n_samp) + size_expected = len(events) * np.prod(epoch_shape) + # on read double-precision is always used + if data_tag.type == FIFF.FIFFT_FLOAT: + datatype = np.float64 + fmt = ">f4" + elif data_tag.type == FIFF.FIFFT_DOUBLE: + datatype = np.float64 + fmt = ">f8" + elif data_tag.type == FIFF.FIFFT_COMPLEX_FLOAT: + datatype = np.complex128 + fmt = ">c8" + elif data_tag.type == FIFF.FIFFT_COMPLEX_DOUBLE: + datatype = np.complex128 + fmt = ">c16" + fmt_itemsize = np.dtype(fmt).itemsize + assert fmt_itemsize in (4, 8, 16) + size_actual = data_tag.size // fmt_itemsize - 16 // fmt_itemsize + + if not size_actual == size_expected: + raise ValueError( + f"Incorrect number of samples ({size_actual} instead of " + f"{size_expected})." + ) + + # Calibration factors + cals = np.array( + [ + [info["chs"][k]["cal"] * info["chs"][k].get("scale", 1.0)] + for k in range(info["nchan"]) + ], + np.float64, + ) + + # Read the data + if preload: + data = read_tag(fid, data_tag.pos).data.astype(datatype) + data *= cals + + # Put it all together + tmin = first / info["sfreq"] + tmax = last / info["sfreq"] + event_id = ( + {str(e): e for e in np.unique(events[:, 2])} + if mappings is None + else mappings + ) + # In case epochs didn't have a FIFF.FIFF_MNE_EPOCHS_SELECTION tag + # (version < 0.8): + if selection is None: + selection = np.arange(len(events)) + if drop_log is None: + drop_log = ((),) * len(events) + + return ( + info, + data, + data_tag, + events, + event_id, + metadata, + tmin, + tmax, + baseline, + selection, + drop_log, + epoch_shape, + cals, + reject_params, + fmt, + annotations, + raw_sfreq, + ) + + +@verbose +def read_epochs(fname, proj=True, preload=True, verbose=None) -> "EpochsFIF": + """Read epochs from a fif file. 
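A usage sketch for this reader; the filename is illustrative, and the `-epo.fif` suffix matches the naming convention that `EpochsFIF` checks below:

```python
import mne

epochs = mne.read_epochs("sample-epo.fif", preload=False)  # lazy reading
data = epochs.get_data()  # epochs are pulled from disk on demand
```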
+ + Parameters + ---------- + %(fname_epochs)s + %(proj_epochs)s + preload : bool + If True, read all epochs from disk immediately. If ``False``, epochs + will be read on demand. + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + The epochs. + """ + return EpochsFIF(fname, proj, preload, verbose) + + +class _RawContainer: + """Helper for a raw data container.""" + + def __init__(self, fid, data_tag, event_samps, epoch_shape, cals, fmt): + self.fid = fid + self.data_tag = data_tag + self.event_samps = event_samps + self.epoch_shape = epoch_shape + self.cals = cals + self.proj = False + self.fmt = fmt + + def __del__(self): # noqa: D105 + self.fid.close() + + +@fill_doc +class EpochsFIF(BaseEpochs): + """Epochs read from disk. + + Parameters + ---------- + %(fname_epochs)s + %(proj_epochs)s + preload : bool + If True, read all epochs from disk immediately. If False, epochs will + be read on demand. + %(verbose)s + + See Also + -------- + mne.Epochs + mne.epochs.combine_event_ids + mne.Epochs.equalize_event_counts + """ + + @verbose + def __init__(self, fname, proj=True, preload=True, verbose=None): + from .io.base import _get_fname_rep + + if _path_like(fname): + check_fname( + fname=fname, + filetype="epochs", + endings=("-epo.fif", "-epo.fif.gz", "_epo.fif", "_epo.fif.gz"), + ) + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") + elif not preload: + raise ValueError("preload must be used with file-like objects") + + fnames = [fname] + fname_rep = _get_fname_rep(fname) + ep_list = list() + raw = list() + for fname in fnames: + logger.info(f"Reading {fname_rep} ...") + fid, tree, _ = fiff_open(fname, preload=preload) + next_fname = _get_next_fname(fid, fname, tree) + ( + info, + data, + data_tag, + events, + event_id, + metadata, + tmin, + tmax, + baseline, + selection, + drop_log, + epoch_shape, + cals, + reject_params, + fmt, + annotations, + raw_sfreq, + ) = _read_one_epoch_file(fid, tree, preload) + + if (events[:, 0] < 0).any(): + events = events.copy() + warn( + "Incorrect events detected on disk, setting event " + "numbers to consecutive increasing integers" + ) + events[:, 0] = np.arange(1, len(events) + 1) + # here we ignore missing events, since users should already be + # aware of missing events if they have saved data that way + # we also retain original baseline without re-applying baseline + # correction (data is being baseline-corrected when written to + # disk) + epoch = BaseEpochs( + info, + data, + events, + event_id, + tmin, + tmax, + baseline=None, + metadata=metadata, + on_missing="ignore", + selection=selection, + drop_log=drop_log, + proj=False, + verbose=False, + raw_sfreq=raw_sfreq, + ) + epoch.baseline = baseline + epoch._do_baseline = False # might be superfluous but won't hurt + ep_list.append(epoch) + + if not preload: + # store everything we need to index back to the original data + raw.append( + _RawContainer( + fiff_open(fname)[0], + data_tag, + events[:, 0].copy(), + epoch_shape, + cals, + fmt, + ) + ) + + if next_fname is not None: + fnames.append(next_fname) + + unsafe_annot_add = raw_sfreq is None + ( + info, + data, + raw_sfreq, + events, + event_id, + tmin, + tmax, + metadata, + baseline, + selection, + drop_log, + ) = _concatenate_epochs( + ep_list, + with_data=preload, + add_offset=False, + on_mismatch="raise", + ) + # we need this uniqueness for non-preloaded data to work properly + if len(np.unique(events[:, 0])) != len(events): + raise RuntimeError("Event time samples were not unique") + + # correct the drop log 
+ assert len(drop_log) % len(fnames) == 0 + step = len(drop_log) // len(fnames) + offsets = np.arange(step, len(drop_log) + 1, step) + drop_log = list(drop_log) + for i1, i2 in zip(offsets[:-1], offsets[1:]): + other_log = drop_log[i1:i2] + for k, (a, b) in enumerate(zip(drop_log, other_log)): + if a == ("IGNORED",) and b != ("IGNORED",): + drop_log[k] = b + drop_log = tuple(drop_log[:step]) + + # call BaseEpochs constructor + # again, ensure we're retaining the baseline period originally loaded + # from disk without trying to re-apply baseline correction + super().__init__( + info, + data, + events, + event_id, + tmin, + tmax, + baseline=None, + raw=raw, + proj=proj, + preload_at_end=False, + on_missing="ignore", + selection=selection, + drop_log=drop_log, + filename=fname_rep, + metadata=metadata, + verbose=verbose, + raw_sfreq=raw_sfreq, + annotations=annotations, + **reject_params, + ) + self.baseline = baseline + self._do_baseline = False + # use the private property instead of drop_bad so that epochs + # are not all read from disk for preload=False + self._bad_dropped = True + # private property to suggest that people re-save epochs if they add + # annotations + self._unsafe_annot_add = unsafe_annot_add + + @verbose + def _get_epoch_from_raw(self, idx, verbose=None): + """Load one epoch from disk.""" + # Find the right file and offset to use + event_samp = self.events[idx, 0] + for raw in self._raw: + idx = np.where(raw.event_samps == event_samp)[0] + if len(idx) == 1: + fmt = raw.fmt + idx = idx[0] + size = np.prod(raw.epoch_shape) * np.dtype(fmt).itemsize + offset = idx * size + 16 # 16 = Tag header + break + else: + # read the correct subset of the data + raise RuntimeError( + "Correct epoch could not be found, please " + "contact mne-python developers" + ) + # the following is equivalent to this, but faster: + # + # >>> data = read_tag(raw.fid, raw.data_tag.pos).data.astype(float) + # >>> data *= raw.cals[np.newaxis, :, :] + # >>> data = data[idx] + # + # Eventually this could be refactored in io/tag.py if other functions + # could make use of it + raw.fid.seek(raw.data_tag.pos + offset, 0) + if fmt == ">c8": + read_fmt = ">f4" + elif fmt == ">c16": + read_fmt = ">f8" + else: + read_fmt = fmt + data = np.frombuffer(raw.fid.read(size), read_fmt) + if read_fmt != fmt: + data = data.view(fmt) + data = data.astype(np.complex128) + else: + data = data.astype(np.float64) + + data.shape = raw.epoch_shape + data *= raw.cals + return data + + +@fill_doc +def bootstrap(epochs, random_state=None): + """Compute epochs selected by bootstrapping. + + Parameters + ---------- + epochs : Epochs instance + epochs data to be bootstrapped + %(random_state)s + + Returns + ------- + epochs : Epochs instance + The bootstrap samples + """ + if not epochs.preload: + raise RuntimeError( + "Modifying data of epochs is only supported " + "when preloading is used. Use preload=True " + "in the constructor." 
+ ) + + rng = check_random_state(random_state) + epochs_bootstrap = epochs.copy() + n_events = len(epochs_bootstrap.events) + idx = rng_uniform(rng)(0, n_events, n_events) + epochs_bootstrap = epochs_bootstrap[idx] + return epochs_bootstrap + + +def _concatenate_epochs( + epochs_list, *, with_data=True, add_offset=True, on_mismatch="raise" +): + """Auxiliary function for concatenating epochs.""" + if not isinstance(epochs_list, list | tuple): + raise TypeError(f"epochs_list must be a list or tuple, got {type(epochs_list)}") + + # to make warning messages only occur once during concatenation + warned = False + + for ei, epochs in enumerate(epochs_list): + if not isinstance(epochs, BaseEpochs): + raise TypeError( + f"epochs_list[{ei}] must be an instance of Epochs, got {type(epochs)}" + ) + + if ( + getattr(epochs, "annotations", None) is not None + and len(epochs.annotations) > 0 + and not warned + ): + warned = True + warn( + "Concatenation of Annotations within Epochs is not supported yet. All " + "annotations will be dropped." + ) + + # create a copy, so that the Annotations are not modified in place + # from the original object + epochs = epochs.copy() + epochs.set_annotations(None) + out = epochs_list[0] + offsets = [0] + if with_data: + out.drop_bad() + offsets.append(len(out)) + events = [out.events] + metadata = [out.metadata] + baseline, tmin, tmax = out.baseline, out.tmin, out.tmax + raw_sfreq = out._raw_sfreq + info = deepcopy(out.info) + drop_log = out.drop_log + event_id = deepcopy(out.event_id) + selection = out.selection + # offset is the last epoch + tmax + 10 second + shift = np.int64((10 + tmax) * out.info["sfreq"]) + # Allow reading empty epochs (ToDo: Maybe not anymore in the future) + if out._allow_empty: + events_offset = 0 + else: + events_offset = int(np.max(events[0][:, 0])) + shift + events_offset = np.int64(events_offset) + events_overflow = False + warned = False + for ii, epochs in enumerate(epochs_list[1:], 1): + _ensure_infos_match(epochs.info, info, f"epochs[{ii}]", on_mismatch=on_mismatch) + if not np.allclose(epochs.times, epochs_list[0].times): + raise ValueError("Epochs must have same times") + + if epochs.baseline != baseline: + raise ValueError("Baseline must be same for all epochs") + + if epochs._raw_sfreq != raw_sfreq and not warned: + warned = True + warn( + "The original raw sampling rate of the Epochs does not " + "match for all Epochs. Please proceed cautiously." + ) + + # compare event_id + common_keys = list(set(event_id).intersection(set(epochs.event_id))) + for key in common_keys: + if not event_id[key] == epochs.event_id[key]: + msg = ( + "event_id values must be the same for identical keys " + 'for all concatenated epochs. Key "{}" maps to {} in ' + "some epochs and to {} in others." 
+ ) + raise ValueError(msg.format(key, event_id[key], epochs.event_id[key])) + + if with_data: + epochs.drop_bad() + offsets.append(len(epochs)) + evs = epochs.events.copy() + if len(epochs.events) == 0: + warn("One of the Epochs objects to concatenate was empty.") + elif add_offset: + # We need to cast to a native Python int here to detect an + # overflow of a numpy int32 (which is the default on windows) + max_timestamp = int(np.max(evs[:, 0])) + evs[:, 0] += events_offset + events_offset += max_timestamp + shift + if events_offset > INT32_MAX: + warn( + f"Event number greater than {INT32_MAX} created, " + "events[:, 0] will be assigned consecutive increasing " + "integer values" + ) + events_overflow = True + add_offset = False # we no longer need to add offset + events.append(evs) + selection = np.concatenate((selection, epochs.selection)) + drop_log = drop_log + epochs.drop_log + event_id.update(epochs.event_id) + metadata.append(epochs.metadata) + events = np.concatenate(events, axis=0) + # check to see if we exceeded our maximum event offset + if events_overflow: + events[:, 0] = np.arange(1, len(events) + 1) + + # Create metadata object (or make it None) + n_have = sum(this_meta is not None for this_meta in metadata) + if n_have == 0: + metadata = None + elif n_have != len(metadata): + raise ValueError( + f"{n_have} of {len(metadata)} epochs instances have metadata, either " + "all or none must have metadata" + ) + else: + pd = _check_pandas_installed(strict=False) + if pd is not False: + metadata = pd.concat(metadata) + else: # dict of dicts + metadata = sum(metadata, list()) + assert len(offsets) == (len(epochs_list) if with_data else 0) + 1 + data = None + if with_data: + offsets = np.cumsum(offsets) + for start, stop, epochs in zip(offsets[:-1], offsets[1:], epochs_list): + this_data = epochs.get_data(copy=False) + if data is None: + data = np.empty( + (offsets[-1], len(out.ch_names), len(out.times)), + dtype=this_data.dtype, + ) + data[start:stop] = this_data + return ( + info, + data, + raw_sfreq, + events, + event_id, + tmin, + tmax, + metadata, + baseline, + selection, + drop_log, + ) + + +@verbose +def concatenate_epochs( + epochs_list, add_offset=True, *, on_mismatch="raise", verbose=None +): + """Concatenate a list of `~mne.Epochs` into one `~mne.Epochs` object. + + .. note:: Unlike `~mne.concatenate_raws`, this function does **not** + modify any of the input data. + + Parameters + ---------- + epochs_list : list + List of `~mne.Epochs` instances to concatenate (in that order). + add_offset : bool + If True, a fixed offset is added to the event times from different + Epochs sets, such that they are easy to distinguish after the + concatenation. + If False, the event times are unaltered during the concatenation. + %(on_mismatch_info)s + %(verbose)s + + .. versionadded:: 0.24 + + Returns + ------- + epochs : instance of EpochsArray + The result of the concatenation. All data will be loaded into memory. + + Notes + ----- + .. 
versionadded:: 0.9.0 + """ + ( + info, + data, + raw_sfreq, + events, + event_id, + tmin, + tmax, + metadata, + baseline, + selection, + drop_log, + ) = _concatenate_epochs( + epochs_list, + with_data=True, + add_offset=add_offset, + on_mismatch=on_mismatch, + ) + selection = np.where([len(d) == 0 for d in drop_log])[0] + out = EpochsArray( + data=data, + info=info, + events=events, + event_id=event_id, + tmin=tmin, + baseline=baseline, + selection=selection, + drop_log=drop_log, + proj=False, + on_missing="ignore", + metadata=metadata, + raw_sfreq=raw_sfreq, + ) + out.drop_bad() + return out + + +@verbose +def average_movements( + epochs, + head_pos=None, + orig_sfreq=None, + picks=None, + origin="auto", + weight_all=True, + int_order=8, + ext_order=3, + destination=None, + ignore_ref=False, + return_mapping=False, + mag_scale=100.0, + verbose=None, +): + """Average data using Maxwell filtering, transforming using head positions. + + Parameters + ---------- + epochs : instance of Epochs + The epochs to operate on. + %(head_pos_maxwell)s + orig_sfreq : float | None + The original sample frequency of the data (that matches the + event sample numbers in ``epochs.events``). Can be ``None`` + if data have not been decimated or resampled. + %(picks_all_data)s + %(origin_maxwell)s + weight_all : bool + If True, all channels are weighted by the SSS basis weights. + If False, only MEG channels are weighted, other channels + receive uniform weight per epoch. + %(int_order_maxwell)s + %(ext_order_maxwell)s + %(destination_maxwell_dest)s + %(ignore_ref_maxwell)s + return_mapping : bool + If True, return the mapping matrix. + %(mag_scale_maxwell)s + + .. versionadded:: 0.13 + %(verbose)s + + Returns + ------- + evoked : instance of Evoked + The averaged epochs. + + See Also + -------- + mne.preprocessing.maxwell_filter + mne.chpi.read_head_pos + + Notes + ----- + The Maxwell filtering version of this algorithm is described in [1]_, + in section V.B "Virtual signals and movement correction", equations + 40-44. For additional validation, see [2]_. + + Regularization has not been added because in testing it appears to + decrease dipole localization accuracy relative to using all components. + Fine calibration and cross-talk cancellation, however, could be added + to this algorithm based on user demand. + + .. versionadded:: 0.11 + + References + ---------- + .. [1] Taulu S. and Kajola M. "Presentation of electromagnetic + multichannel data: The signal space separation method," + Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005. + .. [2] Wehner DT, Hämäläinen MS, Mody M, Ahlfors SP. "Head movements + of children in MEG: Quantification, effects on source + estimation, and compensation. NeuroImage 40:541–550, 2008. 
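A sketch of `concatenate_epochs` defined above, assuming two compatible instances `epochs_a` and `epochs_b` (same info, times, and baseline); the result is an `EpochsArray` held fully in memory:

```python
import mne

combined = mne.concatenate_epochs([epochs_a, epochs_b], add_offset=True)
print(combined)
```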
+ """ # noqa: E501 + from .preprocessing.maxwell import ( + _check_destination, + _check_usable, + _col_norm_pinv, + _get_coil_scale, + _get_mf_picks_fix_mags, + _get_n_moments, + _get_sensor_operator, + _prep_mf_coils, + _remove_meg_projs_comps, + _reset_meg_bads, + _trans_sss_basis, + ) + + if head_pos is None: + raise TypeError("head_pos must be provided and cannot be None") + from .chpi import head_pos_to_trans_rot_t + + if not isinstance(epochs, BaseEpochs): + raise TypeError(f"epochs must be an instance of Epochs, not {type(epochs)}") + orig_sfreq = epochs.info["sfreq"] if orig_sfreq is None else orig_sfreq + orig_sfreq = float(orig_sfreq) + if isinstance(head_pos, np.ndarray): + head_pos = head_pos_to_trans_rot_t(head_pos) + trn, rot, t = head_pos + del head_pos + _check_usable(epochs, ignore_ref) + origin = _check_origin(origin, epochs.info, "head") + recon_trans = _check_destination(destination, epochs.info, True) + + logger.info(f"Aligning and averaging up to {len(epochs.events)} epochs") + if not np.array_equal(epochs.events[:, 0], np.unique(epochs.events[:, 0])): + raise RuntimeError("Epochs must have monotonically increasing events") + info_to = epochs.info.copy() + meg_picks, mag_picks, grad_picks, good_mask, _ = _get_mf_picks_fix_mags( + info_to, int_order, ext_order, ignore_ref + ) + coil_scale, mag_scale = _get_coil_scale( + meg_picks, mag_picks, grad_picks, mag_scale, info_to + ) + mult = _get_sensor_operator(epochs, meg_picks) + n_channels, n_times = len(epochs.ch_names), len(epochs.times) + other_picks = np.setdiff1d(np.arange(n_channels), meg_picks) + data = np.zeros((n_channels, n_times)) + count = 0 + # keep only MEG w/bad channels marked in "info_from" + info_from = pick_info(info_to, meg_picks[good_mask], copy=True) + all_coils_recon = _prep_mf_coils(info_to, ignore_ref=ignore_ref) + all_coils = _prep_mf_coils(info_from, ignore_ref=ignore_ref) + # remove MEG bads in "to" info + _reset_meg_bads(info_to) + # set up variables + w_sum = 0.0 + n_in, n_out = _get_n_moments([int_order, ext_order]) + S_decomp = 0.0 # this will end up being a weighted average + last_trans = None + decomp_coil_scale = coil_scale[good_mask] + exp = dict(int_order=int_order, ext_order=ext_order, head_frame=True, origin=origin) + n_in = _get_n_moments(int_order) + for ei, epoch in enumerate(epochs): + event_time = epochs.events[epochs._current - 1, 0] / orig_sfreq + use_idx = np.where(t <= event_time)[0] + if len(use_idx) == 0: + trans = info_to["dev_head_t"]["trans"] + else: + use_idx = use_idx[-1] + trans = np.vstack( + [np.hstack([rot[use_idx], trn[[use_idx]].T]), [[0.0, 0.0, 0.0, 1.0]]] + ) + loc_str = ", ".join(f"{tr:0.1f}" for tr in (trans[:3, 3] * 1000)) + if last_trans is None or not np.allclose(last_trans, trans): + logger.info( + f" Processing epoch {ei + 1} (device location: {loc_str} mm)" + ) + reuse = False + last_trans = trans + else: + logger.info(f" Processing epoch {ei + 1} (device location: same)") + reuse = True + epoch = epoch.copy() # because we operate inplace + if not reuse: + S = _trans_sss_basis(exp, all_coils, trans, coil_scale=decomp_coil_scale) + # Get the weight from the un-regularized version (eq. 44) + weight = np.linalg.norm(S[:, :n_in]) + # XXX Eventually we could do cross-talk and fine-cal here + S *= weight + S_decomp += S # eq. 41 + epoch[slice(None) if weight_all else meg_picks] *= weight + data += epoch # eq. 
42 + w_sum += weight + count += 1 + del info_from + mapping = None + if count == 0: + data.fill(np.nan) + else: + data[meg_picks] /= w_sum + data[other_picks] /= w_sum if weight_all else count + # Finalize weighted average decomp matrix + S_decomp /= w_sum + # Get recon matrix + # (We would need to include external here for regularization to work) + exp["ext_order"] = 0 + S_recon = _trans_sss_basis(exp, all_coils_recon, recon_trans) + if mult is not None: + S_decomp = mult @ S_decomp + S_recon = mult @ S_recon + exp["ext_order"] = ext_order + # We could determine regularization on basis of destination basis + # matrix, restricted to good channels, as regularizing individual + # matrices within the loop above does not seem to work. But in + # testing this seemed to decrease localization quality in most cases, + # so we do not provide the option here. + S_recon /= coil_scale + # Invert + pS_ave = _col_norm_pinv(S_decomp)[0][:n_in] + pS_ave *= decomp_coil_scale.T + # Get mapping matrix + mapping = np.dot(S_recon, pS_ave) + # Apply mapping + data[meg_picks] = np.dot(mapping, data[meg_picks[good_mask]]) + info_to["dev_head_t"] = recon_trans # set the reconstruction transform + evoked = epochs._evoked_from_epoch_data( + data, info_to, picks, n_events=count, kind="average", comment=epochs._name + ) + _remove_meg_projs_comps(evoked, ignore_ref) + logger.info(f"Created Evoked dataset from {count} epochs") + return (evoked, mapping) if return_mapping else evoked + + +@verbose +def make_fixed_length_epochs( + raw, + duration=1.0, + preload=False, + reject_by_annotation=True, + proj=True, + overlap=0.0, + id=1, # noqa: A002 + verbose=None, +): + """Divide continuous raw data into equal-sized consecutive epochs. + + Parameters + ---------- + raw : instance of Raw + Raw data to divide into segments. + duration : float + Duration of each epoch in seconds. Defaults to 1. + %(preload)s + %(reject_by_annotation_epochs)s + + .. versionadded:: 0.21.0 + %(proj_epochs)s + + .. versionadded:: 0.22.0 + overlap : float + The overlap between epochs, in seconds. Must be + ``0 <= overlap < duration``. Default is 0, i.e., no overlap. + + .. versionadded:: 0.23.0 + id : int + The id to use (default 1). + + .. versionadded:: 0.24.0 + %(verbose)s + + Returns + ------- + epochs : instance of Epochs + Segmented data. + + Notes + ----- + .. versionadded:: 0.20 + """ + events = make_fixed_length_events(raw, id=id, duration=duration, overlap=overlap) + delta = 1.0 / raw.info["sfreq"] + return Epochs( + raw, + events, + event_id=[id], + tmin=0, + tmax=duration - delta, + baseline=None, + preload=preload, + reject_by_annotation=reject_by_annotation, + proj=proj, + verbose=verbose, + ) diff --git a/mne/event.py b/mne/event.py new file mode 100644 index 0000000..723615e --- /dev/null +++ b/mne/event.py @@ -0,0 +1,1691 @@ +"""IO with fif files containing events.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +from collections.abc import Sequence +from pathlib import Path + +import numpy as np + +from ._fiff.constants import FIFF +from ._fiff.open import fiff_open +from ._fiff.pick import pick_channels +from ._fiff.tag import read_tag +from ._fiff.tree import dir_tree_find +from ._fiff.write import end_block, start_and_end_file, start_block, write_int +from .utils import ( + _check_fname, + _check_integer_or_list, + _check_on_missing, + _check_option, + _get_stim_channel, + _on_missing, + _pl, + _validate_type, + check_fname, + fill_doc, + logger, + verbose, + warn, +) + + +@fill_doc +def pick_events(events, include=None, exclude=None, step=False): + """Select some :term:`events`. + + Parameters + ---------- + %(events)s + include : int | list | None + An event ID to include, or a list of them. + If None, all events are included. + exclude : int | list | None + An event ID to exclude, or a list of them. + If None, no event is excluded. If include is not None, + the exclude parameter is ignored. + step : bool + If True (default is False), events have a step format according + to the argument output='step' in the function find_events(). + In this case, the last two columns are considered in the inclusion/ + exclusion criteria. + + Returns + ------- + events : array, shape (n_events, 3) + The list of events. + """ + if include is not None: + include = _check_integer_or_list(include, "include") + mask = np.zeros(len(events), dtype=bool) + for e in include: + mask = np.logical_or(mask, events[:, 2] == e) + if step: + mask = np.logical_or(mask, events[:, 1] == e) + events = events[mask] + elif exclude is not None: + exclude = _check_integer_or_list(exclude, "exclude") + mask = np.ones(len(events), dtype=bool) + for e in exclude: + mask = np.logical_and(mask, events[:, 2] != e) + if step: + mask = np.logical_and(mask, events[:, 1] != e) + events = events[mask] + else: + events = np.copy(events) + + if len(events) == 0: + raise RuntimeError("No events found") + + return events + + +def define_target_events( + events, reference_id, target_id, sfreq, tmin, tmax, new_id=None, fill_na=None +): + """Define new events by co-occurrence of existing events. + + This function can be used to evaluate events depending on the + temporal lag to another event. For example, this can be used to + analyze evoked responses that were followed by a button press within + a defined time window. + + Parameters + ---------- + events : ndarray + Array as returned by mne.find_events. + reference_id : int + The reference event. The event defining the epoch of interest. + target_id : int + The target event: the event that must co-occur within a certain time + window around the reference event. + sfreq : float + The sampling frequency of the data. + tmin : float + The lower limit of the time window, in seconds, relative to the + reference event. + tmax : float + The upper limit of the time window, in seconds, relative to the + reference event. + new_id : int + New ID for the new event. + fill_na : int | None + Event ID to insert when no target event occurs within the specified + time window. If None, these 'null' events will be dropped. + + Returns + ------- + new_events : ndarray + The new defined events. + lag : ndarray + Time lag between reference and target in milliseconds.
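+
+    Examples
+    --------
+    A minimal sketch (the event IDs here are hypothetical; ``events`` is
+    assumed to come from :func:`mne.find_events`): keep only those reference
+    events (ID 1) that were followed by a button press (ID 32) within
+    0.2-1.5 s, relabelling them as ID 42::
+
+        >>> new_events, lag = define_target_events(  # doctest: +SKIP
+        ...     events, reference_id=1, target_id=32,
+        ...     sfreq=raw.info['sfreq'], tmin=0.2, tmax=1.5, new_id=42)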
+ """ + if new_id is None: + new_id = reference_id + + tsample = 1e3 / sfreq + imin = int(tmin * sfreq) + imax = int(tmax * sfreq) + + new_events = [] + lag = [] + for event in events.copy().astype(int): + if event[2] == reference_id: + lower = event[0] + imin + upper = event[0] + imax + res = events[ + (events[:, 0] > lower) + & (events[:, 0] < upper) + & (events[:, 2] == target_id) + ] + if res.any(): + lag += [event[0] - res[0][0]] + event[2] = new_id + new_events += [event] + elif fill_na is not None: + event[2] = fill_na + new_events += [event] + lag.append(np.nan) + + new_events = np.array(new_events) + + with np.errstate(invalid="ignore"): # casting nans + lag = np.abs(lag, dtype="f8") + if lag.any(): + lag *= tsample + else: + lag = np.array([]) + + return new_events if new_events.any() else np.array([]), lag + + +def _read_events_fif(fid, tree): + """Aux function.""" + # Find the desired block + events = dir_tree_find(tree, FIFF.FIFFB_MNE_EVENTS) + + if len(events) == 0: + fid.close() + raise ValueError("Could not find event data") + + events = events[0] + event_list = None + event_id = None + for d in events["directory"]: + kind = d.kind + pos = d.pos + if kind == FIFF.FIFF_MNE_EVENT_LIST: + tag = read_tag(fid, pos) + event_list = tag.data + break + if event_list is None: + raise ValueError("Could not find any events") + else: + event_list.shape = (-1, 3) + for d in events["directory"]: + kind = d.kind + pos = d.pos + if kind == FIFF.FIFF_DESCRIPTION: + tag = read_tag(fid, pos) + event_id = tag.data + m_ = [[s[::-1] for s in m[::-1].split(":", 1)] for m in event_id.split(";")] + event_id = {k: int(v) for v, k in m_} + break + elif kind == FIFF.FIFF_MNE_EVENT_COMMENTS: + tag = read_tag(fid, pos) + event_id = tag.data + event_id = event_id.tobytes().decode("latin-1").split("\x00")[:-1] + assert len(event_id) == len(event_list) + event_id = {k: v[2] for k, v in zip(event_id, event_list)} + break + return event_list, event_id + + +@verbose +def read_events( + filename, + include=None, + exclude=None, + mask=None, + mask_type="and", + return_event_id=False, + verbose=None, +): + """Read :term:`events` from fif or text file. + + See :ref:`tut-events-vs-annotations` and :ref:`tut-event-arrays` + for more information about events. + + Parameters + ---------- + filename : path-like + Name of the input file. + If the extension is ``.fif``, events are read assuming + the file is in FIF format, otherwise (e.g., ``.eve``, + ``.lst``, ``.txt``) events are read as coming from text. + Note that new format event files do not contain + the ``"time"`` column (used to be the second column). + include : int | list | None + A event id to include or a list of them. + If None all events are included. + exclude : int | list | None + A event id to exclude or a list of them. + If None no event is excluded. If include is not None + the exclude parameter is ignored. + mask : int | None + The value of the digital mask to apply to the stim channel values. + If None (default), no masking is performed. + mask_type : ``'and'`` | ``'not_and'`` + The type of operation between the mask and the trigger. + Choose 'and' (default) for MNE-C masking behavior. + + .. versionadded:: 0.13 + return_event_id : bool + If True, ``event_id`` will be returned. This is only possible for + ``-annot.fif`` files produced with MNE-C ``mne_browse_raw``. + + .. versionadded:: 0.20 + %(verbose)s + + Returns + ------- + %(events)s + event_id : dict + Dictionary of ``{str: int}`` mappings of event IDs. 
+ + See Also + -------- + find_events, write_events + + Notes + ----- + This function will discard the offset line (i.e., first line with zero + event number) if it is present in a text file. + + For more information on ``mask`` and ``mask_type``, see + :func:`mne.find_events`. + """ + check_fname( + filename, + "events", + ( + ".eve", + "-eve.fif", + "-eve.fif.gz", + "-eve.lst", + "-eve.txt", + "_eve.fif", + "_eve.fif.gz", + "_eve.lst", + "_eve.txt", + "-annot.fif", # MNE-C annot + ), + ) + filename = Path(filename) + if filename.suffix in (".fif", ".gz"): + fid, tree, _ = fiff_open(filename) + with fid as f: + event_list, event_id = _read_events_fif(f, tree) + # hack fix for windows to avoid bincount problems + event_list = event_list.astype(int) + else: + # Have to read this in as float64 then convert because old style + # eve/lst files had a second float column that will raise errors + lines = np.loadtxt(filename, dtype=np.float64).astype(int) + if len(lines) == 0: + raise ValueError("No text lines found") + + if lines.ndim == 1: # Special case for only one event + lines = lines[np.newaxis, :] + + if len(lines[0]) == 4: # Old format eve/lst + goods = [0, 2, 3] # Omit "time" variable + elif len(lines[0]) == 3: + goods = [0, 1, 2] + else: + raise ValueError("Unknown number of columns in event text file") + + event_list = lines[:, goods] + if mask is not None and event_list.shape[0] > 0 and event_list[0, 2] == 0: + event_list = event_list[1:] + warn("first row of event file discarded (zero-valued)") + event_id = None + + event_list = pick_events(event_list, include, exclude) + unmasked_len = event_list.shape[0] + if mask is not None: + event_list = _mask_trigs(event_list, mask, mask_type) + masked_len = event_list.shape[0] + if masked_len < unmasked_len: + warn(f"{unmasked_len - masked_len} of {unmasked_len} events masked") + out = event_list + if return_event_id: + if event_id is None: + raise RuntimeError("No event_id found in the file") + out = (out, event_id) + return out + + +@verbose +def write_events(filename, events, *, overwrite=False, verbose=None): + """Write :term:`events` to file. + + Parameters + ---------- + filename : path-like + Name of the output file. + If the extension is ``.fif``, events are written in + binary FIF format, otherwise (e.g., ``.eve``, + ``.lst``, ``.txt``) events are written as plain text. + Note that new format event files do not contain + the ``"time"`` column (used to be the second column). + %(events)s + %(overwrite)s + %(verbose)s + + See Also + -------- + read_events + """ + filename = _check_fname(filename, overwrite=overwrite) + check_fname( + filename, + "events", + ( + ".eve", + "-eve.fif", + "-eve.fif.gz", + "-eve.lst", + "-eve.txt", + "_eve.fif", + "_eve.fif.gz", + "_eve.lst", + "_eve.txt", + ), + ) + if filename.suffix in (".fif", ".gz"): + # Start writing... 
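+        # The FIF branch stores the whole events array (transposed) as a
+        # single FIFF_MNE_EVENT_LIST integer matrix inside a FIFFB_MNE_EVENTS
+        # block; the text branch below instead writes one
+        # "sample previous-value event-id" row per event.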
+ with start_and_end_file(filename) as fid: + start_block(fid, FIFF.FIFFB_MNE_EVENTS) + write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, events.T) + end_block(fid, FIFF.FIFFB_MNE_EVENTS) + else: + with open(filename, "w") as f: + for e in events: + f.write(f"{e[0]:6d} {e[1]:6d} {e[2]:3d}\n") + + +def _find_stim_steps(data, first_samp, pad_start=None, pad_stop=None, merge=0): + changed = np.diff(data, axis=1) != 0 + idx = np.where(np.all(changed, axis=0))[0] + if len(idx) == 0: + return np.empty((0, 3), dtype="int32") + + pre_step = data[0, idx] + idx += 1 + post_step = data[0, idx] + idx += first_samp + steps = np.c_[idx, pre_step, post_step] + + if pad_start is not None: + v = steps[0, 1] + if v != pad_start: + steps = np.insert(steps, 0, [0, pad_start, v], axis=0) + + if pad_stop is not None: + v = steps[-1, 2] + if v != pad_stop: + last_idx = len(data[0]) + first_samp + steps = np.append(steps, [[last_idx, v, pad_stop]], axis=0) + + if merge != 0: + diff = np.diff(steps[:, 0]) + idx = diff <= abs(merge) + if np.any(idx): + where = np.where(idx)[0] + keep = np.logical_not(idx) + if merge > 0: + # drop the earlier event + steps[where + 1, 1] = steps[where, 1] + keep = np.append(keep, True) + else: + # drop the later event + steps[where, 2] = steps[where + 1, 2] + keep = np.insert(keep, 0, True) + + is_step = steps[:, 1] != steps[:, 2] + keep = np.logical_and(keep, is_step) + steps = steps[keep] + + return steps + + +def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0, stim_channel=None): + """Find all steps in data from a stim channel. + + Parameters + ---------- + raw : Raw object + The raw data. + pad_start : None | int + Values to assume outside of the stim channel (e.g., if pad_start=0 and + the stim channel starts with value 5, an event of [0, 0, 5] will be + inserted at the beginning). With None, no steps will be inserted. + pad_stop : None | int + Values to assume outside of the stim channel, see ``pad_start``. + merge : int + Merge steps occurring in neighboring samples. The integer value + indicates over how many samples events should be merged, and the sign + indicates in which direction they should be merged (negative means + towards the earlier event, positive towards the later event). + stim_channel : None | str | list of str + Name of the stim channel or all the stim channels + affected by the trigger. If None, the config variables + 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', + etc. are read. If these are not found, it will default to + 'STI101' or 'STI 014', whichever is present. + + Returns + ------- + steps : array, shape = (n_samples, 3) + For each step in the stim channel the values [sample, v_from, v_to]. + The first column contains the event time in samples (the first sample + with the new value). The second column contains the stim channel value + before the step, and the third column contains the value after the step. + + See Also + -------- + find_events : More sophisticated options for finding events in a Raw file.
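+
+    Examples
+    --------
+    A minimal sketch (``raw`` is assumed to contain a stim channel). For a
+    stim channel whose values go ``0 -> 5 -> 0``, two steps are returned,
+    e.g. ``[[t1, 0, 5], [t2, 5, 0]]``::
+
+        >>> steps = find_stim_steps(raw)  # doctest: +SKIP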
+ """ + # pull stim channel from config if necessary + stim_channel = _get_stim_channel(stim_channel, raw.info) + + picks = pick_channels(raw.info["ch_names"], include=stim_channel, ordered=False) + if len(picks) == 0: + raise ValueError("No stim channel found to extract event triggers.") + data, _ = raw[picks, :] + if np.any(data < 0): + warn("Trigger channel contains negative values, using absolute value.") + data = np.abs(data) # make sure trig channel is positive + data = data.astype(np.int64) + + return _find_stim_steps( + data, raw.first_samp, pad_start=pad_start, pad_stop=pad_stop, merge=merge + ) + + +@verbose +def _find_events( + data, + first_samp, + *, + verbose=None, + output="onset", + consecutive="increasing", + min_samples=0, + mask=None, + uint_cast=False, + mask_type="and", + initial_event=False, + ch_name=None, +): + """Help find events.""" + assert data.shape[0] == 1 # data should be only a row vector + + if min_samples > 0: + merge = int(min_samples // 1) + if merge == min_samples: + merge -= 1 + else: + merge = 0 + + data = data.astype(np.int64) + if uint_cast: + data = data.astype(np.uint16).astype(np.int64) + if data.min() < 0: + warn( + "Trigger channel contains negative values, using absolute " + "value. If data were acquired on a Neuromag system with " + "STI016 active, consider using uint_cast=True to work around " + "an acquisition bug" + ) + data = np.abs(data) # make sure trig channel is positive + + events = _find_stim_steps(data, first_samp, pad_stop=0, merge=merge) + initial_value = data[0, 0] + if initial_value != 0: + if initial_event: + events = np.insert(events, 0, [first_samp, 0, initial_value], axis=0) + else: + logger.info( + f"Trigger channel {ch_name} has a non-zero initial value of " + f"{initial_value} (consider using initial_event=True to detect this " + "event)" + ) + + events = _mask_trigs(events, mask, mask_type) + + # Determine event onsets and offsets + if consecutive == "increasing": + onsets = events[:, 2] > events[:, 1] + offsets = np.logical_and( + np.logical_or(onsets, (events[:, 2] == 0)), (events[:, 1] > 0) + ) + elif consecutive: + onsets = events[:, 2] > 0 + offsets = events[:, 1] > 0 + else: + onsets = events[:, 1] == 0 + offsets = events[:, 2] == 0 + + onset_idx = np.where(onsets)[0] + offset_idx = np.where(offsets)[0] + + if len(onset_idx) == 0 or len(offset_idx) == 0: + return np.empty((0, 3), dtype="int32") + + # delete orphaned onsets/offsets + if onset_idx[0] > offset_idx[0]: + logger.info("Removing orphaned offset at the beginning of the file.") + offset_idx = np.delete(offset_idx, 0) + + if onset_idx[-1] > offset_idx[-1]: + logger.info("Removing orphaned onset at the end of the file.") + onset_idx = np.delete(onset_idx, -1) + + _check_option("output", output, ("onset", "step", "offset")) + if output == "onset": + events = events[onset_idx] + elif output == "step": + idx = np.union1d(onset_idx, offset_idx) + events = events[idx] + else: + assert output == "offset" + event_id = events[onset_idx, 2] + events = events[offset_idx] + events[:, 1] = events[:, 2] + events[:, 2] = event_id + events[:, 0] -= 1 + + logger.info(f"{len(events)} event{_pl(events)} found on stim channel {ch_name}") + logger.info(f"Event IDs: {np.unique(events[:, 2])}") + + return events + + +def _find_unique_events(events): + """Uniquify events (ie remove duplicated rows.""" + e = np.ascontiguousarray(events).view( + np.dtype((np.void, events.dtype.itemsize * events.shape[1])) + ) + _, idx = np.unique(e, return_index=True) + n_dupes = len(events) - 
len(idx) + if n_dupes > 0: + warn( + "Some events are duplicated in your different stim channels. " + f"{n_dupes} events were ignored during deduplication." + ) + return events[idx] + + +@verbose +def find_events( + raw, + stim_channel=None, + output="onset", + consecutive="increasing", + min_duration=0, + shortest_event=2, + mask=None, + uint_cast=False, + mask_type="and", + initial_event=False, + verbose=None, +): + """Find :term:`events` from raw file. + + See :ref:`tut-events-vs-annotations` and :ref:`tut-event-arrays` + for more information about events. + + Parameters + ---------- + raw : Raw object + The raw data. + stim_channel : None | str | list of str + Name of the stim channel or all the stim channels + affected by triggers. If None, the config variables + 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', + etc. are read. If these are not found, it will fall back to + 'STI 014' if present, then fall back to the first channel of type + 'stim', if present. If multiple channels are provided + then the returned events are the union of all the events + extracted from individual stim channels. + output : 'onset' | 'offset' | 'step' + Whether to report when events start, when events end, or both. + consecutive : bool | 'increasing' + If True, consider instances where the value of the events + channel changes without first returning to zero as multiple + events. If False, report only instances where the value of the + events channel changes from/to zero. If 'increasing', report + adjacent events only when the second event code is greater than + the first. + min_duration : float + The minimum duration of a change in the events channel required + to consider it as an event (in seconds). + shortest_event : int + Minimum number of samples an event must last (default is 2). If the + duration is less than this an exception will be raised. + mask : int | None + The value of the digital mask to apply to the stim channel values. + If None (default), no masking is performed. + uint_cast : bool + If True (default False), do a cast to ``uint16`` on the channel + data. This can be used to fix a bug with STI101 and STI014 in + Neuromag acquisition setups that use channel STI016 (channel 16 + turns data into e.g. -32768), similar to ``mne_fix_stim14 --32`` + in MNE-C. + + .. versionadded:: 0.12 + mask_type : 'and' | 'not_and' + The type of operation between the mask and the trigger. + Choose 'and' (default) for MNE-C masking behavior. + + .. versionadded:: 0.13 + initial_event : bool + If True (default False), an event is created if the stim channel has a + value different from 0 as its first sample. This is useful if an event + at t=0s is present. + + .. versionadded:: 0.16 + %(verbose)s + + Returns + ------- + %(events)s + + See Also + -------- + find_stim_steps : Find all the steps in the stim channel. + read_events : Read events from disk. + write_events : Write events to disk. + + Notes + ----- + .. warning:: If you are working with downsampled data, events computed + before decimation are no longer valid. Please recompute + your events after decimation, but note this reduces the + precision of event timing. 
+ + Examples + -------- + Consider data with a stim channel that looks like:: + + [0, 32, 32, 33, 32, 0] + + By default, find_events returns all samples at which the value of the + stim channel increases:: + + >>> print(find_events(raw)) # doctest: +SKIP + [[ 1 0 32] + [ 3 32 33]] + + If consecutive is False, find_events only returns the samples at which + the stim channel changes from zero to a non-zero value:: + + >>> print(find_events(raw, consecutive=False)) # doctest: +SKIP + [[ 1 0 32]] + + If consecutive is True, find_events returns samples at which the + event changes, regardless of whether it first returns to zero:: + + >>> print(find_events(raw, consecutive=True)) # doctest: +SKIP + [[ 1 0 32] + [ 3 32 33] + [ 4 33 32]] + + If output is 'offset', find_events returns the last sample of each event + instead of the first one:: + + >>> print(find_events(raw, consecutive=True, # doctest: +SKIP + ... output='offset')) + [[ 2 33 32] + [ 3 32 33] + [ 4 0 32]] + + If output is 'step', find_events returns the samples at which an event + starts or ends:: + + >>> print(find_events(raw, consecutive=True, # doctest: +SKIP + ... output='step')) + [[ 1 0 32] + [ 3 32 33] + [ 4 33 32] + [ 5 32 0]] + + To ignore spurious events, it is also possible to specify a minimum + event duration. Assuming our events channel has a sample rate of + 1000 Hz:: + + >>> print(find_events(raw, consecutive=True, # doctest: +SKIP + ... min_duration=0.002)) + [[ 1 0 32]] + + For the digital mask, if mask_type is set to 'and' it will take the + binary representation of the digital mask, e.g. 5 -> '00000101', and will + allow the values to pass where mask is one, e.g.:: + + 7 '0000111' <- trigger value + 37 '0100101' <- mask + ---------------- + 5 '0000101' + + For the digital mask, if mask_type is set to 'not_and' it will take the + binary representation of the digital mask, e.g. 5 -> '00000101', and will + block the values where mask is one, e.g.:: + + 7 '0000111' <- trigger value + 37 '0100101' <- mask + ---------------- + 2 '0000010' + """ + min_samples = min_duration * raw.info["sfreq"] + + # pull stim channel from config if necessary + try: + stim_channel = _get_stim_channel(stim_channel, raw.info) + except ValueError: + if len(raw.annotations) > 0: + raise ValueError( + "No stim channels found, but the raw object has " + "annotations. Consider using " + "mne.events_from_annotations to convert these to " + "events." + ) + else: + raise + + picks = pick_channels(raw.info["ch_names"], include=stim_channel) + if len(picks) == 0: + raise ValueError("No stim channel found to extract event triggers.") + data, _ = raw[picks, :] + + events_list = [] + for d, ch_name in zip(data, stim_channel): + events = _find_events( + d[np.newaxis, :], + raw.first_samp, + verbose=verbose, + output=output, + consecutive=consecutive, + min_samples=min_samples, + mask=mask, + uint_cast=uint_cast, + mask_type=mask_type, + initial_event=initial_event, + ch_name=ch_name, + ) + # add safety check for spurious events (for ex. from neuromag syst.) by + # checking the number of low sample events + n_short_events = np.sum(np.diff(events[:, 0]) < shortest_event) + if n_short_events > 0: + raise ValueError( + f"You have {n_short_events} events shorter than the shortest_event. " + "These are very unusual and you may want to set min_duration to a " + "larger value e.g. x / raw.info['sfreq']. Where x = 1 sample shorter " + "than the shortest event length." 
+ ) + + events_list.append(events) + + events = np.concatenate(events_list, axis=0) + events = _find_unique_events(events) + events = events[np.argsort(events[:, 0])] + return events + + +def _mask_trigs(events, mask, mask_type): + """Mask digital trigger values.""" + _check_option("mask_type", mask_type, ["not_and", "and"]) + if mask is not None: + _validate_type(mask, "int", "mask", "int or None") + n_events = len(events) + if n_events == 0: + return events.copy() + + if mask is not None: + if mask_type == "not_and": + mask = np.bitwise_not(mask) + elif mask_type != "and": + raise ValueError( + "'mask_type' should be either 'and'" + f" or 'not_and', instead of '{mask_type}'" + ) + events[:, 1:] = np.bitwise_and(events[:, 1:], mask) + events = events[events[:, 1] != events[:, 2]] + + return events + + +def merge_events(events, ids, new_id, replace_events=True): + """Merge a set of :term:`events`. + + Parameters + ---------- + events : array, shape (n_events_in, 3) + Events. + ids : array of int + The ids of events to merge. + new_id : int + The new id. + replace_events : bool + If True (default), old event ids are replaced. Otherwise, + new events will be added to the old event list. + + Returns + ------- + new_events : array, shape (n_events_out, 3) + The new events. + + Notes + ----- + Rather than merging events you can use hierarchical event_id + in Epochs. For example, here:: + + >>> event_id = {'auditory/left': 1, 'auditory/right': 2} + + And the condition 'auditory' would correspond to either 1 or 2. + + Examples + -------- + Here is quick example of the behavior:: + + >>> events = [[134, 0, 1], [341, 0, 2], [502, 0, 3]] + >>> merge_events(events, [1, 2], 12, replace_events=True) + array([[134, 0, 12], + [341, 0, 12], + [502, 0, 3]]) + >>> merge_events(events, [1, 2], 12, replace_events=False) + array([[134, 0, 1], + [134, 0, 12], + [341, 0, 2], + [341, 0, 12], + [502, 0, 3]]) + """ + events = np.asarray(events) + events_out = events.copy() + idx_touched = [] # to keep track of the original events we can keep + for col in [1, 2]: + for i in ids: + mask = events[:, col] == i + events_out[mask, col] = new_id + idx_touched.append(np.where(mask)[0]) + if not replace_events: + idx_touched = np.unique(np.concatenate(idx_touched)) + events_out = np.concatenate((events_out, events[idx_touched]), axis=0) + # Now sort in lexical order + events_out = events_out[np.lexsort(events_out.T[::-1])] + return events_out + + +@fill_doc +def shift_time_events(events, ids, tshift, sfreq): + """Shift a set of :term:`events`. + + Parameters + ---------- + %(events)s + ids : ndarray of int | None + The ids of events to shift. + tshift : float + Time-shift event. Use positive value tshift for forward shifting + the event and negative value for backward shift. + sfreq : float + The sampling frequency of the data. + + Returns + ------- + new_events : array of int, shape (n_new_events, 3) + The new events. + """ + events = events.copy() + if ids is None: + mask = slice(None) + else: + mask = np.isin(events[:, 2], ids) + events[mask, 0] += int(tshift * sfreq) + + return events + + +@fill_doc +def make_fixed_length_events( + raw, + id=1, # noqa: A002 + start=0, + stop=None, + duration=1.0, + first_samp=True, + overlap=0.0, +): + """Make a set of :term:`events` separated by a fixed duration. + + Parameters + ---------- + raw : instance of Raw + A raw object to use the data from. + id : int + The id to use (default 1). + start : float + Time of first event (in seconds). 
+ stop : float | None + Maximum time of last event (in seconds). If None, events extend to the + end of the recording. + duration : float + The duration to separate events by (in seconds). + first_samp : bool + If True (default), times will have :term:`first_samp` added to them, as + in :func:`mne.find_events`. This behavior is not desirable if the + returned events will be combined with event times that already + have :term:`first_samp` added to them, e.g. event times that come + from :func:`mne.find_events`. + overlap : float + The overlap between events (in seconds). + Must be ``0 <= overlap < duration``. + + .. versionadded:: 0.18 + + Returns + ------- + %(events)s + """ + from .io import BaseRaw + + _validate_type(raw, BaseRaw, "raw") + _validate_type(id, "int", "id") + _validate_type(duration, "numeric", "duration") + _validate_type(overlap, "numeric", "overlap") + duration, overlap = float(duration), float(overlap) + if not 0 <= overlap < duration: + raise ValueError( + f"overlap must be >=0 but < duration ({duration}), got {overlap}" + ) + + start = raw.time_as_index(start, use_rounding=True)[0] + if stop is not None: + stop = raw.time_as_index(stop, use_rounding=True)[0] + else: + stop = raw.last_samp + 1 + if first_samp: + start = start + raw.first_samp + stop = min([stop + raw.first_samp, raw.last_samp + 1]) + else: + stop = min([stop, len(raw.times)]) + # Make sure we don't go out the end of the file: + stop -= int(np.round(raw.info["sfreq"] * duration)) + # This should be inclusive due to how we generally use start and stop... + ts = np.arange(start, stop + 1, raw.info["sfreq"] * (duration - overlap)).astype( + int + ) + n_events = len(ts) + if n_events == 0: + raise ValueError( + "No events produced, check the values of start, stop, and duration" + ) + events = np.c_[ts, np.zeros(n_events, dtype=int), id * np.ones(n_events, dtype=int)] + return events + + +def concatenate_events(events, first_samps, last_samps): + """Concatenate event lists to be compatible with concatenate_raws. + + This is useful, for example, if you processed and/or changed + events in raw files separately before combining them using + :func:`mne.concatenate_raws`. + + Parameters + ---------- + events : list of array + List of :term:`events` arrays, typically each extracted from a + corresponding raw file that is being concatenated. + first_samps : list or array of int + First sample numbers of the raw files concatenated. + last_samps : list or array of int + Last sample numbers of the raw files concatenated. + + Returns + ------- + events : array + The concatenated events. + + See Also + -------- + mne.concatenate_raws + """ + _validate_type(events, list, "events") + if not (len(events) == len(last_samps) and len(events) == len(first_samps)): + raise ValueError( + "events, first_samps, and last_samps must all have the same lengths" + ) + first_samps = np.array(first_samps) + last_samps = np.array(last_samps) + n_samps = np.cumsum(last_samps - first_samps + 1) + events_out = events[0] + for e, f, n in zip(events[1:], first_samps[1:], n_samps[:-1]): + # remove any skip since it doesn't exist in concatenated files + e2 = e.copy() + e2[:, 0] -= f + # add offset due to previous files, plus original file offset + e2[:, 0] += n + first_samps[0] + events_out = np.concatenate((events_out, e2), axis=0) + + return events_out + + +@fill_doc +class AcqParserFIF: + """Parser for Elekta data acquisition settings. + + This class parses parameters (e.g. 
events and averaging categories) that + are defined in the Elekta TRIUX/VectorView data acquisition software (DACQ) + and stored in ``info['acq_pars']``. It can be used to reaverage raw data + according to DACQ settings and modify original averaging settings if + necessary. + + Parameters + ---------- + %(info_not_none)s This is where the DACQ parameters will be taken from. + + Attributes + ---------- + categories : list + List of averaging categories marked active in DACQ. + events : list + List of events that are in use (referenced by some averaging category). + reject : dict + Rejection criteria from DACQ that can be used with mne.Epochs. + Note that mne does not support all DACQ rejection criteria + (e.g. spike, slope). + flat : dict + Flatness rejection criteria from DACQ that can be used with mne.Epochs. + acq_dict : dict + All DACQ parameters. + + See Also + -------- + mne.io.Raw.acqparser : Access the parser through a Raw attribute. + + Notes + ----- + Any averaging category (also non-active ones) can be accessed by indexing + as ``acqparserfif['category_name']``. + """ + + # DACQ variables always start with one of these + _acq_var_magic = ["ERF", "DEF", "ACQ", "TCP"] + + # averager related DACQ variable names (without preceding 'ERF') + # old versions (DACQ < 3.4) + _dacq_vars_compat = ( + "megMax", + "megMin", + "megNoise", + "megSlope", + "megSpike", + "eegMax", + "eegMin", + "eegNoise", + "eegSlope", + "eegSpike", + "eogMax", + "ecgMax", + "ncateg", + "nevent", + "stimSource", + "triggerMap", + "update", + "artefIgnore", + "averUpdate", + ) + + _event_vars_compat = ("Comment", "Delay") + + _cat_vars = ( + "Comment", + "Display", + "Start", + "State", + "End", + "Event", + "Nave", + "ReqEvent", + "ReqWhen", + "ReqWithin", + "SubAve", + ) + + # new versions only (DACQ >= 3.4) + _dacq_vars = _dacq_vars_compat + ( + "magMax", + "magMin", + "magNoise", + "magSlope", + "magSpike", + "version", + ) + + _event_vars = _event_vars_compat + ( + "Name", + "Channel", + "NewBits", + "OldBits", + "NewMask", + "OldMask", + ) + + def __init__(self, info): + acq_pars = info["acq_pars"] + if not acq_pars: + raise ValueError("No acquisition parameters") + self.acq_dict = dict(self._acqpars_gen(acq_pars)) + if "ERFversion" in self.acq_dict: + self.compat = False # DACQ ver >= 3.4 + elif "ERFncateg" in self.acq_dict: # probably DACQ < 3.4 + self.compat = True + else: + raise ValueError("Cannot parse acquisition parameters") + dacq_vars = self._dacq_vars_compat if self.compat else self._dacq_vars + # set instance variables + for var in dacq_vars: + val = self.acq_dict["ERF" + var] + if var[:3] in ["mag", "meg", "eeg", "eog", "ecg"]: + val = float(val) + elif var in ["ncateg", "nevent"]: + val = int(val) + setattr(self, var.lower(), val) + self.stimsource = "Internal" if self.stimsource == "1" else "External" + # collect all events and categories + self._events = self._events_from_acq_pars() + self._categories = self._categories_from_acq_pars() + # mark events that are used by a category + for cat in self._categories.values(): + if cat["event"]: + self._events[cat["event"]]["in_use"] = True + if cat["reqevent"]: + self._events[cat["reqevent"]]["in_use"] = True + # make mne rejection dicts based on the averager parameters + self.reject = { + "grad": self.megmax, + "eeg": self.eegmax, + "eog": self.eogmax, + "ecg": self.ecgmax, + } + if not self.compat: + self.reject["mag"] = self.magmax + self.reject = {k: float(v) for k, v in self.reject.items() if float(v) > 0} + self.flat = {"grad": self.megmin, 
"eeg": self.eegmin} + if not self.compat: + self.flat["mag"] = self.magmin + self.flat = {k: float(v) for k, v in self.flat.items() if float(v) > 0} + + def __repr__(self): # noqa: D105 + s = " bits for old DACQ versions + _compat_event_lookup = { + 1: 1, + 2: 2, + 3: 4, + 4: 8, + 5: 16, + 6: 32, + 7: 3, + 8: 5, + 9: 6, + 10: 7, + 11: 9, + 12: 10, + 13: 11, + 14: 12, + 15: 13, + 16: 14, + 17: 15, + } + events = dict() + for evnum in range(1, self.nevent + 1): + evnum_s = str(evnum).zfill(2) # '01', '02' etc. + evdi = dict() + event_vars = self._event_vars_compat if self.compat else self._event_vars + for var in event_vars: + # name of DACQ variable, e.g. 'ERFeventNewBits01' + acq_key = "ERFevent" + var + evnum_s + # corresponding dict key, e.g. 'newbits' + dict_key = var.lower() + val = self.acq_dict[acq_key] + # type convert numeric values + if dict_key in ["newbits", "oldbits", "newmask", "oldmask"]: + val = int(val) + elif dict_key in ["delay"]: + val = float(val) + evdi[dict_key] = val + evdi["in_use"] = False # __init__() will set this + evdi["index"] = evnum + if self.compat: + evdi["name"] = str(evnum) + evdi["oldmask"] = 63 + evdi["newmask"] = 63 + evdi["oldbits"] = 0 + evdi["newbits"] = _compat_event_lookup[evnum] + events[evnum] = evdi + return events + + def _acqpars_gen(self, acq_pars): + """Yield key/value pairs from ``info['acq_pars'])``.""" + key, val = "", "" + for line in acq_pars.split(): + if any([line.startswith(x) for x in self._acq_var_magic]): + key = line + val = "" + else: + if not key: + raise ValueError("Cannot parse acquisition parameters") + # DACQ splits items with spaces into multiple lines + val += " " + line if val else line + yield key, val + + def _categories_from_acq_pars(self): + """Collect DACQ averaging categories into a dict. + + Categories are keyed by the comment field in DACQ. Each category is + itself represented a dict containing the category parameters. + """ + cats = dict() + for catnum in [str(x).zfill(2) for x in range(1, self.nevent + 1)]: + catdi = dict() + # read all category variables + for var in self._cat_vars: + acq_key = "ERFcat" + var + catnum + class_key = var.lower() + val = self.acq_dict[acq_key] + catdi[class_key] = val + # some type conversions + catdi["display"] = catdi["display"] == "1" + catdi["state"] = catdi["state"] == "1" + for key in ["start", "end", "reqwithin"]: + catdi[key] = float(catdi[key]) + for key in ["nave", "event", "reqevent", "reqwhen", "subave"]: + catdi[key] = int(catdi[key]) + # some convenient extra (non-DACQ) vars + catdi["index"] = int(catnum) # index of category in DACQ list + cats[catdi["comment"]] = catdi + return cats + + def _events_mne_to_dacq(self, mne_events): + """Create list of DACQ events based on mne trigger transitions list. + + mne_events is typically given by mne.find_events (use consecutive=True + to get all transitions). Output consists of rows in the form + [t, 0, event_codes] where t is time in samples and event_codes is all + DACQ events compatible with the transition, bitwise ORed together: + e.g. [t1, 0, 5] means that events 1 and 3 occurred at time t1, + as 2**(1 - 1) + 2**(3 - 1) = 5. 
+ """ + events_ = mne_events.copy() + events_[:, 1:3] = 0 + for n, ev in self._events.items(): + if ev["in_use"]: + pre_ok = ( + np.bitwise_and(ev["oldmask"], mne_events[:, 1]) == ev["oldbits"] + ) + post_ok = ( + np.bitwise_and(ev["newmask"], mne_events[:, 2]) == ev["newbits"] + ) + ok_ind = np.where(pre_ok & post_ok) + events_[ok_ind, 2] |= 1 << (n - 1) + return events_ + + def _mne_events_to_category_t0(self, cat, mne_events, sfreq): + """Translate mne_events to epoch zero times (t0). + + First mne events (trigger transitions) are converted into DACQ events. + Then the zero times for the epochs are obtained by considering the + reference and conditional (required) events and the delay to stimulus. + """ + cat_ev = cat["event"] + cat_reqev = cat["reqevent"] + # first convert mne events to dacq event list + events = self._events_mne_to_dacq(mne_events) + # next, take req. events and delays into account + times = events[:, 0] + # indices of times where ref. event occurs + refEvents_inds = np.where(events[:, 2] & (1 << cat_ev - 1))[0] + refEvents_t = times[refEvents_inds] + if cat_reqev: + # indices of times where req. event occurs + reqEvents_inds = np.where(events[:, 2] & (1 << cat_reqev - 1))[0] + reqEvents_t = times[reqEvents_inds] + # relative (to refevent) time window where req. event + # must occur (e.g. [0 .2]) + twin = [0, (-1) ** (cat["reqwhen"]) * cat["reqwithin"]] + win = np.round(np.array(sorted(twin)) * sfreq) # to samples + refEvents_wins = refEvents_t[:, None] + win + req_acc = np.zeros(refEvents_inds.shape, dtype=bool) + for t in reqEvents_t: + # mark time windows where req. condition is satisfied + reqEvent_in_win = np.logical_and( + t >= refEvents_wins[:, 0], t <= refEvents_wins[:, 1] + ) + req_acc |= reqEvent_in_win + # drop ref. events where req. event condition is not satisfied + refEvents_inds = refEvents_inds[np.where(req_acc)] + refEvents_t = times[refEvents_inds] + # adjust for trigger-stimulus delay by delaying the ref. event + refEvents_t += int(np.round(self._events[cat_ev]["delay"] * sfreq)) + return refEvents_t + + @property + def categories(self): + """Return list of averaging categories ordered by DACQ index. + + Only returns categories marked active in DACQ. + """ + cats = sorted(self._categories_in_use.values(), key=lambda cat: cat["index"]) + return cats + + @property + def events(self): + """Return events ordered by DACQ index. + + Only returns events that are in use (referred to by a category). + """ + evs = sorted(self._events_in_use.values(), key=lambda ev: ev["index"]) + return evs + + @property + def _categories_in_use(self): + return {k: v for k, v in self._categories.items() if v["state"]} + + @property + def _events_in_use(self): + return {k: v for k, v in self._events.items() if v["in_use"]} + + def get_condition( + self, + raw, + condition=None, + stim_channel=None, + mask=None, + uint_cast=None, + mask_type="and", + delayed_lookup=True, + ): + """Get averaging parameters for a condition (averaging category). + + Output is designed to be used with the Epochs class to extract the + corresponding epochs. + + Parameters + ---------- + raw : Raw object + An instance of Raw. + condition : None | str | dict | list of dict + Condition or a list of conditions. Conditions can be strings + (DACQ comment field, e.g. 'Auditory left') or category dicts + (e.g. acqp['Auditory left'], where acqp is an instance of + AcqParserFIF). If None, get all conditions marked active in + DACQ. 
+ stim_channel : None | str | list of str + Name of the stim channel or all the stim channels + affected by the trigger. If None, the config variables + 'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2', + etc. are read. If these are not found, it will fall back to + 'STI101' or 'STI 014' if present, then fall back to the first + channel of type 'stim', if present. + mask : int | None + The value of the digital mask to apply to the stim channel values. + If None (default), no masking is performed. + uint_cast : bool + If True (default False), do a cast to ``uint16`` on the channel + data. This can be used to fix a bug with STI101 and STI014 in + Neuromag acquisition setups that use channel STI016 (channel 16 + turns data into e.g. -32768), similar to ``mne_fix_stim14 --32`` + in MNE-C. + mask_type : 'and' | 'not_and' + The type of operation between the mask and the trigger. + Choose 'and' for MNE-C masking behavior. + delayed_lookup : bool + If True, use the 'delayed lookup' procedure implemented in Elekta + software. When a trigger transition occurs, the lookup of + the new trigger value will not happen immediately at the following + sample, but with a 1-sample delay. This allows a slight + asynchrony between trigger onsets, when they are intended to be + synchronous. If you have accurate hardware and want to detect + transitions with a resolution of one sample, use + delayed_lookup=False. + + Returns + ------- + conds_data : dict or list of dict + Each dict has the following keys: + + events : array, shape (n_epochs_out, 3) + List of zero time points (t0) for the epochs matching the + condition. Use as the ``events`` parameter to Epochs. Note + that these are not (necessarily) actual events. + event_id : dict + Name of condition and index compatible with ``events``. + Should be passed as the ``event_id`` parameter to Epochs. + tmin : float + Epoch starting time relative to t0. Use as the ``tmin`` + parameter to Epochs. + tmax : float + Epoch ending time relative to t0. Use as the ``tmax`` + parameter to Epochs. + """ + if condition is None: + condition = self.categories # get all + if not isinstance(condition, list): + condition = [condition] # single cond -> listify + conds_data = list() + for cat in condition: + if isinstance(cat, str): + cat = self[cat] + mne_events = find_events( + raw, + stim_channel=stim_channel, + mask=mask, + mask_type=mask_type, + output="step", + uint_cast=uint_cast, + consecutive=True, + verbose=False, + shortest_event=1, + ) + if delayed_lookup: + ind = np.where(np.diff(mne_events[:, 0]) == 1)[0] + if 1 in np.diff(ind): + raise ValueError( + "There are several subsequent " + "transitions on the trigger channel. " + "This will not work well with " + "delayed_lookup=True. You may want to " + "check your trigger data and " + "set delayed_lookup=False." 
+ ) + mne_events[ind, 2] = mne_events[ind + 1, 2] + mne_events = np.delete(mne_events, ind + 1, axis=0) + sfreq = raw.info["sfreq"] + cat_t0_ = self._mne_events_to_category_t0(cat, mne_events, sfreq) + # make it compatible with the usual events array + cat_t0 = np.c_[ + cat_t0_, np.zeros(cat_t0_.shape), cat["index"] * np.ones(cat_t0_.shape) + ].astype(np.uint32) + cat_id = {cat["comment"]: cat["index"]} + tmin, tmax = cat["start"], cat["end"] + conds_data.append( + dict(events=cat_t0, event_id=cat_id, tmin=tmin, tmax=tmax) + ) + return conds_data[0] if len(conds_data) == 1 else conds_data + + +def match_event_names(event_names, keys, *, on_missing="raise"): + """Search a collection of event names for matching (sub-)groups of events. + + This function is particularly helpful when using grouped event names + (i.e., event names containing forward slashes ``/``). Please see the + Examples section below for a working example. + + Parameters + ---------- + event_names : array-like of str | dict + Either a collection of event names, or the ``event_id`` dictionary + mapping event names to event codes. + keys : array-like of str | str + One or multiple event names or groups to search for in ``event_names``. + on_missing : 'raise' | 'warn' | 'ignore' + How to handle situations when none of the ``keys`` can be found in + ``event_names``. If ``'warn'`` or ``'ignore'``, an empty list will be + returned. + + Returns + ------- + matches : list of str + All event names that match any of the ``keys`` provided. + + Notes + ----- + .. versionadded:: 1.0 + + Examples + -------- + Assuming the following grouped event names in the data, you could easily + query for all ``auditory`` and ``left`` event names:: + + >>> event_names = [ + ... 'auditory/left', + ... 'auditory/right', + ... 'visual/left', + ... 'visual/right' + ... ] + >>> match_event_names( + ... event_names=event_names, + ... keys=['auditory', 'left'] + ... ) + ['auditory/left', 'auditory/right', 'visual/left'] + """ + _check_on_missing(on_missing) + + if isinstance(event_names, dict): + event_names = list(event_names) + + # ensure we have a list of `keys` + if isinstance(keys, Sequence | np.ndarray) and not isinstance(keys, str): + keys = list(keys) + else: + keys = [keys] + + matches = [] + + # form the hierarchical event name mapping + for key in keys: + if not isinstance(key, str): + raise ValueError(f"keys must be strings, got {type(key)} ({key})") + + matches.extend( + name + for name in event_names + if set(key.split("/")).issubset(name.split("/")) + ) + + if not matches: + _on_missing( + on_missing=on_missing, + msg=f'Event name "{key}" could not be found. The following events ' + f'are present in the data: {", ".join(event_names)}', + error_klass=KeyError, + ) + + matches = sorted(set(matches)) # deduplicate if necessary + return matches + + +def count_events(events, ids=None): + """Count events. + + Parameters + ---------- + events : ndarray, shape (N, 3) + The events array (consisting of N events). + ids : array-like of int | None + If ``None``, count all event types present in the input. If array-like + of int, count only those event types given by ``ids``. + + Returns + ------- + counts : dict + A dictionary containing the event types as keys with their counts as + values. 
+ + Examples + -------- + >>> events = np.array([[0, 0, 1], [0, 0, 1], [0, 0, 5]]) + >>> count_events(events) + {1: 2, 5: 1} + >>> count_events(events, ids=[1, 5]) + {1: 2, 5: 1} + >>> count_events(events, ids=[1, 11]) + {1: 2, 11: 0} + """ + counts = np.bincount(events[:, 2]) + counts = {i: int(count) for i, count in enumerate(counts) if count > 0} + if ids is not None: + counts = {id_: counts.get(id_, 0) for id_ in ids} + return counts diff --git a/mne/evoked.py b/mne/evoked.py new file mode 100644 index 0000000..5fb09db --- /dev/null +++ b/mne/evoked.py @@ -0,0 +1,2166 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from __future__ import annotations # only needed for Python ≤ 3.9 + +from copy import deepcopy +from inspect import getfullargspec +from pathlib import Path + +import numpy as np + +from ._fiff.constants import FIFF +from ._fiff.meas_info import ( + ContainsMixin, + SetChannelsMixin, + _ensure_infos_match, + _read_extended_ch_info, + _rename_list, + read_meas_info, + write_meas_info, +) +from ._fiff.open import fiff_open +from ._fiff.pick import _FNIRS_CH_TYPES_SPLIT, _picks_to_idx, pick_types +from ._fiff.proj import ProjMixin +from ._fiff.tag import read_tag +from ._fiff.tree import dir_tree_find +from ._fiff.write import ( + end_block, + start_and_end_file, + start_block, + write_complex_float_matrix, + write_float, + write_float_matrix, + write_id, + write_int, + write_string, +) +from .baseline import _check_baseline, _log_rescale, rescale +from .channels.channels import InterpolationMixin, ReferenceMixin, UpdateChannelsMixin +from .channels.layout import _merge_ch_data, _pair_grad_sensors +from .defaults import _BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT, _INTERPOLATION_DEFAULT +from .filter import FilterMixin, _check_fun, detrend +from .html_templates import _get_html_template +from .parallel import parallel_func +from .time_frequency.spectrum import Spectrum, SpectrumMixin, _validate_method +from .time_frequency.tfr import AverageTFR +from .utils import ( + ExtendedTimeMixin, + SizeMixin, + _build_data_frame, + _check_fname, + _check_option, + _check_pandas_index_arguments, + _check_pandas_installed, + _check_preload, + _check_time_format, + _convert_times, + _scale_dataframe_data, + _validate_type, + check_fname, + copy_function_doc_to_method_doc, + fill_doc, + logger, + repr_html, + sizeof_fmt, + verbose, + warn, +) +from .viz import ( + plot_evoked, + plot_evoked_field, + plot_evoked_image, + plot_evoked_topo, + plot_evoked_topomap, +) +from .viz.evoked import plot_evoked_joint, plot_evoked_white +from .viz.topomap import _topomap_animation + +_aspect_dict = { + "average": FIFF.FIFFV_ASPECT_AVERAGE, + "standard_error": FIFF.FIFFV_ASPECT_STD_ERR, + "single_epoch": FIFF.FIFFV_ASPECT_SINGLE, + "partial_average": FIFF.FIFFV_ASPECT_SUBAVERAGE, + "alternating_subaverage": FIFF.FIFFV_ASPECT_ALTAVERAGE, + "sample_cut_out_by_graph": FIFF.FIFFV_ASPECT_SAMPLE, + "power_density_spectrum": FIFF.FIFFV_ASPECT_POWER_DENSITY, + "dipole_amplitude_cuvre": FIFF.FIFFV_ASPECT_DIPOLE_WAVE, + "squid_modulation_lower_bound": FIFF.FIFFV_ASPECT_IFII_LOW, + "squid_modulation_upper_bound": FIFF.FIFFV_ASPECT_IFII_HIGH, + "squid_gate_setting": FIFF.FIFFV_ASPECT_GATE, +} +_aspect_rev = {val: key for key, val in _aspect_dict.items()} + + +@fill_doc +class Evoked( + ProjMixin, + ContainsMixin, + UpdateChannelsMixin, + ReferenceMixin, + SetChannelsMixin, + InterpolationMixin, + FilterMixin, + ExtendedTimeMixin, + SizeMixin, + 
SpectrumMixin, +): + """Evoked data. + + Parameters + ---------- + fname : path-like + Name of evoked/average FIF file to load. + If None no data is loaded. + condition : int, or str + Dataset ID number (int) or comment/name (str). Optional if there is + only one data set in file. + proj : bool, optional + Apply SSP projection vectors. + kind : str + Either ``'average'`` or ``'standard_error'``. The type of data to read. + Only used if 'condition' is a str. + allow_maxshield : bool | str (default False) + If True, allow loading of data that has been recorded with internal + active compensation (MaxShield). Data recorded with MaxShield should + generally not be loaded directly, but should first be processed using + SSS/tSSS to remove the compensation signals that may also affect brain + activity. Can also be ``"yes"`` to load without eliciting a warning. + %(verbose)s + + Attributes + ---------- + %(info_not_none)s + ch_names : list of str + List of channels' names. + nave : int + Number of averaged epochs. + kind : str + Type of data, either average or standard_error. + comment : str + Comment on dataset. Can be the condition. + data : array of shape (n_channels, n_times) + Evoked response. + first : int + First time sample. + last : int + Last time sample. + tmin : float + The first time point in seconds. + tmax : float + The last time point in seconds. + times : array + Time vector in seconds. Goes from ``tmin`` to ``tmax``. Time interval + between consecutive time samples is equal to the inverse of the + sampling frequency. + baseline : None | tuple of length 2 + This attribute reflects whether the data has been baseline-corrected + (it will be a ``tuple`` then) or not (it will be ``None``). + + Notes + ----- + Evoked objects can only contain the average of a single set of conditions. + """ + + @verbose + def __init__( + self, + fname, + condition=None, + proj=True, + kind="average", + allow_maxshield=False, + *, + verbose=None, + ): + _validate_type(proj, bool, "'proj'") + # Read the requested data + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") + ( + self.info, + self.nave, + self._aspect_kind, + self.comment, + times, + self.data, + self.baseline, + ) = _read_evoked(fname, condition, kind, allow_maxshield) + self._set_times(times) + self._raw_times = self.times.copy() + self._decim = 1 + + self._update_first_last() + self.preload = True + # project and baseline correct + if proj: + self.apply_proj() + self.filename = fname + + @property + def filename(self) -> Path | None: + """The filename of the evoked object, if it exists. + + :type: :class:`~pathlib.Path` | None + """ + return self._filename + + @filename.setter + def filename(self, value): + self._filename = Path(value) if value is not None else value + + @property + def kind(self): + """The data kind.""" + return _aspect_rev[self._aspect_kind] + + @kind.setter + def kind(self, kind): + _check_option("kind", kind, list(_aspect_dict.keys())) + self._aspect_kind = _aspect_dict[kind] + + @property + def data(self): + """The data matrix.""" + return self._data + + @data.setter + def data(self, data): + """Set the data matrix.""" + self._data = data + + @fill_doc + def get_data(self, picks=None, units=None, tmin=None, tmax=None): + """Get evoked data as 2D array. + + Parameters + ---------- + %(picks_all)s + %(units)s + tmin : float | None + Start time of data to get in seconds. + tmax : float | None + End time of data to get in seconds. 
+ + Returns + ------- + data : ndarray, shape (n_channels, n_times) + A view on evoked data. + + Notes + ----- + .. versionadded:: 0.24 + """ + # Avoid circular import + from .io.base import _get_ch_factors + + picks = _picks_to_idx(self.info, picks, "all", exclude=()) + + start, stop = self._handle_tmin_tmax(tmin, tmax) + + data = self.data[picks, start:stop] + + if units is not None: + ch_factors = _get_ch_factors(self, units, picks) + data *= ch_factors[:, np.newaxis] + + return data + + @verbose + def apply_function( + self, + fun, + picks=None, + dtype=None, + n_jobs=None, + channel_wise=True, + *, + verbose=None, + **kwargs, + ): + """Apply a function to a subset of channels. + + %(applyfun_summary_evoked)s + + Parameters + ---------- + %(fun_applyfun_evoked)s + %(picks_all_data_noref)s + %(dtype_applyfun)s + %(n_jobs)s Ignored if ``channel_wise=False`` as the workload + is split across channels. + %(channel_wise_applyfun)s + + .. versionadded:: 1.6 + %(verbose)s + %(kwargs_fun)s + + Returns + ------- + self : instance of Evoked + The evoked object with transformed data. + """ + _check_preload(self, "evoked.apply_function") + picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) + + if not callable(fun): + raise ValueError("fun needs to be a function") + + data_in = self._data + if dtype is not None and dtype != self._data.dtype: + self._data = self._data.astype(dtype) + + args = getfullargspec(fun).args + getfullargspec(fun).kwonlyargs + if channel_wise is False: + if ("ch_idx" in args) or ("ch_name" in args): + raise ValueError( + "apply_function cannot access ch_idx or ch_name " + "when channel_wise=False" + ) + if "ch_idx" in args: + logger.info("apply_function requested to access ch_idx") + if "ch_name" in args: + logger.info("apply_function requested to access ch_name") + + # check the dimension of the incoming evoked data + _check_option("evoked.ndim", self._data.ndim, [2]) + + if channel_wise: + parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs) + if n_jobs == 1: + # modify data inplace to save memory + for ch_idx in picks: + if "ch_idx" in args: + kwargs.update(ch_idx=ch_idx) + if "ch_name" in args: + kwargs.update(ch_name=self.info["ch_names"][ch_idx]) + self._data[ch_idx, :] = _check_fun( + fun, data_in[ch_idx, :], **kwargs + ) + else: + # use parallel function + data_picks_new = parallel( + p_fun( + fun, + data_in[ch_idx, :], + **kwargs, + **{ + k: v + for k, v in [ + ("ch_name", self.info["ch_names"][ch_idx]), + ("ch_idx", ch_idx), + ] + if k in args + }, + ) + for ch_idx in picks + ) + for run_idx, ch_idx in enumerate(picks): + self._data[ch_idx, :] = data_picks_new[run_idx] + else: + self._data[picks, :] = _check_fun(fun, data_in[picks, :], **kwargs) + + return self + + @verbose + def apply_baseline(self, baseline=(None, 0), *, verbose=None): + """Baseline correct evoked data. + + Parameters + ---------- + %(baseline_evoked)s + Defaults to ``(None, 0)``, i.e. beginning of the data until + time point zero. + %(verbose)s + + Returns + ------- + evoked : instance of Evoked + The baseline-corrected Evoked object. + + Notes + ----- + Baseline correction can be done multiple times. + + .. versionadded:: 0.13.0 + """ + baseline = _check_baseline(baseline, times=self.times, sfreq=self.info["sfreq"]) + if self.baseline is not None and baseline is None: + raise ValueError( + "The data has already been baseline-corrected. " + "Cannot remove existing baseline correction."
+ ) + elif baseline is None: + # Do not rescale + logger.info(_log_rescale(None)) + else: + # Actually baseline correct the data. Logging happens in rescale(). + self.data = rescale(self.data, self.times, baseline, copy=False) + self.baseline = baseline + + return self + + @verbose + def save(self, fname, *, overwrite=False, verbose=None): + """Save evoked data to a file. + + Parameters + ---------- + fname : path-like + The name of the file, which should end with ``-ave.fif(.gz)`` or + ``_ave.fif(.gz)``. + %(overwrite)s + %(verbose)s + + Notes + ----- + To write multiple conditions into a single file, use + `mne.write_evokeds`. + + .. versionchanged:: 0.23 + Information on baseline correction will be stored with the data, + and will be restored when reading again via `mne.read_evokeds`. + """ + write_evokeds(fname, self, overwrite=overwrite) + + @verbose + def export(self, fname, fmt="auto", *, overwrite=False, verbose=None): + """Export Evoked to external formats. + + %(export_fmt_support_evoked)s + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + %(export_fmt_params_evoked)s + %(overwrite)s + %(verbose)s + + Notes + ----- + .. versionadded:: 1.1 + + %(export_warning_note_evoked)s + """ + from .export import export_evokeds + + export_evokeds(fname, self, fmt, overwrite=overwrite, verbose=verbose) + + def __repr__(self): # noqa: D105 + max_comment_length = 1000 + if len(self.comment) > max_comment_length: + comment = self.comment[:max_comment_length] + comment += "..." + else: + comment = self.comment + s = f"'{comment}' ({self.kind}, N={self.nave})" + s += f", {self.times[0]:0.5g} – {self.times[-1]:0.5g} s" + s += ", baseline " + if self.baseline is None: + s += "off" + else: + s += f"{self.baseline[0]:g} – {self.baseline[1]:g} s" + if self.baseline != _check_baseline( + self.baseline, + times=self.times, + sfreq=self.info["sfreq"], + on_baseline_outside_data="adjust", + ): + s += " (baseline period was cropped after baseline correction)" + s += f", {self.data.shape[0]} ch" + s += f", ~{sizeof_fmt(self._size)}" + return f"<Evoked | {s}>" + + @repr_html + def _repr_html_(self): + t = _get_html_template("repr", "evoked.html.jinja") + t = t.render( + inst=self, + filenames=( + [Path(self.filename).name] + if getattr(self, "filename", None) is not None + else None + ), + ) + return t + + @property + def ch_names(self): + """Channel names.""" + return self.info["ch_names"] + + @copy_function_doc_to_method_doc(plot_evoked) + def plot( + self, + picks=None, + exclude="bads", + unit=True, + show=True, + ylim=None, + xlim="tight", + proj=False, + hline=None, + units=None, + scalings=None, + titles=None, + axes=None, + gfp=False, + window_title=None, + spatial_colors="auto", + zorder="unsorted", + selectable=True, + noise_cov=None, + time_unit="s", + sphere=None, + *, + highlight=None, + verbose=None, + ): + return plot_evoked( + self, + picks=picks, + exclude=exclude, + unit=unit, + show=show, + ylim=ylim, + proj=proj, + xlim=xlim, + hline=hline, + units=units, + scalings=scalings, + titles=titles, + axes=axes, + gfp=gfp, + window_title=window_title, + spatial_colors=spatial_colors, + zorder=zorder, + selectable=selectable, + noise_cov=noise_cov, + time_unit=time_unit, + sphere=sphere, + highlight=highlight, + verbose=verbose, + ) + + @copy_function_doc_to_method_doc(plot_evoked_image) + def plot_image( + self, + picks=None, + exclude="bads", + unit=True, + show=True, + clim=None, + xlim="tight", + proj=False, + units=None, + scalings=None, + titles=None, + axes=None, + 
cmap="RdBu_r", + colorbar=True, + mask=None, + mask_style=None, + mask_cmap="Greys", + mask_alpha=0.25, + time_unit="s", + show_names=None, + group_by=None, + sphere=None, + ): + return plot_evoked_image( + self, + picks=picks, + exclude=exclude, + unit=unit, + show=show, + clim=clim, + xlim=xlim, + proj=proj, + units=units, + scalings=scalings, + titles=titles, + axes=axes, + cmap=cmap, + colorbar=colorbar, + mask=mask, + mask_style=mask_style, + mask_cmap=mask_cmap, + mask_alpha=mask_alpha, + time_unit=time_unit, + show_names=show_names, + group_by=group_by, + sphere=sphere, + ) + + @copy_function_doc_to_method_doc(plot_evoked_topo) + def plot_topo( + self, + layout=None, + layout_scale=0.945, + color=None, + border="none", + ylim=None, + scalings=None, + title=None, + proj=False, + vline=(0.0,), + fig_background=None, + merge_grads=False, + legend=True, + axes=None, + background_color="w", + noise_cov=None, + exclude="bads", + show=True, + ): + """. + + Notes + ----- + .. versionadded:: 0.10.0 + """ + return plot_evoked_topo( + self, + layout=layout, + layout_scale=layout_scale, + color=color, + border=border, + ylim=ylim, + scalings=scalings, + title=title, + proj=proj, + vline=vline, + fig_background=fig_background, + merge_grads=merge_grads, + legend=legend, + axes=axes, + background_color=background_color, + noise_cov=noise_cov, + exclude=exclude, + show=show, + ) + + @copy_function_doc_to_method_doc(plot_evoked_topomap) + def plot_topomap( + self, + times="auto", + *, + average=None, + ch_type=None, + scalings=None, + proj=False, + sensors=True, + show_names=False, + mask=None, + mask_params=None, + contours=6, + outlines="head", + sphere=None, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + border=_BORDER_DEFAULT, + res=64, + size=1, + cmap=None, + vlim=(None, None), + cnorm=None, + colorbar=True, + cbar_fmt="%3.1f", + units=None, + axes=None, + time_unit="s", + time_format=None, + nrows=1, + ncols="auto", + show=True, + ): + return plot_evoked_topomap( + self, + times=times, + ch_type=ch_type, + vlim=vlim, + cmap=cmap, + cnorm=cnorm, + sensors=sensors, + colorbar=colorbar, + scalings=scalings, + units=units, + res=res, + size=size, + cbar_fmt=cbar_fmt, + time_unit=time_unit, + time_format=time_format, + proj=proj, + show=show, + show_names=show_names, + mask=mask, + mask_params=mask_params, + outlines=outlines, + contours=contours, + image_interp=image_interp, + average=average, + axes=axes, + extrapolate=extrapolate, + sphere=sphere, + border=border, + nrows=nrows, + ncols=ncols, + ) + + @copy_function_doc_to_method_doc(plot_evoked_field) + def plot_field( + self, + surf_maps, + time=None, + time_label="t = %0.0f ms", + n_jobs=None, + fig=None, + vmax=None, + n_contours=21, + *, + show_density=True, + alpha=None, + interpolation="nearest", + interaction="terrain", + time_viewer="auto", + verbose=None, + ): + return plot_evoked_field( + self, + surf_maps, + time=time, + time_label=time_label, + n_jobs=n_jobs, + fig=fig, + vmax=vmax, + n_contours=n_contours, + show_density=show_density, + alpha=alpha, + interpolation=interpolation, + interaction=interaction, + time_viewer=time_viewer, + verbose=verbose, + ) + + @copy_function_doc_to_method_doc(plot_evoked_white) + def plot_white( + self, + noise_cov, + show=True, + rank=None, + time_unit="s", + sphere=None, + axes=None, + *, + spatial_colors="auto", + verbose=None, + ): + return plot_evoked_white( + self, + noise_cov=noise_cov, + rank=rank, + show=show, + time_unit=time_unit, + sphere=sphere, + 
axes=axes, + spatial_colors=spatial_colors, + verbose=verbose, + ) + + @copy_function_doc_to_method_doc(plot_evoked_joint) + def plot_joint( + self, + times="peaks", + title="", + picks=None, + exclude="bads", + show=True, + ts_args=None, + topomap_args=None, + ): + return plot_evoked_joint( + self, + times=times, + title=title, + picks=picks, + exclude=exclude, + show=show, + ts_args=ts_args, + topomap_args=topomap_args, + ) + + @fill_doc + def animate_topomap( + self, + ch_type=None, + times=None, + frame_rate=None, + butterfly=False, + blit=True, + show=True, + time_unit="s", + sphere=None, + *, + image_interp=_INTERPOLATION_DEFAULT, + extrapolate=_EXTRAPOLATE_DEFAULT, + vmin=None, + vmax=None, + verbose=None, + ): + """Make animation of evoked data as topomap timeseries. + + The animation can be paused/resumed with left mouse button. + Left and right arrow keys can be used to move backward or forward + in time. + + Parameters + ---------- + ch_type : str | None + Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg', + 'hbo', 'hbr', 'fnirs_cw_amplitude', + 'fnirs_fd_ac_amplitude', 'fnirs_fd_phase', and 'fnirs_od'. + If None, first available channel type from the above list is used. + Defaults to None. + times : array of float | None + The time points to plot. If None, 10 evenly spaced samples are + calculated over the evoked time series. Defaults to None. + frame_rate : int | None + Frame rate for the animation in Hz. If None, + frame rate = sfreq / 10. Defaults to None. + butterfly : bool + Whether to plot the data as butterfly plot under the topomap. + Defaults to False. + blit : bool + Whether to use blit to optimize drawing. In general, it is + recommended to use blit in combination with ``show=True``. If you + intend to save the animation it is better to disable blit. + Defaults to True. + show : bool + Whether to show the animation. Defaults to True. + time_unit : str + The units for the time axis, can be "ms" (default in 0.16) + or "s" (will become the default in 0.17). + + .. versionadded:: 0.16 + %(sphere_topomap_auto)s + %(image_interp_topomap)s + %(extrapolate_topomap)s + + .. versionadded:: 0.22 + %(vmin_vmax_topomap)s + + .. versionadded:: 1.1.0 + %(verbose)s + + Returns + ------- + fig : instance of matplotlib.figure.Figure + The figure. + anim : instance of matplotlib.animation.FuncAnimation + Animation of the topomap. + + Notes + ----- + .. versionadded:: 0.12.0 + """ + return _topomap_animation( + self, + ch_type=ch_type, + times=times, + frame_rate=frame_rate, + butterfly=butterfly, + blit=blit, + show=show, + time_unit=time_unit, + sphere=sphere, + image_interp=image_interp, + extrapolate=extrapolate, + vmin=vmin, + vmax=vmax, + verbose=verbose, + ) + + def as_type(self, ch_type="grad", mode="fast"): + """Compute virtual evoked using interpolated fields. + + .. Warning:: Using virtual evoked to compute inverse can yield + unexpected results. The virtual channels have ``'_v'`` appended + at the end of the names to emphasize that the data contained in + them are interpolated. + + Parameters + ---------- + ch_type : str + The destination channel type. It can be 'mag' or 'grad'. + mode : str + Either ``'accurate'`` or ``'fast'``, determines the quality of the + Legendre polynomial expansion used. ``'fast'`` should be sufficient + for most applications. + + Returns + ------- + evoked : instance of mne.Evoked + The transformed evoked object containing only virtual channels. 
+ + Notes + ----- + This method returns a copy and does not modify the data it + operates on. It also returns an EvokedArray instance. + + .. versionadded:: 0.9.0 + """ + from .forward import _as_meg_type_inst + + return _as_meg_type_inst(self, ch_type=ch_type, mode=mode) + + @fill_doc + def detrend(self, order=1, picks=None): + """Detrend data. + + This function operates in-place. + + Parameters + ---------- + order : int + Either 0 or 1, the order of the detrending. 0 is a constant + (DC) detrend, 1 is a linear detrend. + %(picks_good_data)s + + Returns + ------- + evoked : instance of Evoked + The detrended evoked object. + """ + picks = _picks_to_idx(self.info, picks) + self.data[picks] = detrend(self.data[picks], order, axis=-1) + return self + + def copy(self): + """Copy the instance of evoked. + + Returns + ------- + evoked : instance of Evoked + A copy of the object. + """ + evoked = deepcopy(self) + return evoked + + def __neg__(self): + """Negate channel responses. + + Returns + ------- + evoked_neg : instance of Evoked + The Evoked instance with channel data negated and '-' + prepended to the comment. + """ + out = self.copy() + out.data *= -1 + + if out.comment is not None and " + " in out.comment: + out.comment = f"({out.comment})" # multiple conditions in evoked + out.comment = f'- {out.comment or "unknown"}' + return out + + def get_peak( + self, + ch_type=None, + tmin=None, + tmax=None, + mode="abs", + time_as_index=False, + merge_grads=False, + return_amplitude=False, + *, + strict=True, + ): + """Get location and latency of peak amplitude. + + Parameters + ---------- + ch_type : str | None + The channel type to use. Defaults to None. If more than one channel + type is present in the data, this value **must** be provided. + tmin : float | None + The minimum point in time to be considered for peak getting. + If None (default), the beginning of the data is used. + tmax : float | None + The maximum point in time to be considered for peak getting. + If None (default), the end of the data is used. + mode : 'pos' | 'neg' | 'abs' + How to deal with the sign of the data. If 'pos' only positive + values will be considered. If 'neg' only negative values will + be considered. If 'abs' absolute values will be considered. + Defaults to 'abs'. + time_as_index : bool + Whether to return the time index instead of the latency in seconds. + merge_grads : bool + If True, compute peak from merged gradiometer data. + return_amplitude : bool + If True, return also the amplitude at the maximum response. + + .. versionadded:: 0.16 + strict : bool + If True, raise an error if values are all positive when detecting + a minimum (mode='neg'), or all negative when detecting a maximum + (mode='pos'). Defaults to True. + + .. versionadded:: 1.7 + + Returns + ------- + ch_name : str + The channel exhibiting the maximum response. + latency : float | int + The time point of the maximum response, either latency in seconds + or index. + amplitude : float + The amplitude of the maximum response. Only returned if + return_amplitude is True. + + .. versionadded:: 0.16 + """ # noqa: E501 + supported = ( + "mag", + "grad", + "eeg", + "seeg", + "dbs", + "ecog", + "misc", + "None", + ) + _FNIRS_CH_TYPES_SPLIT + types_used = self.get_channel_types(unique=True, only_data_chs=True) + + _check_option("ch_type", str(ch_type), supported) + + if ch_type is not None and ch_type not in types_used: + raise ValueError( + f'Channel type "{ch_type}" not found in this evoked object.' 
+ ) + + elif len(types_used) > 1 and ch_type is None: + raise RuntimeError( + 'Multiple data channel types found. Please pass the "ch_type" ' + "parameter." + ) + + if merge_grads: + if ch_type != "grad": + raise ValueError('Channel type must be "grad" for merge_grads') + elif mode == "neg": + raise ValueError( + "Negative mode (mode=neg) does not make " + "sense with merge_grads=True" + ) + + meg = eeg = misc = seeg = dbs = ecog = fnirs = False + picks = None + if ch_type in ("mag", "grad"): + meg = ch_type + elif ch_type == "eeg": + eeg = True + elif ch_type == "misc": + misc = True + elif ch_type == "seeg": + seeg = True + elif ch_type == "dbs": + dbs = True + elif ch_type == "ecog": + ecog = True + elif ch_type in _FNIRS_CH_TYPES_SPLIT: + fnirs = ch_type + + if ch_type is not None: + if merge_grads: + picks = _pair_grad_sensors(self.info, topomap_coords=False) + else: + picks = pick_types( + self.info, + meg=meg, + eeg=eeg, + misc=misc, + seeg=seeg, + ecog=ecog, + ref_meg=False, + fnirs=fnirs, + dbs=dbs, + ) + data = self.data + ch_names = self.ch_names + + if picks is not None: + data = data[picks] + ch_names = [ch_names[k] for k in picks] + + if merge_grads: + data, _ = _merge_ch_data(data, ch_type, []) + ch_names = [ch_name[:-1] + "X" for ch_name in ch_names[::2]] + + ch_idx, time_idx, max_amp = _get_peak( + data, + self.times, + tmin, + tmax, + mode, + strict=strict, + ) + + out = (ch_names[ch_idx], time_idx if time_as_index else self.times[time_idx]) + + if return_amplitude: + out += (max_amp,) + + return out + + @verbose + def compute_psd( + self, + method="multitaper", + fmin=0, + fmax=np.inf, + tmin=None, + tmax=None, + picks=None, + proj=False, + remove_dc=True, + exclude=(), + *, + n_jobs=1, + verbose=None, + **method_kw, + ): + """Perform spectral analysis on sensor data. + + Parameters + ---------- + %(method_psd)s + Default is ``'multitaper'``. + %(fmin_fmax_psd)s + %(tmin_tmax_psd)s + %(picks_good_data_noref)s + %(proj_psd)s + %(remove_dc)s + %(exclude_psd)s + %(n_jobs)s + %(verbose)s + %(method_kw_psd)s + + Returns + ------- + spectrum : instance of Spectrum + The spectral representation of the data. + + Notes + ----- + .. versionadded:: 1.2 + + References + ---------- + .. footbibliography:: + """ + method = _validate_method(method, type(self).__name__) + self._set_legacy_nfft_default(tmin, tmax, method, method_kw) + + return Spectrum( + self, + method=method, + fmin=fmin, + fmax=fmax, + tmin=tmin, + tmax=tmax, + picks=picks, + exclude=exclude, + proj=proj, + remove_dc=remove_dc, + reject_by_annotation=False, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + + @verbose + def compute_tfr( + self, + method, + freqs, + *, + tmin=None, + tmax=None, + picks=None, + proj=False, + output="power", + decim=1, + n_jobs=None, + verbose=None, + **method_kw, + ): + """Compute a time-frequency representation of evoked data. + + Parameters + ---------- + %(method_tfr)s + %(freqs_tfr)s + %(tmin_tmax_psd)s + %(picks_good_data_noref)s + %(proj_psd)s + %(output_compute_tfr)s + %(decim_tfr)s + %(n_jobs)s + %(verbose)s + %(method_kw_tfr)s + + Returns + ------- + tfr : instance of AverageTFR + The time-frequency-resolved power estimates of the data. + + Notes + ----- + .. versionadded:: 1.7 + + References + ---------- + .. 
footbibliography:: + """ + _check_option("output", output, ("power", "phase", "complex")) + method_kw["output"] = output + return AverageTFR( + inst=self, + method=method, + freqs=freqs, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + decim=decim, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + + @verbose + def plot_psd( + self, + fmin=0, + fmax=np.inf, + tmin=None, + tmax=None, + picks=None, + proj=False, + *, + method="auto", + average=False, + dB=True, + estimate="power", + xscale="linear", + area_mode="std", + area_alpha=0.33, + color="black", + line_alpha=None, + spatial_colors=True, + sphere=None, + exclude="bads", + ax=None, + show=True, + n_jobs=1, + verbose=None, + **method_kw, + ): + """%(plot_psd_doc)s. + + Parameters + ---------- + %(fmin_fmax_psd)s + %(tmin_tmax_psd)s + %(picks_good_data_noref)s + %(proj_psd)s + %(method_plot_psd_auto)s + %(average_plot_psd)s + %(dB_plot_psd)s + %(estimate_plot_psd)s + %(xscale_plot_psd)s + %(area_mode_plot_psd)s + %(area_alpha_plot_psd)s + %(color_plot_psd)s + %(line_alpha_plot_psd)s + %(spatial_colors_psd)s + %(sphere_topomap_auto)s + + .. versionadded:: 0.22.0 + exclude : list of str | 'bads' + Channels names to exclude from being shown. If 'bads', the bad + channels are excluded. Pass an empty list to plot all channels + (including channels marked "bad", if any). + + .. versionadded:: 0.24.0 + %(ax_plot_psd)s + %(show)s + %(n_jobs)s + %(verbose)s + %(method_kw_psd)s + + Returns + ------- + fig : instance of Figure + Figure with frequency spectra of the data channels. + + Notes + ----- + %(notes_plot_psd_meth)s + """ + return super().plot_psd( + fmin=fmin, + fmax=fmax, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + reject_by_annotation=False, + method=method, + average=average, + dB=dB, + estimate=estimate, + xscale=xscale, + area_mode=area_mode, + area_alpha=area_alpha, + color=color, + line_alpha=line_alpha, + spatial_colors=spatial_colors, + sphere=sphere, + exclude=exclude, + ax=ax, + show=show, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + + @verbose + def to_data_frame( + self, + picks=None, + index=None, + scalings=None, + copy=True, + long_format=False, + time_format=None, + *, + verbose=None, + ): + """Export data in tabular structure as a pandas DataFrame. + + Channels are converted to columns in the DataFrame. By default, + an additional column "time" is added, unless ``index='time'`` + (in which case time values form the DataFrame's index). + + Parameters + ---------- + %(picks_all)s + %(index_df_evk)s + Defaults to ``None``. + %(scalings_df)s + %(copy_df)s + %(long_format_df_raw)s + %(time_format_df)s + + .. 
versionadded:: 0.20 + %(verbose)s + + Returns + ------- + %(df_return)s + """ + # check pandas once here, instead of in each private utils function + pd = _check_pandas_installed() # noqa + # arg checking + valid_index_args = ["time"] + valid_time_formats = ["ms", "timedelta"] + index = _check_pandas_index_arguments(index, valid_index_args) + time_format = _check_time_format(time_format, valid_time_formats) + # get data + picks = _picks_to_idx(self.info, picks, "all", exclude=()) + data = self.data[picks, :] + times = self.times + data = data.T + if copy: + data = data.copy() + data = _scale_dataframe_data(self, data, picks, scalings) + # prepare extra columns / multiindex + mindex = list() + times = _convert_times(times, time_format, self.info["meas_date"]) + mindex.append(("time", times)) + # build DataFrame + df = _build_data_frame( + self, data, picks, long_format, mindex, index, default_index=["time"] + ) + return df + + +@fill_doc +class EvokedArray(Evoked): + """Evoked object from numpy array. + + Parameters + ---------- + data : array of shape (n_channels, n_times) + The channels' evoked response. See notes for proper units of measure. + %(info_not_none)s Consider using :func:`mne.create_info` to populate this + structure. + tmin : float + Start time before event. Defaults to 0. + comment : str + Comment on dataset. Can be the condition. Defaults to ''. + nave : int + Number of averaged epochs. Defaults to 1. + kind : str + Type of data, either average or standard_error. Defaults to 'average'. + %(baseline_evoked)s + Defaults to ``None``, i.e. no baseline correction. + + .. versionadded:: 0.23 + %(verbose)s + + See Also + -------- + EpochsArray, io.RawArray, create_info + + Notes + ----- + Proper units of measure: + + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog + * T: mag + * T/m: grad + * M: hbo, hbr + * Am: dipole + * AU: misc + """ + + @verbose + def __init__( + self, + data, + info, + tmin=0.0, + comment="", + nave=1, + kind="average", + baseline=None, + *, + verbose=None, + ): + dtype = np.complex128 if np.iscomplexobj(data) else np.float64 + data = np.asanyarray(data, dtype=dtype) + + if data.ndim != 2: + raise ValueError( + "Data must be a 2D array of shape (n_channels, n_samples), got shape " + f"{data.shape}" + ) + + if len(info["ch_names"]) != np.shape(data)[0]: + raise ValueError( + f"Info ({len(info['ch_names'])}) and data ({np.shape(data)[0]}) must " + "have same number of channels." 
+ ) + + self.data = data + + self.first = int(round(tmin * info["sfreq"])) + self.last = self.first + np.shape(data)[-1] - 1 + self._set_times( + np.arange(self.first, self.last + 1, dtype=np.float64) / info["sfreq"] + ) + self._raw_times = self.times.copy() + self._decim = 1 + self.info = info.copy() # do not modify original info + self.nave = nave + self.kind = kind + self.comment = comment + self.picks = None + self.preload = True + self._projector = None + _validate_type(self.kind, "str", "kind") + if self.kind not in _aspect_dict: + raise ValueError( + f'unknown kind "{self.kind}", should be "average" or "standard_error"' + ) + self._aspect_kind = _aspect_dict[self.kind] + + self.baseline = baseline + if self.baseline is not None: # omit log msg if not baselining + self.apply_baseline(self.baseline) + self._filename = None + + +def _get_entries(fid, evoked_node, allow_maxshield=False): + """Get all evoked entries.""" + comments = list() + aspect_kinds = list() + for ev in evoked_node: + for k in range(ev["nent"]): + my_kind = ev["directory"][k].kind + pos = ev["directory"][k].pos + if my_kind == FIFF.FIFF_COMMENT: + tag = read_tag(fid, pos) + comments.append(tag.data) + my_aspect = _get_aspect(ev, allow_maxshield)[0] + for k in range(my_aspect["nent"]): + my_kind = my_aspect["directory"][k].kind + pos = my_aspect["directory"][k].pos + if my_kind == FIFF.FIFF_ASPECT_KIND: + tag = read_tag(fid, pos) + aspect_kinds.append(int(tag.data.item())) + comments = np.atleast_1d(comments) + aspect_kinds = np.atleast_1d(aspect_kinds) + if len(comments) != len(aspect_kinds) or len(comments) == 0: + fid.close() + raise ValueError("Dataset names in FIF file could not be found.") + t = [_aspect_rev[a] for a in aspect_kinds] + t = ['"' + c + '" (' + tt + ")" for tt, c in zip(t, comments)] + t = "\n".join(t) + return comments, aspect_kinds, t + + +def _get_aspect(evoked, allow_maxshield): + """Get Evoked data aspect.""" + from .io.base import _check_maxshield + + is_maxshield = False + aspect = dir_tree_find(evoked, FIFF.FIFFB_ASPECT) + if len(aspect) == 0: + _check_maxshield(allow_maxshield) + aspect = dir_tree_find(evoked, FIFF.FIFFB_IAS_ASPECT) + is_maxshield = True + if len(aspect) > 1: + logger.info("Multiple data aspects found. Taking first one.") + return aspect[0], is_maxshield + + +def _get_evoked_node(fname): + """Get info in evoked file.""" + f, tree, _ = fiff_open(fname) + with f as fid: + _, meas = read_meas_info(fid, tree, verbose=False) + evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED) + return evoked_node + + +def _check_evokeds_ch_names_times(all_evoked): + evoked = all_evoked[0] + ch_names = evoked.ch_names + for ii, ev in enumerate(all_evoked[1:]): + if ev.ch_names != ch_names: + if set(ev.ch_names) != set(ch_names): + raise ValueError(f"{evoked} and {ev} do not contain the same channels.") + else: + warn("Order of channels differs, reordering channels ...") + ev = ev.copy() + ev.reorder_channels(ch_names) + all_evoked[ii + 1] = ev + if not np.max(np.abs(ev.times - evoked.times)) < 1e-7: + raise ValueError(f"{evoked} and {ev} do not contain the same time instants") + return all_evoked + + +def combine_evoked(all_evoked, weights): + """Merge evoked data by weighted addition or subtraction. + + Each `~mne.Evoked` in ``all_evoked`` should have the same channels and the + same time instants. Subtraction can be performed by passing + ``weights=[1, -1]``. + + .. 
Warning:: + Other than cases like simple subtraction mentioned above (where all + weights are ``-1`` or ``1``), if you provide numeric weights instead of using + ``'equal'`` or ``'nave'``, the resulting `~mne.Evoked` object's + ``.nave`` attribute (which is used to scale noise covariance when + applying the inverse operator) may not be suitable for inverse imaging. + + Parameters + ---------- + all_evoked : list of Evoked + The evoked datasets. + weights : list of float | ``'equal'`` | ``'nave'`` + The weights to apply to the data of each evoked instance, or a string + describing the weighting strategy to apply: ``'nave'`` computes + sum-to-one weights proportional to each object's ``nave`` attribute; + ``'equal'`` weights each `~mne.Evoked` by ``1 / len(all_evoked)``. + + Returns + ------- + evoked : Evoked + The new evoked data. + + Notes + ----- + .. versionadded:: 0.9.0 + """ + naves = np.array([evk.nave for evk in all_evoked], float) + if isinstance(weights, str): + _check_option("weights", weights, ["nave", "equal"]) + if weights == "nave": + weights = naves / naves.sum() + else: + weights = np.ones_like(naves) / len(naves) + else: + weights = np.array(weights, float) + + if weights.ndim != 1 or weights.size != len(all_evoked): + raise ValueError("weights must be the same size as all_evoked") + + # cf. https://en.wikipedia.org/wiki/Weighted_arithmetic_mean, section on + # "weighted sample variance". The variance of a weighted sample mean is: + # + # σ² = w₁² σ₁² + w₂² σ₂² + ... + wₙ² σₙ² + # + # We estimate the variance of each evoked instance as 1 / nave to get: + # + # σ² = w₁² / nave₁ + w₂² / nave₂ + ... + wₙ² / naveₙ + # + # And our resulting nave is the reciprocal of this: + new_nave = 1.0 / np.sum(weights**2 / naves) + # This general formula is equivalent to formulae in Matti's manual + # (pp 128-129), where: + # new_nave = sum(naves) when weights='nave' and + # new_nave = 1. / sum(1. / naves) when weights are all 1. + + all_evoked = _check_evokeds_ch_names_times(all_evoked) + evoked = all_evoked[0].copy() + + # use union of bad channels + bads = list(set(b for e in all_evoked for b in e.info["bads"])) + evoked.info["bads"] = bads + evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked)) + evoked.nave = new_nave + + comment = "" + for idx, (w, e) in enumerate(zip(weights, all_evoked)): + # pick sign + sign = "" if w >= 0 else "-" + # format weight + weight = "" if np.isclose(abs(w), 1.0) else f"{abs(w):0.3f}" + # format multiplier + multiplier = " × " if weight else "" + # format comment + if e.comment is not None and " + " in e.comment: # multiple conditions + this_comment = f"({e.comment})" + else: + this_comment = f'{e.comment or "unknown"}' + # assemble everything + if idx == 0: + comment += f"{sign}{weight}{multiplier}{this_comment}" + else: + comment += f' {sign or "+"} {weight}{multiplier}{this_comment}' + # special-case: combine_evoked([e1, -e2], [1, -1]) + evoked.comment = comment.replace(" - - ", " + ") + return evoked + + +@verbose +def read_evokeds( + fname, + condition=None, + baseline=None, + kind="average", + proj=True, + allow_maxshield=False, + verbose=None, +) -> list[Evoked] | Evoked: + """Read evoked dataset(s). + + Parameters + ---------- + fname : path-like + The filename, which should end with ``-ave.fif`` or ``-ave.fif.gz``. + condition : int or str | list of int or str | None + The index or list of indices of the evoked dataset to read. FIF files + can contain multiple datasets. If None, all datasets are returned as a + list.
+ %(baseline_evoked)s + If ``None`` (default), do not apply baseline correction. + + .. note:: Note that if the read `~mne.Evoked` objects have already + been baseline-corrected, the data retrieved from disk will + **always** be baseline-corrected (in fact, only the + baseline-corrected version of the data will be saved, so + there is no way to undo this procedure). Only **after** the + data has been loaded, a custom (additional) baseline + correction **may** be optionally applied by passing a tuple + here. Passing ``None`` will **not** remove an existing + baseline correction, but merely omit the optional, additional + baseline correction. + kind : str + Either ``'average'`` or ``'standard_error'``, the type of data to read. + proj : bool + If False, available projectors won't be applied to the data. + allow_maxshield : bool | str (default False) + If True, allow loading of data that has been recorded with internal + active compensation (MaxShield). Data recorded with MaxShield should + generally not be loaded directly, but should first be processed using + SSS/tSSS to remove the compensation signals that may also affect brain + activity. Can also be ``"yes"`` to load without eliciting a warning. + %(verbose)s + + Returns + ------- + evoked : Evoked or list of Evoked + The evoked dataset(s); one `~mne.Evoked` if ``condition`` is an + integer or string; or a list of `~mne.Evoked` if ``condition`` is + ``None`` or a list. + + See Also + -------- + write_evokeds + + Notes + ----- + .. versionchanged:: 0.23 + If the read `~mne.Evoked` objects had been baseline-corrected before + saving, this will be reflected in their ``baseline`` attribute after + reading. + """ + fname = _check_fname(fname, overwrite="read", must_exist=True) + check_fname(fname, "evoked", ("-ave.fif", "-ave.fif.gz", "_ave.fif", "_ave.fif.gz")) + logger.info(f"Reading {fname} ...") + return_list = True + if condition is None: + evoked_node = _get_evoked_node(fname) + condition = range(len(evoked_node)) + elif not isinstance(condition, list): + condition = [condition] + return_list = False + + out = [] + for c in condition: + evoked = Evoked( + fname, + c, + kind=kind, + proj=proj, + allow_maxshield=allow_maxshield, + verbose=verbose, + ) + if baseline is None and evoked.baseline is None: + logger.info(_log_rescale(None)) + elif baseline is None and evoked.baseline is not None: + # Don't touch an existing baseline + bmin, bmax = evoked.baseline + logger.info( + f"Loaded Evoked data is baseline-corrected " + f"(baseline: [{bmin:g}, {bmax:g}] s)" + ) + else: + evoked.apply_baseline(baseline) + out.append(evoked) + + return out if return_list else out[0] + + +def _read_evoked(fname, condition=None, kind="average", allow_maxshield=False): + """Read evoked data from a FIF file.""" + if fname is None: + raise ValueError("No evoked filename specified") + + f, tree, _ = fiff_open(fname) + with f as fid: + # Read the measurement info + info, meas = read_meas_info(fid, tree, clean_bads=True) + + # Locate the data of interest + processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA) + if len(processed) == 0: + raise ValueError("Could not find processed data") + + evoked_node = dir_tree_find(meas, FIFF.FIFFB_EVOKED) + if len(evoked_node) == 0: + raise ValueError("Could not find evoked data") + + # find string-based entry + if isinstance(condition, str): + if kind not in _aspect_dict.keys(): + raise ValueError('kind must be "average" or "standard_error"') + + comments, aspect_kinds, t = _get_entries(fid, evoked_node, allow_maxshield) + 
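+ # Worked example (hypothetical): a file holding two averages whose + # comments are ["aud", "vis"] yields comments=["aud", "vis"] here, so + # condition="vis" with kind="average" must match exactly one + # (comment, aspect_kind) pair; any other count raises below.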
goods = np.isin(comments, [condition]) & np.isin( + aspect_kinds, [_aspect_dict[kind]] + ) + found_cond = np.where(goods)[0] + if len(found_cond) != 1: + raise ValueError( + f'condition "{condition}" ({kind}) not found, out of found ' + f"datasets:\n{t}" + ) + condition = found_cond[0] + elif condition is None: + if len(evoked_node) > 1: + _, _, conditions = _get_entries(fid, evoked_node, allow_maxshield) + raise TypeError( + "Evoked file has more than one condition, the condition parameter " + f"must be specified from:\n{conditions}" + ) + else: + condition = 0 + + if condition >= len(evoked_node) or condition < 0: + raise ValueError("Data set selector out of range") + + my_evoked = evoked_node[condition] + + # Identify the aspects + with info._unlock(): + my_aspect, info["maxshield"] = _get_aspect(my_evoked, allow_maxshield) + + # Now find the data in the evoked block + nchan = 0 + sfreq = -1 + chs = [] + baseline = bmin = bmax = None + comment = last = first = first_time = nsamp = None + for k in range(my_evoked["nent"]): + my_kind = my_evoked["directory"][k].kind + pos = my_evoked["directory"][k].pos + if my_kind == FIFF.FIFF_COMMENT: + tag = read_tag(fid, pos) + comment = tag.data + elif my_kind == FIFF.FIFF_FIRST_SAMPLE: + tag = read_tag(fid, pos) + first = int(tag.data.item()) + elif my_kind == FIFF.FIFF_LAST_SAMPLE: + tag = read_tag(fid, pos) + last = int(tag.data.item()) + elif my_kind == FIFF.FIFF_NCHAN: + tag = read_tag(fid, pos) + nchan = int(tag.data.item()) + elif my_kind == FIFF.FIFF_SFREQ: + tag = read_tag(fid, pos) + sfreq = float(tag.data.item()) + elif my_kind == FIFF.FIFF_CH_INFO: + tag = read_tag(fid, pos) + chs.append(tag.data) + elif my_kind == FIFF.FIFF_FIRST_TIME: + tag = read_tag(fid, pos) + first_time = float(tag.data.item()) + elif my_kind == FIFF.FIFF_NO_SAMPLES: + tag = read_tag(fid, pos) + nsamp = int(tag.data.item()) + elif my_kind == FIFF.FIFF_MNE_BASELINE_MIN: + tag = read_tag(fid, pos) + bmin = float(tag.data.item()) + elif my_kind == FIFF.FIFF_MNE_BASELINE_MAX: + tag = read_tag(fid, pos) + bmax = float(tag.data.item()) + + if comment is None: + comment = "No comment" + + if bmin is not None or bmax is not None: + # None's should've been replaced with floats + assert bmin is not None and bmax is not None + baseline = (bmin, bmax) + + # Local channel information? + if nchan > 0: + if chs is None: + raise ValueError( + "Local channel information was not found when it was expected." + ) + + if len(chs) != nchan: + raise ValueError( + "Number of channels and number of " + "channel definitions are different" + ) + + ch_names_mapping = _read_extended_ch_info(chs, my_evoked, fid) + info["chs"] = chs + info["bads"][:] = _rename_list(info["bads"], ch_names_mapping) + logger.info( + f" Found channel information in evoked data. 
nchan = {nchan}" + ) + if sfreq > 0: + info["sfreq"] = sfreq + + # Read the data in the aspect block + nave = 1 + epoch = [] + for k in range(my_aspect["nent"]): + kind = my_aspect["directory"][k].kind + pos = my_aspect["directory"][k].pos + if kind == FIFF.FIFF_COMMENT: + tag = read_tag(fid, pos) + comment = tag.data + elif kind == FIFF.FIFF_ASPECT_KIND: + tag = read_tag(fid, pos) + aspect_kind = int(tag.data.item()) + elif kind == FIFF.FIFF_NAVE: + tag = read_tag(fid, pos) + nave = int(tag.data.item()) + elif kind == FIFF.FIFF_EPOCH: + tag = read_tag(fid, pos) + epoch.append(tag) + + nepoch = len(epoch) + if nepoch != 1 and nepoch != info["nchan"]: + raise ValueError( + "Number of epoch tags is unreasonable " + f"(nepoch = {nepoch} nchan = {info['nchan']})" + ) + + if nepoch == 1: + # Only one epoch + data = epoch[0].data + # May need a transpose if the number of channels is one + if data.shape[1] == 1 and info["nchan"] == 1: + data = data.T + else: + # Put the old style epochs together + data = np.concatenate([e.data[None, :] for e in epoch], axis=0) + if np.isrealobj(data): + data = data.astype(np.float64) + else: + data = data.astype(np.complex128) + + if first_time is not None and nsamp is not None: + times = first_time + np.arange(nsamp) / info["sfreq"] + elif first is not None: + nsamp = last - first + 1 + times = np.arange(first, last + 1) / info["sfreq"] + else: + raise RuntimeError("Could not read time parameters") + del first, last + if nsamp is not None and data.shape[1] != nsamp: + raise ValueError( + f"Incorrect number of samples ({data.shape[1]} instead of {nsamp})" + ) + logger.info(" Found the data of interest:") + logger.info( + f" t = {1000 * times[0]:10.2f} ... {1000 * times[-1]:10.2f} ms (" + f"{comment})" + ) + if info["comps"] is not None: + logger.info( + f" {len(info['comps'])} CTF compensation matrices available" + ) + logger.info(f" nave = {nave} - aspect type = {aspect_kind}") + + # Calibrate + cals = np.array( + [ + info["chs"][k]["cal"] * info["chs"][k].get("scale", 1.0) + for k in range(info["nchan"]) + ] + ) + data *= cals[:, np.newaxis] + + return info, nave, aspect_kind, comment, times, data, baseline + + +@verbose +def write_evokeds(fname, evoked, *, on_mismatch="raise", overwrite=False, verbose=None): + """Write an evoked dataset to a file. + + Parameters + ---------- + fname : path-like + The file name, which should end with ``-ave.fif`` or ``-ave.fif.gz``. + evoked : Evoked instance, or list of Evoked instances + The evoked dataset, or list of evoked datasets, to save in one file. + Note that the measurement info from the first evoked instance is used, + so be sure that information matches. + %(on_mismatch_info)s + %(overwrite)s + + .. versionadded:: 1.0 + %(verbose)s + + .. versionadded:: 0.24 + + See Also + -------- + read_evokeds + + Notes + ----- + .. versionchanged:: 0.23 + Information on baseline correction will be stored with each individual + `~mne.Evoked` object, and will be restored when reading the data again + via `mne.read_evokeds`. 
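+ + Examples + -------- + A minimal sketch; ``evoked_list`` is a hypothetical list of `~mne.Evoked` + objects sharing one measurement info:: + + >>> mne.write_evokeds("sub01-ave.fif", evoked_list, overwrite=True) # doctest: +SKIP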
+ """ + _write_evokeds(fname, evoked, on_mismatch=on_mismatch, overwrite=overwrite) + + +def _write_evokeds(fname, evoked, check=True, *, on_mismatch="raise", overwrite=False): + """Write evoked data.""" + from .dipole import DipoleFixed # avoid circular import + + fname = _check_fname(fname=fname, overwrite=overwrite) + if check: + check_fname( + fname, "evoked", ("-ave.fif", "-ave.fif.gz", "_ave.fif", "_ave.fif.gz") + ) + + if not isinstance(evoked, list | tuple): + evoked = [evoked] + + warned = False + # Create the file and save the essentials + with start_and_end_file(fname) as fid: + start_block(fid, FIFF.FIFFB_MEAS) + write_id(fid, FIFF.FIFF_BLOCK_ID) + if evoked[0].info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, evoked[0].info["meas_id"]) + + # Write measurement info + write_meas_info(fid, evoked[0].info) + + # One or more evoked data sets + start_block(fid, FIFF.FIFFB_PROCESSED_DATA) + for ei, e in enumerate(evoked): + if ei: + _ensure_infos_match( + info1=evoked[0].info, + info2=e.info, + name=f"evoked[{ei}]", + on_mismatch=on_mismatch, + ) + start_block(fid, FIFF.FIFFB_EVOKED) + + # Comment is optional + if e.comment is not None and len(e.comment) > 0: + write_string(fid, FIFF.FIFF_COMMENT, e.comment) + + # First time, num. samples, first and last sample + write_float(fid, FIFF.FIFF_FIRST_TIME, e.times[0]) + write_int(fid, FIFF.FIFF_NO_SAMPLES, len(e.times)) + write_int(fid, FIFF.FIFF_FIRST_SAMPLE, e.first) + write_int(fid, FIFF.FIFF_LAST_SAMPLE, e.last) + + # Baseline + if not isinstance(e, DipoleFixed) and e.baseline is not None: + bmin, bmax = e.baseline + write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin) + write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax) + + # The evoked data itself + if e.info.get("maxshield"): + aspect = FIFF.FIFFB_IAS_ASPECT + else: + aspect = FIFF.FIFFB_ASPECT + start_block(fid, aspect) + + write_int(fid, FIFF.FIFF_ASPECT_KIND, e._aspect_kind) + # convert nave to integer to comply with FIFF spec + nave_int = int(round(e.nave)) + if nave_int != e.nave and not warned: + warn( + 'converting "nave" to integer before saving evoked; this ' + "can have a minor effect on the scale of source " + 'estimates that are computed using "nave".' + ) + warned = True + write_int(fid, FIFF.FIFF_NAVE, nave_int) + del nave_int + + decal = np.zeros((e.info["nchan"], 1)) + for k in range(e.info["nchan"]): + decal[k] = 1.0 / ( + e.info["chs"][k]["cal"] * e.info["chs"][k].get("scale", 1.0) + ) + + if np.iscomplexobj(e.data): + write_function = write_complex_float_matrix + else: + write_function = write_float_matrix + + write_function(fid, FIFF.FIFF_EPOCH, decal * e.data) + end_block(fid, aspect) + end_block(fid, FIFF.FIFFB_EVOKED) + + end_block(fid, FIFF.FIFFB_PROCESSED_DATA) + end_block(fid, FIFF.FIFFB_MEAS) + + +def _get_peak(data, times, tmin=None, tmax=None, mode="abs", *, strict=True): + """Get feature-index and time of maximum signal from 2D array. + + Note. This is a 'getter', not a 'finder'. For non-evoked type + data and continuous signals, please use proper peak detection algorithms. + + Parameters + ---------- + data : instance of numpy.ndarray (n_locations, n_times) + The data, either evoked in sensor or source space. + times : instance of numpy.ndarray (n_times) + The times in seconds. + tmin : float | None + The minimum point in time to be considered for peak getting. + tmax : float | None + The maximum point in time to be considered for peak getting. + mode : {'pos', 'neg', 'abs'} + How to deal with the sign of the data. 
If 'pos' only positive + values will be considered. If 'neg' only negative values will + be considered. If 'abs' absolute values will be considered. + Defaults to 'abs'. + strict : bool + If True, raise an error if values are all positive when detecting + a minimum (mode='neg'), or all negative when detecting a maximum + (mode='pos'). Defaults to True. + + Returns + ------- + max_loc : int + The index of the feature with the maximum value. + max_time : int + The time point of the maximum response, index. + max_amp : float + Amplitude of the maximum response. + """ + _check_option("mode", mode, ["abs", "neg", "pos"]) + + if tmin is None: + tmin = times[0] + if tmax is None: + tmax = times[-1] + + if tmin < times.min() or tmax > times.max(): + if tmin < times.min(): + param_name = "tmin" + param_val = tmin + else: + param_name = "tmax" + param_val = tmax + + raise ValueError( + f"{param_name} ({param_val}) is out of bounds. It must be " + f"between {times.min()} and {times.max()}" + ) + elif tmin > tmax: + raise ValueError(f"tmin ({tmin}) must be <= tmax ({tmax})") + + time_win = (times >= tmin) & (times <= tmax) + mask = np.ones_like(data).astype(bool) + mask[:, time_win] = False + + maxfun = np.argmax + if mode == "pos": + if strict and not np.any(data[~mask] > 0): + raise ValueError( + "No positive values encountered. Cannot operate in pos mode." + ) + elif mode == "neg": + if strict and not np.any(data[~mask] < 0): + raise ValueError( + "No negative values encountered. Cannot operate in neg mode." + ) + maxfun = np.argmin + + masked_index = np.ma.array(np.abs(data) if mode == "abs" else data, mask=mask) + + max_loc, max_time = np.unravel_index(maxfun(masked_index), data.shape) + + return max_loc, max_time, data[max_loc, max_time] diff --git a/mne/export/__init__.py b/mne/export/__init__.py new file mode 100644 index 0000000..7a07df6 --- /dev/null +++ b/mne/export/__init__.py @@ -0,0 +1,8 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Functions for exporting data to non-FIF formats.""" +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/export/__init__.pyi b/mne/export/__init__.pyi new file mode 100644 index 0000000..6cd2a37 --- /dev/null +++ b/mne/export/__init__.pyi @@ -0,0 +1,3 @@ +__all__ = ["export_epochs", "export_evokeds", "export_evokeds_mff", "export_raw"] +from ._egimff import export_evokeds_mff +from ._export import export_epochs, export_evokeds, export_raw diff --git a/mne/export/_brainvision.py b/mne/export/_brainvision.py new file mode 100644 index 0000000..ba64ba0 --- /dev/null +++ b/mne/export/_brainvision.py @@ -0,0 +1,158 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +from pathlib import Path + +import numpy as np + +from mne.channels.channels import _unit2human +from mne.io.constants import FIFF +from mne.utils import _check_pybv_installed, warn + +_check_pybv_installed() +from pybv import write_brainvision # noqa: E402 + + +def _export_mne_raw(*, raw, fname, events=None, overwrite=False): + """Export raw data from MNE-Python. + + Parameters + ---------- + raw : mne.io.Raw + The raw data to export. + fname : str | pathlib.Path + The name of the file where raw data will be exported to. Must end with + ``".vhdr"``, and accompanying *.vmrk* and *.eeg* files will be written inside + the same directory. 
+ events : np.ndarray | None + Events to be written to the marker file (*.vmrk*). If array, must be in + MNE-Python format. If + ``None`` (default), events will be written based on ``raw.annotations``. + overwrite : bool + Whether or not to overwrite existing data. Defaults to ``False``. + + """ + # prepare file location + if not str(fname).endswith(".vhdr"): + raise ValueError("`fname` must have the '.vhdr' extension for BrainVision.") + fname = Path(fname) + folder_out = fname.parents[0] + fname_base = fname.stem + + # prepare data from raw + data = raw.get_data() # gets data starting from raw.first_samp + sfreq = raw.info["sfreq"] # in Hz + meas_date = raw.info["meas_date"] # datetime.datetime + ch_names = raw.ch_names + + # write voltage units as micro-volts and all other units without scaling + # write units that we don't know as n/a + unit = [] + for ch in raw.info["chs"]: + if ch["unit"] == FIFF.FIFF_UNIT_V: + unit.append("µV") + elif ch["unit"] == FIFF.FIFF_UNIT_CEL: + unit.append("°C") + else: + unit.append(_unit2human.get(ch["unit"], "n/a")) + unit = [u if u != "NA" else "n/a" for u in unit] + + # enforce conversion to float32 format + # XXX: Could add a feature that checks data and optimizes `unit`, `resolution`, and + # `format` so that raw.orig_format could be retained if reasonable. + if raw.orig_format != "single": + warn( + f"Encountered data in '{raw.orig_format}' format. Converting to float32.", + RuntimeWarning, + ) + + fmt = "binary_float32" + resolution = 0.1 + + # handle events + # if we got an ndarray, this is in MNE-Python format + msg = "`events` must be None or array in MNE-Python format." + if events is not None: + # subtract raw.first_samp because brainvision marks events starting from the + # first available data point and ignores the raw.first_samp + assert isinstance(events, np.ndarray), msg + assert events.ndim == 2, msg + assert events.shape[-1] == 3, msg + events[:, 0] -= raw.first_samp + events = events[:, [0, 2]] # reorder for pybv required order + else: # else, prepare pybv style events from raw.annotations + events = _mne_annots2pybv_events(raw) + + # no information about reference channels in mne currently + ref_ch_names = None + + # write to BrainVision + write_brainvision( + data=data, + sfreq=sfreq, + ch_names=ch_names, + ref_ch_names=ref_ch_names, + fname_base=fname_base, + folder_out=folder_out, + overwrite=overwrite, + events=events, + resolution=resolution, + unit=unit, + fmt=fmt, + meas_date=meas_date, + ) + + +def _mne_annots2pybv_events(raw): + """Convert mne Annotations to pybv events.""" + events = [] + for annot in raw.annotations: + # handle onset and duration: seconds to sample, relative to + # raw.first_samp / raw.first_time + onset = annot["onset"] - raw.first_time + onset = raw.time_as_index(onset).astype(int)[0] + duration = int(annot["duration"] * raw.info["sfreq"]) + + # triage type and description + # defaults to type="Comment" and the full description + etype = "Comment" + description = annot["description"] + for start in ["Stimulus/S", "Response/R", "Comment/"]: + if description.startswith(start): + etype = start.split("/")[0] + description = description.replace(start, "") + break + + if etype in ["Stimulus", "Response"] and description.strip().isdigit(): + description = int(description.strip()) + else: + # if cannot convert to int, we must use this as "Comment" + etype = "Comment" + + event_dict = dict( + onset=onset, # in samples + duration=duration, # in samples + description=description, + type=etype, + ) + + if 
"ch_names" in annot: + # handle channels + channels = list(annot["ch_names"]) + event_dict["channels"] = channels + + # add a "pybv" event + events += [event_dict] + + return events + + +def _export_raw(fname, raw, overwrite): + """Export Raw object to BrainVision via pybv.""" + fname = str(fname) + ext = os.path.splitext(fname)[-1] + if ext != ".vhdr": + fname = fname.replace(ext, ".vhdr") + _export_mne_raw(raw=raw, fname=fname, overwrite=overwrite) diff --git a/mne/export/_edf.py b/mne/export/_edf.py new file mode 100644 index 0000000..ef87069 --- /dev/null +++ b/mne/export/_edf.py @@ -0,0 +1,226 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import datetime as dt +from collections.abc import Callable + +import numpy as np + +from ..utils import _check_edfio_installed, warn + +_check_edfio_installed() +from edfio import Edf, EdfAnnotation, EdfSignal, Patient, Recording # noqa: E402 + + +# copied from edfio (Apache license) +def _round_float_to_8_characters( + value: float, + round_func: Callable[[float], int], +) -> float: + if isinstance(value, int) or value.is_integer(): + return value + length = 8 + integer_part_length = str(value).find(".") + if integer_part_length == length: + return round_func(value) + factor = 10 ** (length - 1 - integer_part_length) + return round_func(value * factor) / factor + + +def _export_raw(fname, raw, physical_range, add_ch_type): + """Export Raw objects to EDF files. + + TODO: if in future the Info object supports transducer or technician information, + allow writing those here. + """ + # get voltage-based data in uV + units = dict( + eeg="uV", ecog="uV", seeg="uV", eog="uV", ecg="uV", emg="uV", bio="uV", dbs="uV" + ) + + digital_min, digital_max = -32767, 32767 + annotations = [] + + # load data first + raw.load_data() + + ch_types = np.array(raw.get_channel_types()) + n_times = raw.n_times + + # get the entire dataset in uV + data = raw.get_data(units=units) + + # Sampling frequency in EDF only supports integers, so to allow for float sampling + # rates from Raw, we adjust the output sampling rate for all channels and the data + # record duration. + sfreq = raw.info["sfreq"] + if float(sfreq).is_integer(): + out_sfreq = int(sfreq) + data_record_duration = None + # make non-integer second durations work + if (pad_width := int(np.ceil(n_times / sfreq) * sfreq - n_times)) > 0: + warn( + "EDF format requires equal-length data blocks, so " + f"{pad_width / sfreq:.3g} seconds of edge values were appended to all " + "channels when writing the final block." + ) + data = np.pad(data, (0, int(pad_width)), "edge") + annotations.append( + EdfAnnotation( + raw.times[-1] + 1 / sfreq, pad_width / sfreq, "BAD_ACQ_SKIP" + ) + ) + else: + data_record_duration = _round_float_to_8_characters( + np.floor(sfreq) / sfreq, round + ) + out_sfreq = np.floor(sfreq) / data_record_duration + warn( + f"Data has a non-integer sampling rate of {sfreq}; writing to EDF format " + "may cause a small change to sample times." 
+ ) + + # get any filter information applied to the data + lowpass = raw.info["lowpass"] + highpass = raw.info["highpass"] + linefreq = raw.info["line_freq"] + filter_str_info = f"HP:{highpass}Hz LP:{lowpass}Hz" + if linefreq is not None: + filter_str_info += f" N:{linefreq}Hz" + + if physical_range == "auto": + # get max and min for each channel type data + ch_types_phys_max = dict() + ch_types_phys_min = dict() + + for _type in np.unique(ch_types): + _picks = [n for n, t in zip(raw.ch_names, ch_types) if t == _type] + _data = raw.get_data(units=units, picks=_picks) + ch_types_phys_max[_type] = _data.max() + ch_types_phys_min[_type] = _data.min() + elif physical_range == "channelwise": + prange = None + else: + # get the physical min and max of the data in uV + # Physical ranges of the data in uV are usually set by the manufacturer and + # electrode properties. In general, physical min and max should be the clipping + # levels of the ADC input, and they should be the same for all channels. For + # example, Nihon Kohden uses ±3200 uV for all EEG channels (corresponding to the + # actual clipping levels of their input amplifiers & ADC). For a discussion, + # see https://github.com/sccn/eeglab/issues/246 + pmin, pmax = physical_range[0], physical_range[1] + + # check that physical min and max are not exceeded + if data.max() > pmax: + warn( + f"The maximum μV of the data {data.max()} is more than the physical max" + f" passed in {pmax}." + ) + if data.min() < pmin: + warn( + f"The minimum μV of the data {data.min()} is less than the physical min" + f" passed in {pmin}." + ) + data = np.clip(data, pmin, pmax) + prange = pmin, pmax + signals = [] + for idx, ch in enumerate(raw.ch_names): + ch_type = ch_types[idx] + signal_label = f"{ch_type.upper()} {ch}" if add_ch_type else ch + if len(signal_label) > 16: + raise RuntimeError( + f"Signal label for {ch} ({ch_type}) is longer than 16 characters, which" + " is not supported by the EDF standard. Please shorten the channel name " + "before exporting to EDF."
+ ) + + if physical_range == "auto": # per channel type + pmin = ch_types_phys_min[ch_type] + pmax = ch_types_phys_max[ch_type] + if pmax == pmin: + pmax = pmin + 1 + prange = pmin, pmax + + signals.append( + EdfSignal( + data[idx], + out_sfreq, + label=signal_label, + transducer_type="", + physical_dimension="" if ch_type == "stim" else "uV", + physical_range=prange, + digital_range=(digital_min, digital_max), + prefiltering=filter_str_info, + ) + ) + + # set patient info + subj_info = raw.info.get("subject_info") + if subj_info is not None: + # get the full name of subject if available + first_name = subj_info.get("first_name", "") + middle_name = subj_info.get("middle_name", "") + last_name = subj_info.get("last_name", "") + name = "_".join(filter(None, [first_name, middle_name, last_name])) + + birthday = subj_info.get("birthday") + hand = subj_info.get("hand") + weight = subj_info.get("weight") + height = subj_info.get("height") + sex = subj_info.get("sex") + + additional_patient_info = [] + for key, value in [("height", height), ("weight", weight), ("hand", hand)]: + if value: + additional_patient_info.append(f"{key}={value}") + + patient = Patient( + code=subj_info.get("his_id") or "X", + sex={0: "X", 1: "M", 2: "F", None: "X"}[sex], + birthdate=birthday, + name=name or "X", + additional=additional_patient_info, + ) + else: + patient = None + + # set measurement date + if (meas_date := raw.info["meas_date"]) is not None: + startdate = dt.date(meas_date.year, meas_date.month, meas_date.day) + starttime = dt.time( + meas_date.hour, meas_date.minute, meas_date.second, meas_date.microsecond + ) + else: + startdate = None + starttime = None + + device_info = raw.info.get("device_info") + if device_info is not None: + device_type = device_info.get("type") or "X" + recording = Recording(startdate=startdate, equipment_code=device_type) + else: + recording = Recording(startdate=startdate) + + for desc, onset, duration, ch_names in zip( + raw.annotations.description, + raw.annotations.onset, + raw.annotations.duration, + raw.annotations.ch_names, + ): + if ch_names: + for ch_name in ch_names: + annotations.append( + EdfAnnotation(onset, duration, desc + f"@@{ch_name}") + ) + else: + annotations.append(EdfAnnotation(onset, duration, desc)) + + Edf( + signals=signals, + patient=patient, + recording=recording, + starttime=starttime, + data_record_duration=data_record_duration, + annotations=annotations, + ).write(fname) diff --git a/mne/export/_eeglab.py b/mne/export/_eeglab.py new file mode 100644 index 0000000..3c8f896 --- /dev/null +++ b/mne/export/_eeglab.py @@ -0,0 +1,92 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
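
Before the EEGLAB writer, a usage sketch for the EDF export path above. This is a minimal, hedged example: the file name and the ±3200 uV range are illustrative choices, and it assumes MNE-Python with the optional `edfio` dependency installed.

```python
import numpy as np
import mne

# Build a small synthetic Raw object (2 EEG channels, 10 s at 256 Hz).
info = mne.create_info(["EEG 001", "EEG 002"], sfreq=256.0, ch_types="eeg")
raw = mne.io.RawArray(np.random.RandomState(0).randn(2, 2560) * 1e-5, info)

# physical_range may be "auto" (per channel type), "channelwise", or an
# explicit (min, max) pair in uV, such as the ADC clipping level.
raw.export("sample.edf", physical_range=(-3200, 3200), add_ch_type=True,
           overwrite=True)
```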
+ +import numpy as np + +from ..utils import _check_eeglabio_installed + +_check_eeglabio_installed() +import eeglabio.epochs # noqa: E402 +import eeglabio.raw # noqa: E402 + + +def _export_raw(fname, raw): + # load data first + raw.load_data() + + # remove extra epoc and STI channels + drop_chs = ["epoc"] + # filenames attribute of RawArray is filled with None + if raw.filenames[0] and raw.filenames[0].suffix != ".fif": + drop_chs.append("STI 014") + + ch_names = [ch for ch in raw.ch_names if ch not in drop_chs] + cart_coords = _get_als_coords_from_chs(raw.info["chs"], drop_chs) + + annotations = [ + raw.annotations.description, + raw.annotations.onset, + raw.annotations.duration, + ] + eeglabio.raw.export_set( + fname, + data=raw.get_data(picks=ch_names), + sfreq=raw.info["sfreq"], + ch_names=ch_names, + ch_locs=cart_coords, + annotations=annotations, + ) + + +def _export_epochs(fname, epochs): + _check_eeglabio_installed() + # load data first + epochs.load_data() + + # remove extra epoc and STI channels + drop_chs = ["epoc", "STI 014"] + ch_names = [ch for ch in epochs.ch_names if ch not in drop_chs] + cart_coords = _get_als_coords_from_chs(epochs.info["chs"], drop_chs) + + if epochs.annotations: + annot = [ + epochs.annotations.description, + epochs.annotations.onset, + epochs.annotations.duration, + ] + else: + annot = None + + eeglabio.epochs.export_set( + fname, + data=epochs.get_data(picks=ch_names), + sfreq=epochs.info["sfreq"], + events=epochs.events, + tmin=epochs.tmin, + tmax=epochs.tmax, + ch_names=ch_names, + event_id=epochs.event_id, + ch_locs=cart_coords, + annotations=annot, + ) + + +def _get_als_coords_from_chs(chs, drop_chs=None): + """Extract channel locations in ALS format (x, y, z) from a chs instance. + + Returns + ------- + None if no valid coordinates are found (all zeros) + """ + if drop_chs is None: + drop_chs = [] + cart_coords = np.array([d["loc"][:3] for d in chs if d["ch_name"] not in drop_chs]) + if cart_coords.any(): # has coordinates + # (-y x z) to (x y z) + cart_coords[:, 0] = -cart_coords[:, 0] # -y to y + # swap x (1) and y (0) + cart_coords[:, [0, 1]] = cart_coords[:, [1, 0]] + else: + cart_coords = None + return cart_coords diff --git a/mne/export/_egimff.py b/mne/export/_egimff.py new file mode 100644 index 0000000..3792ea4 --- /dev/null +++ b/mne/export/_egimff.py @@ -0,0 +1,176 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import datetime +import os +import os.path as op +import shutil + +import numpy as np + +from .._fiff.pick import pick_channels, pick_types +from ..io.egi.egimff import _import_mffpy +from ..utils import _check_fname, verbose, warn + + +@verbose +def export_evokeds_mff(fname, evoked, history=None, *, overwrite=False, verbose=None): + """Export evoked dataset to MFF. + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + evoked : list of Evoked instances + List of evoked datasets to export to one file. Note that the + measurement info from the first evoked instance is used, so be sure + that information matches. + history : None (default) | list of dict + Optional list of history entries (dictionaries) to be written to + history.xml. This must adhere to the format described in + mffpy.xml_files.History.content. If None, no history.xml will be + written. + %(overwrite)s + + .. versionadded:: 0.24.1 + %(verbose)s + + Notes + ----- + .. 
versionadded:: 0.24 + + %(export_warning_note_evoked)s + + Only EEG channels are written to the output file. + ``info['device_info']['type']`` must be a valid MFF recording device + (e.g. 'HydroCel GSN 256 1.0'). This field is automatically populated when + using MFF read functions. + """ + mffpy = _import_mffpy("Export evokeds to MFF.") + + info = evoked[0].info + if np.round(info["sfreq"]) != info["sfreq"]: + raise ValueError( + f'Sampling frequency must be a whole number. sfreq: {info["sfreq"]}' + ) + sampling_rate = int(info["sfreq"]) + + # check for unapplied projectors + if any(not proj["active"] for proj in evoked[0].info["projs"]): + warn( + "Evoked instance has unapplied projectors. Consider applying " + "them before exporting with evoked.apply_proj()." + ) + + # Initialize writer + # Future changes: conditions based on version or mffpy requirement if + # https://github.com/BEL-Public/mffpy/pull/92 is merged and released. + fname = str(_check_fname(fname, overwrite=overwrite)) + if op.exists(fname): + os.remove(fname) if op.isfile(fname) else shutil.rmtree(fname) + writer = mffpy.Writer(fname) + current_time = datetime.datetime.now(datetime.timezone.utc) + writer.addxml("fileInfo", recordTime=current_time) + try: + device = info["device_info"]["type"] + except (TypeError, KeyError): + raise ValueError("No device type. Cannot determine sensor layout.") + writer.add_coordinates_and_sensor_layout(device) + + # Add EEG data + eeg_channels = pick_types(info, eeg=True, exclude=[]) + eeg_bin = mffpy.bin_writer.BinWriter(sampling_rate) + for ave in evoked: + # Signals are converted to µV + block = (ave.data[eeg_channels] * 1e6).astype(np.float32) + eeg_bin.add_block(block, offset_us=0) + writer.addbin(eeg_bin) + + # Add categories + categories_content = _categories_content_from_evokeds(evoked) + writer.addxml("categories", categories=categories_content) + + # Add history + if history: + writer.addxml("historyEntries", entries=history) + + writer.write() + + +def _categories_content_from_evokeds(evoked): + """Return categories.xml content for evoked dataset.""" + content = dict() + begin_time = 0 + for ave in evoked: + # Times are converted to microseconds + sfreq = ave.info["sfreq"] + duration = np.round(len(ave.times) / sfreq * 1e6).astype(int) + end_time = begin_time + duration + event_time = begin_time - np.round(ave.tmin * 1e6).astype(int) + eeg_bads = _get_bad_eeg_channels(ave.info) + content[ave.comment] = [ + _build_segment_content( + begin_time, + end_time, + event_time, + eeg_bads, + name="Average", + nsegs=ave.nave, + ) + ] + begin_time += duration + return content + + +def _get_bad_eeg_channels(info): + """Return a list of bad EEG channels formatted for categories.xml. + + Given a list of only the EEG channels in file, return the indices of this + list (starting at 1) that correspond to bad channels. + """ + if len(info["bads"]) == 0: + return [] + eeg_channels = pick_types(info, eeg=True, exclude=[]) + bad_channels = pick_channels(info["ch_names"], info["bads"]) + bads_elementwise = np.isin(eeg_channels, bad_channels) + return list(np.flatnonzero(bads_elementwise) + 1) + + +def _build_segment_content( + begin_time, + end_time, + event_time, + eeg_bads, + status="unedited", + name=None, + pns_bads=None, + nsegs=None, +): + """Build content for a single segment in categories.xml. + + Segments are sorted into categories in categories.xml. 
In a segmented MFF + each category can contain multiple segments, but in an averaged MFF each + category only contains one segment (the average). + """ + channel_status = [ + {"signalBin": 1, "exclusion": "badChannels", "channels": eeg_bads} + ] + if pns_bads: + channel_status.append( + {"signalBin": 2, "exclusion": "badChannels", "channels": pns_bads} + ) + content = { + "status": status, + "beginTime": begin_time, + "endTime": end_time, + "evtBegin": event_time, + "evtEnd": event_time, + "channelStatus": channel_status, + } + if name: + content["name"] = name + if nsegs: + content["keys"] = {"#seg": {"type": "long", "data": nsegs}} + return content diff --git a/mne/export/_export.py b/mne/export/_export.py new file mode 100644 index 0000000..490bf98 --- /dev/null +++ b/mne/export/_export.py @@ -0,0 +1,222 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os.path as op + +from ..utils import _check_fname, _validate_type, logger, verbose, warn +from ._egimff import export_evokeds_mff + + +@verbose +def export_raw( + fname, + raw, + fmt="auto", + physical_range="auto", + add_ch_type=False, + *, + overwrite=False, + verbose=None, +): + """Export Raw to external formats. + + %(export_fmt_support_raw)s + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + raw : instance of Raw + The raw instance to export. + %(export_fmt_params_raw)s + %(physical_range_export_params)s + %(add_ch_type_export_params)s + %(overwrite)s + + .. versionadded:: 0.24.1 + %(verbose)s + + Notes + ----- + .. versionadded:: 0.24 + + %(export_warning_note_raw)s + %(export_eeglab_note)s + %(export_edf_note)s + """ + fname = str(_check_fname(fname, overwrite=overwrite)) + supported_export_formats = { # format : (extensions,) + "eeglab": ("set",), + "edf": ("edf",), + "brainvision": ( + "eeg", + "vmrk", + "vhdr", + ), + } + fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats) + + # check for unapplied projectors + if any(not proj["active"] for proj in raw.info["projs"]): + warn( + "Raw instance has unapplied projectors. Consider applying " + "them before exporting with raw.apply_proj()." + ) + + if fmt == "eeglab": + from ._eeglab import _export_raw + + _export_raw(fname, raw) + elif fmt == "edf": + from ._edf import _export_raw + + _export_raw(fname, raw, physical_range, add_ch_type) + elif fmt == "brainvision": + from ._brainvision import _export_raw + + _export_raw(fname, raw, overwrite) + + +@verbose +def export_epochs(fname, epochs, fmt="auto", *, overwrite=False, verbose=None): + """Export Epochs to external formats. + + %(export_fmt_support_epochs)s + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + epochs : instance of Epochs + The epochs to export. + %(export_fmt_params_epochs)s + %(overwrite)s + + .. versionadded:: 0.24.1 + %(verbose)s + + Notes + ----- + .. versionadded:: 0.24 + + %(export_warning_note_epochs)s + %(export_eeglab_note)s + """ + fname = str(_check_fname(fname, overwrite=overwrite)) + supported_export_formats = { + "eeglab": ("set",), + } + fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats) + + # check for unapplied projectors + if any(not proj["active"] for proj in epochs.info["projs"]): + warn( + "Epochs instance has unapplied projectors. Consider applying " + "them before exporting with epochs.apply_proj()." 
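+            # (apply_proj() modifies the instance in place and returns it, so
+            # it can be chained, e.g. epochs.apply_proj().export(fname).)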
+ ) + + if fmt == "eeglab": + from ._eeglab import _export_epochs + + _export_epochs(fname, epochs) + + +@verbose +def export_evokeds(fname, evoked, fmt="auto", *, overwrite=False, verbose=None): + """Export evoked dataset to external formats. + + This function is a wrapper for format-specific export functions. The export + function is selected based on the inferred file format. For additional + options, use the format-specific functions. + + %(export_fmt_support_evoked)s + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + evoked : Evoked instance, or list of Evoked instances + The evoked dataset, or list of evoked datasets, to export to one file. + Note that the measurement info from the first evoked instance is used, + so be sure that information matches. + %(export_fmt_params_evoked)s + %(overwrite)s + + .. versionadded:: 0.24.1 + %(verbose)s + + See Also + -------- + mne.write_evokeds + mne.export.export_evokeds_mff + + Notes + ----- + .. versionadded:: 0.24 + + %(export_warning_note_evoked)s + """ + fname = str(_check_fname(fname, overwrite=overwrite)) + supported_export_formats = { + "mff": ("mff",), + } + fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats) + + if not isinstance(evoked, list): + evoked = [evoked] + + logger.info(f"Exporting evoked dataset to {fname}...") + + if fmt == "mff": + export_evokeds_mff(fname, evoked, overwrite=overwrite) + + +def _infer_check_export_fmt(fmt, fname, supported_formats): + """Infer export format from filename extension if auto. + + Raises error if fmt is auto and no file extension found, + then checks format against supported formats, raises error if format is not + supported. + + Parameters + ---------- + fmt : str + Format of the export, will only infer the format from filename if fmt + is auto. + fname : str + Name of the target export file, only used when fmt is auto. + supported_formats : dict of str : tuple/list + Dictionary containing supported formats (as keys) and each format's + corresponding file extensions in a tuple (e.g., {'eeglab': ('set',)}) + """ + _validate_type(fmt, str, "fmt") + fmt = fmt.lower() + if fmt == "auto": + fmt = op.splitext(fname)[1] + if fmt: + fmt = fmt[1:].lower() + # find fmt in supported formats dict's tuples + fmt = next( + (k for k, v in supported_formats.items() if fmt in v), fmt + ) # default to original fmt for raising error later + else: + raise ValueError( + f"Couldn't infer format from filename {fname} (no extension found)" + ) + + if fmt not in supported_formats: + supported = [] + for supp_format, extensions in supported_formats.items(): + ext_str = ", ".join(f"*.{ext}" for ext in extensions) + supported.append(f"{supp_format} ({ext_str})") + + supported_str = ", ".join(supported) + raise ValueError( + f"Format '{fmt}' is not supported. " + f"Supported formats are {supported_str}." + ) + return fmt diff --git a/mne/filter.py b/mne/filter.py new file mode 100644 index 0000000..ee5b34c --- /dev/null +++ b/mne/filter.py @@ -0,0 +1,2948 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
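
Before the filtering module, a standalone re-illustration of the extension-based format inference implemented by `_infer_check_export_fmt` above. The `infer_fmt` helper is hypothetical (not part of MNE); the mapping mirrors `export_raw`'s supported formats.

```python
import os.path as op

# Same format -> extensions mapping used by export_raw above.
supported = {"eeglab": ("set",), "edf": ("edf",),
             "brainvision": ("eeg", "vmrk", "vhdr")}

def infer_fmt(fname: str) -> str:
    """Mimic the 'auto' branch: map a file extension to an export format."""
    ext = op.splitext(fname)[1][1:].lower()
    if not ext:
        raise ValueError(f"Couldn't infer format from filename {fname}")
    # Fall back to the raw extension so the caller can raise a clear error.
    return next((k for k, v in supported.items() if ext in v), ext)

print(infer_fmt("rest_run1.vhdr"))  # brainvision
print(infer_fmt("rest_run1.set"))   # eeglab
```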
+ +"""IIR and FIR filtering and resampling functions.""" + +from collections import Counter +from copy import deepcopy +from functools import partial +from math import gcd + +import numpy as np +from scipy import fft, signal +from scipy.stats import f as fstat + +from ._fiff.pick import _picks_to_idx +from ._ola import _COLA +from .cuda import ( + _fft_multiply_repeated, + _fft_resample, + _setup_cuda_fft_multiply_repeated, + _setup_cuda_fft_resample, + _smart_pad, +) +from .fixes import minimum_phase +from .parallel import parallel_func +from .utils import ( + _check_option, + _check_preload, + _ensure_int, + _pl, + _validate_type, + logger, + sum_squared, + verbose, + warn, +) + +# These values from Ifeachor and Jervis. +_length_factors = dict(hann=3.1, hamming=3.3, blackman=5.0) + + +def next_fast_len(target): + """Find the next fast size of input data to `fft`, for zero-padding, etc. + + SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this + returns the next composite of the prime factors 2, 3, and 5 which is + greater than or equal to `target`. (These are also known as 5-smooth + numbers, regular numbers, or Hamming numbers.) + + Parameters + ---------- + target : int + Length to start searching from. Must be a positive integer. + + Returns + ------- + out : int + The first 5-smooth number greater than or equal to `target`. + + Notes + ----- + Copied from SciPy with minor modifications. + """ + from bisect import bisect_left + + hams = ( + 8, + 9, + 10, + 12, + 15, + 16, + 18, + 20, + 24, + 25, + 27, + 30, + 32, + 36, + 40, + 45, + 48, + 50, + 54, + 60, + 64, + 72, + 75, + 80, + 81, + 90, + 96, + 100, + 108, + 120, + 125, + 128, + 135, + 144, + 150, + 160, + 162, + 180, + 192, + 200, + 216, + 225, + 240, + 243, + 250, + 256, + 270, + 288, + 300, + 320, + 324, + 360, + 375, + 384, + 400, + 405, + 432, + 450, + 480, + 486, + 500, + 512, + 540, + 576, + 600, + 625, + 640, + 648, + 675, + 720, + 729, + 750, + 768, + 800, + 810, + 864, + 900, + 960, + 972, + 1000, + 1024, + 1080, + 1125, + 1152, + 1200, + 1215, + 1250, + 1280, + 1296, + 1350, + 1440, + 1458, + 1500, + 1536, + 1600, + 1620, + 1728, + 1800, + 1875, + 1920, + 1944, + 2000, + 2025, + 2048, + 2160, + 2187, + 2250, + 2304, + 2400, + 2430, + 2500, + 2560, + 2592, + 2700, + 2880, + 2916, + 3000, + 3072, + 3125, + 3200, + 3240, + 3375, + 3456, + 3600, + 3645, + 3750, + 3840, + 3888, + 4000, + 4050, + 4096, + 4320, + 4374, + 4500, + 4608, + 4800, + 4860, + 5000, + 5120, + 5184, + 5400, + 5625, + 5760, + 5832, + 6000, + 6075, + 6144, + 6250, + 6400, + 6480, + 6561, + 6750, + 6912, + 7200, + 7290, + 7500, + 7680, + 7776, + 8000, + 8100, + 8192, + 8640, + 8748, + 9000, + 9216, + 9375, + 9600, + 9720, + 10000, + ) + + if target <= 6: + return target + + # Quickly check if it's already a power of 2 + if not (target & (target - 1)): + return target + + # Get result quickly for small sizes, since FFT itself is similarly fast. 
+ if target <= hams[-1]: + return hams[bisect_left(hams, target)] + + match = float("inf") # Anything found will be smaller + p5 = 1 + while p5 < target: + p35 = p5 + while p35 < target: + # Ceiling integer division, avoiding conversion to float + # (quotient = ceil(target / p35)) + quotient = -(-target // p35) + + p2 = 2 ** int(quotient - 1).bit_length() + + N = p2 * p35 + if N == target: + return N + elif N < match: + match = N + p35 *= 3 + if p35 == target: + return p35 + if p35 < match: + match = p35 + p5 *= 5 + if p5 == target: + return p5 + if p5 < match: + match = p5 + return match + + +def _overlap_add_filter( + x, + h, + n_fft=None, + phase="zero", + picks=None, + n_jobs=None, + copy=True, + pad="reflect_limited", +): + """Filter the signal x using h with overlap-add FFTs.""" + # set up array for filtering, reshape to 2D, operate on last axis + x, orig_shape, picks = _prep_for_filtering(x, copy, picks) + # Extend the signal by mirroring the edges to reduce transient filter + # response + _check_zero_phase_length(len(h), phase) + if len(h) == 1: + return x * h**2 if phase == "zero-double" else x * h + n_edge = max(min(len(h), x.shape[1]) - 1, 0) + logger.debug(f"Smart-padding with: {n_edge} samples on each edge") + n_x = x.shape[1] + 2 * n_edge + + if phase == "zero-double": + h = np.convolve(h, h[::-1]) + + # Determine FFT length to use + min_fft = 2 * len(h) - 1 + if n_fft is None: + max_fft = n_x + if max_fft >= min_fft: + # cost function based on number of multiplications + N = 2 ** np.arange( + np.ceil(np.log2(min_fft)), np.ceil(np.log2(max_fft)) + 1, dtype=int + ) + cost = ( + np.ceil(n_x / (N - len(h) + 1).astype(np.float64)) + * N + * (np.log2(N) + 1) + ) + + # add a heuristic term to prevent too-long FFT's which are slow + # (not predicted by mult. cost alone, 4e-5 exp. 
determined) + cost += 4e-5 * N * n_x + + n_fft = N[np.argmin(cost)] + else: + # Use only a single block + n_fft = next_fast_len(min_fft) + logger.debug(f"FFT block length: {n_fft}") + if n_fft < min_fft: + raise ValueError( + f"n_fft is too short, has to be at least 2 * len(h) - 1 ({min_fft}), got " + f"{n_fft}" + ) + + # Figure out if we should use CUDA + n_jobs, cuda_dict = _setup_cuda_fft_multiply_repeated(n_jobs, h, n_fft) + + # Process each row separately + picks = _picks_to_idx(len(x), picks) + parallel, p_fun, _ = parallel_func(_1d_overlap_filter, n_jobs) + if n_jobs == 1: + for p in picks: + x[p] = _1d_overlap_filter( + x[p], len(h), n_edge, phase, cuda_dict, pad, n_fft + ) + else: + data_new = parallel( + p_fun(x[p], len(h), n_edge, phase, cuda_dict, pad, n_fft) for p in picks + ) + for pp, p in enumerate(picks): + x[p] = data_new[pp] + + x.shape = orig_shape + return x + + +def _1d_overlap_filter(x, n_h, n_edge, phase, cuda_dict, pad, n_fft): + """Do one-dimensional overlap-add FFT FIR filtering.""" + # pad to reduce ringing + x_ext = _smart_pad(x, (n_edge, n_edge), pad) + n_x = len(x_ext) + x_filtered = np.zeros_like(x_ext) + + n_seg = n_fft - n_h + 1 + n_segments = int(np.ceil(n_x / float(n_seg))) + shift = ((n_h - 1) // 2 if phase.startswith("zero") else 0) + n_edge + + # Now the actual filtering step is identical for zero-phase (filtfilt-like) + # or single-pass + for seg_idx in range(n_segments): + start = seg_idx * n_seg + stop = (seg_idx + 1) * n_seg + seg = x_ext[start:stop] + seg = np.concatenate([seg, np.zeros(n_fft - len(seg))]) + + prod = _fft_multiply_repeated(seg, cuda_dict) + + start_filt = max(0, start - shift) + stop_filt = min(start - shift + n_fft, n_x) + start_prod = max(0, shift - start) + stop_prod = start_prod + stop_filt - start_filt + x_filtered[start_filt:stop_filt] += prod[start_prod:stop_prod] + + # Remove mirrored edges that we added and cast (n_edge can be zero) + x_filtered = x_filtered[: n_x - 2 * n_edge].astype(x.dtype) + return x_filtered + + +def _filter_attenuation(h, freq, gain): + """Compute minimum attenuation at stop frequency.""" + _, filt_resp = signal.freqz(h.ravel(), worN=np.pi * freq) + filt_resp = np.abs(filt_resp) # use amplitude response + filt_resp[np.where(gain == 1)] = 0 + idx = np.argmax(filt_resp) + att_db = -20 * np.log10(np.maximum(filt_resp[idx], 1e-20)) + att_freq = freq[idx] + return att_db, att_freq + + +def _prep_for_filtering(x, copy, picks=None): + """Set up array as 2D for filtering ease.""" + x = _check_filterable(x) + if copy is True: + x = x.copy() + orig_shape = x.shape + x = np.atleast_2d(x) + picks = _picks_to_idx(x.shape[-2], picks) + x.shape = (np.prod(x.shape[:-1]), x.shape[-1]) + if len(orig_shape) == 3: + n_epochs, n_channels, n_times = orig_shape + offset = np.repeat(np.arange(0, n_channels * n_epochs, n_channels), len(picks)) + picks = np.tile(picks, n_epochs) + offset + elif len(orig_shape) > 3: + raise ValueError( + "picks argument is not supported for data with more" + " than three dimensions" + ) + assert all(0 <= pick < x.shape[0] for pick in picks) # guaranteed by above + + return x, orig_shape, picks + + +def _firwin_design(N, freq, gain, window, sfreq): + """Construct a FIR filter using firwin.""" + assert freq[0] == 0 + assert len(freq) > 1 + assert len(freq) == len(gain) + assert N % 2 == 1 + h = np.zeros(N) + prev_freq = freq[-1] + prev_gain = gain[-1] + if gain[-1] == 1: + h[N // 2] = 1 # start with "all up" + assert prev_gain in (0, 1) + for this_freq, this_gain in zip(freq[::-1][1:], 
gain[::-1][1:]):
+        assert this_gain in (0, 1)
+        if this_gain != prev_gain:
+            # Get the correct N to satisfy the requested transition bandwidth
+            transition = (prev_freq - this_freq) / 2.0
+            this_N = int(round(_length_factors[window] / transition))
+            this_N += 1 - this_N % 2  # make it odd
+            if this_N > N:
+                raise ValueError(
+                    f"The requested filter length {N} is too short for the requested "
+                    f"{transition * sfreq / 2.0:0.2f} Hz transition band, which "
+                    f"requires {this_N} samples"
+                )
+            # Construct a lowpass
+            this_h = signal.firwin(
+                this_N,
+                (prev_freq + this_freq) / 2.0,
+                window=window,
+                pass_zero=True,
+                fs=freq[-1] * 2,
+            )
+            assert this_h.shape == (this_N,)
+            offset = (N - this_N) // 2
+            if this_gain == 0:
+                h[offset : N - offset] -= this_h
+            else:
+                h[offset : N - offset] += this_h
+        prev_gain = this_gain
+        prev_freq = this_freq
+    return h
+
+
+def _construct_fir_filter(
+    sfreq, freq, gain, filter_length, phase, fir_window, fir_design
+):
+    """Filter signal using gain control points in the frequency domain.
+
+    The filter impulse response is constructed from a Hann window (window
+    used in "firwin2" function) to avoid ripples in the frequency response
+    (windowing is a smoothing in frequency domain).
+
+    If x is multi-dimensional, this operates along the last dimension.
+    """
+    assert freq[0] == 0
+    if fir_design == "firwin2":
+        fir_design = signal.firwin2
+    else:
+        assert fir_design == "firwin"
+        fir_design = partial(_firwin_design, sfreq=sfreq)
+    # issue a warning if attenuation is less than this
+    min_att_db = 12 if phase == "minimum-half" else 20
+
+    # normalize frequencies
+    freq = np.array(freq) / (sfreq / 2.0)
+    if freq[0] != 0 or freq[-1] != 1:
+        raise ValueError(
+            f"freq must start at 0 and end at Nyquist ({sfreq / 2.0}), got {freq}"
+        )
+    gain = np.array(gain)
+
+    # Use overlap-add filter with a fixed length
+    N = _check_zero_phase_length(filter_length, phase, gain[-1])
+    # construct symmetric (linear phase) filter
+    if phase == "minimum-half":
+        h = fir_design(N * 2 - 1, freq, gain, window=fir_window)
+        h = minimum_phase(h)
+    else:
+        h = fir_design(N, freq, gain, window=fir_window)
+        if phase == "minimum":
+            h = minimum_phase(h, half=False)
+    assert h.size == N
+    att_db, att_freq = _filter_attenuation(h, freq, gain)
+    if phase == "zero-double":
+        att_db += 6
+    if att_db < min_att_db:
+        att_freq *= sfreq / 2.0
+        warn(
+            f"Attenuation at stop frequency {att_freq:0.2f} Hz is only {att_db:0.2f} "
+            "dB. Increase filter_length for higher attenuation."
+        )
+    return h
+
+
+def _check_zero_phase_length(N, phase, gain_nyq=0):
+    N = int(N)
+    if N % 2 == 0:
+        if phase == "zero":
+            raise RuntimeError(f'filter_length must be odd if phase="zero", got {N}')
+        elif phase == "zero-double" and gain_nyq == 1:
+            N += 1
+    return N
+
+
+def _check_coefficients(system):
+    """Check for filter stability."""
+    if isinstance(system, tuple):
+        z, p, k = signal.tf2zpk(*system)
+    else:  # sos
+        z, p, k = signal.sos2zpk(system)
+    if np.any(np.abs(p) > 1.0):
+        raise RuntimeError(
+            "Filter poles outside unit circle, filter will be "
+            "unstable. Consider using different filter "
+            "coefficients."
+        )
+
+
+def _iir_filter(x, iir_params, picks, n_jobs, copy, phase="zero"):
+    """Call filtfilt or lfilter."""
+    # set up array for filtering, reshape to 2D, operate on last axis
+    x, orig_shape, picks = _prep_for_filtering(x, copy, picks)
+    if phase in ("zero", "zero-double"):
+        padlen = min(iir_params["padlen"], x.shape[-1] - 1)
+        if "sos" in iir_params:
+            fun = partial(
+                signal.sosfiltfilt, sos=iir_params["sos"], padlen=padlen, axis=-1
+            )
+            _check_coefficients(iir_params["sos"])
+        else:
+            fun = partial(
+                signal.filtfilt,
+                b=iir_params["b"],
+                a=iir_params["a"],
+                padlen=padlen,
+                axis=-1,
+            )
+            _check_coefficients((iir_params["b"], iir_params["a"]))
+    else:
+        if "sos" in iir_params:
+            fun = partial(signal.sosfilt, sos=iir_params["sos"], axis=-1)
+            _check_coefficients(iir_params["sos"])
+        else:
+            fun = partial(signal.lfilter, b=iir_params["b"], a=iir_params["a"], axis=-1)
+            _check_coefficients((iir_params["b"], iir_params["a"]))
+    parallel, p_fun, n_jobs = parallel_func(fun, n_jobs)
+    if n_jobs == 1:
+        for p in picks:
+            x[p] = fun(x=x[p])
+    else:
+        data_new = parallel(p_fun(x=x[p]) for p in picks)
+        for pp, p in enumerate(picks):
+            x[p] = data_new[pp]
+    x.shape = orig_shape
+    return x
+
+
+def estimate_ringing_samples(system, max_try=100000):
+    """Estimate filter ringing.
+
+    Parameters
+    ----------
+    system : tuple | ndarray
+        A tuple of (b, a) or ndarray of second-order sections coefficients.
+    max_try : int
+        Approximate maximum number of samples to try.
+        This will be changed to a multiple of 1000.
+
+    Returns
+    -------
+    n : int
+        The approximate ringing.
+    """
+    if isinstance(system, tuple):  # TF
+        kind = "ba"
+        b, a = system
+        zi = [0.0] * (len(a) - 1)
+    else:
+        kind = "sos"
+        sos = system
+        zi = [[0.0] * 2] * len(sos)
+    n_per_chunk = 1000
+    n_chunks_max = int(np.ceil(max_try / float(n_per_chunk)))
+    x = np.zeros(n_per_chunk)
+    x[0] = 1
+    last_good = n_per_chunk
+    thresh_val = 0
+    for ii in range(n_chunks_max):
+        if kind == "ba":
+            h, zi = signal.lfilter(b, a, x, zi=zi)
+        else:
+            h, zi = signal.sosfilt(sos, x, zi=zi)
+        x[0] = 0  # for subsequent iterations we want zero input
+        h = np.abs(h)
+        thresh_val = max(0.001 * np.max(h), thresh_val)
+        idx = np.where(np.abs(h) > thresh_val)[0]
+        if len(idx) > 0:
+            last_good = idx[-1]
+        else:  # this iteration had no sufficiently large values
+            idx = (ii - 1) * n_per_chunk + last_good
+            break
+    else:
+        warn("Could not properly estimate ringing for the filter")
+        idx = n_per_chunk * n_chunks_max
+    return idx
+
+
+_ftype_dict = {
+    "butter": "Butterworth",
+    "cheby1": "Chebyshev I",
+    "cheby2": "Chebyshev II",
+    "ellip": "Cauer/elliptic",
+    "bessel": "Bessel/Thomson",
+}
+
+
+@verbose
+def construct_iir_filter(
+    iir_params,
+    f_pass=None,
+    f_stop=None,
+    sfreq=None,
+    btype=None,
+    return_copy=True,
+    *,
+    phase="zero",
+    verbose=None,
+):
+    """Use IIR parameters to get filtering coefficients.
+
+    This function works like a wrapper for iirdesign and iirfilter in
+    scipy.signal to make filter coefficients for IIR filtering. It also
+    estimates the number of padding samples based on the filter ringing.
+    It creates a new iir_params dict (or updates the one passed to the
+    function) with the filter coefficients ('b' and 'a') and an estimate
+    of the padding necessary ('padlen') so IIR filtering can be performed.
+
+    Parameters
+    ----------
+    iir_params : dict
+        Dictionary of parameters to use for IIR filtering.
+
+        * If ``iir_params['sos']`` exists, it will be used as
+          second-order sections to perform IIR filtering.
+
+          ..
versionadded:: 0.13 + + * Otherwise, if ``iir_params['b']`` and ``iir_params['a']`` + exist, these will be used as coefficients to perform IIR + filtering. + * Otherwise, if ``iir_params['order']`` and + ``iir_params['ftype']`` exist, these will be used with + `scipy.signal.iirfilter` to make a filter. + You should also supply ``iir_params['rs']`` and + ``iir_params['rp']`` if using elliptic or Chebychev filters. + * Otherwise, if ``iir_params['gpass']`` and + ``iir_params['gstop']`` exist, these will be used with + `scipy.signal.iirdesign` to design a filter. + * ``iir_params['padlen']`` defines the number of samples to pad + (and an estimate will be calculated if it is not given). + See Notes for more details. + * ``iir_params['output']`` defines the system output kind when + designing filters, either "sos" or "ba". For 0.13 the + default is 'ba' but will change to 'sos' in 0.14. + + f_pass : float or list of float + Frequency for the pass-band. Low-pass and high-pass filters should + be a float, band-pass should be a 2-element list of float. + f_stop : float or list of float + Stop-band frequency (same size as f_pass). Not used if 'order' is + specified in iir_params. + sfreq : float | None + The sample rate. + btype : str + Type of filter. Should be 'lowpass', 'highpass', or 'bandpass' + (or analogous string representations known to + :func:`scipy.signal.iirfilter`). + return_copy : bool + If False, the 'sos', 'b', 'a', and 'padlen' entries in + ``iir_params`` will be set inplace (if they weren't already). + Otherwise, a new ``iir_params`` instance will be created and + returned with these entries. + phase : str + Phase of the filter. + ``phase='zero'`` (default) or equivalently ``'zero-double'`` constructs and + applies IIR filter twice, once forward, and once backward (making it non-causal) + using :func:`~scipy.signal.filtfilt`; ``phase='forward'`` will apply + the filter once in the forward (causal) direction using + :func:`~scipy.signal.lfilter`. + + .. versionadded:: 0.13 + %(verbose)s + + Returns + ------- + iir_params : dict + Updated iir_params dict, with the entries (set only if they didn't + exist before) for 'sos' (or 'b', 'a'), and 'padlen' for + IIR filtering. + + See Also + -------- + mne.filter.filter_data + mne.io.Raw.filter + + Notes + ----- + This function triages calls to :func:`scipy.signal.iirfilter` and + :func:`scipy.signal.iirdesign` based on the input arguments (see + linked functions for more details). + + .. versionchanged:: 0.14 + Second-order sections are used in filter design by default (replacing + ``output='ba'`` by ``output='sos'``) to help ensure filter stability + and reduce numerical error. + + Examples + -------- + iir_params can have several forms. Consider constructing a low-pass + filter at 40 Hz with 1000 Hz sampling rate. + + In the most basic (2-parameter) form of iir_params, the order of the + filter 'N' and the type of filtering 'ftype' are specified. To get + coefficients for a 4th-order Butterworth filter, this would be: + + >>> iir_params = dict(order=4, ftype='butter', output='sos') # doctest:+SKIP + >>> iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low', return_copy=False) # doctest:+SKIP + >>> print((2 * len(iir_params['sos']), iir_params['padlen'])) # doctest:+SKIP + (4, 82) + + Filters can also be constructed using filter design methods. 
To get a + 40 Hz Chebyshev type 1 lowpass with specific gain characteristics in the + pass and stop bands (assuming the desired stop band is at 45 Hz), this + would be a filter with much longer ringing: + + >>> iir_params = dict(ftype='cheby1', gpass=3, gstop=20, output='sos') # doctest:+SKIP + >>> iir_params = construct_iir_filter(iir_params, 40, 50, 1000, 'low') # doctest:+SKIP + >>> print((2 * len(iir_params['sos']), iir_params['padlen'])) # doctest:+SKIP + (6, 439) + + Padding and/or filter coefficients can also be manually specified. For + a 10-sample moving window with no padding during filtering, for example, + one can just do: + + >>> iir_params = dict(b=np.ones((10)), a=[1, 0], padlen=0) # doctest:+SKIP + >>> iir_params = construct_iir_filter(iir_params, return_copy=False) # doctest:+SKIP + >>> print((iir_params['b'], iir_params['a'], iir_params['padlen'])) # doctest:+SKIP + (array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]), [1, 0], 0) + + For more information, see the tutorials + :ref:`disc-filtering` and :ref:`tut-filter-resample`. + """ # noqa: E501 + known_filters = ( + "bessel", + "butter", + "butterworth", + "cauer", + "cheby1", + "cheby2", + "chebyshev1", + "chebyshev2", + "chebyshevi", + "chebyshevii", + "ellip", + "elliptic", + ) + if not isinstance(iir_params, dict): + raise TypeError(f"iir_params must be a dict, got {type(iir_params)}") + # if the filter has been designed, we're good to go + Wp = None + if "sos" in iir_params: + system = iir_params["sos"] + output = "sos" + elif "a" in iir_params and "b" in iir_params: + system = (iir_params["b"], iir_params["a"]) + output = "ba" + else: + output = iir_params.get("output", "sos") + _check_option("output", output, ("ba", "sos")) + # ensure we have a valid ftype + if "ftype" not in iir_params: + raise RuntimeError( + "ftype must be an entry in iir_params if 'b' and 'a' are not specified." + ) + ftype = iir_params["ftype"] + if ftype not in known_filters: + raise RuntimeError( + "ftype must be in filter_dict from scipy.signal (e.g., butter, cheby1, " + f"etc.) 
not {ftype}"
+            )
+
+        # use order-based design
+        f_pass = np.atleast_1d(f_pass)
+        if f_pass.ndim > 1:
+            raise ValueError(f"frequencies must be 1D, got {f_pass.ndim}D")
+        edge_freqs = ", ".join(f"{f:0.2f}" for f in f_pass)
+        Wp = f_pass / (float(sfreq) / 2)
+        # it will be designed
+        ftype_nice = _ftype_dict.get(ftype, ftype)
+        _validate_type(phase, str, "phase")
+        _check_option("phase", phase, ("zero", "zero-double", "forward"))
+        if phase in ("zero-double", "zero"):
+            ptype = "zero-phase (two-pass forward and reverse) non-causal"
+        else:
+            ptype = "non-linear phase (one-pass forward) causal"
+        logger.info("")
+        logger.info("IIR filter parameters")
+        logger.info("---------------------")
+        logger.info(f"{ftype_nice} {btype} {ptype} filter:")
+        # SciPy designs forward for -3dB, so forward-backward is -6dB
+        if "order" in iir_params:
+            singleton = btype in ("low", "lowpass", "high", "highpass")
+            use_Wp = Wp.item() if singleton else Wp
+            kwargs = dict(
+                N=iir_params["order"],
+                Wn=use_Wp,
+                btype=btype,
+                ftype=ftype,
+                output=output,
+            )
+            for key in ("rp", "rs"):
+                if key in iir_params:
+                    kwargs[key] = iir_params[key]
+            system = signal.iirfilter(**kwargs)
+            if phase in ("zero", "zero-double"):
+                ptype, pmul = "(effective, after forward-backward)", 2
+            else:
+                ptype, pmul = "(forward)", 1
+            logger.info(
+                "- Filter order %d %s", pmul * iir_params["order"] * len(Wp), ptype
+            )
+        else:
+            # use gpass / gstop design
+            Ws = np.asanyarray(f_stop) / (float(sfreq) / 2)
+            if "gpass" not in iir_params or "gstop" not in iir_params:
+                raise ValueError(
+                    "iir_params must have at least 'gstop' and 'gpass' (or N) entries."
+                )
+            system = signal.iirdesign(
+                Wp,
+                Ws,
+                iir_params["gpass"],
+                iir_params["gstop"],
+                ftype=ftype,
+                output=output,
+            )
+
+    if system is None:
+        raise RuntimeError("coefficients could not be created from iir_params")
+    # do some sanity checks
+    _check_coefficients(system)
+
+    # get the gains at the cutoff frequencies
+    if Wp is not None:
+        if output == "sos":
+            cutoffs = signal.sosfreqz(system, worN=Wp * np.pi)[1]
+        else:
+            cutoffs = signal.freqz(system[0], system[1], worN=Wp * np.pi)[1]
+        cutoffs = 20 * np.log10(np.abs(cutoffs))
+        # 2 * 20 here because we do forward-backward filtering
+        if phase in ("zero", "zero-double"):
+            cutoffs *= 2
+        cutoffs = ", ".join([f"{c:0.2f}" for c in cutoffs])
+        logger.info(f"- Cutoff{_pl(f_pass)} at {edge_freqs} Hz: {cutoffs} dB")
+    # now deal with padding
+    if "padlen" not in iir_params:
+        padlen = estimate_ringing_samples(system)
+    else:
+        padlen = iir_params["padlen"]
+
+    if return_copy:
+        iir_params = deepcopy(iir_params)
+
+    iir_params.update(dict(padlen=padlen))
+    if output == "sos":
+        iir_params.update(sos=system)
+    else:
+        iir_params.update(b=system[0], a=system[1])
+    logger.info("")
+    return iir_params
+
+
+def _check_method(method, iir_params, extra_types=()):
+    """Parse method arguments."""
+    allowed_types = ["iir", "fir", "fft"] + list(extra_types)
+    _validate_type(method, "str", "method")
+    _check_option("method", method, allowed_types)
+    if method == "fft":
+        method = "fir"  # use the better name
+    if method == "iir":
+        if iir_params is None:
+            iir_params = dict()
+        if len(iir_params) == 0 or (len(iir_params) == 1 and "output" in iir_params):
+            iir_params = dict(
+                order=4, ftype="butter", output=iir_params.get("output", "sos")
+            )
+    elif iir_params is not None:
+        raise ValueError('iir_params must be None if method != "iir"')
+    return iir_params, method
+
+
+@verbose
+def filter_data(
+    data,
+    sfreq,
+    l_freq,
+    h_freq,
+    picks=None,
+ filter_length="auto", + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + n_jobs=None, + method="fir", + iir_params=None, + copy=True, + phase="zero", + fir_window="hamming", + fir_design="firwin", + pad="reflect_limited", + *, + verbose=None, +): + """Filter a subset of channels. + + Parameters + ---------- + data : ndarray, shape (..., n_times) + The data to filter. + sfreq : float + The sample frequency in Hz. + %(l_freq)s + %(h_freq)s + %(picks_nostr)s + Currently this is only supported for 2D (n_channels, n_times) and + 3D (n_epochs, n_channels, n_times) arrays. + %(filter_length)s + %(l_trans_bandwidth)s + %(h_trans_bandwidth)s + %(n_jobs_fir)s + %(method_fir)s + %(iir_params)s + copy : bool + If True, a copy of x, filtered, is returned. Otherwise, it operates + on x in place. + %(phase)s + %(fir_window)s + %(fir_design)s + %(pad_fir)s + The default is ``'reflect_limited'``. + + .. versionadded:: 0.15 + %(verbose)s + + Returns + ------- + data : ndarray, shape (..., n_times) + The filtered data. + + See Also + -------- + construct_iir_filter + create_filter + mne.io.Raw.filter + notch_filter + resample + + Notes + ----- + Applies a zero-phase low-pass, high-pass, band-pass, or band-stop + filter to the channels selected by ``picks``. + + ``l_freq`` and ``h_freq`` are the frequencies below which and above + which, respectively, to filter out of the data. Thus the uses are: + + * ``l_freq < h_freq``: band-pass filter + * ``l_freq > h_freq``: band-stop filter + * ``l_freq is not None and h_freq is None``: high-pass filter + * ``l_freq is None and h_freq is not None``: low-pass filter + + .. note:: If n_jobs > 1, more memory is required as + ``len(picks) * n_times`` additional time points need to + be temporarily stored in memory. + + For more information, see the tutorials + :ref:`disc-filtering` and :ref:`tut-filter-resample` and + :func:`mne.filter.create_filter`. + """ + data = _check_filterable(data) + iir_params, method = _check_method(method, iir_params) + filt = create_filter( + data, + sfreq, + l_freq, + h_freq, + filter_length, + l_trans_bandwidth, + h_trans_bandwidth, + method, + iir_params, + phase, + fir_window, + fir_design, + ) + if method in ("fir", "fft"): + data = _overlap_add_filter(data, filt, None, phase, picks, n_jobs, copy, pad) + else: + data = _iir_filter(data, filt, picks, n_jobs, copy, phase) + return data + + +@verbose +def create_filter( + data, + sfreq, + l_freq, + h_freq, + filter_length="auto", + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + method="fir", + iir_params=None, + phase="zero", + fir_window="hamming", + fir_design="firwin", + verbose=None, +): + r"""Create a FIR or IIR filter. + + ``l_freq`` and ``h_freq`` are the frequencies below which and above + which, respectively, to filter out of the data. Thus the uses are: + + * ``l_freq < h_freq``: band-pass filter + * ``l_freq > h_freq``: band-stop filter + * ``l_freq is not None and h_freq is None``: high-pass filter + * ``l_freq is None and h_freq is not None``: low-pass filter + + Parameters + ---------- + data : ndarray, shape (..., n_times) | None + The data that will be filtered. This is used for sanity checking + only. If None, no sanity checking related to the length of the signal + relative to the filter order will be performed. + sfreq : float + The sample frequency in Hz. 
+ %(l_freq)s + %(h_freq)s + %(filter_length)s + %(l_trans_bandwidth)s + %(h_trans_bandwidth)s + %(method_fir)s + %(iir_params)s + %(phase)s + %(fir_window)s + %(fir_design)s + %(verbose)s + + Returns + ------- + filt : array or dict + Will be an array of FIR coefficients for method='fir', and dict + with IIR parameters for method='iir'. + + See Also + -------- + filter_data + + Notes + ----- + .. note:: For FIR filters, the *cutoff frequency*, i.e. the -6 dB point, + is in the middle of the transition band (when using phase='zero' + and fir_design='firwin'). For IIR filters, the cutoff frequency + is given by ``l_freq`` or ``h_freq`` directly, and + ``l_trans_bandwidth`` and ``h_trans_bandwidth`` are ignored. + + **Band-pass filter** + + The frequency response is (approximately) given by:: + + 1-| ---------- + | /| | \ + |H| | / | | \ + | / | | \ + | / | | \ + 0-|---------- | | -------------- + | | | | | | + 0 Fs1 Fp1 Fp2 Fs2 Nyq + + Where: + + * Fs1 = Fp1 - l_trans_bandwidth in Hz + * Fs2 = Fp2 + h_trans_bandwidth in Hz + + **Band-stop filter** + + The frequency response is (approximately) given by:: + + 1-|--------- ---------- + | \ / + |H| | \ / + | \ / + | \ / + 0-| ----------- + | | | | | | + 0 Fp1 Fs1 Fs2 Fp2 Nyq + + Where ``Fs1 = Fp1 + l_trans_bandwidth`` and + ``Fs2 = Fp2 - h_trans_bandwidth``. + + Multiple stop bands can be specified using arrays. + + **Low-pass filter** + + The frequency response is (approximately) given by:: + + 1-|------------------------ + | \ + |H| | \ + | \ + | \ + 0-| ---------------- + | | | | + 0 Fp Fstop Nyq + + Where ``Fstop = Fp + trans_bandwidth``. + + **High-pass filter** + + The frequency response is (approximately) given by:: + + 1-| ----------------------- + | / + |H| | / + | / + | / + 0-|--------- + | | | | + 0 Fstop Fp Nyq + + Where ``Fstop = Fp - trans_bandwidth``. + + .. versionadded:: 0.14 + """ + sfreq = float(sfreq) + if sfreq < 0: + raise ValueError("sfreq must be positive") + # If no data specified, sanity checking will be skipped + if data is None: + logger.info( + "No data specified. Sanity checks related to the length of the signal " + "relative to the filter order will be skipped." 
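+            # (Passing data=None is a supported way to design or inspect a
+            # filter without having the signal at hand; only the checks of
+            # signal length against filter order are lost.)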
+ ) + if h_freq is not None: + h_freq = np.array(h_freq, float).ravel() + if (h_freq > (sfreq / 2.0)).any(): + raise ValueError( + f"h_freq ({h_freq}) must be less than the Nyquist frequency " + f"{sfreq / 2.0}" + ) + if l_freq is not None: + l_freq = np.array(l_freq, float).ravel() + if (l_freq == 0).all(): + l_freq = None + iir_params, method = _check_method(method, iir_params) + if l_freq is None and h_freq is None: + ( + data, + sfreq, + _, + _, + _, + _, + filter_length, + phase, + fir_window, + fir_design, + ) = _triage_filter_params( + data, + sfreq, + None, + None, + None, + None, + filter_length, + method, + phase, + fir_window, + fir_design, + ) + if method == "iir": + out = dict() if iir_params is None else deepcopy(iir_params) + out.update(b=np.array([1.0]), a=np.array([1.0])) + else: + freq = [0, sfreq / 2.0] + gain = [1.0, 1.0] + if l_freq is None and h_freq is not None: + h_freq = h_freq.item() + logger.info(f"Setting up low-pass filter at {h_freq:0.2g} Hz") + ( + data, + sfreq, + _, + f_p, + _, + f_s, + filter_length, + phase, + fir_window, + fir_design, + ) = _triage_filter_params( + data, + sfreq, + None, + h_freq, + None, + h_trans_bandwidth, + filter_length, + method, + phase, + fir_window, + fir_design, + ) + if method == "iir": + out = construct_iir_filter( + iir_params, f_p, f_s, sfreq, "lowpass", phase=phase + ) + else: # 'fir' + freq = [0, f_p, f_s] + gain = [1, 1, 0] + if f_s != sfreq / 2.0: + freq += [sfreq / 2.0] + gain += [0] + elif l_freq is not None and h_freq is None: + l_freq = l_freq.item() + logger.info(f"Setting up high-pass filter at {l_freq:0.2g} Hz") + ( + data, + sfreq, + pass_, + _, + stop, + _, + filter_length, + phase, + fir_window, + fir_design, + ) = _triage_filter_params( + data, + sfreq, + l_freq, + None, + l_trans_bandwidth, + None, + filter_length, + method, + phase, + fir_window, + fir_design, + ) + if method == "iir": + out = construct_iir_filter( + iir_params, pass_, stop, sfreq, "highpass", phase=phase + ) + else: # 'fir' + freq = [stop, pass_, sfreq / 2.0] + gain = [0, 1, 1] + if stop != 0: + freq = [0] + freq + gain = [0] + gain + elif l_freq is not None and h_freq is not None: + if (l_freq < h_freq).any(): + l_freq, h_freq = l_freq.item(), h_freq.item() + logger.info( + f"Setting up band-pass filter from {l_freq:0.2g} - {h_freq:0.2g} Hz" + ) + ( + data, + sfreq, + f_p1, + f_p2, + f_s1, + f_s2, + filter_length, + phase, + fir_window, + fir_design, + ) = _triage_filter_params( + data, + sfreq, + l_freq, + h_freq, + l_trans_bandwidth, + h_trans_bandwidth, + filter_length, + method, + phase, + fir_window, + fir_design, + ) + if method == "iir": + out = construct_iir_filter( + iir_params, + [f_p1, f_p2], + [f_s1, f_s2], + sfreq, + "bandpass", + phase=phase, + ) + else: # 'fir' + freq = [f_s1, f_p1, f_p2, f_s2] + gain = [0, 1, 1, 0] + if f_s2 != sfreq / 2.0: + freq += [sfreq / 2.0] + gain += [0] + if f_s1 != 0: + freq = [0] + freq + gain = [0] + gain + else: + # This could possibly be removed after 0.14 release, but might + # as well leave it in to sanity check notch_filter + if len(l_freq) != len(h_freq): + raise ValueError("l_freq and h_freq must be the same length") + msg = "Setting up band-stop filter" + if len(l_freq) == 1: + l_freq, h_freq = l_freq.item(), h_freq.item() + msg += f" from {h_freq:0.2g} - {l_freq:0.2g} Hz" + logger.info(msg) + # Note: order of outputs is intentionally switched here! 
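+        # (For a band-stop filter the user's h_freq/l_freq delimit the stop
+        # band, so the pass-band and stop-band edges returned by
+        # _triage_filter_params are unpacked in swapped order, stops first.)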
+ ( + data, + sfreq, + f_s1, + f_s2, + f_p1, + f_p2, + filter_length, + phase, + fir_window, + fir_design, + ) = _triage_filter_params( + data, + sfreq, + h_freq, + l_freq, + h_trans_bandwidth, + l_trans_bandwidth, + filter_length, + method, + phase, + fir_window, + fir_design, + bands="arr", + reverse=True, + ) + if method == "iir": + if len(f_p1) != 1: + raise ValueError( + "Multiple stop-bands can only be used with method='fir' " + "and method='spectrum_fit'" + ) + out = construct_iir_filter( + iir_params, + [f_p1[0], f_p2[0]], + [f_s1[0], f_s2[0]], + sfreq, + "bandstop", + phase=phase, + ) + else: # 'fir' + freq = np.r_[f_p1, f_s1, f_s2, f_p2] + gain = np.r_[ + np.ones_like(f_p1), + np.zeros_like(f_s1), + np.zeros_like(f_s2), + np.ones_like(f_p2), + ] + order = np.argsort(freq) + freq = freq[order] + gain = gain[order] + if freq[0] != 0: + freq = np.r_[[0.0], freq] + gain = np.r_[[1.0], gain] + if freq[-1] != sfreq / 2.0: + freq = np.r_[freq, [sfreq / 2.0]] + gain = np.r_[gain, [1.0]] + if np.any(np.abs(np.diff(gain, 2)) > 1): + raise ValueError("Stop bands are not sufficiently separated.") + if method == "fir": + out = _construct_fir_filter( + sfreq, freq, gain, filter_length, phase, fir_window, fir_design + ) + return out + + +@verbose +def notch_filter( + x, + Fs, + freqs, + filter_length="auto", + notch_widths=None, + trans_bandwidth=1, + method="fir", + iir_params=None, + mt_bandwidth=None, + p_value=0.05, + picks=None, + n_jobs=None, + copy=True, + phase="zero", + fir_window="hamming", + fir_design="firwin", + pad="reflect_limited", + *, + verbose=None, +): + r"""Notch filter for the signal x. + + Applies a zero-phase notch filter to the signal x, operating on the last + dimension. + + Parameters + ---------- + x : array + Signal to filter. + Fs : float + Sampling rate in Hz. + freqs : float | array of float | None + Frequencies to notch filter in Hz, e.g. np.arange(60, 241, 60). + Multiple stop-bands can only be used with method='fir' + and method='spectrum_fit'. None can only be used with the mode + 'spectrum_fit', where an F test is used to find sinusoidal components. + %(filter_length_notch)s + notch_widths : float | array of float | None + Width of the stop band (centred at each freq in freqs) in Hz. + If None, freqs / 200 is used. + trans_bandwidth : float + Width of the transition band in Hz. + Only used for ``method='fir'`` and ``method='iir'``. + %(method_fir)s + 'spectrum_fit' will use multi-taper estimation of sinusoidal + components. If freqs=None and method='spectrum_fit', significant + sinusoidal components are detected using an F test, and noted by + logging. + %(iir_params)s + mt_bandwidth : float | None + The bandwidth of the multitaper windowing function in Hz. + Only used in 'spectrum_fit' mode. + p_value : float + P-value to use in F-test thresholding to determine significant + sinusoidal components to remove when method='spectrum_fit' and + freqs=None. Note that this will be Bonferroni corrected for the + number of frequencies, so large p-values may be justified. + %(picks_nostr)s + Only supported for 2D (n_channels, n_times) and 3D + (n_epochs, n_channels, n_times) data. + %(n_jobs_fir)s + copy : bool + If True, a copy of x, filtered, is returned. Otherwise, it operates + on x in place. + %(phase)s + %(fir_window)s + %(fir_design)s + %(pad_fir)s + The default is ``'reflect_limited'``. + %(verbose)s + + Returns + ------- + xf : array + The x array filtered. 
+ + See Also + -------- + filter_data + resample + + Notes + ----- + The frequency response is (approximately) given by:: + + 1-|---------- ----------- + | \ / + |H| | \ / + | \ / + | \ / + 0-| - + | | | | | + 0 Fp1 freq Fp2 Nyq + + For each freq in freqs, where ``Fp1 = freq - trans_bandwidth / 2`` and + ``Fs2 = freq + trans_bandwidth / 2``. + + References + ---------- + Multi-taper removal is inspired by code from the Chronux toolbox, see + www.chronux.org and the book "Observed Brain Dynamics" by Partha Mitra + & Hemant Bokil, Oxford University Press, New York, 2008. Please + cite this in publications if method 'spectrum_fit' is used. + """ + x = _check_filterable(x, "notch filtered", "notch_filter") + iir_params, method = _check_method(method, iir_params, ["spectrum_fit"]) + + if freqs is not None: + freqs = np.atleast_1d(freqs) + elif method != "spectrum_fit": + raise ValueError("freqs=None can only be used with method spectrum_fit") + + # Only have to deal with notch_widths for non-autodetect + if freqs is not None: + if notch_widths is None: + notch_widths = freqs / 200.0 + elif np.any(notch_widths < 0): + raise ValueError("notch_widths must be >= 0") + else: + notch_widths = np.atleast_1d(notch_widths) + if len(notch_widths) == 1: + notch_widths = notch_widths[0] * np.ones_like(freqs) + elif len(notch_widths) != len(freqs): + raise ValueError( + "notch_widths must be None, scalar, or the same length as freqs" + ) + + if method in ("fir", "iir"): + # Speed this up by computing the fourier coefficients once + tb_2 = trans_bandwidth / 2.0 + lows = [freq - nw / 2.0 - tb_2 for freq, nw in zip(freqs, notch_widths)] + highs = [freq + nw / 2.0 + tb_2 for freq, nw in zip(freqs, notch_widths)] + xf = filter_data( + x, + Fs, + highs, + lows, + picks, + filter_length, + tb_2, + tb_2, + n_jobs, + method, + iir_params, + copy, + phase, + fir_window, + fir_design, + pad=pad, + ) + elif method == "spectrum_fit": + xf = _mt_spectrum_proc( + x, + Fs, + freqs, + notch_widths, + mt_bandwidth, + p_value, + picks, + n_jobs, + copy, + filter_length, + ) + + return xf + + +def _get_window_thresh(n_times, sfreq, mt_bandwidth, p_value): + from .time_frequency.multitaper import _compute_mt_params + + # figure out what tapers to use + window_fun, _, _ = _compute_mt_params( + n_times, sfreq, mt_bandwidth, False, False, verbose=False + ) + + # F-stat of 1-p point + threshold = fstat.ppf(1 - p_value / n_times, 2, 2 * len(window_fun) - 2) + return window_fun, threshold + + +def _mt_spectrum_proc( + x, + sfreq, + line_freqs, + notch_widths, + mt_bandwidth, + p_value, + picks, + n_jobs, + copy, + filter_length, +): + """Call _mt_spectrum_remove.""" + # set up array for filtering, reshape to 2D, operate on last axis + x, orig_shape, picks = _prep_for_filtering(x, copy, picks) + if isinstance(filter_length, str) and filter_length == "auto": + filter_length = "10s" + if filter_length is None: + filter_length = x.shape[-1] + filter_length = min(_to_samples(filter_length, sfreq, "", ""), x.shape[-1]) + get_wt = partial( + _get_window_thresh, sfreq=sfreq, mt_bandwidth=mt_bandwidth, p_value=p_value + ) + window_fun, threshold = get_wt(filter_length) + parallel, p_fun, n_jobs = parallel_func(_mt_spectrum_remove_win, n_jobs) + if n_jobs == 1: + freq_list = list() + for ii, x_ in enumerate(x): + if ii in picks: + x[ii], f = _mt_spectrum_remove_win( + x_, sfreq, line_freqs, notch_widths, window_fun, threshold, get_wt + ) + freq_list.append(f) + else: + data_new = parallel( + p_fun(x_, sfreq, line_freqs, notch_widths, 
window_fun, threshold, get_wt) + for xi, x_ in enumerate(x) + if xi in picks + ) + freq_list = [d[1] for d in data_new] + data_new = np.array([d[0] for d in data_new]) + x[picks, :] = data_new + + # report found frequencies, but do some sanitizing first by binning into + # 1 Hz bins + counts = Counter( + sum((np.unique(np.round(ff)).tolist() for f in freq_list for ff in f), list()) + ) + kind = "Detected" if line_freqs is None else "Removed" + found_freqs = ( + "\n".join( + f" {freq:6.2f} : {counts[freq]:4d} window{_pl(counts[freq])}" + for freq in sorted(counts) + ) + or " None" + ) + logger.info(f"{kind} notch frequencies (Hz):\n{found_freqs}") + + x.shape = orig_shape + return x + + +def _mt_spectrum_remove_win( + x, sfreq, line_freqs, notch_widths, window_fun, threshold, get_thresh +): + n_times = x.shape[-1] + n_samples = window_fun.shape[1] + n_overlap = (n_samples + 1) // 2 + x_out = np.zeros_like(x) + rm_freqs = list() + idx = [0] + + # Define how to process a chunk of data + def process(x_): + out = _mt_spectrum_remove( + x_, sfreq, line_freqs, notch_widths, window_fun, threshold, get_thresh + ) + rm_freqs.append(out[1]) + return (out[0],) # must return a tuple + + # Define how to store a chunk of fully processed data (it's trivial) + def store(x_): + stop = idx[0] + x_.shape[-1] + x_out[..., idx[0] : stop] += x_ + idx[0] = stop + + _COLA(process, store, n_times, n_samples, n_overlap, sfreq, verbose=False).feed(x) + assert idx[0] == n_times + return x_out, rm_freqs + + +def _mt_spectrum_remove( + x, sfreq, line_freqs, notch_widths, window_fun, threshold, get_thresh +): + """Use MT-spectrum to remove line frequencies. + + Based on Chronux. If line_freqs is specified, all freqs within notch_width + of each line_freq is set to zero. + """ + from .time_frequency.multitaper import _mt_spectra + + assert x.ndim == 1 + if x.shape[-1] != window_fun.shape[-1]: + window_fun, threshold = get_thresh(x.shape[-1]) + # drop the even tapers + n_tapers = len(window_fun) + tapers_odd = np.arange(0, n_tapers, 2) + tapers_even = np.arange(1, n_tapers, 2) + tapers_use = window_fun[tapers_odd] + + # sum tapers for (used) odd prolates across time (n_tapers, 1) + H0 = np.sum(tapers_use, axis=1) + + # sum of squares across tapers (1, ) + H0_sq = sum_squared(H0) + + # make "time" vector + rads = 2 * np.pi * (np.arange(x.size) / float(sfreq)) + + # compute mt_spectrum (returning n_ch, n_tapers, n_freq) + x_p, freqs = _mt_spectra(x[np.newaxis, :], window_fun, sfreq) + + # sum of the product of x_p and H0 across tapers (1, n_freqs) + x_p_H0 = np.sum(x_p[:, tapers_odd, :] * H0[np.newaxis, :, np.newaxis], axis=1) + + # resulting calculated amplitudes for all freqs + A = x_p_H0 / H0_sq + + if line_freqs is None: + # figure out which freqs to remove using F stat + + # estimated coefficient + x_hat = A * H0[:, np.newaxis] + + # numerator for F-statistic + num = (n_tapers - 1) * (A * A.conj()).real * H0_sq + # denominator for F-statistic + den = np.sum(np.abs(x_p[:, tapers_odd, :] - x_hat) ** 2, 1) + np.sum( + np.abs(x_p[:, tapers_even, :]) ** 2, 1 + ) + den[den == 0] = np.inf + f_stat = num / den + + # find frequencies to remove + indices = np.where(f_stat > threshold)[1] + rm_freqs = freqs[indices] + else: + # specify frequencies + indices_1 = np.unique([np.argmin(np.abs(freqs - lf)) for lf in line_freqs]) + indices_2 = [ + np.logical_and(freqs > lf - nw / 2.0, freqs < lf + nw / 2.0) + for lf, nw in zip(line_freqs, notch_widths) + ] + indices_2 = np.where(np.any(np.array(indices_2), axis=0))[0] + indices = 
np.unique(np.r_[indices_1, indices_2]) + rm_freqs = freqs[indices] + + fits = list() + for ind in indices: + c = 2 * A[0, ind] + fit = np.abs(c) * np.cos(freqs[ind] * rads + np.angle(c)) + fits.append(fit) + + if len(fits) == 0: + datafit = 0.0 + else: + # fitted sinusoids are summed, and subtracted from data + datafit = np.sum(fits, axis=0) + + return x - datafit, rm_freqs + + +def _check_filterable(x, kind="filtered", alternative="filter"): + # Let's be fairly strict about this -- users can easily coerce to ndarray + # at their end, and we already should do it internally any time we are + # using these low-level functions. At the same time, let's + # help people who might accidentally use low-level functions that they + # shouldn't use by pushing them in the right direction + from .epochs import BaseEpochs + from .evoked import Evoked + from .io import BaseRaw + + if isinstance(x, BaseRaw | BaseEpochs | Evoked): + try: + name = x.__class__.__name__ + except Exception: + pass + else: + raise TypeError( + "This low-level function only operates on np.ndarray instances. To get " + f"a {kind} {name} instance, use a method like `inst_new = inst.copy()." + f"{alternative}(...)` instead." + ) + _validate_type(x, (np.ndarray, list, tuple), f"Data to be {kind}") + x = np.asanyarray(x) + if x.dtype != np.float64: + raise ValueError(f"Data to be {kind} must be real floating, got {x.dtype}") + return x + + +def _resamp_ratio_len(up, down, n): + ratio = float(up) / down + return ratio, max(int(round(ratio * n)), 1) + + +@verbose +def resample( + x, + up=1.0, + down=1.0, + *, + axis=-1, + window="auto", + n_jobs=None, + pad="auto", + npad=100, + method="fft", + verbose=None, +): + """Resample an array. + + Operates along the last dimension of the array. + + Parameters + ---------- + x : ndarray + Signal to resample. + up : float + Factor to upsample by. + down : float + Factor to downsample by. + axis : int + Axis along which to resample (default is the last axis). + %(window_resample)s + %(n_jobs_cuda)s + ``n_jobs='cuda'`` is only supported when ``method="fft"``. + %(pad_resample_auto)s + + .. versionadded:: 0.15 + %(npad_resample)s + %(method_resample)s + + .. versionadded:: 1.7 + %(verbose)s + + Returns + ------- + y : array + The x array resampled. + + Notes + ----- + When using ``method="fft"`` (default), + this uses (hopefully) intelligent edge padding and frequency-domain + windowing improve :func:`scipy.signal.resample`'s resampling method, which + we have adapted for our use here. Choices of npad and window have + important consequences, and the default choices should work well + for most natural signals. 
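+
+    When using ``method="polyphase"``, the resampling ratio is first reduced
+    by the greatest common divisor of ``up`` and ``down``, and the data are
+    passed through a linear-phase low-pass FIR filter (a Kaiser window by
+    default, as in :func:`scipy.signal.resample_poly`), so edge effects are
+    confined to a neighborhood of about half the filter length.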
+
+    """
+    _validate_type(method, str, "method")
+    _validate_type(pad, str, "pad")
+    _check_option("method", method, ("fft", "polyphase"))
+
+    # make sure our arithmetic will work
+    x = _check_filterable(x, "resampled", "resample")
+    ratio, final_len = _resamp_ratio_len(up, down, x.shape[axis])
+    del up, down
+    if axis < 0:
+        axis = x.ndim + axis
+    if x.shape[axis] == 0:
+        warn(f"x has zero length along axis={axis}, returning a copy of x")
+        return x.copy()
+
+    # prep for resampling along the last axis (swap axis with last then reshape)
+    out_shape = list(x.shape)
+    out_shape.pop(axis)
+    out_shape.append(final_len)
+    x = np.atleast_2d(x.swapaxes(axis, -1).reshape((-1, x.shape[axis])))
+
+    # do the resampling using FFT or polyphase methods
+    kwargs = dict(pad=pad, window=window, n_jobs=n_jobs)
+    if method == "fft":
+        y = _resample_fft(x, npad=npad, ratio=ratio, final_len=final_len, **kwargs)
+    else:
+        up, down, kwargs["window"] = _prep_polyphase(
+            ratio, x.shape[-1], final_len, window
+        )
+        # report the length of the designed FIR window (the user-supplied
+        # ``window`` argument may still be a string or tuple at this point)
+        half_len = len(kwargs["window"]) // 2
+        logger.info(
+            f"Polyphase resampling neighborhood: ±{half_len} "
+            f"input sample{_pl(half_len)}"
+        )
+        y = _resample_polyphase(x, up=up, down=down, **kwargs)
+    assert y.shape[-1] == final_len
+
+    # restore dimensions (reshape then swap axis with last)
+    y = y.reshape(out_shape).swapaxes(axis, -1)
+
+    return y
+
+
+def _prep_polyphase(ratio, x_len, final_len, window):
+    if isinstance(window, str) and window == "auto":
+        window = ("kaiser", 5.0)  # SciPy default
+    up = final_len
+    down = x_len
+    g_ = gcd(up, down)
+    up = up // g_
+    down = down // g_
+    # Figure out our signal neighborhood and design window (adapted from SciPy)
+    if not isinstance(window, list | np.ndarray):
+        # Design a linear-phase low-pass FIR filter
+        max_rate = max(up, down)
+        f_c = 1.0 / max_rate  # cutoff of FIR filter (rel. to Nyquist)
+        half_len = 10 * max_rate  # reasonable cutoff for sinc-like function
+        window = signal.firwin(2 * half_len + 1, f_c, window=window)
+    return up, down, window
+
+
+def _resample_polyphase(x, *, up, down, pad, window, n_jobs):
+    if pad == "auto":
+        pad = "reflect"
+    kwargs = dict(padtype=pad, window=window, up=up, down=down)
+    _validate_type(
+        n_jobs, (None, "int-like"), "n_jobs", extra="when method='polyphase'"
+    )
+    parallel, p_fun, n_jobs = parallel_func(signal.resample_poly, n_jobs)
+    if n_jobs == 1:
+        y = signal.resample_poly(x, axis=-1, **kwargs)
+    else:
+        y = np.array(parallel(p_fun(x_, **kwargs) for x_ in x))
+    return y
+
+
+def _resample_fft(x_flat, *, ratio, final_len, pad, window, npad, n_jobs):
+    x_len = x_flat.shape[-1]
+    pad = "reflect_limited" if pad == "auto" else pad
+    if (isinstance(window, str) and window == "auto") or window is None:
+        window = "boxcar"
+    if isinstance(npad, str):
+        _check_option("npad", npad, ("auto",), extra="when a string")
+        # Figure out reasonable pad that gets us to a power of 2
+        min_add = min(x_len // 8, 100) * 2
+        npad = 2 ** int(np.ceil(np.log2(x_len + min_add))) - x_len
+        npad, extra = divmod(npad, 2)
+        npads = np.array([npad, npad + extra], int)
+    else:
+        npad = _ensure_int(npad, "npad", extra="or 'auto'")
+        npads = np.array([npad, npad], int)
+    del npad
+
+    # prep for resampling now
+    orig_len = x_len + npads.sum()  # length after padding
+    new_len = max(int(round(ratio * orig_len)), 1)  # length after resampling
+    to_removes = [int(round(ratio * npads[0]))]
+    to_removes.append(new_len - final_len - to_removes[0])
+    to_removes = np.array(to_removes)
+    # This should hold:
+    # assert np.abs(to_removes[1] - to_removes[0]) <= int(np.ceil(ratio))
+
+    # figure out windowing function
+    if callable(window):
+        W = window(fft.fftfreq(orig_len))
+    elif isinstance(window, np.ndarray) and window.shape == (orig_len,):
+        W = window
+    else:
+        W = fft.ifftshift(signal.get_window(window, orig_len))
+    W *= float(new_len) / float(orig_len)
+
+    # figure out if we should use CUDA
+    n_jobs, cuda_dict = _setup_cuda_fft_resample(n_jobs, W, new_len)
+
+    # do the resampling using an adaptation of scipy's FFT-based resample()
+    # use of the 'flat' window is recommended for minimal ringing
+    parallel, p_fun, n_jobs = parallel_func(_fft_resample, n_jobs)
+    if n_jobs == 1:
+        y = np.zeros((len(x_flat), new_len - to_removes.sum()), dtype=x_flat.dtype)
+        for xi, x_ in enumerate(x_flat):
+            y[xi] = _fft_resample(x_, new_len, npads, to_removes, cuda_dict, pad)
+    else:
+        y = parallel(
+            p_fun(x_, new_len, npads, to_removes, cuda_dict, pad) for x_ in x_flat
+        )
+        y = np.array(y)
+
+    return y
+
+
+def _resample_stim_channels(stim_data, up, down):
+    """Resample stim channels, carefully.
+
+    Parameters
+    ----------
+    stim_data : array, shape (n_samples,) or (n_stim_channels, n_samples)
+        Stim channels to resample.
+    up : float
+        Factor to upsample by.
+    down : float
+        Factor to downsample by.
+
+    Returns
+    -------
+    stim_resampled : array, shape (n_stim_channels, n_samples_resampled)
+        The resampled stim channels.
+
+    Note
+    ----
+    The approach taken here is equivalent to the approach in the C-code.
+ See the decimate_stimch function in MNE/mne_browse_raw/save.c + """ + stim_data = np.atleast_2d(stim_data) + n_stim_channels, n_samples = stim_data.shape + + ratio = float(up) / down + resampled_n_samples = int(round(n_samples * ratio)) + + stim_resampled = np.zeros((n_stim_channels, resampled_n_samples)) + + # Figure out which points in old data to subsample protect against + # out-of-bounds, which can happen (having one sample more than + # expected) due to padding + sample_picks = np.minimum( + (np.arange(resampled_n_samples) / ratio).astype(int), n_samples - 1 + ) + + # Create windows starting from sample_picks[i], ending at sample_picks[i+1] + windows = zip(sample_picks, np.r_[sample_picks[1:], n_samples]) + + # Use the first non-zero value in each window + for window_i, window in enumerate(windows): + for stim_num, stim in enumerate(stim_data): + nonzero = stim[window[0] : window[1]].nonzero()[0] + if len(nonzero) > 0: + val = stim[window[0] + nonzero[0]] + else: + val = stim[window[0]] + stim_resampled[stim_num, window_i] = val + + return stim_resampled + + +def detrend(x, order=1, axis=-1): + """Detrend the array x. + + Parameters + ---------- + x : n-d array + Signal to detrend. + order : int + Fit order. Currently must be '0' or '1'. + axis : int + Axis of the array to operate on. + + Returns + ------- + y : array + The x array detrended. + + Examples + -------- + As in :func:`scipy.signal.detrend`:: + + >>> randgen = np.random.RandomState(9) + >>> npoints = int(1e3) + >>> noise = randgen.randn(npoints) + >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise + >>> bool((detrend(x) - noise).max() < 0.01) + True + """ + if axis > len(x.shape): + raise ValueError(f"x does not have {axis} axes") + if order == 0: + fit = "constant" + elif order == 1: + fit = "linear" + else: + raise ValueError("order must be 0 or 1") + + y = signal.detrend(x, axis=axis, type=fit) + + return y + + +# Taken from Ifeachor and Jervis p. 356. +# Note that here the passband ripple and stopband attenuation are +# rendundant. The scalar passband ripple δp is expressed in dB as +# 20 * log10(1+δp), but the scalar stopband ripple δs is expressed in dB as +# -20 * log10(δs). So if we know that our stopband attenuation is 53 dB +# (Hamming) then δs = 10 ** (53 / -20.), which means that the passband +# deviation should be 20 * np.log10(1 + 10 ** (53 / -20.)) == 0.0194. +_fir_window_dict = { + "hann": dict(name="Hann", ripple=0.0546, attenuation=44), + "hamming": dict(name="Hamming", ripple=0.0194, attenuation=53), + "blackman": dict(name="Blackman", ripple=0.0017, attenuation=74), +} +_known_fir_windows = tuple(sorted(_fir_window_dict.keys())) +_known_phases_fir = ("linear", "zero", "zero-double", "minimum", "minimum-half") +_known_phases_iir = ("zero", "zero-double", "forward") +_known_fir_designs = ("firwin", "firwin2") +_fir_design_dict = { + "firwin": "Windowed time-domain", + "firwin2": "Windowed frequency-domain", +} + + +def _to_samples(filter_length, sfreq, phase, fir_design): + _validate_type(filter_length, (str, "int-like"), "filter_length") + if isinstance(filter_length, str): + filter_length = filter_length.lower() + err_msg = ( + "filter_length, if a string, must be a " + 'human-readable time, e.g. 
"10s", or "auto", not ' + f'"{filter_length}"' + ) + if filter_length.lower().endswith("ms"): + mult_fact = 1e-3 + filter_length = filter_length[:-2] + elif filter_length[-1].lower() == "s": + mult_fact = 1 + filter_length = filter_length[:-1] + else: + raise ValueError(err_msg) + # now get the number + try: + filter_length = float(filter_length) + except ValueError: + raise ValueError(err_msg) + filter_length = max(int(np.ceil(filter_length * mult_fact * sfreq)), 1) + if fir_design == "firwin": + filter_length += (filter_length - 1) % 2 + filter_length = _ensure_int(filter_length, "filter_length") + return filter_length + + +def _triage_filter_params( + x, + sfreq, + l_freq, + h_freq, + l_trans_bandwidth, + h_trans_bandwidth, + filter_length, + method, + phase, + fir_window, + fir_design, + bands="scalar", + reverse=False, +): + """Validate and automate filter parameter selection.""" + _validate_type(phase, "str", "phase") + if method == "fir": + _check_option("phase", phase, _known_phases_fir, extra="when FIR filtering") + else: + _check_option("phase", phase, _known_phases_iir, extra="when IIR filtering") + _validate_type(fir_window, "str", "fir_window") + _check_option("fir_window", fir_window, _known_fir_windows) + _validate_type(fir_design, "str", "fir_design") + _check_option("fir_design", fir_design, _known_fir_designs) + + # Helpers for reporting + report_phase = "non-linear phase" if phase == "minimum" else "zero-phase" + causality = "causal" if phase == "minimum" else "non-causal" + if phase == "zero-double": + report_pass = "two-pass forward and reverse" + else: + report_pass = "one-pass" + if l_freq is not None: + if h_freq is not None: + kind = "bandstop" if reverse else "bandpass" + else: + kind = "highpass" + assert not reverse + elif h_freq is not None: + kind = "lowpass" + assert not reverse + else: + kind = "allpass" + + def float_array(c): + return np.array(c, float).ravel() + + if bands == "arr": + cast = float_array + else: + cast = float + sfreq = float(sfreq) + if l_freq is not None: + l_freq = cast(l_freq) + if np.any(l_freq <= 0): + raise ValueError(f"highpass frequency {l_freq} must be greater than zero") + if h_freq is not None: + h_freq = cast(h_freq) + if np.any(h_freq >= sfreq / 2.0): + raise ValueError( + f"lowpass frequency {h_freq} must be less than Nyquist ({sfreq / 2.0})" + ) + + dB_cutoff = False # meaning, don't try to compute or report + if bands == "scalar" or (len(h_freq) == 1 and len(l_freq) == 1): + if phase == "zero": + dB_cutoff = "-6 dB" + elif phase == "zero-double": + dB_cutoff = "-12 dB" + + # we go to the next power of two when in FIR and zero-double mode + if method == "iir": + # Ignore these parameters, effectively + l_stop, h_stop = l_freq, h_freq + else: # method == 'fir' + l_stop = h_stop = None + logger.info("") + logger.info("FIR filter parameters") + logger.info("---------------------") + logger.info( + f"Designing a {report_pass}, {report_phase}, {causality} {kind} filter:" + ) + logger.info(f"- {_fir_design_dict[fir_design]} design ({fir_design}) method") + this_dict = _fir_window_dict[fir_window] + if fir_design == "firwin": + logger.info( + "- {name:s} window with {ripple:0.4f} passband ripple " + "and {attenuation:d} dB stopband attenuation".format(**this_dict) + ) + else: + logger.info("- {name:s} window".format(**this_dict)) + + if l_freq is not None: # high-pass component + if isinstance(l_trans_bandwidth, str): + if l_trans_bandwidth != "auto": + raise ValueError( + 'l_trans_bandwidth must be "auto" if string, got "' + 
f'{l_trans_bandwidth}"' + ) + l_trans_bandwidth = np.minimum(np.maximum(0.25 * l_freq, 2.0), l_freq) + l_trans_rep = np.array(l_trans_bandwidth, float) + if l_trans_rep.size == 1: + l_trans_rep = f"{l_trans_rep.item():0.2f}" + with np.printoptions(precision=2, floatmode="fixed"): + msg = f"- Lower transition bandwidth: {l_trans_rep} Hz" + if dB_cutoff: + l_freq_rep = np.array(l_freq, float) + if l_freq_rep.size == 1: + l_freq_rep = f"{l_freq_rep.item():0.2f}" + cutoff_rep = np.array(l_freq - l_trans_bandwidth / 2.0, float) + if cutoff_rep.size == 1: + cutoff_rep = f"{cutoff_rep.item():0.2f}" + # Could be an array + logger.info(f"- Lower passband edge: {l_freq_rep}") + msg += f" ({dB_cutoff} cutoff frequency: {cutoff_rep} Hz)" + logger.info(msg) + l_trans_bandwidth = cast(l_trans_bandwidth) + if np.any(l_trans_bandwidth <= 0): + raise ValueError( + f"l_trans_bandwidth must be positive, got {l_trans_bandwidth}" + ) + l_stop = l_freq - l_trans_bandwidth + if reverse: # band-stop style + l_stop += l_trans_bandwidth + l_freq += l_trans_bandwidth + if np.any(l_stop < 0): + raise ValueError( + "Filter specification invalid: Lower stop frequency negative (" + f"{l_stop:0.2f} Hz). Increase pass frequency or reduce the " + "transition bandwidth (l_trans_bandwidth)" + ) + if h_freq is not None: # low-pass component + if isinstance(h_trans_bandwidth, str): + if h_trans_bandwidth != "auto": + raise ValueError( + 'h_trans_bandwidth must be "auto" if ' + f'string, got "{h_trans_bandwidth}"' + ) + h_trans_bandwidth = np.minimum( + np.maximum(0.25 * h_freq, 2.0), sfreq / 2.0 - h_freq + ) + h_trans_rep = np.array(h_trans_bandwidth, float) + if h_trans_rep.size == 1: + h_trans_rep = f"{h_trans_rep.item():0.2f}" + with np.printoptions(precision=2, floatmode="fixed"): + msg = f"- Upper transition bandwidth: {h_trans_rep} Hz" + if dB_cutoff: + h_freq_rep = np.array(h_freq, float) + if h_freq_rep.size == 1: + h_freq_rep = f"{h_freq_rep.item():0.2f}" + cutoff_rep = np.array(h_freq + h_trans_bandwidth / 2.0, float) + if cutoff_rep.size == 1: + cutoff_rep = f"{cutoff_rep.item():0.2f}" + logger.info(f"- Upper passband edge: {h_freq_rep} Hz") + msg += f" ({dB_cutoff} cutoff frequency: {cutoff_rep} Hz)" + logger.info(msg) + h_trans_bandwidth = cast(h_trans_bandwidth) + if np.any(h_trans_bandwidth <= 0): + raise ValueError( + f"h_trans_bandwidth must be positive, got {h_trans_bandwidth}" + ) + h_stop = h_freq + h_trans_bandwidth + if reverse: # band-stop style + h_stop -= h_trans_bandwidth + h_freq -= h_trans_bandwidth + if np.any(h_stop > sfreq / 2): + raise ValueError( + f"Effective band-stop frequency ({h_stop}) is too high (maximum " + f"based on Nyquist is {sfreq / 2.0})" + ) + + if isinstance(filter_length, str) and filter_length.lower() == "auto": + filter_length = filter_length.lower() + h_check = l_check = np.inf + if h_freq is not None: + h_check = min(np.atleast_1d(h_trans_bandwidth)) + if l_freq is not None: + l_check = min(np.atleast_1d(l_trans_bandwidth)) + mult_fact = 2.0 if fir_design == "firwin2" else 1.0 + filter_length = f"{_length_factors[fir_window] * mult_fact / float(min(h_check, l_check))}s" # noqa: E501 + next_pow_2 = False # disable old behavior + else: + next_pow_2 = isinstance(filter_length, str) and phase == "zero-double" + + filter_length = _to_samples(filter_length, sfreq, phase, fir_design) + + # use correct type of filter (must be odd length for firwin and for + # zero phase) + if fir_design == "firwin" or phase == "zero": + filter_length += (filter_length - 1) % 2 + + logger.info( + 
f"- Filter length: {filter_length} samples ({filter_length / sfreq:0.3f} s)" + ) + logger.info("") + + if filter_length <= 0: + raise ValueError(f"filter_length must be positive, got {filter_length}") + + if next_pow_2: + filter_length = 2 ** int(np.ceil(np.log2(filter_length))) + if fir_design == "firwin": + filter_length += (filter_length - 1) % 2 + + # If we have data supplied, do a sanity check + if x is not None: + x = _check_filterable(x) + len_x = x.shape[-1] + if method != "fir": + filter_length = len_x + if filter_length > len_x and not (l_freq is None and h_freq is None): + warn( + f"filter_length ({filter_length}) is longer than the signal ({len_x}), " + "distortion is likely. Reduce filter length or filter a longer signal." + ) + + logger.debug(f"Using filter length: {filter_length}") + return ( + x, + sfreq, + l_freq, + h_freq, + l_stop, + h_stop, + filter_length, + phase, + fir_window, + fir_design, + ) + + +def _check_resamp_noop(sfreq, o_sfreq, rtol=1e-6): + if np.isclose(sfreq, o_sfreq, atol=0, rtol=rtol): + logger.info( + f"Sampling frequency of the instance is already {sfreq}, returning " + "unmodified." + ) + return True + return False + + +class FilterMixin: + """Object for Epoch/Evoked filtering.""" + + @verbose + def savgol_filter(self, h_freq, verbose=None): + """Filter the data using Savitzky-Golay polynomial method. + + Parameters + ---------- + h_freq : float + Approximate high cut-off frequency in Hz. Note that this + is not an exact cutoff, since Savitzky-Golay filtering + :footcite:`SavitzkyGolay1964` is done using polynomial fits + instead of FIR/IIR filtering. This parameter is thus used to + determine the length of the window over which a 5th-order + polynomial smoothing is used. + %(verbose)s + + Returns + ------- + inst : instance of Epochs, Evoked or SourceEstimate + The object with the filtering applied. + + See Also + -------- + mne.io.Raw.filter + + Notes + ----- + For Savitzky-Golay low-pass approximation, see: + + https://gist.github.com/larsoner/bbac101d50176611136b + + When working on SourceEstimates the sample rate of the original data is inferred from tstep. + + .. versionadded:: 0.9.0 + + References + ---------- + .. footbibliography:: + + Examples + -------- + >>> import mne + >>> from os import path as op + >>> evoked_fname = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample', 'sample_audvis-ave.fif') # doctest:+SKIP + >>> evoked = mne.read_evokeds(evoked_fname, baseline=(None, 0))[0] # doctest:+SKIP + >>> evoked.savgol_filter(10.) 
# low-pass at around 10 Hz # doctest:+SKIP + >>> evoked.plot() # doctest:+SKIP + """ # noqa: E501 + from .source_estimate import _BaseSourceEstimate + + _check_preload(self, "inst.savgol_filter") + if not isinstance(self, _BaseSourceEstimate): + s_freq = self.info["sfreq"] + else: + s_freq = 1 / self.tstep + h_freq = float(h_freq) + if h_freq >= s_freq / 2.0: + raise ValueError("h_freq must be less than half the sample rate") + + # savitzky-golay filtering + window_length = (int(np.round(s_freq / h_freq)) // 2) * 2 + 1 + logger.info("Using savgol length %d", window_length) + self._data[:] = signal.savgol_filter( + self._data, axis=-1, polyorder=5, window_length=window_length + ) + return self + + @verbose + def filter( + self, + l_freq, + h_freq, + picks=None, + filter_length="auto", + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + n_jobs=None, + method="fir", + iir_params=None, + phase="zero", + fir_window="hamming", + fir_design="firwin", + skip_by_annotation=("edge", "bad_acq_skip"), + pad="edge", + *, + verbose=None, + ): + """Filter a subset of channels/vertices. + + Parameters + ---------- + %(l_freq)s + %(h_freq)s + %(picks_all_data)s + %(filter_length)s + %(l_trans_bandwidth)s + %(h_trans_bandwidth)s + %(n_jobs_fir)s + %(method_fir)s + %(iir_params)s + %(phase)s + %(fir_window)s + %(fir_design)s + %(skip_by_annotation)s + + .. versionadded:: 0.16. + %(pad_fir)s + %(verbose)s + + Returns + ------- + inst : instance of Epochs, Evoked, SourceEstimate, or Raw + The filtered data. + + See Also + -------- + mne.filter.create_filter + mne.Evoked.savgol_filter + mne.io.Raw.notch_filter + mne.io.Raw.resample + mne.filter.create_filter + mne.filter.filter_data + mne.filter.construct_iir_filter + + Notes + ----- + Applies a zero-phase low-pass, high-pass, band-pass, or band-stop + filter to the channels selected by ``picks``. + The data are modified inplace. + + The object has to have the data loaded e.g. with ``preload=True`` + or ``self.load_data()``. + + ``l_freq`` and ``h_freq`` are the frequencies below which and above + which, respectively, to filter out of the data. Thus the uses are: + + * ``l_freq < h_freq``: band-pass filter + * ``l_freq > h_freq``: band-stop filter + * ``l_freq is not None and h_freq is None``: high-pass filter + * ``l_freq is None and h_freq is not None``: low-pass filter + + ``self.info['lowpass']`` and ``self.info['highpass']`` are only + updated with picks=None. + + .. note:: If n_jobs > 1, more memory is required as + ``len(picks) * n_times`` additional time points need to + be temporarily stored in memory. + + When working on SourceEstimates the sample rate of the original + data is inferred from tstep. + + For more information, see the tutorials + :ref:`disc-filtering` and :ref:`tut-filter-resample` and + :func:`mne.filter.create_filter`. + + .. 
versionadded:: 0.15 + """ + from .annotations import _annotations_starts_stops + from .io import BaseRaw + from .source_estimate import _BaseSourceEstimate + + _check_preload(self, "inst.filter") + if not isinstance(self, _BaseSourceEstimate): + update_info, picks = _filt_check_picks(self.info, picks, l_freq, h_freq) + s_freq = self.info["sfreq"] + else: + s_freq = 1.0 / self.tstep + if pad is None and method != "iir": + pad = "edge" + if isinstance(self, BaseRaw): + # Deal with annotations + onsets, ends = _annotations_starts_stops( + self, skip_by_annotation, invert=True + ) + logger.info( + "Filtering raw data in %d contiguous segment%s", + len(onsets), + _pl(onsets), + ) + else: + onsets, ends = np.array([0]), np.array([self._data.shape[1]]) + max_idx = (ends - onsets).argmax() + for si, (start, stop) in enumerate(zip(onsets, ends)): + # Only output filter params once (for info level), and only warn + # once about the length criterion (longest segment is too short) + use_verbose = verbose if si == max_idx else "error" + filter_data( + self._data[:, start:stop], + s_freq, + l_freq, + h_freq, + picks, + filter_length, + l_trans_bandwidth, + h_trans_bandwidth, + n_jobs, + method, + iir_params, + copy=False, + phase=phase, + fir_window=fir_window, + fir_design=fir_design, + pad=pad, + verbose=use_verbose, + ) + # update info if filter is applied to all data channels/vertices, + # and it's not a band-stop filter + if not isinstance(self, _BaseSourceEstimate): + _filt_update_info(self.info, update_info, l_freq, h_freq) + return self + + @verbose + def resample( + self, + sfreq, + *, + npad="auto", + window="auto", + n_jobs=None, + pad="edge", + method="fft", + verbose=None, + ): + """Resample data. + + If appropriate, an anti-aliasing filter is applied before resampling. + See :ref:`resampling-and-decimating` for more information. + + .. note:: Data must be loaded. + + Parameters + ---------- + sfreq : float + New sample rate to use. + %(npad)s + %(window_resample)s + %(n_jobs_cuda)s + %(pad_resample)s + + .. versionadded:: 0.15 + %(method_resample)s + + .. versionadded:: 1.7 + %(verbose)s + + Returns + ------- + inst : instance of Epochs or Evoked + The resampled object. + + See Also + -------- + mne.io.Raw.resample + + Notes + ----- + For some data, it may be more accurate to use npad=0 to reduce + artifacts. This is dataset dependent -- check your data! 
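+
+        Resampling also updates ``info['sfreq']`` and caps ``info['lowpass']``
+        at the new Nyquist frequency (``sfreq / 2.``), and the time axis is
+        regenerated to match the new sampling rate.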
+ """ + from .epochs import BaseEpochs + from .evoked import Evoked + + # Should be guaranteed by our inheritance, and the fact that + # mne.io.BaseRaw and _BaseSourceEstimate overrides this method + assert isinstance(self, BaseEpochs | Evoked) + + sfreq = float(sfreq) + o_sfreq = self.info["sfreq"] + if _check_resamp_noop(sfreq, o_sfreq): + return self + + _check_preload(self, "inst.resample") + self._data = resample( + self._data, + sfreq, + o_sfreq, + npad=npad, + window=window, + n_jobs=n_jobs, + pad=pad, + method=method, + ) + lowpass = self.info.get("lowpass") + lowpass = np.inf if lowpass is None else lowpass + with self.info._unlock(): + self.info["lowpass"] = min(lowpass, sfreq / 2.0) + self.info["sfreq"] = float(sfreq) + new_times = ( + np.arange(self._data.shape[-1], dtype=np.float64) / sfreq + self.times[0] + ) + # adjust indirectly affected variables + self._set_times(new_times) + self._raw_times = self.times + self._update_first_last() + return self + + @verbose + def apply_hilbert( + self, picks=None, envelope=False, n_jobs=None, n_fft="auto", *, verbose=None + ): + """Compute analytic signal or envelope for a subset of channels/vertices. + + Parameters + ---------- + %(picks_all_data_noref)s + envelope : bool + Compute the envelope signal of each channel/vertex. Default False. + See Notes. + %(n_jobs)s + n_fft : int | None | str + Points to use in the FFT for Hilbert transformation. The signal + will be padded with zeros before computing Hilbert, then cut back + to original length. If None, n == self.n_times. If 'auto', + the next highest fast FFT length will be use. + %(verbose)s + + Returns + ------- + self : instance of Raw, Epochs, Evoked or SourceEstimate + The raw object with transformed data. + + Notes + ----- + **Parameters** + + If ``envelope=False``, the analytic signal for the channels/vertices defined in + ``picks`` is computed and the data of the Raw object is converted to + a complex representation (the analytic signal is complex valued). + + If ``envelope=True``, the absolute value of the analytic signal for the + channels/vertices defined in ``picks`` is computed, resulting in the envelope + signal. + + .. warning: Do not use ``envelope=True`` if you intend to compute + an inverse solution from the raw data. If you want to + compute the envelope in source space, use + ``envelope=False`` and compute the envelope after the + inverse solution has been obtained. + + If envelope=False, more memory is required since the original raw data + as well as the analytic signal have temporarily to be stored in memory. + If n_jobs > 1, more memory is required as ``len(picks) * n_times`` + additional time points need to be temporarily stored in memory. + + Also note that the ``n_fft`` parameter will allow you to pad the signal + with zeros before performing the Hilbert transform. This padding + is cut off, but it may result in a slightly different result + (particularly around the edges). Use at your own risk. + + **Analytic signal** + + The analytic signal "x_a(t)" of "x(t)" is:: + + x_a = F^{-1}(F(x) 2U) = x + i y + + where "F" is the Fourier transform, "U" the unit step function, + and "y" the Hilbert transform of "x". One usage of the analytic + signal is the computation of the envelope signal, which is given by + "e(t) = abs(x_a(t))". 
Due to the linearity of the Hilbert transform and the
+        MNE inverse solution, the envelope in source space can be obtained
+        by computing the analytic signal in sensor space, applying the MNE
+        inverse, and computing the envelope in source space.
+        """
+        from .source_estimate import _BaseSourceEstimate
+
+        if not isinstance(self, _BaseSourceEstimate):
+            use_info = self.info
+        else:
+            use_info = len(self._data)
+        _check_preload(self, "inst.apply_hilbert")
+        picks = _picks_to_idx(use_info, picks, exclude=(), with_ref_meg=False)
+
+        if n_fft is None:
+            n_fft = len(self.times)
+        elif isinstance(n_fft, str):
+            if n_fft != "auto":
+                raise ValueError(
+                    f"n_fft must be an integer, string, or None, got {type(n_fft)}"
+                )
+            n_fft = next_fast_len(len(self.times))
+        n_fft = int(n_fft)
+        if n_fft < len(self.times):
+            raise ValueError(
+                f"n_fft ({n_fft}) must be at least the number of time points ("
+                f"{len(self.times)})"
+            )
+        dtype = None if envelope else np.complex128
+        args, kwargs = (), dict(n_fft=n_fft, envelope=envelope)
+
+        data_in = self._data
+        if dtype is not None and dtype != self._data.dtype:
+            self._data = self._data.astype(dtype)
+
+        parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs)
+        if n_jobs == 1:
+            # modify data inplace to save memory
+            for idx in picks:
+                self._data[..., idx, :] = _check_fun(
+                    _my_hilbert, data_in[..., idx, :], *args, **kwargs
+                )
+        else:
+            # use parallel function
+            data_picks_new = parallel(
+                p_fun(_my_hilbert, data_in[..., p, :], *args, **kwargs) for p in picks
+            )
+            for pp, p in enumerate(picks):
+                self._data[..., p, :] = data_picks_new[pp]
+        return self
+
+
+def _check_fun(fun, d, *args, **kwargs):
+    """Check shapes."""
+    want_shape = d.shape
+    d = fun(d, *args, **kwargs)
+    if not isinstance(d, np.ndarray):
+        raise TypeError("Return value must be an ndarray")
+    if d.shape != want_shape:
+        raise ValueError(f"Return data must have shape {want_shape} not {d.shape}")
+    return d
+
+
+def _my_hilbert(x, n_fft=None, envelope=False):
+    """Compute Hilbert transform of signals w/ zero padding.
+
+    Parameters
+    ----------
+    x : array, shape (n_times)
+        The signal to convert
+    n_fft : int
+        Size of the FFT to perform, must be at least ``len(x)``.
+        The signal will be cut back to original length.
+    envelope : bool
+        Whether to compute amplitude of the Hilbert transform in order
+        to return the signal envelope.
+
+    Returns
+    -------
+    out : array, shape (n_times)
+        The Hilbert transform of the signal, or the envelope.
+    """
+    n_x = x.shape[-1]
+    out = signal.hilbert(x, N=n_fft, axis=-1)[..., :n_x]
+    if envelope:
+        out = np.abs(out)
+    return out
+
+
+@verbose
+def design_mne_c_filter(
+    sfreq,
+    l_freq=None,
+    h_freq=40.0,
+    l_trans_bandwidth=None,
+    h_trans_bandwidth=5.0,
+    verbose=None,
+):
+    """Create a FIR filter like that used by MNE-C.
+
+    Parameters
+    ----------
+    sfreq : float
+        The sample frequency.
+    l_freq : float | None
+        The low filter frequency in Hz, default None.
+        Can be None to avoid high-passing.
+    h_freq : float
+        The high filter frequency in Hz, default 40.
+        Can be None to avoid low-passing.
+    l_trans_bandwidth : float | None
+        Low transition bandwidth in Hz. Can be None (default) to use 3 samples.
+    h_trans_bandwidth : float
+        High transition bandwidth in Hz.
+    %(verbose)s
+
+    Returns
+    -------
+    h : ndarray, shape (8193,)
+        The linear-phase (symmetric) FIR filter coefficients.
+
+    Notes
+    -----
+    This function is provided mostly for reference purposes.
+ + MNE-C uses a frequency-domain filter design technique by creating a + linear-phase filter of length 8193. In the frequency domain, the + 4197 frequencies are directly constructed, with zeroes in the stop-band + and ones in the passband, with squared cosine ramps in between. + """ + n_freqs = (4096 + 2 * 2048) // 2 + 1 + freq_resp = np.ones(n_freqs) + l_freq = 0 if l_freq is None else float(l_freq) + if l_trans_bandwidth is None: + l_width = 3 + else: + l_width = (int(((n_freqs - 1) * l_trans_bandwidth) / (0.5 * sfreq)) + 1) // 2 + l_start = int(((n_freqs - 1) * l_freq) / (0.5 * sfreq)) + h_freq = sfreq / 2.0 if h_freq is None else float(h_freq) + h_width = (int(((n_freqs - 1) * h_trans_bandwidth) / (0.5 * sfreq)) + 1) // 2 + h_start = int(((n_freqs - 1) * h_freq) / (0.5 * sfreq)) + logger.info( + "filter : %7.3f ... %6.1f Hz bins : %d ... %d of %d " "hpw : %d lpw : %d", + l_freq, + h_freq, + l_start, + h_start, + n_freqs, + l_width, + h_width, + ) + if l_freq > 0: + start = l_start - l_width + 1 + stop = start + 2 * l_width - 1 + if start < 0 or stop >= n_freqs: + raise RuntimeError("l_freq too low or l_trans_bandwidth too large") + freq_resp[:start] = 0.0 + k = np.arange(-l_width + 1, l_width) / float(l_width) + 3.0 + freq_resp[start:stop] = np.cos(np.pi / 4.0 * k) ** 2 + + if h_freq < sfreq / 2.0: + start = h_start - h_width + 1 + stop = start + 2 * h_width - 1 + if start < 0 or stop >= n_freqs: + raise RuntimeError("h_freq too high or h_trans_bandwidth too large") + k = np.arange(-h_width + 1, h_width) / float(h_width) + 1.0 + freq_resp[start:stop] *= np.cos(np.pi / 4.0 * k) ** 2 + freq_resp[stop:] = 0.0 + # Get the time-domain version of this signal + h = fft.irfft(freq_resp, n=2 * len(freq_resp) - 1) + h = np.roll(h, n_freqs - 1) # center the impulse like a linear-phase filt + return h + + +def _filt_check_picks(info, picks, h_freq, l_freq): + update_info = False + # This will pick *all* data channels + picks = _picks_to_idx(info, picks, "data_or_ica", exclude=()) + if h_freq is not None or l_freq is not None: + data_picks = _picks_to_idx( + info, None, "data_or_ica", exclude=(), allow_empty=True + ) + if len(data_picks) == 0: + logger.info( + "No data channels found. The highpass and " + "lowpass values in the measurement info will not " + "be updated." + ) + elif np.isin(data_picks, picks).all(): + update_info = True + else: + logger.info( + "Filtering a subset of channels. The highpass and " + "lowpass values in the measurement info will not " + "be updated." + ) + return update_info, picks + + +def _filt_update_info(info, update_info, l_freq, h_freq): + if update_info: + if ( + h_freq is not None + and (l_freq is None or l_freq < h_freq) + and (info["lowpass"] is None or h_freq < info["lowpass"]) + ): + with info._unlock(): + info["lowpass"] = float(h_freq) + if ( + l_freq is not None + and (h_freq is None or l_freq < h_freq) + and (info["highpass"] is None or l_freq > info["highpass"]) + ): + with info._unlock(): + info["highpass"] = float(l_freq) diff --git a/mne/fixes.py b/mne/fixes.py new file mode 100644 index 0000000..2b37de9 --- /dev/null +++ b/mne/fixes.py @@ -0,0 +1,726 @@ +"""Compatibility fixes for older versions of libraries. + +If you add content to this file, please give the version of the package +at which the fix is no longer needed. + +# originally copied from scikit-learn + +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
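+
+# Typical in-package usage (a minimal sketch; ``_compare_version`` is defined
+# below, and ``np`` is imported further down in this module):
+#
+#     if _compare_version(np.__version__, ">=", "2.0"):
+#         ...  # take the NumPy 2.x code path
+#     else:
+#         ...  # fall back for older NumPy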
+ +# NOTE: +# Imports for SciPy submodules need to stay nested in this module +# because this module is imported many places (but not always used)! + +import inspect +import operator as operator_module +import os +import warnings +from math import log + +import numpy as np + +############################################################################### +# distutils + +# distutils has been deprecated since Python 3.10 and was removed +# from the standard library with the release of Python 3.12. + + +def _compare_version(version_a, operator, version_b): + """Compare two version strings via a user-specified operator. + + Parameters + ---------- + version_a : str + First version string. + operator : '==' | '>' | '<' | '>=' | '<=' + Operator to compare ``version_a`` and ``version_b`` in the form of + ``version_a operator version_b``. + version_b : str + Second version string. + + Returns + ------- + bool + The result of the version comparison. + """ + from packaging.version import parse + + mapping = {"<": "lt", "<=": "le", "==": "eq", "!=": "ne", ">=": "ge", ">": "gt"} + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore") + ver_a = parse(version_a) + ver_b = parse(version_b) + return getattr(operator_module, mapping[operator])(ver_a, ver_b) + + +############################################################################### +# Misc + + +def _median_complex(data, axis): + """Compute marginal median on complex data safely. + + Can be removed when numpy introduces a fix. + See: https://github.com/scipy/scipy/pull/12676/. + """ + # np.median must be passed real arrays for the desired result + if np.iscomplexobj(data): + data = np.median(np.real(data), axis=axis) + 1j * np.median( + np.imag(data), axis=axis + ) + else: + data = np.median(data, axis=axis) + return data + + +def _safe_svd(A, **kwargs): + """Get around the SVD did not converge error of death.""" + # Intel has a bug with their GESVD driver: + # https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa: E501 + # For SciPy 0.18 and up, we can work around it by using + # lapack_driver='gesvd' instead. 
+ from scipy import linalg + + if kwargs.get("overwrite_a", False): + raise ValueError("Cannot set overwrite_a=True with this function") + try: + return linalg.svd(A, **kwargs) + except np.linalg.LinAlgError as exp: + from .utils import warn + + warn(f"SVD error ({exp}), attempting to use GESVD instead of GESDD") + return linalg.svd(A, lapack_driver="gesvd", **kwargs) + + +def _csc_array_cast(x): + from scipy.sparse import csc_array + + return csc_array(x) + + +# Can be replaced with sparse.eye_array once we depend on SciPy >= 1.12 +def _eye_array(n, *, format="csr"): # noqa: A002 + from scipy import sparse + + return sparse.dia_array((np.ones(n), 0), shape=(n, n)).asformat(format) + + +############################################################################### +# NumPy Generator (NumPy 1.17) + + +def rng_uniform(rng): + """Get the uniform/randint from the rng.""" + # prefer Generator.integers, fall back to RandomState.randint + return getattr(rng, "integers", getattr(rng, "randint", None)) + + +############################################################################### +# Misc utilities + + +# get_fdata() requires knowing the dtype ahead of time, so let's triage on our +# own instead +def _get_img_fdata(img): + data = np.asanyarray(img.dataobj) + dtype = np.complex128 if np.iscomplexobj(data) else np.float64 + return data.astype(dtype) + + +############################################################################### +# Copied from sklearn to simplify code paths + + +def empirical_covariance(X, assume_centered=False): + """Compute the Maximum likelihood covariance estimator. + + Parameters + ---------- + X : ndarray, shape (n_samples, n_features) + Data from which to compute the covariance estimate + + assume_centered : Boolean + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False, data are centered before computation. + + Returns + ------- + covariance : 2D ndarray, shape (n_features, n_features) + Empirical covariance (Maximum Likelihood Estimator). + """ + X = np.asarray(X) + if X.ndim == 1: + X = np.reshape(X, (1, -1)) + + if X.shape[0] == 1: + warnings.warn( + "Only one sample available. You may want to reshape your data array" + ) + + if assume_centered: + covariance = np.dot(X.T, X) / X.shape[0] + else: + covariance = np.cov(X.T, bias=1) + + if covariance.ndim == 0: + covariance = np.array([[covariance]]) + return covariance + + +class _EstimatorMixin: + def __sklearn_tags__(self): + # If we get here, we should have sklearn installed + from sklearn.utils import Tags, TargetTags + + return Tags( + estimator_type=None, + target_tags=TargetTags(required=False), + transformer_tags=None, + regressor_tags=None, + classifier_tags=None, + ) + + def _param_names(self): + return inspect.getfullargspec(self.__init__).args[1:] + + def get_params(self, deep=True): + """Get parameters for this estimator. + + Parameters + ---------- + deep : bool, default=True + If True, will return the parameters for this estimator and + contained subobjects that are estimators. + + Returns + ------- + params : dict + Parameter names mapped to their values. + """ + out = dict() + for key in self._param_names(): + out[key] = getattr(self, key) + return out + + def set_params(self, **params): + """Set the parameters of this estimator. + + The method works on simple estimators as well as on nested objects + (such as pipelines). 
The latter have parameters of the form + ``__`` so that it's possible to update each + component of a nested object. + + Parameters + ---------- + **params : dict + Estimator parameters. + + Returns + ------- + self : object + Estimator instance. + """ + param_names = self._param_names() + for key in params: + if key in param_names: + setattr(self, key, params[key]) + + +class EmpiricalCovariance(_EstimatorMixin): + """Maximum likelihood covariance estimator. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + store_precision : bool + Specifies if the estimated precision is stored. + + assume_centered : bool + If True, data are not centered before computation. + Useful when working with data whose mean is almost, but not exactly + zero. + If False (default), data are centered before computation. + + Attributes + ---------- + covariance_ : 2D ndarray, shape (n_features, n_features) + Estimated covariance matrix + + precision_ : 2D ndarray, shape (n_features, n_features) + Estimated pseudo-inverse matrix. + (stored only if store_precision is True) + """ + + def __init__(self, store_precision=True, assume_centered=False): + self.store_precision = store_precision + self.assume_centered = assume_centered + + def _set_covariance(self, covariance): + """Save the covariance and precision estimates. + + Storage is done accordingly to `self.store_precision`. + Precision stored only if invertible. + + Parameters + ---------- + covariance : 2D ndarray, shape (n_features, n_features) + Estimated covariance matrix to be stored, and from which precision + is computed. + """ + from scipy import linalg + + # covariance = check_array(covariance) + # set covariance + self.covariance_ = covariance + # set precision + if self.store_precision: + self.precision_ = linalg.pinvh(covariance) + else: + self.precision_ = None + + def get_precision(self): + """Getter for the precision matrix. + + Returns + ------- + precision_ : array-like, + The precision matrix associated to the current covariance object. + + """ + from scipy import linalg + + if self.store_precision: + precision = self.precision_ + else: + precision = linalg.pinvh(self.covariance_) + return precision + + def fit(self, X, y=None): + """Fit the Maximum Likelihood Estimator covariance model. + + Parameters + ---------- + X : array-like, shape = [n_samples, n_features] + Training data, where n_samples is the number of samples and + n_features is the number of features. + y : ndarray | None + Not used, present for API consistency. + + Returns + ------- + self : object + Returns self. + """ # noqa: E501 + # X = check_array(X) + if self.assume_centered: + self.location_ = np.zeros(X.shape[1]) + else: + self.location_ = X.mean(0) + covariance = empirical_covariance(X, assume_centered=self.assume_centered) + self._set_covariance(covariance) + + return self + + def score(self, X_test, y=None): + """Compute the log-likelihood of a Gaussian dataset. + + Uses ``self.covariance_`` as an estimator of its covariance matrix. + + Parameters + ---------- + X_test : array-like, shape = [n_samples, n_features] + Test data of which we compute the likelihood, where n_samples is + the number of samples and n_features is the number of features. + X_test is assumed to be drawn from the same distribution than + the data used in fit (including centering). + y : ndarray | None + Not used, present for API consistency. + + Returns + ------- + res : float + The likelihood of the data set with `self.covariance_` as an + estimator of its covariance matrix. 
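+
+            The value is computed by passing the empirical covariance of the
+            centered test data, together with :meth:`get_precision`, to
+            ``log_likelihood``.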
+ """ + # compute empirical covariance of the test set + test_cov = empirical_covariance(X_test - self.location_, assume_centered=True) + # compute log likelihood + res = log_likelihood(test_cov, self.get_precision()) + + return res + + def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True): + """Compute the Mean Squared Error between two covariance estimators. + + Parameters + ---------- + comp_cov : array-like, shape = [n_features, n_features] + The covariance to compare with. + norm : str + The type of norm used to compute the error. Available error types: + - 'frobenius' (default): sqrt(tr(A^t.A)) + - 'spectral': sqrt(max(eigenvalues(A^t.A)) + where A is the error ``(comp_cov - self.covariance_)``. + scaling : bool + If True (default), the squared error norm is divided by n_features. + If False, the squared error norm is not rescaled. + squared : bool + Whether to compute the squared error norm or the error norm. + If True (default), the squared error norm is returned. + If False, the error norm is returned. + + Returns + ------- + The Mean Squared Error (in the sense of the Frobenius norm) between + `self` and `comp_cov` covariance estimators. + """ + from scipy import linalg + + # compute the error + error = comp_cov - self.covariance_ + # compute the error norm + if norm == "frobenius": + squared_norm = np.sum(error**2) + elif norm == "spectral": + squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error))) + else: + raise NotImplementedError( + "Only spectral and frobenius norms are implemented" + ) + # optionally scale the error norm + if scaling: + squared_norm = squared_norm / error.shape[0] + # finally get either the squared norm or the norm + if squared: + result = squared_norm + else: + result = np.sqrt(squared_norm) + + return result + + def mahalanobis(self, observations): + """Compute the squared Mahalanobis distances of given observations. + + Parameters + ---------- + observations : array-like, shape = [n_observations, n_features] + The observations, the Mahalanobis distances of the which we + compute. Observations are assumed to be drawn from the same + distribution than the data used in fit. + + Returns + ------- + mahalanobis_distance : array, shape = [n_observations,] + Squared Mahalanobis distances of the observations. + """ + precision = self.get_precision() + # compute mahalanobis distances + centered_obs = observations - self.location_ + mahalanobis_dist = np.sum(np.dot(centered_obs, precision) * centered_obs, 1) + + return mahalanobis_dist + + +def log_likelihood(emp_cov, precision): + """Compute the sample mean of the log_likelihood under a covariance model. 
+ + computes the empirical expected log-likelihood (accounting for the + normalization terms and scaling), allowing for universal comparison (beyond + this software package) + + Parameters + ---------- + emp_cov : 2D ndarray (n_features, n_features) + Maximum Likelihood Estimator of covariance + + precision : 2D ndarray (n_features, n_features) + The precision matrix of the covariance model to be tested + + Returns + ------- + sample mean of the log-likelihood + """ + p = precision.shape[0] + log_likelihood_ = -np.sum(emp_cov * precision) + _logdet(precision) + log_likelihood_ -= p * np.log(2 * np.pi) + log_likelihood_ /= 2.0 + return log_likelihood_ + + +# sklearn uses np.linalg for this, but ours is more robust to zero eigenvalues + + +def _logdet(A): + """Compute the log det of a positive semidefinite matrix.""" + from scipy import linalg + + vals = linalg.eigvalsh(A) + # avoid negative (numerical errors) or zero (semi-definite matrix) values + tol = vals.max() * vals.size * np.finfo(np.float64).eps + vals = np.where(vals > tol, vals, tol) + return np.sum(np.log(vals)) + + +def _infer_dimension_(spectrum, n_samples, n_features): + """Infer the dimension of a dataset of shape (n_samples, n_features). + + The dataset is described by its spectrum `spectrum`. + """ + n_spectrum = len(spectrum) + ll = np.empty(n_spectrum) + for rank in range(n_spectrum): + ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features) + return ll.argmax() + + +def _assess_dimension_(spectrum, rank, n_samples, n_features): + from scipy.special import gammaln + + if rank > len(spectrum): + raise ValueError("The tested rank cannot exceed the rank of the dataset") + + pu = -rank * log(2.0) + for i in range(rank): + pu += gammaln((n_features - i) / 2.0) - log(np.pi) * (n_features - i) / 2.0 + + pl = np.sum(np.log(spectrum[:rank])) + pl = -pl * n_samples / 2.0 + + if rank == n_features: + pv = 0 + v = 1 + else: + v = np.sum(spectrum[rank:]) / (n_features - rank) + pv = -np.log(v) * n_samples * (n_features - rank) / 2.0 + + m = n_features * rank - rank * (rank + 1.0) / 2.0 + pp = log(2.0 * np.pi) * (m + rank + 1.0) / 2.0 + + pa = 0.0 + spectrum_ = spectrum.copy() + spectrum_[rank:n_features] = v + for i in range(rank): + for j in range(i + 1, len(spectrum)): + pa += log( + (spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i]) + ) + log(n_samples) + + ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0 + + return ll + + +def svd_flip(u, v, u_based_decision=True): # noqa: D103 + if u_based_decision: + # columns of u, rows of v + max_abs_cols = np.argmax(np.abs(u), axis=0) + signs = np.sign(u[max_abs_cols, np.arange(u.shape[1])]) + u *= signs + v *= signs[:, np.newaxis] + else: + # rows of v, columns of u + max_abs_rows = np.argmax(np.abs(v), axis=1) + signs = np.sign(v[np.arange(v.shape[0]), max_abs_rows]) + u *= signs + v *= signs[:, np.newaxis] + return u, v + + +def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08): + """Use high precision for cumsum and check that final value matches sum. + + Parameters + ---------- + arr : array-like + To be cumulatively summed as flat + axis : int, optional + Axis along which the cumulative sum is computed. + The default (None) is to compute the cumsum over the flattened array. 
+ rtol : float + Relative tolerance, see ``np.allclose`` + atol : float + Absolute tolerance, see ``np.allclose`` + """ + out = np.cumsum(arr, axis=axis, dtype=np.float64) + expected = np.sum(arr, axis=axis, dtype=np.float64) + if not np.all( + np.isclose( + out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True + ) + ): + warnings.warn( + "cumsum was found to be unstable: " + "its last element does not correspond to sum", + RuntimeWarning, + ) + return out + + +############################################################################### +# From nilearn + + +def _crop_colorbar(cbar, cbar_vmin, cbar_vmax): + """Crop a colorbar to show from cbar_vmin to cbar_vmax. + + Used when symmetric_cbar=False is used. + """ + if (cbar_vmin is None) and (cbar_vmax is None): + return + cbar_tick_locs = cbar.locator.locs + if cbar_vmax is None: + cbar_vmax = cbar_tick_locs.max() + if cbar_vmin is None: + cbar_vmin = cbar_tick_locs.min() + new_tick_locs = np.linspace(cbar_vmin, cbar_vmax, len(cbar_tick_locs)) + + cbar.ax.set_ylim(cbar_vmin, cbar_vmax) + X = cbar._mesh()[0] + X = np.array([X[0], X[-1]]) + Y = np.array([[cbar_vmin, cbar_vmin], [cbar_vmax, cbar_vmax]]) + N = X.shape[0] + ii = [0, 1, N - 2, N - 1, 2 * N - 1, 2 * N - 2, N + 1, N, 0] + x = X.T.reshape(-1)[ii] + y = Y.T.reshape(-1)[ii] + xy = ( + np.column_stack([y, x]) + if cbar.orientation == "horizontal" + else np.column_stack([x, y]) + ) + cbar.outline.set_xy(xy) + + cbar.set_ticks(new_tick_locs) + cbar.update_ticks() + + +############################################################################### +# Numba (optional requirement) + +# Here we choose different defaults to speed things up by default +try: + import numba + + if _compare_version(numba.__version__, "<", "0.56.4"): + raise ImportError + prange = numba.prange + + def jit(nopython=True, nogil=True, fastmath=True, cache=True, **kwargs): # noqa + return numba.jit( + nopython=nopython, nogil=nogil, fastmath=fastmath, cache=cache, **kwargs + ) + +except Exception: # could be ImportError, SystemError, etc. 
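+    # numba is unavailable, too old, or failed to import/initialize; fall back
+    # to the pure-NumPy/pure-Python implementations defined below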
+    has_numba = False
+else:
+    has_numba = os.getenv("MNE_USE_NUMBA", "true").lower() == "true"
+
+
+if not has_numba:
+
+    def jit(**kwargs):  # noqa
+        def _jit(func):
+            return func
+
+        return _jit
+
+    prange = range
+    bincount = np.bincount
+
+else:
+
+    @jit()
+    def bincount(x, weights, minlength):  # noqa: D103
+        out = np.zeros(minlength)
+        for idx, w in zip(x, weights):
+            out[idx] += w
+        return out
+
+
+###############################################################################
+# Matplotlib
+
+
+# workaround: plt.close() doesn't spawn close_event on Agg backend
+# https://github.com/matplotlib/matplotlib/issues/18609
+def _close_event(fig):
+    """Force calling of the MPL figure close event."""
+    from matplotlib import backend_bases
+
+    from .utils import logger
+
+    try:
+        fig.canvas.callbacks.process(
+            "close_event",
+            backend_bases.CloseEvent(name="close_event", canvas=fig.canvas),
+        )
+        logger.debug(f"Called {fig!r}.canvas.close_event()")
+    except ValueError:  # old mpl with Qt
+        logger.debug(f"Calling {fig!r}.canvas.close_event() failed")
+        pass  # pragma: no cover
+
+
+###############################################################################
+# SciPy 1.14+ minimum_phase half=True option
+
+
+def minimum_phase(h, method="homomorphic", n_fft=None, *, half=True):
+    """Wrap scipy.signal.minimum_phase with half option."""
+    # Can be removed once SciPy >= 1.14 is the minimum supported version
+    from scipy.fft import fft, ifft
+    from scipy.signal import minimum_phase as sp_minimum_phase
+
+    assert isinstance(method, str) and method == "homomorphic"
+
+    if "half" in inspect.getfullargspec(sp_minimum_phase).kwonlyargs:
+        return sp_minimum_phase(h, method=method, n_fft=n_fft, half=half)
+    h = np.asarray(h)
+    if np.iscomplexobj(h):
+        raise ValueError("Complex filters not supported")
+    if h.ndim != 1 or h.size <= 2:
+        raise ValueError("h must be 1-D and at least 2 samples long")
+    n_half = len(h) // 2
+    if not np.allclose(h[-n_half:][::-1], h[:n_half]):
+        warnings.warn(
+            "h does not appear to be symmetric, conversion may fail",
+            RuntimeWarning,
+            stacklevel=2,
+        )
+    if n_fft is None:
+        n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
+    n_fft = int(n_fft)
+    if n_fft < len(h):
+        raise ValueError(f"n_fft must be at least len(h)=={len(h)}")
+
+    # zero-pad; calculate the DFT
+    h_temp = np.abs(fft(h, n_fft))
+    # take 0.25*log(|H|**2) = 0.5*log(|H|)
+    h_temp += 1e-7 * h_temp[h_temp > 0].min()  # don't let log blow up
+    np.log(h_temp, out=h_temp)
+    if half:  # halving of magnitude spectrum optional
+        h_temp *= 0.5
+    # IDFT
+    h_temp = ifft(h_temp).real
+    # multiply pointwise by the homomorphic filter
+    # lmin[n] = 2u[n] - d[n]
+    # i.e., double the positive frequencies and zero out the negative ones;
+    # Oppenheim+Schafer 3rd ed p991 eq13.42b and p1004 fig13.7
+    win = np.zeros(n_fft)
+    win[0] = 1
+    stop = n_fft // 2
+    win[1:stop] = 2
+    if n_fft % 2:
+        win[stop] = 1
+    h_temp *= win
+    h_temp = ifft(np.exp(fft(h_temp)))
+    h_minimum = h_temp.real
+
+    n_out = (n_half + len(h) % 2) if half else len(h)
+    return h_minimum[:n_out]
diff --git a/mne/forward/__init__.py b/mne/forward/__init__.py
new file mode 100644
index 0000000..3dcafb6
--- /dev/null
+++ b/mne/forward/__init__.py
@@ -0,0 +1,8 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
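+
+# Attribute access on this package is resolved lazily from the adjacent
+# ``__init__.pyi`` stub via ``lazy_loader.attach_stub`` below, so importing
+# ``mne.forward`` stays cheap until a name is actually used.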
+ +"""Forward modeling code.""" +import lazy_loader as lazy # for testing purposes + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/forward/__init__.pyi b/mne/forward/__init__.pyi new file mode 100644 index 0000000..dcac0d3 --- /dev/null +++ b/mne/forward/__init__.pyi @@ -0,0 +1,86 @@ +__all__ = [ + "Forward", + "_apply_forward", + "_as_meg_type_inst", + "_compute_forwards", + "_concatenate_coils", + "_create_meg_coils", + "_do_forward_solution", + "_fill_measurement_info", + "_lead_dots", + "_magnetic_dipole_field_vec", + "_make_surface_mapping", + "_map_meg_or_eeg_channels", + "_merge_fwds", + "_prep_eeg_channels", + "_prep_meg_channels", + "_prepare_for_forward", + "_read_coil_defs", + "_read_forward_meas_info", + "_select_orient_forward", + "_stc_src_sel", + "_subject_from_forward", + "_to_forward_dict", + "_transform_orig_meg_coils", + "apply_forward", + "apply_forward_raw", + "average_forward_solutions", + "compute_depth_prior", + "compute_orient_prior", + "convert_forward_solution", + "is_fixed_orient", + "make_field_map", + "make_forward_dipole", + "make_forward_solution", + "read_forward_solution", + "restrict_forward_to_label", + "restrict_forward_to_stc", + "use_coil_def", + "write_forward_solution", +] +from . import _lead_dots +from ._compute_forward import ( + _compute_forwards, + _concatenate_coils, + _magnetic_dipole_field_vec, +) +from ._field_interpolation import ( + _as_meg_type_inst, + _make_surface_mapping, + _map_meg_or_eeg_channels, + make_field_map, +) +from ._make_forward import ( + _create_meg_coils, + _prep_eeg_channels, + _prep_meg_channels, + _prepare_for_forward, + _read_coil_defs, + _to_forward_dict, + _transform_orig_meg_coils, + make_forward_dipole, + make_forward_solution, + use_coil_def, +) +from .forward import ( + Forward, + _apply_forward, + _do_forward_solution, + _fill_measurement_info, + _merge_fwds, + _read_forward_meas_info, + _select_orient_forward, + _stc_src_sel, + _subject_from_forward, + apply_forward, + apply_forward_raw, + average_forward_solutions, + compute_depth_prior, + compute_orient_prior, + convert_forward_solution, + is_fixed_orient, + read_forward_solution, + restrict_forward_to_label, + restrict_forward_to_stc, + write_forward_solution, +) diff --git a/mne/forward/_compute_forward.py b/mne/forward/_compute_forward.py new file mode 100644 index 0000000..b048d96 --- /dev/null +++ b/mne/forward/_compute_forward.py @@ -0,0 +1,897 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# The computations in this code were primarily derived from Matti Hämäläinen's +# C code. +# +# Many of the idealized equations behind these calculations can be found in: +# 1) Realistic conductivity geometry model of the human head for interpretation +# of neuromagnetic data. Hämäläinen and Sarvas, 1989. Specific to MNE +# 2) EEG and MEG: forward solutions for inverse methods. Mosher, Leahy, and +# Lewis, 1999. Generalized discussion of forward solutions. 
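+
+# Implementation note: integration points from all coils are concatenated into
+# flat rmag/cosmag/weight arrays, with ``bins`` mapping each point back to its
+# sensor; per-sensor sums are then accumulated with
+# ``bincount(bins, weights=..., minlength=bins[-1] + 1)``
+# (see _concatenate_coils and _do_lin_field_coeff below).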
+ +from copy import deepcopy + +import numpy as np + +from .._fiff.constants import FIFF +from ..bem import _import_openmeeg, _make_openmeeg_geometry +from ..fixes import bincount, jit +from ..parallel import parallel_func +from ..surface import _jit_cross, _project_onto_surface +from ..transforms import apply_trans, invert_transform +from ..utils import _check_option, _pl, fill_doc, logger, verbose, warn + +# ############################################################################# +# COIL SPECIFICATION AND FIELD COMPUTATION MATRIX + + +def _dup_coil_set(coils, coord_frame, t): + """Make a duplicate.""" + if t is not None and coord_frame != t["from"]: + raise RuntimeError("transformation frame does not match the coil set") + coils = deepcopy(coils) + if t is not None: + coord_frame = t["to"] + for coil in coils: + for key in ("ex", "ey", "ez"): + if key in coil: + coil[key] = apply_trans(t["trans"], coil[key], False) + coil["r0"] = apply_trans(t["trans"], coil["r0"]) + coil["rmag"] = apply_trans(t["trans"], coil["rmag"]) + coil["cosmag"] = apply_trans(t["trans"], coil["cosmag"], False) + coil["coord_frame"] = t["to"] + return coils, coord_frame + + +def _check_coil_frame(coils, coord_frame, bem): + """Check to make sure the coils are in the correct coordinate frame.""" + if coord_frame != FIFF.FIFFV_COORD_MRI: + if coord_frame == FIFF.FIFFV_COORD_HEAD: + # Make a transformed duplicate + coils, coord_frame = _dup_coil_set(coils, coord_frame, bem["head_mri_t"]) + else: + raise RuntimeError(f"Bad coil coordinate frame {coord_frame}") + return coils, coord_frame + + +@fill_doc +def _lin_field_coeff(surf, mult, rmags, cosmags, ws, bins, n_jobs): + """Parallel wrapper for _do_lin_field_coeff to compute linear coefficients. + + Parameters + ---------- + surf : dict + Dict containing information for one surface of the BEM + mult : float + Multiplier for particular BEM surface (Iso Skull Approach discussed in + Mosher et al., 1999 and Hämäläinen and Sarvas, 1989 Section III?) + rmag : ndarray, shape (n_integration_pts, 3) + 3D positions of MEG coil integration points (from coil['rmag']) + cosmag : ndarray, shape (n_integration_pts, 3) + Direction of the MEG coil integration points (from coil['cosmag']) + ws : ndarray, shape (n_integration_pts,) + Weights for MEG coil integration points + bins : ndarray, shape (n_integration_points,) + The sensor assignments for each rmag/cosmag/w. + %(n_jobs)s + + Returns + ------- + coeff : list + Linear coefficients with lead fields for each BEM vertex on each sensor + (?) + """ + parallel, p_fun, n_jobs = parallel_func( + _do_lin_field_coeff, n_jobs, max_jobs=len(surf["tris"]) + ) + nas = np.array_split + coeffs = parallel( + p_fun(surf["rr"], t, tn, ta, rmags, cosmags, ws, bins) + for t, tn, ta in zip( + nas(surf["tris"], n_jobs), + nas(surf["tri_nn"], n_jobs), + nas(surf["tri_area"], n_jobs), + ) + ) + return mult * np.sum(coeffs, axis=0) + + +@jit() +def _do_lin_field_coeff(bem_rr, tris, tn, ta, rmags, cosmags, ws, bins): + """Compute field coefficients (parallel-friendly). + + See section IV of Mosher et al., 1999 (specifically equation 35). + + Parameters + ---------- + bem_rr : ndarray, shape (n_BEM_vertices, 3) + Positions on one BEM surface in 3-space. 
2562 BEM vertices for BEM with + 5120 triangles (ico-4) + tris : ndarray, shape (n_BEM_vertices, 3) + Vertex indices for each triangle (referring to bem_rr) + tn : ndarray, shape (n_BEM_vertices, 3) + Triangle unit normal vectors + ta : ndarray, shape (n_BEM_vertices,) + Triangle areas + rmag : ndarray, shape (n_sensor_pts, 3) + 3D positions of MEG coil integration points (from coil['rmag']) + cosmag : ndarray, shape (n_sensor_pts, 3) + Direction of the MEG coil integration points (from coil['cosmag']) + ws : ndarray, shape (n_sensor_pts,) + Weights for MEG coil integration points + bins : ndarray, shape (n_sensor_pts,) + The sensor assignments for each rmag/cosmag/w. + + Returns + ------- + coeff : ndarray, shape (n_MEG_sensors, n_BEM_vertices) + Linear coefficients with effect of each BEM vertex on each sensor (?) + """ + coeff = np.zeros((bins[-1] + 1, len(bem_rr))) + w_cosmags = ws.reshape(-1, 1) * cosmags + diff = rmags.reshape(rmags.shape[0], 1, rmags.shape[1]) - bem_rr + den = np.sum(diff * diff, axis=-1) + den *= np.sqrt(den) + den *= 3 + for ti in range(len(tris)): + tri, tri_nn, tri_area = tris[ti], tn[ti], ta[ti] + # Accumulate the coefficients for each triangle node and add to the + # corresponding coefficient matrix + + # Simple version (bem_lin_field_coeffs_simple) + # The following is equivalent to: + # tri_rr = bem_rr[tri] + # for j, coil in enumerate(coils['coils']): + # x = func(coil['rmag'], coil['cosmag'], + # tri_rr, tri_nn, tri_area) + # res = np.sum(coil['w'][np.newaxis, :] * x, axis=1) + # coeff[j][tri + off] += mult * res + + c = np.empty((diff.shape[0], tri.shape[0], diff.shape[2])) + _jit_cross(c, diff[:, tri], tri_nn) + c *= w_cosmags.reshape(w_cosmags.shape[0], 1, w_cosmags.shape[1]) + for ti in range(3): + x = np.sum(c[:, ti], axis=-1) + x /= den[:, tri[ti]] / tri_area + coeff[:, tri[ti]] += bincount(bins, weights=x, minlength=bins[-1] + 1) + return coeff + + +def _concatenate_coils(coils): + """Concatenate MEG coil parameters.""" + rmags = np.concatenate([coil["rmag"] for coil in coils]) + cosmags = np.concatenate([coil["cosmag"] for coil in coils]) + ws = np.concatenate([coil["w"] for coil in coils]) + n_int = np.array([len(coil["rmag"]) for coil in coils]) + if n_int[-1] == 0: + # We assume each sensor has at least one integration point, + # which should be a safe assumption. But let's check it here, since + # our code elsewhere relies on bins[-1] + 1 being the number of sensors + raise RuntimeError("not supported") + bins = np.repeat(np.arange(len(n_int)), n_int) + return rmags, cosmags, ws, bins + + +@fill_doc +def _bem_specify_coils(bem, coils, coord_frame, mults, n_jobs): + """Set up for computing the solution at a set of MEG coils. 
+ + Parameters + ---------- + bem : instance of ConductorModel + BEM information + coils : list of dict, len(n_MEG_sensors) + MEG sensor information dicts + coord_frame : int + Class constant identifying coordinate frame + mults : ndarray, shape (1, n_BEM_vertices) + Multiplier for every vertex in BEM + %(n_jobs)s + + Returns + ------- + sol: ndarray, shape (n_MEG_sensors, n_BEM_vertices) + MEG solution + """ + # Make sure MEG coils are in MRI coordinate frame to match BEM coords + coils, coord_frame = _check_coil_frame(coils, coord_frame, bem) + + # leaving this in in case we want to easily add in the future + # if method != 'simple': # in ['ferguson', 'urankar']: + # raise NotImplementedError + + # Compute the weighting factors to obtain the magnetic field in the linear + # potential approximation + + # Process each of the surfaces + rmags, cosmags, ws, bins = _triage_coils(coils) + del coils + lens = np.cumsum(np.r_[0, [len(s["rr"]) for s in bem["surfs"]]]) + sol = np.zeros((bins[-1] + 1, bem["solution"].shape[1])) + + lims = np.concatenate([np.arange(0, sol.shape[0], 100), [sol.shape[0]]]) + # Put through the bem (in channel-based chunks to save memory) + for start, stop in zip(lims[:-1], lims[1:]): + mask = np.logical_and(bins >= start, bins < stop) + r, c, w, b = rmags[mask], cosmags[mask], ws[mask], bins[mask] - start + # Compute coeffs for each surface, one at a time + for o1, o2, surf, mult in zip( + lens[:-1], lens[1:], bem["surfs"], bem["field_mult"] + ): + coeff = _lin_field_coeff(surf, mult, r, c, w, b, n_jobs) + sol[start:stop] += np.dot(coeff, bem["solution"][o1:o2]) + sol *= mults + return sol + + +def _bem_specify_els(bem, els, mults): + """Set up for computing the solution at a set of EEG electrodes. + + Parameters + ---------- + bem : instance of ConductorModel + BEM information + els : list of dict, len(n_EEG_sensors) + List of EEG sensor information dicts + mults: ndarray, shape (1, n_BEM_vertices) + Multiplier for every vertex in BEM + + Returns + ------- + sol : ndarray, shape (n_EEG_sensors, n_BEM_vertices) + EEG solution + """ + sol = np.zeros((len(els), bem["solution"].shape[1])) + scalp = bem["surfs"][0] + + # Operate on all integration points for all electrodes (in MRI coords) + rrs = np.concatenate( + [apply_trans(bem["head_mri_t"]["trans"], el["rmag"]) for el in els], axis=0 + ) + ws = np.concatenate([el["w"] for el in els]) + tri_weights, tri_idx = _project_onto_surface(rrs, scalp) + tri_weights *= ws[:, np.newaxis] + weights = np.matmul( + tri_weights[:, np.newaxis], bem["solution"][scalp["tris"][tri_idx]] + )[:, 0] + # there are way more vertices than electrodes generally, so let's iterate + # over the electrodes + edges = np.concatenate([[0], np.cumsum([len(el["w"]) for el in els])]) + for ii, (start, stop) in enumerate(zip(edges[:-1], edges[1:])): + sol[ii] = weights[start:stop].sum(0) + sol *= mults + return sol + + +# ############################################################################# +# BEM COMPUTATION + +_MAG_FACTOR = 1e-7 # μ_0 / (4Ï€) + +# def _bem_inf_pot(rd, Q, rp): +# """The infinite medium potential in one direction. See Eq. 
(8) in +# Mosher, 1999""" +# NOTE: the (μ_0 / (4Ï€) factor has been moved to _prep_field_communication +# diff = rp - rd # (Observation point position) - (Source position) +# diff2 = np.sum(diff * diff, axis=1) # Squared magnitude of diff +# # (Dipole moment) dot (diff) / (magnitude ^ 3) +# return np.sum(Q * diff, axis=1) / (diff2 * np.sqrt(diff2)) + + +@jit() +def _bem_inf_pots(mri_rr, bem_rr, mri_Q=None): + """Compute the infinite medium potential in all 3 directions. + + Parameters + ---------- + mri_rr : ndarray, shape (n_dipole_vertices, 3) + Chunk of 3D dipole positions in MRI coordinates + bem_rr: ndarray, shape (n_BEM_vertices, 3) + 3D vertex positions for one BEM surface + mri_Q : ndarray, shape (3, 3) + 3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3)) + + Returns + ------- + ndarray : shape(n_dipole_vertices, 3, n_BEM_vertices) + """ + # NOTE: the (μ_0 / (4Ï€) factor has been moved to _prep_field_communication + # Get position difference vector between BEM vertex and dipole + diff = np.empty((len(mri_rr), 3, len(bem_rr))) + for ri in range(mri_rr.shape[0]): + rr = mri_rr[ri] + this_diff = bem_rr - rr + diff_norm = np.sum(this_diff * this_diff, axis=1) + diff_norm *= np.sqrt(diff_norm) + diff_norm[diff_norm == 0] = 1.0 + if mri_Q is not None: + this_diff = np.dot(this_diff, mri_Q.T) + this_diff /= diff_norm.reshape(-1, 1) + diff[ri] = this_diff.T + + return diff + + +# This function has been refactored to process all points simultaneously +# def _bem_inf_field(rd, Q, rp, d): +# """Infinite-medium magnetic field. See (7) in Mosher, 1999""" +# # Get vector from source to sensor integration point +# diff = rp - rd +# diff2 = np.sum(diff * diff, axis=1) # Get magnitude of diff +# +# # Compute cross product between diff and dipole to get magnetic field at +# # integration point +# x = fast_cross_3d(Q[np.newaxis, :], diff) +# +# # Take magnetic field dotted by integration point normal to get magnetic +# # field threading the current loop. Divide by R^3 (equivalently, R^2 * R) +# return np.sum(x * d, axis=1) / (diff2 * np.sqrt(diff2)) + + +@jit() +def _bem_inf_fields(rr, rmag, cosmag): + """Compute infinite-medium magnetic field at one MEG sensor. + + This operates on all dipoles in all 3 basis directions. 
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_source_points, 3)
+        3D dipole source positions
+    rmag : ndarray, shape (n_sensor_points, 3)
+        3D positions of 1 MEG coil's integration points (from coil['rmag'])
+    cosmag : ndarray, shape (n_sensor_points, 3)
+        Direction of 1 MEG coil's integration points (from coil['cosmag'])
+
+    Returns
+    -------
+    ndarray, shape (n_dipoles, 3, n_integration_pts)
+        Magnetic field from all dipoles at each MEG sensor integration point
+    """
+    # rr, rmag refactored according to Equation (19) in Mosher, 1999
+    # Knowing that we're doing all directions, refactor above function:
+
+    # rr, 3, rmag
+    diff = rmag.T.reshape(1, 3, rmag.shape[0]) - rr.reshape(rr.shape[0], 3, 1)
+    diff_norm = np.sum(diff * diff, axis=1)  # rr, rmag
+    diff_norm *= np.sqrt(diff_norm)  # Get magnitude of distance cubed
+    diff_norm_ = diff_norm.reshape(-1)
+    diff_norm_[diff_norm_ == 0] = 1  # avoid nans
+
+    # This is the result of cross-prod calcs with basis vectors,
+    # as if we had taken (Q=np.eye(3)), then multiplied by cosmags
+    # factor, and then summed across directions
+    x = np.empty((rr.shape[0], 3, rmag.shape[0]))
+    x[:, 0] = diff[:, 1] * cosmag[:, 2] - diff[:, 2] * cosmag[:, 1]
+    x[:, 1] = diff[:, 2] * cosmag[:, 0] - diff[:, 0] * cosmag[:, 2]
+    x[:, 2] = diff[:, 0] * cosmag[:, 1] - diff[:, 1] * cosmag[:, 0]
+    diff_norm = diff_norm_.reshape((rr.shape[0], 1, rmag.shape[0]))
+    x /= diff_norm
+    # x.shape == (rr.shape[0], 3, rmag.shape[0])
+    return x
+
+
+@fill_doc
+def _bem_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs, coil_type):
+    """Calculate the magnetic field or electric potential forward solution.
+
+    The code is very similar between EEG and MEG potentials, so combine them.
+    This does the work of "fwd_comp_field" (which wraps to "fwd_bem_field")
+    and "fwd_bem_pot_els" in MNE-C.
+
+    Parameters
+    ----------
+    rr : ndarray, shape (n_dipoles, 3)
+        3D dipole source positions
+    mri_rr : ndarray, shape (n_dipoles, 3)
+        3D source positions in MRI coordinates
+    mri_Q : ndarray, shape (3, 3)
+        3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3))
+    coils : list of dict, len(sensors)
+        List of sensors where each element contains sensor specific information
+    solution : ndarray, shape (n_sensors, n_BEM_rr)
+        Comes from _bem_specify_coils
+    bem_rr : ndarray, shape (n_BEM_vertices, 3)
+        3D vertex positions for all surfaces in the BEM
+    %(n_jobs)s
+    coil_type : str
+        'meg' or 'eeg'
+
+    Returns
+    -------
+    B : ndarray, shape (n_dipoles * 3, n_sensors)
+        Forward solution for a set of sensors
+    """
+    # Both MEG and EEG have the infinite-medium potentials
+    # This could be just vectorized, but eats too much memory, so instead we
+    # reduce memory by chunking within _do_inf_pots and parallelize, too:
+    parallel, p_fun, n_jobs = parallel_func(_do_inf_pots, n_jobs, max_jobs=len(rr))
+    nas = np.array_split
+    B = np.sum(
+        parallel(
+            p_fun(
+                mri_rr, sr.copy(), np.ascontiguousarray(mri_Q), np.array(sol)
+            )  # copy and contig
+            for sr, sol in zip(nas(bem_rr, n_jobs), nas(solution.T, n_jobs))
+        ),
+        axis=0,
+    )
+    # The copy()s above should make it so the whole objects don't need to be
+    # pickled...
+
+    # Only MEG coils are sensitive to the primary current distribution.
+    if coil_type == "meg":
+        # Primary current contribution (can be calc.
in coil/dipole coords) + parallel, p_fun, n_jobs = parallel_func(_do_prim_curr, n_jobs) + pcc = np.concatenate(parallel(p_fun(r, coils) for r in nas(rr, n_jobs)), axis=0) + B += pcc + B *= _MAG_FACTOR + return B + + +def _do_prim_curr(rr, coils): + """Calculate primary currents in a set of MEG coils. + + See Mosher et al., 1999 Section II for discussion of primary vs. volume + currents. + + Parameters + ---------- + rr : ndarray, shape (n_dipoles, 3) + 3D dipole source positions in head coordinates + coils : list of dict + List of MEG coils where each element contains coil specific information + + Returns + ------- + pc : ndarray, shape (n_sources, n_MEG_sensors) + Primary current for set of MEG coils due to all sources + """ + rmags, cosmags, ws, bins = _triage_coils(coils) + n_coils = bins[-1] + 1 + del coils + pc = np.empty((len(rr) * 3, n_coils)) + for start, stop in _rr_bounds(rr, chunk=1): + pp = _bem_inf_fields(rr[start:stop], rmags, cosmags) + pp *= ws + pp.shape = (3 * (stop - start), -1) + pc[3 * start : 3 * stop] = [ + bincount(bins, this_pp, bins[-1] + 1) for this_pp in pp + ] + return pc + + +def _rr_bounds(rr, chunk=200): + # chunk data nicely + bounds = np.concatenate([np.arange(0, len(rr), chunk), [len(rr)]]) + return zip(bounds[:-1], bounds[1:]) + + +def _do_inf_pots(mri_rr, bem_rr, mri_Q, sol): + """Calculate infinite potentials for MEG or EEG sensors using chunks. + + Parameters + ---------- + mri_rr : ndarray, shape (n_dipoles, 3) + 3D dipole source positions in MRI coordinates + bem_rr : ndarray, shape (n_BEM_vertices, 3) + 3D vertex positions for all surfaces in the BEM + mri_Q : + 3x3 head -> MRI transform. I.e., head_mri_t.dot(np.eye(3)) + sol : ndarray, shape (n_sensors_subset, n_BEM_vertices_subset) + Comes from _bem_specify_coils + + Returns + ------- + B : ndarray, (n_dipoles * 3, n_sensors) + Forward solution for sensors due to volume currents + """ + # Doing work of 'fwd_bem_pot_calc' in MNE-C + # The following code is equivalent to this, but saves memory + # v0s = _bem_inf_pots(rr, bem_rr, Q) # n_rr x 3 x n_bem_rr + # v0s.shape = (len(rr) * 3, v0s.shape[2]) + # B = np.dot(v0s, sol) + + # We chunk the source mri_rr's in order to save memory + B = np.empty((len(mri_rr) * 3, sol.shape[1])) + for start, stop in _rr_bounds(mri_rr): + # v0 in Hämäläinen et al., 1989 == v_inf in Mosher, et al., 1999 + v0s = _bem_inf_pots(mri_rr[start:stop], bem_rr, mri_Q) + v0s = v0s.reshape(-1, v0s.shape[2]) + B[3 * start : 3 * stop] = np.dot(v0s, sol) + return B + + +# ############################################################################# +# SPHERE COMPUTATION + + +def _sphere_pot_or_field(rr, mri_rr, mri_Q, coils, solution, bem_rr, n_jobs, coil_type): + """Do potential or field for spherical model.""" + fun = _eeg_spherepot_coil if coil_type == "eeg" else _sphere_field + parallel, p_fun, n_jobs = parallel_func(fun, n_jobs, max_jobs=len(rr)) + B = np.concatenate( + parallel(p_fun(r, coils, sphere=solution) for r in np.array_split(rr, n_jobs)) + ) + return B + + +def _sphere_field(rrs, coils, sphere): + """Compute field for spherical model using Jukka Sarvas' field computation. + + Jukka Sarvas, "Basic mathematical and electromagnetic concepts of the + biomagnetic inverse problem", Phys. Med. Biol. 1987, Vol. 32, 1, 11-22. 
+ + The formulas have been manipulated for efficient computation + by Matti Hämäläinen, February 1990 + """ + rmags, cosmags, ws, bins = _triage_coils(coils) + return _do_sphere_field(rrs, rmags, cosmags, ws, bins, sphere["r0"]) + + +@jit() +def _do_sphere_field(rrs, rmags, cosmags, ws, bins, r0): + n_coils = bins[-1] + 1 + # Shift to the sphere model coordinates + rrs = rrs - r0 + B = np.zeros((3 * len(rrs), n_coils)) + for ri in range(len(rrs)): + rr = rrs[ri] + # Check for a dipole at the origin + if np.sqrt(np.dot(rr, rr)) <= 1e-10: + continue + this_poss = rmags - r0 + + # Vector from dipole to the field point + a_vec = this_poss - rr + a = np.sqrt(np.sum(a_vec * a_vec, axis=1)) + r = np.sqrt(np.sum(this_poss * this_poss, axis=1)) + rr0 = np.sum(this_poss * rr, axis=1) + ar = (r * r) - rr0 + ar0 = ar / a + F = a * (r * a + ar) + gr = (a * a) / r + ar0 + 2.0 * (a + r) + g0 = a + 2 * r + ar0 + # Compute the dot products needed + re = np.sum(this_poss * cosmags, axis=1) + r0e = np.sum(rr * cosmags, axis=1) + g = (g0 * r0e - gr * re) / (F * F) + good = (a > 0) | (r > 0) | ((a * r) + 1 > 1e-5) + rr_ = rr.reshape(1, 3) + v1 = np.empty((cosmags.shape[0], 3)) + _jit_cross(v1, rr_, cosmags) + v2 = np.empty((cosmags.shape[0], 3)) + _jit_cross(v2, rr_, this_poss) + xx = (good * ws).reshape(-1, 1) * ( + v1 / F.reshape(-1, 1) + v2 * g.reshape(-1, 1) + ) + for jj in range(3): + zz = bincount(bins, xx[:, jj], n_coils) + B[3 * ri + jj, :] = zz + B *= _MAG_FACTOR + return B + + +def _eeg_spherepot_coil(rrs, coils, sphere): + """Calculate the EEG in the sphere model.""" + rmags, cosmags, ws, bins = _triage_coils(coils) + n_coils = bins[-1] + 1 + del coils + + # Shift to the sphere model coordinates + rrs = rrs - sphere["r0"] + + B = np.zeros((3 * len(rrs), n_coils)) + for ri, rr in enumerate(rrs): + # Only process dipoles inside the innermost sphere + if np.sqrt(np.dot(rr, rr)) >= sphere["layers"][0]["rad"]: + continue + # fwd_eeg_spherepot_vec + vval_one = np.zeros((len(rmags), 3)) + + # Make a weighted sum over the equivalence parameters + for eq in range(sphere["nfit"]): + # Scale the dipole position + rd = sphere["mu"][eq] * rr + rd2 = np.sum(rd * rd) + rd2_inv = 1.0 / rd2 + # Go over all electrodes + this_pos = rmags - sphere["r0"] + + # Scale location onto the surface of the sphere (not used) + # if sphere['scale_pos']: + # pos_len = (sphere['layers'][-1]['rad'] / + # np.sqrt(np.sum(this_pos * this_pos, axis=1))) + # this_pos *= pos_len + + # Vector from dipole to the field point + a_vec = this_pos - rd + + # Compute the dot products needed + a = np.sqrt(np.sum(a_vec * a_vec, axis=1)) + a3 = 2.0 / (a * a * a) + r2 = np.sum(this_pos * this_pos, axis=1) + r = np.sqrt(r2) + rrd = np.sum(this_pos * rd, axis=1) + ra = r2 - rrd + rda = rrd - rd2 + + # The main ingredients + F = a * (r * a + ra) + c1 = a3 * rda + 1.0 / a - 1.0 / r + c2 = a3 + (a + r) / (r * F) + + # Mix them together and scale by lambda/(rd*rd) + m1 = c1 - c2 * rrd + m2 = c2 * rd2 + + vval_one += ( + sphere["lambda"][eq] + * rd2_inv + * (m1[:, np.newaxis] * rd + m2[:, np.newaxis] * this_pos) + ) + + # compute total result + xx = vval_one * ws[:, np.newaxis] + zz = np.array([bincount(bins, x, bins[-1] + 1) for x in xx.T]) + B[3 * ri : 3 * ri + 3, :] = zz + # finishing by scaling by 1/(4*M_PI) + B *= 0.25 / np.pi + return B + + +def _triage_coils(coils): + return coils if isinstance(coils, tuple) else _concatenate_coils(coils) + + +# ############################################################################# +# MAGNETIC DIPOLE (e.g. 
CHPI) + +_MIN_DIST_LIMIT = 1e-5 + + +def _magnetic_dipole_field_vec(rrs, coils, too_close="raise"): + rmags, cosmags, ws, bins = _triage_coils(coils) + fwd, min_dist = _compute_mdfv(rrs, rmags, cosmags, ws, bins, too_close) + if min_dist < _MIN_DIST_LIMIT: + msg = f"Coil too close (dist = {min_dist * 1000:g} mm)" + if too_close == "raise": + raise RuntimeError(msg) + func = warn if too_close == "warning" else logger.info + func(msg) + return fwd + + +@jit() +def _compute_mdfv(rrs, rmags, cosmags, ws, bins, too_close): + """Compute an MEG forward solution for a set of magnetic dipoles.""" + # The code below is a more efficient version (~30x) of this: + # for ri, rr in enumerate(rrs): + # for k in range(len(coils)): + # this_coil = coils[k] + # # Go through all points + # diff = this_coil['rmag'] - rr + # dist2 = np.sum(diff * diff, axis=1)[:, np.newaxis] + # dist = np.sqrt(dist2) + # if (dist < 1e-5).any(): + # raise RuntimeError('Coil too close') + # dist5 = dist2 * dist2 * dist + # sum_ = (3 * diff * np.sum(diff * this_coil['cosmag'], + # axis=1)[:, np.newaxis] - + # dist2 * this_coil['cosmag']) / dist5 + # fwd[3*ri:3*ri+3, k] = 1e-7 * np.dot(this_coil['w'], sum_) + fwd = np.zeros((3 * len(rrs), bins[-1] + 1)) + min_dist = np.inf + ws2 = ws.reshape(-1, 1) + for ri in range(len(rrs)): + rr = rrs[ri] + diff = rmags - rr + dist2_ = np.sum(diff * diff, axis=1) + dist2 = dist2_.reshape(-1, 1) + dist = np.sqrt(dist2) + min_dist = min(dist.min(), min_dist) + if min_dist < _MIN_DIST_LIMIT and too_close == "raise": + break + t_ = np.sum(diff * cosmags, axis=1) + t = t_.reshape(-1, 1) + sum_ = ws2 * (3 * diff * t - dist2 * cosmags) / (dist2 * dist2 * dist) + for ii in range(3): + fwd[3 * ri + ii] = bincount(bins, sum_[:, ii], bins[-1] + 1) + fwd *= _MAG_FACTOR + return fwd, min_dist + + +# ############################################################################# +# MAIN TRIAGING FUNCTION + + +@verbose +def _prep_field_computation(rr, *, sensors, bem, n_jobs, verbose=None): + """Precompute and store some things that are used for both MEG and EEG. + + Calculation includes multiplication factors, coordinate transforms, + compensations, and forward solutions. All are stored in modified fwd_data. + + Parameters + ---------- + rr : ndarray, shape (n_dipoles, 3) + 3D dipole source positions in head coordinates + bem : instance of ConductorModel + Boundary Element Model information + fwd_data : dict + Dict containing sensor information in the head coordinate frame. + Gets updated here with BEM and sensor information for later forward + calculations. 
+ %(n_jobs)s + %(verbose)s + """ + bem_rr = mults = mri_Q = head_mri_t = None + if not bem["is_sphere"]: + if bem["bem_method"] != FIFF.FIFFV_BEM_APPROX_LINEAR: + raise RuntimeError("only linear collocation supported") + # Store (and apply soon) μ_0/(4Ï€) factor before source computations + mults = np.repeat( + bem["source_mult"] / (4.0 * np.pi), [len(s["rr"]) for s in bem["surfs"]] + )[np.newaxis, :] + # Get positions of BEM points for every surface + bem_rr = np.concatenate([s["rr"] for s in bem["surfs"]]) + + # The dipole location and orientation must be transformed + head_mri_t = bem["head_mri_t"] + mri_Q = bem["head_mri_t"]["trans"][:3, :3].T + + solutions = dict() + for coil_type in sensors: + coils = sensors[coil_type]["defs"] + if not bem["is_sphere"]: + if coil_type == "meg": + # MEG field computation matrices for BEM + start = "Composing the field computation matrix" + logger.info("\n" + start + "...") + cf = FIFF.FIFFV_COORD_HEAD + # multiply solution by "mults" here for simplicity + solution = _bem_specify_coils(bem, coils, cf, mults, n_jobs) + else: + # Compute solution for EEG sensor + logger.info("Setting up for EEG...") + solution = _bem_specify_els(bem, coils, mults) + else: + solution = bem + if coil_type == "eeg": + logger.info( + "Using the equivalent source approach in the " + "homogeneous sphere for EEG" + ) + sensors[coil_type]["defs"] = _triage_coils(coils) + solutions[coil_type] = solution + + # Get appropriate forward physics function depending on sphere or BEM model + fun = _sphere_pot_or_field if bem["is_sphere"] else _bem_pot_or_field + + # Update fwd_data with + # bem_rr (3D BEM vertex positions) + # mri_Q (3x3 Head->MRI coord transformation applied to identity matrix) + # head_mri_t (head->MRI coord transform dict) + # fun (_bem_pot_or_field if not 'sphere'; otherwise _sph_pot_or_field) + # solutions (len 2 list; [ndarray, shape (n_MEG_sens, n BEM vertices), + # ndarray, shape (n_EEG_sens, n BEM vertices)] + fwd_data = dict( + bem_rr=bem_rr, mri_Q=mri_Q, head_mri_t=head_mri_t, fun=fun, solutions=solutions + ) + return fwd_data + + +@fill_doc +def _compute_forwards_meeg(rr, *, sensors, fwd_data, n_jobs, silent=False): + """Compute MEG and EEG forward solutions for all sensor types.""" + Bs = dict() + # The dipole location and orientation must be transformed to mri coords + mri_rr = None + if fwd_data["head_mri_t"] is not None: + mri_rr = np.ascontiguousarray(apply_trans(fwd_data["head_mri_t"]["trans"], rr)) + mri_Q, bem_rr, fun = fwd_data["mri_Q"], fwd_data["bem_rr"], fwd_data["fun"] + solutions = fwd_data["solutions"] + del fwd_data + for coil_type, sens in sensors.items(): + coils = sens["defs"] + compensator = sens.get("compensator", None) + post_picks = sens.get("post_picks", None) + solution = solutions.get(coil_type, None) + + # Do the actual forward calculation for a list MEG/EEG sensors + if not silent: + logger.info( + f"Computing {coil_type.upper()} at {len(rr)} source location{_pl(rr)} " + "(free orientations)..." 
+ ) + # Calculate forward solution using spherical or BEM model + B = fun( + rr, + mri_rr, + mri_Q, + coils=coils, + solution=solution, + bem_rr=bem_rr, + n_jobs=n_jobs, + coil_type=coil_type, + ) + + # Compensate if needed (only done for MEG systems w/compensation) + if compensator is not None: + B = B @ compensator.T + if post_picks is not None: + B = B[:, post_picks] + Bs[coil_type] = B + return Bs + + +@verbose +def _compute_forwards(rr, *, bem, sensors, n_jobs, verbose=None): + """Compute the MEG and EEG forward solutions.""" + # Split calculation into two steps to save (potentially) a lot of time + # when e.g. dipole fitting + solver = bem.get("solver", "mne") + _check_option("solver", solver, ("mne", "openmeeg")) + if bem["is_sphere"] or solver == "mne": + fwd_data = _prep_field_computation(rr, sensors=sensors, bem=bem, n_jobs=n_jobs) + Bs = _compute_forwards_meeg( + rr, sensors=sensors, fwd_data=fwd_data, n_jobs=n_jobs + ) + else: + Bs = _compute_forwards_openmeeg(rr, bem=bem, sensors=sensors) + n_sensors_want = sum(len(s["ch_names"]) for s in sensors.values()) + n_sensors = sum(B.shape[1] for B in Bs.values()) + n_sources = list(Bs.values())[0].shape[0] + assert (n_sources, n_sensors) == (len(rr) * 3, n_sensors_want) + return Bs + + +def _compute_forwards_openmeeg(rr, *, bem, sensors): + """Compute the MEG and EEG forward solutions for OpenMEEG.""" + if len(bem["surfs"]) != 3: + raise RuntimeError("Only 3-layer BEM is supported for OpenMEEG.") + om = _import_openmeeg("compute a forward solution using OpenMEEG") + hminv = om.SymMatrix(bem["solution"]) + geom = _make_openmeeg_geometry(bem, invert_transform(bem["head_mri_t"])) + + # Make dipoles for all XYZ orientations + dipoles = np.c_[ + np.kron(rr.T, np.ones(3)[None, :]).T, + np.kron(np.ones(len(rr))[:, None], np.eye(3)), + ] + dipoles = np.asfortranarray(dipoles) + dipoles = om.Matrix(dipoles) + dsm = om.DipSourceMat(geom, dipoles, "Brain") + Bs = dict() + if "eeg" in sensors: + rmags, _, ws, bins = _concatenate_coils(sensors["eeg"]["defs"]) + rmags = np.asfortranarray(rmags.astype(np.float64)) + eeg_sensors = om.Sensors(om.Matrix(np.asfortranarray(rmags)), geom) + h2em = om.Head2EEGMat(geom, eeg_sensors) + eeg_fwd_full = om.GainEEG(hminv, dsm, h2em).array() + Bs["eeg"] = np.array( + [bincount(bins, ws * x, bins[-1] + 1) for x in eeg_fwd_full.T], float + ) + if "meg" in sensors: + rmags, cosmags, ws, bins = _concatenate_coils(sensors["meg"]["defs"]) + rmags = np.asfortranarray(rmags.astype(np.float64)) + cosmags = np.asfortranarray(cosmags.astype(np.float64)) + labels = [str(ii) for ii in range(len(rmags))] + weights = radii = np.ones(len(labels)) + meg_sensors = om.Sensors(labels, rmags, cosmags, weights, radii) + h2mm = om.Head2MEGMat(geom, meg_sensors) + ds2mm = om.DipSource2MEGMat(dipoles, meg_sensors) + meg_fwd_full = om.GainMEG(hminv, dsm, h2mm, ds2mm).array() + B = np.array( + [bincount(bins, ws * x, bins[-1] + 1) for x in meg_fwd_full.T], float + ) + compensator = sensors["meg"].get("compensator", None) + post_picks = sensors["meg"].get("post_picks", None) + if compensator is not None: + B = B @ compensator.T + if post_picks is not None: + B = B[:, post_picks] + Bs["meg"] = B + return Bs diff --git a/mne/forward/_field_interpolation.py b/mne/forward/_field_interpolation.py new file mode 100644 index 0000000..b505b5e --- /dev/null +++ b/mne/forward/_field_interpolation.py @@ -0,0 +1,550 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
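+
+# The core idea in this module: given dot products of the measuring sensors
+# with themselves (self_dots) and with the target surface points
+# (surface_dots), the field map is surface_dots @ pinv(self_dots), with noise
+# whitening and SSP projection folded in by _compute_mapping_matrix() below.
+# Stripped-down sketch without whitening/projection (illustrative only):
+#
+#     import numpy as np
+#
+#     def demo_mapping(self_dots, surface_dots, miss=1e-4):
+#         u, s, vh = np.linalg.svd(self_dots, full_matrices=False)
+#         varexp = np.cumsum(s) / np.sum(s)
+#         n = int(np.searchsorted(varexp, 1.0 - miss)) + 1  # cf. _pinv_trunc
+#         inv = ((u[:, :n] * (1.0 / s[:n])) @ vh[:n]).T
+#         return surface_dots @ inv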
+ +# The computations in this code were primarily derived from Matti Hämäläinen's +# C code. + +import inspect +from copy import deepcopy + +import numpy as np +from scipy.interpolate import interp1d + +from .._fiff.constants import FIFF +from .._fiff.meas_info import _simplify_info +from .._fiff.pick import pick_info, pick_types +from .._fiff.proj import _has_eeg_average_ref_proj, make_projector +from ..bem import _check_origin +from ..cov import make_ad_hoc_cov +from ..epochs import BaseEpochs, EpochsArray +from ..evoked import Evoked, EvokedArray +from ..fixes import _safe_svd +from ..surface import get_head_surf, get_meg_helmet_surf +from ..transforms import _find_trans, _get_trans, transform_surface_to +from ..utils import _check_fname, _check_option, _pl, _reg_pinv, logger, verbose +from ._lead_dots import ( + _do_cross_dots, + _do_self_dots, + _do_surface_dots, + _get_legen_table, +) +from ._make_forward import _create_eeg_els, _create_meg_coils, _read_coil_defs + + +def _setup_dots(mode, info, coils, ch_type): + """Set up dot products.""" + int_rad = 0.06 + noise = make_ad_hoc_cov(info, dict(mag=20e-15, grad=5e-13, eeg=1e-6)) + n_coeff, interp = (50, "nearest") if mode == "fast" else (100, "linear") + lut, n_fact = _get_legen_table(ch_type, False, n_coeff, verbose=False) + lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp, axis=0) + return int_rad, noise, lut_fun, n_fact + + +def _compute_mapping_matrix(fmd, info): + """Do the hairy computations.""" + logger.info(" Preparing the mapping matrix...") + # assemble a projector and apply it to the data + ch_names = fmd["ch_names"] + projs = info.get("projs", list()) + proj_op = make_projector(projs, ch_names)[0] + proj_dots = np.dot(proj_op.T, np.dot(fmd["self_dots"], proj_op)) + + noise_cov = fmd["noise"] + # Whiten + if not noise_cov["diag"]: + raise NotImplementedError # this shouldn't happen + whitener = np.diag(1.0 / np.sqrt(noise_cov["data"].ravel())) + whitened_dots = np.dot(whitener.T, np.dot(proj_dots, whitener)) + + # SVD is numerically better than the eigenvalue composition even if + # mat is supposed to be symmetric and positive definite + if fmd.get("pinv_method", "tsvd") == "tsvd": + inv, fmd["nest"] = _pinv_trunc(whitened_dots, fmd["miss"]) + else: + assert fmd["pinv_method"] == "tikhonov", fmd["pinv_method"] + inv, fmd["nest"] = _pinv_tikhonov(whitened_dots, fmd["miss"]) + + # Sandwich with the whitener + inv_whitened = np.dot(whitener.T, np.dot(inv, whitener)) + + # Take into account that the lead fields used to compute + # d->surface_dots were unprojected + inv_whitened_proj = proj_op.T @ inv_whitened + + # Finally sandwich in the selection matrix + # This one picks up the correct lead field projection + mapping_mat = np.dot(fmd["surface_dots"], inv_whitened_proj) + + # Optionally apply the average electrode reference to the final field map + if fmd["kind"] == "eeg" and _has_eeg_average_ref_proj(info): + logger.info( + " The map has an average electrode reference " + f"({mapping_mat.shape[0]} channels)" + ) + mapping_mat -= np.mean(mapping_mat, axis=0) + return mapping_mat + + +def _pinv_trunc(x, miss): + """Compute pseudoinverse, truncating at most "miss" fraction of varexp.""" + u, s, v = _safe_svd(x, full_matrices=False) + + # Eigenvalue truncation + varexp = np.cumsum(s) + varexp /= varexp[-1] + n = np.where(varexp >= (1.0 - miss))[0][0] + 1 + logger.info( + " Truncating at %d/%d components to omit less than %g " "(%0.2g)", + n, + len(s), + miss, + 1.0 - varexp[n - 1], + ) + s = 1.0 / s[:n] + inv = 
((u[:, :n] * s) @ v[:n]).T + return inv, n + + +def _pinv_tikhonov(x, reg): + # _reg_pinv requires square Hermitian, which we have here + inv, _, n = _reg_pinv(x, reg=reg, rank=None) + logger.info( + f" Truncating at {n}/{len(x)} components and regularizing " + f"with α={reg:0.1e}" + ) + return inv, n + + +def _map_meg_or_eeg_channels(info_from, info_to, mode, origin, miss=None): + """Find mapping from one set of channels to another. + + Parameters + ---------- + info_from : instance of Info + The measurement data to interpolate from. + info_to : instance of Info + The measurement info to interpolate to. + mode : str + Either `'accurate'` or `'fast'`, determines the quality of the + Legendre polynomial expansion used. `'fast'` should be sufficient + for most applications. + origin : array-like, shape (3,) | str + Origin of the sphere in the head coordinate frame and in meters. + Can be ``'auto'``, which means a head-digitization-based origin + fit. Default is ``(0., 0., 0.04)``. + + Returns + ------- + mapping : array, shape (n_to, n_from) + A mapping matrix. + """ + # no need to apply trans because both from and to coils are in device + # coordinates + info_kinds = set(ch["kind"] for ch in info_to["chs"]) + info_kinds |= set(ch["kind"] for ch in info_from["chs"]) + if FIFF.FIFFV_REF_MEG_CH in info_kinds: # refs same as MEG + info_kinds |= set([FIFF.FIFFV_MEG_CH]) + info_kinds -= set([FIFF.FIFFV_REF_MEG_CH]) + info_kinds = sorted(info_kinds) + # This should be guaranteed by the callers + assert len(info_kinds) == 1 and info_kinds[0] in ( + FIFF.FIFFV_MEG_CH, + FIFF.FIFFV_EEG_CH, + ) + kind = "eeg" if info_kinds[0] == FIFF.FIFFV_EEG_CH else "meg" + + # + # Step 1. Prepare the coil definitions + # + if kind == "meg": + templates = _read_coil_defs(verbose=False) + coils_from = _create_meg_coils( + info_from["chs"], "normal", info_from["dev_head_t"], templates + ) + coils_to = _create_meg_coils( + info_to["chs"], "normal", info_to["dev_head_t"], templates + ) + pinv_method = "tsvd" + miss = 1e-4 + else: + coils_from = _create_eeg_els(info_from["chs"]) + coils_to = _create_eeg_els(info_to["chs"]) + pinv_method = "tikhonov" + miss = 1e-1 + if _has_eeg_average_ref_proj(info_from) and not _has_eeg_average_ref_proj( + info_to + ): + raise RuntimeError( + "info_to must have an average EEG reference projector if " + "info_from has one" + ) + origin = _check_origin(origin, info_from) + # + # Step 2. Calculate the dot products + # + int_rad, noise, lut_fun, n_fact = _setup_dots(mode, info_from, coils_from, kind) + logger.info( + f" Computing dot products for {len(coils_from)} " + f"{kind.upper()} channel{_pl(coils_from)}..." + ) + self_dots = _do_self_dots( + int_rad, False, coils_from, origin, kind, lut_fun, n_fact, n_jobs=None + ) + logger.info( + f" Computing cross products for {len(coils_from)} → " + f"{len(coils_to)} {kind.upper()} channel{_pl(coils_to)}..." + ) + cross_dots = _do_cross_dots( + int_rad, False, coils_from, coils_to, origin, kind, lut_fun, n_fact + ).T + + ch_names = [c["ch_name"] for c in info_from["chs"]] + fmd = dict( + kind=kind, + ch_names=ch_names, + origin=origin, + noise=noise, + self_dots=self_dots, + surface_dots=cross_dots, + int_rad=int_rad, + miss=miss, + pinv_method=pinv_method, + ) + + # + # Step 3. Compute the mapping matrix + # + mapping = _compute_mapping_matrix(fmd, info_from) + return mapping + + +def _as_meg_type_inst(inst, ch_type="grad", mode="fast"): + """Compute virtual evoked using interpolated fields in mag/grad channels. 
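+
+    The data measured by the source MEG channels are multiplied by the
+    mapping matrix from _map_meg_or_eeg_channels, as if the fields had been
+    recorded by virtual sensors of the destination type.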
+
+    Parameters
+    ----------
+    inst : instance of mne.Evoked or mne.Epochs
+        The evoked or epochs object.
+    ch_type : str
+        The destination channel type. It can be 'mag' or 'grad'.
+    mode : str
+        Either ``'accurate'`` or ``'fast'``, determines the quality of the
+        Legendre polynomial expansion used. ``'fast'`` should be sufficient
+        for most applications.
+
+    Returns
+    -------
+    inst : instance of mne.EvokedArray or mne.EpochsArray
+        The transformed evoked object containing only virtual channels.
+    """
+    _check_option("ch_type", ch_type, ["mag", "grad"])
+
+    # pick the original and destination channels
+    pick_from = pick_types(inst.info, meg=True, eeg=False, ref_meg=False)
+    pick_to = pick_types(inst.info, meg=ch_type, eeg=False, ref_meg=False)
+
+    if len(pick_to) == 0:
+        raise ValueError(
+            "No channels matching the destination channel type"
+            " found in info. Please pass an evoked containing"
+            " both the original and destination channels. Only the"
+            " locations of the destination channels will be used"
+            " for interpolation."
+        )
+
+    info_from = pick_info(inst.info, pick_from)
+    info_to = pick_info(inst.info, pick_to)
+    # XXX someday we should probably expose the origin
+    mapping = _map_meg_or_eeg_channels(
+        info_from, info_to, origin=(0.0, 0.0, 0.04), mode=mode
+    )
+
+    # compute data by multiplying by the 'gain matrix' from
+    # original sensors to virtual sensors
+    if hasattr(inst, "get_data"):
+        kwargs = dict()
+        if "copy" in inspect.getfullargspec(inst.get_data).kwonlyargs:
+            kwargs["copy"] = False
+        data = inst.get_data(**kwargs)
+    else:
+        data = inst.data
+
+    ndim = data.ndim
+    if ndim == 2:
+        data = data[np.newaxis, :, :]
+
+    data_ = np.empty((data.shape[0], len(mapping), data.shape[2]), dtype=data.dtype)
+    for d, d_ in zip(data, data_):
+        d_[:] = np.dot(mapping, d[pick_from])
+
+    # keep only the destination channel types
+    info = pick_info(inst.info, sel=pick_to, copy=True)
+
+    # change channel names to emphasize they contain interpolated data
+    for ch in info["chs"]:
+        ch["ch_name"] += "_v"
+    info._update_redundant()
+    info._check_consistency()
+    if isinstance(inst, Evoked):
+        assert ndim == 2
+        data_ = data_[0]  # undo new axis
+        inst_ = EvokedArray(
+            data_, info, tmin=inst.times[0], comment=inst.comment, nave=inst.nave
+        )
+    else:
+        assert isinstance(inst, BaseEpochs)
+        inst_ = EpochsArray(
+            data_,
+            info,
+            tmin=inst.tmin,
+            events=inst.events,
+            event_id=inst.event_id,
+            metadata=inst.metadata,
+        )
+
+    return inst_
+
+
+@verbose
+def _make_surface_mapping(
+    info,
+    surf,
+    ch_type="meg",
+    trans=None,
+    mode="fast",
+    n_jobs=None,
+    origin=(0.0, 0.0, 0.04),
+    verbose=None,
+):
+    """Re-map M/EEG data to a surface.
+
+    Parameters
+    ----------
+    %(info_not_none)s
+    surf : dict
+        The surface to map the data to. The required fields are ``'rr'``,
+        ``'nn'``, and ``'coord_frame'``. Must be in head coordinates.
+    ch_type : str
+        Must be either ``'meg'`` or ``'eeg'``, determines the type of field.
+    trans : None | dict
+        If None, no transformation applied. Should be a Head<->MRI
+        transformation.
+    mode : str
+        Either ``'accurate'`` or ``'fast'``, determines the quality of the
+        Legendre polynomial expansion used. ``'fast'`` should be sufficient
+        for most applications.
+    %(n_jobs)s
+    origin : array-like, shape (3,) | str
+        Origin of the sphere in the head coordinate frame and in meters.
+        Can be ``'auto'``, which means a head-digitization-based origin
+        fit. Default is ``(0., 0., 0.04)``.
+ %(verbose)s + + Returns + ------- + mapping : array + A n_vertices x n_sensors array that remaps the MEG or EEG data, + as `new_data = np.dot(mapping, data)`. + """ + if not all(key in surf for key in ["rr", "nn"]): + raise KeyError('surf must have both "rr" and "nn"') + if "coord_frame" not in surf: + raise KeyError( + 'The surface coordinate frame must be specified in surf["coord_frame"]' + ) + _check_option("mode", mode, ["accurate", "fast"]) + + # deal with coordinate frames here -- always go to "head" (easiest) + orig_surf = surf + surf = transform_surface_to(deepcopy(surf), "head", trans) + origin = _check_origin(origin, info) + + # + # Step 1. Prepare the coil definitions + # Do the dot products, assume surf in head coords + # + _check_option("ch_type", ch_type, ["meg", "eeg"]) + if ch_type == "meg": + picks = pick_types(info, meg=True, eeg=False, ref_meg=False) + logger.info("Prepare MEG mapping...") + else: + picks = pick_types(info, meg=False, eeg=True, ref_meg=False) + logger.info("Prepare EEG mapping...") + if len(picks) == 0: + raise RuntimeError("cannot map, no channels found") + # XXX this code does not do any checking for compensation channels, + # but it seems like this must be intentional from the ref_meg=False + # (presumably from the C code) + dev_head_t = info["dev_head_t"] + info = pick_info(_simplify_info(info), picks) + info["dev_head_t"] = dev_head_t + + # create coil defs in head coordinates + if ch_type == "meg": + # Put them in head coordinates + coils = _create_meg_coils(info["chs"], "normal", info["dev_head_t"]) + type_str = "coils" + miss = 1e-4 # Smoothing criterion for MEG + else: # EEG + coils = _create_eeg_els(info["chs"]) + type_str = "electrodes" + miss = 1e-3 # Smoothing criterion for EEG + + # + # Step 2. Calculate the dot products + # + int_rad, noise, lut_fun, n_fact = _setup_dots(mode, info, coils, ch_type) + logger.info("Computing dot products for %i %s...", len(coils), type_str) + self_dots = _do_self_dots( + int_rad, False, coils, origin, ch_type, lut_fun, n_fact, n_jobs + ) + sel = np.arange(len(surf["rr"])) # eventually we should do sub-selection + logger.info("Computing dot products for %i surface locations...", len(sel)) + surface_dots = _do_surface_dots( + int_rad, False, coils, surf, sel, origin, ch_type, lut_fun, n_fact, n_jobs + ) + + # + # Step 4. Return the result + # + fmd = dict( + kind=ch_type, + surf=surf, + ch_names=info["ch_names"], + coils=coils, + origin=origin, + noise=noise, + self_dots=self_dots, + surface_dots=surface_dots, + int_rad=int_rad, + miss=miss, + ) + logger.info("Field mapping data ready") + + fmd["data"] = _compute_mapping_matrix(fmd, info) + # bring the original back, whatever coord frame it was in + fmd["surf"] = orig_surf + + # Remove some unnecessary fields + del fmd["self_dots"] + del fmd["surface_dots"] + del fmd["int_rad"] + del fmd["miss"] + return fmd + + +@verbose +def make_field_map( + evoked, + trans="auto", + subject=None, + subjects_dir=None, + ch_type=None, + mode="fast", + meg_surf="helmet", + origin=(0.0, 0.0, 0.04), + n_jobs=None, + *, + head_source=("bem", "head"), + verbose=None, +): + """Compute surface maps used for field display in 3D. + + Parameters + ---------- + evoked : Evoked | Epochs | Raw + The measurement file. Need to have info attribute. + %(trans)s ``"auto"`` (default) will load trans from the FreeSurfer + directory specified by ``subject`` and ``subjects_dir`` parameters. + + .. versionchanged:: 0.19 + Support for ``'fsaverage'`` argument. 
+ subject : str | None + The subject name corresponding to FreeSurfer environment + variable SUBJECT. If None, map for EEG data will not be available. + subjects_dir : path-like + The path to the freesurfer subjects reconstructions. + It corresponds to Freesurfer environment variable SUBJECTS_DIR. + ch_type : None | ``'eeg'`` | ``'meg'`` + If None, a map for each available channel type will be returned. + Else only the specified type will be used. + mode : ``'accurate'`` | ``'fast'`` + Either ``'accurate'`` or ``'fast'``, determines the quality of the + Legendre polynomial expansion used. ``'fast'`` should be sufficient + for most applications. + meg_surf : 'helmet' | 'head' + Should be ``'helmet'`` or ``'head'`` to specify in which surface + to compute the MEG field map. The default value is ``'helmet'``. + origin : array-like, shape (3,) | 'auto' + Origin of the sphere in the head coordinate frame and in meters. + Can be ``'auto'``, which means a head-digitization-based origin + fit. Default is ``(0., 0., 0.04)``. + + .. versionadded:: 0.11 + %(n_jobs)s + %(head_source)s + + .. versionadded:: 1.1 + %(verbose)s + + Returns + ------- + surf_maps : list + The surface maps to be used for field plots. The list contains + separate ones for MEG and EEG (if both MEG and EEG are present). + """ + info = evoked.info + + if ch_type is None: + types = [t for t in ["eeg", "meg"] if t in evoked] + else: + _check_option("ch_type", ch_type, ["eeg", "meg"]) + types = [ch_type] + + if subjects_dir is not None: + subjects_dir = _check_fname( + subjects_dir, + overwrite="read", + must_exist=True, + name="subjects_dir", + need_dir=True, + ) + if isinstance(trans, str) and trans == "auto": + # let's try to do this in MRI coordinates so they're easy to plot + trans = _find_trans(subject, subjects_dir) + trans, trans_type = _get_trans(trans, fro="head", to="mri") + + if "eeg" in types and trans_type == "identity": + logger.info("No trans file available. EEG data ignored.") + types.remove("eeg") + + if len(types) == 0: + raise RuntimeError("No data available for mapping.") + + _check_option("meg_surf", meg_surf, ["helmet", "head"]) + + surfs = [] + for this_type in types: + if this_type == "meg" and meg_surf == "helmet": + surf = get_meg_helmet_surf(info, trans) + else: + surf = get_head_surf(subject, source=head_source, subjects_dir=subjects_dir) + surfs.append(surf) + + surf_maps = list() + + for this_type, this_surf in zip(types, surfs): + this_map = _make_surface_mapping( + evoked.info, + this_surf, + this_type, + trans, + n_jobs=n_jobs, + origin=origin, + mode=mode, + ) + surf_maps.append(this_map) + + return surf_maps diff --git a/mne/forward/_lead_dots.py b/mne/forward/_lead_dots.py new file mode 100644 index 0000000..5db3962 --- /dev/null +++ b/mne/forward/_lead_dots.py @@ -0,0 +1,610 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# The computations in this code were primarily derived from Matti Hämäläinen's +# C code. + +import os +import os.path as op + +import numpy as np +from numpy.polynomial import legendre + +from ..parallel import parallel_func +from ..utils import _get_extra_data_path, fill_doc, logger, verbose + +############################################################################## +# FAST LEGENDRE (DERIVATIVE) POLYNOMIALS USING LOOKUP TABLE + + +def _next_legen_der(n, x, p0, p01, p0d, p0dd): + """Compute the next Legendre polynomial and its derivatives.""" + # only good for n > 1 ! 
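+    # Bonnet's recursion: n * P_n(x) = (2n - 1) * x * P_{n-1}(x) - (n - 1) * P_{n-2}(x);
+    # the derivatives follow from P_n'(x) = n * P_{n-1}(x) + x * P_{n-1}'(x)
+    # and P_n''(x) = (n + 1) * P_{n-1}'(x) + x * P_{n-1}''(x).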
+ old_p0 = p0 + old_p0d = p0d + p0 = ((2 * n - 1) * x * old_p0 - (n - 1) * p01) / n + p0d = n * old_p0 + x * old_p0d + p0dd = (n + 1) * old_p0d + x * p0dd + return p0, p0d, p0dd + + +def _get_legen(x, n_coeff=100): + """Get Legendre polynomials expanded about x.""" + return legendre.legvander(x, n_coeff - 1) + + +def _get_legen_der(xx, n_coeff=100): + """Get Legendre polynomial derivatives expanded about x.""" + coeffs = np.empty((len(xx), n_coeff, 3)) + for c, x in zip(coeffs, xx): + p0s, p0ds, p0dds = c[:, 0], c[:, 1], c[:, 2] + p0s[:2] = [1.0, x] + p0ds[:2] = [0.0, 1.0] + p0dds[:2] = [0.0, 0.0] + for n in range(2, n_coeff): + p0s[n], p0ds[n], p0dds[n] = _next_legen_der( + n, x, p0s[n - 1], p0s[n - 2], p0ds[n - 1], p0dds[n - 1] + ) + return coeffs + + +@verbose +def _get_legen_table( + ch_type, + volume_integral=False, + n_coeff=100, + n_interp=20000, + force_calc=False, + verbose=None, +): + """Return a (generated) LUT of Legendre (derivative) polynomial coeffs.""" + if n_interp % 2 != 0: + raise RuntimeError("n_interp must be even") + fname = op.join(_get_extra_data_path(), "tables") + if not op.isdir(fname): + # Updated due to API change (GH 1167) + os.makedirs(fname) + if ch_type == "meg": + fname = op.join(fname, f"legder_{n_coeff}_{n_interp}.bin") + leg_fun = _get_legen_der + extra_str = " derivative" + lut_shape = (n_interp + 1, n_coeff, 3) + else: # 'eeg' + fname = op.join(fname, f"legval_{n_coeff}_{n_interp}.bin") + leg_fun = _get_legen + extra_str = "" + lut_shape = (n_interp + 1, n_coeff) + if not op.isfile(fname) or force_calc: + logger.info(f"Generating Legendre{extra_str} table...") + x_interp = np.linspace(-1, 1, n_interp + 1) + lut = leg_fun(x_interp, n_coeff).astype(np.float32) + if not force_calc: + with open(fname, "wb") as fid: + fid.write(lut.tobytes()) + else: + logger.info(f"Reading Legendre{extra_str} table...") + with open(fname, "rb", buffering=0) as fid: + lut = np.fromfile(fid, np.float32) + lut.shape = lut_shape + + # we need this for the integration step + n_fact = np.arange(1, n_coeff, dtype=float) + if ch_type == "meg": + n_facts = list() # multn, then mult, then multn * (n + 1) + if volume_integral: + n_facts.append(n_fact / ((2.0 * n_fact + 1.0) * (2.0 * n_fact + 3.0))) + else: + n_facts.append(n_fact / (2.0 * n_fact + 1.0)) + n_facts.append(n_facts[0] / (n_fact + 1.0)) + n_facts.append(n_facts[0] * (n_fact + 1.0)) + # skip the first set of coefficients because they are not used + lut = lut[:, 1:, [0, 1, 1, 2]] # for multiplicative convenience later + # reshape this for convenience, too + n_facts = np.array(n_facts)[[2, 0, 1, 1], :].T + n_facts = np.ascontiguousarray(n_facts) + n_fact = n_facts + else: # 'eeg' + n_fact = (2.0 * n_fact + 1.0) * (2.0 * n_fact + 1.0) / n_fact + # skip the first set of coefficients because they are not used + lut = lut[:, 1:].copy() + return lut, n_fact + + +def _comp_sum_eeg(beta, ctheta, lut_fun, n_fact): + """Lead field dot products using Legendre polynomial (P_n) series.""" + # Compute the sum occurring in the evaluation. 
+ # The result is + # sums[:] (2n+1)^2/n beta^n P_n + n_chunk = 50000000 // (8 * max(n_fact.shape) * 2) + lims = np.concatenate([np.arange(0, beta.size, n_chunk), [beta.size]]) + s0 = np.empty(beta.shape) + for start, stop in zip(lims[:-1], lims[1:]): + coeffs = lut_fun(ctheta[start:stop]) + betans = np.tile(beta[start:stop][:, np.newaxis], (1, n_fact.shape[0])) + np.cumprod(betans, axis=1, out=betans) # run inplace + coeffs *= betans + s0[start:stop] = np.dot(coeffs, n_fact) # == weighted sum across cols + return s0 + + +def _comp_sums_meg(beta, ctheta, lut_fun, n_fact, volume_integral): + """Lead field dot products using Legendre polynomial (P_n) series. + + Parameters + ---------- + beta : array, shape (n_points * n_points, 1) + Coefficients of the integration. + ctheta : array, shape (n_points * n_points, 1) + Cosine of the angle between the sensor integration points. + lut_fun : callable + Look-up table for evaluating Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + volume_integral : bool + If True, compute volume integral. + + Returns + ------- + sums : array, shape (4, n_points * n_points) + The results. + """ + # Compute the sums occurring in the evaluation. + # Two point magnetometers on the xz plane are assumed. + # The four sums are: + # * sums[:, 0] n(n+1)/(2n+1) beta^(n+1) P_n + # * sums[:, 1] n/(2n+1) beta^(n+1) P_n' + # * sums[:, 2] n/((2n+1)(n+1)) beta^(n+1) P_n' + # * sums[:, 3] n/((2n+1)(n+1)) beta^(n+1) P_n'' + + # This is equivalent, but slower: + # sums = np.sum(bbeta[:, :, np.newaxis].T * n_fact * coeffs, axis=1) + # sums = np.rollaxis(sums, 2) + # or + # sums = np.einsum('ji,jk,ijk->ki', bbeta, n_fact, lut_fun(ctheta))) + sums = np.empty((n_fact.shape[1], len(beta))) + # beta can be e.g. 3 million elements, which ends up using lots of memory + # so we split up the computations into ~50 MB blocks + n_chunk = 50000000 // (8 * max(n_fact.shape) * 2) + lims = np.concatenate([np.arange(0, beta.size, n_chunk), [beta.size]]) + for start, stop in zip(lims[:-1], lims[1:]): + bbeta = np.tile(beta[start:stop][np.newaxis], (n_fact.shape[0], 1)) + bbeta[0] *= beta[start:stop] + np.cumprod(bbeta, axis=0, out=bbeta) # run inplace + np.einsum( + "ji,jk,ijk->ki", + bbeta, + n_fact, + lut_fun(ctheta[start:stop]), + out=sums[:, start:stop], + ) + return sums + + +############################################################################### +# SPHERE DOTS + +_meg_const = 4e-14 * np.pi # This is \mu_0^2/4\pi +_eeg_const = 1.0 / (4.0 * np.pi) + + +def _fast_sphere_dot_r0( + r, + rr1_orig, + rr2s, + lr1, + lr2s, + cosmags1, + cosmags2s, + w1, + w2s, + volume_integral, + lut, + n_fact, + ch_type, +): + """Lead field dot product computation for M/EEG in the sphere model. + + Parameters + ---------- + r : float + The integration radius. It is used to calculate beta as: + beta = (r * r) / (lr1 * lr2). + rr1 : array, shape (n_points x 3) + Normalized position vectors of integrations points in first sensor. + rr2s : list + Normalized position vector of integration points in second sensor. + lr1 : array, shape (n_points x 1) + Magnitude of position vector of integration points in first sensor. + lr2s : list + Magnitude of position vector of integration points in second sensor. + cosmags1 : array, shape (n_points x 1) + Direction of integration points in first sensor. + cosmags2s : list + Direction of integration points in second sensor. + w1 : array, shape (n_points x 1) | None + Weights of integration points in the first sensor. 
+ w2s : list + Weights of integration points in the second sensor. + volume_integral : bool + If True, compute volume integral. + lut : callable + Look-up table for evaluating Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + ch_type : str + The channel type. It can be 'meg' or 'eeg'. + + Returns + ------- + result : float + The integration sum. + """ + if w1 is None: # operating on surface, treat independently + out_shape = (len(rr2s), len(rr1_orig)) + sum_axis = 1 # operate along second axis only at the end + else: + out_shape = (len(rr2s),) + sum_axis = None # operate on flattened array at the end + out = np.empty(out_shape) + rr2 = np.concatenate(rr2s) + lr2 = np.concatenate(lr2s) + cosmags2 = np.concatenate(cosmags2s) + + # outer product, sum over coords + ct = np.einsum("ik,jk->ij", rr1_orig, rr2) + np.clip(ct, -1, 1, ct) + + # expand axes + rr1 = rr1_orig[:, np.newaxis, :] # (n_rr1, n_rr2, n_coord) e.g. 4x4x3 + rr2 = rr2[np.newaxis, :, :] + lr1lr2 = lr1[:, np.newaxis] * lr2[np.newaxis, :] + + beta = (r * r) / lr1lr2 + if ch_type == "meg": + sums = _comp_sums_meg( + beta.flatten(), ct.flatten(), lut, n_fact, volume_integral + ) + sums.shape = (4,) + beta.shape + + # Accumulate the result, a little bit streamlined version + # cosmags1 = cosmags1[:, np.newaxis, :] + # cosmags2 = cosmags2[np.newaxis, :, :] + # n1c1 = np.sum(cosmags1 * rr1, axis=2) + # n1c2 = np.sum(cosmags1 * rr2, axis=2) + # n2c1 = np.sum(cosmags2 * rr1, axis=2) + # n2c2 = np.sum(cosmags2 * rr2, axis=2) + # n1n2 = np.sum(cosmags1 * cosmags2, axis=2) + n1c1 = np.einsum("ik,ijk->ij", cosmags1, rr1) + n1c2 = np.einsum("ik,ijk->ij", cosmags1, rr2) + n2c1 = np.einsum("jk,ijk->ij", cosmags2, rr1) + n2c2 = np.einsum("jk,ijk->ij", cosmags2, rr2) + n1n2 = np.einsum("ik,jk->ij", cosmags1, cosmags2) + part1 = ct * n1c1 * n2c2 + part2 = n1c1 * n2c1 + n1c2 * n2c2 + + result = ( + n1c1 * n2c2 * sums[0] + + (2.0 * part1 - part2) * sums[1] + + (n1n2 + part1 - part2) * sums[2] + + (n1c2 - ct * n1c1) * (n2c1 - ct * n2c2) * sums[3] + ) + + # Give it a finishing touch! + result *= _meg_const / lr1lr2 + if volume_integral: + result *= r + else: # 'eeg' + result = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact) + result.shape = beta.shape + # Give it a finishing touch! + result *= _eeg_const + result /= lr1lr2 + # now we add them all up with weights + offset = 0 + result *= np.concatenate(w2s) + if w1 is not None: + result *= w1[:, np.newaxis] + for ii, w2 in enumerate(w2s): + out[ii] = np.sum(result[:, offset : offset + len(w2)], axis=sum_axis) + offset += len(w2) + return out + + +@fill_doc +def _do_self_dots(intrad, volume, coils, r0, ch_type, lut, n_fact, n_jobs): + """Perform the lead field dot product integrations. + + Parameters + ---------- + intrad : float + The integration radius. It is used to calculate beta as: + beta = (intrad * intrad) / (r1 * r2). + volume : bool + If True, perform volume integral. + coils : list of dict + The coils. + r0 : array, shape (3 x 1) + The origin of the sphere. + ch_type : str + The channel type. It can be 'meg' or 'eeg'. + lut : callable + Look-up table for evaluating Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + %(n_jobs)s + + Returns + ------- + products : array, shape (n_coils, n_coils) + The integration products. 
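+
+    Notes
+    -----
+    The product matrix is symmetric, so only one triangle is computed
+    directly; _do_self_dots_subset mirrors each row of results into the
+    corresponding column.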
+ """ + if ch_type == "eeg": + intrad = intrad * 0.7 + # convert to normalized distances from expansion center + rmags = [coil["rmag"] - r0[np.newaxis, :] for coil in coils] + rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags] + rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)] + cosmags = [coil["cosmag"] for coil in coils] + ws = [coil["w"] for coil in coils] + parallel, p_fun, n_jobs = parallel_func(_do_self_dots_subset, n_jobs) + prods = parallel( + p_fun(intrad, rmags, rlens, cosmags, ws, volume, lut, n_fact, ch_type, idx) + for idx in np.array_split(np.arange(len(rmags)), n_jobs) + ) + products = np.sum(prods, axis=0) + return products + + +def _do_self_dots_subset( + intrad, rmags, rlens, cosmags, ws, volume, lut, n_fact, ch_type, idx +): + """Parallelize.""" + # all possible combinations of two magnetometers + products = np.zeros((len(rmags), len(rmags))) + for ci1 in idx: + ci2 = ci1 + 1 + res = _fast_sphere_dot_r0( + intrad, + rmags[ci1], + rmags[:ci2], + rlens[ci1], + rlens[:ci2], + cosmags[ci1], + cosmags[:ci2], + ws[ci1], + ws[:ci2], + volume, + lut, + n_fact, + ch_type, + ) + products[ci1, :ci2] = res + products[:ci2, ci1] = res + return products + + +def _do_cross_dots(intrad, volume, coils1, coils2, r0, ch_type, lut, n_fact): + """Compute lead field dot product integrations between two coil sets. + + The code is a direct translation of MNE-C code found in + `mne_map_data/lead_dots.c`. + + Parameters + ---------- + intrad : float + The integration radius. It is used to calculate beta as: + beta = (intrad * intrad) / (r1 * r2). + volume : bool + If True, compute volume integral. + coils1 : list of dict + The original coils. + coils2 : list of dict + The coils to which data is being mapped. + r0 : array, shape (3 x 1). + The origin of the sphere. + ch_type : str + The channel type. It can be 'meg' or 'eeg' + lut : callable + Look-up table for evaluating Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + + Returns + ------- + products : array, shape (n_coils, n_coils) + The integration products. + """ + if ch_type == "eeg": + intrad = intrad * 0.7 + rmags1 = [coil["rmag"] - r0[np.newaxis, :] for coil in coils1] + rmags2 = [coil["rmag"] - r0[np.newaxis, :] for coil in coils2] + + rlens1 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags1] + rlens2 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags2] + + rmags1 = [r / rl[:, np.newaxis] for r, rl in zip(rmags1, rlens1)] + rmags2 = [r / rl[:, np.newaxis] for r, rl in zip(rmags2, rlens2)] + + ws1 = [coil["w"] for coil in coils1] + ws2 = [coil["w"] for coil in coils2] + + cosmags1 = [coil["cosmag"] for coil in coils1] + cosmags2 = [coil["cosmag"] for coil in coils2] + + products = np.zeros((len(rmags1), len(rmags2))) + for ci1 in range(len(coils1)): + res = _fast_sphere_dot_r0( + intrad, + rmags1[ci1], + rmags2, + rlens1[ci1], + rlens2, + cosmags1[ci1], + cosmags2, + ws1[ci1], + ws2, + volume, + lut, + n_fact, + ch_type, + ) + products[ci1, :] = res + return products + + +@fill_doc +def _do_surface_dots( + intrad, volume, coils, surf, sel, r0, ch_type, lut, n_fact, n_jobs +): + """Compute the map construction products. + + Parameters + ---------- + intrad : float + The integration radius. It is used to calculate beta as: + beta = (intrad * intrad) / (r1 * r2) + volume : bool + If True, compute a volume integral. + coils : list of dict + The coils. + surf : dict + The surface on which the field is interpolated. + sel : array + Indices of the surface vertices to select. 
+ r0 : array, shape (3 x 1) + The origin of the sphere. + ch_type : str + The channel type. It can be 'meg' or 'eeg'. + lut : callable + Look-up table for Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + %(n_jobs)s + + Returns + ------- + products : array, shape (n_coils, n_coils) + The integration products. + """ + # convert to normalized distances from expansion center + rmags = [coil["rmag"] - r0[np.newaxis, :] for coil in coils] + rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags] + rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)] + cosmags = [coil["cosmag"] for coil in coils] + ws = [coil["w"] for coil in coils] + rref = None + refl = None + # virt_ref = False + if ch_type == "eeg": + intrad = intrad * 0.7 + # The virtual ref code is untested and unused, so it is + # commented out for now + # if virt_ref: + # rref = virt_ref[np.newaxis, :] - r0[np.newaxis, :] + # refl = np.sqrt(np.sum(rref * rref, axis=1)) + # rref /= refl[:, np.newaxis] + + rsurf = surf["rr"][sel] - r0[np.newaxis, :] + lsurf = np.sqrt(np.sum(rsurf * rsurf, axis=1)) + rsurf /= lsurf[:, np.newaxis] + this_nn = surf["nn"][sel] + + # loop over the coils + parallel, p_fun, n_jobs = parallel_func(_do_surface_dots_subset, n_jobs) + prods = parallel( + p_fun( + intrad, + rsurf, + rmags, + rref, + refl, + lsurf, + rlens, + this_nn, + cosmags, + ws, + volume, + lut, + n_fact, + ch_type, + idx, + ) + for idx in np.array_split(np.arange(len(rmags)), n_jobs) + ) + products = np.sum(prods, axis=0) + return products + + +def _do_surface_dots_subset( + intrad, + rsurf, + rmags, + rref, + refl, + lsurf, + rlens, + this_nn, + cosmags, + ws, + volume, + lut, + n_fact, + ch_type, + idx, +): + """Parallelize. + + Parameters + ---------- + refl : array | None + If ch_type is 'eeg', the magnitude of position vector of the + virtual reference (never used). + lsurf : array + Magnitude of position vector of the surface points. + rlens : list of arrays of length n_coils + Magnitude of position vector. + this_nn : array, shape (n_vertices, 3) + Surface normals. + cosmags : list of array. + Direction of the integration points in the coils. + ws : list of array + Integration weights of the coils. + volume : bool + If True, compute volume integral. + lut : callable + Look-up table for evaluating Legendre polynomials. + n_fact : array + Coefficients in the integration sum. + ch_type : str + 'meg' or 'eeg' + idx : array, shape (n_coils x 1) + Index of coil. + + Returns + ------- + products : array, shape (n_coils, n_coils) + The integration products. + """ + products = _fast_sphere_dot_r0( + intrad, + rsurf, + rmags, + lsurf, + rlens, + this_nn, + cosmags, + None, + ws, + volume, + lut, + n_fact, + ch_type, + ).T + if rref is not None: + raise NotImplementedError # we don't ever use this, isn't tested + # vres = _fast_sphere_dot_r0( + # intrad, rref, rmags, refl, rlens, this_nn, cosmags, None, ws, + # volume, lut, n_fact, ch_type) + # products -= vres + return products diff --git a/mne/forward/_make_forward.py b/mne/forward/_make_forward.py new file mode 100644 index 0000000..64aadf6 --- /dev/null +++ b/mne/forward/_make_forward.py @@ -0,0 +1,937 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# The computations in this code were primarily derived from Matti Hämäläinen's +# C code. 
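+
+"""Forward solution (lead field) construction.
+
+A minimal usage sketch for the public entry point defined below,
+``make_forward_solution``; the file names here are illustrative placeholders
+only::
+
+    import mne
+
+    fwd = mne.make_forward_solution(
+        "sample_audvis_raw.fif",    # measurement info (path or mne.Info)
+        trans="sample-trans.fif",   # head <-> MRI coordinate transform
+        src="sample-oct6-src.fif",  # source space
+        bem="sample-bem-sol.fif",   # BEM solution
+        mindist=5.0,                # exclude sources within 5 mm of the inner skull
+    )
+    fwd.save("sample-fwd.fif", overwrite=True)
+"""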
+ +import os +import os.path as op +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import numpy as np + +from .._fiff.compensator import get_current_comp, make_compensator +from .._fiff.constants import FIFF, FWD +from .._fiff.meas_info import Info, read_info +from .._fiff.pick import _has_kit_refs, pick_info, pick_types +from .._fiff.tag import _loc_to_coil_trans, _loc_to_eeg_loc +from ..bem import ConductorModel, _bem_find_surface, read_bem_solution +from ..source_estimate import VolSourceEstimate +from ..source_space._source_space import ( + _complete_vol_src, + _ensure_src, + _filter_source_spaces, + _make_discrete_source_space, +) +from ..surface import _CheckInside, _normalize_vectors +from ..transforms import ( + Transform, + _coord_frame_name, + _ensure_trans, + _get_trans, + _print_coord_trans, + apply_trans, + invert_transform, + transform_surface_to, +) +from ..utils import _check_fname, _pl, _validate_type, logger, verbose, warn +from ._compute_forward import _compute_forwards +from .forward import _FWD_ORDER, Forward, _merge_fwds, convert_forward_solution + +_accuracy_dict = dict( + point=FWD.COIL_ACCURACY_POINT, + normal=FWD.COIL_ACCURACY_NORMAL, + accurate=FWD.COIL_ACCURACY_ACCURATE, +) +_extra_coil_def_fname = None + + +@verbose +def _read_coil_defs(verbose=None): + """Read a coil definition file. + + Parameters + ---------- + %(verbose)s + + Returns + ------- + res : list of dict + The coils. It is a dictionary with valid keys: + 'cosmag' | 'coil_class' | 'coord_frame' | 'rmag' | 'type' | + 'chname' | 'accuracy'. + cosmag contains the direction of the coils and rmag contains the + position vector. + + Notes + ----- + The global variable "_extra_coil_def_fname" can be used to prepend + additional definitions. These are never added to the registry. + """ + coil_dir = op.join(op.split(__file__)[0], "..", "data") + coils = list() + if _extra_coil_def_fname is not None: + coils += _read_coil_def_file(_extra_coil_def_fname, use_registry=False) + coils += _read_coil_def_file(op.join(coil_dir, "coil_def.dat")) + return coils + + +# Typically we only have 1 or 2 coil def files, but they can end up being +# read a lot. 
Let's keep a list of them and just reuse them:
+_coil_registry = {}
+
+
+def _read_coil_def_file(fname, use_registry=True):
+    """Read a coil def file."""
+    if not use_registry or fname not in _coil_registry:
+        big_val = 0.5
+        coils = list()
+        with open(fname) as fid:
+            lines = fid.readlines()
+        lines = lines[::-1]
+        while len(lines) > 0:
+            line = lines.pop().strip()
+            # skip blank and comment lines (check the length first so that an
+            # empty line cannot raise an IndexError)
+            if len(line) == 0 or line[0] == "#":
+                continue
+            desc_start = line.find('"')
+            desc_end = len(line) - 1
+            assert line.strip()[desc_end] == '"'
+            desc = line[desc_start:desc_end]
+            vals = np.fromstring(line[:desc_start].strip(), dtype=float, sep=" ")
+            assert len(vals) == 6
+            npts = int(vals[3])
+            coil = dict(
+                coil_type=vals[1],
+                coil_class=vals[0],
+                desc=desc,
+                accuracy=vals[2],
+                size=vals[4],
+                base=vals[5],
+            )
+            # get parameters of each component
+            rmag = list()
+            cosmag = list()
+            w = list()
+            for p in range(npts):
+                # get next non-comment line
+                line = lines.pop()
+                while line[0] == "#":
+                    line = lines.pop()
+                vals = np.fromstring(line, sep=" ")
+                if len(vals) != 7:
+                    raise RuntimeError(
+                        f"Could not interpret line {p + 1} as 7 points:\n{line}"
+                    )
+                # Read and verify data for each integration point
+                w.append(vals[0])
+                rmag.append(vals[[1, 2, 3]])
+                cosmag.append(vals[[4, 5, 6]])
+            w = np.array(w)
+            rmag = np.array(rmag)
+            cosmag = np.array(cosmag)
+            size = np.sqrt(np.sum(cosmag**2, axis=1))
+            if np.any(np.sqrt(np.sum(rmag**2, axis=1)) > big_val):
+                raise RuntimeError("Unreasonable integration point")
+            if np.any(size <= 0):
+                raise RuntimeError("Unreasonable normal")
+            cosmag /= size[:, np.newaxis]
+            coil.update(dict(w=w, cosmag=cosmag, rmag=rmag))
+            coils.append(coil)
+        if use_registry:
+            _coil_registry[fname] = coils
+    if use_registry:
+        coils = deepcopy(_coil_registry[fname])
+    logger.info("%d coil definition%s read", len(coils), _pl(coils))
+    return coils
+
+
+def _create_meg_coil(coilset, ch, acc, do_es):
+    """Create a coil definition using templates, transform if necessary."""
+    # Also change the coordinate frame if so desired
+    if ch["kind"] not in [FIFF.FIFFV_MEG_CH, FIFF.FIFFV_REF_MEG_CH]:
+        raise RuntimeError(f"{ch['ch_name']} is not a MEG channel")
+
+    # Simple linear search from the coil definitions
+    for coil in coilset:
+        if coil["coil_type"] == (ch["coil_type"] & 0xFFFF) and coil["accuracy"] == acc:
+            break
+    else:
+        raise RuntimeError(
+            "Desired coil definition not found "
+            f"(type = {ch['coil_type']} acc = {acc})"
+        )
+
+    # Apply a coordinate transformation if so desired
+    coil_trans = _loc_to_coil_trans(ch["loc"])
+
+    # Create the result
+    res = dict(
+        chname=ch["ch_name"],
+        coil_class=coil["coil_class"],
+        accuracy=coil["accuracy"],
+        base=coil["base"],
+        size=coil["size"],
+        type=ch["coil_type"],
+        w=coil["w"],
+        desc=coil["desc"],
+        coord_frame=FIFF.FIFFV_COORD_DEVICE,
+        rmag_orig=coil["rmag"],
+        cosmag_orig=coil["cosmag"],
+        coil_trans_orig=coil_trans,
+        r0=coil_trans[:3, 3],
+        rmag=apply_trans(coil_trans, coil["rmag"]),
+        cosmag=apply_trans(coil_trans, coil["cosmag"], False),
+    )
+    if do_es:
+        r0_exey = np.dot(coil["rmag"][:, :2], coil_trans[:3, :2].T) + coil_trans[:3, 3]
+        res.update(
+            ex=coil_trans[:3, 0],
+            ey=coil_trans[:3, 1],
+            ez=coil_trans[:3, 2],
+            r0_exey=r0_exey,
+        )
+    return res
+
+
+def _create_eeg_el(ch, t=None):
+    """Create an electrode definition, transform coords if necessary."""
+    if ch["kind"] != FIFF.FIFFV_EEG_CH:
+        raise RuntimeError(
+            f"{ch['ch_name']} is not an EEG channel. Cannot create an electrode "
+            "definition."
+ ) + if t is None: + t = Transform("head", "head") # identity, no change + if t.from_str != "head": + raise RuntimeError("Inappropriate coordinate transformation") + + r0ex = _loc_to_eeg_loc(ch["loc"]) + if r0ex.shape[1] == 1: # no reference + w = np.array([1.0]) + else: # has reference + w = np.array([1.0, -1.0]) + + # Optional coordinate transformation + r0ex = apply_trans(t["trans"], r0ex.T) + + # The electrode location + cosmag = r0ex.copy() + _normalize_vectors(cosmag) + res = dict( + chname=ch["ch_name"], + coil_class=FWD.COILC_EEG, + w=w, + accuracy=_accuracy_dict["normal"], + type=ch["coil_type"], + coord_frame=t["to"], + rmag=r0ex, + cosmag=cosmag, + ) + return res + + +def _create_meg_coils(chs, acc, t=None, coilset=None, do_es=False): + """Create a set of MEG coils in the head coordinate frame.""" + acc = _accuracy_dict[acc] if isinstance(acc, str) else acc + coilset = _read_coil_defs(verbose=False) if coilset is None else coilset + coils = [_create_meg_coil(coilset, ch, acc, do_es) for ch in chs] + _transform_orig_meg_coils(coils, t, do_es=do_es) + return coils + + +def _transform_orig_meg_coils(coils, t, do_es=True): + """Transform original (device) MEG coil positions.""" + if t is None: + return + for coil in coils: + coil_trans = np.dot(t["trans"], coil["coil_trans_orig"]) + coil.update( + coord_frame=t["to"], + r0=coil_trans[:3, 3], + rmag=apply_trans(coil_trans, coil["rmag_orig"]), + cosmag=apply_trans(coil_trans, coil["cosmag_orig"], False), + ) + if do_es: + r0_exey = ( + np.dot(coil["rmag_orig"][:, :2], coil_trans[:3, :2].T) + + coil_trans[:3, 3] + ) + coil.update( + ex=coil_trans[:3, 0], + ey=coil_trans[:3, 1], + ez=coil_trans[:3, 2], + r0_exey=r0_exey, + ) + + +def _create_eeg_els(chs): + """Create a set of EEG electrodes in the head coordinate frame.""" + return [_create_eeg_el(ch) for ch in chs] + + +@verbose +def _setup_bem(bem, bem_extra, neeg, mri_head_t, allow_none=False, verbose=None): + """Set up a BEM for forward computation, making a copy and modifying.""" + if allow_none and bem is None: + return None + logger.info("") + _validate_type(bem, ("path-like", ConductorModel), bem) + if not isinstance(bem, ConductorModel): + logger.info(f"Setting up the BEM model using {bem_extra}...\n") + bem = read_bem_solution(bem) + else: + bem = bem.copy() + if bem["is_sphere"]: + logger.info("Using the sphere model.\n") + if len(bem["layers"]) == 0 and neeg > 0: + raise RuntimeError( + "Spherical model has zero shells, cannot use with EEG data" + ) + if bem["coord_frame"] != FIFF.FIFFV_COORD_HEAD: + raise RuntimeError("Spherical model is not in head coordinates") + else: + if bem["surfs"][0]["coord_frame"] != FIFF.FIFFV_COORD_MRI: + raise RuntimeError( + f'BEM is in {_coord_frame_name(bem["surfs"][0]["coord_frame"])} ' + 'coordinates, should be in MRI' + ) + if neeg > 0 and len(bem["surfs"]) == 1: + raise RuntimeError( + "Cannot use a homogeneous (1-layer BEM) model " + "for EEG forward calculations, consider " + "using a 3-layer BEM instead" + ) + logger.info("Employing the head->MRI coordinate transform with the BEM model.") + # fwd_bem_set_head_mri_t: Set the coordinate transformation + bem["head_mri_t"] = _ensure_trans(mri_head_t, "head", "mri") + logger.info(f"BEM model {op.split(bem_extra)[1]} is now set up") + logger.info("") + return bem + + +@verbose +def _prep_meg_channels( + info, + accuracy="accurate", + exclude=(), + *, + ignore_ref=False, + head_frame=True, + do_es=False, + verbose=None, +): + """Prepare MEG coil definitions for forward calculation.""" + # 
Find MEG channels + ref_meg = True if not ignore_ref else False + picks = pick_types(info, meg=True, ref_meg=ref_meg, exclude=exclude) + + # Make sure MEG coils exist + if len(picks) <= 0: + raise RuntimeError("Could not find any MEG channels") + info_meg = pick_info(info, picks) + del picks + + # Get channel info and names for MEG channels + logger.info(f'Read {len(info_meg["chs"])} MEG channels from info') + + # Get MEG compensation channels + compensator = post_picks = None + ch_names = info_meg["ch_names"] + if not ignore_ref: + ref_picks = pick_types(info, meg=False, ref_meg=True, exclude=exclude) + ncomp = len(ref_picks) + if ncomp > 0: + logger.info(f"Read {ncomp} MEG compensation channels from info") + # We need to check to make sure these are NOT KIT refs + if _has_kit_refs(info, ref_picks): + raise NotImplementedError( + "Cannot create forward solution with KIT reference " + 'channels. Consider using "ignore_ref=True" in ' + "calculation" + ) + logger.info(f'{len(info["comps"])} compensation data sets in info') + # Compose a compensation data set if necessary + # adapted from mne_make_ctf_comp() from mne_ctf_comp.c + logger.info("Setting up compensation data...") + comp_num = get_current_comp(info) + if comp_num is None or comp_num == 0: + logger.info(" No compensation set. Nothing more to do.") + else: + compensator = make_compensator( + info_meg, 0, comp_num, exclude_comp_chs=False + ) + logger.info(f" Desired compensation data ({comp_num}) found.") + logger.info(" All compensation channels found.") + logger.info(" Preselector created.") + logger.info(" Compensation data matrix created.") + logger.info(" Postselector created.") + post_picks = pick_types(info_meg, meg=True, ref_meg=False, exclude=exclude) + ch_names = [ch_names[pick] for pick in post_picks] + + # Create coil descriptions with transformation to head or device frame + templates = _read_coil_defs() + + if head_frame: + _print_coord_trans(info["dev_head_t"]) + transform = info["dev_head_t"] + else: + transform = None + + megcoils = _create_meg_coils( + info_meg["chs"], accuracy, transform, templates, do_es=do_es + ) + + # Check that coordinate frame is correct and log it + if head_frame: + assert megcoils[0]["coord_frame"] == FIFF.FIFFV_COORD_HEAD + logger.info("MEG coil definitions created in head coordinates.") + else: + assert megcoils[0]["coord_frame"] == FIFF.FIFFV_COORD_DEVICE + logger.info("MEG coil definitions created in device coordinate.") + + return dict( + defs=megcoils, + ch_names=ch_names, + compensator=compensator, + info=info_meg, + post_picks=post_picks, + ) + + +@verbose +def _prep_eeg_channels(info, exclude=(), verbose=None): + """Prepare EEG electrode definitions for forward calculation.""" + info_extra = "info" + + # Find EEG electrodes + picks = pick_types(info, meg=False, eeg=True, ref_meg=False, exclude=exclude) + + # Make sure EEG electrodes exist + neeg = len(picks) + if neeg <= 0: + raise RuntimeError("Could not find any EEG channels") + + # Get channel info and names for EEG channels + eegchs = pick_info(info, picks)["chs"] + eegnames = [info["ch_names"][p] for p in picks] + logger.info(f"Read {len(picks):3} EEG channels from {info_extra}") + + # Create EEG electrode descriptions + eegels = _create_eeg_els(eegchs) + logger.info("Head coordinate coil definitions created.") + + return dict(defs=eegels, ch_names=eegnames) + + +@verbose +def _prepare_for_forward( + src, + mri_head_t, + info, + bem, + mindist, + n_jobs, + bem_extra="", + trans="", + info_extra="", + meg=True, + eeg=True, + 
ignore_ref=False,
+    allow_bem_none=False,
+    verbose=None,
+):
+    """Prepare for forward computation.
+
+    The sensors dict contains keys for each sensor type, e.g. 'meg', 'eeg'.
+    The value for each of these is a dict that comes from _prep_meg_channels
+    or _prep_eeg_channels. Each dict contains:
+
+    - defs : a list of dicts (one per channel) with 'rmag', 'cosmag', etc.
+    - ch_names : a list of str channel names corresponding to the defs
+    - compensator (optional) : the ndarray compensation matrix to apply
+    - post_picks (optional) : the ndarray of indices to pick after applying
+      the compensator
+    """
+    # Read the source locations
+    logger.info("")
+    # let's make a copy in case we modify something
+    src = _ensure_src(src).copy()
+    nsource = sum(s["nuse"] for s in src)
+    if nsource == 0:
+        raise RuntimeError(
+            "No sources are active in these source spaces. "
+            '"do_all" option should be used.'
+        )
+    logger.info(
+        "Read %d source spaces, a total of %d active source locations",
+        len(src),
+        nsource,
+    )
+    # Delete some keys to clean up the source space:
+    for key in ["working_dir", "command_line"]:
+        if key in src.info:
+            del src.info[key]
+
+    # Read the MRI -> head coordinate transformation
+    logger.info("")
+    _print_coord_trans(mri_head_t)
+
+    # make a new dict with the relevant information
+    arg_list = [info_extra, trans, src, bem_extra, meg, eeg, mindist, n_jobs, verbose]
+    cmd = f"make_forward_solution({', '.join(str(a) for a in arg_list)})"
+    mri_id = dict(machid=np.zeros(2, np.int32), version=0, secs=0, usecs=0)
+
+    info_trans = str(trans) if isinstance(trans, Path) else trans
+    info = Info(
+        chs=info["chs"],
+        comps=info["comps"],
+        dev_head_t=info["dev_head_t"],
+        mri_file=info_trans,
+        mri_id=mri_id,
+        meas_file=info_extra,
+        meas_id=None,
+        working_dir=os.getcwd(),
+        command_line=cmd,
+        bads=info["bads"],
+        mri_head_t=mri_head_t,
+    )
+    info._update_redundant()
+    info._check_consistency()
+    logger.info("")
+
+    sensors = dict()
+    if meg and len(pick_types(info, meg=True, ref_meg=False, exclude=[])) > 0:
+        sensors["meg"] = _prep_meg_channels(info, ignore_ref=ignore_ref)
+    if eeg and len(pick_types(info, eeg=True, exclude=[])) > 0:
+        sensors["eeg"] = _prep_eeg_channels(info)
+
+    # Check that some channels were found
+    if len(sensors) == 0:
+        raise RuntimeError("No MEG or EEG channels found.")
+
+    # pick out final info
+    info = pick_info(
+        info, pick_types(info, meg=meg, eeg=eeg, ref_meg=False, exclude=[])
+    )
+
+    # Transform the source spaces into the appropriate coordinates
+    # (will either be HEAD or MRI)
+    for s in src:
+        transform_surface_to(s, "head", mri_head_t)
+    logger.info(
+        f"Source spaces are now in {_coord_frame_name(s['coord_frame'])} coordinates."
+ ) + + # Prepare the BEM model + eegnames = sensors.get("eeg", dict()).get("ch_names", []) + bem = _setup_bem( + bem, bem_extra, len(eegnames), mri_head_t, allow_none=allow_bem_none + ) + del eegnames + + # Circumvent numerical problems by excluding points too close to the skull, + # and check that sensors are not inside any BEM surface + if bem is not None: + if not bem["is_sphere"]: + check_surface = "inner skull surface" + inner_skull = _bem_find_surface(bem, "inner_skull") + check_inside = _filter_source_spaces( + inner_skull, mindist, mri_head_t, src, n_jobs + ) + logger.info("") + if len(bem["surfs"]) == 3: + check_surface = "scalp surface" + check_inside = _CheckInside(_bem_find_surface(bem, "head")) + else: + check_surface = "outermost sphere shell" + if len(bem["layers"]) == 0: + + def check_inside(x): + return np.zeros(len(x), bool) + + else: + + def check_inside(x): + return ( + np.linalg.norm(x - bem["r0"], axis=1) < bem["layers"][-1]["rad"] + ) + + if "meg" in sensors: + meg_loc = apply_trans( + invert_transform(mri_head_t), + np.array([coil["r0"] for coil in sensors["meg"]["defs"]]), + ) + n_inside = check_inside(meg_loc).sum() + if n_inside: + raise RuntimeError( + f"Found {n_inside} MEG sensor{_pl(n_inside)} inside the " + f"{check_surface}, perhaps coordinate frames and/or " + "coregistration must be incorrect" + ) + + rr = np.concatenate([s["rr"][s["vertno"]] for s in src]) + if len(rr) < 1: + raise RuntimeError( + "No points left in source space after excluding " + "points close to inner skull." + ) + + # deal with free orientations: + source_nn = np.tile(np.eye(3), (len(rr), 1)) + update_kwargs = dict( + nchan=len(info["ch_names"]), + nsource=len(rr), + info=info, + src=src, + source_nn=source_nn, + source_rr=rr, + surf_ori=False, + mri_head_t=mri_head_t, + ) + return sensors, rr, info, update_kwargs, bem + + +@verbose +def make_forward_solution( + info, + trans, + src, + bem, + meg=True, + eeg=True, + *, + mindist=0.0, + ignore_ref=False, + n_jobs=None, + verbose=None, +): + """Calculate a forward solution for a subject. + + Parameters + ---------- + %(info_str)s + %(trans)s + + .. versionchanged:: 0.19 + Support for ``'fsaverage'`` argument. + src : path-like | instance of SourceSpaces + Either a path to a source space file or a loaded or generated + :class:`~mne.SourceSpaces`. + bem : path-like | ConductorModel + Filename of the BEM (e.g., ``"sample-5120-5120-5120-bem-sol.fif"``) to + use, or a loaded :class:`~mne.bem.ConductorModel`. See + :func:`~mne.make_bem_model` and :func:`~mne.make_bem_solution` to create a + :class:`mne.bem.ConductorModel`. + meg : bool + If True (default), include MEG computations. + eeg : bool + If True (default), include EEG computations. + mindist : float + Minimum distance of sources from inner skull surface (in mm). + ignore_ref : bool + If True, do not include reference channels in compensation. This + option should be True for KIT files, since forward computation + with reference channels is not currently supported. + %(n_jobs)s + %(verbose)s + + Returns + ------- + fwd : instance of Forward + The forward solution. + + See Also + -------- + convert_forward_solution + + Notes + ----- + The ``--grad`` option from MNE-C (to compute gradients) is not implemented + here. + + To create a fixed-orientation forward solution, use this function + followed by :func:`mne.convert_forward_solution`. + + .. 
note:: If the BEM solution was computed with
+              `OpenMEEG <https://openmeeg.github.io>`__ in
+              :func:`mne.make_bem_solution`, then OpenMEEG will automatically
+              be used to compute the forward solution.
+
+    .. versionchanged:: 1.2
+       Added support for OpenMEEG-based forward solution calculations.
+    """
+    # Currently not (sup)ported:
+    # 1. --grad option (gradients of the field, not used much)
+    # 2. --fixed option (can be computed post-hoc)
+    # 3. --mricoord option (probably not necessary)
+
+    # read the transformation from MRI to HEAD coordinates
+    # (could also be HEAD to MRI)
+    mri_head_t, trans = _get_trans(trans)
+    if isinstance(bem, ConductorModel):
+        bem_extra = "instance of ConductorModel"
+    else:
+        bem_extra = bem
+    _validate_type(info, ("path-like", Info), "info")
+    if not isinstance(info, Info):
+        info_extra = op.split(info)[1]
+        info = _check_fname(info, must_exist=True, overwrite="read", name="info")
+        info = read_info(info, verbose=False)
+    else:
+        info_extra = "instance of Info"
+
+    # Report the setup
+    logger.info(f"Source space          : {src}")
+    logger.info(f"MRI -> head transform : {trans}")
+    logger.info(f"Measurement data      : {info_extra}")
+    if isinstance(bem, ConductorModel) and bem["is_sphere"]:
+        logger.info(f"Sphere model          : origin at {bem['r0']} mm")
+        logger.info("Standard field computations")
+    else:
+        logger.info(f"Conductor model       : {bem_extra}")
+        logger.info("Accurate field computations")
+    logger.info(
+        "Do computations in %s coordinates", _coord_frame_name(FIFF.FIFFV_COORD_HEAD)
+    )
+    logger.info("Free source orientations")
+
+    # Create MEG coils and EEG electrodes in the head coordinate frame
+    sensors, rr, info, update_kwargs, bem = _prepare_for_forward(
+        src,
+        mri_head_t,
+        info,
+        bem,
+        mindist,
+        n_jobs,
+        bem_extra,
+        trans,
+        info_extra,
+        meg,
+        eeg,
+        ignore_ref,
+    )
+    del (src, mri_head_t, trans, info_extra, bem_extra, mindist, meg, eeg, ignore_ref)
+
+    # Time to do the heavy lifting: MEG first, then EEG
+    fwds = _compute_forwards(rr, bem=bem, sensors=sensors, n_jobs=n_jobs)
+
+    # merge forwards
+    fwds = {
+        key: _to_forward_dict(fwds[key], sensors[key]["ch_names"])
+        for key in _FWD_ORDER
+        if key in fwds
+    }
+    fwd = _merge_fwds(fwds, verbose=False)
+    del fwds
+    logger.info("")
+
+    # Don't transform the source spaces back into MRI coordinates (which is
+    # done in the C code) because mne-python assumes forward solution source
+    # spaces are in head coords.
+    fwd.update(**update_kwargs)
+    logger.info("Finished.")
+    return fwd
+
+
+@verbose
+def make_forward_dipole(dipole, bem, info, trans=None, n_jobs=None, *, verbose=None):
+    """Convert dipole object to source estimate and calculate forward operator.
+
+    The instance of Dipole is converted to a discrete source space,
+    which is then combined with a BEM or a sphere model and
+    the sensor information in info to form a forward operator.
+
+    The source estimate object (with the forward operator) can be projected to
+    sensor-space using :func:`mne.simulation.simulate_evoked`.
+
+    .. note:: If the (unique) time points of the dipole object are unevenly
+              spaced, the first output will be a list of single-timepoint
+              source estimates.
+
+    Parameters
+    ----------
+    %(dipole)s
+    bem : str | dict
+        The BEM filename (str) or a loaded sphere model (dict).
+    info : instance of Info
+        The measurement information dictionary. It is sensor-information etc.,
+        e.g., from a real data file.
+    trans : str | None
+        The head<->MRI transform filename. Must be provided unless BEM
+        is a sphere model.
+ %(n_jobs)s + %(verbose)s + + Returns + ------- + fwd : instance of Forward + The forward solution corresponding to the source estimate(s). + stc : instance of VolSourceEstimate | list of VolSourceEstimate + The dipoles converted to a discrete set of points and associated + time courses. If the time points of the dipole are unevenly spaced, + a list of single-timepoint source estimates are returned. + + See Also + -------- + mne.simulation.simulate_evoked + + Notes + ----- + .. versionadded:: 0.12.0 + """ + if isinstance(dipole, list): + from ..dipole import _concatenate_dipoles # To avoid circular import + + dipole = _concatenate_dipoles(dipole) + + # Make copies to avoid mangling original dipole + times = dipole.times.copy() + pos = dipole.pos.copy() + amplitude = dipole.amplitude.copy() + ori = dipole.ori.copy() + + # Convert positions to discrete source space (allows duplicate rr & nn) + # NB information about dipole orientation enters here, then no more + sources = dict(rr=pos, nn=ori) + # Dipole objects must be in the head frame + src = _complete_vol_src([_make_discrete_source_space(sources, coord_frame="head")]) + + # Forward operator created for channels in info (use pick_info to restrict) + # Use defaults for most params, including min_dist + fwd = make_forward_solution(info, trans, src, bem, n_jobs=n_jobs, verbose=verbose) + # Convert from free orientations to fixed (in-place) + convert_forward_solution( + fwd, surf_ori=False, force_fixed=True, copy=False, use_cps=False, verbose=None + ) + + # Check for omissions due to proximity to inner skull in + # make_forward_solution, which will result in an exception + if fwd["src"][0]["nuse"] != len(pos): + inuse = fwd["src"][0]["inuse"].astype(bool) + head = "The following dipoles are outside the inner skull boundary" + msg = len(head) * "#" + "\n" + head + "\n" + for t, pos in zip(times[np.logical_not(inuse)], pos[np.logical_not(inuse)]): + msg += ( + f" t={t * 1000.0:.0f} ms, pos=({pos[0] * 1000.0:.0f}, " + f"{pos[1] * 1000.0:.0f}, {pos[2] * 1000.0:.0f}) mm\n" + ) + msg += len(head) * "#" + logger.error(msg) + raise ValueError("One or more dipoles outside the inner skull.") + + # multiple dipoles (rr and nn) per time instant allowed + # uneven sampling in time returns list + timepoints = np.unique(times) + if len(timepoints) > 1: + tdiff = np.diff(timepoints) + if not np.allclose(tdiff, tdiff[0]): + warn( + "Unique time points of dipoles unevenly spaced: returned " + "stc will be a list, one for each time point." 
+ ) + tstep = -1.0 + else: + tstep = tdiff[0] + elif len(timepoints) == 1: + tstep = 0.001 + + # Build the data matrix, essentially a block-diagonal with + # n_rows: number of dipoles in total (dipole.amplitudes) + # n_cols: number of unique time points in dipole.times + # amplitude with identical value of times go together in one col (others=0) + data = np.zeros((len(amplitude), len(timepoints))) # (n_d, n_t) + row = 0 + for tpind, tp in enumerate(timepoints): + amp = amplitude[np.isin(times, tp)] + data[row : row + len(amp), tpind] = amp + row += len(amp) + + if tstep > 0: + stc = VolSourceEstimate( + data, + vertices=[fwd["src"][0]["vertno"]], + tmin=timepoints[0], + tstep=tstep, + subject=None, + ) + else: # Must return a list of stc, one for each time point + stc = [] + for col, tp in enumerate(timepoints): + stc += [ + VolSourceEstimate( + data[:, col][:, np.newaxis], + vertices=[fwd["src"][0]["vertno"]], + tmin=tp, + tstep=0.001, + subject=None, + ) + ] + return fwd, stc + + +def _to_forward_dict( + fwd, + names, + fwd_grad=None, + coord_frame=FIFF.FIFFV_COORD_HEAD, + source_ori=FIFF.FIFFV_MNE_FREE_ORI, +): + """Convert forward solution matrices to dicts.""" + assert names is not None + sol = dict( + data=fwd.T, nrow=fwd.shape[1], ncol=fwd.shape[0], row_names=names, col_names=[] + ) + fwd = Forward( + sol=sol, + source_ori=source_ori, + nsource=sol["ncol"], + coord_frame=coord_frame, + sol_grad=None, + nchan=sol["nrow"], + _orig_source_ori=source_ori, + _orig_sol=sol["data"].copy(), + _orig_sol_grad=None, + ) + if fwd_grad is not None: + sol_grad = dict( + data=fwd_grad.T, + nrow=fwd_grad.shape[1], + ncol=fwd_grad.shape[0], + row_names=names, + col_names=[], + ) + fwd.update(dict(sol_grad=sol_grad), _orig_sol_grad=sol_grad["data"].copy()) + return fwd + + +@contextmanager +def use_coil_def(fname): + """Use a custom coil definition file. + + Parameters + ---------- + fname : path-like + The filename of the coil definition file. + + Returns + ------- + context : contextmanager + The context for using the coil definition. + + Notes + ----- + This is meant to be used a context manager such as: + + >>> with use_coil_def(my_fname): # doctest:+SKIP + ... make_forward_solution(...) + + This allows using custom coil definitions with functions that require + forward modeling. + """ + global _extra_coil_def_fname + _extra_coil_def_fname = fname + try: + yield + finally: + _extra_coil_def_fname = None diff --git a/mne/forward/forward.py b/mne/forward/forward.py new file mode 100644 index 0000000..e3e5c08 --- /dev/null +++ b/mne/forward/forward.py @@ -0,0 +1,2203 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +# The computations in this code were primarily derived from Matti Hämäläinen's +# C code. 
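+
+"""Forward solution container (``Forward``) and I/O.
+
+A minimal usage sketch for the reader and converter defined in this module;
+the forward file name is an illustrative placeholder::
+
+    import mne
+
+    fwd = mne.read_forward_solution("sample-fwd.fif")
+    # force_fixed=True implies surface-based orientations (surf_ori=True)
+    fwd = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True)
+    n_channels, n_sources = fwd["sol"]["data"].shape
+"""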
+ +import os +import re +import shutil +import tempfile +from copy import deepcopy +from os import PathLike +from os import path as op +from pathlib import Path +from time import time + +import numpy as np +from scipy import sparse + +from .._fiff.constants import FIFF +from .._fiff.matrix import ( + _read_named_matrix, + _transpose_named_matrix, + write_named_matrix, +) +from .._fiff.meas_info import ( + Info, + _make_ch_names_mapping, + _read_bad_channels, + _read_extended_ch_info, + _write_bad_channels, + _write_ch_infos, + write_info, +) +from .._fiff.open import fiff_open +from .._fiff.pick import pick_channels, pick_channels_forward, pick_info, pick_types +from .._fiff.tag import find_tag, read_tag +from .._fiff.tree import dir_tree_find +from .._fiff.write import ( + end_block, + start_and_end_file, + start_block, + write_coord_trans, + write_id, + write_int, + write_string, +) +from ..epochs import BaseEpochs +from ..evoked import Evoked, EvokedArray +from ..html_templates import _get_html_template +from ..io import BaseRaw, RawArray +from ..label import Label +from ..source_estimate import _BaseSourceEstimate, _BaseVectorSourceEstimate +from ..source_space._source_space import ( + SourceSpaces, + _get_src_nn, + _read_source_spaces_from_tree, + _set_source_space_vertices, + _src_kind_dict, + _write_source_spaces_to_fid, + find_source_space_hemi, +) +from ..surface import _normal_orth +from ..transforms import invert_transform, transform_surface_to, write_trans +from ..utils import ( + _check_compensation_grade, + _check_fname, + _check_option, + _check_stc_units, + _import_h5io_funcs, + _on_missing, + _stamp_to_dt, + _validate_type, + check_fname, + fill_doc, + get_subjects_dir, + has_mne_c, + logger, + repr_html, + run_subprocess, + verbose, + warn, +) + + +class Forward(dict): + """Forward class to represent info from forward solution. + + Like :class:`mne.Info`, this data structure behaves like a dictionary. + It contains all metadata necessary for a forward solution. + + .. warning:: + This class should not be modified or created by users. + Forward objects should be obtained using + :func:`mne.make_forward_solution` or :func:`mne.read_forward_solution`. + + Attributes + ---------- + ch_names : list of str + A convenience wrapper accessible as ``fwd.ch_names`` which wraps + ``fwd['info']['ch_names']``. + + See Also + -------- + mne.make_forward_solution + mne.read_forward_solution + + Notes + ----- + Forward data is accessible via string keys using standard + :class:`python:dict` access (e.g., ``fwd['nsource'] == 4096``): + + source_ori : int + The source orientation, either ``FIFF.FIFFV_MNE_FIXED_ORI`` or + ``FIFF.FIFFV_MNE_FREE_ORI``. + coord_frame : int + The coordinate frame of the forward solution, usually + ``FIFF.FIFFV_COORD_HEAD``. + nsource : int + The number of source locations. + nchan : int + The number of channels. + sol : dict + The forward solution, with entries: + + ``'data'`` : ndarray, shape (n_channels, nsource * n_ori) + The forward solution data. The shape will be + ``(n_channels, nsource)`` for a fixed-orientation forward and + ``(n_channels, nsource * 3)`` for a free-orientation forward. + ``'row_names'`` : list of str + The channel names. + mri_head_t : instance of Transform + The mri ↔ head transformation that was used. + info : instance of :class:`~mne.Info` + The measurement information (with contents reduced compared to that + of the original data). + src : instance of :class:`~mne.SourceSpaces` + The source space used during forward computation. 
This can differ
+        from the original source space as:
+
+        1. Source points are removed due to proximity to (or existing
+           outside) the inner skull surface.
+        2. The source space will be converted to the ``coord_frame`` of the
+           forward solution, which typically means it gets converted from
+           MRI to head coordinates.
+    source_rr : ndarray, shape (n_sources, 3)
+        The source locations.
+    source_nn : ndarray, shape (n_sources, 3)
+        The source normals. Will be all +Z (``(0, 0, 1.)``) for volume
+        source spaces. For surface source spaces, these are normal to the
+        cortical surface.
+    surf_ori : int
+        Whether ``sol`` is surface-oriented with the surface normal in the
+        Z component (``FIFF.FIFFV_MNE_FIXED_ORI``) or +Z in the given
+        ``coord_frame`` in the Z component (``FIFF.FIFFV_MNE_FREE_ORI``).
+
+    Forward objects also have some attributes that are accessible via ``.``
+    access, like ``fwd.ch_names``.
+    """
+
+    def copy(self):
+        """Copy the Forward instance."""
+        return Forward(deepcopy(self))
+
+    @verbose
+    def save(self, fname, *, overwrite=False, verbose=None):
+        """Save the forward solution.
+
+        Parameters
+        ----------
+        %(fname_fwd)s
+        %(overwrite)s
+        %(verbose)s
+        """
+        write_forward_solution(fname, self, overwrite=overwrite)
+
+    def _get_src_type_and_ori_for_repr(self):
+        src_types = np.array([src["type"] for src in self["src"]])
+
+        if (src_types == "surf").all():
+            src_type = f"Surface with {self['nsource']} vertices"
+        elif (src_types == "vol").all():
+            src_type = f"Volume with {self['nsource']} grid points"
+        elif (src_types == "discrete").all():
+            src_type = f"Discrete with {self['nsource']} dipoles"
+        else:
+            count_string = ""
+            if (src_types == "surf").any():
+                count_string += f"{(src_types == 'surf').sum()} surface, "
+            if (src_types == "vol").any():
+                count_string += f"{(src_types == 'vol').sum()} volume, "
+            if (src_types == "discrete").any():
+                count_string += f"{(src_types == 'discrete').sum()} discrete, "
+            count_string = count_string.rstrip(", ")
+            src_type = f"Mixed ({count_string}) with {self['nsource']} vertices"
+
+        if self["source_ori"] == FIFF.FIFFV_MNE_UNKNOWN_ORI:
+            src_ori = "Unknown"
+        elif self["source_ori"] == FIFF.FIFFV_MNE_FIXED_ORI:
+            src_ori = "Fixed"
+        elif self["source_ori"] == FIFF.FIFFV_MNE_FREE_ORI:
+            src_ori = "Free"
+
+        return src_type, src_ori
+
+    def __repr__(self):
+        """Summarize forward info instead of printing all."""
+        entr = "<Forward"
+        nchan = len(pick_types(self["info"], meg=True, eeg=False, exclude=[]))
+        entr += f" | MEG channels: {nchan}"
+        nchan = len(pick_types(self["info"], meg=False, eeg=True, exclude=[]))
+        entr += f" | EEG channels: {nchan}"
+        src_type, src_ori = self._get_src_type_and_ori_for_repr()
+        entr += f" | Source space: {src_type}"
+        entr += f" | Source orientation: {src_ori}"
+        entr += ">"
+        return entr
+
+    @property
+    def ch_names(self):
+        """Channel names."""
+        return self["info"]["ch_names"]
+
+
+def _block_diag(A, n):
+    """Construct a sparse block-diagonal matrix from a packed dense matrix.
+
+    ``A`` is ``ma x na``, comprising ``bdn = na / n`` blocks of ``ma x n``
+    submatrices, which are placed down the diagonal of the result.
+    """
+    if sparse.issparse(A):  # the sparse reversal is not implemented
+        raise NotImplementedError("sparse reversal not implemented yet")
+    ma, na = A.shape
+    bdn = na // int(n)  # number of submatrices
+
+    if na % n > 0:
+        raise ValueError("Width of matrix must be a multiple of n")
+
+    tmp = np.arange(ma * bdn, dtype=np.int64).reshape(bdn, ma)
+    tmp = np.tile(tmp, (1, n))
+    ii = tmp.ravel()
+
+    jj = np.arange(na, dtype=np.int64)[None, :]
+    jj = jj * np.ones(ma, dtype=np.int64)[:, None]
+    jj = jj.T.ravel()  # column indices foreach sparse bd
+
+    bd = sparse.coo_array((A.T.ravel(), np.c_[ii, jj].T)).tocsc()
+
+    return bd
+
+
+def _get_tag_int(fid, node, name, id_):
+    """Check we have an appropriate tag."""
+    tag = find_tag(fid, node, id_)
+    if tag is None:
+        fid.close()
+        raise ValueError(name + " tag not found")
+    return int(tag.data.item())
+
+
+def _read_one(fid, node):
+    """Read all interesting stuff for one forward solution."""
+    # This function assumes the fid is open as a context manager
+    if node is None:
+        return None
+
+    one = Forward()
+    one["source_ori"] = _get_tag_int(
+        fid, node, "Source orientation", FIFF.FIFF_MNE_SOURCE_ORIENTATION
+    )
+    one["coord_frame"] = _get_tag_int(
+        fid, node, "Coordinate frame", FIFF.FIFF_MNE_COORD_FRAME
+    )
+    one["nsource"] = _get_tag_int(
+        fid, node, "Number of sources", FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS
+    )
+    one["nchan"] = _get_tag_int(fid, node, "Number of channels", FIFF.FIFF_NCHAN)
+    try:
+        one["sol"] = _read_named_matrix(
+            fid, node, FIFF.FIFF_MNE_FORWARD_SOLUTION, transpose=True
+        )
+        one["_orig_sol"] = one["sol"]["data"].copy()
+    except Exception:
+        logger.error("Forward solution data not found")
+        raise
+
+    try:
+        fwd_type = FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD
+        one["sol_grad"] = _read_named_matrix(fid, node, fwd_type, transpose=True)
+        one["_orig_sol_grad"] = one["sol_grad"]["data"].copy()
+    except Exception:
+        one["sol_grad"] = None
+
+    if one["sol"]["data"].shape[0] != one["nchan"] or (
+        one["sol"]["data"].shape[1] != one["nsource"]
+        and one["sol"]["data"].shape[1] != 3 * one["nsource"]
+    ):
+        raise ValueError("Forward solution matrix has wrong dimensions")
+
+    if one["sol_grad"] is not None:
+        if one["sol_grad"]["data"].shape[0] != one["nchan"] or (
+            one["sol_grad"]["data"].shape[1] != 3 * one["nsource"]
+            and one["sol_grad"]["data"].shape[1] != 3 * 3 * one["nsource"]
+        ):
+            raise ValueError("Forward solution gradient matrix has wrong dimensions")
+
+    return one
+
+
+@fill_doc
+def _read_forward_meas_info(tree, fid):
+    """Read light measurement info from forward operator.
+
+    Parameters
+    ----------
+    tree : tree
+        FIF tree structure.
+    fid : file id
+        The file id.
+
+    Returns
+    -------
+    %(info_not_none)s
+    """
+    # This function assumes fid is being used as a context manager
+    info = Info()
+    info._unlocked = True
+
+    # Information from the MRI file
+    parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE)
+    if len(parent_mri) == 0:
+        raise ValueError("No parent MRI information found in operator")
+    parent_mri = parent_mri[0]
+
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_FILE_NAME)
+    info["mri_file"] = tag.data if tag is not None else None
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_PARENT_FILE_ID)
+    info["mri_id"] = tag.data if tag is not None else None
+
+    # Information from the MEG file
+    parent_meg = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MEAS_FILE)
+    if len(parent_meg) == 0:
+        raise ValueError("No parent MEG information found in operator")
+    parent_meg = parent_meg[0]
+
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_MNE_FILE_NAME)
+    info["meas_file"] = tag.data if tag is not None else None
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_PARENT_FILE_ID)
+    info["meas_id"] = tag.data if tag is not None else None
+
+    # Add channel information
+    info["chs"] = chs = list()
+    for k in range(parent_meg["nent"]):
+        kind = parent_meg["directory"][k].kind
+        pos = parent_meg["directory"][k].pos
+        if kind == FIFF.FIFF_CH_INFO:
+            tag = read_tag(fid, pos)
+            chs.append(tag.data)
+    ch_names_mapping = _read_extended_ch_info(chs, parent_meg, fid)
+    info._update_redundant()
+
+    # Get the MRI <-> head coordinate transformation
+    tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS)
+    coord_head = FIFF.FIFFV_COORD_HEAD
+    coord_mri = FIFF.FIFFV_COORD_MRI
+    coord_device = FIFF.FIFFV_COORD_DEVICE
+    coord_ctf_head = FIFF.FIFFV_MNE_COORD_CTF_HEAD
+    if tag is None:
+        raise ValueError("MRI/head coordinate transformation not found")
+    cand = tag.data
+    if cand["from"] == coord_mri and cand["to"] == coord_head:
+        info["mri_head_t"] = cand
+    else:
+        raise ValueError("MRI/head coordinate transformation not found")
+
+    # Get the MEG device <-> head coordinate transformation
+    tag = find_tag(fid, parent_meg, FIFF.FIFF_COORD_TRANS)
+    if tag is None:
+        raise ValueError("MEG/head coordinate transformation not found")
+    cand = tag.data
+    if cand["from"] ==
coord_device and cand["to"] == coord_head: + info["dev_head_t"] = cand + elif cand["from"] == coord_ctf_head and cand["to"] == coord_head: + info["ctf_head_t"] = cand + else: + raise ValueError("MEG/head coordinate transformation not found") + + bads = _read_bad_channels(fid, parent_meg, ch_names_mapping=ch_names_mapping) + # clean up our bad list, old versions could have non-existent bads + info["bads"] = [bad for bad in bads if bad in info["ch_names"]] + + # Check if a custom reference has been applied + tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_CUSTOM_REF) + if tag is None: + tag = find_tag(fid, parent_mri, 236) # Constant 236 used before v0.11 + + info["custom_ref_applied"] = int(tag.data.item()) if tag is not None else False + info._unlocked = False + return info + + +def _subject_from_forward(forward): + """Get subject id from inverse operator.""" + return forward["src"]._subject + + +# This sets the forward solution order (and gives human-readable names) +_FWD_ORDER = dict( + meg="MEG", + eeg="EEG", +) + + +@verbose +def _merge_fwds(fwds, *, verbose=None): + """Merge loaded forward dicts into one dict.""" + fwd = None + first_key = None + combined = list() + for key in _FWD_ORDER: + if key not in fwds: + continue + if fwd is None: # assign + fwd = fwds[key] + first_key = key + combined.append(_FWD_ORDER[key]) + continue + a = fwd + b = fwds[key] + a_kind, b_kind = _FWD_ORDER[first_key], _FWD_ORDER[key] + combined.append(b_kind) + if ( + a["sol"]["data"].shape[1] != b["sol"]["data"].shape[1] + or a["source_ori"] != b["source_ori"] + or a["nsource"] != b["nsource"] + or a["coord_frame"] != b["coord_frame"] + ): + raise ValueError( + f"The {a_kind} and {b_kind} forward solutions do not match" + ) + for k in ("sol", "sol_grad"): + if a[k] is None: + continue + a[k]["data"] = np.r_[a[k]["data"], b[k]["data"]] + a[f"_orig_{k}"] = np.r_[a[f"_orig_{k}"], b[f"_orig_{k}"]] + a[k]["nrow"] = a[k]["nrow"] + b[k]["nrow"] + a[k]["row_names"] = a[k]["row_names"] + b[k]["row_names"] + a["nchan"] = a["nchan"] + b["nchan"] + if len(fwds) > 1: + logger.info(f' Forward solutions combined: {", ".join(combined)}') + return fwd + + +@verbose +def read_forward_solution(fname, include=(), exclude=(), *, ordered=True, verbose=None): + """Read a forward solution a.k.a. lead field. + + Parameters + ---------- + fname : path-like + The file name, which should end with ``-fwd.fif``, ``-fwd.fif.gz``, + ``_fwd.fif``, ``_fwd.fif.gz``, ``-fwd.h5``, or ``_fwd.h5``. + include : list, optional + List of names of channels to include. If empty all channels + are included. + exclude : list, optional + List of names of channels to exclude. If empty include all channels. + %(ordered)s + %(verbose)s + + Returns + ------- + fwd : instance of Forward + The forward solution. + + See Also + -------- + write_forward_solution, make_forward_solution + + Notes + ----- + Forward solutions, which are derived from an original forward solution with + free orientation, are always stored on disk as forward solution with free + orientation in X/Y/Z RAS coordinates. To apply any transformation to the + forward operator (surface orientation, fixed orientation) please apply + :func:`convert_forward_solution` after reading the forward solution with + :func:`read_forward_solution`. + + Forward solutions, which are derived from an original forward solution with + fixed orientation, are stored on disk as forward solution with fixed + surface-based orientations. 
Please note that the transformation to + surface-based, fixed orientation cannot be reverted after loading the + forward solution with :func:`read_forward_solution`. + """ + check_fname( + fname, + "forward", + ("-fwd.fif", "-fwd.fif.gz", "_fwd.fif", "_fwd.fif.gz", "-fwd.h5", "_fwd.h5"), + ) + fname = _check_fname(fname=fname, must_exist=True, overwrite="read") + # Open the file, create directory + logger.info(f"Reading forward solution from {fname}...") + if fname.suffix == ".h5": + return _read_forward_hdf5(fname) + f, tree, _ = fiff_open(fname) + with f as fid: + # Find all forward solutions + fwds = dir_tree_find(tree, FIFF.FIFFB_MNE_FORWARD_SOLUTION) + if len(fwds) == 0: + raise ValueError(f"No forward solutions in {fname}") + + # Parent MRI data + parent_mri = dir_tree_find(tree, FIFF.FIFFB_MNE_PARENT_MRI_FILE) + if len(parent_mri) == 0: + raise ValueError(f"No parent MRI information in {fname}") + parent_mri = parent_mri[0] + + src = _read_source_spaces_from_tree(fid, tree, patch_stats=False) + for s in src: + s["id"] = find_source_space_hemi(s) + + fwd = None + + # Locate and read the forward solutions + megnode = None + eegnode = None + for k in range(len(fwds)): + tag = find_tag(fid, fwds[k], FIFF.FIFF_MNE_INCLUDED_METHODS) + if tag is None: + raise ValueError("Methods not listed for one of the forward solutions") + + if tag.data == FIFF.FIFFV_MNE_MEG: + megnode = fwds[k] + elif tag.data == FIFF.FIFFV_MNE_EEG: + eegnode = fwds[k] + + fwds = dict() + megfwd = _read_one(fid, megnode) + if megfwd is not None: + fwds["meg"] = megfwd + if is_fixed_orient(megfwd): + ori = "fixed" + else: + ori = "free" + logger.info( + " Read MEG forward solution (%d sources, " + "%d channels, %s orientations)", + megfwd["nsource"], + megfwd["nchan"], + ori, + ) + del megfwd + + eegfwd = _read_one(fid, eegnode) + if eegfwd is not None: + fwds["eeg"] = eegfwd + if is_fixed_orient(eegfwd): + ori = "fixed" + else: + ori = "free" + logger.info( + " Read EEG forward solution (%d sources, " + "%d channels, %s orientations)", + eegfwd["nsource"], + eegfwd["nchan"], + ori, + ) + del eegfwd + + fwd = _merge_fwds(fwds) + del fwds + + # Get the MRI <-> head coordinate transformation + tag = find_tag(fid, parent_mri, FIFF.FIFF_COORD_TRANS) + if tag is None: + raise ValueError("MRI/head coordinate transformation not found") + mri_head_t = tag.data + if ( + mri_head_t["from"] != FIFF.FIFFV_COORD_MRI + or mri_head_t["to"] != FIFF.FIFFV_COORD_HEAD + ): + mri_head_t = invert_transform(mri_head_t) + if ( + mri_head_t["from"] != FIFF.FIFFV_COORD_MRI + or mri_head_t["to"] != FIFF.FIFFV_COORD_HEAD + ): + fid.close() + raise ValueError("MRI/head coordinate transformation not found") + fwd["mri_head_t"] = mri_head_t + + # + # get parent MEG info + # + fwd["info"] = _read_forward_meas_info(tree, fid) + + # MNE environment + parent_env = dir_tree_find(tree, FIFF.FIFFB_MNE_ENV) + if len(parent_env) > 0: + parent_env = parent_env[0] + tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_WORKING_DIR) + if tag is not None: + with fwd["info"]._unlock(): + fwd["info"]["working_dir"] = tag.data + tag = find_tag(fid, parent_env, FIFF.FIFF_MNE_ENV_COMMAND_LINE) + if tag is not None: + with fwd["info"]._unlock(): + fwd["info"]["command_line"] = tag.data + + # Transform the source spaces to the correct coordinate frame + # if necessary + + # Make sure forward solution is in either the MRI or HEAD coordinate frame + if fwd["coord_frame"] not in (FIFF.FIFFV_COORD_MRI, FIFF.FIFFV_COORD_HEAD): + raise ValueError( + "Only forward solutions 
computed in MRI or head " + "coordinates are acceptable" + ) + + # Transform each source space to the HEAD or MRI coordinate frame, + # depending on the coordinate frame of the forward solution + # NOTE: the function transform_surface_to will also work on discrete and + # volume sources + nuse = 0 + for s in src: + try: + s = transform_surface_to(s, fwd["coord_frame"], mri_head_t) + except Exception as inst: + raise ValueError(f"Could not transform source space ({inst})") + + nuse += s["nuse"] + + # Make sure the number of sources match after transformation + if nuse != fwd["nsource"]: + raise ValueError("Source spaces do not match the forward solution.") + + logger.info( + " Source spaces transformed to the forward solution coordinate frame" + ) + fwd["src"] = src + + # Handle the source locations and orientations + fwd["source_rr"] = np.concatenate([ss["rr"][ss["vertno"], :] for ss in src], axis=0) + + # Store original source orientations + fwd["_orig_source_ori"] = fwd["source_ori"] + + # Deal with include and exclude + pick_channels_forward(fwd, include=include, exclude=exclude, copy=False) + + if is_fixed_orient(fwd, orig=True): + fwd["source_nn"] = np.concatenate( + [_src["nn"][_src["vertno"], :] for _src in fwd["src"]], axis=0 + ) + fwd["source_ori"] = FIFF.FIFFV_MNE_FIXED_ORI + fwd["surf_ori"] = True + else: + fwd["source_nn"] = np.kron(np.ones((fwd["nsource"], 1)), np.eye(3)) + fwd["source_ori"] = FIFF.FIFFV_MNE_FREE_ORI + fwd["surf_ori"] = False + return Forward(fwd) + + +@verbose +def convert_forward_solution( + fwd, surf_ori=False, force_fixed=False, copy=True, use_cps=True, *, verbose=None +): + """Convert forward solution between different source orientations. + + Parameters + ---------- + fwd : Forward + The forward solution to modify. + surf_ori : bool, optional (default False) + Use surface-based source coordinate system? Note that force_fixed=True + implies surf_ori=True. + force_fixed : bool, optional (default False) + If True, force fixed source orientation mode. + copy : bool + Whether to return a new instance or modify in place. + %(use_cps)s + %(verbose)s + + Returns + ------- + fwd : Forward + The modified forward solution. + """ + fwd = fwd.copy() if copy else fwd + + if force_fixed is True: + surf_ori = True + + if any([src["type"] == "vol" for src in fwd["src"]]) and force_fixed: + raise ValueError( + "Forward operator was generated with sources from a " + "volume source space. Conversion to fixed orientation is not " + "possible. Consider using a discrete source space if you have " + "meaningful normal orientations." + ) + + if surf_ori and use_cps: + if any(s.get("patch_inds") is not None for s in fwd["src"]): + logger.info( + " Average patch normals will be employed in " + "the rotation to the local surface coordinates.." + ".." + ) + else: + use_cps = False + logger.info( + " No patch info available. The standard source " + "space normals will be employed in the rotation " + "to the local surface coordinates...." + ) + + # We need to change these entries (only): + # 1. source_nn + # 2. sol['data'] + # 3. sol['ncol'] + # 4. sol_grad['data'] + # 5. sol_grad['ncol'] + # 6. source_ori + + if is_fixed_orient(fwd, orig=True) or (force_fixed and not use_cps): + # Fixed + fwd["source_nn"] = np.concatenate( + [_get_src_nn(s, use_cps) for s in fwd["src"]], axis=0 + ) + if not is_fixed_orient(fwd, orig=True): + logger.info( + " Changing to fixed-orientation forward " + "solution with surface-based source orientations..." 
+ ) + fix_rot = _block_diag(fwd["source_nn"].T, 1) + # newer versions of numpy require explicit casting here, so *= no + # longer works + fwd["sol"]["data"] = (fwd["_orig_sol"] @ fix_rot).astype("float32") + fwd["sol"]["ncol"] = fwd["nsource"] + if fwd["sol_grad"] is not None: + x = sparse.block_diag([fix_rot] * 3) + fwd["sol_grad"]["data"] = fwd["_orig_sol_grad"] @ x + fwd["sol_grad"]["ncol"] = 3 * fwd["nsource"] + fwd["source_ori"] = FIFF.FIFFV_MNE_FIXED_ORI + fwd["surf_ori"] = True + + elif surf_ori: # Free, surf-oriented + # Rotate the local source coordinate systems + fwd["source_nn"] = np.kron(np.ones((fwd["nsource"], 1)), np.eye(3)) + logger.info(" Converting to surface-based source orientations...") + # Actually determine the source orientations + pp = 0 + for s in fwd["src"]: + if s["type"] in ["surf", "discrete"]: + nn = _get_src_nn(s, use_cps) + stop = pp + 3 * s["nuse"] + fwd["source_nn"][pp:stop] = _normal_orth(nn).reshape(-1, 3) + pp = stop + del nn + else: + pp += 3 * s["nuse"] + + # Rotate the solution components as well + if force_fixed: + fwd["source_nn"] = fwd["source_nn"][2::3, :] + fix_rot = _block_diag(fwd["source_nn"].T, 1) + # newer versions of numpy require explicit casting here, so *= no + # longer works + fwd["sol"]["data"] = (fwd["_orig_sol"] @ fix_rot).astype("float32") + fwd["sol"]["ncol"] = fwd["nsource"] + if fwd["sol_grad"] is not None: + x = sparse.block_diag([fix_rot] * 3) + fwd["sol_grad"]["data"] = fwd["_orig_sol_grad"] @ x + fwd["sol_grad"]["ncol"] = 3 * fwd["nsource"] + fwd["source_ori"] = FIFF.FIFFV_MNE_FIXED_ORI + fwd["surf_ori"] = True + else: + surf_rot = _block_diag(fwd["source_nn"].T, 3) + fwd["sol"]["data"] = fwd["_orig_sol"] @ surf_rot + fwd["sol"]["ncol"] = 3 * fwd["nsource"] + if fwd["sol_grad"] is not None: + x = sparse.block_diag([surf_rot] * 3) + fwd["sol_grad"]["data"] = fwd["_orig_sol_grad"] @ x + fwd["sol_grad"]["ncol"] = 9 * fwd["nsource"] + fwd["source_ori"] = FIFF.FIFFV_MNE_FREE_ORI + fwd["surf_ori"] = True + + else: # Free, cartesian + logger.info(" Cartesian source orientations...") + fwd["source_nn"] = np.tile(np.eye(3), (fwd["nsource"], 1)) + fwd["sol"]["data"] = fwd["_orig_sol"].copy() + fwd["sol"]["ncol"] = 3 * fwd["nsource"] + if fwd["sol_grad"] is not None: + fwd["sol_grad"]["data"] = fwd["_orig_sol_grad"].copy() + fwd["sol_grad"]["ncol"] = 9 * fwd["nsource"] + fwd["source_ori"] = FIFF.FIFFV_MNE_FREE_ORI + fwd["surf_ori"] = False + + logger.info(" [done]") + + return fwd + + +@verbose +def write_forward_solution(fname, fwd, overwrite=False, verbose=None): + """Write forward solution to a file. + + Parameters + ---------- + %(fname_fwd)s + fwd : Forward + Forward solution. + %(overwrite)s + %(verbose)s + + See Also + -------- + read_forward_solution + + Notes + ----- + Forward solutions, which are derived from an original forward solution with + free orientation, are always stored on disk as forward solution with free + orientation in X/Y/Z RAS coordinates. Transformations (surface orientation, + fixed orientation) will be reverted. To reapply any transformation to the + forward operator please apply :func:`convert_forward_solution` after + reading the forward solution with :func:`read_forward_solution`. + + Forward solutions, which are derived from an original forward solution with + fixed orientation, are stored on disk as forward solution with fixed + surface-based orientations. 
Please note that the transformation to + surface-based, fixed orientation cannot be reverted after loading the + forward solution with :func:`read_forward_solution`. + """ + check_fname( + fname, + "forward", + ("-fwd.fif", "-fwd.fif.gz", "_fwd.fif", "_fwd.fif.gz", "-fwd.h5", "_fwd.h5"), + ) + + # check for file existence and expand `~` if present + fname = _check_fname(fname, overwrite) + if fname.suffix == ".h5": + _write_forward_hdf5(fname, fwd) + else: + with start_and_end_file(fname) as fid: + _write_forward_solution(fid, fwd) + + +def _write_forward_hdf5(fname, fwd): + _, write_hdf5 = _import_h5io_funcs() + write_hdf5(fname, dict(fwd=fwd), overwrite=True) + + +def _read_forward_hdf5(fname): + read_hdf5, _ = _import_h5io_funcs() + fwd = Forward(read_hdf5(fname)["fwd"]) + fwd["info"] = Info(fwd["info"]) + fwd["src"] = SourceSpaces(fwd["src"]) + return fwd + + +def _write_forward_solution(fid, fwd): + start_block(fid, FIFF.FIFFB_MNE) + + # + # MNE env + # + start_block(fid, FIFF.FIFFB_MNE_ENV) + write_id(fid, FIFF.FIFF_BLOCK_ID) + data = fwd["info"].get("working_dir", None) + if data is not None: + write_string(fid, FIFF.FIFF_MNE_ENV_WORKING_DIR, data) + data = fwd["info"].get("command_line", None) + if data is not None: + write_string(fid, FIFF.FIFF_MNE_ENV_COMMAND_LINE, data) + end_block(fid, FIFF.FIFFB_MNE_ENV) + + # + # Information from the MRI file + # + start_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE) + write_string(fid, FIFF.FIFF_MNE_FILE_NAME, fwd["info"]["mri_file"]) + if fwd["info"]["mri_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_FILE_ID, fwd["info"]["mri_id"]) + # store the MRI to HEAD transform in MRI file + write_coord_trans(fid, fwd["info"]["mri_head_t"]) + end_block(fid, FIFF.FIFFB_MNE_PARENT_MRI_FILE) + + # write measurement info + write_forward_meas_info(fid, fwd["info"]) + + # invert our original source space transform + src = list() + for s in fwd["src"]: + s = deepcopy(s) + try: + # returns source space to original coordinate frame + # usually MRI + s = transform_surface_to(s, fwd["mri_head_t"]["from"], fwd["mri_head_t"]) + except Exception as inst: + raise ValueError(f"Could not transform source space ({inst})") + src.append(s) + + # + # Write the source spaces (again) + # + _write_source_spaces_to_fid(fid, src) + n_vert = sum([ss["nuse"] for ss in src]) + if fwd["_orig_source_ori"] == FIFF.FIFFV_MNE_FIXED_ORI: + n_col = n_vert + else: + n_col = 3 * n_vert + + # Undo transformations + sol = fwd["_orig_sol"].copy() + if fwd["sol_grad"] is not None: + sol_grad = fwd["_orig_sol_grad"].copy() + else: + sol_grad = None + + if fwd["surf_ori"] is True: + if fwd["_orig_source_ori"] == FIFF.FIFFV_MNE_FIXED_ORI: + warn( + "The forward solution, which is stored on disk now, is based " + "on a forward solution with fixed orientation. Please note " + "that the transformation to surface-based, fixed orientation " + "cannot be reverted after loading the forward solution with " + "read_forward_solution.", + RuntimeWarning, + ) + else: + warn( + "This forward solution is based on a forward solution with " + "free orientation. The original forward solution is stored " + "on disk in X/Y/Z RAS coordinates. Any transformation " + "(surface orientation or fixed orientation) will be " + "reverted. 
To reapply any transformation to the forward " + "operator please apply convert_forward_solution after " + "reading the forward solution with read_forward_solution.", + RuntimeWarning, + ) + + # + # MEG forward solution + # + picks_meg = pick_types(fwd["info"], meg=True, eeg=False, ref_meg=False, exclude=[]) + picks_eeg = pick_types(fwd["info"], meg=False, eeg=True, ref_meg=False, exclude=[]) + n_meg = len(picks_meg) + n_eeg = len(picks_eeg) + row_names_meg = [fwd["sol"]["row_names"][p] for p in picks_meg] + row_names_eeg = [fwd["sol"]["row_names"][p] for p in picks_eeg] + + if n_meg > 0: + meg_solution = dict( + data=sol[picks_meg], + nrow=n_meg, + ncol=n_col, + row_names=row_names_meg, + col_names=[], + ) + _transpose_named_matrix(meg_solution) + start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) + write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_MEG) + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd["coord_frame"]) + write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, fwd["_orig_source_ori"]) + write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert) + write_int(fid, FIFF.FIFF_NCHAN, n_meg) + write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, meg_solution) + if sol_grad is not None: + meg_solution_grad = dict( + data=sol_grad[picks_meg], + nrow=n_meg, + ncol=n_col * 3, + row_names=row_names_meg, + col_names=[], + ) + _transpose_named_matrix(meg_solution_grad) + write_named_matrix( + fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD, meg_solution_grad + ) + end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) + + # + # EEG forward solution + # + if n_eeg > 0: + eeg_solution = dict( + data=sol[picks_eeg], + nrow=n_eeg, + ncol=n_col, + row_names=row_names_eeg, + col_names=[], + ) + _transpose_named_matrix(eeg_solution) + start_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) + write_int(fid, FIFF.FIFF_MNE_INCLUDED_METHODS, FIFF.FIFFV_MNE_EEG) + write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, fwd["coord_frame"]) + write_int(fid, FIFF.FIFF_MNE_SOURCE_ORIENTATION, fwd["_orig_source_ori"]) + write_int(fid, FIFF.FIFF_NCHAN, n_eeg) + write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS, n_vert) + write_named_matrix(fid, FIFF.FIFF_MNE_FORWARD_SOLUTION, eeg_solution) + if sol_grad is not None: + eeg_solution_grad = dict( + data=sol_grad[picks_eeg], + nrow=n_eeg, + ncol=n_col * 3, + row_names=row_names_eeg, + col_names=[], + ) + _transpose_named_matrix(eeg_solution_grad) + write_named_matrix( + fid, FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD, eeg_solution_grad + ) + end_block(fid, FIFF.FIFFB_MNE_FORWARD_SOLUTION) + + end_block(fid, FIFF.FIFFB_MNE) + + +def is_fixed_orient(forward, orig=False): + """Check if the forward operator is fixed orientation. + + Parameters + ---------- + forward : instance of Forward + The forward. + orig : bool + If True, consider the original source orientation. + If False (default), consider the current source orientation. + + Returns + ------- + fixed_ori : bool + Whether or not it is fixed orientation. + """ + if orig: # if we want to know about the original version + fixed_ori = forward["_orig_source_ori"] == FIFF.FIFFV_MNE_FIXED_ORI + else: # most of the time we want to know about the current version + fixed_ori = forward["source_ori"] == FIFF.FIFFV_MNE_FIXED_ORI + return fixed_ori + + +@fill_doc +def write_forward_meas_info(fid, info): + """Write measurement info stored in forward solution. 
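A quick sketch of how `is_fixed_orient` separates the current orientation from the original one (assuming `fwd` is a `Forward` as above):

```python
# orig=True reports how the solution was originally computed,
# regardless of any convert_forward_solution() calls since then
if is_fixed_orient(fwd) and not is_fixed_orient(fwd, orig=True):
    print("free-orientation solution currently converted to fixed")
```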
+ + Parameters + ---------- + fid : file id + The file id + %(info_not_none)s + """ + info._check_consistency() + # + # Information from the MEG file + # + start_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE) + write_string(fid, FIFF.FIFF_MNE_FILE_NAME, info["meas_file"]) + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info["meas_id"]) + # get transformation from CTF and DEVICE to HEAD coordinate frame + meg_head_t = info.get("dev_head_t", info.get("ctf_head_t")) + if meg_head_t is None: + fid.close() + raise ValueError("Head<-->sensor transform not found") + write_coord_trans(fid, meg_head_t) + + ch_names_mapping = dict() + if "chs" in info: + # Channel information + ch_names_mapping = _make_ch_names_mapping(info["chs"]) + write_int(fid, FIFF.FIFF_NCHAN, len(info["chs"])) + _write_ch_infos(fid, info["chs"], False, ch_names_mapping) + if "bads" in info and len(info["bads"]) > 0: + # Bad channels + _write_bad_channels(fid, info["bads"], ch_names_mapping) + + end_block(fid, FIFF.FIFFB_MNE_PARENT_MEAS_FILE) + + +def _select_orient_forward(forward, info, noise_cov=None, copy=True): + """Prepare forward solution for inverse solvers.""" + # fwd['sol']['row_names'] may be different order from fwd['info']['chs'] + fwd_sol_ch_names = forward["sol"]["row_names"] + all_ch_names = set(fwd_sol_ch_names) + all_bads = set(info["bads"]) + if noise_cov is not None: + all_ch_names &= set(noise_cov["names"]) + all_bads |= set(noise_cov["bads"]) + else: + noise_cov = dict(bads=info["bads"]) + ch_names = [ + c["ch_name"] + for c in info["chs"] + if c["ch_name"] not in all_bads and c["ch_name"] in all_ch_names + ] + + if not len(info["bads"]) == len(noise_cov["bads"]) or not all( + b in noise_cov["bads"] for b in info["bads"] + ): + logger.info( + 'info["bads"] and noise_cov["bads"] do not match, ' + "excluding bad channels from both" + ) + + # check the compensation grade + _check_compensation_grade(forward["info"], info, "forward") + + n_chan = len(ch_names) + logger.info("Computing inverse operator with %d channels.", n_chan) + forward = pick_channels_forward(forward, ch_names, ordered=True, copy=copy) + info_idx = [info["ch_names"].index(name) for name in ch_names] + info_picked = pick_info(info, info_idx) + forward["info"]._check_consistency() + info_picked._check_consistency() + return forward, info_picked + + +def _triage_loose(src, loose, fixed="auto"): + _validate_type(loose, (str, dict, "numeric"), "loose") + _validate_type(fixed, (str, bool), "fixed") + orig_loose = loose + if isinstance(loose, str): + _check_option("loose", loose, ("auto",)) + if fixed is True: + loose = 0.0 + else: # False or auto + loose = 0.2 if src.kind == "surface" else 1.0 + src_types = set(_src_kind_dict[s["type"]] for s in src) + if not isinstance(loose, dict): + loose = float(loose) + loose = {key: loose for key in src_types} + loose_keys = set(loose.keys()) + if loose_keys != src_types: + raise ValueError( + f"loose, if dict, must have keys {sorted(src_types)} to match the " + f"source space, got {sorted(loose_keys)}" + ) + # if fixed is auto it can be ignored, if it's False it can be ignored, + # only really need to care about fixed=True + if fixed is True: + if not all(v == 0.0 for v in loose.values()): + raise ValueError( + 'When using fixed=True, loose must be 0. 
or "auto", ' + f"got {orig_loose}" + ) + elif fixed is False: + if any(v == 0.0 for v in loose.values()): + raise ValueError( + 'If loose==0., then fixed must be True or "auto", got False' + ) + del fixed + + for key, this_loose in loose.items(): + if key not in ("surface", "discrete") and this_loose != 1: + raise ValueError( + 'loose parameter has to be 1 or "auto" for non-surface/' + f'discrete source spaces, got loose["{key}"] = {this_loose}' + ) + if not 0 <= this_loose <= 1: + raise ValueError(f"loose ({key}) must be between 0 and 1, got {this_loose}") + return loose + + +@verbose +def compute_orient_prior(forward, loose="auto", verbose=None): + """Compute orientation prior. + + Parameters + ---------- + forward : instance of Forward + Forward operator. + %(loose)s + %(verbose)s + + Returns + ------- + orient_prior : ndarray, shape (n_sources,) + Orientation priors. + + See Also + -------- + compute_depth_prior + """ + _validate_type(forward, Forward, "forward") + n_sources = forward["sol"]["data"].shape[1] + + loose = _triage_loose(forward["src"], loose) + orient_prior = np.ones(n_sources, dtype=np.float64) + if is_fixed_orient(forward): + if any(v > 0.0 for v in loose.values()): + raise ValueError( + "loose must be 0. with forward operator " + f"with fixed orientation, got {loose}" + ) + return orient_prior + if all(v == 1.0 for v in loose.values()): + return orient_prior + # We actually need non-unity prior, compute it for each source space + # separately + if not forward["surf_ori"]: + raise ValueError( + "Forward operator is not oriented in surface " + "coordinates. loose parameter should be 1. " + f"not {loose}." + ) + start = 0 + logged = dict() + for s in forward["src"]: + this_type = _src_kind_dict[s["type"]] + use_loose = loose[this_type] + if not logged.get(this_type): + if use_loose == 1.0: + name = "free" + else: + name = "fixed" if use_loose == 0.0 else "loose" + logger.info( + f"Applying {name.ljust(5)} dipole orientations to " + f"{this_type.ljust(7)} source spaces: {use_loose}" + ) + logged[this_type] = True + stop = start + 3 * s["nuse"] + orient_prior[start:stop:3] *= use_loose + orient_prior[start + 1 : stop : 3] *= use_loose + start = stop + return orient_prior + + +def _restrict_gain_matrix(G, info): + """Restrict gain matrix entries for optimal depth weighting.""" + # Figure out which ones have been used + if len(info["chs"]) != G.shape[0]: + raise ValueError( + f'G.shape[0] ({G.shape[0]}) and length of info["chs"] ({len(info["chs"])}) ' + "do not match." + ) + for meg, eeg, kind in ( + ("grad", False, "planar"), + ("mag", False, "magnetometer or axial gradiometer"), + (False, True, "EEG"), + ): + sel = pick_types(info, meg=meg, eeg=eeg, ref_meg=False, exclude=[]) + if len(sel) > 0: + logger.info(" %d %s channels", len(sel), kind) + break + else: + warn("Could not find MEG or EEG channels to limit depth channels") + sel = slice(None) + return G[sel] + + +@verbose +def compute_depth_prior( + forward, + info, + exp=0.8, + limit=10.0, + limit_depth_chs=False, + combine_xyz="spectral", + noise_cov=None, + rank=None, + verbose=None, +): + """Compute depth prior for depth weighting. + + Parameters + ---------- + forward : instance of Forward + The forward solution. + %(info_not_none)s + exp : float + Exponent for the depth weighting, must be between 0 and 1. + limit : float | None + The upper bound on depth weighting. + Can be None to be bounded by the largest finite prior. 
+    limit_depth_chs : bool | 'whiten'
+        How to deal with multiple channel types in depth weighting. The
+        default of this function is ``False`` (use all channels), while the
+        minimum-norm code passes ``True`` (restrict weighting to the
+        highest-SNR channel type). See Notes for details.
+
+        .. versionchanged:: 0.18
+           Added the "whiten" option.
+    combine_xyz : 'spectral' | 'fro'
+        When a loose (or free) orientation is used, how the depth weighting
+        for each triplet should be calculated.
+        If 'spectral', use the squared spectral norm of Gk.
+        If 'fro', use the squared Frobenius norm of Gk.
+
+        .. versionadded:: 0.18
+    noise_cov : instance of Covariance | None
+        The noise covariance to use to whiten the gain matrix when
+        ``limit_depth_chs='whiten'``.
+
+        .. versionadded:: 0.18
+    %(rank_none)s
+
+        .. versionadded:: 0.18
+    %(verbose)s
+
+    Returns
+    -------
+    depth_prior : ndarray, shape (n_vertices,)
+        The depth prior.
+
+    See Also
+    --------
+    compute_orient_prior
+
+    Notes
+    -----
+    The defaults used by the minimum norm code and sparse solvers differ.
+    In particular, the values for MNE are::
+
+        compute_depth_prior(..., limit=10., limit_depth_chs=True,
+                            combine_xyz='spectral')
+
+    In sparse solvers and LCMV, the values are::
+
+        compute_depth_prior(..., limit=None, limit_depth_chs='whiten',
+                            combine_xyz='fro')
+
+    The ``limit_depth_chs`` argument can take the following values:
+
+    * :data:`python:True`
+      Use only grad channels in depth weighting (equivalent to MNE C
+      minimum-norm code). If grad channels aren't present, only mag
+      channels will be used (if no mag, then eeg). This makes the depth
+      prior dependent only on the sensor geometry (and relationship
+      to the sources).
+    * ``'whiten'``
+      Compute a whitener and apply it to the gain matrix before computing
+      the depth prior. In this case ``noise_cov`` must not be None.
+      Whitening the gain matrix makes the depth prior
+      depend on both sensor geometry and the data of interest captured
+      by the noise covariance (e.g., projections, SNR).
+
+      .. versionadded:: 0.18
+    * :data:`python:False`
+      Use all channels. Not recommended since the depth weighting will be
+      biased toward whichever channel type has the largest values in
+      SI units (such as EEG being orders of magnitude larger than MEG).
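To make the two priors concrete, a hedged usage sketch of the helpers defined in this module (the file path is hypothetical; `loose < 1` requires a surface-oriented forward, as enforced above):

```python
import mne

fwd = mne.read_forward_solution("sample-fwd.fif")  # hypothetical path
fwd = mne.convert_forward_solution(fwd, surf_ori=True)

depth = compute_depth_prior(fwd, fwd["info"], exp=0.8, limit=10.0)
orient = compute_orient_prior(fwd, loose=0.2)
# free orientation: one weight per dipole component (3 per source)
assert depth.shape == orient.shape == (3 * fwd["nsource"],)
```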
+ """ + from ..cov import Covariance, compute_whitener + + _validate_type(forward, Forward, "forward") + patch_areas = forward.get("patch_areas", None) + is_fixed_ori = is_fixed_orient(forward) + G = forward["sol"]["data"] + logger.info("Creating the depth weighting matrix...") + _validate_type(noise_cov, (Covariance, None), "noise_cov", "Covariance or None") + _validate_type(limit_depth_chs, (str, bool), "limit_depth_chs") + if isinstance(limit_depth_chs, str): + if limit_depth_chs != "whiten": + raise ValueError( + f'limit_depth_chs, if str, must be "whiten", got {limit_depth_chs}' + ) + if not isinstance(noise_cov, Covariance): + raise ValueError( + 'With limit_depth_chs="whiten", noise_cov must be' + f" a Covariance, got {type(noise_cov)}" + ) + if combine_xyz is not False: # private / expert option + _check_option("combine_xyz", combine_xyz, ("fro", "spectral")) + + # If possible, pick best depth-weighting channels + if limit_depth_chs is True: + G = _restrict_gain_matrix(G, info) + elif limit_depth_chs == "whiten": + whitener, _ = compute_whitener( + noise_cov, info, pca=True, rank=rank, verbose=False + ) + G = np.dot(whitener, G) + + # Compute the gain matrix + if is_fixed_ori or combine_xyz in ("fro", False): + d = np.sum(G**2, axis=0) + if not (is_fixed_ori or combine_xyz is False): + d = d.reshape(-1, 3).sum(axis=1) + # Spherical leadfield can be zero at the center + d[d == 0.0] = np.min(d[d != 0.0]) + else: # 'spectral' + # n_pos = G.shape[1] // 3 + # The following is equivalent to this, but 4-10x faster + # d = np.zeros(n_pos) + # for k in range(n_pos): + # Gk = G[:, 3 * k:3 * (k + 1)] + # x = np.dot(Gk.T, Gk) + # d[k] = linalg.svdvals(x)[0] + G.shape = (G.shape[0], -1, 3) + d = np.linalg.norm( + np.einsum("svj,svk->vjk", G, G), # vector dot prods + ord=2, # ord=2 spectral (largest s.v.) 
+ axis=(1, 2), + ) + G.shape = (G.shape[0], -1) + + # XXX Currently the fwd solns never have "patch_areas" defined + if patch_areas is not None: + if not is_fixed_ori and combine_xyz is False: + patch_areas = np.repeat(patch_areas, 3) + d /= patch_areas**2 + logger.info(" Patch areas taken into account in the depth weighting") + + w = 1.0 / d + if limit is not None: + ws = np.sort(w) + weight_limit = limit**2 + if limit_depth_chs is False: + # match old mne-python behavior + # we used to do ind = np.argmin(ws), but this is 0 by sort above + n_limit = 0 + limit = ws[0] * weight_limit + else: + # match C code behavior + limit = ws[-1] + n_limit = len(d) + if ws[-1] > weight_limit * ws[0]: + ind = np.where(ws > weight_limit * ws[0])[0][0] + limit = ws[ind] + n_limit = ind + + logger.info( + " limit = %d/%d = %f", n_limit + 1, len(d), np.sqrt(limit / ws[0]) + ) + scale = 1.0 / limit + logger.info(f" scale = {scale:g} exp = {exp:g}") + w = np.minimum(w / limit, 1) + depth_prior = w**exp + + if not (is_fixed_ori or combine_xyz is False): + depth_prior = np.repeat(depth_prior, 3) + + return depth_prior + + +def _stc_src_sel( + src, stc, on_missing="raise", extra=", likely due to forward calculations" +): + """Select the vertex indices of a source space using a source estimate.""" + if isinstance(stc, list): + vertices = stc + else: + assert isinstance(stc, _BaseSourceEstimate) + vertices = stc.vertices + del stc + if not len(src) == len(vertices): + raise RuntimeError( + f"Mismatch between number of source spaces ({len(src)}) and " + f"STC vertices ({len(vertices)})" + ) + src_sels, stc_sels, out_vertices = [], [], [] + src_offset = stc_offset = 0 + for s, v in zip(src, vertices): + joint_sel = np.intersect1d(s["vertno"], v) + src_sels.append(np.searchsorted(s["vertno"], joint_sel) + src_offset) + src_offset += len(s["vertno"]) + idx = np.searchsorted(v, joint_sel) + stc_sels.append(idx + stc_offset) + stc_offset += len(v) + out_vertices.append(np.array(v)[idx]) + src_sel = np.concatenate(src_sels) + stc_sel = np.concatenate(stc_sels) + assert len(src_sel) == len(stc_sel) == sum(len(v) for v in out_vertices) + + n_stc = sum(len(v) for v in vertices) + n_joint = len(src_sel) + if n_joint != n_stc: + msg = ( + f"Only {n_joint} of {n_stc} SourceEstimate " + f"{'vertex' if n_stc == 1 else 'vertices'} found in source space{extra}" + ) + _on_missing(on_missing, msg) + return src_sel, stc_sel, out_vertices + + +def _fill_measurement_info(info, fwd, sfreq, data): + """Fill the measurement info of a Raw or Evoked object.""" + sel = pick_channels(info["ch_names"], fwd["sol"]["row_names"], ordered=False) + info = pick_info(info, sel) + info["bads"] = [] + + now = time() + sec = np.floor(now) + usec = 1e6 * (now - sec) + + # this is probably correct based on what's done in meas_info.py... 
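The `einsum` trick in the spectral branch above replaces an explicit per-source SVD loop; a self-contained check of that equivalence on a synthetic gain matrix (not MNE data):

```python
import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
n_ch, n_pos = 8, 5
G = rng.standard_normal((n_ch, n_pos * 3))

# loop version: largest singular value of each 3x3 Gram matrix Gk.T @ Gk
d_loop = np.array(
    [linalg.svdvals(G[:, 3 * k : 3 * (k + 1)].T @ G[:, 3 * k : 3 * (k + 1)])[0]
     for k in range(n_pos)]
)

# vectorized version used above: batched Gram matrices + spectral norm
G3 = G.reshape(n_ch, n_pos, 3)
d_vec = np.linalg.norm(np.einsum("svj,svk->vjk", G3, G3), ord=2, axis=(1, 2))

assert np.allclose(d_loop, d_vec)
```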
+    with info._unlock(check_after=True):
+        info.update(
+            meas_id=fwd["info"]["meas_id"],
+            file_id=info["meas_id"],
+            meas_date=_stamp_to_dt((int(sec), int(usec))),
+            highpass=0.0,
+            lowpass=sfreq / 2.0,
+            sfreq=sfreq,
+            projs=[],
+        )
+
+    # reorder data (which is in fwd order) to match that of info
+    order = [fwd["sol"]["row_names"].index(name) for name in info["ch_names"]]
+    data = data[order]
+
+    return info, data
+
+
+@verbose
+def _apply_forward(
+    fwd, stc, start=None, stop=None, on_missing="raise", use_cps=True, verbose=None
+):
+    """Apply forward model and return data, times, ch_names."""
+    _validate_type(stc, _BaseSourceEstimate, "stc", "SourceEstimate")
+    _validate_type(fwd, Forward, "fwd")
+    if isinstance(stc, _BaseVectorSourceEstimate):
+        vector = True
+        fwd = convert_forward_solution(fwd, force_fixed=False, surf_ori=False)
+    else:
+        vector = False
+        if not is_fixed_orient(fwd):
+            fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=use_cps)
+
+    if np.all(stc.data > 0):
+        warn(
+            "Source estimate only contains currents with positive values. "
+            'Use pick_ori="normal" when computing the inverse to compute '
+            "currents not current magnitudes."
+        )
+
+    _check_stc_units(stc)
+
+    src_sel, stc_sel, _ = _stc_src_sel(fwd["src"], stc, on_missing=on_missing)
+    gain = fwd["sol"]["data"]
+    stc_sel = slice(None) if len(stc_sel) == len(stc.data) else stc_sel
+    times = stc.times[start:stop].copy()
+    stc_data = stc.data[stc_sel, ..., start:stop].reshape(-1, len(times))
+    del stc
+    if vector:
+        gain = gain.reshape(len(gain), gain.shape[1] // 3, 3)
+        gain = gain[:, src_sel].reshape(len(gain), -1)
+        # save some memory if possible
+
+    logger.info("Projecting source estimate to sensor space...")
+    data = np.dot(gain, stc_data)
+    logger.info("[done]")
+    return data, times
+
+
+@verbose
+def apply_forward(
+    fwd,
+    stc,
+    info,
+    start=None,
+    stop=None,
+    use_cps=True,
+    on_missing="raise",
+    verbose=None,
+):
+    """Project source space currents to sensor space using a forward operator.
+
+    The sensor space data is computed for all channels present in fwd. Use
+    pick_channels_forward or pick_types_forward to restrict the solution to a
+    subset of channels.
+
+    The function returns an Evoked object, which is constructed using the
+    provided info. The info should be from the same MEG system on which the
+    original data was acquired. An exception will be raised if the forward
+    operator contains channels that are not present in the info.
+
+    Parameters
+    ----------
+    fwd : Forward
+        Forward operator to use.
+    stc : SourceEstimate
+        The source estimate from which the sensor space data is computed.
+    %(info_not_none)s
+    start : int, optional
+        Index of first time sample (index not time is seconds).
+    stop : int, optional
+        Index of first time sample not to include (index not time is seconds).
+    %(use_cps)s
+
+        .. versionadded:: 0.15
+    %(on_missing_fwd)s
+        Default is "raise".
+
+        .. versionadded:: 0.18
+    %(verbose)s
+
+    Returns
+    -------
+    evoked : Evoked
+        Evoked object with computed sensor space data.
+
+    See Also
+    --------
+    apply_forward_raw: Compute sensor space data and return a Raw object.
+    """
+    _validate_type(info, Info, "info")
+    _validate_type(fwd, Forward, "forward")
+    info._check_consistency()
+
+    # make sure info contains all channels in fwd
+    for ch_name in fwd["sol"]["row_names"]:
+        if ch_name not in info["ch_names"]:
+            raise ValueError(
+                f"Channel {ch_name} of forward operator not present in info."
+ ) + + # project the source estimate to the sensor space + data, times = _apply_forward( + fwd, stc, start, stop, on_missing=on_missing, use_cps=use_cps + ) + + # fill the measurement info + sfreq = float(1.0 / stc.tstep) + info, data = _fill_measurement_info(info, fwd, sfreq, data) + + evoked = EvokedArray(data, info, times[0], nave=1) + + evoked._set_times(times) + evoked._update_first_last() + + return evoked + + +@verbose +def apply_forward_raw( + fwd, + stc, + info, + start=None, + stop=None, + on_missing="raise", + use_cps=True, + verbose=None, +): + """Project source space currents to sensor space using a forward operator. + + The sensor space data is computed for all channels present in fwd. Use + pick_channels_forward or pick_types_forward to restrict the solution to a + subset of channels. + + The function returns a Raw object, which is constructed using provided + info. The info object should be from the same MEG system on which the + original data was acquired. An exception will be raised if the forward + operator contains channels that are not present in the info. + + Parameters + ---------- + fwd : Forward + Forward operator to use. + stc : SourceEstimate + The source estimate from which the sensor space data is computed. + %(info_not_none)s + start : int, optional + Index of first time sample (index not time is seconds). + stop : int, optional + Index of first time sample not to include (index not time is seconds). + %(on_missing_fwd)s + Default is "raise". + + .. versionadded:: 0.18 + %(use_cps)s + + .. versionadded:: 0.21 + %(verbose)s + + Returns + ------- + raw : Raw object + Raw object with computed sensor space data. + + See Also + -------- + apply_forward: Compute sensor space data and return an Evoked object. + """ + # make sure info contains all channels in fwd + for ch_name in fwd["sol"]["row_names"]: + if ch_name not in info["ch_names"]: + raise ValueError( + f"Channel {ch_name} of forward operator not present in info." + ) + + # project the source estimate to the sensor space + data, times = _apply_forward( + fwd, stc, start, stop, on_missing=on_missing, use_cps=use_cps + ) + + sfreq = 1.0 / stc.tstep + info, data = _fill_measurement_info(info, fwd, sfreq, data) + with info._unlock(): + info["projs"] = [] + # store sensor data in Raw object using the info + raw = RawArray(data, info, first_samp=int(np.round(times[0] * sfreq))) + raw._projector = None + return raw + + +@fill_doc +def restrict_forward_to_stc(fwd, stc, on_missing="ignore"): + """Restrict forward operator to active sources in a source estimate. + + Parameters + ---------- + fwd : instance of Forward + Forward operator. + stc : instance of SourceEstimate + Source estimate. + %(on_missing_fwd)s + Default is "ignore". + + .. versionadded:: 0.18 + + Returns + ------- + fwd_out : instance of Forward + Restricted forward operator. 
+ + See Also + -------- + restrict_forward_to_label + """ + _validate_type(on_missing, str, "on_missing") + _check_option("on_missing", on_missing, ("ignore", "warn", "raise")) + src_sel, _, _ = _stc_src_sel(fwd["src"], stc, on_missing=on_missing) + del stc + return _restrict_forward_to_src_sel(fwd, src_sel) + + +def _restrict_forward_to_src_sel(fwd, src_sel): + fwd_out = deepcopy(fwd) + # figure out the vertno we are keeping + idx_sel = np.concatenate( + [[[si] * len(s["vertno"]), s["vertno"]] for si, s in enumerate(fwd["src"])], + axis=-1, + ) + assert idx_sel.ndim == 2 and idx_sel.shape[0] == 2 + assert idx_sel.shape[1] == fwd["nsource"] + idx_sel = idx_sel[:, src_sel] + + fwd_out["source_rr"] = fwd["source_rr"][src_sel] + fwd_out["nsource"] = len(src_sel) + + if is_fixed_orient(fwd): + idx = src_sel + if fwd["sol_grad"] is not None: + idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel() + else: + idx = (3 * src_sel[:, None] + np.arange(3)).ravel() + if fwd["sol_grad"] is not None: + idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel() + + fwd_out["source_nn"] = fwd["source_nn"][idx] + fwd_out["sol"]["data"] = fwd["sol"]["data"][:, idx] + if fwd["sol_grad"] is not None: + fwd_out["sol_grad"]["data"] = fwd["sol_grad"]["data"][:, idx_grad] + fwd_out["sol"]["ncol"] = len(idx) + + if is_fixed_orient(fwd, orig=True): + idx = src_sel + if fwd["sol_grad"] is not None: + idx_grad = (3 * src_sel[:, None] + np.arange(3)).ravel() + else: + idx = (3 * src_sel[:, None] + np.arange(3)).ravel() + if fwd["sol_grad"] is not None: + idx_grad = (9 * src_sel[:, None] + np.arange(9)).ravel() + + fwd_out["_orig_sol"] = fwd["_orig_sol"][:, idx] + if fwd["sol_grad"] is not None: + fwd_out["_orig_sol_grad"] = fwd["_orig_sol_grad"][:, idx_grad] + + vertices = [idx_sel[1][idx_sel[0] == si] for si in range(len(fwd_out["src"]))] + _set_source_space_vertices(fwd_out["src"], vertices) + + return fwd_out + + +def restrict_forward_to_label(fwd, labels): + """Restrict forward operator to labels. + + Parameters + ---------- + fwd : Forward + Forward operator. + labels : instance of Label | list + Label object or list of label objects. + + Returns + ------- + fwd_out : dict + Restricted forward operator. + + See Also + -------- + restrict_forward_to_stc + """ + vertices = [np.array([], int), np.array([], int)] + + if not isinstance(labels, list): + labels = [labels] + + # Get vertices separately of each hemisphere from all label + for label in labels: + _validate_type(label, Label, "label", "Label or list") + i = 0 if label.hemi == "lh" else 1 + vertices[i] = np.append(vertices[i], label.vertices) + # Remove duplicates and sort + vertices = [np.unique(vert_hemi) for vert_hemi in vertices] + vertices = [ + vert_hemi[np.isin(vert_hemi, s["vertno"])] + for vert_hemi, s in zip(vertices, fwd["src"]) + ] + src_sel, _, _ = _stc_src_sel(fwd["src"], vertices, on_missing="raise") + return _restrict_forward_to_src_sel(fwd, src_sel) + + +def _do_forward_solution( + subject, + meas, + fname=None, + src=None, + spacing=None, + mindist=None, + bem=None, + mri=None, + trans=None, + eeg=True, + meg=True, + fixed=False, + grad=False, + mricoord=False, + overwrite=False, + subjects_dir=None, + verbose=None, +): + """Calculate a forward solution for a subject using MNE-C routines. + + This is kept around for testing purposes. + + This function wraps to mne_do_forward_solution, so the mne + command-line tools must be installed and accessible from Python. + + Parameters + ---------- + subject : str + Name of the subject. 
meas : Raw | Epochs | Evoked | str
+        If Raw or Epochs, a temporary evoked file will be created and
+        saved to a temporary directory. If str, then it should be a
+        filename to a file with measurement information the mne
+        command-line tools can understand (i.e., raw or evoked).
+    fname : path-like | None
+        Destination forward solution filename. If None, the solution
+        will be created in a temporary directory, loaded, and deleted.
+    src : str | None
+        Source space name. If None, the MNE default is used.
+    spacing : str
+        The spacing to use. Can be ``'#'`` for spacing in mm, ``'ico#'`` for a
+        recursively subdivided icosahedron, or ``'oct#'`` for a recursively
+        subdivided octahedron (e.g., ``spacing='ico4'``). Default is 7 mm.
+    mindist : float | str | None
+        Minimum distance of sources from inner skull surface (in mm).
+        If None, the MNE default value is used. If string, ``'all'``
+        indicates to include all points.
+    bem : str | None
+        Name of the BEM to use (e.g., ``"sample-5120-5120-5120"``). If None
+        (Default), the MNE default will be used.
+    mri : dict | path-like | None
+        The name of the trans file in FIF format.
+        If None, ``trans`` must not be None.
+    trans : dict | path-like | None
+        File name of the trans file in text format.
+        If None, ``mri`` must not be None.
+    eeg : bool
+        If True (Default), include EEG computations.
+    meg : bool
+        If True (Default), include MEG computations.
+    fixed : bool
+        If True, make a fixed-orientation forward solution (Default:
+        False). Note that fixed-orientation inverses can still be
+        created from free-orientation forward solutions.
+    grad : bool
+        If True, compute the gradient of the field with respect to the
+        dipole coordinates as well (Default: False).
+    mricoord : bool
+        If True, calculate in MRI coordinates (Default: False).
+    %(overwrite)s
+    %(subjects_dir)s
+    %(verbose)s
+
+    See Also
+    --------
+    make_forward_solution
+
+    Returns
+    -------
+    fwd : Forward
+        The generated forward solution.
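Since this wrapper shells out to the MNE-C binaries, the pure-Python `make_forward_solution` listed under See Also is usually the better entry point; a hedged sketch with hypothetical file names:

```python
import mne

fwd = mne.make_forward_solution(
    "sample_raw.fif",            # measurement info source (hypothetical)
    trans="sample-trans.fif",    # head<->MRI transform
    src="sample-oct6-src.fif",   # source space
    bem="sample-bem-sol.fif",    # BEM solution
    meg=True,
    eeg=True,
    mindist=5.0,
)
```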
+ """ + if not has_mne_c(): + raise RuntimeError("mne command line tools could not be found") + + # check for file existence + temp_dir = Path(tempfile.mkdtemp()) + if fname is None: + fname = temp_dir / "temp-fwd.fif" + _check_fname(fname, overwrite) + _validate_type(subject, "str", "subject") + + # check for meas to exist as string, or try to make evoked + _validate_type(meas, ("path-like", BaseRaw, BaseEpochs, Evoked), "meas") + if isinstance(meas, BaseRaw | BaseEpochs | Evoked): + meas_file = op.join(temp_dir, "info.fif") + write_info(meas_file, meas.info) + meas = meas_file + else: + meas = str(_check_fname(meas, overwrite="read", must_exist=True)) + + # deal with trans/mri + if mri is not None and trans is not None: + raise ValueError("trans and mri cannot both be specified") + if mri is None and trans is None: + # MNE allows this to default to a trans/mri in the subject's dir, + # but let's be safe here and force the user to pass us a trans/mri + raise ValueError("Either trans or mri must be specified") + + if trans is not None: + if isinstance(trans, dict): + trans_data = deepcopy(trans) + trans = temp_dir / "trans-trans.fif" + try: + write_trans(trans, trans_data) + except Exception: + raise OSError( + "trans was a dict, but could not be " + "written to disk as a transform file" + ) + elif isinstance(trans, str | Path | PathLike): + _check_fname(trans, "read", must_exist=True, name="trans") + trans = Path(trans) + else: + raise ValueError("trans must be a path or dict") + if mri is not None: + if isinstance(mri, dict): + mri_data = deepcopy(trans) + mri = temp_dir / "mri-trans.fif" + try: + write_trans(mri, mri_data) + except Exception: + raise OSError( + "mri was a dict, but could not be " + "written to disk as a transform file" + ) + elif isinstance(mri, str | Path | PathLike): + _check_fname(mri, "read", must_exist=True, name="mri") + mri = Path(mri) + else: + raise ValueError("mri must be a path or dict") + + # deal with meg/eeg + if not meg and not eeg: + raise ValueError("meg or eeg (or both) must be True") + + if not fname.suffix == ".fif": + raise ValueError("Forward name does not end with .fif") + path = fname.parent.absolute() + fname = fname.name + + # deal with mindist + if mindist is not None: + if isinstance(mindist, str): + if not mindist.lower() == "all": + raise ValueError('mindist, if string, must be "all"') + mindist = ["--all"] + else: + mindist = ["--mindist", f"{mindist:g}"] + + # src, spacing, bem + for element, name, kind in zip( + (src, spacing, bem), + ("src", "spacing", "bem"), + ("path-like", "str", "path-like"), + ): + if element is not None: + _validate_type(element, kind, name, f"{kind} or None") + + # put together the actual call + cmd = [ + "mne_do_forward_solution", + "--subject", + subject, + "--meas", + meas, + "--fwd", + fname, + "--destdir", + str(path), + ] + if src is not None: + cmd += ["--src", src] + if spacing is not None: + if spacing.isdigit(): + pass # spacing in mm + else: + # allow both "ico4" and "ico-4" style values + match = re.match(r"(oct|ico)-?(\d+)$", spacing) + if match is None: + raise ValueError(f"Invalid spacing parameter: {spacing!r}") + spacing = "-".join(match.groups()) + cmd += ["--spacing", spacing] + if mindist is not None: + cmd += mindist + if bem is not None: + cmd += ["--bem", bem] + if mri is not None: + cmd += ["--mri", f"{mri.absolute()}"] + if trans is not None: + cmd += ["--trans", f"{trans.absolute()}"] + if not meg: + cmd.append("--eegonly") + if not eeg: + cmd.append("--megonly") + if fixed: + 
cmd.append("--fixed") + if grad: + cmd.append("--grad") + if mricoord: + cmd.append("--mricoord") + if overwrite: + cmd.append("--overwrite") + + env = os.environ.copy() + subjects_dir = str(get_subjects_dir(subjects_dir, raise_error=True)) + env["SUBJECTS_DIR"] = subjects_dir + + try: + logger.info( + "Running forward solution generation command with " + f"subjects_dir {subjects_dir}" + ) + run_subprocess(cmd, env=env) + except Exception: + raise + else: + fwd = read_forward_solution(path / fname, verbose=False) + finally: + shutil.rmtree(temp_dir, ignore_errors=True) + return fwd + + +@verbose +def average_forward_solutions(fwds, weights=None, verbose=None): + """Average forward solutions. + + Parameters + ---------- + fwds : list of Forward + Forward solutions to average. Each entry (dict) should be a + forward solution. + weights : array | None + Weights to apply to each forward solution in averaging. If None, + forward solutions will be equally weighted. Weights must be + non-negative, and will be adjusted to sum to one. + %(verbose)s + + Returns + ------- + fwd : Forward + The averaged forward solution. + """ + # check for fwds being a list + _validate_type(fwds, list, "fwds") + if not len(fwds) > 0: + raise ValueError("fwds must not be empty") + + # check weights + if weights is None: + weights = np.ones(len(fwds)) + weights = np.asanyarray(weights) # in case it's a list, convert it + if not np.all(weights >= 0): + raise ValueError("weights must be non-negative") + if not len(weights) == len(fwds): + raise ValueError("weights must be None or the same length as fwds") + w_sum = np.sum(weights) + if not w_sum > 0: + raise ValueError("weights cannot all be zero") + weights /= w_sum + + # check our forward solutions + for fwd in fwds: + # check to make sure it's a forward solution + _validate_type(fwd, dict, "each entry in fwds", "dict") + # check to make sure the dict is actually a fwd + check_keys = [ + "info", + "sol_grad", + "nchan", + "src", + "source_nn", + "sol", + "source_rr", + "source_ori", + "surf_ori", + "coord_frame", + "mri_head_t", + "nsource", + ] + if not all(key in fwd for key in check_keys): + raise KeyError( + "forward solution dict does not have all standard " + "entries, cannot compute average." + ) + + # check forward solution compatibility + if any( + fwd["sol"][k] != fwds[0]["sol"][k] for fwd in fwds[1:] for k in ["nrow", "ncol"] + ): + raise ValueError("Forward solutions have incompatible dimensions") + if any( + fwd[k] != fwds[0][k] + for fwd in fwds[1:] + for k in ["source_ori", "surf_ori", "coord_frame"] + ): + raise ValueError("Forward solutions have incompatible orientations") + + # actually average them (solutions and gradients) + fwd_ave = deepcopy(fwds[0]) + fwd_ave["sol"]["data"] *= weights[0] + fwd_ave["_orig_sol"] *= weights[0] + for fwd, w in zip(fwds[1:], weights[1:]): + fwd_ave["sol"]["data"] += w * fwd["sol"]["data"] + fwd_ave["_orig_sol"] += w * fwd["_orig_sol"] + if fwd_ave["sol_grad"] is not None: + fwd_ave["sol_grad"]["data"] *= weights[0] + fwd_ave["_orig_sol_grad"] *= weights[0] + for fwd, w in zip(fwds[1:], weights[1:]): + fwd_ave["sol_grad"]["data"] += w * fwd["sol_grad"]["data"] + fwd_ave["_orig_sol_grad"] += w * fwd["_orig_sol_grad"] + return fwd_ave diff --git a/mne/gui/__init__.py b/mne/gui/__init__.py new file mode 100644 index 0000000..c06c8dc --- /dev/null +++ b/mne/gui/__init__.py @@ -0,0 +1,8 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +"""Convenience functions for opening GUIs.""" +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/gui/__init__.pyi b/mne/gui/__init__.pyi new file mode 100644 index 0000000..086c51a --- /dev/null +++ b/mne/gui/__init__.pyi @@ -0,0 +1,2 @@ +__all__ = ["_GUIScraper", "coregistration"] +from ._gui import _GUIScraper, coregistration diff --git a/mne/gui/_coreg.py b/mne/gui/_coreg.py new file mode 100644 index 0000000..98e3fbf --- /dev/null +++ b/mne/gui/_coreg.py @@ -0,0 +1,2091 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import inspect +import os +import os.path as op +import platform +import queue +import re +import threading +import time +from contextlib import contextmanager +from functools import partial +from pathlib import Path + +import numpy as np +from traitlets import Bool, Float, HasTraits, Instance, Unicode, observe + +from .._fiff.constants import FIFF +from .._fiff.meas_info import _empty_info, read_fiducials, read_info, write_fiducials +from .._fiff.open import dir_tree_find, fiff_open +from .._fiff.pick import pick_types +from ..bem import make_bem_solution, write_bem_solution +from ..channels import read_dig_fif +from ..coreg import ( + Coregistration, + _find_head_bem, + _is_mri_subject, + _map_fid_name_to_idx, + _mri_subject_has_bem, + bem_fname, + fid_fname, + scale_mri, +) +from ..defaults import DEFAULTS +from ..io._read_raw import _get_supported, read_raw +from ..surface import _CheckInside, _DistanceQuery +from ..transforms import ( + Transform, + _ensure_trans, + _get_trans, + _get_transforms_to_coord_frame, + read_trans, + rotation_angles, + write_trans, +) +from ..utils import ( + _check_fname, + _validate_type, + check_fname, + fill_doc, + get_subjects_dir, + logger, + verbose, +) +from ..viz._3d import ( + _plot_head_fiducials, + _plot_head_shape_points, + _plot_head_surface, + _plot_helmet, + _plot_hpi_coils, + _plot_mri_fiducials, + _plot_sensors_3d, +) +from ..viz.backends._utils import _qt_app_exec, _qt_safe_window +from ..viz.utils import safe_event + + +class _WorkerData: + def __init__(self, name, params=None): + self._name = name + self._params = params + + +def _get_subjects(sdir): + # XXX: would be nice to move this function to util + is_dir = sdir and op.isdir(sdir) + if is_dir: + dir_content = os.listdir(sdir) + subjects = [s for s in dir_content if _is_mri_subject(s, sdir)] + if len(subjects) == 0: + subjects.append("") + else: + subjects = [""] + return sorted(subjects) + + +@fill_doc +class CoregistrationUI(HasTraits): + """Class for coregistration assisted by graphical interface. + + Parameters + ---------- + info_file : None | path-like + The FIFF file with digitizer data for coregistration. + %(subject)s + %(subjects_dir)s + %(fiducials)s + head_resolution : bool + If ``True``, use a high-resolution head surface. Defaults to ``False``. + head_opacity : float + The opacity of the head surface. Defaults to ``0.8``. + hpi_coils : bool + If ``True``, display the HPI coils. Defaults to ``True``. + head_shape_points : bool + If ``True``, display the head shape points. Defaults to ``True``. + eeg_channels : bool + If ``True``, display the EEG channels. Defaults to ``True``. + meg_channels : bool + If ``True``, display the MEG channels. Defaults to ``False``. + fnirs_channels : bool + If ``True``, display the fNIRS channels. Defaults to ``True``. 
+ orient_glyphs : bool + If ``True``, orient the sensors towards the head surface. Default to ``False``. + scale_by_distance : bool + If ``True``, scale the sensors based on their distance to the head surface. + Defaults to ``True``. + mark_inside : bool + If ``True``, mark the head shape points that are inside the head surface + with a different color. Defaults to ``True``. + sensor_opacity : float + The opacity of the sensors between ``0`` and ``1``. Defaults to ``1.``. + trans : path-like | Transform + The Head<->MRI transform or the path to its FIF file (``"-trans.fif"``). + size : tuple + The dimensions (width, height) of the rendering view. The default is + ``(800, 600)``. + bgcolor : tuple of float | str + The background color as a tuple (red, green, blue) of float + values between ``0`` and ``1`` or a valid color name (i.e. ``'white'`` + or ``'w'``). Defaults to ``'grey'``. + show : bool + Display the window as soon as it is ready. Defaults to ``True``. + block : bool + Whether to halt program execution until the GUI has been closed + (``True``) or not (``False``, default). + %(fullscreen)s + The default is ``False``. + + .. versionadded:: 1.1 + %(interaction_scene)s + Defaults to ``'terrain'``. + + .. versionadded:: 1.0 + %(verbose)s + + Attributes + ---------- + coreg : mne.coreg.Coregistration + The coregistration instance used by the graphical interface. + """ + + _subject = Unicode() + _subjects_dir = Unicode() + _lock_fids = Bool() + _current_fiducial = Unicode() + _info_file = Instance(Path, default_value=Path(".")) + _orient_glyphs = Bool() + _scale_by_distance = Bool() + _mark_inside = Bool() + _hpi_coils = Bool() + _head_shape_points = Bool() + _eeg_channels = Bool() + _meg_channels = Bool() + _fnirs_channels = Bool() + _head_resolution = Bool() + _head_opacity = Float() + _helmet = Bool() + _grow_hair = Float() + _subject_to = Unicode() + _scale_mode = Unicode() + _icp_fid_match = Unicode() + + @_qt_safe_window( + splash="_renderer.figure.splash", window="_renderer.figure.plotter" + ) + @verbose + def __init__( + self, + info_file, + *, + subject=None, + subjects_dir=None, + fiducials="auto", + head_resolution=None, + head_opacity=None, + hpi_coils=None, + head_shape_points=None, + eeg_channels=None, + meg_channels=None, + fnirs_channels=None, + orient_glyphs=None, + scale_by_distance=None, + mark_inside=None, + sensor_opacity=None, + trans=None, + size=None, + bgcolor=None, + show=True, + block=False, + fullscreen=False, + interaction="terrain", + verbose=None, + ): + from ..viz.backends.renderer import _get_renderer + + def _get_default(var, val): + return var if var is not None else val + + self._actors = dict() + self._surfaces = dict() + self._widgets = dict() + self._verbose = verbose + self._plot_locked = False + self._params_locked = False + self._refresh_rate_ms = max(int(round(1000.0 / 60.0)), 1) + self._redraws_pending = set() + self._parameter_mutex = threading.Lock() + self._redraw_mutex = threading.Lock() + self._job_queue = queue.Queue() + self._parameter_queue = queue.Queue() + self._head_geo = None + self._check_inside = None + self._nearest = None + self._coord_frame = "mri" + self._mouse_no_mvt = -1 + self._to_cf_t = None + self._omit_hsp_distance = 0.0 + self._fiducials_file = None + self._trans_modified = False + self._mri_fids_modified = False + self._mri_scale_modified = False + self._accept_close_event = True + self._fid_colors = tuple( + DEFAULTS["coreg"][f"{key}_color"] for key in ("lpa", "nasion", "rpa") + ) + self._defaults = dict( + 
size=_get_default(size, (800, 600)), + bgcolor=_get_default(bgcolor, "grey"), + orient_glyphs=_get_default(orient_glyphs, True), + scale_by_distance=_get_default(scale_by_distance, True), + mark_inside=_get_default(mark_inside, True), + hpi_coils=_get_default(hpi_coils, True), + head_shape_points=_get_default(head_shape_points, True), + eeg_channels=_get_default(eeg_channels, True), + meg_channels=_get_default(meg_channels, False), + fnirs_channels=_get_default(fnirs_channels, True), + head_resolution=_get_default(head_resolution, True), + head_opacity=_get_default(head_opacity, 0.8), + helmet=False, + sensor_opacity=_get_default(sensor_opacity, 1.0), + fiducials=("LPA", "Nasion", "RPA"), + fiducial="LPA", + lock_fids=True, + grow_hair=0.0, + subject_to="", + scale_modes=["None", "uniform", "3-axis"], + scale_mode="None", + icp_fid_matches=("nearest", "matched"), + icp_fid_match="matched", + icp_n_iterations=20, + omit_hsp_distance=10.0, + lock_head_opacity=self._head_opacity < 1.0, + weights=dict( + lpa=1.0, + nasion=10.0, + rpa=1.0, + hsp=1.0, + eeg=1.0, + hpi=1.0, + ), + ) + + # process requirements + info = None + subjects_dir = str( + get_subjects_dir(subjects_dir=subjects_dir, raise_error=True) + ) + subject = _get_default(subject, _get_subjects(subjects_dir)[0]) + + # setup the window + splash = "Initializing coregistration GUI..." if show else False + self._renderer = _get_renderer( + size=self._defaults["size"], + bgcolor=self._defaults["bgcolor"], + splash=splash, + fullscreen=fullscreen, + ) + self._renderer._window_close_connect(self._clean) + self._renderer._window_close_connect(self._close_callback, after=False) + self._renderer.set_interaction(interaction) + + # coregistration model setup + self._immediate_redraw = self._renderer._kind != "qt" + self._info = info + self._fiducials = fiducials + self.coreg = Coregistration( + info=self._info, + subject=subject, + subjects_dir=subjects_dir, + fiducials=fiducials, + on_defects="ignore", # safe due to interactive visual inspection + ) + fid_accurate = self.coreg._fid_accurate + for fid in self._defaults["weights"].keys(): + setattr(self, f"_{fid}_weight", self._defaults["weights"][fid]) + + # set main traits + self._set_head_opacity(self._defaults["head_opacity"]) + self._old_head_opacity = self._head_opacity + self._set_subjects_dir(subjects_dir) + self._set_subject(subject) + self._set_info_file(info_file) + self._set_orient_glyphs(self._defaults["orient_glyphs"]) + self._set_scale_by_distance(self._defaults["scale_by_distance"]) + self._set_mark_inside(self._defaults["mark_inside"]) + self._set_hpi_coils(self._defaults["hpi_coils"]) + self._set_head_shape_points(self._defaults["head_shape_points"]) + self._set_eeg_channels(self._defaults["eeg_channels"]) + self._set_meg_channels(self._defaults["meg_channels"]) + self._set_fnirs_channels(self._defaults["fnirs_channels"]) + self._set_head_resolution(self._defaults["head_resolution"]) + self._set_helmet(self._defaults["helmet"]) + self._set_grow_hair(self._defaults["grow_hair"]) + self._set_omit_hsp_distance(self._defaults["omit_hsp_distance"]) + self._set_icp_n_iterations(self._defaults["icp_n_iterations"]) + self._set_icp_fid_match(self._defaults["icp_fid_match"]) + + # configure UI + self._reset_fitting_parameters() + self._configure_dialogs() + self._configure_status_bar() + self._configure_dock() + self._configure_picking() + self._configure_legend() + + # once the docks are initialized + self._set_current_fiducial(self._defaults["fiducial"]) + 
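In practice this class is reached through the `mne.gui.coregistration` wrapper exported above; a hedged sketch (file and directory paths are hypothetical):

```python
import mne

# opens the coregistration window; block=True keeps it open until closed
mne.gui.coregistration(
    inst="sample_raw.fif",               # digitizer/info source (hypothetical)
    subject="sample",
    subjects_dir="/path/to/subjects_dir",
    block=True,
)
```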
self._set_scale_mode(self._defaults["scale_mode"]) + self._set_subject_to(self._defaults["subject_to"]) + if trans is not None: + self._load_trans(trans) + self._redraw() # we need the elements to be present now + + if fid_accurate: + assert self.coreg._fid_filename is not None + # _set_fiducials_file() calls _update_fiducials_label() + # internally + self._set_fiducials_file(self.coreg._fid_filename) + else: + self._set_head_resolution("high") + self._forward_widget_command("high_res_head", "set_value", True) + self._set_lock_fids(True) # hack to make the dig disappear + self._update_fiducials_label() + self._update_fiducials() + + self._set_lock_fids(fid_accurate) + + # configure worker + self._configure_worker() + + # must be done last + if show: + self._renderer.show() + # update the view once shown + views = { + True: dict(azimuth=90, elevation=90), # front + False: dict(azimuth=180, elevation=90), + } # left + self._renderer.set_camera(distance="auto", **views[self._lock_fids]) + self._redraw() + # XXX: internal plotter/renderer should not be exposed + if not self._immediate_redraw: + self._renderer.plotter.add_callback(self._redraw, self._refresh_rate_ms) + self._renderer.plotter.show_axes() + # initialization does not count as modification by the user + self._trans_modified = False + self._mri_fids_modified = False + self._mri_scale_modified = False + if block and self._renderer._kind != "notebook": + _qt_app_exec(self._renderer.figure.store["app"]) + + def _set_subjects_dir(self, subjects_dir): + if subjects_dir is None or not subjects_dir: + return + try: + subjects_dir = str( + _check_fname( + subjects_dir, + overwrite="read", + must_exist=True, + need_dir=True, + ) + ) + subjects = _get_subjects(subjects_dir) + low_res_path = _find_head_bem(subjects[0], subjects_dir, high_res=False) + high_res_path = _find_head_bem(subjects[0], subjects_dir, high_res=True) + valid = low_res_path is not None or high_res_path is not None + except Exception: + valid = False + if valid: + style = dict(border="initial") + self._subjects_dir = subjects_dir + else: + style = dict(border="2px solid #ff0000") + self._forward_widget_command("subjects_dir_field", "set_style", style) + + def _set_subject(self, subject): + self._subject = subject + + def _set_lock_fids(self, state): + self._lock_fids = bool(state) + + def _set_fiducials_file(self, fname): + if fname is None: + fids = "auto" + else: + fname = str( + _check_fname( + fname, + overwrite="read", + must_exist=True, + need_dir=False, + ) + ) + fids, _ = read_fiducials(fname) + + self._fiducials_file = fname + self.coreg._setup_fiducials(fids) + self._update_distance_estimation() + self._update_fiducials_label() + self._update_fiducials() + self._reset(keep_trans=True) + + if fname is None: + self._set_lock_fids(False) + self._forward_widget_command("reload_mri_fids", "set_enabled", False) + else: + self._set_lock_fids(True) + self._forward_widget_command("reload_mri_fids", "set_enabled", True) + self._display_message(f"Loading MRI fiducials from {fname}... 
Done!") + + def _set_current_fiducial(self, fid): + self._current_fiducial = fid.lower() + + def _set_info_file(self, fname): + if fname is None: + return + + # info file can be anything supported by read_raw + supported = _get_supported() + try: + check_fname( + fname, + "info", + tuple(supported), + endings_err=tuple(supported), + ) + fname = Path(fname) + # ctf ds `files` are actually directories + if fname.suffix == ".ds": + info_file = _check_fname( + fname, overwrite="read", must_exist=True, need_dir=True + ) + else: + info_file = _check_fname( + fname, overwrite="read", must_exist=True, need_dir=False + ) + valid = True + except OSError: + valid = False + if valid: + style = dict(border="initial") + self._info_file = info_file + else: + style = dict(border="2px solid #ff0000") + self._forward_widget_command("info_file_field", "set_style", style) + + def _set_omit_hsp_distance(self, distance): + self._omit_hsp_distance = distance + + def _set_orient_glyphs(self, state): + self._orient_glyphs = bool(state) + + def _set_scale_by_distance(self, state): + self._scale_by_distance = bool(state) + + def _set_mark_inside(self, state): + self._mark_inside = bool(state) + + def _set_hpi_coils(self, state): + self._hpi_coils = bool(state) + + def _set_head_shape_points(self, state): + self._head_shape_points = bool(state) + + def _set_eeg_channels(self, state): + self._eeg_channels = bool(state) + + def _set_meg_channels(self, state): + self._meg_channels = bool(state) + + def _set_fnirs_channels(self, state): + self._fnirs_channels = bool(state) + + def _set_head_resolution(self, state): + self._head_resolution = bool(state) + + def _set_head_opacity(self, value): + self._head_opacity = value + + def _set_helmet(self, state): + self._helmet = bool(state) + + def _set_grow_hair(self, value): + self._grow_hair = value + + def _set_subject_to(self, value): + self._subject_to = value + self._forward_widget_command("save_subject", "set_enabled", len(value) > 0) + if self._check_subject_exists(): + style = dict(border="2px solid #ff0000") + else: + style = dict(border="initial") + self._forward_widget_command("subject_to", "set_style", style) + + def _set_scale_mode(self, mode): + self._scale_mode = mode + + def _set_fiducial(self, value, coord): + self._mri_fids_modified = True + fid = self._current_fiducial + fid_idx = _map_fid_name_to_idx(name=fid) + + coords = ["X", "Y", "Z"] + coord_idx = coords.index(coord) + + self.coreg.fiducials.dig[fid_idx]["r"][coord_idx] = value / 1e3 + self._update_plot("mri_fids") + + def _set_parameter(self, value, mode_name, coord, plot_locked=False): + if mode_name == "scale": + self._mri_scale_modified = True + else: + self._trans_modified = True + if self._params_locked: + return + if mode_name == "scale" and self._scale_mode == "uniform": + with self._lock(params=True): + self._forward_widget_command(["sY", "sZ"], "set_value", value) + with self._parameter_mutex: + self._set_parameter_safe(value, mode_name, coord) + if not plot_locked: + self._update_plot("sensors") + + def _set_parameter_safe(self, value, mode_name, coord): + params = dict( + rotation=self.coreg._rotation, + translation=self.coreg._translation, + scale=self.coreg._scale, + ) + idx = ["X", "Y", "Z"].index(coord) + if mode_name == "rotation": + params[mode_name][idx] = np.deg2rad(value) + elif mode_name == "translation": + params[mode_name][idx] = value / 1e3 + else: + assert mode_name == "scale" + if self._scale_mode == "uniform": + params[mode_name][:] = value / 1e2 + else: + 
params[mode_name][idx] = value / 1e2 + self._update_plot("head") + self.coreg._update_params( + rot=params["rotation"], + tra=params["translation"], + sca=params["scale"], + ) + + def _set_icp_n_iterations(self, n_iterations): + self._icp_n_iterations = n_iterations + + def _set_icp_fid_match(self, method): + self._icp_fid_match = method + + def _set_point_weight(self, weight, point): + funcs = { + "hpi": "_set_hpi_coils", + "hsp": "_set_head_shape_points", + "eeg": "_set_eeg_channels", + "meg": "_set_meg_channels", + "fnirs": "_set_fnirs_channels", + } + if point in funcs.keys(): + getattr(self, funcs[point])(weight > 0) + setattr(self, f"_{point}_weight", weight) + setattr(self.coreg, f"_{point}_weight", weight) + self._update_distance_estimation() + + @observe("_subjects_dir") + def _subjects_dir_changed(self, change=None): + # XXX: add coreg.set_subjects_dir + self.coreg._subjects_dir = self._subjects_dir + subjects = _get_subjects(self._subjects_dir) + + if self._subject not in subjects: # Just pick the first available one + self._subject = subjects[0] + + self._reset() + + @observe("_subject") + def _subject_changed(self, change=None): + # XXX: add coreg.set_subject() + self.coreg._subject = self._subject + self.coreg._setup_bem() + self.coreg._setup_fiducials(self._fiducials) + self._reset() + + default_fid_fname = fid_fname.format( + subjects_dir=self._subjects_dir, subject=self._subject + ) + if Path(default_fid_fname).exists(): + fname = default_fid_fname + else: + fname = None + + self._set_fiducials_file(fname) + self._reset_fiducials() + + @observe("_lock_fids") + def _lock_fids_changed(self, change=None): + locked_widgets = [ + # MRI fiducials + "save_mri_fids", + # View options + "helmet", + "meg", + "head_opacity", + "high_res_head", + # Digitization source + "info_file", + "grow_hair", + "omit_distance", + "omit", + "reset_omit", + # Scaling + "scaling_mode", + "sX", + "sY", + "sZ", + # Transformation + "tX", + "tY", + "tZ", + "rX", + "rY", + "rZ", + # Fitting buttons + "fit_fiducials", + "fit_icp", + # Transformation I/O + "save_trans", + "load_trans", + "reset_trans", + # ICP + "icp_n_iterations", + "icp_fid_match", + "reset_fitting_options", + # Weights + "hsp_weight", + "eeg_weight", + "hpi_weight", + "lpa_weight", + "nasion_weight", + "rpa_weight", + ] + fits_widgets = ["fits_fiducials", "fits_icp"] + fid_widgets = ["fid_X", "fid_Y", "fid_Z", "fids_file", "fids"] + if self._lock_fids: + self._forward_widget_command(locked_widgets, "set_enabled", True) + self._forward_widget_command( + "head_opacity", "set_value", self._old_head_opacity + ) + self._scale_mode_changed() + self._display_message() + self._update_distance_estimation() + else: + self._old_head_opacity = self._head_opacity + self._forward_widget_command("head_opacity", "set_value", 1.0) + self._forward_widget_command(locked_widgets, "set_enabled", False) + self._forward_widget_command(fits_widgets, "set_enabled", False) + self._display_message( + f"Placing MRI fiducials - {self._current_fiducial.upper()}" + ) + + self._set_sensors_visibility(self._lock_fids) + self._forward_widget_command("lock_fids", "set_value", self._lock_fids) + self._forward_widget_command(fid_widgets, "set_enabled", not self._lock_fids) + + @observe("_current_fiducial") + def _current_fiducial_changed(self, change=None): + self._update_fiducials() + self._follow_fiducial_view() + if not self._lock_fids: + self._display_message( + f"Placing MRI fiducials - {self._current_fiducial.upper()}" + ) + + @observe("_info_file") + def 
_info_file_changed(self, change=None): + if not self._info_file: + return + elif self._info_file.name.endswith((".fif", ".fif.gz")): + fid, tree, _ = fiff_open(self._info_file) + fid.close() + if len(dir_tree_find(tree, FIFF.FIFFB_MEAS_INFO)) > 0: + self._info = read_info(self._info_file, verbose=False) + elif len(dir_tree_find(tree, FIFF.FIFFB_ISOTRAK)) > 0: + self._info = _empty_info(1) + self._info["dig"] = read_dig_fif(fname=self._info_file).dig + self._info._unlocked = False + else: + self._info = read_raw(self._info_file).info + # XXX: add coreg.set_info() + self.coreg._info = self._info + self.coreg._setup_digs() + self._reset() + + @observe("_orient_glyphs") + def _orient_glyphs_changed(self, change=None): + self._update_plot(["hpi", "hsp", "sensors"]) + + @observe("_scale_by_distance") + def _scale_by_distance_changed(self, change=None): + self._update_plot(["hpi", "hsp", "sensors"]) + + @observe("_mark_inside") + def _mark_inside_changed(self, change=None): + self._update_plot("hsp") + + @observe("_hpi_coils") + def _hpi_coils_changed(self, change=None): + self._update_plot("hpi") + + @observe("_head_shape_points") + def _head_shape_point_changed(self, change=None): + self._update_plot("hsp") + + @observe("_eeg_channels") + def _eeg_channels_changed(self, change=None): + self._update_plot("sensors") + + @observe("_meg_channels") + def _meg_channels_changed(self, change=None): + self._update_plot("sensors") + + @observe("_fnirs_channels") + def _fnirs_channels_changed(self, change=None): + self._update_plot("sensors") + + @observe("_head_resolution") + def _head_resolution_changed(self, change=None): + self._update_plot(["head", "hsp"]) + + @observe("_head_opacity") + def _head_opacity_changed(self, change=None): + if "head" in self._actors: + self._actors["head"].GetProperty().SetOpacity(self._head_opacity) + self._renderer._update() + + @observe("_helmet") + def _helmet_changed(self, change=None): + self._update_plot("helmet") + + @observe("_grow_hair") + def _grow_hair_changed(self, change=None): + self.coreg.set_grow_hair(self._grow_hair) + self._update_plot("head") + self._update_plot("hsp") # inside/outside could change + + @observe("_scale_mode") + def _scale_mode_changed(self, change=None): + locked_widgets = ["sX", "sY", "sZ", "fits_icp", "subject_to"] + mode = None if self._scale_mode == "None" else self._scale_mode + self.coreg.set_scale_mode(mode) + if self._lock_fids: + self._forward_widget_command( + locked_widgets, "set_enabled", mode is not None + ) + self._forward_widget_command( + "fits_fiducials", "set_enabled", mode not in (None, "3-axis") + ) + if self._scale_mode == "uniform": + self._forward_widget_command(["sY", "sZ"], "set_enabled", False) + + @observe("_icp_fid_match") + def _icp_fid_match_changed(self, change=None): + self.coreg.set_fid_match(self._icp_fid_match) + + def _run_worker(self, queue, jobs): + while True: + data = queue.get() + func = jobs[data._name] + if data._params is not None: + func(**data._params) + else: + func() + queue.task_done() + + def _configure_dialogs(self): + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + + for name, buttons in zip( + ["overwrite_subject", "overwrite_subject_exit"], + [["Yes", "No"], ["Yes", "Discard", "Cancel"]], + ): + self._widgets[name] = self._renderer._dialog_create( + title="CoregistrationUI", + text="The name of the output subject used to " + "save the scaled anatomy already exists.", + info_text="Do you want to overwrite?", + callback=self._overwrite_subject_callback, + buttons=buttons, 
+ modal=not MNE_3D_BACKEND_TESTING, + ) + + def _configure_worker(self): + work_plan = { + "_job_queue": dict(save_subject=self._save_subject), + "_parameter_queue": dict(set_parameter=self._set_parameter), + } + for queue_name, jobs in work_plan.items(): + t = threading.Thread( + target=partial( + self._run_worker, + queue=getattr(self, queue_name), + jobs=jobs, + ) + ) + t.daemon = True + t.start() + + def _configure_picking(self): + self._renderer._update_picking_callback( + self._on_mouse_move, + self._on_button_press, + self._on_button_release, + self._on_pick, + ) + + def _configure_legend(self): + colors = [ + np.array(DEFAULTS["coreg"][f"{fid.lower()}_color"]).astype(float) + for fid in self._defaults["fiducials"] + ] + labels = list(zip(self._defaults["fiducials"], colors)) + mri_fids_legend_actor = self._renderer.legend(labels=labels) + self._update_actor("mri_fids_legend", mri_fids_legend_actor) + + @safe_event + @verbose + def _redraw(self, *, verbose=None): + if not self._redraws_pending: + return + draw_map = dict( + head=self._add_head_surface, + mri_fids=self._add_mri_fiducials, + hsp=self._add_head_shape_points, + hpi=self._add_hpi_coils, + sensors=self._add_channels, + head_fids=self._add_head_fiducials, + helmet=self._add_helmet, + ) + with self._redraw_mutex: + # We need at least "head" before "hsp", because the grow_hair param + # for head sets the rr that are used for inside/outside hsp + redraws_ordered = sorted( + self._redraws_pending, key=lambda key: list(draw_map).index(key) + ) + logger.debug(f"Redrawing {redraws_ordered}") + for ki, key in enumerate(redraws_ordered): + logger.debug(f"{ki}. Drawing {repr(key)}") + draw_map[key]() + self._redraws_pending.clear() + self._renderer._update() + # necessary for MacOS + if platform.system() == "Darwin": + self._renderer._process_events() + + def _on_mouse_move(self, vtk_picker, event): + if self._mouse_no_mvt: + self._mouse_no_mvt -= 1 + + def _on_button_press(self, vtk_picker, event): + self._mouse_no_mvt = 2 + + def _on_button_release(self, vtk_picker, event): + if self._mouse_no_mvt > 0: + x, y = vtk_picker.GetEventPosition() + # XXX: internal plotter/renderer should not be exposed + plotter = self._renderer.figure.plotter + picked_renderer = self._renderer.figure.plotter.renderer + # trigger the pick + plotter.picker.Pick(x, y, 0, picked_renderer) + self._mouse_no_mvt = 0 + + def _on_pick(self, vtk_picker, event): + if self._lock_fids: + return + # XXX: taken from Brain, can be refactored + cell_id = vtk_picker.GetCellId() + mesh = vtk_picker.GetDataSet() + if mesh is None or cell_id == -1 or not self._mouse_no_mvt: + return + if not getattr(mesh, "_picking_target", False): + return + pos = np.array(vtk_picker.GetPickPosition()) + vtk_cell = mesh.GetCell(cell_id) + cell = [ + vtk_cell.GetPointId(point_id) + for point_id in range(vtk_cell.GetNumberOfPoints()) + ] + vertices = mesh.points[cell] + idx = np.argmin(abs(vertices - pos), axis=0) + vertex_id = cell[idx[0]] + + fiducials = [s.lower() for s in self._defaults["fiducials"]] + idx = fiducials.index(self._current_fiducial.lower()) + # XXX: add coreg.set_fids + self.coreg._fid_points[idx] = self._surfaces["head"].points[vertex_id] + self.coreg._reset_fiducials() + self._update_fiducials() + self._update_plot("mri_fids") + + def _reset_fitting_parameters(self): + self._forward_widget_command( + "icp_n_iterations", "set_value", self._defaults["icp_n_iterations"] + ) + self._forward_widget_command( + "icp_fid_match", "set_value", self._defaults["icp_fid_match"] + ) 
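+ # Restore every fitting weight to its default through the matching
+ # "*_weight" spin-box widget (one default per entry in self._defaults["weights"]).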
+ weights_widgets = [f"{w}_weight" for w in self._defaults["weights"].keys()] + self._forward_widget_command( + weights_widgets, "set_value", list(self._defaults["weights"].values()) + ) + + def _reset_fiducials(self): + self._set_current_fiducial(self._defaults["fiducial"]) + + def _omit_hsp(self): + self.coreg.omit_head_shape_points(self._omit_hsp_distance / 1e3) + n_omitted = np.sum(~self.coreg._extra_points_filter) + n_remaining = len(self.coreg._dig_dict["hsp"]) - n_omitted + self._update_plot("hsp") + self._update_distance_estimation() + self._display_message( + f"{n_omitted} head shape points omitted, {n_remaining} remaining." + ) + + def _reset_omit_hsp_filter(self): + self.coreg._extra_points_filter = None + self.coreg._update_params(force_update=True) + self._update_plot("hsp") + self._update_distance_estimation() + n_total = len(self.coreg._dig_dict["hsp"]) + self._display_message( + f"No head shape point is omitted, the total is {n_total}." + ) + + @verbose + def _update_plot(self, changes="all", verbose=None): + # Update list of things that need to be updated/plotted (and maybe + # draw them immediately) + try: + fun_name = inspect.currentframe().f_back.f_back.f_code.co_name + except Exception: # just in case one of these attrs is missing + fun_name = "unknown" + logger.debug(f"Updating plots based on {fun_name}: {repr(changes)}") + if self._plot_locked: + return + if self._info is None: + changes = ["head", "mri_fids"] + self._to_cf_t = dict(mri=dict(trans=np.eye(4)), head=None) + else: + self._to_cf_t = _get_transforms_to_coord_frame( + self._info, self.coreg.trans, coord_frame=self._coord_frame + ) + all_keys = ( + "head", + "mri_fids", # MRI first + "hsp", + "hpi", + "sensors", + "head_fids", # then dig + "helmet", + ) + if changes == "all": + changes = list(all_keys) + elif changes == "sensors": + changes = all_keys[2:] # omit MRI ones + elif isinstance(changes, str): + changes = [changes] + changes = set(changes) + # ideally we would maybe have this in: + # with self._redraw_mutex: + # it would reduce "jerkiness" of the updates, but this should at least + # work okay + bad = changes.difference(set(all_keys)) + assert len(bad) == 0, f"Unknown changes: {bad}" + self._redraws_pending.update(changes) + if self._immediate_redraw: + self._redraw() + + @contextmanager + def _lock(self, plot=False, params=False, scale_mode=False, fitting=False): + """Which part of the UI to temporarily disable.""" + if plot: + old_plot_locked = self._plot_locked + self._plot_locked = True + if params: + old_params_locked = self._params_locked + self._params_locked = True + if scale_mode: + old_scale_mode = self.coreg._scale_mode + self.coreg._scale_mode = None + if fitting: + widgets = [ + "sX", + "sY", + "sZ", + "tX", + "tY", + "tZ", + "rX", + "rY", + "rZ", + "fit_icp", + "fit_fiducials", + "fits_icp", + "fits_fiducials", + ] + states = [ + self._forward_widget_command( + w, "is_enabled", None, input_value=False, output_value=True + ) + for w in widgets + ] + self._forward_widget_command(widgets, "set_enabled", False) + try: + yield + finally: + if plot: + self._plot_locked = old_plot_locked + if params: + self._params_locked = old_params_locked + if scale_mode: + self.coreg._scale_mode = old_scale_mode + if fitting: + for idx, w in enumerate(widgets): + self._forward_widget_command(w, "set_enabled", states[idx]) + + def _display_message(self, msg=""): + self._forward_widget_command("status_message", "set_value", msg) + self._forward_widget_command("status_message", "show", None, 
input_value=False) + self._forward_widget_command( + "status_message", "update", None, input_value=False + ) + if msg: + logger.info(msg) + + def _follow_fiducial_view(self): + fid = self._current_fiducial.lower() + view = dict(lpa="left", rpa="right", nasion="front") + kwargs = dict(front=(90.0, 90.0), left=(180, 90), right=(0.0, 90)) + kwargs = dict(zip(("azimuth", "elevation"), kwargs[view[fid]])) + if not self._lock_fids: + self._renderer.set_camera(distance="auto", **kwargs) + + def _update_fiducials(self): + fid = self._current_fiducial + if not fid: + return + + idx = _map_fid_name_to_idx(name=fid) + val = self.coreg.fiducials.dig[idx]["r"] * 1e3 + + with self._lock(plot=True): + self._forward_widget_command(["fid_X", "fid_Y", "fid_Z"], "set_value", val) + + def _update_distance_estimation(self): + value = ( + self.coreg._get_fiducials_distance_str() + + "\n" + + self.coreg._get_point_distance_str() + ) + dists = self.coreg.compute_dig_mri_distances() * 1e3 + if self._hsp_weight > 0: + if len(dists) == 0: + value += "\nNo head shape points found." + else: + value += ( + "\nHSP <-> MRI (mean/min/max): " + f"{np.mean(dists):.2f} " + f"/ {np.min(dists):.2f} / {np.max(dists):.2f} mm" + ) + self._forward_widget_command("fit_label", "set_value", value) + + def _update_parameters(self): + with self._lock(plot=True, params=True): + # rotation + deg = np.rad2deg(self.coreg._rotation) + logger.debug(f" Rotation: {deg}") + self._forward_widget_command(["rX", "rY", "rZ"], "set_value", deg) + # translation + mm = self.coreg._translation * 1e3 + logger.debug(f" Translation: {mm}") + self._forward_widget_command(["tX", "tY", "tZ"], "set_value", mm) + # scale + sc = self.coreg._scale * 1e2 + logger.debug(f" Scale: {sc}") + self._forward_widget_command(["sX", "sY", "sZ"], "set_value", sc) + + def _reset(self, keep_trans=False): + """Refresh the scene, and optionally reset transformation & scaling. + + Parameters + ---------- + keep_trans : bool + Whether to retain translation, rotation, and scaling; or reset them + to their default values (no translation, no rotation, no scaling). + """ + if not keep_trans: + self.coreg.set_scale(self.coreg._default_parameters[6:9]) + self.coreg.set_rotation(self.coreg._default_parameters[:3]) + self.coreg.set_translation(self.coreg._default_parameters[3:6]) + self._update_plot() + self._update_parameters() + self._update_distance_estimation() + + def _forward_widget_command( + self, names, command, value, input_value=True, output_value=False + ): + """Invoke a method of one or more widgets if the widgets exist. + + Parameters + ---------- + names : str | array-like of str + The widget names to operate on. + command : str + The method to invoke. + value : object | array-like + The value(s) to pass to the method. + input_value : bool + Whether the ``command`` accepts a ``value``. If ``False``, no + ``value`` will be passed to ``command``. + output_value : bool + Whether to return the return value of ``command``. + + Returns + ------- + ret : object | None + ``None`` if ``output_value`` is ``False``, and the return value of + ``command`` otherwise. 
+ """ + _validate_type(item=names, types=(str, list), item_name="names") + if isinstance(names, str): + names = [names] + + if not isinstance(value, str | float | int | dict | type(None)): + value = list(value) + assert len(names) == len(value) + + for idx, name in enumerate(names): + val = value[idx] if isinstance(value, list) else value + if name in self._widgets and self._widgets[name] is not None: + if input_value: + ret = getattr(self._widgets[name], command)(val) + else: + ret = getattr(self._widgets[name], command)() + if output_value: + return ret + + def _set_sensors_visibility(self, state): + sensors = [ + "head_fiducials", + "hpi_coils", + "head_shape_points", + "sensors", + "helmet", + ] + for sensor in sensors: + if sensor in self._actors and self._actors[sensor] is not None: + actors = self._actors[sensor] + actors = actors if isinstance(actors, list) else [actors] + for actor in actors: + actor.SetVisibility(state) + self._renderer._update() + + def _update_actor(self, actor_name, actor): + # XXX: internal plotter/renderer should not be exposed + # Work around PyVista sequential update bug with iterable until > 0.42.3 is req + # https://github.com/pyvista/pyvista/pull/5046 + actors = self._actors.get(actor_name) or [] # convert None to list + if not isinstance(actors, list): + actors = [actors] + for this_actor in actors: + self._renderer.plotter.remove_actor(this_actor, render=False) + self._actors[actor_name] = actor + + def _add_mri_fiducials(self): + mri_fids_actors = _plot_mri_fiducials( + self._renderer, + self.coreg._fid_points, + self._subjects_dir, + self._subject, + self._to_cf_t, + self._fid_colors, + ) + # disable picking on the markers + for actor in mri_fids_actors: + actor.SetPickable(False) + self._update_actor("mri_fiducials", mri_fids_actors) + + def _add_head_fiducials(self): + head_fids_actors = _plot_head_fiducials( + self._renderer, self._info, self._to_cf_t, self._fid_colors + ) + self._update_actor("head_fiducials", head_fids_actors) + + def _add_hpi_coils(self): + if self._hpi_coils: + hpi_actors = _plot_hpi_coils( + self._renderer, + self._info, + self._to_cf_t, + opacity=self._defaults["sensor_opacity"], + scale=DEFAULTS["coreg"]["extra_scale"], + orient_glyphs=self._orient_glyphs, + scale_by_distance=self._scale_by_distance, + surf=self._head_geo, + check_inside=self._check_inside, + nearest=self._nearest, + ) + else: + hpi_actors = None + self._update_actor("hpi_coils", hpi_actors) + + def _add_head_shape_points(self): + if self._head_shape_points: + hsp_actors = _plot_head_shape_points( + self._renderer, + self._info, + self._to_cf_t, + opacity=self._defaults["sensor_opacity"], + orient_glyphs=self._orient_glyphs, + scale_by_distance=self._scale_by_distance, + mark_inside=self._mark_inside, + surf=self._head_geo, + mask=self.coreg._extra_points_filter, + check_inside=self._check_inside, + nearest=self._nearest, + ) + else: + hsp_actors = None + self._update_actor("head_shape_points", hsp_actors) + + def _add_channels(self): + plot_types = dict(eeg=False, meg=False, fnirs=False) + if self._eeg_channels: + plot_types["eeg"] = ["original"] + if self._meg_channels: + plot_types["meg"] = ["sensors"] + if self._fnirs_channels: + plot_types["fnirs"] = ["sources", "detectors"] + sensor_alpha = dict( + eeg=self._defaults["sensor_opacity"], + fnirs=self._defaults["sensor_opacity"], + meg=0.25, + ) + picks = pick_types( + self._info, + ref_meg=False, + meg=True, + eeg=True, + fnirs=True, + exclude=(), + ) + these_actors = _plot_sensors_3d( + 
self._renderer, + self._info, + self._to_cf_t, + picks=picks, + warn_meg=False, + head_surf=self._head_geo, + units="m", + sensor_alpha=sensor_alpha, + orient_glyphs=self._orient_glyphs, + scale_by_distance=self._scale_by_distance, + surf=self._head_geo, + check_inside=self._check_inside, + nearest=self._nearest, + **plot_types, + ) + sens_actors = sum((these_actors or {}).values(), list()) + self._update_actor("sensors", sens_actors) + + def _add_head_surface(self): + bem = None + if self._head_resolution: + surface = "head-dense" + key = "high" + else: + surface = "head" + key = "low" + try: + head_actor, head_surf, _ = _plot_head_surface( + self._renderer, + surface, + self._subject, + self._subjects_dir, + bem, + self._coord_frame, + self._to_cf_t, + alpha=self._head_opacity, + ) + except OSError: + head_actor, head_surf, _ = _plot_head_surface( + self._renderer, + "head", + self._subject, + self._subjects_dir, + bem, + self._coord_frame, + self._to_cf_t, + alpha=self._head_opacity, + ) + key = "low" + self._update_actor("head", head_actor) + # mark head surface mesh to restrict picking + head_surf._picking_target = True + # We need to use _get_processed_mri_points to incorporate grow_hair + rr = self.coreg._get_processed_mri_points(key) * self.coreg._scale.T + head_surf.points = rr + head_surf.compute_normals() + self._surfaces["head"] = head_surf + tris = self._surfaces["head"].faces.reshape(-1, 4)[:, 1:] + assert tris.ndim == 2 and tris.shape[1] == 3, tris.shape + nn = self._surfaces["head"].point_normals + assert nn.shape == (len(rr), 3), nn.shape + self._head_geo = dict(rr=rr, tris=tris, nn=nn) + self._check_inside = _CheckInside(head_surf, mode="pyvista") + self._nearest = _DistanceQuery(rr) + + def _add_helmet(self): + if self._helmet: + logger.debug("Drawing helmet") + head_mri_t = _get_trans(self.coreg.trans, "head", "mri")[0] + helmet_actor, _, _ = _plot_helmet( + self._renderer, self._info, self._to_cf_t, head_mri_t, self._coord_frame + ) + else: + helmet_actor = None + self._update_actor("helmet", helmet_actor) + + def _fit_fiducials(self): + with self._lock(scale_mode=True): + self._fits_fiducials() + + def _fits_fiducials(self): + with self._lock(params=True, fitting=True): + start = time.time() + self.coreg.fit_fiducials( + lpa_weight=self._lpa_weight, + nasion_weight=self._nasion_weight, + rpa_weight=self._rpa_weight, + verbose=self._verbose, + ) + end = time.time() + self._display_message( + f"Fitting fiducials finished in {end - start:.2f} seconds." 
+ ) + self._update_plot("sensors") + self._update_parameters() + self._update_distance_estimation() + + def _fit_icp(self): + with self._lock(scale_mode=True): + self._fit_icp_real(update_head=False) + + def _fits_icp(self): + self._fit_icp_real(update_head=True) + + def _fit_icp_real(self, *, update_head): + with self._lock(params=True, fitting=True): + self._current_icp_iterations = 0 + updates = ["hsp", "hpi", "sensors", "head_fids", "helmet"] + if update_head: + updates.insert(0, "head") + + def callback(iteration, n_iterations): + self._display_message(f"Fitting ICP - iteration {iteration + 1}") + self._update_plot(updates) + self._current_icp_iterations += 1 + self._update_distance_estimation() + self._update_parameters() + self._renderer._process_events() # allow a draw or cancel + + start = time.time() + self.coreg.fit_icp( + n_iterations=self._icp_n_iterations, + lpa_weight=self._lpa_weight, + nasion_weight=self._nasion_weight, + rpa_weight=self._rpa_weight, + callback=callback, + verbose=self._verbose, + ) + end = time.time() + self._display_message() + self._display_message( + f"Fitting ICP finished in {end - start:.2f} seconds and " + f"{self._current_icp_iterations} iterations." + ) + del self._current_icp_iterations + + def _task_save_subject(self): + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + + if MNE_3D_BACKEND_TESTING: + self._save_subject() + else: + self._job_queue.put(_WorkerData("save_subject", None)) + + def _task_set_parameter(self, value, mode_name, coord): + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + + if MNE_3D_BACKEND_TESTING: + self._set_parameter(value, mode_name, coord, self._plot_locked) + else: + self._parameter_queue.put( + _WorkerData( + "set_parameter", + dict( + value=value, + mode_name=mode_name, + coord=coord, + plot_locked=self._plot_locked, + ), + ) + ) + + def _overwrite_subject_callback(self, button_name): + if button_name == "Yes": + self._save_subject_callback(overwrite=True) + elif button_name == "Cancel": + self._accept_close_event = False + else: + assert button_name == "No" or button_name == "Discard" + + def _check_subject_exists(self): + if not self._subject_to: + return False + subject_dirname = os.path.join("{subjects_dir}", "{subject}") + dest = subject_dirname.format( + subject=self._subject_to, subjects_dir=self._subjects_dir + ) + return os.path.exists(dest) + + def _save_subject(self, exit_mode=False): + dialog = "overwrite_subject_exit" if exit_mode else "overwrite_subject" + if self._check_subject_exists(): + self._forward_widget_command(dialog, "show", True) + else: + self._save_subject_callback() + + def _save_subject_callback(self, overwrite=False): + self._display_message(f"Saving {self._subject_to}...") + default_cursor = self._renderer._window_get_cursor() + self._renderer._window_set_cursor( + self._renderer._window_new_cursor("WaitCursor") + ) + + # prepare bem + bem_names = [] + if self._scale_mode != "None": + can_prepare_bem = _mri_subject_has_bem(self._subject, self._subjects_dir) + else: + can_prepare_bem = False + if can_prepare_bem: + pattern = bem_fname.format( + subjects_dir=self._subjects_dir, subject=self._subject, name="(.+-bem)" + ) + bem_dir, pattern = os.path.split(pattern) + for filename in os.listdir(bem_dir): + match = re.match(pattern, filename) + if match: + bem_names.append(match.group(1)) + + # save the scaled MRI + try: + self._display_message(f"Scaling {self._subject_to}...") + scale_mri( + subject_from=self._subject, + subject_to=self._subject_to, + 
scale=self.coreg._scale, + overwrite=overwrite, + subjects_dir=self._subjects_dir, + skip_fiducials=True, + labels=True, + annot=True, + on_defects="ignore", + ) + except Exception: + logger.error(f"Error scaling {self._subject_to}") + bem_names = [] + else: + self._display_message(f"Scaling {self._subject_to}... Done!") + + # Precompute BEM solutions + for bem_name in bem_names: + try: + self._display_message(f"Computing {bem_name} solution...") + bem_file = bem_fname.format( + subjects_dir=self._subjects_dir, + subject=self._subject_to, + name=bem_name, + ) + bemsol = make_bem_solution(bem_file) + write_bem_solution(bem_file[:-4] + "-sol.fif", bemsol) + except Exception: + logger.error(f"Error computing {bem_name} solution") + else: + self._display_message(f"Computing {bem_name} solution... Done!") + self._display_message(f"Saving {self._subject_to}... Done!") + self._renderer._window_set_cursor(default_cursor) + self._mri_scale_modified = False + + def _save_mri_fiducials(self, fname): + self._display_message(f"Saving {fname}...") + dig_montage = self.coreg.fiducials + write_fiducials( + fname=fname, pts=dig_montage.dig, coord_frame="mri", overwrite=True + ) + self._set_fiducials_file(fname) + self._display_message(f"Saving {fname}... Done!") + self._mri_fids_modified = False + + def _save_trans(self, fname): + write_trans(fname, self.coreg.trans, overwrite=True) + self._display_message(f"{fname} transform file is saved.") + self._trans_modified = False + + def _load_trans(self, trans): + if not isinstance(trans, Transform): + trans = read_trans(trans, return_all=True) + mri_head_t = _ensure_trans(trans, "mri", "head")["trans"] + rot_x, rot_y, rot_z = rotation_angles(mri_head_t) + x, y, z = mri_head_t[:3, 3] + self.coreg._update_params( + rot=np.array([rot_x, rot_y, rot_z]), + tra=np.array([x, y, z]), + ) + self._update_parameters() + self._update_distance_estimation() + self._update_plot() + self._display_message(f"{trans} transform file is loaded.") + + def _update_fiducials_label(self): + if self._fiducials_file is None: + text = ( + "
<p><strong>No custom MRI fiducials loaded!</strong></p>" + "<p>MRI fiducials could not be found in the standard " + "location. The displayed initial MRI fiducial locations " + "(diamonds) were derived from fsaverage. Place, lock, and " + "save fiducials to discard this message.</p>" + ) + else: + assert self._fiducials_file == fid_fname.format( + subjects_dir=self._subjects_dir, subject=self._subject + ) + assert self.coreg._fid_accurate is True + text = ( + f"<p><strong>MRI fiducials (diamonds) loaded from " + f"standard location:</strong></p>" + f"<p>{self._fiducials_file}</p>
" + ) + + self._forward_widget_command("mri_fiducials_label", "set_value", text) + + def _configure_dock(self): + if self._renderer._kind == "notebook": + collapse = True # collapsible and collapsed + else: + collapse = None # not collapsible + self._renderer._dock_initialize(name="Input", area="left", max_width="375px") + mri_subject_layout = self._renderer._dock_add_group_box( + name="MRI Subject", + collapse=collapse, + ) + subjects_dir_layout = self._renderer._dock_add_layout(vertical=False) + self._widgets["subjects_dir_field"] = self._renderer._dock_add_text( + name="subjects_dir_field", + value=self._subjects_dir, + placeholder="Subjects Directory", + callback=self._set_subjects_dir, + layout=subjects_dir_layout, + ) + self._widgets["subjects_dir"] = self._renderer._dock_add_file_button( + name="subjects_dir", + desc="Load", + func=self._set_subjects_dir, + is_directory=True, + icon=True, + tooltip="Load the path to the directory containing the " + "FreeSurfer subjects", + layout=subjects_dir_layout, + ) + self._renderer._layout_add_widget( + layout=mri_subject_layout, + widget=subjects_dir_layout, + ) + self._widgets["subject"] = self._renderer._dock_add_combo_box( + name="Subject", + value=self._subject, + rng=_get_subjects(self._subjects_dir), + callback=self._set_subject, + compact=True, + tooltip="Select the FreeSurfer subject name", + layout=mri_subject_layout, + ) + + mri_fiducials_layout = self._renderer._dock_add_group_box( + name="MRI Fiducials", + collapse=collapse, + ) + # Add MRI fiducials I/O widgets + self._widgets["mri_fiducials_label"] = self._renderer._dock_add_label( + value="", # Will be filled via _update_fiducials_label() + layout=mri_fiducials_layout, + selectable=True, + ) + # Reload & Save buttons go into their own layout widget + mri_fiducials_button_layout = self._renderer._dock_add_layout(vertical=False) + self._renderer._layout_add_widget( + layout=mri_fiducials_layout, widget=mri_fiducials_button_layout + ) + self._widgets["reload_mri_fids"] = self._renderer._dock_add_button( + name="Reload MRI Fid.", + callback=lambda: self._set_fiducials_file(self._fiducials_file), + tooltip="Reload MRI fiducials from the standard location", + layout=mri_fiducials_button_layout, + ) + # Disable reload button until we've actually loaded a fiducial file + # (happens in _set_fiducials_file method) + self._forward_widget_command("reload_mri_fids", "set_enabled", False) + + self._widgets["save_mri_fids"] = self._renderer._dock_add_button( + name="Save MRI Fid.", + callback=lambda: self._save_mri_fiducials( + fid_fname.format(subjects_dir=self._subjects_dir, subject=self._subject) + ), + tooltip="Save MRI fiducials to the standard location. 
Fiducials " + "must be locked first!", + layout=mri_fiducials_button_layout, + ) + self._widgets["lock_fids"] = self._renderer._dock_add_check_box( + name="Lock fiducials", + value=self._lock_fids, + callback=self._set_lock_fids, + tooltip="Lock/Unlock interactive fiducial editing", + layout=mri_fiducials_layout, + ) + self._widgets["fids"] = self._renderer._dock_add_radio_buttons( + value=self._defaults["fiducial"], + rng=self._defaults["fiducials"], + callback=self._set_current_fiducial, + vertical=False, + layout=mri_fiducials_layout, + ) + fiducial_coords_layout = self._renderer._dock_add_layout() + for coord in ("X", "Y", "Z"): + name = f"fid_{coord}" + self._widgets[name] = self._renderer._dock_add_spin_box( + name=coord, + value=0.0, + rng=[-1e3, 1e3], + callback=partial( + self._set_fiducial, + coord=coord, + ), + compact=True, + double=True, + step=1, + tooltip=f"Set the {coord} fiducial coordinate", + layout=fiducial_coords_layout, + ) + self._renderer._layout_add_widget(mri_fiducials_layout, fiducial_coords_layout) + + dig_source_layout = self._renderer._dock_add_group_box( + name="Info source with digitization", + collapse=collapse, + ) + info_file_layout = self._renderer._dock_add_layout(vertical=False) + self._widgets["info_file_field"] = self._renderer._dock_add_text( + name="info_file_field", + value=self._info_file, + placeholder="Path to info", + callback=self._set_info_file, + layout=info_file_layout, + ) + self._widgets["info_file"] = self._renderer._dock_add_file_button( + name="info_file", + desc="Load", + func=self._set_info_file, + icon=True, + tooltip="Load the FIFF file with digitization data for coregistration", + layout=info_file_layout, + ) + self._renderer._layout_add_widget( + layout=dig_source_layout, + widget=info_file_layout, + ) + self._widgets["grow_hair"] = self._renderer._dock_add_spin_box( + name="Grow Hair (mm)", + value=self._grow_hair, + rng=[0.0, 10.0], + callback=self._set_grow_hair, + tooltip="Compensate for hair on the digitizer head shape", + layout=dig_source_layout, + ) + omit_hsp_layout_1 = self._renderer._dock_add_layout(vertical=False) + omit_hsp_layout_2 = self._renderer._dock_add_layout(vertical=False) + self._widgets["omit_distance"] = self._renderer._dock_add_spin_box( + name="Omit Distance (mm)", + value=self._omit_hsp_distance, + rng=[0.0, 100.0], + callback=self._set_omit_hsp_distance, + tooltip="Set the head shape points exclusion distance", + layout=omit_hsp_layout_1, + ) + self._widgets["omit"] = self._renderer._dock_add_button( + name="Omit", + callback=self._omit_hsp, + tooltip="Exclude the head shape points that are far away from " + "the MRI head", + layout=omit_hsp_layout_2, + ) + self._widgets["reset_omit"] = self._renderer._dock_add_button( + name="Reset", + callback=self._reset_omit_hsp_filter, + tooltip="Reset all excluded head shape points", + layout=omit_hsp_layout_2, + ) + self._renderer._layout_add_widget(dig_source_layout, omit_hsp_layout_1) + self._renderer._layout_add_widget(dig_source_layout, omit_hsp_layout_2) + + view_options_layout = self._renderer._dock_add_group_box( + name="View Options", + collapse=collapse, + ) + self._widgets["helmet"] = self._renderer._dock_add_check_box( + name="Show MEG helmet", + value=self._helmet, + callback=self._set_helmet, + tooltip="Enable/Disable MEG helmet", + layout=view_options_layout, + ) + self._widgets["meg"] = self._renderer._dock_add_check_box( + name="Show MEG sensors", + value=self._meg_channels, + callback=self._set_meg_channels, + tooltip="Enable/Disable MEG 
sensors", + layout=view_options_layout, + ) + self._widgets["high_res_head"] = self._renderer._dock_add_check_box( + name="Show high-resolution head", + value=self._head_resolution, + callback=self._set_head_resolution, + tooltip="Enable/Disable high resolution head surface", + layout=view_options_layout, + ) + self._widgets["head_opacity"] = self._renderer._dock_add_slider( + name="Head opacity", + value=self._head_opacity, + rng=[0.25, 1.0], + callback=self._set_head_opacity, + compact=True, + double=True, + layout=view_options_layout, + ) + self._renderer._dock_add_stretch() + + self._renderer._dock_initialize( + name="Parameters", area="right", max_width="375px" + ) + mri_scaling_layout = self._renderer._dock_add_group_box( + name="MRI Scaling", + collapse=collapse, + ) + self._widgets["scaling_mode"] = self._renderer._dock_add_combo_box( + name="Scaling Mode", + value=self._defaults["scale_mode"], + rng=self._defaults["scale_modes"], + callback=self._set_scale_mode, + tooltip="Select the scaling mode", + compact=True, + layout=mri_scaling_layout, + ) + scale_params_layout = self._renderer._dock_add_group_box( + name="Scaling Parameters", + layout=mri_scaling_layout, + ) + coords = ["X", "Y", "Z"] + for coord in coords: + name = f"s{coord}" + attr = getattr(self.coreg, "_scale") + self._widgets[name] = self._renderer._dock_add_spin_box( + name=name, + value=attr[coords.index(coord)] * 1e2, + rng=[1.0, 10000.0], # percent + callback=partial( + self._set_parameter, + mode_name="scale", + coord=coord, + ), + compact=True, + double=True, + step=1, + tooltip=f"Set the {coord} scaling parameter (in %)", + layout=scale_params_layout, + ) + + fit_scale_layout = self._renderer._dock_add_layout(vertical=False) + self._widgets["fits_fiducials"] = self._renderer._dock_add_button( + name="Fit fiducials with scaling", + callback=self._fits_fiducials, + tooltip="Find MRI scaling, rotation, and translation to fit all " + "3 fiducials", + layout=fit_scale_layout, + ) + self._widgets["fits_icp"] = self._renderer._dock_add_button( + name="Fit ICP with scaling", + callback=self._fits_icp, + tooltip="Find MRI scaling, rotation, and translation to match the " + "head shape points", + layout=fit_scale_layout, + ) + self._renderer._layout_add_widget(scale_params_layout, fit_scale_layout) + subject_to_layout = self._renderer._dock_add_layout(vertical=False) + self._widgets["subject_to"] = self._renderer._dock_add_text( + name="subject-to", + value=self._subject_to, + placeholder="subject name", + callback=self._set_subject_to, + layout=subject_to_layout, + ) + self._widgets["save_subject"] = self._renderer._dock_add_button( + name="Save scaled anatomy", + callback=self._task_save_subject, + tooltip="Save scaled anatomy", + layout=subject_to_layout, + ) + self._renderer._layout_add_widget(mri_scaling_layout, subject_to_layout) + param_layout = self._renderer._dock_add_group_box( + name="Translation (t) and Rotation (r)", + collapse=collapse, + ) + for coord in coords: + coord_layout = self._renderer._dock_add_layout(vertical=False) + for mode, mode_name in (("t", "Translation"), ("r", "Rotation")): + name = f"{mode}{coord}" + attr = getattr(self.coreg, f"_{mode_name.lower()}") + rng = [-360, 360] if mode_name == "Rotation" else [-100, 100] + unit = "°" if mode_name == "Rotation" else "mm" + self._widgets[name] = self._renderer._dock_add_spin_box( + name=name, + value=attr[coords.index(coord)] * 1e3, + rng=np.array(rng), + callback=partial( + self._task_set_parameter, + mode_name=mode_name.lower(), + 
coord=coord, + ), + compact=True, + double=True, + step=1, + tooltip=f"Set the {coord} {mode_name.lower()}" + f" parameter (in {unit})", + layout=coord_layout, + ) + self._renderer._layout_add_widget(param_layout, coord_layout) + + fit_layout = self._renderer._dock_add_layout(vertical=False) + self._widgets["fit_fiducials"] = self._renderer._dock_add_button( + name="Fit fiducials", + callback=self._fit_fiducials, + tooltip="Find rotation and translation to fit all 3 fiducials", + layout=fit_layout, + ) + self._widgets["fit_icp"] = self._renderer._dock_add_button( + name="Fit ICP", + callback=self._fit_icp, + tooltip="Find rotation and translation to match the head shape points", + layout=fit_layout, + ) + self._renderer._layout_add_widget(param_layout, fit_layout) + trans_layout = self._renderer._dock_add_group_box( + name="HEAD <> MRI Transform", + collapse=collapse, + ) + save_trans_layout = self._renderer._dock_add_layout(vertical=False) + self._widgets["save_trans"] = self._renderer._dock_add_file_button( + name="save_trans", + desc="Save...", + save=True, + func=self._save_trans, + tooltip="Save the transform file to disk", + layout=save_trans_layout, + filter_="Head->MRI transformation (*-trans.fif *_trans.fif)", + initial_directory=self._info_file.parent, + ) + self._widgets["load_trans"] = self._renderer._dock_add_file_button( + name="load_trans", + desc="Load...", + func=self._load_trans, + tooltip="Load the transform file from disk", + layout=save_trans_layout, + filter_="Head->MRI transformation (*-trans.fif *_trans.fif)", + initial_directory=self._info_file.parent, + ) + self._renderer._layout_add_widget(trans_layout, save_trans_layout) + self._widgets["reset_trans"] = self._renderer._dock_add_button( + name="Reset Parameters", + callback=self._reset, + tooltip="Reset all the parameters affecting the coregistration", + layout=trans_layout, + ) + + fitting_options_layout = self._renderer._dock_add_group_box( + name="Fitting Options", + collapse=collapse, + ) + self._widgets["fit_label"] = self._renderer._dock_add_label( + value="", + layout=fitting_options_layout, + ) + self._widgets["icp_n_iterations"] = self._renderer._dock_add_spin_box( + name="Number Of ICP Iterations", + value=self._defaults["icp_n_iterations"], + rng=[1, 100], + callback=self._set_icp_n_iterations, + compact=True, + double=False, + tooltip="Set the number of ICP iterations", + layout=fitting_options_layout, + ) + self._widgets["icp_fid_match"] = self._renderer._dock_add_combo_box( + name="Fiducial point matching", + value=self._defaults["icp_fid_match"], + rng=self._defaults["icp_fid_matches"], + callback=self._set_icp_fid_match, + compact=True, + tooltip="Select the fiducial point matching method", + layout=fitting_options_layout, + ) + weights_layout = self._renderer._dock_add_group_box( + name="Weights", + layout=fitting_options_layout, + ) + for point, fid in zip(("HSP", "EEG", "HPI"), self._defaults["fiducials"]): + weight_layout = self._renderer._dock_add_layout(vertical=False) + point_lower = point.lower() + name = f"{point_lower}_weight" + self._widgets[name] = self._renderer._dock_add_spin_box( + name=point, + value=getattr(self, f"_{point_lower}_weight"), + rng=[0.0, 100.0], + callback=partial(self._set_point_weight, point=point_lower), + compact=True, + double=True, + tooltip=f"Set the {point} weight", + layout=weight_layout, + ) + + fid_lower = fid.lower() + name = f"{fid_lower}_weight" + self._widgets[name] = self._renderer._dock_add_spin_box( + name=fid, + value=getattr(self, 
f"_{fid_lower}_weight"), + rng=[0.0, 100.0], + callback=partial(self._set_point_weight, point=fid_lower), + compact=True, + double=True, + tooltip=f"Set the {fid} weight", + layout=weight_layout, + ) + self._renderer._layout_add_widget(weights_layout, weight_layout) + self._widgets["reset_fitting_options"] = self._renderer._dock_add_button( + name="Reset Fitting Options", + callback=self._reset_fitting_parameters, + tooltip="Reset all the fitting parameters to default value", + layout=fitting_options_layout, + ) + self._renderer._dock_add_stretch() + + def _configure_status_bar(self): + self._renderer._status_bar_initialize() + self._widgets["status_message"] = self._renderer._status_bar_add_label( + "", stretch=1 + ) + self._forward_widget_command( + "status_message", "hide", value=None, input_value=False + ) + + def _clean(self): + if not self._accept_close_event: + return + self._renderer = None + self._widgets.clear() + self._actors.clear() + self._surfaces.clear() + self._defaults.clear() + self._head_geo = None + self._check_inside = None + self._nearest = None + self._redraw = None + + @safe_event + def close(self): + """Close interface and cleanup data structure.""" + if self._renderer is not None: + self._renderer.close() + + def _close_dialog_callback(self, button_name): + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + + self._accept_close_event = True + if button_name == "Save": + if self._trans_modified: + self._forward_widget_command("save_trans", "set_value", None) + # cancel means _save_trans is not called + if self._trans_modified: + self._accept_close_event = False + if self._mri_fids_modified: + self._forward_widget_command("save_mri_fids", "set_value", None) + if self._mri_scale_modified: + if self._subject_to: + self._save_subject(exit_mode=True) + else: + dialog = self._renderer._dialog_create( + title="CoregistrationUI", + text="The name of the output subject used to " + "save the scaled anatomy is not set.", + info_text="Please set a subject name", + callback=lambda x: None, + buttons=["Ok"], + modal=not MNE_3D_BACKEND_TESTING, + ) + dialog.show() + self._accept_close_event = False + elif button_name == "Cancel": + self._accept_close_event = False + else: + assert button_name == "Discard" + + def _close_callback(self): + if self._trans_modified or self._mri_fids_modified or self._mri_scale_modified: + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + + # prepare the dialog's text + text = "The following is/are not saved:" + text += "
    " + if self._trans_modified: + text += "
  • Head<>MRI transform
  • " + if self._mri_fids_modified: + text += "
  • MRI fiducials
  • " + if self._mri_scale_modified: + text += "
  • scaled subject MRI
  • " + text += "
" + self._widgets["close_dialog"] = self._renderer._dialog_create( + title="CoregistrationUI", + text=text, + info_text="Do you want to save?", + callback=self._close_dialog_callback, + buttons=["Save", "Discard", "Cancel"], + # modal=True means that the dialog blocks the application + # when show() is called, until one of the buttons is clicked + modal=not MNE_3D_BACKEND_TESTING, + ) + self._widgets["close_dialog"].show() + return self._accept_close_event diff --git a/mne/gui/_gui.py b/mne/gui/_gui.py new file mode 100644 index 0000000..b8898d8 --- /dev/null +++ b/mne/gui/_gui.py @@ -0,0 +1,222 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ..utils import get_config, verbose + + +@verbose +def coregistration( + *, + width=None, + height=None, + inst=None, + subject=None, + subjects_dir=None, + head_opacity=None, + head_high_res=None, + trans=None, + orient_to_surface=None, + scale_by_distance=None, + mark_inside=None, + interaction=None, + fullscreen=None, + show=True, + block=False, + verbose=None, +): + """Coregister an MRI with a subject's head shape. + + The GUI can be launched through the command line interface: + + .. code-block:: bash + + $ mne coreg + + or using a python interpreter as shown in :ref:`tut-source-alignment`. + + Parameters + ---------- + width : int | None + Specify the width for window (in logical pixels). + Default is None, which uses ``MNE_COREG_WINDOW_WIDTH`` config value + (which defaults to ``800``). + height : int | None + Specify a height for window (in logical pixels). + Default is None, which uses ``MNE_COREG_WINDOW_WIDTH`` config value + (which defaults to ``400``). + inst : None | path-like + Path to an instance file containing the digitizer data. Compatible for + Raw, Epochs, and Evoked files. + subject : None | str + Name of the mri subject. + %(subjects_dir)s + head_opacity : float | None + The opacity of the head surface in the range ``[0., 1.]``. + Default is None, which uses ``MNE_COREG_HEAD_OPACITY`` config value + (which defaults to ``1.``). + head_high_res : bool | None + Use a high resolution head surface. + Default is None, which uses ``MNE_COREG_HEAD_HIGH_RES`` config value + (which defaults to True). + trans : path-like | Transform | None + The Head<->MRI transform or the path to its FIF file (``"-trans.fif"``). + orient_to_surface : bool | None + If True (default), orient EEG electrode and head shape points to the head + surface. + + .. versionadded:: 0.16 + scale_by_distance : bool | None + If True (default), scale the digitization points by their distance from the + scalp surface. + + .. versionadded:: 0.16 + mark_inside : bool | None + If True (default), mark points inside the head surface in a + different color. + + .. versionadded:: 0.16 + %(interaction_scene_none)s + Defaults to ``'terrain'``. + + .. versionadded:: 0.16 + .. versionchanged:: 1.0 + Default interaction mode if ``None`` and no config setting found + changed from ``'trackball'`` to ``'terrain'``. + %(fullscreen)s + Default is ``None``, which uses ``MNE_COREG_FULLSCREEN`` config value + (which defaults to ``False``). + + .. versionadded:: 1.1 + show : bool + Show the GUI if True. + block : bool + Whether to halt program execution until the figure is closed. + %(verbose)s + + Returns + ------- + frame : instance of CoregistrationUI + The coregistration frame. 
+ + Notes + ----- + Many parameters (e.g., ``head_opacity``) take None as a parameter, + which means that the default will be read from the MNE-Python + configuration file (which gets saved when exiting). + + Step by step instructions for the coregistrations are shown below: + + .. youtube:: ALV5qqMHLlQ + """ + config = get_config() + if head_high_res is None: + head_high_res = config.get("MNE_COREG_HEAD_HIGH_RES", "true") == "true" + if head_opacity is None: + head_opacity = config.get("MNE_COREG_HEAD_OPACITY", 0.8) + if width is None: + width = config.get("MNE_COREG_WINDOW_WIDTH", 800) + if height is None: + height = config.get("MNE_COREG_WINDOW_HEIGHT", 600) + if subjects_dir is None: + if "SUBJECTS_DIR" in config: + subjects_dir = config["SUBJECTS_DIR"] + elif "MNE_COREG_SUBJECTS_DIR" in config: + subjects_dir = config["MNE_COREG_SUBJECTS_DIR"] + false_like = ("false", "0") + if orient_to_surface is None: + orient_to_surface = config.get("MNE_COREG_ORIENT_TO_SURFACE", "true").lower() + orient_to_surface = orient_to_surface not in false_like + if scale_by_distance is None: + scale_by_distance = config.get("MNE_COREG_SCALE_BY_DISTANCE", "true").lower() + scale_by_distance = scale_by_distance not in false_like + if interaction is None: + interaction = config.get("MNE_COREG_INTERACTION", "terrain") + if mark_inside is None: + mark_inside = config.get("MNE_COREG_MARK_INSIDE", "true").lower() + mark_inside = mark_inside not in false_like + if fullscreen is None: + fullscreen = config.get("MNE_COREG_FULLSCREEN", "") == "true" + head_opacity = float(head_opacity) + width = int(width) + height = int(height) + + from ..viz.backends.renderer import MNE_3D_BACKEND_TESTING + from ._coreg import CoregistrationUI + + if MNE_3D_BACKEND_TESTING: + show = block = False + return CoregistrationUI( + info_file=inst, + subject=subject, + subjects_dir=subjects_dir, + head_resolution=head_high_res, + head_opacity=head_opacity, + orient_glyphs=orient_to_surface, + scale_by_distance=scale_by_distance, + mark_inside=mark_inside, + trans=trans, + size=(width, height), + show=show, + block=block, + interaction=interaction, + fullscreen=fullscreen, + verbose=verbose, + ) + + +class _GUIScraper: + """Scrape GUI outputs.""" + + def __repr__(self): + return "" + + def __call__(self, block, block_vars, gallery_conf): + from ._coreg import CoregistrationUI + + gui_classes = (CoregistrationUI,) + try: + from mne_gui_addons._ieeg_locate import IntracranialElectrodeLocator + except Exception: + pass + else: + gui_classes = gui_classes + (IntracranialElectrodeLocator,) + from qtpy import QtGui + from sphinx_gallery.scrapers import figure_rst + + for gui in block_vars["example_globals"].values(): + if ( + isinstance(gui, gui_classes) + and not getattr(gui, "_scraped", False) + and gallery_conf["builder_name"] == "html" + ): + gui._scraped = True # monkey-patch but it's easy enough + img_fname = next(block_vars["image_path_iterator"]) + # TODO fix in window refactor + window = gui if hasattr(gui, "grab") else gui._renderer._window + # window is QWindow + # https://doc.qt.io/qt-5/qwidget.html#grab + pixmap = window.grab() + if hasattr(gui, "_renderer"): # if no renderer, no need + # Now the tricky part: we need to get the 3D renderer, + # extract the image from it, and put it in the correct + # place in the pixmap. The easiest way to do this is + # actually to save the 3D image first, then load it + # using QPixmap and Qt geometry. 
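+                     # Concretely: screenshot the 3D plotter to img_fname, load
+                     # that file back as a QPixmap, and paint it into the window
+                     # grab at the plotter's position mapped to window coordinates.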
+ plotter = gui._renderer.plotter + plotter.screenshot(img_fname) + sub_pixmap = QtGui.QPixmap(img_fname) + # https://doc.qt.io/qt-5/qwidget.html#mapTo + # https://doc.qt.io/qt-5/qpainter.html#drawPixmap-1 + QtGui.QPainter(pixmap).drawPixmap( + plotter.mapTo(window, plotter.rect().topLeft()), sub_pixmap + ) + # https://doc.qt.io/qt-5/qpixmap.html#save + pixmap.save(img_fname) + try: # for compatibility with both GUIs, will be refactored + gui._renderer.close() # TODO should be triggered by close + except Exception: + pass + gui.close() + return figure_rst([img_fname], gallery_conf["src_dir"], "GUI") + return "" diff --git a/mne/html_templates/__init__.py b/mne/html_templates/__init__.py new file mode 100644 index 0000000..b8234d2 --- /dev/null +++ b/mne/html_templates/__init__.py @@ -0,0 +1,8 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Jinja2 HTML templates.""" +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/html_templates/__init__.pyi b/mne/html_templates/__init__.pyi new file mode 100644 index 0000000..2312227 --- /dev/null +++ b/mne/html_templates/__init__.pyi @@ -0,0 +1,2 @@ +__all__ = ["_get_html_template"] +from ._templates import _get_html_template diff --git a/mne/html_templates/_templates.py b/mne/html_templates/_templates.py new file mode 100644 index 0000000..9427f2d --- /dev/null +++ b/mne/html_templates/_templates.py @@ -0,0 +1,171 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from __future__ import annotations # only needed for Python ≤ 3.9 + +import datetime +import functools +import uuid +from dataclasses import dataclass +from typing import Any, Literal + +from .._fiff.pick import channel_type +from ..defaults import _handle_default + +_COLLAPSED = False # will override in doc build + + +def _format_number(value: int | float) -> str: + """Insert thousand separators.""" + return f"{value:,}" + + +def _append_uuid(string: str, sep: str = "-") -> str: + """Append a UUID to a string.""" + return f"{string}{sep}{uuid.uuid4()}" + + +def _data_type(obj) -> str: + """Return the qualified name of a class.""" + return obj.__class__.__qualname__ + + +def _dt_to_str(dt: datetime.datetime) -> str: + """Convert a datetime object to a human-readable string representation.""" + return dt.strftime("%Y-%m-%d at %H:%M:%S %Z") + + +def _format_baseline(inst) -> str: + """Format the baseline time period.""" + if inst.baseline is None: + baseline = "off" + else: + baseline = ( + f"{round(inst.baseline[0], 3):.3f} – {round(inst.baseline[1], 3):.3f} s" + ) + + return baseline + + +def _format_metadata(inst) -> str: + """Format metadata representation.""" + if inst.metadata is None: + metadata = "No metadata set" + else: + metadata = f"{inst.metadata.shape[0]} rows × {inst.metadata.shape[1]} columns" + + return metadata + + +def _format_time_range(inst) -> str: + """Format evoked and epochs time range.""" + tr = f"{round(inst.tmin, 3):.3f} – {round(inst.tmax, 3):.3f} s" + return tr + + +def _format_projs(info) -> list[str]: + """Format projectors.""" + projs = [f'{p["desc"]} ({"on" if p["active"] else "off"})' for p in info["projs"]] + return projs + + +@dataclass +class _Channel: + """A channel in a recording.""" + + index: int + name_html: str + type: str + type_pretty: str + status: Literal["good", "bad"] + + +def _format_channels(info) -> dict[str, dict[Literal["good", "bad"], 
list[str]]]: + """Format channel names.""" + ch_types_pretty: dict[str, str] = _handle_default("titles") + channels = [] + + if info.ch_names: + for ch_index, ch_name in enumerate(info.ch_names): + ch_type = channel_type(info, ch_index) + ch_type_pretty = ch_types_pretty.get(ch_type, ch_type.upper()) + ch_status = "bad" if ch_name in info["bads"] else "good" + channel = _Channel( + index=ch_index, + name_html=ch_name.replace(" ", " "), + type=ch_type, + type_pretty=ch_type_pretty, + status=ch_status, + ) + channels.append(channel) + + # Extract unique channel types and put them in the desired order. + ch_types = list(set([c.type_pretty for c in channels])) + ch_types = [c for c in ch_types_pretty.values() if c in ch_types] + + channels_formatted = {} + for ch_type in ch_types: + goods = [c for c in channels if c.type_pretty == ch_type and c.status == "good"] + bads = [c for c in channels if c.type_pretty == ch_type and c.status == "bad"] + if ch_type not in channels_formatted: + channels_formatted[ch_type] = {"good": [], "bad": []} + channels_formatted[ch_type]["good"] = goods + channels_formatted[ch_type]["bad"] = bads + + return channels_formatted + + +def _has_attr(obj: Any, attr: str) -> bool: + """Check if an object has an attribute `obj.attr`. + + This is needed because on dict-like objects, Jinja2's `obj.attr is defined` would + check for `obj["attr"]`, which may not be what we want. + """ + return hasattr(obj, attr) + + +@functools.lru_cache(maxsize=2) +def _get_html_templates_env(kind): + # For _html_repr_() and mne.Report + assert kind in ("repr", "report"), kind + import jinja2 + + templates_env = jinja2.Environment( + loader=jinja2.PackageLoader( + package_name="mne.html_templates", package_path=kind + ), + autoescape=jinja2.select_autoescape(default=True, default_for_string=True), + ) + if kind == "report": + templates_env.filters["zip"] = zip + + templates_env.filters["format_number"] = _format_number + templates_env.filters["append_uuid"] = _append_uuid + templates_env.filters["data_type"] = _data_type + templates_env.filters["dt_to_str"] = _dt_to_str + templates_env.filters["format_baseline"] = _format_baseline + templates_env.filters["format_metadata"] = _format_metadata + templates_env.filters["format_time_range"] = _format_time_range + templates_env.filters["format_projs"] = _format_projs + templates_env.filters["format_channels"] = _format_channels + templates_env.filters["has_attr"] = _has_attr + return templates_env + + +def _get_html_template(kind, name): + return _RenderWrap( + _get_html_templates_env(kind).get_template(name), + collapsed=_COLLAPSED, + ) + + +class _RenderWrap: + """Class that allows functools.partial-like wrapping of jinja2 Template.render().""" + + def __init__(self, template, **kwargs): + self._template = template + self._kwargs = kwargs + + def render(self, *args, **kwargs): + return self._template.render(*args, **kwargs, **self._kwargs) diff --git a/mne/html_templates/report/bem.html.jinja b/mne/html_templates/report/bem.html.jinja new file mode 100644 index 0000000..a04a4c0 --- /dev/null +++ b/mne/html_templates/report/bem.html.jinja @@ -0,0 +1,8 @@ +{% extends "section.html.jinja" %} +{% block html_content %} +
<div> + {{ html_slider_axial | safe }} + {{ html_slider_sagittal | safe }} + {{ html_slider_coronal | safe }} +</div>
+{% endblock html_content %} diff --git a/mne/html_templates/report/code.html.jinja b/mne/html_templates/report/code.html.jinja new file mode 100644 index 0000000..63783e4 --- /dev/null +++ b/mne/html_templates/report/code.html.jinja @@ -0,0 +1,6 @@ +{% extends "section.html.jinja" %} +{% block html_content %} +
<pre> +  {{ code }} +</pre>
+{% endblock html_content %} diff --git a/mne/html_templates/report/footer.html.jinja b/mne/html_templates/report/footer.html.jinja new file mode 100644 index 0000000..9733155 --- /dev/null +++ b/mne/html_templates/report/footer.html.jinja @@ -0,0 +1,10 @@ + + +
+ +
+ + + diff --git a/mne/html_templates/report/forward.html.jinja b/mne/html_templates/report/forward.html.jinja new file mode 100644 index 0000000..1a46f4c --- /dev/null +++ b/mne/html_templates/report/forward.html.jinja @@ -0,0 +1,5 @@ +{% extends "section.html.jinja" %} +{% block html_content %} +{{repr | safe}} +{{sensitivity_maps | safe}} +{% endblock html_content %} diff --git a/mne/html_templates/report/header.html.jinja b/mne/html_templates/report/header.html.jinja new file mode 100644 index 0000000..692dbab --- /dev/null +++ b/mne/html_templates/report/header.html.jinja @@ -0,0 +1,56 @@ + + + + + + {{include | safe }} + + + + + {{ title }} + + + + diff --git a/mne/html_templates/report/html.html.jinja b/mne/html_templates/report/html.html.jinja new file mode 100644 index 0000000..a9b4f88 --- /dev/null +++ b/mne/html_templates/report/html.html.jinja @@ -0,0 +1,19 @@ +
+
+ +
+
+
+ {{ html | safe }} +
+
+
diff --git a/mne/html_templates/report/image.html.jinja b/mne/html_templates/report/image.html.jinja new file mode 100644 index 0000000..41cf47e --- /dev/null +++ b/mne/html_templates/report/image.html.jinja @@ -0,0 +1,17 @@ +{% extends "section.html.jinja" %} +{% block html_content %} +
+ {% if image_format == 'svg' %} +
+ {{ img|safe }} +
+ {% else %} + {{ title }} + {% endif %} + + {% if caption is not none %} +
{{ caption }}
+ {% endif %} +
+{% endblock html_content %} diff --git a/mne/html_templates/report/inverse.html.jinja b/mne/html_templates/report/inverse.html.jinja new file mode 100644 index 0000000..ca03f3c --- /dev/null +++ b/mne/html_templates/report/inverse.html.jinja @@ -0,0 +1,5 @@ +{% extends "section.html.jinja" %} +{% block html_content %} +{{repr | safe}} +{{source_space | safe}} +{% endblock html_content %} diff --git a/mne/html_templates/report/section.html.jinja b/mne/html_templates/report/section.html.jinja new file mode 100644 index 0000000..baddf7d --- /dev/null +++ b/mne/html_templates/report/section.html.jinja @@ -0,0 +1,23 @@ +
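`image.html.jinja` above branches on `image_format`: SVG is text and can be inlined directly via `{{ img | safe }}`, whereas raster formats have to travel through an `<img>` tag. A sketch of the two payload kinds, using matplotlib purely for illustration (the real Report plumbing differs in detail):

```python
import base64
from io import BytesIO

import matplotlib

matplotlib.use("Agg")  # headless backend
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])

# SVG: plain text, embeddable inline in the HTML document.
buf = BytesIO()
fig.savefig(buf, format="svg")
svg_markup = buf.getvalue().decode("utf-8")

# PNG: binary, so an <img> tag needs e.g. a base64 data URI as its src.
buf = BytesIO()
fig.savefig(buf, format="png")
img_src = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode("ascii")

print(svg_markup[:60])
print(img_src[:60])
```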
+
+ +
+
+
+ {% block html_content %} + {% for html in htmls %} + {{ html | safe }} + {% endfor %} + {% endblock %} +
+
+
diff --git a/mne/html_templates/report/slider.html.jinja b/mne/html_templates/report/slider.html.jinja new file mode 100644 index 0000000..fab7f56 --- /dev/null +++ b/mne/html_templates/report/slider.html.jinja @@ -0,0 +1,49 @@ +
+
+ +
+
+
+
+ + +
+ +
+
+
diff --git a/mne/html_templates/report/toc.html.jinja b/mne/html_templates/report/toc.html.jinja new file mode 100644 index 0000000..0b71463 --- /dev/null +++ b/mne/html_templates/report/toc.html.jinja @@ -0,0 +1,12 @@ +
+
+
+
Table of contents
+ +
+
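The `repr` partials that follow (starting with `_acquisition.html.jinja`) guard rows with the custom `has_attr` filter defined earlier rather than Jinja's built-in `is defined`. The reason: Jinja attribute lookup falls back to item lookup, so on dict-like objects such as `mne.Info` a key would count as "defined" even when no Python attribute exists. A quick demonstration (the dict here is a stand-in for `Info`):

```python
import jinja2

env = jinja2.Environment()
info = {"meas_date": None}  # dict-like stand-in for mne.Info

# Jinja falls back from attribute to item lookup, so the key is "defined"...
print(env.from_string("{{ info.meas_date is defined }}").render(info=info))
# -> True

# ...even though Python attribute access disagrees:
print(hasattr(info, "meas_date"))  # -> False

# Hence a filter that wraps plain hasattr(), as _has_attr does:
env.filters["has_attr"] = hasattr
print(env.from_string("{{ info | has_attr('meas_date') }}").render(info=info))
# -> False
```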
diff --git a/mne/html_templates/repr/_acquisition.html.jinja b/mne/html_templates/repr/_acquisition.html.jinja new file mode 100644 index 0000000..0016740 --- /dev/null +++ b/mne/html_templates/repr/_acquisition.html.jinja @@ -0,0 +1,97 @@ +{% set section = "Acquisition" %} +{% set section_class_name = section | lower | append_uuid %} + +{# Collapse content during documentation build. #} +{% if collapsed %} +{% set collapsed_row_class = "mne-repr-collapsed" %} +{% else %} +{% set collapsed_row_class = "" %} +{% endif %} + +{%include 'static/_section_header_row.html.jinja' %} + +{% if duration %} + + + Duration + {{ duration }} (HH:MM:SS) + +{% endif %} +{% if inst is defined and inst | has_attr("kind") and inst | has_attr("nave") %} + + + Aggregation + {% if inst.kind == "average" %} + average of {{ inst.nave }} epochs + {% elif inst.kind == "standard_error" %} + standard error of {{ inst.nave }} epochs + {% else %} + {{ inst.kind }} ({{ inst.nave }} epochs) + {% endif %} + +{% endif %} +{% if inst is defined and inst | has_attr("comment") %} + + + Condition + {{inst.comment}} + +{% endif %} +{% if inst is defined and inst | has_attr("events") %} + + + Total number of events + {{ inst.events | length }} + +{% endif %} +{% if event_counts is defined %} + + + Events counts + {% if events is not none %} + + {% for e in event_counts %} + {{ e }} + {% if not loop.last %}
{% endif %} + {% endfor %} + + {% else %} + Not available + {% endif %} + +{% endif %} +{% if inst is defined and inst | has_attr("tmin") and inst | has_attr("tmax") %} + + + Time range + {{ inst | format_time_range }} + +{% endif %} +{% if inst is defined and inst | has_attr("baseline") %} + + + Baseline + {{ inst | format_baseline }} + +{% endif %} +{% if info["sfreq"] is defined and info["sfreq"] is not none %} + + + Sampling frequency + {{ "%0.2f" | format(info["sfreq"]) }} Hz + +{% endif %} +{% if inst is defined and inst.times is defined %} + + + Time points + {{ inst.times | length | format_number }} + +{% endif %} +{% if inst is defined and inst | has_attr("metadata") %} + + + Metadata + {{ inst | format_metadata }} + +{% endif %} \ No newline at end of file diff --git a/mne/html_templates/repr/_channels.html.jinja b/mne/html_templates/repr/_channels.html.jinja new file mode 100644 index 0000000..1d057b9 --- /dev/null +++ b/mne/html_templates/repr/_channels.html.jinja @@ -0,0 +1,25 @@ +{% set section = "Channels" %} +{% set section_class_name = section | lower | append_uuid %} + +{# Collapse content during documentation build. #} +{% if collapsed %} +{% set collapsed_row_class = "mne-repr-collapsed" %} +{% else %} +{% set collapsed_row_class = "" %} +{% endif %} + +{%include 'static/_section_header_row.html.jinja' %} + +{% for channel_type, channels in (info | format_channels).items() %} + {% include 'static/_channels.html.jinja' %} +{% endfor %} + + + + Head & sensor digitization + {% if info["dig"] is not none %} + {{ info["dig"] | length }} points + {% else %} + Not available + {% endif %} + \ No newline at end of file diff --git a/mne/html_templates/repr/_filters.html.jinja b/mne/html_templates/repr/_filters.html.jinja new file mode 100644 index 0000000..97ede51 --- /dev/null +++ b/mne/html_templates/repr/_filters.html.jinja @@ -0,0 +1,38 @@ +{% set section = "Filters" %} +{% set section_class_name = section | lower | append_uuid %} + +{# Collapse content during documentation build. #} +{% if collapsed %} +{% set collapsed_row_class = "mne-repr-collapsed" %} +{% else %} +{% set collapsed_row_class = "" %} +{% endif %} + +{%include 'static/_section_header_row.html.jinja' %} + +{% if info["highpass"] is defined and info["highpass"] is not none %} + + + Highpass + {{ "%0.2f" | format(info["highpass"]) }} Hz + +{% endif %} +{% if info["lowpass"] is defined and info["lowpass"] is not none %} + + + Lowpass + {{ "%0.2f" | format(info["lowpass"]) }} Hz + +{% endif %} +{% if info.projs is defined and info.projs %} + + + Projections + + {% for p in (info | format_projs) %} + {{ p }} + {% if not loop.last %}
{% endif %} + {% endfor %} + + +{% endif %} \ No newline at end of file diff --git a/mne/html_templates/repr/_general.html.jinja b/mne/html_templates/repr/_general.html.jinja new file mode 100644 index 0000000..a57ae40 --- /dev/null +++ b/mne/html_templates/repr/_general.html.jinja @@ -0,0 +1,58 @@ +{% set section = "General" %} +{% set section_class_name = section | lower | append_uuid %} + +{# Collapse content during documentation build. #} +{% if collapsed %} +{% set collapsed_row_class = "mne-repr-collapsed" %} +{% else %} +{% set collapsed_row_class = "" %} +{% endif %} + +{%include 'static/_section_header_row.html.jinja' %} + +{% if filenames %} + + + Filename(s) + + {% for f in filenames %} + {{ f }} + {% if not loop.last %}
{% endif %} + {% endfor %} + + +{% endif %} + + + MNE object type + {{ inst | data_type }} + + + + Measurement date + {% if info["meas_date"] is defined and info["meas_date"] is not none %} + {{ info["meas_date"] | dt_to_str }} + {% else %} + Unknown + {% endif %} + + + + Participant + {% if info["subject_info"] is defined and info["subject_info"] is not none %} + {% if info["subject_info"]["his_id"] is defined %} + {{ info["subject_info"]["his_id"] }} + {% endif %} + {% else %} + Unknown + {% endif %} + + + + Experimenter + {% if info["experimenter"] is defined and info["experimenter"] is not none %} + {{ info["experimenter"] }} + {% else %} + Unknown + {% endif %} + \ No newline at end of file diff --git a/mne/html_templates/repr/_js_and_css.html.jinja b/mne/html_templates/repr/_js_and_css.html.jinja new file mode 100644 index 0000000..f185cfb --- /dev/null +++ b/mne/html_templates/repr/_js_and_css.html.jinja @@ -0,0 +1,7 @@ + + + \ No newline at end of file diff --git a/mne/html_templates/repr/epochs.html.jinja b/mne/html_templates/repr/epochs.html.jinja new file mode 100644 index 0000000..6b33c17 --- /dev/null +++ b/mne/html_templates/repr/epochs.html.jinja @@ -0,0 +1,10 @@ +{%include '_js_and_css.html.jinja' %} + +{% set info = inst.info %} + + + {%include '_general.html.jinja' %} + {%include '_acquisition.html.jinja' %} + {%include '_channels.html.jinja' %} + {%include '_filters.html.jinja' %} +
\ No newline at end of file diff --git a/mne/html_templates/repr/evoked.html.jinja b/mne/html_templates/repr/evoked.html.jinja new file mode 100644 index 0000000..6b33c17 --- /dev/null +++ b/mne/html_templates/repr/evoked.html.jinja @@ -0,0 +1,10 @@ +{%include '_js_and_css.html.jinja' %} + +{% set info = inst.info %} + + + {%include '_general.html.jinja' %} + {%include '_acquisition.html.jinja' %} + {%include '_channels.html.jinja' %} + {%include '_filters.html.jinja' %} +
\ No newline at end of file diff --git a/mne/html_templates/repr/forward.html.jinja b/mne/html_templates/repr/forward.html.jinja new file mode 100644 index 0000000..e8bce96 --- /dev/null +++ b/mne/html_templates/repr/forward.html.jinja @@ -0,0 +1,29 @@ +{%include '_js_and_css.html.jinja' %} + +{% set section = "Forward" %} +{% set section_class_name = section | lower | append_uuid %} + +{# Collapse content during documentation build. #} +{% if collapsed %} +{% set collapsed_row_class = "mne-repr-collapsed" %} +{% else %} +{% set collapsed_row_class = "" %} +{% endif %} + + + {%include 'static/_section_header_row.html.jinja' %} + {% for channel_type, channels in (info | format_channels).items() %} + {% include 'static/_channels.html.jinja' %} + {% endfor %} + + + + + + + + + + + +
Source space{{ source_space_descr }}
Source orientation{{ source_orientation }}
\ No newline at end of file diff --git a/mne/html_templates/repr/ica.html.jinja b/mne/html_templates/repr/ica.html.jinja new file mode 100644 index 0000000..6276209 --- /dev/null +++ b/mne/html_templates/repr/ica.html.jinja @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + {% if fit_on %} + + + + + + + + + + + + + + + + + {% endif %} +
Method{{ method }}
Fit parameters{% if fit_params %}{% for key, value in fit_params.items() %}{{ key }}={{ value }}
{% endfor %}{% else %}—{% endif %}
Fit{% if fit_on %}{{ n_iter }} iterations on {{ fit_on }} ({{ n_samples }} samples){% else %}no{% endif %}
ICA components{{ n_components }}
Available PCA components{{ n_pca_components }}
Channel types{{ ch_types|join(', ') }}
ICA components marked for exclusion{% if excludes %}{{ excludes|join('
' | safe) }}{% else %}—{% endif %}
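The per-object entry points (`epochs.html.jinja` and `evoked.html.jinja` above, `info.html.jinja` and `raw.html.jinja` below) are deliberately thin: each aliases `inst`/`info` with `{% set %}` and then pulls in the shared partials via `{% include %}`, which by default sees the including template's context. A minimal sketch with a hypothetical partial and a fake instance:

```python
import jinja2

env = jinja2.Environment(
    loader=jinja2.DictLoader(
        {
            # Shared partial: assumes `info` is already in the context.
            "_general.html.jinja": "<tr><td>sfreq</td><td>{{ info['sfreq'] }}</td></tr>",
            # Entry point: aliases info from the instance, then includes the partial.
            "raw.html.jinja": (
                "{% set info = inst.info %}"
                "<table>{% include '_general.html.jinja' %}</table>"
            ),
        }
    )
)


class FakeRaw:
    """Hypothetical stand-in for mne.io.Raw."""

    info = {"sfreq": 250.0}


print(env.get_template("raw.html.jinja").render(inst=FakeRaw()))
# -> <table><tr><td>sfreq</td><td>250.0</td></tr></table>
```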
diff --git a/mne/html_templates/repr/info.html.jinja b/mne/html_templates/repr/info.html.jinja new file mode 100644 index 0000000..6446372 --- /dev/null +++ b/mne/html_templates/repr/info.html.jinja @@ -0,0 +1,10 @@ +{%include '_js_and_css.html.jinja' %} + +{%set inst = info %} + + + {%include '_general.html.jinja' %} + {%include '_acquisition.html.jinja' %} + {%include '_channels.html.jinja' %} + {%include '_filters.html.jinja' %} +
\ No newline at end of file diff --git a/mne/html_templates/repr/inverse_operator.html.jinja b/mne/html_templates/repr/inverse_operator.html.jinja new file mode 100644 index 0000000..63d1ee8 --- /dev/null +++ b/mne/html_templates/repr/inverse_operator.html.jinja @@ -0,0 +1,14 @@ + + + + + + + + + + + + + +
Channels{{ channels }}
Source space{{ source_space_descr }}
Source orientation{{ source_orientation }}
diff --git a/mne/html_templates/repr/raw.html.jinja b/mne/html_templates/repr/raw.html.jinja new file mode 100644 index 0000000..6b33c17 --- /dev/null +++ b/mne/html_templates/repr/raw.html.jinja @@ -0,0 +1,10 @@ +{%include '_js_and_css.html.jinja' %} + +{% set info = inst.info %} + + + {%include '_general.html.jinja' %} + {%include '_acquisition.html.jinja' %} + {%include '_channels.html.jinja' %} + {%include '_filters.html.jinja' %} +
\ No newline at end of file diff --git a/mne/html_templates/repr/spectrum.html.jinja b/mne/html_templates/repr/spectrum.html.jinja new file mode 100644 index 0000000..11a1d7a --- /dev/null +++ b/mne/html_templates/repr/spectrum.html.jinja @@ -0,0 +1,50 @@ + + + + + + {%- for unit in units %} + + {%- if loop.index == 1 %} + + {%- endif %} + + + {%- endfor %} + + + + + {%- if inst_type == "Epochs" %} + + + + + {% endif -%} + + + + + + + + + {% if "taper" in spectrum._dims %} + + + + + {% endif %} + + + + + + + + + + + + +
Data type{{ spectrum._data_type }}
Units{{ unit }}
Data source{{ inst_type }}
Number of epochs{{ spectrum.shape[0] }}
Dims{{ spectrum._dims | join(", ") }}
Estimation method{{ spectrum.method }}
Number of tapers{{ spectrum._mt_weights.size }}
Number of channels{{ spectrum.ch_names|length }}
Number of frequency bins{{ spectrum.freqs|length }}
Frequency range{{ '%.2f'|format(spectrum.freqs[0]) }} – {{ '%.2f'|format(spectrum.freqs[-1]) }} Hz
diff --git a/mne/html_templates/repr/static/_channels.html.jinja b/mne/html_templates/repr/static/_channels.html.jinja new file mode 100644 index 0000000..d40f62f --- /dev/null +++ b/mne/html_templates/repr/static/_channels.html.jinja @@ -0,0 +1,17 @@ +{% set channel_names_good = channels["good"] | map(attribute='name_html') | join(', ') %} + + + {{ channel_type }} + + + + {% if channels["bad"] %} + {% set channel_names_bad = channels["bad"] | map(attribute='name_html') | join(', ') %} + and + {% endif %} + + diff --git a/mne/html_templates/repr/static/_section_header_row.html.jinja b/mne/html_templates/repr/static/_section_header_row.html.jinja new file mode 100644 index 0000000..d8d3da2 --- /dev/null +++ b/mne/html_templates/repr/static/_section_header_row.html.jinja @@ -0,0 +1,12 @@ + + + + + + {{ section }} + + diff --git a/mne/html_templates/repr/static/repr.css b/mne/html_templates/repr/static/repr.css new file mode 100644 index 0000000..5893187 --- /dev/null +++ b/mne/html_templates/repr/static/repr.css @@ -0,0 +1,107 @@ +/* +Styles in this section apply both to the sphinx-built website docs and to notebooks +rendered in an IDE or in Jupyter. In our web docs, styles here are complemented by +doc/_static/styles.css and other CSS files (e.g. from the sphinx theme, sphinx-gallery, +or bootstrap). In IDEs/Jupyter, those style files are unavailable, so only the rules in +this file apply (plus whatever default styling the IDE applies). +*/ +.mne-repr-table { + display: inline; /* prevent using full container width */ +} +.mne-repr-table tr.mne-repr-section-header > th { + padding-top: 1rem; + text-align: left; + vertical-align: middle; +} +.mne-repr-section-toggle > button { + all: unset; + display: block; + height: 1rem; + width: 1rem; +} +.mne-repr-section-toggle > button > svg { + height: 60%; +} + +/* transition (rotation) effects on the collapser button */ +.mne-repr-section-toggle > button.collapsed > svg { + transition: 0.1s ease-out; + transform: rotate(-90deg); +} +.mne-repr-section-toggle > button:not(.collapsed) > svg { + transition: 0.1s ease-out; + transform: rotate(0deg); +} + +/* hide collapsed table rows */ +.mne-repr-collapsed { + display: none; +} + + +@layer { + /* + Selectors in a `@layer` will always be lower-precedence than selectors outside the + layer. So even though e.g. `div.output_html` is present in the sphinx-rendered + website docs, the styles here won't take effect there as long as some other rule + somewhere in the page's CSS targets the same element. + + In IDEs or Jupyter notebooks, though, the CSS files from the sphinx theme, + sphinx-gallery, and bootstrap are unavailable, so these styles will apply. + + Notes: + + - the selector `.accordion-body` is for MNE Reports + - the selector `.output_html` is for VSCode's notebook interface + - the selector `.jp-RenderedHTML` is for Jupyter notebook + - variables starting with `--theme-` are VSCode-specific. + - variables starting with `--jp-` are Jupyter styles, *some of which* are also + available in VSCode. Here we try the `--theme-` variable first, then fall back to + the `--jp-` ones. 
+ */ + .mne-repr-table { + --mne-toggle-color: var(--theme-foreground, var(--jp-ui-font-color1)); + --mne-button-bg-color: var(--theme-button-background, var(--jp-info-color0, var(--jp-content-link-color))); + --mne-button-fg-color: var(--theme-button-foreground, var(--jp-ui-inverse-font-color0, var(--jp-editor-background))); + --mne-button-hover-bg-color: var(--theme-button-hover-background, var(--jp-info-color1)); + --mne-button-radius: var(--jp-border-radius, 0.25rem); + } + /* chevron position/alignment; in VSCode it looks ok without adjusting */ + .accordion-body .mne-repr-section-toggle > button, + .jp-RenderedHTML .mne-repr-section-toggle > button { + padding: 0 0 45% 25% !important; + } + /* chevron color; MNE Report doesn't have light/dark mode */ + div.output_html .mne-repr-section-toggle > button > svg > path, + .jp-RenderedHTML .mne-repr-section-toggle > button > svg > path { + fill: var(--mne-toggle-color); + } + .accordion-body .mne-ch-names-btn, + div.output_html .mne-ch-names-btn, + .jp-RenderedHTML .mne-ch-names-btn { + -webkit-border-radius: var(--mne-button-radius); + -moz-border-radius: var(--mne-button-radius); + border-radius: var(--mne-button-radius); + border: none; + background-image: none; + background-color: var(--mne-button-bg-color); + color: var(--mne-button-fg-color); + font-size: inherit; + min-width: 1.5rem; + padding: 0.25rem; + text-align: center; + text-decoration: none; + } + .accordion-body .mne-ch-names-btn:hover, + div.output_html .mne.ch-names-btn:hover, + .jp-RenderedHTML .mne-ch-names-btn:hover { + background-color: var(--mne-button-hover-bg-color); + text-decoration: underline; + } + .accordion-body .mne-ch-names-btn:focus-visible, + div.output_html .mne-ch-names-btn:focus-visible, + .jp-RenderedHTML .mne-ch-names-btn:focus-visible { + outline: 0.1875rem solid var(--mne-button-bg-color) !important; + outline-offset: 0.1875rem !important; + } +} diff --git a/mne/html_templates/repr/static/repr.js b/mne/html_templates/repr/static/repr.js new file mode 100644 index 0000000..00c9d32 --- /dev/null +++ b/mne/html_templates/repr/static/repr.js @@ -0,0 +1,23 @@ +// must be `var` (not `const`) because this can get embedded multiple times on a page +var toggleVisibility = (className) => { + + const elements = document.querySelectorAll(`.${className}`); + + elements.forEach(element => { + if (element.classList.contains("mne-repr-section-header")) { + return // Don't collapse the section header row + } + element.classList.toggle("mne-repr-collapsed"); + }); + + // trigger caret to rotate + var sel = `.mne-repr-section-header.${className} > th.mne-repr-section-toggle > button`; + const button = document.querySelector(sel); + button.classList.toggle("collapsed"); + + // adjust tooltip + sel = `tr.mne-repr-section-header.${className}`; + const secHeadRow = document.querySelector(sel); + secHeadRow.classList.toggle("collapsed"); + secHeadRow.title = secHeadRow.title === "Hide section" ? "Show section" : "Hide section"; +} diff --git a/mne/html_templates/repr/tfr.html.jinja b/mne/html_templates/repr/tfr.html.jinja new file mode 100644 index 0000000..d2881a0 --- /dev/null +++ b/mne/html_templates/repr/tfr.html.jinja @@ -0,0 +1,60 @@ + + + + + + {%- for unit in units %} + + {%- if loop.index == 1 %} + + {%- endif %} + + + {%- endfor %} + + + + + {%- if inst_type == "Epochs" %} + + + + + {% endif -%} + {%- if inst_type == "Evoked" %} + + + + + {% endif -%} + + + + + + + + + {% if "taper" in tfr._dims %} + + + + + {% endif %} + + + + + + + + + + + + + + + + +
Data type{{ tfr._data_type }}
Units{{ unit }}
Data source{{ inst_type }}
Number of epochs{{ tfr.shape[0] }}
Number of averaged trials{{ nave }}
Dims{{ tfr._dims | join(", ") }}
Estimation method{{ tfr.method }}
Number of tapers{{ tfr._mt_weights.size }}
Number of channels{{ tfr.ch_names|length }}
Number of timepoints{{ tfr.times|length }}
Number of frequency bins{{ tfr.freqs|length }}
Frequency range{{ '%.2f'|format(tfr.freqs[0]) }} – {{ '%.2f'|format(tfr.freqs[-1]) }} Hz
diff --git a/mne/icons/README.rst b/mne/icons/README.rst new file mode 100644 index 0000000..a6de772 --- /dev/null +++ b/mne/icons/README.rst @@ -0,0 +1,11 @@ +.. -*- mode: rst -*- + + +Documentation +============= + +The icons are used in ``mne/viz/_brain/_brain.py`` for the toolbar. +These Material design icons are provided by Google under the `Apache 2.0`_ license. + + +.. _Apache 2.0: https://github.com/google/material-design-icons/blob/master/LICENSE diff --git a/mne/icons/dark/actions/clear.svg b/mne/icons/dark/actions/clear.svg new file mode 100644 index 0000000..f28cedd --- /dev/null +++ b/mne/icons/dark/actions/clear.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/actions/folder.svg b/mne/icons/dark/actions/folder.svg new file mode 100644 index 0000000..7fa0de9 --- /dev/null +++ b/mne/icons/dark/actions/folder.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/actions/help.svg b/mne/icons/dark/actions/help.svg new file mode 100644 index 0000000..3631070 --- /dev/null +++ b/mne/icons/dark/actions/help.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/actions/movie.svg b/mne/icons/dark/actions/movie.svg new file mode 100644 index 0000000..94a5554 --- /dev/null +++ b/mne/icons/dark/actions/movie.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/actions/pause.svg b/mne/icons/dark/actions/pause.svg new file mode 100644 index 0000000..af1364e --- /dev/null +++ b/mne/icons/dark/actions/pause.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/actions/play.svg b/mne/icons/dark/actions/play.svg new file mode 100644 index 0000000..eea3600 --- /dev/null +++ b/mne/icons/dark/actions/play.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/actions/reset.svg b/mne/icons/dark/actions/reset.svg new file mode 100644 index 0000000..43e59a3 --- /dev/null +++ b/mne/icons/dark/actions/reset.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/actions/restore.svg b/mne/icons/dark/actions/restore.svg new file mode 100644 index 0000000..1471121 --- /dev/null +++ b/mne/icons/dark/actions/restore.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/actions/scale.svg b/mne/icons/dark/actions/scale.svg new file mode 100644 index 0000000..be0df03 --- /dev/null +++ b/mne/icons/dark/actions/scale.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/actions/screenshot.svg b/mne/icons/dark/actions/screenshot.svg new file mode 100644 index 0000000..ad9719d --- /dev/null +++ b/mne/icons/dark/actions/screenshot.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/actions/visibility_off.svg b/mne/icons/dark/actions/visibility_off.svg new file mode 100644 index 0000000..9e10c3a --- /dev/null +++ b/mne/icons/dark/actions/visibility_off.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/actions/visibility_on.svg b/mne/icons/dark/actions/visibility_on.svg new file mode 100644 index 0000000..9d4b89e --- /dev/null +++ b/mne/icons/dark/actions/visibility_on.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/dark/index.theme b/mne/icons/dark/index.theme new file mode 100644 index 0000000..d3e887c --- /dev/null +++ b/mne/icons/dark/index.theme @@ -0,0 +1,11 @@ +[Icon Theme] +Name=dark + +Directories=actions + +[actions] +Size=32 +Context=Actions +MinSize=16 +MaxSize=128 +Type=Scalable diff --git a/mne/icons/light/actions/clear.svg 
b/mne/icons/light/actions/clear.svg new file mode 100644 index 0000000..f519316 --- /dev/null +++ b/mne/icons/light/actions/clear.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/actions/folder.svg b/mne/icons/light/actions/folder.svg new file mode 100644 index 0000000..6b44438 --- /dev/null +++ b/mne/icons/light/actions/folder.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/actions/help.svg b/mne/icons/light/actions/help.svg new file mode 100644 index 0000000..afa9398 --- /dev/null +++ b/mne/icons/light/actions/help.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/actions/movie.svg b/mne/icons/light/actions/movie.svg new file mode 100644 index 0000000..6bb41a1 --- /dev/null +++ b/mne/icons/light/actions/movie.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/actions/pause.svg b/mne/icons/light/actions/pause.svg new file mode 100644 index 0000000..05bcc5c --- /dev/null +++ b/mne/icons/light/actions/pause.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/actions/play.svg b/mne/icons/light/actions/play.svg new file mode 100644 index 0000000..23abb1a --- /dev/null +++ b/mne/icons/light/actions/play.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/actions/reset.svg b/mne/icons/light/actions/reset.svg new file mode 100644 index 0000000..1aea2e0 --- /dev/null +++ b/mne/icons/light/actions/reset.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/actions/restore.svg b/mne/icons/light/actions/restore.svg new file mode 100644 index 0000000..8a4b066 --- /dev/null +++ b/mne/icons/light/actions/restore.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/actions/scale.svg b/mne/icons/light/actions/scale.svg new file mode 100644 index 0000000..85af480 --- /dev/null +++ b/mne/icons/light/actions/scale.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/actions/screenshot.svg b/mne/icons/light/actions/screenshot.svg new file mode 100644 index 0000000..53d8fe3 --- /dev/null +++ b/mne/icons/light/actions/screenshot.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/actions/visibility_off.svg b/mne/icons/light/actions/visibility_off.svg new file mode 100644 index 0000000..00e6067 --- /dev/null +++ b/mne/icons/light/actions/visibility_off.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/actions/visibility_on.svg b/mne/icons/light/actions/visibility_on.svg new file mode 100644 index 0000000..617b9a0 --- /dev/null +++ b/mne/icons/light/actions/visibility_on.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mne/icons/light/index.theme b/mne/icons/light/index.theme new file mode 100644 index 0000000..2176682 --- /dev/null +++ b/mne/icons/light/index.theme @@ -0,0 +1,11 @@ +[Icon Theme] +Name=light + +Directories=actions + +[actions] +Size=32 +Context=Actions +MinSize=16 +MaxSize=128 +Type=Scalable diff --git a/mne/icons/mne_bigsur_icon.png b/mne/icons/mne_bigsur_icon.png new file mode 100644 index 0000000..9661d77 Binary files /dev/null and b/mne/icons/mne_bigsur_icon.png differ diff --git a/mne/icons/mne_default_icon.png b/mne/icons/mne_default_icon.png new file mode 100644 index 0000000..22a90e2 Binary files /dev/null and b/mne/icons/mne_default_icon.png differ diff --git a/mne/icons/mne_icon-cropped.png b/mne/icons/mne_icon-cropped.png new file mode 100644 index 0000000..9cc2c33 Binary files /dev/null and 
b/mne/icons/mne_icon-cropped.png differ diff --git a/mne/icons/mne_icon.png b/mne/icons/mne_icon.png new file mode 100644 index 0000000..c197b62 Binary files /dev/null and b/mne/icons/mne_icon.png differ diff --git a/mne/icons/mne_splash.png b/mne/icons/mne_splash.png new file mode 100644 index 0000000..76aa18e Binary files /dev/null and b/mne/icons/mne_splash.png differ diff --git a/mne/icons/toolbar_move_horizontal@2x.png b/mne/icons/toolbar_move_horizontal@2x.png new file mode 100644 index 0000000..143b62e Binary files /dev/null and b/mne/icons/toolbar_move_horizontal@2x.png differ diff --git a/mne/icons/toolbar_move_vertical@2x.png b/mne/icons/toolbar_move_vertical@2x.png new file mode 100644 index 0000000..453d7b7 Binary files /dev/null and b/mne/icons/toolbar_move_vertical@2x.png differ diff --git a/mne/icons/toolbar_separator_horizontal.png b/mne/icons/toolbar_separator_horizontal.png new file mode 100644 index 0000000..ecf2ab7 Binary files /dev/null and b/mne/icons/toolbar_separator_horizontal.png differ diff --git a/mne/icons/toolbar_separator_horizontal@2x.png b/mne/icons/toolbar_separator_horizontal@2x.png new file mode 100644 index 0000000..ac2b343 Binary files /dev/null and b/mne/icons/toolbar_separator_horizontal@2x.png differ diff --git a/mne/icons/toolbar_separator_vertical@2x.png b/mne/icons/toolbar_separator_vertical@2x.png new file mode 100644 index 0000000..2f66e93 Binary files /dev/null and b/mne/icons/toolbar_separator_vertical@2x.png differ diff --git a/mne/inverse_sparse/__init__.py b/mne/inverse_sparse/__init__.py new file mode 100644 index 0000000..615c306 --- /dev/null +++ b/mne/inverse_sparse/__init__.py @@ -0,0 +1,9 @@ +"""Non-Linear sparse inverse solvers.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/inverse_sparse/__init__.pyi b/mne/inverse_sparse/__init__.pyi new file mode 100644 index 0000000..5579211 --- /dev/null +++ b/mne/inverse_sparse/__init__.pyi @@ -0,0 +1,3 @@ +__all__ = ["gamma_map", "make_stc_from_dipoles", "mixed_norm", "tf_mixed_norm"] +from ._gamma_map import gamma_map +from .mxne_inverse import make_stc_from_dipoles, mixed_norm, tf_mixed_norm diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py new file mode 100644 index 0000000..35fd158 --- /dev/null +++ b/mne/inverse_sparse/_gamma_map.py @@ -0,0 +1,341 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +from ..fixes import _safe_svd +from ..forward import is_fixed_orient +from ..minimum_norm.inverse import _check_reference, _log_exp_var +from ..utils import logger, verbose, warn +from .mxne_inverse import ( + _check_ori, + _compute_residual, + _make_dipoles_sparse, + _make_sparse_stc, + _prepare_gain, + _reapply_source_weighting, +) + + +@verbose +def _gamma_map_opt( + M, + G, + alpha, + maxit=10000, + tol=1e-6, + update_mode=1, + group_size=1, + gammas=None, + verbose=None, +): + """Hierarchical Bayes (Gamma-MAP). + + Parameters + ---------- + M : array, shape=(n_sensors, n_times) + Observation. + G : array, shape=(n_sensors, n_sources) + Forward operator. + alpha : float + Regularization parameter (noise variance). + maxit : int + Maximum number of iterations. + tol : float + Tolerance parameter for convergence. + group_size : int + Number of consecutive sources which use the same gamma. 
+ update_mode : int + Update mode, 1: MacKay update (default), 3: Modified MacKay update. + gammas : array, shape=(n_sources,) + Initial values for posterior variances (gammas). If None, a + variance of 1.0 is used. + %(verbose)s + + Returns + ------- + X : array, shape=(n_active, n_times) + Estimated source time courses. + active_set : array, shape=(n_active,) + Indices of active sources. + """ + G = G.copy() + M = M.copy() + + if gammas is None: + gammas = np.ones(G.shape[1], dtype=np.float64) + + eps = np.finfo(float).eps + + n_sources = G.shape[1] + n_sensors, n_times = M.shape + + # apply normalization so the numerical values are sane + M_normalize_constant = np.linalg.norm(np.dot(M, M.T), ord="fro") + M /= np.sqrt(M_normalize_constant) + alpha /= M_normalize_constant + G_normalize_constant = np.linalg.norm(G, ord=np.inf) + G /= G_normalize_constant + + if n_sources % group_size != 0: + raise ValueError( + "Number of sources has to be evenly dividable by the group size" + ) + + n_active = n_sources + active_set = np.arange(n_sources) + + gammas_full_old = gammas.copy() + + if update_mode == 2: + denom_fun = np.sqrt + else: + # do nothing + def denom_fun(x): + return x + + last_size = -1 + for itno in range(maxit): + gammas[np.isnan(gammas)] = 0.0 + + gidx = np.abs(gammas) > eps + active_set = active_set[gidx] + gammas = gammas[gidx] + + # update only active gammas (once set to zero it stays at zero) + if n_active > len(active_set): + n_active = active_set.size + G = G[:, gidx] + + CM = np.dot(G * gammas[np.newaxis, :], G.T) + CM.flat[:: n_sensors + 1] += alpha + # Invert CM keeping symmetry + U, S, _ = _safe_svd(CM, full_matrices=False) + S = S[np.newaxis, :] + del CM + CMinv = np.dot(U / (S + eps), U.T) + CMinvG = np.dot(CMinv, G) + A = np.dot(CMinvG.T, M) # mult. w. 
Diag(gamma) in gamma update + + if update_mode == 1: + # MacKay fixed point update (10) in [1] + numer = gammas**2 * np.mean((A * A.conj()).real, axis=1) + denom = gammas * np.sum(G * CMinvG, axis=0) + elif update_mode == 2: + # modified MacKay fixed point update (11) in [1] + numer = gammas * np.sqrt(np.mean((A * A.conj()).real, axis=1)) + denom = np.sum(G * CMinvG, axis=0) # sqrt is applied below + else: + raise ValueError("Invalid value for update_mode") + + if group_size == 1: + if denom is None: + gammas = numer + else: + gammas = numer / np.maximum(denom_fun(denom), np.finfo("float").eps) + else: + numer_comb = np.sum(numer.reshape(-1, group_size), axis=1) + if denom is None: + gammas_comb = numer_comb + else: + denom_comb = np.sum(denom.reshape(-1, group_size), axis=1) + gammas_comb = numer_comb / denom_fun(denom_comb) + + gammas = np.repeat(gammas_comb / group_size, group_size) + + # compute convergence criterion + gammas_full = np.zeros(n_sources, dtype=np.float64) + gammas_full[active_set] = gammas + + err = np.sum(np.abs(gammas_full - gammas_full_old)) / np.sum( + np.abs(gammas_full_old) + ) + + gammas_full_old = gammas_full + + breaking = err < tol or n_active == 0 + if len(gammas) != last_size or breaking: + logger.info( + f"Iteration: {itno}\t active set size: {len(gammas)}\t convergence: " + f"{err:.3e}" + ) + last_size = len(gammas) + + if breaking: + break + + if itno < maxit - 1: + logger.info("\nConvergence reached !\n") + else: + warn("\nConvergence NOT reached !\n") + + # undo normalization and compute final posterior mean + n_const = np.sqrt(M_normalize_constant) / G_normalize_constant + x_active = n_const * gammas[:, None] * A + + return x_active, active_set + + +@verbose +def gamma_map( + evoked, + forward, + noise_cov, + alpha, + loose="auto", + depth=0.8, + xyz_same_gamma=True, + maxit=10000, + tol=1e-6, + update_mode=1, + gammas=None, + pca=True, + return_residual=False, + return_as_dipoles=False, + rank=None, + pick_ori=None, + verbose=None, +): + """Hierarchical Bayes (Gamma-MAP) sparse source localization method. + + Models each source time course using a zero-mean Gaussian prior with an + unknown variance (gamma) parameter. During estimation, most gammas are + driven to zero, resulting in a sparse source estimate, as in + :footcite:`WipfEtAl2007` and :footcite:`WipfNagarajan2009`. + + For fixed-orientation forward operators, a separate gamma is used for each + source time course, while for free-orientation forward operators, the same + gamma is used for the three source time courses at each source space point + (separate gammas can be used in this case by using xyz_same_gamma=False). + + Parameters + ---------- + evoked : instance of Evoked + Evoked data to invert. + forward : dict + Forward operator. + noise_cov : instance of Covariance + Noise covariance to compute whitener. + alpha : float + Regularization parameter (noise variance). + %(loose)s + %(depth)s + xyz_same_gamma : bool + Use same gamma for xyz current components at each source space point. + Recommended for free-orientation forward solutions. + maxit : int + Maximum number of iterations. + tol : float + Tolerance parameter for convergence. + update_mode : int + Update mode, 1: MacKay update (default), 2: Modified MacKay update. + gammas : array, shape=(n_sources,) + Initial values for posterior variances (gammas). If None, a + variance of 1.0 is used. + pca : bool + If True the rank of the data is reduced to the true dimension. 
+ return_residual : bool + If True, the residual is returned as an Evoked instance. + return_as_dipoles : bool + If True, the sources are returned as a list of Dipole instances. + %(rank_none)s + + .. versionadded:: 0.18 + %(pick_ori)s + %(verbose)s + + Returns + ------- + stc : instance of SourceEstimate + Source time courses. + residual : instance of Evoked + The residual a.k.a. data not explained by the sources. + Only returned if return_residual is True. + + References + ---------- + .. footbibliography:: + """ + _check_reference(evoked) + + forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain( + forward, evoked.info, noise_cov, pca, depth, loose, rank + ) + _check_ori(pick_ori, forward) + + group_size = 1 if (is_fixed_orient(forward) or not xyz_same_gamma) else 3 + + # get the data + sel = [evoked.ch_names.index(name) for name in gain_info["ch_names"]] + M = evoked.data[sel] + + # whiten the data + logger.info("Whitening data matrix.") + M = np.dot(whitener, M) + + # run the optimization + X, active_set = _gamma_map_opt( + M, + gain, + alpha, + maxit=maxit, + tol=tol, + update_mode=update_mode, + gammas=gammas, + group_size=group_size, + verbose=verbose, + ) + + if len(active_set) == 0: + raise Exception("No active dipoles found. alpha is too big.") + + M_estimate = gain[:, active_set] @ X + + # Reapply weights to have correct unit + X = _reapply_source_weighting(X, source_weighting, active_set) + + if return_residual: + residual = _compute_residual(forward, evoked, X, active_set, gain_info) + + if group_size == 1 and not is_fixed_orient(forward): + # make sure each source has 3 components + idx, offset = divmod(active_set, 3) + active_src = np.unique(idx) + if len(X) < 3 * len(active_src): + X_xyz = np.zeros((len(active_src), 3, X.shape[1]), dtype=X.dtype) + idx = np.searchsorted(active_src, idx) + X_xyz[idx, offset, :] = X + X_xyz.shape = (len(active_src) * 3, X.shape[1]) + X = X_xyz + active_set = (active_src[:, np.newaxis] * 3 + np.arange(3)).ravel() + source_weighting[source_weighting == 0] = 1 # zeros + gain_active = gain[:, active_set] / source_weighting[active_set] + del source_weighting + + tmin = evoked.times[0] + tstep = 1.0 / evoked.info["sfreq"] + + if return_as_dipoles: + out = _make_dipoles_sparse( + X, active_set, forward, tmin, tstep, M, gain_active, active_is_idx=True + ) + else: + out = _make_sparse_stc( + X, + active_set, + forward, + tmin, + tstep, + active_is_idx=True, + pick_ori=pick_ori, + verbose=verbose, + ) + + _log_exp_var(M, M_estimate, prefix="") + logger.info("[done]") + + if return_residual: + out = out, residual + + return out diff --git a/mne/inverse_sparse/mxne_debiasing.py b/mne/inverse_sparse/mxne_debiasing.py new file mode 100644 index 0000000..860c67c --- /dev/null +++ b/mne/inverse_sparse/mxne_debiasing.py @@ -0,0 +1,137 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from math import sqrt + +import numpy as np + +from ..utils import check_random_state, fill_doc, logger, verbose + + +@fill_doc +def power_iteration_kron(A, C, max_iter=1000, tol=1e-3, random_state=0): + """Find the largest singular value for the matrix kron(C.T, A). + + It uses power iterations. 
+ + Parameters + ---------- + A : array + An array + C : array + An array + max_iter : int + Maximum number of iterations + %(random_state)s + + Returns + ------- + L : float + largest singular value + + Notes + ----- + http://en.wikipedia.org/wiki/Power_iteration + """ + AS_size = C.shape[0] + rng = check_random_state(random_state) + B = rng.randn(AS_size, AS_size) + B /= np.linalg.norm(B, "fro") + ATA = np.dot(A.T, A) + CCT = np.dot(C, C.T) + L0 = np.inf + for _ in range(max_iter): + Y = np.dot(np.dot(ATA, B), CCT) + L = np.linalg.norm(Y, "fro") + + if abs(L - L0) < tol: + break + + B = Y / L + L0 = L + return L + + +@verbose +def compute_bias(M, G, X, max_iter=1000, tol=1e-6, n_orient=1, verbose=None): + """Compute scaling to correct amplitude bias. + + It solves the following optimization problem using FISTA: + + min 1/2 * (|| M - GDX ||fro)^2 + s.t. D >= 1 and D is a diagonal matrix + + Reference for the FISTA algorithm: + Amir Beck and Marc Teboulle + A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse + Problems, SIAM J. Imaging Sci., 2(1), 183-202. (20 pages) + http://epubs.siam.org/doi/abs/10.1137/080716542 + + Parameters + ---------- + M : array + measurement data. + G : array + leadfield matrix. + X : array + reconstructed time courses with amplitude bias. + max_iter : int + Maximum number of iterations. + tol : float + The tolerance on convergence. + n_orient : int + The number of orientations (1 for fixed and 3 otherwise). + %(verbose)s + + Returns + ------- + D : array + Debiasing weights. + """ + n_sources = X.shape[0] + + lipschitz_constant = 1.1 * power_iteration_kron(G, X) + + # initializations + D = np.ones(n_sources) + Y = np.ones(n_sources) + t = 1.0 + + for i in range(max_iter): + D0 = D + + # gradient step + R = M - np.dot(G * Y, X) + D = Y + np.sum(np.dot(G.T, R) * X, axis=1) / lipschitz_constant + # Equivalent but faster than: + # D = Y + np.diag(np.dot(np.dot(G.T, R), X.T)) / lipschitz_constant + + # prox ie projection on constraint + if n_orient != 1: # take care of orientations + # The scaling has to be the same for all orientations + D = np.mean(D.reshape(-1, n_orient), axis=1) + D = np.tile(D, [n_orient, 1]).T.ravel() + D = np.maximum(D, 1.0) + + t0 = t + t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t**2)) + Y.fill(0.0) + dt = (t0 - 1.0) / t + Y = D + dt * (D - D0) + + Ddiff = np.linalg.norm(D - D0, np.inf) + + if Ddiff < tol: + logger.info( + f"Debiasing converged after {i} iterations " + f"max(|D - D0| = {Ddiff:e} < {tol:e})" + ) + break + else: + Ddiff = np.linalg.norm(D - D0, np.inf) + logger.info( + f"Debiasing did not converge after {max_iter} iterations! " + f"max(|D - D0| = {Ddiff:e} >= {tol:e})" + ) + return D diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py new file mode 100644 index 0000000..c3ccddb --- /dev/null +++ b/mne/inverse_sparse/mxne_inverse.py @@ -0,0 +1,1092 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
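`power_iteration_kron` above estimates the dominant singular value of `kron(C.T, A)` by repeatedly applying `B -> (A.T A) B (C C.T)` and renormalizing, and `compute_bias` multiplies the result by 1.1 to get a safe Lipschitz constant for its FISTA loop. The same principle in its simplest form, applied to a single matrix (a self-contained NumPy check, not MNE code):

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((6, 4))

# Power iteration on A.T @ A converges to the dominant right-singular vector.
v = rng.standard_normal(4)
for _ in range(500):
    v = A.T @ (A @ v)
    v /= np.linalg.norm(v)

sigma_max = np.linalg.norm(A @ v)
print(np.isclose(sigma_max, np.linalg.svd(A, compute_uv=False)[0]))  # -> True
```

In `power_iteration_kron` the iterate is a matrix `B` rather than a vector, but the renormalize-and-reapply loop and the convergence check on successive norms play exactly the same roles.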
+ +import numpy as np + +from .._fiff.proj import deactivate_proj +from ..dipole import Dipole +from ..fixes import _safe_svd +from ..forward import is_fixed_orient +from ..minimum_norm.inverse import ( + _check_reference, + _log_exp_var, + _prepare_forward, + combine_xyz, +) +from ..source_estimate import SourceEstimate, _BaseSourceEstimate, _make_stc +from ..utils import ( + _check_depth, + _check_option, + _validate_type, + check_random_state, + logger, + sum_squared, + verbose, + warn, +) +from .mxne_optim import ( + _Phi, + groups_norm2, + iterative_mixed_norm_solver, + iterative_tf_mixed_norm_solver, + mixed_norm_solver, + norm_epsilon_inf, + norm_l2inf, + tf_mixed_norm_solver, +) + + +def _check_ori(pick_ori, forward): + """Check pick_ori.""" + _check_option("pick_ori", pick_ori, [None, "vector"]) + if pick_ori == "vector" and is_fixed_orient(forward): + raise ValueError( + 'pick_ori="vector" cannot be combined with a fixed ' + "orientation forward solution." + ) + + +def _prepare_weights(forward, gain, source_weighting, weights, weights_min): + mask = None + if isinstance(weights, _BaseSourceEstimate): + weights = np.max(np.abs(weights.data), axis=1) + weights_max = np.max(weights) + if weights_min > weights_max: + raise ValueError(f"weights_min > weights_max ({weights_min} > {weights_max})") + weights_min = weights_min / weights_max + weights = weights / weights_max + n_dip_per_pos = 1 if is_fixed_orient(forward) else 3 + weights = np.ravel(np.tile(weights, [n_dip_per_pos, 1]).T) + if len(weights) != gain.shape[1]: + raise ValueError( + "weights do not have the correct dimension " + f" ({len(weights)} != {gain.shape[1]})" + ) + if len(source_weighting.shape) == 1: + source_weighting *= weights + else: + source_weighting *= weights[:, None] + gain *= weights[None, :] + + if weights_min is not None: + mask = weights > weights_min + gain = gain[:, mask] + n_sources = np.sum(mask) // n_dip_per_pos + logger.info(f"Reducing source space to {n_sources} sources") + + return gain, source_weighting, mask + + +def _prepare_gain( + forward, info, noise_cov, pca, depth, loose, rank, weights=None, weights_min=None +): + depth = _check_depth(depth, "depth_sparse") + forward, gain_info, gain, _, _, source_weighting, _, _, whitener = _prepare_forward( + forward, info, noise_cov, "auto", loose, rank, pca, use_cps=True, **depth + ) + + if weights is None: + mask = None + else: + gain, source_weighting, mask = _prepare_weights( + forward, gain, source_weighting, weights, weights_min + ) + + return forward, gain, gain_info, whitener, source_weighting, mask + + +def _reapply_source_weighting(X, source_weighting, active_set): + X *= source_weighting[active_set][:, None] + return X + + +def _compute_residual(forward, evoked, X, active_set, info): + # OK, picking based on row_names is safe + sel = [forward["sol"]["row_names"].index(c) for c in info["ch_names"]] + residual = evoked.copy().pick(info["ch_names"]) + r_tmp = residual.copy() + + r_tmp.data = np.dot(forward["sol"]["data"][sel, :][:, active_set], X) + + # Take care of proj + active_projs = list() + non_active_projs = list() + for p in evoked.info["projs"]: + if p["active"]: + active_projs.append(p) + else: + non_active_projs.append(p) + + if len(active_projs) > 0: + with r_tmp.info._unlock(): + r_tmp.info["projs"] = deactivate_proj( + active_projs, copy=True, verbose=False + ) + r_tmp.apply_proj(verbose=False) + r_tmp.add_proj(non_active_projs, remove_existing=False, verbose=False) + + residual.data -= r_tmp.data + + return residual + + +@verbose 
+def _make_sparse_stc( + X, + active_set, + forward, + tmin, + tstep, + active_is_idx=False, + pick_ori=None, + verbose=None, +): + source_nn = forward["source_nn"] + vector = False + if not is_fixed_orient(forward): + if pick_ori != "vector": + logger.info("combining the current components...") + X = combine_xyz(X) + else: + vector = True + source_nn = np.reshape(source_nn, (-1, 3, 3)) + + if not active_is_idx: + active_idx = np.where(active_set)[0] + else: + active_idx = active_set + + n_dip_per_pos = 1 if is_fixed_orient(forward) else 3 + if n_dip_per_pos > 1: + active_idx = np.unique(active_idx // n_dip_per_pos) + + src = forward["src"] + vertices = [] + n_points_so_far = 0 + for this_src in src: + this_n_points_so_far = n_points_so_far + len(this_src["vertno"]) + this_active_idx = active_idx[ + (n_points_so_far <= active_idx) & (active_idx < this_n_points_so_far) + ] + this_active_idx -= n_points_so_far + this_vertno = this_src["vertno"][this_active_idx] + n_points_so_far = this_n_points_so_far + vertices.append(this_vertno) + source_nn = source_nn[active_idx] + return _make_stc( + X, + vertices, + src.kind, + tmin, + tstep, + src[0]["subject_his_id"], + vector=vector, + source_nn=source_nn, + ) + + +def _split_gof(M, X, gain): + # parse out the variance explained using an orthogonal basis + # assuming x is estimated using elements of gain, with residual res + # along the first axis + assert M.ndim == X.ndim == gain.ndim == 2, (M.ndim, X.ndim, gain.ndim) + assert gain.shape == (M.shape[0], X.shape[0]) + assert M.shape[1] == X.shape[1] + norm = (M * M.conj()).real.sum(0, keepdims=True) + norm[norm == 0] = np.inf + M_est = gain @ X + assert M.shape == M_est.shape + res = M - M_est + assert gain.shape[0] == M.shape[0], (gain.shape, M.shape) + # find an orthonormal basis for our matrices that spans the actual data + U, s, _ = np.linalg.svd(gain, full_matrices=False) + if U.shape[1] > 0: + U = U[:, s >= s[0] * 1e-6] + # the part that gets explained + fit_orth = U.T @ M + # the part that got over-explained (landed in residual) + res_orth = U.T @ res + # determine the weights by projecting each one onto this basis + w = (U.T @ gain)[:, :, np.newaxis] * X + w_norm = np.linalg.norm(w, axis=1, keepdims=True) + w_norm[w_norm == 0] = 1.0 + w /= w_norm + # our weights are now unit-norm positive (will presrve power) + fit_back = np.linalg.norm(fit_orth[:, np.newaxis] * w, axis=0) ** 2 + res_back = np.linalg.norm(res_orth[:, np.newaxis] * w, axis=0) ** 2 + # and the resulting goodness of fits + gof_back = 100 * (fit_back - res_back) / norm + assert gof_back.shape == X.shape, (gof_back.shape, X.shape) + return gof_back + + +@verbose +def _make_dipoles_sparse( + X, + active_set, + forward, + tmin, + tstep, + M, + gain_active, + active_is_idx=False, + verbose=None, +): + times = tmin + tstep * np.arange(X.shape[1]) + + if not active_is_idx: + active_idx = np.where(active_set)[0] + else: + active_idx = active_set + + # Compute the GOF split amongst the dipoles + assert M.shape == (gain_active.shape[0], len(times)) + assert gain_active.shape[1] == len(active_idx) == X.shape[0] + gof_split = _split_gof(M, X, gain_active) + assert gof_split.shape == (len(active_idx), len(times)) + assert X.shape[0] in (len(active_idx), 3 * len(active_idx)) + + n_dip_per_pos = 1 if is_fixed_orient(forward) else 3 + if n_dip_per_pos > 1: + active_idx = active_idx // n_dip_per_pos + _, keep = np.unique(active_idx, return_index=True) + keep.sort() # maintain old order + active_idx = active_idx[keep] + gof_split.shape = 
(len(active_idx), n_dip_per_pos, len(times)) + gof_split = gof_split.sum(1) + assert (gof_split < 100).all() + assert gof_split.shape == (len(active_idx), len(times)) + + dipoles = [] + for k, i_dip in enumerate(active_idx): + i_pos = forward["source_rr"][i_dip][np.newaxis, :] + i_pos = i_pos.repeat(len(times), axis=0) + X_ = X[k * n_dip_per_pos : (k + 1) * n_dip_per_pos] + if n_dip_per_pos == 1: + amplitude = X_[0] + i_ori = forward["source_nn"][i_dip][np.newaxis, :] + i_ori = i_ori.repeat(len(times), axis=0) + else: + if forward["surf_ori"]: + X_ = np.dot( + forward["source_nn"][ + i_dip * n_dip_per_pos : (i_dip + 1) * n_dip_per_pos + ].T, + X_, + ) + amplitude = np.linalg.norm(X_, axis=0) + i_ori = np.zeros((len(times), 3)) + i_ori[amplitude > 0.0] = ( + X_[:, amplitude > 0.0] / amplitude[amplitude > 0.0] + ).T + + dipoles.append(Dipole(times, i_pos, amplitude, i_ori, gof_split[k])) + + return dipoles + + +@verbose +def make_stc_from_dipoles(dipoles, src, verbose=None): + """Convert a list of spatio-temporal dipoles into a SourceEstimate. + + Parameters + ---------- + dipoles : Dipole | list of instances of Dipole + The dipoles to convert. + src : instance of SourceSpaces + The source space used to generate the forward operator. + %(verbose)s + + Returns + ------- + stc : SourceEstimate + The source estimate. + """ + logger.info("Converting dipoles into a SourceEstimate.") + if isinstance(dipoles, Dipole): + dipoles = [dipoles] + if not isinstance(dipoles, list): + raise ValueError( + "Dipoles must be an instance of Dipole or " + "a list of instances of Dipole. " + f"Got {type(dipoles)}!" + ) + tmin = dipoles[0].times[0] + tstep = dipoles[0].times[1] - tmin + X = np.zeros((len(dipoles), len(dipoles[0].times))) + source_rr = np.concatenate([_src["rr"][_src["vertno"], :] for _src in src], axis=0) + n_lh_points = len(src[0]["vertno"]) + lh_vertno = list() + rh_vertno = list() + for i in range(len(dipoles)): + if not np.all(dipoles[i].pos == dipoles[i].pos[0]): + raise ValueError( + "Only dipoles with fixed position over time are supported!" + ) + X[i] = dipoles[i].amplitude + idx = np.all(source_rr == dipoles[i].pos[0], axis=1) + idx = np.where(idx)[0][0] + if idx < n_lh_points: + lh_vertno.append(src[0]["vertno"][idx]) + else: + rh_vertno.append(src[1]["vertno"][idx - n_lh_points]) + vertices = [np.array(lh_vertno).astype(int), np.array(rh_vertno).astype(int)] + stc = SourceEstimate( + X, vertices=vertices, tmin=tmin, tstep=tstep, subject=src._subject + ) + logger.info("[done]") + return stc + + +@verbose +def mixed_norm( + evoked, + forward, + noise_cov, + alpha="sure", + loose="auto", + depth=0.8, + maxit=3000, + tol=1e-4, + active_set_size=10, + debias=True, + time_pca=True, + weights=None, + weights_min=0.0, + solver="auto", + n_mxne_iter=1, + return_residual=False, + return_as_dipoles=False, + dgap_freq=10, + rank=None, + pick_ori=None, + sure_alpha_grid="auto", + random_state=None, + verbose=None, +): + """Mixed-norm estimate (MxNE) and iterative reweighted MxNE (irMxNE). + + Compute L1/L2 mixed-norm solution :footcite:`GramfortEtAl2012` or L0.5/L2 + :footcite:`StrohmeierEtAl2016` mixed-norm solution on evoked data. + + Parameters + ---------- + evoked : instance of Evoked or list of instances of Evoked + Evoked data to invert. + forward : dict + Forward operator. + noise_cov : instance of Covariance + Noise covariance to compute whitener. + alpha : float | str + Regularization parameter. 
If float it should be in the range [0, 100): + 0 means no regularization, 100 would give 0 active dipole. + If ``'sure'`` (default), the SURE method from + :footcite:`DeledalleEtAl2014` will be used. + + .. versionchanged:: 0.24 + The default was changed to ``'sure'``. + %(loose)s + %(depth)s + maxit : int + Maximum number of iterations. + tol : float + Tolerance parameter. + active_set_size : int | None + Size of active set increment. If None, no active set strategy is used. + debias : bool + Remove coefficient amplitude bias due to L1 penalty. + time_pca : bool or int + If True the rank of the concatenated epochs is reduced to + its true dimension. If is 'int' the rank is limited to this value. + weights : None | array | SourceEstimate + Weight for penalty in mixed_norm. Can be None, a + 1d array with shape (n_sources,), or a SourceEstimate (e.g. obtained + with wMNE, dSPM, or fMRI). + weights_min : float + Do not consider in the estimation sources for which weights + is less than weights_min. + solver : 'cd' | 'bcd' | 'auto' + The algorithm to use for the optimization. 'cd' uses + coordinate descent, and 'bcd' applies block coordinate descent. + 'cd' is only available for fixed orientation. + n_mxne_iter : int + The number of MxNE iterations. If > 1, iterative reweighting + is applied. + return_residual : bool + If True, the residual is returned as an Evoked instance. + return_as_dipoles : bool + If True, the sources are returned as a list of Dipole instances. + dgap_freq : int or np.inf + The duality gap is evaluated every dgap_freq iterations. Ignored if + solver is 'cd'. + %(rank_none)s + + .. versionadded:: 0.18 + %(pick_ori)s + sure_alpha_grid : array | str + If ``'auto'`` (default), the SURE is evaluated along 15 uniformly + distributed alphas between alpha_max and 0.1 * alpha_max. If array, the + grid is directly specified. Ignored if alpha is not "sure". + + .. versionadded:: 0.24 + random_state : int | None + The random state used in a random number generator for delta and + epsilon used for the SURE computation. Defaults to None. + + .. versionadded:: 0.24 + %(verbose)s + + Returns + ------- + stc : SourceEstimate | list of SourceEstimate + Source time courses for each evoked data passed as input. + residual : instance of Evoked + The residual a.k.a. data not explained by the sources. + Only returned if return_residual is True. + + See Also + -------- + tf_mixed_norm + + References + ---------- + .. footbibliography:: + """ + _validate_type(alpha, ("numeric", str), "alpha") + if isinstance(alpha, str): + _check_option("alpha", alpha, ("sure",)) + elif not 0.0 <= alpha < 100: + raise ValueError( + f'If not equal to "sure" alpha must be in [0, 100). Got alpha = {alpha}' + ) + if n_mxne_iter < 1: + raise ValueError( + "MxNE has to be computed at least 1 time. " + f"Requires n_mxne_iter >= 1, got {n_mxne_iter}" + ) + if dgap_freq <= 0.0: + raise ValueError( + f"dgap_freq must be a positive integer. Got dgap_freq = {dgap_freq}" + ) + if not ( + isinstance(sure_alpha_grid, np.ndarray | list) or sure_alpha_grid == "auto" + ): + raise ValueError( + 'If not equal to "auto" sure_alpha_grid must be an ' + f"array. Got {type(sure_alpha_grid)}" + ) + if (isinstance(sure_alpha_grid, str) and sure_alpha_grid != "auto") and ( + isinstance(alpha, str) and alpha != "sure" + ): + raise Exception( + "If sure_alpha_grid is manually specified, alpha must " + f'be "sure". 
Got {alpha}' + ) + pca = True + if not isinstance(evoked, list): + evoked = [evoked] + + _check_reference(evoked[0]) + + all_ch_names = evoked[0].ch_names + if not all(all_ch_names == evoked[i].ch_names for i in range(1, len(evoked))): + raise Exception("All the datasets must have the same good channels.") + + forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain( + forward, + evoked[0].info, + noise_cov, + pca, + depth, + loose, + rank, + weights, + weights_min, + ) + _check_ori(pick_ori, forward) + + sel = [all_ch_names.index(name) for name in gain_info["ch_names"]] + M = np.concatenate([e.data[sel] for e in evoked], axis=1) + + # Whiten data + logger.info("Whitening data matrix.") + M = np.dot(whitener, M) + + if time_pca: + U, s, Vh = _safe_svd(M, full_matrices=False) + if not isinstance(time_pca, bool) and isinstance(time_pca, int): + U = U[:, :time_pca] + s = s[:time_pca] + Vh = Vh[:time_pca] + M = U * s + + # Scaling to make setting of tol and alpha easy + tol *= sum_squared(M) + n_dip_per_pos = 1 if is_fixed_orient(forward) else 3 + alpha_max = norm_l2inf(np.dot(gain.T, M), n_dip_per_pos, copy=False) + alpha_max *= 0.01 + gain /= alpha_max + source_weighting /= alpha_max + + # Alpha selected automatically by SURE minimization + if alpha == "sure": + alpha_grid = sure_alpha_grid + if isinstance(sure_alpha_grid, str) and sure_alpha_grid == "auto": + alpha_grid = np.geomspace(100, 10, num=15) + X, active_set, best_alpha_ = _compute_mxne_sure( + M, + gain, + alpha_grid, + sigma=1, + random_state=random_state, + n_mxne_iter=n_mxne_iter, + maxit=maxit, + tol=tol, + n_orient=n_dip_per_pos, + active_set_size=active_set_size, + debias=debias, + solver=solver, + dgap_freq=dgap_freq, + verbose=verbose, + ) + logger.info(f"Selected alpha: {best_alpha_}") + else: + if n_mxne_iter == 1: + X, active_set, E = mixed_norm_solver( + M, + gain, + alpha, + maxit=maxit, + tol=tol, + active_set_size=active_set_size, + n_orient=n_dip_per_pos, + debias=debias, + solver=solver, + dgap_freq=dgap_freq, + verbose=verbose, + ) + else: + X, active_set, E = iterative_mixed_norm_solver( + M, + gain, + alpha, + n_mxne_iter, + maxit=maxit, + tol=tol, + n_orient=n_dip_per_pos, + active_set_size=active_set_size, + debias=debias, + solver=solver, + dgap_freq=dgap_freq, + verbose=verbose, + ) + + if time_pca: + X = np.dot(X, Vh) + M = np.dot(M, Vh) + + gain_active = gain[:, active_set] + if mask is not None: + active_set_tmp = np.zeros(len(mask), dtype=bool) + active_set_tmp[mask] = active_set + active_set = active_set_tmp + del active_set_tmp + + if active_set.sum() == 0: + warn("No active dipoles found. 
alpha is too big.") + M_estimate = np.zeros_like(M) + else: + # Reapply weights to have correct unit + X = _reapply_source_weighting(X, source_weighting, active_set) + source_weighting[source_weighting == 0] = 1 # zeros + gain_active /= source_weighting[active_set] + del source_weighting + M_estimate = np.dot(gain_active, X) + + outs = list() + residual = list() + cnt = 0 + for e in evoked: + tmin = e.times[0] + tstep = 1.0 / e.info["sfreq"] + Xe = X[:, cnt : (cnt + len(e.times))] + if return_as_dipoles: + out = _make_dipoles_sparse( + Xe, + active_set, + forward, + tmin, + tstep, + M[:, cnt : (cnt + len(e.times))], + gain_active, + ) + else: + out = _make_sparse_stc( + Xe, active_set, forward, tmin, tstep, pick_ori=pick_ori + ) + outs.append(out) + cnt += len(e.times) + + if return_residual: + residual.append(_compute_residual(forward, e, Xe, active_set, gain_info)) + + _log_exp_var(M, M_estimate, prefix="") + logger.info("[done]") + + if len(outs) == 1: + out = outs[0] + if return_residual: + residual = residual[0] + else: + out = outs + + if return_residual: + out = out, residual + + return out + + +def _window_evoked(evoked, size): + """Window evoked (size in seconds).""" + if isinstance(size, float | int): + lsize = rsize = float(size) + else: + lsize, rsize = size + evoked = evoked.copy() + sfreq = float(evoked.info["sfreq"]) + lsize = int(lsize * sfreq) + rsize = int(rsize * sfreq) + lhann = np.hanning(lsize * 2)[:lsize] + rhann = np.hanning(rsize * 2)[-rsize:] + window = np.r_[lhann, np.ones(len(evoked.times) - lsize - rsize), rhann] + evoked.data *= window[None, :] + return evoked + + +@verbose +def tf_mixed_norm( + evoked, + forward, + noise_cov, + loose="auto", + depth=0.8, + maxit=3000, + tol=1e-4, + weights=None, + weights_min=0.0, + pca=True, + debias=True, + wsize=64, + tstep=4, + window=0.02, + return_residual=False, + return_as_dipoles=False, + alpha=None, + l1_ratio=None, + dgap_freq=10, + rank=None, + pick_ori=None, + n_tfmxne_iter=1, + verbose=None, +): + """Time-Frequency Mixed-norm estimate (TF-MxNE). + + Compute L1/L2 + L1 mixed-norm solution on time-frequency + dictionary. Works with evoked data + :footcite:`GramfortEtAl2013b,GramfortEtAl2011`. + + Parameters + ---------- + evoked : instance of Evoked + Evoked data to invert. + forward : dict + Forward operator. + noise_cov : instance of Covariance + Noise covariance to compute whitener. + %(loose)s + %(depth)s + maxit : int + Maximum number of iterations. + tol : float + Tolerance parameter. + weights : None | array | SourceEstimate + Weight for penalty in mixed_norm. Can be None or + 1d array of length n_sources or a SourceEstimate e.g. obtained + with wMNE or dSPM or fMRI. + weights_min : float + Do not consider in the estimation sources for which weights + is less than weights_min. + pca : bool + If True the rank of the data is reduced to true dimension. + debias : bool + Remove coefficient amplitude bias due to L1 penalty. + wsize : int or array-like + Length of the STFT window in samples (must be a multiple of 4). + If an array is passed, multiple TF dictionaries are used (each having + its own wsize and tstep) and each entry of wsize must be a multiple + of 4. See :footcite:`BekhtiEtAl2016`. + tstep : int or array-like + Step between successive windows in samples (must be a multiple of 2, + a divider of wsize and smaller than wsize/2) (default: wsize/2). 
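The solver above is easiest to understand from the caller's side. Below is a minimal usage sketch against the MNE sample dataset (the dataset paths and the condition name are assumptions, not part of this diff); `alpha="sure"` triggers the automatic SURE grid search documented above, and `n_mxne_iter > 1` enables iterative reweighting (irMxNE):

```python
import mne
from mne.datasets import sample
from mne.inverse_sparse import mixed_norm

data_path = sample.data_path()
meg_dir = data_path / "MEG" / "sample"

evoked = mne.read_evokeds(
    meg_dir / "sample_audvis-ave.fif", condition="Left Auditory", baseline=(None, 0)
)
evoked.crop(tmin=0.0, tmax=0.3)
forward = mne.read_forward_solution(meg_dir / "sample_audvis-meg-eeg-oct-6-fwd.fif")
noise_cov = mne.read_cov(meg_dir / "sample_audvis-shrunk-cov.fif")

# alpha="sure" selects the regularization strength automatically via SURE;
# n_mxne_iter=10 applies iterative reweighting for a sparser solution.
stc = mixed_norm(
    evoked, forward, noise_cov, alpha="sure", n_mxne_iter=10, depth=0.9
)
```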
+ If an array is passed, multiple TF dictionaries are used (each having + its own wsize and tstep), and each entry of tstep must be a multiple + of 2 and divide the corresponding entry of wsize. See + :footcite:`BekhtiEtAl2016`. + window : float or (float, float) + Length of time window used to take care of edge artifacts in seconds. + It can be one float or float if the values are different for left + and right window length. + return_residual : bool + If True, the residual is returned as an Evoked instance. + return_as_dipoles : bool + If True, the sources are returned as a list of Dipole instances. + alpha : float in [0, 100) or None + Overall regularization parameter. + If alpha and l1_ratio are not None, alpha_space and alpha_time are + overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max + * l1_ratio. 0 means no regularization, 100 would give 0 active dipole. + l1_ratio : float in [0, 1] or None + Proportion of temporal regularization. + If l1_ratio and alpha are not None, alpha_space and alpha_time are + overridden by alpha * alpha_max * (1. - l1_ratio) and alpha * alpha_max + * l1_ratio. 0 means no time regularization a.k.a. MxNE. + dgap_freq : int or np.inf + The duality gap is evaluated every dgap_freq iterations. + %(rank_none)s + + .. versionadded:: 0.18 + %(pick_ori)s + n_tfmxne_iter : int + Number of TF-MxNE iterations. If > 1, iterative reweighting is applied. + %(verbose)s + + Returns + ------- + stc : instance of SourceEstimate + Source time courses. + residual : instance of Evoked + The residual a.k.a. data not explained by the sources. + Only returned if return_residual is True. + + See Also + -------- + mixed_norm + + References + ---------- + .. footbibliography:: + """ + _check_reference(evoked) + + all_ch_names = evoked.ch_names + info = evoked.info + + if not (0.0 <= alpha < 100.0): + raise ValueError(f"alpha must be in [0, 100). Got alpha = {alpha}") + + if not (0.0 <= l1_ratio <= 1.0): + raise ValueError(f"l1_ratio must be in range [0, 1]. Got l1_ratio = {l1_ratio}") + alpha_space = alpha * (1.0 - l1_ratio) + alpha_time = alpha * l1_ratio + + if n_tfmxne_iter < 1: + raise ValueError( + "TF-MxNE has to be computed at least 1 time. " + f"Requires n_tfmxne_iter >= 1, got {n_tfmxne_iter}" + ) + + if dgap_freq <= 0.0: + raise ValueError( + f"dgap_freq must be a positive integer. Got dgap_freq = {dgap_freq}" + ) + + tstep = np.atleast_1d(tstep) + wsize = np.atleast_1d(wsize) + if len(tstep) != len(wsize): + raise ValueError( + "The same number of window sizes and steps must be " + f"passed. 
Got tstep = {tstep} and wsize = {wsize}" + ) + + forward, gain, gain_info, whitener, source_weighting, mask = _prepare_gain( + forward, evoked.info, noise_cov, pca, depth, loose, rank, weights, weights_min + ) + _check_ori(pick_ori, forward) + + n_dip_per_pos = 1 if is_fixed_orient(forward) else 3 + + if window is not None: + evoked = _window_evoked(evoked, window) + + sel = [all_ch_names.index(name) for name in gain_info["ch_names"]] + M = evoked.data[sel] + + # Whiten data + logger.info("Whitening data matrix.") + M = np.dot(whitener, M) + + n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int) + n_freqs = wsize // 2 + 1 + n_coefs = n_steps * n_freqs + phi = _Phi(wsize, tstep, n_coefs, evoked.data.shape[1]) + + # Scaling to make setting of tol and alpha easy + tol *= sum_squared(M) + alpha_max = norm_epsilon_inf(gain, M, phi, l1_ratio, n_dip_per_pos) + alpha_max *= 0.01 + gain /= alpha_max + source_weighting /= alpha_max + + if n_tfmxne_iter == 1: + X, active_set, E = tf_mixed_norm_solver( + M, + gain, + alpha_space, + alpha_time, + wsize=wsize, + tstep=tstep, + maxit=maxit, + tol=tol, + verbose=verbose, + n_orient=n_dip_per_pos, + dgap_freq=dgap_freq, + debias=debias, + ) + else: + X, active_set, E = iterative_tf_mixed_norm_solver( + M, + gain, + alpha_space, + alpha_time, + wsize=wsize, + tstep=tstep, + n_tfmxne_iter=n_tfmxne_iter, + maxit=maxit, + tol=tol, + verbose=verbose, + n_orient=n_dip_per_pos, + dgap_freq=dgap_freq, + debias=debias, + ) + + if active_set.sum() == 0: + raise Exception("No active dipoles found. alpha_space/alpha_time are too big.") + + # Compute estimated whitened sensor data for each dipole (dip, ch, time) + gain_active = gain[:, active_set] + + if mask is not None: + active_set_tmp = np.zeros(len(mask), dtype=bool) + active_set_tmp[mask] = active_set + active_set = active_set_tmp + del active_set_tmp + + X = _reapply_source_weighting(X, source_weighting, active_set) + gain_active /= source_weighting[active_set] + + if return_residual: + residual = _compute_residual(forward, evoked, X, active_set, gain_info) + + if return_as_dipoles: + out = _make_dipoles_sparse( + X, active_set, forward, evoked.times[0], 1.0 / info["sfreq"], M, gain_active + ) + else: + out = _make_sparse_stc( + X, + active_set, + forward, + evoked.times[0], + 1.0 / info["sfreq"], + pick_ori=pick_ori, + ) + + logger.info("[done]") + + if return_residual: + out = out, residual + + return out + + +@verbose +def _compute_mxne_sure( + M, + gain, + alpha_grid, + sigma, + n_mxne_iter, + maxit, + tol, + n_orient, + active_set_size, + debias, + solver, + dgap_freq, + random_state, + verbose, +): + """Stein Unbiased Risk Estimator (SURE). + + Implements the finite-difference Monte-Carlo approximation + of the SURE for Multi-Task LASSO. + + See reference :footcite:`DeledalleEtAl2014`. + + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. + gain : array, shape (n_sensors, n_dipoles) + The gain matrix a.k.a. lead field. + alpha_grid : array, shape (n_alphas,) + The grid of alphas used to evaluate the SURE. + sigma : float + The true or estimated noise level in the data. Usually 1 if the data + has been previously whitened using MNE whitener. + n_mxne_iter : int + The number of MxNE iterations. If > 1, iterative reweighting is + applied. + maxit : int + Maximum number of iterations. + tol : float + Tolerance parameter. + n_orient : int + The number of orientation (1 : fixed or 3 : free or loose). + active_set_size : int + Size of active set increase at each iteration. 
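The finite-difference Monte-Carlo idea behind this estimator can be shown with a stand-in solver. A toy sketch (nothing here comes from the diff; `soft_threshold` is a placeholder for the actual MxNE fit):

```python
import numpy as np

rng = np.random.default_rng(0)
sigma = 1.0                               # whitened data -> unit noise level
M = rng.standard_normal((10, 20))         # toy measurements
eps = 2 * sigma / (M.shape[0] ** 0.3)     # probe step, same rule as the code above
delta = rng.standard_normal(M.shape)      # random probe direction

def soft_threshold(M, thr=0.5):
    """Placeholder estimator standing in for the MxNE solve at one alpha."""
    return np.sign(M) * np.maximum(np.abs(M) - thr, 0.0)

pred = soft_threshold(M)
pred_eps = soft_threshold(M + eps * delta)
dof = ((pred_eps - pred) * delta).sum() / eps          # Monte-Carlo dof estimate
sure = (np.linalg.norm(M - pred) ** 2
        - M.size * sigma**2 + 2 * dof * sigma**2)      # SURE value for this alpha
```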
+ debias : bool + Debias source estimates. + solver : 'cd' | 'bcd' | 'auto' + The algorithm to use for the optimization. + dgap_freq : int or np.inf + The duality gap is evaluated every dgap_freq iterations. + random_state : int | None + The random state used in a random number generator for delta and + epsilon used for the SURE computation. + + Returns + ------- + X : array, shape (n_active, n_times) + Coefficient matrix. + active_set : array, shape (n_dipoles,) + Array of indices of non-zero coefficients. + best_alpha_ : float + Alpha that minimizes the SURE. + + References + ---------- + .. footbibliography:: + """ + + def g(w): + return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient))) + + def gprime(w): + return 2.0 * np.repeat(g(w), n_orient).ravel() + + def _run_solver(alpha, M, n_mxne_iter, as_init=None, X_init=None, w_init=None): + if n_mxne_iter == 1: + X, active_set, _ = mixed_norm_solver( + M, + gain, + alpha, + maxit=maxit, + tol=tol, + active_set_size=active_set_size, + n_orient=n_orient, + debias=debias, + solver=solver, + dgap_freq=dgap_freq, + active_set_init=as_init, + X_init=X_init, + verbose=False, + ) + else: + X, active_set, _ = iterative_mixed_norm_solver( + M, + gain, + alpha, + n_mxne_iter, + maxit=maxit, + tol=tol, + n_orient=n_orient, + active_set_size=active_set_size, + debias=debias, + solver=solver, + dgap_freq=dgap_freq, + weight_init=w_init, + verbose=False, + ) + return X, active_set + + def _fit_on_grid(gain, M, eps, delta): + coefs_grid_1_0 = np.zeros((len(alpha_grid), gain.shape[1], M.shape[1])) + coefs_grid_2_0 = np.zeros((len(alpha_grid), gain.shape[1], M.shape[1])) + active_sets, active_sets_eps = [], [] + M_eps = M + eps * delta + # warm start - first iteration (leverages convexity) + logger.info("Warm starting...") + for j, alpha in enumerate(alpha_grid): + logger.info(f"alpha: {alpha}") + X, a_set = _run_solver(alpha, M, 1) + X_eps, a_set_eps = _run_solver(alpha, M_eps, 1) + coefs_grid_1_0[j][a_set, :] = X + coefs_grid_2_0[j][a_set_eps, :] = X_eps + active_sets.append(a_set) + active_sets_eps.append(a_set_eps) + # next iterations + if n_mxne_iter == 1: + return coefs_grid_1_0, coefs_grid_2_0, active_sets + else: + coefs_grid_1 = coefs_grid_1_0.copy() + coefs_grid_2 = coefs_grid_2_0.copy() + logger.info("Fitting SURE on grid.") + for j, alpha in enumerate(alpha_grid): + logger.info(f"alpha: {alpha}") + if active_sets[j].sum() > 0: + w = gprime(coefs_grid_1[j]) + X, a_set = _run_solver(alpha, M, n_mxne_iter - 1, w_init=w) + coefs_grid_1[j][a_set, :] = X + active_sets[j] = a_set + if active_sets_eps[j].sum() > 0: + w_eps = gprime(coefs_grid_2[j]) + X_eps, a_set_eps = _run_solver( + alpha, M_eps, n_mxne_iter - 1, w_init=w_eps + ) + coefs_grid_2[j][a_set_eps, :] = X_eps + active_sets_eps[j] = a_set_eps + + return coefs_grid_1, coefs_grid_2, active_sets + + def _compute_sure_val(coef1, coef2, gain, M, sigma, delta, eps): + n_sensors, n_times = gain.shape[0], M.shape[1] + dof = (gain @ (coef2 - coef1) * delta).sum() / eps + df_term = np.linalg.norm(M - gain @ coef1) ** 2 + sure = df_term - n_sensors * n_times * sigma**2 + sure += 2 * dof * sigma**2 + return sure + + sure_path = np.empty(len(alpha_grid)) + + rng = check_random_state(random_state) + # See Deledalle et al. 20214 Sec. 
5.1 + eps = 2 * sigma / (M.shape[0] ** 0.3) + delta = rng.randn(*M.shape) + + coefs_grid_1, coefs_grid_2, active_sets = _fit_on_grid(gain, M, eps, delta) + + logger.info("Computing SURE values on grid.") + for i, (coef1, coef2) in enumerate(zip(coefs_grid_1, coefs_grid_2)): + sure_path[i] = _compute_sure_val(coef1, coef2, gain, M, sigma, delta, eps) + if verbose: + logger.info(f"alpha {alpha_grid[i]} :: sure {sure_path[i]}") + best_alpha_ = alpha_grid[np.argmin(sure_path)] + + X = coefs_grid_1[np.argmin(sure_path)] + active_set = active_sets[np.argmin(sure_path)] + + X = X[active_set, :] + + return X, active_set, best_alpha_ diff --git a/mne/inverse_sparse/mxne_optim.py b/mne/inverse_sparse/mxne_optim.py new file mode 100644 index 0000000..528cc14 --- /dev/null +++ b/mne/inverse_sparse/mxne_optim.py @@ -0,0 +1,1687 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import functools +from math import sqrt + +import numpy as np + +from ..time_frequency._stft import istft, stft, stft_norm1, stft_norm2 +from ..utils import ( + _check_option, + _get_blas_funcs, + _validate_type, + logger, + sum_squared, + verbose, + warn, +) +from .mxne_debiasing import compute_bias + + +@functools.lru_cache(None) +def _get_dgemm(): + return _get_blas_funcs(np.float64, "gemm") + + +def groups_norm2(A, n_orient): + """Compute squared L2 norms of groups inplace.""" + n_positions = A.shape[0] // n_orient + return np.sum(np.power(A, 2, A).reshape(n_positions, -1), axis=1) + + +def norm_l2inf(A, n_orient, copy=True): + """L2-inf norm.""" + if A.size == 0: + return 0.0 + if copy: + A = A.copy() + return sqrt(np.max(groups_norm2(A, n_orient))) + + +def norm_l21(A, n_orient, copy=True): + """L21 norm.""" + if A.size == 0: + return 0.0 + if copy: + A = A.copy() + return np.sum(np.sqrt(groups_norm2(A, n_orient))) + + +def _primal_l21(M, G, X, active_set, alpha, n_orient): + """Primal objective for the mixed-norm inverse problem. + + See :footcite:`GramfortEtAl2012`. + + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. + G : array, shape (n_sensors, n_active) + The gain matrix a.k.a. lead field. + X : array, shape (n_active, n_times) + Sources. + active_set : array of bool, shape (n_sources,) + Mask of active sources. + alpha : float + The regularization parameter. + n_orient : int + Number of dipoles per locations (typically 1 or 3). + + Returns + ------- + p_obj : float + Primal objective. + R : array, shape (n_sensors, n_times) + Current residual (M - G * X). + nR2 : float + Data-fitting term. + GX : array, shape (n_sensors, n_times) + Forward prediction. + """ + GX = np.dot(G[:, active_set], X) + R = M - GX + penalty = norm_l21(X, n_orient, copy=True) + nR2 = sum_squared(R) + p_obj = 0.5 * nR2 + alpha * penalty + return p_obj, R, nR2, GX + + +def dgap_l21(M, G, X, active_set, alpha, n_orient): + """Duality gap for the mixed norm inverse problem. + + See :footcite:`GramfortEtAl2012`. + + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. + G : array, shape (n_sensors, n_active) + The gain matrix a.k.a. lead field. + X : array, shape (n_active, n_times) + Sources. + active_set : array of bool, shape (n_sources, ) + Mask of active sources. + alpha : float + The regularization parameter. + n_orient : int + Number of dipoles per locations (typically 1 or 3). + + Returns + ------- + gap : float + Dual gap. + p_obj : float + Primal objective. + d_obj : float + Dual objective. gap = p_obj - d_obj. 
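For intuition about the two group norms used throughout this file, here is a toy sketch (shapes are assumptions); `norm_l2inf` is the dual of `norm_l21`, which is why it reappears in the duality-gap computation:

```python
import numpy as np

n_positions, n_orient, n_times = 4, 3, 10
X = np.random.randn(n_positions * n_orient, n_times)   # rows grouped by position

group_sq = np.sum(X.reshape(n_positions, -1) ** 2, axis=1)  # groups_norm2(X, 3)
l21 = np.sum(np.sqrt(group_sq))     # norm_l21: sum of per-position L2 norms
l2inf = np.sqrt(np.max(group_sq))   # norm_l2inf: its dual, the largest group norm
```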
+ R : array, shape (n_sensors, n_times) + Current residual (M - G * X). + + References + ---------- + .. footbibilography:: + """ + p_obj, R, nR2, GX = _primal_l21(M, G, X, active_set, alpha, n_orient) + dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False) + scaling = alpha / dual_norm + scaling = min(scaling, 1.0) + d_obj = (scaling - 0.5 * (scaling**2)) * nR2 + scaling * np.sum(R * GX) + + gap = p_obj - d_obj + return gap, p_obj, d_obj, R + + +def _mixed_norm_solver_cd( + M, + G, + alpha, + lipschitz_constant, + maxit=10000, + tol=1e-8, + init=None, + n_orient=1, + dgap_freq=10, +): + """Solve L21 inverse problem with coordinate descent.""" + from sklearn.linear_model import MultiTaskLasso + + assert M.ndim == G.ndim and M.shape[0] == G.shape[0] + + clf = MultiTaskLasso( + alpha=alpha / len(M), + tol=tol / sum_squared(M), + fit_intercept=False, + max_iter=maxit, + warm_start=True, + ) + if init is not None: + clf.coef_ = init.T + else: + clf.coef_ = np.zeros((G.shape[1], M.shape[1])).T + clf.fit(G, M) + + X = clf.coef_.T + active_set = np.any(X, axis=1) + X = X[active_set] + gap, p_obj, d_obj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient) + return X, active_set, p_obj + + +def _mixed_norm_solver_bcd( + M, + G, + alpha, + lipschitz_constant, + maxit=200, + tol=1e-8, + init=None, + n_orient=1, + dgap_freq=10, + use_accel=True, + K=5, +): + """Solve L21 inverse problem with block coordinate descent.""" + _, n_times = M.shape + _, n_sources = G.shape + n_positions = n_sources // n_orient + + if init is None: + X = np.zeros((n_sources, n_times)) + R = M.copy() + else: + X = init + R = M - np.dot(G, X) + + E = [] # track primal objective function + highest_d_obj = -np.inf + active_set = np.zeros(n_sources, dtype=bool) # start with full AS + + alpha_lc = alpha / lipschitz_constant + + if use_accel: + last_K_X = np.empty((K + 1, n_sources, n_times)) + U = np.zeros((K, n_sources * n_times)) + + # First make G fortran for faster access to blocks of columns + G = np.asfortranarray(G) + # Ensure these are correct for dgemm + assert R.dtype == np.float64 + assert G.dtype == np.float64 + one_ovr_lc = 1.0 / lipschitz_constant + + # assert that all the multiplied matrices are fortran contiguous + assert X.T.flags.f_contiguous + assert R.T.flags.f_contiguous + assert G.flags.f_contiguous + # storing list of contiguous arrays + list_G_j_c = [] + for j in range(n_positions): + idx = slice(j * n_orient, (j + 1) * n_orient) + list_G_j_c.append(np.ascontiguousarray(G[:, idx])) + + for i in range(maxit): + _bcd(G, X, R, active_set, one_ovr_lc, n_orient, alpha_lc, list_G_j_c) + + if (i + 1) % dgap_freq == 0: + _, p_obj, d_obj, _ = dgap_l21( + M, G, X[active_set], active_set, alpha, n_orient + ) + highest_d_obj = max(d_obj, highest_d_obj) + gap = p_obj - highest_d_obj + E.append(p_obj) + logger.debug( + "Iteration %d :: p_obj %f :: dgap %f :: n_active %d", + i + 1, + p_obj, + gap, + np.sum(active_set) / n_orient, + ) + + if gap < tol: + logger.debug(f"Convergence reached ! 
(gap: {gap} < {tol})") + break + + # using Anderson acceleration of the primal variable for faster + # convergence + if use_accel: + last_K_X[i % (K + 1)] = X + + if i % (K + 1) == K: + for k in range(K): + U[k] = last_K_X[k + 1].ravel() - last_K_X[k].ravel() + C = U @ U.T + # at least on ARM64 we can't rely on np.linalg.solve to + # reliably raise LinAlgError here, so use SVD instead + # equivalent to: + # z = np.linalg.solve(C, np.ones(K)) + u, s, _ = np.linalg.svd(C, hermitian=True) + if s[-1] <= 1e-6 * s[0] or not np.isfinite(s).all(): + logger.debug("Iteration %d: LinAlg Error", i + 1) + continue + z = ((u * 1 / s) @ u.T).sum(0) + c = z / z.sum() + X_acc = np.sum(last_K_X[:-1] * c[:, None, None], axis=0) + _grp_norm2_acc = groups_norm2(X_acc, n_orient) + active_set_acc = _grp_norm2_acc != 0 + if n_orient > 1: + active_set_acc = np.kron( + active_set_acc, np.ones(n_orient, dtype=bool) + ) + p_obj = _primal_l21(M, G, X[active_set], active_set, alpha, n_orient)[0] + p_obj_acc = _primal_l21( + M, G, X_acc[active_set_acc], active_set_acc, alpha, n_orient + )[0] + if p_obj_acc < p_obj: + X = X_acc + active_set = active_set_acc + R = M - G[:, active_set] @ X[active_set] + + X = X[active_set] + + return X, active_set, E + + +def _bcd(G, X, R, active_set, one_ovr_lc, n_orient, alpha_lc, list_G_j_c): + """Implement one full pass of BCD. + + BCD stands for Block Coordinate Descent. + This function make use of scipy.linalg.get_blas_funcs to speed reasons. + + Parameters + ---------- + G : array, shape (n_sensors, n_active) + The gain matrix a.k.a. lead field. + X : array, shape (n_sources, n_times) + Sources, modified in place. + R : array, shape (n_sensors, n_times) + The residuals: R = M - G @ X, modified in place. + active_set : array of bool, shape (n_sources, ) + Mask of active sources, modified in place. + one_ovr_lc : array, shape (n_positions, ) + One over the lipschitz constants. + n_orient : int + Number of dipoles per positions (typically 1 or 3). + n_positions : int + Number of source positions. + alpha_lc: array, shape (n_positions, ) + alpha * (Lipschitz constants). + """ + X_j_new = np.zeros_like(X[:n_orient, :], order="C") + dgemm = _get_dgemm() + + for j, G_j_c in enumerate(list_G_j_c): + idx = slice(j * n_orient, (j + 1) * n_orient) + G_j = G[:, idx] + X_j = X[idx] + dgemm( + alpha=one_ovr_lc[j], beta=0.0, a=R.T, b=G_j, c=X_j_new.T, overwrite_c=True + ) + # X_j_new = G_j.T @ R + # Mathurin's trick to avoid checking all the entries + was_non_zero = X_j[0, 0] != 0 + # was_non_zero = np.any(X_j) + if was_non_zero: + dgemm(alpha=1.0, beta=1.0, a=X_j.T, b=G_j_c.T, c=R.T, overwrite_c=True) + # R += np.dot(G_j, X_j) + X_j_new += X_j + block_norm = sqrt(sum_squared(X_j_new)) + if block_norm <= alpha_lc[j]: + X_j.fill(0.0) + active_set[idx] = False + else: + shrink = max(1.0 - alpha_lc[j] / block_norm, 0.0) + X_j_new *= shrink + dgemm(alpha=-1.0, beta=1.0, a=X_j_new.T, b=G_j_c.T, c=R.T, overwrite_c=True) + # R -= np.dot(G_j, X_j_new) + X_j[:] = X_j_new + active_set[idx] = True + + +@verbose +def mixed_norm_solver( + M, + G, + alpha, + maxit=3000, + tol=1e-8, + verbose=None, + active_set_size=50, + debias=True, + n_orient=1, + solver="auto", + return_gap=False, + dgap_freq=10, + active_set_init=None, + X_init=None, +): + """Solve L1/L2 mixed-norm inverse problem with active set strategy. + + See references :footcite:`GramfortEtAl2012,StrohmeierEtAl2016, + BertrandEtAl2020`. + + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. 
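The decisive step inside `_bcd` above is a group soft-threshold on each position's block. A standalone sketch with assumed toy values:

```python
import numpy as np
from math import sqrt

X_j = np.random.randn(3, 50)   # one position's sources (n_orient, n_times)
alpha_lc_j = 2.0               # alpha divided by this block's Lipschitz constant

block_norm = sqrt(np.sum(X_j ** 2))
if block_norm <= alpha_lc_j:
    X_j.fill(0.0)              # whole block zeroed -> position leaves active set
else:
    X_j *= max(1.0 - alpha_lc_j / block_norm, 0.0)   # shrink, position stays active
```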
+ G : array, shape (n_sensors, n_dipoles) + The gain matrix a.k.a. lead field. + alpha : float + The regularization parameter. It should be between 0 and 100. + A value of 100 will lead to an empty active set (no active source). + maxit : int + The number of iterations. + tol : float + Tolerance on dual gap for convergence checking. + %(verbose)s + active_set_size : int + Size of active set increase at each iteration. + debias : bool + Debias source estimates. + n_orient : int + The number of orientation (1 : fixed or 3 : free or loose). + solver : 'cd' | 'bcd' | 'auto' + The algorithm to use for the optimization. Block Coordinate Descent + (BCD) uses Anderson acceleration for faster convergence. + return_gap : bool + Return final duality gap. + dgap_freq : int + The duality gap is computed every dgap_freq iterations of the solver on + the active set. + active_set_init : array, shape (n_dipoles,) or None + The initial active set (boolean array) used at the first iteration. + If None, the usual active set strategy is applied. + X_init : array, shape (n_dipoles, n_times) or None + The initial weight matrix used for warm starting the solver. If None, + the weights are initialized at zero. + + Returns + ------- + X : array, shape (n_active, n_times) + The source estimates. + active_set : array, shape (new_active_set_size,) + The mask of active sources. Note that new_active_set_size is the size + of the active set after convergence of the solver. + E : list + The value of the objective function over the iterations. + gap : float + Final duality gap. Returned only if return_gap is True. + + References + ---------- + .. footbibliography:: + """ + n_dipoles = G.shape[1] + n_positions = n_dipoles // n_orient + _, n_times = M.shape + alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False) + logger.info(f"-- ALPHA MAX : {alpha_max}") + alpha = float(alpha) + X = np.zeros((n_dipoles, n_times), dtype=G.dtype) + + has_sklearn = True + try: + from sklearn.linear_model import MultiTaskLasso # noqa: F401 + except ImportError: + has_sklearn = False + + _validate_type(solver, str, "solver") + _check_option("solver", solver, ("cd", "bcd", "auto")) + if solver == "auto": + if has_sklearn and (n_orient == 1): + solver = "cd" + else: + solver = "bcd" + + if solver == "cd": + if n_orient == 1 and not has_sklearn: + warn( + "Scikit-learn >= 0.12 cannot be found. Using block coordinate" + " descent instead of coordinate descent." + ) + solver = "bcd" + if n_orient > 1: + warn( + "Coordinate descent is only available for fixed orientation. 
" + "Using block coordinate descent instead of coordinate " + "descent" + ) + solver = "bcd" + + if solver == "cd": + logger.info("Using coordinate descent") + l21_solver = _mixed_norm_solver_cd + lc = None + else: + assert solver == "bcd" + logger.info("Using block coordinate descent") + l21_solver = _mixed_norm_solver_bcd + G = np.asfortranarray(G) + if n_orient == 1: + lc = np.sum(G * G, axis=0) + else: + lc = np.empty(n_positions) + for j in range(n_positions): + G_tmp = G[:, (j * n_orient) : ((j + 1) * n_orient)] + lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) + + if active_set_size is not None: + E = list() + highest_d_obj = -np.inf + if X_init is not None and X_init.shape != (n_dipoles, n_times): + raise ValueError("Wrong dim for initialized coefficients.") + active_set = ( + active_set_init + if active_set_init is not None + else np.zeros(n_dipoles, dtype=bool) + ) + idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient)) + new_active_idx = idx_large_corr[-active_set_size:] + if n_orient > 1: + new_active_idx = ( + n_orient * new_active_idx[:, None] + np.arange(n_orient)[None, :] + ).ravel() + active_set[new_active_idx] = True + as_size = np.sum(active_set) + gap = np.inf + for k in range(maxit): + if solver == "bcd": + lc_tmp = lc[active_set[::n_orient]] + elif solver == "cd": + lc_tmp = None + else: + lc_tmp = 1.01 * np.linalg.norm(G[:, active_set], ord=2) ** 2 + X, as_, _ = l21_solver( + M, + G[:, active_set], + alpha, + lc_tmp, + maxit=maxit, + tol=tol, + init=X_init, + n_orient=n_orient, + dgap_freq=dgap_freq, + ) + active_set[active_set] = as_.copy() + idx_old_active_set = np.where(active_set)[0] + + _, p_obj, d_obj, R = dgap_l21(M, G, X, active_set, alpha, n_orient) + highest_d_obj = max(d_obj, highest_d_obj) + gap = p_obj - highest_d_obj + E.append(p_obj) + logger.info( + "Iteration %d :: p_obj %f :: dgap %f :: n_active_start %d :: n_active_" + "end %d", + k + 1, + p_obj, + gap, + as_size // n_orient, + np.sum(active_set) // n_orient, + ) + if gap < tol: + logger.info(f"Convergence reached ! (gap: {gap} < {tol})") + break + + # add sources if not last iteration + if k < (maxit - 1): + idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R), n_orient)) + new_active_idx = idx_large_corr[-active_set_size:] + if n_orient > 1: + new_active_idx = ( + n_orient * new_active_idx[:, None] + + np.arange(n_orient)[None, :] + ) + new_active_idx = new_active_idx.ravel() + active_set[new_active_idx] = True + idx_active_set = np.where(active_set)[0] + as_size = np.sum(active_set) + X_init = np.zeros((as_size, n_times), dtype=X.dtype) + idx = np.searchsorted(idx_active_set, idx_old_active_set) + X_init[idx] = X + else: + warn(f"Did NOT converge ! (gap: {gap} > {tol})") + else: + X, active_set, E = l21_solver( + M, G, alpha, lc, maxit=maxit, tol=tol, n_orient=n_orient, init=None + ) + if return_gap: + gap = dgap_l21(M, G, X, active_set, alpha, n_orient)[0] + + if np.any(active_set) and debias: + bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient) + X *= bias[:, np.newaxis] + + logger.info("Final active set size: %s" % (np.sum(active_set) // n_orient)) + + if return_gap: + return X, active_set, E, gap + else: + return X, active_set, E + + +@verbose +def iterative_mixed_norm_solver( + M, + G, + alpha, + n_mxne_iter, + maxit=3000, + tol=1e-8, + verbose=None, + active_set_size=50, + debias=True, + n_orient=1, + dgap_freq=10, + solver="auto", + weight_init=None, +): + """Solve L0.5/L2 mixed-norm inverse problem with active set strategy. 
+ + See reference :footcite:`StrohmeierEtAl2016`. + + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. + G : array, shape (n_sensors, n_dipoles) + The gain matrix a.k.a. lead field. + alpha : float + The regularization parameter. It should be between 0 and 100. + A value of 100 will lead to an empty active set (no active source). + n_mxne_iter : int + The number of MxNE iterations. If > 1, iterative reweighting + is applied. + maxit : int + The number of iterations. + tol : float + Tolerance on dual gap for convergence checking. + %(verbose)s + active_set_size : int + Size of active set increase at each iteration. + debias : bool + Debias source estimates. + n_orient : int + The number of orientation (1 : fixed or 3 : free or loose). + dgap_freq : int or np.inf + The duality gap is evaluated every dgap_freq iterations. + solver : 'cd' | 'bcd' | 'auto' + The algorithm to use for the optimization. + weight_init : array, shape (n_dipoles,) or None + The initial weight used for reweighting the gain matrix. If None, the + weights are initialized with ones. + + Returns + ------- + X : array, shape (n_active, n_times) + The source estimates. + active_set : array + The mask of active sources. + E : list + The value of the objective function over the iterations. + + References + ---------- + .. footbibliography:: + """ + + def g(w): + return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient))) + + def gprime(w): + return 2.0 * np.repeat(g(w), n_orient).ravel() + + E = list() + + if weight_init is not None and weight_init.shape != (G.shape[1],): + raise ValueError( + f"Wrong dimension for weight initialization. Got {weight_init.shape}. " + f"Expected {(G.shape[1],)}." + ) + + weights = weight_init if weight_init is not None else np.ones(G.shape[1]) + active_set = weights != 0 + weights = weights[active_set] + X = np.zeros((G.shape[1], M.shape[1])) + + for k in range(n_mxne_iter): + X0 = X.copy() + active_set_0 = active_set.copy() + G_tmp = G[:, active_set] * weights[np.newaxis, :] + + if active_set_size is not None: + if np.sum(active_set) > (active_set_size * n_orient): + X, _active_set, _ = mixed_norm_solver( + M, + G_tmp, + alpha, + debias=False, + n_orient=n_orient, + maxit=maxit, + tol=tol, + active_set_size=active_set_size, + dgap_freq=dgap_freq, + solver=solver, + ) + else: + X, _active_set, _ = mixed_norm_solver( + M, + G_tmp, + alpha, + debias=False, + n_orient=n_orient, + maxit=maxit, + tol=tol, + active_set_size=None, + dgap_freq=dgap_freq, + solver=solver, + ) + else: + X, _active_set, _ = mixed_norm_solver( + M, + G_tmp, + alpha, + debias=False, + n_orient=n_orient, + maxit=maxit, + tol=tol, + active_set_size=None, + dgap_freq=dgap_freq, + solver=solver, + ) + + logger.info("active set size %d", _active_set.sum() / n_orient) + + if _active_set.sum() > 0: + active_set[active_set] = _active_set + # Reapply weights to have correct unit + X *= weights[_active_set][:, np.newaxis] + weights = gprime(X) + p_obj = 0.5 * np.linalg.norm( + M - np.dot(G[:, active_set], X), "fro" + ) ** 2.0 + alpha * np.sum(g(X)) + E.append(p_obj) + + # Check convergence + if ( + (k >= 1) + and np.all(active_set == active_set_0) + and np.all(np.abs(X - X0) < tol) + ): + logger.info("Convergence reached after %d reweightings!", k) + break + else: + active_set = np.zeros_like(active_set) + p_obj = 0.5 * np.linalg.norm(M) ** 2.0 + E.append(p_obj) + break + + if np.any(active_set) and debias: + bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient) + X *= bias[:, np.newaxis] + + return 
X, active_set, E + + +############################################################################### +# TF-MxNE + + +class _Phi: + """Have phi stft as callable w/o using a lambda that does not pickle.""" + + def __init__(self, wsize, tstep, n_coefs, n_times): + self.wsize = np.atleast_1d(wsize) + self.tstep = np.atleast_1d(tstep) + self.n_coefs = np.atleast_1d(n_coefs) + self.n_dicts = len(tstep) + self.n_freqs = wsize // 2 + 1 + self.n_steps = self.n_coefs // self.n_freqs + self.n_times = n_times + # ravel freq+time here + self.ops = list() + for ws, ts in zip(self.wsize, self.tstep): + self.ops.append( + stft(np.eye(n_times), ws, ts, verbose=False).reshape(n_times, -1) + ) + + def __call__(self, x): # noqa: D105 + if self.n_dicts == 1: + return x @ self.ops[0] + else: + return np.hstack([x @ op for op in self.ops]) / np.sqrt(self.n_dicts) + + def norm(self, z, ord=2): # noqa: A002 + """Squared L2 norm if ord == 2 and L1 norm if order == 1.""" + if ord not in (1, 2): + raise ValueError(f"Only supported norm order are 1 and 2. Got ord = {ord}") + stft_norm = stft_norm1 if ord == 1 else stft_norm2 + norm = 0.0 + if len(self.n_coefs) > 1: + z_ = np.array_split(np.atleast_2d(z), np.cumsum(self.n_coefs)[:-1], axis=1) + else: + z_ = [np.atleast_2d(z)] + for i in range(len(z_)): + norm += stft_norm(z_[i].reshape(-1, self.n_freqs[i], self.n_steps[i])) + return norm + + +class _PhiT: + """Have phi.T istft as callable w/o using a lambda that does not pickle.""" + + def __init__(self, tstep, n_freqs, n_steps, n_times): + self.tstep = tstep + self.n_freqs = n_freqs + self.n_steps = n_steps + self.n_times = n_times + self.n_dicts = len(tstep) if isinstance(tstep, np.ndarray) else 1 + self.n_coefs = list() + self.op_re = list() + self.op_im = list() + for nf, ns, ts in zip(self.n_freqs, self.n_steps, self.tstep): + nc = nf * ns + self.n_coefs.append(nc) + eye = np.eye(nc).reshape(nf, ns, nf, ns) + self.op_re.append(istft(eye, ts, n_times).reshape(nc, n_times)) + self.op_im.append(istft(eye * 1j, ts, n_times).reshape(nc, n_times)) + + def __call__(self, z): # noqa: D105 + if self.n_dicts == 1: + return z.real @ self.op_re[0] + z.imag @ self.op_im[0] + else: + x_out = np.zeros((z.shape[0], self.n_times)) + z_ = np.array_split(z, np.cumsum(self.n_coefs)[:-1], axis=1) + for this_z, op_re, op_im in zip(z_, self.op_re, self.op_im): + x_out += this_z.real @ op_re + this_z.imag @ op_im + return x_out / np.sqrt(self.n_dicts) + + +def norm_l21_tf(Z, phi, n_orient, w_space=None): + """L21 norm for TF.""" + if Z.shape[0]: + l21_norm = np.sqrt(phi.norm(Z, ord=2).reshape(-1, n_orient).sum(axis=1)) + if w_space is not None: + l21_norm *= w_space + l21_norm = l21_norm.sum() + else: + l21_norm = 0.0 + return l21_norm + + +def norm_l1_tf(Z, phi, n_orient, w_time): + """L1 norm for TF.""" + if Z.shape[0]: + n_positions = Z.shape[0] // n_orient + Z_ = np.sqrt( + np.sum((np.abs(Z) ** 2.0).reshape((n_orient, -1), order="F"), axis=0) + ) + Z_ = Z_.reshape((n_positions, -1), order="F") + if w_time is not None: + Z_ *= w_time + l1_norm = phi.norm(Z_, ord=1).sum() + else: + l1_norm = 0.0 + return l1_norm + + +def norm_epsilon(Y, l1_ratio, phi, w_space=1.0, w_time=None): + """Weighted epsilon norm. + + The weighted epsilon norm is the dual norm of:: + + w_{space} * (1. - l1_ratio) * ||Y||_2 + l1_ratio * ||Y||_{1, w_{time}}. 
+ + where `||Y||_{1, w_{time}} = (np.abs(Y) * w_time).sum()` + + Warning: it takes into account the fact that Y only contains coefficients + corresponding to the positive frequencies (see `stft_norm2()`): some + entries will be counted twice. It is also assumed that all entries of both + Y and w_time are non-negative. See + :footcite:`NdiayeEtAl2016,BurdakovMerkulov2001`. + + Parameters + ---------- + Y : array, shape (n_coefs,) + The input data. + l1_ratio : float between 0 and 1 + Tradeoff between L2 and L1 regularization. When it is 0, no temporal + regularization is applied. + phi : instance of _Phi + The TF operator. + w_space : float + Scalar weight of the L2 norm. By default, it is taken equal to 1. + w_time : array, shape (n_coefs, ) | None + Weights of each TF coefficient in the L1 norm. If None, weights equal + to 1 are used. + + + Returns + ------- + nu : float + The value of the dual norm evaluated at Y. + + References + ---------- + .. footbibliography:: + """ + # since the solution is invariant to flipped signs in Y, all entries + # of Y are assumed positive + + # Add negative freqs: count all freqs twice except first and last: + freqs_count = np.full(len(Y), 2) + for i, fc in enumerate(np.array_split(freqs_count, np.cumsum(phi.n_coefs)[:-1])): + fc[: phi.n_steps[i]] = 1 + fc[-phi.n_steps[i] :] = 1 + + # exclude 0 weights: + if w_time is not None: + nonzero_weights = w_time != 0.0 + Y = Y[nonzero_weights] + freqs_count = freqs_count[nonzero_weights] + w_time = w_time[nonzero_weights] + + norm_inf_Y = np.max(Y / w_time) if w_time is not None else np.max(Y) + if l1_ratio == 1.0: + # dual norm of L1 weighted is Linf with inverse weights + return norm_inf_Y + elif l1_ratio == 0.0: + # dual norm of L2 is L2 + return np.sqrt(phi.norm(Y[None, :], ord=2).sum()) + + if norm_inf_Y == 0.0: + return 0.0 + + # ignore some values of Y by lower bound on dual norm: + if w_time is None: + idx = Y > l1_ratio * norm_inf_Y + else: + idx = Y > l1_ratio * np.max( + Y / (w_space * (1.0 - l1_ratio) + l1_ratio * w_time) + ) + + if idx.sum() == 1: + return norm_inf_Y + + # sort both Y / w_time and freqs_count at the same time + if w_time is not None: + idx_sort = np.argsort(Y[idx] / w_time[idx])[::-1] + w_time = w_time[idx][idx_sort] + else: + idx_sort = np.argsort(Y[idx])[::-1] + + Y = Y[idx][idx_sort] + freqs_count = freqs_count[idx][idx_sort] + + Y = np.repeat(Y, freqs_count) + if w_time is not None: + w_time = np.repeat(w_time, freqs_count) + + K = Y.shape[0] + if w_time is None: + p_sum_Y2 = np.cumsum(Y**2) + p_sum_w2 = np.arange(1, K + 1) + p_sum_Yw = np.cumsum(Y) + upper = p_sum_Y2 / Y**2 - 2.0 * p_sum_Yw / Y + p_sum_w2 + else: + p_sum_Y2 = np.cumsum(Y**2) + p_sum_w2 = np.cumsum(w_time**2) + p_sum_Yw = np.cumsum(Y * w_time) + upper = p_sum_Y2 / (Y / w_time) ** 2 - 2.0 * p_sum_Yw / (Y / w_time) + p_sum_w2 + upper_greater = np.where(upper > w_space**2 * (1.0 - l1_ratio) ** 2 / l1_ratio**2)[ + 0 + ] + + i0 = upper_greater[0] - 1 if upper_greater.size else K - 1 + + p_sum_Y2 = p_sum_Y2[i0] + p_sum_w2 = p_sum_w2[i0] + p_sum_Yw = p_sum_Yw[i0] + + denom = l1_ratio**2 * p_sum_w2 - w_space**2 * (1.0 - l1_ratio) ** 2 + if np.abs(denom) < 1e-10: + return p_sum_Y2 / (2.0 * l1_ratio * p_sum_Yw) + else: + delta = (l1_ratio * p_sum_Yw) ** 2 - p_sum_Y2 * denom + return (l1_ratio * p_sum_Yw - np.sqrt(delta)) / denom + + +def norm_epsilon_inf(G, R, phi, l1_ratio, n_orient, w_space=None, w_time=None): + """Weighted epsilon-inf norm of phi(np.dot(G.T, R)). 
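The two closed-form corners handled early in the function above are easy to check directly. A toy sketch (here `Y` stands for one position's non-negative TF coefficients, ignoring the negative-frequency double counting that `phi.norm` accounts for):

```python
import numpy as np

Y = np.abs(np.random.randn(128))    # non-negative, as the function assumes

nu_linf = np.max(Y)                 # l1_ratio == 1: dual of weighted L1 is L-inf
nu_l2 = np.sqrt(np.sum(Y ** 2))     # l1_ratio == 0: dual of L2 is L2 (self-dual)
# For 0 < l1_ratio < 1 the epsilon norm interpolates between these two values
# and is found by the sorting/root-finding scheme implemented above.
```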
+ + Parameters + ---------- + G : array, shape (n_sensors, n_sources) + Gain matrix a.k.a. lead field. + R : array, shape (n_sensors, n_times) + Residual. + phi : instance of _Phi + The TF operator. + l1_ratio : float between 0 and 1 + Parameter controlling the tradeoff between L21 and L1 regularization. + 0 corresponds to an absence of temporal regularization, ie MxNE. + n_orient : int + Number of dipoles per location (typically 1 or 3). + w_space : array, shape (n_positions,) or None. + Weights for the L2 term of the epsilon norm. If None, weights are + all equal to 1. + w_time : array, shape (n_positions, n_coefs) or None + Weights for the L1 term of the epsilon norm. If None, weights are + all equal to 1. + + Returns + ------- + nu : float + The maximum value of the epsilon norms over groups of n_orient dipoles + (consecutive rows of phi(np.dot(G.T, R))). + """ + n_positions = G.shape[1] // n_orient + GTRPhi = np.abs(phi(np.dot(G.T, R))) + # norm over orientations: + GTRPhi = GTRPhi.reshape((n_orient, -1), order="F") + GTRPhi = np.linalg.norm(GTRPhi, axis=0) + GTRPhi = GTRPhi.reshape((n_positions, -1), order="F") + nu = 0.0 + for idx in range(n_positions): + GTRPhi_ = GTRPhi[idx] + w_t = w_time[idx] if w_time is not None else None + w_s = w_space[idx] if w_space is not None else 1.0 + norm_eps = norm_epsilon(GTRPhi_, l1_ratio, phi, w_space=w_s, w_time=w_t) + if norm_eps > nu: + nu = norm_eps + + return nu + + +def dgap_l21l1( + M, + G, + Z, + active_set, + alpha_space, + alpha_time, + phi, + phiT, + n_orient, + highest_d_obj, + w_space=None, + w_time=None, +): + """Duality gap for the time-frequency mixed norm inverse problem. + + See :footcite:`GramfortEtAl2012,NdiayeEtAl2016` + + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. + G : array, shape (n_sensors, n_sources) + Gain matrix a.k.a. lead field. + Z : array, shape (n_active, n_coefs) + Sources in TF domain. + active_set : array of bool, shape (n_sources, ) + Mask of active sources. + alpha_space : float + The spatial regularization parameter. + alpha_time : float + The temporal regularization parameter. The higher it is the smoother + will be the estimated time series. + phi : instance of _Phi + The TF operator. + phiT : instance of _PhiT + The transpose of the TF operator. + n_orient : int + Number of dipoles per locations (typically 1 or 3). + highest_d_obj : float + The highest value of the dual objective so far. + w_space : array, shape (n_positions, ) + Array of spatial weights. + w_time : array, shape (n_positions, n_coefs) + Array of TF weights. + + Returns + ------- + gap : float + Dual gap + p_obj : float + Primal objective + d_obj : float + Dual objective. gap = p_obj - d_obj + R : array, shape (n_sensors, n_times) + Current residual (M - G * X) + + References + ---------- + .. 
footbibliography:: + """ + X = phiT(Z) + GX = np.dot(G[:, active_set], X) + R = M - GX + + # some functions need w_time only on active_set, other need it completely + if w_time is not None: + w_time_as = w_time[active_set[::n_orient]] + else: + w_time_as = None + if w_space is not None: + w_space_as = w_space[active_set[::n_orient]] + else: + w_space_as = None + + penaltyl1 = norm_l1_tf(Z, phi, n_orient, w_time_as) + penaltyl21 = norm_l21_tf(Z, phi, n_orient, w_space_as) + nR2 = sum_squared(R) + p_obj = 0.5 * nR2 + alpha_space * penaltyl21 + alpha_time * penaltyl1 + + l1_ratio = alpha_time / (alpha_space + alpha_time) + dual_norm = norm_epsilon_inf( + G, R, phi, l1_ratio, n_orient, w_space=w_space, w_time=w_time + ) + scaling = min(1.0, (alpha_space + alpha_time) / dual_norm) + + d_obj = (scaling - 0.5 * (scaling**2)) * nR2 + scaling * np.sum(R * GX) + d_obj = max(d_obj, highest_d_obj) + + gap = p_obj - d_obj + return gap, p_obj, d_obj, R + + +def _tf_mixed_norm_solver_bcd_( + M, + G, + Z, + active_set, + candidates, + alpha_space, + alpha_time, + lipschitz_constant, + phi, + phiT, + *, + w_space=None, + w_time=None, + n_orient=1, + maxit=200, + tol=1e-8, + dgap_freq=10, + perc=None, +): + n_sources = G.shape[1] + n_positions = n_sources // n_orient + + # First make G fortran for faster access to blocks of columns + Gd = np.asfortranarray(G) + G = np.ascontiguousarray(Gd.T.reshape(n_positions, n_orient, -1).transpose(0, 2, 1)) + + R = M.copy() # residual + active = np.where(active_set[::n_orient])[0] + for idx in active: + R -= np.dot(G[idx], phiT(Z[idx])) + + E = [] # track primal objective function + + if w_time is None: + alpha_time_lc = alpha_time / lipschitz_constant + else: + alpha_time_lc = alpha_time * w_time / lipschitz_constant[:, None] + if w_space is None: + alpha_space_lc = alpha_space / lipschitz_constant + else: + alpha_space_lc = alpha_space * w_space / lipschitz_constant + + converged = False + d_obj = -np.inf + + for i in range(maxit): + for jj in candidates: + ids = jj * n_orient + ide = ids + n_orient + + G_j = G[jj] + Z_j = Z[jj] + active_set_j = active_set[ids:ide] + + was_active = np.any(active_set_j) + + # gradient step + GTR = np.dot(G_j.T, R) / lipschitz_constant[jj] + X_j_new = GTR.copy() + + if was_active: + X_j = phiT(Z_j) + R += np.dot(G_j, X_j) + X_j_new += X_j + + rows_norm = np.linalg.norm(X_j_new, "fro") + if rows_norm <= alpha_space_lc[jj]: + if was_active: + Z[jj] = 0.0 + active_set_j[:] = False + else: + GTR_phi = phi(GTR) + if was_active: + Z_j_new = Z_j + GTR_phi + else: + Z_j_new = GTR_phi + col_norm = np.linalg.norm(Z_j_new, axis=0) + + if np.all(col_norm <= alpha_time_lc[jj]): + Z[jj] = 0.0 + active_set_j[:] = False + else: + # l1 + shrink = np.maximum( + 1.0 + - alpha_time_lc[jj] / np.maximum(col_norm, alpha_time_lc[jj]), + 0.0, + ) + if w_time is not None: + shrink[w_time[jj] == 0.0] = 0.0 + Z_j_new *= shrink[np.newaxis, :] + + # l21 + shape_init = Z_j_new.shape + row_norm = np.sqrt(phi.norm(Z_j_new, ord=2).sum()) + if row_norm <= alpha_space_lc[jj]: + Z[jj] = 0.0 + active_set_j[:] = False + else: + shrink = np.maximum( + 1.0 + - alpha_space_lc[jj] + / np.maximum(row_norm, alpha_space_lc[jj]), + 0.0, + ) + Z_j_new *= shrink + Z[jj] = Z_j_new.reshape(-1, *shape_init[1:]).copy() + active_set_j[:] = True + Z_j_phi_T = phiT(Z[jj]) + R -= np.dot(G_j, Z_j_phi_T) + + if (i + 1) % dgap_freq == 0: + Zd = np.vstack([Z[pos] for pos in range(n_positions) if np.any(Z[pos])]) + gap, p_obj, d_obj, _ = dgap_l21l1( + M, + Gd, + Zd, + active_set, + alpha_space, + 
alpha_time, + phi, + phiT, + n_orient, + d_obj, + w_space=w_space, + w_time=w_time, + ) + converged = gap < tol + E.append(p_obj) + logger.info( + "\n Iteration %d :: n_active %d", + i + 1, + np.sum(active_set) / n_orient, + ) + logger.info(f" dgap {gap:.2e} :: p_obj {p_obj} :: d_obj {d_obj}") + + if converged: + break + + if perc is not None: + if np.sum(active_set) / float(n_orient) <= perc * n_positions: + break + + return Z, active_set, E, converged + + +def _tf_mixed_norm_solver_bcd_active_set( + M, + G, + alpha_space, + alpha_time, + lipschitz_constant, + phi, + phiT, + *, + Z_init=None, + w_space=None, + w_time=None, + n_orient=1, + maxit=200, + tol=1e-8, + dgap_freq=10, +): + n_sensors, n_times = M.shape + n_sources = G.shape[1] + n_positions = n_sources // n_orient + + Z = dict.fromkeys(np.arange(n_positions), 0.0) + active_set = np.zeros(n_sources, dtype=bool) + active = [] + if Z_init is not None: + if Z_init.shape != (n_sources, phi.n_coefs.sum()): + raise Exception( + "Z_init must be None or an array with shape (n_sources, n_coefs)." + ) + for ii in range(n_positions): + if np.any(Z_init[ii * n_orient : (ii + 1) * n_orient]): + active_set[ii * n_orient : (ii + 1) * n_orient] = True + active.append(ii) + if len(active): + Z.update(dict(zip(active, np.vsplit(Z_init[active_set], len(active))))) + + E = [] + candidates = range(n_positions) + d_obj = -np.inf + + while True: + # single BCD pass on all positions: + Z_init = dict.fromkeys(np.arange(n_positions), 0.0) + Z_init.update(dict(zip(active, Z.values()))) + Z, active_set, E_tmp, _ = _tf_mixed_norm_solver_bcd_( + M, + G, + Z_init, + active_set, + candidates, + alpha_space, + alpha_time, + lipschitz_constant, + phi, + phiT, + w_space=w_space, + w_time=w_time, + n_orient=n_orient, + maxit=1, + tol=tol, + perc=None, + ) + + E += E_tmp + + # multiple BCD pass on active positions: + active = np.where(active_set[::n_orient])[0] + Z_init = dict(zip(range(len(active)), [Z[idx] for idx in active])) + candidates_ = range(len(active)) + if w_space is not None: + w_space_as = w_space[active_set[::n_orient]] + else: + w_space_as = None + if w_time is not None: + w_time_as = w_time[active_set[::n_orient]] + else: + w_time_as = None + + Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_( + M, + G[:, active_set], + Z_init, + np.ones(len(active) * n_orient, dtype=bool), + candidates_, + alpha_space, + alpha_time, + lipschitz_constant[active_set[::n_orient]], + phi, + phiT, + w_space=w_space_as, + w_time=w_time_as, + n_orient=n_orient, + maxit=maxit, + tol=tol, + dgap_freq=dgap_freq, + perc=0.5, + ) + active = np.where(active_set[::n_orient])[0] + active_set[active_set] = as_.copy() + E += E_tmp + + converged = True + if converged: + Zd = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])]) + gap, p_obj, d_obj, _ = dgap_l21l1( + M, + G, + Zd, + active_set, + alpha_space, + alpha_time, + phi, + phiT, + n_orient, + d_obj, + w_space, + w_time, + ) + logger.info( + "\ndgap %.2e :: p_obj %f :: d_obj %f :: n_active %d", + gap, + p_obj, + d_obj, + np.sum(active_set) / n_orient, + ) + if gap < tol: + logger.info("\nConvergence reached!\n") + break + + if active_set.sum(): + Z = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])]) + X = phiT(Z) + else: + Z = np.zeros((0, phi.n_coefs.sum()), dtype=np.complex128) + X = np.zeros((0, n_times)) + + return X, Z, active_set, E, gap + + +@verbose +def tf_mixed_norm_solver( + M, + G, + alpha_space, + alpha_time, + wsize=64, + tstep=4, + n_orient=1, + maxit=200, + tol=1e-8, + 
active_set_size=None, + debias=True, + return_gap=False, + dgap_freq=10, + verbose=None, +): + """Solve TF L21+L1 inverse solver with BCD and active set approach. + + See :footcite:`GramfortEtAl2013b,GramfortEtAl2011,BekhtiEtAl2016`. + + Parameters + ---------- + M : array, shape (n_sensors, n_times) + The data. + G : array, shape (n_sensors, n_dipoles) + The gain matrix a.k.a. lead field. + alpha_space : float + The spatial regularization parameter. + alpha_time : float + The temporal regularization parameter. The higher it is the smoother + will be the estimated time series. + wsize: int or array-like + Length of the STFT window in samples (must be a multiple of 4). + If an array is passed, multiple TF dictionaries are used (each having + its own wsize and tstep) and each entry of wsize must be a multiple + of 4. + tstep: int or array-like + Step between successive windows in samples (must be a multiple of 2, + a divider of wsize and smaller than wsize/2) (default: wsize/2). + If an array is passed, multiple TF dictionaries are used (each having + its own wsize and tstep), and each entry of tstep must be a multiple + of 2 and divide the corresponding entry of wsize. + n_orient : int + The number of orientation (1 : fixed or 3 : free or loose). + maxit : int + The number of iterations. + tol : float + If absolute difference between estimates at 2 successive iterations + is lower than tol, the convergence is reached. + debias : bool + Debias source estimates. + return_gap : bool + Return final duality gap. + dgap_freq : int or np.inf + The duality gap is evaluated every dgap_freq iterations. + %(verbose)s + + Returns + ------- + X : array, shape (n_active, n_times) + The source estimates. + active_set : array + The mask of active sources. + E : list + The value of the objective function every dgap_freq iteration. If + log_objective is False or dgap_freq is np.inf, it will be empty. + gap : float + Final duality gap. Returned only if return_gap is True. + + References + ---------- + .. footbibliography:: + """ + n_sensors, n_times = M.shape + n_sensors, n_sources = G.shape + n_positions = n_sources // n_orient + + tstep = np.atleast_1d(tstep) + wsize = np.atleast_1d(wsize) + if len(tstep) != len(wsize): + raise ValueError( + "The same number of window sizes and steps must be " + f"passed. 
Got tstep = {tstep} and wsize = {wsize}" + ) + + n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int) + n_freqs = wsize // 2 + 1 + n_coefs = n_steps * n_freqs + phi = _Phi(wsize, tstep, n_coefs, n_times) + phiT = _PhiT(tstep, n_freqs, n_steps, n_times) + + if n_orient == 1: + lc = np.sum(G * G, axis=0) + else: + lc = np.empty(n_positions) + for j in range(n_positions): + G_tmp = G[:, (j * n_orient) : ((j + 1) * n_orient)] + lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) + + logger.info("Using block coordinate descent with active set approach") + X, Z, active_set, E, gap = _tf_mixed_norm_solver_bcd_active_set( + M, + G, + alpha_space, + alpha_time, + lc, + phi, + phiT, + Z_init=None, + n_orient=n_orient, + maxit=maxit, + tol=tol, + dgap_freq=dgap_freq, + ) + + if np.any(active_set) and debias: + bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient) + X *= bias[:, np.newaxis] + + if return_gap: + return X, active_set, E, gap + else: + return X, active_set, E + + +def iterative_tf_mixed_norm_solver( + M, + G, + alpha_space, + alpha_time, + n_tfmxne_iter, + wsize=64, + tstep=4, + maxit=3000, + tol=1e-8, + debias=True, + n_orient=1, + dgap_freq=10, + verbose=None, +): + """Solve TF L0.5/L1 + L0.5 inverse problem with BCD + active set approach. + + Parameters + ---------- + M: array, shape (n_sensors, n_times) + The data. + G: array, shape (n_sensors, n_dipoles) + The gain matrix a.k.a. lead field. + alpha_space: float + The spatial regularization parameter. The higher it is the less there + will be active sources. + alpha_time : float + The temporal regularization parameter. The higher it is the smoother + will be the estimated time series. 0 means no temporal regularization, + a.k.a. irMxNE. + n_tfmxne_iter : int + Number of TF-MxNE iterations. If > 1, iterative reweighting is applied. + wsize : int or array-like + Length of the STFT window in samples (must be a multiple of 4). + If an array is passed, multiple TF dictionaries are used (each having + its own wsize and tstep) and each entry of wsize must be a multiple + of 4. + tstep : int or array-like + Step between successive windows in samples (must be a multiple of 2, + a divider of wsize and smaller than wsize/2) (default: wsize/2). + If an array is passed, multiple TF dictionaries are used (each having + its own wsize and tstep), and each entry of tstep must be a multiple + of 2 and divide the corresponding entry of wsize. + maxit : int + The maximum number of iterations for each TF-MxNE problem. + tol : float + If absolute difference between estimates at 2 successive iterations + is lower than tol, the convergence is reached. Also used as criterion + on duality gap for each TF-MxNE problem. + debias : bool + Debias source estimates. + n_orient : int + The number of orientation (1 : fixed or 3 : free or loose). + dgap_freq : int or np.inf + The duality gap is evaluated every dgap_freq iterations. + %(verbose)s + + Returns + ------- + X : array, shape (n_active, n_times) + The source estimates. + active_set : array + The mask of active sources. + E : list + The value of the objective function over iterations. + """ + n_sensors, n_times = M.shape + n_sources = G.shape[1] + n_positions = n_sources // n_orient + + tstep = np.atleast_1d(tstep) + wsize = np.atleast_1d(wsize) + if len(tstep) != len(wsize): + raise ValueError( + "The same number of window sizes and steps must be " + f"passed. 
Got tstep = {tstep} and wsize = {wsize}" + ) + + n_steps = np.ceil(n_times / tstep.astype(float)).astype(int) + n_freqs = wsize // 2 + 1 + n_coefs = n_steps * n_freqs + phi = _Phi(wsize, tstep, n_coefs, n_times) + phiT = _PhiT(tstep, n_freqs, n_steps, n_times) + + if n_orient == 1: + lc = np.sum(G * G, axis=0) + else: + lc = np.empty(n_positions) + for j in range(n_positions): + G_tmp = G[:, (j * n_orient) : ((j + 1) * n_orient)] + lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2) + + # space and time penalties, and inverse of their derivatives: + def g_space(Z): + return np.sqrt(np.sqrt(phi.norm(Z, ord=2).reshape(-1, n_orient).sum(axis=1))) + + def g_space_prime_inv(Z): + return 2.0 * g_space(Z) + + def g_time(Z): + return np.sqrt( + np.sqrt( + np.sum((np.abs(Z) ** 2.0).reshape((n_orient, -1), order="F"), axis=0) + ).reshape((-1, Z.shape[1]), order="F") + ) + + def g_time_prime_inv(Z): + return 2.0 * g_time(Z) + + E = list() + + active_set = np.ones(n_sources, dtype=bool) + Z = np.zeros((n_sources, phi.n_coefs.sum()), dtype=np.complex128) + + for k in range(n_tfmxne_iter): + active_set_0 = active_set.copy() + Z0 = Z.copy() + + if k == 0: + w_space = None + w_time = None + else: + w_space = 1.0 / g_space_prime_inv(Z) + w_time = g_time_prime_inv(Z) + w_time[w_time == 0.0] = -1.0 + w_time = 1.0 / w_time + w_time[w_time < 0.0] = 0.0 + + X, Z, active_set_, _, _ = _tf_mixed_norm_solver_bcd_active_set( + M, + G[:, active_set], + alpha_space, + alpha_time, + lc[active_set[::n_orient]], + phi, + phiT, + Z_init=Z, + w_space=w_space, + w_time=w_time, + n_orient=n_orient, + maxit=maxit, + tol=tol, + dgap_freq=dgap_freq, + ) + + active_set[active_set] = active_set_ + + if active_set.sum() > 0: + l21_penalty = np.sum(g_space(Z.copy())) + l1_penalty = phi.norm(g_time(Z.copy()), ord=1).sum() + + p_obj = ( + 0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X), "fro") ** 2.0 + + alpha_space * l21_penalty + + alpha_time * l1_penalty + ) + E.append(p_obj) + + logger.info( + "Iteration %d: active set size=%d, E=%f", + k + 1, + active_set.sum() / n_orient, + p_obj, + ) + + # Check convergence + if np.array_equal(active_set, active_set_0): + max_diff = np.amax(np.abs(Z - Z0)) + if max_diff < tol: + logger.info("Convergence reached after %d reweightings!", k) + break + else: + p_obj = 0.5 * np.linalg.norm(M) ** 2.0 + E.append(p_obj) + logger.info( + "Iteration %d: as_size=%d, E=%f", + k + 1, + active_set.sum() / n_orient, + p_obj, + ) + break + + if debias: + if active_set.sum() > 0: + bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient) + X *= bias[:, np.newaxis] + + return X, active_set, E diff --git a/mne/io/__init__.py b/mne/io/__init__.py new file mode 100644 index 0000000..40f3853 --- /dev/null +++ b/mne/io/__init__.py @@ -0,0 +1,9 @@ +"""IO module for reading raw data.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
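The two lines that follow wire up deferred imports via the `lazy_loader` package: names listed in the adjacent `.pyi` stub are only imported on first attribute access, keeping `import mne.io` cheap. A usage sketch of the effect:

```python
import mne.io   # cheap: no reader submodules are imported yet

# First attribute access triggers the actual import of the snirf reader
# through the __getattr__ installed by lazy.attach_stub below.
reader = mne.io.read_raw_snirf
```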
+ +import lazy_loader as lazy + +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/io/__init__.pyi b/mne/io/__init__.pyi new file mode 100644 index 0000000..a9c1141 --- /dev/null +++ b/mne/io/__init__.pyi @@ -0,0 +1,91 @@ +__all__ = [ + "BaseRaw", + "Raw", + "RawArray", + "anonymize_info", + "concatenate_raws", + "constants", + "get_channel_type_constants", + "match_channel_orders", + "pick", + "read_epochs_eeglab", + "read_epochs_fieldtrip", + "read_epochs_kit", + "read_evoked_besa", + "read_evoked_fieldtrip", + "read_evokeds_mff", + "read_fiducials", + "read_info", + "read_raw", + "read_raw_ant", + "read_raw_artemis123", + "read_raw_bdf", + "read_raw_boxy", + "read_raw_brainvision", + "read_raw_bti", + "read_raw_cnt", + "read_raw_ctf", + "read_raw_curry", + "read_raw_edf", + "read_raw_eeglab", + "read_raw_egi", + "read_raw_eximia", + "read_raw_eyelink", + "read_raw_fieldtrip", + "read_raw_fif", + "read_raw_fil", + "read_raw_gdf", + "read_raw_hitachi", + "read_raw_kit", + "read_raw_nedf", + "read_raw_neuralynx", + "read_raw_nicolet", + "read_raw_nihon", + "read_raw_nirx", + "read_raw_nsx", + "read_raw_persyst", + "read_raw_snirf", + "show_fiff", + "write_fiducials", + "write_info", +] +from . import constants, pick +from ._fiff_wrap import ( + anonymize_info, + get_channel_type_constants, + read_fiducials, + read_info, + show_fiff, + write_fiducials, + write_info, +) +from ._read_raw import read_raw +from .ant import read_raw_ant +from .array import RawArray +from .artemis123 import read_raw_artemis123 +from .base import BaseRaw, concatenate_raws, match_channel_orders +from .besa import read_evoked_besa +from .boxy import read_raw_boxy +from .brainvision import read_raw_brainvision +from .bti import read_raw_bti +from .cnt import read_raw_cnt +from .ctf import read_raw_ctf +from .curry import read_raw_curry +from .edf import read_raw_bdf, read_raw_edf, read_raw_gdf +from .eeglab import read_epochs_eeglab, read_raw_eeglab +from .egi import read_evokeds_mff, read_raw_egi +from .eximia import read_raw_eximia +from .eyelink import read_raw_eyelink +from .fieldtrip import read_epochs_fieldtrip, read_evoked_fieldtrip, read_raw_fieldtrip +from .fiff import Raw, read_raw_fif +from .fil import read_raw_fil +from .hitachi import read_raw_hitachi +from .kit import read_epochs_kit, read_raw_kit +from .nedf import read_raw_nedf +from .neuralynx import read_raw_neuralynx +from .nicolet import read_raw_nicolet +from .nihon import read_raw_nihon +from .nirx import read_raw_nirx +from .nsx import read_raw_nsx +from .persyst import read_raw_persyst +from .snirf import read_raw_snirf diff --git a/mne/io/_fiff_wrap.py b/mne/io/_fiff_wrap.py new file mode 100644 index 0000000..e4566d6 --- /dev/null +++ b/mne/io/_fiff_wrap.py @@ -0,0 +1,20 @@ +# ruff: noqa: F401 +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
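# ---------------------------------------------------------------------------
# Sketch of the lazy-import pattern used by mne/io/__init__.py above. The
# call lazy_loader.attach_stub() parses the package's __init__.pyi type stub
# and defers every submodule import until the name is first accessed, which
# keeps the top-level import fast. A minimal equivalent for a hypothetical
# package "mypkg" (assuming lazy-loader is installed and mypkg/__init__.pyi
# declares __all__ plus the re-exports, as the stub above does):
#
#     # mypkg/__init__.py
#     import lazy_loader as lazy
#
#     __getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)
#
# After this, `import mypkg` is cheap; accessing mypkg.read_raw_snirf runs
# the actual `from .snirf import read_raw_snirf` on first attribute access.
# ---------------------------------------------------------------------------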
+ +# Backward compat since these were in the public API before switching to _fiff +# (and _empty_info is convenient to keep here for tests and is private) +from .._fiff.meas_info import ( + Info as _info, +) +from .._fiff.meas_info import ( + _empty_info, + anonymize_info, + read_fiducials, + read_info, + write_fiducials, + write_info, +) +from .._fiff.open import show_fiff +from .._fiff.pick import get_channel_type_constants # moved up a level diff --git a/mne/io/_read_raw.py b/mne/io/_read_raw.py new file mode 100644 index 0000000..28c86c0 --- /dev/null +++ b/mne/io/_read_raw.py @@ -0,0 +1,193 @@ +"""Generic wrapper function read_raw for specific read_raw_xxx readers.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from functools import partial +from pathlib import Path + +from ..utils import fill_doc +from .base import BaseRaw + + +def _read_unsupported(fname, **kwargs): + ext = "".join(Path(fname).suffixes) + msg = f"Unsupported file type ({ext})." + suggest = kwargs.get("suggest") + if suggest is not None: + msg += f" Try reading a {suggest} file instead." + msg += " Consider using a dedicated reader function for more options." + raise ValueError(msg) + + +# supported read file formats +def _get_supported(): + from . import ( + read_raw_ant, + read_raw_artemis123, + read_raw_bdf, + read_raw_boxy, + read_raw_brainvision, + read_raw_cnt, + read_raw_ctf, + read_raw_curry, + read_raw_edf, + read_raw_eeglab, + read_raw_egi, + read_raw_eximia, + read_raw_eyelink, + read_raw_fieldtrip, + read_raw_fif, + read_raw_fil, + read_raw_gdf, + read_raw_kit, + read_raw_nedf, + read_raw_nicolet, + read_raw_nihon, + read_raw_nirx, + read_raw_nsx, + read_raw_persyst, + read_raw_snirf, + ) + + return { + ".edf": dict(EDF=read_raw_edf), + ".eeg": dict(NihonKoden=read_raw_nihon), + ".bdf": dict(BDF=read_raw_bdf), + ".gdf": dict(GDF=read_raw_gdf), + ".vhdr": dict(brainvision=read_raw_brainvision), + ".ahdr": dict(brainvision=read_raw_brainvision), + ".fif": dict(FIF=read_raw_fif), + ".fif.gz": dict(FIF=read_raw_fif), + ".set": dict(EEGLAB=read_raw_eeglab), + ".cnt": dict(CNT=read_raw_cnt, ANT=read_raw_ant), + ".mff": dict(EGI=read_raw_egi), + ".nxe": dict(eximia=read_raw_eximia), + ".hdr": dict(NIRx=read_raw_nirx), + ".snirf": dict(SNIRF=read_raw_snirf), + ".mat": dict(fieldtrip=read_raw_fieldtrip), + ".bin": { + "ARTEMIS": read_raw_artemis123, + "UCL FIL OPM": read_raw_fil, + }, + ".data": dict(Nicolet=read_raw_nicolet), + ".sqd": dict(KIT=read_raw_kit), + ".con": dict(KIT=read_raw_kit), + ".ds": dict(CTF=read_raw_ctf), + ".txt": dict(BOXY=read_raw_boxy), + # Curry + ".dat": dict(CURRY=read_raw_curry), + ".dap": dict(CURRY=read_raw_curry), + ".rs3": dict(CURRY=read_raw_curry), + ".cdt": dict(CURRY=read_raw_curry), + ".cdt.dpa": dict(CURRY=read_raw_curry), + ".cdt.cef": dict(CURRY=read_raw_curry), + ".cef": dict(CURRY=read_raw_curry), + # NEDF + ".nedf": dict(NEDF=read_raw_nedf), + # EyeLink + ".asc": dict(EyeLink=read_raw_eyelink), + ".ns3": dict(NSx=read_raw_nsx), + ".lay": dict(Persyst=read_raw_persyst), + } + + +# known but unsupported file formats +_suggested = { + ".vmrk": dict(brainvision=partial(_read_unsupported, suggest=".vhdr")), + ".amrk": dict(brainvision=partial(_read_unsupported, suggest=".ahdr")), +} + + +# all known file formats +def _get_readers(): + return {**_get_supported(), **_suggested} + + +def split_name_ext(fname): + """Return name and supported file extension.""" + maxsuffixes = max(ext.count(".") for ext in 
_get_supported()) + suffixes = Path(fname).suffixes + for si in range(-maxsuffixes, 0): + ext = "".join(suffixes[si:]).lower() + if ext in _get_readers(): + return Path(fname).name[: -len(ext)], ext + return fname, None # unknown file extension + + +@fill_doc +def read_raw(fname, *, preload=False, verbose=None, **kwargs) -> BaseRaw: + """Read raw file. + + This function is a convenient wrapper for readers defined in `mne.io`. The + correct reader is automatically selected based on the detected file format. + All function arguments are passed to the respective reader. + + The following readers are currently supported: + + * `~mne.io.read_raw_ant` + * `~mne.io.read_raw_artemis123` + * `~mne.io.read_raw_bdf` + * `~mne.io.read_raw_boxy` + * `~mne.io.read_raw_brainvision` + * `~mne.io.read_raw_cnt` + * `~mne.io.read_raw_ctf` + * `~mne.io.read_raw_curry` + * `~mne.io.read_raw_edf` + * `~mne.io.read_raw_eeglab` + * `~mne.io.read_raw_egi` + * `~mne.io.read_raw_eximia` + * `~mne.io.read_raw_eyelink` + * `~mne.io.read_raw_fieldtrip` + * `~mne.io.read_raw_fif` + * `~mne.io.read_raw_fil` + * `~mne.io.read_raw_gdf` + * `~mne.io.read_raw_kit` + * `~mne.io.read_raw_nedf` + * `~mne.io.read_raw_nicolet` + * `~mne.io.read_raw_nihon` + * `~mne.io.read_raw_nirx` + * `~mne.io.read_raw_nsx` + * `~mne.io.read_raw_persyst` + * `~mne.io.read_raw_snirf` + + Parameters + ---------- + fname : path-like + Name of the file to read. + %(preload)s + %(verbose)s + **kwargs + Additional keyword arguments to pass to the underlying reader. For + details, see the arguments of the reader for the respective file + format. + + Returns + ------- + raw : mne.io.Raw + Raw object. + """ + _, ext = split_name_ext(fname) + kwargs["verbose"] = verbose + kwargs["preload"] = preload + readers = _get_readers() + if ext not in readers: + _read_unsupported(fname) + these_readers = list(readers[ext].values()) + for reader in these_readers: + try: + return reader(fname, **kwargs) + except Exception: + if len(these_readers) == 1: + raise + else: + choices = "\n".join( + f"mne.io.{func.__name__.ljust(20)} ({kind})" + for kind, func in readers[ext].items() + ) + raise RuntimeError( + "Could not read file using any of the possible readers for " + f"extension {ext}. Consider trying to read the file directly with " + f"one of:\n{choices}" + ) diff --git a/mne/io/ant/__init__.py b/mne/io/ant/__init__.py new file mode 100644 index 0000000..86ae7b3 --- /dev/null +++ b/mne/io/ant/__init__.py @@ -0,0 +1,5 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .ant import read_raw_ant diff --git a/mne/io/ant/ant.py b/mne/io/ant/ant.py new file mode 100644 index 0000000..8544062 --- /dev/null +++ b/mne/io/ant/ant.py @@ -0,0 +1,338 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from __future__ import annotations + +import re +from collections import defaultdict +from typing import TYPE_CHECKING + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import create_info +from ...annotations import Annotations +from ...utils import ( + _check_fname, + _soft_import, + _validate_type, + copy_doc, + fill_doc, + logger, + verbose, + warn, +) +from ..base import BaseRaw + +if TYPE_CHECKING: + from pathlib import Path + + from numpy.typing import NDArray + +_UNITS: dict[str, float] = {"uv": 1e-6, "µv": 1e-6} + + +@fill_doc +class RawANT(BaseRaw): + r"""Reader for Raw ANT files in .cnt format. 
+ + Parameters + ---------- + fname : file-like + Path to the ANT raw file to load. The file should have the extension ``.cnt``. + eog : str | None + Regex pattern to find EOG channel labels. If None, no EOG channels are + automatically detected. + misc : str | None + Regex pattern to find miscellaneous channels. If None, no miscellaneous channels + are automatically detected. The default pattern ``"BIP\d+"`` will mark all + bipolar channels as ``misc``. + + .. note:: + + A bipolar channel might actually contain ECG, EOG or other signal types + which might have a dedicated channel type in MNE-Python. In this case, use + :meth:`mne.io.Raw.set_channel_types` to change the channel type of the + channel. + bipolars : list of str | tuple of str | None + The list of channels to treat as bipolar EEG channels. Each element should be + a string of the form ``'anode-cathode'`` or in ANT terminology as ``'label- + reference'``. If None, all channels are interpreted as ``'eeg'`` channels + referenced to the same reference electrode. Bipolar channels are treated + as EEG channels with a special coil type in MNE-Python, see also + :func:`mne.set_bipolar_reference` + + .. warning:: + + Do not provide auxiliary channels in this argument, provide them in the + ``eog`` and ``misc`` arguments. + impedance_annotation : str + The string to use for impedance annotations. Defaults to ``"impedance"``, + however, the impedance measurement might mark the end of a segment and the + beginning of a new segment, in which case a discontinuity similar to what + :func:`mne.concatenate_raws` produces is present. In this case, it's better to + include a ``BAD_xxx`` annotation to mark the discontinuity. + + .. note:: + + Note that the impedance annotation will likely have a duration of ``0``. + If the measurement marks a discontinuity, the duration should be modified to + cover the discontinuity in its entirety. + encoding : str + Encoding to use for :class:`str` in the CNT file. Defaults to ``'latin-1'``. + %(preload)s + %(verbose)s + """ + + @verbose + def __init__( + self, + fname: str | Path, + eog: str | None, + misc: str | None, + bipolars: list[str] | tuple[str, ...] | None, + impedance_annotation: str, + *, + encoding: str = "latin-1", + preload: bool | NDArray, + verbose=None, + ) -> None: + logger.info("Reading ANT file %s", fname) + _soft_import("antio", "reading ANT files", min_version="0.5.0") + + from antio import read_cnt + from antio.parser import ( + read_device_info, + read_info, + read_meas_date, + read_subject_info, + read_triggers, + ) + + fname = _check_fname(fname, overwrite="read", must_exist=True, name="fname") + _validate_type(eog, (str, None), "eog") + _validate_type(misc, (str, None), "misc") + _validate_type(bipolars, (list, tuple, None), "bipolar") + _validate_type(impedance_annotation, (str,), "impedance_annotation") + if len(impedance_annotation) == 0: + raise ValueError("The impedance annotation cannot be an empty string.") + cnt = read_cnt(fname) + # parse channels, sampling frequency, and create info + ch_names, ch_units, ch_refs, _, _ = read_info(cnt, encoding=encoding) + ch_types = _parse_ch_types(ch_names, eog, misc, ch_refs) + if bipolars is not None: # handle bipolar channels + bipolars_idx = _handle_bipolar_channels(ch_names, ch_refs, bipolars) + for idx, ch in zip(bipolars_idx, bipolars): + if ch_types[idx] != "eeg": + warn( + f"Channel {ch} was not parsed as an EEG channel, changing to " + "EEG channel type since bipolar EEG was requested." 
+ ) + ch_names[idx] = ch + ch_types[idx] = "eeg" + info = create_info( + ch_names, sfreq=cnt.get_sample_frequency(), ch_types=ch_types + ) + info.set_meas_date(read_meas_date(cnt)) + make, model, serial, site = read_device_info(cnt, encoding=encoding) + info["device_info"] = dict(type=make, model=model, serial=serial, site=site) + his_id, name, sex, birthday = read_subject_info(cnt, encoding=encoding) + info["subject_info"] = dict( + his_id=his_id, + first_name=name, + sex=sex, + ) + if birthday is not None: + info["subject_info"]["birthday"] = birthday + if bipolars is not None: + with info._unlock(): + for idx in bipolars_idx: + info["chs"][idx]["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR + first_samps = np.array((0,)) + last_samps = (cnt.get_sample_count() - 1,) + raw_extras = { + "orig_nchan": cnt.get_channel_count(), + "orig_ch_units": ch_units, + "first_samples": np.array(first_samps), + "last_samples": np.array(last_samps), + } + super().__init__( + info, + preload=preload, + first_samps=first_samps, + last_samps=last_samps, + filenames=[fname], + verbose=verbose, + raw_extras=[raw_extras], + ) + # look for annotations (called trigger by ant) + onsets, durations, descriptions, _, disconnect = read_triggers(cnt) + onsets, durations, descriptions = _prepare_annotations( + onsets, durations, descriptions, disconnect, impedance_annotation + ) + onsets = np.array(onsets) / self.info["sfreq"] + durations = np.array(durations) / self.info["sfreq"] + annotations = Annotations(onsets, duration=durations, description=descriptions) + self.set_annotations(annotations) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + from antio import read_cnt + from antio.parser import read_data + + ch_units = self._raw_extras[0]["orig_ch_units"] + first_samples = self._raw_extras[0]["first_samples"] + n_times = self._raw_extras[0]["last_samples"] + 1 + for first_samp, this_n_times in zip(first_samples, n_times): + i_start = max(start, first_samp) + i_stop = min(stop, this_n_times + first_samp) + # read and scale data array + cnt = read_cnt(self.filenames[fi]) + one = read_data(cnt, i_start, i_stop) + _scale_data(one, ch_units) + data_view = data[:, i_start - start : i_stop - start] + if isinstance(idx, slice): + data_view[:] = one[idx] + else: + # faster than doing one = one[idx] + np.take(one, idx, axis=0, out=data_view) + + +def _handle_bipolar_channels( + ch_names: list[str], ch_refs: list[str], bipolars: list[str] | tuple[str, ...] +) -> list[int]: + """Handle bipolar channels.""" + bipolars_idx = [] + for ch in bipolars: + _validate_type(ch, (str,), "bipolar_channel") + if "-" not in ch: + raise ValueError( + "Bipolar channels should be provided as 'anode-cathode' or " + f"'label-reference'. '{ch}' is not valid." + ) + anode, cathode = ch.split("-") + if anode not in ch_names: + raise ValueError(f"Anode channel {anode} not found in the channels.") + idx = ch_names.index(anode) + if cathode != ch_refs[idx]: + raise ValueError( + f"Reference electrode for {anode} is {ch_refs[idx]}, not {cathode}." 
+ ) + # store idx for later FIFF coil type change + bipolars_idx.append(idx) + return bipolars_idx + + +def _parse_ch_types( + ch_names: list[str], eog: str | None, misc: str | None, ch_refs: list[str] +) -> list[str]: + """Parse the channel types.""" + eog = re.compile(eog) if eog is not None else None + misc = re.compile(misc) if misc is not None else None + ch_types = [] + for ch in ch_names: + if eog is not None and re.fullmatch(eog, ch): + ch_types.append("eog") + elif misc is not None and re.fullmatch(misc, ch): + ch_types.append("misc") + else: + ch_types.append("eeg") + eeg_refs = [ch_refs[k] for k, elt in enumerate(ch_types) if elt == "eeg"] + if len(set(eeg_refs)) == 1: + logger.info( + "All %i EEG channels are referenced to %s.", len(eeg_refs), eeg_refs[0] + ) + else: + warn("All EEG channels are not referenced to the same electrode.") + return ch_types + + +def _prepare_annotations( + onsets: list[int], + durations: list[int], + descriptions: list[str], + disconnect: dict[str, list[int]], + impedance_annotation: str, +) -> tuple[list[int], list[int], list[str]]: + """Parse the ANT triggers into better Annotations.""" + # first, let's replace the description 'impedance' with impedance_annotation + for k, desc in enumerate(descriptions): + if desc.lower() == "impedance": + descriptions[k] = impedance_annotation + # next, let's look for amplifier connection/disconnection and let's try to create + # BAD_disconnection annotations from them. + if ( + len(disconnect["start"]) == len(disconnect["stop"]) + and len(disconnect["start"]) != 0 + and all( + 0 <= stop - start + for start, stop in zip(disconnect["start"], disconnect["stop"]) + ) + ): + for start, stop in zip(disconnect["start"], disconnect["stop"]): + onsets.append(start) + durations.append(stop - start) + descriptions.append("BAD_disconnection") + else: + for elt in disconnect["start"]: + onsets.append(elt) + durations.append(0) + descriptions.append("Amplifier disconnected") + for elt in disconnect["stop"]: + onsets.append(elt) + durations.append(0) + descriptions.append("Amplifier reconnected") + return onsets, durations, descriptions + + +def _scale_data(data: NDArray[np.float64], ch_units: list[str]) -> None: + """Scale the data array based on the human-readable units reported by ANT. + + Operates in-place. + """ + units_index = defaultdict(list) + for idx, unit in enumerate(ch_units): + units_index[unit].append(idx) + for unit, value in units_index.items(): + if unit in _UNITS: + data[np.array(value, dtype=np.int16), :] *= _UNITS[unit] + else: + warn( + f"Unit {unit} not recognized, not scaling. Please report the unit on " + "a github issue on https://github.com/mne-tools/mne-python." + ) + + +@copy_doc(RawANT) +def read_raw_ant( + fname, + eog=None, + misc=r"BIP\d+", + bipolars=None, + impedance_annotation="impedance", + *, + encoding: str = "latin-1", + preload=False, + verbose=None, +) -> RawANT: + """ + Returns + ------- + raw : instance of RawANT + A Raw object containing ANT data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + Notes + ----- + .. 
versionadded:: 1.9 + """ + return RawANT( + fname, + eog=eog, + misc=misc, + bipolars=bipolars, + impedance_annotation=impedance_annotation, + encoding=encoding, + preload=preload, + verbose=verbose, + ) diff --git a/mne/io/array/__init__.py b/mne/io/array/__init__.py new file mode 100644 index 0000000..aea21ef --- /dev/null +++ b/mne/io/array/__init__.py @@ -0,0 +1,7 @@ +"""Module to convert user data to FIF.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .array import RawArray diff --git a/mne/io/array/array.py b/mne/io/array/array.py new file mode 100644 index 0000000..f1c987c --- /dev/null +++ b/mne/io/array/array.py @@ -0,0 +1,96 @@ +"""Tools for creating Raw objects from numpy arrays.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +from ...utils import _check_option, _validate_type, fill_doc, logger, verbose +from ..base import BaseRaw + + +@fill_doc +class RawArray(BaseRaw): + """Raw object from numpy array. + + Parameters + ---------- + data : array, shape (n_channels, n_times) + The channels' time series. See notes for proper units of measure. + %(info_not_none)s Consider using :func:`mne.create_info` to populate + this structure. This may be modified in place by the class. + first_samp : int + First sample offset used during recording (default 0). + + .. versionadded:: 0.12 + copy : {'data', 'info', 'both', 'auto', None} + Determines what gets copied on instantiation. "auto" (default) + will copy info, and copy "data" only if necessary to get to + double floating point precision. + + .. versionadded:: 0.18 + %(verbose)s + + See Also + -------- + mne.EpochsArray + mne.EvokedArray + mne.create_info + + Notes + ----- + Proper units of measure: + + * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog + * T: mag + * T/m: grad + * M: hbo, hbr + * Am: dipole + * AU: misc + """ + + @verbose + def __init__(self, data, info, first_samp=0, copy="auto", verbose=None): + _validate_type(info, "info", "info") + _check_option("copy", copy, ("data", "info", "both", "auto", None)) + dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64 + orig_data = data + data = np.asanyarray(orig_data, dtype=dtype) + if data.ndim != 2: + raise ValueError( + "Data must be a 2D array of shape (n_channels, n_samples), got shape " + f"{data.shape}" + ) + if len(data) != len(info["ch_names"]): + raise ValueError( + 'len(data) ({}) does not match len(info["ch_names"]) ({})'.format( + len(data), len(info["ch_names"]) + ) + ) + assert len(info["ch_names"]) == info["nchan"] + if copy in ("auto", "info", "both"): + info = info.copy() + if copy in ("data", "both"): + if data is orig_data: + data = data.copy() + elif copy != "auto" and data is not orig_data: + raise ValueError( + f"data copying was not requested by copy={copy!r} but it was required " + "to get to double floating point precision" + ) + logger.info( + f"Creating RawArray with {dtype.__name__} data, " + f"n_channels={data.shape[0]}, n_times={data.shape[1]}" + ) + super().__init__( + info, data, first_samps=(int(first_samp),), dtype=dtype, verbose=verbose + ) + logger.info( + " Range : %d ... %d = %9.3f ... 
%9.3f secs", + self.first_samp, + self.last_samp, + float(self.first_samp) / info["sfreq"], + float(self.last_samp) / info["sfreq"], + ) + logger.info("Ready.") diff --git a/mne/io/artemis123/__init__.py b/mne/io/artemis123/__init__.py new file mode 100644 index 0000000..7a51a73 --- /dev/null +++ b/mne/io/artemis123/__init__.py @@ -0,0 +1,7 @@ +"""artemis123 module for conversion to FIF.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .artemis123 import read_raw_artemis123 diff --git a/mne/io/artemis123/artemis123.py b/mne/io/artemis123/artemis123.py new file mode 100644 index 0000000..349a628 --- /dev/null +++ b/mne/io/artemis123/artemis123.py @@ -0,0 +1,530 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import calendar +import datetime +import os.path as op + +import numpy as np +from scipy.spatial.distance import cdist + +from ..._fiff._digitization import DigPoint, _make_dig_points +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.utils import _read_segments_file +from ...transforms import Transform, apply_trans, get_ras_to_neuromag_trans +from ...utils import _check_fname, logger, verbose, warn +from ..base import BaseRaw +from .utils import _load_mne_locs, _read_pos + + +@verbose +def read_raw_artemis123( + input_fname, preload=False, verbose=None, pos_fname=None, add_head_trans=True +) -> "RawArtemis123": + """Read Artemis123 data as raw object. + + Parameters + ---------- + input_fname : path-like + Path to the data file (extension ``.bin``). The header file with the + same file name stem and an extension ``.txt`` is expected to be found + in the same directory. + %(preload)s + %(verbose)s + pos_fname : path-like | None + If not None, load digitized head points from this file. + add_head_trans : bool (default True) + If True attempt to perform initial head localization. Compute initial + device to head coordinate transform using HPI coils. If no + HPI coils are in info['dig'] hpi coils are assumed to be in canonical + order of fiducial points (nas, rpa, lpa). + + Returns + ------- + raw : instance of Raw + A Raw object containing the data. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + """ + return RawArtemis123( + input_fname, + preload=preload, + verbose=verbose, + pos_fname=pos_fname, + add_head_trans=add_head_trans, + ) + + +def _get_artemis123_info(fname, pos_fname=None): + """Generate info struct from artemis123 header file.""" + fname = op.splitext(fname)[0] + header = fname + ".txt" + + logger.info("Reading header...") + + # key names for artemis channel info... 
+ chan_keys = [ + "name", + "scaling", + "FLL_Gain", + "FLL_Mode", + "FLL_HighPass", + "FLL_AutoReset", + "FLL_ResetLock", + ] + + header_info = dict() + header_info["filter_hist"] = [] + header_info["comments"] = "" + header_info["channels"] = [] + + with open(header) as fid: + # section flag + # 0 - None + # 1 - main header + # 2 - channel header + # 3 - comments + # 4 - length + # 5 - filtering History + sectionFlag = 0 + for line in fid: + # skip emptylines or header line for channel info + if (not line.strip()) or (sectionFlag == 2 and line.startswith("DAQ Map")): + continue + + # set sectionFlag + if line.startswith(""): + sectionFlag = 1 + elif line.startswith(""): + sectionFlag = 2 + elif line.startswith(""): + sectionFlag = 3 + elif line.startswith(""): + sectionFlag = 4 + elif line.startswith(""): + sectionFlag = 5 + else: + # parse header info lines + # part of main header - lines are name value pairs + if sectionFlag == 1: + values = line.strip().split("\t") + if len(values) == 1: + values.append("") + header_info[values[0]] = values[1] + # part of channel header - lines are Channel Info + elif sectionFlag == 2: + values = line.strip().split("\t") + if len(values) != 7: + raise OSError( + f"Error parsing line \n\t:{line}\nfrom file {header}" + ) + tmp = dict() + for k, v in zip(chan_keys, values): + tmp[k] = v + header_info["channels"].append(tmp) + elif sectionFlag == 3: + header_info["comments"] = f"{header_info['comments']}{line.strip()}" + elif sectionFlag == 4: + header_info["num_samples"] = int(line.strip()) + elif sectionFlag == 5: + header_info["filter_hist"].append(line.strip()) + + for k in [ + "Temporal Filter Active?", + "Decimation Active?", + "Spatial Filter Active?", + ]: + if header_info[k] != "FALSE": + warn(f"{k} - set to but is not supported") + if header_info["filter_hist"]: + warn("Non-Empty Filter history found, BUT is not supported") + + # build mne info struct + info = _empty_info(float(header_info["DAQ Sample Rate"])) + + # Attempt to get time/date from fname + # Artemis123 files saved from the scanner observe the following + # naming convention 'Artemis_Data_YYYY-MM-DD-HHh-MMm_[chosen by user].bin' + try: + date = datetime.datetime.strptime( + op.basename(fname).split("_")[2], "%Y-%m-%d-%Hh-%Mm" + ) + meas_date = (calendar.timegm(date.utctimetuple()), 0) + except Exception: + meas_date = None + + # build subject info must be an integer (as per FIFF) + try: + subject_info = {"id": int(header_info["Subject ID"])} + except ValueError: + subject_info = {"id": 0} + + # build description + desc = "" + for k in ["Purpose", "Notes"]: + desc += f"{k} : {header_info[k]}\n" + desc += f"Comments : {header_info['comments']}" + + info.update( + { + "meas_date": meas_date, + "description": desc, + "subject_info": subject_info, + "proj_name": header_info["Project Name"], + } + ) + + # Channel Names by type + ref_mag_names = ["REF_001", "REF_002", "REF_003", "REF_004", "REF_005", "REF_006"] + + ref_grad_names = ["REF_007", "REF_008", "REF_009", "REF_010", "REF_011", "REF_012"] + + # load mne loc dictionary + loc_dict = _load_mne_locs() + info["chs"] = [] + bads = [] + + for i, chan in enumerate(header_info["channels"]): + # build chs struct + t = { + "cal": float(chan["scaling"]), + "ch_name": chan["name"], + "logno": i + 1, + "scanno": i + 1, + "range": 1.0, + "unit_mul": FIFF.FIFF_UNITM_NONE, + "coord_frame": FIFF.FIFFV_COORD_DEVICE, + } + # REF_018 has a zero cal which can cause problems. 
Let's set it to + # a value of another ref channel to make writers/readers happy. + if t["cal"] == 0: + t["cal"] = 4.716e-10 + bads.append(t["ch_name"]) + t["loc"] = loc_dict.get(chan["name"], np.zeros(12)) + + if chan["name"].startswith("MEG"): + t["coil_type"] = FIFF.FIFFV_COIL_ARTEMIS123_GRAD + t["kind"] = FIFF.FIFFV_MEG_CH + # While gradiometer units are T/m, the meg sensors referred to as + # gradiometers report the field difference between 2 pick-up coils. + # Therefore the units of the measurements should be T + # *AND* the baseline (difference between pickup coils) + # should not be used in leadfield / forwardfield computations. + t["unit"] = FIFF.FIFF_UNIT_T + t["unit_mul"] = FIFF.FIFF_UNITM_F + + # 3 axis reference magnetometers + elif chan["name"] in ref_mag_names: + t["coil_type"] = FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG + t["kind"] = FIFF.FIFFV_REF_MEG_CH + t["unit"] = FIFF.FIFF_UNIT_T + t["unit_mul"] = FIFF.FIFF_UNITM_F + + # reference gradiometers + elif chan["name"] in ref_grad_names: + t["coil_type"] = FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD + t["kind"] = FIFF.FIFFV_REF_MEG_CH + # While gradiometer units are T/m, the meg sensors referred to as + # gradiometers report the field difference between 2 pick-up coils. + # Therefore the units of the measurements should be T + # *AND* the baseline (difference between pickup coils) + # should not be used in leadfield / forwardfield computations. + t["unit"] = FIFF.FIFF_UNIT_T + t["unit_mul"] = FIFF.FIFF_UNITM_F + + # other reference channels are unplugged and should be ignored. + elif chan["name"].startswith("REF"): + t["coil_type"] = FIFF.FIFFV_COIL_NONE + t["kind"] = FIFF.FIFFV_MISC_CH + t["unit"] = FIFF.FIFF_UNIT_V + bads.append(t["ch_name"]) + + elif chan["name"].startswith(("AUX", "TRG", "MIO")): + t["coil_type"] = FIFF.FIFFV_COIL_NONE + t["unit"] = FIFF.FIFF_UNIT_V + if chan["name"].startswith("TRG"): + t["kind"] = FIFF.FIFFV_STIM_CH + else: + t["kind"] = FIFF.FIFFV_MISC_CH + else: + raise ValueError( + f'Channel does not match expected channel Types:"{chan["name"]}"' + ) + + # incorporate multiplier (unit_mul) into calibration + t["cal"] *= 10 ** t["unit_mul"] + t["unit_mul"] = FIFF.FIFF_UNITM_NONE + + # append this channel to the info + info["chs"].append(t) + if chan["FLL_ResetLock"] == "TRUE": + bads.append(t["ch_name"]) + + # HPI information + # print header_info.keys() + hpi_sub = dict() + # Don't know what event_channel is don't think we have it HPIs are either + # always on or always off. + # hpi_sub['event_channel'] = ??? 
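# ---------------------------------------------------------------------------
# Illustration (with hypothetical numbers) of how the loop above folds the
# FIFF unit multiplier into the calibration factor: FIFF.FIFF_UNITM_F is the
# femto multiplier (-15), so a channel whose raw counts are calibrated in
# femtotesla ends up with a single cal that maps counts directly to tesla.
cal, unit_mul = 4.716e-10, -15    # -15 == FIFF.FIFF_UNITM_F
cal *= 10 ** unit_mul             # counts -> T in one multiplication
unit_mul = 0                      # 0 == FIFF.FIFF_UNITM_NONE
assert abs(cal - 4.716e-25) < 1e-30
# ---------------------------------------------------------------------------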
+ hpi_sub["hpi_coils"] = [dict(), dict(), dict(), dict()] + hpi_coils = [dict(), dict(), dict(), dict()] + drive_channels = ["MIO_001", "MIO_003", "MIO_009", "MIO_011"] + key_base = "Head Tracking %s %d" + + # set default HPI frequencies + if info["sfreq"] == 1000: + default_freqs = [140, 150, 160, 40] + else: + default_freqs = [700, 750, 800, 40] + + for i in range(4): + # build coil structure + hpi_coils[i]["number"] = i + 1 + hpi_coils[i]["drive_chan"] = drive_channels[i] + this_freq = header_info.pop(key_base % ("Frequency", i + 1), default_freqs[i]) + hpi_coils[i]["coil_freq"] = this_freq + + # check if coil is on + if header_info[key_base % ("Channel", i + 1)] == "OFF": + hpi_sub["hpi_coils"][i]["event_bits"] = [0] + else: + hpi_sub["hpi_coils"][i]["event_bits"] = [256] + + info["hpi_subsystem"] = hpi_sub + info["hpi_meas"] = [{"hpi_coils": hpi_coils}] + # read in digitized points if supplied + if pos_fname is not None: + info["dig"] = _read_pos(pos_fname) + else: + info["dig"] = [] + + info._unlocked = False + info._update_redundant() + # reduce info['bads'] to unique set + info["bads"] = list(set(bads)) + del bads + return info, header_info + + +class RawArtemis123(BaseRaw): + """Raw object from Artemis123 file. + + Parameters + ---------- + input_fname : path-like + Path to the Artemis123 data file (ending in ``'.bin'``). + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + """ + + @verbose + def __init__( + self, + input_fname, + preload=False, + verbose=None, + pos_fname=None, + add_head_trans=True, + ): + from ...chpi import ( + _fit_coil_order_dev_head_trans, + compute_chpi_amplitudes, + compute_chpi_locs, + ) + + input_fname = str(_check_fname(input_fname, "read", True, "input_fname")) + fname, ext = op.splitext(input_fname) + if ext == ".txt": + input_fname = fname + ".bin" + elif ext != ".bin": + raise RuntimeError( + 'Valid artemis123 files must end in "txt"' + ' or ".bin".' + ) + + if not op.exists(input_fname): + raise RuntimeError(f"{input_fname} - Not Found") + + info, header_info = _get_artemis123_info(input_fname, pos_fname=pos_fname) + + last_samps = [header_info.get("num_samples", 1) - 1] + + super().__init__( + info, + preload, + filenames=[input_fname], + raw_extras=[header_info], + last_samps=last_samps, + orig_format="single", + verbose=verbose, + ) + + if add_head_trans: + n_hpis = 0 + for d in info["hpi_subsystem"]["hpi_coils"]: + if d["event_bits"] == [256]: + n_hpis += 1 + if n_hpis < 3: + warn( + f"{n_hpis:d} HPIs active. At least 3 needed to perform" + "head localization\n *NO* head localization performed" + ) + else: + # Localized HPIs using the 1st 250 milliseconds of data. 
+ with info._unlock(): + info["hpi_results"] = [ + dict( + dig_points=[ + dict( + r=np.zeros(3), + coord_frame=FIFF.FIFFV_COORD_DEVICE, + ident=ii + 1, + ) + for ii in range(n_hpis) + ], + coord_trans=Transform("meg", "head"), + ) + ] + coil_amplitudes = compute_chpi_amplitudes( + self, tmin=0, tmax=0.25, t_window=0.25, t_step_min=0.25 + ) + assert len(coil_amplitudes["times"]) == 1 + coil_locs = compute_chpi_locs(self.info, coil_amplitudes) + with info._unlock(): + info["hpi_results"] = None + hpi_g = coil_locs["gofs"][0] + hpi_dev = coil_locs["rrs"][0] + + # only use HPI coils with localizaton goodness_of_fit > 0.98 + bad_idx = [] + for i, g in enumerate(hpi_g): + msg = f"HPI coil {i + 1} - location goodness of fit ({g:0.3f})" + if g < 0.98: + bad_idx.append(i) + msg += " *Removed from coregistration*" + logger.info(msg) + hpi_dev = np.delete(hpi_dev, bad_idx, axis=0) + hpi_g = np.delete(hpi_g, bad_idx, axis=0) + + if pos_fname is not None: + # Digitized HPI points are needed. + hpi_head = np.array( + [ + d["r"] + for d in self.info.get("dig", []) + if d["kind"] == FIFF.FIFFV_POINT_HPI + ] + ) + + if len(hpi_head) != len(hpi_dev): + raise RuntimeError( + f"number of digitized ({len(hpi_head)}) and active " + f"({len(hpi_dev)}) HPI coils are not the same." + ) + + # compute initial head to dev transform and hpi ordering + head_to_dev_t, order, trans_g = _fit_coil_order_dev_head_trans( + hpi_dev, hpi_head + ) + + # set the device to head transform + self.info["dev_head_t"] = Transform( + FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_HEAD, head_to_dev_t + ) + + # add hpi_meg_dev to dig... + for idx, point in enumerate(hpi_dev): + d = { + "r": point, + "ident": idx + 1, + "kind": FIFF.FIFFV_POINT_HPI, + "coord_frame": FIFF.FIFFV_COORD_DEVICE, + } + self.info["dig"].append(DigPoint(d)) + + dig_dists = cdist(hpi_head[order], hpi_head[order]) + dev_dists = cdist(hpi_dev, hpi_dev) + tmp_dists = np.abs(dig_dists - dev_dists) + dist_limit = tmp_dists.max() * 1.1 + + logger.info( + "HPI-Dig corrregsitration\n" + f"\tGOF : {trans_g:0.3f}\n" + f"\tMax Coil Error : {100 * tmp_dists.max():0.3f} cm\n" + ) + + else: + logger.info("Assuming Cardinal HPIs") + nas = hpi_dev[0] + lpa = hpi_dev[2] + rpa = hpi_dev[1] + t = get_ras_to_neuromag_trans(nas, lpa, rpa) + with self.info._unlock(): + self.info["dev_head_t"] = Transform( + FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_HEAD, t + ) + + # transform fiducial points + nas = apply_trans(t, nas) + lpa = apply_trans(t, lpa) + rpa = apply_trans(t, rpa) + + hpi = apply_trans(self.info["dev_head_t"], hpi_dev) + with self.info._unlock(): + self.info["dig"] = _make_dig_points( + nasion=nas, lpa=lpa, rpa=rpa, hpi=hpi + ) + order = np.array([0, 1, 2]) + dist_limit = 0.005 + + # fill in hpi_results + hpi_result = dict() + + # add HPI points in device coords... + dig = [] + for idx, point in enumerate(hpi_dev): + dig.append( + { + "r": point, + "ident": idx + 1, + "kind": FIFF.FIFFV_POINT_HPI, + "coord_frame": FIFF.FIFFV_COORD_DEVICE, + } + ) + hpi_result["dig_points"] = dig + + # attach Transform + hpi_result["coord_trans"] = self.info["dev_head_t"] + + # 1 based indexing + hpi_result["order"] = order + 1 + hpi_result["used"] = np.arange(3) + 1 + hpi_result["dist_limit"] = dist_limit + hpi_result["good_limit"] = 0.98 + + # Warn for large discrepancies between digitized and fit + # cHPI locations + if hpi_result["dist_limit"] > 0.005: + warn( + "Large difference between digitized geometry" + " and HPI geometry. 
Max coil to coil difference" + f" is {100.0 * tmp_dists.max():0.2f} cm\n" + "beware of *POOR* head localization" + ) + + # store it + with self.info._unlock(): + self.info["hpi_results"] = [hpi_result] + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype=">f4") diff --git a/mne/io/artemis123/resources/Artemis123_ChannelMap.csv b/mne/io/artemis123/resources/Artemis123_ChannelMap.csv new file mode 100644 index 0000000..1ee9325 --- /dev/null +++ b/mne/io/artemis123/resources/Artemis123_ChannelMap.csv @@ -0,0 +1,146 @@ +name,Channel Type,CAD X+ (INCH),CAD Y+ (INCH),CAD Z+ (INCH),CAD X- (INCH),CAD Y- (INCH),CAD Z- (INCH) +Derived from '90-0395 Channel Map for 6th cooldown 2-01-13.xls',,,,,,, +MEG_059,MEG_GRAD,-1.97677,1.56552,2.91489,-4.18768,2.50074,5.40664 +MEG_045,MEG_GRAD,-1.61144,0.93037,3.41137,-3.33479,1.92534,6.24186 +MEG_029,MEG_GRAD,-0.91075,1.72387,3.473,-1.93587,2.72988,6.62081 +MEG_073,MEG_GRAD,-2.38955,0.86972,2.76491,-4.94504,1.79985,4.90406 +MEG_043,MEG_GRAD,-1.59926,2.33243,2.93122,-3.46787,3.39595,5.64209 +MEG_085,MEG_GRAD,-2.78631,1.40783,1.84839,-5.89386,2.21359,3.13893 +REF_013,UNUSED,,,,,, +MEG_071,MEG_GRAD,-2.43321,2.17533,2.12153,-5.27622,3.05529,3.88634 +MEG_032,MEG_GRAD,0.93037,-1.61144,3.41137,1.92534,-3.33479,6.24186 +MEG_048,MEG_GRAD,1.27145,-2.20222,2.76491,2.6312,-4.55737,4.90406 +MEG_018,MEG_GRAD,0.44157,-2.50427,2.76491,0.91381,-5.18245,4.90406 +MEG_006,MEG_GRAD,0,-3.0105,1.94967,0,-6.23006,3.21696 +MEG_005,MEG_GRAD,0,-1.86073,3.41137,0,-3.85068,6.24186 +MEG_049,MEG_GRAD,-1.27145,-2.20222,2.76491,-2.6312,-4.55737,4.90406 +MEG_019,MEG_GRAD,-0.44157,-2.50427,2.76491,-0.91381,-5.18245,4.90406 +MEG_033,MEG_GRAD,-0.93037,-1.61144,3.41137,-1.92534,-3.33479,6.24186 +MEG_021,MEG_GRAD,-0.56074,-3.168,1.10519,-1.13708,-6.39559,2.21066 +MEG_020,MEG_GRAD,0.56022,-3.16809,1.10519,1.13604,-6.39578,2.21066 +MEG_034,MEG_GRAD,1.02965,-2.82894,1.94967,2.13081,-5.85434,3.21696 +MEG_077,MEG_GRAD,-2.47272,-2.0647,1.06346,-5.01426,-4.15829,2.12604 +MEG_035,MEG_GRAD,-1.02965,-2.82894,1.94967,-2.13081,-5.85434,3.21696 +MEG_007,MEG_GRAD,0,-3.27147,0.25764,0,-6.63351,1.0751 +MEG_023,MEG_GRAD,-0.576,-3.27431,-0.5962,-1.16503,-6.58484,0.21931 +MEG_022,MEG_GRAD,0.56022,-3.27709,-0.59609,1.14872,-6.58771,0.21942 +MEG_047,MEG_GRAD,-1.61144,-0.93037,3.41137,-3.33479,-1.92534,6.24186 +MEG_061,MEG_GRAD,-1.86073,0,3.41137,-3.85068,0,6.24186 +MEG_087,MEG_GRAD,-2.5429,0,2.76491,-5.2624,0,4.90406 +MEG_113,MEG_GRAD,-3.22769,0.0086,0.98505,-6.5452,0.046,1.96703 +MEG_101,MEG_GRAD,-2.96476,-0.52277,1.94967,-6.13541,-1.08184,3.21696 +MEG_099,MEG_GRAD,-2.96476,0.52277,1.94967,-6.13541,1.08184,3.21696 +MEG_063,MEG_GRAD,-1.94798,-1.63455,2.76491,-4.03123,-3.38261,4.90406 +MEG_075,MEG_GRAD,-2.38955,-0.86972,2.76491,-4.94504,-1.79985,4.90406 +MEG_089,MEG_GRAD,-2.60717,-1.50525,1.94967,-5.39539,-3.11503,3.21696 +MEG_123,MEG_GRAD,-3.24454,-0.65992,-1.54654,-6.63007,-1.24165,-1.13258 +MEG_103,MEG_GRAD,-3.03312,-1.09456,1.02677,-6.15066,-2.19102,2.05164 +MEG_119,MEG_GRAD,-3.27163,-0.04807,-0.71822,-6.66217,-0.02172,-0.02891 +MEG_121,MEG_GRAD,-3.24454,0.48346,-1.58979,-6.63007,1.0948,-1.22095 +MEG_105,MEG_GRAD,-3.07707,-1.16672,-0.67591,-6.26323,-2.29919,0.05723 +MEG_091,MEG_GRAD,-2.81455,-1.64764,0.19622,-5.75085,-3.31563,0.94961 +MEG_115,MEG_GRAD,-3.20059,-0.58777,0.15614,-6.53962,-1.15004,0.86771 +MEG_037,MEG_GRAD,-1.11155,-3.07561,0.25023,-2.27119,-6.23333,1.05996 
+MEG_067,MEG_GRAD,-2.08904,-2.51166,0.2289,-4.26844,-5.08104,1.01638 +MEG_079,MEG_GRAD,-2.51137,-2.1514,-0.63867,-5.10885,-4.30296,0.13301 +MEG_093,MEG_GRAD,-2.8532,-1.73435,-1.50591,-5.7542,-3.37531,-0.57691 +MEG_051,MEG_GRAD,-1.61407,-2.7848,1.0907,-3.27306,-5.61852,2.18127 +MEG_065,MEG_GRAD,-1.93511,-2.30617,1.94967,-4.00461,-4.7725,3.21696 +REF_014,UNUSED,,,,,, +MEG_053,MEG_GRAD,-1.64275,-2.88336,-0.61098,-3.33826,-5.79135,0.1893 +MEG_039,MEG_GRAD,-1.37821,4.03301,0.38766,-2.98972,7.09471,0.3625 +MEG_041,MEG_GRAD,-1.59926,3.67934,1.66789,-3.46787,6.31662,2.90266 +MEG_055,MEG_GRAD,-2.06278,3.53364,0.8475,-4.47296,6.00069,1.12372 +MEG_069,MEG_GRAD,-2.43321,2.88136,1.45931,-5.27622,4.58626,2.45038 +MEG_027,MEG_GRAD,-1.02514,3.32279,2.63742,-2.22293,5.54346,5.00502 +MEG_025,MEG_GRAD,-0.92333,4.17235,1.20548,-2.00217,7.38566,1.89996 +MEG_057,MEG_GRAD,-1.84667,3.00588,2.29955,-4.00435,4.85628,4.27238 +REF_015,UNUSED,,,,,, +MEG_083,MEG_GRAD,-2.81067,2.32514,1.52142,-6.13327,3.15736,2.01067 +MEG_095,MEG_GRAD,-2.85632,2.16654,0.82155,-6.24599,2.85761,0.88605 +MEG_117,MEG_GRAD,-3.14455,0.87829,-0.52294,-6.53422,1.56936,-0.45844 +MEG_109,MEG_GRAD,-3.0226,1.3925,0.37679,-6.41227,2.08357,0.44129 +MEG_107,MEG_GRAD,-2.7791,2.44789,0.19401,-6.01824,3.66345,0.23867 +MEG_111,MEG_GRAD,-3.20059,0.54013,0.11348,-6.53962,1.15454,0.78055 +MEG_097,MEG_GRAD,-3.04326,1.22292,1.10768,-6.3884,1.94226,1.62169 +MEG_081,MEG_GRAD,-2.54021,2.92425,0.68688,-5.5195,4.68347,0.71098 +REF_001,REF_MAG,-2.26079604,3.98626183,5.04439808,-2.20703425,3.92437924,4.93090704 +REF_002,REF_MAG,1.93013445,4.03046866,5.17689263,1.8763992,3.96852956,5.06341985 +REF_004,REF_MAG,1.70031266,4.21202221,5.57217923,1.57144014,4.22797498,5.62449924 +REF_012,REF_GRAD,4.64675,-0.89642,-0.43802,6.03162,-1.01804,-0.22614 +REF_006,REF_MAG,2.07781,3.83073028,5.60154279,2.08802749,3.70619491,5.66468189 +REF_008,REF_GRAD,4.50056,0.78066,1.76423,5.88573,0.92199,1.96135 +REF_010,REF_GRAD,4.31926,2.18698,-0.37055,5.69806,2.46181,-0.34022 +MEG_094,REF_GRAD,2.85632,2.16654,0.82155,6.24599,2.85761,0.88605 +REF_016,UNUSED,,,,,, +REF_003,REF_MAG,-2.73073962,4.07852721,5.1569653,-2.8596759,4.06162797,5.1051015 +REF_017,UNUSED,,,,,, +REF_011,REF_GRAD,-4.64675,-0.89642,-0.43802,-6.03162,-1.01804,-0.22614 +REF_009,REF_GRAD,-4.31926,2.18698,-0.37055,-5.69806,2.46181,-0.34022 +REF_007,REF_GRAD,-4.50056,0.78066,1.76423,-5.88573,0.92199,1.96135 +REF_018,UNUSED,,,,,, +REF_005,REF_MAG,-2.4058382,3.78665997,5.47001894,-2.41506358,3.66222139,5.53350068 +MEG_090,MEG_GRAD,2.81455,-1.64764,0.19622,5.75085,-3.31563,0.94961 +MEG_088,MEG_GRAD,2.60717,-1.50525,1.94967,5.39539,-3.11503,3.21696 +MEG_102,MEG_GRAD,3.03294,-1.09506,1.02679,6.1503,-2.19202,2.05167 +MEG_122,MEG_GRAD,3.24454,-0.65992,-1.54654,6.63007,-1.24165,-1.13258 +MEG_114,MEG_GRAD,3.20059,-0.58777,0.15614,6.53962,-1.15004,0.86771 +MEG_104,MEG_GRAD,3.07159,-1.18176,-0.67534,6.25756,-2.31475,0.05782 +MEG_120,MEG_GRAD,3.24454,0.48346,-1.58979,6.63007,1.0948,-1.22094 +MEG_118,MEG_GRAD,3.27163,-0.06408,-0.71761,6.66217,-0.03828,-0.02828 +MEG_106,MEG_GRAD,2.7791,2.44789,0.19401,6.01824,3.66345,0.23867 +MEG_082,MEG_GRAD,2.81067,2.32514,1.52142,6.13327,3.15736,2.01067 +MEG_110,MEG_GRAD,3.20059,0.54013,0.11348,6.53962,1.15454,0.78055 +MEG_116,MEG_GRAD,3.14455,0.87829,-0.52294,6.53422,1.56936,-0.45844 +MEG_096,MEG_GRAD,3.04326,1.22292,1.10768,6.3884,1.94226,1.62169 +MEG_080,MEG_GRAD,2.54021,2.92425,0.68688,5.5195,4.68347,0.71098 +MEG_108,MEG_GRAD,3.0226,1.3925,0.37679,6.41227,2.08357,0.44129 +REF_019,UNUSED,,,,,, 
+MEG_009,MEG_GRAD,-0.48824,4.32904,0.13976,-1.05817,7.74156,0.10133 +MEG_003,MEG_GRAD,0,3.44805,2.77097,0,5.81508,5.29461 +MEG_010,MEG_GRAD,0.51257,3.97032,2.03007,1.11147,6.94759,3.68802 +MEG_012,MEG_GRAD,0.51257,2.67525,3.24478,1.11147,4.13933,6.32201 +MEG_004,MEG_GRAD,0,4.3528,1.03622,0,7.77696,1.53295 +MEG_011,MEG_GRAD,-0.51257,3.97032,2.03007,-1.11147,6.94759,3.68802 +MEG_008,MEG_GRAD,0.48824,4.32904,0.13976,1.05817,7.74156,0.10133 +MEG_013,MEG_GRAD,-0.51257,2.67525,3.24478,-1.11147,4.13933,6.32201 +MEG_024,MEG_GRAD,0.92333,4.17235,1.20548,2.00217,7.38566,1.89996 +REF_020,UNUSED,,,,,, +MEG_068,MEG_GRAD,2.43321,2.88136,1.45931,5.27622,4.58626,2.45038 +MEG_026,MEG_GRAD,1.02514,3.32279,2.63742,2.22293,5.54346,5.00502 +MEG_038,MEG_GRAD,1.37821,4.03301,0.38766,2.98972,7.09471,0.3625 +MEG_040,MEG_GRAD,1.59926,3.67934,1.66789,3.46787,6.31662,2.90266 +MEG_054,MEG_GRAD,2.06278,3.53364,0.8475,4.47296,6.00069,1.12372 +MEG_056,MEG_GRAD,1.84667,3.00588,2.29955,4.00435,4.85628,4.27238 +MEG_058,MEG_GRAD,2.00892,1.56358,2.88668,4.25593,2.49543,5.34722 +MEG_042,MEG_GRAD,1.59926,2.33243,2.93122,3.46787,3.39595,5.64209 +MEG_028,MEG_GRAD,0.90968,1.7238,3.47337,1.93358,2.72985,6.62156 +MEG_070,MEG_GRAD,2.43321,2.17533,2.12153,5.27622,3.05529,3.88634 +REF_021,UNUSED,,,,,, +MEG_072,MEG_GRAD,2.38955,0.86972,2.76491,4.94504,1.79985,4.90406 +MEG_044,MEG_GRAD,1.61144,0.93037,3.41137,3.33479,1.92534,6.24186 +MEG_084,MEG_GRAD,2.78632,1.40783,1.84839,5.89386,2.21359,3.13893 +MEG_046,MEG_GRAD,1.61144,-0.93037,3.41137,3.33479,-1.92534,6.24186 +MEG_098,MEG_GRAD,2.96476,0.52277,1.94967,6.13541,1.08184,3.21696 +MEG_060,MEG_GRAD,1.8607,0,3.41137,3.85068,0,6.24186 +MEG_100,MEG_GRAD,2.96476,-0.52277,1.94967,6.13541,-1.08184,3.21696 +MEG_074,MEG_GRAD,2.38955,-0.86972,2.76491,4.94504,-1.79985,4.90406 +MEG_086,MEG_GRAD,2.5429,0,2.76491,5.2624,0,4.90406 +MEG_062,MEG_GRAD,1.94798,-1.63455,2.76491,4.03123,-3.38261,4.90406 +MEG_112,MEG_GRAD,3.22769,0.00807,0.98507,6.5452,0.04494,1.96707 +MEG_016,MEG_GRAD,0.50538,-0.87535,3.83752,0.89368,-1.5479,7.20924 +MEG_031,MEG_GRAD,-1.01076,0,3.83752,-1.78736,0,7.20924 +MEG_015,MEG_GRAD,-0.50538,0.87535,3.83752,-0.89368,1.5479,7.20924 +MEG_001,MEG_GRAD,0,0,4,0,0,7.46 +MEG_002,MEG_GRAD,0,1.80611,3.59215,0,2.82922,6.89743 +MEG_017,MEG_GRAD,-0.50538,-0.87535,3.83752,-0.89368,-1.5479,7.20924 +MEG_014,MEG_GRAD,0.50538,0.87535,3.83752,0.89368,1.5479,7.20924 +MEG_030,MEG_GRAD,1.01076,0,3.83752,1.78736,0,7.20924 +MEG_050,MEG_GRAD,1.61362,-2.78506,1.09071,3.27214,-5.61905,2.18129 +MEG_064,MEG_GRAD,1.93511,-2.30617,1.94967,4.00461,-4.7725,3.21696 +MEG_076,MEG_GRAD,2.47238,-2.0651,1.06348,5.01358,-4.1591,2.12607 +MEG_078,MEG_GRAD,2.50107,-2.16367,-0.6382,5.0982,-4.31565,0.13349 +MEG_066,MEG_GRAD,2.08904,-2.51166,0.2289,4.26844,-5.08104,1.01638 +MEG_036,MEG_GRAD,1.11155,-3.07561,0.25023,2.27119,-6.23333,1.05996 +MEG_052,MEG_GRAD,1.62888,-2.89137,-0.61068,3.32391,-5.79963,0.18962 +MEG_092,MEG_GRAD,2.8532,-1.73435,-1.50591,5.7542,-3.37531,-0.57691 diff --git a/mne/io/artemis123/resources/Artemis123_mneLoc.csv b/mne/io/artemis123/resources/Artemis123_mneLoc.csv new file mode 100644 index 0000000..cdad771 --- /dev/null +++ b/mne/io/artemis123/resources/Artemis123_mneLoc.csv @@ -0,0 +1,144 @@ +MEG_001,0.0,0.0,0.10160000191,1.0,-0.0,-0.0,-0.0,1.0,-0.0,0.0,0.0,1.0 +MEG_002,0.0,0.0458751948625,0.0912406117153,1.0,-0.0,-0.0,-0.0,0.955282042035,-0.295696161906,0.0,0.295696161906,0.955282042035 
+MEG_003,0.0,0.0875804716465,0.0703826393232,1.0,-0.0,-0.0,-0.0,0.729376031116,-0.684113006186,0.0,0.684113006186,0.729376031116 +MEG_004,0.0,0.110561122079,0.0263199884948,1.0,-0.0,-0.0,-0.0,0.143563509474,-0.989641106032,0.0,0.989641106032,0.143563509474 +MEG_005,0.0,-0.0472625428885,0.086648799629,1.0,0.0,-0.0,0.0,0.818061560022,0.575130666904,0.0,-0.575130666904,0.818061560022 +MEG_006,0.0,-0.0764667014376,0.049521618931,1.0,0.0,-0.0,0.0,0.366268930876,0.930509038255,0.0,-0.930509038255,0.366268930876 +MEG_007,0.0,-0.0830953395622,0.00654405612303,1.0,0.0,-0.0,0.0,0.236260571358,0.971689735678,0.0,-0.971689735678,0.236260571358 +MEG_008,0.0124012962331,0.109957618067,0.00354990406674,0.972562667953,-0.164284112711,-0.164719723212,-0.164284112711,0.0163303909087,-0.986277875978,0.164719723212,0.986277875978,-0.0111069411385 +MEG_009,-0.0124012962331,0.109957618067,0.00354990406674,0.972562667953,0.164284112711,0.164719723212,0.164284112711,0.0163303909087,-0.986277875978,-0.164719723212,0.986277875978,-0.0111069411385 +MEG_010,0.0130192782448,0.100846129896,0.0515637789694,0.979744824976,-0.100693145673,-0.173092369408,-0.100693145673,0.499431154085,-0.860482081594,0.173092369408,0.860482081594,0.479175979061 +MEG_011,-0.0130192782448,0.100846129896,0.0515637789694,0.979744824976,0.100693145673,0.173092369408,0.100693145673,0.499431154085,-0.860482081594,-0.173092369408,0.860482081594,0.479175979061 +MEG_012,0.0130192782448,0.0679513512775,0.0824174135494,0.984142307767,-0.038765954324,-0.17309280415,-0.038765954324,0.905232161619,-0.423145287527,0.17309280415,0.423145287527,0.889374469386 +MEG_013,-0.0130192782448,0.0679513512775,0.0824174135494,0.984142307767,0.038765954324,0.17309280415,0.038765954324,0.905232161619,-0.423145287527,-0.17309280415,0.423145287527,0.889374469386 +MEG_014,0.0128366522413,0.022233890418,0.0974730098325,0.993621350642,-0.0110480572384,-0.112225451567,-0.0110480572384,0.980864355149,-0.194378643965,0.112225451567,0.194378643965,0.974485705791 +MEG_015,-0.0128366522413,0.022233890418,0.0974730098325,0.993621350642,0.0110480572384,0.112225451567,0.0110480572384,0.980864355149,-0.194378643965,-0.112225451567,0.194378643965,0.974485705791 +MEG_016,0.0128366522413,-0.022233890418,0.0974730098325,0.993621350642,0.0110480572384,-0.112225451567,0.0110480572384,0.980864355149,0.194378643965,0.112225451567,-0.194378643965,0.974485705791 +MEG_017,-0.0128366522413,-0.022233890418,0.0974730098325,0.993621350642,-0.0110480572384,0.112225451567,-0.0110480572384,0.980864355149,0.194378643965,-0.112225451567,-0.194378643965,0.974485705791 +MEG_018,0.0112158782109,-0.0636084591958,0.0702287153203,0.988488638046,0.0652835409099,-0.136485426846,0.0652835409099,0.629762253104,0.774039768908,0.136485426846,-0.774039768908,0.61825089115 +MEG_019,-0.0112158782109,-0.0636084591958,0.0702287153203,0.988488638046,-0.0652835409099,0.136485426846,-0.0652835409099,0.629762253104,0.774039768908,-0.136485426846,-0.774039768908,0.61825089115 +MEG_020,0.0142295882675,-0.0804694875128,0.0280718265278,0.979010049739,0.117656650617,-0.166421858768,0.117656650617,0.340489745704,0.93285778425,0.166421858768,-0.93285778425,0.319499795443 +MEG_021,-0.0142427962678,-0.0804672015128,0.0280718265278,0.978972050612,-0.117759654311,0.166572470526,-0.117759654311,0.340528364062,0.932830690472,-0.166572470526,-0.932830690472,0.319500414673 
+MEG_022,0.0142295882675,-0.0832380875649,-0.0151406862846,0.976588506526,0.131701883642,-0.170086750705,0.131701883642,0.259108088321,0.956826845574,0.170086750705,-0.956826845574,0.235696594848 +MEG_023,-0.0146304002751,-0.0831674755635,-0.0151434802847,0.976546368958,-0.131816629327,0.170239729521,-0.131816629327,0.259149948413,0.956799707604,-0.170239729521,-0.956799707604,0.235696317371 +MEG_024,0.0234525824409,0.105977691992,0.0306191925756,0.919030275794,-0.241167202261,-0.311803997292,-0.241167202261,0.281686827798,-0.928703888007,0.311803997292,0.928703888007,0.200717103592 +MEG_025,-0.0234525824409,0.105977691992,0.0306191925756,0.919030275794,0.241167202261,0.311803997292,0.241167202261,0.281686827798,-0.928703888007,-0.311803997292,0.928703888007,0.200717103592 +MEG_026,0.0260385564895,0.0843988675867,0.0669904692594,0.928846648352,-0.131916373824,-0.346181995721,-0.131916373824,0.755430639878,-0.641811980763,0.346181995721,0.641811980763,0.68427728823 +MEG_027,-0.0260385564895,0.0843988675867,0.0669904692594,0.928846648352,0.131916373824,0.346181995721,0.131916373824,0.755430639878,-0.641811980763,-0.346181995721,0.641811980763,0.68427728823 +MEG_028,0.0231058724344,0.0437845208231,0.0882235996586,0.954148215535,-0.0450524345745,-0.295924755521,-0.0450524345745,0.955732979975,-0.290765797726,0.295924755521,0.290765797726,0.90988119551 +MEG_029,-0.0231330504349,0.0437862988232,0.0882142016584,0.954036318954,0.0451068389737,0.296277024411,0.0451068389737,0.955734030088,-0.290753911081,-0.296277024411,0.290753911081,0.909770349043 +MEG_030,0.0256733044827,0.0,0.0974730098325,0.974485414074,-0.0,-0.224450835944,-0.0,1.0,-0.0,0.224450835944,0.0,0.974485414074 +MEG_031,-0.0256733044827,0.0,0.0974730098325,0.974485414074,0.0,0.224450835944,0.0,1.0,-0.0,-0.224450835944,0.0,0.974485414074 +MEG_032,0.0236313984443,-0.0409305767695,0.086648799629,0.954515845737,0.078781387629,-0.287563894118,0.078781387629,0.863545730655,0.498078572146,0.287563894118,-0.498078572146,0.818061576392 +MEG_033,-0.0236313984443,-0.0409305767695,0.086648799629,0.954515845737,-0.078781387629,0.287563894118,-0.078781387629,0.863545730655,0.498078572146,-0.287563894118,-0.498078572146,0.818061576392 +MEG_034,0.0261531104917,-0.0718550773509,0.049521618931,0.925866960833,0.203678027441,-0.318254036858,0.203678027441,0.440401481873,0.874392243734,0.318254036858,-0.874392243734,0.366268442706 +MEG_035,-0.0261531104917,-0.0718550773509,0.049521618931,0.925866960833,-0.203678027441,0.318254036858,-0.203678027441,0.440401481873,0.874392243734,-0.318254036858,-0.874392243734,0.366268442706 +MEG_036,0.0282333705308,-0.0781204954687,0.00635584211949,0.908973236603,0.247867468623,-0.335155744599,0.247867468623,0.325052548187,0.91263495381,0.335155744599,-0.91263495381,0.23402578479 +MEG_037,-0.0282333705308,-0.0781204954687,0.00635584211949,0.908973236603,-0.247867468623,0.335155744599,-0.247867468623,0.325052548187,0.91263495381,-0.335155744599,-0.91263495381,0.23402578479 +MEG_038,0.0350065346581,0.102438455926,0.00984656418512,0.781484001521,-0.415157481208,-0.465754249753,-0.415157481208,0.211244323513,-0.884884230609,0.465754249753,0.884884230609,-0.00727167496558 +MEG_039,-0.0350065346581,0.102438455926,0.00984656418512,0.781484001521,0.415157481208,0.465754249753,0.415157481208,0.211244323513,-0.884884230609,-0.465754249753,0.884884230609,-0.00727167496558 
+MEG_040,0.0406212047637,0.093455237757,0.0423644067965,0.785045408535,-0.303378150058,-0.540060556425,-0.303378150058,0.57182444299,-0.762219459517,0.540060556425,0.762219459517,0.356869851524 +MEG_041,-0.0406212047637,0.093455237757,0.0423644067965,0.785045408535,0.303378150058,0.540060556425,0.303378150058,0.57182444299,-0.762219459517,-0.540060556425,0.762219459517,0.356869851524 +MEG_042,0.0406212047637,0.0592437231138,0.0744529893997,0.836463385382,-0.0930769183398,-0.540060822675,-0.0930769183398,0.947025241119,-0.307375795983,0.540060822675,0.307375795983,0.7834886265 +MEG_043,-0.0406212047637,0.0592437231138,0.0744529893997,0.836463385382,0.0930769183398,0.540060822675,0.0930769183398,0.947025241119,-0.307375795983,-0.540060822675,0.307375795983,0.7834886265 +MEG_044,0.0409305767695,0.0236313984443,0.086648799629,0.863545730655,-0.078781387629,-0.498078572146,-0.078781387629,0.954515845737,-0.287563894118,0.498078572146,0.287563894118,0.818061576392 +MEG_045,-0.0409305767695,0.0236313984443,0.086648799629,0.863545730655,0.078781387629,0.498078572146,0.078781387629,0.954515845737,-0.287563894118,-0.498078572146,0.287563894118,0.818061576392 +MEG_046,0.0409305767695,-0.0236313984443,0.086648799629,0.863545730655,0.078781387629,-0.498078572146,0.078781387629,0.954515845737,0.287563894118,0.498078572146,-0.287563894118,0.818061576392 +MEG_047,-0.0409305767695,-0.0236313984443,0.086648799629,0.863545730655,-0.078781387629,0.498078572146,-0.078781387629,0.954515845737,0.287563894118,-0.498078572146,-0.287563894118,0.818061576392 +MEG_048,0.0322948306071,-0.0559363890516,0.0702287153203,0.904562399003,0.165302346745,-0.392991094644,0.165302346745,0.713688676641,0.680678784005,0.392991094644,-0.680678784005,0.618251075645 +MEG_049,-0.0322948306071,-0.0559363890516,0.0702287153203,0.904562399003,-0.165302346745,0.392991094644,-0.165302346745,0.713688676641,0.680678784005,-0.392991094644,-0.680678784005,0.618251075645 +MEG_050,0.0409859487705,-0.0707405253299,0.0277040345208,0.825297111533,0.298522923381,-0.479341988471,0.298522923381,0.489900043633,0.819073874241,0.479341988471,-0.819073874241,0.315197155166 +MEG_051,-0.0409973787708,-0.0707339213298,0.0277037805208,0.825197788666,-0.298579570884,0.479477683976,-0.298579570884,0.489996382374,0.818995595294,-0.479477683976,-0.818995595294,0.31519417104 +MEG_052,0.0413735527778,-0.0734407993807,-0.0155112722916,0.805087785644,0.334422043575,-0.489893411036,0.334422043575,0.426212956438,0.840538168399,0.489893411036,-0.840538168399,0.231300742083 +MEG_053,-0.0417258507844,-0.0732373453769,-0.0155188922918,0.804976833568,-0.334486625117,0.490031626568,-0.334486625117,0.426317886079,0.840459253996,-0.490031626568,-0.840459253996,0.231294719647 +MEG_054,0.052394612985,0.0897544576874,0.0215265004047,0.550644162251,-0.459958724875,-0.696583791076,-0.459958724875,0.529188204946,-0.713020206696,0.696583791076,0.713020206696,0.0798323671971 +MEG_055,-0.052394612985,0.0897544576874,0.0215265004047,0.550644162251,0.459958724875,0.696583791076,0.459958724875,0.529188204946,-0.713020206696,-0.696583791076,0.713020206696,0.0798323671971 +MEG_056,0.0469054188818,0.0763493534354,0.0584085710981,0.752331243475,-0.212397698952,-0.623606380317,-0.212397698952,0.817850328992,-0.534797210957,0.623606380317,0.534797210957,0.570181572467 +MEG_057,-0.0469054188818,0.0763493534354,0.0584085710981,0.752331243475,0.212397698952,0.623606380317,0.212397698952,0.817850328992,-0.534797210957,-0.623606380317,0.534797210957,0.570181572467 
+MEG_058,0.0510265689593,0.0397149327466,0.0733216733784,0.75352606525,-0.102214380931,-0.649423351381,-0.102214380931,0.95761101603,-0.269320185484,0.649423351381,0.269320185484,0.71113708128 +MEG_059,-0.0502099589439,0.0397642087476,0.0740382073919,0.762632097113,0.100407167247,0.638991928915,0.100407167247,0.957527538003,-0.27029505125,-0.638991928915,0.27029505125,0.720159635116 +MEG_060,0.0472617808885,0.0,0.086648799629,0.818057480605,-0.0,-0.575136469394,-0.0,1.0,-0.0,0.575136469394,0.0,0.818057480605 +MEG_061,-0.0472625428885,0.0,0.086648799629,0.818061560022,0.0,0.575130666904,0.0,1.0,-0.0,-0.575130666904,0.0,0.818061560022 +MEG_062,0.0494786929302,-0.0415175707805,0.0702287153203,0.775981248219,0.187974664221,-0.602095198473,0.187974664221,0.842270014862,0.505219504448,0.602095198473,-0.505219504448,0.618251263081 +MEG_063,-0.0494786929302,-0.0415175707805,0.0702287153203,0.775981248219,-0.187974664221,0.602095198473,-0.187974664221,0.842270014862,0.505219504448,-0.602095198473,-0.505219504448,0.618251263081 +MEG_064,0.0491517949241,-0.0585767191012,0.049521618931,0.738156783089,0.312052080775,-0.598120441436,0.312052080775,0.628111423833,0.712811011513,0.598120441436,-0.712811011513,0.366268206923 +MEG_065,-0.0491517949241,-0.0585767191012,0.049521618931,0.738156783089,-0.312052080775,0.598120441436,-0.312052080775,0.628111423833,0.712811011513,-0.598120441436,-0.712811011513,0.366268206923 +MEG_066,0.0530616169976,-0.0637961651994,0.0058140601093,0.676804202704,0.381028180993,-0.629883796022,0.381028180993,0.550790957291,0.742594671847,0.629883796022,-0.742594671847,0.227595159994 +MEG_067,-0.0530616169976,-0.0637961651994,0.0058140601093,0.676804202704,-0.381028180993,0.629883796022,-0.381028180993,0.550790957291,0.742594671847,-0.629883796022,-0.742594671847,0.227595159994 +MEG_068,0.0618035351619,0.0731865453759,0.0370664746968,0.475173275464,-0.314728784866,-0.821678860785,-0.314728784866,0.811263025695,-0.492745466865,0.821678860785,0.492745466865,0.286436301159 +MEG_069,-0.0618035351619,0.0731865453759,0.0370664746968,0.475173275464,0.314728784866,0.821678860785,0.314728784866,0.811263025695,-0.492745466865,-0.821678860785,0.492745466865,0.286436301159 +MEG_070,0.0618035351619,0.0552533830388,0.0538868630131,0.552894017073,-0.138386914129,-0.821679540869,-0.138386914129,0.957166893906,-0.254323807789,0.821679540869,0.254323807789,0.510060910979 +MEG_071,-0.0618035351619,0.0552533830388,0.0538868630131,0.552894017073,0.138386914129,0.821679540869,0.138386914129,0.957166893906,-0.254323807789,-0.821679540869,0.254323807789,0.510060910979 +MEG_072,0.0606945711411,0.0220908884153,0.0702287153203,0.662907428432,-0.12269267874,-0.738579885939,-0.12269267874,0.955343146999,-0.268823321284,0.738579885939,0.268823321284,0.61825057543 +MEG_073,-0.0606945711411,0.0220908884153,0.0702287153203,0.662907428432,0.12269267874,0.738579885939,0.12269267874,0.955343146999,-0.268823321284,-0.738579885939,0.268823321284,0.61825057543 +MEG_074,0.0606945711411,-0.0220908884153,0.0702287153203,0.662907428432,0.12269267874,-0.738579885939,0.12269267874,0.955343146999,0.268823321284,0.738579885939,-0.268823321284,0.61825057543 +MEG_075,-0.0606945711411,-0.0220908884153,0.0702287153203,0.662907428432,-0.12269267874,0.738579885939,-0.12269267874,0.955343146999,0.268823321284,-0.738579885939,-0.268823321284,0.61825057543 
+MEG_076,0.0627984531806,-0.0524535409861,0.0270123925078,0.587320034467,0.340056606259,-0.73444991773,0.340056606259,0.719786504995,0.605201529878,0.73444991773,-0.605201529878,0.307106539462 +MEG_077,-0.0628070891808,-0.0524433809859,0.0270118845078,0.587208379993,-0.340036516337,0.734548491268,-0.340036516337,0.719895397972,0.605083286446,-0.734548491268,-0.605083286446,0.307103777966 +MEG_078,0.0635271791943,-0.0549572190332,-0.0162102803048,0.539322307512,0.381717195782,-0.750615368258,0.381717195782,0.683709413476,0.621959339803,0.750615368258,-0.621959339803,0.223031720988 +MEG_079,-0.0637887991992,-0.0546455610273,-0.016222218305,0.539196876386,-0.381695169412,0.750716675014,-0.381695169412,0.683831999207,0.621838077403,-0.750716675014,-0.621838077403,0.223028875593 +MEG_080,0.064521335213,0.0742759513964,0.017446752328,0.263693428198,-0.434776489447,-0.861066304154,-0.434776489447,0.743271888347,-0.508444986421,0.861066304154,0.508444986421,0.00696531654525 +MEG_081,-0.064521335213,0.0742759513964,0.017446752328,0.263693428198,0.434776489447,0.861066304154,0.434776489447,0.743271888347,-0.508444986421,-0.861066304154,0.508444986421,0.00696531654525 +MEG_082,0.0713910193422,0.0590585571103,0.0386440687265,0.192087187177,-0.202359959395,-0.960287956478,-0.202359959395,0.949314390716,-0.240525745844,0.960287956478,0.240525745844,0.141401577893 +MEG_083,-0.0713910193422,0.0590585571103,0.0386440687265,0.192087187177,0.202359959395,0.960287956478,0.202359959395,0.949314390716,-0.240525745844,-0.960287956478,0.240525745844,0.141401577893 +MEG_084,0.0707725293305,0.0357588826723,0.0469491068826,0.412488973038,-0.15233685973,-0.89813491653,-0.15233685973,0.960500283795,-0.232879123147,0.89813491653,0.232879123147,0.372989256833 +MEG_085,-0.0707722753305,0.0357588826723,0.0469491068826,0.412487827635,0.152336666507,0.898135475354,0.152336666507,0.960500461005,-0.232878518647,-0.898135475354,0.232878518647,0.37298828864 +MEG_086,0.0645896612143,0.0,0.0702287153203,0.618250335472,-0.0,-0.785981248306,-0.0,1.0,-0.0,0.785981248306,0.0,0.618250335472 +MEG_087,-0.0645896612143,0.0,0.0702287153203,0.618250335472,0.0,0.785981248306,0.0,1.0,-0.0,-0.785981248306,0.0,0.618250335472 +MEG_088,0.066222119245,-0.0382333507188,0.049521618931,0.524701809918,0.274413611706,-0.80584438968,0.274413611706,0.841567184852,0.46525460029,0.80584438968,-0.46525460029,0.36626899477 +MEG_089,-0.066222119245,-0.0382333507188,0.049521618931,0.524701809918,-0.274413611706,0.80584438968,-0.274413611706,0.841567184852,0.46525460029,-0.80584438968,-0.46525460029,0.36626899477 +MEG_090,0.071489571344,-0.0418500567868,0.0049839880937,0.408585986848,0.335957722235,-0.848640029826,0.335957722235,0.809156380101,0.482077132224,0.848640029826,-0.482077132224,0.217742366948 +MEG_091,-0.071489571344,-0.0418500567868,0.0049839880937,0.408585986848,-0.335957722235,0.848640029826,-0.335957722235,0.809156380101,0.482077132224,-0.848640029826,-0.482077132224,0.217742366948 +MEG_092,0.0724712813625,-0.0440524908282,-0.0382501147191,0.445815918958,0.313476011591,-0.83843959625,0.313476011591,0.822681283702,0.474266059932,0.83843959625,-0.474266059932,0.26849720266 +MEG_093,-0.0724712813625,-0.0440524908282,-0.0382501147191,0.445815918958,-0.313476011591,0.83843959625,-0.313476011591,0.822681283702,0.474266059932,-0.83843959625,-0.474266059932,0.26849720266 
+MEG_094,0.0725505293639,0.0550301170346,0.0208673703923,0.0578041209796,-0.192090470788,-0.979673381608,-0.192090470788,0.96083749697,-0.199731208002,0.979673381608,0.199731208002,0.0186416179491 +MEG_095,-0.0725505293639,0.0550301170346,0.0208673703923,0.0578041209796,0.192090470788,0.979673381608,0.192090470788,0.96083749697,-0.199731208002,-0.979673381608,0.199731208002,0.0186416179491 +MEG_096,0.0772988054532,0.031062168584,0.0281350725289,0.186190165142,-0.175001933135,-0.966802743999,-0.175001933135,0.962367527045,-0.20790157837,0.966802743999,0.20790157837,0.148557692187 +MEG_097,-0.0772988054532,0.031062168584,0.0281350725289,0.186190165142,0.175001933135,0.966802743999,0.175001933135,0.962367527045,-0.20790157837,-0.966802743999,0.20790157837,0.148557692187 +MEG_098,0.0753049054157,0.0132783582496,0.049521618931,0.385377976058,-0.108374224505,-0.91637265511,-0.108374224505,0.980890739219,-0.1615808936,0.91637265511,0.1615808936,0.366268715277 +MEG_099,-0.0753049054157,0.0132783582496,0.049521618931,0.385377976058,0.108374224505,0.91637265511,0.108374224505,0.980890739219,-0.1615808936,-0.91637265511,0.1615808936,0.366268715277 +MEG_100,0.0753049054157,-0.0132783582496,0.049521618931,0.385377976058,0.108374224505,-0.91637265511,0.108374224505,0.980890739219,0.1615808936,0.91637265511,-0.1615808936,0.366268715277 +MEG_101,-0.0753049054157,-0.0132783582496,0.049521618931,0.385377976058,-0.108374224505,0.91637265511,-0.108374224505,0.980890739219,0.1615808936,-0.91637265511,-0.1615808936,0.366268715277 +MEG_102,0.0770366774483,-0.0278145245229,0.0260804664903,0.373752636547,0.220368615692,-0.900969832953,0.220368615692,0.922455039947,0.31704001718,0.900969832953,-0.31704001718,0.296207676495 +MEG_103,-0.0770412494484,-0.0278018245227,0.0260799584903,0.373679152588,-0.220281297547,0.901021665041,-0.220281297547,0.92252557096,0.31689544155,-0.901021665041,-0.31689544155,0.296204723548 +MEG_104,0.0780183874667,-0.0300167045643,-0.0171536363225,0.300373897506,0.24880001314,-0.920800779299,0.24880001314,0.911522102566,0.327453828799,0.920800779299,-0.327453828799,0.211896000073 +MEG_105,-0.0781575794694,-0.0296346885571,-0.0171681143228,0.300287289279,-0.248701776907,0.920855564169,-0.248701776907,0.911602900892,0.327303494098,-0.920855564169,-0.327303494098,0.211890190171 +MEG_106,0.0705891413271,0.0621764071689,0.00492785409264,0.134758903688,-0.324701145067,-0.936167295022,-0.324701145067,0.878148606143,-0.351317793345,0.936167295022,0.351317793345,0.0129075098315 +MEG_107,-0.0705891413271,0.0621764071689,0.00492785409264,0.134758903688,0.324701145067,0.936167295022,0.324701145067,0.878148606143,-0.351317793345,-0.936167295022,0.351317793345,0.0129075098315 +MEG_108,0.0767740414434,0.0353695006649,0.00957046617992,0.0578041209796,-0.192090470788,-0.979673381608,-0.192090470788,0.96083749697,-0.199731208002,0.979673381608,0.199731208002,0.0186416179491 +MEG_109,-0.0767740414434,0.0353695006649,0.00957046617992,0.0578041209796,0.192090470788,0.979673381608,0.192090470788,0.96083749697,-0.199731208002,-0.979673381608,0.199731208002,0.0186416179491 +MEG_110,0.0812949875283,0.0137193022579,0.00288239205419,0.219230938619,-0.143668166804,-0.965037436268,-0.143668166804,0.973563831901,-0.177575119486,0.965037436268,0.177575119486,0.192794770521 +MEG_111,-0.0812949875283,0.0137193022579,0.00288239205419,0.219230938619,0.143668166804,0.965037436268,0.143668166804,0.973563831901,-0.177575119486,-0.965037436268,0.177575119486,0.192794770521 
+MEG_112,0.0819833275413,0.000204978003854,0.0250207784704,0.283903999524,-0.00795851694118,-0.958819681203,-0.00795851694118,0.999911550977,-0.010656088948,0.958819681203,0.010656088948,0.283815550501 +MEG_113,-0.0819833275413,0.000218440004107,0.0250202704704,0.283900779742,0.00807295557139,0.958819677859,0.00807295557139,0.999908989411,-0.0108092683826,-0.958819677859,0.0108092683826,0.283809769153 +MEG_114,0.0812949875283,-0.0149293582807,0.00396595607456,0.227559595527,0.130073723873,-0.965037541675,0.130073723873,0.978096467321,0.162505775197,0.965037541675,-0.162505775197,0.205656062847 +MEG_115,-0.0812949875283,-0.0149293582807,0.00396595607456,0.227559595527,-0.130073723873,0.965037541675,-0.130073723873,0.978096467321,0.162505775197,-0.965037541675,-0.162505775197,0.205656062847 +MEG_116,0.0798715715016,0.0223085664194,-0.0132826762497,0.0578041209796,-0.192090470788,-0.979673381608,-0.192090470788,0.96083749697,-0.199731208002,0.979673381608,0.199731208002,0.0186416179491 +MEG_117,-0.0798715715016,0.0223085664194,-0.0132826762497,0.0578041209796,0.192090470788,0.979673381608,0.192090470788,0.96083749697,-0.199731208002,-0.979673381608,0.199731208002,0.0186416179491 +MEG_118,0.0830994035623,-0.0016276320306,-0.0182272943427,0.199274663362,-0.00609304526277,-0.979924733508,-0.00609304526277,0.999953635537,-0.00745664647062,0.979924733508,0.00745664647062,0.199228298899 +MEG_119,-0.0830994035623,-0.00122097802295,-0.018242788343,0.199270871862,0.00622296522868,0.979924688091,0.00622296522868,0.999951637458,-0.00761560563545,-0.979924688091,0.00761560563545,0.19922250932 +MEG_120,0.0824113175493,0.0122798842309,-0.0403806667592,0.134815219165,-0.156230210311,-0.978476866394,-0.156230210311,0.971788825746,-0.176687859065,0.978476866394,0.176687859065,0.106604044912 +MEG_121,-0.0824113175493,0.0122798842309,-0.0403806667592,0.134812452063,0.156230709979,0.978477167862,0.156230709979,0.971788735519,-0.176687913503,-0.978477167862,0.176687913503,0.106601187582 +MEG_122,0.0824113175493,-0.0167619683151,-0.0392821167385,0.144888827116,0.146932333372,-0.978477448481,0.146932333372,0.974752861061,0.168130155723,0.978477448481,-0.168130155723,0.119641688177 +MEG_123,-0.0824113175493,-0.0167619683151,-0.0392821167385,0.144888827116,-0.146932333372,0.978477448481,-0.146932333372,0.974752861061,0.168130155723,-0.978477448481,-0.168130155723,0.119641688177 +REF_001,-0.0574242204956,0.101251052386,0.128127713641,0.221198761686,0.896440347728,-0.384012774259,0.896440347728,-0.0318490232173,0.442018486814,0.384012774259,-0.442018486814,-0.810650261531 +REF_002,0.0490254159517,0.102373905889,0.131493075274,0.222503034056,-0.896198721013,0.383823204472,-0.896198721013,-0.0330228704748,0.442422131545,-0.383823204472,-0.442422131545,-0.810519836419 +REF_003,-0.069360787652,0.103594593082,0.130986921083,-0.347310972431,-0.17658747001,0.920973373049,-0.17658747001,0.976855280479,0.120708849866,-0.920973373049,-0.120708849866,-0.370455691952 +REF_004,0.0431879423759,0.106985366145,0.141533355103,0.383166262537,0.0763561288473,0.920517982899,0.0763561288473,0.990548087664,-0.113948355026,-0.920517982899,0.113948355026,0.3737143502 +REF_005,-0.0611082914288,0.0961811650462,0.138938483688,0.997012450802,-0.040298218601,0.0658955728709,-0.040298218601,0.456428559123,0.888847019455,-0.0658955728709,-0.888847019455,0.453441009925 
+REF_006,0.0527763749922,0.0973005509413,0.142279189541,0.99632914816,0.0447419955488,-0.072982068763,0.0447419955488,0.454664406796,0.889538324653,0.072982068763,-0.889538324653,0.450993554956 +REF_007,-0.114314226149,0.0198287643728,0.0448114428425,0.149033474209,0.0868247934117,0.985012933323,0.0868247934117,0.991141197071,-0.100501655296,-0.985012933323,0.100501655296,0.14017467128 +REF_008,0.114314226149,0.0198287643728,0.0448114428425,0.149033474209,-0.0868247934117,-0.985012933323,-0.0868247934117,0.991141197071,-0.100501655296,0.985012933323,0.100501655296,0.14017467128 +REF_009,-0.109709206063,0.0555492930443,-0.00941197017695,0.0589562738411,0.187574011648,0.98047954998,0.187574011648,0.96261171626,-0.195434576966,-0.98047954998,0.195434576966,0.0215679901007 +REF_010,0.109709206063,0.0555492930443,-0.00941197017695,0.0589562738411,-0.187574011648,-0.98047954998,-0.187574011648,0.96261171626,-0.195434576966,0.98047954998,0.195434576966,0.0215679901007 +REF_011,-0.118027452219,-0.0227690684281,-0.0111257082092,0.157170103306,-0.0740177576494,0.984793851615,-0.0740177576494,0.993499722223,0.0864851056297,-0.984793851615,-0.0864851056297,0.150669825529 +REF_012,0.118027452219,-0.0227690684281,-0.0111257082092,0.157170103306,0.0740177576494,-0.984793851615,0.0740177576494,0.993499722223,0.0864851056297,0.984793851615,-0.0864851056297,0.150669825529 +REF_013,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_014,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_015,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_016,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_017,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_018,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_019,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_020,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 +REF_021,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 diff --git a/mne/io/artemis123/utils.py b/mne/io/artemis123/utils.py new file mode 100644 index 0000000..90df53e --- /dev/null +++ b/mne/io/artemis123/utils.py @@ -0,0 +1,123 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
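+ # Note on the resource format consumed by this module: each row of + # Artemis123_mneLoc.csv above is "<name>,<x>,<y>,<z>,<r1>,...,<r9>" -- a coil + # position in meters followed by the nine entries of a flattened 3x3 rotation + # matrix (stored transposed; see _compute_mne_loc below). A hedged sketch of + # unpacking one row, parsed as in _load_mne_locs: + # + # vals = line.strip().split(",") + # loc = np.array(vals[1:], np.float64) # 12 floats per channel + # pos, rot_T = loc[:3], loc[3:].reshape(3, 3)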
+ +import os.path as op + +import numpy as np + +from ..._fiff._digitization import _artemis123_read_pos +from ...transforms import rotation3d_align_z_axis +from ...utils import logger + + +def _load_mne_locs(fname=None): + """Load MNE locs structure from file (if exists) or recreate it.""" + if not fname: + # find input file + resource_dir = op.join(op.dirname(op.abspath(__file__)), "resources") + fname = op.join(resource_dir, "Artemis123_mneLoc.csv") + + if not op.exists(fname): + raise OSError(f'MNE locs file "{fname}" does not exist') + + logger.info(f"Loading mne loc file {fname}") + locs = dict() + with open(fname) as fid: + for line in fid: + vals = line.strip().split(",") + locs[vals[0]] = np.array(vals[1::], np.float64) + + return locs + + +def _generate_mne_locs_file(output_fname): + """Generate mne coil locs and save to supplied file.""" + logger.info("Converting Tristan coil file to mne loc file...") + resource_dir = op.join(op.dirname(op.abspath(__file__)), "resources") + chan_fname = op.join(resource_dir, "Artemis123_ChannelMap.csv") + chans = _load_tristan_coil_locs(chan_fname) + + # compute a dict of loc structs + locs = {n: _compute_mne_loc(cinfo) for n, cinfo in chans.items()} + + # write it out to output_fname + with open(output_fname, "w") as fid: + for n in sorted(locs.keys()): + fid.write(f"{n},") + fid.write(",".join(locs[n].astype(str))) + fid.write("\n") + + +def _load_tristan_coil_locs(coil_loc_path): + """Load the Coil locations from Tristan CAD drawings.""" + channel_info = dict() + with open(coil_loc_path) as fid: + # skip 2 Header lines + fid.readline() + fid.readline() + for line in fid: + line = line.strip() + vals = line.split(",") + channel_info[vals[0]] = dict() + if vals[6]: + channel_info[vals[0]]["inner_coil"] = np.array(vals[2:5], np.float64) + channel_info[vals[0]]["outer_coil"] = np.array(vals[5:8], np.float64) + else: # nothing supplied + channel_info[vals[0]]["inner_coil"] = np.zeros(3) + channel_info[vals[0]]["outer_coil"] = np.zeros(3) + return channel_info + + +def _compute_mne_loc(coil_loc): + """Convert a set of coils to an mne Struct. + + Note input coil locations are in inches. + """ + loc = np.zeros(12) + if (np.linalg.norm(coil_loc["inner_coil"]) == 0) and ( + np.linalg.norm(coil_loc["outer_coil"]) == 0 + ): + return loc + + # channel location is inner coil location converted to meters From inches + loc[0:3] = coil_loc["inner_coil"] / 39.370078 + + # figure out rotation + z_axis = coil_loc["outer_coil"] - coil_loc["inner_coil"] + R = rotation3d_align_z_axis(z_axis) + loc[3:13] = R.T.reshape(9) + return loc + + +def _read_pos(fname): + """Read the .pos file and return positions as dig points.""" + nas, lpa, rpa, hpi, extra = None, None, None, None, None + with open(fname) as fid: + for line in fid: + line = line.strip() + if len(line) > 0: + parts = line.split() + # The lines can have 4 or 5 parts. First part is for the id, + # which can be an int or a string. The last three are for xyz + # coordinates. The extra part is for additional info + # (e.g. 'Pz', 'Cz') which is ignored. 
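+ # Illustrative (hypothetical) .pos lines, coordinates in cm: + # nasion 10.5 0.0 4.2 + # hpi_1 5.1 6.4 3.3 + # 7 Cz 0.4 -0.1 9.8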
+ if len(parts) not in [4, 5]: + continue + + if parts[0].lower() == "nasion": + nas = np.array([float(p) for p in parts[-3:]]) / 100.0 + elif parts[0].lower() == "left": + lpa = np.array([float(p) for p in parts[-3:]]) / 100.0 + elif parts[0].lower() == "right": + rpa = np.array([float(p) for p in parts[-3:]]) / 100.0 + elif "hpi" in parts[0].lower(): + if hpi is None: + hpi = list() + hpi.append(np.array([float(p) for p in parts[-3:]]) / 100.0) + else: + if extra is None: + extra = list() + extra.append(np.array([float(p) for p in parts[-3:]]) / 100.0) + + return _artemis123_read_pos(nas, lpa, rpa, hpi, extra) diff --git a/mne/io/base.py b/mne/io/base.py new file mode 100644 index 0000000..54d3334 --- /dev/null +++ b/mne/io/base.py @@ -0,0 +1,3259 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +import shutil +from collections import defaultdict +from contextlib import nullcontext +from copy import deepcopy +from dataclasses import dataclass, field +from datetime import timedelta +from inspect import getfullargspec +from pathlib import Path + +import numpy as np + +from .._fiff.compensator import make_compensator, set_current_comp +from .._fiff.constants import FIFF +from .._fiff.meas_info import ( + ContainsMixin, + SetChannelsMixin, + _ensure_infos_match, + _unit2human, + write_meas_info, +) +from .._fiff.pick import ( + _picks_to_idx, + channel_type, + pick_channels, + pick_info, + pick_types, +) +from .._fiff.proj import ProjMixin, _proj_equal, activate_proj, setup_proj +from .._fiff.utils import _check_orig_units, _make_split_fnames +from .._fiff.write import ( + _NEXT_FILE_BUFFER, + _get_split_size, + end_block, + start_and_end_file, + start_block, + write_complex64, + write_complex128, + write_dau_pack16, + write_double, + write_float, + write_id, + write_int, + write_string, +) +from ..annotations import ( + Annotations, + _annotations_starts_stops, + _combine_annotations, + _handle_meas_date, + _sync_onset, + _write_annotations, +) +from ..channels.channels import InterpolationMixin, ReferenceMixin, UpdateChannelsMixin +from ..defaults import _handle_default +from ..event import concatenate_events, find_events +from ..filter import ( + FilterMixin, + _check_fun, + _check_resamp_noop, + _resamp_ratio_len, + _resample_stim_channels, + notch_filter, + resample, +) +from ..html_templates import _get_html_template +from ..parallel import parallel_func +from ..time_frequency.spectrum import Spectrum, SpectrumMixin, _validate_method +from ..time_frequency.tfr import RawTFR +from ..utils import ( + SizeMixin, + TimeMixin, + _arange_div, + _build_data_frame, + _check_fname, + _check_option, + _check_pandas_index_arguments, + _check_pandas_installed, + _check_preload, + _check_time_format, + _convert_times, + _file_like, + _get_argvalues, + _get_stim_channel, + _pl, + _scale_dataframe_data, + _stamp_to_dt, + _time_mask, + _validate_type, + check_fname, + copy_doc, + copy_function_doc_to_method_doc, + fill_doc, + logger, + repr_html, + sizeof_fmt, + verbose, + warn, +) +from ..viz import _RAW_CLIP_DEF, plot_raw + + +@fill_doc +class BaseRaw( + ProjMixin, + ContainsMixin, + UpdateChannelsMixin, + ReferenceMixin, + SetChannelsMixin, + InterpolationMixin, + TimeMixin, + SizeMixin, + FilterMixin, + SpectrumMixin, +): + """Base class for Raw data. + + Parameters + ---------- + %(info_not_none)s + preload : bool | str | ndarray + Preload data into memory for data manipulation and faster indexing. 
+ If True, the data will be preloaded into memory (fast, requires + large amount of memory). If preload is a string, preload is the + file name of a memory-mapped file which is used to store the data + on the hard drive (slower, requires less memory). If preload is an + ndarray, the data are taken from that array. If False, data are not + read until save. + first_samps : iterable + Iterable of the first sample number from each raw file. For unsplit raw + files this should be a length-one list or tuple. + last_samps : iterable | None + Iterable of the last sample number from each raw file. For unsplit raw + files this should be a length-one list or tuple. If None, then preload + must be an ndarray. + filenames : tuple | None + Tuple of length one (for unsplit raw files) or length > 1 (for split + raw files). + raw_extras : list of dict + The data necessary for on-demand reads for the given reader format. + Should be the same length as ``filenames``. Will have the entry + ``raw_extras['orig_nchan']`` added to it for convenience. + orig_format : str + The data format of the original raw file (e.g., ``'double'``). + dtype : dtype | None + The dtype of the raw data. If preload is an ndarray, its dtype must + match what is passed here. + buffer_size_sec : float + The buffer size in seconds that should be written by default using + :meth:`mne.io.Raw.save`. + orig_units : dict | None + Dictionary mapping channel names to their units as specified in + the header file. Example: {'FC1': 'nV'}. + + .. versionadded:: 0.17 + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + + Notes + ----- + This class is public to allow for stable type-checking in user + code (i.e., ``isinstance(my_raw_object, BaseRaw)``) but should not be used + as a constructor for `Raw` objects (use instead one of the subclass + constructors, or one of the ``mne.io.read_raw_*`` functions). + + Subclasses must provide the following methods: + + * _read_segment_file(self, data, idx, fi, start, stop, cals, mult) + (only needed for types that support on-demand disk reads) + """ + + # NOTE: If you add a new attribute to this class and get a Sphinx warning like: + # docstring of mne.io.base.BaseRaw:71: + # WARNING: py:obj reference target not found: duration [ref.obj] + # You need to add the attribute to doc/conf.py nitpick_ignore_regex. You should also + # consider adding it to the Attributes list for Raw in mne/io/fiff/raw.py. 
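+ # A minimal sketch of the type-checking use case from the Notes above; the + # file name is hypothetical, and any mne.io.read_raw_* reader returns a + # BaseRaw subclass: + # + # import mne + # raw = mne.io.read_raw_fif("sample_raw.fif") + # assert isinstance(raw, mne.io.BaseRaw)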
+ + _extra_attributes = () + + @verbose + def __init__( + self, + info, + preload=False, + first_samps=(0,), + last_samps=None, + filenames=None, + raw_extras=(None,), + orig_format="double", + dtype=np.float64, + buffer_size_sec=1.0, + orig_units=None, + *, + verbose=None, + ): + # wait until the end to preload data, but triage here + if isinstance(preload, np.ndarray): + # some functions (e.g., filtering) only work w/64-bit data + if preload.dtype not in (np.float64, np.complex128): + raise RuntimeError( + f"datatype must be float64 or complex128, not {preload.dtype}" + ) + if preload.dtype != dtype: + raise ValueError("preload and dtype must match") + self._data = preload + self.preload = True + assert len(first_samps) == 1 + last_samps = [first_samps[0] + self._data.shape[1] - 1] + load_from_disk = False + else: + if last_samps is None: + raise ValueError( + "last_samps must be given unless preload is an ndarray" + ) + if not preload: + self.preload = False + load_from_disk = False + else: + load_from_disk = True + self._last_samps = np.array(last_samps) + self._first_samps = np.array(first_samps) + orig_ch_names = info["ch_names"] + with info._unlock(check_after=True): + # be permissive of old code + if isinstance(info["meas_date"], tuple): + info["meas_date"] = _stamp_to_dt(info["meas_date"]) + self.info = info + self.buffer_size_sec = float(buffer_size_sec) + cals = np.empty(info["nchan"]) + for k in range(info["nchan"]): + cals[k] = info["chs"][k]["range"] * info["chs"][k]["cal"] + bad = np.where(cals == 0)[0] + if len(bad) > 0: + raise ValueError( + f"Bad cals for channels {dict((ii, self.ch_names[ii]) for ii in bad)}" + ) + self._cals = cals + if raw_extras is None: + raw_extras = [None] * len(first_samps) + self._raw_extras = list(dict() if r is None else r for r in raw_extras) + for r in self._raw_extras: + r["orig_nchan"] = info["nchan"] + self._read_picks = [np.arange(info["nchan"]) for _ in range(len(raw_extras))] + # deal with compensation (only relevant for CTF data, either CTF + # reader or MNE-C converted CTF->FIF files) + self._read_comp_grade = self.compensation_grade # read property + if self._read_comp_grade is not None and len(info["comps"]): + logger.info("Current compensation grade : %d", self._read_comp_grade) + self._comp = None + if filenames is None: + filenames = [None] * len(first_samps) + self.filenames = list(filenames) + _validate_type(orig_format, str, "orig_format") + _check_option("orig_format", orig_format, ("double", "single", "int", "short")) + self.orig_format = orig_format + # Sanity check and set original units, if provided by the reader: + + if orig_units: + if not isinstance(orig_units, dict): + raise ValueError( + f"orig_units must be of type dict, but got {type(orig_units)}" + ) + + # original units need to be truncated to 15 chars or renamed + # to match MNE conventions (channel name unique and less than + # 15 characters). + orig_units = deepcopy(orig_units) + for old_ch, new_ch in zip(orig_ch_names, info["ch_names"]): + if old_ch in orig_units: + this_unit = orig_units[old_ch] + del orig_units[old_ch] + orig_units[new_ch] = this_unit + + # STI 014 channel is native only to fif ... for all other formats + # this was artificially added by the IO procedure, so remove it + ch_names = list(info["ch_names"]) + if "STI 014" in ch_names and self.filenames[0].suffix != ".fif": + ch_names.remove("STI 014") + + # Each channel in the data must have a corresponding channel in + # the original units.
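+ # (e.g. a reader might have passed orig_units={"EEG 001": "µV"}; those keys + # were remapped above to the possibly-renamed names in info["ch_names"])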
+ ch_correspond = [ch in orig_units for ch in ch_names] + if not all(ch_correspond): + ch_without_orig_unit = ch_names[ch_correspond.index(False)] + raise ValueError( + f"Channel {ch_without_orig_unit} has no associated original unit." + ) + + # Final check of orig_units, editing a unit if it is not a valid + # unit + orig_units = _check_orig_units(orig_units) + self._orig_units = orig_units or dict() # always a dict + self._projector = None + self._dtype_ = dtype + self.set_annotations(None) + self._cropped_samp = first_samps[0] + # If we have True or a string, actually do the preloading + if load_from_disk: + self._preload_data(preload) + self._init_kwargs = _get_argvalues() + + @verbose + def apply_gradient_compensation(self, grade, verbose=None): + """Apply CTF gradient compensation. + + .. warning:: The compensation matrices are stored with single + precision, so repeatedly switching between different + grades of compensation (e.g., 0->1->3->2) can increase + numerical noise, especially if data are saved to + disk in between changing grades. It is thus best to + only use a single gradient compensation level in + final analyses. + + Parameters + ---------- + grade : int + CTF gradient compensation level. + %(verbose)s + + Returns + ------- + raw : instance of Raw + The modified Raw instance. Works in-place. + """ + grade = int(grade) + current_comp = self.compensation_grade + if current_comp != grade: + if self.proj: + raise RuntimeError( + "Cannot change compensation on data where projectors have been " + "applied." + ) + # Figure out what operator to use (varies depending on preload) + from_comp = current_comp if self.preload else self._read_comp_grade + comp = make_compensator(self.info, from_comp, grade) + logger.info( + "Compensator constructed to change %d -> %d", current_comp, grade + ) + set_current_comp(self.info, grade) + # We might need to apply it to our data now + if self.preload: + logger.info("Applying compensator to loaded data") + lims = np.concatenate( + [np.arange(0, len(self.times), 10000), [len(self.times)]] + ) + for start, stop in zip(lims[:-1], lims[1:]): + self._data[:, start:stop] = np.dot(comp, self._data[:, start:stop]) + else: + self._comp = comp # store it for later use + return self + + @property + def _dtype(self): + """Datatype for loading data (property so subclasses can override).""" + # most classes only store real data, they won't need anything special + return self._dtype_ + + @verbose + def _read_segment( + self, start=0, stop=None, sel=None, data_buffer=None, *, verbose=None + ): + """Read a chunk of raw data. + + Parameters + ---------- + start : int, optional + First sample to include (first is 0). If omitted, defaults to the + first sample in data. + stop : int, optional + First sample to not include. + If omitted, data is included to the end. + sel : array, optional + Indices of channels to select. + data_buffer : array or str, optional + numpy array to fill with data read, must have the correct shape. + If str, a np.memmap with the correct data type will be used + to store the data. + %(verbose)s + + Returns + ------- + data : array, [channels x samples] + The data matrix (channels x samples).
+ """ + # Initial checks + start = int(start) + stop = self.n_times if stop is None else min([int(stop), self.n_times]) + + if start >= stop: + raise ValueError("No data in this range") + + # Initialize the data and calibration vector + if sel is None: + n_out = self.info["nchan"] + idx = slice(None) + else: + n_out = len(sel) + idx = _convert_slice(sel) + del sel + assert n_out <= self.info["nchan"] + data_shape = (n_out, stop - start) + dtype = self._dtype + if isinstance(data_buffer, np.ndarray): + if data_buffer.shape != data_shape: + raise ValueError( + f"data_buffer has incorrect shape: " + f"{data_buffer.shape} != {data_shape}" + ) + data = data_buffer + else: + data = _allocate_data(data_buffer, data_shape, dtype) + + # deal with having multiple files accessed by the raw object + cumul_lens = np.concatenate(([0], np.array(self._raw_lengths, dtype="int"))) + cumul_lens = np.cumsum(cumul_lens) + files_used = np.logical_and( + np.less(start, cumul_lens[1:]), np.greater_equal(stop - 1, cumul_lens[:-1]) + ) + + # set up cals and mult (cals, compensation, and projector) + n_out = len(np.arange(len(self.ch_names))[idx]) + cals = self._cals.ravel() + projector, comp = self._projector, self._comp + if comp is not None: + mult = comp + if projector is not None: + mult = projector @ mult + else: + mult = projector + del projector, comp + + if mult is None: + cals = cals[idx, np.newaxis] + assert cals.shape == (n_out, 1) + need_idx = idx # sufficient just to read the given channels + else: + mult = mult[idx] * cals + cals = None # shouldn't be used + assert mult.shape == (n_out, len(self.ch_names)) + # read all necessary for proj + need_idx = np.where(np.any(mult, axis=0))[0] + mult = mult[:, need_idx] + logger.debug( + f"Reading {len(need_idx)}/{len(self.ch_names)} channels " + f"due to projection" + ) + assert (mult is None) ^ (cals is None) # xor + + # read from necessary files + offset = 0 + for fi in np.nonzero(files_used)[0]: + start_file = self._first_samps[fi] + # first iteration (only) could start in the middle somewhere + if offset == 0: + start_file += start - cumul_lens[fi] + stop_file = np.min( + [ + stop - cumul_lens[fi] + self._first_samps[fi], + self._last_samps[fi] + 1, + ] + ) + if start_file < self._first_samps[fi] or stop_file < start_file: + raise ValueError("Bad array indexing, could be a bug") + n_read = stop_file - start_file + this_sl = slice(offset, offset + n_read) + # reindex back to original file + orig_idx = _convert_slice(self._read_picks[fi][need_idx]) + _ReadSegmentFileProtector(self)._read_segment_file( + data[:, this_sl], + orig_idx, + fi, + int(start_file), + int(stop_file), + cals, + mult, + ) + offset += n_read + return data + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file. + + Only needs to be implemented for readers that support + ``preload=False``. Any implementation should only make use of: + + - self._raw_extras[fi] + - self.filenames[fi] + + So be sure to store any information necessary for reading raw data + in self._raw_extras[fi]. Things like ``info`` can be decoupled + from the original data (e.g., different subsets of channels) due + to picking before preload, for example. + + Parameters + ---------- + data : ndarray, shape (n_out, stop - start + 1) + The data array. Should be modified inplace. + idx : ndarray | slice + The requested channel indices. + fi : int + The file index that must be read from. + start : int + The start sample in the given file. 
+ stop : int + The stop sample in the given file (inclusive). + cals : ndarray, shape (len(idx), 1) + Channel calibrations (already sub-indexed). + mult : ndarray, shape (n_out, len(idx) | None + The compensation + projection + cals matrix, if applicable. + """ + raise NotImplementedError + + def _check_bad_segment( + self, start, stop, picks, reject_start, reject_stop, reject_by_annotation=False + ): + """Check if data segment is bad. + + If the slice is good, returns the data in desired range. + If rejected based on annotation, returns description of the + bad segment as a string. + + Parameters + ---------- + start : int + First sample of the slice. + stop : int + End of the slice. + picks : array of int + Channel picks. + reject_start : int + First sample to check for overlaps with bad annotations. + reject_stop : int + Last sample to check for overlaps with bad annotations. + reject_by_annotation : bool + Whether to perform rejection based on annotations. + False by default. + + Returns + ------- + data : array | str + Data in the desired range (good segment) or description of the bad + segment. + """ + if start < 0: + return None + if reject_by_annotation and len(self.annotations) > 0: + annot = self.annotations + sfreq = self.info["sfreq"] + onset = _sync_onset(self, annot.onset) + overlaps = np.where(onset < reject_stop / sfreq) + overlaps = np.where( + onset[overlaps] + annot.duration[overlaps] > reject_start / sfreq + ) + for descr in annot.description[overlaps]: + if descr.lower().startswith("bad"): + return descr + return self._getitem((picks, slice(start, stop)), return_times=False) + + @verbose + def load_data(self, verbose=None): + """Load raw data. + + Parameters + ---------- + %(verbose)s + + Returns + ------- + raw : instance of Raw + The raw object with data. + + Notes + ----- + This function will load raw data if it was not already preloaded. + If data were already preloaded, it will do nothing. + + .. versionadded:: 0.10.0 + """ + if not self.preload: + self._preload_data(True) + return self + + def _preload_data(self, preload): + """Actually preload the data.""" + data_buffer = preload + if isinstance(preload, bool | np.bool_) and not preload: + data_buffer = None + t = self.times + logger.info( + f"Reading 0 ... {len(t) - 1} = {0.0:9.3f} ... {t[-1]:9.3f} secs..." + ) + self._data = self._read_segment(data_buffer=data_buffer) + assert len(self._data) == self.info["nchan"] + self.preload = True + self._comp = None # no longer needed + self.close() + + @property + def _first_time(self): + return self.first_samp / float(self.info["sfreq"]) + + @property + def first_samp(self): + """The first data sample. + + See :term:`first_samp`. + """ + return self._cropped_samp + + @property + def first_time(self): + """The first time point (including first_samp but not meas_date).""" + return self._first_time + + @property + def last_samp(self): + """The last data sample.""" + return self.first_samp + sum(self._raw_lengths) - 1 + + @property + def _last_time(self): + return self.last_samp / float(self.info["sfreq"]) + + def time_as_index(self, times, use_rounding=False, origin=None): + """Convert time to indices. + + Parameters + ---------- + times : list-like | float | int + List of numbers or a number representing points in time. + use_rounding : bool + If True, use rounding (instead of truncation) when converting + times to indices. This can help avoid non-unique indices. + origin : datetime | float | int | None + Time reference for times. 
If None, ``times`` are assumed to be + relative to :term:`first_samp`. + + .. versionadded:: 0.17.0 + + Returns + ------- + index : ndarray + Indices relative to :term:`first_samp` corresponding to the times + supplied. + """ + origin = _handle_meas_date(origin) + if origin is None: + delta = 0 + elif self.info["meas_date"] is None: + raise ValueError( + f'origin must be None when info["meas_date"] is None, got {origin}' + ) + else: + first_samp_in_abs_time = self.info["meas_date"] + timedelta( + 0, self._first_time + ) + delta = (origin - first_samp_in_abs_time).total_seconds() + times = np.atleast_1d(times) + delta + + return super().time_as_index(times, use_rounding) + + @property + def _raw_lengths(self): + return [ + last - first + 1 for first, last in zip(self._first_samps, self._last_samps) + ] + + @property + def annotations(self): # noqa: D401 + """:class:`~mne.Annotations` for marking segments of data.""" + return self._annotations + + @property + def filenames(self) -> tuple[Path | None, ...]: + """The filenames used. + + :type: :class:`tuple` of :class:`pathlib.Path` | ``None`` + """ + return tuple(self._filenames) + + @filenames.setter + def filenames(self, value): + """The filenames used, cast to list of paths.""" # noqa: D401 + _validate_type(value, (list, tuple), "filenames") + if isinstance(value, tuple): + value = list(value) + for k, elt in enumerate(value): + if elt is not None: + value[k] = _check_fname(elt, overwrite="read", must_exist=False) + if not value[k].exists(): + # check existence separately from _check_fname since some + # fileformats use directories instead of files and '_check_fname' + # does not handle it correctly. + raise FileNotFoundError(f"File {value[k]} not found.") + self._filenames = list(value) + + @verbose + def set_annotations( + self, annotations, emit_warning=True, on_missing="raise", *, verbose=None + ): + """Setter for annotations. + + This setter checks if they are inside the data range. + + Parameters + ---------- + annotations : instance of mne.Annotations | None + Annotations to set. If None, the annotations is defined + but empty. + %(emit_warning)s + The default is True. + %(on_missing_ch_names)s + %(verbose)s + + Returns + ------- + self : instance of Raw + The raw object with annotations. + """ + meas_date = _handle_meas_date(self.info["meas_date"]) + if annotations is None: + self._annotations = Annotations([], [], [], meas_date) + else: + _validate_type(annotations, Annotations, "annotations") + + if meas_date is None and annotations.orig_time is not None: + raise RuntimeError( + "Ambiguous operation. Setting an Annotation object with known " + "``orig_time`` to a raw object which has ``meas_date`` set to None " + "is ambiguous. Please, either set a meaningful ``meas_date`` to " + "the raw object; or set ``orig_time`` to None in which case the " + "annotation onsets would be taken in reference to the first sample " + "of the raw object." 
+ ) + + delta = 1.0 / self.info["sfreq"] + new_annotations = annotations.copy() + new_annotations._prune_ch_names(self.info, on_missing) + if annotations.orig_time is None: + new_annotations.crop( + 0, self.times[-1] + delta, emit_warning=emit_warning + ) + new_annotations.onset += self._first_time + else: + tmin = meas_date + timedelta(0, self._first_time) + tmax = tmin + timedelta(seconds=self.times[-1] + delta) + new_annotations.crop(tmin=tmin, tmax=tmax, emit_warning=emit_warning) + new_annotations.onset -= ( + meas_date - new_annotations.orig_time + ).total_seconds() + new_annotations._orig_time = meas_date + + self._annotations = new_annotations + + return self + + def __del__(self): # noqa: D105 + # remove file for memmap + if hasattr(self, "_data") and getattr(self._data, "filename", None) is not None: + # First, close the file out; happens automatically on del + filename = self._data.filename + del self._data + # Now file can be removed + try: + os.remove(filename) + except OSError: + pass # ignore file that no longer exists + + def __enter__(self): + """Entering with block.""" + return self + + def __exit__(self, exception_type, exception_val, trace): + """Exit with block.""" + try: + self.close() + except Exception: + return exception_type, exception_val, trace + + def _parse_get_set_params(self, item): + """Parse the __getitem__ / __setitem__ tuples.""" + # make sure item is a tuple + if not isinstance(item, tuple): # only channel selection passed + item = (item, slice(None, None, None)) + + if len(item) != 2: # should be channels and time instants + raise RuntimeError( + "Unable to access raw data (need both channels and time)" + ) + + sel = _picks_to_idx(self.info, item[0]) + + if isinstance(item[1], slice): + time_slice = item[1] + start, stop, step = (time_slice.start, time_slice.stop, time_slice.step) + else: + item1 = item[1] + # Let's do automated type conversion to integer here + if np.array(item[1]).dtype.kind == "i": + item1 = int(item1) + if isinstance(item1, int | np.integer): + start, stop, step = item1, item1 + 1, 1 + # Need to special case -1, because -1:0 will be empty + if start == -1: + stop = None + else: + raise ValueError("Must pass int or slice to __getitem__") + + if start is None: + start = 0 + if step is not None and step != 1: + raise ValueError(f"step needs to be 1 : {step} given") + + if isinstance(sel, int | np.integer): + sel = np.array([sel]) + + if sel is not None and len(sel) == 0: + raise ValueError("Empty channel list") + + return sel, start, stop + + def __getitem__(self, item): + """Get raw data and times. + + Parameters + ---------- + item : tuple or array-like + See below for use cases. + + Returns + ------- + data : ndarray, shape (n_channels, n_times) + The raw data. + times : ndarray, shape (n_times,) + The times associated with the data. 
+ + Examples + -------- + Generally raw data is accessed as:: + + >>> data, times = raw[picks, time_slice] # doctest: +SKIP + + To get all data, you can thus do either of:: + + >>> data, times = raw[:] # doctest: +SKIP + + Which will be equivalent to: + + >>> data, times = raw[:, :] # doctest: +SKIP + + To get only the good MEG data from 10-20 seconds, you could do:: + + >>> picks = mne.pick_types(raw.info, meg=True, exclude='bads') # doctest: +SKIP + >>> t_idx = raw.time_as_index([10., 20.]) # doctest: +SKIP + >>> data, times = raw[picks, t_idx[0]:t_idx[1]] # doctest: +SKIP + + """ # noqa: E501 + return self._getitem(item) + + def _getitem(self, item, return_times=True): + sel, start, stop = self._parse_get_set_params(item) + if self.preload: + data = self._data[sel, start:stop] + else: + data = self._read_segment(start=start, stop=stop, sel=sel) + + if return_times: + # Rather than compute the entire thing just compute the subset + # times = self.times[start:stop] + # stop can be None here so don't use it directly + times = np.arange(start, start + data.shape[1], dtype=float) + times /= self.info["sfreq"] + return data, times + else: + return data + + def __setitem__(self, item, value): + """Set raw data content.""" + _check_preload(self, "Modifying data of Raw") + sel, start, stop = self._parse_get_set_params(item) + # set the data + self._data[sel, start:stop] = value + + @verbose + def get_data( + self, + picks=None, + start=0, + stop=None, + reject_by_annotation=None, + return_times=False, + units=None, + *, + tmin=None, + tmax=None, + verbose=None, + ): + """Get data in the given range. + + Parameters + ---------- + %(picks_all)s + start : int + The first sample to include. Defaults to 0. + stop : int | None + End sample (first not to include). If None (default), the end of + the data is used. + reject_by_annotation : None | 'omit' | 'NaN' + Whether to reject by annotation. If None (default), no rejection is + done. If 'omit', segments annotated with description starting with + 'bad' are omitted. If 'NaN', the bad samples are filled with NaNs. + return_times : bool + Whether to return times as well. Defaults to False. + %(units)s + tmin : int | float | None + Start time of data to get in seconds. The ``tmin`` parameter is + ignored if the ``start`` parameter is bigger than 0. + + .. versionadded:: 0.24.0 + tmax : int | float | None + End time of data to get in seconds. The ``tmax`` parameter is + ignored if the ``stop`` parameter is defined. + + .. versionadded:: 0.24.0 + %(verbose)s + + Returns + ------- + data : ndarray, shape (n_channels, n_times) + Copy of the data in the given range. + times : ndarray, shape (n_times,) + Times associated with the data samples. Only returned if + return_times=True. + + Notes + ----- + .. 
versionadded:: 0.14.0 + """ + # validate types + _validate_type(start, types=("int-like"), item_name="start", type_name="int") + _validate_type( + stop, types=("int-like", None), item_name="stop", type_name="int, None" + ) + + picks = _picks_to_idx(self.info, picks, "all", exclude=()) + + # Get channel factors for conversion into specified unit + # (vector of ones if no conversion needed) + if units is not None: + ch_factors = _get_ch_factors(self, units, picks) + + # convert to ints + picks = np.atleast_1d(np.arange(self.info["nchan"])[picks]) + + # handle start/tmin stop/tmax + tmin_start, tmax_stop = self._handle_tmin_tmax(tmin, tmax) + + # tmin/tmax are ignored if start/stop are defined to + # something other than their defaults + start = tmin_start if start == 0 else start + stop = tmax_stop if stop is None else stop + + # truncate start/stop to the open interval [0, n_times] + start = min(max(0, start), self.n_times) + stop = min(max(0, stop), self.n_times) + + if len(self.annotations) == 0 or reject_by_annotation is None: + getitem = self._getitem( + (picks, slice(start, stop)), return_times=return_times + ) + if return_times: + data, times = getitem + if units is not None: + data *= ch_factors[:, np.newaxis] + return data, times + if units is not None: + getitem *= ch_factors[:, np.newaxis] + return getitem + _check_option( + "reject_by_annotation", reject_by_annotation.lower(), ["omit", "nan"] + ) + onsets, ends = _annotations_starts_stops(self, ["BAD"]) + keep = (onsets < stop) & (ends > start) + onsets = np.maximum(onsets[keep], start) + ends = np.minimum(ends[keep], stop) + if len(onsets) == 0: + data, times = self[picks, start:stop] + if units is not None: + data *= ch_factors[:, np.newaxis] + if return_times: + return data, times + return data + n_samples = stop - start # total number of samples + used = np.ones(n_samples, bool) + for onset, end in zip(onsets, ends): + if onset >= end: + continue + used[onset - start : end - start] = False + used = np.concatenate([[False], used, [False]]) + starts = np.where(~used[:-1] & used[1:])[0] + start + stops = np.where(used[:-1] & ~used[1:])[0] + start + n_kept = (stops - starts).sum() # kept samples + n_rejected = n_samples - n_kept # rejected samples + if n_rejected > 0: + if reject_by_annotation == "omit": + msg = ( + "Omitting {} of {} ({:.2%}) samples, retaining {}" + " ({:.2%}) samples." + ) + logger.info( + msg.format( + n_rejected, + n_samples, + n_rejected / n_samples, + n_kept, + n_kept / n_samples, + ) + ) + data = np.zeros((len(picks), n_kept)) + times = np.zeros(data.shape[1]) + idx = 0 + for start, stop in zip(starts, stops): # get the data + if start == stop: + continue + end = idx + stop - start + data[:, idx:end], times[idx:end] = self[picks, start:stop] + idx = end + else: + msg = ( + "Setting {} of {} ({:.2%}) samples to NaN, retaining {}" + " ({:.2%}) samples." + ) + logger.info( + msg.format( + n_rejected, + n_samples, + n_rejected / n_samples, + n_kept, + n_kept / n_samples, + ) + ) + data, times = self[picks, start:stop] + data[:, ~used[1:-1]] = np.nan + else: + data, times = self[picks, start:stop] + + if units is not None: + data *= ch_factors[:, np.newaxis] + if return_times: + return data, times + return data + + @verbose + def apply_function( + self, + fun, + picks=None, + dtype=None, + n_jobs=None, + channel_wise=True, + verbose=None, + **kwargs, + ): + """Apply a function to a subset of channels. 
+ + %(applyfun_summary_raw)s + + Parameters + ---------- + %(fun_applyfun)s + %(picks_all_data_noref)s + %(dtype_applyfun)s + %(n_jobs)s Ignored if ``channel_wise=False`` as the workload + is split across channels. + %(channel_wise_applyfun)s + + .. versionadded:: 0.18 + %(verbose)s + %(kwargs_fun)s + + Returns + ------- + self : instance of Raw + The raw object with transformed data. + """ + _check_preload(self, "raw.apply_function") + picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False) + + if not callable(fun): + raise ValueError("fun needs to be a function") + + data_in = self._data + if dtype is not None and dtype != self._data.dtype: + self._data = self._data.astype(dtype) + + args = getfullargspec(fun).args + getfullargspec(fun).kwonlyargs + if channel_wise is False: + if ("ch_idx" in args) or ("ch_name" in args): + raise ValueError( + "apply_function cannot access ch_idx or ch_name " + "when channel_wise=False" + ) + if "ch_idx" in args: + logger.info("apply_function requested to access ch_idx") + if "ch_name" in args: + logger.info("apply_function requested to access ch_name") + + if channel_wise: + parallel, p_fun, n_jobs = parallel_func(_check_fun, n_jobs) + if n_jobs == 1: + # modify data inplace to save memory + for ch_idx in picks: + if "ch_idx" in args: + kwargs.update(ch_idx=ch_idx) + if "ch_name" in args: + kwargs.update(ch_name=self.info["ch_names"][ch_idx]) + self._data[ch_idx, :] = _check_fun( + fun, data_in[ch_idx, :], **kwargs + ) + else: + # use parallel function + data_picks_new = parallel( + p_fun( + fun, + data_in[ch_idx], + **kwargs, + **{ + k: v + for k, v in [ + ("ch_name", self.info["ch_names"][ch_idx]), + ("ch_idx", ch_idx), + ] + if k in args + }, + ) + for ch_idx in picks + ) + for run_idx, ch_idx in enumerate(picks): + self._data[ch_idx, :] = data_picks_new[run_idx] + else: + self._data[picks, :] = _check_fun(fun, data_in[picks, :], **kwargs) + + return self + + # Need a separate method because the default pad is different for raw + @copy_doc(FilterMixin.filter) + def filter( + self, + l_freq, + h_freq, + picks=None, + filter_length="auto", + l_trans_bandwidth="auto", + h_trans_bandwidth="auto", + n_jobs=None, + method="fir", + iir_params=None, + phase="zero", + fir_window="hamming", + fir_design="firwin", + skip_by_annotation=("edge", "bad_acq_skip"), + pad="reflect_limited", + verbose=None, + ): + return super().filter( + l_freq, + h_freq, + picks, + filter_length, + l_trans_bandwidth, + h_trans_bandwidth, + n_jobs=n_jobs, + method=method, + iir_params=iir_params, + phase=phase, + fir_window=fir_window, + fir_design=fir_design, + skip_by_annotation=skip_by_annotation, + pad=pad, + verbose=verbose, + ) + + @verbose + def notch_filter( + self, + freqs, + picks=None, + filter_length="auto", + notch_widths=None, + trans_bandwidth=1.0, + n_jobs=None, + method="fir", + iir_params=None, + mt_bandwidth=None, + p_value=0.05, + phase="zero", + fir_window="hamming", + fir_design="firwin", + pad="reflect_limited", + skip_by_annotation=("edge", "bad_acq_skip"), + verbose=None, + ): + """Notch filter a subset of channels. + + Parameters + ---------- + freqs : float | array of float | None + Specific frequencies to filter out from data, e.g., + ``np.arange(60, 241, 60)`` in the US or ``np.arange(50, 251, 50)`` + in Europe. ``None`` can only be used with the mode + ``'spectrum_fit'``, where an F test is used to find sinusoidal + components. 
+ %(picks_all_data)s + %(filter_length_notch)s + notch_widths : float | array of float | None + Width of each stop band (centred at each freq in freqs) in Hz. + If None, ``freqs / 200`` is used. + trans_bandwidth : float + Width of the transition band in Hz. + Only used for ``method='fir'`` and ``method='iir'``. + %(n_jobs_fir)s + %(method_fir)s + %(iir_params)s + mt_bandwidth : float | None + The bandwidth of the multitaper windowing function in Hz. + Only used in 'spectrum_fit' mode. + p_value : float + P-value to use in F-test thresholding to determine significant + sinusoidal components to remove when ``method='spectrum_fit'`` and + ``freqs=None``. Note that this will be Bonferroni corrected for the + number of frequencies, so large p-values may be justified. + %(phase)s + %(fir_window)s + %(fir_design)s + %(pad_fir)s + The default is ``'reflect_limited'``. + + .. versionadded:: 0.15 + %(skip_by_annotation)s + %(verbose)s + + Returns + ------- + raw : instance of Raw + The raw instance with filtered data. + + See Also + -------- + mne.filter.notch_filter + mne.io.Raw.filter + + Notes + ----- + Applies a zero-phase notch filter to the channels selected by + "picks". By default the data of the Raw object is modified inplace. + + The Raw object has to have the data loaded e.g. with ``preload=True`` + or ``self.load_data()``. + + .. note:: If n_jobs > 1, more memory is required as + ``len(picks) * n_times`` additional time points need to + be temporarily stored in memory. + + For details, see :func:`mne.filter.notch_filter`. + """ + fs = float(self.info["sfreq"]) + picks = _picks_to_idx(self.info, picks, exclude=(), none="data_or_ica") + _check_preload(self, "raw.notch_filter") + onsets, ends = _annotations_starts_stops(self, skip_by_annotation, invert=True) + logger.info( + "Filtering raw data in %d contiguous segment%s", len(onsets), _pl(onsets) + ) + for si, (start, stop) in enumerate(zip(onsets, ends)): + notch_filter( + self._data[:, start:stop], + fs, + freqs, + filter_length=filter_length, + notch_widths=notch_widths, + trans_bandwidth=trans_bandwidth, + method=method, + iir_params=iir_params, + mt_bandwidth=mt_bandwidth, + p_value=p_value, + picks=picks, + n_jobs=n_jobs, + copy=False, + phase=phase, + fir_window=fir_window, + fir_design=fir_design, + pad=pad, + ) + return self + + @verbose + def resample( + self, + sfreq, + *, + npad="auto", + window="auto", + stim_picks=None, + n_jobs=None, + events=None, + pad="auto", + method="fft", + verbose=None, + ): + """Resample all channels. + + If appropriate, an anti-aliasing filter is applied before resampling. + See :ref:`resampling-and-decimating` for more information. + + .. warning:: The intended purpose of this function is primarily to + speed up computations (e.g., projection calculation) when + precise timing of events is not required, as downsampling + raw data effectively jitters trigger timings. It is + generally recommended not to epoch downsampled data, + but instead epoch and then downsample, as epoching + downsampled data jitters triggers. + For more, see + `this illustrative gist + `_. + + If resampling the continuous data is desired, it is + recommended to construct events using the original data. + The event onsets can be jointly resampled with the raw + data using the 'events' parameter (a resampled copy is + returned). + + Parameters + ---------- + sfreq : float + New sample rate to use. + %(npad_resample)s + %(window_resample)s + stim_picks : list of int | None + Stim channels. 
These channels are simply subsampled or + supersampled (without applying any filtering). This reduces + resampling artifacts in stim channels, but may lead to missing + triggers. If None, stim channels are automatically chosen using + :func:`mne.pick_types`. + %(n_jobs_cuda)s + events : 2D array, shape (n_events, 3) | None + An optional event matrix. When specified, the onsets of the events + are resampled jointly with the data. NB: The input events are not + modified, but a new array is returned with the raw instead. + %(pad_resample_auto)s + + .. versionadded:: 0.15 + %(method_resample)s + + .. versionadded:: 1.7 + %(verbose)s + + Returns + ------- + raw : instance of Raw + The resampled version of the raw object. + events : array, shape (n_events, 3) | None + If events are jointly resampled, these are returned with the raw. + + See Also + -------- + mne.io.Raw.filter + mne.Epochs.resample + + Notes + ----- + For some data, it may be more accurate to use ``npad=0`` to reduce + artifacts. This is dataset dependent -- check your data! + + For optimum performance and to make use of ``n_jobs > 1``, the raw + object has to have the data loaded e.g. with ``preload=True`` or + ``self.load_data()``, but this increases memory requirements. The + resulting raw object will have the data loaded into memory. + """ + sfreq = float(sfreq) + o_sfreq = float(self.info["sfreq"]) + if _check_resamp_noop(sfreq, o_sfreq): + return self + + # When no event object is supplied, some basic detection of dropped + # events is performed to generate a warning. Finding events can fail + # for a variety of reasons, e.g. if no stim channel is present or it is + # corrupted. This should not stop the resampling from working. The + # warning should simply not be generated in this case. + if events is None: + try: + original_events = find_events(self) + except Exception: + pass + + offsets = np.concatenate(([0], np.cumsum(self._raw_lengths))) + + # set up stim channel processing + if stim_picks is None: + stim_picks = pick_types( + self.info, meg=False, ref_meg=False, stim=True, exclude=[] + ) + else: + stim_picks = _picks_to_idx( + self.info, stim_picks, exclude=(), with_ref_meg=False + ) + + kwargs = dict( + up=sfreq, + down=o_sfreq, + npad=npad, + window=window, + n_jobs=n_jobs, + pad=pad, + method=method, + ) + ratio, n_news = zip( + *( + _resamp_ratio_len(sfreq, o_sfreq, old_len) + for old_len in self._raw_lengths + ) + ) + ratio, n_news = ratio[0], np.array(n_news, int) + new_offsets = np.cumsum([0] + list(n_news)) + if self.preload: + new_data = np.empty((len(self.ch_names), new_offsets[-1]), self._data.dtype) + for ri, (n_orig, n_new) in enumerate(zip(self._raw_lengths, n_news)): + this_sl = slice(new_offsets[ri], new_offsets[ri + 1]) + if self.preload: + data_chunk = self._data[:, offsets[ri] : offsets[ri + 1]] + new_data[:, this_sl] = resample(data_chunk, **kwargs) + # In empirical testing, it was faster to resample all channels + # (above) and then replace the stim channels than it was to + # only resample the proper subset of channels and then use + # np.insert() to restore the stims. 
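+                # Stim channels carry integer event codes, so the anti-alias
+                # filtering inside resample() would smear trigger onsets; the
+                # plain sub/supersampling below keeps the codes intact at the
+                # cost of possibly dropping very brief events.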
+ if len(stim_picks) > 0: + new_data[stim_picks, this_sl] = _resample_stim_channels( + data_chunk[stim_picks], n_new, data_chunk.shape[1] + ) + else: # this will not be I/O efficient, but will be mem efficient + for ci in range(len(self.ch_names)): + data_chunk = self.get_data( + ci, offsets[ri], offsets[ri + 1], verbose="error" + )[0] + if ci == 0 and ri == 0: + new_data = np.empty( + (len(self.ch_names), new_offsets[-1]), data_chunk.dtype + ) + if ci in stim_picks: + resamp = _resample_stim_channels( + data_chunk, n_new, data_chunk.shape[-1] + )[0] + else: + resamp = resample(data_chunk, **kwargs) + new_data[ci, this_sl] = resamp + + self._cropped_samp = int(np.round(self._cropped_samp * ratio)) + self._first_samps = np.round(self._first_samps * ratio).astype(int) + self._last_samps = np.array(self._first_samps) + n_news - 1 + self._raw_lengths[ri] = list(n_news) + assert np.array_equal(n_news, self._last_samps - self._first_samps + 1) + self._data = new_data + self.preload = True + lowpass = self.info.get("lowpass") + lowpass = np.inf if lowpass is None else lowpass + with self.info._unlock(): + self.info["lowpass"] = min(lowpass, sfreq / 2.0) + self.info["sfreq"] = sfreq + + # See the comment above why we ignore all errors here. + if events is None: + try: + # Did we loose events? + resampled_events = find_events(self) + if len(resampled_events) != len(original_events): + warn( + "Resampling of the stim channels caused event " + "information to become unreliable. Consider finding " + "events on the original data and passing the event " + "matrix as a parameter." + ) + except Exception: + pass + + return self + else: + # always make a copy of events + events = events.copy() + + events[:, 0] = np.minimum( + np.round(events[:, 0] * ratio).astype(int), + self._data.shape[1] + self.first_samp - 1, + ) + return self, events + + @verbose + def rescale(self, scalings, *, verbose=None): + """Rescale channels. + + .. warning:: + MNE-Python assumes data are stored in SI base units. This function should + typically only be used to fix an incorrect scaling factor in the data to get + it to be in SI base units, otherwise unintended problems (e.g., incorrect + source imaging results) and analysis errors can occur. + + Parameters + ---------- + scalings : int | float | dict + The scaling factor(s) by which to multiply the data. If a float, the same + scaling factor is applied to all channels (this works only if all channels + are of the same type). If a dict, the keys must be valid channel types and + the values the scaling factors to apply to the corresponding channels. + %(verbose)s + + Returns + ------- + raw : Raw + The raw object with rescaled data (modified in-place). + + Examples + -------- + A common use case for EEG data is to convert from µV to V, since many EEG + systems store data in µV, but MNE-Python expects the data to be in V. Therefore, + the data needs to be rescaled by a factor of 1e-6. To rescale all channels from + µV to V, you can do:: + + >>> raw.rescale(1e-6) # doctest: +SKIP + + Note that the previous example only works if all channels are of the same type. + If there are multiple channel types, you can pass a dict with the individual + scaling factors. 
For example, to rescale only EEG channels, you can do::
+
+            >>> raw.rescale({"eeg": 1e-6})  # doctest: +SKIP
+        """
+        _validate_type(scalings, (int, float, dict), "scalings")
+        _check_preload(self, "raw.rescale")
+
+        channel_types = self.get_channel_types(unique=True)
+
+        if isinstance(scalings, int | float):
+            if len(channel_types) == 1:
+                self.apply_function(lambda x: x * scalings, channel_wise=False)
+            else:
+                raise ValueError(
+                    "If scalings is a scalar, all channels must be of the same type. "
+                    "Consider passing a dict instead."
+                )
+        else:
+            for ch_type in scalings.keys():
+                if ch_type not in channel_types:
+                    raise ValueError(
+                        f'Channel type "{ch_type}" is not present in the Raw file.'
+                    )
+            for ch_type, ch_scale in scalings.items():
+                self.apply_function(
+                    lambda x: x * ch_scale, picks=ch_type, channel_wise=False
+                )
+
+        return self
+
+    @verbose
+    def crop(self, tmin=0.0, tmax=None, include_tmax=True, *, verbose=None):
+        """Crop raw data file.
+
+        Limit the data from the raw file to go between specific times. Note
+        that the new ``tmin`` is assumed to be ``t=0`` for all subsequently
+        called functions (e.g., :meth:`~mne.io.Raw.time_as_index`, or
+        :class:`~mne.Epochs`). New :term:`first_samp` and :term:`last_samp`
+        are set accordingly.
+
+        This function operates in-place on the instance.
+        Use :meth:`mne.io.Raw.copy` if operation on a copy is desired.
+
+        Parameters
+        ----------
+        %(tmin_raw)s
+        %(tmax_raw)s
+        %(include_tmax)s
+        %(verbose)s
+
+        Returns
+        -------
+        raw : instance of Raw
+            The cropped raw object, modified in-place.
+        """
+        max_time = (self.n_times - 1) / self.info["sfreq"]
+        if tmax is None:
+            tmax = max_time
+
+        if tmin > tmax:
+            raise ValueError(f"tmin ({tmin}) must be less than tmax ({tmax})")
+        if tmin < 0.0:
+            raise ValueError(f"tmin ({tmin}) must be >= 0")
+        elif tmax - int(not include_tmax) / self.info["sfreq"] > max_time:
+            raise ValueError(
+                f"tmax ({tmax}) must be less than or equal to the max "
+                f"time ({max_time:0.4f} s)"
+            )
+
+        smin, smax = np.where(
+            _time_mask(
+                self.times,
+                tmin,
+                tmax,
+                sfreq=self.info["sfreq"],
+                include_tmax=include_tmax,
+            )
+        )[0][[0, -1]]
+        cumul_lens = np.concatenate(([0], np.array(self._raw_lengths, dtype="int")))
+        cumul_lens = np.cumsum(cumul_lens)
+        keepers = np.logical_and(
+            np.less(smin, cumul_lens[1:]), np.greater_equal(smax, cumul_lens[:-1])
+        )
+        keepers = np.where(keepers)[0]
+        # if we drop file(s) from the beginning, we need to keep track of
+        # how many samples we dropped relative to that one
+        self._cropped_samp += smin
+        self._first_samps = np.atleast_1d(self._first_samps[keepers])
+        # Adjust first_samp of first used file!
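+        # (smin is the first kept sample in cumulative terms and
+        # cumul_lens[keepers[0]] is where that file starts, so their
+        # difference is how far into that file the crop begins)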
+        self._first_samps[0] += smin - cumul_lens[keepers[0]]
+        self._last_samps = np.atleast_1d(self._last_samps[keepers])
+        self._last_samps[-1] -= cumul_lens[keepers[-1] + 1] - 1 - smax
+        self._read_picks = [self._read_picks[ri] for ri in keepers]
+        assert all(len(r) == len(self._read_picks[0]) for r in self._read_picks)
+        self._raw_extras = [self._raw_extras[ri] for ri in keepers]
+        self.filenames = [self.filenames[ri] for ri in keepers]
+        if self.preload:
+            # slice and copy to avoid the reference to large array
+            self._data = self._data[:, smin : smax + 1].copy()
+
+        annotations = self.annotations
+        # now call setter to filter out annotations outside of interval
+        if annotations.orig_time is None:
+            assert self.info["meas_date"] is None
+            # When self.info['meas_date'] is None (which is guaranteed if
+            # self.annotations.orig_time is None), when we do the
+            # self.set_annotations, it's assumed that the annotations onset
+            # are relative to first_time, so we have to subtract it, then
+            # set_annotations will put it back.
+            annotations.onset -= self.first_time
+        self.set_annotations(annotations, False)
+
+        return self
+
+    @verbose
+    def crop_by_annotations(self, annotations=None, *, verbose=None):
+        """Get crops of raw data file for selected annotations.
+
+        Parameters
+        ----------
+        annotations : instance of Annotations | None
+            The annotations to use for cropping the raw file. If None,
+            the annotations from the instance are used.
+        %(verbose)s
+
+        Returns
+        -------
+        raws : list
+            The cropped raw objects.
+        """
+        if annotations is None:
+            annotations = self.annotations
+
+        raws = []
+        for annot in annotations:
+            onset = annot["onset"] - self.first_time
+            # be careful about near-zero errors (crop is very picky about this,
+            # e.g., -1e-8 is an error)
+            if -self.info["sfreq"] / 2 < onset < 0:
+                onset = 0
+            raw_crop = self.copy().crop(onset, onset + annot["duration"])
+            raws.append(raw_crop)
+
+        return raws
+
+    @verbose
+    def save(
+        self,
+        fname,
+        picks=None,
+        tmin=0,
+        tmax=None,
+        buffer_size_sec=None,
+        drop_small_buffer=False,
+        proj=False,
+        fmt="single",
+        overwrite=False,
+        split_size="2GB",
+        split_naming="neuromag",
+        verbose=None,
+    ):
+        """Save raw data to file.
+
+        Parameters
+        ----------
+        fname : path-like
+            File name of the new dataset. This has to be a new filename
+            unless data have been preloaded. Filenames should end with
+            ``raw.fif`` (common raw data), ``raw_sss.fif``
+            (Maxwell-filtered continuous data),
+            ``raw_tsss.fif`` (temporally signal-space-separated data),
+            ``_meg.fif`` (common MEG data), ``_eeg.fif`` (common EEG data),
+            or ``_ieeg.fif`` (common intracranial EEG data). You may also
+            append an additional ``.gz`` suffix to enable gzip compression.
+        %(picks_all)s
+        %(tmin_raw)s
+        %(tmax_raw)s
+        buffer_size_sec : float | None
+            Size of data chunks in seconds. If None (default), the buffer
+            size of the original file is used.
+        drop_small_buffer : bool
+            Drop or not the last buffer. It is required by maxfilter (SSS)
+            that only accepts raw files with buffers of the same size.
+        proj : bool
+            If True the data is saved with the projections applied (active).
+
+            .. note:: If ``apply_proj()`` was used to apply the projections,
+                      the projections will be active even if ``proj`` is False.
+        fmt : 'single' | 'double' | 'int' | 'short'
+            Format to use to save raw data. Valid options are 'double',
+            'single', 'int', and 'short' for 64- or 32-bit float, or 32- or
+            16-bit integers, respectively.
It is **strongly** recommended to + use 'single', as this is backward-compatible, and is standard for + maintaining precision. Note that using 'short' or 'int' may result + in loss of precision, complex data cannot be saved as 'short', + and neither complex data types nor real data stored as 'double' + can be loaded with the MNE command-line tools. See raw.orig_format + to determine the format the original data were stored in. + %(overwrite)s + To overwrite original file (the same one that was loaded), + data must be preloaded upon reading. + split_size : str | int + Large raw files are automatically split into multiple pieces. This + parameter specifies the maximum size of each piece. If the + parameter is an integer, it specifies the size in Bytes. It is + also possible to pass a human-readable string, e.g., 100MB. + + .. note:: Due to FIFF file limitations, the maximum split + size is 2GB. + %(split_naming)s + + .. versionadded:: 0.17 + %(verbose)s + + Returns + ------- + fnames : List of path-like + List of path-like objects containing the path to each file split. + .. versionadded:: 1.9 + + Notes + ----- + If Raw is a concatenation of several raw files, **be warned** that + only the measurement information from the first raw file is stored. + This likely means that certain operations with external tools may not + work properly on a saved concatenated file (e.g., probably some + or all forms of SSS). It is recommended not to concatenate and + then save raw files for this reason. + + Samples annotated ``BAD_ACQ_SKIP`` are not stored in order to optimize + memory. Whatever values, they will be loaded as 0s when reading file. + """ + endings = ( + "raw.fif", + "raw_sss.fif", + "raw_tsss.fif", + "_meg.fif", + "_eeg.fif", + "_ieeg.fif", + ) + endings += tuple([f"{e}.gz" for e in endings]) + endings_err = (".fif", ".fif.gz") + + # convert to str, check for overwrite a few lines later + fname = _check_fname( + fname, + overwrite=True, + verbose="error", + check_bids_split=True, + name="fname", + ) + check_fname(fname, "raw", endings, endings_err=endings_err) + + split_size = _get_split_size(split_size) + if not self.preload and fname in self.filenames: + raise ValueError( + "You cannot save data to the same file. Please use a different " + "filename." + ) + + if self.preload: + if np.iscomplexobj(self._data): + warn( + "Saving raw file with complex data. Loading with command-line MNE " + "tools will not work." 
+ ) + + data_test = self[0, 0][0] + if fmt == "short" and np.iscomplexobj(data_test): + raise ValueError( + 'Complex data must be saved as "single" or "double", not "short"' + ) + + # check for file existence and expand `~` if present + fname = _check_fname(fname=fname, overwrite=overwrite, verbose="error") + + if proj: + info = deepcopy(self.info) + projector, info = setup_proj(info) + activate_proj(info["projs"], copy=False) + else: + info = self.info + projector = None + + # + # Set up the reading parameters + # + + # Convert to samples + start, stop = self._tmin_tmax_to_start_stop(tmin, tmax) + buffer_size = self._get_buffer_size(buffer_size_sec) + + # write the raw file + _validate_type(split_naming, str, "split_naming") + _check_option("split_naming", split_naming, ("neuromag", "bids")) + + cfg = _RawFidWriterCfg(buffer_size, split_size, drop_small_buffer, fmt) + raw_fid_writer = _RawFidWriter(self, info, picks, projector, start, stop, cfg) + filenames = _write_raw(raw_fid_writer, fname, split_naming, overwrite) + return filenames + + @verbose + def export( + self, + fname, + fmt="auto", + physical_range="auto", + add_ch_type=False, + *, + overwrite=False, + verbose=None, + ): + """Export Raw to external formats. + + %(export_fmt_support_raw)s + + %(export_warning)s + + Parameters + ---------- + %(fname_export_params)s + %(export_fmt_params_raw)s + %(physical_range_export_params)s + %(add_ch_type_export_params)s + %(overwrite)s + + .. versionadded:: 0.24.1 + %(verbose)s + + Notes + ----- + .. versionadded:: 0.24 + + %(export_warning_note_raw)s + %(export_eeglab_note)s + %(export_edf_note)s + """ + from ..export import export_raw + + export_raw( + fname, + self, + fmt, + physical_range=physical_range, + add_ch_type=add_ch_type, + overwrite=overwrite, + verbose=verbose, + ) + + def _tmin_tmax_to_start_stop(self, tmin, tmax): + start = int(np.floor(tmin * self.info["sfreq"])) + + # "stop" is the first sample *not* to save, so we need +1's here + if tmax is None: + stop = np.inf + else: + stop = self.time_as_index(float(tmax), use_rounding=True)[0] + 1 + stop = min(stop, self.last_samp - self.first_samp + 1) + if stop <= start or stop <= 0: + raise ValueError(f"tmin ({tmin}) and tmax ({tmax}) yielded no samples") + return start, stop + + @copy_function_doc_to_method_doc(plot_raw) + def plot( + self, + events=None, + duration=10.0, + start=0.0, + n_channels=20, + bgcolor="w", + color=None, + bad_color="lightgray", + event_color="cyan", + scalings=None, + remove_dc=True, + order=None, + show_options=False, + title=None, + show=True, + block=False, + highpass=None, + lowpass=None, + filtorder=4, + clipping=_RAW_CLIP_DEF, + show_first_samp=False, + proj=True, + group_by="type", + butterfly=False, + decim="auto", + noise_cov=None, + event_id=None, + show_scrollbars=True, + show_scalebars=True, + time_format="float", + precompute=None, + use_opengl=None, + *, + picks=None, + theme=None, + overview_mode=None, + splash=True, + verbose=None, + ): + return plot_raw( + self, + events, + duration, + start, + n_channels, + bgcolor, + color, + bad_color, + event_color, + scalings, + remove_dc, + order, + show_options, + title, + show, + block, + highpass, + lowpass, + filtorder, + clipping, + show_first_samp, + proj, + group_by, + butterfly, + decim, + noise_cov=noise_cov, + event_id=event_id, + show_scrollbars=show_scrollbars, + show_scalebars=show_scalebars, + time_format=time_format, + precompute=precompute, + use_opengl=use_opengl, + picks=picks, + theme=theme, + overview_mode=overview_mode, + 
splash=splash, + verbose=verbose, + ) + + @property + def ch_names(self): + """Channel names.""" + return self.info["ch_names"] + + @property + def times(self): + """Time points.""" + out = _arange_div(self.n_times, float(self.info["sfreq"])) + out.flags["WRITEABLE"] = False + return out + + @property + def n_times(self): + """Number of time points.""" + return self.last_samp - self.first_samp + 1 + + @property + def duration(self): + """Duration of the data in seconds. + + .. versionadded:: 1.9 + """ + return self.n_times / self.info["sfreq"] + + def __len__(self): + """Return the number of time points. + + Returns + ------- + len : int + The number of time points. + + Examples + -------- + This can be used as:: + + >>> len(raw) # doctest: +SKIP + 1000 + """ + return self.n_times + + @verbose + def load_bad_channels(self, bad_file=None, force=False, verbose=None): + """Mark channels as bad from a text file. + + This function operates mostly in the style of the C function + ``mne_mark_bad_channels``. Each line in the text file will be + interpreted as a name of a bad channel. + + Parameters + ---------- + bad_file : path-like | None + File name of the text file containing bad channels. + If ``None`` (default), bad channels are cleared, but this + is more easily done directly with ``raw.info['bads'] = []``. + force : bool + Whether or not to force bad channel marking (of those + that exist) if channels are not found, instead of + raising an error. Defaults to ``False``. + %(verbose)s + """ + prev_bads = self.info["bads"] + new_bads = [] + if bad_file is not None: + # Check to make sure bad channels are there + names = frozenset(self.info["ch_names"]) + with open(bad_file) as fid: + bad_names = [line for line in fid.read().splitlines() if line] + new_bads = [ci for ci in bad_names if ci in names] + count_diff = len(bad_names) - len(new_bads) + + if count_diff > 0: + msg = ( + f"{count_diff} bad channel(s) from:" + f"\n{bad_file}\nnot found in:\n{self.filenames[0]}" + ) + if not force: + raise ValueError(msg) + else: + warn(msg) + + if prev_bads != new_bads: + logger.info(f"Updating bad channels: {prev_bads} -> {new_bads}") + self.info["bads"] = new_bads + else: + logger.info(f"No channels updated. Bads are: {prev_bads}") + + @fill_doc + def append(self, raws, preload=None): + """Concatenate raw instances as if they were continuous. + + .. note:: Boundaries of the raw files are annotated bad. If you wish to + use the data as continuous recording, you can remove the + boundary annotations after concatenation (see + :meth:`mne.Annotations.delete`). + + Parameters + ---------- + raws : list, or Raw instance + List of Raw instances to concatenate to the current instance + (in order), or a single raw instance to concatenate. 
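+            For example, ``raw.append([raw2, raw3])`` would concatenate two
+            further runs onto ``raw`` in place (``raw2`` and ``raw3`` being
+            compatible recordings; hypothetical names).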
+ %(preload_concatenate)s + """ + if not isinstance(raws, list): + raws = [raws] + + # make sure the raws are compatible + all_raws = [self] + all_raws += raws + _check_raw_compatibility(all_raws) + + # deal with preloading data first (while files are separate) + all_preloaded = self.preload and all(r.preload for r in raws) + if preload is None: + if all_preloaded: + preload = True + else: + preload = False + + if preload is False: + if self.preload: + self._data = None + self.preload = False + else: + # do the concatenation ourselves since preload might be a string + nchan = self.info["nchan"] + c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)]) + nsamp = c_ns[-1] + + if not self.preload: + this_data = self._read_segment() + else: + this_data = self._data + + # allocate the buffer + _data = _allocate_data(preload, (nchan, nsamp), this_data.dtype) + _data[:, 0 : c_ns[0]] = this_data + + for ri in range(len(raws)): + if not raws[ri].preload: + # read the data directly into the buffer + data_buffer = _data[:, c_ns[ri] : c_ns[ri + 1]] + raws[ri]._read_segment(data_buffer=data_buffer) + else: + _data[:, c_ns[ri] : c_ns[ri + 1]] = raws[ri]._data + self._data = _data + self.preload = True + + # now combine information from each raw file to construct new self + annotations = self.annotations + assert annotations.orig_time == self.info["meas_date"] + edge_samps = list() + for ri, r in enumerate(raws): + edge_samps.append(self.last_samp - self.first_samp + 1) + annotations = _combine_annotations( + annotations, + r.annotations, + edge_samps[-1], + self.first_samp, + r.first_samp, + self.info["sfreq"], + ) + self._first_samps = np.r_[self._first_samps, r._first_samps] + self._last_samps = np.r_[self._last_samps, r._last_samps] + self._read_picks += r._read_picks + self._raw_extras += r._raw_extras + self._filenames += r._filenames # use the private attribute to use the list + assert annotations.orig_time == self.info["meas_date"] + # The above _combine_annotations gets everything synchronized to + # first_samp. set_annotations (with no absolute time reference) assumes + # that the annotations being set are relative to first_samp, and will + # add it back on. So here we have to remove it: + if annotations.orig_time is None: + annotations.onset -= self.first_samp / self.info["sfreq"] + self.set_annotations(annotations) + for edge_samp in edge_samps: + onset = _sync_onset(self, edge_samp / self.info["sfreq"], True) + logger.debug( + f"Marking edge at {edge_samp} samples " f"(maps to {onset:0.3f} sec)" + ) + self.annotations.append(onset, 0.0, "BAD boundary") + self.annotations.append(onset, 0.0, "EDGE boundary") + if not ( + len(self._first_samps) + == len(self._last_samps) + == len(self._raw_extras) + == len(self.filenames) + == len(self._read_picks) + ): + raise RuntimeError("Append error") # should never happen + + def close(self): + """Clean up the object. + + Does nothing for objects that close their file descriptors. + Things like Raw will override this method. + """ + pass # noqa + + def copy(self): + """Return copy of Raw instance. + + Returns + ------- + inst : instance of Raw + A copy of the instance. 
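+
+        Examples
+        --------
+        A common pattern is to keep the original and work on a copy, e.g.::
+
+            >>> raw_filt = raw.copy().filter(l_freq=1.0, h_freq=40.0)  # doctest: +SKIP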
+ """ + return deepcopy(self) + + def __repr__(self): # noqa: D105 + name = self.filenames[0] + name = "" if name is None else Path(name).name + ", " + size_str = str(sizeof_fmt(self._size)) # str in case it fails -> None + size_str += f", data{'' if self.preload else ' not'} loaded" + s = ( + f"{name}{len(self.ch_names)} x {self.n_times} " + f"({self.duration:0.1f} s), ~{size_str}" + ) + return f"<{self.__class__.__name__} | {s}>" + + @repr_html + def _repr_html_(self): + basenames = [f.name for f in self.filenames if f is not None] + + duration = self._get_duration_string() + + raw_template = _get_html_template("repr", "raw.html.jinja") + return raw_template.render( + inst=self, + filenames=basenames, + duration=duration, + ) + + def _get_duration_string(self): + # https://stackoverflow.com/a/10981895 + duration = np.ceil(self.duration) # always take full seconds + hours, remainder = divmod(duration, 3600) + minutes, seconds = divmod(remainder, 60) + return f"{hours:02.0f}:{minutes:02.0f}:{seconds:02.0f}" + + def add_events(self, events, stim_channel=None, replace=False): + """Add events to stim channel. + + Parameters + ---------- + events : ndarray, shape (n_events, 3) + Events to add. The first column specifies the sample number of + each event, the second column is ignored, and the third column + provides the event value. If events already exist in the Raw + instance at the given sample numbers, the event values will be + added together. + stim_channel : str | None + Name of the stim channel to add to. If None, the config variable + 'MNE_STIM_CHANNEL' is used. If this is not found, it will default + to ``'STI 014'``. + replace : bool + If True the old events on the stim channel are removed before + adding the new ones. + + Notes + ----- + Data must be preloaded in order to add events. + """ + _check_preload(self, "Adding events") + events = np.asarray(events) + if events.ndim != 2 or events.shape[1] != 3: + raise ValueError("events must be shape (n_events, 3)") + stim_channel = _get_stim_channel(stim_channel, self.info) + pick = pick_channels(self.ch_names, stim_channel, ordered=False) + if len(pick) == 0: + raise ValueError(f"Channel {stim_channel} not found") + pick = pick[0] + idx = events[:, 0].astype(int) + if np.any(idx < self.first_samp) or np.any(idx > self.last_samp): + raise ValueError( + f"event sample numbers must be between {self.first_samp} " + f"and {self.last_samp}" + ) + if not all(idx == events[:, 0]): + raise ValueError("event sample numbers must be integers") + if replace: + self._data[pick, :] = 0.0 + self._data[pick, idx - self.first_samp] += events[:, 2] + + def _get_buffer_size(self, buffer_size_sec=None): + """Get the buffer size.""" + if buffer_size_sec is None: + buffer_size_sec = self.buffer_size_sec + buffer_size_sec = float(buffer_size_sec) + return int(np.ceil(buffer_size_sec * self.info["sfreq"])) + + @verbose + def compute_psd( + self, + method="welch", + fmin=0, + fmax=np.inf, + tmin=None, + tmax=None, + picks=None, + exclude=(), + proj=False, + remove_dc=True, + reject_by_annotation=True, + *, + n_jobs=1, + verbose=None, + **method_kw, + ): + """Perform spectral analysis on sensor data. + + Parameters + ---------- + %(method_psd)s + Note that ``"multitaper"`` cannot be used if ``reject_by_annotation=True`` + and there are ``"bad_*"`` annotations in the :class:`~mne.io.Raw` data; + in such cases use ``"welch"``. Default is ``'welch'``. 
+ %(fmin_fmax_psd)s + %(tmin_tmax_psd)s + %(picks_good_data_noref)s + %(exclude_psd)s + %(proj_psd)s + %(remove_dc)s + %(reject_by_annotation_psd)s + %(n_jobs)s + %(verbose)s + %(method_kw_psd)s + + Returns + ------- + spectrum : instance of Spectrum + The spectral representation of the data. + + Notes + ----- + .. versionadded:: 1.2 + + References + ---------- + .. footbibliography:: + """ + method = _validate_method(method, type(self).__name__) + self._set_legacy_nfft_default(tmin, tmax, method, method_kw) + + return Spectrum( + self, + method=method, + fmin=fmin, + fmax=fmax, + tmin=tmin, + tmax=tmax, + picks=picks, + exclude=exclude, + proj=proj, + remove_dc=remove_dc, + reject_by_annotation=reject_by_annotation, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + + @verbose + def compute_tfr( + self, + method, + freqs, + *, + tmin=None, + tmax=None, + picks=None, + proj=False, + output="power", + reject_by_annotation=True, + decim=1, + n_jobs=None, + verbose=None, + **method_kw, + ): + """Compute a time-frequency representation of sensor data. + + Parameters + ---------- + %(method_tfr)s + %(freqs_tfr)s + %(tmin_tmax_psd)s + %(picks_good_data_noref)s + %(proj_psd)s + %(output_compute_tfr)s + %(reject_by_annotation_tfr)s + %(decim_tfr)s + %(n_jobs)s + %(verbose)s + %(method_kw_tfr)s + + Returns + ------- + tfr : instance of RawTFR + The time-frequency-resolved power estimates of the data. + + Notes + ----- + .. versionadded:: 1.7 + + References + ---------- + .. footbibliography:: + """ + _check_option("output", output, ("power", "phase", "complex")) + method_kw["output"] = output + return RawTFR( + self, + method=method, + freqs=freqs, + tmin=tmin, + tmax=tmax, + picks=picks, + proj=proj, + reject_by_annotation=reject_by_annotation, + decim=decim, + n_jobs=n_jobs, + verbose=verbose, + **method_kw, + ) + + @verbose + def to_data_frame( + self, + picks=None, + index=None, + scalings=None, + copy=True, + start=None, + stop=None, + long_format=False, + time_format=None, + *, + verbose=None, + ): + """Export data in tabular structure as a pandas DataFrame. + + Channels are converted to columns in the DataFrame. By default, an + additional column "time" is added, unless ``index`` is not ``None`` + (in which case time values form the DataFrame's index). + + Parameters + ---------- + %(picks_all)s + %(index_df_raw)s + Defaults to ``None``. + %(scalings_df)s + %(copy_df)s + start : int | None + Starting sample index for creating the DataFrame from a temporal + span of the Raw object. ``None`` (the default) uses the first + sample. + stop : int | None + Ending sample index for creating the DataFrame from a temporal span + of the Raw object. ``None`` (the default) uses the last sample. + %(long_format_df_raw)s + %(time_format_df_raw)s + + .. 
versionadded:: 0.20 + %(verbose)s + + Returns + ------- + %(df_return)s + """ + # check pandas once here, instead of in each private utils function + pd = _check_pandas_installed() # noqa + # arg checking + valid_index_args = ["time"] + valid_time_formats = ["ms", "timedelta", "datetime"] + index = _check_pandas_index_arguments(index, valid_index_args) + time_format = _check_time_format( + time_format, valid_time_formats, self.info["meas_date"] + ) + # get data + picks = _picks_to_idx(self.info, picks, "all", exclude=()) + data, times = self[picks, start:stop] + data = data.T + if copy: + data = data.copy() + data = _scale_dataframe_data(self, data, picks, scalings) + # prepare extra columns / multiindex + mindex = list() + times = _convert_times( + times, time_format, self.info["meas_date"], self.first_time + ) + mindex.append(("time", times)) + # build DataFrame + df = _build_data_frame( + self, data, picks, long_format, mindex, index, default_index=["time"] + ) + return df + + def describe(self, data_frame=False): + """Describe channels (name, type, descriptive statistics). + + Parameters + ---------- + data_frame : bool + If True, return results in a pandas.DataFrame. If False, only print + results. Columns 'ch', 'type', and 'unit' indicate channel index, + channel type, and unit of the remaining five columns. These columns + are 'min' (minimum), 'Q1' (first quartile or 25% percentile), + 'median', 'Q3' (third quartile or 75% percentile), and 'max' + (maximum). + + Returns + ------- + result : None | pandas.DataFrame + If data_frame=False, returns None. If data_frame=True, returns + results in a pandas.DataFrame (requires pandas). + """ + nchan = self.info["nchan"] + + # describe each channel + cols = defaultdict(list) + cols["name"] = self.ch_names + for i in range(nchan): + ch = self.info["chs"][i] + data = self[i][0] + cols["type"].append(channel_type(self.info, i)) + cols["unit"].append(_unit2human[ch["unit"]]) + cols["min"].append(np.min(data)) + cols["Q1"].append(np.percentile(data, 25)) + cols["median"].append(np.median(data)) + cols["Q3"].append(np.percentile(data, 75)) + cols["max"].append(np.max(data)) + + if data_frame: # return data frame + import pandas as pd + + df = pd.DataFrame(cols) + df.index.name = "ch" + return df + + # convert into commonly used units + scalings = _handle_default("scalings") + units = _handle_default("units") + for i in range(nchan): + unit = units.get(cols["type"][i]) + scaling = scalings.get(cols["type"][i], 1) + if scaling != 1: + cols["unit"][i] = unit + for col in ["min", "Q1", "median", "Q3", "max"]: + cols[col][i] *= scaling + + lens = { + "ch": max(2, len(str(nchan))), + "name": max(4, max([len(n) for n in cols["name"]])), + "type": max(4, max([len(t) for t in cols["type"]])), + "unit": max(4, max([len(u) for u in cols["unit"]])), + } + + # print description, start with header + print(self) + print( + f"{'ch':>{lens['ch']}} " + f"{'name':<{lens['name']}} " + f"{'type':<{lens['type']}} " + f"{'unit':<{lens['unit']}} " + f"{'min':>9} " + f"{'Q1':>9} " + f"{'median':>9} " + f"{'Q3':>9} " + f"{'max':>9}" + ) + # print description for each channel + for i in range(nchan): + msg = ( + f"{i:>{lens['ch']}} " + f"{cols['name'][i]:<{lens['name']}} " + f"{cols['type'][i].upper():<{lens['type']}} " + f"{cols['unit'][i]:<{lens['unit']}} " + ) + for col in ["min", "Q1", "median", "Q3"]: + msg += f"{cols[col][i]:>9.2f} " + msg += f"{cols['max'][i]:>9.2f}" + print(msg) + + +def _allocate_data(preload, shape, dtype): + """Allocate data in memory or in 
memmap for preloading.""" + if preload in (None, True): # None comes from _read_segment + data = np.zeros(shape, dtype) + else: + _validate_type(preload, "path-like", "preload") + data = np.memmap(str(preload), mode="w+", dtype=dtype, shape=shape) + return data + + +def _convert_slice(sel): + if len(sel) and (np.diff(sel) == 1).all(): + return slice(sel[0], sel[-1] + 1) + else: + return sel + + +def _get_ch_factors(inst, units, picks_idxs): + """Get scaling factors for data, given units. + + Parameters + ---------- + inst : instance of Raw | Epochs | Evoked + The instance. + %(units)s + picks_idxs : ndarray + The picks as provided through _picks_to_idx. + + Returns + ------- + ch_factors : ndarray of floats, shape(len(picks),) + The scaling factors for each channel, ordered according + to picks. + + """ + _validate_type(units, types=(None, str, dict), item_name="units") + ch_factors = np.ones(len(picks_idxs)) + si_units = _handle_default("si_units") + ch_types = inst.get_channel_types(picks=picks_idxs) + # Convert to dict if str units + if isinstance(units, str): + # Check that there is only one channel type + unit_ch_type = list(set(ch_types) & set(si_units.keys())) + if len(unit_ch_type) > 1: + raise ValueError( + '"units" cannot be str if there is more than ' + "one channel type with a unit " + f"{unit_ch_type}." + ) + units = {unit_ch_type[0]: units} # make the str argument a dict + # Loop over the dict to get channel factors + if isinstance(units, dict): + for ch_type, ch_unit in units.items(): + # Get the scaling factors + scaling = _get_scaling(ch_type, ch_unit) + if scaling != 1: + indices = [i_ch for i_ch, ch in enumerate(ch_types) if ch == ch_type] + ch_factors[indices] *= scaling + + return ch_factors + + +def _get_scaling(ch_type, target_unit): + """Return the scaling factor based on the channel type and a target unit. + + Parameters + ---------- + ch_type : str + The channel type. + target_unit : str + The target unit for the provided channel type. + + Returns + ------- + scaling : float + The scaling factor to convert from the si_unit (used by default for MNE + objects) to the target unit. + """ + scaling = 1.0 + si_units = _handle_default("si_units") + si_units_splitted = {key: si_units[key].split("/") for key in si_units} + prefixes = _handle_default("prefixes") + prefix_list = list(prefixes.keys()) + + # Check that the provided unit exists for the ch_type + unit_list = target_unit.split("/") + if ch_type not in si_units.keys(): + raise KeyError( + f"{ch_type} is not a channel type that can be scaled from units." + ) + si_unit_list = si_units_splitted[ch_type] + if len(unit_list) != len(si_unit_list): + raise ValueError( + f"{target_unit} is not a valid unit for {ch_type}, use a " + f"sub-multiple of {si_units[ch_type]} instead." + ) + for i, unit in enumerate(unit_list): + valid = [prefix + si_unit_list[i] for prefix in prefix_list] + if unit not in valid: + raise ValueError( + f"{target_unit} is not a valid unit for {ch_type}, use a " + f"sub-multiple of {si_units[ch_type]} instead." 
+ ) + + # Get the scaling factors + for i, unit in enumerate(unit_list): + has_square = False + # XXX power normally not used as csd cannot get_data() + if unit[-1] == "²": + has_square = True + if unit == "m" or unit == "m²": + factor = 1.0 + elif unit[0] in prefixes.keys(): + factor = prefixes[unit[0]] + else: + factor = 1.0 + if factor != 1: + if has_square: + factor *= factor + if i == 0: + scaling = scaling * factor + elif i == 1: + scaling = scaling / factor + return scaling + + +class _ReadSegmentFileProtector: + """Ensure only _filenames, _raw_extras, and _read_segment_file are used.""" + + def __init__(self, raw): + self.__raw = raw + assert hasattr(raw, "_projector") + self._filenames = raw._filenames + self._raw_extras = raw._raw_extras + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + return self.__raw.__class__._read_segment_file( + self, data, idx, fi, start, stop, cals, mult + ) + + @property + def filenames(self) -> tuple[Path, ...]: + return tuple(self._filenames) + + +class _RawShell: + """Create a temporary raw object.""" + + def __init__(self): + self.first_samp = None + self.last_samp = None + self._first_time = None + self._last_time = None + self._cals = None + self._projector = None + + @property + def n_times(self): # noqa: D102 + return self.last_samp - self.first_samp + 1 + + @property + def annotations(self): # noqa: D102 + return self._annotations + + def set_annotations(self, annotations): + if annotations is None: + annotations = Annotations([], [], [], None) + self._annotations = annotations.copy() + + +############################################################################### +# Writing + +# Assume we never hit more than 100 splits, like for epochs +MAX_N_SPLITS = 100 + + +def _write_raw(raw_fid_writer, fpath, split_naming, overwrite): + """Write raw file with splitting.""" + dir_path = fpath.parent + _check_fname( + dir_path, + overwrite="read", + must_exist=True, + name="parent directory", + need_dir=True, + ) + # We have to create one extra filename here to make the for loop below happy, + # but it will raise an error if it actually gets used + split_fnames = _make_split_fnames( + fpath.name, n_splits=MAX_N_SPLITS + 1, split_naming=split_naming + ) + is_next_split, prev_fname = True, None + output_fnames = [] + for part_idx in range(0, MAX_N_SPLITS): + if not is_next_split: + break + bids_special_behavior = part_idx == 0 and split_naming == "bids" + if bids_special_behavior: + reserved_fname = dir_path / split_fnames[0] + logger.info(f"Reserving possible split file {reserved_fname.name}") + _check_fname(reserved_fname, overwrite) + reserved_ctx = _ReservedFilename(reserved_fname) + use_fpath = fpath + else: + reserved_ctx = nullcontext() + use_fpath = dir_path / split_fnames[part_idx] + next_fname = split_fnames[part_idx + 1] + _check_fname(use_fpath, overwrite) + + logger.info(f"Writing {use_fpath}") + with start_and_end_file(use_fpath) as fid, reserved_ctx: + is_next_split = raw_fid_writer.write(fid, part_idx, prev_fname, next_fname) + logger.info(f"Closing {use_fpath}") + if bids_special_behavior and is_next_split: + logger.info(f"Renaming BIDS split file {fpath.name}") + prev_fname = dir_path / split_fnames[0] + shutil.move(use_fpath, prev_fname) + output_fnames.append(prev_fname) + else: + output_fnames.append(use_fpath) + prev_fname = use_fpath + else: + raise RuntimeError(f"Exceeded maximum number of splits ({MAX_N_SPLITS}).") + + logger.info("[done]") + return output_fnames + + +class _ReservedFilename: + def 
__init__(self, fname: Path): + self.fname = fname + assert fname.parent.exists(), fname + with open(fname, "w"): + pass + self.remove = True + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + if self.remove: + self.fname.unlink() + + +@dataclass(frozen=True) +class _RawFidWriterCfg: + buffer_size: int + split_size: int + drop_small_buffer: bool + fmt: str + reset_range: bool = field(init=False) + data_type: int = field(init=False) + + def __post_init__(self): + type_dict = dict( + short=FIFF.FIFFT_DAU_PACK16, + int=FIFF.FIFFT_INT, + single=FIFF.FIFFT_FLOAT, + double=FIFF.FIFFT_DOUBLE, + ) + _check_option("fmt", self.fmt, type_dict.keys()) + reset_dict = dict(short=False, int=False, single=True, double=True) + object.__setattr__(self, "reset_range", reset_dict[self.fmt]) + object.__setattr__(self, "data_type", type_dict[self.fmt]) + + +class _RawFidWriter: + def __init__(self, raw, info, picks, projector, start, stop, cfg): + self.raw = raw + self.picks = _picks_to_idx(info, picks, "all", ()) + self.info = pick_info(info, sel=self.picks, copy=True) + for k in range(self.info["nchan"]): + # Scan numbers may have been messed up + self.info["chs"][k]["scanno"] = k + 1 # scanno starts at 1 in FIF format + if cfg.reset_range: + self.info["chs"][k]["range"] = 1.0 + self.projector = projector + # self.start is the only mutable attribute in this design! + self.start, self.stop = start, stop + self.cfg = cfg + + def write(self, fid, part_idx, prev_fname, next_fname): + self._check_start_stop_within_bounds() + start_block(fid, FIFF.FIFFB_MEAS) + _write_raw_metadata( + fid, + self.info, + self.cfg.data_type, + self.cfg.reset_range, + self.raw.annotations, + ) + self.start = _write_raw_data( + self.raw, + self.info, + self.picks, + fid, + part_idx, + self.start, + self.stop, + self.cfg.buffer_size, + prev_fname, + self.cfg.split_size, + next_fname, + self.projector, + self.cfg.drop_small_buffer, + self.cfg.fmt, + ) + end_block(fid, FIFF.FIFFB_MEAS) + is_next_split = self.start < self.stop + return is_next_split + + def _check_start_stop_within_bounds(self): + # we've done something wrong if we hit this + n_times_max = len(self.raw.times) + error_msg = ( + f"Can't write raw file with no data: {self.start} -> {self.stop} " + f"(max: {n_times_max}) requested" + ) + if self.start >= self.stop or self.stop > n_times_max: + raise RuntimeError(error_msg) + + +def _write_raw_data( + raw, + info, + picks, + fid, + part_idx, + start, + stop, + buffer_size, + prev_fname, + split_size, + next_fname, + projector, + drop_small_buffer, + fmt, +): + # Start the raw data + data_kind = "IAS_" if info.get("maxshield", False) else "" + data_kind = getattr(FIFF, f"FIFFB_{data_kind}RAW_DATA") + start_block(fid, data_kind) + + first_samp = raw.first_samp + start + if first_samp != 0: + write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first_samp) + + # previous file name and id + if part_idx > 0 and prev_fname is not None: + start_block(fid, FIFF.FIFFB_REF) + write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_PREV_FILE) + write_string(fid, FIFF.FIFF_REF_FILE_NAME, prev_fname) + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_REF_FILE_ID, info["meas_id"]) + write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx - 1) + end_block(fid, FIFF.FIFFB_REF) + + pos_prev = fid.tell() + if pos_prev > split_size: + raise ValueError( + 'file is larger than "split_size" after writing ' + "measurement information, you must use a larger " + f"value for split size: {pos_prev} plus enough bytes for " + 
"the chosen buffer_size" + ) + + # Check to see if this has acquisition skips and, if so, if we can + # write out empty buffers instead of zeroes + firsts = list(range(start, stop, buffer_size)) + lasts = np.array(firsts) + buffer_size + if lasts[-1] > stop: + lasts[-1] = stop + sk_onsets, sk_ends = _annotations_starts_stops(raw, "bad_acq_skip") + do_skips = False + if len(sk_onsets) > 0: + if np.isin(sk_onsets, firsts).all() and np.isin(sk_ends, lasts).all(): + do_skips = True + else: + if part_idx == 0: + warn( + "Acquisition skips detected but did not fit evenly into " + "output buffer_size, will be written as zeroes." + ) + + cals = [ch["cal"] * ch["range"] for ch in info["chs"]] + # Write the blocks + n_current_skip = 0 + new_start = start + for first, last in zip(firsts, lasts): + if do_skips: + if ((first >= sk_onsets) & (last <= sk_ends)).any(): + # Track how many we have + n_current_skip += 1 + continue + elif n_current_skip > 0: + # Write out an empty buffer instead of data + write_int(fid, FIFF.FIFF_DATA_SKIP, n_current_skip) + # These two NOPs appear to be optional (MaxFilter does not do + # it, but some acquisition machines do) so let's not bother. + # write_nop(fid) + # write_nop(fid) + n_current_skip = 0 + data, times = raw[picks, first:last] + assert len(times) == last - first + + if projector is not None: + data = np.dot(projector, data) + + if drop_small_buffer and (first > start) and (len(times) < buffer_size): + logger.info("Skipping data chunk due to small buffer ... [done]") + break + logger.debug(f"Writing FIF {first:6d} ... {last:6d} ...") + _write_raw_buffer(fid, data, cals, fmt) + + pos = fid.tell() + this_buff_size_bytes = pos - pos_prev + overage = pos - split_size + _NEXT_FILE_BUFFER + if overage > 0: + # This should occur on the first buffer write of the file, so + # we should mention the space required for the meas info + raise ValueError( + f"buffer size ({this_buff_size_bytes}) is too large for the " + f"given split size ({split_size}) " + f"by {overage} bytes after writing info ({pos_prev}) and " + "leaving enough space " + f'for end tags ({_NEXT_FILE_BUFFER}): decrease "buffer_size_sec" ' + 'or increase "split_size".' + ) + + new_start = last + # Split files if necessary, leave some space for next file info + # make sure we check to make sure we actually *need* another buffer + # with the "and" check + if ( + pos >= split_size - this_buff_size_bytes - _NEXT_FILE_BUFFER + and first + buffer_size < stop + ): + start_block(fid, FIFF.FIFFB_REF) + write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE) + write_string(fid, FIFF.FIFF_REF_FILE_NAME, next_fname.name) + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_REF_FILE_ID, info["meas_id"]) + write_int(fid, FIFF.FIFF_REF_FILE_NUM, part_idx + 1) + end_block(fid, FIFF.FIFFB_REF) + + break + pos_prev = pos + + end_block(fid, data_kind) + return new_start + + +@fill_doc +def _write_raw_metadata(fid, info, data_type, reset_range, annotations): + """Start write raw data in file. + + Parameters + ---------- + fid : file + The created file. + %(info_not_none)s + data_type : int + The data_type in case it is necessary. Should be 4 (FIFFT_FLOAT), + 5 (FIFFT_DOUBLE), 16 (FIFFT_DAU_PACK16), or 3 (FIFFT_INT) for raw data. + reset_range : bool + If True, the info['chs'][k]['range'] parameter will be set to unity. + annotations : instance of Annotations + The annotations to write. 
+ + """ + # + # Create the file and save the essentials + # + write_id(fid, FIFF.FIFF_BLOCK_ID) + if info["meas_id"] is not None: + write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info["meas_id"]) + + write_meas_info(fid, info, data_type=data_type, reset_range=reset_range) + + # + # Annotations + # + if len(annotations) > 0: # don't save empty annot + _write_annotations(fid, annotations) + + +def _write_raw_buffer(fid, buf, cals, fmt): + """Write raw buffer. + + Parameters + ---------- + fid : file descriptor + an open raw data file. + buf : array + The buffer to write. + cals : array + Calibration factors. + fmt : str + 'short', 'int', 'single', or 'double' for 16/32 bit int or 32/64 bit + float for each item. This will be doubled for complex datatypes. Note + that short and int formats cannot be used for complex data. + """ + if buf.shape[0] != len(cals): + raise ValueError("buffer and calibration sizes do not match") + + _check_option("fmt", fmt, ["short", "int", "single", "double"]) + + cast_int = False # allow unsafe cast + if np.isrealobj(buf): + if fmt == "short": + write_function = write_dau_pack16 + cast_int = True + elif fmt == "int": + write_function = write_int + cast_int = True + elif fmt == "single": + write_function = write_float + else: + write_function = write_double + else: + if fmt == "single": + write_function = write_complex64 + elif fmt == "double": + write_function = write_complex128 + else: + raise ValueError( + 'only "single" and "double" supported for writing complex data' + ) + + buf = buf / np.ravel(cals)[:, None] + if cast_int: + buf = buf.astype(np.int32) + write_function(fid, FIFF.FIFF_DATA_BUFFER, buf) + + +def _check_raw_compatibility(raw): + """Ensure all instances of Raw have compatible parameters.""" + for ri in range(1, len(raw)): + if not isinstance(raw[ri], type(raw[0])): + raise ValueError(f"raw[{ri}] type must match") + for key in ("nchan", "sfreq"): + a, b = raw[ri].info[key], raw[0].info[key] + if a != b: + raise ValueError( + f"raw[{ri}].info[{key}] must match:\n{repr(a)} != {repr(b)}" + ) + for kind in ("bads", "ch_names"): + set1 = set(raw[0].info[kind]) + set2 = set(raw[ri].info[kind]) + mismatch = set1.symmetric_difference(set2) + if mismatch: + raise ValueError( + f"raw[{ri}]['info'][{kind}] do not match: {sorted(mismatch)}" + ) + if any(raw[ri]._cals != raw[0]._cals): + raise ValueError(f"raw[{ri}]._cals must match") + if len(raw[0].info["projs"]) != len(raw[ri].info["projs"]): + raise ValueError("SSP projectors in raw files must be the same") + if not all( + _proj_equal(p1, p2) + for p1, p2 in zip(raw[0].info["projs"], raw[ri].info["projs"]) + ): + raise ValueError("SSP projectors in raw files must be the same") + if any(r.orig_format != raw[0].orig_format for r in raw): + warn( + "raw files do not all have the same data format, could result in " + 'precision mismatch. Setting raw.orig_format="unknown"' + ) + raw[0].orig_format = "unknown" + + +@verbose +def concatenate_raws( + raws, preload=None, events_list=None, *, on_mismatch="raise", verbose=None +): + """Concatenate `~mne.io.Raw` instances as if they were continuous. + + .. note:: ``raws[0]`` is modified in-place to achieve the concatenation. + Boundaries of the raw files are annotated bad. If you wish to use + the data as continuous recording, you can remove the boundary + annotations after concatenation (see + :meth:`mne.Annotations.delete`). + + Parameters + ---------- + raws : list + List of `~mne.io.Raw` instances to concatenate (in order). 
+ %(preload_concatenate)s + events_list : None | list + The events to concatenate. Defaults to ``None``. + %(on_mismatch_info)s + %(verbose)s + + Returns + ------- + raw : instance of Raw + The result of the concatenation (first Raw instance passed in). + events : ndarray of int, shape (n_events, 3) + The events. Only returned if ``event_list`` is not None. + """ + for idx, raw in enumerate(raws[1:], start=1): + _ensure_infos_match( + info1=raws[0].info, + info2=raw.info, + name=f"raws[{idx}]", + on_mismatch=on_mismatch, + ) + + if events_list is not None: + if len(events_list) != len(raws): + raise ValueError( + "`raws` and `event_list` are required to be of the same length" + ) + first, last = zip(*[(r.first_samp, r.last_samp) for r in raws]) + events = concatenate_events(events_list, first, last) + raws[0].append(raws[1:], preload) + + if events_list is None: + return raws[0] + else: + return raws[0], events + + +@fill_doc +def match_channel_orders(insts=None, copy=True, *, raws=None): + """Ensure consistent channel order across instances (Raw, Epochs, or Evoked). + + Parameters + ---------- + insts : list + List of :class:`~mne.io.Raw`, :class:`~mne.Epochs`, + or :class:`~mne.Evoked` instances to order. + %(copy_df)s + raws : list + This parameter is deprecated and will be removed in mne version 1.9. + Please use ``insts`` instead. + + Returns + ------- + list of Raw | list of Epochs | list of Evoked + List of instances (Raw, Epochs, or Evoked) with channel orders matched + according to the order they had in the first item in the ``insts`` list. + """ + # XXX: remove "raws" parameter and logic below with MNE version 1.9 + # and remove default parameter value of insts + if raws is not None: + warn( + "The ``raws`` parameter is deprecated and will be removed in version " + "1.9. Use the ``insts`` parameter to suppress this warning.", + DeprecationWarning, + ) + insts = raws + elif insts is None: + # both insts and raws is None + raise ValueError( + "You need to pass a list of Raw, Epochs, or Evoked to ``insts``." + ) + insts = deepcopy(insts) if copy else insts + ch_order = insts[0].ch_names + for inst in insts[1:]: + inst.reorder_channels(ch_order) + return insts + + +def _check_maxshield(allow_maxshield): + """Warn or error about MaxShield.""" + msg = ( + "This file contains raw Internal Active " + "Shielding data. It may be distorted. Elekta " + "recommends it be run through MaxFilter to " + "produce reliable results. Consider closing " + "the file and running MaxFilter on the data." + ) + if allow_maxshield: + if not (isinstance(allow_maxshield, str) and allow_maxshield == "yes"): + warn(msg) + else: + msg += ( + " Use allow_maxshield=True if you are sure you" + " want to load the data despite this warning." + ) + raise ValueError(msg) + + +def _get_fname_rep(fname): + if not _file_like(fname): + out = str(fname) + else: + out = "file-like" + try: + out += f' "{fname.name}"' + except Exception: + pass + return out diff --git a/mne/io/besa/__init__.py b/mne/io/besa/__init__.py new file mode 100644 index 0000000..a91614c --- /dev/null +++ b/mne/io/besa/__init__.py @@ -0,0 +1,7 @@ +"""Support for various BESA file formats.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .besa import read_evoked_besa diff --git a/mne/io/besa/besa.py b/mne/io/besa/besa.py new file mode 100644 index 0000000..d6d4ee9 --- /dev/null +++ b/mne/io/besa/besa.py @@ -0,0 +1,274 @@ +# Authors: The MNE-Python contributors. 
+# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from collections import OrderedDict +from pathlib import Path + +import numpy as np + +from ..._fiff.meas_info import create_info +from ...evoked import EvokedArray +from ...utils import fill_doc, logger, verbose + + +@fill_doc +@verbose +def read_evoked_besa(fname, verbose=None): + """Reader function for BESA ``.avr`` or ``.mul`` files. + + When a ``.elp`` sidecar file is present, it will be used to determine + electrode information. + + Parameters + ---------- + fname : path-like + Path to the ``.avr`` or ``.mul`` file. + %(verbose)s + + Returns + ------- + ev : Evoked + The evoked data in the .avr or .mul file. + """ + fname = Path(fname) + if fname.suffix == ".avr": + return _read_evoked_besa_avr(fname, verbose) + elif fname.suffix == ".mul": + return _read_evoked_besa_mul(fname, verbose) + else: + raise ValueError("Filename must end in either .avr or .mul") + + +@verbose +def _read_evoked_besa_avr(fname, verbose): + """Create EvokedArray from a BESA .avr file.""" + with open(fname) as f: + header = f.readline().strip() + + # There are two versions of .avr files. The old style, generated by + # BESA 1, 2 and 3 does not define Nchan and does not have channel names + # in the file. + new_style = "Nchan=" in header + if new_style: + ch_names = f.readline().strip().split() + else: + ch_names = None + + fields = _parse_header(header) + data = np.loadtxt(fname, skiprows=2 if new_style else 1, ndmin=2) + ch_types = _read_elp_sidecar(fname) + + # Consolidate channel names + if new_style: + if len(ch_names) != len(data): + raise RuntimeError( + "Mismatch between the number of channel names defined in " + f"the .avr file ({len(ch_names)}) and the number of rows " + f"in the data matrix ({len(data)})." + ) + else: + # Determine channel names from the .elp sidecar file + if ch_types is not None: + ch_names = list(ch_types.keys()) + if len(ch_names) != len(data): + raise RuntimeError( + "Mismatch between the number of channels " + f"defined in the .avr file ({len(data)}) " + f"and .elp file ({len(ch_names)})." + ) + else: + logger.info( + "No .elp file found and no channel names present in " + "the .avr file. Falling back to generic names. " + ) + ch_names = [f"CH{i + 1:02d}" for i in range(len(data))] + + # Consolidate channel types + if ch_types is None: + logger.info("Marking all channels as EEG.") + ch_types = ["eeg"] * len(ch_names) + else: + ch_types = [ch_types[ch] for ch in ch_names] + + # Go over all the header fields and make sure they are all defined to + # something sensible. + if "Npts" in fields: + fields["Npts"] = int(fields["Npts"]) + if fields["Npts"] != data.shape[1]: + logger.warn( + f"The size of the data matrix ({data.shape}) does not " + f'match the "Npts" field ({fields["Npts"]}).' + ) + if "Nchan" in fields: + fields["Nchan"] = int(fields["Nchan"]) + if fields["Nchan"] != data.shape[0]: + logger.warn( + f"The size of the data matrix ({data.shape}) does not " + f'match the "Nchan" field ({fields["Nchan"]}).' + ) + if "DI" in fields: + fields["DI"] = float(fields["DI"]) + else: + raise RuntimeError( + 'No "DI" field present. Could not determine sampling frequency.' + ) + if "TSB" in fields: + fields["TSB"] = float(fields["TSB"]) + else: + fields["TSB"] = 0 + if "SB" in fields: + fields["SB"] = float(fields["SB"]) + else: + fields["SB"] = 1.0 + if "SegmentName" not in fields: + fields["SegmentName"] = "" + + # Build the Evoked object based on the header fields. 
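+    # Per the header fields parsed above: DI appears to be the sampling
+    # interval in ms (hence 1000 / DI Hz), TSB the sweep start in ms (hence
+    # / 1000 s), and SB a bins-per-µV scale (hence / SB / 1e6 to get volts).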
+    info = create_info(ch_names, sfreq=1000 / fields["DI"], ch_types=ch_types)
+    return EvokedArray(
+        data / fields["SB"] / 1e6,
+        info,
+        tmin=fields["TSB"] / 1000,
+        comment=fields["SegmentName"],
+        verbose=verbose,
+    )
+
+
+@verbose
+def _read_evoked_besa_mul(fname, verbose):
+    """Create EvokedArray from a BESA .mul file."""
+    with open(fname) as f:
+        header = f.readline().strip()
+        ch_names = f.readline().strip().split()
+
+    fields = _parse_header(header)
+    data = np.loadtxt(fname, skiprows=2, ndmin=2)
+
+    if len(ch_names) != data.shape[1]:
+        raise RuntimeError(
+            "Mismatch between the number of channel names "
+            f"defined in the .mul file ({len(ch_names)}) "
+            "and the number of columns in the data matrix "
+            f"({data.shape[1]})."
+        )
+
+    # Consolidate channel types
+    ch_types = _read_elp_sidecar(fname)
+    if ch_types is None:
+        logger.info("Marking all channels as EEG.")
+        ch_types = ["eeg"] * len(ch_names)
+    else:
+        ch_types = [ch_types[ch] for ch in ch_names]
+
+    # Go over all the header fields and make sure they are all defined to
+    # something sensible.
+    if "TimePoints" in fields:
+        fields["TimePoints"] = int(fields["TimePoints"])
+        if fields["TimePoints"] != data.shape[0]:
+            logger.warning(
+                f"The size of the data matrix ({data.shape}) does not "
+                f'match the "TimePoints" field ({fields["TimePoints"]}).'
+            )
+    if "Channels" in fields:
+        fields["Channels"] = int(fields["Channels"])
+        if fields["Channels"] != data.shape[1]:
+            logger.warning(
+                f"The size of the data matrix ({data.shape}) does not "
+                f'match the "Channels" field ({fields["Channels"]}).'
+            )
+    if "SamplingInterval[ms]" in fields:
+        fields["SamplingInterval[ms]"] = float(fields["SamplingInterval[ms]"])
+    else:
+        raise RuntimeError(
+            'No "SamplingInterval[ms]" field present. Could '
+            "not determine sampling frequency."
+        )
+    if "BeginSweep[ms]" in fields:
+        fields["BeginSweep[ms]"] = float(fields["BeginSweep[ms]"])
+    else:
+        fields["BeginSweep[ms]"] = 0.0
+    if "Bins/uV" in fields:
+        fields["Bins/uV"] = float(fields["Bins/uV"])
+    else:
+        fields["Bins/uV"] = 1.0
+    if "SegmentName" not in fields:
+        fields["SegmentName"] = ""
+
+    # Build the Evoked object based on the header fields.
+    info = create_info(
+        ch_names, sfreq=1000 / fields["SamplingInterval[ms]"], ch_types=ch_types
+    )
+    return EvokedArray(
+        data.T / fields["Bins/uV"] / 1e6,
+        info,
+        tmin=fields["BeginSweep[ms]"] / 1000,
+        comment=fields["SegmentName"],
+        verbose=verbose,
+    )
+
+
+def _parse_header(header):
+    """Parse an .avr or .mul header string into name/val pairs.
+
+    The header line looks like:
+        Npts= 256 TSB= 0.000 DI= 4.000000 SB= 1.000 SC= 200.0 Nchan= 27
+    There is no consistent use of separator characters, so parsing this is
+    a bit iffy.
+
+    Parameters
+    ----------
+    header : str
+        The first line of the file.
+
+    Returns
+    -------
+    fields : dict
+        The parsed header fields.
+    """
+    parts = header.split()  # Splits on one or more spaces
+    name_val_pairs = zip(parts[::2], parts[1::2])
+    return dict((name.replace("=", ""), val) for name, val in name_val_pairs)
+
+
+def _read_elp_sidecar(fname):
+    """Read a possible .elp sidecar file with electrode information.
+
+    The reason we don't use ``read_custom_montage`` for this is that we are
+    interested in the channel types, which a DigMontage object does not
+    provide.
+
+    Parameters
+    ----------
+    fname : Path
+        The path of the .avr or .mul file. The corresponding .elp file will be
+        derived from this path.
+
+    Returns
+    -------
+    ch_types : OrderedDict | None
+        If the sidecar file exists, returns a dictionary mapping channel names
+        to channel types. Otherwise returns ``None``.
+    """
+    fname_elp = fname.parent / (fname.stem + ".elp")
+    if not fname_elp.exists():
+        logger.info(f"No {fname_elp} file present containing electrode information.")
+        return None
+
+    logger.info(f"Reading electrode names and types from {fname_elp}")
+    ch_types = OrderedDict()
+    with open(fname_elp) as f:
+        lines = f.readlines()
+        if len(lines[0].split()) > 3:
+            # Channel types present
+            for line in lines:
+                ch_type, ch_name = line.split()[:2]
+                ch_types[ch_name] = ch_type.lower()
+        else:
+            # No channel types present
+            logger.info(
+                "No channel types present in .elp file. Marking all channels as EEG."
+            )
+            for line in lines:
+                ch_name = line.split()[0]
+                ch_types[ch_name] = "eeg"
+    return ch_types
diff --git a/mne/io/boxy/__init__.py b/mne/io/boxy/__init__.py
new file mode 100644
index 0000000..ac47ccc
--- /dev/null
+++ b/mne/io/boxy/__init__.py
@@ -0,0 +1,7 @@
+"""fNIRS module for conversion to FIF."""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from .boxy import read_raw_boxy
diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py
new file mode 100644
index 0000000..9a13173
--- /dev/null
+++ b/mne/io/boxy/boxy.py
@@ -0,0 +1,283 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import re
+
+import numpy as np
+
+from ..._fiff.meas_info import create_info
+from ..._fiff.utils import _mult_cal_one
+from ...annotations import Annotations
+from ...utils import _check_fname, fill_doc, logger, verbose
+from ..base import BaseRaw
+
+
+@fill_doc
+def read_raw_boxy(fname, preload=False, verbose=None) -> "RawBOXY":
+    """Reader for an optical imaging recording.
+
+    This function has been tested using the ISS Imagent I and II systems
+    and versions 0.40/0.84 of the BOXY recording software.
+
+    Parameters
+    ----------
+    fname : path-like
+        Path to the BOXY data file.
+    %(preload)s
+    %(verbose)s
+
+    Returns
+    -------
+    raw : instance of RawBOXY
+        A Raw object containing BOXY data.
+        See :class:`mne.io.Raw` for documentation of attributes and methods.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods of RawBOXY.
+    """
+    return RawBOXY(fname, preload, verbose)
+
+
+@fill_doc
+class RawBOXY(BaseRaw):
+    """Raw object from a BOXY optical imaging file.
+
+    Parameters
+    ----------
+    fname : path-like
+        Path to the BOXY data file.
+    %(preload)s
+    %(verbose)s
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods.
+    """
+
+    @verbose
+    def __init__(self, fname, preload=False, verbose=None):
+        logger.info(f"Loading {fname}")
+
+        # Read header file and grab some info.
+        start_line = np.inf
+        col_names = mrk_col = filetype = mrk_data = end_line = None
+        raw_extras = dict()
+        raw_extras["offsets"] = list()  # keep track of our offsets
+        sfreq = None
+        fname = str(_check_fname(fname, "read", True, "fname"))
+        with open(fname) as fid:
+            line_num = 0
+            i_line = fid.readline()
+            while i_line:
+                # most of our lines will be data lines, so check that first
+                if line_num >= start_line:
+                    assert col_names is not None
+                    assert filetype is not None
+                    if "#DATA ENDS" in i_line:
+                        # Data ends just before this.
+                        end_line = line_num
+                        break
+                    if mrk_col is not None:
+                        if filetype == "non-parsed":
+                            # Non-parsed files have different line lengths.
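+                            # Each non-parsed line carries one source's
+                            # samples, so only lines with the full set of
+                            # columns contain a usable digaux (marker) value.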
+ crnt_line = i_line.rsplit(" ")[0] + temp_data = re.findall(r"[-+]?\d*\.?\d+", crnt_line) + if len(temp_data) == len(col_names): + mrk_data.append( + float( + re.findall(r"[-+]?\d*\.?\d+", crnt_line)[ + mrk_col + ] + ) + ) + else: + crnt_line = i_line.rsplit(" ")[0] + mrk_data.append( + float(re.findall(r"[-+]?\d*\.?\d+", crnt_line)[mrk_col]) + ) + raw_extras["offsets"].append(fid.tell()) + # now proceed with more standard header parsing + elif "BOXY.EXE:" in i_line: + boxy_ver = re.findall(r"\d*\.\d+", i_line.rsplit(" ")[-1])[0] + # Check that the BOXY version is supported + if boxy_ver not in ["0.40", "0.84"]: + raise RuntimeError( + f"MNE has not been tested with BOXY version ({boxy_ver})" + ) + elif "Detector Channels" in i_line: + raw_extras["detect_num"] = int(i_line.rsplit(" ")[0]) + elif "External MUX Channels" in i_line: + raw_extras["source_num"] = int(i_line.rsplit(" ")[0]) + elif "Update Rate (Hz)" in i_line or "Updata Rate (Hz)" in i_line: + # Version 0.40 of the BOXY recording software + # (and possibly other versions lower than 0.84) contains a + # typo in the raw data file where 'Update Rate' is spelled + # "Updata Rate. This will account for this typo. + sfreq = float(i_line.rsplit(" ")[0]) + elif "#DATA BEGINS" in i_line: + # Data should start a couple lines later. + start_line = line_num + 3 + elif line_num == start_line - 2: + # Grab names for each column of data. + raw_extras["col_names"] = col_names = re.findall( + r"\w+\-\w+|\w+\-\d+|\w+", i_line.rsplit(" ")[0] + ) + if "exmux" in col_names: + # Change filetype based on data organisation. + filetype = "non-parsed" + else: + filetype = "parsed" + if "digaux" in col_names: + mrk_col = col_names.index("digaux") + mrk_data = list() + # raw_extras['offsets'].append(fid.tell()) + elif line_num == start_line - 1: + raw_extras["offsets"].append(fid.tell()) + line_num += 1 + i_line = fid.readline() + assert sfreq is not None + raw_extras.update(filetype=filetype, start_line=start_line, end_line=end_line) + + # Label each channel in our data, for each data type (DC, AC, Ph). + # Data is organised by channels x timepoint, where the first + # 'source_num' rows correspond to the first detector, the next + # 'source_num' rows correspond to the second detector, and so on. + ch_names = list() + ch_types = list() + cals = list() + for det_num in range(raw_extras["detect_num"]): + for src_num in range(raw_extras["source_num"]): + for i_type, ch_type in [ + ("DC", "fnirs_cw_amplitude"), + ("AC", "fnirs_fd_ac_amplitude"), + ("Ph", "fnirs_fd_phase"), + ]: + ch_names.append(f"S{src_num + 1}_D{det_num + 1} {i_type}") + ch_types.append(ch_type) + cals.append(np.pi / 180.0 if i_type == "Ph" else 1.0) + + # Create info structure. + info = create_info(ch_names, sfreq, ch_types) + for ch, cal in zip(info["chs"], cals): + ch["cal"] = cal + + # Determine how long our data is. + delta = end_line - start_line + assert len(raw_extras["offsets"]) == delta + 1 + if filetype == "non-parsed": + delta //= raw_extras["source_num"] + super().__init__( + info, + preload, + filenames=[fname], + first_samps=[0], + last_samps=[delta - 1], + raw_extras=[raw_extras], + verbose=verbose, + ) + + # Now let's grab our markers, if they are present. + if mrk_data is not None: + mrk_data = np.array(mrk_data, float) + # We only want the first instance of each trigger. 
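+            # A change to a new nonzero value marks an onset; repeats of that
+            # value extend the duration; a return to 0 closes the event and
+            # its length is converted from samples to seconds below.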
+ prev_mrk = 0 + mrk_idx = list() + duration = list() + tmp_dur = 0 + for i_num, i_mrk in enumerate(mrk_data): + if i_mrk != 0 and i_mrk != prev_mrk: + mrk_idx.append(i_num) + if i_mrk != 0 and i_mrk == prev_mrk: + tmp_dur += 1 + if i_mrk == 0 and i_mrk != prev_mrk: + duration.append((tmp_dur + 1) / sfreq) + tmp_dur = 0 + prev_mrk = i_mrk + onset = np.array(mrk_idx) / sfreq + description = mrk_data[mrk_idx] + annot = Annotations(onset, duration, description) + self.set_annotations(annot) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file. + + Boxy file organises data in two ways, parsed or un-parsed. + Regardless of type, output has (n_montages x n_sources x n_detectors + + n_marker_channels) rows, and (n_timepoints x n_blocks) columns. + """ + source_num = self._raw_extras[fi]["source_num"] + detect_num = self._raw_extras[fi]["detect_num"] + start_line = self._raw_extras[fi]["start_line"] + end_line = self._raw_extras[fi]["end_line"] + filetype = self._raw_extras[fi]["filetype"] + col_names = self._raw_extras[fi]["col_names"] + offsets = self._raw_extras[fi]["offsets"] + boxy_file = self.filenames[fi] + + # Non-parsed multiplexes sources, so we need source_num times as many + # lines in that case + if filetype == "parsed": + start_read = start_line + start + stop_read = start_read + (stop - start) + else: + assert filetype == "non-parsed" + start_read = start_line + start * source_num + stop_read = start_read + (stop - start) * source_num + assert start_read >= start_line + assert stop_read <= end_line + + # Possible detector names. + detectors = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[:detect_num] + + # Loop through our data. + one = np.zeros((len(col_names), stop_read - start_read)) + with open(boxy_file) as fid: + # Just a more efficient version of this: + # ii = 0 + # for line_num, i_line in enumerate(fid): + # if line_num >= start_read: + # if line_num >= stop_read: + # break + # # Grab actual data. + # i_data = i_line.strip().split() + # one[:len(i_data), ii] = i_data + # ii += 1 + fid.seek(offsets[start_read - start_line], 0) + for oo in one.T: + i_data = fid.readline().strip().split() + oo[: len(i_data)] = i_data + + # in theory we could index in the loop above, but it's painfully slow, + # so let's just take a hopefully minor memory hit + if filetype == "non-parsed": + ch_idxs = [ + col_names.index(f"{det}-{i_type}") + for det in detectors + for i_type in ["DC", "AC", "Ph"] + ] + one = ( + one[ch_idxs] + .reshape( # each "time point" multiplexes srcs + len(detectors), 3, -1, source_num + ) + .transpose( # reorganize into (det, source, DC/AC/Ph, t) order + 0, 3, 1, 2 + ) + .reshape( # reshape the way we store it (det x source x DAP, t) + len(detectors) * source_num * 3, -1 + ) + ) + else: + assert filetype == "parsed" + ch_idxs = [ + col_names.index(f"{det}-{i_type}{si + 1}") + for det in detectors + for si in range(source_num) + for i_type in ["DC", "AC", "Ph"] + ] + one = one[ch_idxs] + + # Place our data into the data object in place. + _mult_cal_one(data, one, idx, cals, mult) diff --git a/mne/io/brainvision/__init__.py b/mne/io/brainvision/__init__.py new file mode 100644 index 0000000..860e157 --- /dev/null +++ b/mne/io/brainvision/__init__.py @@ -0,0 +1,7 @@ +"""BrainVision module for conversion to FIF.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
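For context, loading a BOXY recording with the reader above typically looks like the following minimal sketch; the file path is a placeholder.

```python
import mne

# Hypothetical path to an ISS Imagent recording saved by the BOXY software.
raw = mne.io.read_raw_boxy("boxy_recording.txt", preload=True)

# The reader creates DC/AC/Ph channels for every source-detector pair and
# turns transitions on the "digaux" column into annotations.
print(raw.info["sfreq"])
print(raw.annotations)
```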
+ +from .brainvision import read_raw_brainvision diff --git a/mne/io/brainvision/brainvision.py b/mne/io/brainvision/brainvision.py new file mode 100644 index 0000000..07be6ab --- /dev/null +++ b/mne/io/brainvision/brainvision.py @@ -0,0 +1,1144 @@ +"""Conversion tool from BrainVision EEG to FIF.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import configparser +import os +import os.path as op +import re +from datetime import datetime, timezone +from io import StringIO + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.utils import _mult_cal_one, _read_segments_file +from ...annotations import Annotations, read_annotations +from ...channels import make_dig_montage +from ...defaults import HEAD_SIZE_DEFAULT +from ...transforms import _sph_to_cart +from ...utils import _DefaultEventParser, fill_doc, logger, verbose, warn +from ..base import BaseRaw + + +@fill_doc +class RawBrainVision(BaseRaw): + """Raw object from Brain Vision EEG file. + + Parameters + ---------- + vhdr_fname : path-like + Path to the EEG header file. + eog : list or tuple + Names of channels or list of indices that should be designated + EOG channels. Values should correspond to the header file. + Default is ``('HEOGL', 'HEOGR', 'VEOGb')``. + misc : list or tuple of str | ``'auto'`` + Names of channels or list of indices that should be designated + MISC channels. Values should correspond to the electrodes + in the header file. If ``'auto'``, units in header file are used for + inferring misc channels. Default is ``'auto'``. + scale : float + The scaling factor for EEG data. Unless specified otherwise by + header file, units are in microvolts. Default scale factor is 1. + ignore_marker_types : bool + If ``True``, ignore marker types and only use marker descriptions. Default is + ``False``. + + .. versionadded:: 1.8 + %(preload)s + %(verbose)s + + Attributes + ---------- + impedances : dict + A dictionary of all electrodes and their impedances. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + + Notes + ----- + If the BrainVision header file contains impedance measurements, these may be + accessed using ``raw.impedances`` after reading using this function. However, + this attribute will NOT be available after a save and re-load of the data. + That is, it is only available when reading data directly from the BrainVision + header file. + + BrainVision markers consist of a type and a description (in addition to other fields + like onset and duration). In contrast, annotations in MNE only have a description. + Therefore, a BrainVision marker of type "Stimulus" and description "S 1" will be + converted to an annotation "Stimulus/S 1" by default. If you want to ignore the + type and instead only use the description, set ``ignore_marker_types=True``, which + will convert the same marker to an annotation "S 1". 
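+
+    Examples
+    --------
+    A minimal sketch (the file name is a placeholder; the sidecar ``.eeg``
+    and ``.vmrk`` files must sit next to the header)::
+
+        raw = mne.io.read_raw_brainvision("recording.vhdr", preload=True)
+        print(raw.impedances)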
+ """ + + _extra_attributes = ("impedances",) + + @verbose + def __init__( + self, + vhdr_fname, + eog=("HEOGL", "HEOGR", "VEOGb"), + misc="auto", + scale=1.0, + ignore_marker_types=False, + preload=False, + verbose=None, + ): # noqa: D107 + # Channel info and events + logger.info(f"Extracting parameters from {vhdr_fname}...") + hdr_fname = op.abspath(vhdr_fname) + ext = op.splitext(hdr_fname)[-1] + ahdr_format = True if ext == ".ahdr" else False + ( + info, + data_fname, + fmt, + order, + n_samples, + mrk_fname, + montage, + orig_units, + ) = _get_hdr_info(hdr_fname, eog, misc, scale) + + with open(data_fname, "rb") as f: + if isinstance(fmt, dict): # ASCII, this will be slow :( + if order == "F": # multiplexed, channels in columns + n_skip = 0 + for ii in range(int(fmt["skiplines"])): + n_skip += len(f.readline()) + offsets = np.cumsum([n_skip] + [len(line) for line in f]) + n_samples = len(offsets) - 1 + elif order == "C": # vectorized, channels, in rows + raise NotImplementedError() + else: + n_data_ch = int(info["nchan"]) + f.seek(0, os.SEEK_END) + n_samples = f.tell() + dtype_bytes = _fmt_byte_dict[fmt] + offsets = None + n_samples = n_samples // (dtype_bytes * n_data_ch) + + orig_format = "single" if isinstance(fmt, dict) else fmt + raw_extras = dict(offsets=offsets, fmt=fmt, order=order, n_samples=n_samples) + super().__init__( + info, + last_samps=[n_samples - 1], + filenames=[data_fname], + orig_format=orig_format, + preload=preload, + verbose=verbose, + raw_extras=[raw_extras], + orig_units=orig_units, + ) + + self.set_montage(montage) + + settings, _, _, _ = _aux_hdr_info(hdr_fname) + split_settings = settings.splitlines() + self.impedances = _parse_impedance(split_settings, self.info["meas_date"]) + + # Get annotations from marker file + annots = read_annotations( + mrk_fname, info["sfreq"], ignore_marker_types=ignore_marker_types + ) + self.set_annotations(annots) + + # Drop the fake ahdr channel if needed + if ahdr_format: + self.drop_channels(_AHDR_CHANNEL_NAME) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + # read data + n_data_ch = self._raw_extras[fi]["orig_nchan"] + fmt = self._raw_extras[fi]["fmt"] + if self._raw_extras[fi]["order"] == "C": + _read_segments_c(self, data, idx, fi, start, stop, cals, mult) + elif isinstance(fmt, str): + dtype = _fmt_dtype_dict[fmt] + _read_segments_file( + self, + data, + idx, + fi, + start, + stop, + cals, + mult, + dtype=dtype, + n_channels=n_data_ch, + ) + else: + offsets = self._raw_extras[fi]["offsets"] + with open(self.filenames[fi], "rb") as fid: + fid.seek(offsets[start]) + block = np.empty((n_data_ch, stop - start)) + for ii in range(stop - start): + line = fid.readline().decode("ASCII") + line = line.strip() + + # Not sure why we special-handle the "," character here, + # but let's just keep this for historical and backward- + # compat reasons + if ( + isinstance(fmt, dict) + and "decimalsymbol" in fmt + and fmt["decimalsymbol"] != "." + ): + line = line.replace(",", ".") + + if " " in line: + line_data = line.split() + elif "," in line: + # likely exported from BrainVision Analyzer? + line_data = line.split(",") + else: + raise RuntimeError( + "Unknown BrainVision data format encountered. " + "Please contact the MNE-Python developers." 
+                        )
+
+                    block[:n_data_ch, ii] = [float(part) for part in line_data]
+            _mult_cal_one(data, block, idx, cals, mult)
+
+
+def _read_segments_c(raw, data, idx, fi, start, stop, cals, mult):
+    """Read chunk of vectorized raw data."""
+    n_samples = raw._raw_extras[fi]["n_samples"]
+    fmt = raw._raw_extras[fi]["fmt"]
+    dtype = _fmt_dtype_dict[fmt]
+    n_bytes = _fmt_byte_dict[fmt]
+    n_channels = raw._raw_extras[fi]["orig_nchan"]
+    block = np.zeros((n_channels, stop - start))
+    with open(raw.filenames[fi], "rb", buffering=0) as fid:
+        ids = np.arange(idx.start, idx.stop) if isinstance(idx, slice) else idx
+        for ch_id in ids:
+            fid.seek(start * n_bytes + ch_id * n_bytes * n_samples)
+            block[ch_id] = np.fromfile(fid, dtype, stop - start)
+    _mult_cal_one(data, block, idx, cals, mult)
+
+
+def _read_mrk(fname, ignore_marker_types=False):
+    """Read annotations from a vmrk/amrk file.
+
+    Parameters
+    ----------
+    fname : str
+        vmrk/amrk file to be read.
+    ignore_marker_types : bool
+        If True, ignore marker types and only use marker descriptions. Default is False.
+
+    Returns
+    -------
+    onset : array, shape (n_annots,)
+        The onsets in seconds.
+    duration : array, shape (n_annots,)
+        The durations in seconds.
+    description : array, shape (n_annots,)
+        The description of each annotation.
+    date_str : str
+        The recording time as a string. Defaults to an empty string if no
+        recording time is found.
+    """
+    # read marker file
+    with open(fname, "rb") as fid:
+        txt = fid.read()
+
+    # We don't actually need to know the encoding for the header line:
+    # the characters in it all belong to ASCII and are thus the
+    # same in Latin-1 and UTF-8.
+    header = txt.decode("ascii", "ignore").split("\n")[0].strip()
+    _check_bv_version(header, "marker")
+
+    # Although the markers themselves are guaranteed to be ASCII (they
+    # consist of numbers and a few reserved words), we should still
+    # decode the file properly here because other (currently unused)
+    # blocks, such as the one specifying the filename, are not
+    # guaranteed to be ASCII.
+
+    try:
+        # If there is an explicit codepage set, use it
+        # (we pretend it's ASCII while searching for the codepage setting).
+        cp_setting = re.search(
+            "Codepage=(.+)", txt.decode("ascii", "ignore"), re.IGNORECASE | re.MULTILINE
+        )
+        codepage = "utf-8"
+        if cp_setting:
+            codepage = cp_setting.group(1).strip()
+            # BrainAmp Recorder also uses the ANSI codepage, which raises a
+            # LookupError in Python; decode it as cp1252 instead.
+            if codepage == "ANSI":
+                codepage = "cp1252"
+        txt = txt.decode(codepage)
+    except UnicodeDecodeError:
+        # If UTF-8 (the new standard) or the explicit codepage setting fails,
+        # fall back to Latin-1, which is the Windows default and the implicit
+        # standard in older recordings.
+        txt = txt.decode("latin-1")
+
+    # extract Marker Infos block
+    m = re.search(r"\[Marker Infos\]", txt, re.IGNORECASE)
+    if not m:
+        return np.array(list()), np.array(list()), np.array(list()), ""
+
+    mk_txt = txt[m.end() :]
+    m = re.search(r"^\[.*\]$", mk_txt)
+    if m:
+        mk_txt = mk_txt[: m.start()]
+
+    # extract event information
+    items = re.findall(r"^Mk\d+=(.*)", mk_txt, re.MULTILINE)
+    onset, duration, description = list(), list(), list()
+    date_str = ""
+    for info in items:
+        info_data = info.split(",")
+        mtype, mdesc, this_onset, this_duration = info_data[:4]
+        # commas in mtype and mdesc are handled as "\1".
convert back to comma + mtype = mtype.replace(r"\1", ",") + mdesc = mdesc.replace(r"\1", ",") + if date_str == "" and len(info_data) == 5 and mtype == "New Segment": + # to handle the origin of time and handle the presence of multiple + # New Segment annotations. We only keep the first one that is + # different from an empty string for date_str. + date_str = info_data[-1] + + this_duration = int(this_duration) if this_duration.isdigit() else 0 + duration.append(this_duration) + onset.append(int(this_onset) - 1) # BV is 1-indexed, not 0-indexed + if not ignore_marker_types: + description.append(mtype + "/" + mdesc) + else: + description.append(mdesc) + + return np.array(onset), np.array(duration), np.array(description), date_str + + +def _read_annotations_brainvision(fname, sfreq="auto", ignore_marker_types=False): + """Create Annotations from BrainVision vmrk/amrk. + + This function reads a .vmrk or .amrk file and creates an :class:`mne.Annotations` + object. + + Parameters + ---------- + fname : str | object + The path to the .vmrk/.amrk file. + sfreq : float | 'auto' + The sampling frequency in the file. This is necessary because Annotations are + expressed in seconds and vmrk/amrk files are in samples. If set to 'auto' then + the sfreq is taken from the .vhdr/.ahdr file with the same name (without file + extension). So data.vmrk/amrk looks for sfreq in data.vhdr or, if it does not + exist, in data.ahdr. + ignore_marker_types : bool + If True, ignore marker types and only use marker descriptions. Default is False. + + Returns + ------- + annotations : instance of Annotations + The annotations present in the file. + """ + onset, duration, description, date_str = _read_mrk( + fname, ignore_marker_types=ignore_marker_types + ) + orig_time = _str_to_meas_date(date_str) + + if sfreq == "auto": + hdr_fname = op.splitext(fname)[0] + ".vhdr" + # if vhdr file does not exist assume that the format is ahdr + if not op.exists(hdr_fname): + hdr_fname = op.splitext(fname)[0] + ".ahdr" + logger.info(f"Finding 'sfreq' from header file: {hdr_fname}") + _, _, _, info = _aux_hdr_info(hdr_fname) + sfreq = info["sfreq"] + + onset = np.array(onset, dtype=float) / sfreq + duration = np.array(duration, dtype=float) / sfreq + annotations = Annotations( + onset=onset, duration=duration, description=description, orig_time=orig_time + ) + return annotations + + +def _check_bv_version(header, kind): + """Check the header version.""" + _data_err = ( + "MNE-Python currently only supports %s versions 1.0 and 2.0, got unparsable " + "%r. Contact MNE-Python developers for support." + ) + # optional space, optional Core or V-Amp, optional Exchange, + # Version/Header, optional comma, 1/2 + _data_re = r"Brain ?Vision( Core| V-Amp)? Data( Exchange)? %s File,? Version %s\.0" + + assert kind in ("header", "marker") + + for version in range(1, 3): + this_re = _data_re % (kind.capitalize(), version) + if re.search(this_re, header) is not None: + return version + else: + if header == "": + warn(f"Missing header in {kind} file.") + else: + warn(_data_err % (kind, header)) + + +_orientation_dict = dict(MULTIPLEXED="F", VECTORIZED="C") +_fmt_dict = dict(INT_16="short", INT_32="int", IEEE_FLOAT_32="single") +_fmt_byte_dict = dict(short=2, int=4, single=4) +_fmt_dtype_dict = dict(short=" 0: + misc += to_misc + warn( + f"No coordinate information found for channels {to_misc}. Setting " + "channel types to misc. To avoid this warning, set channel types " + "explicitly." 
+ ) + + if np.isnan(cals).any(): + raise RuntimeError("Missing channel units") + + # Attempts to extract filtering info from header. If not found, both are + # set to zero. + settings = settings.splitlines() + idx = None + + if "Channels" in settings: + idx = settings.index("Channels") + settings = settings[idx + 1 :] + hp_col, lp_col = 4, 5 + for idx, setting in enumerate(settings): + if re.match(r"#\s+Name", setting): + break + else: + idx = None + + # If software filters are active, then they override the hardware setup + # But we still want to be able to double check the channel names + # for alignment purposes, we keep track of the hardware setting idx + idx_amp = idx + filter_list_has_ch_name = True + + if "S o f t w a r e F i l t e r s" in settings: + idx = settings.index("S o f t w a r e F i l t e r s") + for idx, setting in enumerate(settings[idx + 1 :], idx + 1): + if re.match(r"#\s+Low Cutoff", setting): + hp_col, lp_col = 1, 2 + filter_list_has_ch_name = False + warn( + "Online software filter detected. Using software " + "filter settings and ignoring hardware values" + ) + break + else: + idx = idx_amp + + if idx: + lowpass = [] + highpass = [] + + # for newer BV files, the unit is specified for every channel + # separated by a single space, while for older files, the unit is + # specified in the column headers + divider = r"\s+" + if "Resolution / Unit" in settings[idx]: + shift = 1 # shift for unit + else: + shift = 0 + + # Extract filter units and convert from seconds to Hz if necessary. + # this cannot be done as post-processing as the inverse t-f + # relationship means that the min/max comparisons don't make sense + # unless we know the units. + # + # For reasoning about the s to Hz conversion, see this reference: + # `Ebersole, J. S., & Pedley, T. A. (Eds.). (2003). + # Current practice of clinical electroencephalography. + # Lippincott Williams & Wilkins.`, page 40-41 + header = re.split(r"\s\s+", settings[idx]) + hp_s = "[s]" in header[hp_col] + lp_s = "[s]" in header[lp_col] + + for i, ch in enumerate(ch_names, 1): + if ahdr_format and i == len(ch_names) and ch == _AHDR_CHANNEL_NAME: + break + # double check alignment with channel by using the hw settings + if idx == idx_amp: + line_amp = settings[idx + i] + else: + line_amp = settings[idx_amp + i] + assert line_amp.find(ch) > -1 + + # Correct shift for channel names with spaces + # Header already gives 1 therefore has to be subtracted + if filter_list_has_ch_name: + ch_name_parts = re.split(divider, ch) + real_shift = shift + len(ch_name_parts) - 1 + else: + real_shift = shift + + line = re.split(divider, settings[idx + i]) + highpass.append(line[hp_col + real_shift]) + lowpass.append(line[lp_col + real_shift]) + + if len(highpass) == 0: + pass + elif len(set(highpass)) == 1: + if highpass[0] in ("NaN", "Off"): + pass # Placeholder for future use. 
Highpass set in _empty_info + elif highpass[0] == "DC": + info["highpass"] = 0.0 + else: + info["highpass"] = float(highpass[0]) + if hp_s: + # filter time constant t [secs] to Hz conversion: 1/2*pi*t + info["highpass"] = 1.0 / (2 * np.pi * info["highpass"]) + + else: + heterogeneous_hp_filter = True + if hp_s: + # We convert channels with disabled filters to having + # highpass relaxed / no filters + highpass = [ + float(filt) if filt not in ("NaN", "Off", "DC") else np.inf + for filt in highpass + ] + info["highpass"] = np.max(np.array(highpass, dtype=np.float64)) + # Conveniently enough 1 / np.inf = 0.0, so this works for + # DC / no highpass filter + # filter time constant t [secs] to Hz conversion: 1/2*pi*t + info["highpass"] = 1.0 / (2 * np.pi * info["highpass"]) + + # not exactly the cleanest use of FP, but this makes us + # more conservative in *not* warning. + if info["highpass"] == 0.0 and len(set(highpass)) == 1: + # not actually heterogeneous in effect + # ... just heterogeneously disabled + heterogeneous_hp_filter = False + else: + highpass = [ + float(filt) if filt not in ("NaN", "Off", "DC") else 0.0 + for filt in highpass + ] + info["highpass"] = np.min(np.array(highpass, dtype=np.float64)) + if info["highpass"] == 0.0 and len(set(highpass)) == 1: + # not actually heterogeneous in effect + # ... just heterogeneously disabled + heterogeneous_hp_filter = False + + if heterogeneous_hp_filter: + warn( + "Channels contain different highpass filters. " + f"Lowest (weakest) filter setting ({info['highpass']:0.2f} Hz) " + "will be stored." + ) + + if len(lowpass) == 0: + pass + elif len(set(lowpass)) == 1: + if lowpass[0] in ("NaN", "Off", "0"): + pass # Placeholder for future use. Lowpass set in _empty_info + else: + info["lowpass"] = float(lowpass[0]) + if lp_s: + # filter time constant t [secs] to Hz conversion: 1/2*pi*t + info["lowpass"] = 1.0 / (2 * np.pi * info["lowpass"]) + + else: + heterogeneous_lp_filter = True + if lp_s: + # We convert channels with disabled filters to having + # infinitely relaxed / no filters + lowpass = [ + float(filt) if filt not in ("NaN", "Off", "0") else 0.0 + for filt in lowpass + ] + info["lowpass"] = np.min(np.array(lowpass, dtype=np.float64)) + try: + # filter time constant t [secs] to Hz conversion: 1/2*pi*t + info["lowpass"] = 1.0 / (2 * np.pi * info["lowpass"]) + + except ZeroDivisionError: + if len(set(lowpass)) == 1: + # No lowpass actually set for the weakest setting + # so we set lowpass to the Nyquist frequency + info["lowpass"] = info["sfreq"] / 2.0 + # not actually heterogeneous in effect + # ... just heterogeneously disabled + heterogeneous_lp_filter = False + else: + # no lowpass filter is the weakest filter, + # but it wasn't the only filter + pass + else: + # We convert channels with disabled filters to having + # infinitely relaxed / no filters + lowpass = [ + float(filt) if filt not in ("NaN", "Off", "0") else np.inf + for filt in lowpass + ] + info["lowpass"] = np.max(np.array(lowpass, dtype=np.float64)) + + if np.isinf(info["lowpass"]): + # No lowpass actually set for the weakest setting + # so we set lowpass to the Nyquist frequency + info["lowpass"] = info["sfreq"] / 2.0 + if len(set(lowpass)) == 1: + # not actually heterogeneous in effect + # ... 
just heterogeneously disabled + heterogeneous_lp_filter = False + + if heterogeneous_lp_filter: + # this isn't clean FP, but then again, we only want to provide + # the Nyquist hint when the lowpass filter was actually + # calculated from dividing the sampling frequency by 2, so the + # exact/direct comparison (instead of tolerance) makes sense + if info["lowpass"] == info["sfreq"] / 2.0: + nyquist = ", Nyquist limit" + else: + nyquist = "" + warn( + "Channels contain different lowpass filters. " + f"Highest (weakest) filter setting ({info['lowpass']:0.2f} " + f"Hz{nyquist}) will be stored." + ) + + # Creates a list of dicts of eeg channels for raw.info + logger.info("Setting channel info structure...") + info["chs"] = [] + for idx, ch_name in enumerate(ch_names): + if ch_name in eog or idx in eog or idx - nchan in eog: + kind = FIFF.FIFFV_EOG_CH + coil_type = FIFF.FIFFV_COIL_NONE + unit = FIFF.FIFF_UNIT_V + elif ch_name in misc or idx in misc or idx - nchan in misc: + kind = FIFF.FIFFV_MISC_CH + coil_type = FIFF.FIFFV_COIL_NONE + if ch_name in misc_chs: + unit = misc_chs[ch_name] + else: + unit = FIFF.FIFF_UNIT_NONE + elif ch_name == "STI 014": + kind = FIFF.FIFFV_STIM_CH + coil_type = FIFF.FIFFV_COIL_NONE + unit = FIFF.FIFF_UNIT_NONE + else: + kind = FIFF.FIFFV_EEG_CH + coil_type = FIFF.FIFFV_COIL_EEG + unit = FIFF.FIFF_UNIT_V + info["chs"].append( + dict( + ch_name=ch_name, + coil_type=coil_type, + kind=kind, + logno=idx + 1, + scanno=idx + 1, + cal=cals[idx], + range=ranges[idx], + loc=np.full(12, np.nan), + unit=unit, + unit_mul=FIFF.FIFF_UNITM_NONE, + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + ) + + info._unlocked = False + info._update_redundant() + return (info, data_fname, fmt, order, n_samples, mrk_fname, montage, orig_units) + + +@fill_doc +def read_raw_brainvision( + vhdr_fname, + eog=("HEOGL", "HEOGR", "VEOGb"), + misc="auto", + scale=1.0, + ignore_marker_types=False, + preload=False, + verbose=None, +) -> RawBrainVision: + """Reader for Brain Vision EEG file. + + Parameters + ---------- + vhdr_fname : path-like + Path to the EEG header file. + eog : list or tuple of str + Names of channels or list of indices that should be designated + EOG channels. Values should correspond to the header file + Default is ``('HEOGL', 'HEOGR', 'VEOGb')``. + misc : list or tuple of str | ``'auto'`` + Names of channels or list of indices that should be designated + MISC channels. Values should correspond to the electrodes in the + header file. If ``'auto'``, units in header file are used for inferring + misc channels. Default is ``'auto'``. + scale : float + The scaling factor for EEG data. Unless specified otherwise by + header file, units are in microvolts. Default scale factor is 1. + ignore_marker_types : bool + If ``True``, ignore marker types and only use marker descriptions. Default is + ``False``. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawBrainVision + A Raw object containing BrainVision data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawBrainVision. + + Notes + ----- + If the BrainVision header file contains impedance measurements, these may be + accessed using ``raw.impedances`` after reading using this function. However, + this attribute will NOT be available after a save and re-load of the data. + That is, it is only available when reading data directly from the BrainVision + header file. 
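An aside on the filter-settings parsing above: when the header reports highpass/lowpass values as time constants in seconds (an "[s]" unit in the column header), they are converted to cutoff frequencies as f = 1/(2*pi*t). A standalone sketch of that conversion:

```python
import numpy as np

def time_constant_to_cutoff(t_sec: float) -> float:
    """Convert an RC time constant in seconds to a cutoff frequency in Hz.

    Mirrors the 1 / (2 * pi * t) conversion applied by the header parser
    above when filter settings carry an "[s]" unit.
    """
    return 1.0 / (2.0 * np.pi * t_sec)

# A 10 s time constant corresponds to roughly 0.0159 Hz.
print(f"{time_constant_to_cutoff(10.0):.4f}")
```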
+ + BrainVision markers consist of a type and a description (in addition to other fields + like onset and duration). In contrast, annotations in MNE only have a description. + Therefore, a BrainVision marker of type "Stimulus" and description "S 1" will be + converted to an annotation "Stimulus/S 1" by default. If you want to ignore the + type and instead only use the description, set ``ignore_marker_types=True``, which + will convert the same marker to an annotation "S 1". + """ + return RawBrainVision( + vhdr_fname=vhdr_fname, + eog=eog, + misc=misc, + scale=scale, + ignore_marker_types=ignore_marker_types, + preload=preload, + verbose=verbose, + ) + + +_BV_EVENT_IO_OFFSETS = { + "Event/": 0, + "Stimulus/S": 0, + "Response/R": 1000, + "Optic/O": 2000, +} +_OTHER_ACCEPTED_MARKERS = {"New Segment/": 99999, "SyncStatus/Sync On": 99998} +_OTHER_OFFSET = 10001 # where to start "unknown" event_ids +_AHDR_CHANNEL_NAME = "AHDR_CHANNEL" + + +class _BVEventParser(_DefaultEventParser): + """Parse standard brainvision events, accounting for non-standard ones.""" + + def __call__(self, description): + """Parse BrainVision event codes (like `Stimulus/S 11`) to ints.""" + offsets = _BV_EVENT_IO_OFFSETS + + maybe_digit = description[-3:].strip() + kind = description[:-3] + if maybe_digit.isdigit() and kind in offsets: + code = int(maybe_digit) + offsets[kind] + elif description in _OTHER_ACCEPTED_MARKERS: + code = _OTHER_ACCEPTED_MARKERS[description] + else: + code = super().__call__(description, offset=_OTHER_OFFSET) + return code + + +def _check_bv_annot(descriptions): + markers_basename = set([dd.rstrip("0123456789 ") for dd in descriptions]) + bv_markers = set(_BV_EVENT_IO_OFFSETS.keys()).union( + set(_OTHER_ACCEPTED_MARKERS.keys()) + ) + return len(markers_basename - bv_markers) == 0 + + +def _parse_impedance(settings, recording_date=None): + """Parse impedances from the header file. + + Parameters + ---------- + settings : list + The header settings lines from the VHDR/AHDR file. + recording_date : datetime.datetime | None + The date of the recording as extracted from the VMRK/AMRK file. + + Returns + ------- + impedances : dict + A dictionary of all electrodes and their impedances. 
+ """ + ranges = _parse_impedance_ranges(settings) + impedance_setting_lines = [ + i for i in settings if i.startswith("Impedance [") and i.endswith(" :") + ] + impedances = dict() + if len(impedance_setting_lines) > 0: + idx = settings.index(impedance_setting_lines[0]) + impedance_setting = impedance_setting_lines[0].split() + impedance_unit = impedance_setting[1].lstrip("[").rstrip("]") + impedance_time = None + + # If we have a recording date, we can update it with the time of + # impedance measurement + if recording_date is not None: + meas_time = [int(i) for i in impedance_setting[3].split(":")] + impedance_time = recording_date.replace( + hour=meas_time[0], + minute=meas_time[1], + second=meas_time[2], + microsecond=0, + ) + for setting in settings[idx + 1 :]: + # Parse channel impedances until we find a line that doesn't start + # with a channel name and optional +/- polarity for passive elecs + match = re.match(r"[ a-zA-Z0-9_+-]+:", setting) + if match: + channel_name = match.group().rstrip(":") + channel_imp_line = setting.split() + imp_as_number = re.findall(r"[-+]?\d*\.\d+|\d+", channel_imp_line[-1]) + channel_impedance = dict( + imp=float(imp_as_number[0]) if imp_as_number else np.nan, + imp_unit=impedance_unit, + ) + if impedance_time is not None: + channel_impedance.update({"imp_meas_time": impedance_time}) + + if channel_name == "Ref" and "Reference" in ranges: + channel_impedance.update(ranges["Reference"]) + elif channel_name == "Gnd" and "Ground" in ranges: + channel_impedance.update(ranges["Ground"]) + elif "Data" in ranges: + channel_impedance.update(ranges["Data"]) + impedances[channel_name] = channel_impedance + else: + break + return impedances + + +def _parse_impedance_ranges(settings): + """Parse the selected electrode impedance ranges from the header. + + Parameters + ---------- + settings : list + The header settings lines from the VHDR/AHDR file. + + Returns + ------- + electrode_imp_ranges : dict + A dictionary of impedance ranges for each type of electrode. + """ + impedance_ranges = [ + item for item in settings if "Selected Impedance Measurement Range" in item + ] + electrode_imp_ranges = dict() + if impedance_ranges: + if len(impedance_ranges) == 1: + img_range = impedance_ranges[0].split() + for electrode_type in ["Data", "Reference", "Ground"]: + electrode_imp_ranges[electrode_type] = { + "imp_lower_bound": float(img_range[-4]), + "imp_upper_bound": float(img_range[-2]), + "imp_range_unit": img_range[-1], + } + else: + for electrode_range in impedance_ranges: + electrode_range = electrode_range.split() + electrode_imp_ranges[electrode_range[0]] = { + "imp_lower_bound": float(electrode_range[6]), + "imp_upper_bound": float(electrode_range[8]), + "imp_range_unit": electrode_range[9], + } + return electrode_imp_ranges diff --git a/mne/io/bti/__init__.py b/mne/io/bti/__init__.py new file mode 100644 index 0000000..3a9d58f --- /dev/null +++ b/mne/io/bti/__init__.py @@ -0,0 +1,7 @@ +"""BTi module for conversion to FIF.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .bti import read_raw_bti diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py new file mode 100644 index 0000000..a992d3c --- /dev/null +++ b/mne/io/bti/bti.py @@ -0,0 +1,1414 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
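Before the BTi reader starts below, here is a self-contained illustration of the marker-to-event-code convention implemented by ``_BVEventParser`` above; the offsets are copied from ``_BV_EVENT_IO_OFFSETS``, and this sketch covers only the standard-marker branch.

```python
# Illustrative re-implementation of the standard-marker branch only;
# non-standard descriptions are handled by a fallback parser in MNE.
offsets = {"Event/": 0, "Stimulus/S": 0, "Response/R": 1000, "Optic/O": 2000}

def bv_code(description: str) -> int | None:
    maybe_digit = description[-3:].strip()  # markers end in an id of <= 3 digits
    kind = description[:-3]
    if maybe_digit.isdigit() and kind in offsets:
        return int(maybe_digit) + offsets[kind]
    return None

assert bv_code("Stimulus/S 11") == 11
assert bv_code("Response/R  2") == 1002
assert bv_code("Optic/O  7") == 2007
```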
+ +import functools +import os.path as op +from io import BytesIO +from itertools import count + +import numpy as np + +from ..._fiff._digitization import _make_bti_dig_points +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.tag import _coil_trans_to_loc, _loc_to_coil_trans +from ..._fiff.utils import _mult_cal_one, read_str +from ...transforms import Transform, combine_transforms, invert_transform +from ...utils import _stamp_to_dt, _validate_type, logger, path_like, verbose +from ..base import BaseRaw +from .constants import BTI +from .read import ( + read_char, + read_dev_header, + read_double, + read_double_matrix, + read_float, + read_float_matrix, + read_int16, + read_int16_matrix, + read_int32, + read_int64, + read_transform, + read_uint16, + read_uint32, +) + +BTI_WH2500_REF_MAG = ("MxA", "MyA", "MzA", "MxaA", "MyaA", "MzaA") +BTI_WH2500_REF_GRAD = ("GxxA", "GyyA", "GyxA", "GzaA", "GzyA") + +dtypes = zip(list(range(1, 5)), (">i2", ">i4", ">f4", ">f8")) +DTYPES = {i: np.dtype(t) for i, t in dtypes} + + +def _instantiate_default_info_chs(): + """Populate entries in info['chs'] with default values.""" + return dict( + loc=np.array([0, 0, 0, 1] * 3, dtype="f4"), + ch_name=None, + unit_mul=FIFF.FIFF_UNITM_NONE, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + coil_type=FIFF.FIFFV_COIL_NONE, + range=1.0, + unit=FIFF.FIFF_UNIT_V, + cal=1.0, + scanno=None, + kind=FIFF.FIFFV_MISC_CH, + logno=None, + ) + + +class _bytes_io_mock_context: + """Make a context for BytesIO.""" + + def __init__(self, target): + self.target = target + + def __enter__(self): # noqa: D105 + return self.target + + def __exit__(self, exception_type, value, tb): # noqa: D105 + pass + + +def _bti_open(fname, *args, **kwargs): + """Handle BytesIO.""" + if isinstance(fname, path_like): + return open(fname, *args, **kwargs) + elif isinstance(fname, BytesIO): + return _bytes_io_mock_context(fname) + else: + raise RuntimeError("Cannot mock this.") + + +def _get_bti_dev_t(adjust=0.0, translation=(0.0, 0.02, 0.11)): + """Get the general Magnes3600WH to Neuromag coordinate transform. + + Parameters + ---------- + adjust : float | None + Degrees to tilt x-axis for sensor frame misalignment. + If None, no adjustment will be applied. + translation : array-like + The translation to place the origin of coordinate system + to the center of the head. + + Returns + ------- + m_nm_t : ndarray + 4 x 4 rotation, translation, scaling matrix. + """ + flip_t = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) + rad = np.deg2rad(adjust) + adjust_t = np.array( + [ + [1.0, 0.0, 0.0], + [0.0, np.cos(rad), -np.sin(rad)], + [0.0, np.sin(rad), np.cos(rad)], + ] + ) + m_nm_t = np.eye(4) + m_nm_t[:3, :3] = np.dot(flip_t, adjust_t) + m_nm_t[:3, 3] = translation + return m_nm_t + + +def _rename_channels(names, ecg_ch="E31", eog_ch=("E63", "E64")): + """Rename appropriately ordered list of channel names. 
+ + Parameters + ---------- + names : list of str + Lists of 4-D channel names in ascending order + + Returns + ------- + new : list + List of names, channel names in Neuromag style + """ + new = list() + ref_mag, ref_grad, eog, eeg, ext = (count(1) for _ in range(5)) + for i, name in enumerate(names, 1): + if name.startswith("A"): + name = f"MEG {i:03d}" + elif name == "RESPONSE": + name = "STI 013" + elif name == "TRIGGER": + name = "STI 014" + elif any(name == k for k in eog_ch): + name = f"EOG {next(eog):03d}" + elif name == ecg_ch: + name = "ECG 001" + elif name.startswith("E"): + name = f"EEG {next(eeg):03d}" + elif name == "UACurrent": + name = "UTL 001" + elif name.startswith("M"): + name = f"RFM {next(ref_mag):03d}" + elif name.startswith("G"): + name = f"RFG {next(ref_grad):03d}" + elif name.startswith("X"): + name = f"EXT {next(ext):03d}" + + new += [name] + + return new + + +# read the points +def _read_head_shape(fname): + """Read the head shape.""" + with _bti_open(fname, "rb") as fid: + fid.seek(BTI.FILE_HS_N_DIGPOINTS) + _n_dig_points = read_int32(fid) + idx_points = read_double_matrix(fid, BTI.DATA_N_IDX_POINTS, 3) + dig_points = read_double_matrix(fid, _n_dig_points, 3) + + # reorder to lpa, rpa, nasion so = is direct. + nasion, lpa, rpa = (idx_points[_, :] for _ in [2, 0, 1]) + hpi = idx_points[3 : len(idx_points), :] + + return nasion, lpa, rpa, hpi, dig_points + + +def _check_nan_dev_head_t(dev_ctf_t): + """Make sure we deal with nans.""" + has_nan = np.isnan(dev_ctf_t["trans"]) + if np.any(has_nan): + logger.info( + "Missing values BTI dev->head transform. Replacing with identity matrix." + ) + dev_ctf_t["trans"] = np.identity(4) + + +def _convert_coil_trans(coil_trans, dev_ctf_t, bti_dev_t): + """Convert the coil trans.""" + t = combine_transforms(invert_transform(dev_ctf_t), bti_dev_t, "ctf_head", "meg") + t = np.dot(t["trans"], coil_trans) + return t + + +def _correct_offset(fid): + """Align fid pointer.""" + current = fid.tell() + if (current % BTI.FILE_CURPOS) != 0: + offset = current % BTI.FILE_CURPOS + fid.seek(BTI.FILE_CURPOS - (offset), 1) + + +def _read_config(fname): + """Read BTi system config file. + + Parameters + ---------- + fname : str + The absolute path to the config file + + Returns + ------- + cfg : dict + The config blocks found. + """ + with _bti_open(fname, "rb") as fid: + cfg = dict() + cfg["hdr"] = { + "version": read_int16(fid), + "site_name": read_str(fid, 32), + "dap_hostname": read_str(fid, 16), + "sys_type": read_int16(fid), + "sys_options": read_int32(fid), + "supply_freq": read_int16(fid), + "total_chans": read_int16(fid), + "system_fixed_gain": read_float(fid), + "volts_per_bit": read_float(fid), + "total_sensors": read_int16(fid), + "total_user_blocks": read_int16(fid), + "next_der_chan_no": read_int16(fid), + } + + fid.seek(2, 1) + + cfg["checksum"] = read_uint32(fid) + cfg["reserved"] = read_char(fid, 32) + cfg["transforms"] = [ + read_transform(fid) for t in range(cfg["hdr"]["total_sensors"]) + ] + + cfg["user_blocks"] = dict() + for block in range(cfg["hdr"]["total_user_blocks"]): + ub = dict() + + ub["hdr"] = { + "nbytes": read_uint32(fid), + "kind": read_str(fid, 20), + "checksum": read_int32(fid), + "username": read_str(fid, 32), + "timestamp": read_uint32(fid), + "user_space_size": read_uint32(fid), + "reserved": read_char(fid, 32), + } + + _correct_offset(fid) + start_bytes = fid.tell() + kind = ub["hdr"].pop("kind") + if not kind: # make sure reading goes right. 
Should never be empty + raise RuntimeError( + "Could not read user block. Probably you " + "acquired data using a BTi version " + "currently not supported. Please contact " + "the mne-python developers." + ) + dta, cfg["user_blocks"][kind] = dict(), ub + if kind in [v for k, v in BTI.items() if k[:5] == "UB_B_"]: + if kind == BTI.UB_B_MAG_INFO: + dta["version"] = read_int32(fid) + fid.seek(20, 1) + dta["headers"] = list() + for hdr in range(6): + d = { + "name": read_str(fid, 16), + "transform": read_transform(fid), + "units_per_bit": read_float(fid), + } + dta["headers"] += [d] + fid.seek(20, 1) + + elif kind == BTI.UB_B_COH_POINTS: + dta["n_points"] = read_int32(fid) + dta["status"] = read_int32(fid) + dta["points"] = [ + { + "pos": read_double_matrix(fid, 1, 3), + "direction": read_double_matrix(fid, 1, 3), + "error": read_double(fid), + } + for _ in range(16) + ] + + elif kind == BTI.UB_B_CCP_XFM_BLOCK: + dta["method"] = read_int32(fid) + # handle difference btw/ linux (0) and solaris (4) + size = 0 if ub["hdr"]["user_space_size"] == 132 else 4 + fid.seek(size, 1) + dta["transform"] = read_transform(fid) + + elif kind == BTI.UB_B_EEG_LOCS: + dta["electrodes"] = [] + while True: + d = { + "label": read_str(fid, 16), + "location": read_double_matrix(fid, 1, 3), + } + if not d["label"]: + break + dta["electrodes"] += [d] + + elif kind in [BTI.UB_B_WHC_CHAN_MAP_VER, BTI.UB_B_WHS_SUBSYS_VER]: + dta["version"] = read_int16(fid) + dta["struct_size"] = read_int16(fid) + dta["entries"] = read_int16(fid) + + fid.seek(8, 1) + + elif kind == BTI.UB_B_WHC_CHAN_MAP: + num_channels = None + for name, data in cfg["user_blocks"].items(): + if name == BTI.UB_B_WHC_CHAN_MAP_VER: + num_channels = data["entries"] + break + + if num_channels is None: + raise ValueError( + f"Cannot find block {BTI.UB_B_WHC_CHAN_MAP_VER} to " + "determine number of channels" + ) + + dta["channels"] = list() + for i in range(num_channels): + d = { + "subsys_type": read_int16(fid), + "subsys_num": read_int16(fid), + "card_num": read_int16(fid), + "chan_num": read_int16(fid), + "recdspnum": read_int16(fid), + } + dta["channels"] += [d] + fid.seek(8, 1) + + elif kind == BTI.UB_B_WHS_SUBSYS: + num_subsys = None + for name, data in cfg["user_blocks"].items(): + if name == BTI.UB_B_WHS_SUBSYS_VER: + num_subsys = data["entries"] + break + + if num_subsys is None: + raise ValueError( + f"Cannot find block {BTI.UB_B_WHS_SUBSYS_VER} to determine" + " number of subsystems" + ) + + dta["subsys"] = list() + for _ in range(num_subsys): + d = { + "subsys_type": read_int16(fid), + "subsys_num": read_int16(fid), + "cards_per_sys": read_int16(fid), + "channels_per_card": read_int16(fid), + "card_version": read_int16(fid), + } + + fid.seek(2, 1) + + d.update( + { + "offsetdacgain": read_float(fid), + "squid_type": read_int32(fid), + "timesliceoffset": read_int16(fid), + "padding": read_int16(fid), + "volts_per_bit": read_float(fid), + } + ) + + dta["subsys"] += [d] + + elif kind == BTI.UB_B_CH_LABELS: + dta["version"] = read_int32(fid) + dta["entries"] = read_int32(fid) + fid.seek(16, 1) + + dta["labels"] = list() + for label in range(dta["entries"]): + dta["labels"] += [read_str(fid, 16)] + + elif kind == BTI.UB_B_CALIBRATION: + dta["sensor_no"] = read_int16(fid) + fid.seek(2, 1) + dta["timestamp"] = read_int32(fid) + dta["logdir"] = read_str(fid, 256) + + elif kind == BTI.UB_B_SYS_CONFIG_TIME: + # handle difference btw/ linux (256) and solaris (512) + size = 256 if ub["hdr"]["user_space_size"] == 260 else 512 + dta["sysconfig_name"] = 
read_str(fid, size) + dta["timestamp"] = read_int32(fid) + + elif kind == BTI.UB_B_DELTA_ENABLED: + dta["delta_enabled"] = read_int16(fid) + + elif kind in [BTI.UB_B_E_TABLE_USED, BTI.UB_B_E_TABLE]: + dta["hdr"] = { + "version": read_int32(fid), + "entry_size": read_int32(fid), + "n_entries": read_int32(fid), + "filtername": read_str(fid, 16), + "n_e_values": read_int32(fid), + "reserved": read_str(fid, 28), + } + + if dta["hdr"]["version"] == 2: + size = 16 + dta["ch_names"] = [ + read_str(fid, size) for ch in range(dta["hdr"]["n_entries"]) + ] + dta["e_ch_names"] = [ + read_str(fid, size) + for ch in range(dta["hdr"]["n_e_values"]) + ] + + rows = dta["hdr"]["n_entries"] + cols = dta["hdr"]["n_e_values"] + dta["etable"] = read_float_matrix(fid, rows, cols) + else: # handle MAGNES2500 naming scheme + dta["ch_names"] = ["WH2500"] * dta["hdr"]["n_e_values"] + dta["hdr"]["n_e_values"] = 6 + dta["e_ch_names"] = BTI_WH2500_REF_MAG + rows = dta["hdr"]["n_entries"] + cols = dta["hdr"]["n_e_values"] + dta["etable"] = read_float_matrix(fid, rows, cols) + + elif any( + [kind == BTI.UB_B_WEIGHTS_USED, kind[:4] == BTI.UB_B_WEIGHT_TABLE] + ): + dta["hdr"] = dict( + version=read_int32(fid), + n_bytes=read_uint32(fid), + n_entries=read_uint32(fid), + name=read_str(fid, 32), + ) + if dta["hdr"]["version"] == 2: + dta["hdr"].update( + description=read_str(fid, 80), + n_anlg=read_uint32(fid), + n_dsp=read_uint32(fid), + reserved=read_str(fid, 72), + ) + dta["ch_names"] = [ + read_str(fid, 16) for ch in range(dta["hdr"]["n_entries"]) + ] + dta["anlg_ch_names"] = [ + read_str(fid, 16) for ch in range(dta["hdr"]["n_anlg"]) + ] + + dta["dsp_ch_names"] = [ + read_str(fid, 16) for ch in range(dta["hdr"]["n_dsp"]) + ] + dta["dsp_wts"] = read_float_matrix( + fid, dta["hdr"]["n_entries"], dta["hdr"]["n_dsp"] + ) + dta["anlg_wts"] = read_int16_matrix( + fid, dta["hdr"]["n_entries"], dta["hdr"]["n_anlg"] + ) + else: # handle MAGNES2500 naming scheme + fid.seek( + start_bytes + + ub["hdr"]["user_space_size"] + - dta["hdr"]["n_bytes"] * dta["hdr"]["n_entries"], + 0, + ) + + dta["hdr"]["n_dsp"] = dta["hdr"]["n_bytes"] // 4 - 2 + assert dta["hdr"]["n_dsp"] == len(BTI_WH2500_REF_MAG) + len( + BTI_WH2500_REF_GRAD + ) + dta["ch_names"] = ["WH2500"] * dta["hdr"]["n_entries"] + dta["hdr"]["n_anlg"] = 3 + # These orders could be wrong, so don't set them + # for now + # dta['anlg_ch_names'] = BTI_WH2500_REF_MAG[:3] + # dta['dsp_ch_names'] = (BTI_WH2500_REF_GRAD + + # BTI_WH2500_REF_MAG) + dta["anlg_wts"] = np.zeros( + (dta["hdr"]["n_entries"], dta["hdr"]["n_anlg"]), dtype="i2" + ) + dta["dsp_wts"] = np.zeros( + (dta["hdr"]["n_entries"], dta["hdr"]["n_dsp"]), dtype="f4" + ) + for n in range(dta["hdr"]["n_entries"]): + dta["anlg_wts"][n] = read_int16_matrix( + fid, 1, dta["hdr"]["n_anlg"] + ) + read_int16(fid) + dta["dsp_wts"][n] = read_float_matrix( + fid, 1, dta["hdr"]["n_dsp"] + ) + + elif kind == BTI.UB_B_TRIG_MASK: + dta["version"] = read_int32(fid) + dta["entries"] = read_int32(fid) + fid.seek(16, 1) + + dta["masks"] = [] + for entry in range(dta["entries"]): + d = { + "name": read_str(fid, 20), + "nbits": read_uint16(fid), + "shift": read_uint16(fid), + "mask": read_uint32(fid), + } + dta["masks"] += [d] + fid.seek(8, 1) + + else: + dta["unknown"] = {"hdr": read_char(fid, ub["hdr"]["user_space_size"])} + + n_read = fid.tell() - start_bytes + if n_read != ub["hdr"]["user_space_size"]: + raise RuntimeError( + f"Internal MNE reading error, read size {n_read} " + f"!= {ub['hdr']['user_space_size']} expected size for kind 
{kind}." + ) + ub.update(dta) # finally update the userblock data + _correct_offset(fid) # after reading. + + cfg["chs"] = list() + + # prepare reading channels + for channel in range(cfg["hdr"]["total_chans"]): + ch = { + "name": read_str(fid, 16), + "chan_no": read_int16(fid), + "ch_type": read_uint16(fid), + "sensor_no": read_int16(fid), + "data": dict(), + } + + fid.seek(2, 1) + ch.update( + { + "gain": read_float(fid), + "units_per_bit": read_float(fid), + "yaxis_label": read_str(fid, 16), + "aar_val": read_double(fid), + "checksum": read_int32(fid), + "reserved": read_str(fid, 32), + } + ) + + cfg["chs"] += [ch] + _correct_offset(fid) # before and after + dta = dict() + if ch["ch_type"] in [BTI.CHTYPE_MEG, BTI.CHTYPE_REFERENCE]: + dev = { + "device_info": read_dev_header(fid), + "inductance": read_float(fid), + "padding": read_str(fid, 4), + "transform": _correct_trans(read_transform(fid), False), + "xform_flag": read_int16(fid), + "total_loops": read_int16(fid), + } + + fid.seek(4, 1) + dev["reserved"] = read_str(fid, 32) + dta.update({"dev": dev, "loops": []}) + for _ in range(dev["total_loops"]): + d = { + "position": read_double_matrix(fid, 1, 3), + "orientation": read_double_matrix(fid, 1, 3), + "radius": read_double(fid), + "wire_radius": read_double(fid), + "turns": read_int16(fid), + } + fid.seek(2, 1) + d["checksum"] = read_int32(fid) + d["reserved"] = read_str(fid, 32) + dta["loops"] += [d] + + elif ch["ch_type"] == BTI.CHTYPE_EEG: + dta = { + "device_info": read_dev_header(fid), + "impedance": read_float(fid), + "padding": read_str(fid, 4), + "transform": read_transform(fid), + "reserved": read_char(fid, 32), + } + + elif ch["ch_type"] == BTI.CHTYPE_EXTERNAL: + dta = { + "device_info": read_dev_header(fid), + "user_space_size": read_int32(fid), + "reserved": read_str(fid, 32), + } + + elif ch["ch_type"] == BTI.CHTYPE_TRIGGER: + dta = { + "device_info": read_dev_header(fid), + "user_space_size": read_int32(fid), + } + fid.seek(2, 1) + dta["reserved"] = read_str(fid, 32) + + elif ch["ch_type"] in [BTI.CHTYPE_UTILITY, BTI.CHTYPE_DERIVED]: + dta = { + "device_info": read_dev_header(fid), + "user_space_size": read_int32(fid), + "reserved": read_str(fid, 32), + } + + elif ch["ch_type"] == BTI.CHTYPE_SHORTED: + dta = { + "device_info": read_dev_header(fid), + "reserved": read_str(fid, 32), + } + + ch.update(dta) # add data collected + _correct_offset(fid) # after each reading + + return cfg + + +def _read_epoch(fid): + """Read BTi PDF epoch.""" + out = { + "pts_in_epoch": read_int32(fid), + "epoch_duration": read_float(fid), + "expected_iti": read_float(fid), + "actual_iti": read_float(fid), + "total_var_events": read_int32(fid), + "checksum": read_int32(fid), + "epoch_timestamp": read_int32(fid), + } + + fid.seek(28, 1) + + return out + + +def _read_channel(fid): + """Read BTi PDF channel.""" + out = { + "chan_label": read_str(fid, 16), + "chan_no": read_int16(fid), + "attributes": read_int16(fid), + "scale": read_float(fid), + "yaxis_label": read_str(fid, 16), + "valid_min_max": read_int16(fid), + } + + fid.seek(6, 1) + out.update( + { + "ymin": read_double(fid), + "ymax": read_double(fid), + "index": read_int32(fid), + "checksum": read_int32(fid), + "off_flag": read_str(fid, 4), + "offset": read_float(fid), + } + ) + + fid.seek(24, 1) + + return out + + +def _read_event(fid): + """Read BTi PDF event.""" + out = { + "event_name": read_str(fid, 16), + "start_lat": read_float(fid), + "end_lat": read_float(fid), + "step_size": read_float(fid), + "fixed_event": read_int16(fid), + 
"checksum": read_int32(fid), + } + + fid.seek(32, 1) + _correct_offset(fid) + + return out + + +def _read_process(fid): + """Read BTi PDF process.""" + out = { + "nbytes": read_int32(fid), + "process_type": read_str(fid, 20), + "checksum": read_int32(fid), + "user": read_str(fid, 32), + "timestamp": read_int32(fid), + "filename": read_str(fid, 256), + "total_steps": read_int32(fid), + } + + fid.seek(32, 1) + _correct_offset(fid) + out["processing_steps"] = list() + for step in range(out["total_steps"]): + this_step = { + "nbytes": read_int32(fid), + "process_type": read_str(fid, 20), + "checksum": read_int32(fid), + } + ptype = this_step["process_type"] + if ptype == BTI.PROC_DEFAULTS: + this_step["scale_option"] = read_int32(fid) + + fid.seek(4, 1) + this_step["scale"] = read_double(fid) + this_step["dtype"] = read_int32(fid) + this_step["selected"] = read_int16(fid) + this_step["color_display"] = read_int16(fid) + + fid.seek(32, 1) + elif ptype in BTI.PROC_FILTER: + this_step["freq"] = read_float(fid) + fid.seek(32, 1) + elif ptype in BTI.PROC_BPFILTER: + this_step["high_freq"] = read_float(fid) + this_step["low_freq"] = read_float(fid) + else: + jump = this_step["user_space_size"] = read_int32(fid) + fid.seek(32, 1) + fid.seek(jump, 1) + + out["processing_steps"] += [this_step] + _correct_offset(fid) + + return out + + +def _read_assoc_file(fid): + """Read BTi PDF assocfile.""" + out = {"file_id": read_int16(fid), "length": read_int16(fid)} + + fid.seek(32, 1) + out["checksum"] = read_int32(fid) + + return out + + +def _read_pfid_ed(fid): + """Read PDF ed file.""" + out = {"comment_size": read_int32(fid), "name": read_str(fid, 17)} + + fid.seek(9, 1) + out.update( + { + "pdf_number": read_int16(fid), + "total_events": read_int32(fid), + "timestamp": read_int32(fid), + "flags": read_int32(fid), + "de_process": read_int32(fid), + "checksum": read_int32(fid), + "ed_id": read_int32(fid), + "win_width": read_float(fid), + "win_offset": read_float(fid), + } + ) + + fid.seek(8, 1) + + return out + + +def _read_bti_header_pdf(pdf_fname): + """Read header from pdf file.""" + with _bti_open(pdf_fname, "rb") as fid: + fid.seek(-8, 2) + start = fid.tell() + header_position = read_int64(fid) + check_value = header_position & BTI.FILE_MASK + + if (start + BTI.FILE_CURPOS - check_value) <= BTI.FILE_MASK: + header_position = check_value + + # Check header position for alignment issues + if (header_position % 8) != 0: + header_position += 8 - (header_position % 8) + + fid.seek(header_position, 0) + + # actual header starts here + info = { + "version": read_int16(fid), + "file_type": read_str(fid, 5), + "hdr_size": start - header_position, # add for convenience + "start": start, + } + + fid.seek(1, 1) + + info.update( + { + "data_format": read_int16(fid), + "acq_mode": read_int16(fid), + "total_epochs": read_int32(fid), + "input_epochs": read_int32(fid), + "total_events": read_int32(fid), + "total_fixed_events": read_int32(fid), + "sample_period": read_float(fid), + "xaxis_label": read_str(fid, 16), + "total_processes": read_int32(fid), + "total_chans": read_int16(fid), + } + ) + + fid.seek(2, 1) + info.update( + { + "checksum": read_int32(fid), + "total_ed_classes": read_int32(fid), + "total_associated_files": read_int16(fid), + "last_file_index": read_int16(fid), + "timestamp": read_int32(fid), + } + ) + + fid.seek(20, 1) + _correct_offset(fid) + + # actual header ends here, so dar seems ok. 
+ + info["epochs"] = [_read_epoch(fid) for _ in range(info["total_epochs"])] + + info["chs"] = [_read_channel(fid) for _ in range(info["total_chans"])] + + info["events"] = [_read_event(fid) for _ in range(info["total_events"])] + + info["processes"] = [_read_process(fid) for _ in range(info["total_processes"])] + + info["assocfiles"] = [ + _read_assoc_file(fid) for _ in range(info["total_associated_files"]) + ] + + info["edclasses"] = [ + _read_pfid_ed(fid) for _ in range(info["total_ed_classes"]) + ] + + info["extra_data"] = fid.read(start - fid.tell()) + info["pdf"] = pdf_fname + + info["total_slices"] = sum(e["pts_in_epoch"] for e in info["epochs"]) + + info["dtype"] = DTYPES[info["data_format"]] + bps = info["dtype"].itemsize * info["total_chans"] + info["bytes_per_slice"] = bps + return info + + +def _read_bti_header(pdf_fname, config_fname, sort_by_ch_name=True): + """Read bti PDF header.""" + info = _read_bti_header_pdf(pdf_fname) if pdf_fname is not None else dict() + cfg = _read_config(config_fname) + info["bti_transform"] = cfg["transforms"] + + # augment channel list by according info from config. + # get channels from config present in PDF + chans = info.get("chs", None) + if chans is not None: + chans_cfg = [ + c for c in cfg["chs"] if c["chan_no"] in [c_["chan_no"] for c_ in chans] + ] + + # sort chans_cfg and chans + chans = sorted(chans, key=lambda k: k["chan_no"]) + chans_cfg = sorted(chans_cfg, key=lambda k: k["chan_no"]) + + # check all pdf channels are present in config + match = [c["chan_no"] for c in chans_cfg] == [c["chan_no"] for c in chans] + + if not match: + raise RuntimeError( + "Could not match raw data channels with" + " config channels. Some of the channels" + " found are not described in config." + ) + else: + chans_cfg = cfg["chs"] + chans = [dict() for _ in chans_cfg] + + # transfer channel info from config to channel info + for ch, ch_cfg in zip(chans, chans_cfg): + ch["upb"] = ch_cfg["units_per_bit"] + ch["gain"] = ch_cfg["gain"] + ch["name"] = ch_cfg["name"] + if ch_cfg.get("dev", dict()).get("transform", None) is not None: + ch["loc"] = _coil_trans_to_loc(ch_cfg["dev"]["transform"]) + else: + ch["loc"] = np.full(12, np.nan) + if pdf_fname is not None: + if info["data_format"] <= 2: # see DTYPES, implies integer + ch["cal"] = ch["scale"] * ch["upb"] / float(ch["gain"]) + else: # float + ch["cal"] = ch["scale"] * ch["gain"] + else: # if we are in this mode we don't read data, only channel info. 
+ ch["cal"] = ch["scale"] = 1.0 # so we put a trivial default value + + if sort_by_ch_name: + by_index = [(i, d["index"]) for i, d in enumerate(chans)] + by_index.sort(key=lambda c: c[1]) + by_index = [idx[0] for idx in by_index] + chs = [chans[pos] for pos in by_index] + + sort_by_name_idx = [(i, d["name"]) for i, d in enumerate(chs)] + a_chs = [c for c in sort_by_name_idx if c[1].startswith("A")] + other_chs = [c for c in sort_by_name_idx if not c[1].startswith("A")] + sort_by_name_idx = sorted(a_chs, key=lambda c: int(c[1][1:])) + sorted( + other_chs + ) + + sort_by_name_idx = [idx[0] for idx in sort_by_name_idx] + + info["chs"] = [chans[pos] for pos in sort_by_name_idx] + info["order"] = sort_by_name_idx + else: + info["chs"] = chans + info["order"] = np.arange(len(chans)) + + # finally add some important fields from the config + info["e_table"] = cfg["user_blocks"][BTI.UB_B_E_TABLE_USED] + info["weights"] = cfg["user_blocks"][BTI.UB_B_WEIGHTS_USED] + + return info + + +def _correct_trans(t, check=True): + """Convert to a transformation matrix.""" + t = np.array(t, np.float64) + t[:3, :3] *= t[3, :3][:, np.newaxis] # apply scalings + t[3, :3] = 0.0 # remove them + if check: + assert t[3, 3] == 1.0 + else: + t[3, 3] = 1.0 + return t + + +class RawBTi(BaseRaw): + """Raw object from 4D Neuroimaging MagnesWH3600 data. + + Parameters + ---------- + pdf_fname : path-like + Path to the processed data file (PDF). + config_fname : path-like + Path to system config file. + head_shape_fname : path-like | None + Path to the head shape file. + rotation_x : float + Degrees to tilt x-axis for sensor frame misalignment. Ignored + if convert is True. + translation : array-like, shape (3,) + The translation to place the origin of coordinate system + to the center of the head. Ignored if convert is True. + convert : bool + Convert to Neuromag coordinates or not. + rename_channels : bool + Whether to keep original 4D channel labels or not. Defaults to True. + sort_by_ch_name : bool + Reorder channels according to channel label. 4D channels don't have + monotonically increasing numbers in their labels. Defaults to True. + ecg_ch : str | None + The 4D name of the ECG channel. If None, the channel will be treated + as regular EEG channel. + eog_ch : tuple of str | None + The 4D names of the EOG channels. If None, the channels will be treated + as regular EEG channels. + %(preload)s + + .. 
versionadded:: 0.11 + + %(verbose)s + """ + + @verbose + def __init__( + self, + pdf_fname, + config_fname="config", + head_shape_fname="hs_file", + rotation_x=0.0, + translation=(0.0, 0.02, 0.11), + convert=True, + rename_channels=True, + sort_by_ch_name=True, + ecg_ch="E31", + eog_ch=("E63", "E64"), + preload=False, + verbose=None, + ): + _validate_type(pdf_fname, ("path-like", BytesIO), "pdf_fname") + info, bti_info = _get_bti_info( + pdf_fname=pdf_fname, + config_fname=config_fname, + head_shape_fname=head_shape_fname, + rotation_x=rotation_x, + translation=translation, + convert=convert, + ecg_ch=ecg_ch, + rename_channels=rename_channels, + sort_by_ch_name=sort_by_ch_name, + eog_ch=eog_ch, + ) + bti_info["bti_ch_labels"] = [c["chan_label"] for c in bti_info["chs"]] + # make Raw repr work if we have a BytesIO as input + filename = bti_info["pdf"] + if isinstance(filename, BytesIO): + filename = None + super().__init__( + info, + preload, + filenames=[filename], + raw_extras=[bti_info], + last_samps=[bti_info["total_slices"] - 1], + verbose=verbose, + ) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file.""" + bti_info = self._raw_extras[fi] + fname_or_bytes = bti_info["pdf"] + dtype = bti_info["dtype"] + assert len(bti_info["chs"]) == self._raw_extras[fi]["orig_nchan"] + n_channels = len(bti_info["chs"]) + n_bytes = np.dtype(dtype).itemsize + data_left = (stop - start) * n_channels + read_cals = np.empty((bti_info["total_chans"],)) + for ch in bti_info["chs"]: + read_cals[ch["index"]] = ch["cal"] + + block_size = ((int(100e6) // n_bytes) // n_channels) * n_channels + block_size = min(data_left, block_size) + # extract data in chunks + with _bti_open(fname_or_bytes, "rb") as fid: + fid.seek(bti_info["bytes_per_slice"] * start, 0) + for sample_start in np.arange(0, data_left, block_size) // n_channels: + count = min(block_size, data_left - sample_start * n_channels) + if isinstance(fid, BytesIO): + block = np.frombuffer(fid.getvalue(), dtype, count) + else: + block = np.fromfile(fid, dtype, count) + sample_stop = sample_start + count // n_channels + shape = (sample_stop - sample_start, bti_info["total_chans"]) + block.shape = shape + data_view = data[:, sample_start:sample_stop] + one = np.empty(block.shape[::-1]) + + for ii, b_i_o in enumerate(bti_info["order"]): + one[ii] = block[:, b_i_o] * read_cals[b_i_o] + _mult_cal_one(data_view, one, idx, cals, mult) + + +@functools.lru_cache(1) +def _1020_names(): + from mne.channels import make_standard_montage + + return set( + ch_name.lower() for ch_name in make_standard_montage("standard_1005").ch_names + ) + + +def _eeg_like(ch_name): + # Some bti recordigs look like "F4-POz", so let's at least mark them + # as EEG + if ch_name.count("-") != 1: + return + ch, ref = ch_name.split("-") + eeg_names = _1020_names() + return ch.lower() in eeg_names and ref.lower() in eeg_names + + +def _make_bti_digitization( + info, head_shape_fname, convert, use_hpi, bti_dev_t, dev_ctf_t +): + with info._unlock(): + if head_shape_fname: + logger.info(f"... Reading digitization points from {head_shape_fname}") + + nasion, lpa, rpa, hpi, dig_points = _read_head_shape(head_shape_fname) + info["dig"], dev_head_t, ctf_head_t = _make_bti_dig_points( + nasion, + lpa, + rpa, + hpi, + dig_points, + convert, + use_hpi, + bti_dev_t, + dev_ctf_t, + ) + else: + logger.info("... 
no headshape file supplied, doing nothing.") + info["dig"] = None + dev_head_t = Transform("meg", "head", trans=None) + ctf_head_t = Transform("ctf_head", "head", trans=None) + + info.update(dev_head_t=dev_head_t, dev_ctf_t=dev_ctf_t, ctf_head_t=ctf_head_t) + + return info + + +def _get_bti_info( + pdf_fname, + config_fname, + head_shape_fname, + rotation_x, + translation, + convert, + ecg_ch, + eog_ch, + rename_channels=True, + sort_by_ch_name=True, +): + """Read BTI info. + + Note. This helper supports partial construction of infos when `pdf_fname` + is None. Some datasets, such as the HCP, are shipped as a large collection + of zipped files where it can be more efficient to only read the needed + information. In such a situation, some information can neither be accessed + directly nor guessed based on the `config`. + + These fields will thus be set to None: + - 'lowpass' + - 'highpass' + - 'sfreq' + - 'meas_date' + + """ + if pdf_fname is None: + logger.info("No pdf_fname passed, trying to construct partial info from config") + if pdf_fname is not None and not isinstance(pdf_fname, BytesIO): + if not op.isabs(pdf_fname): + pdf_fname = op.abspath(pdf_fname) + + if not isinstance(config_fname, BytesIO): + if not op.isabs(config_fname): + config_tries = [ + op.abspath(config_fname), + op.abspath(op.join(op.dirname(pdf_fname), config_fname)), + ] + for config_try in config_tries: + if op.isfile(config_try): + config_fname = config_try + break + if not op.isfile(config_fname): + raise ValueError( + f"Could not find the config file {config_fname}. Please check" + " whether you are in the right directory " + "or pass the full name" + ) + + if head_shape_fname is not None and not isinstance(head_shape_fname, BytesIO): + orig_name = head_shape_fname + if not op.isfile(head_shape_fname): + head_shape_fname = op.join(op.dirname(pdf_fname), head_shape_fname) + + if not op.isfile(head_shape_fname): + raise ValueError( + f'Could not find the head_shape file "{orig_name}". ' + "You should check whether you are in the " + "right directory, pass the full file name, " + "or pass head_shape_fname=None." + ) + + logger.info(f"Reading 4D PDF file {pdf_fname}...") + bti_info = _read_bti_header( + pdf_fname, config_fname, sort_by_ch_name=sort_by_ch_name + ) + extras = dict( + pdf_fname=pdf_fname, + head_shape_fname=head_shape_fname, + config_fname=config_fname, + ) + for key, val in extras.items(): + bti_info[key] = None if isinstance(val, BytesIO) else val + + dev_ctf_t = Transform( + "ctf_meg", "ctf_head", _correct_trans(bti_info["bti_transform"][0]) + ) + + _check_nan_dev_head_t(dev_ctf_t) + # for old backward compatibility and external processing + rotation_x = 0.0 if rotation_x is None else rotation_x + bti_dev_t = _get_bti_dev_t(rotation_x, translation) if convert else None + bti_dev_t = Transform("ctf_meg", "meg", bti_dev_t) + + use_hpi = False # hard coded, but marked as later option. + logger.info("Creating Neuromag info structure ...") + if "sample_period" in bti_info.keys(): + sfreq = 1.0 / bti_info["sample_period"] + else: + sfreq = None + + if pdf_fname is not None: + info = _empty_info(sfreq) + date = bti_info["processes"][0]["timestamp"] + info["meas_date"] = _stamp_to_dt((date, 0)) + else: # these cannot be guessed from config, see docstring + info = _empty_info(1.0) + info["sfreq"] = None + info["lowpass"] = None + info["highpass"] = None + info["meas_date"] = None + bti_info["processes"] = list() + + # browse processing info for filter specs. 
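+    # The loop below scans each processing block for filter settings: a
+    # band-pass step carries both 'high_freq' and 'low_freq', while dedicated
+    # 'hp'/'lp' steps carry a single 'freq'; whichever is found last wins.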
+ hp, lp = info["highpass"], info["lowpass"] + for proc in bti_info["processes"]: + if "filt" in proc["process_type"]: + for step in proc["processing_steps"]: + if "high_freq" in step: + hp, lp = step["high_freq"], step["low_freq"] + elif "hp" in step["process_type"]: + hp = step["freq"] + elif "lp" in step["process_type"]: + lp = step["freq"] + + info["highpass"] = hp + info["lowpass"] = lp + chs = [] + + # Note that 'name' and 'chan_label' are not the same. + # We want the configured label if out IO parsed it + # except for the MEG channels for which we keep the config name + bti_ch_names = list() + for ch in bti_info["chs"]: + # we have always relied on 'A' as indicator of MEG data channels. + ch_name = ch["name"] + if not ch_name.startswith("A"): + ch_name = ch.get("chan_label", ch_name) + bti_ch_names.append(ch_name) + + neuromag_ch_names = _rename_channels(bti_ch_names, ecg_ch=ecg_ch, eog_ch=eog_ch) + ch_mapping = zip(bti_ch_names, neuromag_ch_names) + + logger.info("... Setting channel info structure.") + for idx, (chan_4d, chan_neuromag) in enumerate(ch_mapping): + chan_info = _instantiate_default_info_chs() + chan_info["ch_name"] = chan_neuromag if rename_channels else chan_4d + chan_info["logno"] = idx + BTI.FIFF_LOGNO + chan_info["scanno"] = idx + 1 + chan_info["cal"] = float(bti_info["chs"][idx]["scale"]) + + if any(chan_4d.startswith(k) for k in ("A", "M", "G")): + loc = bti_info["chs"][idx]["loc"] + if loc is not None: + if convert: + if idx == 0: + logger.info( + "... putting coil transforms in Neuromag coordinates" + ) + t = _loc_to_coil_trans(bti_info["chs"][idx]["loc"]) + t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t) + loc = _coil_trans_to_loc(t) + chan_info["loc"] = loc + + # BTI sensors are natively stored in 4D head coords we believe + meg_frame = FIFF.FIFFV_COORD_DEVICE if convert else FIFF.FIFFV_MNE_COORD_4D_HEAD + eeg_frame = FIFF.FIFFV_COORD_HEAD if convert else FIFF.FIFFV_MNE_COORD_4D_HEAD + if chan_4d.startswith("A"): + chan_info["kind"] = FIFF.FIFFV_MEG_CH + chan_info["coil_type"] = FIFF.FIFFV_COIL_MAGNES_MAG + chan_info["coord_frame"] = meg_frame + chan_info["unit"] = FIFF.FIFF_UNIT_T + + elif chan_4d.startswith("M"): + chan_info["kind"] = FIFF.FIFFV_REF_MEG_CH + chan_info["coil_type"] = FIFF.FIFFV_COIL_MAGNES_REF_MAG + chan_info["coord_frame"] = meg_frame + chan_info["unit"] = FIFF.FIFF_UNIT_T + + elif chan_4d.startswith("G"): + chan_info["kind"] = FIFF.FIFFV_REF_MEG_CH + chan_info["coord_frame"] = meg_frame + chan_info["unit"] = FIFF.FIFF_UNIT_T_M + if chan_4d in ("GxxA", "GyyA"): + chan_info["coil_type"] = FIFF.FIFFV_COIL_MAGNES_REF_GRAD + elif chan_4d in ("GyxA", "GzxA", "GzyA"): + chan_info["coil_type"] = FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD + + elif chan_4d.startswith("EEG") or _eeg_like(chan_4d): + chan_info["kind"] = FIFF.FIFFV_EEG_CH + chan_info["coil_type"] = FIFF.FIFFV_COIL_EEG + chan_info["coord_frame"] = eeg_frame + chan_info["unit"] = FIFF.FIFF_UNIT_V + # TODO: We should use 'electrodes' to fill this in, and make sure + # we turn them into dig as well + chan_info["loc"][:3] = np.nan + + elif chan_4d == "RESPONSE": + chan_info["kind"] = FIFF.FIFFV_STIM_CH + elif chan_4d == "TRIGGER": + chan_info["kind"] = FIFF.FIFFV_STIM_CH + elif ( + chan_4d.startswith("EOG") + or chan_4d[:4] in ("HEOG", "VEOG") + or chan_4d in eog_ch + ): + chan_info["kind"] = FIFF.FIFFV_EOG_CH + elif chan_4d.startswith("EMG"): + chan_info["kind"] = FIFF.FIFFV_EMG_CH + elif chan_4d == ecg_ch or chan_4d.startswith("ECG"): + chan_info["kind"] = FIFF.FIFFV_ECG_CH + # Our 
default is now misc, but if we ever change that,
+            # we'll need this:
+            # elif chan_4d.startswith('X') or chan_4d == 'UACurrent':
+            #     chan_info['kind'] = FIFF.FIFFV_MISC_CH
+
+        chs.append(chan_info)
+
+    info["chs"] = chs
+
+    # ### Dig stuff
+    info = _make_bti_digitization(
+        info, head_shape_fname, convert, use_hpi, bti_dev_t, dev_ctf_t
+    )
+
+    logger.info(
+        "Currently direct inclusion of 4D weight tables is not supported."
+        " For critical use cases please take into account the MNE command"
+        ' "mne_create_comp_data" to include weights as printed out by '
+        'the 4D "print_table" routine.'
+    )
+
+    # check that the info is complete
+    info._unlocked = False
+    info._update_redundant()
+    info._check_consistency()
+    return info, bti_info
+
+
+@verbose
+def read_raw_bti(
+    pdf_fname,
+    config_fname="config",
+    head_shape_fname="hs_file",
+    rotation_x=0.0,
+    translation=(0.0, 0.02, 0.11),
+    convert=True,
+    rename_channels=True,
+    sort_by_ch_name=True,
+    ecg_ch="E31",
+    eog_ch=("E63", "E64"),
+    preload=False,
+    verbose=None,
+) -> RawBTi:
+    """Raw object from 4D Neuroimaging MagnesWH3600 data.
+
+    .. note::
+        1. Currently direct inclusion of reference channel weights
+           is not supported. Please use ``mne_create_comp_data`` to include
+           the weights or use the low level functions from this module to
+           include them by yourself.
+        2. The informed guess for the 4D name is E31 for the ECG channel and
+           E63, E64 for the EOG channels. Please check and adjust if those
+           channels are present in your dataset but 'ECG 01' and 'EOG 01',
+           'EOG 02' don't appear in the channel names of the raw object.
+
+    Parameters
+    ----------
+    pdf_fname : path-like
+        Path to the processed data file (PDF).
+    config_fname : path-like
+        Path to system config file.
+    head_shape_fname : path-like | None
+        Path to the head shape file.
+    rotation_x : float
+        Degrees to tilt x-axis for sensor frame misalignment. Ignored
+        if convert is True.
+    translation : array-like, shape (3,)
+        The translation to place the origin of coordinate system
+        to the center of the head. Ignored if convert is True.
+    convert : bool
+        Convert to Neuromag coordinates or not.
+    rename_channels : bool
+        Whether to keep original 4D channel labels or not. Defaults to True.
+    sort_by_ch_name : bool
+        Reorder channels according to channel label. 4D channels don't have
+        monotonically increasing numbers in their labels. Defaults to True.
+    ecg_ch : str | None
+        The 4D name of the ECG channel. If None, the channel will be treated
+        as regular EEG channel.
+    eog_ch : tuple of str | None
+        The 4D names of the EOG channels. If None, the channels will be treated
+        as regular EEG channels.
+    %(preload)s
+
+        .. versionadded:: 0.11
+    %(verbose)s
+
+    Returns
+    -------
+    raw : instance of RawBTi
+        A Raw object containing BTI data.
+        See :class:`mne.io.Raw` for documentation of attributes and methods.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods of RawBTi.
+    """
+    return RawBTi(
+        pdf_fname,
+        config_fname=config_fname,
+        head_shape_fname=head_shape_fname,
+        rotation_x=rotation_x,
+        translation=translation,
+        convert=convert,
+        rename_channels=rename_channels,
+        sort_by_ch_name=sort_by_ch_name,
+        ecg_ch=ecg_ch,
+        eog_ch=eog_ch,
+        preload=preload,
+        verbose=verbose,
+    )
diff --git a/mne/io/bti/constants.py b/mne/io/bti/constants.py
new file mode 100644
index 0000000..d135dae
--- /dev/null
+++ b/mne/io/bti/constants.py
@@ -0,0 +1,99 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
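+# BTI below is a BunchConst: a dict whose keys double as attributes, so the
+# readers can write ``BTI.CHTYPE_MEG`` instead of ``BTI["CHTYPE_MEG"]``.
+# A minimal sketch of that pattern (illustrative only; ``AttrDict`` is a
+# hypothetical name, not part of this module):
+#
+#     class AttrDict(dict):
+#         def __getattr__(self, name):
+#             try:
+#                 return self[name]
+#             except KeyError as err:
+#                 raise AttributeError(name) from err
+#
+#         def __setattr__(self, name, value):
+#             self[name] = value
+#
+#     demo = AttrDict()
+#     demo.CHTYPE_MEG = 1
+#     assert demo.CHTYPE_MEG == demo["CHTYPE_MEG"] == 1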
+
+from ...utils import BunchConst
+
+BTI = BunchConst()
+
+BTI.ELEC_STATE_NOT_COLLECTED = 0
+BTI.ELEC_STATE_COLLECTED = 1
+BTI.ELEC_STATE_SKIPPED = 2
+BTI.ELEC_STATE_NOT_APPLICABLE = 3
+#
+## Byte offsets and data sizes for different files
+#
+BTI.FILE_MASK = 2147483647
+BTI.FILE_CURPOS = 8
+BTI.FILE_END = -8
+
+BTI.FILE_HS_VERSION = 0
+BTI.FILE_HS_TIMESTAMP = 4
+BTI.FILE_HS_CHECKSUM = 8
+BTI.FILE_HS_N_DIGPOINTS = 12
+BTI.FILE_HS_N_INDEXPOINTS = 16
+
+BTI.FILE_PDF_H_ENTER = 1
+BTI.FILE_PDF_H_FTYPE = 5
+BTI.FILE_PDF_H_XLABEL = 16
+BTI.FILE_PDF_H_NEXT = 2
+BTI.FILE_PDF_H_EXIT = 20
+
+BTI.FILE_PDF_EPOCH_EXIT = 28
+
+BTI.FILE_PDF_CH_NEXT = 6
+BTI.FILE_PDF_CH_LABELSIZE = 16
+BTI.FILE_PDF_CH_YLABEL = 16
+BTI.FILE_PDF_CH_OFF_FLAG = 16
+BTI.FILE_PDF_CH_EXIT = 12
+
+BTI.FILE_PDF_EVENT_NAME = 16
+BTI.FILE_PDF_EVENT_EXIT = 32
+
+BTI.FILE_PDF_PROCESS_BLOCKTYPE = 20
+BTI.FILE_PDF_PROCESS_USER = 32
+BTI.FILE_PDF_PROCESS_FNAME = 256
+BTI.FILE_PDF_PROCESS_EXIT = 32
+
+BTI.FILE_PDF_ASSOC_NEXT = 32
+
+BTI.FILE_PDFED_NAME = 17
+BTI.FILE_PDFED_NEXT = 9
+BTI.FILE_PDFED_EXIT = 8
+
+#
+## General data constants
+#
+BTI.DATA_N_IDX_POINTS = 5
+BTI.DATA_ROT_N_ROW = 3
+BTI.DATA_ROT_N_COL = 3
+BTI.DATA_XFM_N_COL = 4
+BTI.DATA_XFM_N_ROW = 4
+BTI.FIFF_LOGNO = 111
+#
+## Channel Types
+#
+BTI.CHTYPE_MEG = 1
+BTI.CHTYPE_EEG = 2
+BTI.CHTYPE_REFERENCE = 3
+BTI.CHTYPE_EXTERNAL = 4
+BTI.CHTYPE_TRIGGER = 5
+BTI.CHTYPE_UTILITY = 6
+BTI.CHTYPE_DERIVED = 7
+BTI.CHTYPE_SHORTED = 8
+#
+## Processes
+#
+BTI.PROC_DEFAULTS = "BTi_defaults"
+BTI.PROC_FILTER = "b_filt_hp,b_filt_lp,b_filt_notch"
+BTI.PROC_BPFILTER = "b_filt_b_pass,b_filt_b_reject"
+#
+## User blocks
+#
+BTI.UB_B_MAG_INFO = "B_Mag_Info"
+BTI.UB_B_COH_POINTS = "B_COH_Points"
+BTI.UB_B_CCP_XFM_BLOCK = "b_ccp_xfm_block"
+BTI.UB_B_EEG_LOCS = "b_eeg_elec_locs"
+BTI.UB_B_WHC_CHAN_MAP_VER = "B_WHChanMapVer"
+BTI.UB_B_WHC_CHAN_MAP = "B_WHChanMap"
+BTI.UB_B_WHS_SUBSYS_VER = "B_WHSubsysVer"
+BTI.UB_B_WHS_SUBSYS = "B_WHSubsys"
+BTI.UB_B_CH_LABELS = "B_ch_labels"
+BTI.UB_B_CALIBRATION = "B_Calibration"
+BTI.UB_B_SYS_CONFIG_TIME = "B_SysConfigTime"
+BTI.UB_B_DELTA_ENABLED = "B_DELTA_ENABLED"
+BTI.UB_B_E_TABLE_USED = "B_E_table_used"
+BTI.UB_B_E_TABLE = "B_E_TABLE"
+BTI.UB_B_WEIGHTS_USED = "B_weights_used"
+BTI.UB_B_TRIG_MASK = "B_trig_mask"
+BTI.UB_B_WEIGHT_TABLE = "BWT_"
diff --git a/mne/io/bti/read.py b/mne/io/bti/read.py
new file mode 100644
index 0000000..abf4b72
--- /dev/null
+++ b/mne/io/bti/read.py
@@ -0,0 +1,98 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
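+# Every helper in this module follows one pattern: read the exact number of
+# raw bytes from the file object, then decode them as a big-endian (">")
+# NumPy scalar or matrix. A self-contained sketch of that round trip (the
+# buffer and values are made up for illustration):
+#
+#     import io
+#     import numpy as np
+#
+#     buf = io.BytesIO(np.array([1.5, 2.5], dtype=">f4").tobytes())
+#     dtype = np.dtype(">f4")
+#     out = np.frombuffer(buf.read(dtype.itemsize * 2), dtype=dtype)
+#     assert out.astype(np.float32).tolist() == [1.5, 2.5]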
+ +import numpy as np + +from ..._fiff.utils import read_str + + +def _unpack_matrix(fid, rows, cols, dtype, out_dtype): + """Unpack matrix.""" + dtype = np.dtype(dtype) + + string = fid.read(int(dtype.itemsize * rows * cols)) + out = np.frombuffer(string, dtype=dtype).reshape(rows, cols).astype(out_dtype) + return out + + +def _unpack_simple(fid, dtype, out_dtype): + """Unpack a NumPy type.""" + dtype = np.dtype(dtype) + string = fid.read(dtype.itemsize) + out = np.frombuffer(string, dtype=dtype).astype(out_dtype) + + if len(out) > 0: + out = out[0] + return out + + +def read_char(fid, count=1): + """Read character from bti file.""" + return _unpack_simple(fid, f">S{count}", "S") + + +def read_uint16(fid): + """Read unsigned 16bit integer from bti file.""" + return _unpack_simple(fid, ">u2", np.uint32) + + +def read_int16(fid): + """Read 16bit integer from bti file.""" + return _unpack_simple(fid, ">i2", np.int32) + + +def read_uint32(fid): + """Read unsigned 32bit integer from bti file.""" + return _unpack_simple(fid, ">u4", np.uint32) + + +def read_int32(fid): + """Read 32bit integer from bti file.""" + return _unpack_simple(fid, ">i4", np.int32) + + +def read_int64(fid): + """Read 64bit integer from bti file.""" + return _unpack_simple(fid, ">u8", np.int64) + + +def read_float(fid): + """Read 32bit float from bti file.""" + return _unpack_simple(fid, ">f4", np.float32) + + +def read_double(fid): + """Read 64bit float from bti file.""" + return _unpack_simple(fid, ">f8", np.float64) + + +def read_int16_matrix(fid, rows, cols): + """Read 16bit integer matrix from bti file.""" + return _unpack_matrix( + fid, + rows, + cols, + dtype=">i2", + out_dtype=np.int32, + ) + + +def read_float_matrix(fid, rows, cols): + """Read 32bit float matrix from bti file.""" + return _unpack_matrix(fid, rows, cols, dtype=">f4", out_dtype=np.float32) + + +def read_double_matrix(fid, rows, cols): + """Read 64bit float matrix from bti file.""" + return _unpack_matrix(fid, rows, cols, dtype=">f8", out_dtype=np.float64) + + +def read_transform(fid): + """Read 64bit float matrix transform from bti file.""" + return read_double_matrix(fid, rows=4, cols=4) + + +def read_dev_header(x): + """Create a dev header.""" + return dict(size=read_int32(x), checksum=read_int32(x), reserved=read_str(x, 32)) diff --git a/mne/io/cnt/__init__.py b/mne/io/cnt/__init__.py new file mode 100644 index 0000000..10aac79 --- /dev/null +++ b/mne/io/cnt/__init__.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""CNT data reader.""" + +from .cnt import read_raw_cnt diff --git a/mne/io/cnt/_utils.py b/mne/io/cnt/_utils.py new file mode 100644 index 0000000..cf2d45c --- /dev/null +++ b/mne/io/cnt/_utils.py @@ -0,0 +1,150 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from collections import namedtuple +from datetime import datetime +from math import modf +from os import SEEK_END +from struct import Struct + +import numpy as np + +from ...utils import warn + + +def _read_teeg(f, teeg_offset): + """ + Read TEEG structure from an open CNT file. 
+
+    # from TEEG structure in http://paulbourke.net/dataformats/eeg/
+    typedef struct {
+        char Teeg;    /* Either 1 or 2 */
+        long Size;    /* Total length of all the events */
+        long Offset;  /* Hopefully always 0 */
+    } TEEG;
+    """
+    # we use more descriptive names based on the TEEG doc comments
+    Teeg = namedtuple("Teeg", "event_type total_length offset")
+    teeg_parser = Struct("<Bll")
+
+    f.seek(teeg_offset)
+    return Teeg(*teeg_parser.unpack(f.read(teeg_parser.size)))
+
+
+CNTEventType1 = namedtuple("CNTEventType1", "StimType KeyBoard KeyPad_Accept Offset")
+# unsigned short StimType;  /* range 0-65535 */
+# unsigned char KeyBoard;   /* range 0-11 corresponding to fcn keys +1 */
+# char KeyPad_Accept;       /* 0->3 range 0-15 bit coded response pad */
+#                           /* 4->7 values 0xd=Accept 0xc=Reject */
+# long Offset;              /* file offset of event */
+# } EVENT1;
+
+
+CNTEventType2 = namedtuple(
+    "CNTEventType2",
+    (
+        "StimType KeyBoard KeyPad_Accept Offset Type "
+        "Code Latency EpochEvent Accept2 Accuracy"
+    ),
+)
+# unsigned short StimType;  /* range 0-65535 */
+# unsigned char KeyBoard;   /* range 0-11 corresponding to fcn keys +1 */
+# char KeyPad_Accept;       /* 0->3 range 0-15 bit coded response pad */
+#                           /* 4->7 values 0xd=Accept 0xc=Reject */
+# long Offset;              /* file offset of event */
+# short Type;
+# short Code;
+# float Latency;
+# char EpochEvent;
+# char Accept2;
+# char Accuracy;
+
+
+# needed for backward compat: EVENT type 3 has the same structure as type 2
+CNTEventType3 = namedtuple(
+    "CNTEventType3",
+    (
+        "StimType KeyBoard KeyPad_Accept Offset Type "
+        "Code Latency EpochEvent Accept2 Accuracy"
+    ),
+)
+
+
+def _get_event_parser(event_type):
+    if event_type == 1:
+        event_maker = CNTEventType1
+        struct_pattern = "> 4
+        # Lower nibble (4 bits) keypad button press
+        keypad = event.KeyPad_Accept[0] & 0x0F
+        if str(keypad) != "0":
+            description.append(f"KeyPad Response {keypad}")
+        elif event.KeyBoard != 0:
+            description.append(f"Keyboard Response {event.KeyBoard}")
+        else:
+            description.append(str(event.StimType))
+
+    description = np.array(description)
+
+    onset, duration, description = _update_bad_span_onset(
+        accept_reject, onset / sfreq, duration, description
+    )
+    return Annotations(
+        onset=onset, duration=duration, description=description, orig_time=None
+    )
+
+
+@fill_doc
+def read_raw_cnt(
+    input_fname,
+    eog=(),
+    misc=(),
+    ecg=(),
+    emg=(),
+    data_format="auto",
+    date_format="mm/dd/yy",
+    *,
+    header="auto",
+    preload=False,
+    verbose=None,
+) -> "RawCNT":
+    """Read CNT data as raw object.
+
+    .. Note::
+        2d spatial coordinates (x, y) for EEG channels are read from the file
+        header and fit to a sphere to compute corresponding z-coordinates.
+        If channels assigned as EEG channels have locations
+        far away from the head (i.e. x and y coordinates don't fit to a
+        sphere), all the channel locations will be distorted
+        (all channels that are not assigned with keywords ``eog``, ``ecg``,
+        ``emg`` and ``misc`` are assigned as EEG channels). If you are not
+        sure that the channel locations in the header are correct, it is
+        probably safer to replace them with :meth:`mne.io.Raw.set_montage`.
+        Montages can be created/imported with:
+
+        - Standard montages with :func:`mne.channels.make_standard_montage`
+        - Montages for `Compumedics systems
+          `__ with
+          :func:`mne.channels.read_dig_dat`
+        - Other reader functions are listed under *See Also* at
+          :class:`mne.channels.DigMontage`
+
+    Parameters
+    ----------
+    input_fname : path-like
+        Path to the data file.
+    eog : list | tuple | ``'auto'`` | ``'header'``
+        Names of channels or list of indices that should be designated
+        EOG channels. If 'header', VEOG and HEOG channels assigned in the file
+        header are used. If ``'auto'``, channel names containing ``'EOG'`` are
+        used. Defaults to empty tuple.
+ misc : list | tuple + Names of channels or list of indices that should be designated + MISC channels. Defaults to empty tuple. + ecg : list | tuple | ``'auto'`` + Names of channels or list of indices that should be designated + ECG channels. If ``'auto'``, the channel names containing ``'ECG'`` are + used. Defaults to empty tuple. + emg : list | tuple + Names of channels or list of indices that should be designated + EMG channels. If 'auto', the channel names containing 'EMG' are used. + Defaults to empty tuple. + data_format : ``'auto'`` | ``'int16'`` | ``'int32'`` + Defines the data format the data is read in. If ``'auto'``, it is + determined from the file header using ``numsamples`` field. + Defaults to ``'auto'``. + date_format : ``'mm/dd/yy'`` | ``'dd/mm/yy'`` + Format of date in the header. Defaults to ``'mm/dd/yy'``. + header : ``'auto'`` | ``'new'`` | ``'old'`` + Defines the header format. Used to describe how bad channels + are formatted. If auto, reads using old and new header and + if either contain a bad channel make channel bad. + Defaults to ``'auto'``. + + .. versionadded:: 1.6 + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawCNT. + The raw data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawCNT. + + Notes + ----- + .. versionadded:: 0.12 + """ + return RawCNT( + input_fname, + eog=eog, + misc=misc, + ecg=ecg, + emg=emg, + data_format=data_format, + date_format=date_format, + header=header, + preload=preload, + verbose=verbose, + ) + + +def _get_cnt_info(input_fname, eog, ecg, emg, misc, data_format, date_format, header): + """Read the cnt header.""" + data_offset = 900 # Size of the 'SETUP' header. + cnt_info = dict() + # Reading only the fields of interest. Structure of the whole header at + # http://paulbourke.net/dataformats/eeg/ + with open(input_fname, "rb", buffering=0) as fid: + fid.seek(21) + patient_id = read_str(fid, 20) + patient_id = int(patient_id) if patient_id.isdigit() else 0 + fid.seek(121) + patient_name = read_str(fid, 20).split() + last_name = patient_name[0] if len(patient_name) > 0 else "" + first_name = patient_name[-1] if len(patient_name) > 0 else "" + fid.seek(2, 1) + sex = read_str(fid, 1) + if sex == "M": + sex = FIFF.FIFFV_SUBJ_SEX_MALE + elif sex == "F": + sex = FIFF.FIFFV_SUBJ_SEX_FEMALE + else: # can be 'U' + sex = FIFF.FIFFV_SUBJ_SEX_UNKNOWN + hand = read_str(fid, 1) + if hand == "R": + hand = FIFF.FIFFV_SUBJ_HAND_RIGHT + elif hand == "L": + hand = FIFF.FIFFV_SUBJ_HAND_LEFT + else: # can be 'M' for mixed or 'U' + hand = None + fid.seek(205) + session_label = read_str(fid, 20) + + session_date = f"{read_str(fid, 10)} {read_str(fid, 12)}" + meas_date = _session_date_2_meas_date(session_date, date_format) + + fid.seek(370) + n_channels = np.fromfile(fid, dtype="= 0] + fid.seek(438) + lowpass_toggle = np.fromfile(fid, "i1", count=1).item() + highpass_toggle = np.fromfile(fid, "i1", count=1).item() + + # Header has a field for number of samples, but it does not seem to be + # too reliable. That's why we have option for setting n_bytes manually. + fid.seek(864) + n_samples = np.fromfile(fid, dtype=" n_samples: + n_bytes = 4 + n_samples = n_samples_header + warn( + "Annotations are outside data range. " + "Changing data format to 'int32'." 
+ ) + else: + n_bytes = data_size // (n_samples * n_channels) + else: + n_bytes = 2 if data_format == "int16" else 4 + n_samples = data_size // (n_bytes * n_channels) + + # See PR #12393 + if n_samples_header != 0: + n_samples = n_samples_header + # Channel offset refers to the size of blocks per channel in the file. + cnt_info["channel_offset"] = np.fromfile(fid, dtype=" 1: + cnt_info["channel_offset"] //= n_bytes + else: + cnt_info["channel_offset"] = 1 + + ch_names, cals, baselines, chs, pos = (list(), list(), list(), list(), list()) + + bads = list() + _validate_type(header, str, "header") + _check_option("header", header, ("auto", "new", "old")) + for ch_idx in range(n_channels): # ELECTLOC fields + fid.seek(data_offset + 75 * ch_idx) + ch_name = read_str(fid, 10) + ch_names.append(ch_name) + + # Some files have bad channels marked differently in the header. + if header in ("new", "auto"): + fid.seek(data_offset + 75 * ch_idx + 14) + if np.fromfile(fid, dtype="u1", count=1).item(): + bads.append(ch_name) + if header in ("old", "auto"): + fid.seek(data_offset + 75 * ch_idx + 4) + if np.fromfile(fid, dtype="u1", count=1).item(): + bads.append(ch_name) + + fid.seek(data_offset + 75 * ch_idx + 19) + xy = np.fromfile(fid, dtype="f4", count=2) + xy[1] *= -1 # invert y-axis + pos.append(xy) + fid.seek(data_offset + 75 * ch_idx + 47) + # Baselines are subtracted before scaling the data. + baselines.append(np.fromfile(fid, dtype="i2", count=1).item()) + fid.seek(data_offset + 75 * ch_idx + 59) + sensitivity = np.fromfile(fid, dtype="f4", count=1).item() + fid.seek(data_offset + 75 * ch_idx + 71) + cal = np.fromfile(fid, dtype="f4", count=1).item() + cals.append(cal * sensitivity * 1e-6 / 204.8) + + info = _empty_info(sfreq) + if lowpass_toggle == 1: + info["lowpass"] = highcutoff + if highpass_toggle == 1: + info["highpass"] = lowcutoff + subject_info = { + "hand": hand, + "id": patient_id, + "sex": sex, + "first_name": first_name, + "last_name": last_name, + } + subject_info = {key: val for key, val in subject_info.items() if val is not None} + + if eog == "auto": + eog = _find_channels(ch_names, "EOG") + if ecg == "auto": + ecg = _find_channels(ch_names, "ECG") + if emg == "auto": + emg = _find_channels(ch_names, "EMG") + + chs = _create_chs( + ch_names, cals, FIFF.FIFFV_COIL_EEG, FIFF.FIFFV_EEG_CH, eog, ecg, emg, misc + ) + eegs = [idx for idx, ch in enumerate(chs) if ch["coil_type"] == FIFF.FIFFV_COIL_EEG] + coords = _topo_to_sphere(pos, eegs) + locs = np.full((len(chs), 12), np.nan) + locs[:, :3] = coords + dig = _make_dig_points( + dig_ch_pos=dict(zip(ch_names, coords)), + coord_frame="head", + add_missing_fiducials=True, + ) + for ch, loc in zip(chs, locs): + ch.update(loc=loc) + + cnt_info.update(baselines=np.array(baselines), n_samples=n_samples, n_bytes=n_bytes) + + session_label = None if str(session_label) == "" else str(session_label) + info.update( + meas_date=meas_date, + dig=dig, + description=session_label, + subject_info=subject_info, + chs=chs, + ) + info._unlocked = False + info._update_redundant() + info["bads"] = bads + return info, cnt_info + + +@fill_doc +class RawCNT(BaseRaw): + """Raw object from Neuroscan CNT file. + + .. note:: + + The channel positions are read from the file header. Channels that are + not assigned with keywords ``eog``, ``ecg``, ``emg`` and ``misc`` are + assigned as eeg channels. All the eeg channel locations are fit to a + sphere when computing the z-coordinates for the channels. 
If channels + assigned as eeg channels have locations far away from the head (i.e. + x and y coordinates don't fit to a sphere), all the channel locations + will be distorted. If you are not sure that the channel locations in + the header are correct, it is probably safer to use a (standard) + montage. See :func:`mne.channels.make_standard_montage` + + .. note:: + + A CNT file can also come from the EEG manufacturer ANT Neuro, in which case the + function :func:`mne.io.read_raw_ant` should be used. + + Parameters + ---------- + input_fname : path-like + Path to the Neuroscan CNT file. + eog : list | tuple + Names of channels or list of indices that should be designated + EOG channels. If ``'auto'``, the channel names beginning with + ``EOG`` are used. Defaults to empty tuple. + misc : list | tuple + Names of channels or list of indices that should be designated + MISC channels. Defaults to empty tuple. + ecg : list | tuple + Names of channels or list of indices that should be designated + ECG channels. If ``'auto'``, the channel names beginning with + ``ECG`` are used. Defaults to empty tuple. + emg : list | tuple + Names of channels or list of indices that should be designated + EMG channels. If ``'auto'``, the channel names beginning with + ``EMG`` are used. Defaults to empty tuple. + data_format : ``'auto'`` | ``'int16'`` | ``'int32'`` + Defines the data format the data is read in. If ``'auto'``, it is + determined from the file header using ``numsamples`` field. + Defaults to ``'auto'``. + date_format : ``'mm/dd/yy'`` | ``'dd/mm/yy'`` + Format of date in the header. Defaults to ``'mm/dd/yy'``. + header : ``'auto'`` | ``'new'`` | ``'old'`` + Defines the header format. Used to describe how bad channels + are formatted. If auto, reads using old and new header and + if either contain a bad channel make channel bad. + Defaults to ``'auto'``. + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + """ + + def __init__( + self, + input_fname, + eog=(), + misc=(), + ecg=(), + emg=(), + data_format="auto", + date_format="mm/dd/yy", + *, + header="auto", + preload=False, + verbose=None, + ): + _check_option("date_format", date_format, ["mm/dd/yy", "dd/mm/yy"]) + if date_format == "dd/mm/yy": + _date_format = "%d/%m/%y %H:%M:%S" + else: + _date_format = "%m/%d/%y %H:%M:%S" + + input_fname = path.abspath(input_fname) + try: + info, cnt_info = _get_cnt_info( + input_fname, eog, ecg, emg, misc, data_format, _date_format, header + ) + except Exception: + raise RuntimeError( + f"{_explain_exception()}\n" + "WARNING: mne.io.read_raw_cnt " + "supports Neuroscan CNT files only. If this file is an ANT Neuro CNT, " + "please use mne.io.read_raw_ant instead." + ) + last_samps = [cnt_info["n_samples"] - 1] + super().__init__( + info, + preload, + filenames=[input_fname], + raw_extras=[cnt_info], + last_samps=last_samps, + orig_format="int", + verbose=verbose, + ) + + data_format = "int32" if cnt_info["n_bytes"] == 4 else "int16" + self.set_annotations( + _read_annotations_cnt(input_fname, data_format=data_format) + ) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Take a chunk of raw data, multiply by mult or cals, and store.""" + n_channels = self._raw_extras[fi]["orig_nchan"] + if "stim_channel" in self._raw_extras[fi]: + f_channels = n_channels - 1 # Stim channel already read. 
+ stim_ch = self._raw_extras[fi]["stim_channel"] + else: + f_channels = n_channels + stim_ch = None + + channel_offset = self._raw_extras[fi]["channel_offset"] + baselines = self._raw_extras[fi]["baselines"] + n_bytes = self._raw_extras[fi]["n_bytes"] + n_samples = self._raw_extras[fi]["n_samples"] + dtype = "= (channel_offset / 2): # Extend at the end. + extra_samps += chunk_size + count = n_samps // channel_offset * chunk_size + extra_samps + n_chunks = count // chunk_size + samps = np.fromfile(fid, dtype=dtype, count=count) + samps = samps.reshape((n_chunks, f_channels, channel_offset), order="C") + + # Intermediate shaping to chunk sizes. + block = np.zeros((n_channels, channel_offset * n_chunks)) + for set_idx, row in enumerate(samps): # Final shape. + block_slice = slice( + set_idx * channel_offset, (set_idx + 1) * channel_offset + ) + block[:f_channels, block_slice] = row + if "stim_channel" in self._raw_extras[fi]: + _data_start = start + sample_start + _data_stop = start + sample_stop + block[-1] = stim_ch[_data_start:_data_stop] + one[idx] = block[idx, s_offset : n_samps + s_offset] + + one[idx] -= baselines[idx][:, None] + _mult_cal_one(data[:, sample_start:sample_stop], one, idx, cals, mult) diff --git a/mne/io/constants.py b/mne/io/constants.py new file mode 100644 index 0000000..50cf567 --- /dev/null +++ b/mne/io/constants.py @@ -0,0 +1,7 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .._fiff.constants import FIFF + +__all__ = ["FIFF"] diff --git a/mne/io/ctf/__init__.py b/mne/io/ctf/__init__.py new file mode 100644 index 0000000..538d63f --- /dev/null +++ b/mne/io/ctf/__init__.py @@ -0,0 +1,7 @@ +"""CTF module for conversion to FIF.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .ctf import read_raw_ctf, RawCTF diff --git a/mne/io/ctf/constants.py b/mne/io/ctf/constants.py new file mode 100644 index 0000000..a99b627 --- /dev/null +++ b/mne/io/ctf/constants.py @@ -0,0 +1,38 @@ +"""CTF constants.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ...utils import BunchConst + + +CTF = BunchConst() + +# ctf_types.h +CTF.CTFV_MAX_AVERAGE_BINS = 8 +CTF.CTFV_MAX_COILS = 8 +CTF.CTFV_MAX_BALANCING = 50 +CTF.CTFV_SENSOR_LABEL = 31 + +CTF.CTFV_COIL_LPA = 1 +CTF.CTFV_COIL_RPA = 2 +CTF.CTFV_COIL_NAS = 3 +CTF.CTFV_COIL_SPARE = 4 + +CTF.CTFV_REF_MAG_CH = 0 +CTF.CTFV_REF_GRAD_CH = 1 +CTF.CTFV_MEG_CH = 5 +CTF.CTFV_EEG_CH = 9 +CTF.CTFV_STIM_CH = 11 + +CTF.CTFV_FILTER_LOWPASS = 1 +CTF.CTFV_FILTER_HIGHPASS = 2 + +# read_res4.c +CTF.FUNNY_POS = 1844 + +# read_write_data.c +CTF.HEADER_SIZE = 8 +CTF.BLOCK_SIZE = 2000 +CTF.SYSTEM_CLOCK_CH = "SCLK01-177" diff --git a/mne/io/ctf/ctf.py b/mne/io/ctf/ctf.py new file mode 100644 index 0000000..44a4e39 --- /dev/null +++ b/mne/io/ctf/ctf.py @@ -0,0 +1,303 @@ +"""Conversion tool from CTF to FIF.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
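+# Typical call (hedged usage sketch; "recording.ds" is a hypothetical path,
+# not a file shipped with this module):
+#
+#     raw = read_raw_ctf("recording.ds", system_clock="truncate",
+#                        preload=False)
+#     print(raw.info["sfreq"], len(raw.ch_names))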
+ +import os + +import numpy as np + +from ..._fiff._digitization import _format_dig_points +from ..._fiff.utils import _blk_read_lims, _mult_cal_one +from ...utils import ( + _check_fname, + _check_option, + _clean_names, + fill_doc, + logger, + verbose, +) +from ..base import BaseRaw +from .constants import CTF +from .eeg import _read_eeg, _read_pos +from .hc import _read_hc +from .info import _annotate_bad_segments, _compose_meas_info, _read_bad_chans +from .markers import _read_annotations_ctf_call +from .res4 import _make_ctf_name, _read_res4 +from .trans import _make_ctf_coord_trans_set + + +@fill_doc +def read_raw_ctf( + directory, system_clock="truncate", preload=False, clean_names=False, verbose=None +) -> "RawCTF": + """Raw object from CTF directory. + + Parameters + ---------- + directory : path-like + Path to the CTF data (ending in ``'.ds'``). + system_clock : str + How to treat the system clock. Use "truncate" (default) to truncate + the data file when the system clock drops to zero, and use "ignore" + to ignore the system clock (e.g., if head positions are measured + multiple times during a recording). + %(preload)s + clean_names : bool, optional + If True main channel names and compensation channel names will + be cleaned from CTF suffixes. The default is False. + %(verbose)s + + Returns + ------- + raw : instance of RawCTF + The raw data. + + Notes + ----- + .. versionadded:: 0.11 + + To read in the Polhemus digitization data (for example, from + a .pos file), include the file in the CTF directory. The + points will then automatically be read into the `mne.io.Raw` + instance via `mne.io.read_raw_ctf`. + """ + return RawCTF( + directory, + system_clock, + preload=preload, + clean_names=clean_names, + verbose=verbose, + ) + + +@fill_doc +class RawCTF(BaseRaw): + """Raw object from CTF directory. + + Parameters + ---------- + directory : path-like + Path to the CTF data (ending in ``'.ds'``). + system_clock : str + How to treat the system clock. Use ``"truncate"`` (default) to truncate + the data file when the system clock drops to zero, and use ``"ignore"`` + to ignore the system clock (e.g., if head positions are measured + multiple times during a recording). + %(preload)s + clean_names : bool, optional + If True main channel names and compensation channel names will + be cleaned from CTF suffixes. The default is False. + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. 
+ """ + + @verbose + def __init__( + self, + directory, + system_clock="truncate", + preload=False, + verbose=None, + clean_names=False, + ): + # adapted from mne_ctf2fiff.c + directory = str( + _check_fname(directory, "read", True, "directory", need_dir=True) + ) + if not directory.endswith(".ds"): + raise TypeError( + f'directory must be a directory ending with ".ds", got {directory}' + ) + _check_option("system_clock", system_clock, ["ignore", "truncate"]) + logger.info(f"ds directory : {directory}") + res4 = _read_res4(directory) # Read the magical res4 file + coils = _read_hc(directory) # Read the coil locations + eeg = _read_eeg(directory) # Read the EEG electrode loc info + + # Investigate the coil location data to get the coordinate trans + coord_trans = _make_ctf_coord_trans_set(res4, coils) + + digs = _read_pos(directory, coord_trans) + + # Compose a structure which makes fiff writing a piece of cake + info = _compose_meas_info(res4, coils, coord_trans, eeg) + with info._unlock(): + info["dig"] += digs + info["dig"] = _format_dig_points(info["dig"]) + info["bads"] += _read_bad_chans(directory, info) + + # Determine how our data is distributed across files + fnames = list() + last_samps = list() + raw_extras = list() + missing_names = list() + no_samps = list() + while True: + suffix = "meg4" if len(fnames) == 0 else f"{len(fnames)}_meg4" + meg4_name, found = _make_ctf_name(directory, suffix, raise_error=False) + if not found: + missing_names.append(os.path.relpath(meg4_name, directory)) + break + # check how much data is in the file + sample_info = _get_sample_info(meg4_name, res4, system_clock) + if sample_info["n_samp"] == 0: + no_samps.append(os.path.relpath(meg4_name, directory)) + break + if len(fnames) == 0: + buffer_size_sec = sample_info["block_size"] / info["sfreq"] + else: + buffer_size_sec = 1.0 + fnames.append(meg4_name) + last_samps.append(sample_info["n_samp"] - 1) + raw_extras.append(sample_info) + first_samps = [0] * len(last_samps) + if len(fnames) == 0: + raise OSError( + f"Could not find any data, could not find the following " + f"file(s): {missing_names}, and the following file(s) had no " + f"valid samples: {no_samps}" + ) + super().__init__( + info, + preload, + first_samps=first_samps, + last_samps=last_samps, + filenames=fnames, + raw_extras=raw_extras, + orig_format="int", + buffer_size_sec=buffer_size_sec, + verbose=verbose, + ) + + # Add bad segments as Annotations (correct for start time) + start_time = -res4["pre_trig_pts"] / float(info["sfreq"]) + annot = _annotate_bad_segments(directory, start_time, info["meas_date"]) + marker_annot = _read_annotations_ctf_call( + directory=directory, + total_offset=(res4["pre_trig_pts"] / res4["sfreq"]), + trial_duration=(res4["nsamp"] / res4["sfreq"]), + meas_date=info["meas_date"], + ) + annot = marker_annot if annot is None else annot + marker_annot + self.set_annotations(annot) + if clean_names: + _clean_names_inst(self) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + si = self._raw_extras[fi] + offset = 0 + trial_start_idx, r_lims, d_lims = _blk_read_lims( + start, stop, int(si["block_size"]) + ) + with open(self.filenames[fi], "rb") as fid: + for bi in range(len(r_lims)): + samp_offset = (bi + trial_start_idx) * si["res4_nsamp"] + n_read = min(si["n_samp_tot"] - samp_offset, si["block_size"]) + # read the chunk of data + # have to be careful on Windows and make sure we are using + # 64-bit integers here + with np.errstate(over="raise"): + pos = 
np.int64(CTF.HEADER_SIZE) + pos += np.int64(samp_offset) * si["n_chan"] * 4 + fid.seek(pos, 0) + this_data = np.fromfile(fid, ">i4", count=si["n_chan"] * n_read) + this_data.shape = (si["n_chan"], n_read) + this_data = this_data[:, r_lims[bi, 0] : r_lims[bi, 1]] + data_view = data[:, d_lims[bi, 0] : d_lims[bi, 1]] + _mult_cal_one(data_view, this_data, idx, cals, mult) + offset += n_read + + +def _clean_names_inst(inst): + """Clean up CTF suffixes from channel names.""" + mapping = dict(zip(inst.ch_names, _clean_names(inst.ch_names))) + inst.rename_channels(mapping) + for comp in inst.info["comps"]: + for key in ("row_names", "col_names"): + comp["data"][key] = _clean_names(comp["data"][key]) + + +def _get_sample_info(fname, res4, system_clock): + """Determine the number of valid samples.""" + logger.info(f"Finding samples for {fname}: ") + if CTF.SYSTEM_CLOCK_CH in res4["ch_names"]: + clock_ch = res4["ch_names"].index(CTF.SYSTEM_CLOCK_CH) + else: + clock_ch = None + for k, ch in enumerate(res4["chs"]): + if ch["ch_name"] == CTF.SYSTEM_CLOCK_CH: + clock_ch = k + break + with open(fname, "rb") as fid: + fid.seek(0, os.SEEK_END) + st_size = fid.tell() + fid.seek(0, 0) + if (st_size - CTF.HEADER_SIZE) % (4 * res4["nsamp"] * res4["nchan"]) != 0: + raise RuntimeError( + "The number of samples is not an even multiple of the trial size" + ) + n_samp_tot = (st_size - CTF.HEADER_SIZE) // (4 * res4["nchan"]) + n_trial = n_samp_tot // res4["nsamp"] + n_samp = n_samp_tot + if clock_ch is None: + logger.info( + " System clock channel is not available, assuming " + "all samples to be valid." + ) + elif system_clock == "ignore": + logger.info(" System clock channel is available, but ignored.") + else: # use it + logger.info( + " System clock channel is available, checking " + "which samples are valid." + ) + for t in range(n_trial): + # Skip to the correct trial + samp_offset = t * res4["nsamp"] + offset = ( + CTF.HEADER_SIZE + + (samp_offset * res4["nchan"] + (clock_ch * res4["nsamp"])) * 4 + ) + fid.seek(offset, 0) + this_data = np.fromfile(fid, ">i4", res4["nsamp"]) + if len(this_data) != res4["nsamp"]: + raise RuntimeError(f"Cannot read data for trial {t+1}.") + end = np.where(this_data == 0)[0] + if len(end) > 0: + n_samp = samp_offset + end[0] + break + if n_samp < res4["nsamp"]: + n_trial = 1 + logger.info( + " %d x %d = %d samples from %d chs", + n_trial, + n_samp, + n_samp, + res4["nchan"], + ) + else: + n_trial = n_samp // res4["nsamp"] + n_omit = n_samp_tot - n_samp + logger.info( + " %d x %d = %d samples from %d chs", + n_trial, + res4["nsamp"], + n_samp, + res4["nchan"], + ) + if n_omit != 0: + logger.info(" %d samples omitted at the end", n_omit) + + return dict( + n_samp=n_samp, + n_samp_tot=n_samp_tot, + block_size=res4["nsamp"], + res4_nsamp=res4["nsamp"], + n_chan=res4["nchan"], + ) diff --git a/mne/io/ctf/eeg.py b/mne/io/ctf/eeg.py new file mode 100644 index 0000000..cd39bc9 --- /dev/null +++ b/mne/io/ctf/eeg.py @@ -0,0 +1,108 @@ +"""Read .eeg files.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
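+# The .pos parser below accepts lines of the form "id [label] x y z" with
+# coordinates in centimetres. Sketch of the per-line conversion it applies
+# (the line itself is made up for illustration):
+#
+#     parts = "1 Pz 1.0 2.0 3.0".split()
+#     r = np.array([float(p) for p in parts[-3:]]) / 100.0  # cm -> m
+#     assert r.tolist() == [0.01, 0.02, 0.03]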
+ +from os import listdir +from os.path import join + +import numpy as np + +from ..._fiff.constants import FIFF +from ...transforms import apply_trans +from ...utils import logger, warn +from .res4 import _make_ctf_name + +_cardinal_dict = dict( + nasion=FIFF.FIFFV_POINT_NASION, + lpa=FIFF.FIFFV_POINT_LPA, + left=FIFF.FIFFV_POINT_LPA, + rpa=FIFF.FIFFV_POINT_RPA, + right=FIFF.FIFFV_POINT_RPA, +) + + +def _read_eeg(directory): + """Read the .eeg file.""" + # Missing file is ok + fname, found = _make_ctf_name(directory, "eeg", raise_error=False) + if not found: + logger.info(" Separate EEG position data file not present.") + return + eeg = dict( + labels=list(), + kinds=list(), + ids=list(), + rr=list(), + np=0, + assign_to_chs=True, + coord_frame=FIFF.FIFFV_MNE_COORD_CTF_HEAD, + ) + with open(fname, "rb") as fid: + for line in fid: + line = line.strip() + if len(line) > 0: + parts = line.decode("utf-8").split() + if len(parts) != 5: + raise RuntimeError(f"Illegal data in EEG position file: {line}") + r = np.array([float(p) for p in parts[2:]]) / 100.0 + if (r * r).sum() > 1e-4: + label = parts[1] + eeg["labels"].append(label) + eeg["rr"].append(r) + id_ = _cardinal_dict.get(label.lower(), int(parts[0])) + if label.lower() in _cardinal_dict: + kind = FIFF.FIFFV_POINT_CARDINAL + else: + kind = FIFF.FIFFV_POINT_EXTRA + eeg["ids"].append(id_) + eeg["kinds"].append(kind) + eeg["np"] += 1 + logger.info(" Separate EEG position data file read.") + return eeg + + +def _read_pos(directory, transformations): + """Read the .pos file and return eeg positions as dig extra points.""" + fname = [join(directory, f) for f in listdir(directory) if f.endswith(".pos")] + if len(fname) < 1: + return list() + elif len(fname) > 1: + warn(" Found multiple pos files. Extra digitizer points not added.") + return list() + logger.info(f" Reading digitizer points from {fname}...") + if transformations["t_ctf_head_head"] is None: + warn(" No transformation found. Extra digitizer points not added.") + return list() + fname = fname[0] + digs = list() + i = 2000 + with open(fname) as fid: + for line in fid: + line = line.strip() + if len(line) > 0: + parts = line.split() + # The lines can have 4 or 5 parts. First part is for the id, + # which can be an int or a string. The last three are for xyz + # coordinates. The extra part is for additional info + # (e.g. 'Pz', 'Cz') which is ignored. + if len(parts) not in [4, 5]: + continue + try: + ident = int(parts[0]) + 1000 + except ValueError: # if id is not an int + ident = i + i += 1 + dig = dict( + kind=FIFF.FIFFV_POINT_EXTRA, + ident=ident, + r=list(), + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + r = np.array([float(p) for p in parts[-3:]]) / 100.0 # cm to m + if (r * r).sum() > 1e-4: + r = apply_trans(transformations["t_ctf_head_head"], r) + dig["r"] = r + digs.append(dig) + return digs diff --git a/mne/io/ctf/hc.py b/mne/io/ctf/hc.py new file mode 100644 index 0000000..22acced --- /dev/null +++ b/mne/io/ctf/hc.py @@ -0,0 +1,89 @@ +"""Read .hc files.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
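+# Each coil entry in the .hc file is a descriptor line followed by three
+# coordinate lines of the form "<axis> = <value in cm>". Sketch of how one
+# coordinate line is decoded below (the value is made up):
+#
+#     sp = "x = 5.0".split(" ")
+#     assert sp[0] == "x" and sp[1] == "="
+#     meters = float(sp[2]) / 100.0  # centimetres -> metres
+#     assert meters == 0.05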
+ +import numpy as np + +from ..._fiff.constants import FIFF +from ...utils import logger +from .constants import CTF +from .res4 import _make_ctf_name + +_kind_dict = { + "nasion": CTF.CTFV_COIL_NAS, + "left ear": CTF.CTFV_COIL_LPA, + "right ear": CTF.CTFV_COIL_RPA, + "spare": CTF.CTFV_COIL_SPARE, +} + +_coord_dict = { + "relative to dewar": FIFF.FIFFV_MNE_COORD_CTF_DEVICE, + "relative to head": FIFF.FIFFV_MNE_COORD_CTF_HEAD, +} + + +def _read_one_coil_point(fid): + """Read coil coordinate information from the hc file.""" + # Descriptor + one = "#" + while len(one) > 0 and one[0] == "#": + one = fid.readline() + if len(one) == 0: + return None + one = one.strip().decode("utf-8") + if "Unable" in one: + raise RuntimeError("HPI information not available") + + # Hopefully this is an unambiguous interpretation + p = dict() + p["valid"] = "measured" in one + for key, val in _coord_dict.items(): + if key in one: + p["coord_frame"] = val + break + else: + p["coord_frame"] = -1 + + for key, val in _kind_dict.items(): + if key in one: + p["kind"] = val + break + else: + p["kind"] = -1 + + # Three coordinates + p["r"] = np.empty(3) + for ii, coord in enumerate("xyz"): + sp = fid.readline().decode("utf-8").strip() + if len(sp) == 0: # blank line + continue + sp = sp.split(" ") + if len(sp) != 3 or sp[0] != coord or sp[1] != "=": + raise RuntimeError(f"Bad line: {one}") + # We do not deal with centimeters + p["r"][ii] = float(sp[2]) / 100.0 + return p + + +def _read_hc(directory): + """Read the hc file to get the HPI info and to prepare for coord trans.""" + fname, found = _make_ctf_name(directory, "hc", raise_error=False) + if not found: + logger.info(" hc data not present") + return None + s = list() + with open(fname, "rb") as fid: + while True: + p = _read_one_coil_point(fid) + if p is None: + # First point bad indicates that the file is empty + if len(s) == 0: + logger.info("hc file empty, no data present") + return None + # Returns None if at EOF + logger.info(" hc data read.") + return s + if p["valid"]: + s.append(p) diff --git a/mne/io/ctf/info.py b/mne/io/ctf/info.py new file mode 100644 index 0000000..1b96d8b --- /dev/null +++ b/mne/io/ctf/info.py @@ -0,0 +1,561 @@ +"""Populate measurement info.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
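+ # Orientation sketch (hypothetical wiring; the real call order lives in the
+ # CTF reader module): the pieces parsed by the sibling modules are combined
+ # here roughly as
+ #
+ #     res4 = _read_res4(directory)                    # res4.py
+ #     coils = _read_hc(directory)                     # hc.py
+ #     trans = _make_ctf_coord_trans_set(res4, coils)  # trans.py
+ #     eeg = _read_eeg(directory)                      # eeg.py
+ #     info = _compose_meas_info(res4, coils, trans, eeg)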
+ +import os.path as op +from calendar import timegm +from time import strptime + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.ctf_comp import _add_kind, _calibrate_comp +from ..._fiff.meas_info import _empty_info +from ..._fiff.write import get_new_file_id +from ...annotations import Annotations +from ...transforms import ( + _coord_frame_name, + apply_trans, + combine_transforms, + invert_transform, +) +from ...utils import _clean_names, logger, warn +from .constants import CTF + +_ctf_to_fiff = { + CTF.CTFV_COIL_LPA: FIFF.FIFFV_POINT_LPA, + CTF.CTFV_COIL_RPA: FIFF.FIFFV_POINT_RPA, + CTF.CTFV_COIL_NAS: FIFF.FIFFV_POINT_NASION, +} + + +def _pick_isotrak_and_hpi_coils(res4, coils, t): + """Pick the HPI coil locations given in device coordinates.""" + if coils is None: + return list(), list() + dig = list() + hpi_result = dict(dig_points=list()) + n_coil_dev = 0 + n_coil_head = 0 + for p in coils: + if p["valid"]: + if p["kind"] in [CTF.CTFV_COIL_LPA, CTF.CTFV_COIL_RPA, CTF.CTFV_COIL_NAS]: + kind = FIFF.FIFFV_POINT_CARDINAL + ident = _ctf_to_fiff[p["kind"]] + else: # CTF.CTFV_COIL_SPARE + kind = FIFF.FIFFV_POINT_HPI + ident = p["kind"] + if p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE: + if t is None or t["t_ctf_dev_dev"] is None: + raise RuntimeError( + "No coordinate transformation " + "available for HPI coil locations" + ) + d = dict( + kind=kind, + ident=ident, + r=apply_trans(t["t_ctf_dev_dev"], p["r"]), + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + ) + hpi_result["dig_points"].append(d) + n_coil_dev += 1 + elif p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + if t is None or t["t_ctf_head_head"] is None: + raise RuntimeError( + "No coordinate transformation " + "available for (virtual) Polhemus data" + ) + d = dict( + kind=kind, + ident=ident, + r=apply_trans(t["t_ctf_head_head"], p["r"]), + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + dig.append(d) + n_coil_head += 1 + if n_coil_head > 0: + logger.info(" Polhemus data for %d HPI coils added", n_coil_head) + if n_coil_dev > 0: + logger.info( + " Device coordinate locations for %d HPI coils added", n_coil_dev + ) + return dig, [hpi_result] + + +def _convert_time(date_str, time_str): + """Convert date and time strings to float time.""" + if date_str == time_str == "": + date_str = "01/01/1970" + time_str = "00:00:00" + logger.info( + "No date or time found, setting to the start of the " + "POSIX epoch (1970/01/01 midnight)" + ) + + for fmt in ("%d/%m/%Y", "%d-%b-%Y", "%a, %b %d, %Y", "%Y/%m/%d"): + try: + date = strptime(date_str.strip(), fmt) + except ValueError: + pass + else: + break + else: + raise RuntimeError( + f"Illegal date: {date_str}.\nIf the language of the date does not " + "correspond to your local machine's language try to set the " + "locale to the language of the date string:\n" + 'locale.setlocale(locale.LC_ALL, "en_US")' + ) + + for fmt in ("%H:%M:%S", "%H:%M"): + try: + time = strptime(time_str, fmt) + except ValueError: + pass + else: + break + else: + raise RuntimeError(f"Illegal time: {time_str}") + # MNE-C uses mktime which uses local time, but here we instead decouple + # conversion location from the process, and instead assume that the + # acquisition was in GMT. This will be wrong for most sites, but at least + # the value we obtain here won't depend on the geographical location + # that the file was converted. 
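+ # For example (hypothetical values): date_str == "11/05/2013" parses with
+ # "%d/%m/%Y" as 11 May 2013, and timegm() below converts the combined
+ # struct to POSIX seconds as if the clock had been GMT.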
+ res = timegm( + ( + date.tm_year, + date.tm_mon, + date.tm_mday, + time.tm_hour, + time.tm_min, + time.tm_sec, + date.tm_wday, + date.tm_yday, + date.tm_isdst, + ) + ) + return res + + +def _get_plane_vectors(ez): + """Get two orthogonal vectors orthogonal to ez (ez will be modified).""" + assert ez.shape == (3,) + ez_len = np.sqrt(np.sum(ez * ez)) + if ez_len == 0: + raise RuntimeError("Zero length normal. Cannot proceed.") + if np.abs(ez_len - np.abs(ez[2])) < 1e-5: # ez already in z-direction + ex = np.array([1.0, 0.0, 0.0]) + else: + ex = np.zeros(3) + if ez[1] < ez[2]: + ex[0 if ez[0] < ez[1] else 1] = 1.0 + else: + ex[0 if ez[0] < ez[2] else 2] = 1.0 + ez /= ez_len + ex -= np.dot(ez, ex) * ez + ex /= np.sqrt(np.sum(ex * ex)) + ey = np.cross(ez, ex) + return ex, ey + + +def _at_origin(x): + """Determine if a vector is at the origin.""" + return np.sum(x * x) < 1e-8 + + +def _check_comp_ch(cch, kind, desired=None): + if desired is None: + desired = cch["grad_order_no"] + if cch["grad_order_no"] != desired: + raise RuntimeError( + f"{kind} channel with inconsistent compensation " + f"grade {cch['grad_order_no']}, should be {desired}" + ) + return desired + + +def _convert_channel_info(res4, t, use_eeg_pos): + """Convert CTF channel information to fif format.""" + nmeg = neeg = nstim = nmisc = nref = 0 + chs = list() + this_comp = None + for k, cch in enumerate(res4["chs"]): + cal = float(1.0 / (cch["proper_gain"] * cch["qgain"])) + ch = dict( + scanno=k + 1, + range=1.0, + cal=cal, + loc=np.full(12, np.nan), + unit_mul=FIFF.FIFF_UNITM_NONE, + ch_name=cch["ch_name"][:15], + coil_type=FIFF.FIFFV_COIL_NONE, + ) + del k + chs.append(ch) + # Create the channel position information + if cch["sensor_type_index"] in ( + CTF.CTFV_REF_MAG_CH, + CTF.CTFV_REF_GRAD_CH, + CTF.CTFV_MEG_CH, + ): + # Extra check for a valid MEG channel + if ( + np.sum(cch["coil"]["pos"][0] ** 2) < 1e-6 + or np.sum(cch["coil"]["norm"][0] ** 2) < 1e-6 + ): + nmisc += 1 + ch.update( + logno=nmisc, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_MISC_CH, + unit=FIFF.FIFF_UNIT_V, + ) + text = "MEG" + if cch["sensor_type_index"] != CTF.CTFV_MEG_CH: + text += " ref" + warn( + f"{text} channel {ch['ch_name']} did not have position " + "assigned, so it was changed to a MISC channel" + ) + continue + ch["unit"] = FIFF.FIFF_UNIT_T + # Set up the local coordinate frame + r0 = cch["coil"]["pos"][0].copy() + ez = cch["coil"]["norm"][0].copy() + # It turns out that positive proper_gain requires swapping + # of the normal direction + if cch["proper_gain"] > 0.0: + ez *= -1 + # Check how the other vectors should be defined + off_diag = False + # Default: ex and ey are arbitrary in the plane normal to ez + if cch["sensor_type_index"] == CTF.CTFV_REF_GRAD_CH: + # The off-diagonal gradiometers are an exception: + # + # We use the same convention for ex as for Neuromag planar + # gradiometers: ex pointing in the positive gradient direction + diff = cch["coil"]["pos"][0] - cch["coil"]["pos"][1] + size = np.sqrt(np.sum(diff * diff)) + if size > 0.0: + diff /= size + # Is ez normal to the line joining the coils? 
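+ # (diff is normalized above and ez is a coil normal of ~unit length, so
+ # the dot product is ~cos(angle); |cos| < 1e-3 accepts pairs within about
+ # 0.06 degrees of perpendicular)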
+ if np.abs(np.dot(diff, ez)) < 1e-3: + off_diag = True + # Handle the off-diagonal gradiometer coordinate system + r0 -= size * diff / 2.0 + ex = diff + ey = np.cross(ez, ex) + else: + ex, ey = _get_plane_vectors(ez) + else: + ex, ey = _get_plane_vectors(ez) + # Transform into a Neuromag-like device coordinate system + ch["loc"] = np.concatenate( + [ + apply_trans(t["t_ctf_dev_dev"], r0), + apply_trans(t["t_ctf_dev_dev"], ex, move=False), + apply_trans(t["t_ctf_dev_dev"], ey, move=False), + apply_trans(t["t_ctf_dev_dev"], ez, move=False), + ] + ) + del r0, ex, ey, ez + # Set the coil type + if cch["sensor_type_index"] == CTF.CTFV_REF_MAG_CH: + ch["kind"] = FIFF.FIFFV_REF_MEG_CH + ch["coil_type"] = FIFF.FIFFV_COIL_CTF_REF_MAG + nref += 1 + ch["logno"] = nref + elif cch["sensor_type_index"] == CTF.CTFV_REF_GRAD_CH: + ch["kind"] = FIFF.FIFFV_REF_MEG_CH + if off_diag: + ch["coil_type"] = FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD + else: + ch["coil_type"] = FIFF.FIFFV_COIL_CTF_REF_GRAD + nref += 1 + ch["logno"] = nref + else: + this_comp = _check_comp_ch(cch, "Gradiometer", this_comp) + ch["kind"] = FIFF.FIFFV_MEG_CH + ch["coil_type"] = FIFF.FIFFV_COIL_CTF_GRAD + nmeg += 1 + ch["logno"] = nmeg + # Encode the software gradiometer order + ch["coil_type"] = int(ch["coil_type"] | (cch["grad_order_no"] << 16)) + ch["coord_frame"] = FIFF.FIFFV_COORD_DEVICE + elif cch["sensor_type_index"] == CTF.CTFV_EEG_CH: + coord_frame = FIFF.FIFFV_COORD_HEAD + if use_eeg_pos: + # EEG electrode coordinates may be present but in the + # CTF head frame + ch["loc"][:3] = cch["coil"]["pos"][0] + if not _at_origin(ch["loc"][:3]): + if t["t_ctf_head_head"] is None: + warn( + f"EEG electrode ({ch['ch_name']}) location omitted because " + "of missing HPI information" + ) + ch["loc"].fill(np.nan) + coord_frame = FIFF.FIFFV_MNE_COORD_CTF_HEAD + else: + ch["loc"][:3] = apply_trans(t["t_ctf_head_head"], ch["loc"][:3]) + neeg += 1 + ch.update( + logno=neeg, + kind=FIFF.FIFFV_EEG_CH, + unit=FIFF.FIFF_UNIT_V, + coord_frame=coord_frame, + coil_type=FIFF.FIFFV_COIL_EEG, + ) + elif cch["sensor_type_index"] == CTF.CTFV_STIM_CH: + nstim += 1 + ch.update( + logno=nstim, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_STIM_CH, + unit=FIFF.FIFF_UNIT_V, + ) + else: + nmisc += 1 + ch.update( + logno=nmisc, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_MISC_CH, + unit=FIFF.FIFF_UNIT_V, + ) + return chs + + +def _comp_sort_keys(c): + """Sort the compensation data.""" + return (int(c["coeff_type"]), int(c["scanno"])) + + +def _check_comp(comp): + """Check that conversion to named matrices is possible.""" + ref_sens = None + kind = -1 + for k, c_k in enumerate(comp): + if c_k["coeff_type"] != kind: + c_ref = c_k + ref_sens = c_ref["sensors"] + kind = c_k["coeff_type"] + elif not c_k["sensors"] == ref_sens: + raise RuntimeError("Cannot use an uneven compensation matrix") + + +def _conv_comp(comp, first, last, chs): + """Add a new converted compensation data item.""" + ch_names = [c["ch_name"] for c in chs] + n_col = comp[first]["ncoeff"] + col_names = comp[first]["sensors"][:n_col] + row_names = [comp[p]["sensor_name"] for p in range(first, last + 1)] + mask = np.isin(col_names, ch_names) # missing channels excluded + col_names = np.array(col_names)[mask].tolist() + n_col = len(col_names) + n_row = len(row_names) + ccomp = dict(ctfkind=comp[first]["coeff_type"], save_calibrated=False) + _add_kind(ccomp) + + data = np.empty((n_row, n_col)) + for ii, coeffs in enumerate(comp[first : last + 1]): + # Pick the elements to the 
matrix + data[ii, :] = coeffs["coeffs"][mask] + ccomp["data"] = dict( + row_names=row_names, + col_names=col_names, + data=data, + nrow=len(row_names), + ncol=len(col_names), + ) + mk = ("proper_gain", "qgain") + _calibrate_comp(ccomp, chs, row_names, col_names, mult_keys=mk, flip=True) + return ccomp + + +def _convert_comp_data(res4): + """Convert the compensation data into named matrices.""" + if res4["ncomp"] == 0: + return + # Sort the coefficients in our favorite order + res4["comp"] = sorted(res4["comp"], key=_comp_sort_keys) + # Check that all items for a given compensation type have the correct + # number of channels + _check_comp(res4["comp"]) + # Create named matrices + first = 0 + kind = -1 + comps = list() + for k in range(len(res4["comp"])): + if res4["comp"][k]["coeff_type"] != kind: + if k > 0: + comps.append(_conv_comp(res4["comp"], first, k - 1, res4["chs"])) + kind = res4["comp"][k]["coeff_type"] + first = k + comps.append(_conv_comp(res4["comp"], first, k, res4["chs"])) + return comps + + +def _pick_eeg_pos(c): + """Pick EEG positions.""" + eeg = dict( + coord_frame=FIFF.FIFFV_COORD_HEAD, + assign_to_chs=False, + labels=list(), + ids=list(), + rr=list(), + kinds=list(), + np=0, + ) + for ch in c["chs"]: + if ch["kind"] == FIFF.FIFFV_EEG_CH and not _at_origin(ch["loc"][:3]): + eeg["labels"].append(ch["ch_name"]) + eeg["ids"].append(ch["logno"]) + eeg["rr"].append(ch["loc"][:3]) + eeg["kinds"].append(FIFF.FIFFV_POINT_EEG) + eeg["np"] += 1 + if eeg["np"] == 0: + return None + logger.info("Picked positions of %d EEG channels from channel info", eeg["np"]) + return eeg + + +def _add_eeg_pos(eeg, t, c): + """Pick the (virtual) EEG position data.""" + if eeg is None: + return + if t is None or t["t_ctf_head_head"] is None: + raise RuntimeError( + "No coordinate transformation available for EEG position data" + ) + eeg_assigned = 0 + if eeg["assign_to_chs"]: + for k in range(eeg["np"]): + # Look for a channel name match + for ch in c["chs"]: + if ch["ch_name"].lower() == eeg["labels"][k].lower(): + r0 = ch["loc"][:3] + r0[:] = eeg["rr"][k] + if eeg["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + r0[:] = apply_trans(t["t_ctf_head_head"], r0) + elif eeg["coord_frame"] != FIFF.FIFFV_COORD_HEAD: + raise RuntimeError( + "Illegal coordinate frame for EEG electrode " + f"positions : {_coord_frame_name(eeg['coord_frame'])}" + ) + # Use the logical channel number as an identifier + eeg["ids"][k] = ch["logno"] + eeg["kinds"][k] = FIFF.FIFFV_POINT_EEG + eeg_assigned += 1 + break + + # Add these to the Polhemus data + fid_count = eeg_count = extra_count = 0 + for k in range(eeg["np"]): + d = dict( + r=eeg["rr"][k].copy(), + kind=eeg["kinds"][k], + ident=eeg["ids"][k], + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + c["dig"].append(d) + if eeg["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + d["r"] = apply_trans(t["t_ctf_head_head"], d["r"]) + elif eeg["coord_frame"] != FIFF.FIFFV_COORD_HEAD: + raise RuntimeError( + "Illegal coordinate frame for EEG electrode positions: " + + _coord_frame_name(eeg["coord_frame"]) + ) + if eeg["kinds"][k] == FIFF.FIFFV_POINT_CARDINAL: + fid_count += 1 + elif eeg["kinds"][k] == FIFF.FIFFV_POINT_EEG: + eeg_count += 1 + else: + extra_count += 1 + if eeg_assigned > 0: + logger.info( + " %d EEG electrode locations assigned to channel info.", eeg_assigned + ) + for count, kind in zip( + (fid_count, eeg_count, extra_count), + ("fiducials", "EEG locations", "extra points"), + ): + if count > 0: + logger.info(" %d %s added to Polhemus data.", count, kind) + + 
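+ # Sanity sketch for _get_plane_vectors (defined above) -- a doctest-style
+ # illustration, not part of the module's tests:
+ #
+ #     >>> ez = np.array([0., 0., 2.])
+ #     >>> ex, ey = _get_plane_vectors(ez)  # normalizes ez in place
+ #     >>> ex, ey, ez
+ #     (array([1., 0., 0.]), array([0., 1., 0.]), array([0., 0., 1.]))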
+_filt_map = {CTF.CTFV_FILTER_LOWPASS: "lowpass", CTF.CTFV_FILTER_HIGHPASS: "highpass"} + + +def _compose_meas_info(res4, coils, trans, eeg): + """Create meas info from CTF data.""" + info = _empty_info(res4["sfreq"]) + + # Collect all the necessary data from the structures read + info["meas_id"] = get_new_file_id() + info["meas_id"]["usecs"] = 0 + info["meas_id"]["secs"] = _convert_time(res4["data_date"], res4["data_time"]) + info["meas_date"] = (info["meas_id"]["secs"], info["meas_id"]["usecs"]) + info["experimenter"] = res4["nf_operator"] + info["subject_info"] = dict(his_id=res4["nf_subject_id"]) + for filt in res4["filters"]: + if filt["type"] in _filt_map: + info[_filt_map[filt["type"]]] = filt["freq"] + info["dig"], info["hpi_results"] = _pick_isotrak_and_hpi_coils(res4, coils, trans) + if trans is not None: + if len(info["hpi_results"]) > 0: + info["hpi_results"][0]["coord_trans"] = trans["t_ctf_head_head"] + if trans["t_dev_head"] is not None: + info["dev_head_t"] = trans["t_dev_head"] + info["dev_ctf_t"] = combine_transforms( + trans["t_dev_head"], + invert_transform(trans["t_ctf_head_head"]), + FIFF.FIFFV_COORD_DEVICE, + FIFF.FIFFV_MNE_COORD_CTF_HEAD, + ) + if trans["t_ctf_head_head"] is not None: + info["ctf_head_t"] = trans["t_ctf_head_head"] + info["chs"] = _convert_channel_info(res4, trans, eeg is None) + info["comps"] = _convert_comp_data(res4) + if eeg is None: + # Pick EEG locations from chan info if not read from a separate file + eeg = _pick_eeg_pos(info) + _add_eeg_pos(eeg, trans, info) + logger.info(" Measurement info composed.") + info._unlocked = False + info._update_redundant() + return info + + +def _read_bad_chans(directory, info): + """Read Bad channel list and match to internal names.""" + fname = op.join(directory, "BadChannels") + if not op.exists(fname): + return [] + mapping = dict(zip(_clean_names(info["ch_names"]), info["ch_names"])) + with open(fname) as fid: + bad_chans = [mapping[f.strip()] for f in fid.readlines()] + return bad_chans + + +def _annotate_bad_segments(directory, start_time, meas_date): + fname = op.join(directory, "bad.segments") + if not op.exists(fname): + return None + + # read in bad segment file + onsets = [] + durations = [] + desc = [] + with open(fname) as fid: + for f in fid.readlines(): + tmp = f.strip().split() + desc.append(f"bad_{tmp[0]}") + onsets.append(np.float64(tmp[1]) - start_time) + durations.append(np.float64(tmp[2]) - np.float64(tmp[1])) + # return None if there are no bad segments + if len(onsets) == 0: + return None + + return Annotations(onsets, durations, desc, meas_date) diff --git a/mne/io/ctf/markers.py b/mne/io/ctf/markers.py new file mode 100644 index 0000000..64360fb --- /dev/null +++ b/mne/io/ctf/markers.py @@ -0,0 +1,89 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
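+ # Format note (a hedged sketch of MarkerFile.mrk; the values and the column
+ # header are hypothetical): _get_markers below keys only on these anchors --
+ #
+ #     NUMBER OF MARKERS:
+ #     1
+ #     NAME:
+ #     response
+ #     NUMBER OF SAMPLES:
+ #     2
+ #     LIST OF SAMPLES:
+ #     TRIAL NUMBER   TIME FROM SYNC POINT (in seconds)
+ #     0              1.5000
+ #     1              0.2500
+ #
+ # Each sample row is a (trial, sync) pair; onsets are later reconstructed
+ # as sync + trial * trial_duration + total_offset.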
+ +import os.path as op +from io import BytesIO + +import numpy as np + +from ...annotations import Annotations +from .info import _convert_time +from .res4 import _read_res4 + + +def _get_markers(fname): + def consume(fid, predicate): # just a consumer to move around conveniently + while predicate(fid.readline()): + pass + + def parse_marker(string): # XXX: there should be a nicer way to do that + data = np.genfromtxt( + BytesIO(string.encode()), dtype=[("trial", int), ("sync", float)] + ) + return int(data["trial"]), float(data["sync"]) + + markers = dict() + with open(fname) as fid: + consume(fid, lambda line: not line.startswith("NUMBER OF MARKERS:")) + num_of_markers = int(fid.readline()) + + for _ in range(num_of_markers): + consume(fid, lambda line: not line.startswith("NAME:")) + label = fid.readline().strip("\n") + + consume(fid, lambda line: not line.startswith("NUMBER OF SAMPLES:")) + n_markers = int(fid.readline()) + + consume(fid, lambda line: not line.startswith("LIST OF SAMPLES:")) + next(fid) # skip the samples header + markers[label] = [parse_marker(next(fid)) for _ in range(n_markers)] + + return markers + + +def _get_res4_info_needed_by_markers(directory): + """Get required information from CTF res4 information file.""" + # we only need a few values from res4. Maybe we can read them directly + # instead of parsing the entire res4 file. + res4 = _read_res4(directory) + + total_offset_duration = res4["pre_trig_pts"] / res4["sfreq"] + trial_duration = res4["nsamp"] / res4["sfreq"] + + meas_date = (_convert_time(res4["data_date"], res4["data_time"]), 0) + return total_offset_duration, trial_duration, meas_date + + +def _read_annotations_ctf(directory): + total_offset, trial_duration, meas_date = _get_res4_info_needed_by_markers( + directory + ) + return _read_annotations_ctf_call( + directory, total_offset, trial_duration, meas_date + ) + + +def _read_annotations_ctf_call(directory, total_offset, trial_duration, meas_date): + fname = op.join(directory, "MarkerFile.mrk") + if not op.exists(fname): + return Annotations(list(), list(), list(), orig_time=meas_date) + else: + markers = _get_markers(fname) + + onset = [ + synctime + (trialnum * trial_duration) + total_offset + for _, m in markers.items() + for (trialnum, synctime) in m + ] + + description = np.concatenate( + [np.repeat(label, len(m)) for label, m in markers.items()] + ) + + return Annotations( + onset=onset, + duration=np.zeros_like(onset), + description=description, + orig_time=meas_date, + ) diff --git a/mne/io/ctf/res4.py b/mne/io/ctf/res4.py new file mode 100644 index 0000000..b2ecb9d --- /dev/null +++ b/mne/io/ctf/res4.py @@ -0,0 +1,232 @@ +"""Read .res4 files.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os.path as op + +import numpy as np + +from ...utils import logger +from .constants import CTF + + +def _make_ctf_name(directory, extra, raise_error=True): + """Make a CTF name.""" + fname = op.join(directory, op.basename(directory)[:-3] + "." 
+ extra) + found = True + if not op.isfile(fname): + if raise_error: + raise OSError(f"Standard file {fname} not found") + found = False + return fname, found + + +def _read_double(fid, n=1): + """Read a double.""" + return np.fromfile(fid, ">f8", n) + + +def _read_string(fid, n_bytes, decode=True): + """Read string.""" + s0 = fid.read(n_bytes) + s = s0.split(b"\x00")[0] + return s.decode("utf-8") if decode else s + + +def _read_ustring(fid, n_bytes): + """Read unsigned character string.""" + return np.fromfile(fid, ">B", n_bytes) + + +def _read_int2(fid): + """Read int from short.""" + return _auto_cast(np.fromfile(fid, ">i2", 1)[0]) + + +def _read_int(fid): + """Read a 32-bit integer.""" + return np.fromfile(fid, ">i4", 1)[0] + + +def _move_to_next(fid, byte=8): + """Move to next byte boundary.""" + now = fid.tell() + if now % byte != 0: + now = now - (now % byte) + byte + fid.seek(now, 0) + + +def _read_filter(fid): + """Read filter information.""" + f = dict() + f["freq"] = _read_double(fid)[0] + f["class"] = _read_int(fid) + f["type"] = _read_int(fid) + f["npar"] = _read_int2(fid) + f["pars"] = _read_double(fid, f["npar"]) + return f + + +def _read_comp_coeff(fid, d): + """Read compensation coefficients.""" + # Read the coefficients and initialize + d["ncomp"] = _read_int2(fid) + d["comp"] = list() + # Read each record + dt = np.dtype( + [ + ("sensor_name", "S32"), + ("coeff_type", ">i4"), + ("d0", ">i4"), + ("ncoeff", ">i2"), + ("sensors", f"S{CTF.CTFV_SENSOR_LABEL}", CTF.CTFV_MAX_BALANCING), + ("coeffs", ">f8", CTF.CTFV_MAX_BALANCING), + ] + ) + comps = np.fromfile(fid, dt, d["ncomp"]) + for k in range(d["ncomp"]): + comp = dict() + d["comp"].append(comp) + comp["sensor_name"] = comps["sensor_name"][k].split(b"\x00")[0].decode("utf-8") + comp["coeff_type"] = comps["coeff_type"][k].item() + comp["ncoeff"] = comps["ncoeff"][k].item() + comp["sensors"] = [ + s.split(b"\x00")[0].decode("utf-8") + for s in comps["sensors"][k][: comp["ncoeff"]] + ] + comp["coeffs"] = comps["coeffs"][k][: comp["ncoeff"]] + comp["scanno"] = d["ch_names"].index(comp["sensor_name"]) + + +def _read_res4(dsdir): + """Read the magical res4 file.""" + # adapted from read_res4.c + name, _ = _make_ctf_name(dsdir, "res4") + res = dict() + with open(name, "rb") as fid: + # Read the fields + res["head"] = _read_string(fid, 8) + res["appname"] = _read_string(fid, 256) + res["origin"] = _read_string(fid, 256) + res["desc"] = _read_string(fid, 256) + res["nave"] = _read_int2(fid) + res["data_time"] = _read_string(fid, 255) + res["data_date"] = _read_string(fid, 255) + # Seems that date and time can be swapped + # (are they entered manually?!) 
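+ # (e.g., hypothetically, data_time == "11/05/2013" with data_date ==
+ # "16:20:00"; the "/" vs ":" check below undoes exactly such a swap)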
+ if "/" in res["data_time"] and ":" in res["data_date"]: + data_date = res["data_date"] + res["data_date"] = res["data_time"] + res["data_time"] = data_date + res["nsamp"] = _read_int(fid) + res["nchan"] = _read_int2(fid) + _move_to_next(fid, 8) + res["sfreq"] = _read_double(fid)[0] + res["epoch_time"] = _read_double(fid)[0] + res["no_trials"] = _read_int2(fid) + _move_to_next(fid, 4) + res["pre_trig_pts"] = _read_int(fid) + res["no_trials_done"] = _read_int2(fid) + res["no_trials_display"] = _read_int2(fid) + _move_to_next(fid, 4) + res["save_trials"] = _read_int(fid) + res["primary_trigger"] = fid.read(1) + res["secondary_trigger"] = [ + fid.read(1) for k in range(CTF.CTFV_MAX_AVERAGE_BINS) + ] + res["trigger_polarity_mask"] = fid.read(1) + res["trigger_mode"] = _read_int2(fid) + _move_to_next(fid, 4) + res["accept_reject"] = _read_int(fid) + res["run_time_display"] = _read_int2(fid) + _move_to_next(fid, 4) + res["zero_head"] = _read_int(fid) + _move_to_next(fid, 4) + res["artifact_mode"] = _read_int(fid) + _read_int(fid) # padding + res["nf_run_name"] = _read_string(fid, 32) + res["nf_run_title"] = _read_string(fid, 256) + res["nf_instruments"] = _read_string(fid, 32) + res["nf_collect_descriptor"] = _read_string(fid, 32) + res["nf_subject_id"] = _read_string(fid, 32) + res["nf_operator"] = _read_string(fid, 32) + if len(res["nf_operator"]) == 0: + res["nf_operator"] = None + res["nf_sensor_file_name"] = _read_ustring(fid, 60) + _move_to_next(fid, 4) + res["rdlen"] = _read_int(fid) + fid.seek(CTF.FUNNY_POS, 0) + + if res["rdlen"] > 0: + res["run_desc"] = _read_string(fid, res["rdlen"]) + + # Filters + res["nfilt"] = _read_int2(fid) + res["filters"] = list() + for k in range(res["nfilt"]): + res["filters"].append(_read_filter(fid)) + + # Channel information (names, then data) + res["ch_names"] = list() + for k in range(res["nchan"]): + ch_name = _read_string(fid, 32) + res["ch_names"].append(ch_name) + _coil_dt = np.dtype( + [ + ("pos", ">f8", 3), + ("d0", ">f8"), + ("norm", ">f8", 3), + ("d1", ">f8"), + ("turns", ">i2"), + ("d2", ">i4"), + ("d3", ">i2"), + ("area", ">f8"), + ] + ) + _ch_dt = np.dtype( + [ + ("sensor_type_index", ">i2"), + ("original_run_no", ">i2"), + ("coil_type", ">i4"), + ("proper_gain", ">f8"), + ("qgain", ">f8"), + ("io_gain", ">f8"), + ("io_offset", ">f8"), + ("num_coils", ">i2"), + ("grad_order_no", ">i2"), + ("d0", ">i4"), + ("coil", _coil_dt, CTF.CTFV_MAX_COILS), + ("head_coil", _coil_dt, CTF.CTFV_MAX_COILS), + ] + ) + chs = np.fromfile(fid, _ch_dt, res["nchan"]) + for coil in (chs["coil"], chs["head_coil"]): + coil["pos"] /= 100.0 + coil["area"] *= 1e-4 + # convert to dict + chs = [dict(zip(chs.dtype.names, x)) for x in chs] + for ch in chs: + for key, val in ch.items(): + ch[key] = _auto_cast(val) + res["chs"] = chs + for k in range(res["nchan"]): + res["chs"][k]["ch_name"] = res["ch_names"][k] + + # The compensation coefficients + _read_comp_coeff(fid, res) + logger.info(" res4 data read.") + return res + + +def _auto_cast(x): + # Upcast scalars + if isinstance(x, np.ScalarType): + if x.dtype.kind == "i": + if x.dtype != np.int64: + x = x.astype(np.int64) + elif x.dtype.kind == "f": + if x.dtype != np.float64: + x = x.astype(np.float64) + return x diff --git a/mne/io/ctf/trans.py b/mne/io/ctf/trans.py new file mode 100644 index 0000000..3e74063 --- /dev/null +++ b/mne/io/ctf/trans.py @@ -0,0 +1,132 @@ +"""Create coordinate transforms.""" + +# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +from ..._fiff.constants import FIFF +from ...transforms import ( + Transform, + _fit_matched_points, + _quat_to_affine, + apply_trans, + combine_transforms, + get_ras_to_neuromag_trans, + invert_transform, +) +from ...utils import logger +from .constants import CTF + + +def _make_transform_card(fro, to, r_lpa, r_nasion, r_rpa): + """Make a transform from cardinal landmarks.""" + return invert_transform( + Transform(to, fro, get_ras_to_neuromag_trans(r_nasion, r_lpa, r_rpa)) + ) + + +def _quaternion_align(from_frame, to_frame, from_pts, to_pts, diff_tol=1e-4): + """Perform an alignment using the unit quaternions (modifies points).""" + assert from_pts.shape[1] == to_pts.shape[1] == 3 + trans = _quat_to_affine(_fit_matched_points(from_pts, to_pts)[0]) + + # Test the transformation and print the results + logger.info(" Quaternion matching (desired vs. transformed):") + for fro, to in zip(from_pts, to_pts): + rr = apply_trans(trans, fro) + diff = np.linalg.norm(to - rr) + logger.info( + " %7.2f %7.2f %7.2f mm <-> %7.2f %7.2f %7.2f mm " + "(orig : %7.2f %7.2f %7.2f mm) diff = %8.3f mm" + % (tuple(1000 * to) + tuple(1000 * rr) + tuple(1000 * fro) + (1000 * diff,)) + ) + if diff > diff_tol: + raise RuntimeError( + "Something is wrong: quaternion matching did not work (see above)" + ) + return Transform(from_frame, to_frame, trans) + + +def _make_ctf_coord_trans_set(res4, coils): + """Figure out the necessary coordinate transforms.""" + # CTF head > Neuromag head + lpa = rpa = nas = T1 = T2 = T3 = T5 = None + if coils is not None: + for p in coils: + if p["valid"] and (p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD): + if lpa is None and p["kind"] == CTF.CTFV_COIL_LPA: + lpa = p + elif rpa is None and p["kind"] == CTF.CTFV_COIL_RPA: + rpa = p + elif nas is None and p["kind"] == CTF.CTFV_COIL_NAS: + nas = p + if lpa is None or rpa is None or nas is None: + raise RuntimeError( + "Some of the mandatory HPI device-coordinate info was not there." + ) + t = _make_transform_card("head", "ctf_head", lpa["r"], nas["r"], rpa["r"]) + T3 = invert_transform(t) + + # CTF device -> Neuromag device + # + # Rotate the CTF coordinate frame by 45 degrees and shift by 190 mm + # in z direction to get a coordinate system comparable to the Neuromag one + # + R = np.eye(4) + R[:3, 3] = [0.0, 0.0, 0.19] + val = 0.5 * np.sqrt(2.0) + R[0, 0] = val + R[0, 1] = -val + R[1, 0] = val + R[1, 1] = val + T4 = Transform("ctf_meg", "meg", R) + + # CTF device -> CTF head + # We need to make the implicit transform explicit! + h_pts = dict() + d_pts = dict() + kinds = ( + CTF.CTFV_COIL_LPA, + CTF.CTFV_COIL_RPA, + CTF.CTFV_COIL_NAS, + CTF.CTFV_COIL_SPARE, + ) + if coils is not None: + for p in coils: + if p["valid"]: + if p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD: + for kind in kinds: + if kind not in h_pts and p["kind"] == kind: + h_pts[kind] = p["r"] + elif p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE: + for kind in kinds: + if kind not in d_pts and p["kind"] == kind: + d_pts[kind] = p["r"] + if any(kind not in h_pts for kind in kinds[:-1]): + raise RuntimeError( + "Some of the mandatory HPI device-coordinate info was not there." + ) + if any(kind not in d_pts for kind in kinds[:-1]): + raise RuntimeError( + "Some of the mandatory HPI head-coordinate info was not there." 
+ ) + use_kinds = [kind for kind in kinds if (kind in h_pts and kind in d_pts)] + r_head = np.array([h_pts[kind] for kind in use_kinds]) + r_dev = np.array([d_pts[kind] for kind in use_kinds]) + T2 = _quaternion_align("ctf_meg", "ctf_head", r_dev, r_head) + + # The final missing transform + if T3 is not None and T2 is not None: + T5 = combine_transforms(T2, T3, "ctf_meg", "head") + T1 = combine_transforms(invert_transform(T4), T5, "meg", "head") + s = dict( + t_dev_head=T1, + t_ctf_dev_ctf_head=T2, + t_ctf_head_head=T3, + t_ctf_dev_dev=T4, + t_ctf_dev_head=T5, + ) + logger.info(" Coordinate transformations established.") + return s diff --git a/mne/io/curry/__init__.py b/mne/io/curry/__init__.py new file mode 100644 index 0000000..fce6b7d --- /dev/null +++ b/mne/io/curry/__init__.py @@ -0,0 +1,7 @@ +"""Reader for CURRY data.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .curry import read_raw_curry diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py new file mode 100644 index 0000000..3e8347f --- /dev/null +++ b/mne/io/curry/curry.py @@ -0,0 +1,631 @@ +# +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os.path as op +import re +from collections import namedtuple +from datetime import datetime, timezone +from pathlib import Path + +import numpy as np + +from ..._fiff._digitization import _make_dig_points +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import create_info +from ..._fiff.tag import _coil_trans_to_loc +from ..._fiff.utils import _mult_cal_one, _read_segments_file +from ...annotations import Annotations +from ...surface import _normal_orth +from ...transforms import ( + Transform, + _angle_between_quats, + apply_trans, + combine_transforms, + get_ras_to_neuromag_trans, + invert_transform, + rot_to_quat, +) +from ...utils import _check_fname, check_fname, logger, verbose +from ..base import BaseRaw +from ..ctf.trans import _quaternion_align + +FILE_EXTENSIONS = { + "Curry 7": { + "info": ".dap", + "data": ".dat", + "labels": ".rs3", + "events_cef": ".cef", + "events_ceo": ".ceo", + "hpi": ".hpi", + }, + "Curry 8": { + "info": ".cdt.dpa", + "data": ".cdt", + "labels": ".cdt.dpa", + "events_cef": ".cdt.cef", + "events_ceo": ".cdt.ceo", + "hpi": ".cdt.hpi", + }, +} +CHANTYPES = {"meg": "_MAG1", "eeg": "", "misc": "_OTHERS"} +FIFFV_CHANTYPES = { + "meg": FIFF.FIFFV_MEG_CH, + "eeg": FIFF.FIFFV_EEG_CH, + "misc": FIFF.FIFFV_MISC_CH, +} +FIFFV_COILTYPES = { + "meg": FIFF.FIFFV_COIL_CTF_GRAD, + "eeg": FIFF.FIFFV_COIL_EEG, + "misc": FIFF.FIFFV_COIL_NONE, +} +SI_UNITS = dict(V=FIFF.FIFF_UNIT_V, T=FIFF.FIFF_UNIT_T) +SI_UNIT_SCALE = dict(c=1e-2, m=1e-3, u=1e-6, µ=1e-6, n=1e-9, p=1e-12, f=1e-15) + +CurryParameters = namedtuple( + "CurryParameters", + "n_samples, sfreq, is_ascii, unit_dict, n_chans, dt_start, chanidx_in_file", +) + + +def _get_curry_version(file_extension): + """Check out the curry file version.""" + return "Curry 8" if "cdt" in file_extension else "Curry 7" + + +def _get_curry_file_structure(fname, required=()): + """Store paths to a dict and check for required files.""" + _msg = ( + "The following required files cannot be found: {0}.\nPlease make " + "sure all required files are located in the same directory as {1}." 
+ ) + fname = Path(_check_fname(fname, "read", True, "fname")) + + # we don't use os.path.splitext to also handle extensions like .cdt.dpa + # this won't handle a dot in the filename, but it should handle it in + # the parent directories + fname_base = fname.name.split(".", maxsplit=1)[0] + ext = fname.name[len(fname_base) :] + fname_base = str(fname) + fname_base = fname_base[: len(fname_base) - len(ext)] + del fname + version = _get_curry_version(ext) + my_curry = dict() + for key in ("info", "data", "labels", "events_cef", "events_ceo", "hpi"): + fname = fname_base + FILE_EXTENSIONS[version][key] + if op.isfile(fname): + _key = "events" if key.startswith("events") else key + my_curry[_key] = fname + + missing = [field for field in required if field not in my_curry] + if missing: + raise FileNotFoundError(_msg.format(np.unique(missing), fname)) + + return my_curry + + +def _read_curry_lines(fname, regex_list): + """Read through the lines of a curry parameter files and save data. + + Parameters + ---------- + fname : path-like + Path to a curry file. + regex_list : list of str + A list of strings or regular expressions to search within the file. + Each element `regex` in `regex_list` must be formulated so that + `regex + " START_LIST"` initiates the start and `regex + " END_LIST"` + initiates the end of the elements that should be saved. + + Returns + ------- + data_dict : dict + A dictionary containing the extracted data. For each element `regex` + in `regex_list` a dictionary key `data_dict[regex]` is created, which + contains a list of the according data. + + """ + save_lines = {} + data_dict = {} + + for regex in regex_list: + save_lines[regex] = False + data_dict[regex] = [] + + with open(fname) as fid: + for line in fid: + for regex in regex_list: + if re.match(regex + " END_LIST", line): + save_lines[regex] = False + + if save_lines[regex] and line != "\n": + result = line.replace("\n", "") + if "\t" in result: + result = result.split("\t") + data_dict[regex].append(result) + + if re.match(regex + " START_LIST", line): + save_lines[regex] = True + + return data_dict + + +def _read_curry_parameters(fname): + """Extract Curry params from a Curry info file.""" + _msg_match = ( + "The sampling frequency and the time steps extracted from " + "the parameter file do not match." + ) + _msg_invalid = "sfreq must be greater than 0. 
Got sfreq = {0}" + + var_names = [ + "NumSamples", + "SampleFreqHz", + "DataFormat", + "SampleTimeUsec", + "NumChannels", + "StartYear", + "StartMonth", + "StartDay", + "StartHour", + "StartMin", + "StartSec", + "StartMillisec", + "NUM_SAMPLES", + "SAMPLE_FREQ_HZ", + "DATA_FORMAT", + "SAMPLE_TIME_USEC", + "NUM_CHANNELS", + "START_YEAR", + "START_MONTH", + "START_DAY", + "START_HOUR", + "START_MIN", + "START_SEC", + "START_MILLISEC", + ] + + param_dict = dict() + unit_dict = dict() + + with open(fname) as fid: + for line in iter(fid): + if any(var_name in line for var_name in var_names): + key, val = line.replace(" ", "").replace("\n", "").split("=") + param_dict[key.lower().replace("_", "")] = val + for key, type_ in CHANTYPES.items(): + if f"DEVICE_PARAMETERS{type_} START" in line: + data_unit = next(fid) + unit_dict[key] = ( + data_unit.replace(" ", "").replace("\n", "").split("=")[-1] + ) + + # look for CHAN_IN_FILE sections, which may or may not exist; issue #8391 + types = ["meg", "eeg", "misc"] + chanidx_in_file = _read_curry_lines( + fname, ["CHAN_IN_FILE" + CHANTYPES[key] for key in types] + ) + + n_samples = int(param_dict["numsamples"]) + sfreq = float(param_dict["samplefreqhz"]) + time_step = float(param_dict["sampletimeusec"]) * 1e-6 + is_ascii = param_dict["dataformat"] == "ASCII" + n_channels = int(param_dict["numchannels"]) + try: + dt_start = datetime( + int(param_dict["startyear"]), + int(param_dict["startmonth"]), + int(param_dict["startday"]), + int(param_dict["starthour"]), + int(param_dict["startmin"]), + int(param_dict["startsec"]), + int(param_dict["startmillisec"]) * 1000, + timezone.utc, + ) + # Note that the time zone information is not stored in the Curry info + # file, and it seems the start time info is in the local timezone + # of the acquisition system (which is unknown); therefore, just set + # the timezone to be UTC. If the user knows otherwise, they can + # change it later. (Some Curry files might include StartOffsetUTCMin, + # but its presence is unpredictable, so we won't rely on it.) 
+ except (ValueError, KeyError): + dt_start = None # if missing keywords or illegal values, don't set + + if time_step == 0: + true_sfreq = sfreq + elif sfreq == 0: + true_sfreq = 1 / time_step + elif not np.isclose(sfreq, 1 / time_step): + raise ValueError(_msg_match) + else: # they're equal and != 0 + true_sfreq = sfreq + if true_sfreq <= 0: + raise ValueError(_msg_invalid.format(true_sfreq)) + + return CurryParameters( + n_samples, + true_sfreq, + is_ascii, + unit_dict, + n_channels, + dt_start, + chanidx_in_file, + ) + + +def _read_curry_info(curry_paths): + """Extract info from curry parameter files.""" + curry_params = _read_curry_parameters(curry_paths["info"]) + R = np.eye(4) + R[[0, 1], [0, 1]] = -1 # rotate 180 deg + # shift down and back + # (chosen by eyeballing to make the CTF helmet look roughly correct) + R[:3, 3] = [0.0, -0.015, -0.12] + curry_dev_dev_t = Transform("ctf_meg", "meg", R) + + # read labels from label files + label_fname = curry_paths["labels"] + types = ["meg", "eeg", "misc"] + labels = _read_curry_lines( + label_fname, ["LABELS" + CHANTYPES[key] for key in types] + ) + sensors = _read_curry_lines( + label_fname, ["SENSORS" + CHANTYPES[key] for key in types] + ) + normals = _read_curry_lines( + label_fname, ["NORMALS" + CHANTYPES[key] for key in types] + ) + assert len(labels) == len(sensors) == len(normals) + + all_chans = list() + dig_ch_pos = dict() + for key in ["meg", "eeg", "misc"]: + chanidx_is_explicit = ( + len(curry_params.chanidx_in_file["CHAN_IN_FILE" + CHANTYPES[key]]) > 0 + ) # channel index + # position in the datafile may or may not be explicitly declared, + # based on the CHAN_IN_FILE section in info file + for ind, chan in enumerate(labels["LABELS" + CHANTYPES[key]]): + chanidx = len(all_chans) + 1 # by default, just assume the + # channel index in the datafile is in order of the channel + # names as we found them in the labels file + if chanidx_is_explicit: # but, if explicitly declared, use + # that index number + chanidx = int( + curry_params.chanidx_in_file["CHAN_IN_FILE" + CHANTYPES[key]][ind] + ) + if chanidx <= 0: # if chanidx was explicitly declared to be ' 0', + # it means the channel is not actually saved in the data file + # (e.g. the "Ref" channel), so don't add it to our list. + # Git issue #8391 + continue + ch = { + "ch_name": chan, + "unit": curry_params.unit_dict[key], + "kind": FIFFV_CHANTYPES[key], + "coil_type": FIFFV_COILTYPES[key], + "ch_idx": chanidx, + } + if key == "eeg": + loc = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float) + # XXX just the sensor, where is ref (next 3)? 
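+ # (loc is padded to the 12-element FIFF layout below: electrode position
+ # in loc[:3], with the reference position -- the "next 3" the XXX above
+ # asks about -- and the orientation entries left as zeros for EEG)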
+ assert loc.shape == (3,) + loc /= 1000.0 # to meters + loc = np.concatenate([loc, np.zeros(9)]) + ch["loc"] = loc + # XXX need to check/ensure this + ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD + dig_ch_pos[chan] = loc[:3] + elif key == "meg": + pos = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float) + pos /= 1000.0 # to meters + pos = pos[:3] # just the inner coil + pos = apply_trans(curry_dev_dev_t, pos) + nn = np.array(normals["NORMALS" + CHANTYPES[key]][ind], float) + assert np.isclose(np.linalg.norm(nn), 1.0, atol=1e-4) + nn /= np.linalg.norm(nn) + nn = apply_trans(curry_dev_dev_t, nn, move=False) + trans = np.eye(4) + trans[:3, 3] = pos + trans[:3, :3] = _normal_orth(nn).T + ch["loc"] = _coil_trans_to_loc(trans) + ch["coord_frame"] = FIFF.FIFFV_COORD_DEVICE + all_chans.append(ch) + dig = _make_dig_points( + dig_ch_pos=dig_ch_pos, coord_frame="head", add_missing_fiducials=True + ) + del dig_ch_pos + + ch_count = len(all_chans) + assert ch_count == curry_params.n_chans # ensure that we have assembled + # the same number of channels as declared in the info (.DAP) file in the + # DATA_PARAMETERS section. Git issue #8391 + + # sort the channels to assure they are in the order that matches how + # recorded in the datafile. In general they most likely are already in + # the correct order, but if the channel index in the data file was + # explicitly declared we might as well use it. + all_chans = sorted(all_chans, key=lambda ch: ch["ch_idx"]) + + ch_names = [chan["ch_name"] for chan in all_chans] + info = create_info(ch_names, curry_params.sfreq) + with info._unlock(): + info["meas_date"] = curry_params.dt_start # for Git issue #8398 + info["dig"] = dig + _make_trans_dig(curry_paths, info, curry_dev_dev_t) + + for ind, ch_dict in enumerate(info["chs"]): + all_chans[ind].pop("ch_idx") + ch_dict.update(all_chans[ind]) + assert ch_dict["loc"].shape == (12,) + ch_dict["unit"] = SI_UNITS[all_chans[ind]["unit"][1]] + ch_dict["cal"] = SI_UNIT_SCALE[all_chans[ind]["unit"][0]] + + return info, curry_params.n_samples, curry_params.is_ascii + + +_card_dict = { + "Left ear": FIFF.FIFFV_POINT_LPA, + "Nasion": FIFF.FIFFV_POINT_NASION, + "Right ear": FIFF.FIFFV_POINT_RPA, +} + + +def _make_trans_dig(curry_paths, info, curry_dev_dev_t): + # Coordinate frame transformations and definitions + no_msg = "Leaving device<->head transform as None" + info["dev_head_t"] = None + label_fname = curry_paths["labels"] + key = "LANDMARKS" + CHANTYPES["meg"] + lm = _read_curry_lines(label_fname, [key])[key] + lm = np.array(lm, float) + lm.shape = (-1, 3) + if len(lm) == 0: + # no dig + logger.info(no_msg + " (no landmarks found)") + return + lm /= 1000.0 + key = "LM_REMARKS" + CHANTYPES["meg"] + remarks = _read_curry_lines(label_fname, [key])[key] + assert len(remarks) == len(lm) + with info._unlock(): + info["dig"] = list() + cards = dict() + for remark, r in zip(remarks, lm): + kind = ident = None + if remark in _card_dict: + kind = FIFF.FIFFV_POINT_CARDINAL + ident = _card_dict[remark] + cards[ident] = r + elif remark.startswith("HPI"): + kind = FIFF.FIFFV_POINT_HPI + ident = int(remark[3:]) - 1 + if kind is not None: + info["dig"].append( + dict(kind=kind, ident=ident, r=r, coord_frame=FIFF.FIFFV_COORD_UNKNOWN) + ) + with info._unlock(): + info["dig"].sort(key=lambda x: (x["kind"], x["ident"])) + has_cards = len(cards) == 3 + has_hpi = "hpi" in curry_paths + if has_cards and has_hpi: # have all three + logger.info("Composing device<->head transformation from dig points") + hpi_u = np.array( + [d["r"] for d in 
info["dig"] if d["kind"] == FIFF.FIFFV_POINT_HPI], float + ) + hpi_c = np.ascontiguousarray(_first_hpi(curry_paths["hpi"])[: len(hpi_u), 1:4]) + unknown_curry_t = _quaternion_align("unknown", "ctf_meg", hpi_u, hpi_c, 1e-2) + angle = np.rad2deg( + _angle_between_quats( + np.zeros(3), rot_to_quat(unknown_curry_t["trans"][:3, :3]) + ) + ) + dist = 1000 * np.linalg.norm(unknown_curry_t["trans"][:3, 3]) + logger.info(f" Fit a {angle:0.1f}° rotation, {dist:0.1f} mm translation") + unknown_dev_t = combine_transforms( + unknown_curry_t, curry_dev_dev_t, "unknown", "meg" + ) + unknown_head_t = Transform( + "unknown", + "head", + get_ras_to_neuromag_trans( + *( + cards[key] + for key in ( + FIFF.FIFFV_POINT_NASION, + FIFF.FIFFV_POINT_LPA, + FIFF.FIFFV_POINT_RPA, + ) + ) + ), + ) + with info._unlock(): + info["dev_head_t"] = combine_transforms( + invert_transform(unknown_dev_t), unknown_head_t, "meg", "head" + ) + for d in info["dig"]: + d.update( + coord_frame=FIFF.FIFFV_COORD_HEAD, + r=apply_trans(unknown_head_t, d["r"]), + ) + else: + if has_cards: + no_msg += " (no .hpi file found)" + elif has_hpi: + no_msg += " (not all cardinal points found)" + else: + no_msg += " (neither cardinal points nor .hpi file found)" + logger.info(no_msg) + + +def _first_hpi(fname): + # Get the first HPI result + with open(fname) as fid: + for line in fid: + line = line.strip() + if any(x in line for x in ("FileVersion", "NumCoils")) or not line: + continue + hpi = np.array(line.split(), float) + break + else: + raise RuntimeError(f"Could not find valid HPI in {fname}") + # t is the first entry + assert hpi.ndim == 1 + hpi = hpi[1:] + hpi.shape = (-1, 5) + hpi /= 1000.0 + return hpi + + +def _read_events_curry(fname): + """Read events from Curry event files. + + Parameters + ---------- + fname : path-like + Path to a curry event file with extensions .cef, .ceo, + .cdt.cef, or .cdt.ceo + + Returns + ------- + events : ndarray, shape (n_events, 3) + The array of events. + """ + check_fname( + fname, + "curry event", + (".cef", ".ceo", ".cdt.cef", ".cdt.ceo"), + endings_err=(".cef", ".ceo", ".cdt.cef", ".cdt.ceo"), + ) + + events_dict = _read_curry_lines(fname, ["NUMBER_LIST"]) + # The first 3 column seem to contain the event information + curry_events = np.array(events_dict["NUMBER_LIST"], dtype=int)[:, 0:3] + + return curry_events + + +def _read_annotations_curry(fname, sfreq="auto"): + r"""Read events from Curry event files. + + Parameters + ---------- + fname : str + The filename. + sfreq : float | 'auto' + The sampling frequency in the file. If set to 'auto' then the + ``sfreq`` is taken from the respective info file of the same name with + according file extension (\*.dap for Curry 7; \*.cdt.dpa for Curry8). + So data.cef looks in data.dap and data.cdt.cef looks in data.cdt.dpa. + + Returns + ------- + annot : instance of Annotations | None + The annotations. + """ + required = ["events", "info"] if sfreq == "auto" else ["events"] + curry_paths = _get_curry_file_structure(fname, required) + events = _read_events_curry(curry_paths["events"]) + + if sfreq == "auto": + sfreq = _read_curry_parameters(curry_paths["info"]).sfreq + + onset = events[:, 0] / sfreq + duration = np.zeros(events.shape[0]) + description = events[:, 2] + + return Annotations(onset, duration, description) + + +@verbose +def read_raw_curry(fname, preload=False, verbose=None) -> "RawCurry": + """Read raw data from Curry files. 
+ + Parameters + ---------- + fname : path-like + Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``, + ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawCurry + A Raw object containing Curry data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawCurry. + """ + return RawCurry(fname, preload, verbose) + + +class RawCurry(BaseRaw): + """Raw object from Curry file. + + Parameters + ---------- + fname : path-like + Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``, + ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``. + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + + """ + + @verbose + def __init__(self, fname, preload=False, verbose=None): + curry_paths = _get_curry_file_structure( + fname, required=["info", "data", "labels"] + ) + + data_fname = op.abspath(curry_paths["data"]) + + info, n_samples, is_ascii = _read_curry_info(curry_paths) + + last_samps = [n_samples - 1] + raw_extras = dict(is_ascii=is_ascii) + + super().__init__( + info, + preload, + filenames=[data_fname], + last_samps=last_samps, + orig_format="int", + raw_extras=[raw_extras], + verbose=verbose, + ) + + if "events" in curry_paths: + logger.info( + "Event file found. Extracting Annotations from " + f"{curry_paths['events']}..." + ) + annots = _read_annotations_curry( + curry_paths["events"], sfreq=self.info["sfreq"] + ) + self.set_annotations(annots) + else: + logger.info("Event file not found. No Annotations set.") + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + if self._raw_extras[fi]["is_ascii"]: + if isinstance(idx, slice): + idx = np.arange(idx.start, idx.stop) + block = np.loadtxt( + self.filenames[0], skiprows=start, max_rows=stop - start, ndmin=2 + ).T + _mult_cal_one(data, block, idx, cals, mult) + + else: + _read_segments_file( + self, data, idx, fi, start, stop, cals, mult, dtype=">> events = mne.find_events(...) # doctest:+SKIP + >>> events[:, 2] &= (2**16 - 1) # doctest:+SKIP + + The above operation can be carried out directly in :func:`mne.find_events` + using the ``mask`` and ``mask_type`` parameters (see + :func:`mne.find_events` for more details). + + It is also possible to retrieve system codes, but no particular effort has + been made to decode these in MNE. In case it is necessary, for instance to + check the CMS bit, the following operation can be carried out: + + >>> cms_bit = 20 # doctest:+SKIP + >>> cms_high = (events[:, 2] & (1 << cms_bit)) != 0 # doctest:+SKIP + + It is worth noting that in some special cases, it may be necessary to shift + event values in order to retrieve correct event triggers. This depends on + the triggering device used to perform the synchronization. For instance, in + some files events need to be shifted by 8 bits: + + >>> events[:, 2] >>= 8 # doctest:+SKIP + + TAL channels called 'EDF Annotations' or 'BDF Annotations' are parsed and + extracted annotations are stored in raw.annotations. Use + :func:`mne.events_from_annotations` to obtain events from these + annotations. + + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. 
+ """ + + @verbose + def __init__( + self, + input_fname, + eog=None, + misc=None, + stim_channel="auto", + exclude=(), + infer_types=False, + preload=False, + include=None, + units=None, + encoding="utf8", + exclude_after_unique=False, + *, + verbose=None, + ): + logger.info(f"Extracting EDF parameters from {input_fname}...") + input_fname = os.path.abspath(input_fname) + info, edf_info, orig_units = _get_info( + input_fname, + stim_channel, + eog, + misc, + exclude, + infer_types, + preload, + include, + exclude_after_unique, + ) + logger.info("Creating raw.info structure...") + + _validate_type(units, (str, None, dict), "units") + if units is None: + units = dict() + elif isinstance(units, str): + units = {ch_name: units for ch_name in info["ch_names"]} + + for k, (this_ch, this_unit) in enumerate(orig_units.items()): + if this_ch not in units: + continue + if this_unit not in ("", units[this_ch]): + raise ValueError( + f"Unit for channel {this_ch} is present in the file as " + f"{repr(this_unit)}, cannot overwrite it with the units " + f"argument {repr(units[this_ch])}." + ) + if this_unit == "": + orig_units[this_ch] = units[this_ch] + ch_type = edf_info["ch_types"][k] + scaling = _get_scaling(ch_type.lower(), orig_units[this_ch]) + edf_info["units"][k] /= scaling + + # Raw attributes + last_samps = [edf_info["nsamples"] - 1] + super().__init__( + info, + preload, + filenames=[input_fname], + raw_extras=[edf_info], + last_samps=last_samps, + orig_format="int", + orig_units=orig_units, + verbose=verbose, + ) + + # Read annotations from file and set it + if len(edf_info["tal_idx"]) > 0: + # Read TAL data exploiting the header info (no regexp) + idx = np.empty(0, int) + tal_data = self._read_segment_file( + np.empty((0, self.n_times)), + idx, + 0, + 0, + int(self.n_times), + np.ones((len(idx), 1)), + None, + ) + annotations = _read_annotations_edf( + tal_data[0], + ch_names=info["ch_names"], + encoding=encoding, + ) + self.set_annotations(annotations, on_missing="warn") + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + return _read_segment_file( + data, + idx, + fi, + start, + stop, + self._raw_extras[fi], + self.filenames[fi], + cals, + mult, + ) + + +@fill_doc +class RawGDF(BaseRaw): + """Raw object from GDF file. + + Parameters + ---------- + input_fname : path-like + Path to the GDF file. + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + stim_channel : ``'auto'`` | str | list of str | int | list of int + Defaults to 'auto', which means that channels named 'status' or + 'trigger' (case insensitive) are set to STIM. If str (or list of str), + all channels matching the name(s) are set to STIM. If int (or list of + ints), channels corresponding to the indices are set to STIM. + exclude : list of str + Channel names to exclude. This can help when reading data with + different sampling rates to avoid unnecessary resampling. + + .. versionadded:: 0.24.1 + include : list of str | str + Channel names to be included. A str is interpreted as a regular + expression. 'exclude' must be empty if include is assigned. + + .. 
versionadded:: 1.1 + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + mne.io.read_raw_gdf : Recommended way to read GDF files. + + Notes + ----- + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. + """ + + @verbose + def __init__( + self, + input_fname, + eog=None, + misc=None, + stim_channel="auto", + exclude=(), + preload=False, + include=None, + verbose=None, + ): + logger.info(f"Extracting EDF parameters from {input_fname}...") + input_fname = os.path.abspath(input_fname) + info, edf_info, orig_units = _get_info( + input_fname, stim_channel, eog, misc, exclude, True, preload, include + ) + logger.info("Creating raw.info structure...") + + # Raw attributes + last_samps = [edf_info["nsamples"] - 1] + super().__init__( + info, + preload, + filenames=[input_fname], + raw_extras=[edf_info], + last_samps=last_samps, + orig_format="int", + orig_units=orig_units, + verbose=verbose, + ) + + # Read annotations from file and set it + onset, duration, desc = _get_annotations_gdf(edf_info, self.info["sfreq"]) + + self.set_annotations( + Annotations( + onset=onset, duration=duration, description=desc, orig_time=None + ) + ) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + return _read_segment_file( + data, + idx, + fi, + start, + stop, + self._raw_extras[fi], + self.filenames[fi], + cals, + mult, + ) + + +def _read_ch(fid, subtype, samp, dtype_byte, dtype=None): + """Read a number of samples for a single channel.""" + # BDF + if subtype == "bdf": + ch_data = np.fromfile(fid, dtype=dtype, count=samp * dtype_byte) + ch_data = ch_data.reshape(-1, 3).astype(INT32) + ch_data = (ch_data[:, 0]) + (ch_data[:, 1] << 8) + (ch_data[:, 2] << 16) + # 24th bit determines the sign + ch_data[ch_data >= (1 << 23)] -= 1 << 24 + + # GDF data and EDF data + else: + ch_data = np.fromfile(fid, dtype=dtype, count=samp) + + return ch_data + + +def _read_segment_file(data, idx, fi, start, stop, raw_extras, filenames, cals, mult): + """Read a chunk of raw data.""" + n_samps = raw_extras["n_samps"] + buf_len = int(raw_extras["max_samp"]) + dtype = raw_extras["dtype_np"] + dtype_byte = raw_extras["dtype_byte"] + data_offset = raw_extras["data_offset"] + stim_channel_idxs = raw_extras["stim_channel_idxs"] + orig_sel = raw_extras["sel"] + tal_idx = raw_extras.get("tal_idx", np.empty(0, int)) + subtype = raw_extras["subtype"] + cal = raw_extras["cal"] + offsets = raw_extras["offsets"] + gains = raw_extras["units"] + + read_sel = np.concatenate([orig_sel[idx], tal_idx]) + tal_data = [] + + # only try to read the stim channel if it's not None and it's + # actually one of the requested channels + idx_arr = np.arange(idx.start, idx.stop) if isinstance(idx, slice) else idx + + # We could read this one EDF block at a time, which would be this: + ch_offsets = np.cumsum(np.concatenate([[0], n_samps]), dtype=np.int64) + block_start_idx, r_lims, _ = _blk_read_lims(start, stop, buf_len) + # But to speed it up, we really need to read multiple blocks at once, + # Otherwise we can end up with e.g. 18,181 chunks for a 20 MB file! 
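+ # For scale (a hypothetical 64-channel BDF with 100 samples per record at
+ # 3 bytes each): one record spans 64 * 100 * 3 = 19,200 bytes, so the
+ # ~10 MB chunks chosen below cover 10 * 1024 * 1024 // 19200 = 546 records.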
+ # Let's do ~10 MB chunks: + n_per = max(10 * 1024 * 1024 // (ch_offsets[-1] * dtype_byte), 1) + with open(filenames, "rb", buffering=0) as fid: + # Extract data + start_offset = data_offset + block_start_idx * ch_offsets[-1] * dtype_byte + + # first read everything into the `ones` array. For channels with + # lower sampling frequency, there will be zeros left at the end of the + # row. Ignore TAL/annotations channel and only store `orig_sel` + ones = np.zeros((len(orig_sel), data.shape[-1]), dtype=data.dtype) + # save how many samples have already been read per channel + n_smp_read = [0 for _ in range(len(orig_sel))] + + # read data in chunks + for ai in range(0, len(r_lims), n_per): + block_offset = ai * ch_offsets[-1] * dtype_byte + n_read = min(len(r_lims) - ai, n_per) + fid.seek(start_offset + block_offset, 0) + # Read and reshape to (n_chunks_read, ch0_ch1_ch2_ch3...) + many_chunk = _read_ch( + fid, subtype, ch_offsets[-1] * n_read, dtype_byte, dtype + ).reshape(n_read, -1) + r_sidx = r_lims[ai][0] + r_eidx = buf_len * (n_read - 1) + r_lims[ai + n_read - 1][1] + + # loop over selected channels, ci=channel selection + for ii, ci in enumerate(read_sel): + # This now has size (n_chunks_read, n_samp[ci]) + ch_data = many_chunk[:, ch_offsets[ci] : ch_offsets[ci + 1]].copy() + + # annotation channel has to be treated separately + if ci in tal_idx: + tal_data.append(ch_data) + continue + + orig_idx = idx_arr[ii] + ch_data = ch_data * cal[orig_idx] + ch_data += offsets[orig_idx] + ch_data *= gains[orig_idx] + + assert ci == orig_sel[orig_idx] + + if n_samps[ci] != buf_len: + if orig_idx in stim_channel_idxs: + # Stim channel will be interpolated + old = np.linspace(0, 1, n_samps[ci] + 1, True) + new = np.linspace(0, 1, buf_len, False) + ch_data = np.append(ch_data, np.zeros((len(ch_data), 1)), -1) + ch_data = interp1d(old, ch_data, kind="zero", axis=-1)(new) + elif orig_idx in stim_channel_idxs: + ch_data = np.bitwise_and(ch_data.astype(int), 2**17 - 1) + + one_i = ch_data.ravel()[r_sidx:r_eidx] + + # note how many samples have been read + smp_read = n_smp_read[orig_idx] + ones[orig_idx, smp_read : smp_read + len(one_i)] = one_i + n_smp_read[orig_idx] += len(one_i) + + # skip if no data was requested, ie. only annotations were read + if sum(n_smp_read) > 0: + # expected number of samples, equals maximum sfreq + smp_exp = data.shape[-1] + assert max(n_smp_read) == smp_exp + + # resample data after loading all chunks to prevent edge artifacts + resampled = False + for i, smp_read in enumerate(n_smp_read): + # nothing read, nothing to resample + if smp_read == 0: + continue + # upsample if n_samples is lower than from highest sfreq + if smp_read != smp_exp: + assert (ones[i, smp_read:] == 0).all() # sanity check + ones[i, :] = resample( + ones[i, :smp_read].astype(np.float64), + smp_exp, + smp_read, + npad=0, + axis=-1, + ) + resampled = True + + # give warning if we resampled a subselection + if resampled and raw_extras["nsamples"] != (stop - start): + warn( + "Loading an EDF with mixed sampling frequencies and " + "preload=False will result in edge artifacts. " + "It is recommended to use preload=True." 
+ "See also https://github.com/mne-tools/mne-python/issues/10635" + ) + + _mult_cal_one(data[:, :], ones, idx, cals, mult) + + if len(tal_data) > 1: + tal_data = np.concatenate([tal.ravel() for tal in tal_data]) + tal_data = tal_data[np.newaxis, :] + return tal_data + + +@fill_doc +def _read_header(fname, exclude, infer_types, include=None, exclude_after_unique=False): + """Unify EDF, BDF and GDF _read_header call. + + Parameters + ---------- + fname : str + Path to the EDF+, BDF, or GDF file. + exclude : list of str | str + Channel names to exclude. This can help when reading data with + different sampling rates to avoid unnecessary resampling. A str is + interpreted as a regular expression. + infer_types : bool + If True, try to infer channel types from channel labels. If a channel + label starts with a known type (such as 'EEG') followed by a space and + a name (such as 'Fp1'), the channel type will be set accordingly, and + the channel will be renamed to the original label without the prefix. + For unknown prefixes, the type will be 'EEG' and the name will not be + modified. If False, do not infer types and assume all channels are of + type 'EEG'. + include : list of str | str + Channel names to be included. A str is interpreted as a regular + expression. 'exclude' must be empty if include is assigned. + %(exclude_after_unique)s + + Returns + ------- + (edf_info, orig_units) : tuple + """ + ext = os.path.splitext(fname)[1][1:].lower() + logger.info(f"{ext.upper()} file detected") + if ext in ("bdf", "edf"): + return _read_edf_header( + fname, exclude, infer_types, include, exclude_after_unique + ) + elif ext == "gdf": + return _read_gdf_header(fname, exclude, include), None + else: + raise NotImplementedError( + f"Only GDF, EDF, and BDF files are supported, got {ext}." + ) + + +def _get_info( + fname, + stim_channel, + eog, + misc, + exclude, + infer_types, + preload, + include=None, + exclude_after_unique=False, +): + """Extract information from EDF+, BDF or GDF file.""" + eog = eog if eog is not None else [] + misc = misc if misc is not None else [] + + edf_info, orig_units = _read_header( + fname, exclude, infer_types, include, exclude_after_unique + ) + + # XXX: `tal_ch_names` to pass to `_check_stim_channel` should be computed + # from `edf_info['ch_names']` and `edf_info['tal_idx']` but 'tal_idx' + # contains stim channels that are not TAL. 
+ stim_channel_idxs, _ = _check_stim_channel(stim_channel, edf_info["ch_names"]) + + sel = edf_info["sel"] # selection of channels not excluded + ch_names = edf_info["ch_names"] # of length len(sel) + if "ch_types" in edf_info: + ch_types = edf_info["ch_types"] # of length len(sel) + else: + ch_types = [None] * len(sel) + if len(sel) == 0: # only want stim channels + n_samps = edf_info["n_samps"][[0]] + else: + n_samps = edf_info["n_samps"][sel] + nchan = edf_info["nchan"] + physical_ranges = edf_info["physical_max"] - edf_info["physical_min"] + cals = edf_info["digital_max"] - edf_info["digital_min"] + bad_idx = np.where((~np.isfinite(cals)) | (cals == 0))[0] + if len(bad_idx) > 0: + warn( + "Scaling factor is not defined in following channels:\n" + + ", ".join(ch_names[i] for i in bad_idx) + ) + cals[bad_idx] = 1 + bad_idx = np.where(physical_ranges == 0)[0] + if len(bad_idx) > 0: + warn( + "Physical range is not defined in following channels:\n" + + ", ".join(ch_names[i] for i in bad_idx) + ) + physical_ranges[bad_idx] = 1 + + # Creates a list of dicts of eeg channels for raw.info + logger.info("Setting channel info structure...") + chs = list() + pick_mask = np.ones(len(ch_names)) + + chs_without_types = list() + + for idx, ch_name in enumerate(ch_names): + chan_info = {} + chan_info["cal"] = 1.0 + chan_info["logno"] = idx + 1 + chan_info["scanno"] = idx + 1 + chan_info["range"] = 1.0 + chan_info["unit_mul"] = FIFF.FIFF_UNITM_NONE + chan_info["ch_name"] = ch_name + chan_info["unit"] = FIFF.FIFF_UNIT_V + chan_info["coord_frame"] = FIFF.FIFFV_COORD_HEAD + chan_info["coil_type"] = FIFF.FIFFV_COIL_EEG + chan_info["kind"] = FIFF.FIFFV_EEG_CH + # montage can't be stored in EDF so channel locs are unknown: + chan_info["loc"] = np.full(12, np.nan) + + # if the edf info contained channel type information + # set it now + ch_type = ch_types[idx] + if ch_type is not None and ch_type in CH_TYPE_MAPPING: + chan_info["kind"] = CH_TYPE_MAPPING.get(ch_type) + if ch_type not in ["EEG", "ECOG", "SEEG", "DBS"]: + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + pick_mask[idx] = False + # if user passes in explicit mapping for eog, misc and stim + # channels set them here + if ch_name in eog or idx in eog or idx - nchan in eog: + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["kind"] = FIFF.FIFFV_EOG_CH + pick_mask[idx] = False + elif ch_name in misc or idx in misc or idx - nchan in misc: + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["kind"] = FIFF.FIFFV_MISC_CH + pick_mask[idx] = False + elif idx in stim_channel_idxs: + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["unit"] = FIFF.FIFF_UNIT_NONE + chan_info["kind"] = FIFF.FIFFV_STIM_CH + pick_mask[idx] = False + chan_info["ch_name"] = ch_name + ch_names[idx] = chan_info["ch_name"] + edf_info["units"][idx] = 1 + elif ch_type not in CH_TYPE_MAPPING: + chs_without_types.append(ch_name) + chs.append(chan_info) + + # warn if channel type was not inferable + if len(chs_without_types): + msg = ( + "Could not determine channel type of the following channels, " + f'they will be set as EEG:\n{", ".join(chs_without_types)}' + ) + logger.info(msg) + + edf_info["stim_channel_idxs"] = stim_channel_idxs + if any(pick_mask): + picks = [item for item, mask in zip(range(nchan), pick_mask) if mask] + edf_info["max_samp"] = max_samp = n_samps[picks].max() + else: + edf_info["max_samp"] = max_samp = n_samps.max() + + # Info structure + # ------------------------------------------------------------------------- + + not_stim_ch = [x for x in 
range(n_samps.shape[0]) if x not in stim_channel_idxs] + if len(not_stim_ch) == 0: # only loading stim channels + not_stim_ch = list(range(len(n_samps))) + sfreq = ( + np.take(n_samps, not_stim_ch).max() + * edf_info["record_length"][1] + / edf_info["record_length"][0] + ) + del n_samps + info = _empty_info(sfreq) + info["meas_date"] = edf_info["meas_date"] + info["chs"] = chs + info["ch_names"] = ch_names + + # Subject information + info["subject_info"] = {} + + # String subject identifier + if edf_info["subject_info"].get("id") is not None: + info["subject_info"]["his_id"] = edf_info["subject_info"]["id"] + # Subject sex (0=unknown, 1=male, 2=female) + if edf_info["subject_info"].get("sex") is not None: + if edf_info["subject_info"]["sex"] == "M": + info["subject_info"]["sex"] = 1 + elif edf_info["subject_info"]["sex"] == "F": + info["subject_info"]["sex"] = 2 + else: + info["subject_info"]["sex"] = 0 + # Subject names (first, middle, last). + if edf_info["subject_info"].get("name") is not None: + sub_names = edf_info["subject_info"]["name"].split("_") + if len(sub_names) < 2 or len(sub_names) > 3: + info["subject_info"]["last_name"] = edf_info["subject_info"]["name"] + elif len(sub_names) == 2: + info["subject_info"]["first_name"] = sub_names[0] + info["subject_info"]["last_name"] = sub_names[1] + else: + info["subject_info"]["first_name"] = sub_names[0] + info["subject_info"]["middle_name"] = sub_names[1] + info["subject_info"]["last_name"] = sub_names[2] + # Birthday in (year, month, day) format. + if isinstance(edf_info["subject_info"].get("birthday"), datetime): + info["subject_info"]["birthday"] = date( + edf_info["subject_info"]["birthday"].year, + edf_info["subject_info"]["birthday"].month, + edf_info["subject_info"]["birthday"].day, + ) + # Handedness (1=right, 2=left, 3=ambidextrous). + if edf_info["subject_info"].get("hand") is not None: + info["subject_info"]["hand"] = int(edf_info["subject_info"]["hand"]) + # Height in meters. + if edf_info["subject_info"].get("height") is not None: + info["subject_info"]["height"] = float(edf_info["subject_info"]["height"]) + # Weight in kilograms. + if edf_info["subject_info"].get("weight") is not None: + info["subject_info"]["weight"] = float(edf_info["subject_info"]["weight"]) + # Remove values after conversion to help with in-memory anonymization + for key in ("subject_info", "meas_date"): + del edf_info[key] + + # Filter settings + if filt_ch_idxs := [x for x in range(len(sel)) if x not in stim_channel_idxs]: + _set_prefilter(info, edf_info, filt_ch_idxs, "highpass") + _set_prefilter(info, edf_info, filt_ch_idxs, "lowpass") + + if np.isnan(info["lowpass"]): + info["lowpass"] = info["sfreq"] / 2.0 + + if info["highpass"] > info["lowpass"]: + warn( + f'Highpass cutoff frequency {info["highpass"]} is greater ' + f'than lowpass cutoff frequency {info["lowpass"]}, ' + "setting values to 0 and Nyquist." 
+ ) + info["highpass"] = 0.0 + info["lowpass"] = info["sfreq"] / 2.0 + + # Some keys to be consistent with FIF measurement info + info["description"] = None + edf_info["nsamples"] = int(edf_info["n_records"] * max_samp) + + info._unlocked = False + info._update_redundant() + + # Later used for reading + edf_info["cal"] = physical_ranges / cals + + # physical dimension in µV + edf_info["offsets"] = ( + edf_info["physical_min"] - edf_info["digital_min"] * edf_info["cal"] + ) + del edf_info["physical_min"] + del edf_info["digital_min"] + + if edf_info["subtype"] == "bdf": + edf_info["cal"][stim_channel_idxs] = 1 + edf_info["offsets"][stim_channel_idxs] = 0 + edf_info["units"][stim_channel_idxs] = 1 + + return info, edf_info, orig_units + + +def _parse_prefilter_string(prefiltering): + """Parse prefilter string from EDF+ and BDF headers.""" + filter_types = ["HP", "LP"] + filter_strings = {t: [] for t in filter_types} + for filt in prefiltering: + for t in filter_types: + matches = re.findall(rf"{t}:\s*([a-zA-Z0-9,.]+)(Hz)?", filt) + value = "" + for match in matches: + if match[0]: + value = match[0].replace("Hz", "").replace(",", ".") + filter_strings[t].append(value) + return np.array(filter_strings["HP"]), np.array(filter_strings["LP"]) + + +def _prefilter_float(filt): + if isinstance(filt, int | float | np.number): + return filt + if filt == "DC": + return 0.0 + if filt.replace(".", "", 1).isdigit(): + return float(filt) + return np.nan + + +def _set_prefilter(info, edf_info, ch_idxs, key): + value = 0 + if len(values := edf_info.get(key, [])): + values = [x for i, x in enumerate(values) if i in ch_idxs] + if len(np.unique(values)) > 1: + warn( + f"Channels contain different {key} filters. " + f"{'Highest' if key == 'highpass' else 'Lowest'} filter " + "setting will be stored." + ) + if key == "highpass": + value = np.nanmax([_prefilter_float(x) for x in values]) + else: + value = np.nanmin([_prefilter_float(x) for x in values]) + else: + value = _prefilter_float(values[0]) + if not np.isnan(value) and value != 0: + info[key] = value + + +def _edf_str(x): + return x.decode("latin-1").split("\x00")[0] + + +def _edf_str_num(x): + return _edf_str(x).replace(",", ".") + + +def _read_edf_header( + fname, exclude, infer_types, include=None, exclude_after_unique=False +): + """Read header information from EDF+ or BDF file.""" + edf_info = {"events": []} + + with open(fname, "rb") as fid: + fid.read(8) # version (unused here) + + # patient ID + patient = {} + id_info = fid.read(80).decode("latin-1").rstrip() + id_info = id_info.split(" ") + if len(id_info): + patient["id"] = id_info[0] + if len(id_info) >= 4: + try: + birthdate = datetime.strptime(id_info[2], "%d-%b-%Y") + except ValueError: + birthdate = "X" + patient["sex"] = id_info[1] + patient["birthday"] = birthdate + patient["name"] = id_info[3] + if len(id_info) > 4: + for info in id_info[4:]: + if "=" in info: + key, value = info.split("=") + err = f"patient {key} info cannot be {value}, skipping." + if key in ["weight", "height"]: + try: + patient[key] = float(value) + except ValueError: + logger.debug(err) + continue + elif key in ["hand"]: + try: + patient[key] = int(value) + except ValueError: + logger.debug(err) + continue + else: + warn(f"Invalid patient information {key}") + + # Recording ID + rec_info = fid.read(80).decode("latin-1").rstrip().split(" ") + # if the measurement date is available in the recording info, it's used instead + # of the file's meas_date since it contains all 4 digits of the year. 
+ meas_date = None + if len(rec_info) == 5: + try: + meas_date = datetime.strptime(rec_info[1], "%d-%b-%Y") + except Exception: + meas_date = None + else: + fid.read(8) # skip the file's meas_date + if meas_date is None: + try: + meas_date = fid.read(8).decode("latin-1") + day, month, year = (int(x) for x in meas_date.split(".")) + year = year + 2000 if year < 85 else year + 1900 + meas_date = datetime(year, month, day) + except Exception: + meas_date = None + if meas_date is not None: + # try to get the hour/minute/sec from the recording info + try: + meas_time = fid.read(8).decode("latin-1") + hour, minute, second = (int(x) for x in meas_time.split(".")) + except Exception: + hour, minute, second = 0, 0, 0 + meas_date = meas_date.replace( + hour=hour, minute=minute, second=second, tzinfo=timezone.utc + ) + else: + fid.read(8) # skip the file's measurement time + warn("Invalid measurement date encountered in the header.") + + header_nbytes = int(_edf_str(fid.read(8))) + # The following 44 bytes sometimes identify the file type, but this is + # not guaranteed. Therefore, we skip this field and use the file + # extension to determine the subtype (EDF or BDF, which differ in the + # number of bytes they use for the data records; EDF uses 2 bytes + # whereas BDF uses 3 bytes). + fid.read(44) + subtype = os.path.splitext(fname)[1][1:].lower() + + n_records = int(_edf_str(fid.read(8))) + record_length = float(_edf_str(fid.read(8))) + record_length = np.array([record_length, 1.0]) # in seconds + if record_length[0] == 0: + record_length[0] = 1.0 + warn( + "Header information is incorrect for record length. Default " + "record length set to 1.\nIt is possible that this file only" + " contains annotations and no signals. In that case, please " + "use mne.read_annotations() to load these annotations." + ) + + nchan = int(_edf_str(fid.read(4))) + channels = list(range(nchan)) + + # read in 16 byte labels and strip any extra spaces at the end + ch_labels = [fid.read(16).strip().decode("latin-1") for _ in channels] + + # get channel names and optionally channel type + # EDF specification contains 16 bytes that encode channel names, + # optionally prefixed by a string representing channel type separated + # by a space + if infer_types: + ch_types, ch_names = [], [] + for ch_label in ch_labels: + ch_type, ch_name = "EEG", ch_label # default to EEG + parts = ch_label.split(" ") + if len(parts) > 1: + if parts[0].upper() in CH_TYPE_MAPPING: + ch_type = parts[0].upper() + ch_name = " ".join(parts[1:]) + logger.info( + f"Channel '{ch_label}' recognized as type " + f"{ch_type} (renamed to '{ch_name}')." 
+ ) + ch_types.append(ch_type) + ch_names.append(ch_name) + else: + ch_types, ch_names = ["EEG"] * nchan, ch_labels + + tal_idx = _find_tal_idx(ch_names) + if exclude_after_unique: + # make sure channel names are unique + ch_names = _unique_channel_names(ch_names) + + exclude = _find_exclude_idx(ch_names, exclude, include) + exclude = np.concatenate([exclude, tal_idx]) + sel = np.setdiff1d(np.arange(len(ch_names)), exclude) + + for ch in channels: + fid.read(80) # transducer + units = [fid.read(8).strip().decode("latin-1") for ch in channels] + edf_info["units"] = list() + for i, unit in enumerate(units): + if i in exclude: + continue + # allow μ (greek mu), µ (micro symbol) and μ (sjis mu) codepoints + if unit in ("\u03bcV", "\u00b5V", "\x83\xcaV", "uV"): + edf_info["units"].append(1e-6) + elif unit == "mV": + edf_info["units"].append(1e-3) + else: + edf_info["units"].append(1) + edf_info["units"] = np.array(edf_info["units"], float) + + ch_names = [ch_names[idx] for idx in sel] + ch_types = [ch_types[idx] for idx in sel] + units = [units[idx] for idx in sel] + + if not exclude_after_unique: + # make sure channel names are unique + ch_names = _unique_channel_names(ch_names) + orig_units = dict(zip(ch_names, units)) + + physical_min = np.array([float(_edf_str_num(fid.read(8))) for ch in channels])[ + sel + ] + physical_max = np.array([float(_edf_str_num(fid.read(8))) for ch in channels])[ + sel + ] + digital_min = np.array([float(_edf_str_num(fid.read(8))) for ch in channels])[ + sel + ] + digital_max = np.array([float(_edf_str_num(fid.read(8))) for ch in channels])[ + sel + ] + prefiltering = np.array([_edf_str(fid.read(80)).strip() for ch in channels]) + highpass, lowpass = _parse_prefilter_string(prefiltering) + + # number of samples per record + n_samps = np.array([int(_edf_str(fid.read(8))) for ch in channels]) + + # Populate edf_info + edf_info.update( + ch_names=ch_names, + ch_types=ch_types, + data_offset=header_nbytes, + digital_max=digital_max, + digital_min=digital_min, + highpass=highpass, + sel=sel, + lowpass=lowpass, + meas_date=meas_date, + n_records=n_records, + n_samps=n_samps, + nchan=nchan, + subject_info=patient, + physical_max=physical_max, + physical_min=physical_min, + record_length=record_length, + subtype=subtype, + tal_idx=tal_idx, + ) + + fid.read(32 * nchan).decode() # reserved + assert fid.tell() == header_nbytes + + fid.seek(0, 2) + n_bytes = fid.tell() + n_data_bytes = n_bytes - header_nbytes + total_samps = n_data_bytes // 3 if subtype == "bdf" else n_data_bytes // 2 + read_records = total_samps // np.sum(n_samps) + if n_records != read_records: + warn( + "Number of records from the header does not match the file " + "size (perhaps the recording was not stopped before exiting)." + " Inferring from the file size." 
+ ) + edf_info["n_records"] = read_records + del n_records + + if subtype == "bdf": + edf_info["dtype_byte"] = 3 # 24-bit (3 byte) integers + edf_info["dtype_np"] = UINT8 + else: + edf_info["dtype_byte"] = 2 # 16-bit (2 byte) integers + edf_info["dtype_np"] = INT16 + + return edf_info, orig_units + + +INT8 = " 1: + # We will not read it properly, so this should be an error + raise RuntimeError("Reading multiple data types not supported") + return dtype_np[0], dtype_byte[0] + + +def _read_gdf_header(fname, exclude, include=None): + """Read GDF 1.x and GDF 2.x header info.""" + edf_info = dict() + events = None + with open(fname, "rb") as fid: + version = fid.read(8).decode() + edf_info["type"] = edf_info["subtype"] = version[:3] + edf_info["number"] = float(version[4:]) + meas_date = None + + # GDF 1.x + # --------------------------------------------------------------------- + if edf_info["number"] < 1.9: + # patient ID + pid = fid.read(80).decode("latin-1") + pid = pid.split(" ", 2) + patient = {} + if len(pid) >= 2: + patient["id"] = pid[0] + patient["name"] = pid[1] + + # Recording ID + meas_id = {} + meas_id["recording_id"] = _edf_str(fid.read(80)).strip() + + # date + tm = _edf_str(fid.read(16)).strip() + try: + if tm[14:16] == " ": + tm = tm[:14] + "00" + tm[16:] + meas_date = datetime( + int(tm[0:4]), + int(tm[4:6]), + int(tm[6:8]), + int(tm[8:10]), + int(tm[10:12]), + int(tm[12:14]), + int(tm[14:16]) * pow(10, 4), + tzinfo=timezone.utc, + ) + except Exception: + pass + + header_nbytes = np.fromfile(fid, INT64, 1)[0] + meas_id["equipment"] = np.fromfile(fid, UINT8, 8)[0] + meas_id["hospital"] = np.fromfile(fid, UINT8, 8)[0] + meas_id["technician"] = np.fromfile(fid, UINT8, 8)[0] + fid.seek(20, 1) # 20bytes reserved + + n_records = np.fromfile(fid, INT64, 1)[0] + # record length in seconds + record_length = np.fromfile(fid, UINT32, 2) + if record_length[0] == 0: + record_length[0] = 1.0 + warn( + "Header information is incorrect for record length. " + "Default record length set to 1." 
+ ) + nchan = int(np.fromfile(fid, UINT32, 1)[0]) + channels = list(range(nchan)) + ch_names = [_edf_str(fid.read(16)).strip() for ch in channels] + exclude = _find_exclude_idx(ch_names, exclude, include) + sel = np.setdiff1d(np.arange(len(ch_names)), exclude) + fid.seek(80 * len(channels), 1) # transducer + units = [_edf_str(fid.read(8)).strip() for ch in channels] + edf_info["units"] = list() + for i, unit in enumerate(units): + if i in exclude: + continue + if unit[:2] == "uV": + edf_info["units"].append(1e-6) + else: + edf_info["units"].append(1) + edf_info["units"] = np.array(edf_info["units"], float) + + ch_names = [ch_names[idx] for idx in sel] + physical_min = np.fromfile(fid, FLOAT64, len(channels)) + physical_max = np.fromfile(fid, FLOAT64, len(channels)) + digital_min = np.fromfile(fid, INT64, len(channels)) + digital_max = np.fromfile(fid, INT64, len(channels)) + prefiltering = [_edf_str(fid.read(80)) for ch in channels] + highpass, lowpass = _parse_prefilter_string(prefiltering) + + # n samples per record + n_samps = np.fromfile(fid, INT32, len(channels)) + + # channel data type + dtype = np.fromfile(fid, INT32, len(channels)) + + # total number of bytes for data + bytes_tot = np.sum( + [GDFTYPE_BYTE[t] * n_samps[i] for i, t in enumerate(dtype)] + ) + + # Populate edf_info + dtype_np, dtype_byte = _check_dtype_byte(dtype) + edf_info.update( + bytes_tot=bytes_tot, + ch_names=ch_names, + data_offset=header_nbytes, + digital_min=digital_min, + digital_max=digital_max, + dtype_byte=dtype_byte, + dtype_np=dtype_np, + exclude=exclude, + highpass=highpass, + sel=sel, + lowpass=lowpass, + meas_date=meas_date, + meas_id=meas_id, + n_records=n_records, + n_samps=n_samps, + nchan=nchan, + subject_info=patient, + physical_max=physical_max, + physical_min=physical_min, + record_length=record_length, + ) + + fid.seek(32 * edf_info["nchan"], 1) # reserved + assert fid.tell() == header_nbytes + + # Event table + # ----------------------------------------------------------------- + etp = header_nbytes + n_records * edf_info["bytes_tot"] + # skip data to go to event table + fid.seek(etp) + etmode = np.fromfile(fid, UINT8, 1)[0] + if etmode in (1, 3): + sr = np.fromfile(fid, UINT8, 3).astype(np.uint32) + event_sr = sr[0] + for i in range(1, len(sr)): + event_sr = event_sr + sr[i] * 2 ** (i * 8) + n_events = np.fromfile(fid, UINT32, 1)[0] + pos = np.fromfile(fid, UINT32, n_events) - 1 # 1-based inds + typ = np.fromfile(fid, UINT16, n_events) + + if etmode == 3: + chn = np.fromfile(fid, UINT16, n_events) + dur = np.fromfile(fid, UINT32, n_events) + else: + chn = np.zeros(n_events, dtype=np.int32) + dur = np.ones(n_events, dtype=UINT32) + np.maximum(dur, 1, out=dur) + events = [n_events, pos, typ, chn, dur] + + # GDF 2.x + # --------------------------------------------------------------------- + else: + # FIXED HEADER + handedness = ("Unknown", "Right", "Left", "Equal") + gender = ("Unknown", "Male", "Female") + scale = ("Unknown", "No", "Yes", "Corrected") + + # date + pid = fid.read(66).decode() + pid = pid.split(" ", 2) + patient = {} + if len(pid) >= 2: + patient["id"] = pid[0] + patient["name"] = pid[1] + fid.seek(10, 1) # 10bytes reserved + + # Smoking / Alcohol abuse / drug abuse / medication + sadm = np.fromfile(fid, UINT8, 1)[0] + patient["smoking"] = scale[sadm % 4] + patient["alcohol_abuse"] = scale[(sadm >> 2) % 4] + patient["drug_abuse"] = scale[(sadm >> 4) % 4] + patient["medication"] = scale[(sadm >> 6) % 4] + patient["weight"] = np.fromfile(fid, UINT8, 1)[0] + if patient["weight"] 
== 0 or patient["weight"] == 255: + patient["weight"] = None + patient["height"] = np.fromfile(fid, UINT8, 1)[0] + if patient["height"] == 0 or patient["height"] == 255: + patient["height"] = None + + # Gender / Handedness / Visual Impairment + ghi = np.fromfile(fid, UINT8, 1)[0] + patient["sex"] = gender[ghi % 4] + patient["handedness"] = handedness[(ghi >> 2) % 4] + patient["visual"] = scale[(ghi >> 4) % 4] + + # Recording identification + meas_id = {} + meas_id["recording_id"] = _edf_str(fid.read(64)).strip() + vhsv = np.fromfile(fid, UINT8, 4) + loc = {} + if vhsv[3] == 0: + loc["vertpre"] = 10 * int(vhsv[0] >> 4) + int(vhsv[0] % 16) + loc["horzpre"] = 10 * int(vhsv[1] >> 4) + int(vhsv[1] % 16) + loc["size"] = 10 * int(vhsv[2] >> 4) + int(vhsv[2] % 16) + else: + loc["vertpre"] = 29 + loc["horzpre"] = 29 + loc["size"] = 29 + loc["version"] = 0 + loc["latitude"] = float(np.fromfile(fid, UINT32, 1)[0]) / 3600000 + loc["longitude"] = float(np.fromfile(fid, UINT32, 1)[0]) / 3600000 + loc["altitude"] = float(np.fromfile(fid, INT32, 1)[0]) / 100 + meas_id["loc"] = loc + + meas_date = np.fromfile(fid, UINT64, 1)[0] + if meas_date != 0: + meas_date = datetime(1, 1, 1, tzinfo=timezone.utc) + timedelta( + meas_date * pow(2, -32) - 367 + ) + else: + meas_date = None + + birthday = np.fromfile(fid, UINT64, 1).tolist()[0] + if birthday == 0: + birthday = datetime(1, 1, 1, tzinfo=timezone.utc) + else: + birthday = datetime(1, 1, 1, tzinfo=timezone.utc) + timedelta( + birthday * pow(2, -32) - 367 + ) + patient["birthday"] = birthday + if patient["birthday"] != datetime(1, 1, 1, 0, 0, tzinfo=timezone.utc): + today = datetime.now(tz=timezone.utc) + patient["age"] = today.year - patient["birthday"].year + # fudge the day by -1 if today happens to be a leap day + day = 28 if today.month == 2 and today.day == 29 else today.day + today = today.replace(year=patient["birthday"].year, day=day) + if today < patient["birthday"]: + patient["age"] -= 1 + else: + patient["age"] = None + + header_nbytes = np.fromfile(fid, UINT16, 1)[0] * 256 + + fid.seek(6, 1) # 6 bytes reserved + meas_id["equipment"] = np.fromfile(fid, UINT8, 8) + meas_id["ip"] = np.fromfile(fid, UINT8, 6) + patient["headsize"] = np.fromfile(fid, UINT16, 3) + patient["headsize"] = np.asarray(patient["headsize"], np.float32) + patient["headsize"] = np.ma.masked_array( + patient["headsize"], np.equal(patient["headsize"], 0), None + ).filled() + ref = np.fromfile(fid, FLOAT32, 3) + gnd = np.fromfile(fid, FLOAT32, 3) + n_records = np.fromfile(fid, INT64, 1)[0] + + # record length in seconds + record_length = np.fromfile(fid, UINT32, 2) + if record_length[0] == 0: + record_length[0] = 1.0 + warn( + "Header information is incorrect for record length. " + "Default record length set to 1." 
+ ) + + nchan = int(np.fromfile(fid, UINT16, 1)[0]) + fid.seek(2, 1) # 2bytes reserved + + # Channels (variable header) + channels = list(range(nchan)) + ch_names = [_edf_str(fid.read(16)).strip() for ch in channels] + exclude = _find_exclude_idx(ch_names, exclude, include) + sel = np.setdiff1d(np.arange(len(ch_names)), exclude) + + fid.seek(80 * len(channels), 1) # reserved space + fid.seek(6 * len(channels), 1) # phys_dim, obsolete + + """The Physical Dimensions are encoded as int16, according to: + - Units codes : + https://sourceforge.net/p/biosig/svn/HEAD/tree/trunk/biosig/doc/units.csv + - Decimal factors codes: + https://sourceforge.net/p/biosig/svn/HEAD/tree/trunk/biosig/doc/DecimalFactors.txt + """ # noqa + units = np.fromfile(fid, UINT16, len(channels)).tolist() + unitcodes = np.array(units[:]) + edf_info["units"] = list() + for i, unit in enumerate(units): + if i in exclude: + continue + if unit == 4275: # microvolts + edf_info["units"].append(1e-6) + elif unit == 4274: # millivolts + edf_info["units"].append(1e-3) + elif unit == 512: # dimensionless + edf_info["units"].append(1) + elif unit == 0: + edf_info["units"].append(1) # unrecognized + else: + warn( + f"Unsupported physical dimension for channel {i} " + "(assuming dimensionless). Please contact the " + "MNE-Python developers for support." + ) + edf_info["units"].append(1) + edf_info["units"] = np.array(edf_info["units"], float) + + ch_names = [ch_names[idx] for idx in sel] + physical_min = np.fromfile(fid, FLOAT64, len(channels)) + physical_max = np.fromfile(fid, FLOAT64, len(channels)) + digital_min = np.fromfile(fid, FLOAT64, len(channels)) + digital_max = np.fromfile(fid, FLOAT64, len(channels)) + + fid.seek(68 * len(channels), 1) # obsolete + lowpass = np.fromfile(fid, FLOAT32, len(channels)) + highpass = np.fromfile(fid, FLOAT32, len(channels)) + notch = np.fromfile(fid, FLOAT32, len(channels)) + + # number of samples per record + n_samps = np.fromfile(fid, INT32, len(channels)) + + # data type + dtype = np.fromfile(fid, INT32, len(channels)) + + channel = {} + channel["xyz"] = [np.fromfile(fid, FLOAT32, 3)[0] for ch in channels] + + if edf_info["number"] < 2.19: + impedance = np.fromfile(fid, UINT8, len(channels)).astype(float) + impedance[impedance == 255] = np.nan + channel["impedance"] = pow(2, impedance / 8) + fid.seek(19 * len(channels), 1) # reserved + else: + tmp = np.fromfile(fid, FLOAT32, 5 * len(channels)) + tmp = tmp[::5] + fZ = tmp[:] + impedance = tmp[:] + # channels with no voltage (code 4256) data + ch = [unitcodes & 65504 != 4256][0] + impedance[np.where(ch)] = None + # channel with no impedance (code 4288) data + ch = [unitcodes & 65504 != 4288][0] + fZ[np.where(ch)[0]] = None + + assert fid.tell() == header_nbytes + + # total number of bytes for data + bytes_tot = np.sum( + [GDFTYPE_BYTE[t] * n_samps[i] for i, t in enumerate(dtype)] + ) + + # Populate edf_info + dtype_np, dtype_byte = _check_dtype_byte(dtype) + edf_info.update( + bytes_tot=bytes_tot, + ch_names=ch_names, + data_offset=header_nbytes, + dtype_byte=dtype_byte, + dtype_np=dtype_np, + digital_min=digital_min, + digital_max=digital_max, + exclude=exclude, + gnd=gnd, + highpass=highpass, + sel=sel, + impedance=impedance, + lowpass=lowpass, + meas_date=meas_date, + meas_id=meas_id, + n_records=n_records, + n_samps=n_samps, + nchan=nchan, + notch=notch, + subject_info=patient, + physical_max=physical_max, + physical_min=physical_min, + record_length=record_length, + ref=ref, + ) + + # EVENT TABLE + # 
----------------------------------------------------------------- + etp = ( + edf_info["data_offset"] + edf_info["n_records"] * edf_info["bytes_tot"] + ) + fid.seek(etp) # skip data to go to event table + etmode = fid.read(1).decode() + if etmode != "": + etmode = np.fromstring(etmode, UINT8).tolist()[0] + + if edf_info["number"] < 1.94: + sr = np.fromfile(fid, UINT8, 3) + event_sr = sr[0] + for i in range(1, len(sr)): + event_sr = event_sr + sr[i] * 2 ** (i * 8) + n_events = np.fromfile(fid, UINT32, 1)[0] + else: + ne = np.fromfile(fid, UINT8, 3) + n_events = ne[0] + for i in range(1, len(ne)): + n_events = n_events + int(ne[i]) * 2 ** (i * 8) + event_sr = np.fromfile(fid, FLOAT32, 1)[0] + + pos = np.fromfile(fid, UINT32, n_events) - 1 # 1-based inds + typ = np.fromfile(fid, UINT16, n_events) + + if etmode == 3: + chn = np.fromfile(fid, UINT16, n_events) + dur = np.fromfile(fid, UINT32, n_events) + else: + chn = np.zeros(n_events, dtype=np.uint32) + dur = np.ones(n_events, dtype=np.uint32) + np.maximum(dur, 1, out=dur) + events = [n_events, pos, typ, chn, dur] + edf_info["event_sfreq"] = event_sr + + edf_info.update(events=events, sel=np.arange(len(edf_info["ch_names"]))) + + return edf_info + + +def _check_stim_channel( + stim_channel, + ch_names, + tal_ch_names=("EDF Annotations", "BDF Annotations"), +): + """Check that the stimulus channel exists in the current datafile.""" + DEFAULT_STIM_CH_NAMES = ["status", "trigger"] + + if stim_channel is None or stim_channel is False: + return [], [] + + if stim_channel is True: # convenient aliases + stim_channel = "auto" + + elif isinstance(stim_channel, str): + if stim_channel == "auto": + if "auto" in ch_names: + warn( + RuntimeWarning, + "Using `stim_channel='auto'` when auto" + " also corresponds to a channel name is ambiguous." + " Please use `stim_channel=['auto']`.", + ) + else: + valid_stim_ch_names = DEFAULT_STIM_CH_NAMES + else: + valid_stim_ch_names = [stim_channel.lower()] + + elif isinstance(stim_channel, int): + valid_stim_ch_names = [ch_names[stim_channel].lower()] + + elif isinstance(stim_channel, list): + if all([isinstance(s, str) for s in stim_channel]): + valid_stim_ch_names = [s.lower() for s in stim_channel] + elif all([isinstance(s, int) for s in stim_channel]): + valid_stim_ch_names = [ch_names[s].lower() for s in stim_channel] + else: + raise ValueError("Invalid stim_channel") + else: + raise ValueError("Invalid stim_channel") + + # Forbid the synthesis of stim channels from TAL Annotations + tal_ch_names_found = [ + ch for ch in valid_stim_ch_names if ch in [t.lower() for t in tal_ch_names] + ] + if len(tal_ch_names_found): + _msg = ( + "The synthesis of the stim channel is not supported since 0.18. Please " + f"remove {tal_ch_names_found} from `stim_channel` and use " + "`mne.events_from_annotations` instead." + ) + raise ValueError(_msg) + + ch_names_low = [ch.lower() for ch in ch_names] + found = list(set(valid_stim_ch_names) & set(ch_names_low)) + + if not found: + return [], [] + else: + stim_channel_idxs = [ch_names_low.index(f) for f in found] + names = [ch_names[idx] for idx in stim_channel_idxs] + return stim_channel_idxs, names + + +def _find_exclude_idx(ch_names, exclude, include=None): + """Find indices of all channels to exclude. + + If there are several channels called "A" and we want to exclude "A", then + add (the index of) all "A" channels to the exclusion list. + """ + if include: # find other than include channels + if exclude: + raise ValueError( + f"'exclude' must be empty if 'include' is assigned. 
Got {exclude}." + ) + if isinstance(include, str): # regex for channel names + indices_include = [] + for idx, ch in enumerate(ch_names): + if re.match(include, ch): + indices_include.append(idx) + indices = np.setdiff1d(np.arange(len(ch_names)), indices_include) + return indices + # list of channel names + return [idx for idx, ch in enumerate(ch_names) if ch not in include] + + if isinstance(exclude, str): # regex for channel names + indices = [] + for idx, ch in enumerate(ch_names): + if re.match(exclude, ch): + indices.append(idx) + return indices + # list of channel names + return [idx for idx, ch in enumerate(ch_names) if ch in exclude] + + +def _find_tal_idx(ch_names): + # Annotations / TAL Channels + accepted_tal_ch_names = ["EDF Annotations", "BDF Annotations"] + tal_channel_idx = np.where(np.isin(ch_names, accepted_tal_ch_names))[0] + return tal_channel_idx + + +@fill_doc +def read_raw_edf( + input_fname, + eog=None, + misc=None, + stim_channel="auto", + exclude=(), + infer_types=False, + include=None, + preload=False, + units=None, + encoding="utf8", + exclude_after_unique=False, + *, + verbose=None, +) -> RawEDF: + """Reader function for EDF and EDF+ files. + + Parameters + ---------- + input_fname : path-like + Path to the EDF or EDF+ file. + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + stim_channel : ``'auto'`` | str | list of str | int | list of int + Defaults to ``'auto'``, which means that channels named ``'status'`` or + ``'trigger'`` (case insensitive) are set to STIM. If str (or list of + str), all channels matching the name(s) are set to STIM. If int (or + list of ints), channels corresponding to the indices are set to STIM. + exclude : list of str | str + Channel names to exclude. This can help when reading data with + different sampling rates to avoid unnecessary resampling. A str is + interpreted as a regular expression. + infer_types : bool + If True, try to infer channel types from channel labels. If a channel + label starts with a known type (such as 'EEG') followed by a space and + a name (such as 'Fp1'), the channel type will be set accordingly, and + the channel will be renamed to the original label without the prefix. + For unknown prefixes, the type will be 'EEG' and the name will not be + modified. If False, do not infer types and assume all channels are of + type 'EEG'. + + .. versionadded:: 0.24.1 + include : list of str | str + Channel names to be included. A str is interpreted as a regular + expression. 'exclude' must be empty if include is assigned. + + .. versionadded:: 1.1 + %(preload)s + %(units_edf_bdf_io)s + %(encoding_edf)s + %(exclude_after_unique)s + %(verbose)s + + Returns + ------- + raw : instance of RawEDF + The raw instance. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.read_raw_bdf : Reader function for BDF files. + mne.io.read_raw_gdf : Reader function for GDF files. + mne.export.export_raw : Export function for EDF files. + mne.io.Raw : Documentation of attributes and methods of RawEDF. + + Notes + ----- + %(edf_resamp_note)s + + It is worth noting that in some special cases, it may be necessary to shift + event values in order to retrieve correct event triggers. 
This depends on + the triggering device used to perform the synchronization. For instance, in + some files events need to be shifted by 8 bits: + + >>> events[:, 2] >>= 8 # doctest:+SKIP + + TAL channels called 'EDF Annotations' are parsed and extracted annotations + are stored in raw.annotations. Use :func:`mne.events_from_annotations` to + obtain events from these annotations. + + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. + + The EDF specification allows optional storage of channel types in the + prefix of the signal label for each channel. For example, ``EEG Fz`` + implies that ``Fz`` is an EEG channel and ``MISC E`` would imply ``E`` is + a MISC channel. However, there is no standard way of specifying all + channel types. MNE-Python will try to infer the channel type, when such a + string exists, defaulting to EEG, when there is no prefix or the prefix is + not recognized. + + The following prefix strings are mapped to MNE internal types: + + - 'EEG': 'eeg' + - 'SEEG': 'seeg' + - 'ECOG': 'ecog' + - 'DBS': 'dbs' + - 'EOG': 'eog' + - 'ECG': 'ecg' + - 'EMG': 'emg' + - 'BIO': 'bio' + - 'RESP': 'resp' + - 'MISC': 'misc' + - 'SAO2': 'bio' + + The EDF specification allows storage of subseconds in measurement date. + However, this reader currently sets subseconds to 0 by default. + """ + input_fname = os.path.abspath(input_fname) + ext = os.path.splitext(input_fname)[1][1:].lower() + if ext != "edf": + raise NotImplementedError(f"Only EDF files are supported, got {ext}.") + return RawEDF( + input_fname=input_fname, + eog=eog, + misc=misc, + stim_channel=stim_channel, + exclude=exclude, + infer_types=infer_types, + preload=preload, + include=include, + units=units, + encoding=encoding, + exclude_after_unique=exclude_after_unique, + verbose=verbose, + ) + + +@fill_doc +def read_raw_bdf( + input_fname, + eog=None, + misc=None, + stim_channel="auto", + exclude=(), + infer_types=False, + include=None, + preload=False, + units=None, + encoding="utf8", + exclude_after_unique=False, + *, + verbose=None, +) -> RawEDF: + """Reader function for BDF files. + + Parameters + ---------- + input_fname : path-like + Path to the BDF file. + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + stim_channel : ``'auto'`` | str | list of str | int | list of int + Defaults to ``'auto'``, which means that channels named ``'status'`` or + ``'trigger'`` (case insensitive) are set to STIM. If str (or list of + str), all channels matching the name(s) are set to STIM. If int (or + list of ints), channels corresponding to the indices are set to STIM. + exclude : list of str | str + Channel names to exclude. This can help when reading data with + different sampling rates to avoid unnecessary resampling. A str is + interpreted as a regular expression. + infer_types : bool + If True, try to infer channel types from channel labels. If a channel + label starts with a known type (such as 'EEG') followed by a space and + a name (such as 'Fp1'), the channel type will be set accordingly, and + the channel will be renamed to the original label without the prefix. 
+ For unknown prefixes, the type will be 'EEG' and the name will not be + modified. If False, do not infer types and assume all channels are of + type 'EEG'. + + .. versionadded:: 0.24.1 + include : list of str | str + Channel names to be included. A str is interpreted as a regular + expression. 'exclude' must be empty if include is assigned. + + .. versionadded:: 1.1 + %(preload)s + %(units_edf_bdf_io)s + %(encoding_edf)s + %(exclude_after_unique)s + %(verbose)s + + Returns + ------- + raw : instance of RawEDF + The raw instance. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.read_raw_edf : Reader function for EDF and EDF+ files. + mne.io.read_raw_gdf : Reader function for GDF files. + mne.io.Raw : Documentation of attributes and methods of RawEDF. + + Notes + ----- + :class:`mne.io.Raw` only stores signals with matching sampling frequencies. + Therefore, if mixed sampling frequency signals are requested, all signals + are upsampled to the highest loaded sampling frequency. In this case, using + preload=True is recommended, as otherwise, edge artifacts appear when + slices of the signal are requested. + + Biosemi devices trigger codes are encoded in 16-bit format, whereas system + codes (CMS in/out-of range, battery low, etc.) are coded in bits 16-23 of + the status channel (see http://www.biosemi.com/faq/trigger_signals.htm). + To retrieve correct event values (bits 1-16), one could do: + + >>> events = mne.find_events(...) # doctest:+SKIP + >>> events[:, 2] &= (2**16 - 1) # doctest:+SKIP + + The above operation can be carried out directly in :func:`mne.find_events` + using the ``mask`` and ``mask_type`` parameters (see + :func:`mne.find_events` for more details). + + It is also possible to retrieve system codes, but no particular effort has + been made to decode these in MNE. In case it is necessary, for instance to + check the CMS bit, the following operation can be carried out: + + >>> cms_bit = 20 # doctest:+SKIP + >>> cms_high = (events[:, 2] & (1 << cms_bit)) != 0 # doctest:+SKIP + + It is worth noting that in some special cases, it may be necessary to shift + event values in order to retrieve correct event triggers. This depends on + the triggering device used to perform the synchronization. For instance, in + some files events need to be shifted by 8 bits: + + >>> events[:, 2] >>= 8 # doctest:+SKIP + + TAL channels called 'BDF Annotations' are parsed and extracted annotations + are stored in raw.annotations. Use :func:`mne.events_from_annotations` to + obtain events from these annotations. + + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. + """ + input_fname = os.path.abspath(input_fname) + ext = os.path.splitext(input_fname)[1][1:].lower() + if ext != "bdf": + raise NotImplementedError(f"Only BDF files are supported, got {ext}.") + return RawEDF( + input_fname=input_fname, + eog=eog, + misc=misc, + stim_channel=stim_channel, + exclude=exclude, + infer_types=infer_types, + preload=preload, + include=include, + units=units, + encoding=encoding, + exclude_after_unique=exclude_after_unique, + verbose=verbose, + ) + + +@fill_doc +def read_raw_gdf( + input_fname, + eog=None, + misc=None, + stim_channel="auto", + exclude=(), + include=None, + preload=False, + verbose=None, +) -> RawGDF: + """Reader function for GDF files. 
+ + Parameters + ---------- + input_fname : path-like + Path to the GDF file. + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + stim_channel : ``'auto'`` | str | list of str | int | list of int + Defaults to ``'auto'``, which means that channels named ``'status'`` or + ``'trigger'`` (case insensitive) are set to STIM. If str (or list of + str), all channels matching the name(s) are set to STIM. If int (or + list of ints), channels corresponding to the indices are set to STIM. + exclude : list of str | str + Channel names to exclude. This can help when reading data with + different sampling rates to avoid unnecessary resampling. A str is + interpreted as a regular expression. + include : list of str | str + Channel names to be included. A str is interpreted as a regular + expression. 'exclude' must be empty if include is assigned. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawGDF + The raw instance. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.read_raw_edf : Reader function for EDF and EDF+ files. + mne.io.read_raw_bdf : Reader function for BDF files. + mne.io.Raw : Documentation of attributes and methods of RawGDF. + + Notes + ----- + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. + """ + input_fname = os.path.abspath(input_fname) + ext = os.path.splitext(input_fname)[1][1:].lower() + if ext != "gdf": + raise NotImplementedError(f"Only GDF files are supported, got {ext}.") + return RawGDF( + input_fname=input_fname, + eog=eog, + misc=misc, + stim_channel=stim_channel, + exclude=exclude, + preload=preload, + include=include, + verbose=verbose, + ) + + +@fill_doc +def _read_annotations_edf(annotations, ch_names=None, encoding="utf8"): + """Annotation File Reader. + + Parameters + ---------- + annotations : ndarray (n_chans, n_samples) | str + Channel data in EDF+ TAL format or path to annotation file. + ch_names : list of string + List of channels' names. + %(encoding_edf)s + + Returns + ------- + annot : instance of Annotations + The annotations. 
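+
+    Notes
+    -----
+    A minimal sketch of the TAL byte stream this parser expects (values
+    hypothetical): ``+0.5\x150.2\x14Stimulus\x14\x00`` encodes one
+    annotation with onset 0.5 s, duration 0.2 s, and description
+    ``Stimulus``; the ``\x14``/``\x15`` separators and trailing ``\x00``
+    come from the EDF+ TAL specification.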
+ """ + pat = "([+-]\\d+\\.?\\d*)(\x15(\\d+\\.?\\d*))?(\x14.*?)\x14\x00" + if isinstance(annotations, str | Path): + with open(annotations, "rb") as annot_file: + triggers = re.findall(pat.encode(), annot_file.read()) + triggers = [tuple(map(lambda x: x.decode(encoding), t)) for t in triggers] + else: + tals = bytearray() + annotations = np.atleast_2d(annotations) + for chan in annotations: + this_chan = chan.ravel() + if this_chan.dtype == INT32: # BDF + this_chan = this_chan.view(dtype=UINT8) + this_chan = this_chan.reshape(-1, 4) + # Why only keep the first 3 bytes as BDF values + # are stored with 24 bits (not 32) + this_chan = this_chan[:, :3].ravel() + # As ravel() returns a 1D array we can add all values at once + tals.extend(this_chan) + else: + this_chan = chan.astype(np.int64) + # Exploit np vectorized processing + tals.extend(np.uint8([this_chan % 256, this_chan // 256]).flatten("F")) + try: + triggers = re.findall(pat, tals.decode(encoding)) + except UnicodeDecodeError as e: + raise Exception( + "Encountered invalid byte in at least one annotations channel." + " You might want to try setting \"encoding='latin1'\"." + ) from e + + events = {} + offset = 0.0 + for k, ev in enumerate(triggers): + onset = float(ev[0]) + offset + duration = float(ev[2]) if ev[2] else 0 + for description in ev[3].split("\x14")[1:]: + if description: + if ( + "@@" in description + and ch_names is not None + and description.split("@@")[1] in ch_names + ): + description, ch_name = description.split("@@") + key = f"{onset}_{duration}_{description}" + else: + ch_name = None + key = f"{onset}_{duration}_{description}" + if key in events: + key += f"_{k}" # make key unique + if key in events and ch_name: + events[key][3] += (ch_name,) + else: + events[key] = [ + onset, + duration, + description, + (ch_name,) if ch_name else (), + ] + + elif k == 0: + # The startdate/time of a file is specified in the EDF+ header + # fields 'startdate of recording' and 'starttime of recording'. + # These fields must indicate the absolute second in which the + # start of the first data record falls. So, the first TAL in + # the first data record always starts with +0.X, indicating + # that the first data record starts a fraction, X, of a second + # after the startdate/time that is specified in the EDF+ + # header. If X=0, then the .X may be omitted. + offset = -onset + + if events: + onset, duration, description, annot_ch_names = zip(*events.values()) + else: + onset, duration, description, annot_ch_names = list(), list(), list(), list() + + assert len(onset) == len(duration) == len(description) == len(annot_ch_names) + + return Annotations( + onset=onset, + duration=duration, + description=description, + orig_time=None, + ch_names=annot_ch_names, + ) + + +def _get_annotations_gdf(edf_info, sfreq): + onset, duration, desc = list(), list(), list() + events = edf_info.get("events", None) + # Annotations in GDF: events are stored as the following + # list: `events = [n_events, pos, typ, chn, dur]` where pos is the + # latency, dur is the duration in samples. They both are + # numpy.ndarray + if events is not None and events[1].shape[0] > 0: + onset = events[1] / sfreq + duration = events[4] / sfreq + desc = events[2] + + return onset, duration, desc diff --git a/mne/io/eeglab/__init__.py b/mne/io/eeglab/__init__.py new file mode 100644 index 0000000..6ffc4e4 --- /dev/null +++ b/mne/io/eeglab/__init__.py @@ -0,0 +1,7 @@ +"""EEGLAB module for conversion to FIF.""" + +# Authors: The MNE-Python contributors. 
+# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .eeglab import read_raw_eeglab, read_epochs_eeglab diff --git a/mne/io/eeglab/_eeglab.py b/mne/io/eeglab/_eeglab.py new file mode 100644 index 0000000..28df469 --- /dev/null +++ b/mne/io/eeglab/_eeglab.py @@ -0,0 +1,83 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +try: + from scipy.io.matlab import MatlabFunction, MatlabOpaque +except ImportError: # scipy < 1.8 + from scipy.io.matlab.mio5 import MatlabFunction + from scipy.io.matlab.mio5_params import MatlabOpaque +from scipy.io import loadmat + +from ...utils import _import_pymatreader_funcs + + +def _todict_from_np_struct(data): # taken from pymatreader.utils + data_dict = {} + + for cur_field_name in data.dtype.names: + try: + n_items = len(data[cur_field_name]) + cur_list = [] + + for idx in np.arange(n_items): + cur_value = data[cur_field_name].item(idx) + cur_value = _check_for_scipy_mat_struct(cur_value) + cur_list.append(cur_value) + + data_dict[cur_field_name] = cur_list + except TypeError: + cur_value = data[cur_field_name].item(0) + cur_value = _check_for_scipy_mat_struct(cur_value) + data_dict[cur_field_name] = cur_value + + return data_dict + + +def _handle_scipy_ndarray(data): # taken from pymatreader.utils + if data.dtype == np.dtype("object") and not isinstance(data, MatlabFunction): + as_list = [] + for element in data: + as_list.append(_check_for_scipy_mat_struct(element)) + data = as_list + elif isinstance(data.dtype.names, tuple): + data = _todict_from_np_struct(data) + data = _check_for_scipy_mat_struct(data) + + if isinstance(data, np.ndarray): + data = np.array(data) + + return data + + +def _check_for_scipy_mat_struct(data): # taken from pymatreader.utils + """Convert all scipy.io.matlab.mio5_params.mat_struct elements.""" + if isinstance(data, dict): + for key in data: + data[key] = _check_for_scipy_mat_struct(data[key]) + + if isinstance(data, MatlabOpaque): + try: + if data[0][2] == b"string": + return None + except IndexError: + pass + + if isinstance(data, np.ndarray): + data = _handle_scipy_ndarray(data) + + return data + + +def _readmat(fname, uint16_codec=None): + try: + read_mat = _import_pymatreader_funcs("EEGLAB I/O") + except RuntimeError: # pymatreader not installed + eeg = loadmat( + fname, squeeze_me=True, mat_dtype=False, uint16_codec=uint16_codec + ) + return _check_for_scipy_mat_struct(eeg) + else: + return read_mat(fname, uint16_codec=uint16_codec) diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py new file mode 100644 index 0000000..3aa611b --- /dev/null +++ b/mne/io/eeglab/eeglab.py @@ -0,0 +1,825 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
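+# This module implements read_raw_eeglab and read_epochs_eeglab (re-exported
+# from mne/io/eeglab/__init__.py) together with their supporting helpers.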
+ +import os.path as op +from os import PathLike +from pathlib import Path + +import numpy as np + +from mne.utils.check import _check_option + +from ..._fiff._digitization import _ensure_fiducials_head +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import create_info +from ..._fiff.pick import _PICK_TYPES_KEYS +from ..._fiff.utils import _find_channels, _read_segments_file +from ...annotations import Annotations, read_annotations +from ...channels import make_dig_montage +from ...defaults import DEFAULTS +from ...epochs import BaseEpochs +from ...event import read_events +from ...utils import ( + Bunch, + _check_fname, + _check_head_radius, + fill_doc, + logger, + verbose, + warn, +) +from ..base import BaseRaw +from ._eeglab import _readmat + +# just fix the scaling for now, EEGLAB doesn't seem to provide this info +CAL = 1e-6 + + +def _check_eeglab_fname(fname, dataname): + """Check whether the filename is valid. + + Check if the file extension is ``.fdt`` (older ``.dat`` being invalid) or + whether the ``EEG.data`` filename exists. If ``EEG.data`` file is absent + the set file name with .set changed to .fdt is checked. + """ + fmt = str(op.splitext(dataname)[-1]) + if fmt == ".dat": + raise NotImplementedError( + "Old data format .dat detected. Please update your EEGLAB " + "version and resave the data in .fdt format" + ) + + basedir = op.dirname(fname) + data_fname = op.join(basedir, dataname) + if not op.exists(data_fname): + fdt_from_set_fname = op.splitext(fname)[0] + ".fdt" + if op.exists(fdt_from_set_fname): + data_fname = fdt_from_set_fname + msg = ( + "Data file name in EEG.data ({}) is incorrect, the file " + "name must have changed on disk, using the correct file " + "name ({})." + ) + warn(msg.format(dataname, op.basename(fdt_from_set_fname))) + elif not data_fname == fdt_from_set_fname: + msg = "Could not find the .fdt data file, tried {} and {}." + raise FileNotFoundError(msg.format(data_fname, fdt_from_set_fname)) + return data_fname + + +def _check_load_mat(fname, uint16_codec): + """Check if the mat struct contains 'EEG'.""" + fname = _check_fname(fname, "read", True) + eeg = _readmat(fname, uint16_codec=uint16_codec) + if "ALLEEG" in eeg: + raise NotImplementedError( + "Loading an ALLEEG array is not supported. Please contact" + "mne-python developers for more information." 
+ ) + if "EEG" in eeg: # fields are contained in EEG structure + eeg = eeg["EEG"] + eeg = eeg.get("EEG", eeg) # handle nested EEG structure + eeg = Bunch(**eeg) + eeg.trials = int(eeg.trials) + eeg.nbchan = int(eeg.nbchan) + eeg.pnts = int(eeg.pnts) + return eeg + + +def _to_loc(ll): + """Check if location exists.""" + if isinstance(ll, int | float) or len(ll) > 0: + return ll + else: + return np.nan + + +def _eeg_has_montage_information(eeg): + try: + from scipy.io.matlab import mat_struct + except ImportError: # SciPy < 1.8 + from scipy.io.matlab.mio5_params import mat_struct + if not len(eeg.chanlocs): + has_pos = False + else: + pos_fields = ["X", "Y", "Z"] + if isinstance(eeg.chanlocs[0], mat_struct): + has_pos = all(hasattr(eeg.chanlocs[0], fld) for fld in pos_fields) + elif isinstance(eeg.chanlocs[0], np.ndarray): + # Old files + has_pos = all(fld in eeg.chanlocs[0].dtype.names for fld in pos_fields) + elif isinstance(eeg.chanlocs[0], dict): + # new files + has_pos = all(fld in eeg.chanlocs[0] for fld in pos_fields) + else: + has_pos = False # unknown (sometimes we get [0, 0]) + + return has_pos + + +def _get_montage_information(eeg, get_pos, *, montage_units): + """Get channel name, type and montage information from ['chanlocs'].""" + ch_names, ch_types, pos_ch_names, pos = list(), list(), list(), list() + unknown_types = dict() + for chanloc in eeg.chanlocs: + # channel name + ch_names.append(chanloc["labels"]) + + # channel type + ch_type = "eeg" + try_type = chanloc.get("type", None) + if isinstance(try_type, str): + try_type = try_type.strip().lower() + if try_type in _PICK_TYPES_KEYS: + ch_type = try_type + else: + if try_type in unknown_types: + unknown_types[try_type].append(chanloc["labels"]) + else: + unknown_types[try_type] = [chanloc["labels"]] + ch_types.append(ch_type) + + # channel loc + if get_pos: + loc_x = _to_loc(chanloc["X"]) + loc_y = _to_loc(chanloc["Y"]) + loc_z = _to_loc(chanloc["Z"]) + locs = np.r_[-loc_y, loc_x, loc_z] + pos_ch_names.append(chanloc["labels"]) + pos.append(locs) + + # warn if unknown types were provided + if len(unknown_types): + warn( + "Unknown types found, setting as type EEG:\n" + + "\n".join( + [ + f"{key}: {sorted(unknown_types[key])}" + for key in sorted(unknown_types) + ] + ) + ) + + lpa, rpa, nasion = None, None, None + if hasattr(eeg, "chaninfo") and isinstance(eeg.chaninfo["nodatchans"], dict): + nodatchans = eeg.chaninfo["nodatchans"] + types = nodatchans.get("type", []) + descriptions = nodatchans.get("description", []) + xs = nodatchans.get("X", []) + ys = nodatchans.get("Y", []) + zs = nodatchans.get("Z", []) + + for type_, description, x, y, z in zip(types, descriptions, xs, ys, zs): + if type_ != "FID": + continue + if description == "Nasion": + nasion = np.array([x, y, z]) + elif description == "Right periauricular point": + rpa = np.array([x, y, z]) + elif description == "Left periauricular point": + lpa = np.array([x, y, z]) + + # Always check this even if it's not used + _check_option("montage_units", montage_units, ("m", "dm", "cm", "mm", "auto")) + if pos_ch_names: + pos_array = np.array(pos, float) + pos_array.shape = (-1, 3) + + # roughly estimate head radius and check if its reasonable + is_nan_pos = np.isnan(pos).any(axis=1) + if not is_nan_pos.all(): + mean_radius = np.mean(np.linalg.norm(pos_array[~is_nan_pos], axis=1)) + scale_units = _handle_montage_units(montage_units, mean_radius) + mean_radius *= scale_units + pos_array *= scale_units + additional_info = ( + " Check if the montage_units argument is correct 
(the default " + 'is "mm", but your channel positions may be in different units' + ")." + ) + _check_head_radius(mean_radius, add_info=additional_info) + + montage = make_dig_montage( + ch_pos=dict(zip(ch_names, pos_array)), + coord_frame="head", + lpa=lpa, + rpa=rpa, + nasion=nasion, + ) + _ensure_fiducials_head(montage.dig) + else: + montage = None + + return ch_names, ch_types, montage + + +def _get_info(eeg, *, eog, montage_units): + """Get measurement info.""" + # add the ch_names and info['chs'][idx]['loc'] + if not isinstance(eeg.chanlocs, np.ndarray) and eeg.nbchan == 1: + eeg.chanlocs = [eeg.chanlocs] + + if isinstance(eeg.chanlocs, dict): + eeg.chanlocs = _dol_to_lod(eeg.chanlocs) + + eeg_has_ch_names_info = len(eeg.chanlocs) > 0 + + if eeg_has_ch_names_info: + has_pos = _eeg_has_montage_information(eeg) + ch_names, ch_types, eeg_montage = _get_montage_information( + eeg, has_pos, montage_units=montage_units + ) + update_ch_names = False + else: # if eeg.chanlocs is empty, we still need default chan names + ch_names = [f"EEG {ii:03d}" for ii in range(eeg.nbchan)] + ch_types = "eeg" + eeg_montage = None + update_ch_names = True + + info = create_info(ch_names, sfreq=eeg.srate, ch_types=ch_types) + + eog = _find_channels(ch_names, ch_type="EOG") if eog == "auto" else eog + for idx, ch in enumerate(info["chs"]): + ch["cal"] = CAL + if ch["ch_name"] in eog or idx in eog: + ch["coil_type"] = FIFF.FIFFV_COIL_NONE + ch["kind"] = FIFF.FIFFV_EOG_CH + + return info, eeg_montage, update_ch_names + + +def _set_dig_montage_in_init(self, montage): + """Set EEG sensor configuration and head digitization from when init. + + This is done from the information within fname when + read_raw_eeglab(fname) or read_epochs_eeglab(fname). + """ + if montage is None: + self.set_montage(None) + else: + missing_channels = set(self.ch_names) - set(montage.ch_names) + ch_pos = dict( + zip(list(missing_channels), np.full((len(missing_channels), 3), np.nan)) + ) + self.set_montage(montage + make_dig_montage(ch_pos=ch_pos, coord_frame="head")) + + +def _handle_montage_units(montage_units, mean_radius): + if montage_units == "auto": + # radius should be between 0.05 and 0.11 meters + if mean_radius < 0.25: + montage_units = "m" + elif mean_radius < 2.5: + montage_units = "dm" + elif mean_radius < 25: + montage_units = "cm" + else: # mean_radius >= 25 + montage_units = "mm" + prefix = montage_units[:-1] + scale_units = 1 / DEFAULTS["prefixes"][prefix] + return scale_units + + +@fill_doc +def read_raw_eeglab( + input_fname, + eog=(), + preload=False, + uint16_codec=None, + montage_units="auto", + verbose=None, +) -> "RawEEGLAB": + r"""Read an EEGLAB .set file. + + Parameters + ---------- + input_fname : path-like + Path to the ``.set`` file. If the data is stored in a separate ``.fdt`` + file, it is expected to be in the same folder as the ``.set`` file. + eog : list | tuple | ``'auto'`` + Names or indices of channels that should be designated EOG channels. + If 'auto', the channel names containing ``EOG`` or ``EYE`` are used. + Defaults to empty tuple. + %(preload)s + Note that ``preload=False`` will be effective only if the data is + stored in a separate binary file. + %(uint16_codec)s + %(montage_units)s + + .. versionchanged:: 1.6 + Support for ``'auto'`` was added and is the new default. + %(verbose)s + + Returns + ------- + raw : instance of RawEEGLAB + A Raw object containing EEGLAB .set data. + See :class:`mne.io.Raw` for documentation of attributes and methods. 
+ + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawEEGLAB. + + Notes + ----- + .. versionadded:: 0.11.0 + """ + return RawEEGLAB( + input_fname=input_fname, + preload=preload, + eog=eog, + uint16_codec=uint16_codec, + montage_units=montage_units, + verbose=verbose, + ) + + +@fill_doc +def read_epochs_eeglab( + input_fname, + events=None, + event_id=None, + eog=(), + *, + uint16_codec=None, + montage_units="auto", + verbose=None, +) -> "EpochsEEGLAB": + r"""Reader function for EEGLAB epochs files. + + Parameters + ---------- + input_fname : path-like + Path to the ``.set`` file. If the data is stored in a separate ``.fdt`` + file, it is expected to be in the same folder as the ``.set`` file. + events : path-like | array, shape (n_events, 3) | None + Path to events file. If array, it is the events typically returned + by the read_events function. If some events don't match the events + of interest as specified by event_id, they will be marked as 'IGNORED' + in the drop log. If None, it is constructed from the EEGLAB (.set) file + with each unique event encoded with a different integer. + event_id : int | list of int | dict | None + The id of the event to consider. If dict, the keys can later be used + to access associated events. + Example:: + + {"auditory":1, "visual":3} + + If int, a dict will be created with + the id as string. If a list, all events with the IDs specified + in the list are used. If None, the event_id is constructed from the + EEGLAB (.set) file with each descriptions copied from ``eventtype``. + eog : list | tuple | 'auto' + Names or indices of channels that should be designated EOG channels. + If 'auto', the channel names containing ``EOG`` or ``EYE`` are used. + Defaults to empty tuple. + %(uint16_codec)s + %(montage_units)s + + .. versionchanged:: 1.6 + Support for ``'auto'`` was added and is the new default. + %(verbose)s + + Returns + ------- + EpochsEEGLAB : instance of BaseEpochs + The epochs. + + See Also + -------- + mne.Epochs : Documentation of attributes and methods. + + Notes + ----- + .. versionadded:: 0.11.0 + """ + epochs = EpochsEEGLAB( + input_fname=input_fname, + events=events, + eog=eog, + event_id=event_id, + uint16_codec=uint16_codec, + montage_units=montage_units, + verbose=verbose, + ) + return epochs + + +@fill_doc +class RawEEGLAB(BaseRaw): + r"""Raw object from EEGLAB .set file. + + Parameters + ---------- + input_fname : path-like + Path to the ``.set`` file. If the data is stored in a separate ``.fdt`` + file, it is expected to be in the same folder as the ``.set`` file. + eog : list | tuple | 'auto' + Names or indices of channels that should be designated EOG channels. + If 'auto', the channel names containing ``EOG`` or ``EYE`` are used. + Defaults to empty tuple. + %(preload)s + Note that preload=False will be effective only if the data is stored + in a separate binary file. + %(uint16_codec)s + %(montage_units)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + + Notes + ----- + .. versionadded:: 0.11.0 + """ + + @verbose + def __init__( + self, + input_fname, + eog=(), + preload=False, + *, + uint16_codec=None, + montage_units="auto", + verbose=None, + ): + input_fname = str(_check_fname(input_fname, "read", True, "input_fname")) + eeg = _check_load_mat(input_fname, uint16_codec) + if eeg.trials != 1: + raise TypeError( + f"The number of trials is {eeg.trials:d}. It must be 1 for raw" + " files. 
Please use `mne.io.read_epochs_eeglab` if"
+                " the .set file contains epochs."
+            )
+
+        last_samps = [eeg.pnts - 1]
+        info, eeg_montage, _ = _get_info(eeg, eog=eog, montage_units=montage_units)
+
+        # read the data
+        if isinstance(eeg.data, str):
+            data_fname = _check_eeglab_fname(input_fname, eeg.data)
+            logger.info(f"Reading {data_fname}")
+
+            super().__init__(
+                info,
+                preload,
+                filenames=[data_fname],
+                last_samps=last_samps,
+                orig_format="double",
+                verbose=verbose,
+            )
+        else:
+            if preload is False or isinstance(preload, str):
+                warn(
+                    "Data will be preloaded. preload=False or a string "
+                    "preload is not supported when the data is stored in "
+                    "the .set file"
+                )
+            # can't be done in standard way with preload=True because of
+            # different reading path (.set file)
+            if eeg.nbchan == 1 and len(eeg.data.shape) == 1:
+                n_chan, n_times = [1, eeg.data.shape[0]]
+            else:
+                n_chan, n_times = eeg.data.shape
+            data = np.empty((n_chan, n_times), dtype=float)
+            data[:n_chan] = eeg.data
+            data *= CAL
+            super().__init__(
+                info,
+                data,
+                filenames=[input_fname],
+                last_samps=last_samps,
+                orig_format="double",
+                verbose=verbose,
+            )
+
+        # create event_ch from annotations
+        annot = read_annotations(input_fname, uint16_codec=uint16_codec)
+        self.set_annotations(annot)
+        _check_boundary(annot, None)
+
+        _set_dig_montage_in_init(self, eeg_montage)
+
+        latencies = np.round(annot.onset * self.info["sfreq"])
+        _check_latencies(latencies)
+
+    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
+        """Read a chunk of raw data."""
+        _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype="<f4")
+
+
+@fill_doc
+class EpochsEEGLAB(BaseEpochs):
+    r"""Epochs from EEGLAB .set file.
+
+    Parameters
+    ----------
+    input_fname : path-like
+        Path to the ``.set`` file. If the data is stored in a separate ``.fdt``
+        file, it is expected to be in the same folder as the ``.set`` file.
+    events : path-like | array, shape (n_events, 3) | None
+        Path to events file. If array, it is the events typically returned
+        by the read_events function. If some events don't match the events
+        of interest as specified by event_id, they will be marked as 'IGNORED'
+        in the drop log. If None, it is constructed from the EEGLAB (.set) file
+        with each unique event encoded with a different integer.
+    event_id : int | list of int | dict | None
+        The id of the event to consider. If dict, the keys can later be used
+        to access associated events. If int, a dict will be created with the
+        id as string. If a list, all events with the IDs specified in the list
+        are used. If None, the event_id is constructed from the EEGLAB (.set)
+        file with each descriptions copied from ``eventtype``.
+    tmin : float
+        Start time before event.
+    baseline : None or tuple of length 2 (default (None, 0))
+        The time interval to apply baseline correction. If None do not apply
+        it. If baseline is (a, b) the interval is between "a (s)" and "b (s)".
+    reject : dict | None
+        Rejection parameters based on peak-to-peak amplitude.
+    flat : dict | None
+        Rejection parameters based on flatness of signal.
+    reject_tmin : scalar | None
+        Start of the time window used to reject epochs.
+    reject_tmax : scalar | None
+        End of the time window used to reject epochs.
+    eog : list | tuple | 'auto'
+        Names or indices of channels that should be designated EOG channels.
+        If 'auto', the channel names containing ``EOG`` or ``EYE`` are used.
+        Defaults to empty tuple.
+    %(verbose)s
+    %(uint16_codec)s
+    %(montage_units)s
+
+    See Also
+    --------
+    mne.Epochs : Documentation of attributes and methods.
+
+    Notes
+    -----
+    .. versionadded:: 0.11.0
+    """
+
+    @verbose
+    def __init__(
+        self,
+        input_fname,
+        events=None,
+        event_id=None,
+        tmin=0,
+        baseline=None,
+        reject=None,
+        flat=None,
+        reject_tmin=None,
+        reject_tmax=None,
+        eog=(),
+        verbose=None,
+        uint16_codec=None,
+        montage_units="auto",
+    ):
+        input_fname = str(_check_fname(input_fname, "read", True, "input_fname"))
+        eeg = _check_load_mat(input_fname, uint16_codec)
+
+        if events is None and eeg.trials > 1:
+            # first extract the events and construct an event_id dict
+            event_name, event_latencies, unique_ev = list(), list(), list()
+            ev_idx = 0
+            warn_multiple_events = False
+            epochs = _bunchify(eeg.epoch)
+            events = _bunchify(eeg.event)
+            for ep in epochs:
+                if isinstance(ep.eventtype, int | float):
+                    ep.eventtype = str(ep.eventtype)
+                if not isinstance(ep.eventtype, str):
+                    event_type = "/".join([str(et) for et in ep.eventtype])
+                    event_name.append(event_type)
+                    # store latency of only first event
+                    event_latencies.append(events[ev_idx].latency)
+                    ev_idx += len(ep.eventtype)
+                    warn_multiple_events = True
+                else:
+                    event_type = ep.eventtype
+                    event_name.append(ep.eventtype)
+                    event_latencies.append(events[ev_idx].latency)
+                    ev_idx += 1
+
+                if event_type not in unique_ev:
+                    unique_ev.append(event_type)
+
+                # invent event dict but use id > 0 so you know it's a trigger
+                event_id = {ev: idx + 1 for idx, ev in enumerate(unique_ev)}
+
+            # warn about multiple events in epoch if necessary
+            if warn_multiple_events:
+                warn(
+                    "At least one epoch has multiple events. Only the latency"
+                    " of the first event will be retained."
+ ) + + # now fill up the event array + events = np.zeros((eeg.trials, 3), dtype=int) + for idx in range(0, eeg.trials): + if idx == 0: + prev_stim = 0 + elif idx > 0 and event_latencies[idx] - event_latencies[idx - 1] == 1: + prev_stim = event_id[event_name[idx - 1]] + events[idx, 0] = event_latencies[idx] + events[idx, 1] = prev_stim + events[idx, 2] = event_id[event_name[idx]] + elif isinstance(events, str | Path | PathLike): + events = read_events(events) + + logger.info(f"Extracting parameters from {input_fname}...") + info, eeg_montage, _ = _get_info(eeg, eog=eog, montage_units=montage_units) + + for key, val in event_id.items(): + if val not in events[:, 2]: + raise ValueError(f"No matching events found for {key} (event id {val})") + + if isinstance(eeg.data, str): + data_fname = _check_eeglab_fname(input_fname, eeg.data) + with open(data_fname, "rb") as data_fid: + data = np.fromfile(data_fid, dtype=np.float32) + data = data.reshape((eeg.nbchan, eeg.pnts, eeg.trials), order="F") + else: + data = eeg.data + + if eeg.nbchan == 1 and len(data.shape) == 2: + data = data[np.newaxis, :] + data = data.transpose((2, 0, 1)).astype("double") + data *= CAL + assert data.shape == (eeg.trials, eeg.nbchan, eeg.pnts) + tmin, tmax = eeg.xmin, eeg.xmax + + super().__init__( + info, + data, + events, + event_id, + tmin, + tmax, + baseline, + reject=reject, + flat=flat, + reject_tmin=reject_tmin, + reject_tmax=reject_tmax, + filename=input_fname, + verbose=verbose, + ) + + # data are preloaded but _bad_dropped is not set so we do it here: + self._bad_dropped = True + + _set_dig_montage_in_init(self, eeg_montage) + + logger.info("Ready.") + + +def _check_boundary(annot, event_id): + if event_id is None: + event_id = dict() + if "boundary" in annot.description and "boundary" not in event_id: + warn( + "The data contains 'boundary' events, indicating data " + "discontinuities. Be cautious of filtering and epoching around " + "these events." + ) + + +def _check_latencies(latencies): + if (latencies < -1).any(): + raise ValueError( + "At least one event sample index is negative. Please" + " check if EEG.event.sample values are correct." + ) + if (latencies == -1).any(): + warn( + "At least one event has a sample index of -1. This usually is " + "a consequence of how eeglab handles event latency after " + "resampling - especially when you had a boundary event at the " + "beginning of the file. Please make sure that the events at " + "the very beginning of your EEGLAB file can be safely dropped " + "(e.g., because they are boundary events)." + ) + + +def _bunchify(items): + if isinstance(items, dict): + items = _dol_to_lod(items) + if len(items) > 0 and isinstance(items[0], dict): + items = [Bunch(**item) for item in items] + return items + + +def _read_annotations_eeglab(eeg, uint16_codec=None): + r"""Create Annotations from EEGLAB file. + + This function reads the event attribute from the EEGLAB + structure and makes an :class:`mne.Annotations` object. + + Parameters + ---------- + eeg : object | str | Path + 'EEG' struct or the path to the (EEGLAB) .set file. + uint16_codec : str | None + If your \*.set file contains non-ascii characters, sometimes reading + it may fail and give rise to error message stating that "buffer is + too small". ``uint16_codec`` allows to specify what codec (for example: + 'latin1' or 'utf-8') should be used when reading character arrays and + can therefore help you solve this problem. 
+
+    Returns
+    -------
+    annotations : instance of Annotations
+        The annotations present in the file.
+    """
+    if isinstance(eeg, (str | Path | PathLike)):
+        eeg = _check_load_mat(eeg, uint16_codec=uint16_codec)
+
+    if not hasattr(eeg, "event"):
+        events = []
+    elif isinstance(eeg.event, dict) and np.array(eeg.event["latency"]).ndim > 0:
+        events = _dol_to_lod(eeg.event)
+    elif not isinstance(eeg.event, np.ndarray | list):
+        events = [eeg.event]
+    else:
+        events = eeg.event
+    events = _bunchify(events)
+    description = [str(event.type) for event in events]
+    onset = [event.latency - 1 for event in events]
+    duration = np.zeros(len(onset))
+    if len(events) > 0 and hasattr(events[0], "duration"):
+        for idx, event in enumerate(events):
+            # empty duration fields are read as empty arrays
+            is_empty_array = (
+                isinstance(event.duration, np.ndarray) and len(event.duration) == 0
+            )
+            duration[idx] = np.nan if is_empty_array else event.duration
+
+    # Drop events with NaN onset (see PR #12484)
+    valid_indices = [
+        idx for idx, onset_idx in enumerate(onset) if not np.isnan(onset_idx)
+    ]
+    n_dropped = len(onset) - len(valid_indices)
+    if len(valid_indices) != len(onset):
+        warn(
+            f"{n_dropped} events have an onset that is NaN. These values are "
+            "usually ignored by EEGLAB and will be dropped from the "
+            "annotations."
+        )
+
+    onset = np.array([onset[idx] for idx in valid_indices])
+    duration = np.array([duration[idx] for idx in valid_indices])
+    description = [description[idx] for idx in valid_indices]
+
+    return Annotations(
+        onset=np.array(onset) / eeg.srate,
+        duration=duration / eeg.srate,
+        description=description,
+        orig_time=None,
+    )
+
+
+def _dol_to_lod(dol):
+    """Convert a dict of lists to a list of dicts."""
+    return [
+        {key: dol[key][ii] for key in dol.keys()}
+        for ii in range(len(dol[list(dol.keys())[0]]))
+    ]
diff --git a/mne/io/egi/__init__.py b/mne/io/egi/__init__.py
new file mode 100644
index 0000000..72f638d
--- /dev/null
+++ b/mne/io/egi/__init__.py
@@ -0,0 +1,8 @@
+"""EGI module for conversion to FIF."""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from .egi import read_raw_egi
+from .egimff import read_evokeds_mff
diff --git a/mne/io/egi/egi.py b/mne/io/egi/egi.py
new file mode 100644
index 0000000..433758e
--- /dev/null
+++ b/mne/io/egi/egi.py
@@ -0,0 +1,332 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import datetime
+import time
+
+import numpy as np
+
+from ..._fiff.constants import FIFF
+from ..._fiff.meas_info import _empty_info
+from ..._fiff.utils import _create_chs, _read_segments_file
+from ...annotations import Annotations
+from ...utils import _check_fname, _validate_type, logger, verbose
+from ..base import BaseRaw
+from .egimff import _read_raw_egi_mff
+from .events import _combine_triggers, _triage_include_exclude
+
+
+def _read_header(fid):
+    """Read EGI binary header."""
+    version = np.fromfile(fid, "<i4", 1)[0]
+
+    if version > 6 & ~np.bitwise_and(version, 6):
+        version = version.byteswap().astype(np.uint32)
+    else:
+        raise ValueError(
+            "Watchout. This does not seem to be a simple binary EGI file."
+        )
+
+    def my_fread(*x, **y):
+        return int(np.fromfile(*x, **y)[0])
+
+    info = dict(
+        version=version,
+        year=my_fread(fid, ">i2", 1),
+        month=my_fread(fid, ">i2", 1),
+        day=my_fread(fid, ">i2", 1),
+        hour=my_fread(fid, ">i2", 1),
+        minute=my_fread(fid, ">i2", 1),
+        second=my_fread(fid, ">i2", 1),
+        millisecond=my_fread(fid, ">i4", 1),
+        samp_rate=my_fread(fid, ">i2", 1),
+        n_channels=my_fread(fid, ">i2", 1),
+        gain=my_fread(fid, ">i2", 1),
+        bits=my_fread(fid, ">i2", 1),
+        value_range=my_fread(fid, ">i2", 1),
+    )
+
+    unsegmented = 1 if np.bitwise_and(version, 1) == 0 else 0
+    precision = np.bitwise_and(version, 6)
+    if precision == 0:
+        raise RuntimeError("Floating point precision is undefined.")
+
+    if unsegmented:
+        info.update(
+            dict(
+                n_categories=0,
+                n_segments=1,
+                n_samples=int(np.fromfile(fid, ">i4", 1)[0]),
+                n_events=int(np.fromfile(fid, ">i2", 1)[0]),
+                event_codes=[],
+                category_names=[],
+                category_lengths=[],
+                pre_baseline=0,
+            )
+        )
+        for event in range(info["n_events"]):
+            event_codes = "".join(np.fromfile(fid, "S1", 4).astype("U1"))
+            info["event_codes"].append(event_codes)
+    else:
+        raise NotImplementedError("Only continuous files are supported")
+    info["unsegmented"] = unsegmented
+    info["dtype"], info["orig_format"] = {
+        2: (">i2", "short"),
+        4: (">f4", "float"),
+        6: (">f8", "double"),
+    }[precision]
+    info["dtype"] = np.dtype(info["dtype"])
+    return info
+
+
+def _read_events(fid, info):
+    """Read events."""
+    events = np.zeros([info["n_events"], info["n_segments"] * info["n_samples"]])
+    fid.seek(36 + info["n_events"] * 4, 0)  # skip header
+    for si in range(info["n_samples"]):
+        # skip data channels
+        fid.seek(info["n_channels"] * info["dtype"].itemsize, 1)
+        # read event channels
+        events[:, si] = np.fromfile(fid, info["dtype"], info["n_events"])
+    return events
+
+
+@verbose
+def read_raw_egi(
+    input_fname,
+    eog=None,
+    misc=None,
+    include=None,
+    exclude=None,
+    preload=False,
+    channel_naming="E%d",
+    *,
+    events_as_annotations=True,
+    verbose=None,
+) -> "RawEGI":
+    """Read EGI simple binary as raw object.
+
+    Parameters
+    ----------
+    input_fname : path-like
+        Path to the raw file. Files with an extension ``.mff`` are
+        automatically considered to be EGI's native MFF format files.
+    eog : list or tuple
+        Names of channels or list of indices that should be designated
+        EOG channels. Default is None.
+    misc : list or tuple
+        Names of channels or list of indices that should be designated
+        MISC channels. Default is None.
+    include : None | list
+        The event channels to be included when creating the synthetic
+        trigger or annotations. Defaults to None.
+        Note: overrides the ``exclude`` parameter.
+    exclude : None | list
+        The event channels to be ignored when creating the synthetic
+        trigger or annotations. Defaults to None. If None, the ``sync`` and
+        ``TREV`` channels will be ignored. This is ignored when ``include``
+        is not None.
+    %(preload)s
+
+        .. versionadded:: 0.11
+    channel_naming : str
+        Channel naming convention for the data channels. Defaults to ``'E%%d'``
+        (resulting in channel names ``'E1'``, ``'E2'``, ``'E3'``...). The
+        effective default prior to 0.14.0 was ``'EEG %%03d'``.
+
+        .. versionadded:: 0.14.0
+    events_as_annotations : bool
+        If True (default), annotations are created from experiment events. If
+        False, a synthetic trigger channel ``STI 014`` is created from
+        experiment events. See the Notes section for details.
+
+        .. versionadded:: 1.8.0
+        .. versionchanged:: 1.9
+           The default changed from False to True.
+    %(verbose)s
+
+    Returns
+    -------
+    raw : instance of RawEGI
+        A Raw object containing EGI data.
+        See :class:`mne.io.Raw` for documentation of attributes and methods.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods of RawEGI.
+
+    Notes
+    -----
+    When ``events_as_annotations=True``, event codes on stimulus channels like
+    ``DIN1`` are stored as annotations with the ``description`` set to the
+    stimulus channel name.
+
+    When ``events_as_annotations=False`` and events are present on the included
+    stimulus channels, a new stim channel ``STI 014`` will be synthesized from
+    the events. It will contain 1-sample pulses where the NetStation file had
+    event timestamps. A ``raw.event_id`` dictionary is added to the raw object
+    that will have arbitrary sequential integer IDs for the events. This will
+    fail if any timestamps are duplicated. The ``event_id`` will also not
+    survive a save/load round-trip.
+
+    For these reasons, it is recommended to use ``events_as_annotations=True``.
+    """
+    _validate_type(input_fname, "path-like", "input_fname")
+    input_fname = str(input_fname)
+    _validate_type(events_as_annotations, bool, "events_as_annotations")
+
+    if input_fname.rstrip("/\\").endswith(".mff"):  # allows .mff or .mff/
+        return _read_raw_egi_mff(
+            input_fname,
+            eog,
+            misc,
+            include,
+            exclude,
+            preload,
+            channel_naming,
+            events_as_annotations=events_as_annotations,
+            verbose=verbose,
+        )
+    return RawEGI(
+        input_fname,
+        eog,
+        misc,
+        include,
+        exclude,
+        preload,
+        channel_naming,
+        events_as_annotations=events_as_annotations,
+        verbose=verbose,
+    )
+
+
+class RawEGI(BaseRaw):
+    """Raw object from EGI simple binary file."""
+
+    _extra_attributes = ("event_id",)
+
+    @verbose
+    def __init__(
+        self,
+        input_fname,
+        eog=None,
+        misc=None,
+        include=None,
+        exclude=None,
+        preload=False,
+        channel_naming="E%d",
+        *,
+        events_as_annotations=True,
+        verbose=None,
+    ):
+        input_fname = str(_check_fname(input_fname, "read", True, "input_fname"))
+        if eog is None:
+            eog = []
+        if misc is None:
+            misc = []
+        with open(input_fname, "rb") as fid:  # 'rb' important for py3k
+            logger.info(f"Reading EGI header from {input_fname}...")
+            egi_info = _read_header(fid)
+            logger.info("    Reading events ...")
+            egi_events = _read_events(fid, egi_info)  # update info + jump
+        if egi_info["value_range"] != 0 and egi_info["bits"] != 0:
+            cal = egi_info["value_range"] / 2.0 ** egi_info["bits"]
+        else:
+            cal = 1e-6
+
+        logger.info("    Assembling measurement info ...")
+
+        event_codes = egi_info["event_codes"]
+        include = _triage_include_exclude(include, exclude, egi_events, egi_info)
+        if egi_info["n_events"] > 0 and not events_as_annotations:
+            event_ids = np.arange(len(include)) + 1
+            logger.info('    Synthesizing trigger channel "STI 014" ...')
+            egi_info["new_trigger"] = _combine_triggers(
+                egi_events[[e in include for e in event_codes]], remapping=event_ids
+            )
+            self.event_id = dict(
+                zip([e for e in event_codes if e in include], event_ids)
+            )
+        else:
+            self.event_id = None
+            egi_info["new_trigger"] = None
+        info = _empty_info(egi_info["samp_rate"])
+        my_time = datetime.datetime(
+            egi_info["year"],
+            egi_info["month"],
+            egi_info["day"],
+            egi_info["hour"],
+            egi_info["minute"],
+            egi_info["second"],
+        )
+        my_timestamp = time.mktime(my_time.timetuple())
+        info["meas_date"] = (my_timestamp, 0)
+        ch_names = [channel_naming % (i + 1) for i in range(egi_info["n_channels"])]
+        cals = np.repeat(cal, len(ch_names))
+        ch_names.extend(list(event_codes))
+        cals = 
np.concatenate([cals, np.ones(egi_info["n_events"])]) + if egi_info["new_trigger"] is not None: + ch_names.append("STI 014") # our new_trigger + cals = np.concatenate([cals, [1.0]]) + ch_coil = FIFF.FIFFV_COIL_EEG + ch_kind = FIFF.FIFFV_EEG_CH + chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc) + sti_ch_idx = [ + i + for i, name in enumerate(ch_names) + if name.startswith("STI") or name in event_codes + ] + for idx in sti_ch_idx: + chs[idx].update( + { + "unit_mul": FIFF.FIFF_UNITM_NONE, + "kind": FIFF.FIFFV_STIM_CH, + "coil_type": FIFF.FIFFV_COIL_NONE, + "unit": FIFF.FIFF_UNIT_NONE, + "loc": np.zeros(12), + } + ) + info["chs"] = chs + info._unlocked = False + info._update_redundant() + orig_format = ( + egi_info["orig_format"] if egi_info["orig_format"] != "float" else "single" + ) + super().__init__( + info, + preload, + orig_format=orig_format, + filenames=[input_fname], + last_samps=[egi_info["n_samples"] - 1], + raw_extras=[egi_info], + verbose=verbose, + ) + if events_as_annotations: + annot = dict(onset=list(), duration=list(), description=list()) + for code, row in zip(egi_info["event_codes"], egi_events): + if code not in include: + continue + onset = np.where(row)[0] / self.info["sfreq"] + annot["onset"].extend(onset) + annot["duration"].extend([0.0] * len(onset)) + annot["description"].extend([code] * len(onset)) + if annot: + self.set_annotations(Annotations(**annot)) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file.""" + egi_info = self._raw_extras[fi] + dtype = egi_info["dtype"] + n_chan_read = egi_info["n_channels"] + egi_info["n_events"] + offset = 36 + egi_info["n_events"] * 4 + trigger_ch = egi_info["new_trigger"] + _read_segments_file( + self, + data, + idx, + fi, + start, + stop, + cals, + mult, + dtype=dtype, + n_channels=n_chan_read, + offset=offset, + trigger_ch=trigger_ch, + ) diff --git a/mne/io/egi/egimff.py b/mne/io/egi/egimff.py new file mode 100644 index 0000000..b2f0802 --- /dev/null +++ b/mne/io/egi/egimff.py @@ -0,0 +1,974 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
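Before the MFF-specific reader below, a quick usage sketch for the simple-binary reader defined in `egi.py` above (the `recording.raw` path is hypothetical):

```python
import mne

# Hypothetical EGI simple-binary recording. With events_as_annotations=True
# (the default), event-channel pulses become Annotations instead of a
# synthesized "STI 014" stim channel.
raw = mne.io.read_raw_egi("recording.raw", preload=True)
print(raw.annotations)
# Convert the annotations back into an events array for epoching.
events, event_id = mne.events_from_annotations(raw)
```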
+ +"""EGI NetStation Load Function.""" + +import datetime +import math +import os.path as op +import re +from collections import OrderedDict +from pathlib import Path + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info, _ensure_meas_date_none_or_dt, create_info +from ..._fiff.proj import setup_proj +from ..._fiff.utils import _create_chs, _mult_cal_one +from ...annotations import Annotations +from ...channels.montage import make_dig_montage +from ...evoked import EvokedArray +from ...utils import _check_fname, _check_option, _soft_import, logger, verbose, warn +from ..base import BaseRaw +from .events import _combine_triggers, _read_events, _triage_include_exclude +from .general import ( + _block_r, + _extract, + _get_blocks, + _get_ep_info, + _get_gains, + _get_signalfname, +) + +REFERENCE_NAMES = ("VREF", "Vertex Reference") + + +def _read_mff_header(filepath): + """Read mff header.""" + _soft_import("defusedxml", "reading EGI MFF data") + from defusedxml.minidom import parse + + all_files = _get_signalfname(filepath) + eeg_file = all_files["EEG"]["signal"] + eeg_info_file = all_files["EEG"]["info"] + + info_filepath = op.join(filepath, "info.xml") # add with filepath + tags = ["mffVersion", "recordTime"] + version_and_date = _extract(tags, filepath=info_filepath) + version = "" + if len(version_and_date["mffVersion"]): + version = version_and_date["mffVersion"][0] + + fname = op.join(filepath, eeg_file) + signal_blocks = _get_blocks(fname) + epochs = _get_ep_info(filepath) + summaryinfo = dict(eeg_fname=eeg_file, info_fname=eeg_info_file) + summaryinfo.update(signal_blocks) + # sanity check and update relevant values + record_time = version_and_date["recordTime"][0] + # e.g., + # 2018-07-30T10:47:01.021673-04:00 + # 2017-09-20T09:55:44.072000000+01:00 + g = re.match( + r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.(\d{6}(?:\d{3})?)[+-]\d{2}:\d{2}", # noqa: E501 + record_time, + ) + if g is None: + raise RuntimeError(f"Could not parse recordTime {repr(record_time)}") + frac = g.groups()[0] + assert len(frac) in (6, 9) and all(f.isnumeric() for f in frac) # regex + div = 1000 if len(frac) == 6 else 1000000 + for key in ("last_samps", "first_samps"): + # convert from times in µS to samples + for ei, e in enumerate(epochs[key]): + if e % div != 0: + raise RuntimeError(f"Could not parse epoch time {e}") + epochs[key][ei] = e // div + epochs[key] = np.array(epochs[key], np.uint64) + # I guess they refer to times in milliseconds? 
+ # What we really need to do here is: + # epochs[key] *= signal_blocks['sfreq'] + # epochs[key] //= 1000 + # But that multiplication risks an overflow, so let's only multiply + # by what we need to (e.g., a sample rate of 500 means we can multiply + # by 1 and divide by 2 rather than multiplying by 500 and dividing by + # 1000) + numerator = int(signal_blocks["sfreq"]) + denominator = 1000 + this_gcd = math.gcd(numerator, denominator) + numerator = numerator // this_gcd + denominator = denominator // this_gcd + with np.errstate(over="raise"): + epochs[key] *= numerator + epochs[key] //= denominator + # Should be safe to cast to int now, which makes things later not + # upbroadcast to float + epochs[key] = epochs[key].astype(np.int64) + n_samps_block = signal_blocks["samples_block"].sum() + n_samps_epochs = (epochs["last_samps"] - epochs["first_samps"]).sum() + bad = ( + n_samps_epochs != n_samps_block + or not (epochs["first_samps"] < epochs["last_samps"]).all() + or not (epochs["first_samps"][1:] >= epochs["last_samps"][:-1]).all() + ) + if bad: + raise RuntimeError( + "EGI epoch first/last samps could not be parsed:\n" + f'{list(epochs["first_samps"])}\n{list(epochs["last_samps"])}' + ) + summaryinfo.update(epochs) + # index which samples in raw are actually readable from disk (i.e., not + # in a skip) + disk_samps = np.full(epochs["last_samps"][-1], -1) + offset = 0 + for first, last in zip(epochs["first_samps"], epochs["last_samps"]): + n_this = last - first + disk_samps[first:last] = np.arange(offset, offset + n_this) + offset += n_this + summaryinfo["disk_samps"] = disk_samps + + # Add the sensor info. + sensor_layout_file = op.join(filepath, "sensorLayout.xml") + sensor_layout_obj = parse(sensor_layout_file) + summaryinfo["device"] = sensor_layout_obj.getElementsByTagName("name")[ + 0 + ].firstChild.data + sensors = sensor_layout_obj.getElementsByTagName("sensor") + chan_type = list() + chan_unit = list() + n_chans = 0 + numbers = list() # used for identification + for sensor in sensors: + sensortype = int(sensor.getElementsByTagName("type")[0].firstChild.data) + if sensortype in [0, 1]: + sn = sensor.getElementsByTagName("number")[0].firstChild.data + sn = sn.encode() + numbers.append(sn) + chan_type.append("eeg") + chan_unit.append("uV") + n_chans = n_chans + 1 + if n_chans != summaryinfo["n_channels"]: + raise RuntimeError( + f"Number of defined channels ({n_chans}) did not match the " + f"expected channels ({summaryinfo['n_channels']})." 
+        )
+
+    # Check presence of PNS data
+    pns_names = []
+    if "PNS" in all_files:
+        pns_fpath = op.join(filepath, all_files["PNS"]["signal"])
+        pns_blocks = _get_blocks(pns_fpath)
+        pns_samples = pns_blocks["samples_block"]
+        signal_samples = signal_blocks["samples_block"]
+        same_blocks = np.array_equal(
+            pns_samples[:-1], signal_samples[:-1]
+        ) and pns_samples[-1] in (signal_samples[-1] - np.arange(2))
+        if not same_blocks:
+            raise RuntimeError(
+                "PNS and signals samples did not match:\n"
+                f"{list(pns_samples)}\nvs\n{list(signal_samples)}"
+            )
+
+        pns_file = op.join(filepath, "pnsSet.xml")
+        pns_obj = parse(pns_file)
+        sensors = pns_obj.getElementsByTagName("sensor")
+        pns_types = []
+        pns_units = []
+        for sensor in sensors:
+            # sensor number:
+            # sensor.getElementsByTagName('number')[0].firstChild.data
+            name = sensor.getElementsByTagName("name")[0].firstChild.data
+            unit_elem = sensor.getElementsByTagName("unit")[0].firstChild
+            unit = ""
+            if unit_elem is not None:
+                unit = unit_elem.data
+
+            if name == "ECG":
+                ch_type = "ecg"
+            elif "EMG" in name:
+                ch_type = "emg"
+            else:
+                ch_type = "bio"
+            pns_types.append(ch_type)
+            pns_units.append(unit)
+            pns_names.append(name)
+
+        summaryinfo.update(
+            pns_types=pns_types,
+            pns_units=pns_units,
+            pns_fname=all_files["PNS"]["signal"],
+            pns_sample_blocks=pns_blocks,
+        )
+    summaryinfo.update(
+        pns_names=pns_names,
+        version=version,
+        date=version_and_date["recordTime"][0],
+        chan_type=chan_type,
+        chan_unit=chan_unit,
+        numbers=numbers,
+    )
+
+    return summaryinfo
+
+
+def _read_header(input_fname):
+    """Obtain the headers from the file package mff.
+
+    Parameters
+    ----------
+    input_fname : path-like
+        Path for the file.
+
+    Returns
+    -------
+    info : dict
+        Main headers set.
+    """
+    input_fname = str(input_fname)  # cast to str any Paths
+    mff_hdr = _read_mff_header(input_fname)
+    with open(input_fname + "/signal1.bin", "rb") as fid:
+        version = np.fromfile(fid, np.int32, 1)[0]
+    """
+    The datetime.strptime ".%f" directive (milliseconds)
+    will only accept up to 6 digits. If there are more than
+    six millisecond digits in the provided timestamp string
+    (i.e. because of trailing zeros, as in test_egi_pns.mff),
+    then slice both the first 26 elements and the last 6
+    elements of the timestamp string to truncate the
+    milliseconds to 6 digits and extract the timezone,
+    and then piece these together and assign back to mff_hdr['date'].
+    """
+    if len(mff_hdr["date"]) > 32:
+        dt, tz = [mff_hdr["date"][:26], mff_hdr["date"][-6:]]
+        mff_hdr["date"] = dt + tz
+
+    time_n = datetime.datetime.strptime(mff_hdr["date"], "%Y-%m-%dT%H:%M:%S.%f%z")
+
+    info = dict(
+        version=version,
+        meas_dt_local=time_n,
+        utc_offset=time_n.strftime("%z"),
+        gain=0,
+        bits=0,
+        value_range=0,
+    )
+    info.update(
+        n_categories=0,
+        n_segments=1,
+        n_events=0,
+        event_codes=[],
+        category_names=[],
+        category_lengths=[],
+        pre_baseline=0,
+    )
+    info.update(mff_hdr)
+    return info
+
+
+def _get_eeg_calibration_info(filepath, egi_info):
+    """Calculate calibration info for EEG channels."""
+    gains = _get_gains(op.join(filepath, egi_info["info_fname"]))
+    if egi_info["value_range"] != 0 and egi_info["bits"] != 0:
+        cals = [egi_info["value_range"] / 2 ** egi_info["bits"]] * len(
+            egi_info["chan_type"]
+        )
+    else:
+        cal_scales = {"uV": 1e-6, "V": 1}
+        cals = [cal_scales[t] for t in egi_info["chan_unit"]]
+    if "gcal" in gains:
+        cals *= gains["gcal"]
+    return cals
+
+
+def _read_locs(filepath, egi_info, channel_naming):
+    """Read channel locations."""
+    _soft_import("defusedxml", "reading EGI MFF data")
+    from defusedxml.minidom import parse
+
+    fname = op.join(filepath, "coordinates.xml")
+    if not op.exists(fname):
+        logger.warning("File coordinates.xml not found, not setting channel locations")
+        ch_names = [channel_naming % (i + 1) for i in range(egi_info["n_channels"])]
+        return ch_names, None
+    dig_ident_map = {
+        "Left periauricular point": "lpa",
+        "Right periauricular point": "rpa",
+        "Nasion": "nasion",
+    }
+    numbers = np.array(egi_info["numbers"])
+    coordinates = parse(fname)
+    sensors = coordinates.getElementsByTagName("sensor")
+    ch_pos = OrderedDict()
+    hsp = list()
+    nlr = dict()
+    ch_names = list()
+
+    for sensor in sensors:
+        name_element = sensor.getElementsByTagName("name")[0].firstChild
+        num_element = sensor.getElementsByTagName("number")[0].firstChild
+        name = (
+            channel_naming % int(num_element.data)
+            if name_element is None
+            else name_element.data
+        )
+        nr = num_element.data.encode()
+        coords = [
+            float(sensor.getElementsByTagName(coord)[0].firstChild.data)
+            for coord in "xyz"
+        ]
+        loc = np.array(coords) / 100  # cm -> m
+        # create dig entry
+        if name in dig_ident_map:
+            nlr[dig_ident_map[name]] = loc
+        else:
+            # id_ is the index of the channel in egi_info['numbers']
+            id_ = np.flatnonzero(numbers == nr)
+            # if it's not in egi_info['numbers'], it's a headshape point
+            if len(id_) == 0:
+                hsp.append(loc)
+            # not HSP, must be a data or reference channel
+            else:
+                ch_names.append(name)
+                ch_pos[name] = loc
+    mon = make_dig_montage(ch_pos=ch_pos, hsp=hsp, **nlr)
+    return ch_names, mon
+
+
+def _add_pns_channel_info(chs, egi_info, ch_names):
+    """Add info for PNS channels to channel info dict."""
+    for i_ch, ch_name in enumerate(egi_info["pns_names"]):
+        idx = ch_names.index(ch_name)
+        ch_type = egi_info["pns_types"][i_ch]
+        type_to_kind_map = {"ecg": FIFF.FIFFV_ECG_CH, "emg": FIFF.FIFFV_EMG_CH}
+        ch_kind = type_to_kind_map.get(ch_type, FIFF.FIFFV_BIO_CH)
+        ch_unit = FIFF.FIFF_UNIT_V
+        ch_cal = 1e-6
+        if egi_info["pns_units"][i_ch] != "uV":
+            ch_unit = FIFF.FIFF_UNIT_NONE
+            ch_cal = 1.0
+        chs[idx].update(
+            cal=ch_cal, kind=ch_kind, coil_type=FIFF.FIFFV_COIL_NONE, 
unit=ch_unit + ) + return chs + + +@verbose +def _read_raw_egi_mff( + input_fname, + eog=None, + misc=None, + include=None, + exclude=None, + preload=False, + channel_naming="E%d", + *, + events_as_annotations=True, + verbose=None, +): + """Read EGI mff binary as raw object.""" + return RawMff( + input_fname, + eog, + misc, + include, + exclude, + preload, + channel_naming, + events_as_annotations=events_as_annotations, + verbose=verbose, + ) + + +class RawMff(BaseRaw): + """RawMff class.""" + + _extra_attributes = ("event_id",) + + @verbose + def __init__( + self, + input_fname, + eog=None, + misc=None, + include=None, + exclude=None, + preload=False, + channel_naming="E%d", + *, + events_as_annotations=True, + verbose=None, + ): + """Init the RawMff class.""" + input_fname = str( + _check_fname( + input_fname, + "read", + True, + "input_fname", + need_dir=True, + ) + ) + logger.info(f"Reading EGI MFF Header from {input_fname}...") + egi_info = _read_header(input_fname) + if eog is None: + eog = [] + if misc is None: + misc = np.where(np.array(egi_info["chan_type"]) != "eeg")[0].tolist() + + logger.info(" Reading events ...") + egi_events, egi_info, mff_events = _read_events(input_fname, egi_info) + cals = _get_eeg_calibration_info(input_fname, egi_info) + logger.info(" Assembling measurement info ...") + event_codes = egi_info["event_codes"] + include = _triage_include_exclude(include, exclude, egi_events, egi_info) + if egi_info["n_events"] > 0 and not events_as_annotations: + logger.info(' Synthesizing trigger channel "STI 014" ...') + if all(ch.startswith("D") for ch in include): + # support the DIN format DIN1, DIN2, ..., DIN9, DI10, DI11, ... DI99, + # D100, D101, ..., D255 that we get when sending 0-255 triggers on a + # parallel port. + events_ids = list() + for ch in include: + while not ch[0].isnumeric(): + ch = ch[1:] + events_ids.append(int(ch)) + else: + events_ids = np.arange(len(include)) + 1 + egi_info["new_trigger"] = _combine_triggers( + egi_events[[c in include for c in event_codes]], remapping=events_ids + ) + self.event_id = dict( + zip([e for e in event_codes if e in include], events_ids) + ) + if egi_info["new_trigger"] is not None: + egi_events = np.vstack([egi_events, egi_info["new_trigger"]]) + else: + self.event_id = None + egi_info["new_trigger"] = None + assert egi_events.shape[1] == egi_info["last_samps"][-1] + + meas_dt_utc = egi_info["meas_dt_local"].astimezone(datetime.timezone.utc) + info = _empty_info(egi_info["sfreq"]) + info["meas_date"] = _ensure_meas_date_none_or_dt(meas_dt_utc) + info["utc_offset"] = egi_info["utc_offset"] + info["device_info"] = dict(type=egi_info["device"]) + + # read in the montage, if it exists + ch_names, mon = _read_locs(input_fname, egi_info, channel_naming) + # Second: Stim + ch_names.extend(list(egi_info["event_codes"])) + n_extra = len(event_codes) + len(misc) + len(eog) + len(egi_info["pns_names"]) + if egi_info["new_trigger"] is not None: + ch_names.append("STI 014") # channel for combined events + n_extra += 1 + + # Third: PNS + ch_names.extend(egi_info["pns_names"]) + + cals = np.concatenate([cals, np.ones(n_extra)]) + assert len(cals) == len(ch_names), (len(cals), len(ch_names)) + + # Actually create channels as EEG, then update stim and PNS + ch_coil = FIFF.FIFFV_COIL_EEG + ch_kind = FIFF.FIFFV_EEG_CH + chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc) + + sti_ch_idx = [ + i + for i, name in enumerate(ch_names) + if name.startswith("STI") or name in event_codes + ] + for idx in sti_ch_idx: + 
chs[idx].update( + { + "unit_mul": FIFF.FIFF_UNITM_NONE, + "cal": cals[idx], + "kind": FIFF.FIFFV_STIM_CH, + "coil_type": FIFF.FIFFV_COIL_NONE, + "unit": FIFF.FIFF_UNIT_NONE, + } + ) + chs = _add_pns_channel_info(chs, egi_info, ch_names) + info["chs"] = chs + info._unlocked = False + info._update_redundant() + + if mon is not None: + info.set_montage(mon, on_missing="ignore") + + ref_idx = np.flatnonzero(np.isin(mon.ch_names, REFERENCE_NAMES)) + if len(ref_idx): + ref_idx = ref_idx.item() + ref_coords = info["chs"][int(ref_idx)]["loc"][:3] + for chan in info["chs"]: + if chan["kind"] == FIFF.FIFFV_EEG_CH: + chan["loc"][3:6] = ref_coords + + file_bin = op.join(input_fname, egi_info["eeg_fname"]) + egi_info["egi_events"] = egi_events + + # Check how many channels to read are from EEG + keys = ("eeg", "sti", "pns") + idx = dict() + idx["eeg"] = np.where([ch["kind"] == FIFF.FIFFV_EEG_CH for ch in chs])[0] + idx["sti"] = np.where([ch["kind"] == FIFF.FIFFV_STIM_CH for ch in chs])[0] + idx["pns"] = np.where( + [ + ch["kind"] in (FIFF.FIFFV_ECG_CH, FIFF.FIFFV_EMG_CH, FIFF.FIFFV_BIO_CH) + for ch in chs + ] + )[0] + # By construction this should always be true, but check anyway + if not np.array_equal( + np.concatenate([idx[key] for key in keys]), np.arange(len(chs)) + ): + raise ValueError( + "Currently interlacing EEG and PNS channels is not supported" + ) + egi_info["kind_bounds"] = [0] + for key in keys: + egi_info["kind_bounds"].append(len(idx[key])) + egi_info["kind_bounds"] = np.cumsum(egi_info["kind_bounds"]) + assert egi_info["kind_bounds"][0] == 0 + assert egi_info["kind_bounds"][-1] == info["nchan"] + first_samps = [0] + last_samps = [egi_info["last_samps"][-1] - 1] + + annot = dict(onset=list(), duration=list(), description=list()) + + if len(idx["pns"]): + # PNS Data is present and should be read: + egi_info["pns_filepath"] = op.join(input_fname, egi_info["pns_fname"]) + # Check for PNS bug immediately + pns_samples = np.sum(egi_info["pns_sample_blocks"]["samples_block"]) + eeg_samples = np.sum(egi_info["samples_block"]) + if pns_samples == eeg_samples - 1: + warn("This file has the EGI PSG sample bug") + annot["onset"].append(last_samps[-1] / egi_info["sfreq"]) + annot["duration"].append(1 / egi_info["sfreq"]) + annot["description"].append("BAD_EGI_PSG") + elif pns_samples != eeg_samples: + raise RuntimeError( + f"PNS samples ({pns_samples}) did not match EEG samples " + f"({eeg_samples})." + ) + + super().__init__( + info, + preload=preload, + orig_format="single", + filenames=[file_bin], + first_samps=first_samps, + last_samps=last_samps, + raw_extras=[egi_info], + verbose=verbose, + ) + + # Annotate acquisition skips + for first, prev_last in zip( + egi_info["first_samps"][1:], egi_info["last_samps"][:-1] + ): + gap = first - prev_last + assert gap >= 0 + if gap: + annot["onset"].append((prev_last - 0.5) / egi_info["sfreq"]) + annot["duration"].append(gap / egi_info["sfreq"]) + annot["description"].append("BAD_ACQ_SKIP") + + # create events from annotations + if events_as_annotations: + for code, samples in mff_events.items(): + if code not in include: + continue + annot["onset"].extend(np.array(samples) / egi_info["sfreq"]) + annot["duration"].extend([0.0] * len(samples)) + annot["description"].extend([code] * len(samples)) + + if len(annot["onset"]): + self.set_annotations(Annotations(**annot)) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of data.""" + logger.debug(f"Reading MFF {start:6d} ... 
{stop:6d} ...")
+        dtype = "<f4"  # Data read in four byte floats.
+
+        egi_info = self._raw_extras[fi]
+        bounds = egi_info["kind_bounds"]
+        one = np.zeros((bounds[-1], stop - start))
+
+        # info about the binary file structure
+        n_channels = egi_info["n_channels"]
+        samples_block = egi_info["samples_block"]
+
+        # Sort the requested channel indices into EEG / stim / PNS groups
+        idx = np.arange(bounds[-1])[idx]  # accept slice or array indexing
+        eeg_out = np.where(idx < bounds[1])[0]
+        eeg_one = idx[eeg_out, np.newaxis]
+        eeg_in = idx[eeg_out]
+        stim_out = np.where((idx >= bounds[1]) & (idx < bounds[2]))[0]
+        stim_one = idx[stim_out]
+        stim_in = idx[stim_out] - bounds[1]
+        pns_out = np.where((idx >= bounds[2]) & (idx < bounds[3]))[0]
+        pns_in = idx[pns_out] - bounds[2]
+        pns_one = idx[pns_out, np.newaxis]
+        del eeg_out, stim_out, pns_out
+
+        # take into account events (already extended to correct size)
+        one[stim_one, :] = egi_info["egi_events"][stim_in, start:stop]
+
+        # Convert start and stop to limits in terms of the data
+        # actually on disk, plus an indexer (disk_use_idx) that populates
+        # the potentially larger `data` with it, taking skips into account
+        disk_samps = egi_info["disk_samps"][start:stop]
+        disk_use_idx = np.where(disk_samps > -1)[0]
+        # short circuit in case we don't need any samples
+        if not len(disk_use_idx):
+            _mult_cal_one(data, one, idx, cals, mult)
+            return
+
+        start = disk_samps[disk_use_idx[0]]
+        stop = disk_samps[disk_use_idx[-1]] + 1
+        assert len(disk_use_idx) == stop - start
+
+        # Get starting/stopping block/samples
+        block_samples_offset = np.cumsum(samples_block)
+        offset_blocks = np.sum(block_samples_offset <= start)
+        offset_samples = start - (
+            block_samples_offset[offset_blocks - 1] if offset_blocks > 0 else 0
+        )
+
+        # TODO: Refactor this reading with the PNS reading in a single function
+        # (DRY)
+        samples_to_read = stop - start
+        with open(self.filenames[fi], "rb", buffering=0) as fid:
+            # Go to starting block
+            current_block = 0
+            current_block_info = None
+            current_data_sample = 0
+            while current_block < offset_blocks:
+                this_block_info = _block_r(fid)
+                if this_block_info is not None:
+                    current_block_info = this_block_info
+                fid.seek(current_block_info["block_size"], 1)
+                current_block += 1
+
+            # Start reading samples
+            while samples_to_read > 0:
+                logger.debug(f"    Reading from block {current_block}")
+                this_block_info = _block_r(fid)
+                current_block += 1
+                if this_block_info is not None:
+                    current_block_info = this_block_info
+
+                to_read = current_block_info["nsamples"] * current_block_info["nc"]
+                block_data = np.fromfile(fid, dtype, to_read)
+                block_data = block_data.reshape(n_channels, -1, order="C")
+
+                # Compute indexes
+                samples_read = block_data.shape[1]
+                logger.debug(f"    Read {samples_read} samples")
+                logger.debug(f"    Offset {offset_samples} samples")
+                if offset_samples > 0:
+                    # First block read, skip to the offset:
+                    block_data = block_data[:, offset_samples:]
+                    samples_read = samples_read - offset_samples
+                    offset_samples = 0
+                if samples_to_read < samples_read:
+                    # Last block to read, skip the last samples
+                    block_data = block_data[:, :samples_to_read]
+                    samples_read = samples_to_read
+                logger.debug(f"    Keep {samples_read} samples")
+
+                s_start = current_data_sample
+                s_end = s_start + samples_read
+
+                one[eeg_one, disk_use_idx[s_start:s_end]] = block_data[eeg_in]
+                samples_to_read = samples_to_read - samples_read
+                current_data_sample = current_data_sample + samples_read
+
+        if len(pns_one) > 0:
+            # PNS Data is present and should be read:
+            pns_filepath = egi_info["pns_filepath"]
+            pns_info = egi_info["pns_sample_blocks"]
+            n_channels = pns_info["n_channels"]
+            samples_block = pns_info["samples_block"]
+
+            # Get starting/stopping block/samples
+            block_samples_offset = np.cumsum(samples_block)
+            offset_blocks = np.sum(block_samples_offset < start)
+            offset_samples = start - (
+                block_samples_offset[offset_blocks - 1] if offset_blocks > 0 else 0
+            )
+
+            samples_to_read = stop - start
+            with open(pns_filepath, "rb", buffering=0) as fid:
+                # Check file size
+                fid.seek(0, 2)
file_size = fid.tell() + fid.seek(0) + # Go to starting block + current_block = 0 + current_block_info = None + current_data_sample = 0 + while current_block < offset_blocks: + this_block_info = _block_r(fid) + if this_block_info is not None: + current_block_info = this_block_info + fid.seek(current_block_info["block_size"], 1) + current_block += 1 + + # Start reading samples + while samples_to_read > 0: + if samples_to_read == 1 and fid.tell() == file_size: + # We are in the presence of the EEG bug + # fill with zeros and break the loop + one[pns_one, -1] = 0 + break + + this_block_info = _block_r(fid) + if this_block_info is not None: + current_block_info = this_block_info + + to_read = current_block_info["nsamples"] * current_block_info["nc"] + block_data = np.fromfile(fid, dtype, to_read) + block_data = block_data.reshape(n_channels, -1, order="C") + + # Compute indexes + samples_read = block_data.shape[1] + if offset_samples > 0: + # First block read, skip to the offset: + block_data = block_data[:, offset_samples:] + samples_read = samples_read - offset_samples + offset_samples = 0 + + if samples_to_read < samples_read: + # Last block to read, skip the last samples + block_data = block_data[:, :samples_to_read] + samples_read = samples_to_read + + s_start = current_data_sample + s_end = s_start + samples_read + + one[pns_one, disk_use_idx[s_start:s_end]] = block_data[pns_in] + samples_to_read = samples_to_read - samples_read + current_data_sample = current_data_sample + samples_read + + # do the calibration + _mult_cal_one(data, one, idx, cals, mult) + + +@verbose +def read_evokeds_mff( + fname, condition=None, channel_naming="E%d", baseline=None, verbose=None +): + """Read averaged MFF file as EvokedArray or list of EvokedArray. + + Parameters + ---------- + fname : path-like + File path to averaged MFF file. Should end in ``.mff``. + condition : int or str | list of int or str | None + The index (indices) or category (categories) from which to read in + data. Averaged MFF files can contain separate averages for different + categories. These can be indexed by the block number or the category + name. If ``condition`` is a list or None, a list of EvokedArray objects + is returned. + channel_naming : str + Channel naming convention for EEG channels. Defaults to 'E%%d' + (resulting in channel names 'E1', 'E2', 'E3'...). + baseline : None (default) or tuple of length 2 + The time interval to apply baseline correction. If None do not apply + it. If baseline is (a, b) the interval is between "a (s)" and "b (s)". + If a is None the beginning of the data is used and if b is None then b + is set to the end of the interval. If baseline is equal to (None, None) + all the time interval is used. Correction is applied by computing mean + of the baseline period and subtracting it from the data. The baseline + (a, b) includes both endpoints, i.e. all timepoints t such that + a <= t <= b. + %(verbose)s + + Returns + ------- + evoked : EvokedArray or list of EvokedArray + The evoked dataset(s); one EvokedArray if condition is int or str, + or list of EvokedArray if condition is None or list. + + Raises + ------ + ValueError + If ``fname`` has file extension other than '.mff'. + ValueError + If the MFF file specified by ``fname`` is not averaged. + ValueError + If no categories.xml file in MFF directory specified by ``fname``. + + See Also + -------- + Evoked, EvokedArray, create_info + + Notes + ----- + .. 
versionadded:: 0.22 + """ + mffpy = _import_mffpy() + # Confirm `fname` is a path to an MFF file + fname = Path(fname) # should be replace with _check_fname + if not fname.suffix == ".mff": + raise ValueError('fname must be an MFF file with extension ".mff".') + # Confirm the input MFF is averaged + mff = mffpy.Reader(fname) + try: + flavor = mff.mff_flavor + except AttributeError: # < 6.3 + flavor = mff.flavor + if flavor not in ("averaged", "segmented"): # old, new names + raise ValueError( + f"{fname} is a {flavor} MFF file. " + "fname must be the path to an averaged MFF file." + ) + # Check for categories.xml file + if "categories.xml" not in mff.directory.listdir(): + raise ValueError( + "categories.xml not found in MFF directory. " + f"{fname} may not be an averaged MFF file." + ) + return_list = True + if condition is None: + categories = mff.categories.categories + condition = list(categories.keys()) + elif not isinstance(condition, list): + condition = [condition] + return_list = False + logger.info(f"Reading {len(condition)} evoked datasets from {fname} ...") + output = [ + _read_evoked_mff( + fname, c, channel_naming=channel_naming, verbose=verbose + ).apply_baseline(baseline) + for c in condition + ] + return output if return_list else output[0] + + +def _read_evoked_mff(fname, condition, channel_naming="E%d", verbose=None): + """Read evoked data from MFF file.""" + import mffpy + + egi_info = _read_header(fname) + mff = mffpy.Reader(fname) + categories = mff.categories.categories + + if isinstance(condition, str): + # Condition is interpreted as category name + category = _check_option( + "condition", condition, categories, extra="provided as category name" + ) + epoch = mff.epochs[category] + elif isinstance(condition, int): + # Condition is interpreted as epoch index + try: + epoch = mff.epochs[condition] + except IndexError: + raise ValueError( + f'"condition" parameter ({condition}), provided ' + "as epoch index, is out of range for available " + f"epochs ({len(mff.epochs)})." + ) + category = epoch.name + else: + raise TypeError('"condition" parameter must be either int or str.') + + # Read in signals from the target epoch + data = mff.get_physical_samples_from_epoch(epoch) + eeg_data, t0 = data["EEG"] + if "PNSData" in data: + pns_data, t0 = data["PNSData"] + all_data = np.vstack((eeg_data, pns_data)) + ch_types = egi_info["chan_type"] + egi_info["pns_types"] + else: + all_data = eeg_data + ch_types = egi_info["chan_type"] + all_data *= 1e-6 # convert to volts + + # Load metadata into info object + # Exclude info['meas_date'] because record time info in + # averaged MFF is the time of the averaging, not true record time. 
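+    # (create_info below leaves info["meas_date"] as None, which is the intent)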
+ ch_names, mon = _read_locs(fname, egi_info, channel_naming) + ch_names.extend(egi_info["pns_names"]) + info = create_info(ch_names, mff.sampling_rates["EEG"], ch_types) + with info._unlock(): + info["device_info"] = dict(type=egi_info["device"]) + info["nchan"] = sum(mff.num_channels.values()) + + # Add individual channel info + # Get calibration info for EEG channels + cals = _get_eeg_calibration_info(fname, egi_info) + # Initialize calibration for PNS channels, will be updated later + cals = np.concatenate([cals, np.repeat(1, len(egi_info["pns_names"]))]) + ch_coil = FIFF.FIFFV_COIL_EEG + ch_kind = FIFF.FIFFV_EEG_CH + chs = _create_chs(ch_names, cals, ch_coil, ch_kind, (), (), (), ()) + # Update PNS channel info + chs = _add_pns_channel_info(chs, egi_info, ch_names) + with info._unlock(): + info["chs"] = chs + if mon is not None: + info.set_montage(mon, on_missing="ignore") + + # Add bad channels to info + info["description"] = category + try: + channel_status = categories[category][0]["channelStatus"] + except KeyError: + warn( + f"Channel status data not found for condition {category}. " + "No channels will be marked as bad.", + category=UserWarning, + ) + channel_status = None + bads = [] + if channel_status: + for entry in channel_status: + if entry["exclusion"] == "badChannels": + if entry["signalBin"] == 1: + # Add bad EEG channels + for ch in entry["channels"]: + bads.append(ch_names[ch - 1]) + elif entry["signalBin"] == 2: + # Add bad PNS channels + for ch in entry["channels"]: + bads.append(egi_info["pns_names"][ch - 1]) + info["bads"] = bads + + # Add EEG reference to info + try: + fp = mff.directory.filepointer("history") + except (ValueError, FileNotFoundError): # old (<=0.6.3) vs new mffpy + pass + else: + with fp: + history = mffpy.XML.from_file(fp) + for entry in history.entries: + if entry["method"] == "Montage Operations Tool": + if "Average Reference" in entry["settings"]: + # Average reference has been applied + _, info = setup_proj(info) + + # Get nave from categories.xml + try: + nave = categories[category][0]["keys"]["#seg"]["data"] + except KeyError: + warn( + f"Number of averaged epochs not found for condition {category}. " + "nave will default to 1.", + category=UserWarning, + ) + nave = 1 + + # Let tmin default to 0 + return EvokedArray( + all_data, info, tmin=0.0, comment=category, nave=nave, verbose=verbose + ) + + +def _import_mffpy(why="read averaged .mff files"): + """Import and return module mffpy.""" + try: + import mffpy + except ImportError as exp: + msg = f"mffpy is required to {why}, got:\n{exp}" + raise ImportError(msg) + + return mffpy diff --git a/mne/io/egi/events.py b/mne/io/egi/events.py new file mode 100644 index 0000000..3096a3c --- /dev/null +++ b/mne/io/egi/events.py @@ -0,0 +1,207 @@ +# +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from datetime import datetime +from glob import glob +from os.path import basename, join, splitext + +import numpy as np + +from ...utils import _soft_import, _validate_type, logger, warn + + +def _read_events(input_fname, info): + """Read events for the record. + + Parameters + ---------- + input_fname : path-like + The file path. + info : dict + Header info array. 
+ """ + n_samples = info["last_samps"][-1] + mff_events, event_codes = _read_mff_events(input_fname, info["sfreq"]) + info["n_events"] = len(event_codes) + info["event_codes"] = event_codes + events = np.zeros([info["n_events"], info["n_segments"] * n_samples]) + for n, event in enumerate(event_codes): + for i in mff_events[event]: + if (i < 0) or (i >= events.shape[1]): + continue + events[n][i] = n + 1 + return events, info, mff_events + + +def _read_mff_events(filename, sfreq): + """Extract the events. + + Parameters + ---------- + filename : path-like + File path. + sfreq : float + The sampling frequency + """ + orig = {} + for xml_file in glob(join(filename, "*.xml")): + xml_type = splitext(basename(xml_file))[0] + orig[xml_type] = _parse_xml(xml_file) + xml_files = orig.keys() + xml_events = [x for x in xml_files if x[:7] == "Events_"] + for item in orig["info"]: + if "recordTime" in item: + start_time = _ns2py_time(item["recordTime"]) + break + markers = [] + code = [] + for xml in xml_events: + for event in orig[xml][2:]: + event_start = _ns2py_time(event["beginTime"]) + start = (event_start - start_time).total_seconds() + if event["code"] not in code: + code.append(event["code"]) + marker = { + "name": event["code"], + "start": start, + "start_sample": int(np.fix(start * sfreq)), + "end": start + float(event["duration"]) / 1e9, + "chan": None, + } + markers.append(marker) + events_tims = dict() + for ev in code: + trig_samp = list( + c["start_sample"] for n, c in enumerate(markers) if c["name"] == ev + ) + events_tims.update({ev: trig_samp}) + return events_tims, code + + +def _parse_xml(xml_file): + """Parse XML file.""" + defusedxml = _soft_import("defusedxml", "reading EGI MFF data") + xml = defusedxml.ElementTree.parse(xml_file) + root = xml.getroot() + return _xml2list(root) + + +def _xml2list(root): + """Parse XML item.""" + output = [] + for element in root: + if len(element) > 0: + if element[0].tag != element[-1].tag: + output.append(_xml2dict(element)) + else: + output.append(_xml2list(element)) + + elif element.text: + text = element.text.strip() + if text: + tag = _ns(element.tag) + output.append({tag: text}) + + return output + + +def _ns(s): + """Remove namespace, but only if there is a namespace to begin with.""" + if "}" in s: + return "}".join(s.split("}")[1:]) + else: + return s + + +def _xml2dict(root): + """Use functions instead of Class. + + remove namespace based on + http://stackoverflow.com/questions/2148119 + """ + output = {} + if root.items(): + output.update(dict(root.items())) + + for element in root: + if len(element) > 0: + if len(element) == 1 or element[0].tag != element[1].tag: + one_dict = _xml2dict(element) + else: + one_dict = {_ns(element[0].tag): _xml2list(element)} + + if element.items(): + one_dict.update(dict(element.items())) + output.update({_ns(element.tag): one_dict}) + + elif element.items(): + output.update({_ns(element.tag): dict(element.items())}) + + else: + output.update({_ns(element.tag): element.text}) + return output + + +def _ns2py_time(nstime): + """Parse times.""" + nsdate = nstime[0:10] + nstime0 = nstime[11:26] + nstime00 = nsdate + " " + nstime0 + pytime = datetime.strptime(nstime00, "%Y-%m-%d %H:%M:%S.%f") + return pytime + + +def _combine_triggers(data, remapping=None): + """Combine binary triggers.""" + new_trigger = np.zeros(data.shape[1]) + if data.astype(bool).sum(axis=0).max() > 1: # ensure no overlaps + logger.info( + " Found multiple events at the same time " + "sample. Cannot create trigger channel." 
+    )
+    return
+    if remapping is None:
+        remapping = np.arange(len(data)) + 1
+    for d, event_id in zip(data, remapping):
+        idx = d.nonzero()
+        if np.any(idx):
+            new_trigger[idx] += event_id
+    return new_trigger
+
+
+def _triage_include_exclude(include, exclude, egi_events, egi_info):
+    """Triage include and exclude."""
+    _validate_type(exclude, (list, None), "exclude")
+    _validate_type(include, (list, None), "include")
+    event_codes = list(egi_info["event_codes"])
+    for name, lst in dict(exclude=exclude, include=include).items():
+        for ii, item in enumerate(lst or []):
+            what = f"{name}[{ii}]"
+            _validate_type(item, str, what)
+            if item not in event_codes:
+                raise ValueError(
+                    f"Could not find event channel named {what}={repr(item)}"
+                )
+    if include is None:
+        default_exclude = ["sync", "TREV"]
+        if exclude is None:
+            exclude = [code for code in default_exclude if code in event_codes]
+        for code, event in zip(event_codes, egi_events):
+            if event.sum() < 1 and code:
+                exclude.append(code)
+        if (
+            len(exclude) == len(event_codes)
+            and egi_info["n_events"]
+            and set(exclude) - set(default_exclude)
+        ):
+            warn(
+                "Did not find any event code with at least one event.",
+                RuntimeWarning,
+            )
+        include = [k for k in event_codes if k not in exclude]
+    del exclude
+    excl_events = ", ".join(k for k in event_codes if k not in include)
+    logger.info(f"    Excluding events {{{excl_events}}} ...")
+    return include
diff --git a/mne/io/egi/general.py b/mne/io/egi/general.py
new file mode 100644
index 0000000..ed028e3
--- /dev/null
+++ b/mne/io/egi/general.py
@@ -0,0 +1,192 @@
+#
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import os
+import re
+
+import numpy as np
+
+from ...utils import _pl, _soft_import
+
+
+def _extract(tags, filepath=None, obj=None):
+    """Extract info from XML."""
+    _soft_import("defusedxml", "reading EGI MFF data")
+    from defusedxml.minidom import parse
+
+    if obj is not None:
+        fileobj = obj
+    elif filepath is not None:
+        fileobj = parse(filepath)
+    else:
+        raise ValueError("There is no object or file to extract data from")
+    infoxml = dict()
+    for tag in tags:
+        value = fileobj.getElementsByTagName(tag)
+        infoxml[tag] = []
+        for i in range(len(value)):
+            infoxml[tag].append(value[i].firstChild.data)
+    return infoxml
+
+
+def _get_gains(filepath):
+    """Parse gains."""
+    _soft_import("defusedxml", "reading EGI MFF data")
+    from defusedxml.minidom import parse
+
+    file_obj = parse(filepath)
+    objects = file_obj.getElementsByTagName("calibration")
+    gains = dict()
+    for ob in objects:
+        value = ob.getElementsByTagName("type")
+        if value[0].firstChild.data == "GCAL":
+            data_g = _extract(["ch"], obj=ob)["ch"]
+            gains.update(gcal=np.asarray(data_g, dtype=np.float64))
+        elif value[0].firstChild.data == "ICAL":
+            data_g = _extract(["ch"], obj=ob)["ch"]
+            gains.update(ical=np.asarray(data_g, dtype=np.float64))
+    return gains
+
+
+def _get_ep_info(filepath):
+    """Get epoch info."""
+    _soft_import("defusedxml", "reading EGI MFF data")
+    from defusedxml.minidom import parse
+
+    epochfile = filepath + "/epochs.xml"
+    epochlist = parse(epochfile)
+    epochs = epochlist.getElementsByTagName("epoch")
+    keys = ("first_samps", "last_samps", "first_blocks", "last_blocks")
+    epoch_info = {key: list() for key in keys}
+    for epoch in epochs:
+        ep_begin = int(epoch.getElementsByTagName("beginTime")[0].firstChild.data)
+        ep_end = int(epoch.getElementsByTagName("endTime")[0].firstChild.data)
+        first_block = 
int(epoch.getElementsByTagName("firstBlock")[0].firstChild.data) + last_block = int(epoch.getElementsByTagName("lastBlock")[0].firstChild.data) + epoch_info["first_samps"].append(ep_begin) + epoch_info["last_samps"].append(ep_end) + epoch_info["first_blocks"].append(first_block) + epoch_info["last_blocks"].append(last_block) + # Don't turn into ndarray here, keep native int because it can deal with + # huge numbers (could use np.uint64 but it's more work) + return epoch_info + + +def _get_blocks(filepath): + """Get info from meta data blocks.""" + binfile = os.path.join(filepath) + n_blocks = 0 + samples_block = [] + header_sizes = [] + n_channels = [] + sfreq = [] + # Meta data consists of: + # * 1 byte of flag (1 for meta data, 0 for data) + # * 1 byte of header size + # * 1 byte of block size + # * 1 byte of n_channels + # * n_channels bytes of offsets + # * n_channels bytes of sigfreqs? + with open(binfile, "rb") as fid: + fid.seek(0, 2) # go to end of file + file_length = fid.tell() + block_size = file_length + fid.seek(0) + position = 0 + while position < file_length: + block = _block_r(fid) + if block is None: + samples_block.append(samples_block[n_blocks - 1]) + n_blocks += 1 + fid.seek(block_size, 1) + position = fid.tell() + continue + block_size = block["block_size"] + header_size = block["header_size"] + header_sizes.append(header_size) + samples_block.append(block["nsamples"]) + n_blocks += 1 + fid.seek(block_size, 1) + sfreq.append(block["sfreq"]) + n_channels.append(block["nc"]) + position = fid.tell() + + if any([n != n_channels[0] for n in n_channels]): + raise RuntimeError("All the blocks don't have the same amount of channels.") + if any([f != sfreq[0] for f in sfreq]): + raise RuntimeError("All the blocks don't have the same sampling frequency.") + if len(samples_block) < 1: + raise RuntimeError("There seems to be no data") + samples_block = np.array(samples_block) + signal_blocks = dict( + n_channels=n_channels[0], + sfreq=sfreq[0], + n_blocks=n_blocks, + samples_block=samples_block, + header_sizes=header_sizes, + ) + return signal_blocks + + +def _get_signalfname(filepath): + """Get filenames.""" + _soft_import("defusedxml", "reading EGI MFF data") + from defusedxml.minidom import parse + + listfiles = os.listdir(filepath) + binfiles = list( + f for f in listfiles if "signal" in f and f[-4:] == ".bin" and f[0] != "." 
+ ) + all_files = {} + infofiles = list() + for binfile in binfiles: + bin_num_str = re.search(r"\d+", binfile).group() + infofile = "info" + bin_num_str + ".xml" + infofiles.append(infofile) + infobjfile = os.path.join(filepath, infofile) + infobj = parse(infobjfile) + if len(infobj.getElementsByTagName("EEG")): + signal_type = "EEG" + elif len(infobj.getElementsByTagName("PNSData")): + signal_type = "PNS" + all_files[signal_type] = { + "signal": f"signal{bin_num_str}.bin", + "info": infofile, + } + if "EEG" not in all_files: + infofiles_str = "\n".join(infofiles) + raise FileNotFoundError( + f"Could not find any EEG data in the {len(infofiles)} file{_pl(infofiles)} " + f"found in {filepath}:\n{infofiles_str}" + ) + return all_files + + +def _block_r(fid): + """Read meta data.""" + if np.fromfile(fid, dtype=np.dtype("i4"), count=1).item() != 1: # not meta + return None + header_size = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item() + block_size = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item() + hl = int(block_size / 4) + nc = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item() + nsamples = int(hl / nc) + np.fromfile(fid, dtype=np.dtype("i4"), count=nc) # sigoffset + sigfreq = np.fromfile(fid, dtype=np.dtype("i4"), count=nc) + depth = sigfreq[0] & 0xFF + if depth != 32: + raise ValueError("I do not know how to read this MFF (depth != 32)") + sfreq = sigfreq[0] >> 8 + count = int(header_size / 4 - (4 + 2 * nc)) + np.fromfile(fid, dtype=np.dtype("i4"), count=count) # sigoffset + block = dict( + nc=nc, + hl=hl, + nsamples=nsamples, + block_size=block_size, + header_size=header_size, + sfreq=sfreq, + ) + return block diff --git a/mne/io/eximia/__init__.py b/mne/io/eximia/__init__.py new file mode 100644 index 0000000..2530990 --- /dev/null +++ b/mne/io/eximia/__init__.py @@ -0,0 +1,7 @@ +"""Eximia module for conversion to FIF.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .eximia import read_raw_eximia diff --git a/mne/io/eximia/eximia.py b/mne/io/eximia/eximia.py new file mode 100644 index 0000000..5d21879 --- /dev/null +++ b/mne/io/eximia/eximia.py @@ -0,0 +1,103 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os.path as op + +from ..._fiff.meas_info import create_info +from ..._fiff.utils import _file_size, _read_segments_file +from ...utils import _check_fname, fill_doc, logger, verbose, warn +from ..base import BaseRaw + + +@fill_doc +def read_raw_eximia(fname, preload=False, verbose=None) -> "RawEximia": + """Reader for an eXimia EEG file. + + Parameters + ---------- + fname : path-like + Path to the eXimia ``.nxe`` data file. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawEximia + A Raw object containing eXimia data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawEximia. + """ + return RawEximia(fname, preload, verbose) + + +@fill_doc +class RawEximia(BaseRaw): + """Raw object from an Eximia EEG file. + + Parameters + ---------- + fname : path-like + Path to the eXimia data file (.nxe). + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. 
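+
+    Examples
+    --------
+    A minimal usage sketch (the filename is hypothetical):
+
+    >>> raw = mne.io.read_raw_eximia("recording.nxe", preload=True)  # doctest: +SKIP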
+    """
+
+    @verbose
+    def __init__(self, fname, preload=False, verbose=None):
+        fname = str(_check_fname(fname, "read", True, "fname"))
+        data_name = op.basename(fname)
+        logger.info(f"Loading {data_name}")
+        # Fixed acquisition parameters of the eXimia system (not stored in
+        # the .nxe file itself)
+        n_chan = 64
+        sfreq = 1450.0
+        # data are multiplexed int16
+        ch_names = ["GateIn", "Trig1", "Trig2", "EOG"]
+        ch_types = ["stim", "stim", "stim", "eog"]
+        cals = [
+            0.0015259021896696422,
+            0.0015259021896696422,
+            0.0015259021896696422,
+            0.3814755474174106,
+        ]
+        ch_names += (
+            "Fp1 Fpz Fp2 AF1 AFz AF2 "
+            "F7 F3 F1 Fz F2 F4 F8 "
+            "FT9 FT7 FC5 FC3 FC1 FCz FC2 FC4 FC6 FT8 FT10 "
+            "T7 C5 C3 C1 Cz C2 C4 C6 T8 "
+            "TP9 TP7 CP5 CP3 CP1 CPz CP2 CP4 CP6 TP8 TP10 "
+            "P9 P7 P3 P1 Pz P2 P4 P8 "
+            "P10 PO3 POz PO4 O1 Oz O2 Iz".split()
+        )
+        n_eeg = len(ch_names) - len(cals)
+        cals += [0.07629510948348212] * n_eeg
+        ch_types += ["eeg"] * n_eeg
+        assert len(ch_names) == n_chan
+        info = create_info(ch_names, sfreq, ch_types)
+        n_bytes = _file_size(fname)
+        n_samples, extra = divmod(n_bytes, (n_chan * 2))
+        if extra != 0:
+            warn(
+                f"Incorrect number of samples in file ({n_samples}), the file is likely"
+                " truncated"
+            )
+        for ch, cal in zip(info["chs"], cals):
+            ch["cal"] = cal
+        super().__init__(
+            info,
+            preload=preload,
+            last_samps=(n_samples - 1,),
+            filenames=[fname],
+            orig_format="short",
+        )
+
+    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
+        """Read a chunk of raw data."""
+        _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype="<i2")
+
+    if raw_extras["n_blocks"] > 1:
+        logger.info(
+            f"There are {raw_extras['n_blocks']} recording blocks in this file."
+            f" Times between blocks will be annotated with BAD_ACQ_SKIP."
+        )
+        raw_extras["dfs"]["samples"] = _adjust_times(
+            raw_extras["dfs"]["samples"], raw_extras["sfreq"]
+        )
+    # Convert timestamps to seconds
+    for df in raw_extras["dfs"].values():
+        df = _convert_times(df, raw_extras["first_samp"])
+    # Find overlaps between left and right eye events
+    if find_overlaps:
+        for key in raw_extras["dfs"]:
+            if key not in ["blinks", "fixations", "saccades"]:
+                continue
+            raw_extras["dfs"][key] = _find_overlaps(
+                raw_extras["dfs"][key], max_time=overlap_threshold
+            )
+    # ======================== Info for BaseRaw ========================
+    eye_ch_data = raw_extras["dfs"]["samples"][ch_names].to_numpy().T
+    info = _create_info(ch_names, raw_extras)
+
+    return eye_ch_data, info, raw_extras
+
+
+def _parse_recording_blocks(fname):
+    """Parse Eyelink ASCII file.
+
+    Eyelink samples occur within START and END blocks.
+    Sample lines start with a POSIX-like timestamp string and contain
+    eyetracking sample info. Event lines start with an upper-case string
+    and contain info about ocular events (i.e. blink/saccade) or experiment
+    messages sent by the stimulus presentation software.
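+
+    For example, a sample line looks roughly like
+    ``9511881   850.7   717.5   714.0 ...`` (timestamp, x, y, pupil), while an
+    event line looks roughly like ``EFIX R 9511881 9511981 100 850.7 717.5 714.0``.
+    Both lines are illustrative, not taken from a real recording.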
+ """ + with fname.open() as file: + data_dict = dict() + data_dict["sample_lines"] = [] + data_dict["event_lines"] = { + "START": [], + "END": [], + "SAMPLES": [], + "EVENTS": [], + "ESACC": [], + "EBLINK": [], + "EFIX": [], + "MSG": [], + "INPUT": [], + "BUTTON": [], + "PUPIL": [], + } + + is_recording_block = False + for line in file: + if line.startswith("START"): # start of recording block + is_recording_block = True + if is_recording_block: + tokens = line.split() + if not tokens: + continue # skip empty lines + if tokens[0][0].isnumeric(): # Samples + data_dict["sample_lines"].append(tokens) + elif tokens[0] in data_dict["event_lines"].keys(): + if _is_sys_msg(line): + continue # system messages don't need to be parsed. + event_key, event_info = tokens[0], tokens[1:] + data_dict["event_lines"][event_key].append(event_info) + if tokens[0] == "END": # end of recording block + is_recording_block = False + if not data_dict["sample_lines"]: # no samples parsed + raise ValueError(f"Couldn't find any samples in {fname}") + return data_dict + + +def _validate_data(raw_extras): + """Check the incoming data for some known problems that can occur.""" + # Detect the datatypes that are in file. + if "GAZE" in raw_extras["rec_info"]: + logger.info( + "Pixel coordinate data detected." + "Pass `scalings=dict(eyegaze=1e3)` when using plot" + " method to make traces more legible." + ) + + elif "HREF" in raw_extras["rec_info"]: + logger.info("Head-referenced eye-angle (HREF) data detected.") + elif "PUPIL" in raw_extras["rec_info"]: + warn("Raw eyegaze coordinates detected. Analyze with caution.") + if "AREA" in raw_extras["pupil_info"]: + logger.info("Pupil-size area detected.") + elif "DIAMETER" in raw_extras["pupil_info"]: + logger.info("Pupil-size diameter detected.") + # If more than 1 recording period, check whether eye being tracked changed. + if raw_extras["n_blocks"] > 1: + if raw_extras["tracking_mode"] == "monocular": + blocks_list = raw_extras["event_lines"]["SAMPLES"] + eye_per_block = [block_info[1].lower() for block_info in blocks_list] + if not all([this_eye == raw_extras["eye"] for this_eye in eye_per_block]): + warn( + "The eye being tracked changed during the" + " recording. The channel names will reflect" + " the eye that was tracked at the start of" + " the recording." + ) + + +def _get_recording_datetime(fname): + """Create a datetime object from the datetime in ASCII file.""" + # create a timezone object for UTC + tz = timezone(timedelta(hours=0)) + in_header = False + with fname.open() as file: + for line in file: + # header lines are at top of file and start with ** + if line.startswith("**"): + in_header = True + if in_header: + if line.startswith("** DATE:"): + dt_str = line.replace("** DATE:", "").strip() + fmt = "%a %b %d %H:%M:%S %Y" + # Eyelink measdate timestamps are timezone naive. + # Force datetime to be in UTC. + # Even though dt is probably in local time zone. + try: + dt_naive = datetime.strptime(dt_str, fmt) + except ValueError: + # date string is missing or in an unexpected format + logger.info( + "Could not detect date from file with date entry: " + f"{repr(dt_str)}" + ) + return + else: + return dt_naive.replace(tzinfo=tz) # make it dt aware + return + + +def _get_metadata(raw_extras): + """Get tracking mode, sfreq, eye tracked, pupil metric, etc. + + Don't call this until after _parse_recording_blocks. 
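+
+    Returns a dict with the keys ``rec_info``, ``tracking_mode``, ``eye``,
+    ``first_samp``, ``sfreq``, ``pupil_info``, and ``n_blocks``.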
+ """ + meta_data = dict() + meta_data["rec_info"] = raw_extras["event_lines"]["SAMPLES"][0] + if ("LEFT" in meta_data["rec_info"]) and ("RIGHT" in meta_data["rec_info"]): + meta_data["tracking_mode"] = "binocular" + meta_data["eye"] = "both" + else: + meta_data["tracking_mode"] = "monocular" + meta_data["eye"] = meta_data["rec_info"][1].lower() + meta_data["first_samp"] = float(raw_extras["event_lines"]["START"][0][0]) + meta_data["sfreq"] = _get_sfreq_from_ascii(meta_data["rec_info"]) + meta_data["pupil_info"] = raw_extras["event_lines"]["PUPIL"][0] + meta_data["n_blocks"] = len(raw_extras["event_lines"]["START"]) + return meta_data + + +def _is_sys_msg(line): + """Flag lines from eyelink ASCII file that contain a known system message. + + Some lines in eyelink files are system outputs usually + only meant for Eyelinks DataViewer application to read. + These shouldn't need to be parsed. + + Parameters + ---------- + line : string + single line from Eyelink asc file + + Returns + ------- + bool : + True if any of the following strings that are + known to indicate a system message are in the line + + Notes + ----- + Examples of eyelink system messages: + - ;Sess:22Aug22;Tria:1;Tri2:False;ESNT:182BFE4C2F4; + - ;NTPT:182BFE55C96;SMSG:__NTP_CLOCK_SYNC__;DIFF:-1; + - !V APLAYSTART 0 1 library/audio + - !MODE RECORD CR 500 2 1 R + """ + return "!V" in line or "!MODE" in line or ";" in line + + +def _get_sfreq_from_ascii(rec_info): + """Get sampling frequency from Eyelink ASCII file. + + Parameters + ---------- + rec_info : list + the first list in raw_extras["event_lines"]['SAMPLES']. + The sfreq occurs after RATE: i.e. [..., RATE, 1000, ...]. + + Returns + ------- + sfreq : float + """ + return float(rec_info[rec_info.index("RATE") + 1]) + + +def _create_dataframes(raw_extras, apply_offsets): + """Create pandas.DataFrame for Eyelink samples and events. + + Creates a pandas DataFrame for sample_lines and for each + non-empty key in event_lines. + """ + pd = _check_pandas_installed() + df_dict = dict() + + # dataframe for samples + df_dict["samples"] = pd.DataFrame(raw_extras["sample_lines"]) + df_dict["samples"] = _drop_status_col(df_dict["samples"]) # drop STATUS col + + # dataframe for each type of occular event + for event, label in zip( + ["EFIX", "ESACC", "EBLINK"], ["fixations", "saccades", "blinks"] + ): + if raw_extras["event_lines"][event]: # an empty list returns False + df_dict[label] = pd.DataFrame(raw_extras["event_lines"][event]) + else: + logger.info( + f"No {label} were found in this file. " + f"Not returning any info on {label}." 
+ ) + + # make dataframe for experiment messages + if raw_extras["event_lines"]["MSG"]: + msgs = [] + for token in raw_extras["event_lines"]["MSG"]: + if apply_offsets and len(token) == 2: + ts, msg = token + offset = np.nan + elif apply_offsets: + ts = token[0] + try: + offset = float(token[1]) + msg = " ".join(str(x) for x in token[2:]) + except ValueError: + offset = np.nan + msg = " ".join(str(x) for x in token[1:]) + else: + ts, offset = token[0], np.nan + msg = " ".join(str(x) for x in token[1:]) + msgs.append([ts, offset, msg]) + df_dict["messages"] = pd.DataFrame(msgs) + + # make dataframe for recording block start, end times + i = 1 + blocks = list() + for bgn, end in zip( + raw_extras["event_lines"]["START"], raw_extras["event_lines"]["END"] + ): + blocks.append((float(bgn[0]), float(end[0]), i)) + i += 1 + cols = ["time", "end_time", "block"] + df_dict["recording_blocks"] = pd.DataFrame(blocks, columns=cols) + + # TODO: Make dataframes for other eyelink events (Buttons) + return df_dict + + +def _drop_status_col(samples_df): + """Drop STATUS column from samples dataframe. + + see https://github.com/mne-tools/mne-python/issues/11809, and section 4.9.2.1 of + the Eyelink 1000 Plus User Manual, version 1.0.19. We know that the STATUS + column is either 3, 5, 13, or 17 characters long, i.e. "...", ".....", ".C." + """ + status_cols = [] + # we know the first 3 columns will be the time, xpos, ypos + for col in samples_df.columns[3:]: + if samples_df[col][0][0].isnumeric(): + # if the value is numeric, it's not a status column + continue + if len(samples_df[col][0]) in [3, 5, 13, 17]: + status_cols.append(col) + return samples_df.drop(columns=status_cols) + + +def _infer_col_names(raw_extras): + """Build column and channel names for data from Eyelink ASCII file. + + Returns the expected column names for the sample lines and event + lines, to be passed into pd.DataFrame. The columns present in an eyelink ASCII + file can vary. The order that col_names are built below should NOT change. + """ + col_names = {} + # initiate the column names for the sample lines + col_names["samples"] = list(EYELINK_COLS["timestamp"]) + + # and for the eye message lines + col_names["blinks"] = list(EYELINK_COLS["eye_event"]) + col_names["fixations"] = list(EYELINK_COLS["eye_event"] + EYELINK_COLS["fixation"]) + col_names["saccades"] = list(EYELINK_COLS["eye_event"] + EYELINK_COLS["saccade"]) + + # Recording was either binocular or monocular + # If monocular, find out which eye was tracked and append to ch_name + if raw_extras["tracking_mode"] == "monocular": + eye = raw_extras["eye"] + ch_names = list(EYELINK_COLS["pos"][eye]) + elif raw_extras["tracking_mode"] == "binocular": + ch_names = list(EYELINK_COLS["pos"]["left"] + EYELINK_COLS["pos"]["right"]) + col_names["samples"].extend(ch_names) + + # The order of these if statements should not be changed. 
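+    # (each optional group of columns must be appended in the same order in
+    # which its values appear within a sample line of the ASCII file)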
+ if "VEL" in raw_extras["rec_info"]: # If velocity data are reported + if raw_extras["tracking_mode"] == "monocular": + ch_names.extend(EYELINK_COLS["velocity"][eye]) + col_names["samples"].extend(EYELINK_COLS["velocity"][eye]) + elif raw_extras["tracking_mode"] == "binocular": + ch_names.extend( + EYELINK_COLS["velocity"]["left"] + EYELINK_COLS["velocity"]["right"] + ) + col_names["samples"].extend( + EYELINK_COLS["velocity"]["left"] + EYELINK_COLS["velocity"]["right"] + ) + # if resolution data are reported + if "RES" in raw_extras["rec_info"]: + ch_names.extend(EYELINK_COLS["resolution"]) + col_names["samples"].extend(EYELINK_COLS["resolution"]) + col_names["fixations"].extend(EYELINK_COLS["resolution"]) + col_names["saccades"].extend(EYELINK_COLS["resolution"]) + # if digital input port values are reported + if "INPUT" in raw_extras["rec_info"]: + ch_names.extend(EYELINK_COLS["input"]) + col_names["samples"].extend(EYELINK_COLS["input"]) + + # if head target info was reported, add its cols + if "HTARGET" in raw_extras["rec_info"]: + ch_names.extend(EYELINK_COLS["remote"]) + col_names["samples"].extend(EYELINK_COLS["remote"]) + + return col_names, ch_names + + +def _assign_col_names(col_names, df_dict): + """Assign column names to dataframes. + + Parameters + ---------- + col_names : dict + Dictionary of column names for each dataframe. + """ + for key, df in df_dict.items(): + if key in ("samples", "blinks", "fixations", "saccades"): + df.columns = col_names[key] + elif key == "messages": + cols = ["time", "offset", "event_msg"] + df.columns = cols + return df_dict + + +def _set_df_dtypes(df_dict): + from mne.utils import _set_pandas_dtype + + for key, df in df_dict.items(): + if key in ["samples"]: + # convert missing position values to NaN + _set_missing_values(df, df.columns[1:]) + _set_pandas_dtype(df, df.columns, float, verbose="warning") + elif key in ["blinks", "fixations", "saccades"]: + _set_missing_values(df, df.columns[1:]) + _set_pandas_dtype(df, df.columns[1:], float, verbose="warning") + elif key == "messages": + _set_pandas_dtype(df, ["time"], float, verbose="warning") # timestamp + return df_dict + + +def _set_missing_values(df, columns): + """Set missing values to NaN. operates in-place.""" + missing_vals = (".", "MISSING_DATA") + for col in columns: + # we explicitly use numpy instead of pd.replace because it is faster + df[col] = np.where(df[col].isin(missing_vals), np.nan, df[col]) + + +def _sort_by_time(df, col="time"): + df.sort_values(col, ascending=True, inplace=True) + df.reset_index(drop=True, inplace=True) + + +def _convert_times(df, first_samp, col="time"): + """Set initial time to 0, converts from ms to seconds in place. + + Parameters + ---------- + df pandas.DataFrame: + One of the dataframes in raw_extras["dfs"] dict. + + first_samp int: + timestamp of the first sample of the recording. This should + be the first sample of the first recording block. + col str (default 'time'): + column name to sort pandas.DataFrame by + + Notes + ----- + Each sample in an Eyelink file has a posix timestamp string. + Subtracts the "first" sample's timestamp from each timestamp. + The "first" sample is inferred to be the first sample of + the first recording block, i.e. the first "START" line. 
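+
+    For example, with ``first_samp=9511881``, a raw timestamp of ``9512881``
+    becomes ``(9512881 - 9511881) / 1000 = 1.0`` seconds.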
+ """ + _sort_by_time(df, col) + for col in df.columns: + if col.endswith("time"): # 'time' and 'end_time' cols + df[col] -= first_samp + df[col] /= 1000 + if col in ["duration", "offset"]: + df[col] /= 1000 + return df + + +def _adjust_times( + df, + sfreq, + time_col="time", +): + """Fill missing timestamps if there are multiple recording blocks. + + Parameters + ---------- + df : pandas.DataFrame: + dataframe of the eyetracking data samples, BEFORE + _convert_times() is applied to the dataframe + + sfreq : int | float: + sampling frequency of the data + + time_col : str (default 'time'): + name of column with the timestamps (e.g. 9511881, 9511882, ...) + + Returns + ------- + %(df_return)s + + Notes + ----- + After _parse_recording_blocks, Files with multiple recording blocks will + have missing timestamps for the duration of the period between the blocks. + This would cause the occular annotations (i.e. blinks) to not line up with + the signal. + """ + pd = _check_pandas_installed() + + first, last = df[time_col].iloc[[0, -1]] + step = 1000 / sfreq + df[time_col] = df[time_col].astype(float) + new_times = pd.DataFrame( + np.arange(first, last + step / 2, step), columns=[time_col] + ) + return pd.merge_asof( + new_times, df, on=time_col, direction="nearest", tolerance=step / 2 + ) + + +def _find_overlaps(df, max_time=0.05): + """Merge left/right eye events with onset/offset diffs less than max_time. + + Parameters + ---------- + df : pandas.DataFrame + Pandas DataFrame with occular events (fixations, saccades, blinks) + max_time : float (default 0.05) + Time in seconds. Defaults to .05 (50 ms) + + Returns + ------- + DataFrame: %(df_return)s + :class:`pandas.DataFrame` specifying overlapped eye events, if any + + Notes + ----- + The idea is to cumulative sum the boolean values for rows with onset and + offset differences (against the previous row) that are greater than the + max_time. If onset and offset diffs are less than max_time then no_overlap + will become False. Alternatively, if either the onset or offset diff is + greater than max_time, no_overlap becomes True. Cumulatively summing over + these boolean values will leave rows with no_overlap == False unchanged + and hence with the same group number. + """ + pd = _check_pandas_installed() + + if not len(df): + return + df["overlap_start"] = df.sort_values("time")["time"].diff().lt(max_time) + + df["overlap_end"] = df["end_time"].diff().abs().lt(max_time) + + df["no_overlap"] = ~(df["overlap_end"] & df["overlap_start"]) + df["group"] = df["no_overlap"].cumsum() + + # now use groupby on 'group'. If one left and one right eye in group + # the new start/end times are the mean of the two eyes + ovrlp = pd.concat( + [ + pd.DataFrame(g[1].drop(columns="eye").mean()).T + if (len(g[1]) == 2) and (len(g[1].eye.unique()) == 2) + else g[1] # not an overlap, return group unchanged + for g in df.groupby("group") + ] + ) + # overlapped events get a "both" value in the "eye" col + if "eye" in ovrlp.columns: + ovrlp["eye"] = ovrlp["eye"].fillna("both") + else: + ovrlp["eye"] = "both" + tmp_cols = ["overlap_start", "overlap_end", "no_overlap", "group"] + return ovrlp.drop(columns=tmp_cols).reset_index(drop=True) + + +def _convert_href_samples(samples_df): + """Convert HREF eyegaze samples to radians.""" + # grab the xpos and ypos channel names + pos_names = EYELINK_COLS["pos"]["left"][:-1] + EYELINK_COLS["pos"]["right"][:-1] + for col in samples_df.columns: + if col not in pos_names: # 'xpos_left' ... 
'ypos_right' + continue + series = _href_to_radian(samples_df[col]) + samples_df[col] = series + return samples_df + + +def _href_to_radian(opposite, f=15_000): + """Convert HREF eyegaze samples to radians. + + Parameters + ---------- + opposite : int + The x or y coordinate in an HREF gaze sample. + f : int (default 15_000) + distance of plane from the eye. Defaults to 15,000 units, which was taken + from the Eyelink 1000 plus user manual. + + Returns + ------- + x or y coordinate in radians + + Notes + ----- + See section 4.4.2.2 in the Eyelink 1000 Plus User Manual + (version 1.0.19) for a detailed description of HREF data. + """ + return np.arcsin(opposite / f) + + +def _create_info(ch_names, raw_extras): + """Create info object for RawEyelink.""" + # assign channel type from ch_name + pos_names = EYELINK_COLS["pos"]["left"][:-1] + EYELINK_COLS["pos"]["right"][:-1] + pupil_names = EYELINK_COLS["pos"]["left"][-1] + EYELINK_COLS["pos"]["right"][-1] + ch_types = [ + "eyegaze" + if ch in pos_names + else "pupil" + if ch in pupil_names + else "stim" + if ch == "DIN" + else "misc" + for ch in ch_names + ] + info = create_info(ch_names, raw_extras["sfreq"], ch_types) + # set correct loc for eyepos and pupil channels + for ch_dict in info["chs"]: + # loc index 3 can indicate left or right eye + if ch_dict["ch_name"].endswith("left"): # [x,y,pupil]_left + ch_dict["loc"][3] = -1 # left eye + elif ch_dict["ch_name"].endswith("right"): # [x,y,pupil]_right + ch_dict["loc"][3] = 1 # right eye + else: + logger.debug( + f"leaving index 3 of loc array as" + f" {ch_dict['loc'][3]} for {ch_dict['ch_name']}" + ) + # loc index 4 can indicate x/y coord + if ch_dict["ch_name"].startswith("x"): + ch_dict["loc"][4] = -1 # x-coord + elif ch_dict["ch_name"].startswith("y"): + ch_dict["loc"][4] = 1 # y-coord + else: + logger.debug( + f"leaving index 4 of loc array as" + f" {ch_dict['loc'][4]} for {ch_dict['ch_name']}" + ) + if "HREF" in raw_extras["rec_info"]: + if ch_dict["ch_name"].startswith(("xpos", "ypos")): + ch_dict["unit"] = FIFF.FIFF_UNIT_RAD + return info + + +def _make_eyelink_annots(df_dict, create_annots, apply_offsets): + """Create Annotations for each df in raw_extras.""" + eye_ch_map = { + "L": ("xpos_left", "ypos_left", "pupil_left"), + "R": ("xpos_right", "ypos_right", "pupil_right"), + "both": ( + "xpos_left", + "ypos_left", + "pupil_left", + "xpos_right", + "ypos_right", + "pupil_right", + ), + } + valid_descs = ["blinks", "saccades", "fixations", "messages"] + msg = ( + "create_annotations must be True or a list containing one or" + f" more of {valid_descs}." + ) + wrong_type = msg + f" Got a {type(create_annots)} instead." 
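+    # Normalize create_annots into an explicit list of annotation types
+    # before building the Annotations objects below.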
+    if create_annots is True:
+        descs = valid_descs
+    else:
+        if not isinstance(create_annots, list):
+            raise TypeError(wrong_type)
+        for desc in create_annots:
+            if desc not in valid_descs:
+                raise ValueError(msg + f" Got '{desc}' instead")
+        descs = create_annots
+
+    annots = None
+    for key, df in df_dict.items():
+        eye_annot_cond = (key in ["blinks", "fixations", "saccades"]) and (key in descs)
+        if eye_annot_cond:
+            onsets = df["time"]
+            durations = df["duration"]
+            # Create annotations for both eyes
+            descriptions = key[:-1]  # i.e. "blink", "fixation", "saccade"
+            if key == "blinks":
+                descriptions = "BAD_" + descriptions
+            ch_names = df["eye"].map(eye_ch_map).tolist()
+            this_annot = Annotations(
+                onset=onsets,
+                duration=durations,
+                description=descriptions,
+                ch_names=ch_names,
+            )
+        elif (key in ["messages"]) and (key in descs):
+            if apply_offsets:
+                # If df['offset'] is all NaNs, time is not changed
+                onsets = df["time"] + df["offset"].fillna(0)
+            else:
+                onsets = df["time"]
+            durations = [0] * len(onsets)
+            descriptions = df["event_msg"]
+            this_annot = Annotations(
+                onset=onsets, duration=durations, description=descriptions
+            )
+        else:
+            continue  # TODO make df and annotations for Buttons
+        if not annots:
+            annots = this_annot
+        else:
+            annots += this_annot
+    if not annots:
+        warn(f"Annotations for {descs} were requested but none could be made.")
+        return
+    return annots
+
+
+def _make_gap_annots(raw_extras, key="recording_blocks"):
+    """Create Annotations for gap periods between recording blocks."""
+    df = raw_extras["dfs"][key]
+    onsets = df["end_time"].iloc[:-1]
+    diffs = df["time"].shift(-1) - df["end_time"]
+    durations = diffs.iloc[:-1]
+    descriptions = ["BAD_ACQ_SKIP"] * len(onsets)
+    return Annotations(onset=onsets, duration=durations, description=descriptions)
+
+
+# ======================== Used by read_eyelink_calibration ===========================
+
+
+def _find_recording_start(lines):
+    """Return the first START line in an SR Research EyeLink ASCII file.
+
+    Parameters
+    ----------
+    lines: A list of strings, which are the lines in an eyelink ASCII file.
+
+    Returns
+    -------
+    The line that contains the info on the start of the recording.
+    """
+    for line in lines:
+        if line.startswith("START"):
+            return line
+    raise ValueError("Could not find the start of the recording.")
+
+
+def _parse_validation_line(line):
+    """Parse a single line of eyelink validation data.
+
+    Parameters
+    ----------
+    line: A string containing a line of validation data from an eyelink
+        ASCII file.
+
+    Returns
+    -------
+    A tuple containing the validation data.
+    """
+    tokens = line.split()
+    xy = tokens[-6].strip("[]").split(",")  # e.g. '960, 540'
+    xy_diff = tokens[-2].strip("[]").split(",")  # e.g. '-1.5, -2.8'
+    vals = [float(v) for v in [*xy, tokens[-4], *xy_diff]]
+    vals[3] += vals[0]  # pos_x + eye_x i.e. 960 + -1.5
+    vals[4] += vals[1]  # pos_y + eye_y
+
+    return tuple(vals)
+
+
+def _parse_calibration(
+    lines, screen_size=None, screen_distance=None, screen_resolution=None
+):
+    """Parse the lines in the given list and return a list of Calibration instances.
+
+    Parameters
+    ----------
+    lines: A list of strings, which are the lines in an eyelink ASCII file.
+
+    Returns
+    -------
+    A list containing one or more Calibration instances,
+    one for each calibration that was recorded in the eyelink ASCII file
+    data.
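+
+    A validation summary line looks roughly like
+    ``MSG 7478346 !CAL VALIDATION HV9 R RIGHT GOOD ERROR 0.27 avg. 0.83 max ...``
+    (illustrative only; the exact layout can vary across tracker versions).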
+ """ + from ...preprocessing.eyetracking.calibration import Calibration + + regex = re.compile(r"\d+") # for finding numeric characters + calibrations = list() + rec_start = float(_find_recording_start(lines).split()[1]) + + for line_number, line in enumerate(lines): + if ( + "!CAL VALIDATION " in line and "ABORTED" not in line + ): # Start of a calibration + tokens = line.split() + model = tokens[4] # e.g. 'HV13' + this_eye = tokens[6].lower() # e.g. 'left' + timestamp = float(tokens[1]) + onset = (timestamp - rec_start) / 1000.0 # in seconds + avg_error = float(line.split("avg.")[0].split()[-1]) # e.g. 0.3 + max_error = float(line.split("max")[0].split()[-1]) # e.g. 0.9 + + n_points = int(regex.search(model).group()) # e.g. 13 + n_points *= 2 if "LR" in line else 1 # one point per eye if "LR" + # The next n_point lines contain the validation data + points = [] + for validation_index in range(n_points): + subline = lines[line_number + validation_index + 1] + if "!CAL VALIDATION" in subline: + continue # for bino mode, skip the second eye's validation summary + subline_eye = subline.split("at")[0].split()[-1].lower() # e.g. 'left' + if subline_eye != this_eye: + continue # skip the validation lines for the other eye + point_info = _parse_validation_line(subline) + points.append(point_info) + # Convert the list of validation data into a numpy array + positions = np.array([point[:2] for point in points]) + offsets = np.array([point[2] for point in points]) + gaze = np.array([point[3:] for point in points]) + # create the Calibration instance + calibration = Calibration( + onset=onset, + model=model, + eye=this_eye, + avg_error=avg_error, + max_error=max_error, + positions=positions, + offsets=offsets, + gaze=gaze, + screen_size=screen_size, + screen_distance=screen_distance, + screen_resolution=screen_resolution, + ) + calibrations.append(calibration) + return calibrations diff --git a/mne/io/eyelink/eyelink.py b/mne/io/eyelink/eyelink.py new file mode 100644 index 0000000..192a555 --- /dev/null +++ b/mne/io/eyelink/eyelink.py @@ -0,0 +1,133 @@ +"""SR Research Eyelink Load Function.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from pathlib import Path + +from ...utils import ( + _check_fname, + fill_doc, + logger, + verbose, +) +from ..base import BaseRaw +from ._utils import _make_eyelink_annots, _make_gap_annots, _parse_eyelink_ascii + + +@fill_doc +def read_raw_eyelink( + fname, + *, + create_annotations=True, + apply_offsets=False, + find_overlaps=False, + overlap_threshold=0.05, + verbose=None, +) -> "RawEyelink": + """Reader for an Eyelink ``.asc`` file. + + Parameters + ---------- + %(eyelink_fname)s + %(eyelink_create_annotations)s + %(eyelink_apply_offsets)s + %(eyelink_find_overlaps)s + %(eyelink_overlap_threshold)s + %(verbose)s + + Returns + ------- + raw : instance of RawEyelink + A Raw object containing eyetracker data. + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + + Notes + ----- + It is common for SR Research Eyelink eye trackers to only record data during trials. + To avoid frequent data discontinuities and to ensure that the data is continuous + so that it can be aligned with EEG and MEG data (if applicable), this reader will + preserve the times between recording trials and annotate them with + ``'BAD_ACQ_SKIP'``. 
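+
+    Examples
+    --------
+    A minimal usage sketch (the filename is hypothetical):
+
+    >>> raw = mne.io.read_raw_eyelink("recording.asc", create_annotations=True)  # doctest: +SKIP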
+ """ + fname = _check_fname(fname, overwrite="read", must_exist=True, name="fname") + + raw_eyelink = RawEyelink( + fname, + create_annotations=create_annotations, + apply_offsets=apply_offsets, + find_overlaps=find_overlaps, + overlap_threshold=overlap_threshold, + verbose=verbose, + ) + return raw_eyelink + + +@fill_doc +class RawEyelink(BaseRaw): + """Raw object from an XXX file. + + Parameters + ---------- + %(eyelink_fname)s + %(eyelink_create_annotations)s + %(eyelink_apply_offsets)s + %(eyelink_find_overlaps)s + %(eyelink_overlap_threshold)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attribute and methods. + """ + + @verbose + def __init__( + self, + fname, + *, + create_annotations=True, + apply_offsets=False, + find_overlaps=False, + overlap_threshold=0.05, + verbose=None, + ): + logger.info(f"Loading {fname}") + + fname = Path(fname) + + # ======================== Parse ASCII file ========================== + eye_ch_data, info, raw_extras = _parse_eyelink_ascii( + fname, find_overlaps, overlap_threshold, apply_offsets + ) + # ======================== Create Raw Object ========================= + super().__init__( + info, + preload=eye_ch_data, + filenames=[fname], + verbose=verbose, + raw_extras=[raw_extras], + ) + self.set_meas_date(self._raw_extras[0]["dt"]) + + # ======================== Make Annotations ========================= + gap_annots = None + if self._raw_extras[0]["n_blocks"] > 1: + gap_annots = _make_gap_annots(self._raw_extras[0]) + eye_annots = None + if create_annotations: + eye_annots = _make_eyelink_annots( + self._raw_extras[0]["dfs"], create_annotations, apply_offsets + ) + if gap_annots and eye_annots: # set both + self.set_annotations(gap_annots + eye_annots) + elif gap_annots: + self.set_annotations(gap_annots) + elif eye_annots: + self.set_annotations(eye_annots) + else: + logger.info("Not creating any annotations") diff --git a/mne/io/fieldtrip/__init__.py b/mne/io/fieldtrip/__init__.py new file mode 100644 index 0000000..efdd3d2 --- /dev/null +++ b/mne/io/fieldtrip/__init__.py @@ -0,0 +1,5 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .fieldtrip import read_evoked_fieldtrip, read_epochs_fieldtrip, read_raw_fieldtrip diff --git a/mne/io/fieldtrip/fieldtrip.py b/mne/io/fieldtrip/fieldtrip.py new file mode 100644 index 0000000..5d94d3e --- /dev/null +++ b/mne/io/fieldtrip/fieldtrip.py @@ -0,0 +1,185 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +from ...epochs import EpochsArray +from ...evoked import EvokedArray +from ...utils import _check_fname, _import_pymatreader_funcs +from ..array.array import RawArray +from .utils import ( + _create_event_metadata, + _create_events, + _create_info, + _set_tmin, + _validate_ft_struct, +) + + +def read_raw_fieldtrip(fname, info, data_name="data") -> RawArray: + """Load continuous (raw) data from a FieldTrip preprocessing structure. + + This function expects to find single trial raw data (FT_DATATYPE_RAW) in + the structure data_name is pointing at. + + .. warning:: FieldTrip does not normally store the original information + concerning channel location, orientation, type etc. It is + therefore **highly recommended** to provide the info field. + This can be obtained by reading the original raw data file + with MNE functions (without preload). The returned object + contains the necessary info field. 
+ + Parameters + ---------- + fname : path-like + Path and filename of the ``.mat`` file containing the data. + info : dict or None + The info dict of the raw data file corresponding to the data to import. + If this is set to None, limited information is extracted from the + FieldTrip structure. + data_name : str + Name of heading dict/variable name under which the data was originally + saved in MATLAB. + + Returns + ------- + raw : instance of RawArray + A Raw Object containing the loaded data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawArray. + """ + read_mat = _import_pymatreader_funcs("FieldTrip I/O") + fname = _check_fname(fname, overwrite="read", must_exist=True) + + ft_struct = read_mat(fname, ignore_fields=["previous"], variable_names=[data_name]) + + # load data and set ft_struct to the heading dictionary + ft_struct = ft_struct[data_name] + + _validate_ft_struct(ft_struct) + + info = _create_info(ft_struct, info) # create info structure + data = np.array(ft_struct["trial"]) # create the main data array + + if data.ndim > 2: + data = np.squeeze(data) + + if data.ndim == 1: + data = data[np.newaxis, ...] + + if data.ndim != 2: + raise RuntimeError( + "The data you are trying to load does not seem to be raw data" + ) + + raw = RawArray(data, info) # create an MNE RawArray + return raw + + +def read_epochs_fieldtrip( + fname, info, data_name="data", trialinfo_column=0 +) -> EpochsArray: + """Load epoched data from a FieldTrip preprocessing structure. + + This function expects to find epoched data in the structure data_name is + pointing at. + + .. warning:: Only epochs with the same amount of channels and samples are + supported! + + .. warning:: FieldTrip does not normally store the original information + concerning channel location, orientation, type etc. It is + therefore **highly recommended** to provide the info field. + This can be obtained by reading the original raw data file + with MNE functions (without preload). The returned object + contains the necessary info field. + + Parameters + ---------- + fname : path-like + Path and filename of the ``.mat`` file containing the data. + info : dict or None + The info dict of the raw data file corresponding to the data to import. + If this is set to None, limited information is extracted from the + FieldTrip structure. + data_name : str + Name of heading dict/ variable name under which the data was originally + saved in MATLAB. + trialinfo_column : int + Column of the trialinfo matrix to use for the event codes. + + Returns + ------- + epochs : instance of EpochsArray + An EpochsArray containing the loaded data. 
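+
+    Examples
+    --------
+    A minimal sketch (paths are hypothetical); reading the original raw file
+    first provides the recommended ``info``:
+
+    >>> info = mne.io.read_raw_fif("original_raw.fif").info  # doctest: +SKIP
+    >>> epochs = mne.io.read_epochs_fieldtrip("epoched.mat", info)  # doctest: +SKIP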
+ """ + read_mat = _import_pymatreader_funcs("FieldTrip I/O") + ft_struct = read_mat(fname, ignore_fields=["previous"], variable_names=[data_name]) + + # load data and set ft_struct to the heading dictionary + ft_struct = ft_struct[data_name] + + _validate_ft_struct(ft_struct) + + info = _create_info(ft_struct, info) # create info structure + data = np.array(ft_struct["trial"]) # create the epochs data array + events = _create_events(ft_struct, trialinfo_column) + if events is not None: + metadata = _create_event_metadata(ft_struct) + else: + metadata = None + tmin = _set_tmin(ft_struct) # create start time + + epochs = EpochsArray( + data=data, info=info, tmin=tmin, events=events, metadata=metadata, proj=False + ) + return epochs + + +def read_evoked_fieldtrip(fname, info, comment=None, data_name="data"): + """Load evoked data from a FieldTrip timelocked structure. + + This function expects to find timelocked data in the structure data_name is + pointing at. + + .. warning:: FieldTrip does not normally store the original information + concerning channel location, orientation, type etc. It is + therefore **highly recommended** to provide the info field. + This can be obtained by reading the original raw data file + with MNE functions (without preload). The returned object + contains the necessary info field. + + Parameters + ---------- + fname : path-like + Path and filename of the ``.mat`` file containing the data. + info : dict or None + The info dict of the raw data file corresponding to the data to import. + If this is set to None, limited information is extracted from the + FieldTrip structure. + comment : str + Comment on dataset. Can be the condition. + data_name : str + Name of heading dict/ variable name under which the data was originally + saved in MATLAB. + + Returns + ------- + evoked : instance of EvokedArray + An EvokedArray containing the loaded data. + """ + read_mat = _import_pymatreader_funcs("FieldTrip I/O") + ft_struct = read_mat(fname, ignore_fields=["previous"], variable_names=[data_name]) + ft_struct = ft_struct[data_name] + + _validate_ft_struct(ft_struct) + + info = _create_info(ft_struct, info) # create info structure + data_evoked = ft_struct["avg"] # create evoked data + + evoked = EvokedArray(data_evoked, info, comment=comment) + return evoked diff --git a/mne/io/fieldtrip/utils.py b/mne/io/fieldtrip/utils.py new file mode 100644 index 0000000..f855b7c --- /dev/null +++ b/mne/io/fieldtrip/utils.py @@ -0,0 +1,367 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import numpy as np + +from ..._fiff._digitization import DigPoint, _ensure_fiducials_head +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import create_info +from ..._fiff.pick import pick_info +from ...transforms import rotation3d_align_z_axis +from ...utils import _check_pandas_installed, warn + +_supported_megs = ["neuromag306"] + +_unit_dict = { + "m": 1, + "cm": 1e-2, + "mm": 1e-3, + "V": 1, + "mV": 1e-3, + "uV": 1e-6, + "T": 1, + "T/m": 1, + "T/cm": 1e2, +} + +NOINFO_WARNING = ( + "Importing FieldTrip data without an info dict from the " + "original file. Channel locations, orientations and types " + "will be incorrect. The imported data cannot be used for " + "source analysis, channel interpolation etc." 
+) + + +def _validate_ft_struct(ft_struct): + """Run validation checks on the ft_structure.""" + if isinstance(ft_struct, list): + raise RuntimeError("Loading of data in cell arrays is not supported") + + +def _create_info(ft_struct, raw_info): + """Create MNE info structure from a FieldTrip structure.""" + if raw_info is None: + warn(NOINFO_WARNING) + + sfreq = _set_sfreq(ft_struct) + ch_names = ft_struct["label"] + if raw_info: + info = raw_info.copy() + missing_channels = set(ch_names) - set(info["ch_names"]) + if missing_channels: + warn( + "The following channels are present in the FieldTrip data " + f"but cannot be found in the provided info: {missing_channels}.\n" + "These channels will be removed from the resulting data!" + ) + + missing_chan_idx = [ch_names.index(ch) for ch in missing_channels] + new_chs = [ch for ch in ch_names if ch not in missing_channels] + ch_names = new_chs + ft_struct["label"] = ch_names + + if "trial" in ft_struct: + ft_struct["trial"] = _remove_missing_channels_from_trial( + ft_struct["trial"], missing_chan_idx + ) + + if "avg" in ft_struct: + if ft_struct["avg"].ndim == 2: + ft_struct["avg"] = np.delete( + ft_struct["avg"], missing_chan_idx, axis=0 + ) + + with info._unlock(): + info["sfreq"] = sfreq + ch_idx = [info["ch_names"].index(ch) for ch in ch_names] + pick_info(info, ch_idx, copy=False) + else: + info = create_info(ch_names, sfreq) + chs, dig = _create_info_chs_dig(ft_struct) + with info._unlock(update_redundant=True): + info.update(chs=chs, dig=dig) + + return info + + +def _remove_missing_channels_from_trial(trial, missing_chan_idx): + if isinstance(trial, list): + for idx_trial in range(len(trial)): + trial[idx_trial] = _remove_missing_channels_from_trial( + trial[idx_trial], missing_chan_idx + ) + elif isinstance(trial, np.ndarray): + if trial.ndim == 2: + trial = np.delete(trial, missing_chan_idx, axis=0) + else: + raise ValueError( + '"trial" field of the FieldTrip structure has an unknown format.' + ) + + return trial + + +def _create_info_chs_dig(ft_struct): + """Create the chs info field from the FieldTrip structure.""" + all_channels = ft_struct["label"] + ch_defaults = dict( + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + cal=1.0, + range=1.0, + unit_mul=FIFF.FIFF_UNITM_NONE, + loc=np.array([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1]), + unit=FIFF.FIFF_UNIT_V, + ) + try: + elec = ft_struct["elec"] + except KeyError: + elec = None + + try: + grad = ft_struct["grad"] + except KeyError: + grad = None + + if elec is None and grad is None: + warn( + "The supplied FieldTrip structure does not have an elec or grad " + "field. No channel locations will extracted and the kind of " + "channel might be inaccurate." + ) + if "chanpos" not in (elec or grad or {"chanpos": None}): + raise RuntimeError( + "This file was created with an old version of FieldTrip. You can " + "convert the data to the new version by loading it into FieldTrip " + "and applying ft_selectdata with an empty cfg structure on it. " + "Otherwise you can supply the Info field." 
+ ) + + chs = list() + dig = list() + counter = 0 + for idx_chan, cur_channel_label in enumerate(all_channels): + cur_ch = ch_defaults.copy() + cur_ch["ch_name"] = cur_channel_label + cur_ch["logno"] = idx_chan + 1 + cur_ch["scanno"] = idx_chan + 1 + if elec and cur_channel_label in elec["label"]: + cur_ch = _process_channel_eeg(cur_ch, elec) + assert cur_ch["coord_frame"] == FIFF.FIFFV_COORD_HEAD + # Ref gets ident=0 and we don't have it, so start at 1 + counter += 1 + d = DigPoint( + r=cur_ch["loc"][:3], + coord_frame=FIFF.FIFFV_COORD_HEAD, + kind=FIFF.FIFFV_POINT_EEG, + ident=counter, + ) + dig.append(d) + elif grad and cur_channel_label in grad["label"]: + cur_ch = _process_channel_meg(cur_ch, grad) + else: + if cur_channel_label.startswith("EOG"): + cur_ch["kind"] = FIFF.FIFFV_EOG_CH + cur_ch["coil_type"] = FIFF.FIFFV_COIL_EEG + elif cur_channel_label.startswith("ECG"): + cur_ch["kind"] = FIFF.FIFFV_ECG_CH + cur_ch["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR + elif cur_channel_label.startswith("STI"): + cur_ch["kind"] = FIFF.FIFFV_STIM_CH + cur_ch["coil_type"] = FIFF.FIFFV_COIL_NONE + else: + warn( + f"Cannot guess the correct type of channel {cur_channel_label}. " + "Making it a MISC channel." + ) + cur_ch["kind"] = FIFF.FIFFV_MISC_CH + cur_ch["coil_type"] = FIFF.FIFFV_COIL_NONE + + chs.append(cur_ch) + _ensure_fiducials_head(dig) + + return chs, dig + + +def _set_sfreq(ft_struct): + """Set the sample frequency.""" + try: + sfreq = ft_struct["fsample"] + except KeyError: + try: + time = ft_struct["time"] + except KeyError: + raise ValueError("No Source for sfreq found") + else: + t1, t2 = float(time[0]), float(time[1]) + sfreq = 1 / (t2 - t1) + try: + sfreq = float(sfreq) + except TypeError: + warn( + "FieldTrip structure contained multiple sample rates, trying the " + f"first of:\n{sfreq} Hz" + ) + sfreq = float(sfreq.ravel()[0]) + return sfreq + + +def _set_tmin(ft_struct): + """Set the start time before the event in evoked data if possible.""" + times = ft_struct["time"] + time_check = all(times[i][0] == times[i - 1][0] for i, x in enumerate(times)) + if time_check: + tmin = times[0][0] + else: + raise RuntimeError( + "Loading data with non-uniform times per epoch is not supported" + ) + return tmin + + +def _create_events(ft_struct, trialinfo_column): + """Create an event matrix from the FieldTrip structure.""" + if "trialinfo" not in ft_struct: + return None + + event_type = ft_struct["trialinfo"] + event_number = range(len(event_type)) + + if trialinfo_column < 0: + raise ValueError("trialinfo_column must be positive") + + available_ti_cols = 1 + if event_type.ndim == 2: + available_ti_cols = event_type.shape[1] + + if trialinfo_column > (available_ti_cols - 1): + raise ValueError( + "trialinfo_column is higher than the amount of columns in trialinfo." + ) + + event_trans_val = np.zeros(len(event_type)) + + if event_type.ndim == 2: + event_type = event_type[:, trialinfo_column] + + events = ( + np.vstack([np.array(event_number), event_trans_val, event_type]).astype("int").T + ) + + return events + + +def _create_event_metadata(ft_struct): + """Create event metadata from trialinfo.""" + pandas = _check_pandas_installed(strict=False) + if not pandas: + warn( + "The Pandas library is not installed. Not returning the original " + "trialinfo matrix as metadata." + ) + return None + + metadata = pandas.DataFrame(ft_struct["trialinfo"]) + + return metadata + + +def _process_channel_eeg(cur_ch, elec): + """Convert EEG channel from FieldTrip to MNE. 
+ + Parameters + ---------- + cur_ch: dict + Channel specific dictionary to populate. + + elec: dict + elec dict as loaded from the FieldTrip structure + + Returns + ------- + cur_ch: dict + The original dict (cur_ch) with the added information + """ + all_labels = np.asanyarray(elec["label"]) + chan_idx_in_elec = np.where(all_labels == cur_ch["ch_name"])[0][0] + position = np.squeeze(elec["chanpos"][chan_idx_in_elec, :]) + # chanunit = elec['chanunit'][chan_idx_in_elec] # not used/needed yet + position_unit = elec["unit"] + + position = position * _unit_dict[position_unit] + cur_ch["loc"] = np.hstack((position, np.zeros((9,)))) + cur_ch["unit"] = FIFF.FIFF_UNIT_V + cur_ch["kind"] = FIFF.FIFFV_EEG_CH + cur_ch["coil_type"] = FIFF.FIFFV_COIL_EEG + cur_ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD + + return cur_ch + + +def _process_channel_meg(cur_ch, grad): + """Convert MEG channel from FieldTrip to MNE. + + Parameters + ---------- + cur_ch: dict + Channel specific dictionary to populate. + + grad: dict + grad dict as loaded from the FieldTrip structure + + Returns + ------- + dict: The original dict (cur_ch) with the added information + """ + all_labels = np.asanyarray(grad["label"]) + chan_idx_in_grad = np.where(all_labels == cur_ch["ch_name"])[0][0] + gradtype = grad["type"] + chantype = grad["chantype"][chan_idx_in_grad] + position_unit = grad["unit"] + position = np.squeeze(grad["chanpos"][chan_idx_in_grad, :]) + position = position * _unit_dict[position_unit] + + if gradtype == "neuromag306" and "tra" in grad and "coilpos" in grad: + # Try to regenerate original channel pos. + idx_in_coilpos = np.where(grad["tra"][chan_idx_in_grad, :] != 0)[0] + cur_coilpos = grad["coilpos"][idx_in_coilpos, :] + cur_coilpos = cur_coilpos * _unit_dict[position_unit] + cur_coilori = grad["coilori"][idx_in_coilpos, :] + if chantype == "megmag": + position = cur_coilpos[0] - 0.0003 * cur_coilori[0] + if chantype == "megplanar": + tmp_pos = cur_coilpos - 0.0003 * cur_coilori + position = np.average(tmp_pos, axis=0) + + original_orientation = np.squeeze(grad["chanori"][chan_idx_in_grad, :]) + try: + orientation = rotation3d_align_z_axis(original_orientation).T + except AssertionError: + orientation = np.eye(3) + assert orientation.shape == (3, 3) + orientation = orientation.flatten() + # chanunit = grad['chanunit'][chan_idx_in_grad] # not used/needed yet + + cur_ch["loc"] = np.hstack((position, orientation)) + cur_ch["kind"] = FIFF.FIFFV_MEG_CH + if chantype == "megmag": + cur_ch["coil_type"] = FIFF.FIFFV_COIL_POINT_MAGNETOMETER + cur_ch["unit"] = FIFF.FIFF_UNIT_T + elif chantype == "megplanar": + cur_ch["coil_type"] = FIFF.FIFFV_COIL_VV_PLANAR_T1 + cur_ch["unit"] = FIFF.FIFF_UNIT_T_M + elif chantype == "refmag": + cur_ch["coil_type"] = FIFF.FIFFV_COIL_MAGNES_REF_MAG + cur_ch["unit"] = FIFF.FIFF_UNIT_T + elif chantype == "refgrad": + cur_ch["coil_type"] = FIFF.FIFFV_COIL_MAGNES_REF_GRAD + cur_ch["unit"] = FIFF.FIFF_UNIT_T + elif chantype == "meggrad": + cur_ch["coil_type"] = FIFF.FIFFV_COIL_AXIAL_GRAD_5CM + cur_ch["unit"] = FIFF.FIFF_UNIT_T + else: + raise RuntimeError(f"Unexpected coil type: {chantype}.") + + cur_ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD + + return cur_ch diff --git a/mne/io/fiff/__init__.py b/mne/io/fiff/__init__.py new file mode 100644 index 0000000..3d83b2c --- /dev/null +++ b/mne/io/fiff/__init__.py @@ -0,0 +1,10 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +"""FIF raw data reader.""" + +from .raw import Raw +from .raw import read_raw_fif + +RawFIF = Raw diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py new file mode 100644 index 0000000..f9e10a0 --- /dev/null +++ b/mne/io/fiff/raw.py @@ -0,0 +1,562 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import copy +import os.path as op +from pathlib import Path + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import read_meas_info +from ..._fiff.open import _fiff_get_fid, _get_next_fname, fiff_open +from ..._fiff.tag import _call_dict, read_tag +from ..._fiff.tree import dir_tree_find +from ..._fiff.utils import _mult_cal_one +from ...annotations import Annotations, _read_annotations_fif +from ...channels import fix_mag_coil_types +from ...event import AcqParserFIF +from ...utils import ( + _check_fname, + _file_like, + _on_missing, + check_fname, + fill_doc, + logger, + verbose, + warn, +) +from ..base import ( + BaseRaw, + _check_maxshield, + _check_raw_compatibility, + _get_fname_rep, + _RawShell, +) + + +@fill_doc +class Raw(BaseRaw): + """Raw data in FIF format. + + Parameters + ---------- + fname : path-like | file-like + The raw filename to load. For files that have automatically been split, + the split part will be automatically loaded. Filenames not ending with + ``raw.fif``, ``raw_sss.fif``, ``raw_tsss.fif``, ``_meg.fif``, + ``_eeg.fif``, or ``_ieeg.fif`` (with or without an optional additional + ``.gz`` extension) will generate a warning. If a file-like object is + provided, preloading must be used. + + .. versionchanged:: 0.18 + Support for file-like objects. + allow_maxshield : bool | str (default False) + If True, allow loading of data that has been recorded with internal + active compensation (MaxShield). Data recorded with MaxShield should + generally not be loaded directly, but should first be processed using + SSS/tSSS to remove the compensation signals that may also affect brain + activity. Can also be "yes" to load without eliciting a warning. + %(preload)s + %(on_split_missing)s + %(verbose)s + + Attributes + ---------- + %(info_not_none)s + ch_names : list of string + List of channels' names. + n_times : int + Total number of time points in the raw file. + times : ndarray + Time vector in seconds. Starts from 0, independently of `first_samp` + value. Time interval between consecutive time samples is equal to the + inverse of the sampling frequency. + duration : float + The duration of the raw file in seconds. + + .. versionadded:: 1.9 + preload : bool + Indicates whether raw data are in memory. + """ + + _extra_attributes = ( + "fix_mag_coil_types", + "acqparser", + "_read_raw_file", # this would be ugly to move, but maybe we should + ) + + @verbose + def __init__( + self, + fname, + allow_maxshield=False, + preload=False, + on_split_missing="raise", + verbose=None, + ): + raws = [] + do_check_ext = not _file_like(fname) + next_fname = fname + while next_fname is not None: + raw, next_fname, buffer_size_sec = self._read_raw_file( + next_fname, allow_maxshield, preload, do_check_ext + ) + do_check_ext = False + raws.append(raw) + if next_fname is not None: + if not op.exists(next_fname): + msg = ( + f"Split raw file detected but next file {next_fname} " + "does not exist. 
Ensure all files were transferred " + "properly and that split and original files were not " + "manually renamed on disk (split files should be " + "renamed by loading and re-saving with MNE-Python to " + "preserve proper filename linkage)." + ) + _on_missing(on_split_missing, msg, name="on_split_missing") + break + # If using a file-like object, we need to be careful about serialization and + # types. + # + # 1. We must change both the variable named "fname" here so that _get_argvalues + # (magic) does not store the file-like object. + # 2. We need to ensure "filenames" passed to the constructor below gets a list + # of Path or None. + # 3. We need to remove the file-like objects from _raw_extras. This must + # be done *after* the super().__init__ call, because the constructor + # needs the file-like objects to read the data (which it will do because we + # force preloading for file-like objects). + + # Avoid file-like in _get_argvalues (1) + fname = _path_from_fname(fname) + + _check_raw_compatibility(raws) + super().__init__( + copy.deepcopy(raws[0].info), + preload=False, + first_samps=[r.first_samp for r in raws], + last_samps=[r.last_samp for r in raws], + # Avoid file-like objects in raw.filenames (2) + filenames=[_path_from_fname(r._raw_extras["filename"]) for r in raws], + raw_extras=[r._raw_extras for r in raws], + orig_format=raws[0].orig_format, + dtype=None, + buffer_size_sec=buffer_size_sec, + verbose=verbose, + ) + + # combine annotations + self.set_annotations(raws[0].annotations, emit_warning=False) + + # Add annotations for in-data skips + for extra in self._raw_extras: + mask = [ent is None for ent in extra["ent"]] + start = extra["bounds"][:-1][mask] + stop = extra["bounds"][1:][mask] - 1 + duration = (stop - start + 1.0) / self.info["sfreq"] + annot = Annotations( + onset=(start / self.info["sfreq"]), + duration=duration, + description="BAD_ACQ_SKIP", + orig_time=self.info["meas_date"], + ) + + self._annotations += annot + + if preload: + self._preload_data(preload) + else: + self.preload = False + # Avoid file-like objects in _raw_extras (3) + for extra in self._raw_extras: + if not isinstance(extra["filename"], Path): + extra["filename"] = None + + @verbose + def _read_raw_file( + self, fname, allow_maxshield, preload, do_check_ext=True, verbose=None + ): + """Read in header information from a raw file.""" + logger.info(f"Opening raw data file {fname}...") + + # Read in the whole file if preload is on and .fif.gz (saves time) + if not _file_like(fname): + if do_check_ext: + endings = ( + "raw.fif", + "raw_sss.fif", + "raw_tsss.fif", + "_meg.fif", + "_eeg.fif", + "_ieeg.fif", + ) + endings += tuple([f"{e}.gz" for e in endings]) + check_fname(fname, "raw", endings) + # filename + fname = _check_fname(fname, "read", True, "fname") + whole_file = preload if fname.suffix == ".gz" else False + else: + # file-like + if not preload: + raise ValueError("preload must be used with file-like objects") + whole_file = True + ff, tree, _ = fiff_open(fname, preload=whole_file) + with ff as fid: + # Read the measurement info + + info, meas = read_meas_info(fid, tree, clean_bads=True) + annotations = _read_annotations_fif(fid, tree) + + # Locate the data of interest + raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA) + if len(raw_node) == 0: + raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA) + if len(raw_node) == 0: + raw_node = dir_tree_find(meas, FIFF.FIFFB_IAS_RAW_DATA) + if len(raw_node) == 0: + raise ValueError(f"No raw data in {_get_fname_rep(fname)}") + 
_check_maxshield(allow_maxshield)
+                    with info._unlock():
+                        info["maxshield"] = True
+            del meas
+
+            if len(raw_node) == 1:
+                raw_node = raw_node[0]
+
+            # Process the directory
+            directory = raw_node["directory"]
+            nent = raw_node["nent"]
+            nchan = int(info["nchan"])
+            first = 0
+            first_samp = 0
+            first_skip = 0
+
+            # Get first sample tag if it is there
+            if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:
+                tag = read_tag(fid, directory[first].pos)
+                first_samp = int(tag.data.item())
+                first += 1
+                _check_entry(first, nent)
+
+            # Omit initial skip
+            if directory[first].kind == FIFF.FIFF_DATA_SKIP:
+                # This first skip can be applied only after we know the bufsize
+                tag = read_tag(fid, directory[first].pos)
+                first_skip = int(tag.data.item())
+                first += 1
+                _check_entry(first, nent)
+
+            raw = _RawShell()
+            raw.first_samp = first_samp
+            if info["meas_date"] is None and annotations is not None:
+                # we need to adjust annotations.onset as when there is no meas
+                # date set_annotations considers that the origin of time is the
+                # first available sample (ignores first_samp)
+                annotations.onset -= first_samp / info["sfreq"]
+            raw.set_annotations(annotations)
+
+            # Go through the remaining tags in the directory
+            raw_extras = list()
+            nskip = 0
+            orig_format = None
+
+            _byte_dict = {
+                FIFF.FIFFT_DAU_PACK16: 2,
+                FIFF.FIFFT_SHORT: 2,
+                FIFF.FIFFT_FLOAT: 4,
+                FIFF.FIFFT_DOUBLE: 8,
+                FIFF.FIFFT_INT: 4,
+                FIFF.FIFFT_COMPLEX_FLOAT: 8,
+                FIFF.FIFFT_COMPLEX_DOUBLE: 16,
+            }
+            _orig_format_dict = {
+                FIFF.FIFFT_DAU_PACK16: "short",
+                FIFF.FIFFT_SHORT: "short",
+                FIFF.FIFFT_FLOAT: "single",
+                FIFF.FIFFT_DOUBLE: "double",
+                FIFF.FIFFT_INT: "int",
+                FIFF.FIFFT_COMPLEX_FLOAT: "single",
+                FIFF.FIFFT_COMPLEX_DOUBLE: "double",
+            }
+
+            for k in range(first, nent):
+                ent = directory[k]
+                # There can be skips in the data (e.g., if the user unclicked
+                # and re-clicked the button)
+                if ent.kind == FIFF.FIFF_DATA_BUFFER:
+                    # Figure out the number of samples in this buffer
+                    try:
+                        div = _byte_dict[ent.type]
+                    except KeyError:
+                        raise RuntimeError(
+                            f"Cannot handle data buffers of type {ent.type}"
+                        ) from None
+                    nsamp = ent.size // (div * nchan)
+                    if orig_format is None:
+                        orig_format = _orig_format_dict[ent.type]
+
+                    # Do we have an initial skip pending?
+                    if first_skip > 0:
+                        first_samp += nsamp * first_skip
+                        raw.first_samp = first_samp
+                        first_skip = 0
+
+                    # Do we have a skip pending?
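+                    # A pending skip of n buffers stands for n * nsamp samples
+                    # with no stored data; it is recorded with ent=None so the
+                    # reader can zero-fill the gap.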
+ if nskip > 0: + raw_extras.append( + dict( + ent=None, + first=first_samp, + nsamp=nskip * nsamp, + last=first_samp + nskip * nsamp - 1, + ) + ) + first_samp += nskip * nsamp + nskip = 0 + + # Add a data buffer + raw_extras.append( + dict( + ent=ent, + first=first_samp, + last=first_samp + nsamp - 1, + nsamp=nsamp, + ) + ) + first_samp += nsamp + elif ent.kind == FIFF.FIFF_DATA_SKIP: + tag = read_tag(fid, ent.pos) + nskip = int(tag.data.item()) + + next_fname = _get_next_fname(fid, _path_from_fname(fname), tree) + + # reformat raw_extras to be a dict of list/ndarray rather than + # list of dict (faster access) + raw_extras = {key: [r[key] for r in raw_extras] for key in raw_extras[0]} + for key in raw_extras: + if key != "ent": # dict or None + raw_extras[key] = np.array(raw_extras[key], int) + if not np.array_equal(raw_extras["last"][:-1], raw_extras["first"][1:] - 1): + raise RuntimeError("FIF file appears to be broken") + bounds = np.cumsum( + np.concatenate([raw_extras["first"][:1], raw_extras["nsamp"]]) + ) + raw_extras["bounds"] = bounds + assert len(raw_extras["bounds"]) == len(raw_extras["ent"]) + 1 + # store the original buffer size + buffer_size_sec = np.median(raw_extras["nsamp"]) / info["sfreq"] + del raw_extras["first"] + del raw_extras["last"] + del raw_extras["nsamp"] + raw_extras["filename"] = fname + + raw.last_samp = first_samp - 1 + raw.orig_format = orig_format + + # Add the calibration factors + cals = np.zeros(info["nchan"]) + for k in range(info["nchan"]): + cals[k] = info["chs"][k]["range"] * info["chs"][k]["cal"] + + raw._cals = cals + raw._raw_extras = raw_extras + logger.info( + " Range : %d ... %d = %9.3f ... %9.3f secs", + raw.first_samp, + raw.last_samp, + float(raw.first_samp) / info["sfreq"], + float(raw.last_samp) / info["sfreq"], + ) + + raw.info = info + + logger.info("Ready.") + + return raw, next_fname, buffer_size_sec + + @property + def _dtype(self): + """Get the dtype to use to store data from disk.""" + if self._dtype_ is not None: + return self._dtype_ + dtype = None + for raw_extra in self._raw_extras: + for ent in raw_extra["ent"]: + if ent is not None: + if ent.type in ( + FIFF.FIFFT_COMPLEX_FLOAT, + FIFF.FIFFT_COMPLEX_DOUBLE, + ): + dtype = np.complex128 + else: + dtype = np.float64 + break + if dtype is not None: + break + if dtype is None: + raise RuntimeError("bug in reading") + self._dtype_ = dtype + return dtype + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file.""" + n_bad = 0 + with _fiff_get_fid(self._raw_extras[fi]["filename"]) as fid: + bounds = self._raw_extras[fi]["bounds"] + ents = self._raw_extras[fi]["ent"] + nchan = self._raw_extras[fi]["orig_nchan"] + use = (stop > bounds[:-1]) & (start < bounds[1:]) + offset = 0 + for ei in np.where(use)[0]: + first = bounds[ei] + last = bounds[ei + 1] + nsamp = last - first + ent = ents[ei] + first_pick = max(start - first, 0) + last_pick = min(nsamp, stop - first) + picksamp = last_pick - first_pick + this_start = offset + offset += picksamp + this_stop = offset + # only read data if it exists + if ent is None: + continue # just use zeros for gaps + # faster to always read full tag, taking advantage of knowing the header + # already (cutting out some of read_tag) ... + fid.seek(ent.pos + 16, 0) + one = _call_dict[ent.type](fid, ent, shape=None, rlims=None) + try: + one.shape = (nsamp, nchan) + except AttributeError: # one is None + n_bad += picksamp + else: + # ... 
then pick samples we want + if first_pick != 0 or last_pick != nsamp: + one = one[first_pick:last_pick] + _mult_cal_one( + data[:, this_start:this_stop], + one.T, + idx, + cals, + mult, + ) + if n_bad: + warn( + f"FIF raw buffer could not be read, acquisition error " + f"likely: {n_bad} samples set to zero" + ) + assert offset == stop - start + + def fix_mag_coil_types(self): + """Fix Elekta magnetometer coil types. + + Returns + ------- + raw : instance of Raw + The raw object. Operates in place. + + Notes + ----- + This function changes magnetometer coil types 3022 (T1: SQ20483N) and + 3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition + records in the info structure. + + Neuromag Vectorview systems can contain magnetometers with two + different coil sizes (3022 and 3023 vs. 3024). The systems + incorporating coils of type 3024 were introduced last and are used at + the majority of MEG sites. At some sites with 3024 magnetometers, + the data files have still defined the magnetometers to be of type + 3022 to ensure compatibility with older versions of Neuromag software. + In the MNE software as well as in the present version of Neuromag + software coil type 3024 is fully supported. Therefore, it is now safe + to upgrade the data files to use the true coil type. + + .. note:: The effect of the difference between the coil sizes on the + current estimates computed by the MNE software is very small. + Therefore the use of mne_fix_mag_coil_types is not mandatory. + """ + fix_mag_coil_types(self.info) + return self + + @property + def acqparser(self): + """The AcqParserFIF for the measurement info. + + See Also + -------- + mne.AcqParserFIF + """ + if getattr(self, "_acqparser", None) is None: + self._acqparser = AcqParserFIF(self.info) + return self._acqparser + + +def _check_entry(first, nent): + """Sanity check entries.""" + if first >= nent: + raise OSError("Could not read data, perhaps this is a corrupt file") + + +@fill_doc +def read_raw_fif( + fname, allow_maxshield=False, preload=False, on_split_missing="raise", verbose=None +) -> Raw: + """Reader function for Raw FIF data. + + Parameters + ---------- + fname : path-like | file-like + The raw filename to load. For files that have automatically been split, + the split part will be automatically loaded. Filenames should end + with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif, + raw_tsss.fif.gz, or _meg.fif. If a file-like object is provided, + preloading must be used. + + .. versionchanged:: 0.18 + Support for file-like objects. + allow_maxshield : bool | str (default False) + If True, allow loading of data that has been recorded with internal + active compensation (MaxShield). Data recorded with MaxShield should + generally not be loaded directly, but should first be processed using + SSS/tSSS to remove the compensation signals that may also affect brain + activity. Can also be "yes" to load without eliciting a warning. + %(preload)s + %(on_split_missing)s + %(verbose)s + + Returns + ------- + raw : instance of Raw + A Raw object containing FIF data. + + Notes + ----- + .. versionadded:: 0.9.0 + + When reading a FIF file, note that the first N seconds annotated + ``BAD_ACQ_SKIP`` are **skipped**. They are removed from ``raw.times`` and + ``raw.n_times`` parameters but ``raw.first_samp`` and ``raw.first_time`` + are updated accordingly. 
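+
+    A minimal usage sketch (``sample_raw.fif`` is a hypothetical filename)::
+
+        raw = read_raw_fif("sample_raw.fif", preload=True)
+        data, times = raw[:, :100]  # first 100 samples of every channel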
+    """
+    return Raw(
+        fname=fname,
+        allow_maxshield=allow_maxshield,
+        preload=preload,
+        verbose=verbose,
+        on_split_missing=on_split_missing,
+    )
+
+
+def _path_from_fname(fname) -> Path | None:
+    if not isinstance(fname, Path):
+        if isinstance(fname, str):
+            fname = Path(fname)
+        else:
+            # Try to get a filename from the file-like object
+            try:
+                fname = Path(fname.name)
+            except Exception:
+                fname = None
+    return fname
diff --git a/mne/io/fil/__init__.py b/mne/io/fil/__init__.py
new file mode 100644
index 0000000..ad78045
--- /dev/null
+++ b/mne/io/fil/__init__.py
@@ -0,0 +1,5 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from .fil import read_raw_fil
diff --git a/mne/io/fil/fil.py b/mne/io/fil/fil.py
new file mode 100644
index 0000000..a7dd157
--- /dev/null
+++ b/mne/io/fil/fil.py
@@ -0,0 +1,336 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import json
+import pathlib
+
+import numpy as np
+
+from ..._fiff._digitization import _make_dig_points
+from ..._fiff.constants import FIFF
+from ..._fiff.meas_info import _empty_info
+from ..._fiff.utils import _read_segments_file
+from ..._fiff.write import get_new_file_id
+from ...transforms import Transform, apply_trans, get_ras_to_neuromag_trans
+from ...utils import _check_fname, fill_doc, verbose, warn
+from ..base import BaseRaw
+from .sensors import (
+    _get_plane_vectors,
+    _get_pos_units,
+    _refine_sensor_orientation,
+    _size2units,
+)
+
+
+@verbose
+def read_raw_fil(
+    binfile, precision="single", preload=False, *, verbose=None
+) -> "RawFIL":
+    """Raw object from FIL-OPMEG formatted data.
+
+    Parameters
+    ----------
+    binfile : path-like
+        Path to the MEG data binary (ending in ``'_meg.bin'``).
+    precision : str, optional
+        Precision of the stored data: ``'single'`` if 32-bit or ``'double'``
+        if 64-bit (default is ``'single'``).
+    %(preload)s
+    %(verbose)s
+
+    Returns
+    -------
+    raw : instance of RawFIL
+        The raw data.
+        See :class:`mne.io.Raw` for documentation of attributes and methods.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods of RawFIL.
+    """
+    return RawFIL(binfile, precision=precision, preload=preload)
+
+
+@fill_doc
+class RawFIL(BaseRaw):
+    """Raw object from FIL-OPMEG formatted data.
+
+    Parameters
+    ----------
+    binfile : path-like
+        Path to the MEG data binary (ending in ``'_meg.bin'``).
+    precision : str, optional
+        Precision of the stored data: ``'single'`` if 32-bit or ``'double'``
+        if 64-bit (default is ``'single'``).
+    %(preload)s
+
+    Returns
+    -------
+    raw : instance of RawFIL
+        The raw data.
+        See :class:`mne.io.Raw` for documentation of attributes and methods.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods of RawFIL.
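+
+    Notes
+    -----
+    The reader expects the companion files sharing the binary's stem:
+    ``*_meg.json`` and ``*_channels.tsv`` are required, while
+    ``*_positions.tsv`` and ``*_coordsystem.json`` are optional (a warning
+    is emitted when either is absent).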
+ """ + + def __init__(self, binfile, precision="single", preload=False): + if precision == "single": + dt = np.dtype(">f") + bps = 4 + else: + dt = np.dtype(">d") + bps = 8 + + sample_info = dict() + sample_info["dt"] = dt + sample_info["bps"] = bps + + files = _get_file_names(binfile) + + chans = _from_tsv(files["chans"]) + nchans = len(chans["name"]) + nsamples = _determine_nsamples(files["bin"], nchans, precision) - 1 + sample_info["nsamples"] = nsamples + + raw_extras = list() + raw_extras.append(sample_info) + + chans["pos"] = [None] * nchans + chans["ori"] = [None] * nchans + if files["positions"].is_file(): + chanpos = _from_tsv(files["positions"]) + nlocs = len(chanpos["name"]) + for ii in range(0, nlocs): + idx = chans["name"].index(chanpos["name"][ii]) + tmp = np.array( + [chanpos["Px"][ii], chanpos["Py"][ii], chanpos["Pz"][ii]] + ) + chans["pos"][idx] = tmp.astype(np.float64) + tmp = np.array( + [chanpos["Ox"][ii], chanpos["Oy"][ii], chanpos["Oz"][ii]] + ) + chans["ori"][idx] = tmp.astype(np.float64) + else: + warn("No sensor position information found.") + + with open(files["meg"]) as fid: + meg = json.load(fid) + info = _compose_meas_info(meg, chans) + + super().__init__( + info, + preload, + filenames=[files["bin"]], + raw_extras=raw_extras, + last_samps=[nsamples], + orig_format=precision, + ) + + if files["coordsystem"].is_file(): + with open(files["coordsystem"]) as fid: + csys = json.load(fid) + hc = csys["HeadCoilCoordinates"] + + for key in hc: + if key.lower() == "lpa": + lpa = np.asarray(hc[key]) + elif key.lower() == "rpa": + rpa = np.asarray(hc[key]) + elif key.lower().startswith("nas"): + nas = np.asarray(hc[key]) + else: + warn(f"{key} is not a valid fiducial name!") + + size = np.linalg.norm(nas - rpa) + unit, sf = _size2units(size) + # TODO: These are not guaranteed to exist and could lead to a + # confusing error message, should fix later + lpa /= sf + rpa /= sf + nas /= sf + + t = get_ras_to_neuromag_trans(nas, lpa, rpa) + + # transform fiducial points + nas = apply_trans(t, nas) + lpa = apply_trans(t, lpa) + rpa = apply_trans(t, rpa) + + with self.info._unlock(): + self.info["dig"] = _make_dig_points( + nasion=nas, lpa=lpa, rpa=rpa, coord_frame="meg" + ) + else: + warn( + "No fiducials found in files, defaulting sensor array to " + "FIFFV_COORD_DEVICE, this may cause problems later!" 
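+                # With no fiducials available, t below stays the identity
+                # matrix, so the device-to-head transform is a no-op.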
+ ) + t = np.eye(4) + + with self.info._unlock(): + self.info["dev_head_t"] = Transform( + FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_HEAD, t + ) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + si = self._raw_extras[fi] + _read_segments_file( + self, data, idx, fi, start, stop, cals, mult, dtype=si["dt"] + ) + + +def _convert_channel_info(chans): + """Convert the imported _channels.tsv into the chs element of raw.info.""" + nmeg = nstim = nmisc = nref = 0 + + if not all(p is None for p in chans["pos"]): + _, sf = _get_pos_units(chans["pos"]) + + chs = list() + for ii in range(len(chans["name"])): + ch = dict( + scanno=ii + 1, + range=1.0, + cal=1.0, + loc=np.full(12, np.nan), + unit_mul=FIFF.FIFF_UNITM_NONE, + ch_name=chans["name"][ii], + coil_type=FIFF.FIFFV_COIL_NONE, + ) + chs.append(ch) + + # create the channel information + if chans["pos"][ii] is not None: + r0 = chans["pos"][ii].copy() / sf # mm to m + ez = chans["ori"][ii].copy() + ez = ez / np.linalg.norm(ez) + ex, ey = _get_plane_vectors(ez) + ch["loc"] = np.concatenate([r0, ex, ey, ez]) + + if chans["type"][ii] == "MEGMAG": + nmeg += 1 + ch.update( + logno=nmeg, + coord_frame=FIFF.FIFFV_COORD_DEVICE, + kind=FIFF.FIFFV_MEG_CH, + unit=FIFF.FIFF_UNIT_T, + coil_type=FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2, + ) + elif chans["type"][ii] == "MEGREFMAG": + nref += 1 + ch.update( + logno=nref, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_REF_MEG_CH, + unit=FIFF.FIFF_UNIT_T, + coil_type=FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2, + ) + elif chans["type"][ii] == "TRIG": + nstim += 1 + ch.update( + logno=nstim, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_STIM_CH, + unit=FIFF.FIFF_UNIT_V, + ) + else: + nmisc += 1 + ch.update( + logno=nmisc, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + kind=FIFF.FIFFV_MISC_CH, + unit=FIFF.FIFF_UNIT_NONE, + ) + + # set the calibration based on the units - MNE expects T units for meg + # and V for eeg + if chans["units"][ii] == "fT": + ch.update(cal=1e-15) + elif chans["units"][ii] == "pT": + ch.update(cal=1e-12) + elif chans["units"][ii] == "nT": + ch.update(cal=1e-9) + elif chans["units"][ii] == "mV": + ch.update(cal=1e3) + elif chans["units"][ii] == "uV": + ch.update(cal=1e6) + + return chs + + +def _compose_meas_info(meg, chans): + """Create info structure.""" + info = _empty_info(meg["SamplingFrequency"]) + # Collect all the necessary data from the structures read + info["meas_id"] = get_new_file_id() + tmp = _convert_channel_info(chans) + info["chs"] = _refine_sensor_orientation(tmp) + info["line_freq"] = meg["PowerLineFrequency"] + info._update_redundant() + info["bads"] = _read_bad_channels(chans) + info._unlocked = False + return info + + +def _determine_nsamples(bin_fname, nchans, precision): + """Identify how many temporal samples in a dataset.""" + bsize = bin_fname.stat().st_size + if precision == "single": + bps = 4 + else: + bps = 8 + nsamples = int(bsize / (nchans * bps)) + return nsamples + + +def _read_bad_channels(chans): + """Check _channels.tsv file to look for premarked bad channels.""" + bads = list() + for ii in range(0, len(chans["status"])): + if chans["status"][ii] == "bad": + bads.append(chans["name"][ii]) + return bads + + +def _from_tsv(fname, dtypes=None): + """Read a tsv file into a dict (which we know is ordered).""" + data = np.loadtxt( + fname, dtype=str, delimiter="\t", ndmin=2, comments=None, encoding="utf-8-sig" + ) + column_names = data[0, :] + info = data[1:, :] + data_dict = dict() + if dtypes is None: 
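+        # Default: keep every column as plain strings.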
+        dtypes = [str] * info.shape[1]
+    if not isinstance(dtypes, list | tuple):
+        dtypes = [dtypes] * info.shape[1]
+    if not len(dtypes) == info.shape[1]:
+        raise ValueError(
+            f"dtypes length mismatch. Provided: {len(dtypes)}, "
+            f"Expected: {info.shape[1]}"
+        )
+    for i, name in enumerate(column_names):
+        data_dict[name] = info[:, i].astype(dtypes[i]).tolist()
+    return data_dict
+
+
+def _get_file_names(binfile):
+    """Guess the filenames based on predicted suffixes."""
+    binfile = pathlib.Path(
+        _check_fname(binfile, overwrite="read", must_exist=True, name="fname")
+    )
+    if not (binfile.suffix == ".bin" and binfile.stem.endswith("_meg")):
+        raise ValueError(f"File must be a filename ending in _meg.bin, got {binfile}")
+    files = dict()
+    dir_ = binfile.parent
+    root = binfile.stem[:-4]  # no _meg
+    files["bin"] = dir_ / (root + "_meg.bin")
+    files["meg"] = dir_ / (root + "_meg.json")
+    files["chans"] = dir_ / (root + "_channels.tsv")
+    files["positions"] = dir_ / (root + "_positions.tsv")
+    files["coordsystem"] = dir_ / (root + "_coordsystem.json")
+    return files
diff --git a/mne/io/fil/sensors.py b/mne/io/fil/sensors.py
new file mode 100644
index 0000000..6431a16
--- /dev/null
+++ b/mne/io/fil/sensors.py
@@ -0,0 +1,145 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from copy import deepcopy
+
+import numpy as np
+
+from ...utils import logger
+
+
+def _refine_sensor_orientation(chanin):
+    """Improve orientation matrices based on multi-axis measures.
+
+    The ex and ey elements from _convert_channel_info were not oriented
+    based on the physical orientation of the sensor. It doesn't have to be
+    this way: if available, the orientation information from multi-axis
+    recordings can be used to refine these elements.
+    """
+    logger.info("Refining sensor orientations...")
+    chanout = deepcopy(chanin)
+    tmpname = list()
+    for ii in range(len(chanin)):
+        tmpname.append(chanin[ii]["ch_name"])
+
+    for ii in range(len(chanin)):
+        tmploc = deepcopy(chanin[ii]["loc"])
+        tmploc = tmploc.reshape(3, 4, order="F")
+        if not np.isnan(tmploc.sum()):
+            target, flipFlag = _guess_other_chan_axis(tmpname, ii)
+            if not np.isnan(target):
+                targetloc = deepcopy(chanin[target]["loc"])
+                if not np.isnan(targetloc.sum()):
+                    targetloc = targetloc.reshape(3, 4, order="F")
+                    tmploc[:, 2] = targetloc[:, 3]
+                    tmploc[:, 1] = flipFlag * np.cross(tmploc[:, 2], tmploc[:, 3])
+                    chanout[ii]["loc"] = tmploc.reshape(12, order="F")
+    logger.info("[done]")
+    return chanout
+
+
+def _guess_other_chan_axis(tmpname, seedID):
+    """Try to guess the name of another axis of a multi-axis sensor."""
+    # see if it's using the old RAD/TAN convention first, otherwise use XYZ
+    if tmpname[seedID][-3:] == "RAD":
+        prefix1 = "RAD"
+        prefix2 = "TAN"
+        flipflag = 1.0
+    elif tmpname[seedID][-3:] == "TAN":
+        prefix1 = "TAN"
+        prefix2 = "RAD"
+        flipflag = -1.0
+    elif tmpname[seedID][-1:] == "Z" or tmpname[seedID][-3:] == "[Z]":
+        prefix1 = "Z"
+        prefix2 = "Y"
+        flipflag = -1.0
+    elif tmpname[seedID][-1:] == "Y" or tmpname[seedID][-3:] == "[Y]":
+        prefix1 = "Y"
+        prefix2 = "Z"
+        flipflag = 1.0
+    elif tmpname[seedID][-1:] == "X" or tmpname[seedID][-3:] == "[X]":
+        prefix1 = "X"
+        prefix2 = "Y"
+        flipflag = 1.0
+    else:
+        prefix1 = "?"
+        prefix2 = "?"
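+        # Unknown naming scheme: the "?" placeholders cannot match a real
+        # channel name, so the lookup below yields NaN and no refinement is
+        # applied for this channel.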
+        flipflag = 1.0
+
+    target_name = tmpname[seedID][: -len(prefix1)] + prefix2
+
+    target_id = np.where([t == target_name for t in tmpname])[0]
+    target_id = target_id[0] if len(target_id) else np.nan
+
+    return target_id, flipflag
+
+
+def _get_pos_units(pos):
+    """Get the units of a point cloud.
+
+    Determines the units of a point cloud of sensor positions and provides
+    the scale factor required to convert the units to meters.
+    """
+    # get rid of None elements
+    nppos = np.empty((0, 3))
+    for ii in range(0, len(pos)):
+        if pos[ii] is not None and sum(np.isnan(pos[ii])) == 0:
+            nppos = np.vstack((nppos, pos[ii]))
+
+    idrange = np.empty(shape=(0, 3))
+    for ii in range(0, 3):
+        q90, q10 = np.percentile(nppos[:, ii], [90, 10])
+        idrange = np.append(idrange, q90 - q10)
+
+    size = np.linalg.norm(idrange)
+
+    unit, sf = _size2units(size)
+
+    return unit, sf
+
+
+def _size2units(size):
+    """Convert the size returned from _get_pos_units into a physical unit."""
+    if size >= 0.050 and size < 0.500:
+        unit = "m"
+        sf = 1
+    elif size >= 0.50 and size < 5:
+        unit = "dm"
+        sf = 10
+    elif size >= 5 and size < 50:
+        unit = "cm"
+        sf = 100
+    elif size >= 50 and size < 500:
+        unit = "mm"
+        sf = 1000
+    else:
+        unit = "unknown"
+        sf = 1
+
+    return unit, sf
+
+
+def _get_plane_vectors(ez):
+    """Get two vectors orthogonal to ez (ez will be modified in place).
+
+    Note: the ex and ey positions will not be realistic; this can be fixed
+    using _refine_sensor_orientation.
+    """
+    assert ez.shape == (3,)
+    ez_len = np.sqrt(np.sum(ez * ez))
+    if ez_len == 0:
+        raise RuntimeError("Zero length normal. Cannot proceed.")
+    if np.abs(ez_len - np.abs(ez[2])) < 1e-5:  # ez already in z-direction
+        ex = np.array([1.0, 0.0, 0.0])
+    else:
+        ex = np.zeros(3)
+        if ez[1] < ez[2]:
+            ex[0 if ez[0] < ez[1] else 1] = 1.0
+        else:
+            ex[0 if ez[0] < ez[2] else 2] = 1.0
+    ez /= ez_len
+    ex -= np.dot(ez, ex) * ez
+    ex /= np.sqrt(np.sum(ex * ex))
+    ey = np.cross(ez, ex)
+    return ex, ey
diff --git a/mne/io/hitachi/__init__.py b/mne/io/hitachi/__init__.py
new file mode 100644
index 0000000..fc2cdec
--- /dev/null
+++ b/mne/io/hitachi/__init__.py
@@ -0,0 +1,7 @@
+"""fNIRS module for conversion to FIF."""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+from .hitachi import read_raw_hitachi
diff --git a/mne/io/hitachi/hitachi.py b/mne/io/hitachi/hitachi.py
new file mode 100644
index 0000000..9d8e27b
--- /dev/null
+++ b/mne/io/hitachi/hitachi.py
@@ -0,0 +1,342 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import datetime as dt
+import re
+
+import numpy as np
+
+from ..._fiff.constants import FIFF
+from ..._fiff.meas_info import _merge_info, create_info
+from ..._fiff.utils import _mult_cal_one
+from ...utils import _check_fname, _check_option, fill_doc, logger, verbose, warn
+from ..base import BaseRaw
+from ..nirx.nirx import _read_csv_rows_cols
+
+
+@fill_doc
+def read_raw_hitachi(fname, preload=False, verbose=None) -> "RawHitachi":
+    """Reader for a Hitachi fNIRS recording.
+
+    Parameters
+    ----------
+    %(hitachi_fname)s
+    %(preload)s
+    %(verbose)s
+
+    Returns
+    -------
+    raw : instance of RawHitachi
+        A Raw object containing Hitachi data.
+        See :class:`mne.io.Raw` for documentation of attributes and methods.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods of RawHitachi.
+
+    Notes
+    -----
+    %(hitachi_notes)s
+    """
+    return RawHitachi(fname, preload, verbose=verbose)
+
+
+def _check_bad(cond, msg):
+    if cond:
+        raise RuntimeError(f"Could not parse file: {msg}")
+
+
+@fill_doc
+class RawHitachi(BaseRaw):
+    """Raw object from a Hitachi fNIRS file.
+
+    Parameters
+    ----------
+    %(hitachi_fname)s
+    %(preload)s
+    %(verbose)s
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods.
+
+    Notes
+    -----
+    %(hitachi_notes)s
+    """
+
+    @verbose
+    def __init__(self, fname, preload=False, *, verbose=None):
+        if not isinstance(fname, list | tuple):
+            fname = [fname]
+        fname = list(fname)  # our own list that we can modify
+        for fi, this_fname in enumerate(fname):
+            fname[fi] = _check_fname(this_fname, "read", True, f"fname[{fi}]")
+        infos = list()
+        probes = list()
+        last_samps = list()
+        S_offset = D_offset = 0
+        ignore_names = ["Time"]
+        for this_fname in fname:
+            info, extra, last_samp, offsets = _get_hitachi_info(
+                this_fname, S_offset, D_offset, ignore_names
+            )
+            ignore_names = list(set(ignore_names + info["ch_names"]))
+            S_offset += offsets[0]
+            D_offset += offsets[1]
+            infos.append(info)
+            probes.append(extra)
+            last_samps.append(last_samp)
+        # combine infos
+        if len(fname) > 1:
+            info = _merge_info(infos)
+        else:
+            info = infos[0]
+        if len(set(last_samps)) != 1:
+            raise RuntimeError(
+                f"All files must have the same number of samples, got: {last_samps}"
+            )
+        last_samps = [last_samps[0]]
+        raw_extras = [dict(probes=probes)]
+        # One representative filename is good enough here
+        # (additional filenames indicate temporal concat, not ch concat)
+        super().__init__(
+            info,
+            preload,
+            filenames=[fname[0]],
+            last_samps=last_samps,
+            raw_extras=raw_extras,
+            verbose=verbose,
+        )
+
+    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
+        """Read a segment of data from a file."""
+        this_data = list()
+        for this_probe in self._raw_extras[fi]["probes"]:
+            this_data.append(
+                _read_csv_rows_cols(
+                    this_probe["fname"],
+                    start,
+                    stop,
+                    this_probe["keep_mask"],
+                    this_probe["bounds"],
+                    sep=",",
+                    replace=lambda x: x.replace("\r", "\n")
+                    .replace("\n\n", "\n")
+                    .replace("\n", ",")
+                    .replace(":", ""),
+                ).T
+            )
+        this_data = np.concatenate(this_data, axis=0)
+        _mult_cal_one(data, this_data, idx, cals, mult)
+        return data
+
+
+def _get_hitachi_info(fname, S_offset, D_offset, ignore_names):
+    logger.info(f"Loading {fname}")
+    raw_extra = dict(fname=fname)
+    info_extra = dict()
+    subject_info = dict()
+    ch_wavelengths = dict()
+    fnirs_wavelengths = [None, None]
+    meas_date = age = ch_names = sfreq = None
+    with open(fname, "rb") as fid:
+        lines = fid.read()
+        lines = lines.decode("latin-1").rstrip("\r\n")
+    bounds = [0]
+    end = "\n" if "\n" in lines else "\r"
+    bounds.extend(a.end() for a in re.finditer(end, lines))
+    bounds.append(len(lines))
+    lines = lines.split(end)
+    assert len(bounds) == len(lines) + 1
+    line = lines[0].rstrip(",\r\n")
+    _check_bad(line != "Header", "no header found")
+    li = 0
+    mode = None
+    for li, line in enumerate(lines[1:], 1):
+        # Newer format has some blank lines
+        if len(line) == 0:
+            continue
+        parts = line.rstrip(",\r\n").split(",")
+        if len(parts) == 0:  # some header lines are blank
+            continue
+        kind, parts = parts[0], parts[1:]
+        if len(parts) == 0:
+            parts = [""]  # some fields (e.g., Comment) meaningfully blank
+        if kind == "File Version":
+            logger.info(f"Reading Hitachi fNIRS file version {parts[0]}")
+        elif kind == "AnalyzeMode":
+            
_check_bad(parts != ["Continuous"], f"not continuous data ({parts})") + elif kind == "Sampling Period[s]": + sfreq = 1 / float(parts[0]) + elif kind == "Exception": + raise NotImplementedError(kind) + elif kind == "Comment": + info_extra["description"] = parts[0] + elif kind == "ID": + subject_info["his_id"] = parts[0] + elif kind == "Name": + if len(parts): + name = parts[0].split(" ") + if len(name): + subject_info["first_name"] = name[0] + subject_info["last_name"] = " ".join(name[1:]) + elif kind == "Age": + age = int(parts[0].rstrip("y")) + elif kind == "Mode": + mode = parts[0] + elif kind in ("HPF[Hz]", "LPF[Hz]"): + try: + freq = float(parts[0]) + except ValueError: + pass + else: + info_extra[{"HPF[Hz]": "highpass", "LPF[Hz]": "lowpass"}[kind]] = freq + elif kind == "Date": + # 5/17/04 5:14 + try: + mdy, HM = parts[0].split(" ") + H, M = HM.split(":") + if len(H) == 1: + H = f"0{H}" + mdyHM = " ".join([mdy, ":".join([H, M])]) + for fmt in ("%m/%d/%y %H:%M", "%Y/%m/%d %H:%M"): + try: + meas_date = dt.datetime.strptime(mdyHM, fmt) + except Exception: + pass + else: + break + else: + raise RuntimeError # unknown format + except Exception: + warn( + "Extraction of measurement date failed. " + "Please report this as a github issue. " + "The date is being set to January 1st, 2000, " + f"instead of {repr(parts[0])}" + ) + elif kind == "Sex": + try: + subject_info["sex"] = dict( + female=FIFF.FIFFV_SUBJ_SEX_FEMALE, male=FIFF.FIFFV_SUBJ_SEX_MALE + )[parts[0].lower()] + except KeyError: + pass + elif kind == "Wave[nm]": + fnirs_wavelengths[:] = [int(part) for part in parts] + elif kind == "Wave Length": + ch_regex = re.compile(r"^(.*)\(([0-9\.]+)\)$") + for ent in parts: + _, v = ch_regex.match(ent).groups() + ch_wavelengths[ent] = float(v) + elif kind == "Data": + break + fnirs_wavelengths = np.array(fnirs_wavelengths, int) + assert len(fnirs_wavelengths) == 2 + ch_names = lines[li + 1].rstrip(",\r\n").split(",") + # cull to correct ones + raw_extra["keep_mask"] = ~np.isin(ch_names, list(ignore_names)) + for ci, ch_name in enumerate(ch_names): + if re.match("Probe[0-9]+", ch_name): + raw_extra["keep_mask"][ci] = False + # set types + ch_names = [ + ch_name for ci, ch_name in enumerate(ch_names) if raw_extra["keep_mask"][ci] + ] + ch_types = [ + "fnirs_cw_amplitude" if ch_name.startswith("CH") else "stim" + for ch_name in ch_names + ] + # get locations + nirs_names = [ + ch_name + for ch_name, ch_type in zip(ch_names, ch_types) + if ch_type == "fnirs_cw_amplitude" + ] + n_nirs = len(nirs_names) + assert n_nirs % 2 == 0 + names = { + "3x3": "ETG-100", + "3x5": "ETG-7000", + "4x4": "ETG-7000", + "3x11": "ETG-4000", + } + _check_option("Hitachi mode", mode, sorted(names)) + n_row, n_col = (int(x) for x in mode.split("x")) + logger.info(f"Constructing pairing matrix for {names[mode]} ({mode})") + pairs = _compute_pairs(n_row, n_col, n=1 + (mode == "3x3")) + assert n_nirs == len(pairs) * 2 + locs = np.zeros((len(ch_names), 12)) + locs[:, :9] = np.nan + idxs = np.where(np.array(ch_types, "U") == "fnirs_cw_amplitude")[0] + for ii, idx in enumerate(idxs): + ch_name = ch_names[idx] + # Use the actual/accurate wavelength in loc + acc_freq = ch_wavelengths[ch_name] + locs[idx][9] = acc_freq + # Rename channel based on standard naming scheme, using the + # nominal wavelength + sidx, didx = pairs[ii // 2] + nom_freq = fnirs_wavelengths[np.argmin(np.abs(acc_freq - fnirs_wavelengths))] + ch_names[idx] = f"S{S_offset + sidx + 1}_D{D_offset + didx + 1} {nom_freq}" + offsets = np.array(pairs, 
int).max(axis=0) + 1 + + # figure out bounds + bounds = raw_extra["bounds"] = bounds[li + 2 :] + last_samp = len(bounds) - 2 + + if age is not None and meas_date is not None: + subject_info["birthday"] = dt.date( + meas_date.year - age, + meas_date.month, + meas_date.day, + ) + if meas_date is None: + meas_date = dt.datetime(2000, 1, 1, 0, 0, 0) + meas_date = meas_date.replace(tzinfo=dt.timezone.utc) + if subject_info: + info_extra["subject_info"] = subject_info + + # Create mne structure + info = create_info(ch_names, sfreq, ch_types=ch_types) + with info._unlock(): + info.update(info_extra) + info["meas_date"] = meas_date + for li, loc in enumerate(locs): + info["chs"][li]["loc"][:] = loc + return info, raw_extra, last_samp, offsets + + +def _compute_pairs(n_rows, n_cols, n=1): + n_tot = n_rows * n_cols + sd_idx = (np.arange(n_tot) // 2).reshape(n_rows, n_cols) + d_bool = np.empty((n_rows, n_cols), bool) + for ri in range(n_rows): + d_bool[ri] = np.arange(ri, ri + n_cols) % 2 + pairs = list() + for ri in range(n_rows): + # First iterate over connections within the row + for ci in range(n_cols - 1): + pair = (sd_idx[ri, ci], sd_idx[ri, ci + 1]) + if d_bool[ri, ci]: # reverse + pair = pair[::-1] + pairs.append(pair) + # Next iterate over row-row connections, if applicable + if ri >= n_rows - 1: + continue + for ci in range(n_cols): + pair = (sd_idx[ri, ci], sd_idx[ri + 1, ci]) + if d_bool[ri, ci]: + pair = pair[::-1] + pairs.append(pair) + if n > 1: + assert n == 2 # only one supported for now + pairs = np.array(pairs, int) + second = pairs + pairs.max(axis=0) + 1 + pairs = np.r_[pairs, second] + pairs = tuple(tuple(row) for row in pairs) + return tuple(pairs) diff --git a/mne/io/kit/__init__.py b/mne/io/kit/__init__.py new file mode 100644 index 0000000..c522113 --- /dev/null +++ b/mne/io/kit/__init__.py @@ -0,0 +1,8 @@ +"""KIT module for reading raw data.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .kit import read_raw_kit, read_epochs_kit +from .coreg import read_mrk diff --git a/mne/io/kit/constants.py b/mne/io/kit/constants.py new file mode 100644 index 0000000..7831e81 --- /dev/null +++ b/mne/io/kit/constants.py @@ -0,0 +1,259 @@ +"""KIT constants.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +from ..._fiff.constants import FIFF +from ...utils import BunchConst + + +KIT = BunchConst() + +# byte values +KIT.SHORT = 2 +KIT.INT = 4 +KIT.DOUBLE = 8 + +# channel parameters +KIT.CALIB_FACTOR = 1.0 # mne_manual p.272 +KIT.RANGE = 1.0 # mne_manual p.272 +KIT.UNIT_MUL = FIFF.FIFF_UNITM_NONE # default is 0 mne_manual p.273 +KIT.GAINS = [1, 2, 5, 10, 20, 50, 100, 200] + +KIT.HPFS = { + 1: (0, 1, 3, 3), + 2: (0, 0.03, 0.1, 0.3, 1, 3, 10, 30), + 3: (0, 0.03, 0.1, 0.3, 1, 3, 10, 30), + 4: (0, 1, 3, 10, 30, 100, 200, 500), +} +KIT.LPFS = { + 1: (10, 20, 50, 100, 200, 500, 1000, 2000), + 2: (10, 20, 50, 100, 200, 500, 1000, 2000), + 3: (10, 20, 50, 100, 200, 500, 1000, 10000), + 4: (10, 30, 100, 300, 1000, 2000, 5000, 10000), +} +KIT.BEFS = { + 1: (0, 50, 60, 60), + 2: (0, 0, 0), + 3: (0, 60, 50, 50), +} + +# Map FLL-Type to filter options (high, low, band) +KIT.FLL_SETTINGS = { + 0: (1, 1, 1), # Hanger Type #1 + 10: (1, 1, 1), # Hanger Type #2 + 20: (1, 1, 1), # Hanger Type #2 + 50: (2, 1, 1), # Hanger Type #3 + 60: (2, 1, 1), # Hanger Type #3 + 100: (3, 3, 3), # Low Band Kapper Type + 101: (1, 3, 2), # Berlin (DC, 200 Hz, Through) + 120: (3, 3, 3), # Low Band Kapper Type + 200: (4, 4, 3), # High Band Kapper Type + 300: (2, 2, 2), # Kapper Type +} + +# channel types +KIT.CHANNEL_MAGNETOMETER = 1 +KIT.CHANNEL_MAGNETOMETER_REFERENCE = 0x101 +KIT.CHANNEL_AXIAL_GRADIOMETER = 2 +KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE = 0x102 +KIT.CHANNEL_PLANAR_GRADIOMETER = 3 +KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE = 0x103 +KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER = 4 +KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE = 0x104 +KIT.CHANNEL_TRIGGER = -1 +KIT.CHANNEL_EEG = -2 +KIT.CHANNEL_ECG = -3 +KIT.CHANNEL_ETC = -4 +KIT.CHANNEL_NULL = 0 +KIT.CHANNELS_MEG = ( + KIT.CHANNEL_MAGNETOMETER, + KIT.CHANNEL_MAGNETOMETER_REFERENCE, + KIT.CHANNEL_AXIAL_GRADIOMETER, + KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE, + KIT.CHANNEL_PLANAR_GRADIOMETER, + KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE, + KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER, + KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE, +) +KIT.CHANNELS_REFERENCE = ( + KIT.CHANNEL_MAGNETOMETER_REFERENCE, + KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE, + KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE, + KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE, +) +KIT.CHANNELS_MISC = ( + KIT.CHANNEL_TRIGGER, + KIT.CHANNEL_EEG, + KIT.CHANNEL_ECG, + KIT.CHANNEL_ETC, +) +KIT.CHANNEL_NAME_NCHAR = { + KIT.CHANNEL_MAGNETOMETER: 6, + KIT.CHANNEL_AXIAL_GRADIOMETER: 6, + KIT.CHANNEL_TRIGGER: 32, + KIT.CHANNEL_EEG: 8, + KIT.CHANNEL_ECG: 32, + KIT.CHANNEL_ETC: 32, +} +KIT.CH_TO_FIFF_COIL = { + # KIT.CHANNEL_MAGNETOMETER: FIFF.???, + KIT.CHANNEL_MAGNETOMETER_REFERENCE: FIFF.FIFFV_COIL_KIT_REF_MAG, + KIT.CHANNEL_AXIAL_GRADIOMETER: FIFF.FIFFV_COIL_KIT_GRAD, + # KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE: FIFF.???, + # KIT.CHANNEL_PLANAR_GRADIOMETER: FIFF.???, + # KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE: FIFF.???, + # KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER: FIFF.???, + # KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE: FIFF.???, + KIT.CHANNEL_TRIGGER: FIFF.FIFFV_COIL_NONE, + KIT.CHANNEL_EEG: FIFF.FIFFV_COIL_EEG, + KIT.CHANNEL_ECG: FIFF.FIFFV_COIL_NONE, + KIT.CHANNEL_ETC: FIFF.FIFFV_COIL_NONE, + KIT.CHANNEL_NULL: FIFF.FIFFV_COIL_NONE, +} +KIT.CH_TO_FIFF_KIND = { + KIT.CHANNEL_MAGNETOMETER: FIFF.FIFFV_MEG_CH, + KIT.CHANNEL_MAGNETOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH, + KIT.CHANNEL_AXIAL_GRADIOMETER: FIFF.FIFFV_MEG_CH, + KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH, + 
KIT.CHANNEL_PLANAR_GRADIOMETER: FIFF.FIFFV_MEG_CH, + KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH, + KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER: FIFF.FIFFV_MEG_CH, + KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH, + KIT.CHANNEL_TRIGGER: FIFF.FIFFV_MISC_CH, + KIT.CHANNEL_EEG: FIFF.FIFFV_EEG_CH, + KIT.CHANNEL_ECG: FIFF.FIFFV_ECG_CH, + KIT.CHANNEL_ETC: FIFF.FIFFV_MISC_CH, + KIT.CHANNEL_NULL: FIFF.FIFFV_MISC_CH, +} +KIT.CH_LABEL = { + KIT.CHANNEL_TRIGGER: "TRIGGER", + KIT.CHANNEL_EEG: "EEG", + KIT.CHANNEL_ECG: "ECG", + KIT.CHANNEL_ETC: "MISC", + KIT.CHANNEL_NULL: "MISC", +} + +# Acquisition modes +KIT.CONTINUOUS = 1 +KIT.EVOKED = 2 +KIT.EPOCHS = 3 + +# coreg constants +KIT.DIG_POINTS = 10000 + +# Known KIT systems +# ----------------- +# KIT recording system is encoded in the SQD file as integer: +KIT.SYSTEM_MQ_ADULT = 345 # Macquarie Dept of Cognitive Science, 2006 - +KIT.SYSTEM_MQ_CHILD = 403 # Macquarie Dept of Cognitive Science, 2006 - +KIT.SYSTEM_AS = 260 # Academia Sinica at Taiwan +KIT.SYSTEM_AS_2008 = 261 # Academia Sinica, 2008 or 2009 - +KIT.SYSTEM_NYU_2008 = 32 # NYU-NY, July 7, 2008 - +KIT.SYSTEM_NYU_2009 = 33 # NYU-NY, January 24, 2009 - +KIT.SYSTEM_NYU_2010 = 34 # NYU-NY, January 22, 2010 - +KIT.SYSTEM_NYU_2019 = 35 # NYU-NY, September 18, 2019 - +KIT.SYSTEM_NYUAD_2011 = 440 # NYU-AD initial launch May 20, 2011 - +KIT.SYSTEM_NYUAD_2012 = 441 # NYU-AD more channels July 11, 2012 - +KIT.SYSTEM_NYUAD_2014 = 442 # NYU-AD move to NYUAD campus Nov 20, 2014 - +KIT.SYSTEM_UMD_2004 = 51 # UMD Marie Mount Hall, October 1, 2004 - +KIT.SYSTEM_UMD_2014_07 = 52 # UMD update to 16 bit ADC, July 4, 2014 - +KIT.SYSTEM_UMD_2014_12 = 53 # UMD December 4, 2014 - +KIT.SYSTEM_UMD_2019_09 = 54 # UMD September 3, 2019 - +KIT.SYSTEM_YOKOGAWA_2017_01 = 1001 # Kanazawa (until 2017) +KIT.SYSTEM_YOKOGAWA_2018_01 = 10020 # Kanazawa (since 2018) +KIT.SYSTEM_YOKOGAWA_2020_08 = 10021 # Kanazawa (since August 2020) +KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008 = 124 + +# Sensor layouts for plotting +KIT_LAYOUT = { + KIT.SYSTEM_AS: None, + KIT.SYSTEM_AS_2008: "KIT-AS-2008", + KIT.SYSTEM_MQ_ADULT: "KIT-160", + KIT.SYSTEM_MQ_CHILD: "KIT-125", + KIT.SYSTEM_NYU_2008: "KIT-157", + KIT.SYSTEM_NYU_2009: "KIT-157", + KIT.SYSTEM_NYU_2010: "KIT-157", + KIT.SYSTEM_NYU_2019: None, + KIT.SYSTEM_NYUAD_2011: "KIT-AD", + KIT.SYSTEM_NYUAD_2012: "KIT-AD", + KIT.SYSTEM_NYUAD_2014: "KIT-AD", + KIT.SYSTEM_UMD_2004: None, + KIT.SYSTEM_UMD_2014_07: None, + KIT.SYSTEM_UMD_2014_12: "KIT-UMD-3", + KIT.SYSTEM_UMD_2019_09: None, + KIT.SYSTEM_YOKOGAWA_2017_01: None, + KIT.SYSTEM_YOKOGAWA_2018_01: None, + KIT.SYSTEM_YOKOGAWA_2020_08: None, + KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008: None, +} +# Sensor neighbor definitions +KIT_NEIGHBORS = { + KIT.SYSTEM_AS: None, + KIT.SYSTEM_AS_2008: None, + KIT.SYSTEM_MQ_ADULT: None, + KIT.SYSTEM_MQ_CHILD: None, + KIT.SYSTEM_NYU_2008: "KIT-157", + KIT.SYSTEM_NYU_2009: "KIT-157", + KIT.SYSTEM_NYU_2010: "KIT-157", + KIT.SYSTEM_NYU_2019: "KIT-NYU-2019", + KIT.SYSTEM_NYUAD_2011: "KIT-208", + KIT.SYSTEM_NYUAD_2012: "KIT-208", + KIT.SYSTEM_NYUAD_2014: "KIT-208", + KIT.SYSTEM_UMD_2004: "KIT-UMD-1", + KIT.SYSTEM_UMD_2014_07: "KIT-UMD-2", + KIT.SYSTEM_UMD_2014_12: "KIT-UMD-3", + KIT.SYSTEM_UMD_2019_09: "KIT-UMD-4", + KIT.SYSTEM_YOKOGAWA_2017_01: None, + KIT.SYSTEM_YOKOGAWA_2018_01: None, + KIT.SYSTEM_YOKOGAWA_2020_08: None, + KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008: None, +} +# Names displayed in the info dict description +KIT_SYSNAMES = { + KIT.SYSTEM_MQ_ADULT: "Macquarie Dept of Cognitive 
Science (Adult), 2006-",
+    KIT.SYSTEM_MQ_CHILD: "Macquarie Dept of Cognitive Science (Child), 2006-",
+    KIT.SYSTEM_AS: "Academia Sinica, -2008",
+    KIT.SYSTEM_AS_2008: "Academia Sinica, 2008-",
+    KIT.SYSTEM_NYU_2008: "NYU New York, 2008-9",
+    KIT.SYSTEM_NYU_2009: "NYU New York, 2009-10",
+    KIT.SYSTEM_NYU_2010: "NYU New York, 2010-",
+    KIT.SYSTEM_NYUAD_2011: "New York University Abu Dhabi, 2011-12",
+    KIT.SYSTEM_NYUAD_2012: "New York University Abu Dhabi, 2012-14",
+    KIT.SYSTEM_NYUAD_2014: "New York University Abu Dhabi, 2014-",
+    KIT.SYSTEM_UMD_2004: "University of Maryland, 2004-14",
+    KIT.SYSTEM_UMD_2014_07: "University of Maryland, 2014",
+    KIT.SYSTEM_UMD_2014_12: "University of Maryland, 2014-",
+    KIT.SYSTEM_UMD_2019_09: "University of Maryland, 2019-",
+    KIT.SYSTEM_YOKOGAWA_2017_01: "Yokogawa of Kanazawa (until 2017)",
+    KIT.SYSTEM_YOKOGAWA_2018_01: "Yokogawa of Kanazawa (since 2018)",
+    KIT.SYSTEM_YOKOGAWA_2020_08: "Yokogawa of Kanazawa (since August 2020)",
+    KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008: "Eagle Technology MEG (KIT/Yokogawa style) at PTB (since 2008, software upgrade in 2018)",  # noqa: E501
+}
+
+LEGACY_AMP_PARAMS = {
+    KIT.SYSTEM_NYU_2008: (5.0, 11.0),
+    KIT.SYSTEM_NYU_2009: (5.0, 11.0),
+    KIT.SYSTEM_NYU_2010: (5.0, 11.0),
+    KIT.SYSTEM_UMD_2004: (5.0, 11.0),
+}
+
+# Ones that we don't use are commented out
+KIT.DIR_INDEX_DIR = 0
+KIT.DIR_INDEX_SYSTEM = 1
+KIT.DIR_INDEX_CHANNELS = 4
+KIT.DIR_INDEX_CALIBRATION = 5
+# FLL = 6
+KIT.DIR_INDEX_AMP_FILTER = 7
+KIT.DIR_INDEX_ACQ_COND = 8
+KIT.DIR_INDEX_RAW_DATA = 9
+# AVERAGED_DATA = 10
+# MRI = 11
+KIT.DIR_INDEX_COREG = 12
+# MAGNETIC_SOURCE = 13
+# TRIGGER = 14
+# BOOKMARKS = 15
+# DIGITIZER = 25
+KIT.DIR_INDEX_DIG_POINTS = 26
+KIT.DIR_INDEX_CHPI_DATA = 29
diff --git a/mne/io/kit/coreg.py b/mne/io/kit/coreg.py
new file mode 100644
index 0000000..8e6698d
--- /dev/null
+++ b/mne/io/kit/coreg.py
@@ -0,0 +1,233 @@
+"""Coordinate Point Extractor for KIT system."""
+
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import re
+from collections import OrderedDict
+from os import SEEK_CUR, PathLike
+from pathlib import Path
+
+import numpy as np
+
+from ..._fiff._digitization import _make_dig_points
+from ...channels.montage import (
+    _check_dig_shape,
+    read_custom_montage,
+    read_dig_polhemus_isotrak,
+    read_polhemus_fastscan,
+)
+from ...transforms import (
+    Transform,
+    als_ras_trans,
+    apply_trans,
+    get_ras_to_neuromag_trans,
+)
+from ...utils import _check_fname, _check_option, warn
+from .constants import FIFF, KIT
+
+INT32 = "<i4"
+
+
+def _set_dig_kit(mrk, elp, hsp, eeg, *, bad_coils=()):
+    """Add landmark points and head shape data to the KIT instance."""
+    from ...coreg import fit_matched_points
+    from ...surface import _decimate_points
+
+    if isinstance(hsp, str | Path | PathLike):
+        hsp = _read_dig_kit(hsp)
+    n_pts = len(hsp)
+    if n_pts > KIT.DIG_POINTS:
+        hsp = _decimate_points(hsp, res=0.005)
+        n_new = len(hsp)
+        warn(
+            f"The selected head shape contained {n_pts} points, which is more than "
+            f"recommended ({KIT.DIG_POINTS}), and was automatically downsampled to "
+            f"{n_new} points. The preferred way to downsample is using FastScan."
+        )
+
+    if isinstance(elp, str | Path | PathLike):
+        elp_points = _read_dig_kit(elp)
+        if len(elp_points) != 8:
+            raise ValueError(
+                f"File {repr(elp)} should contain 8 points; got shape "
+                f"{elp_points.shape}."
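+                # 8 = 3 cardinal landmarks (nasion, LPA, RPA) + 5 HPI coils.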
+ ) + elp = elp_points + if len(bad_coils) > 0: + elp = np.delete(elp, np.array(bad_coils) + 3, 0) + # check we have at least 3 marker coils (whether read from file or + # passed in directly) + if len(elp) not in (6, 7, 8): + raise ValueError(f"ELP should contain 6 ~ 8 points; got shape {elp.shape}.") + if isinstance(mrk, str | Path | PathLike): + mrk = read_mrk(mrk) + if len(bad_coils) > 0: + mrk = np.delete(mrk, bad_coils, 0) + if len(mrk) not in (3, 4, 5): + raise ValueError(f"MRK should contain 3 ~ 5 points; got shape {mrk.shape}.") + + mrk = apply_trans(als_ras_trans, mrk) + + nasion, lpa, rpa = elp[:3] + nmtrans = get_ras_to_neuromag_trans(nasion, lpa, rpa) + elp = apply_trans(nmtrans, elp) + hsp = apply_trans(nmtrans, hsp) + eeg = OrderedDict((k, apply_trans(nmtrans, p)) for k, p in eeg.items()) + + # device head transform + trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out="trans") + + nasion, lpa, rpa = elp[:3] + elp = elp[3:] + + dig_points = _make_dig_points(nasion, lpa, rpa, elp, hsp, dig_ch_pos=eeg) + dev_head_t = Transform("meg", "head", trans) + + hpi_results = [ + dict( + dig_points=[ + dict( + ident=ci, + r=r, + kind=FIFF.FIFFV_POINT_HPI, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + ) + for ci, r in enumerate(mrk) + ], + coord_trans=dev_head_t, + ) + ] + + return dig_points, dev_head_t, hpi_results + + +def _read_dig_kit(fname, unit="auto"): + # Read dig points from a file and return ndarray, using FastSCAN for .txt + fname = _check_fname(fname, "read", must_exist=True, name="hsp or elp file") + assert unit in ("auto", "m", "mm") + _check_option("file extension", fname.suffix, (".hsp", ".elp", ".mat", ".txt")) + if fname.suffix == ".txt": + unit = "mm" if unit == "auto" else unit + out = read_polhemus_fastscan(fname, unit=unit, on_header_missing="ignore") + elif fname.suffix in (".hsp", ".elp"): + unit = "m" if unit == "auto" else unit + mon = read_dig_polhemus_isotrak(fname, unit=unit) + if fname.suffix == ".hsp": + dig = [d["r"] for d in mon.dig if d["kind"] != FIFF.FIFFV_POINT_CARDINAL] + else: + dig = [d["r"] for d in mon.dig] + if ( + dig + and mon.dig[0]["kind"] == FIFF.FIFFV_POINT_CARDINAL + and mon.dig[0]["ident"] == FIFF.FIFFV_POINT_LPA + ): + # LPA, Nasion, RPA -> NLR + dig[:3] = [dig[1], dig[0], dig[2]] + out = np.array(dig, float) + else: + assert fname.suffix == ".mat" + out = np.array([d["r"] for d in read_custom_montage(fname).dig]) + _check_dig_shape(out) + return out diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py new file mode 100644 index 0000000..95eb805 --- /dev/null +++ b/mne/io/kit/kit.py @@ -0,0 +1,1044 @@ +"""Conversion tool from SQD to FIF. + +RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py. +""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +from collections import OrderedDict, defaultdict +from math import cos, sin +from os import SEEK_CUR, PathLike +from os import path as op +from pathlib import Path + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.pick import pick_types +from ..._fiff.utils import _mult_cal_one +from ...epochs import BaseEpochs +from ...event import read_events +from ...transforms import als_ras_trans, apply_trans +from ...utils import ( + _check_fname, + _check_option, + _stamp_to_dt, + fill_doc, + logger, + verbose, + warn, +) +from ..base import BaseRaw +from .constants import KIT, LEGACY_AMP_PARAMS +from .coreg import _set_dig_kit, read_mrk + +FLOAT64 = "' + Can be submitted as list of trigger channels. + If a list is not specified, the default triggers extracted from + misc channels will be used with the specified directionality. + '<' means that the largest values are assigned to the first channel + in sequence. + '>' means the largest trigger is assigned to the last channel + in sequence. + stim_code : 'binary' | 'channel' + How to decode trigger values from stim channels. 'binary' reads stim + channel events as binary code, 'channel' encodes channel number. + """ + if inst.preload: + raise NotImplementedError("Can't change stim channel after loading data") + _check_option("stim_code", stim_code, ["binary", "channel"]) + + if stim is not None: + if isinstance(stim, str): + picks = _default_stim_chs(info) + if stim == "<": + stim = picks[::-1] + elif stim == ">": + stim = picks + else: + raise ValueError( + f"stim needs to be list of int, '>' or '<', not {str(stim)!r}" + ) + else: + stim = np.asarray(stim, int) + if stim.max() >= inst._raw_extras[0]["nchan"]: + raise ValueError( + f"Got stim={stim}, but sqd file only has " + f"{inst._raw_extras[0]['nchan']} channels." + ) + + # modify info + nchan = inst._raw_extras[0]["nchan"] + 1 + info["chs"].append( + dict( + cal=KIT.CALIB_FACTOR, + logno=nchan, + scanno=nchan, + range=1.0, + unit=FIFF.FIFF_UNIT_NONE, + unit_mul=FIFF.FIFF_UNITM_NONE, + ch_name="STI 014", + coil_type=FIFF.FIFFV_COIL_NONE, + loc=np.full(12, np.nan), + kind=FIFF.FIFFV_STIM_CH, + coord_frame=FIFF.FIFFV_COORD_UNKNOWN, + ) + ) + info._update_redundant() + + inst._raw_extras[0]["stim"] = stim + inst._raw_extras[0]["stim_code"] = stim_code + + +def _default_stim_chs(info): + """Return default stim channels for SQD files.""" + return pick_types(info, meg=False, ref_meg=False, misc=True, exclude=[])[:8] + + +def _make_stim_channel(trigger_chs, slope, threshold, stim_code, trigger_values): + """Create synthetic stim channel from multiple trigger channels.""" + if slope == "+": + trig_chs_bin = trigger_chs > threshold + elif slope == "-": + trig_chs_bin = trigger_chs < threshold + else: + raise ValueError("slope needs to be '+' or '-'") + # trigger value + if stim_code == "binary": + trigger_values = 2 ** np.arange(len(trigger_chs)) + elif stim_code != "channel": + raise ValueError( + f"stim_code must be 'binary' or 'channel', got {repr(stim_code)}" + ) + trig_chs = trig_chs_bin * trigger_values[:, np.newaxis] + return np.array(trig_chs.sum(axis=0), ndmin=2) + + +@fill_doc +class EpochsKIT(BaseEpochs): + """Epochs Array object from KIT SQD file. + + Parameters + ---------- + input_fname : path-like + Path to the sqd file. + events : array of int, shape (n_events, 3) | path-like + The array of :term:`events`. The first column contains the event time + in samples, with :term:`first_samp` included.
The third column contains + the event id. If a path, must yield a ``.txt`` file containing the + events. + If some events don't match the events of interest as specified by + ``event_id``, they will be marked as ``IGNORED`` in the drop log. + %(event_id)s + tmin : float + Start time before event. + %(baseline_epochs)s + %(reject_epochs)s + %(flat)s + %(epochs_reject_tmin_tmax)s + %(kit_mrk)s + %(kit_elp)s + %(kit_hsp)s + allow_unknown_format : bool + Force reading old data that is not officially supported. Alternatively, + read and re-save the data with the KIT MEG Laboratory application. + %(standardize_names)s + %(verbose)s + + Notes + ----- + ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the + Polhemus FastScan system. hsp refers to the headshape surface points. elp + refers to the points in head-space that correspond to the HPI points. + Currently, '*.elp' and '*.hsp' files are NOT supported. + + See Also + -------- + mne.Epochs : Documentation of attributes and methods. + """ + + @verbose + def __init__( + self, + input_fname, + events, + event_id=None, + tmin=0, + baseline=None, + reject=None, + flat=None, + reject_tmin=None, + reject_tmax=None, + mrk=None, + elp=None, + hsp=None, + allow_unknown_format=False, + standardize_names=None, + verbose=None, + ): + if isinstance(events, str | PathLike | Path): + events = read_events(events) + + input_fname = str( + _check_fname(fname=input_fname, must_exist=True, overwrite="read") + ) + logger.info(f"Extracting KIT Parameters from {input_fname}...") + self.info, kit_info = get_kit_info( + input_fname, allow_unknown_format, standardize_names + ) + kit_info.update(input_fname=input_fname) + self._raw_extras = [kit_info] + self.filenames = [] + if len(events) != self._raw_extras[0]["n_epochs"]: + raise ValueError("Event list does not match number of epochs.") + + if self._raw_extras[0]["acq_type"] == KIT.EPOCHS: + self._raw_extras[0]["data_length"] = KIT.INT + else: + raise TypeError( + "SQD file contains raw data, not epochs or average. Wrong reader." + ) + + if event_id is None: # convert to int to make typing-checks happy + event_id = {str(e): int(e) for e in np.unique(events[:, 2])} + + for key, val in event_id.items(): + if val not in events[:, 2]: + raise ValueError(f"No matching events found for {key} (event id {val})") + + data = self._read_kit_data() + assert data.shape == ( + self._raw_extras[0]["n_epochs"], + self.info["nchan"], + self._raw_extras[0]["frame_length"], + ) + tmax = ((data.shape[2] - 1) / self.info["sfreq"]) + tmin + super().__init__( + self.info, + data, + events, + event_id, + tmin, + tmax, + baseline, + reject=reject, + flat=flat, + reject_tmin=reject_tmin, + reject_tmax=reject_tmax, + filename=input_fname, + verbose=verbose, + ) + self.info = _call_digitization( + info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info + ) + logger.info("Ready.") + + def _read_kit_data(self): + """Read epochs data. + + Returns + ------- + data : array, shape (n_epochs, n_channels, n_samples) + The epochs data.
+ """ + info = self._raw_extras[0] + epoch_length = info["frame_length"] + n_epochs = info["n_epochs"] + n_samples = info["n_samples"] + input_fname = info["input_fname"] + dtype = info["dtype"] + nchan = info["nchan"] + + with open(input_fname, "rb", buffering=0) as fid: + fid.seek(info["dirs"][KIT.DIR_INDEX_RAW_DATA]["offset"]) + count = n_samples * nchan + data = np.fromfile(fid, dtype=dtype, count=count) + data = data.reshape((n_samples, nchan)).T + data = data * info["conv_factor"] + data = data.reshape((nchan, n_epochs, epoch_length)) + data = data.transpose((1, 0, 2)) + + return data + + +def _read_dir(fid): + return dict( + offset=np.fromfile(fid, UINT32, 1)[0], + size=np.fromfile(fid, INT32, 1)[0], + max_count=np.fromfile(fid, INT32, 1)[0], + count=np.fromfile(fid, INT32, 1)[0], + ) + + +@verbose +def _read_dirs(fid, verbose=None): + dirs = list() + dirs.append(_read_dir(fid)) + for ii in range(dirs[0]["count"] - 1): + logger.debug(f" KIT dir entry {ii} @ {fid.tell()}") + dirs.append(_read_dir(fid)) + assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]["count"] + return dirs + + +@verbose +def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose=None): + """Extract all the information from the sqd/con file. + + Parameters + ---------- + rawfile : path-like + KIT file to be read. + allow_unknown_format : bool + Force reading old data that is not officially supported. Alternatively, + read and re-save the data with the KIT MEG Laboratory application. + %(standardize_names)s + %(verbose)s + + Returns + ------- + %(info_not_none)s + sqd : dict + A dict containing all the sqd parameter settings. + """ + sqd = dict() + sqd["rawfile"] = rawfile + unsupported_format = False + with open(rawfile, "rb", buffering=0) as fid: # buffering=0 for np bug + # + # directories (0) + # + sqd["dirs"] = dirs = _read_dirs(fid) + + # + # system (1) + # + fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]["offset"]) + # check file format version + version, revision = np.fromfile(fid, INT32, 2) + if version < 2 or (version == 2 and revision < 3): + version_string = f"V{version}R{revision:03d}" + if allow_unknown_format: + unsupported_format = True + warn(f"Force loading KIT format {version_string}") + else: + raise UnsupportedKITFormat( + version_string, + f"SQD file format {version_string} is not officially supported. 
" + "Set allow_unknown_format=True to load it anyways.", + ) + + sysid = np.fromfile(fid, INT32, 1)[0] + # basic info + system_name = _read_name(fid, n=128) + # model name + model_name = _read_name(fid, n=128) + # channels + sqd["nchan"] = channel_count = int(np.fromfile(fid, INT32, 1)[0]) + comment = _read_name(fid, n=256) + create_time, last_modified_time = np.fromfile(fid, INT32, 2) + del last_modified_time + fid.seek(KIT.INT * 3, SEEK_CUR) # reserved + dewar_style = np.fromfile(fid, INT32, 1)[0] + fid.seek(KIT.INT * 3, SEEK_CUR) # spare + fll_type = np.fromfile(fid, INT32, 1)[0] + fid.seek(KIT.INT * 3, SEEK_CUR) # spare + trigger_type = np.fromfile(fid, INT32, 1)[0] + fid.seek(KIT.INT * 3, SEEK_CUR) # spare + adboard_type = np.fromfile(fid, INT32, 1)[0] + fid.seek(KIT.INT * 29, SEEK_CUR) # reserved + + if version < 2 or (version == 2 and revision <= 3): + adc_range = float(np.fromfile(fid, INT32, 1)[0]) + else: + adc_range = np.fromfile(fid, FLOAT64, 1)[0] + adc_polarity, adc_allocated, adc_stored = np.fromfile(fid, INT32, 3) + del adc_polarity + system_name = system_name.replace("\x00", "") + system_name = system_name.strip().replace("\n", "/") + model_name = model_name.replace("\x00", "") + model_name = model_name.strip().replace("\n", "/") + + full_version = f"V{version:d}R{revision:03d}" + logger.debug("SQD file basic information:") + logger.debug("Meg160 version = %s", full_version) + logger.debug("System ID = %i", sysid) + logger.debug("System name = %s", system_name) + logger.debug("Model name = %s", model_name) + logger.debug("Channel count = %i", channel_count) + logger.debug("Comment = %s", comment) + logger.debug("Dewar style = %i", dewar_style) + logger.debug("FLL type = %i", fll_type) + logger.debug("Trigger type = %i", trigger_type) + logger.debug("A/D board type = %i", adboard_type) + logger.debug("ADC range = +/-%s[V]", adc_range / 2.0) + logger.debug("ADC allocate = %i[bit]", adc_allocated) + logger.debug("ADC bit = %i[bit]", adc_stored) + # MGH description: 'acquisition (megacq) VectorView system at NMR-MGH' + description = f"{system_name} ({sysid}) {full_version} {model_name}" + assert adc_allocated % 8 == 0 + sqd["dtype"] = np.dtype(f"{use_fll_type}, check your data for correctness, " + "including channel scales and filter settings!" + ) + fll_type = use_fll_type + + # + # channel information (4) + # + chan_dir = dirs[KIT.DIR_INDEX_CHANNELS] + chan_offset, chan_size = chan_dir["offset"], chan_dir["size"] + sqd["channels"] = channels = [] + exg_gains = list() + for i in range(channel_count): + fid.seek(chan_offset + chan_size * i) + (channel_type,) = np.fromfile(fid, INT32, 1) + # System 52 mislabeled reference channels as NULL. This was fixed + # in system 53; not sure about 51... + if sysid == 52 and i < 160 and channel_type == KIT.CHANNEL_NULL: + channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE + + if channel_type in KIT.CHANNELS_MEG: + if channel_type not in KIT.CH_TO_FIFF_COIL: + raise NotImplementedError( + "KIT channel type {channel_type} can not be read. Please " + "contact the mne-python developers." + ) + channels.append( + { + "type": channel_type, + # (x, y, z, theta, phi) for all MEG channels. Some channel + # types have additional information which we're not using. 
+ "loc": np.fromfile(fid, dtype=FLOAT64, count=5), + } + ) + if channel_type in KIT.CHANNEL_NAME_NCHAR: + fid.seek(16, SEEK_CUR) # misc fields + channels[-1]["name"] = _read_name(fid, channel_type) + elif channel_type in KIT.CHANNELS_MISC: + (channel_no,) = np.fromfile(fid, INT32, 1) + fid.seek(4, SEEK_CUR) + name = _read_name(fid, channel_type) + channels.append( + { + "type": channel_type, + "no": channel_no, + "name": name, + } + ) + if channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG): + offset = 6 if channel_type == KIT.CHANNEL_EEG else 8 + fid.seek(offset, SEEK_CUR) + exg_gains.append(np.fromfile(fid, FLOAT64, 1)[0]) + elif channel_type == KIT.CHANNEL_NULL: + channels.append({"type": channel_type}) + else: + raise OSError(f"Unknown KIT channel type: {channel_type}") + exg_gains = np.array(exg_gains) + + # + # Channel sensitivity information: (5) + # + + # only sensor channels require gain. The additional misc channels + # (trigger channels, audio and voice channels) are passed + # through unaffected + fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]["offset"]) + # (offset [Volt], gain [Tesla/Volt]) for each channel + sensitivity = np.fromfile(fid, dtype=FLOAT64, count=channel_count * 2) + sensitivity.shape = (channel_count, 2) + channel_offset, channel_gain = sensitivity.T + assert (channel_offset == 0).all() # otherwise we have a problem + + # + # amplifier gain (7) + # + fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]["offset"]) + amp_data = np.fromfile(fid, INT32, 1)[0] + if fll_type >= 100: # Kapper Type + # gain: mask bit + gain1 = (amp_data & 0x00007000) >> 12 + gain2 = (amp_data & 0x70000000) >> 28 + gain3 = (amp_data & 0x07000000) >> 24 + amp_gain = KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3] + # filter settings + hpf = (amp_data & 0x00000700) >> 8 + lpf = (amp_data & 0x00070000) >> 16 + bef = (amp_data & 0x00000003) >> 0 + else: # Hanger Type + # gain + input_gain = (amp_data & 0x1800) >> 11 + output_gain = (amp_data & 0x0007) >> 0 + amp_gain = KIT.GAINS[input_gain] * KIT.GAINS[output_gain] + # filter settings + hpf = (amp_data & 0x0070) >> 4 + lpf = (amp_data & 0x0700) >> 8 + bef = (amp_data & 0xC000) >> 14 + hpf_options, lpf_options, bef_options = KIT.FLL_SETTINGS[fll_type] + sqd["highpass"] = KIT.HPFS[hpf_options][hpf] + sqd["lowpass"] = KIT.LPFS[lpf_options][lpf] + sqd["notch"] = KIT.BEFS[bef_options][bef] + + # + # Acquisition Parameters (8) + # + fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]["offset"]) + (sqd["acq_type"],) = (acq_type,) = np.fromfile(fid, INT32, 1) + (sqd["sfreq"],) = np.fromfile(fid, FLOAT64, 1) + if acq_type == KIT.CONTINUOUS: + # samples_count, = np.fromfile(fid, INT32, 1) + fid.seek(KIT.INT, SEEK_CUR) + (sqd["n_samples"],) = np.fromfile(fid, INT32, 1) + elif acq_type == KIT.EVOKED or acq_type == KIT.EPOCHS: + (sqd["frame_length"],) = np.fromfile(fid, INT32, 1) + (sqd["pretrigger_length"],) = np.fromfile(fid, INT32, 1) + (sqd["average_count"],) = np.fromfile(fid, INT32, 1) + (sqd["n_epochs"],) = np.fromfile(fid, INT32, 1) + if acq_type == KIT.EVOKED: + sqd["n_samples"] = sqd["frame_length"] + else: + sqd["n_samples"] = sqd["frame_length"] * sqd["n_epochs"] + else: + raise OSError( + f"Invalid acquisition type: {acq_type}. Your file is neither " + "continuous nor epoched data."
+ ) + + # + # digitization information (12 and 26) + # + dig_dir = dirs[KIT.DIR_INDEX_DIG_POINTS] + cor_dir = dirs[KIT.DIR_INDEX_COREG] + dig = dict() + hsp = list() + if dig_dir["count"] > 0 and cor_dir["count"] > 0: + # directories (0) + fid.seek(dig_dir["offset"]) + for _ in range(dig_dir["count"]): + name = _read_name(fid, n=8).strip() + # Sometimes there are mismatches (e.g., AFz vs AFZ) between + # the channel name and its digitized name, so let's be case + # insensitive. It will also prevent collisions with HSP + name = name.lower() + rr = np.fromfile(fid, FLOAT64, 3) + if name: + assert name not in dig + dig[name] = rr + else: + hsp.append(rr) + + # nasion, lpa, rpa, HPI in native space + elp = [] + for key in ( + "fidnz", + "fidt9", + "fidt10", + "hpi_1", + "hpi_2", + "hpi_3", + "hpi_4", + "hpi_5", + ): + if key in dig and np.isfinite(dig[key]).all(): + elp.append(dig.pop(key)) + elp = np.array(elp) + hsp = np.array(hsp, float).reshape(-1, 3) + if elp.shape not in ((6, 3), (7, 3), (8, 3)): + raise RuntimeError(f"Fewer than 3 HPI coils found, got {len(elp) - 3}") + # coregistration + fid.seek(cor_dir["offset"]) + mrk = np.zeros((elp.shape[0] - 3, 3)) + meg_done = [True] * 5 + for _ in range(cor_dir["count"]): + done = np.fromfile(fid, INT32, 1)[0] + fid.seek( + 16 * KIT.DOUBLE + 16 * KIT.DOUBLE, # meg_to_mri # mri_to_meg + SEEK_CUR, + ) + marker_count = np.fromfile(fid, INT32, 1)[0] + if not done: + continue + assert marker_count >= len(mrk) + for mi in range(len(mrk)): + mri_type, meg_type, mri_done, this_meg_done = np.fromfile( + fid, INT32, 4 + ) + del mri_type, meg_type, mri_done + meg_done[mi] = bool(this_meg_done) + fid.seek(3 * KIT.DOUBLE, SEEK_CUR) # mri_pos + mrk[mi] = np.fromfile(fid, FLOAT64, 3) + fid.seek(256, SEEK_CUR) # marker_file (char) + if not all(meg_done): + logger.info( + f"Keeping {sum(meg_done)}/{len(meg_done)} HPI " + "coils that were digitized" + ) + elp = elp[[True] * 3 + meg_done] + mrk = mrk[meg_done] + sqd.update(hsp=hsp, elp=elp, mrk=mrk) + + # precompute conversion factor for reading data + if unsupported_format: + if sysid not in LEGACY_AMP_PARAMS: + raise OSError(f"Legacy parameters for system ID {sysid} unavailable.") + adc_range, adc_stored = LEGACY_AMP_PARAMS[sysid] + is_meg = np.array([ch["type"] in KIT.CHANNELS_MEG for ch in channels]) + ad_to_volt = adc_range / (2.0**adc_stored) + ad_to_tesla = ad_to_volt / amp_gain * channel_gain + conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt) + # XXX this is a bit of a hack. Should probably do this more cleanly at + # some point... the 2 ** (adc_stored - 14) was empirically determined using + # the test files with known amplitudes. The conv_factors need to be + # replaced by these values otherwise we're off by a factor of 5000.0 + # for the EEG data.
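+        # In other words: EEG/ECG channels get their per-channel gain (rescaled
+        # by 2 ** (adc_stored - 14), falling back to ad_to_volt where the stored
+        # gain is 0), while all other channels keep ad_to_volt / ad_to_tesla.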
+ is_exg = [ch["type"] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG) for ch in channels] + exg_gains /= 2.0 ** (adc_stored - 14) + exg_gains[exg_gains == 0] = ad_to_volt + conv_factor[is_exg] = exg_gains + sqd["conv_factor"] = conv_factor[:, np.newaxis] + + # Create raw.info dict for raw fif object with SQD data + info = _empty_info(float(sqd["sfreq"])) + info.update( + meas_date=_stamp_to_dt((create_time, 0)), + lowpass=sqd["lowpass"], + highpass=sqd["highpass"], + kit_system_id=sysid, + description=description, + ) + + # Creates a list of dicts of meg channels for raw.info + logger.info("Setting channel info structure...") + info["chs"] = fiff_channels = [] + channel_index = defaultdict(lambda: 0) + sqd["eeg_dig"] = OrderedDict() + for idx, ch in enumerate(channels, 1): + if ch["type"] in KIT.CHANNELS_MEG: + ch_name = ch.get("name", "") + if ch_name == "" or standardize_names: + ch_name = f"MEG {idx:03d}" + # create three orthogonal vector + # ch_angles[0]: theta, ch_angles[1]: phi + theta, phi = np.radians(ch["loc"][3:]) + x = sin(theta) * cos(phi) + y = sin(theta) * sin(phi) + z = cos(theta) + vec_z = np.array([x, y, z]) + vec_z /= np.linalg.norm(vec_z) + vec_x = np.zeros(vec_z.size, dtype=np.float64) + if vec_z[1] < vec_z[2]: + if vec_z[0] < vec_z[1]: + vec_x[0] = 1.0 + else: + vec_x[1] = 1.0 + elif vec_z[0] < vec_z[2]: + vec_x[0] = 1.0 + else: + vec_x[2] = 1.0 + vec_x -= np.sum(vec_x * vec_z) * vec_z + vec_x /= np.linalg.norm(vec_x) + vec_y = np.cross(vec_z, vec_x) + # transform to Neuromag like coordinate space + vecs = np.vstack((ch["loc"][:3], vec_x, vec_y, vec_z)) + vecs = apply_trans(als_ras_trans, vecs) + unit = FIFF.FIFF_UNIT_T + loc = vecs.ravel() + else: + ch_type_label = KIT.CH_LABEL[ch["type"]] + channel_index[ch_type_label] += 1 + ch_type_index = channel_index[ch_type_label] + ch_name = ch.get("name", "") + eeg_name = ch_name.lower() + # some files have all EEG labeled as EEG + if ch_name in ("", "EEG") or standardize_names: + ch_name = f"{ch_type_label} {ch_type_index:03d}" + unit = FIFF.FIFF_UNIT_V + loc = np.zeros(12) + if eeg_name and eeg_name in dig: + loc[:3] = sqd["eeg_dig"][eeg_name] = dig[eeg_name] + fiff_channels.append( + dict( + cal=KIT.CALIB_FACTOR, + logno=idx, + scanno=idx, + range=KIT.RANGE, + unit=unit, + unit_mul=KIT.UNIT_MUL, + ch_name=ch_name, + coord_frame=FIFF.FIFFV_COORD_DEVICE, + coil_type=KIT.CH_TO_FIFF_COIL[ch["type"]], + kind=KIT.CH_TO_FIFF_KIND[ch["type"]], + loc=loc, + ) + ) + info._unlocked = False + info._update_redundant() + return info, sqd + + +def _read_name(fid, ch_type=None, n=None): + n = n if ch_type is None else KIT.CHANNEL_NAME_NCHAR[ch_type] + return fid.read(n).split(b"\x00")[0].decode("utf-8") + + +@fill_doc +def read_raw_kit( + input_fname, + mrk=None, + elp=None, + hsp=None, + stim=">", + slope="-", + stimthresh=1, + preload=False, + stim_code="binary", + allow_unknown_format=False, + standardize_names=False, + *, + bad_coils=(), + verbose=None, +) -> RawKIT: + r"""Reader function for Ricoh/KIT conversion to FIF. + + Parameters + ---------- + input_fname : path-like + Path to the SQD file. + %(kit_mrk)s + %(kit_elp)s + %(kit_hsp)s + %(kit_stim)s + %(kit_slope)s + %(kit_stimthresh)s + %(preload)s + %(kit_stimcode)s + allow_unknown_format : bool + Force reading old data that is not officially supported. Alternatively, + read and re-save the data with the KIT MEG Laboratory application. + %(standardize_names)s + %(kit_badcoils)s + %(verbose)s + + Returns + ------- + raw : instance of RawKIT + A Raw object containing KIT data. 
+ See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawKIT. + + Notes + ----- + ``elp`` and ``hsp`` are usually the exported text files (\*.txt) from the + Polhemus FastScan system. ``hsp`` refers to the headshape surface points. + ``elp`` refers to the points in head-space that corresponds to the HPI + points. + + If ``mrk``\, ``hsp`` or ``elp`` are :term:`array_like` inputs, then the + numbers in xyz coordinates should be in units of meters. + """ + return RawKIT( + input_fname=input_fname, + mrk=mrk, + elp=elp, + hsp=hsp, + stim=stim, + slope=slope, + stimthresh=stimthresh, + preload=preload, + stim_code=stim_code, + allow_unknown_format=allow_unknown_format, + standardize_names=standardize_names, + bad_coils=bad_coils, + verbose=verbose, + ) + + +@fill_doc +def read_epochs_kit( + input_fname, + events, + event_id=None, + mrk=None, + elp=None, + hsp=None, + allow_unknown_format=False, + standardize_names=False, + verbose=None, +) -> EpochsKIT: + """Reader function for Ricoh/KIT epochs files. + + Parameters + ---------- + input_fname : path-like + Path to the SQD file. + events : array of int, shape (n_events, 3) | path-like + The array of :term:`events`. The first column contains the event time + in samples, with :term:`first_samp` included. The third column contains + the event id. If a path, must yield a ``.txt`` file containing the + events. + If some events don't match the events of interest as specified by + ``event_id``, they will be marked as ``IGNORED`` in the drop log. + %(event_id)s + %(kit_mrk)s + %(kit_elp)s + %(kit_hsp)s + allow_unknown_format : bool + Force reading old data that is not officially supported. Alternatively, + read and re-save the data with the KIT MEG Laboratory application. + %(standardize_names)s + %(verbose)s + + Returns + ------- + EpochsKIT : instance of BaseEpochs + The epochs. + + See Also + -------- + mne.Epochs : Documentation of attributes and methods. + + Notes + ----- + .. versionadded:: 0.9.0 + """ + epochs = EpochsKIT( + input_fname=input_fname, + events=events, + event_id=event_id, + mrk=mrk, + elp=elp, + hsp=hsp, + allow_unknown_format=allow_unknown_format, + standardize_names=standardize_names, + verbose=verbose, + ) + return epochs diff --git a/mne/io/nedf/__init__.py b/mne/io/nedf/__init__.py new file mode 100644 index 0000000..fe67ee7 --- /dev/null +++ b/mne/io/nedf/__init__.py @@ -0,0 +1,7 @@ +"""NEDF file import module.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .nedf import read_raw_nedf, _parse_nedf_header diff --git a/mne/io/nedf/nedf.py b/mne/io/nedf/nedf.py new file mode 100644 index 0000000..bd4054f --- /dev/null +++ b/mne/io/nedf/nedf.py @@ -0,0 +1,229 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +"""Import NeuroElectrics DataFormat (NEDF) files.""" + +from copy import deepcopy +from datetime import datetime, timezone + +import numpy as np + +from ..._fiff.meas_info import create_info +from ..._fiff.utils import _mult_cal_one +from ...utils import _check_fname, _soft_import, verbose, warn +from ..base import BaseRaw + + +def _getsubnodetext(node, name): + """Get an element from an XML node, raise an error otherwise. 
+ + Parameters + ---------- + node: Element + XML Element + name: str + Child element name + + Returns + ------- + test: str + Text contents of the child nodes + """ + subnode = node.findtext(name) + if not subnode: + raise RuntimeError("NEDF header " + name + " not found") + return subnode + + +def _parse_nedf_header(header): + """Read header information from the first 10kB of an .nedf file. + + Parameters + ---------- + header : bytes + Null-terminated header data, mostly the file's first 10240 bytes. + + Returns + ------- + info : dict + A dictionary with header information. + dt : numpy.dtype + Structure of the binary EEG/accelerometer/trigger data in the file. + n_samples : int + The number of data samples. + """ + defusedxml = _soft_import("defusedxml", "reading NEDF data") + info = {} + # nedf files have three accelerometer channels sampled at 100Hz followed + # by five EEG samples + TTL trigger sampled at 500Hz + # For 32 EEG channels and no stim channels, the data layout may look like + # [ ('acc', '>u2', (3,)), + # ('data', dtype([ + # ('eeg', 'u1', (32, 3)), + # ('trig', '>i4', (1,)) + # ]), (5,)) + # ] + + dt = [] # dtype for the binary data block + datadt = [] # dtype for a single EEG sample + + headerend = header.find(b"\0") + if headerend == -1: + raise RuntimeError("End of header null not found") + headerxml = defusedxml.ElementTree.fromstring(header[:headerend]) + nedfversion = headerxml.findtext("NEDFversion", "") + if nedfversion not in ["1.3", "1.4"]: + warn("NEDFversion unsupported, use with caution") + + if headerxml.findtext("stepDetails/DeviceClass", "") == "STARSTIM": + warn("Found Starstim, this hasn't been tested extensively!") + + if headerxml.findtext("AdditionalChannelStatus", "OFF") != "OFF": + raise RuntimeError("Unknown additional channel, aborting.") + + n_acc = int(headerxml.findtext("NumberOfChannelsOfAccelerometer", 0)) + if n_acc: + # expect one sample of u16 accelerometer data per block + dt.append(("acc", ">u2", (n_acc,))) + + eegset = headerxml.find("EEGSettings") + if eegset is None: + raise RuntimeError("No EEG channels found") + nchantotal = int(_getsubnodetext(eegset, "TotalNumberOfChannels")) + info["nchan"] = nchantotal + + info["sfreq"] = int(_getsubnodetext(eegset, "EEGSamplingRate")) + info["ch_names"] = [e.text for e in eegset.find("EEGMontage")] + if nchantotal != len(info["ch_names"]): + raise RuntimeError( + f"TotalNumberOfChannels ({nchantotal}) != " + f"channel count ({len(info['ch_names'])})" + ) + # expect nchantotal uint24s + datadt.append(("eeg", "B", (nchantotal, 3))) + + if headerxml.find("STIMSettings") is not None: + # 2* -> two stim samples per eeg sample + datadt.append(("stim", "B", (2, nchantotal, 3))) + warn("stim channels are currently ignored") + + # Trigger data: 4 bytes in newer versions, 1 byte in older versions + trigger_type = ">i4" if headerxml.findtext("NEDFversion") else "B" + datadt.append(("trig", trigger_type)) + # 5 data samples per block + dt.append(("data", np.dtype(datadt), (5,))) + + date = headerxml.findtext("StepDetails/StartDate_firstEEGTimestamp", 0) + info["meas_date"] = datetime.fromtimestamp(int(date) / 1000, timezone.utc) + + n_samples = int(_getsubnodetext(eegset, "NumberOfRecordsOfEEG")) + n_full, n_last = divmod(n_samples, 5) + dt_last = deepcopy(dt) + assert dt_last[-1][-1] == (5,) + dt_last[-1] = list(dt_last[-1]) + dt_last[-1][-1] = (n_last,) + dt_last[-1] = tuple(dt_last[-1]) + return info, np.dtype(dt), np.dtype(dt_last), n_samples, n_full + + +# the first 10240 bytes are header in XML 
format, padded with NULL bytes +_HDRLEN = 10240 + + +class RawNedf(BaseRaw): + """Raw object from NeuroElectrics nedf file.""" + + def __init__(self, filename, preload=False, verbose=None): + filename = str(_check_fname(filename, "read", True, "filename")) + with open(filename, mode="rb") as fid: + header = fid.read(_HDRLEN) + header, dt, dt_last, n_samp, n_full = _parse_nedf_header(header) + ch_names = header["ch_names"] + ["STI 014"] + ch_types = ["eeg"] * len(ch_names) + ch_types[-1] = "stim" + info = create_info(ch_names, header["sfreq"], ch_types) + # scaling factor ADC-values -> volts + # taken from the NEDF EEGLAB plugin + # (https://www.neuroelectrics.com/resources/software/): + for ch in info["chs"][:-1]: + ch["cal"] = 2.4 / (6.0 * 8388607) + with info._unlock(): + info["meas_date"] = header["meas_date"] + raw_extra = dict(dt=dt, dt_last=dt_last, n_full=n_full) + super().__init__( + info, + preload=preload, + filenames=[filename], + verbose=verbose, + raw_extras=[raw_extra], + last_samps=[n_samp - 1], + ) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + dt = self._raw_extras[fi]["dt"] + dt_last = self._raw_extras[fi]["dt_last"] + n_full = self._raw_extras[fi]["n_full"] + n_eeg = dt[1].subdtype[0][0].shape[0] + # data is stored in 5-sample chunks (except maybe the last one!) + # so we have to do some gymnastics to pick the correct parts to + # read + offset = start // 5 * dt.itemsize + _HDRLEN + start_sl = start % 5 + n_samples = stop - start + n_samples_full = min(stop, n_full * 5) - start + last = None + n_chunks = (n_samples_full - 1) // 5 + 1 + n_tot = n_chunks * 5 + with open(self.filenames[fi], "rb") as fid: + fid.seek(offset, 0) + chunks = np.fromfile(fid, dtype=dt, count=n_chunks) + assert len(chunks) == n_chunks + if n_samples != n_samples_full: + last = np.fromfile(fid, dtype=dt_last, count=1) + eeg = _convert_eeg(chunks, n_eeg, n_tot) + trig = chunks["data"]["trig"].reshape(1, n_tot) + if last is not None: + n_last = dt_last["data"].shape[0] + eeg = np.concatenate((eeg, _convert_eeg(last, n_eeg, n_last)), axis=-1) + trig = np.concatenate( + (trig, last["data"]["trig"].reshape(1, n_last)), axis=-1 + ) + one_ = np.concatenate((eeg, trig)) + one = one_[:, start_sl : n_samples + start_sl] + _mult_cal_one(data, one, idx, cals, mult) + + +def _convert_eeg(chunks, n_eeg, n_tot): + # convert uint8-triplet -> int32 + eeg = chunks["data"]["eeg"] @ np.array([1 << 16, 1 << 8, 1]) + # convert sign if necessary + eeg[eeg > (1 << 23)] -= 1 << 24 + eeg = eeg.reshape((n_tot, n_eeg)).T + return eeg + + +@verbose +def read_raw_nedf(filename, preload=False, verbose=None) -> RawNedf: + """Read NeuroElectrics .nedf files. + + NEDF file versions starting from 1.3 are supported. + + Parameters + ---------- + filename : path-like + Path to the ``.nedf`` file. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawNedf + A Raw object containing NEDF data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawNedf. + """ + return RawNedf(filename, preload, verbose) diff --git a/mne/io/neuralynx/__init__.py b/mne/io/neuralynx/__init__.py new file mode 100644 index 0000000..f302a22 --- /dev/null +++ b/mne/io/neuralynx/__init__.py @@ -0,0 +1,5 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
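A note on `_convert_eeg` above: NEDF stores each EEG sample as a big-endian unsigned 24-bit triplet, and the matrix product plus the two's-complement fixup turns those triplets into signed integers. A self-contained sketch of the same arithmetic (the byte values are made up for illustration):

```python
import numpy as np

# Each sample is 3 bytes (b0, b1, b2) encoding b0*2**16 + b1*2**8 + b2,
# interpreted as a two's-complement 24-bit integer.
triplets = np.array([[0x00, 0x00, 0x01],    # +1
                     [0xFF, 0xFF, 0xFF]],   # -1 after sign correction
                    dtype=np.uint8)
vals = triplets @ np.array([1 << 16, 1 << 8, 1])
vals[vals > (1 << 23)] -= 1 << 24           # fold the upper half to negatives
print(vals)  # [ 1 -1]
```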
+ +from .neuralynx import read_raw_neuralynx diff --git a/mne/io/neuralynx/neuralynx.py b/mne/io/neuralynx/neuralynx.py new file mode 100644 index 0000000..2b9bed8 --- /dev/null +++ b/mne/io/neuralynx/neuralynx.py @@ -0,0 +1,426 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import datetime +import glob +import inspect +import os + +import numpy as np + +from ..._fiff.meas_info import create_info +from ..._fiff.utils import _mult_cal_one +from ...annotations import Annotations +from ...utils import _check_fname, _soft_import, fill_doc, logger, verbose +from ..base import BaseRaw + + +@fill_doc +def read_raw_neuralynx( + fname, *, preload=False, exclude_fname_patterns=None, verbose=None +) -> "RawNeuralynx": + """Reader for Neuralynx files. + + Parameters + ---------- + fname : path-like + Path to a folder with Neuralynx .ncs files. + %(preload)s + exclude_fname_patterns : list of str + List of glob-like string patterns to exclude from channel list. + Useful when not all channels have the same number of samples + so you can read separate instances. + %(verbose)s + + Returns + ------- + raw : instance of RawNeuralynx + A Raw object containing Neuralynx data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawNeuralynx. + + Notes + ----- + Neuralynx files are read from disk using the `Neo package + `__. + Currently, only reading of ``.ncs`` files is supported. + + ``raw.info["meas_date"]`` is read from the ``recording_opened`` property + of the first ``.ncs`` file (i.e. channel) in the dataset (a warning is issued + if files have different dates of acquisition). + + Channel-specific high and lowpass frequencies of online filters are determined + based on the ``DspLowCutFrequency`` and ``DspHighCutFrequency`` header fields, + respectively. If no filters were used for a channel, the default lowpass is set + to the Nyquist frequency and the default highpass is set to 0. + If channels have different high/low cutoffs, ``raw.info["highpass"]`` and + ``raw.info["lowpass"]`` are then set to the maximum highpass and minimum lowpass + values across channels, respectively. + + Other header variables can be inspected using Neo directly.
For example:: + + from neo.io import NeuralynxIO # doctest: +SKIP + fname = 'path/to/your/data' # doctest: +SKIP + nlx_reader = NeuralynxIO(dirname=fname) # doctest: +SKIP + print(nlx_reader.header) # doctest: +SKIP + print(nlx_reader.file_headers.items()) # doctest: +SKIP + """ + return RawNeuralynx( + fname, + preload=preload, + exclude_fname_patterns=exclude_fname_patterns, + verbose=verbose, + ) + + +# Helper for neo deprecation of exclude_filename -> exclude_filenames in 0.13.2 +def _exclude_kwarg(exclude_fnames): + from neo.io import NeuralynxIO + + key = "exclude_filename" + if "exclude_filenames" in inspect.getfullargspec(NeuralynxIO).args: + key += "s" + return {key: exclude_fnames} + + +@fill_doc +class RawNeuralynx(BaseRaw): + """RawNeuralynx class.""" + + @verbose + def __init__( + self, + fname, + *, + preload=False, + exclude_fname_patterns=None, + verbose=None, + ): + fname = _check_fname(fname, "read", True, "fname", need_dir=True) + + _soft_import("neo", "Reading NeuralynxIO files", strict=True) + from neo.io import NeuralynxIO + + logger.info(f"Checking files in {fname}") + + # construct a list of filenames to ignore + exclude_fnames = None + if exclude_fname_patterns: + exclude_fnames = [] + for pattern in exclude_fname_patterns: + fnames = glob.glob(os.path.join(fname, pattern)) + fnames = [os.path.basename(fname) for fname in fnames] + exclude_fnames.extend(fnames) + + logger.info("Ignoring .ncs files:\n" + "\n".join(exclude_fnames)) + + # get basic file info from header, throw Error if NeuralynxIO can't parse + try: + nlx_reader = NeuralynxIO(dirname=fname, **_exclude_kwarg(exclude_fnames)) + except ValueError as e: + # give a more informative error message and what the user can do about it + if "Incompatible section structures across streams" in str(e): + raise ValueError( + "It seems .ncs channels have different numbers of samples. " + + "This is likely due to different sampling rates. " + + "Try reading in only channels with uniform sampling rate " + + "by excluding other channels with `exclude_fname_patterns` " + + "input argument." + + f"\nOriginal neo.NeuralynxRawIO ValueError:\n{e}" + ) from None + else: + raise + + info = create_info( + ch_types="seeg", + ch_names=nlx_reader.header["signal_channels"]["name"].tolist(), + sfreq=nlx_reader.get_signal_sampling_rate(), + ) + + ncs_fnames = nlx_reader.ncs_filenames.values() + ncs_hdrs = [ + hdr + for hdr_key, hdr in nlx_reader.file_headers.items() + if hdr_key in ncs_fnames + ] + + # if all files have the same recording_opened date, write it to info + meas_dates = np.array([hdr["recording_opened"] for hdr in ncs_hdrs]) + # to be sure, only write if all dates are the same + meas_diff = [] + for md in meas_dates: + meas_diff.append((md - meas_dates[0]).total_seconds()) + + # tolerate a +/-1 second meas_date difference (arbitrary threshold) + # else issue a warning + warn_meas = (np.abs(meas_diff) > 1.0).any() + if warn_meas: + logger.warning( + "Not all .ncs files have the same recording_opened date. " + + "Writing meas_date based on the first .ncs file." + ) + + # Neuralynx allows channel-specific low/highpass filters + # if not enabled, assume default lowpass = nyquist, highpass = 0 + default_lowpass = info["sfreq"] / 2 # nyquist + default_highpass = 0 + + has_hp = [hdr["DSPLowCutFilterEnabled"] for hdr in ncs_hdrs] + has_lp = [hdr["DSPHighCutFilterEnabled"] for hdr in ncs_hdrs] + if not all(has_hp) or not all(has_lp): + logger.warning( + "Not all .ncs files have the same high/lowpass filter settings.
" + + "Assuming default highpass = 0, lowpass = nyquist." + ) + + highpass_freqs = [ + float(hdr["DspLowCutFrequency"]) + if hdr["DSPLowCutFilterEnabled"] + else default_highpass + for hdr in ncs_hdrs + ] + + lowpass_freqs = [ + float(hdr["DspHighCutFrequency"]) + if hdr["DSPHighCutFilterEnabled"] + else default_lowpass + for hdr in ncs_hdrs + ] + + with info._unlock(): + info["meas_date"] = meas_dates[0].astimezone(datetime.timezone.utc) + info["highpass"] = np.max(highpass_freqs) + info["lowpass"] = np.min(lowpass_freqs) + + # Neo reads only valid contiguous .ncs samples grouped as segments + n_segments = nlx_reader.header["nb_segment"][0] + block_id = 0 # assumes there's only one block of recording + + # get segment start/stop times + start_times = np.array( + [nlx_reader.segment_t_start(block_id, i) for i in range(n_segments)] + ) + stop_times = np.array( + [nlx_reader.segment_t_stop(block_id, i) for i in range(n_segments)] + ) + + # find discontinuous boundaries (of length n-1) + next_start_times = start_times[1::] + previous_stop_times = stop_times[:-1] + seg_diffs = next_start_times - previous_stop_times + + # mark as discontinuous any two segments that have + # start/stop delta larger than sampling period (1.5/sampling_rate) + logger.info("Checking for temporal discontinuities in Neo data segments.") + delta = 1.5 / info["sfreq"] + gaps = seg_diffs > delta + + seg_gap_dict = {} + + logger.info( + f"N = {gaps.sum()} discontinuous Neo segments detected " + + f"with delta > {delta} sec. " + + "Annotating gaps as BAD_ACQ_SKIP." + if gaps.any() + else "No discontinuities detected." + ) + + gap_starts = stop_times[:-1][gaps] # gap starts at segment offset + gap_stops = start_times[1::][gaps] # gap stops at segment onset + + # (n_gaps,) array of ints giving number of samples per inferred gap + gap_n_samps = np.array( + [ + int(round(stop * info["sfreq"])) - int(round(start * info["sfreq"])) + for start, stop in zip(gap_starts, gap_stops) + ] + ).astype(int) # force an int array (if no gaps, empty array is a float) + + # get sort indices for all segments (valid and gap) in ascending order + all_starts_ids = np.argsort(np.concatenate([start_times, gap_starts])) + + # variable indicating whether each segment is a gap or not + gap_indicator = np.concatenate( + [ + np.full(len(start_times), fill_value=0), + np.full(len(gap_starts), fill_value=1), + ] + ) + gap_indicator = gap_indicator[all_starts_ids].astype(bool) + + # store this in a dict to be passed to _raw_extras + seg_gap_dict = { + "gap_n_samps": gap_n_samps, + "isgap": gap_indicator, # False (data segment) or True (gap segment) + } + + valid_segment_sizes = [ + nlx_reader.get_signal_size(block_id, i) for i in range(n_segments) + ] + + sizes_sorted = np.concatenate([valid_segment_sizes, gap_n_samps])[ + all_starts_ids + ] + + # now construct an (n_samples,) indicator variable + sample2segment = np.concatenate( + [np.full(shape=(n,), fill_value=i) for i, n in enumerate(sizes_sorted)] + ) + + # get the start sample index for each gap segment () + gap_start_ids = np.cumsum(np.hstack([[0], sizes_sorted[:-1]]))[gap_indicator] + + # recreate time axis for gap annotations + mne_times = np.arange(0, len(sample2segment)) / info["sfreq"] + + assert len(gap_start_ids) == len(gap_n_samps) + annotations = Annotations( + onset=[mne_times[onset_id] for onset_id in gap_start_ids], + duration=[ + mne_times[onset_id + (n - 1)] - mne_times[onset_id] + for onset_id, n in zip(gap_start_ids, gap_n_samps) + ], + description=["BAD_ACQ_SKIP"] * 
len(gap_start_ids), + ) + + super().__init__( + info=info, + last_samps=[sizes_sorted.sum() - 1], + filenames=[fname], + preload=preload, + raw_extras=[ + dict( + smp2seg=sample2segment, + exclude_fnames=exclude_fnames, + segment_sizes=sizes_sorted, + seg_gap_dict=seg_gap_dict, + ) + ], + ) + + self.set_annotations(annotations) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + from neo import AnalogSignal, Segment + from neo.io import NeuralynxIO + from neo.io.proxyobjects import AnalogSignalProxy + + # quantities is a dependency of neo so we are guaranteed it exists + from quantities import Hz + + nlx_reader = NeuralynxIO( + dirname=self.filenames[fi], + **_exclude_kwarg(self._raw_extras[0]["exclude_fnames"]), + ) + neo_block = nlx_reader.read(lazy=True) + + # check that every segment has 1 associated neo.AnalogSignal() object + # (not sure what multiple analogsignals per neo.Segment would mean) + assert sum( + [len(segment.analogsignals) for segment in neo_block[0].segments] + ) == len(neo_block[0].segments) + + segment_sizes = self._raw_extras[fi]["segment_sizes"] + + # construct a (n_segments, 2) array of the first and last + # sample index for each segment relative to the start of the recording + seg_starts = [0] # first chunk starts at sample 0 + seg_stops = [segment_sizes[0] - 1] + for i in range(1, len(segment_sizes)): + ons_new = ( + seg_stops[i - 1] + 1 + ) # current chunk starts one sample after the previous one + seg_starts.append(ons_new) + off_new = ( + seg_stops[i - 1] + segment_sizes[i] + ) # the last sample is len(chunk) samples after the previous ended + seg_stops.append(off_new) + + start_stop_samples = np.stack([np.array(seg_starts), np.array(seg_stops)]).T + + first_seg = self._raw_extras[0]["smp2seg"][ + start + ] # segment containing start sample + last_seg = self._raw_extras[0]["smp2seg"][ + stop - 1 + ] # segment containing stop sample + + # select all segments between the one that contains the start sample + # and the one that contains the stop sample + sel_samples_global = start_stop_samples[first_seg : last_seg + 1, :] + + # express end samples relative to segment onsets + # to be used for slicing the arrays below + sel_samples_local = sel_samples_global.copy() + sel_samples_local[0:-1, 1] = ( + sel_samples_global[0:-1, 1] - sel_samples_global[0:-1, 0] + ) + sel_samples_local[1::, 0] = ( + 0 # now set the start sample for all segments after the first to 0 + ) + + sel_samples_local[0, 0] = ( + start - sel_samples_global[0, 0] + ) # express start sample relative to segment onset + sel_samples_local[-1, -1] = (stop - 1) - sel_samples_global[ + -1, 0 + ] # express stop sample relative to segment onset + + # array containing Segments + segments_arr = np.array(neo_block[0].segments, dtype=object) + + # if gaps were detected, correctly insert gap Segments in between valid Segments + gap_samples = self._raw_extras[fi]["seg_gap_dict"]["gap_n_samps"] + gap_segments = [Segment(f"gap-{i}") for i in range(len(gap_samples))] + + # create AnalogSignal objects representing gap data filled with 0's + sfreq = nlx_reader.get_signal_sampling_rate() + n_chans = ( + np.arange(idx.start, idx.stop, idx.step).size + if type(idx) is slice + else len(idx) # idx can be a slice or an np.array so check both + ) + + for seg, n in zip(gap_segments, gap_samples): + asig = AnalogSignal( + signal=np.zeros((n, n_chans)), units="uV", sampling_rate=sfreq * Hz + ) + seg.analogsignals.append(asig) + + n_total_segments = 
len(neo_block[0].segments + gap_segments) + segments_arr = np.zeros((n_total_segments,), dtype=object) + + # insert inferred gap segments at the right place in between valid segments + isgap = self._raw_extras[0]["seg_gap_dict"]["isgap"] + segments_arr[~isgap] = neo_block[0].segments + segments_arr[isgap] = gap_segments + + # now load data for selected segments/channels via + # neo.Segment.AnalogSignalProxy.load() or + # pad directly as AnalogSignal.magnitude for any gap data + all_data = np.concatenate( + [ + signal.load(channel_indexes=idx).magnitude[ + samples[0] : samples[-1] + 1, : + ] + if isinstance(signal, AnalogSignalProxy) + else signal.magnitude[samples[0] : samples[-1] + 1, :] + for seg, samples in zip( + segments_arr[first_seg : last_seg + 1], sel_samples_local + ) + for signal in seg.analogsignals + ] + ).T + + all_data *= 1e-6 # Convert uV to V + n_channels = len(nlx_reader.header["signal_channels"]["name"]) + block = np.zeros((n_channels, stop - start), dtype=data.dtype) + block[idx] = all_data # shape = (n_channels, n_samples) + + # Then store the result where it needs to go + _mult_cal_one(data, block, idx, cals, mult) diff --git a/mne/io/nicolet/__init__.py b/mne/io/nicolet/__init__.py new file mode 100644 index 0000000..6e7ad05 --- /dev/null +++ b/mne/io/nicolet/__init__.py @@ -0,0 +1,7 @@ +"""Nicolet module for conversion to FIF.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .nicolet import read_raw_nicolet diff --git a/mne/io/nicolet/nicolet.py b/mne/io/nicolet/nicolet.py new file mode 100644 index 0000000..f55cd77 --- /dev/null +++ b/mne/io/nicolet/nicolet.py @@ -0,0 +1,201 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import calendar +import datetime +from os import path + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.utils import _create_chs, _find_channels, _read_segments_file +from ...utils import fill_doc, logger +from ..base import BaseRaw + + +@fill_doc +def read_raw_nicolet( + input_fname, ch_type, eog=(), ecg=(), emg=(), misc=(), preload=False, verbose=None +) -> "RawNicolet": + """Read Nicolet data as raw object. + + .. note:: This reader takes data files with the extension ``.data`` as an + input. The header file with the same file name stem and an + extension ``.head`` is expected to be found in the same + directory. + + Parameters + ---------- + input_fname : path-like + Path to the data file (ending with ``.data``, not ``.head``). + ch_type : str + Channel type to designate to the data channels. Supported data types + include ``'eeg'``, ``'seeg'``. + eog : list | tuple | ``'auto'`` + Names of channels or list of indices that should be designated + EOG channels. If ``'auto'``, the channel names beginning with + ``EOG`` are used. Defaults to empty tuple. + ecg : list or tuple | ``'auto'`` + Names of channels or list of indices that should be designated + ECG channels. If ``'auto'``, the channel names beginning with + ``ECG`` are used. Defaults to empty tuple. + emg : list or tuple | ``'auto'`` + Names of channels or list of indices that should be designated + EMG channels. If ``'auto'``, the channel names beginning with + ``EMG`` are used. Defaults to empty tuple. + misc : list or tuple + Names of channels or list of indices that should be designated + MISC channels. Defaults to empty tuple.
+ %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of Raw + A Raw object containing the data. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + """ + return RawNicolet( + input_fname, + ch_type, + eog=eog, + ecg=ecg, + emg=emg, + misc=misc, + preload=preload, + verbose=verbose, + ) + + +def _get_nicolet_info(fname, ch_type, eog, ecg, emg, misc): + """Extract info from Nicolet header files.""" + fname, extension = path.splitext(fname) + + if extension != ".data": + raise ValueError(f'File name should end with .data not "{extension}".') + + header = fname + ".head" + + logger.info("Reading header...") + header_info = dict() + with open(header) as fid: + for line in fid: + var, value = line.split("=") + if var == "elec_names": + value = value[1:-2].split(",") # strip brackets + elif var == "conversion_factor": + value = float(value) + elif var in ["num_channels", "rec_id", "adm_id", "pat_id", "num_samples"]: + value = int(value) + elif var != "start_ts": + value = float(value) + header_info[var] = value + + ch_names = header_info["elec_names"] + if eog == "auto": + eog = _find_channels(ch_names, "EOG") + if ecg == "auto": + ecg = _find_channels(ch_names, "ECG") + if emg == "auto": + emg = _find_channels(ch_names, "EMG") + + date, time = header_info["start_ts"].split() + date = date.split("-") + time = time.split(":") + sec, msec = time[2].split(".") + date = datetime.datetime( + int(date[0]), + int(date[1]), + int(date[2]), + int(time[0]), + int(time[1]), + int(sec), + int(msec), + ) + info = _empty_info(header_info["sample_freq"]) + info["meas_date"] = (calendar.timegm(date.utctimetuple()), 0) + + if ch_type == "eeg": + ch_coil = FIFF.FIFFV_COIL_EEG + ch_kind = FIFF.FIFFV_EEG_CH + elif ch_type == "seeg": + ch_coil = FIFF.FIFFV_COIL_EEG + ch_kind = FIFF.FIFFV_SEEG_CH + else: + raise TypeError( + "Channel type not recognized. Available types are 'eeg' and 'seeg'." + ) + cals = np.repeat(header_info["conversion_factor"] * 1e-6, len(ch_names)) + info["chs"] = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc) + info["highpass"] = 0.0 + info["lowpass"] = info["sfreq"] / 2.0 + info._unlocked = False + info._update_redundant() + return info, header_info + + +class RawNicolet(BaseRaw): + """Raw object from Nicolet file. + + Parameters + ---------- + input_fname : path-like + Path to the Nicolet file. + ch_type : str + Channel type to designate to the data channels. Supported data types + include ``'eeg'``, ``'seeg'``. + eog : list | tuple | ``'auto'`` + Names of channels or list of indices that should be designated + EOG channels. If ``'auto'``, the channel names beginning with + ``EOG`` are used. Defaults to empty tuple. + ecg : list or tuple | ``'auto'`` + Names of channels or list of indices that should be designated + ECG channels. If ``'auto'``, the channel names beginning with + ``ECG`` are used. Defaults to empty tuple. + emg : list or tuple | ``'auto'`` + Names of channels or list of indices that should be designated + EMG channels. If ``'auto'``, the channel names beginning with + ``EMG`` are used. Defaults to empty tuple. + misc : list or tuple + Names of channels or list of indices that should be designated + MISC channels. Defaults to empty tuple. + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. 
+ """ + + def __init__( + self, + input_fname, + ch_type, + eog=(), + ecg=(), + emg=(), + misc=(), + preload=False, + verbose=None, + ): + input_fname = path.abspath(input_fname) + info, header_info = _get_nicolet_info(input_fname, ch_type, eog, ecg, emg, misc) + last_samps = [header_info["num_samples"] - 1] + super().__init__( + info, + preload, + filenames=[input_fname], + raw_extras=[header_info], + last_samps=last_samps, + orig_format="int", + verbose=verbose, + ) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype=" "RawNihon": + """Reader for an Nihon Kohden EEG file. + + Parameters + ---------- + fname : path-like + Path to the Nihon Kohden data file (``.EEG``). + preload : bool + If True, all data are loaded at initialization. + %(verbose)s + + Returns + ------- + raw : instance of RawNihon + A Raw object containing Nihon Kohden data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawNihon. + """ + return RawNihon(fname, preload, verbose) + + +_valid_headers = [ + "EEG-1100A V01.00", + "EEG-1100B V01.00", + "EEG-1100C V01.00", + "QI-403A V01.00", + "QI-403A V02.00", + "EEG-2100 V01.00", + "EEG-2100 V02.00", + "DAE-2100D V01.30", + "DAE-2100D V02.00", + # 'EEG-1200A V01.00', # Not working for the moment. +] + + +def _read_nihon_metadata(fname): + metadata = {} + fname = _ensure_path(fname) + pnt_fname = fname.with_suffix(".PNT") + if not pnt_fname.exists(): + warn("No PNT file exists. Metadata will be blank") + return metadata + logger.info("Found PNT file, reading metadata.") + with open(pnt_fname) as fid: + version = np.fromfile(fid, "|S16", 1).astype("U16")[0] + if version not in _valid_headers: + raise ValueError(f"Not a valid Nihon Kohden PNT file ({version})") + metadata["version"] = version + + # Read timestamp + fid.seek(0x40) + meas_str = np.fromfile(fid, "|S14", 1).astype("U14")[0] + meas_date = datetime.strptime(meas_str, "%Y%m%d%H%M%S") + meas_date = meas_date.replace(tzinfo=timezone.utc) + metadata["meas_date"] = meas_date + + return metadata + + +_default_chan_labels = [ + "FP1", + "FP2", + "F3", + "F4", + "C3", + "C4", + "P3", + "P4", + "O1", + "O2", + "F7", + "F8", + "T3", + "T4", + "T5", + "T6", + "FZ", + "CZ", + "PZ", + "E", + "PG1", + "PG2", + "A1", + "A2", + "T1", + "T2", +] +_default_chan_labels += [f"X{i}" for i in range(1, 12)] +_default_chan_labels += [f"NA{i}" for i in range(1, 6)] +_default_chan_labels += [f"DC{i:02}" for i in range(1, 33)] +_default_chan_labels += ["BN1", "BN2", "Mark1", "Mark2"] +_default_chan_labels += [f"NA{i}" for i in range(6, 28)] +_default_chan_labels += ["X12/BP1", "X13/BP2", "X14/BP3", "X15/BP4"] +_default_chan_labels += [f"X{i}" for i in range(16, 166)] +_default_chan_labels += ["NA28", "Z"] + +_encodings = ("utf-8", "latin1") + + +def _read_21e_file(fname): + fname = _ensure_path(fname) + e_fname = fname.with_suffix(".21E") + _chan_labels = [x for x in _default_chan_labels] + if e_fname.exists(): + # Read the 21E file and update the labels accordingly. 
+ logger.info("Found 21E file, reading channel names.") + for enc in _encodings: + try: + with open(e_fname, encoding=enc) as fid: + keep_parsing = False + for line in fid: + if line.startswith("["): + if "ELECTRODE" in line or "REFERENCE" in line: + keep_parsing = True + else: + keep_parsing = False + elif keep_parsing is True: + idx, name = line.split("=") + idx = int(idx) + if idx >= len(_chan_labels): + n = idx - len(_chan_labels) + 1 + _chan_labels.extend(["UNK"] * n) + _chan_labels[idx] = name.strip() + except UnicodeDecodeError: + pass + else: + break + else: + warn( + f"Could not decode 21E file as one of {_encodings}; " + f"Default channel names are chosen." + ) + + return _chan_labels + + +def _read_nihon_header(fname): + # Read the Nihon Kohden EEG file header + fname = _ensure_path(fname) + _chan_labels = _read_21e_file(fname) + header = {} + logger.info(f"Reading header from {fname}") + with open(fname) as fid: + version = np.fromfile(fid, "|S16", 1).astype("U16")[0] + if version not in _valid_headers: + raise ValueError(f"Not a valid Nihon Kohden EEG file ({version})") + + fid.seek(0x0081) + control_block = np.fromfile(fid, "|S16", 1).astype("U16")[0] + if control_block not in _valid_headers: + raise ValueError( + f"Not a valid Nihon Kohden EEG file (control block {version})" + ) + + fid.seek(0x17FE) + waveform_sign = np.fromfile(fid, np.uint8, 1)[0] + if waveform_sign != 1: + raise ValueError("Not a valid Nihon Kohden EEG file (waveform block)") + header["version"] = version + + fid.seek(0x0091) + n_ctlblocks = np.fromfile(fid, np.uint8, 1)[0] + header["n_ctlblocks"] = n_ctlblocks + controlblocks = [] + for i_ctl_block in range(n_ctlblocks): + t_controlblock = {} + fid.seek(0x0092 + i_ctl_block * 20) + t_ctl_address = np.fromfile(fid, np.uint32, 1)[0] + t_controlblock["address"] = t_ctl_address + fid.seek(t_ctl_address + 17) + n_datablocks = np.fromfile(fid, np.uint8, 1)[0] + t_controlblock["n_datablocks"] = n_datablocks + t_controlblock["datablocks"] = [] + for i_data_block in range(n_datablocks): + t_datablock = {} + fid.seek(t_ctl_address + i_data_block * 20 + 18) + t_data_address = np.fromfile(fid, np.uint32, 1)[0] + t_datablock["address"] = t_data_address + + fid.seek(t_data_address + 0x26) + t_n_channels = np.fromfile(fid, np.uint8, 1)[0].astype(np.int64) + t_datablock["n_channels"] = t_n_channels + + t_channels = [] + for i_ch in range(t_n_channels): + fid.seek(t_data_address + 0x27 + (i_ch * 10)) + t_idx = np.fromfile(fid, np.uint8, 1)[0] + t_channels.append(_chan_labels[t_idx]) + + t_datablock["channels"] = t_channels + + fid.seek(t_data_address + 0x1C) + t_record_duration = np.fromfile(fid, np.uint32, 1)[0].astype(np.int64) + t_datablock["duration"] = t_record_duration + + fid.seek(t_data_address + 0x1A) + sfreq = np.fromfile(fid, np.uint16, 1)[0] & 0x3FFF + t_datablock["sfreq"] = sfreq.astype(np.int64) + + t_datablock["n_samples"] = np.int64(t_record_duration * sfreq // 10) + t_controlblock["datablocks"].append(t_datablock) + controlblocks.append(t_controlblock) + header["controlblocks"] = controlblocks + + # Now check that every data block has the same channels and sfreq + chans = [] + sfreqs = [] + nsamples = [] + for t_ctl in header["controlblocks"]: + for t_dtb in t_ctl["datablocks"]: + chans.append(t_dtb["channels"]) + sfreqs.append(t_dtb["sfreq"]) + nsamples.append(t_dtb["n_samples"]) + for i_elem in range(1, len(chans)): + if chans[0] != chans[i_elem]: + raise ValueError("Channel names in datablocks do not match") + if sfreqs[0] != sfreqs[i_elem]: + raise 
ValueError("Sample frequency in datablocks do not match") + header["ch_names"] = chans[0] + header["sfreq"] = sfreqs[0] + header["n_samples"] = np.sum(nsamples) + + # TODO: Support more than one controlblock and more than one datablock + if header["n_ctlblocks"] != 1: + raise NotImplementedError( + "I dont know how to read more than one " + "control block for this type of file :(" + ) + if header["controlblocks"][0]["n_datablocks"] > 1: + # Multiple blocks, check that they all have the same kind of data + datablocks = header["controlblocks"][0]["datablocks"] + block_0 = datablocks[0] + for t_block in datablocks[1:]: + if block_0["n_channels"] != t_block["n_channels"]: + raise ValueError( + "Cannot read NK file with different number of channels " + "in each datablock" + ) + if block_0["channels"] != t_block["channels"]: + raise ValueError( + "Cannot read NK file with different channels in each datablock" + ) + if block_0["sfreq"] != t_block["sfreq"]: + raise ValueError( + "Cannot read NK file with different sfreq in each datablock" + ) + + return header + + +def _read_nihon_annotations(fname): + fname = _ensure_path(fname) + log_fname = fname.with_suffix(".LOG") + if not log_fname.exists(): + warn("No LOG file exists. Annotations will not be read") + return dict(onset=[], duration=[], description=[]) + logger.info("Found LOG file, reading events.") + with open(log_fname) as fid: + version = np.fromfile(fid, "|S16", 1).astype("U16")[0] + if version not in _valid_headers: + raise ValueError(f"Not a valid Nihon Kohden LOG file ({version})") + + fid.seek(0x91) + n_logblocks = np.fromfile(fid, np.uint8, 1)[0] + all_onsets = [] + all_descriptions = [] + for t_block in range(n_logblocks): + fid.seek(0x92 + t_block * 20) + t_blk_address = np.fromfile(fid, np.uint32, 1)[0] + fid.seek(t_blk_address + 0x12) + n_logs = np.fromfile(fid, np.uint8, 1)[0] + fid.seek(t_blk_address + 0x14) + t_logs = np.fromfile(fid, "|S45", n_logs) + for t_log in t_logs: + for enc in _encodings: + try: + t_log = t_log.decode(enc) + except UnicodeDecodeError: + pass + else: + break + else: + warn(f"Could not decode log as one of {_encodings}") + continue + t_desc = t_log[:20].strip("\x00") + t_onset = datetime.strptime(t_log[20:26], "%H%M%S") + t_onset = t_onset.hour * 3600 + t_onset.minute * 60 + t_onset.second + all_onsets.append(t_onset) + all_descriptions.append(t_desc) + + annots = dict( + onset=all_onsets, + duration=[0] * len(all_onsets), + description=all_descriptions, + ) + return annots + + +def _map_ch_to_type(ch_name): + ch_type_pattern = OrderedDict( + [("stim", ("Mark",)), ("misc", ("DC", "NA", "Z", "$")), ("bio", ("X",))] + ) + for key, kinds in ch_type_pattern.items(): + if any(kind in ch_name for kind in kinds): + return key + return "eeg" + + +def _map_ch_to_specs(ch_name): + unit_mult = 1e-3 + phys_min = -12002.9 + phys_max = 12002.56 + dig_min = -32768 + if ch_name.upper() in _default_chan_labels: + idx = _default_chan_labels.index(ch_name.upper()) + if (idx < 42 or idx > 73) and idx not in [76, 77]: + unit_mult = 1e-6 + phys_min = -3200 + phys_max = 3199.902 + t_range = phys_max - phys_min + cal = t_range / 65535 + offset = phys_min - (dig_min * cal) + + out = dict( + unit=unit_mult, + phys_min=phys_min, + phys_max=phys_max, + dig_min=dig_min, + cal=cal, + offset=offset, + ) + return out + + +@fill_doc +class RawNihon(BaseRaw): + """Raw object from a Nihon Kohden EEG file. + + Parameters + ---------- + fname : path-like + Path to the Nihon Kohden data ``.eeg`` file. 
+ preload : bool + If True, all data are loaded at initialization. + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + """ + + @verbose + def __init__(self, fname, preload=False, verbose=None): + fname = _check_fname(fname, "read", True, "fname") + data_name = fname.name + logger.info(f"Loading {data_name}") + + header = _read_nihon_header(fname) + metadata = _read_nihon_metadata(fname) + + # n_chan = len(header['ch_names']) + 1 + sfreq = header["sfreq"] + # data are multiplexed int16 + ch_names = header["ch_names"] + ch_types = [_map_ch_to_type(x) for x in ch_names] + + info = create_info(ch_names, sfreq, ch_types) + n_samples = header["n_samples"] + + if "meas_date" in metadata: + with info._unlock(): + info["meas_date"] = metadata["meas_date"] + chs = {x: _map_ch_to_specs(x) for x in info["ch_names"]} + + cal = np.array([chs[x]["cal"] for x in info["ch_names"]], float)[:, np.newaxis] + offsets = np.array([chs[x]["offset"] for x in info["ch_names"]], float)[ + :, np.newaxis + ] + gains = np.array([chs[x]["unit"] for x in info["ch_names"]], float)[ + :, np.newaxis + ] + + raw_extras = dict(cal=cal, offsets=offsets, gains=gains, header=header) + for i_ch, ch_name in enumerate(info["ch_names"]): + t_range = chs[ch_name]["phys_max"] - chs[ch_name]["phys_min"] + info["chs"][i_ch]["range"] = t_range + info["chs"][i_ch]["cal"] = 1 / t_range + + super().__init__( + info, + preload=preload, + last_samps=(n_samples - 1,), + filenames=[fname.as_posix()], + orig_format="short", + raw_extras=[raw_extras], + ) + + # Get annotations from LOG file + annots = _read_nihon_annotations(fname) + + # Annotate acquisition skips + controlblock = header["controlblocks"][0] + cur_sample = 0 + if controlblock["n_datablocks"] > 1: + for i_block in range(controlblock["n_datablocks"] - 1): + t_block = controlblock["datablocks"][i_block] + cur_sample = cur_sample + t_block["n_samples"] + cur_tpoint = (cur_sample - 0.5) / t_block["sfreq"] + # Add annotations as in append raw + annots["onset"].append(cur_tpoint) + annots["duration"].append(0.0) + annots["description"].append("BAD boundary") + annots["onset"].append(cur_tpoint) + annots["duration"].append(0.0) + annots["description"].append("EDGE boundary") + + annotations = Annotations(**annots, orig_time=info["meas_date"]) + self.set_annotations(annotations) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + # For now we assume one control block + header = self._raw_extras[fi]["header"] + + # Get the original cal, offsets and gains + cal = self._raw_extras[fi]["cal"] + offsets = self._raw_extras[fi]["offsets"] + gains = self._raw_extras[fi]["gains"] + + # get the right datablock + datablocks = header["controlblocks"][0]["datablocks"] + ends = np.cumsum([t["n_samples"] for t in datablocks]) + + start_block = np.where(start < ends)[0][0] + stop_block = np.where(stop <= ends)[0][0] + + if start_block != stop_block: + # Recursive call for each block independently + new_start = start + sample_start = 0 + for t_block_idx in range(start_block, stop_block + 1): + t_block = datablocks[t_block_idx] + if t_block == stop_block: + # If its the last block, we stop on the last sample to read + new_stop = stop + else: + # Otherwise, stop on the last sample of the block + new_stop = t_block["n_samples"] + new_start + samples_to_read = new_stop - new_start + sample_stop = sample_start + samples_to_read + + self._read_segment_file( + data[:, sample_start:sample_stop], + idx, + fi, + 
+                    new_start,
+                    new_stop,
+                    cals,
+                    mult,
+                )
+
+                # Update variables for next loop
+                sample_start = sample_stop
+                new_start = new_stop
+        else:
+            datablock = datablocks[start_block]
+
+            n_channels = datablock["n_channels"] + 1
+            datastart = datablock["address"] + 0x27 + (datablock["n_channels"] * 10)
+
+            # Compute start offset based on the beginning of the block
+            rel_start = start
+            if start_block != 0:
+                rel_start = start - ends[start_block - 1]
+            start_offset = datastart + rel_start * n_channels * 2
+
+            with open(self.filenames[fi], "rb") as fid:
+                to_read = (stop - start) * n_channels
+                fid.seek(start_offset)
+                block_data = np.fromfile(fid, "<u2", to_read) + 0x8000
+                block_data = block_data.astype(np.int16)
+                block_data = block_data.reshape(n_channels, -1, order="F")
+                block_data = block_data[:-1] * cal
+                block_data += offsets
+                block_data *= gains
+                _mult_cal_one(data, block_data, idx, cals, mult)
+        return data
+
+
+@fill_doc
+def read_raw_nirx(
+    fname, saturated="annotate", preload=False, *, encoding=None, verbose=None
+) -> "RawNIRX":
+    """Reader for a NIRX fNIRS recording.
+
+    Parameters
+    ----------
+    fname : path-like
+        Path to the NIRX data folder or header file.
+    %(saturated)s
+    %(preload)s
+    %(encoding_nirx)s
+    %(verbose)s
+
+    Returns
+    -------
+    raw : instance of RawNIRX
+        A Raw object containing NIRX data.
+        See :class:`mne.io.Raw` for documentation of attributes and methods.
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods of RawNIRX.
+
+    Notes
+    -----
+    %(nirx_notes)s
+    """
+    return RawNIRX(
+        fname, saturated, preload=preload, encoding=encoding, verbose=verbose
+    )
+
+
+def _open(fname):
+    return open(fname, encoding="latin-1")
+
+
+@fill_doc
+class RawNIRX(BaseRaw):
+    """Raw object from a NIRX fNIRS file.
+
+    Parameters
+    ----------
+    fname : path-like
+        Path to the NIRX data folder or header file.
+    %(saturated)s
+    %(preload)s
+    %(encoding_nirx)s
+    %(verbose)s
+
+    See Also
+    --------
+    mne.io.Raw : Documentation of attributes and methods.
+
+    Notes
+    -----
+    %(nirx_notes)s
+    """
+
+    @verbose
+    def __init__(
+        self, fname, saturated, *, preload=False, encoding=None, verbose=None
+    ):
+        logger.info(f"Loading {fname}")
+        _validate_type(fname, "path-like", "fname")
+        _validate_type(saturated, str, "saturated")
+        _check_option("saturated", saturated, ("annotate", "nan", "ignore"))
+        fname = str(fname)
+        if fname.endswith(".hdr"):
+            fname = op.dirname(op.abspath(fname))
+
+        fname = str(_check_fname(fname, "read", True, "fname", need_dir=True))
+
+        json_config = glob.glob(f"{fname}/*{'config.json'}")
+        is_aurora = len(json_config)
+
+        if is_aurora:
+            # NIRSport2 devices using Aurora software
+            keys = (
+                "hdr",
+                "config.json",
+                "description.json",
+                "wl1",
+                "wl2",
+                "probeInfo.mat",
+                "tri",
+            )
+        else:
+            # NIRScout devices and NIRSport1 devices
+            keys = (
+                "hdr",
+                "inf",
+                "set",
+                "tpl",
+                "wl1",
+                "wl2",
+                "config.txt",
+                "probeInfo.mat",
+            )
+            n_dat = len(glob.glob(f"{fname}/*{'dat'}"))
+            if n_dat != 1:
+                warn(
+                    "A single dat file was expected in the specified path, "
+                    f"but got {n_dat}. This may indicate that the file "
+                    "structure has been modified since the measurement "
+                    "was saved."
+                )
+
+        # Check if required files exist and store names for later use
+        files = dict()
+        nan_mask = dict()
+        for key in keys:
+            files[key] = glob.glob(f"{fname}/*{key}")
+            fidx = 0
+            if len(files[key]) != 1:
+                if key not in ("wl1", "wl2"):
+                    raise RuntimeError(f"Need one {key} file, got {len(files[key])}")
+                noidx = np.where(
+                    ["nosatflags_" in op.basename(x) for x in files[key]]
+                )[0]
+                if len(noidx) != 1 or len(files[key]) != 2:
+                    raise RuntimeError(
+                        f"Need one nosatflags and one standard {key} file, "
+                        f"got {len(files[key])}"
+                    )
+                # Here two files have been found, one that is called
+                # no sat flags. The nosatflag file has no NaNs in it.
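+                # Hypothetical example of such a pair:
+                #     nosatflags_NIRS-2024-001.wl1  (no NaNs, real values kept)
+                #     NIRS-2024-001.wl1             (saturated samples as NaN)
+                # fidx selects which copy is read below; nan_mask may keep the
+                # NaN copy around so saturation can be annotated afterwards.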
+ noidx = noidx[0] + if saturated == "ignore": + # Ignore NaN and return values + fidx = noidx + elif saturated == "nan": + # Return NaN + fidx = 0 if noidx == 1 else 1 + else: + assert saturated == "annotate" # guaranteed above + fidx = noidx + nan_mask[key] = files[key][0 if noidx == 1 else 1] + files[key] = files[key][fidx] + + # Read number of rows/samples of wavelength data + with _open(files["wl1"]) as fid: + last_sample = fid.read().count("\n") - 1 + + # Read header file + # The header file isn't compliant with the configparser. So all the + # text between comments must be removed before passing to parser + with open(files["hdr"], encoding=encoding) as f: + hdr_str_all = f.read() + hdr_str = re.sub("#.*?#", "", hdr_str_all, flags=re.DOTALL) + if is_aurora: + hdr_str = re.sub("(\\[DataStructure].*)", "", hdr_str, flags=re.DOTALL) + hdr = RawConfigParser() + hdr.read_string(hdr_str) + + # Check that the file format version is supported + if is_aurora: + # We may need to ease this requirement back + if hdr["GeneralInfo"]["Version"] not in [ + "2021.4.0-34-ge9fdbbc8", + "2021.9.0-5-g3eb32851", + "2021.9.0-6-g14ef4a71", + ]: + warn( + "MNE has not been tested with Aurora version " + f"{hdr['GeneralInfo']['Version']}" + ) + else: + if hdr["GeneralInfo"]["NIRStar"] not in ['"15.0"', '"15.2"', '"15.3"']: + raise RuntimeError( + "MNE does not support this NIRStar version" + f" ({hdr['GeneralInfo']['NIRStar']})" + ) + if ( + "NIRScout" not in hdr["GeneralInfo"]["Device"] + and "NIRSport" not in hdr["GeneralInfo"]["Device"] + ): + warn( + "Only import of data from NIRScout devices have been " + f'thoroughly tested. You are using a {hdr["GeneralInfo"]["Device"]}' + " device." + ) + + # Parse required header fields + + # Extract measurement date and time + if is_aurora: + datetime_str = hdr["GeneralInfo"]["Date"] + else: + datetime_str = hdr["GeneralInfo"]["Date"] + hdr["GeneralInfo"]["Time"] + + meas_date = None + # Several formats have been observed so we try each in turn + for loc, translations in _localized_abbr.items(): + do_break = False + # So far we are lucky in that all the formats below, if they + # include %a (weekday abbr), always come first. Thus we can use + # a .split(), replace, and rejoin. + loc_datetime_str = datetime_str.split(" ") + for key, val in translations["weekday"].items(): + loc_datetime_str[0] = loc_datetime_str[0].replace(key, val) + for ii in range(1, len(loc_datetime_str)): + for key, val in translations["month"].items(): + loc_datetime_str[ii] = loc_datetime_str[ii].replace(key, val) + loc_datetime_str = " ".join(loc_datetime_str) + logger.debug(f"Trying {loc} datetime: {loc_datetime_str}") + for dt_code in [ + '"%a, %b %d, %Y""%H:%M:%S.%f"', + '"%a %d %b %Y""%H:%M:%S.%f"', + '"%a, %d %b %Y""%H:%M:%S.%f"', + "%Y-%m-%d %H:%M:%S.%f", + '"%Y年%m月%d日""%H:%M:%S.%f"', + ]: + try: + meas_date = dt.datetime.strptime(loc_datetime_str, dt_code) + except ValueError: + pass + else: + meas_date = meas_date.replace(tzinfo=dt.timezone.utc) + do_break = True + logger.debug(f"Measurement date language {loc} detected: {dt_code}") + break + if do_break: + break + if meas_date is None: + warn( + "Extraction of measurement date from NIRX file failed. " + "This can be caused by files saved in certain locales " + f"(currently only {list(_localized_abbr)} supported). " + "Please report this as a github issue. " + "The date is being set to January 1st, 2000, " + f"instead of {repr(datetime_str)}." 
+ ) + meas_date = dt.datetime(2000, 1, 1, 0, 0, 0, tzinfo=dt.timezone.utc) + + # Extract frequencies of light used by machine + if is_aurora: + fnirs_wavelengths = [760, 850] + else: + fnirs_wavelengths = [ + int(s) + for s in re.findall(r"(\d+)", hdr["ImagingParameters"]["Wavelengths"]) + ] + + # Extract source-detectors + if is_aurora: + sources = re.findall(r"(\d+)-\d+", hdr_str_all.split("\n")[-2]) + detectors = re.findall(r"\d+-(\d+)", hdr_str_all.split("\n")[-2]) + sources = [int(s) + 1 for s in sources] + detectors = [int(d) + 1 for d in detectors] + + else: + sources = np.asarray( + [ + int(s) + for s in re.findall( + r"(\d+)-\d+:\d+", hdr["DataStructure"]["S-D-Key"] + ) + ], + int, + ) + detectors = np.asarray( + [ + int(s) + for s in re.findall( + r"\d+-(\d+):\d+", hdr["DataStructure"]["S-D-Key"] + ) + ], + int, + ) + + # Extract sampling rate + if is_aurora: + samplingrate = float(hdr["GeneralInfo"]["Sampling rate"]) + else: + samplingrate = float(hdr["ImagingParameters"]["SamplingRate"]) + + # Read participant information file + if is_aurora: + with open(files["description.json"]) as f: + inf = json.load(f) + else: + inf = ConfigParser(allow_no_value=True) + inf.read(files["inf"]) + inf = inf._sections["Subject Demographics"] + + # Store subject information from inf file in mne format + # Note: NIRX also records "Study Type", "Experiment History", + # "Additional Notes", "Contact Information" and this information + # is currently discarded + # NIRStar does not record an id, or handedness by default + # The name field is used to populate the his_id variable. + subject_info = {} + if is_aurora: + names = inf["subject"].split() + else: + names = inf["name"].replace('"', "").split() + subject_info["his_id"] = "_".join(names) + if len(names) > 0: + subject_info["first_name"] = names[0].replace('"', "") + if len(names) > 1: + subject_info["last_name"] = names[-1].replace('"', "") + if len(names) > 2: + subject_info["middle_name"] = names[-2].replace('"', "") + subject_info["sex"] = inf["gender"].replace('"', "") + # Recode values + if subject_info["sex"] in {"M", "Male", "1"}: + subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_MALE + elif subject_info["sex"] in {"F", "Female", "2"}: + subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_FEMALE + else: + subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN + if inf["age"] != "": + subject_info["birthday"] = dt.date( + meas_date.year - int(inf["age"]), + meas_date.month, + meas_date.day, + ) + + # Read information about probe/montage/optodes + # A word on terminology used here: + # Sources produce light + # Detectors measure light + # Sources and detectors are both called optodes + # Each source - detector pair produces a channel + # Channels are defined as the midpoint between source and detector + mat_data = loadmat(files["probeInfo.mat"]) + probes = mat_data["probeInfo"]["probes"][0, 0] + requested_channels = probes["index_c"][0, 0] + src_locs = probes["coords_s3"][0, 0] / 100.0 + det_locs = probes["coords_d3"][0, 0] / 100.0 + ch_locs = probes["coords_c3"][0, 0] / 100.0 + + # These are all in MNI coordinates, so let's transform them to + # the Neuromag head coordinate frame + src_locs, det_locs, ch_locs, mri_head_t = _convert_fnirs_to_head( + "fsaverage", "mri", "head", src_locs, det_locs, ch_locs + ) + + # Set up digitization + dig = get_mni_fiducials("fsaverage", verbose=False) + for fid in dig: + fid["r"] = apply_trans(mri_head_t, fid["r"]) + fid["coord_frame"] = FIFF.FIFFV_COORD_HEAD + for ii, ch_loc in enumerate(ch_locs, 1): + dig.append( + dict( + 
kind=FIFF.FIFFV_POINT_EEG, # misnomer but probably okay + r=ch_loc, + ident=ii, + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + ) + dig = _format_dig_points(dig) + del mri_head_t + + # Determine requested channel indices + # The wl1 and wl2 files include all possible source - detector pairs. + # But most of these are not relevant. We want to extract only the + # subset requested in the probe file + req_ind = np.array([], int) + for req_idx in range(requested_channels.shape[0]): + sd_idx = np.where( + (sources == requested_channels[req_idx][0]) + & (detectors == requested_channels[req_idx][1]) + ) + req_ind = np.concatenate((req_ind, sd_idx[0])) + req_ind = req_ind.astype(int) + + snames = [f"S{sources[idx]}" for idx in req_ind] + dnames = [f"_D{detectors[idx]}" for idx in req_ind] + sdnames = [m + str(n) for m, n in zip(snames, dnames)] + sd1 = [s + " " + str(fnirs_wavelengths[0]) for s in sdnames] + sd2 = [s + " " + str(fnirs_wavelengths[1]) for s in sdnames] + chnames = [val for pair in zip(sd1, sd2) for val in pair] + + # Create mne structure + info = create_info(chnames, samplingrate, ch_types="fnirs_cw_amplitude") + with info._unlock(): + info.update(subject_info=subject_info, dig=dig) + info["meas_date"] = meas_date + + # Store channel, source, and detector locations + # The channel location is stored in the first 3 entries of loc. + # The source location is stored in the second 3 entries of loc. + # The detector location is stored in the third 3 entries of loc. + # NIRx NIRSite uses MNI coordinates. + # Also encode the light frequency in the structure. + for ch_idx2 in range(requested_channels.shape[0]): + # Find source and store location + src = int(requested_channels[ch_idx2, 0]) - 1 + # Find detector and store location + det = int(requested_channels[ch_idx2, 1]) - 1 + # Store channel location as midpoint between source and detector. + midpoint = (src_locs[src, :] + det_locs[det, :]) / 2 + for ii in range(2): + ch_idx3 = ch_idx2 * 2 + ii + info["chs"][ch_idx3]["loc"][3:6] = src_locs[src, :] + info["chs"][ch_idx3]["loc"][6:9] = det_locs[det, :] + info["chs"][ch_idx3]["loc"][:3] = midpoint + info["chs"][ch_idx3]["loc"][9] = fnirs_wavelengths[ii] + info["chs"][ch_idx3]["coord_frame"] = FIFF.FIFFV_COORD_HEAD + + # Extract the start/stop numbers for samples in the CSV. In theory the + # sample bounds should just be 10 * the number of channels, but some + # files have mixed \n and \n\r endings (!) so we can't rely on it, and + # instead make a single pass over the entire file at the beginning so + # that we know how to seek and read later. 
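+        # Sketch with made-up numbers: if the first three lines of a wl file
+        # are 52, 50 and 51 bytes long (line endings included), bounds[key]
+        # becomes [0, 52, 102, 153], so samples i..j can later be fetched by
+        # seeking to bounds[key][i] and reading bounds[key][j] - bounds[key][i]
+        # bytes.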
+ bounds = dict() + for key in ("wl1", "wl2"): + offset = 0 + bounds[key] = [offset] + with open(files[key], "rb") as fid: + for line in fid: + offset += len(line) + bounds[key].append(offset) + assert offset == fid.tell() + + # Extras required for reading data + raw_extras = { + "sd_index": req_ind, + "files": files, + "bounds": bounds, + "nan_mask": nan_mask, + } + # Get our saturated mask + annot_mask = None + for ki, key in enumerate(("wl1", "wl2")): + if nan_mask.get(key, None) is None: + continue + mask = np.isnan( + _read_csv_rows_cols( + nan_mask[key], 0, last_sample + 1, req_ind, {0: 0, 1: None} + ).T + ) + if saturated == "nan": + nan_mask[key] = mask + else: + assert saturated == "annotate" + if annot_mask is None: + annot_mask = np.zeros( + (len(info["ch_names"]) // 2, last_sample + 1), bool + ) + annot_mask |= mask + nan_mask[key] = None # shouldn't need again + + super().__init__( + info, + preload, + filenames=[fname], + last_samps=[last_sample], + raw_extras=[raw_extras], + verbose=verbose, + ) + + # make onset/duration/description + onset, duration, description, ch_names = list(), list(), list(), list() + if annot_mask is not None: + for ci, mask in enumerate(annot_mask): + on, dur = _mask_to_onsets_offsets(mask) + on = on / info["sfreq"] + dur = dur / info["sfreq"] + dur -= on + onset.extend(on) + duration.extend(dur) + description.extend(["BAD_SATURATED"] * len(on)) + ch_names.extend([self.ch_names[2 * ci : 2 * ci + 2]] * len(on)) + + # Read triggers from event file + if not is_aurora: + files["tri"] = files["hdr"][:-3] + "evt" + if op.isfile(files["tri"]): + with _open(files["tri"]) as fid: + t = [re.findall(r"(\d+)", line) for line in fid] + if is_aurora: + tf_idx, desc_idx = _determine_tri_idxs(t[0]) + for t_ in t: + if is_aurora: + trigger_frame = float(t_[tf_idx]) + desc = float(t_[desc_idx]) + else: + binary_value = "".join(t_[1:])[::-1] + desc = float(int(binary_value, 2)) + trigger_frame = float(t_[0]) + onset.append(trigger_frame / samplingrate) + duration.append(1.0) # No duration info stored in files + description.append(desc) + ch_names.append(list()) + annot = Annotations(onset, duration, description, ch_names=ch_names) + self.set_annotations(annot) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file. + + The NIRX machine records raw data as two different wavelengths. + The returned data interleaves the wavelengths. + """ + sd_index = self._raw_extras[fi]["sd_index"] + + wls = list() + for key in ("wl1", "wl2"): + d = _read_csv_rows_cols( + self._raw_extras[fi]["files"][key], + start, + stop, + sd_index, + self._raw_extras[fi]["bounds"][key], + ).T + nan_mask = self._raw_extras[fi]["nan_mask"].get(key, None) + if nan_mask is not None: + d[nan_mask[:, start:stop]] = np.nan + wls.append(d) + + # TODO: Make this more efficient by only indexing above what we need. + # For now let's just construct the full data matrix and index. 
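+        # (for two requested S-D pairs the interleaved row order below is,
+        # e.g., S1_D1 760, S1_D1 850, S2_D1 760, S2_D1 850)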
+ # Interleave wavelength 1 and 2 to match channel names: + this_data = np.zeros((len(wls[0]) * 2, stop - start)) + this_data[0::2, :] = wls[0] + this_data[1::2, :] = wls[1] + _mult_cal_one(data, this_data, idx, cals, mult) + return data + + +def _read_csv_rows_cols(fname, start, stop, cols, bounds, sep=" ", replace=None): + with open(fname, "rb") as fid: + fid.seek(bounds[start]) + args = list() + if bounds[1] is not None: + args.append(bounds[stop] - bounds[start]) + data = fid.read(*args).decode("latin-1") + if replace is not None: + data = replace(data) + x = np.fromstring(data, float, sep=sep) + x.shape = (stop - start, -1) + x = x[:, cols] + return x + + +def _convert_fnirs_to_head(trans, fro, to, src_locs, det_locs, ch_locs): + mri_head_t, _ = _get_trans(trans, fro, to) + src_locs = apply_trans(mri_head_t, src_locs) + det_locs = apply_trans(mri_head_t, det_locs) + ch_locs = apply_trans(mri_head_t, ch_locs) + return src_locs, det_locs, ch_locs, mri_head_t + + +def _determine_tri_idxs(trigger): + """Determine tri file indexes for frame and description.""" + if len(trigger) == 12: + # Aurora version 2021.9.6 or greater + trigger_frame_idx = 7 + desc_idx = 10 + elif len(trigger) == 9: + # Aurora version 2021.9.5 or earlier + trigger_frame_idx = 7 + desc_idx = 8 + else: + raise RuntimeError("Unable to read trigger file.") + + return trigger_frame_idx, desc_idx diff --git a/mne/io/nsx/__init__.py b/mne/io/nsx/__init__.py new file mode 100644 index 0000000..cb2500e --- /dev/null +++ b/mne/io/nsx/__init__.py @@ -0,0 +1,7 @@ +"""NSx module for reading Blackrock Microsystem files.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .nsx import read_raw_nsx diff --git a/mne/io/nsx/nsx.py b/mne/io/nsx/nsx.py new file mode 100644 index 0000000..1fc8a69 --- /dev/null +++ b/mne/io/nsx/nsx.py @@ -0,0 +1,537 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +from datetime import datetime, timezone + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _empty_info +from ..._fiff.utils import _file_size, _read_segments_file +from ...annotations import Annotations +from ...utils import _check_fname, fill_doc, logger, warn +from ..base import BaseRaw, _get_scaling + +CH_TYPE_MAPPING = { + "CC": "SEEG", +} + + +# See https://blackrockneurotech.com/wp-content/uploads/LB-0023-7.00_NEV_File_Format.pdf +DATA_BYTE_SIZE = 2 +ORIG_FORMAT = "short" + + +nsx_header_dict = { + "basic": [ + ("file_id", "S8"), # achFileType + # file specification split into major and minor version number + ("ver_major", "uint8"), + ("ver_minor", "uint8"), + # bytes of basic & extended header + ("bytes_in_headers", "uint32"), + # label of the sampling group (e.g., "1 kS/s" or "LFP low") + ("label", "S16"), + ("comment", "S256"), + ("period", "uint32"), + ("timestamp_resolution", "uint32"), + # time origin: 2byte uint16 values for ... 
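+        # (a SYSTEMTIME-like layout; _read_header() below builds the UTC
+        # measurement date from all of these fields except "weekday")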
+ ("year", "uint16"), + ("month", "uint16"), + ("weekday", "uint16"), + ("day", "uint16"), + ("hour", "uint16"), + ("minute", "uint16"), + ("second", "uint16"), + ("millisecond", "uint16"), + # number of channel_count match number of extended headers + ("channel_count", "uint32"), + ], + "extended": [ + ("type", "S2"), + ("electrode_id", "uint16"), + ("electrode_label", "S16"), + # used front-end amplifier bank (e.g., A, B, C, D) + ("physical_connector", "uint8"), + # used connector pin (e.g., 1-37 on bank A, B, C or D) + ("connector_pin", "uint8"), + # digital and analog value ranges of the signal + ("min_digital_val", "int16"), + ("max_digital_val", "int16"), + ("min_analog_val", "int16"), + ("max_analog_val", "int16"), + # units of the analog range values ("mV" or "uV") + ("units", "S16"), + # filter settings used to create nsx from source signal + ("hi_freq_corner", "uint32"), + ("hi_freq_order", "uint32"), + ("hi_freq_type", "uint16"), # 0=None, 1=Butterworth + ("lo_freq_corner", "uint32"), + ("lo_freq_order", "uint32"), + ("lo_freq_type", "uint16"), + ], # 0=None, 1=Butterworth, + "data>2.1<3": [ + ("header", "uint8"), + ("timestamp", "uint32"), + ("nb_data_points", "uint32"), + ], + "data>=3": [ + ("header", "uint8"), + ("timestamp", "uint64"), + ("nb_data_points", "uint32"), + ], +} + + +@fill_doc +def read_raw_nsx( + input_fname, stim_channel=True, eog=None, misc=None, preload=False, *, verbose=None +) -> "RawNSX": + """Reader function for NSx (Blackrock Microsystems) files. + + Parameters + ---------- + input_fname : str + Path to the NSx file. + stim_channel : ``'auto'`` | str | list of str | int | list of int + Defaults to ``'auto'``, which means that channels named ``'status'`` or + ``'trigger'`` (case insensitive) are set to STIM. If str (or list of + str), all channels matching the name(s) are set to STIM. If int (or + list of ints), channels corresponding to the indices are set to STIM. + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawEDF + The raw instance. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + Notes + ----- + NSx files with id (= NEURALSG), i.e., version 2.1 is currently not + supported. + + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. + """ + input_fname = _check_fname( + input_fname, overwrite="read", must_exist=True, name="input_fname" + ) + if not input_fname.suffix.lower().startswith(".ns"): + raise NotImplementedError( + f"Only NSx files are supported, got {input_fname.suffix}." + ) + return RawNSX( + input_fname, stim_channel, eog, misc, preload=preload, verbose=verbose + ) + + +@fill_doc +class RawNSX(BaseRaw): + """Raw object from NSx file from Blackrock Microsystems. + + Parameters + ---------- + input_fname : str + Path to the NSx file. + stim_channel : ``'auto'`` | str | list of str | int | list of int + Defaults to ``'auto'``, which means that channels named ``'status'`` or + ``'trigger'`` (case insensitive) are set to STIM. If str (or list of + str), all channels matching the name(s) are set to STIM. 
If int (or + list of ints), channels corresponding to the indices are set to STIM. + eog : list or tuple + Names of channels or list of indices that should be designated EOG + channels. Values should correspond to the electrodes in the file. + Default is None. + misc : list or tuple + Names of channels or list of indices that should be designated MISC + channels. Values should correspond to the electrodes in the file. + Default is None. + %(preload)s + %(verbose)s + + Notes + ----- + NSx files with id (= NEURALSG), i.e., version 2.1 is currently not + supported. + + If channels named 'status' or 'trigger' are present, they are considered as + STIM channels by default. Use func:`mne.find_events` to parse events + encoded in such analog stim channels. + """ + + def __init__( + self, + input_fname, + stim_channel="auto", + eog=None, + misc=None, + preload=False, + verbose=None, + ): + logger.info(f"Extracting NSX parameters from {input_fname}...") + input_fname = os.path.abspath(input_fname) + ( + info, + data_fname, + fmt, + n_samples, + orig_format, + raw_extras, + orig_units, + ) = _get_hdr_info(input_fname, stim_channel=stim_channel, eog=eog, misc=misc) + raw_extras["orig_format"] = orig_format + first_samps = (raw_extras["timestamp"][0],) + super().__init__( + info, + first_samps=first_samps, + last_samps=[first_samps[0] + n_samples - 1], + filenames=[data_fname], + orig_format=orig_format, + preload=preload, + verbose=verbose, + raw_extras=[raw_extras], + orig_units=orig_units, + ) + + # Add annotations for in-data skips + if len(self._raw_extras[0]["timestamp"]) > 1: + starts = ( + self._raw_extras[0]["timestamp"] + self._raw_extras[0]["nb_data_points"] + )[:-1] + 1 + stops = self._raw_extras[0]["timestamp"][1:] - 1 + durations = (stops - starts + 1) / self.info["sfreq"] + annot = Annotations( + onset=(starts / self.info["sfreq"]), + duration=durations, + description="BAD_ACQ_SKIP", + orig_time=self.info["meas_date"], + ) + self.set_annotations(annot) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + dtype = self._raw_extras[fi]["orig_format"] + first_samps = self._raw_extras[fi]["timestamp"] + recording_extents = self._raw_extras[fi]["nb_data_points"] + offsets = self._raw_extras[fi]["offset_to_data_block"] + for first_samp, recording_extent, offset in zip( + first_samps, recording_extents, offsets + ): + if start > first_samp + recording_extent or stop < first_samp: + # There is nothing to read in this chunk + continue + i_start = max(start, first_samp) + i_stop = min(stop, first_samp + recording_extent) + _read_segments_file( + self, + data[:, i_start - start : i_stop - start], + idx, + fi, + i_start - first_samp, + i_stop - first_samp, + cals, + mult, + dtype, + n_channels=None, + offset=offset, + trigger_ch=None, + ) + + +def _read_header(fname): + nsx_file_id = np.fromfile(fname, count=1, dtype=[("file_id", "S8")])[0][ + "file_id" + ].decode() + + if nsx_file_id in ["NEURALCD", "BRSMPGRP"]: + basic_header = _read_header_22_and_above(fname) + elif nsx_file_id == "NEURALSG": + raise NotImplementedError( + "NSx file id (= NEURALSG), i.e., file" + " version 2.1 is currently not supported." 
+ ) + else: + raise ValueError( + f"NSx file id (={nsx_file_id}) does not match" + " with supported file ids:" + " ('NEURALCD', 'BRSMPGRP')" + ) + + time_origin = datetime( + *[ + basic_header.pop(xx) + for xx in ( + "year", + "month", + "day", + "hour", + "minute", + "second", + "millisecond", + ) + ], + tzinfo=timezone.utc, + ) + basic_header["meas_date"] = time_origin + return basic_header + + +def _read_header_22_and_above(fname): + basic_header = {} + dtype0 = nsx_header_dict["basic"] + dtype1 = nsx_header_dict["extended"] + + nsx_file_header = np.fromfile(fname, count=1, dtype=dtype0)[0] + basic_header.update( + {name: nsx_file_header[name] for name in nsx_file_header.dtype.names} + ) + + offset_dtype0 = np.dtype(dtype0).itemsize + shape = nsx_file_header["channel_count"] + basic_header["extended"] = np.memmap( + fname, shape=shape, offset=offset_dtype0, dtype=dtype1, mode="r" + ) + + # The following values are stored in mHz + # See: + # https://blackrockneurotech.com/wp-content/uploads/LB-0023-7.00_NEV_File_Format.pdf + basic_header["highpass"] = basic_header["extended"]["hi_freq_corner"] + basic_header["lowpass"] = basic_header["extended"]["lo_freq_corner"] + for x in ["highpass", "lowpass"]: + basic_header[x] = basic_header[x] * 1e-3 + + ver_major, ver_minor = basic_header.pop("ver_major"), basic_header.pop("ver_minor") + basic_header["spec"] = f"{ver_major}.{ver_minor}" + + data_header = list() + index = 0 + offset = basic_header["bytes_in_headers"] + filesize = _file_size(fname) + if float(basic_header["spec"]) < 3.0: + dtype2 = nsx_header_dict["data>2.1<3"] + else: + dtype2 = nsx_header_dict["data>=3"] + while offset < filesize: + dh = np.memmap(fname, dtype=dtype2, shape=1, offset=offset, mode="r")[0] + data_header.append( + { + "header": dh["header"], + "timestamp": dh["timestamp"], + "nb_data_points": dh["nb_data_points"], + "offset_to_data_block": offset + dh.dtype.itemsize, + } + ) + # data size = number of data points * (data_bytes * number of channels) + # use of `int` avoids overflow problem + data_size = ( + int(dh["nb_data_points"]) + * int(basic_header["channel_count"]) + * DATA_BYTE_SIZE + ) + # define new offset (to possible next data block) + offset = data_header[index]["offset_to_data_block"] + data_size + index += 1 + + basic_header["data_header"] = data_header + return basic_header + + +def _get_hdr_info(fname, stim_channel=True, eog=None, misc=None): + """Read header information NSx file.""" + eog = eog if eog is not None else [] + misc = misc if misc is not None else [] + + nsx_info = _read_header(fname) + ch_names = list(nsx_info["extended"]["electrode_label"]) + ch_types = list(nsx_info["extended"]["type"]) + ch_units = list(nsx_info["extended"]["units"]) + ch_names, ch_types, ch_units = ( + list(map(bytes.decode, xx)) for xx in (ch_names, ch_types, ch_units) + ) + max_analog_val = nsx_info["extended"]["max_analog_val"].astype("double") + min_analog_val = nsx_info["extended"]["min_analog_val"].astype("double") + max_digital_val = nsx_info["extended"]["max_digital_val"].astype("double") + min_digital_val = nsx_info["extended"]["min_digital_val"].astype("double") + cals = (max_analog_val - min_analog_val) / (max_digital_val - min_digital_val) + + stim_channel_idxs, _ = _check_stim_channel(stim_channel, ch_names) + + nchan = int(nsx_info["channel_count"]) + logger.info("Setting channel info structure...") + chs = list() + pick_mask = np.ones(len(ch_names)) + + orig_units = {} + for idx, ch_name in enumerate(ch_names): + chan_info = {} + chan_info["logno"] = 
int(nsx_info["extended"]["electrode_id"][idx]) + chan_info["scanno"] = int(nsx_info["extended"]["electrode_id"][idx]) + chan_info["ch_name"] = ch_name + chan_info["unit_mul"] = FIFF.FIFF_UNITM_NONE + ch_unit = ch_units[idx] + chan_info["unit"] = FIFF.FIFF_UNIT_V + # chan_info["range"] = _unit_range_dict[ch_units[idx]] + chan_info["range"] = 1 / _get_scaling("eeg", ch_units[idx]) + chan_info["cal"] = cals[idx] + chan_info["coord_frame"] = FIFF.FIFFV_COORD_HEAD + chan_info["coil_type"] = FIFF.FIFFV_COIL_EEG + chan_info["kind"] = FIFF.FIFFV_SEEG_CH + # montage can't be stored in NSx so channel locs are unknown: + chan_info["loc"] = np.full(12, np.nan) + orig_units[ch_name] = ch_unit + + # if the NSx info contained channel type information + # set it now. They are always set to 'CC'. + # If not inferable, set it to 'SEEG' with a warning. + ch_type = ch_types[idx] + ch_const = getattr(FIFF, f"FIFFV_{CH_TYPE_MAPPING.get(ch_type, 'SEEG')}_CH") + chan_info["kind"] = ch_const + # if user passes in explicit mapping for eog, misc and stim + # channels set them here. + if ch_name in eog or idx in eog or idx - nchan in eog: + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["kind"] = FIFF.FIFFV_EOG_CH + pick_mask[idx] = False + elif ch_name in misc or idx in misc or idx - nchan in misc: + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["kind"] = FIFF.FIFFV_MISC_CH + pick_mask[idx] = False + elif idx in stim_channel_idxs: + chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE + chan_info["unit"] = FIFF.FIFF_UNIT_NONE + chan_info["kind"] = FIFF.FIFFV_STIM_CH + pick_mask[idx] = False + chan_info["ch_name"] = ch_name + ch_names[idx] = chan_info["ch_name"] + chs.append(chan_info) + + sfreq = nsx_info["timestamp_resolution"] / nsx_info["period"] + info = _empty_info(sfreq) + info["meas_date"] = nsx_info["meas_date"] + info["chs"] = chs + info["ch_names"] = ch_names + + highpass = nsx_info["highpass"][:128] + lowpass = nsx_info["lowpass"][:128] + _decode_online_filters(info, highpass, lowpass) + + # Some keys to be consistent with FIF measurement info + info["description"] = None + + info._unlocked = False + info._update_redundant() + + orig_format = ORIG_FORMAT + + raw_extras = { + key: [r[key] for r in nsx_info["data_header"]] + for key in nsx_info["data_header"][0] + } + for key in raw_extras: + raw_extras[key] = np.array(raw_extras[key], int) + good_data_packets = raw_extras.pop("header") == 1 + if not good_data_packets.any(): + raise RuntimeError("NSx file appears to be broken") + raw_extras = {key: raw_extras[key][good_data_packets] for key in raw_extras.keys()} + raw_extras["timestamp"] = raw_extras["timestamp"] // nsx_info["period"] + first_samp = raw_extras["timestamp"][0] + last_samp = raw_extras["timestamp"][-1] + raw_extras["nb_data_points"][-1] + n_samples = last_samp - first_samp + + return ( + info, + fname, + nsx_info["spec"], + n_samples, + orig_format, + raw_extras, + orig_units, + ) + + +def _decode_online_filters(info, highpass, lowpass): + """Decode low/high-pass filters that are applied online.""" + if np.all(highpass == highpass[0]): + if highpass[0] == "NaN": + # Placeholder for future use. Highpass set in _empty_info. + pass + else: + hp = float(highpass[0]) + info["highpass"] = hp + else: + info["highpass"] = float(np.max(highpass)) + warn( + "Channels contain different highpass filters. Highest filter " + "setting will be stored." + ) + + if np.all(lowpass == lowpass[0]): + if lowpass[0] in ("NaN", "0", "0.0"): + # Placeholder for future use. Lowpass set in _empty_info. 
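+            # (the corner frequencies arrive here already converted from the
+            # file's mHz units to Hz by _read_header_22_and_above)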
+ pass + else: + info["lowpass"] = float(lowpass[0]) + else: + info["lowpass"] = float(np.min(lowpass)) + warn( + "Channels contain different lowpass filters. Lowest filter " + "setting will be stored." + ) + + +def _check_stim_channel(stim_channel, ch_names): + """Check that the stimulus channel exists in the current datafile.""" + DEFAULT_STIM_CH_NAMES = ["status", "trigger"] + + if stim_channel is None or stim_channel is False: + return [], [] + + if stim_channel is True: # convenient aliases + stim_channel = "auto" + + if isinstance(stim_channel, str): + if stim_channel == "auto": + if "auto" in ch_names: + warn( + RuntimeWarning, + "Using `stim_channel='auto'` when auto" + " also corresponds to a channel name is ambiguous." + " Please use `stim_channel=['auto']`.", + ) + else: + valid_stim_ch_names = DEFAULT_STIM_CH_NAMES + else: + valid_stim_ch_names = [stim_channel.lower()] + + elif isinstance(stim_channel, int): + valid_stim_ch_names = [ch_names[stim_channel].lower()] + + elif isinstance(stim_channel, list): + if all([isinstance(s, str) for s in stim_channel]): + valid_stim_ch_names = [s.lower() for s in stim_channel] + elif all([isinstance(s, int) for s in stim_channel]): + valid_stim_ch_names = [ch_names[s].lower() for s in stim_channel] + else: + raise ValueError("Invalid stim_channel") + else: + raise ValueError("Invalid stim_channel") + + ch_names_low = [ch.lower() for ch in ch_names] + found = list(set(valid_stim_ch_names) & set(ch_names_low)) + + stim_channel_idxs = [ch_names_low.index(f) for f in found] + names = [ch_names[idx] for idx in stim_channel_idxs] + return stim_channel_idxs, names diff --git a/mne/io/persyst/__init__.py b/mne/io/persyst/__init__.py new file mode 100644 index 0000000..aac0421 --- /dev/null +++ b/mne/io/persyst/__init__.py @@ -0,0 +1,7 @@ +"""Persyst module for conversion to FIF.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from .persyst import read_raw_persyst diff --git a/mne/io/persyst/persyst.py b/mne/io/persyst/persyst.py new file mode 100644 index 0000000..8de4428 --- /dev/null +++ b/mne/io/persyst/persyst.py @@ -0,0 +1,474 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import os +import os.path as op +from collections import OrderedDict +from datetime import datetime, timezone + +import numpy as np + +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import create_info +from ..._fiff.utils import _mult_cal_one +from ...annotations import Annotations +from ...utils import _check_fname, fill_doc, logger, verbose, warn +from ..base import BaseRaw + + +@fill_doc +def read_raw_persyst(fname, preload=False, verbose=None) -> "RawPersyst": + """Reader for a Persyst (.lay/.dat) recording. + + Parameters + ---------- + fname : path-like + Path to the Persyst header ``.lay`` file. + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawPersyst + A Raw object containing Persyst data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawPersyst. + + Notes + ----- + It is assumed that the ``.lay`` and ``.dat`` file + are in the same directory. To get the correct file path to the + ``.dat`` file, ``read_raw_persyst`` will get the corresponding dat + filename from the lay file, and look for that file inside the same + directory as the lay file. 
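+
+    Examples
+    --------
+    A minimal sketch; the file name is hypothetical and the matching ``.dat``
+    file must sit next to the ``.lay`` file:
+
+    >>> raw = read_raw_persyst("subject01.lay", preload=True)  # doctest: +SKIP
+    >>> print(raw.annotations)  # .lay comments become annotations  # doctest: +SKIP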
+ """ + return RawPersyst(fname, preload, verbose) + + +@fill_doc +class RawPersyst(BaseRaw): + """Raw object from a Persyst file. + + Parameters + ---------- + fname : path-like + Path to the Persyst header (.lay) file. + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. + """ + + @verbose + def __init__(self, fname, preload=False, verbose=None): + fname = str(_check_fname(fname, "read", True, "fname")) + logger.info(f"Loading {fname}") + + # make sure filename is the Lay file + if not fname.endswith(".lay"): + fname = fname + ".lay" + # get the current directory and Lay filename + curr_path, lay_fname = op.dirname(fname), op.basename(fname) + if not op.exists(fname): + raise FileNotFoundError( + f'The path you specified, "{lay_fname}",does not exist.' + ) + + # sections and subsections currently unused + keys, data, sections = _read_lay_contents(fname) + + # these are the section headers in the Persyst file layout + # Note: We do not make use of "SampleTimes" yet + fileinfo_dict = OrderedDict() + channelmap_dict = OrderedDict() + patient_dict = OrderedDict() + comments_dict = OrderedDict() + + # keep track of total number of comments + num_comments = 0 + + # loop through each line in the lay file + for key, val, section in zip(keys, data, sections): + if key == "": + continue + + # Make sure key are lowercase for everything, but electrodes. + # We also do not want to lower-case comments because those + # are free-form text where casing may matter. + if key is not None and section not in ["channelmap", "comments"]: + key = key.lower() + + # FileInfo + if section == "fileinfo": + # extract the .dat file name + if key == "file": + dat_fname = op.basename(val) + dat_fpath = op.join(curr_path, op.basename(dat_fname)) + + # determine if .dat file exists where it should + error_msg = ( + f"The data path you specified " + f"does not exist for the lay path, " + f"{lay_fname}. Make sure the dat file " + f"is in the same directory as the lay " + f"file, and the specified dat filename " + f"matches." + ) + if not op.exists(dat_fpath): + raise FileNotFoundError(error_msg) + fileinfo_dict[key] = val + # ChannelMap + elif section == "channelmap": + # channel map has = for = + channelmap_dict[key] = val + # Patient (All optional) + elif section == "patient": + patient_dict[key] = val + # Comments (turned into mne.Annotations) + elif section == "comments": + comments_dict[key] = comments_dict.get(key, list()) + [val] + num_comments += 1 + + # get numerical metadata + # datatype is either 7 for 32 bit, or 0 for 16 bit + datatype = fileinfo_dict.get("datatype") + cal = float(fileinfo_dict.get("calibration")) + n_chs = int(fileinfo_dict.get("waveformcount")) + + # Store subject information from lay file in mne format + # Note: Persyst also records "Physician", "Technician", + # "Medications", "History", and "Comments1" and "Comments2" + # and this information is currently discarded + subject_info = _get_subjectinfo(patient_dict) + + # set measurement date + testdate = patient_dict.get("testdate") + if testdate is not None: + # TODO: Persyst may change its internal date schemas + # without notice + # These are the 3 "so far" possible datatime storage + # formats in Persyst .lay + if "/" in testdate: + testdate = datetime.strptime(testdate, "%m/%d/%Y") + elif "-" in testdate: + testdate = datetime.strptime(testdate, "%d-%m-%Y") + elif "." 
in testdate: + testdate = datetime.strptime(testdate, "%Y.%m.%d") + + if not isinstance(testdate, datetime): + warn( + "Cannot read in the measurement date due " + "to incompatible format. Please set manually " + f"for {lay_fname} " + ) + meas_date = None + else: + testtime = datetime.strptime(patient_dict.get("testtime"), "%H:%M:%S") + meas_date = datetime( + year=testdate.year, + month=testdate.month, + day=testdate.day, + hour=testtime.hour, + minute=testtime.minute, + second=testtime.second, + tzinfo=timezone.utc, + ) + + # Create mne structure + ch_names = list(channelmap_dict.keys()) + if n_chs != len(ch_names): + raise RuntimeError( + "Channels in lay file do not " + "match the number of channels " + "in the .dat file." + ) # noqa + # get rid of the "-Ref" in channel names + ch_names = [ch.upper().split("-REF")[0] for ch in ch_names] + + # get the sampling rate and default channel types to EEG + sfreq = fileinfo_dict.get("samplingrate") + ch_types = "eeg" + info = create_info(ch_names, sfreq, ch_types=ch_types) + info.update(subject_info=subject_info) + with info._unlock(): + for idx in range(n_chs): + # calibration brings to uV then 1e-6 brings to V + info["chs"][idx]["cal"] = cal * 1.0e-6 + info["meas_date"] = meas_date + + # determine number of samples in file + # Note: We do not use the lay file to do this + # because clips in time may be generated by Persyst that + # DO NOT modify the "SampleTimes" section + with open(dat_fpath, "rb") as f: + # determine the precision + if int(datatype) == 7: + # 32 bit + dtype = np.dtype("i4") + elif int(datatype) == 0: + # 16 bit + dtype = np.dtype("i2") + else: + raise RuntimeError(f"Unknown format: {datatype}") + + # allow offset to occur + f.seek(0, os.SEEK_END) + n_samples = f.tell() + n_samples = n_samples // (dtype.itemsize * n_chs) + + logger.debug(f"Loaded {n_samples} samples for {n_chs} channels.") + + raw_extras = {"dtype": dtype, "n_chs": n_chs, "n_samples": n_samples} + # create Raw object + super().__init__( + info, + preload, + filenames=[dat_fpath], + last_samps=[n_samples - 1], + raw_extras=[raw_extras], + verbose=verbose, + ) + + # set annotations based on the comments read in + onset = np.zeros(num_comments, float) + duration = np.zeros(num_comments, float) + description = [""] * num_comments + + # loop through comments dictionary, which may contain + # multiple events for the same "text" annotation + t_idx = 0 + for _description, event_tuples in comments_dict.items(): + for _onset, _duration in event_tuples: + # extract the onset, duration, description to + # create an Annotations object + onset[t_idx] = _onset + duration[t_idx] = _duration + description[t_idx] = _description + t_idx += 1 + annot = Annotations(onset, duration, description) + self.set_annotations(annot) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file. + + The Persyst software records raw data in either 16 or 32 bit + binary files. In addition, it stores the calibration to convert + data to uV in the lay file. 
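+
+        As a worked example, a hypothetical 16-bit recording with 32 channels
+        stores sample ``t`` of channel ``c`` at byte offset
+        ``(t * 32 + c) * 2``, which is why the initial seek below is
+        ``n_chs * dtype.itemsize * start``.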
+ """ + dtype = self._raw_extras[fi]["dtype"] + n_chs = self._raw_extras[fi]["n_chs"] + dat_fname = self.filenames[fi] + + # compute samples count based on start and stop + time_length_samps = stop - start + + # read data from .dat file into array of correct size, then calibrate + # records = recnum rows x inf columns + count = time_length_samps * n_chs + + # seek the dat file + with open(dat_fname, "rb") as dat_file_ID: + # allow offset to occur + dat_file_ID.seek(n_chs * dtype.itemsize * start, 1) + + # read in the actual record starting at possibly offset + record = np.fromfile(dat_file_ID, dtype=dtype, count=count) + + # chs * rows + # cast as float32; more than enough precision + record = np.reshape(record, (n_chs, -1), order="F").astype(np.float32) + + # calibrate to convert to V and handle mult + _mult_cal_one(data, record, idx, cals, mult) + + +def _get_subjectinfo(patient_dict): + # attempt to parse out the birthdate, but if it doesn't + # meet spec, then it will set to None + birthdate = patient_dict.get("birthdate") + if "/" in birthdate: + try: + birthdate = datetime.strptime(birthdate, "%m/%d/%y") + except ValueError: + birthdate = None + print(f"Unable to process birthdate of {birthdate} ") + elif "-" in birthdate: + try: + birthdate = datetime.strptime(birthdate, "%d-%m-%y") + except ValueError: + birthdate = None + print(f"Unable to process birthdate of {birthdate} ") + + subject_info = { + "first_name": patient_dict.get("first"), + "middle_name": patient_dict.get("middle"), + "last_name": patient_dict.get("last"), + "sex": patient_dict.get("sex"), + "hand": patient_dict.get("hand"), + "his_id": patient_dict.get("id"), + "birthday": birthdate, + } + subject_info = {key: val for key, val in subject_info.items() if val is not None} + + # Recode sex values + sex_dict = dict( + m=FIFF.FIFFV_SUBJ_SEX_MALE, + male=FIFF.FIFFV_SUBJ_SEX_MALE, + f=FIFF.FIFFV_SUBJ_SEX_FEMALE, + female=FIFF.FIFFV_SUBJ_SEX_FEMALE, + ) + subject_info["sex"] = sex_dict.get(subject_info["sex"], FIFF.FIFFV_SUBJ_SEX_UNKNOWN) + + # Recode hand values + hand_dict = dict( + r=FIFF.FIFFV_SUBJ_HAND_RIGHT, + right=FIFF.FIFFV_SUBJ_HAND_RIGHT, + l=FIFF.FIFFV_SUBJ_HAND_LEFT, + left=FIFF.FIFFV_SUBJ_HAND_LEFT, + a=FIFF.FIFFV_SUBJ_HAND_AMBI, + ambidextrous=FIFF.FIFFV_SUBJ_HAND_AMBI, + ambi=FIFF.FIFFV_SUBJ_HAND_AMBI, + ) + # no handedness is set when unknown + try: + subject_info["hand"] = hand_dict[subject_info["hand"]] + except KeyError: + subject_info.pop("hand") + + return subject_info + + +def _read_lay_contents(fname): + """Lay file are laid out like a INI file.""" + # keep track of sections, keys and data + sections = [] + keys, data = [], [] + + # initialize all section to empty str + section = "" + with open(fname) as fin: + for line in fin: + # break a line into a status, key and value + status, key, val = _process_lay_line(line, section) + + # handle keys and values if they are + # Section, Subsections, or Line items + if status == 1: # Section was found + section = val.lower() + continue + + # keep track of all sections, subsections, + # keys and the data of the file + sections.append(section) + data.append(val) + keys.append(key) + + return keys, data, sections + + +def _process_lay_line(line, section): + """Process a line read from the Lay (INI) file. + + Each line in the .lay file will be processed + into a structured ``status``, ``key`` and ``value``. + + Parameters + ---------- + line : str + The actual line in the Lay file. + section : str + The section in the Lay file. 
+
+    Returns
+    -------
+    status : int
+        Returns the following integers based on status.
+        -1  => unknown string found
+        0   => empty line found
+        1   => section found
+        2   => key-value pair found
+    key : str
+        The string before the ``'='`` character. If section is "Comments",
+        then returns the text comment description.
+    value : str
+        The string from the line after the ``'='`` character. If section is
+        "Comments", then returns the onset and duration as a tuple.
+
+    Notes
+    -----
+    The lay file consists of multiple "sections" that are documented with
+    bracket ``[]`` characters. For example, ``[FileInfo]`` and the lines
+    afterward indicate metadata about the data file itself. Within
+    each section, there are multiple lines in the format of
+    ``<key>=<value>``.
+
+    For ``FileInfo``, ``Patient`` and ``ChannelMap``
+    each line will be denoted with a ``key`` and a ``value`` that
+    can be represented as a dictionary. The keys describe what sort
+    of data that line holds, while the values contain the corresponding
+    value. In some cases, the ``value``.
+
+    For ``SampleTimes``, the ``key`` and ``value`` pair indicate the
+    start and end time in seconds of the original data file.
+
+    The ``Comments`` section denotes an area where users, through
+    Persyst, actually annotate data in time. These annotations are
+    represented as 5 data points that are ``,`` delimited. These
+    data points are ordered as:
+
+        1. time (in seconds) of the annotation
+        2. duration (in seconds) of the annotation
+        3. state (unused)
+        4. variable type (unused)
+        5. free-form text describing the annotation
+    """
+    key = ""  # default; only return value possibly not set
+    line = line.strip()  # remove leading and trailing spaces
+    end_idx = len(line) - 1  # get the last index of the line
+
+    # empty sequence evaluates to false
+    if not line:
+        status = 0
+        key = ""
+        value = ""
+        return status, key, value
+    # section found
+    elif (line[0] == "[") and (line[end_idx] == "]") and (end_idx + 1 >= 3):
+        status = 1
+        value = line[1:end_idx].lower()
+    # key found
+    else:
+        # handle Comments section differently from all other sections
+        # TODO: utilize state and var_type in code.
+        # Currently not used
+        if section == "comments":
+            # Persyst Comments output 5 variables "," separated
+            time_sec, duration, state, var_type, text = line.split(",", 4)
+            del var_type, state
+            status = 2
+            key = text
+            value = (time_sec, duration)
+        # all other sections
+        else:
+            if "=" not in line:
+                raise RuntimeError(
+                    f"The line {line} does not conform "
+                    "to the standards. Please check the "
+                    ".lay file."
+                )  # noqa
+            pos = line.index("=")
+            status = 2
+
+            # the line now is composed of a
+            # <key>=<value>
+            key = line[0:pos]
+            key.strip()
+            value = line[pos + 1 : end_idx + 1]
+            value.strip()
+    return status, key, value
diff --git a/mne/io/pick.py b/mne/io/pick.py
new file mode 100644
index 0000000..e78cfc8
--- /dev/null
+++ b/mne/io/pick.py
@@ -0,0 +1,18 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
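+
+# Compatibility shim: re-exports private helpers from mne._fiff.pick for the
+# downstream packages named below, which still import them from mne.io.pick.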
+ + +from .._fiff.pick import ( + _DATA_CH_TYPES_ORDER_DEFAULT, + _DATA_CH_TYPES_SPLIT, + _picks_to_idx, +) + +__all__ = [ + # mne-bids, autoreject, mne-connectivity, mne-realtime, mne-nirs, mne-realtime + "_picks_to_idx", + # mne-qt-browser + "_DATA_CH_TYPES_ORDER_DEFAULT", + "_DATA_CH_TYPES_SPLIT", +] diff --git a/mne/io/snirf/__init__.py b/mne/io/snirf/__init__.py new file mode 100644 index 0000000..a50ff50 --- /dev/null +++ b/mne/io/snirf/__init__.py @@ -0,0 +1,7 @@ +"""SNIRF module for conversion to FIF.""" + +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +from ._snirf import read_raw_snirf diff --git a/mne/io/snirf/_snirf.py b/mne/io/snirf/_snirf.py new file mode 100644 index 0000000..c07790b --- /dev/null +++ b/mne/io/snirf/_snirf.py @@ -0,0 +1,585 @@ +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. + +import datetime +import re + +import numpy as np + +from ..._fiff._digitization import _make_dig_points +from ..._fiff.constants import FIFF +from ..._fiff.meas_info import _format_dig_points, create_info +from ..._fiff.utils import _mult_cal_one +from ..._freesurfer import get_mni_fiducials +from ...annotations import Annotations +from ...transforms import _frame_to_str, apply_trans +from ...utils import _check_fname, _import_h5py, fill_doc, logger, verbose, warn +from ..base import BaseRaw +from ..nirx.nirx import _convert_fnirs_to_head + + +@fill_doc +def read_raw_snirf( + fname, optode_frame="unknown", preload=False, verbose=None +) -> "RawSNIRF": + """Reader for a continuous wave SNIRF data. + + .. note:: This reader supports the .snirf file type only, + not the .jnirs version. + Files with either 3D or 2D locations can be read. + However, we strongly recommend using 3D positions. + If 2D positions are used the behaviour of MNE functions + can not be guaranteed. + + Parameters + ---------- + fname : path-like + Path to the SNIRF data file. + optode_frame : str + Coordinate frame used for the optode positions. The default is unknown, + in which case the positions are not modified. If a known coordinate + frame is provided (head, meg, mri), then the positions are transformed + in to the Neuromag head coordinate frame (head). + %(preload)s + %(verbose)s + + Returns + ------- + raw : instance of RawSNIRF + A Raw object containing fNIRS data. + See :class:`mne.io.Raw` for documentation of attributes and methods. + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods of RawSNIRF. + """ + return RawSNIRF(fname, optode_frame, preload, verbose) + + +def _open(fname): + return open(fname, encoding="latin-1") + + +@fill_doc +class RawSNIRF(BaseRaw): + """Raw object from a continuous wave SNIRF file. + + Parameters + ---------- + fname : path-like + Path to the SNIRF data file. + optode_frame : str + Coordinate frame used for the optode positions. The default is unknown, + in which case the positions are not modified. If a known coordinate + frame is provided (head, meg, mri), then the positions are transformed + in to the Neuromag head coordinate frame (head). + %(preload)s + %(verbose)s + + See Also + -------- + mne.io.Raw : Documentation of attributes and methods. 
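+
+    Examples
+    --------
+    A minimal sketch (the file name is hypothetical):
+
+    >>> raw = read_raw_snirf("recording.snirf", optode_frame="mri")  # doctest: +SKIP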
+ """ + + @verbose + def __init__(self, fname, optode_frame="unknown", preload=False, verbose=None): + # Must be here due to circular import error + from ...preprocessing.nirs import _validate_nirs_info + + h5py = _import_h5py() + + fname = str(_check_fname(fname, "read", True, "fname")) + logger.info(f"Loading {fname}") + + with h5py.File(fname, "r") as dat: + if "data2" in dat["nirs"]: + warn( + "File contains multiple recordings. " + "MNE does not support this feature. " + "Only the first dataset will be processed." + ) + + manufacturer = _get_metadata_str(dat, "ManufacturerName") + if (optode_frame == "unknown") & (manufacturer == "Gowerlabs"): + optode_frame = "head" + + snirf_data_type = np.array( + dat.get("nirs/data1/measurementList1/dataType") + ).item() + if snirf_data_type not in [1, 99999]: + # 1 = Continuous Wave + # 99999 = Processed + raise RuntimeError( + "MNE only supports reading continuous" + " wave amplitude and processed haemoglobin" + " SNIRF files. Expected type" + " code 1 or 99999 but received type " + f"code {snirf_data_type}" + ) + + last_samps = dat.get("/nirs/data1/dataTimeSeries").shape[0] - 1 + + sampling_rate = _extract_sampling_rate(dat) + + if sampling_rate == 0: + warn("Unable to extract sample rate from SNIRF file.") + + # Extract wavelengths + fnirs_wavelengths = np.array(dat.get("nirs/probe/wavelengths")) + fnirs_wavelengths = [int(w) for w in fnirs_wavelengths] + if len(fnirs_wavelengths) != 2: + raise RuntimeError( + f"The data contains " + f"{len(fnirs_wavelengths)}" + f" wavelengths: {fnirs_wavelengths}. " + f"MNE only supports reading continuous" + " wave amplitude SNIRF files " + "with two wavelengths." + ) + + # Extract channels + def atoi(text): + return int(text) if text.isdigit() else text + + def natural_keys(text): + return [atoi(c) for c in re.split(r"(\d+)", text)] + + channels = np.array([name for name in dat["nirs"]["data1"].keys()]) + channels_idx = np.array(["measurementList" in n for n in channels]) + channels = channels[channels_idx] + channels = sorted(channels, key=natural_keys) + + # Source and detector labels are optional fields. + # Use S1, S2, S3, etc if not specified. + if "sourceLabels_disabled" in dat["nirs/probe"]: + # This is disabled as + # MNE-Python does not currently support custom source names. + # Instead, sources must be integer values. + sources = np.array(dat.get("nirs/probe/sourceLabels")) + sources = [s.decode("UTF-8") for s in sources] + else: + sources = np.unique( + [ + _correct_shape( + np.array(dat.get("nirs/data1/" + c + "/sourceIndex")) + )[0] + for c in channels + ] + ) + sources = {int(s): f"S{int(s)}" for s in sources} + + if "detectorLabels_disabled" in dat["nirs/probe"]: + # This is disabled as + # MNE-Python does not currently support custom detector names. + # Instead, detector must be integer values. + detectors = np.array(dat.get("nirs/probe/detectorLabels")) + detectors = [d.decode("UTF-8") for d in detectors] + else: + detectors = np.unique( + [ + _correct_shape( + np.array(dat.get("nirs/data1/" + c + "/detectorIndex")) + )[0] + for c in channels + ] + ) + detectors = {int(d): f"D{int(d)}" for d in detectors} + + # Extract source and detector locations + # 3D positions are optional in SNIRF, + # but highly recommended in MNE. 
+ if ("detectorPos3D" in dat["nirs/probe"]) & ( + "sourcePos3D" in dat["nirs/probe"] + ): + # If 3D positions are available they are used even if 2D exists + detPos3D = np.array(dat.get("nirs/probe/detectorPos3D")) + srcPos3D = np.array(dat.get("nirs/probe/sourcePos3D")) + elif ("detectorPos2D" in dat["nirs/probe"]) & ( + "sourcePos2D" in dat["nirs/probe"] + ): + warn( + "The data only contains 2D location information for the " + "optode positions. " + "It is highly recommended that data is used " + "which contains 3D location information for the " + "optode positions. With only 2D locations it can not be " + "guaranteed that MNE functions will behave correctly " + "and produce accurate results. If it is not possible to " + "include 3D positions in your data, please consider " + "using the set_montage() function." + ) + + detPos2D = np.array(dat.get("nirs/probe/detectorPos2D")) + srcPos2D = np.array(dat.get("nirs/probe/sourcePos2D")) + # Set the third dimension to zero. See gh#9308 + detPos3D = np.append(detPos2D, np.zeros((detPos2D.shape[0], 1)), axis=1) + srcPos3D = np.append(srcPos2D, np.zeros((srcPos2D.shape[0], 1)), axis=1) + + else: + raise RuntimeError( + "No optode location information is " + "provided. MNE requires at least 2D " + "location information" + ) + + chnames = [] + ch_types = [] + for chan in channels: + src_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/sourceIndex")) + )[0] + ) + det_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/detectorIndex")) + )[0] + ) + + if snirf_data_type == 1: + wve_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/wavelengthIndex")) + )[0] + ) + ch_name = ( + sources[src_idx] + + "_" + + detectors[det_idx] + + " " + + str(fnirs_wavelengths[wve_idx - 1]) + ) + chnames.append(ch_name) + ch_types.append("fnirs_cw_amplitude") + + elif snirf_data_type == 99999: + dt_id = _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/dataTypeLabel")) + )[0].decode("UTF-8") + + # Convert between SNIRF processed names and MNE type names + dt_id = dt_id.lower().replace("dod", "fnirs_od") + + ch_name = sources[src_idx] + "_" + detectors[det_idx] + + if dt_id == "fnirs_od": + wve_idx = int( + _correct_shape( + np.array( + dat.get("nirs/data1/" + chan + "/wavelengthIndex") + ) + )[0] + ) + suffix = " " + str(fnirs_wavelengths[wve_idx - 1]) + else: + suffix = " " + dt_id.lower() + ch_name = ch_name + suffix + + chnames.append(ch_name) + ch_types.append(dt_id) + + # Create mne structure + info = create_info(chnames, sampling_rate, ch_types=ch_types) + + subject_info = {} + names = np.array(dat.get("nirs/metaDataTags/SubjectID")) + names = _correct_shape(names)[0].decode("UTF-8") + subject_info["his_id"] = names + # Read non standard (but allowed) custom metadata tags + if "lastName" in dat.get("nirs/metaDataTags/"): + ln = dat.get("/nirs/metaDataTags/lastName")[0].decode("UTF-8") + subject_info["last_name"] = ln + if "middleName" in dat.get("nirs/metaDataTags/"): + m = dat.get("/nirs/metaDataTags/middleName")[0].decode("UTF-8") + subject_info["middle_name"] = m + if "firstName" in dat.get("nirs/metaDataTags/"): + fn = dat.get("/nirs/metaDataTags/firstName")[0].decode("UTF-8") + subject_info["first_name"] = fn + else: + # MNE < 1.7 used to not write the firstName tag, so pull it from names + subject_info["first_name"] = names.split("_")[0] + if "sex" in dat.get("nirs/metaDataTags/"): + s = dat.get("/nirs/metaDataTags/sex")[0].decode("UTF-8") + if s in {"M", "Male", "1", "m"}: + 
subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_MALE + elif s in {"F", "Female", "2", "f"}: + subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_FEMALE + elif s in {"0", "u"}: + subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN + # End non standard name reading + # Update info + info.update(subject_info=subject_info) + + length_unit = _get_metadata_str(dat, "LengthUnit") + length_scaling = _get_lengthunit_scaling(length_unit) + + srcPos3D /= length_scaling + detPos3D /= length_scaling + + if optode_frame in ["mri", "meg"]: + # These are all in MNI or MEG coordinates, so let's transform + # them to the Neuromag head coordinate frame + srcPos3D, detPos3D, _, head_t = _convert_fnirs_to_head( + "fsaverage", optode_frame, "head", srcPos3D, detPos3D, [] + ) + else: + head_t = np.eye(4) + + if optode_frame in ["head", "mri", "meg"]: + # Then the transformation to head was performed above + coord_frame = FIFF.FIFFV_COORD_HEAD + elif "MNE_coordFrame" in dat.get("nirs/metaDataTags/"): + coord_frame = int(dat.get("/nirs/metaDataTags/MNE_coordFrame")[0]) + else: + coord_frame = FIFF.FIFFV_COORD_UNKNOWN + + for idx, chan in enumerate(channels): + src_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/sourceIndex")) + )[0] + ) + det_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/detectorIndex")) + )[0] + ) + + info["chs"][idx]["loc"][3:6] = srcPos3D[src_idx - 1, :] + info["chs"][idx]["loc"][6:9] = detPos3D[det_idx - 1, :] + # Store channel as mid point + midpoint = ( + info["chs"][idx]["loc"][3:6] + info["chs"][idx]["loc"][6:9] + ) / 2 + info["chs"][idx]["loc"][0:3] = midpoint + info["chs"][idx]["coord_frame"] = coord_frame + + if (snirf_data_type in [1]) or ( + (snirf_data_type == 99999) and (ch_types[idx] == "fnirs_od") + ): + wve_idx = int( + _correct_shape( + np.array(dat.get("nirs/data1/" + chan + "/wavelengthIndex")) + )[0] + ) + info["chs"][idx]["loc"][9] = fnirs_wavelengths[wve_idx - 1] + + if "landmarkPos3D" in dat.get("nirs/probe/"): + diglocs = np.array(dat.get("/nirs/probe/landmarkPos3D")) + diglocs /= length_scaling + digname = np.array(dat.get("/nirs/probe/landmarkLabels")) + nasion, lpa, rpa, hpi = None, None, None, None + extra_ps = dict() + for idx, dign in enumerate(digname): + dign = dign.lower() + if dign in [b"lpa", b"al"]: + lpa = diglocs[idx, :3] + elif dign in [b"nasion"]: + nasion = diglocs[idx, :3] + elif dign in [b"rpa", b"ar"]: + rpa = diglocs[idx, :3] + else: + extra_ps[f"EEG{len(extra_ps) + 1:03d}"] = diglocs[idx, :3] + add_missing_fiducials = ( + coord_frame == FIFF.FIFFV_COORD_HEAD + and lpa is None + and rpa is None + and nasion is None + ) + dig = _make_dig_points( + nasion=nasion, + lpa=lpa, + rpa=rpa, + hpi=hpi, + dig_ch_pos=extra_ps, + coord_frame=_frame_to_str[coord_frame], + add_missing_fiducials=add_missing_fiducials, + ) + else: + ch_locs = [info["chs"][idx]["loc"][0:3] for idx in range(len(channels))] + # Set up digitization + dig = get_mni_fiducials("fsaverage", verbose=False) + for fid in dig: + fid["r"] = apply_trans(head_t, fid["r"]) + fid["coord_frame"] = FIFF.FIFFV_COORD_HEAD + for ii, ch_loc in enumerate(ch_locs, 1): + dig.append( + dict( + kind=FIFF.FIFFV_POINT_EEG, # misnomer prob okay + r=ch_loc, + ident=ii, + coord_frame=FIFF.FIFFV_COORD_HEAD, + ) + ) + dig = _format_dig_points(dig) + del head_t + with info._unlock(): + info["dig"] = dig + + str_date = _correct_shape( + np.array(dat.get("/nirs/metaDataTags/MeasurementDate")) + )[0].decode("UTF-8") + str_time = _correct_shape( + 
np.array(dat.get("/nirs/metaDataTags/MeasurementTime")) + )[0].decode("UTF-8") + str_datetime = str_date + str_time + + # Several formats have been observed so we try each in turn + for dt_code in [ + "%Y-%m-%d%H:%M:%SZ", + "%Y-%m-%d%H:%M:%S", + "%Y-%m-%d%H:%M:%S.%f", + "%Y-%m-%d%H:%M:%S.%f%z", + ]: + try: + meas_date = datetime.datetime.strptime(str_datetime, dt_code) + except ValueError: + pass + else: + break + else: + warn( + "Extraction of measurement date from SNIRF file failed. " + "The date is being set to January 1st, 2000, " + f"instead of {str_datetime}" + ) + meas_date = datetime.datetime(2000, 1, 1, 0, 0, 0) + meas_date = meas_date.replace(tzinfo=datetime.timezone.utc) + with info._unlock(): + info["meas_date"] = meas_date + + if "DateOfBirth" in dat.get("nirs/metaDataTags/"): + str_birth = ( + np.array(dat.get("/nirs/metaDataTags/DateOfBirth")).item().decode() + ) + birth_matched = re.fullmatch(r"(\d+)-(\d+)-(\d+)", str_birth) + if birth_matched is not None: + birthday = datetime.date( + int(birth_matched.groups()[0]), + int(birth_matched.groups()[1]), + int(birth_matched.groups()[2]), + ) + with info._unlock(): + info["subject_info"]["birthday"] = birthday + + super().__init__( + info, + preload, + filenames=[fname], + last_samps=[last_samps], + verbose=verbose, + ) + + # Extract annotations + # As described at https://github.com/fNIRS/snirf/ + # blob/master/snirf_specification.md#nirsistimjdata + annot = Annotations([], [], []) + for key in dat["nirs"]: + if "stim" in key: + data = np.atleast_2d(np.array(dat.get("/nirs/" + key + "/data"))) + if data.shape[1] >= 3: + desc = _correct_shape( + np.array(dat.get("/nirs/" + key + "/name")) + )[0] + annot.append(data[:, 0], data[:, 1], desc.decode("UTF-8")) + self.set_annotations(annot, emit_warning=False) + + # Validate that the fNIRS info is correctly formatted + _validate_nirs_info(self.info) + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a segment of data from a file.""" + import h5py + + with h5py.File(self.filenames[0], "r") as dat: + one = dat["/nirs/data1/dataTimeSeries"][start:stop].T + + _mult_cal_one(data, one, idx, cals, mult) + + +# Helper function for when the numpy array has shape (), i.e. just one element. +def _correct_shape(arr): + if arr.shape == (): + arr = arr[np.newaxis] + return arr + + +def _get_timeunit_scaling(time_unit): + """MNE expects time in seconds, return required scaling.""" + scalings = {"ms": 1000, "s": 1, "unknown": 1} + if time_unit in scalings: + return scalings[time_unit] + else: + raise RuntimeError( + f"The time unit {time_unit} is not supported by " + "MNE. Please report this error as a GitHub " + "issue to inform the developers." + ) + + +def _get_lengthunit_scaling(length_unit): + """MNE expects distance in m, return required scaling.""" + scalings = {"m": 1, "cm": 100, "mm": 1000} + if length_unit in scalings: + return scalings[length_unit] + else: + raise RuntimeError( + f"The length unit {length_unit} is not supported " + "by MNE. Please report this error as a GitHub " + "issue to inform the developers." + ) + + +def _extract_sampling_rate(dat): + """Extract the sample rate from the time field.""" + # This is a workaround to provide support for Artinis data. + # It allows for a 1% variation in the sampling times relative + # to the average sampling rate of the file. 
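+    # The SNIRF time field is stored either as two values, from which a
+    # single sample period is derived, or as one time point per sample;
+    # both forms are handled below, with a jitter check for the latter.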
+
+    MAXIMUM_ALLOWED_SAMPLING_JITTER_PERCENTAGE = 1.0
+
+    time_data = np.array(dat.get("nirs/data1/time"))
+    sampling_rate = 0
+    if len(time_data) == 2:
+        # specified as onset, samplerate
+        sampling_rate = 1.0 / (time_data[1] - time_data[0])
+    else:
+        # specified as time points
+        periods = np.diff(time_data)
+        uniq_periods = np.unique(periods.round(decimals=4))
+        if uniq_periods.size == 1:
+            # Uniformly sampled data
+            sampling_rate = 1.0 / uniq_periods.item()
+        else:
+            # Hopefully uniformly sampled data with some precision issues.
+            # This is a workaround to provide support for Artinis data.
+            mean_period = np.mean(periods)
+            sampling_rate = 1.0 / mean_period
+            ideal_times = np.linspace(time_data[0], time_data[-1], time_data.size)
+            max_jitter = np.max(np.abs(time_data - ideal_times))
+            percent_jitter = 100.0 * max_jitter / mean_period
+            msg = (
+                f"Found jitter of {percent_jitter:.3f}% in sample times. Sampling "
+                f"rate has been set to {sampling_rate:.1f}."
+            )
+            if percent_jitter > MAXIMUM_ALLOWED_SAMPLING_JITTER_PERCENTAGE:
+                warn(
+                    f"{msg} Note that MNE-Python does not currently support SNIRF "
+                    "files with non-uniformly-sampled data."
+                )
+            else:
+                logger.info(msg)
+    time_unit = _get_metadata_str(dat, "TimeUnit")
+    time_unit_scaling = _get_timeunit_scaling(time_unit)
+    sampling_rate *= time_unit_scaling
+
+    return sampling_rate
+
+
+def _get_metadata_str(dat, field):
+    if field not in np.array(dat.get("nirs/metaDataTags")):
+        return None
+    data = dat.get(f"/nirs/metaDataTags/{field}")
+    data = _correct_shape(np.array(data))
+    data = str(data[0], "utf-8")
+    return data
diff --git a/mne/label.py b/mne/label.py
new file mode 100644
index 0000000..f681441
--- /dev/null
+++ b/mne/label.py
@@ -0,0 +1,2984 @@
+# Authors: The MNE-Python contributors.
+# License: BSD-3-Clause
+# Copyright the MNE-Python contributors.
+
+import copy as cp
+import os
+import os.path as op
+import re
+from collections import defaultdict
+from colorsys import hsv_to_rgb, rgb_to_hsv
+
+import numpy as np
+from scipy import linalg, sparse
+
+from .fixes import _safe_svd
+from .morph_map import read_morph_map
+from .parallel import parallel_func
+from .source_estimate import (
+    SourceEstimate,
+    VolSourceEstimate,
+    _center_of_mass,
+    extract_label_time_course,
+    spatial_src_adjacency,
+)
+from .source_space._source_space import (
+    SourceSpaces,
+    _ensure_src,
+    add_source_space_distances,
+)
+from .stats.cluster_level import _find_clusters, _get_components
+from .surface import (
+    _mesh_borders,
+    complete_surface_info,
+    fast_cross_3d,
+    mesh_dist,
+    mesh_edges,
+    read_surface,
+)
+from .utils import (
+    _check_fname,
+    _check_option,
+    _check_subject,
+    _validate_type,
+    check_random_state,
+    fill_doc,
+    get_subjects_dir,
+    logger,
+    verbose,
+    warn,
+)
+
+
+def _blend_colors(color_1, color_2):
+    """Blend two colors in HSV space.
+
+    Parameters
+    ----------
+    color_1, color_2 : None | tuple
+        RGBA tuples with values between 0 and 1. None if no color is available.
+        If both colors are None, the output is None. If only one is None, the
+        output is the other color.
+
+    Returns
+    -------
+    color : None | tuple
+        RGBA tuple of the combined color. Saturation, value and alpha are
+        averaged, whereas the new hue is determined as the angle halfway
+        between the two input colors' hues.
+ """ + if color_1 is None and color_2 is None: + return None + elif color_1 is None: + return color_2 + elif color_2 is None: + return color_1 + + r_1, g_1, b_1, a_1 = color_1 + h_1, s_1, v_1 = rgb_to_hsv(r_1, g_1, b_1) + r_2, g_2, b_2, a_2 = color_2 + h_2, s_2, v_2 = rgb_to_hsv(r_2, g_2, b_2) + hue_diff = abs(h_1 - h_2) + if hue_diff < 0.5: + h = min(h_1, h_2) + hue_diff / 2.0 + else: + h = max(h_1, h_2) + (1.0 - hue_diff) / 2.0 + h %= 1.0 + s = (s_1 + s_2) / 2.0 + v = (v_1 + v_2) / 2.0 + r, g, b = hsv_to_rgb(h, s, v) + a = (a_1 + a_2) / 2.0 + color = (r, g, b, a) + return color + + +def _split_colors(color, n): + """Create n colors in HSV space that occupy a gradient in value. + + Parameters + ---------- + color : tuple + RGBA tuple with values between 0 and 1. + n : int >= 2 + Number of colors on the gradient. + + Returns + ------- + colors : tuple of tuples, len = n + N RGBA tuples that occupy a gradient in value (low to high) but share + saturation and hue with the input color. + """ + r, g, b, a = color + h, s, v = rgb_to_hsv(r, g, b) + gradient_range = np.sqrt(n / 10.0) + if v > 0.5: + v_max = min(0.95, v + gradient_range / 2) + v_min = max(0.05, v_max - gradient_range) + else: + v_min = max(0.05, v - gradient_range / 2) + v_max = min(0.95, v_min + gradient_range) + + hsv_colors = ((h, s, v_) for v_ in np.linspace(v_min, v_max, n)) + rgb_colors = (hsv_to_rgb(h_, s_, v_) for h_, s_, v_ in hsv_colors) + rgba_colors = ( + ( + r_, + g_, + b_, + a, + ) + for r_, g_, b_ in rgb_colors + ) + return tuple(rgba_colors) + + +def _n_colors(n, bytes_=False, cmap="hsv"): + """Produce a list of n unique RGBA color tuples based on a colormap. + + Parameters + ---------- + n : int + Number of colors. + bytes : bool + Return colors as integers values between 0 and 255 (instead of floats + between 0 and 1). + cmap : str + Which colormap to use. + + Returns + ------- + colors : array, shape (n, 4) + RGBA color values. + """ + n_max = 2**10 + if n > n_max: + raise NotImplementedError(f"Can't produce more than {n_max} unique colors.") + + from .viz.utils import _get_cmap + + cm = _get_cmap(cmap) + pos = np.linspace(0, 1, n, False) + colors = cm(pos, bytes=bytes_) + if bytes_: + # make sure colors are unique + for ii, c in enumerate(colors): + if np.any(np.all(colors[:ii] == c, 1)): + raise RuntimeError( + f"Could not get {n} unique colors from {cmap} " + "colormap. Try using a different colormap." + ) + return colors + + +@fill_doc +class Label: + """A FreeSurfer/MNE label with vertices restricted to one hemisphere. + + Labels can be combined with the ``+`` operator: + + * Duplicate vertices are removed. + * If duplicate vertices have conflicting position values, an error + is raised. + * Values of duplicate vertices are summed. + + Parameters + ---------- + vertices : array, shape (N,) + Vertex indices (0 based). + pos : array, shape (N, 3) | None + Locations in meters. If None, then zeros are used. + values : array, shape (N,) | None + Values at the vertices. If None, then ones are used. + hemi : 'lh' | 'rh' + Hemisphere to which the label applies. + comment : str + Kept as information but not used by the object itself. + name : str + Kept as information but not used by the object itself. + filename : str + Kept as information but not used by the object itself. + %(subject_label)s + color : None | matplotlib color + Default label color and alpha (e.g., ``(1., 0., 0., 1.)`` for red). 
+ %(verbose)s + + Attributes + ---------- + color : None | tuple + Default label color, represented as RGBA tuple with values between 0 + and 1. + comment : str + Comment from the first line of the label file. + hemi : 'lh' | 'rh' + Hemisphere. + name : None | str + A name for the label. It is OK to change that attribute manually. + pos : array, shape (N, 3) + Locations in meters. + subject : str | None + The label subject. + It is best practice to set this to the proper + value on initialization, but it can also be set manually. + values : array, shape (N,) + Values at the vertices. + vertices : array, shape (N,) + Vertex indices (0 based) + """ + + @verbose + def __init__( + self, + vertices=(), + pos=None, + values=None, + hemi=None, + comment="", + name=None, + filename=None, + subject=None, + color=None, + *, + verbose=None, + ): + # check parameters + if not isinstance(hemi, str): + raise ValueError(f"hemi must be a string, not {type(hemi)}") + vertices = np.asarray(vertices, int) + if np.any(np.diff(vertices.astype(int)) <= 0): + raise ValueError("Vertices must be ordered in increasing order.") + + if color is not None: + from matplotlib.colors import colorConverter + + color = colorConverter.to_rgba(color) + + if values is None: + values = np.ones(len(vertices)) + else: + values = np.asarray(values) + + if pos is None: + pos = np.zeros((len(vertices), 3)) + else: + pos = np.asarray(pos) + + if not (len(vertices) == len(values) == len(pos)): + raise ValueError( + "vertices, values and pos need to have same " + "length (number of vertices)" + ) + + # name + if name is None and filename is not None: + name = op.basename(filename[:-6]) + + self.vertices = vertices + self.pos = pos + self.values = values + self.hemi = hemi + self.comment = comment + self.subject = _check_subject(None, subject, raise_error=False) + self.color = color + self.name = name + self.filename = filename + + def __setstate__(self, state): # noqa: D105 + self.vertices = state["vertices"] + self.pos = state["pos"] + self.values = state["values"] + self.hemi = state["hemi"] + self.comment = state["comment"] + self.subject = state.get("subject", None) + self.color = state.get("color", None) + self.name = state["name"] + self.filename = state["filename"] + + def __getstate__(self): # noqa: D105 + out = dict( + vertices=self.vertices, + pos=self.pos, + values=self.values, + hemi=self.hemi, + comment=self.comment, + subject=self.subject, + color=self.color, + name=self.name, + filename=self.filename, + ) + return out + + def __repr__(self): # noqa: D105 + name = "unknown, " if self.subject is None else self.subject + ", " + name += repr(self.name) if self.name is not None else "unnamed" + n_vert = len(self) + return f"