initial commit
This commit is contained in:
9
mne/io/__init__.py
Normal file
@@ -0,0 +1,9 @@
"""IO module for reading raw data."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import lazy_loader as lazy

(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__)
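For context, lazy.attach_stub from the lazy_loader package wires module-level __getattr__ and __dir__ so that each name declared in the adjacent __init__.pyi stub (next file) is imported only on first attribute access, which keeps "import mne" cheap. A minimal usage sketch, assuming an installed mne built from this layout:

    import mne.io  # cheap: no reader submodule is imported yet

    # First attribute access triggers the real `from .fiff import ...` declared
    # in __init__.pyi; later accesses hit the module dict directly.
    reader = mne.io.read_raw_fif

    # __dir__/__all__ also come from the stub, so introspection still works.
    assert "read_raw_fif" in dir(mne.io)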
91
mne/io/__init__.pyi
Normal file
@@ -0,0 +1,91 @@
__all__ = [
    "BaseRaw",
    "Raw",
    "RawArray",
    "anonymize_info",
    "concatenate_raws",
    "constants",
    "get_channel_type_constants",
    "match_channel_orders",
    "pick",
    "read_epochs_eeglab",
    "read_epochs_fieldtrip",
    "read_epochs_kit",
    "read_evoked_besa",
    "read_evoked_fieldtrip",
    "read_evokeds_mff",
    "read_fiducials",
    "read_info",
    "read_raw",
    "read_raw_ant",
    "read_raw_artemis123",
    "read_raw_bdf",
    "read_raw_boxy",
    "read_raw_brainvision",
    "read_raw_bti",
    "read_raw_cnt",
    "read_raw_ctf",
    "read_raw_curry",
    "read_raw_edf",
    "read_raw_eeglab",
    "read_raw_egi",
    "read_raw_eximia",
    "read_raw_eyelink",
    "read_raw_fieldtrip",
    "read_raw_fif",
    "read_raw_fil",
    "read_raw_gdf",
    "read_raw_hitachi",
    "read_raw_kit",
    "read_raw_nedf",
    "read_raw_neuralynx",
    "read_raw_nicolet",
    "read_raw_nihon",
    "read_raw_nirx",
    "read_raw_nsx",
    "read_raw_persyst",
    "read_raw_snirf",
    "show_fiff",
    "write_fiducials",
    "write_info",
]
from . import constants, pick
from ._fiff_wrap import (
    anonymize_info,
    get_channel_type_constants,
    read_fiducials,
    read_info,
    show_fiff,
    write_fiducials,
    write_info,
)
from ._read_raw import read_raw
from .ant import read_raw_ant
from .array import RawArray
from .artemis123 import read_raw_artemis123
from .base import BaseRaw, concatenate_raws, match_channel_orders
from .besa import read_evoked_besa
from .boxy import read_raw_boxy
from .brainvision import read_raw_brainvision
from .bti import read_raw_bti
from .cnt import read_raw_cnt
from .ctf import read_raw_ctf
from .curry import read_raw_curry
from .edf import read_raw_bdf, read_raw_edf, read_raw_gdf
from .eeglab import read_epochs_eeglab, read_raw_eeglab
from .egi import read_evokeds_mff, read_raw_egi
from .eximia import read_raw_eximia
from .eyelink import read_raw_eyelink
from .fieldtrip import read_epochs_fieldtrip, read_evoked_fieldtrip, read_raw_fieldtrip
from .fiff import Raw, read_raw_fif
from .fil import read_raw_fil
from .hitachi import read_raw_hitachi
from .kit import read_epochs_kit, read_raw_kit
from .nedf import read_raw_nedf
from .neuralynx import read_raw_neuralynx
from .nicolet import read_raw_nicolet
from .nihon import read_raw_nihon
from .nirx import read_raw_nirx
from .nsx import read_raw_nsx
from .persyst import read_raw_persyst
from .snirf import read_raw_snirf
20
mne/io/_fiff_wrap.py
Normal file
@@ -0,0 +1,20 @@
# ruff: noqa: F401
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

# Backward compat since these were in the public API before switching to _fiff
# (and _empty_info is convenient to keep here for tests and is private)
from .._fiff.meas_info import (
    Info as _info,
)
from .._fiff.meas_info import (
    _empty_info,
    anonymize_info,
    read_fiducials,
    read_info,
    write_fiducials,
    write_info,
)
from .._fiff.open import show_fiff
from .._fiff.pick import get_channel_type_constants  # moved up a level
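The net effect of this shim is that the public import paths keep working after the internals moved to the private _fiff package. A quick check, assuming an installed mne with this layout:

    import mne.io
    from mne._fiff.meas_info import read_info  # private home of the implementation

    # The re-export and the private original are the same object.
    assert mne.io.read_info is read_info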
193
mne/io/_read_raw.py
Normal file
@@ -0,0 +1,193 @@
"""Generic wrapper function read_raw for specific read_raw_xxx readers."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from functools import partial
from pathlib import Path

from ..utils import fill_doc
from .base import BaseRaw


def _read_unsupported(fname, **kwargs):
    ext = "".join(Path(fname).suffixes)
    msg = f"Unsupported file type ({ext})."
    suggest = kwargs.get("suggest")
    if suggest is not None:
        msg += f" Try reading a {suggest} file instead."
    msg += " Consider using a dedicated reader function for more options."
    raise ValueError(msg)


# supported read file formats
def _get_supported():
    from . import (
        read_raw_ant,
        read_raw_artemis123,
        read_raw_bdf,
        read_raw_boxy,
        read_raw_brainvision,
        read_raw_cnt,
        read_raw_ctf,
        read_raw_curry,
        read_raw_edf,
        read_raw_eeglab,
        read_raw_egi,
        read_raw_eximia,
        read_raw_eyelink,
        read_raw_fieldtrip,
        read_raw_fif,
        read_raw_fil,
        read_raw_gdf,
        read_raw_kit,
        read_raw_nedf,
        read_raw_nicolet,
        read_raw_nihon,
        read_raw_nirx,
        read_raw_nsx,
        read_raw_persyst,
        read_raw_snirf,
    )

    return {
        ".edf": dict(EDF=read_raw_edf),
        ".eeg": dict(NihonKoden=read_raw_nihon),
        ".bdf": dict(BDF=read_raw_bdf),
        ".gdf": dict(GDF=read_raw_gdf),
        ".vhdr": dict(brainvision=read_raw_brainvision),
        ".ahdr": dict(brainvision=read_raw_brainvision),
        ".fif": dict(FIF=read_raw_fif),
        ".fif.gz": dict(FIF=read_raw_fif),
        ".set": dict(EEGLAB=read_raw_eeglab),
        ".cnt": dict(CNT=read_raw_cnt, ANT=read_raw_ant),
        ".mff": dict(EGI=read_raw_egi),
        ".nxe": dict(eximia=read_raw_eximia),
        ".hdr": dict(NIRx=read_raw_nirx),
        ".snirf": dict(SNIRF=read_raw_snirf),
        ".mat": dict(fieldtrip=read_raw_fieldtrip),
        ".bin": {
            "ARTEMIS": read_raw_artemis123,
            "UCL FIL OPM": read_raw_fil,
        },
        ".data": dict(Nicolet=read_raw_nicolet),
        ".sqd": dict(KIT=read_raw_kit),
        ".con": dict(KIT=read_raw_kit),
        ".ds": dict(CTF=read_raw_ctf),
        ".txt": dict(BOXY=read_raw_boxy),
        # Curry
        ".dat": dict(CURRY=read_raw_curry),
        ".dap": dict(CURRY=read_raw_curry),
        ".rs3": dict(CURRY=read_raw_curry),
        ".cdt": dict(CURRY=read_raw_curry),
        ".cdt.dpa": dict(CURRY=read_raw_curry),
        ".cdt.cef": dict(CURRY=read_raw_curry),
        ".cef": dict(CURRY=read_raw_curry),
        # NEDF
        ".nedf": dict(NEDF=read_raw_nedf),
        # EyeLink
        ".asc": dict(EyeLink=read_raw_eyelink),
        ".ns3": dict(NSx=read_raw_nsx),
        ".lay": dict(Persyst=read_raw_persyst),
    }


# known but unsupported file formats
_suggested = {
    ".vmrk": dict(brainvision=partial(_read_unsupported, suggest=".vhdr")),
    ".amrk": dict(brainvision=partial(_read_unsupported, suggest=".ahdr")),
}


# all known file formats
def _get_readers():
    return {**_get_supported(), **_suggested}


def split_name_ext(fname):
    """Return name and supported file extension."""
    maxsuffixes = max(ext.count(".") for ext in _get_supported())
    suffixes = Path(fname).suffixes
    for si in range(-maxsuffixes, 0):
        ext = "".join(suffixes[si:]).lower()
        if ext in _get_readers():
            return Path(fname).name[: -len(ext)], ext
    return fname, None  # unknown file extension


@fill_doc
def read_raw(fname, *, preload=False, verbose=None, **kwargs) -> BaseRaw:
    """Read raw file.

    This function is a convenient wrapper for readers defined in `mne.io`. The
    correct reader is automatically selected based on the detected file format.
    All function arguments are passed to the respective reader.

    The following readers are currently supported:

    * `~mne.io.read_raw_ant`
    * `~mne.io.read_raw_artemis123`
    * `~mne.io.read_raw_bdf`
    * `~mne.io.read_raw_boxy`
    * `~mne.io.read_raw_brainvision`
    * `~mne.io.read_raw_cnt`
    * `~mne.io.read_raw_ctf`
    * `~mne.io.read_raw_curry`
    * `~mne.io.read_raw_edf`
    * `~mne.io.read_raw_eeglab`
    * `~mne.io.read_raw_egi`
    * `~mne.io.read_raw_eximia`
    * `~mne.io.read_raw_eyelink`
    * `~mne.io.read_raw_fieldtrip`
    * `~mne.io.read_raw_fif`
    * `~mne.io.read_raw_fil`
    * `~mne.io.read_raw_gdf`
    * `~mne.io.read_raw_kit`
    * `~mne.io.read_raw_nedf`
    * `~mne.io.read_raw_nicolet`
    * `~mne.io.read_raw_nihon`
    * `~mne.io.read_raw_nirx`
    * `~mne.io.read_raw_nsx`
    * `~mne.io.read_raw_persyst`
    * `~mne.io.read_raw_snirf`

    Parameters
    ----------
    fname : path-like
        Name of the file to read.
    %(preload)s
    %(verbose)s
    **kwargs
        Additional keyword arguments to pass to the underlying reader. For
        details, see the arguments of the reader for the respective file
        format.

    Returns
    -------
    raw : mne.io.Raw
        Raw object.
    """
    _, ext = split_name_ext(fname)
    kwargs["verbose"] = verbose
    kwargs["preload"] = preload
    readers = _get_readers()
    if ext not in readers:
        _read_unsupported(fname)
    these_readers = list(readers[ext].values())
    for reader in these_readers:
        try:
            return reader(fname, **kwargs)
        except Exception:
            if len(these_readers) == 1:
                raise
    else:
        choices = "\n".join(
            f"mne.io.{func.__name__.ljust(20)} ({kind})"
            for kind, func in readers[ext].items()
        )
        raise RuntimeError(
            "Could not read file using any of the possible readers for "
            f"extension {ext}. Consider trying to read the file directly with "
            f"one of:\n{choices}"
        )
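A usage sketch of the dispatch above (file names are placeholders): split_name_ext walks multi-part suffixes so compound extensions like ".fif.gz" resolve correctly, and read_raw then tries each registered reader for the extension in turn:

    from mne.io import read_raw
    from mne.io._read_raw import split_name_ext

    # Compound suffix handling: the longest candidate extension is tried first,
    # since ".fif.gz" / ".cdt.dpa" contain two dots.
    print(split_name_ext("sub-01_raw.fif.gz"))  # -> ("sub-01_raw", ".fif.gz")

    # Dispatch: ".vhdr" maps to read_raw_brainvision; a ".vmrk" sidecar instead
    # hits the _suggested table and raises a ValueError pointing at ".vhdr".
    raw = read_raw("recording.vhdr", preload=True)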
5
mne/io/ant/__init__.py
Normal file
@@ -0,0 +1,5 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .ant import read_raw_ant
338
mne/io/ant/ant.py
Normal file
@@ -0,0 +1,338 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from __future__ import annotations

import re
from collections import defaultdict
from typing import TYPE_CHECKING

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import create_info
from ...annotations import Annotations
from ...utils import (
    _check_fname,
    _soft_import,
    _validate_type,
    copy_doc,
    fill_doc,
    logger,
    verbose,
    warn,
)
from ..base import BaseRaw

if TYPE_CHECKING:
    from pathlib import Path

    from numpy.typing import NDArray

_UNITS: dict[str, float] = {"uv": 1e-6, "µv": 1e-6}


@fill_doc
class RawANT(BaseRaw):
    r"""Reader for Raw ANT files in .cnt format.

    Parameters
    ----------
    fname : file-like
        Path to the ANT raw file to load. The file should have the extension ``.cnt``.
    eog : str | None
        Regex pattern to find EOG channel labels. If None, no EOG channels are
        automatically detected.
    misc : str | None
        Regex pattern to find miscellaneous channels. If None, no miscellaneous
        channels are automatically detected. The default pattern ``"BIP\d+"`` will
        mark all bipolar channels as ``misc``.

        .. note::

            A bipolar channel might actually contain ECG, EOG or other signal types
            which might have a dedicated channel type in MNE-Python. In this case, use
            :meth:`mne.io.Raw.set_channel_types` to change the channel type of the
            channel.
    bipolars : list of str | tuple of str | None
        The list of channels to treat as bipolar EEG channels. Each element should be
        a string of the form ``'anode-cathode'``, or in ANT terminology
        ``'label-reference'``. If None, all channels are interpreted as ``'eeg'``
        channels referenced to the same reference electrode. Bipolar channels are
        treated as EEG channels with a special coil type in MNE-Python, see also
        :func:`mne.set_bipolar_reference`.

        .. warning::

            Do not provide auxiliary channels in this argument, provide them in the
            ``eog`` and ``misc`` arguments.
    impedance_annotation : str
        The string to use for impedance annotations. Defaults to ``"impedance"``;
        however, the impedance measurement might mark the end of a segment and the
        beginning of a new segment, in which case a discontinuity similar to what
        :func:`mne.concatenate_raws` produces is present. In this case, it's better to
        include a ``BAD_xxx`` annotation to mark the discontinuity.

        .. note::

            Note that the impedance annotation will likely have a duration of ``0``.
            If the measurement marks a discontinuity, the duration should be modified
            to cover the discontinuity in its entirety.
    encoding : str
        Encoding to use for :class:`str` in the CNT file. Defaults to ``'latin-1'``.
    %(preload)s
    %(verbose)s
    """

    @verbose
    def __init__(
        self,
        fname: str | Path,
        eog: str | None,
        misc: str | None,
        bipolars: list[str] | tuple[str, ...] | None,
        impedance_annotation: str,
        *,
        encoding: str = "latin-1",
        preload: bool | NDArray,
        verbose=None,
    ) -> None:
        logger.info("Reading ANT file %s", fname)
        _soft_import("antio", "reading ANT files", min_version="0.5.0")

        from antio import read_cnt
        from antio.parser import (
            read_device_info,
            read_info,
            read_meas_date,
            read_subject_info,
            read_triggers,
        )

        fname = _check_fname(fname, overwrite="read", must_exist=True, name="fname")
        _validate_type(eog, (str, None), "eog")
        _validate_type(misc, (str, None), "misc")
        _validate_type(bipolars, (list, tuple, None), "bipolar")
        _validate_type(impedance_annotation, (str,), "impedance_annotation")
        if len(impedance_annotation) == 0:
            raise ValueError("The impedance annotation cannot be an empty string.")
        cnt = read_cnt(fname)
        # parse channels, sampling frequency, and create info
        ch_names, ch_units, ch_refs, _, _ = read_info(cnt, encoding=encoding)
        ch_types = _parse_ch_types(ch_names, eog, misc, ch_refs)
        if bipolars is not None:  # handle bipolar channels
            bipolars_idx = _handle_bipolar_channels(ch_names, ch_refs, bipolars)
            for idx, ch in zip(bipolars_idx, bipolars):
                if ch_types[idx] != "eeg":
                    warn(
                        f"Channel {ch} was not parsed as an EEG channel, changing to "
                        "EEG channel type since bipolar EEG was requested."
                    )
                ch_names[idx] = ch
                ch_types[idx] = "eeg"
        info = create_info(
            ch_names, sfreq=cnt.get_sample_frequency(), ch_types=ch_types
        )
        info.set_meas_date(read_meas_date(cnt))
        make, model, serial, site = read_device_info(cnt, encoding=encoding)
        info["device_info"] = dict(type=make, model=model, serial=serial, site=site)
        his_id, name, sex, birthday = read_subject_info(cnt, encoding=encoding)
        info["subject_info"] = dict(
            his_id=his_id,
            first_name=name,
            sex=sex,
        )
        if birthday is not None:
            info["subject_info"]["birthday"] = birthday
        if bipolars is not None:
            with info._unlock():
                for idx in bipolars_idx:
                    info["chs"][idx]["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR
        first_samps = np.array((0,))
        last_samps = (cnt.get_sample_count() - 1,)
        raw_extras = {
            "orig_nchan": cnt.get_channel_count(),
            "orig_ch_units": ch_units,
            "first_samples": np.array(first_samps),
            "last_samples": np.array(last_samps),
        }
        super().__init__(
            info,
            preload=preload,
            first_samps=first_samps,
            last_samps=last_samps,
            filenames=[fname],
            verbose=verbose,
            raw_extras=[raw_extras],
        )
        # look for annotations (called trigger by ant)
        onsets, durations, descriptions, _, disconnect = read_triggers(cnt)
        onsets, durations, descriptions = _prepare_annotations(
            onsets, durations, descriptions, disconnect, impedance_annotation
        )
        onsets = np.array(onsets) / self.info["sfreq"]
        durations = np.array(durations) / self.info["sfreq"]
        annotations = Annotations(onsets, duration=durations, description=descriptions)
        self.set_annotations(annotations)

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        from antio import read_cnt
        from antio.parser import read_data

        ch_units = self._raw_extras[0]["orig_ch_units"]
        first_samples = self._raw_extras[0]["first_samples"]
        n_times = self._raw_extras[0]["last_samples"] + 1
        for first_samp, this_n_times in zip(first_samples, n_times):
            i_start = max(start, first_samp)
            i_stop = min(stop, this_n_times + first_samp)
            # read and scale data array
            cnt = read_cnt(self.filenames[fi])
            one = read_data(cnt, i_start, i_stop)
            _scale_data(one, ch_units)
            data_view = data[:, i_start - start : i_stop - start]
            if isinstance(idx, slice):
                data_view[:] = one[idx]
            else:
                # faster than doing one = one[idx]
                np.take(one, idx, axis=0, out=data_view)


def _handle_bipolar_channels(
    ch_names: list[str], ch_refs: list[str], bipolars: list[str] | tuple[str, ...]
) -> list[int]:
    """Handle bipolar channels."""
    bipolars_idx = []
    for ch in bipolars:
        _validate_type(ch, (str,), "bipolar_channel")
        if "-" not in ch:
            raise ValueError(
                "Bipolar channels should be provided as 'anode-cathode' or "
                f"'label-reference'. '{ch}' is not valid."
            )
        anode, cathode = ch.split("-")
        if anode not in ch_names:
            raise ValueError(f"Anode channel {anode} not found in the channels.")
        idx = ch_names.index(anode)
        if cathode != ch_refs[idx]:
            raise ValueError(
                f"Reference electrode for {anode} is {ch_refs[idx]}, not {cathode}."
            )
        # store idx for later FIFF coil type change
        bipolars_idx.append(idx)
    return bipolars_idx


def _parse_ch_types(
    ch_names: list[str], eog: str | None, misc: str | None, ch_refs: list[str]
) -> list[str]:
    """Parse the channel types."""
    eog = re.compile(eog) if eog is not None else None
    misc = re.compile(misc) if misc is not None else None
    ch_types = []
    for ch in ch_names:
        if eog is not None and re.fullmatch(eog, ch):
            ch_types.append("eog")
        elif misc is not None and re.fullmatch(misc, ch):
            ch_types.append("misc")
        else:
            ch_types.append("eeg")
    eeg_refs = [ch_refs[k] for k, elt in enumerate(ch_types) if elt == "eeg"]
    if len(set(eeg_refs)) == 1:
        logger.info(
            "All %i EEG channels are referenced to %s.", len(eeg_refs), eeg_refs[0]
        )
    else:
        warn("Not all EEG channels are referenced to the same electrode.")
    return ch_types


def _prepare_annotations(
    onsets: list[int],
    durations: list[int],
    descriptions: list[str],
    disconnect: dict[str, list[int]],
    impedance_annotation: str,
) -> tuple[list[int], list[int], list[str]]:
    """Parse the ANT triggers into better Annotations."""
    # first, let's replace the description 'impedance' with impedance_annotation
    for k, desc in enumerate(descriptions):
        if desc.lower() == "impedance":
            descriptions[k] = impedance_annotation
    # next, let's look for amplifier connection/disconnection and let's try to create
    # BAD_disconnection annotations from them.
    if (
        len(disconnect["start"]) == len(disconnect["stop"])
        and len(disconnect["start"]) != 0
        and all(
            0 <= stop - start
            for start, stop in zip(disconnect["start"], disconnect["stop"])
        )
    ):
        for start, stop in zip(disconnect["start"], disconnect["stop"]):
            onsets.append(start)
            durations.append(stop - start)
            descriptions.append("BAD_disconnection")
    else:
        for elt in disconnect["start"]:
            onsets.append(elt)
            durations.append(0)
            descriptions.append("Amplifier disconnected")
        for elt in disconnect["stop"]:
            onsets.append(elt)
            durations.append(0)
            descriptions.append("Amplifier reconnected")
    return onsets, durations, descriptions


def _scale_data(data: NDArray[np.float64], ch_units: list[str]) -> None:
    """Scale the data array based on the human-readable units reported by ANT.

    Operates in-place.
    """
    units_index = defaultdict(list)
    for idx, unit in enumerate(ch_units):
        units_index[unit].append(idx)
    for unit, value in units_index.items():
        if unit in _UNITS:
            data[np.array(value, dtype=np.int16), :] *= _UNITS[unit]
        else:
            warn(
                f"Unit {unit} not recognized, not scaling. Please report the unit on "
                "a github issue on https://github.com/mne-tools/mne-python."
            )


@copy_doc(RawANT)
def read_raw_ant(
    fname,
    eog=None,
    misc=r"BIP\d+",
    bipolars=None,
    impedance_annotation="impedance",
    *,
    encoding: str = "latin-1",
    preload=False,
    verbose=None,
) -> RawANT:
    """
    Returns
    -------
    raw : instance of RawANT
        A Raw object containing ANT data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    Notes
    -----
    .. versionadded:: 1.9
    """
    return RawANT(
        fname,
        eog=eog,
        misc=misc,
        bipolars=bipolars,
        impedance_annotation=impedance_annotation,
        encoding=encoding,
        preload=preload,
        verbose=verbose,
    )
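A usage sketch for the reader above; the path and channel labels are hypothetical, and the optional antio backend checked by _soft_import must be installed:

    from mne.io import read_raw_ant

    raw = read_raw_ant(
        "recording.cnt",                       # ANT .cnt file (placeholder path)
        misc=r"BIP\d+",                        # default: channels "BIP<n>" -> "misc"
        bipolars=["Fp1-M1"],                   # hypothetical 'label-reference' pair
        impedance_annotation="BAD_impedance",  # treat impedance breaks as bad spans
        preload=True,
    )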
7
mne/io/array/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""Module to convert user data to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .array import RawArray
96
mne/io/array/array.py
Normal file
@@ -0,0 +1,96 @@
"""Tools for creating Raw objects from numpy arrays."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import numpy as np

from ...utils import _check_option, _validate_type, fill_doc, logger, verbose
from ..base import BaseRaw


@fill_doc
class RawArray(BaseRaw):
    """Raw object from numpy array.

    Parameters
    ----------
    data : array, shape (n_channels, n_times)
        The channels' time series. See notes for proper units of measure.
    %(info_not_none)s Consider using :func:`mne.create_info` to populate
        this structure. This may be modified in place by the class.
    first_samp : int
        First sample offset used during recording (default 0).

        .. versionadded:: 0.12
    copy : {'data', 'info', 'both', 'auto', None}
        Determines what gets copied on instantiation. "auto" (default)
        will copy info, and copy "data" only if necessary to get to
        double floating point precision.

        .. versionadded:: 0.18
    %(verbose)s

    See Also
    --------
    mne.EpochsArray
    mne.EvokedArray
    mne.create_info

    Notes
    -----
    Proper units of measure:

    * V: eeg, eog, seeg, dbs, emg, ecg, bio, ecog
    * T: mag
    * T/m: grad
    * M: hbo, hbr
    * Am: dipole
    * AU: misc
    """

    @verbose
    def __init__(self, data, info, first_samp=0, copy="auto", verbose=None):
        _validate_type(info, "info", "info")
        _check_option("copy", copy, ("data", "info", "both", "auto", None))
        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
        orig_data = data
        data = np.asanyarray(orig_data, dtype=dtype)
        if data.ndim != 2:
            raise ValueError(
                "Data must be a 2D array of shape (n_channels, n_samples), got shape "
                f"{data.shape}"
            )
        if len(data) != len(info["ch_names"]):
            raise ValueError(
                'len(data) ({}) does not match len(info["ch_names"]) ({})'.format(
                    len(data), len(info["ch_names"])
                )
            )
        assert len(info["ch_names"]) == info["nchan"]
        if copy in ("auto", "info", "both"):
            info = info.copy()
        if copy in ("data", "both"):
            if data is orig_data:
                data = data.copy()
        elif copy != "auto" and data is not orig_data:
            raise ValueError(
                f"data copying was not requested by copy={copy!r} but it was required "
                "to get to double floating point precision"
            )
        logger.info(
            f"Creating RawArray with {dtype.__name__} data, "
            f"n_channels={data.shape[0]}, n_times={data.shape[1]}"
        )
        super().__init__(
            info, data, first_samps=(int(first_samp),), dtype=dtype, verbose=verbose
        )
        logger.info(
            "    Range : %d ... %d = %9.3f ... %9.3f secs",
            self.first_samp,
            self.last_samp,
            float(self.first_samp) / info["sfreq"],
            float(self.last_samp) / info["sfreq"],
        )
        logger.info("Ready.")
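A usage sketch of the class above; shapes and channel names are illustrative, and per the Notes section EEG data is expected in volts:

    import numpy as np
    import mne

    sfreq = 250.0  # Hz
    data = np.random.default_rng(0).standard_normal((2, 1000)) * 1e-6  # 2 ch, volts
    info = mne.create_info(["EEG 001", "EEG 002"], sfreq=sfreq, ch_types="eeg")
    # copy="auto": info is copied; data is kept as-is since it is already float64
    raw = mne.io.RawArray(data, info)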
7
mne/io/artemis123/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""artemis123 module for conversion to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .artemis123 import read_raw_artemis123
530
mne/io/artemis123/artemis123.py
Normal file
@@ -0,0 +1,530 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import calendar
import datetime
import os.path as op

import numpy as np
from scipy.spatial.distance import cdist

from ..._fiff._digitization import DigPoint, _make_dig_points
from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _empty_info
from ..._fiff.utils import _read_segments_file
from ...transforms import Transform, apply_trans, get_ras_to_neuromag_trans
from ...utils import _check_fname, logger, verbose, warn
from ..base import BaseRaw
from .utils import _load_mne_locs, _read_pos


@verbose
def read_raw_artemis123(
    input_fname, preload=False, verbose=None, pos_fname=None, add_head_trans=True
) -> "RawArtemis123":
    """Read Artemis123 data as raw object.

    Parameters
    ----------
    input_fname : path-like
        Path to the data file (extension ``.bin``). The header file with the
        same file name stem and an extension ``.txt`` is expected to be found
        in the same directory.
    %(preload)s
    %(verbose)s
    pos_fname : path-like | None
        If not None, load digitized head points from this file.
    add_head_trans : bool (default True)
        If True, attempt to perform initial head localization by computing an
        initial device-to-head coordinate transform using the HPI coils. If no
        HPI coils are in info['dig'], the HPI coils are assumed to be in the
        canonical order of the fiducial points (nas, rpa, lpa).

    Returns
    -------
    raw : instance of Raw
        A Raw object containing the data.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """
    return RawArtemis123(
        input_fname,
        preload=preload,
        verbose=verbose,
        pos_fname=pos_fname,
        add_head_trans=add_head_trans,
    )


def _get_artemis123_info(fname, pos_fname=None):
    """Generate info struct from artemis123 header file."""
    fname = op.splitext(fname)[0]
    header = fname + ".txt"

    logger.info("Reading header...")

    # key names for artemis channel info...
    chan_keys = [
        "name",
        "scaling",
        "FLL_Gain",
        "FLL_Mode",
        "FLL_HighPass",
        "FLL_AutoReset",
        "FLL_ResetLock",
    ]

    header_info = dict()
    header_info["filter_hist"] = []
    header_info["comments"] = ""
    header_info["channels"] = []

    with open(header) as fid:
        # section flag
        # 0 - None
        # 1 - main header
        # 2 - channel header
        # 3 - comments
        # 4 - length
        # 5 - filtering history
        sectionFlag = 0
        for line in fid:
            # skip empty lines or the header line for channel info
            if (not line.strip()) or (sectionFlag == 2 and line.startswith("DAQ Map")):
                continue

            # set sectionFlag
            if line.startswith("<end"):
                sectionFlag = 0
            elif line.startswith("<start main header>"):
                sectionFlag = 1
            elif line.startswith("<start per channel header>"):
                sectionFlag = 2
            elif line.startswith("<start comments>"):
                sectionFlag = 3
            elif line.startswith("<start length>"):
                sectionFlag = 4
            elif line.startswith("<start filtering history>"):
                sectionFlag = 5
            else:
                # parse header info lines
                # part of main header - lines are name value pairs
                if sectionFlag == 1:
                    values = line.strip().split("\t")
                    if len(values) == 1:
                        values.append("")
                    header_info[values[0]] = values[1]
                # part of channel header - lines are Channel Info
                elif sectionFlag == 2:
                    values = line.strip().split("\t")
                    if len(values) != 7:
                        raise OSError(
                            f"Error parsing line \n\t:{line}\nfrom file {header}"
                        )
                    tmp = dict()
                    for k, v in zip(chan_keys, values):
                        tmp[k] = v
                    header_info["channels"].append(tmp)
                elif sectionFlag == 3:
                    header_info["comments"] = f"{header_info['comments']}{line.strip()}"
                elif sectionFlag == 4:
                    header_info["num_samples"] = int(line.strip())
                elif sectionFlag == 5:
                    header_info["filter_hist"].append(line.strip())

    for k in [
        "Temporal Filter Active?",
        "Decimation Active?",
        "Spatial Filter Active?",
    ]:
        if header_info[k] != "FALSE":
            warn(f"{k} - set but is not supported")
    if header_info["filter_hist"]:
        warn("Non-empty filter history found, but it is not supported")

    # build mne info struct
    info = _empty_info(float(header_info["DAQ Sample Rate"]))

    # Attempt to get time/date from fname
    # Artemis123 files saved from the scanner observe the following
    # naming convention 'Artemis_Data_YYYY-MM-DD-HHh-MMm_[chosen by user].bin'
    try:
        date = datetime.datetime.strptime(
            op.basename(fname).split("_")[2], "%Y-%m-%d-%Hh-%Mm"
        )
        meas_date = (calendar.timegm(date.utctimetuple()), 0)
    except Exception:
        meas_date = None

    # build subject info - id must be an integer (as per FIFF)
    try:
        subject_info = {"id": int(header_info["Subject ID"])}
    except ValueError:
        subject_info = {"id": 0}

    # build description
    desc = ""
    for k in ["Purpose", "Notes"]:
        desc += f"{k} : {header_info[k]}\n"
    desc += f"Comments : {header_info['comments']}"

    info.update(
        {
            "meas_date": meas_date,
            "description": desc,
            "subject_info": subject_info,
            "proj_name": header_info["Project Name"],
        }
    )

    # Channel Names by type
    ref_mag_names = ["REF_001", "REF_002", "REF_003", "REF_004", "REF_005", "REF_006"]

    ref_grad_names = ["REF_007", "REF_008", "REF_009", "REF_010", "REF_011", "REF_012"]

    # load mne loc dictionary
    loc_dict = _load_mne_locs()
    info["chs"] = []
    bads = []

    for i, chan in enumerate(header_info["channels"]):
        # build chs struct
        t = {
            "cal": float(chan["scaling"]),
            "ch_name": chan["name"],
            "logno": i + 1,
            "scanno": i + 1,
            "range": 1.0,
            "unit_mul": FIFF.FIFF_UNITM_NONE,
            "coord_frame": FIFF.FIFFV_COORD_DEVICE,
        }
        # REF_018 has a zero cal which can cause problems. Let's set it to
        # a value of another ref channel to make writers/readers happy.
        if t["cal"] == 0:
            t["cal"] = 4.716e-10
            bads.append(t["ch_name"])
        t["loc"] = loc_dict.get(chan["name"], np.zeros(12))

        if chan["name"].startswith("MEG"):
            t["coil_type"] = FIFF.FIFFV_COIL_ARTEMIS123_GRAD
            t["kind"] = FIFF.FIFFV_MEG_CH
            # While gradiometer units are T/m, the meg sensors referred to as
            # gradiometers report the field difference between 2 pick-up coils.
            # Therefore the units of the measurements should be T
            # *AND* the baseline (difference between pickup coils)
            # should not be used in leadfield / forwardfield computations.
            t["unit"] = FIFF.FIFF_UNIT_T
            t["unit_mul"] = FIFF.FIFF_UNITM_F

        # 3 axis reference magnetometers
        elif chan["name"] in ref_mag_names:
            t["coil_type"] = FIFF.FIFFV_COIL_ARTEMIS123_REF_MAG
            t["kind"] = FIFF.FIFFV_REF_MEG_CH
            t["unit"] = FIFF.FIFF_UNIT_T
            t["unit_mul"] = FIFF.FIFF_UNITM_F

        # reference gradiometers
        elif chan["name"] in ref_grad_names:
            t["coil_type"] = FIFF.FIFFV_COIL_ARTEMIS123_REF_GRAD
            t["kind"] = FIFF.FIFFV_REF_MEG_CH
            # While gradiometer units are T/m, the meg sensors referred to as
            # gradiometers report the field difference between 2 pick-up coils.
            # Therefore the units of the measurements should be T
            # *AND* the baseline (difference between pickup coils)
            # should not be used in leadfield / forwardfield computations.
            t["unit"] = FIFF.FIFF_UNIT_T
            t["unit_mul"] = FIFF.FIFF_UNITM_F

        # other reference channels are unplugged and should be ignored.
        elif chan["name"].startswith("REF"):
            t["coil_type"] = FIFF.FIFFV_COIL_NONE
            t["kind"] = FIFF.FIFFV_MISC_CH
            t["unit"] = FIFF.FIFF_UNIT_V
            bads.append(t["ch_name"])

        elif chan["name"].startswith(("AUX", "TRG", "MIO")):
            t["coil_type"] = FIFF.FIFFV_COIL_NONE
            t["unit"] = FIFF.FIFF_UNIT_V
            if chan["name"].startswith("TRG"):
                t["kind"] = FIFF.FIFFV_STIM_CH
            else:
                t["kind"] = FIFF.FIFFV_MISC_CH
        else:
            raise ValueError(
                f'Channel does not match expected channel types: "{chan["name"]}"'
            )

        # incorporate multiplier (unit_mul) into calibration
        t["cal"] *= 10 ** t["unit_mul"]
        t["unit_mul"] = FIFF.FIFF_UNITM_NONE

        # append this channel to the info
        info["chs"].append(t)
        if chan["FLL_ResetLock"] == "TRUE":
            bads.append(t["ch_name"])

    # HPI information
    # print header_info.keys()
    hpi_sub = dict()
    # Don't know what event_channel is; we don't think we have it. HPIs are
    # either always on or always off.
    # hpi_sub['event_channel'] = ???
    hpi_sub["hpi_coils"] = [dict(), dict(), dict(), dict()]
    hpi_coils = [dict(), dict(), dict(), dict()]
    drive_channels = ["MIO_001", "MIO_003", "MIO_009", "MIO_011"]
    key_base = "Head Tracking %s %d"

    # set default HPI frequencies
    if info["sfreq"] == 1000:
        default_freqs = [140, 150, 160, 40]
    else:
        default_freqs = [700, 750, 800, 40]

    for i in range(4):
        # build coil structure
        hpi_coils[i]["number"] = i + 1
        hpi_coils[i]["drive_chan"] = drive_channels[i]
        this_freq = header_info.pop(key_base % ("Frequency", i + 1), default_freqs[i])
        hpi_coils[i]["coil_freq"] = this_freq

        # check if coil is on
        if header_info[key_base % ("Channel", i + 1)] == "OFF":
            hpi_sub["hpi_coils"][i]["event_bits"] = [0]
        else:
            hpi_sub["hpi_coils"][i]["event_bits"] = [256]

    info["hpi_subsystem"] = hpi_sub
    info["hpi_meas"] = [{"hpi_coils": hpi_coils}]
    # read in digitized points if supplied
    if pos_fname is not None:
        info["dig"] = _read_pos(pos_fname)
    else:
        info["dig"] = []

    info._unlocked = False
    info._update_redundant()
    # reduce info['bads'] to unique set
    info["bads"] = list(set(bads))
    del bads
    return info, header_info


class RawArtemis123(BaseRaw):
    """Raw object from Artemis123 file.

    Parameters
    ----------
    input_fname : path-like
        Path to the Artemis123 data file (ending in ``'.bin'``).
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """

    @verbose
    def __init__(
        self,
        input_fname,
        preload=False,
        verbose=None,
        pos_fname=None,
        add_head_trans=True,
    ):
        from ...chpi import (
            _fit_coil_order_dev_head_trans,
            compute_chpi_amplitudes,
            compute_chpi_locs,
        )

        input_fname = str(_check_fname(input_fname, "read", True, "input_fname"))
        fname, ext = op.splitext(input_fname)
        if ext == ".txt":
            input_fname = fname + ".bin"
        elif ext != ".bin":
            raise RuntimeError('Valid artemis123 files must end in ".txt" or ".bin".')

        if not op.exists(input_fname):
            raise RuntimeError(f"{input_fname} - Not Found")

        info, header_info = _get_artemis123_info(input_fname, pos_fname=pos_fname)

        last_samps = [header_info.get("num_samples", 1) - 1]

        super().__init__(
            info,
            preload,
            filenames=[input_fname],
            raw_extras=[header_info],
            last_samps=last_samps,
            orig_format="single",
            verbose=verbose,
        )

        if add_head_trans:
            n_hpis = 0
            for d in info["hpi_subsystem"]["hpi_coils"]:
                if d["event_bits"] == [256]:
                    n_hpis += 1
            if n_hpis < 3:
                warn(
                    f"{n_hpis:d} HPIs active. At least 3 needed to perform "
                    "head localization\n *NO* head localization performed"
                )
            else:
                # Localize HPIs using the first 250 milliseconds of data.
                with info._unlock():
                    info["hpi_results"] = [
                        dict(
                            dig_points=[
                                dict(
                                    r=np.zeros(3),
                                    coord_frame=FIFF.FIFFV_COORD_DEVICE,
                                    ident=ii + 1,
                                )
                                for ii in range(n_hpis)
                            ],
                            coord_trans=Transform("meg", "head"),
                        )
                    ]
                coil_amplitudes = compute_chpi_amplitudes(
                    self, tmin=0, tmax=0.25, t_window=0.25, t_step_min=0.25
                )
                assert len(coil_amplitudes["times"]) == 1
                coil_locs = compute_chpi_locs(self.info, coil_amplitudes)
                with info._unlock():
                    info["hpi_results"] = None
                hpi_g = coil_locs["gofs"][0]
                hpi_dev = coil_locs["rrs"][0]

                # only use HPI coils with localization goodness_of_fit > 0.98
                bad_idx = []
                for i, g in enumerate(hpi_g):
                    msg = f"HPI coil {i + 1} - location goodness of fit ({g:0.3f})"
                    if g < 0.98:
                        bad_idx.append(i)
                        msg += " *Removed from coregistration*"
                    logger.info(msg)
                hpi_dev = np.delete(hpi_dev, bad_idx, axis=0)
                hpi_g = np.delete(hpi_g, bad_idx, axis=0)

                if pos_fname is not None:
                    # Digitized HPI points are needed.
                    hpi_head = np.array(
                        [
                            d["r"]
                            for d in self.info.get("dig", [])
                            if d["kind"] == FIFF.FIFFV_POINT_HPI
                        ]
                    )

                    if len(hpi_head) != len(hpi_dev):
                        raise RuntimeError(
                            f"number of digitized ({len(hpi_head)}) and active "
                            f"({len(hpi_dev)}) HPI coils are not the same."
                        )

                    # compute initial head to dev transform and hpi ordering
                    head_to_dev_t, order, trans_g = _fit_coil_order_dev_head_trans(
                        hpi_dev, hpi_head
                    )

                    # set the device to head transform
                    self.info["dev_head_t"] = Transform(
                        FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_HEAD, head_to_dev_t
                    )

                    # add hpi_meg_dev to dig...
                    for idx, point in enumerate(hpi_dev):
                        d = {
                            "r": point,
                            "ident": idx + 1,
                            "kind": FIFF.FIFFV_POINT_HPI,
                            "coord_frame": FIFF.FIFFV_COORD_DEVICE,
                        }
                        self.info["dig"].append(DigPoint(d))

                    dig_dists = cdist(hpi_head[order], hpi_head[order])
                    dev_dists = cdist(hpi_dev, hpi_dev)
                    tmp_dists = np.abs(dig_dists - dev_dists)
                    dist_limit = tmp_dists.max() * 1.1

                    logger.info(
                        "HPI-Dig coregistration\n"
                        f"\tGOF : {trans_g:0.3f}\n"
                        f"\tMax Coil Error : {100 * tmp_dists.max():0.3f} cm\n"
                    )

                else:
                    logger.info("Assuming Cardinal HPIs")
                    nas = hpi_dev[0]
                    lpa = hpi_dev[2]
                    rpa = hpi_dev[1]
                    t = get_ras_to_neuromag_trans(nas, lpa, rpa)
                    with self.info._unlock():
                        self.info["dev_head_t"] = Transform(
                            FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_HEAD, t
                        )

                    # transform fiducial points
                    nas = apply_trans(t, nas)
                    lpa = apply_trans(t, lpa)
                    rpa = apply_trans(t, rpa)

                    hpi = apply_trans(self.info["dev_head_t"], hpi_dev)
                    with self.info._unlock():
                        self.info["dig"] = _make_dig_points(
                            nasion=nas, lpa=lpa, rpa=rpa, hpi=hpi
                        )
                    order = np.array([0, 1, 2])
                    dist_limit = 0.005

                # fill in hpi_results
                hpi_result = dict()

                # add HPI points in device coords...
                dig = []
                for idx, point in enumerate(hpi_dev):
                    dig.append(
                        {
                            "r": point,
                            "ident": idx + 1,
                            "kind": FIFF.FIFFV_POINT_HPI,
                            "coord_frame": FIFF.FIFFV_COORD_DEVICE,
                        }
                    )
                hpi_result["dig_points"] = dig

                # attach Transform
                hpi_result["coord_trans"] = self.info["dev_head_t"]

                # 1 based indexing
                hpi_result["order"] = order + 1
                hpi_result["used"] = np.arange(3) + 1
                hpi_result["dist_limit"] = dist_limit
                hpi_result["good_limit"] = 0.98

                # Warn for large discrepancies between digitized and fit
                # cHPI locations
                if hpi_result["dist_limit"] > 0.005:
                    warn(
                        "Large difference between digitized geometry"
                        " and HPI geometry. Max coil to coil difference"
                        f" is {100.0 * tmp_dists.max():0.2f} cm\n"
                        "beware of *POOR* head localization"
                    )

                # store it
                with self.info._unlock():
                    self.info["hpi_results"] = [hpi_result]

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype=">f4")
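A usage sketch for the reader above; the path is a placeholder following the documented 'Artemis_Data_YYYY-MM-DD-HHh-MMm_[chosen by user].bin' naming convention:

    from mne.io import read_raw_artemis123

    raw = read_raw_artemis123(
        "Artemis_Data_2017-01-01-10h-30m_test.bin",  # "..._test.txt" header alongside
        pos_fname=None,       # no digitization file: cardinal HPI order (nas, rpa, lpa)
        add_head_trans=True,  # fit dev_head_t from HPI coils in the first 250 ms
    )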
146
mne/io/artemis123/resources/Artemis123_ChannelMap.csv
Normal file
@@ -0,0 +1,146 @@
name,Channel Type,CAD X+ (INCH),CAD Y+ (INCH),CAD Z+ (INCH),CAD X- (INCH),CAD Y- (INCH),CAD Z- (INCH)
Derived from '90-0395 Channel Map for 6th cooldown 2-01-13.xls',,,,,,,
MEG_059,MEG_GRAD,-1.97677,1.56552,2.91489,-4.18768,2.50074,5.40664
MEG_045,MEG_GRAD,-1.61144,0.93037,3.41137,-3.33479,1.92534,6.24186
MEG_029,MEG_GRAD,-0.91075,1.72387,3.473,-1.93587,2.72988,6.62081
MEG_073,MEG_GRAD,-2.38955,0.86972,2.76491,-4.94504,1.79985,4.90406
MEG_043,MEG_GRAD,-1.59926,2.33243,2.93122,-3.46787,3.39595,5.64209
MEG_085,MEG_GRAD,-2.78631,1.40783,1.84839,-5.89386,2.21359,3.13893
REF_013,UNUSED,,,,,,
MEG_071,MEG_GRAD,-2.43321,2.17533,2.12153,-5.27622,3.05529,3.88634
MEG_032,MEG_GRAD,0.93037,-1.61144,3.41137,1.92534,-3.33479,6.24186
MEG_048,MEG_GRAD,1.27145,-2.20222,2.76491,2.6312,-4.55737,4.90406
MEG_018,MEG_GRAD,0.44157,-2.50427,2.76491,0.91381,-5.18245,4.90406
MEG_006,MEG_GRAD,0,-3.0105,1.94967,0,-6.23006,3.21696
MEG_005,MEG_GRAD,0,-1.86073,3.41137,0,-3.85068,6.24186
MEG_049,MEG_GRAD,-1.27145,-2.20222,2.76491,-2.6312,-4.55737,4.90406
MEG_019,MEG_GRAD,-0.44157,-2.50427,2.76491,-0.91381,-5.18245,4.90406
MEG_033,MEG_GRAD,-0.93037,-1.61144,3.41137,-1.92534,-3.33479,6.24186
MEG_021,MEG_GRAD,-0.56074,-3.168,1.10519,-1.13708,-6.39559,2.21066
MEG_020,MEG_GRAD,0.56022,-3.16809,1.10519,1.13604,-6.39578,2.21066
MEG_034,MEG_GRAD,1.02965,-2.82894,1.94967,2.13081,-5.85434,3.21696
MEG_077,MEG_GRAD,-2.47272,-2.0647,1.06346,-5.01426,-4.15829,2.12604
MEG_035,MEG_GRAD,-1.02965,-2.82894,1.94967,-2.13081,-5.85434,3.21696
MEG_007,MEG_GRAD,0,-3.27147,0.25764,0,-6.63351,1.0751
MEG_023,MEG_GRAD,-0.576,-3.27431,-0.5962,-1.16503,-6.58484,0.21931
MEG_022,MEG_GRAD,0.56022,-3.27709,-0.59609,1.14872,-6.58771,0.21942
MEG_047,MEG_GRAD,-1.61144,-0.93037,3.41137,-3.33479,-1.92534,6.24186
MEG_061,MEG_GRAD,-1.86073,0,3.41137,-3.85068,0,6.24186
MEG_087,MEG_GRAD,-2.5429,0,2.76491,-5.2624,0,4.90406
MEG_113,MEG_GRAD,-3.22769,0.0086,0.98505,-6.5452,0.046,1.96703
MEG_101,MEG_GRAD,-2.96476,-0.52277,1.94967,-6.13541,-1.08184,3.21696
MEG_099,MEG_GRAD,-2.96476,0.52277,1.94967,-6.13541,1.08184,3.21696
MEG_063,MEG_GRAD,-1.94798,-1.63455,2.76491,-4.03123,-3.38261,4.90406
MEG_075,MEG_GRAD,-2.38955,-0.86972,2.76491,-4.94504,-1.79985,4.90406
MEG_089,MEG_GRAD,-2.60717,-1.50525,1.94967,-5.39539,-3.11503,3.21696
MEG_123,MEG_GRAD,-3.24454,-0.65992,-1.54654,-6.63007,-1.24165,-1.13258
MEG_103,MEG_GRAD,-3.03312,-1.09456,1.02677,-6.15066,-2.19102,2.05164
MEG_119,MEG_GRAD,-3.27163,-0.04807,-0.71822,-6.66217,-0.02172,-0.02891
MEG_121,MEG_GRAD,-3.24454,0.48346,-1.58979,-6.63007,1.0948,-1.22095
MEG_105,MEG_GRAD,-3.07707,-1.16672,-0.67591,-6.26323,-2.29919,0.05723
MEG_091,MEG_GRAD,-2.81455,-1.64764,0.19622,-5.75085,-3.31563,0.94961
MEG_115,MEG_GRAD,-3.20059,-0.58777,0.15614,-6.53962,-1.15004,0.86771
MEG_037,MEG_GRAD,-1.11155,-3.07561,0.25023,-2.27119,-6.23333,1.05996
MEG_067,MEG_GRAD,-2.08904,-2.51166,0.2289,-4.26844,-5.08104,1.01638
MEG_079,MEG_GRAD,-2.51137,-2.1514,-0.63867,-5.10885,-4.30296,0.13301
MEG_093,MEG_GRAD,-2.8532,-1.73435,-1.50591,-5.7542,-3.37531,-0.57691
MEG_051,MEG_GRAD,-1.61407,-2.7848,1.0907,-3.27306,-5.61852,2.18127
MEG_065,MEG_GRAD,-1.93511,-2.30617,1.94967,-4.00461,-4.7725,3.21696
REF_014,UNUSED,,,,,,
MEG_053,MEG_GRAD,-1.64275,-2.88336,-0.61098,-3.33826,-5.79135,0.1893
MEG_039,MEG_GRAD,-1.37821,4.03301,0.38766,-2.98972,7.09471,0.3625
MEG_041,MEG_GRAD,-1.59926,3.67934,1.66789,-3.46787,6.31662,2.90266
MEG_055,MEG_GRAD,-2.06278,3.53364,0.8475,-4.47296,6.00069,1.12372
MEG_069,MEG_GRAD,-2.43321,2.88136,1.45931,-5.27622,4.58626,2.45038
MEG_027,MEG_GRAD,-1.02514,3.32279,2.63742,-2.22293,5.54346,5.00502
MEG_025,MEG_GRAD,-0.92333,4.17235,1.20548,-2.00217,7.38566,1.89996
MEG_057,MEG_GRAD,-1.84667,3.00588,2.29955,-4.00435,4.85628,4.27238
REF_015,UNUSED,,,,,,
MEG_083,MEG_GRAD,-2.81067,2.32514,1.52142,-6.13327,3.15736,2.01067
MEG_095,MEG_GRAD,-2.85632,2.16654,0.82155,-6.24599,2.85761,0.88605
MEG_117,MEG_GRAD,-3.14455,0.87829,-0.52294,-6.53422,1.56936,-0.45844
MEG_109,MEG_GRAD,-3.0226,1.3925,0.37679,-6.41227,2.08357,0.44129
MEG_107,MEG_GRAD,-2.7791,2.44789,0.19401,-6.01824,3.66345,0.23867
MEG_111,MEG_GRAD,-3.20059,0.54013,0.11348,-6.53962,1.15454,0.78055
MEG_097,MEG_GRAD,-3.04326,1.22292,1.10768,-6.3884,1.94226,1.62169
MEG_081,MEG_GRAD,-2.54021,2.92425,0.68688,-5.5195,4.68347,0.71098
REF_001,REF_MAG,-2.26079604,3.98626183,5.04439808,-2.20703425,3.92437924,4.93090704
REF_002,REF_MAG,1.93013445,4.03046866,5.17689263,1.8763992,3.96852956,5.06341985
REF_004,REF_MAG,1.70031266,4.21202221,5.57217923,1.57144014,4.22797498,5.62449924
REF_012,REF_GRAD,4.64675,-0.89642,-0.43802,6.03162,-1.01804,-0.22614
REF_006,REF_MAG,2.07781,3.83073028,5.60154279,2.08802749,3.70619491,5.66468189
REF_008,REF_GRAD,4.50056,0.78066,1.76423,5.88573,0.92199,1.96135
REF_010,REF_GRAD,4.31926,2.18698,-0.37055,5.69806,2.46181,-0.34022
MEG_094,REF_GRAD,2.85632,2.16654,0.82155,6.24599,2.85761,0.88605
REF_016,UNUSED,,,,,,
REF_003,REF_MAG,-2.73073962,4.07852721,5.1569653,-2.8596759,4.06162797,5.1051015
REF_017,UNUSED,,,,,,
REF_011,REF_GRAD,-4.64675,-0.89642,-0.43802,-6.03162,-1.01804,-0.22614
REF_009,REF_GRAD,-4.31926,2.18698,-0.37055,-5.69806,2.46181,-0.34022
REF_007,REF_GRAD,-4.50056,0.78066,1.76423,-5.88573,0.92199,1.96135
REF_018,UNUSED,,,,,,
REF_005,REF_MAG,-2.4058382,3.78665997,5.47001894,-2.41506358,3.66222139,5.53350068
MEG_090,MEG_GRAD,2.81455,-1.64764,0.19622,5.75085,-3.31563,0.94961
MEG_088,MEG_GRAD,2.60717,-1.50525,1.94967,5.39539,-3.11503,3.21696
MEG_102,MEG_GRAD,3.03294,-1.09506,1.02679,6.1503,-2.19202,2.05167
MEG_122,MEG_GRAD,3.24454,-0.65992,-1.54654,6.63007,-1.24165,-1.13258
MEG_114,MEG_GRAD,3.20059,-0.58777,0.15614,6.53962,-1.15004,0.86771
MEG_104,MEG_GRAD,3.07159,-1.18176,-0.67534,6.25756,-2.31475,0.05782
MEG_120,MEG_GRAD,3.24454,0.48346,-1.58979,6.63007,1.0948,-1.22094
MEG_118,MEG_GRAD,3.27163,-0.06408,-0.71761,6.66217,-0.03828,-0.02828
MEG_106,MEG_GRAD,2.7791,2.44789,0.19401,6.01824,3.66345,0.23867
MEG_082,MEG_GRAD,2.81067,2.32514,1.52142,6.13327,3.15736,2.01067
MEG_110,MEG_GRAD,3.20059,0.54013,0.11348,6.53962,1.15454,0.78055
MEG_116,MEG_GRAD,3.14455,0.87829,-0.52294,6.53422,1.56936,-0.45844
MEG_096,MEG_GRAD,3.04326,1.22292,1.10768,6.3884,1.94226,1.62169
MEG_080,MEG_GRAD,2.54021,2.92425,0.68688,5.5195,4.68347,0.71098
MEG_108,MEG_GRAD,3.0226,1.3925,0.37679,6.41227,2.08357,0.44129
REF_019,UNUSED,,,,,,
MEG_009,MEG_GRAD,-0.48824,4.32904,0.13976,-1.05817,7.74156,0.10133
MEG_003,MEG_GRAD,0,3.44805,2.77097,0,5.81508,5.29461
MEG_010,MEG_GRAD,0.51257,3.97032,2.03007,1.11147,6.94759,3.68802
MEG_012,MEG_GRAD,0.51257,2.67525,3.24478,1.11147,4.13933,6.32201
MEG_004,MEG_GRAD,0,4.3528,1.03622,0,7.77696,1.53295
MEG_011,MEG_GRAD,-0.51257,3.97032,2.03007,-1.11147,6.94759,3.68802
MEG_008,MEG_GRAD,0.48824,4.32904,0.13976,1.05817,7.74156,0.10133
MEG_013,MEG_GRAD,-0.51257,2.67525,3.24478,-1.11147,4.13933,6.32201
MEG_024,MEG_GRAD,0.92333,4.17235,1.20548,2.00217,7.38566,1.89996
REF_020,UNUSED,,,,,,
MEG_068,MEG_GRAD,2.43321,2.88136,1.45931,5.27622,4.58626,2.45038
MEG_026,MEG_GRAD,1.02514,3.32279,2.63742,2.22293,5.54346,5.00502
MEG_038,MEG_GRAD,1.37821,4.03301,0.38766,2.98972,7.09471,0.3625
MEG_040,MEG_GRAD,1.59926,3.67934,1.66789,3.46787,6.31662,2.90266
MEG_054,MEG_GRAD,2.06278,3.53364,0.8475,4.47296,6.00069,1.12372
MEG_056,MEG_GRAD,1.84667,3.00588,2.29955,4.00435,4.85628,4.27238
MEG_058,MEG_GRAD,2.00892,1.56358,2.88668,4.25593,2.49543,5.34722
MEG_042,MEG_GRAD,1.59926,2.33243,2.93122,3.46787,3.39595,5.64209
MEG_028,MEG_GRAD,0.90968,1.7238,3.47337,1.93358,2.72985,6.62156
MEG_070,MEG_GRAD,2.43321,2.17533,2.12153,5.27622,3.05529,3.88634
REF_021,UNUSED,,,,,,
MEG_072,MEG_GRAD,2.38955,0.86972,2.76491,4.94504,1.79985,4.90406
MEG_044,MEG_GRAD,1.61144,0.93037,3.41137,3.33479,1.92534,6.24186
MEG_084,MEG_GRAD,2.78632,1.40783,1.84839,5.89386,2.21359,3.13893
MEG_046,MEG_GRAD,1.61144,-0.93037,3.41137,3.33479,-1.92534,6.24186
MEG_098,MEG_GRAD,2.96476,0.52277,1.94967,6.13541,1.08184,3.21696
MEG_060,MEG_GRAD,1.8607,0,3.41137,3.85068,0,6.24186
MEG_100,MEG_GRAD,2.96476,-0.52277,1.94967,6.13541,-1.08184,3.21696
MEG_074,MEG_GRAD,2.38955,-0.86972,2.76491,4.94504,-1.79985,4.90406
MEG_086,MEG_GRAD,2.5429,0,2.76491,5.2624,0,4.90406
MEG_062,MEG_GRAD,1.94798,-1.63455,2.76491,4.03123,-3.38261,4.90406
MEG_112,MEG_GRAD,3.22769,0.00807,0.98507,6.5452,0.04494,1.96707
MEG_016,MEG_GRAD,0.50538,-0.87535,3.83752,0.89368,-1.5479,7.20924
MEG_031,MEG_GRAD,-1.01076,0,3.83752,-1.78736,0,7.20924
MEG_015,MEG_GRAD,-0.50538,0.87535,3.83752,-0.89368,1.5479,7.20924
MEG_001,MEG_GRAD,0,0,4,0,0,7.46
MEG_002,MEG_GRAD,0,1.80611,3.59215,0,2.82922,6.89743
MEG_017,MEG_GRAD,-0.50538,-0.87535,3.83752,-0.89368,-1.5479,7.20924
MEG_014,MEG_GRAD,0.50538,0.87535,3.83752,0.89368,1.5479,7.20924
MEG_030,MEG_GRAD,1.01076,0,3.83752,1.78736,0,7.20924
MEG_050,MEG_GRAD,1.61362,-2.78506,1.09071,3.27214,-5.61905,2.18129
MEG_064,MEG_GRAD,1.93511,-2.30617,1.94967,4.00461,-4.7725,3.21696
MEG_076,MEG_GRAD,2.47238,-2.0651,1.06348,5.01358,-4.1591,2.12607
MEG_078,MEG_GRAD,2.50107,-2.16367,-0.6382,5.0982,-4.31565,0.13349
MEG_066,MEG_GRAD,2.08904,-2.51166,0.2289,4.26844,-5.08104,1.01638
MEG_036,MEG_GRAD,1.11155,-3.07561,0.25023,2.27119,-6.23333,1.05996
MEG_052,MEG_GRAD,1.62888,-2.89137,-0.61068,3.32391,-5.79963,0.18962
MEG_092,MEG_GRAD,2.8532,-1.73435,-1.50591,5.7542,-3.37531,-0.57691
144
mne/io/artemis123/resources/Artemis123_mneLoc.csv
Normal file
@@ -0,0 +1,144 @@
MEG_001,0.0,0.0,0.10160000191,1.0,-0.0,-0.0,-0.0,1.0,-0.0,0.0,0.0,1.0
MEG_002,0.0,0.0458751948625,0.0912406117153,1.0,-0.0,-0.0,-0.0,0.955282042035,-0.295696161906,0.0,0.295696161906,0.955282042035
MEG_003,0.0,0.0875804716465,0.0703826393232,1.0,-0.0,-0.0,-0.0,0.729376031116,-0.684113006186,0.0,0.684113006186,0.729376031116
MEG_004,0.0,0.110561122079,0.0263199884948,1.0,-0.0,-0.0,-0.0,0.143563509474,-0.989641106032,0.0,0.989641106032,0.143563509474
MEG_005,0.0,-0.0472625428885,0.086648799629,1.0,0.0,-0.0,0.0,0.818061560022,0.575130666904,0.0,-0.575130666904,0.818061560022
MEG_006,0.0,-0.0764667014376,0.049521618931,1.0,0.0,-0.0,0.0,0.366268930876,0.930509038255,0.0,-0.930509038255,0.366268930876
MEG_007,0.0,-0.0830953395622,0.00654405612303,1.0,0.0,-0.0,0.0,0.236260571358,0.971689735678,0.0,-0.971689735678,0.236260571358
MEG_008,0.0124012962331,0.109957618067,0.00354990406674,0.972562667953,-0.164284112711,-0.164719723212,-0.164284112711,0.0163303909087,-0.986277875978,0.164719723212,0.986277875978,-0.0111069411385
MEG_009,-0.0124012962331,0.109957618067,0.00354990406674,0.972562667953,0.164284112711,0.164719723212,0.164284112711,0.0163303909087,-0.986277875978,-0.164719723212,0.986277875978,-0.0111069411385
MEG_010,0.0130192782448,0.100846129896,0.0515637789694,0.979744824976,-0.100693145673,-0.173092369408,-0.100693145673,0.499431154085,-0.860482081594,0.173092369408,0.860482081594,0.479175979061
MEG_011,-0.0130192782448,0.100846129896,0.0515637789694,0.979744824976,0.100693145673,0.173092369408,0.100693145673,0.499431154085,-0.860482081594,-0.173092369408,0.860482081594,0.479175979061
MEG_012,0.0130192782448,0.0679513512775,0.0824174135494,0.984142307767,-0.038765954324,-0.17309280415,-0.038765954324,0.905232161619,-0.423145287527,0.17309280415,0.423145287527,0.889374469386
MEG_013,-0.0130192782448,0.0679513512775,0.0824174135494,0.984142307767,0.038765954324,0.17309280415,0.038765954324,0.905232161619,-0.423145287527,-0.17309280415,0.423145287527,0.889374469386
MEG_014,0.0128366522413,0.022233890418,0.0974730098325,0.993621350642,-0.0110480572384,-0.112225451567,-0.0110480572384,0.980864355149,-0.194378643965,0.112225451567,0.194378643965,0.974485705791
MEG_015,-0.0128366522413,0.022233890418,0.0974730098325,0.993621350642,0.0110480572384,0.112225451567,0.0110480572384,0.980864355149,-0.194378643965,-0.112225451567,0.194378643965,0.974485705791
MEG_016,0.0128366522413,-0.022233890418,0.0974730098325,0.993621350642,0.0110480572384,-0.112225451567,0.0110480572384,0.980864355149,0.194378643965,0.112225451567,-0.194378643965,0.974485705791
MEG_017,-0.0128366522413,-0.022233890418,0.0974730098325,0.993621350642,-0.0110480572384,0.112225451567,-0.0110480572384,0.980864355149,0.194378643965,-0.112225451567,-0.194378643965,0.974485705791
MEG_018,0.0112158782109,-0.0636084591958,0.0702287153203,0.988488638046,0.0652835409099,-0.136485426846,0.0652835409099,0.629762253104,0.774039768908,0.136485426846,-0.774039768908,0.61825089115
MEG_019,-0.0112158782109,-0.0636084591958,0.0702287153203,0.988488638046,-0.0652835409099,0.136485426846,-0.0652835409099,0.629762253104,0.774039768908,-0.136485426846,-0.774039768908,0.61825089115
MEG_020,0.0142295882675,-0.0804694875128,0.0280718265278,0.979010049739,0.117656650617,-0.166421858768,0.117656650617,0.340489745704,0.93285778425,0.166421858768,-0.93285778425,0.319499795443
MEG_021,-0.0142427962678,-0.0804672015128,0.0280718265278,0.978972050612,-0.117759654311,0.166572470526,-0.117759654311,0.340528364062,0.932830690472,-0.166572470526,-0.932830690472,0.319500414673
MEG_022,0.0142295882675,-0.0832380875649,-0.0151406862846,0.976588506526,0.131701883642,-0.170086750705,0.131701883642,0.259108088321,0.956826845574,0.170086750705,-0.956826845574,0.235696594848
MEG_023,-0.0146304002751,-0.0831674755635,-0.0151434802847,0.976546368958,-0.131816629327,0.170239729521,-0.131816629327,0.259149948413,0.956799707604,-0.170239729521,-0.956799707604,0.235696317371
MEG_024,0.0234525824409,0.105977691992,0.0306191925756,0.919030275794,-0.241167202261,-0.311803997292,-0.241167202261,0.281686827798,-0.928703888007,0.311803997292,0.928703888007,0.200717103592
MEG_025,-0.0234525824409,0.105977691992,0.0306191925756,0.919030275794,0.241167202261,0.311803997292,0.241167202261,0.281686827798,-0.928703888007,-0.311803997292,0.928703888007,0.200717103592
MEG_026,0.0260385564895,0.0843988675867,0.0669904692594,0.928846648352,-0.131916373824,-0.346181995721,-0.131916373824,0.755430639878,-0.641811980763,0.346181995721,0.641811980763,0.68427728823
MEG_027,-0.0260385564895,0.0843988675867,0.0669904692594,0.928846648352,0.131916373824,0.346181995721,0.131916373824,0.755430639878,-0.641811980763,-0.346181995721,0.641811980763,0.68427728823
MEG_028,0.0231058724344,0.0437845208231,0.0882235996586,0.954148215535,-0.0450524345745,-0.295924755521,-0.0450524345745,0.955732979975,-0.290765797726,0.295924755521,0.290765797726,0.90988119551
MEG_029,-0.0231330504349,0.0437862988232,0.0882142016584,0.954036318954,0.0451068389737,0.296277024411,0.0451068389737,0.955734030088,-0.290753911081,-0.296277024411,0.290753911081,0.909770349043
MEG_030,0.0256733044827,0.0,0.0974730098325,0.974485414074,-0.0,-0.224450835944,-0.0,1.0,-0.0,0.224450835944,0.0,0.974485414074
MEG_031,-0.0256733044827,0.0,0.0974730098325,0.974485414074,0.0,0.224450835944,0.0,1.0,-0.0,-0.224450835944,0.0,0.974485414074
MEG_032,0.0236313984443,-0.0409305767695,0.086648799629,0.954515845737,0.078781387629,-0.287563894118,0.078781387629,0.863545730655,0.498078572146,0.287563894118,-0.498078572146,0.818061576392
MEG_033,-0.0236313984443,-0.0409305767695,0.086648799629,0.954515845737,-0.078781387629,0.287563894118,-0.078781387629,0.863545730655,0.498078572146,-0.287563894118,-0.498078572146,0.818061576392
MEG_034,0.0261531104917,-0.0718550773509,0.049521618931,0.925866960833,0.203678027441,-0.318254036858,0.203678027441,0.440401481873,0.874392243734,0.318254036858,-0.874392243734,0.366268442706
MEG_035,-0.0261531104917,-0.0718550773509,0.049521618931,0.925866960833,-0.203678027441,0.318254036858,-0.203678027441,0.440401481873,0.874392243734,-0.318254036858,-0.874392243734,0.366268442706
MEG_036,0.0282333705308,-0.0781204954687,0.00635584211949,0.908973236603,0.247867468623,-0.335155744599,0.247867468623,0.325052548187,0.91263495381,0.335155744599,-0.91263495381,0.23402578479
MEG_037,-0.0282333705308,-0.0781204954687,0.00635584211949,0.908973236603,-0.247867468623,0.335155744599,-0.247867468623,0.325052548187,0.91263495381,-0.335155744599,-0.91263495381,0.23402578479
MEG_038,0.0350065346581,0.102438455926,0.00984656418512,0.781484001521,-0.415157481208,-0.465754249753,-0.415157481208,0.211244323513,-0.884884230609,0.465754249753,0.884884230609,-0.00727167496558
MEG_039,-0.0350065346581,0.102438455926,0.00984656418512,0.781484001521,0.415157481208,0.465754249753,0.415157481208,0.211244323513,-0.884884230609,-0.465754249753,0.884884230609,-0.00727167496558
MEG_040,0.0406212047637,0.093455237757,0.0423644067965,0.785045408535,-0.303378150058,-0.540060556425,-0.303378150058,0.57182444299,-0.762219459517,0.540060556425,0.762219459517,0.356869851524
MEG_041,-0.0406212047637,0.093455237757,0.0423644067965,0.785045408535,0.303378150058,0.540060556425,0.303378150058,0.57182444299,-0.762219459517,-0.540060556425,0.762219459517,0.356869851524
MEG_042,0.0406212047637,0.0592437231138,0.0744529893997,0.836463385382,-0.0930769183398,-0.540060822675,-0.0930769183398,0.947025241119,-0.307375795983,0.540060822675,0.307375795983,0.7834886265
MEG_043,-0.0406212047637,0.0592437231138,0.0744529893997,0.836463385382,0.0930769183398,0.540060822675,0.0930769183398,0.947025241119,-0.307375795983,-0.540060822675,0.307375795983,0.7834886265
MEG_044,0.0409305767695,0.0236313984443,0.086648799629,0.863545730655,-0.078781387629,-0.498078572146,-0.078781387629,0.954515845737,-0.287563894118,0.498078572146,0.287563894118,0.818061576392
MEG_045,-0.0409305767695,0.0236313984443,0.086648799629,0.863545730655,0.078781387629,0.498078572146,0.078781387629,0.954515845737,-0.287563894118,-0.498078572146,0.287563894118,0.818061576392
MEG_046,0.0409305767695,-0.0236313984443,0.086648799629,0.863545730655,0.078781387629,-0.498078572146,0.078781387629,0.954515845737,0.287563894118,0.498078572146,-0.287563894118,0.818061576392
MEG_047,-0.0409305767695,-0.0236313984443,0.086648799629,0.863545730655,-0.078781387629,0.498078572146,-0.078781387629,0.954515845737,0.287563894118,-0.498078572146,-0.287563894118,0.818061576392
MEG_048,0.0322948306071,-0.0559363890516,0.0702287153203,0.904562399003,0.165302346745,-0.392991094644,0.165302346745,0.713688676641,0.680678784005,0.392991094644,-0.680678784005,0.618251075645
MEG_049,-0.0322948306071,-0.0559363890516,0.0702287153203,0.904562399003,-0.165302346745,0.392991094644,-0.165302346745,0.713688676641,0.680678784005,-0.392991094644,-0.680678784005,0.618251075645
MEG_050,0.0409859487705,-0.0707405253299,0.0277040345208,0.825297111533,0.298522923381,-0.479341988471,0.298522923381,0.489900043633,0.819073874241,0.479341988471,-0.819073874241,0.315197155166
MEG_051,-0.0409973787708,-0.0707339213298,0.0277037805208,0.825197788666,-0.298579570884,0.479477683976,-0.298579570884,0.489996382374,0.818995595294,-0.479477683976,-0.818995595294,0.31519417104
MEG_052,0.0413735527778,-0.0734407993807,-0.0155112722916,0.805087785644,0.334422043575,-0.489893411036,0.334422043575,0.426212956438,0.840538168399,0.489893411036,-0.840538168399,0.231300742083
MEG_053,-0.0417258507844,-0.0732373453769,-0.0155188922918,0.804976833568,-0.334486625117,0.490031626568,-0.334486625117,0.426317886079,0.840459253996,-0.490031626568,-0.840459253996,0.231294719647
MEG_054,0.052394612985,0.0897544576874,0.0215265004047,0.550644162251,-0.459958724875,-0.696583791076,-0.459958724875,0.529188204946,-0.713020206696,0.696583791076,0.713020206696,0.0798323671971
MEG_055,-0.052394612985,0.0897544576874,0.0215265004047,0.550644162251,0.459958724875,0.696583791076,0.459958724875,0.529188204946,-0.713020206696,-0.696583791076,0.713020206696,0.0798323671971
MEG_056,0.0469054188818,0.0763493534354,0.0584085710981,0.752331243475,-0.212397698952,-0.623606380317,-0.212397698952,0.817850328992,-0.534797210957,0.623606380317,0.534797210957,0.570181572467
MEG_057,-0.0469054188818,0.0763493534354,0.0584085710981,0.752331243475,0.212397698952,0.623606380317,0.212397698952,0.817850328992,-0.534797210957,-0.623606380317,0.534797210957,0.570181572467
MEG_058,0.0510265689593,0.0397149327466,0.0733216733784,0.75352606525,-0.102214380931,-0.649423351381,-0.102214380931,0.95761101603,-0.269320185484,0.649423351381,0.269320185484,0.71113708128
MEG_059,-0.0502099589439,0.0397642087476,0.0740382073919,0.762632097113,0.100407167247,0.638991928915,0.100407167247,0.957527538003,-0.27029505125,-0.638991928915,0.27029505125,0.720159635116
MEG_060,0.0472617808885,0.0,0.086648799629,0.818057480605,-0.0,-0.575136469394,-0.0,1.0,-0.0,0.575136469394,0.0,0.818057480605
MEG_061,-0.0472625428885,0.0,0.086648799629,0.818061560022,0.0,0.575130666904,0.0,1.0,-0.0,-0.575130666904,0.0,0.818061560022
MEG_062,0.0494786929302,-0.0415175707805,0.0702287153203,0.775981248219,0.187974664221,-0.602095198473,0.187974664221,0.842270014862,0.505219504448,0.602095198473,-0.505219504448,0.618251263081
MEG_063,-0.0494786929302,-0.0415175707805,0.0702287153203,0.775981248219,-0.187974664221,0.602095198473,-0.187974664221,0.842270014862,0.505219504448,-0.602095198473,-0.505219504448,0.618251263081
MEG_064,0.0491517949241,-0.0585767191012,0.049521618931,0.738156783089,0.312052080775,-0.598120441436,0.312052080775,0.628111423833,0.712811011513,0.598120441436,-0.712811011513,0.366268206923
MEG_065,-0.0491517949241,-0.0585767191012,0.049521618931,0.738156783089,-0.312052080775,0.598120441436,-0.312052080775,0.628111423833,0.712811011513,-0.598120441436,-0.712811011513,0.366268206923
MEG_066,0.0530616169976,-0.0637961651994,0.0058140601093,0.676804202704,0.381028180993,-0.629883796022,0.381028180993,0.550790957291,0.742594671847,0.629883796022,-0.742594671847,0.227595159994
MEG_067,-0.0530616169976,-0.0637961651994,0.0058140601093,0.676804202704,-0.381028180993,0.629883796022,-0.381028180993,0.550790957291,0.742594671847,-0.629883796022,-0.742594671847,0.227595159994
MEG_068,0.0618035351619,0.0731865453759,0.0370664746968,0.475173275464,-0.314728784866,-0.821678860785,-0.314728784866,0.811263025695,-0.492745466865,0.821678860785,0.492745466865,0.286436301159
MEG_069,-0.0618035351619,0.0731865453759,0.0370664746968,0.475173275464,0.314728784866,0.821678860785,0.314728784866,0.811263025695,-0.492745466865,-0.821678860785,0.492745466865,0.286436301159
MEG_070,0.0618035351619,0.0552533830388,0.0538868630131,0.552894017073,-0.138386914129,-0.821679540869,-0.138386914129,0.957166893906,-0.254323807789,0.821679540869,0.254323807789,0.510060910979
MEG_071,-0.0618035351619,0.0552533830388,0.0538868630131,0.552894017073,0.138386914129,0.821679540869,0.138386914129,0.957166893906,-0.254323807789,-0.821679540869,0.254323807789,0.510060910979
MEG_072,0.0606945711411,0.0220908884153,0.0702287153203,0.662907428432,-0.12269267874,-0.738579885939,-0.12269267874,0.955343146999,-0.268823321284,0.738579885939,0.268823321284,0.61825057543
MEG_073,-0.0606945711411,0.0220908884153,0.0702287153203,0.662907428432,0.12269267874,0.738579885939,0.12269267874,0.955343146999,-0.268823321284,-0.738579885939,0.268823321284,0.61825057543
MEG_074,0.0606945711411,-0.0220908884153,0.0702287153203,0.662907428432,0.12269267874,-0.738579885939,0.12269267874,0.955343146999,0.268823321284,0.738579885939,-0.268823321284,0.61825057543
MEG_075,-0.0606945711411,-0.0220908884153,0.0702287153203,0.662907428432,-0.12269267874,0.738579885939,-0.12269267874,0.955343146999,0.268823321284,-0.738579885939,-0.268823321284,0.61825057543
MEG_076,0.0627984531806,-0.0524535409861,0.0270123925078,0.587320034467,0.340056606259,-0.73444991773,0.340056606259,0.719786504995,0.605201529878,0.73444991773,-0.605201529878,0.307106539462
MEG_077,-0.0628070891808,-0.0524433809859,0.0270118845078,0.587208379993,-0.340036516337,0.734548491268,-0.340036516337,0.719895397972,0.605083286446,-0.734548491268,-0.605083286446,0.307103777966
MEG_078,0.0635271791943,-0.0549572190332,-0.0162102803048,0.539322307512,0.381717195782,-0.750615368258,0.381717195782,0.683709413476,0.621959339803,0.750615368258,-0.621959339803,0.223031720988
MEG_079,-0.0637887991992,-0.0546455610273,-0.016222218305,0.539196876386,-0.381695169412,0.750716675014,-0.381695169412,0.683831999207,0.621838077403,-0.750716675014,-0.621838077403,0.223028875593
MEG_080,0.064521335213,0.0742759513964,0.017446752328,0.263693428198,-0.434776489447,-0.861066304154,-0.434776489447,0.743271888347,-0.508444986421,0.861066304154,0.508444986421,0.00696531654525
MEG_081,-0.064521335213,0.0742759513964,0.017446752328,0.263693428198,0.434776489447,0.861066304154,0.434776489447,0.743271888347,-0.508444986421,-0.861066304154,0.508444986421,0.00696531654525
MEG_082,0.0713910193422,0.0590585571103,0.0386440687265,0.192087187177,-0.202359959395,-0.960287956478,-0.202359959395,0.949314390716,-0.240525745844,0.960287956478,0.240525745844,0.141401577893
MEG_083,-0.0713910193422,0.0590585571103,0.0386440687265,0.192087187177,0.202359959395,0.960287956478,0.202359959395,0.949314390716,-0.240525745844,-0.960287956478,0.240525745844,0.141401577893
MEG_084,0.0707725293305,0.0357588826723,0.0469491068826,0.412488973038,-0.15233685973,-0.89813491653,-0.15233685973,0.960500283795,-0.232879123147,0.89813491653,0.232879123147,0.372989256833
MEG_085,-0.0707722753305,0.0357588826723,0.0469491068826,0.412487827635,0.152336666507,0.898135475354,0.152336666507,0.960500461005,-0.232878518647,-0.898135475354,0.232878518647,0.37298828864
MEG_086,0.0645896612143,0.0,0.0702287153203,0.618250335472,-0.0,-0.785981248306,-0.0,1.0,-0.0,0.785981248306,0.0,0.618250335472
MEG_087,-0.0645896612143,0.0,0.0702287153203,0.618250335472,0.0,0.785981248306,0.0,1.0,-0.0,-0.785981248306,0.0,0.618250335472
MEG_088,0.066222119245,-0.0382333507188,0.049521618931,0.524701809918,0.274413611706,-0.80584438968,0.274413611706,0.841567184852,0.46525460029,0.80584438968,-0.46525460029,0.36626899477
MEG_089,-0.066222119245,-0.0382333507188,0.049521618931,0.524701809918,-0.274413611706,0.80584438968,-0.274413611706,0.841567184852,0.46525460029,-0.80584438968,-0.46525460029,0.36626899477
MEG_090,0.071489571344,-0.0418500567868,0.0049839880937,0.408585986848,0.335957722235,-0.848640029826,0.335957722235,0.809156380101,0.482077132224,0.848640029826,-0.482077132224,0.217742366948
MEG_091,-0.071489571344,-0.0418500567868,0.0049839880937,0.408585986848,-0.335957722235,0.848640029826,-0.335957722235,0.809156380101,0.482077132224,-0.848640029826,-0.482077132224,0.217742366948
MEG_092,0.0724712813625,-0.0440524908282,-0.0382501147191,0.445815918958,0.313476011591,-0.83843959625,0.313476011591,0.822681283702,0.474266059932,0.83843959625,-0.474266059932,0.26849720266
MEG_093,-0.0724712813625,-0.0440524908282,-0.0382501147191,0.445815918958,-0.313476011591,0.83843959625,-0.313476011591,0.822681283702,0.474266059932,-0.83843959625,-0.474266059932,0.26849720266
MEG_094,0.0725505293639,0.0550301170346,0.0208673703923,0.0578041209796,-0.192090470788,-0.979673381608,-0.192090470788,0.96083749697,-0.199731208002,0.979673381608,0.199731208002,0.0186416179491
MEG_095,-0.0725505293639,0.0550301170346,0.0208673703923,0.0578041209796,0.192090470788,0.979673381608,0.192090470788,0.96083749697,-0.199731208002,-0.979673381608,0.199731208002,0.0186416179491
MEG_096,0.0772988054532,0.031062168584,0.0281350725289,0.186190165142,-0.175001933135,-0.966802743999,-0.175001933135,0.962367527045,-0.20790157837,0.966802743999,0.20790157837,0.148557692187
MEG_097,-0.0772988054532,0.031062168584,0.0281350725289,0.186190165142,0.175001933135,0.966802743999,0.175001933135,0.962367527045,-0.20790157837,-0.966802743999,0.20790157837,0.148557692187
MEG_098,0.0753049054157,0.0132783582496,0.049521618931,0.385377976058,-0.108374224505,-0.91637265511,-0.108374224505,0.980890739219,-0.1615808936,0.91637265511,0.1615808936,0.366268715277
MEG_099,-0.0753049054157,0.0132783582496,0.049521618931,0.385377976058,0.108374224505,0.91637265511,0.108374224505,0.980890739219,-0.1615808936,-0.91637265511,0.1615808936,0.366268715277
MEG_100,0.0753049054157,-0.0132783582496,0.049521618931,0.385377976058,0.108374224505,-0.91637265511,0.108374224505,0.980890739219,0.1615808936,0.91637265511,-0.1615808936,0.366268715277
MEG_101,-0.0753049054157,-0.0132783582496,0.049521618931,0.385377976058,-0.108374224505,0.91637265511,-0.108374224505,0.980890739219,0.1615808936,-0.91637265511,-0.1615808936,0.366268715277
MEG_102,0.0770366774483,-0.0278145245229,0.0260804664903,0.373752636547,0.220368615692,-0.900969832953,0.220368615692,0.922455039947,0.31704001718,0.900969832953,-0.31704001718,0.296207676495
MEG_103,-0.0770412494484,-0.0278018245227,0.0260799584903,0.373679152588,-0.220281297547,0.901021665041,-0.220281297547,0.92252557096,0.31689544155,-0.901021665041,-0.31689544155,0.296204723548
MEG_104,0.0780183874667,-0.0300167045643,-0.0171536363225,0.300373897506,0.24880001314,-0.920800779299,0.24880001314,0.911522102566,0.327453828799,0.920800779299,-0.327453828799,0.211896000073
MEG_105,-0.0781575794694,-0.0296346885571,-0.0171681143228,0.300287289279,-0.248701776907,0.920855564169,-0.248701776907,0.911602900892,0.327303494098,-0.920855564169,-0.327303494098,0.211890190171
MEG_106,0.0705891413271,0.0621764071689,0.00492785409264,0.134758903688,-0.324701145067,-0.936167295022,-0.324701145067,0.878148606143,-0.351317793345,0.936167295022,0.351317793345,0.0129075098315
MEG_107,-0.0705891413271,0.0621764071689,0.00492785409264,0.134758903688,0.324701145067,0.936167295022,0.324701145067,0.878148606143,-0.351317793345,-0.936167295022,0.351317793345,0.0129075098315
MEG_108,0.0767740414434,0.0353695006649,0.00957046617992,0.0578041209796,-0.192090470788,-0.979673381608,-0.192090470788,0.96083749697,-0.199731208002,0.979673381608,0.199731208002,0.0186416179491
MEG_109,-0.0767740414434,0.0353695006649,0.00957046617992,0.0578041209796,0.192090470788,0.979673381608,0.192090470788,0.96083749697,-0.199731208002,-0.979673381608,0.199731208002,0.0186416179491
MEG_110,0.0812949875283,0.0137193022579,0.00288239205419,0.219230938619,-0.143668166804,-0.965037436268,-0.143668166804,0.973563831901,-0.177575119486,0.965037436268,0.177575119486,0.192794770521
MEG_111,-0.0812949875283,0.0137193022579,0.00288239205419,0.219230938619,0.143668166804,0.965037436268,0.143668166804,0.973563831901,-0.177575119486,-0.965037436268,0.177575119486,0.192794770521
MEG_112,0.0819833275413,0.000204978003854,0.0250207784704,0.283903999524,-0.00795851694118,-0.958819681203,-0.00795851694118,0.999911550977,-0.010656088948,0.958819681203,0.010656088948,0.283815550501
MEG_113,-0.0819833275413,0.000218440004107,0.0250202704704,0.283900779742,0.00807295557139,0.958819677859,0.00807295557139,0.999908989411,-0.0108092683826,-0.958819677859,0.0108092683826,0.283809769153
MEG_114,0.0812949875283,-0.0149293582807,0.00396595607456,0.227559595527,0.130073723873,-0.965037541675,0.130073723873,0.978096467321,0.162505775197,0.965037541675,-0.162505775197,0.205656062847
MEG_115,-0.0812949875283,-0.0149293582807,0.00396595607456,0.227559595527,-0.130073723873,0.965037541675,-0.130073723873,0.978096467321,0.162505775197,-0.965037541675,-0.162505775197,0.205656062847
MEG_116,0.0798715715016,0.0223085664194,-0.0132826762497,0.0578041209796,-0.192090470788,-0.979673381608,-0.192090470788,0.96083749697,-0.199731208002,0.979673381608,0.199731208002,0.0186416179491
MEG_117,-0.0798715715016,0.0223085664194,-0.0132826762497,0.0578041209796,0.192090470788,0.979673381608,0.192090470788,0.96083749697,-0.199731208002,-0.979673381608,0.199731208002,0.0186416179491
MEG_118,0.0830994035623,-0.0016276320306,-0.0182272943427,0.199274663362,-0.00609304526277,-0.979924733508,-0.00609304526277,0.999953635537,-0.00745664647062,0.979924733508,0.00745664647062,0.199228298899
MEG_119,-0.0830994035623,-0.00122097802295,-0.018242788343,0.199270871862,0.00622296522868,0.979924688091,0.00622296522868,0.999951637458,-0.00761560563545,-0.979924688091,0.00761560563545,0.19922250932
MEG_120,0.0824113175493,0.0122798842309,-0.0403806667592,0.134815219165,-0.156230210311,-0.978476866394,-0.156230210311,0.971788825746,-0.176687859065,0.978476866394,0.176687859065,0.106604044912
MEG_121,-0.0824113175493,0.0122798842309,-0.0403806667592,0.134812452063,0.156230709979,0.978477167862,0.156230709979,0.971788735519,-0.176687913503,-0.978477167862,0.176687913503,0.106601187582
MEG_122,0.0824113175493,-0.0167619683151,-0.0392821167385,0.144888827116,0.146932333372,-0.978477448481,0.146932333372,0.974752861061,0.168130155723,0.978477448481,-0.168130155723,0.119641688177
MEG_123,-0.0824113175493,-0.0167619683151,-0.0392821167385,0.144888827116,-0.146932333372,0.978477448481,-0.146932333372,0.974752861061,0.168130155723,-0.978477448481,-0.168130155723,0.119641688177
REF_001,-0.0574242204956,0.101251052386,0.128127713641,0.221198761686,0.896440347728,-0.384012774259,0.896440347728,-0.0318490232173,0.442018486814,0.384012774259,-0.442018486814,-0.810650261531
REF_002,0.0490254159517,0.102373905889,0.131493075274,0.222503034056,-0.896198721013,0.383823204472,-0.896198721013,-0.0330228704748,0.442422131545,-0.383823204472,-0.442422131545,-0.810519836419
REF_003,-0.069360787652,0.103594593082,0.130986921083,-0.347310972431,-0.17658747001,0.920973373049,-0.17658747001,0.976855280479,0.120708849866,-0.920973373049,-0.120708849866,-0.370455691952
REF_004,0.0431879423759,0.106985366145,0.141533355103,0.383166262537,0.0763561288473,0.920517982899,0.0763561288473,0.990548087664,-0.113948355026,-0.920517982899,0.113948355026,0.3737143502
REF_005,-0.0611082914288,0.0961811650462,0.138938483688,0.997012450802,-0.040298218601,0.0658955728709,-0.040298218601,0.456428559123,0.888847019455,-0.0658955728709,-0.888847019455,0.453441009925
REF_006,0.0527763749922,0.0973005509413,0.142279189541,0.99632914816,0.0447419955488,-0.072982068763,0.0447419955488,0.454664406796,0.889538324653,0.072982068763,-0.889538324653,0.450993554956
REF_007,-0.114314226149,0.0198287643728,0.0448114428425,0.149033474209,0.0868247934117,0.985012933323,0.0868247934117,0.991141197071,-0.100501655296,-0.985012933323,0.100501655296,0.14017467128
REF_008,0.114314226149,0.0198287643728,0.0448114428425,0.149033474209,-0.0868247934117,-0.985012933323,-0.0868247934117,0.991141197071,-0.100501655296,0.985012933323,0.100501655296,0.14017467128
REF_009,-0.109709206063,0.0555492930443,-0.00941197017695,0.0589562738411,0.187574011648,0.98047954998,0.187574011648,0.96261171626,-0.195434576966,-0.98047954998,0.195434576966,0.0215679901007
REF_010,0.109709206063,0.0555492930443,-0.00941197017695,0.0589562738411,-0.187574011648,-0.98047954998,-0.187574011648,0.96261171626,-0.195434576966,0.98047954998,0.195434576966,0.0215679901007
REF_011,-0.118027452219,-0.0227690684281,-0.0111257082092,0.157170103306,-0.0740177576494,0.984793851615,-0.0740177576494,0.993499722223,0.0864851056297,-0.984793851615,-0.0864851056297,0.150669825529
REF_012,0.118027452219,-0.0227690684281,-0.0111257082092,0.157170103306,0.0740177576494,-0.984793851615,0.0740177576494,0.993499722223,0.0864851056297,0.984793851615,-0.0864851056297,0.150669825529
REF_013,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
REF_014,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
REF_015,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
REF_016,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
REF_017,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
REF_018,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
REF_019,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
REF_020,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
REF_021,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
123
mne/io/artemis123/utils.py
Normal file
@@ -0,0 +1,123 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os.path as op

import numpy as np

from ..._fiff._digitization import _artemis123_read_pos
from ...transforms import rotation3d_align_z_axis
from ...utils import logger


def _load_mne_locs(fname=None):
    """Load MNE locs structure from file (if exists) or recreate it."""
    if not fname:
        # find input file
        resource_dir = op.join(op.dirname(op.abspath(__file__)), "resources")
        fname = op.join(resource_dir, "Artemis123_mneLoc.csv")

    if not op.exists(fname):
        raise OSError(f'MNE locs file "{fname}" does not exist')

    logger.info(f"Loading mne loc file {fname}")
    locs = dict()
    with open(fname) as fid:
        for line in fid:
            vals = line.strip().split(",")
            locs[vals[0]] = np.array(vals[1::], np.float64)

    return locs
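
# A usage sketch (hypothetical call): _load_mne_locs()["MEG_001"] yields the
# 12-element loc vector (position + flattened rotation) for that channel.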


def _generate_mne_locs_file(output_fname):
    """Generate mne coil locs and save to supplied file."""
    logger.info("Converting Tristan coil file to mne loc file...")
    resource_dir = op.join(op.dirname(op.abspath(__file__)), "resources")
    chan_fname = op.join(resource_dir, "Artemis123_ChannelMap.csv")
    chans = _load_tristan_coil_locs(chan_fname)

    # compute a dict of loc structs
    locs = {n: _compute_mne_loc(cinfo) for n, cinfo in chans.items()}

    # write it out to output_fname
    with open(output_fname, "w") as fid:
        for n in sorted(locs.keys()):
            fid.write(f"{n},")
            fid.write(",".join(locs[n].astype(str)))
            fid.write("\n")


def _load_tristan_coil_locs(coil_loc_path):
    """Load the Coil locations from Tristan CAD drawings."""
    channel_info = dict()
    with open(coil_loc_path) as fid:
        # skip 2 Header lines
        fid.readline()
        fid.readline()
        for line in fid:
            line = line.strip()
            vals = line.split(",")
            channel_info[vals[0]] = dict()
            if vals[6]:
                channel_info[vals[0]]["inner_coil"] = np.array(vals[2:5], np.float64)
                channel_info[vals[0]]["outer_coil"] = np.array(vals[5:8], np.float64)
            else:  # nothing supplied
                channel_info[vals[0]]["inner_coil"] = np.zeros(3)
                channel_info[vals[0]]["outer_coil"] = np.zeros(3)
    return channel_info


def _compute_mne_loc(coil_loc):
    """Convert a set of coils to an mne Struct.

    Note input coil locations are in inches.
    """
    loc = np.zeros(12)
    if (np.linalg.norm(coil_loc["inner_coil"]) == 0) and (
        np.linalg.norm(coil_loc["outer_coil"]) == 0
    ):
        return loc

    # channel location is the inner coil location converted to meters from inches
    loc[0:3] = coil_loc["inner_coil"] / 39.370078

    # figure out rotation
    z_axis = coil_loc["outer_coil"] - coil_loc["inner_coil"]
    R = rotation3d_align_z_axis(z_axis)
    loc[3:13] = R.T.reshape(9)
    return loc


def _read_pos(fname):
    """Read the .pos file and return positions as dig points."""
    nas, lpa, rpa, hpi, extra = None, None, None, None, None
    with open(fname) as fid:
        for line in fid:
            line = line.strip()
            if len(line) > 0:
                parts = line.split()
                # The lines can have 4 or 5 parts. First part is for the id,
                # which can be an int or a string. The last three are for xyz
                # coordinates. The extra part is for additional info
                # (e.g. 'Pz', 'Cz') which is ignored.
                if len(parts) not in [4, 5]:
                    continue

                if parts[0].lower() == "nasion":
                    nas = np.array([float(p) for p in parts[-3:]]) / 100.0
                elif parts[0].lower() == "left":
                    lpa = np.array([float(p) for p in parts[-3:]]) / 100.0
                elif parts[0].lower() == "right":
                    rpa = np.array([float(p) for p in parts[-3:]]) / 100.0
                elif "hpi" in parts[0].lower():
                    if hpi is None:
                        hpi = list()
                    hpi.append(np.array([float(p) for p in parts[-3:]]) / 100.0)
                else:
                    if extra is None:
                        extra = list()
                    extra.append(np.array([float(p) for p in parts[-3:]]) / 100.0)

    return _artemis123_read_pos(nas, lpa, rpa, hpi, extra)
3259
mne/io/base.py
Normal file
File diff suppressed because it is too large
7
mne/io/besa/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""Support for various BESA file formats."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .besa import read_evoked_besa
274
mne/io/besa/besa.py
Normal file
@@ -0,0 +1,274 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from collections import OrderedDict
from pathlib import Path

import numpy as np

from ..._fiff.meas_info import create_info
from ...evoked import EvokedArray
from ...utils import fill_doc, logger, verbose


@fill_doc
@verbose
def read_evoked_besa(fname, verbose=None):
    """Reader function for BESA ``.avr`` or ``.mul`` files.

    When a ``.elp`` sidecar file is present, it will be used to determine
    electrode information.

    Parameters
    ----------
    fname : path-like
        Path to the ``.avr`` or ``.mul`` file.
    %(verbose)s

    Returns
    -------
    ev : Evoked
        The evoked data in the .avr or .mul file.
    """
    fname = Path(fname)
    if fname.suffix == ".avr":
        return _read_evoked_besa_avr(fname, verbose)
    elif fname.suffix == ".mul":
        return _read_evoked_besa_mul(fname, verbose)
    else:
        raise ValueError("Filename must end in either .avr or .mul")


@verbose
def _read_evoked_besa_avr(fname, verbose):
    """Create EvokedArray from a BESA .avr file."""
    with open(fname) as f:
        header = f.readline().strip()

        # There are two versions of .avr files. The old style, generated by
        # BESA 1, 2 and 3, does not define Nchan and does not have channel
        # names in the file.
        new_style = "Nchan=" in header
        if new_style:
            ch_names = f.readline().strip().split()
        else:
            ch_names = None

    fields = _parse_header(header)
    data = np.loadtxt(fname, skiprows=2 if new_style else 1, ndmin=2)
    ch_types = _read_elp_sidecar(fname)

    # Consolidate channel names
    if new_style:
        if len(ch_names) != len(data):
            raise RuntimeError(
                "Mismatch between the number of channel names defined in "
                f"the .avr file ({len(ch_names)}) and the number of rows "
                f"in the data matrix ({len(data)})."
            )
    else:
        # Determine channel names from the .elp sidecar file
        if ch_types is not None:
            ch_names = list(ch_types.keys())
            if len(ch_names) != len(data):
                raise RuntimeError(
                    "Mismatch between the number of channels "
                    f"defined in the .avr file ({len(data)}) "
                    f"and .elp file ({len(ch_names)})."
                )
        else:
            logger.info(
                "No .elp file found and no channel names present in "
                "the .avr file. Falling back to generic names."
            )
            ch_names = [f"CH{i + 1:02d}" for i in range(len(data))]

    # Consolidate channel types
    if ch_types is None:
        logger.info("Marking all channels as EEG.")
        ch_types = ["eeg"] * len(ch_names)
    else:
        ch_types = [ch_types[ch] for ch in ch_names]

    # Go over all the header fields and make sure they are all defined to
    # something sensible.
    if "Npts" in fields:
        fields["Npts"] = int(fields["Npts"])
        if fields["Npts"] != data.shape[1]:
            logger.warning(
                f"The size of the data matrix ({data.shape}) does not "
                f'match the "Npts" field ({fields["Npts"]}).'
            )
    if "Nchan" in fields:
        fields["Nchan"] = int(fields["Nchan"])
        if fields["Nchan"] != data.shape[0]:
            logger.warning(
                f"The size of the data matrix ({data.shape}) does not "
                f'match the "Nchan" field ({fields["Nchan"]}).'
            )
    if "DI" in fields:
        fields["DI"] = float(fields["DI"])
    else:
        raise RuntimeError(
            'No "DI" field present. Could not determine sampling frequency.'
        )
    if "TSB" in fields:
        fields["TSB"] = float(fields["TSB"])
    else:
        fields["TSB"] = 0
    if "SB" in fields:
        fields["SB"] = float(fields["SB"])
    else:
        fields["SB"] = 1.0
    if "SegmentName" not in fields:
        fields["SegmentName"] = ""

    # Build the Evoked object based on the header fields.
    info = create_info(ch_names, sfreq=1000 / fields["DI"], ch_types=ch_types)
    return EvokedArray(
        data / fields["SB"] / 1e6,
        info,
        tmin=fields["TSB"] / 1000,
        comment=fields["SegmentName"],
        verbose=verbose,
    )
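
# BESA stores amplitudes in microvolts (scaled by the "SB" / "Bins/uV" header
# factor), so the EvokedArray constructions in this module divide by 1e6 to
# convert to volts, the unit MNE-Python uses internally.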


@verbose
def _read_evoked_besa_mul(fname, verbose):
    """Create EvokedArray from a BESA .mul file."""
    with open(fname) as f:
        header = f.readline().strip()
        ch_names = f.readline().strip().split()

    fields = _parse_header(header)
    data = np.loadtxt(fname, skiprows=2, ndmin=2)

    if len(ch_names) != data.shape[1]:
        raise RuntimeError(
            "Mismatch between the number of channel names "
            f"defined in the .mul file ({len(ch_names)}) "
            "and the number of columns in the data matrix "
            f"({data.shape[1]})."
        )

    # Consolidate channel types
    ch_types = _read_elp_sidecar(fname)
    if ch_types is None:
        logger.info("Marking all channels as EEG.")
        ch_types = ["eeg"] * len(ch_names)
    else:
        ch_types = [ch_types[ch] for ch in ch_names]

    # Go over all the header fields and make sure they are all defined to
    # something sensible.
    if "TimePoints" in fields:
        fields["TimePoints"] = int(fields["TimePoints"])
        if fields["TimePoints"] != data.shape[0]:
            logger.warning(
                f"The size of the data matrix ({data.shape}) does not "
                f'match the "TimePoints" field ({fields["TimePoints"]}).'
            )
    if "Channels" in fields:
        fields["Channels"] = int(fields["Channels"])
        if fields["Channels"] != data.shape[1]:
            logger.warning(
                f"The size of the data matrix ({data.shape}) does not "
                f'match the "Channels" field ({fields["Channels"]}).'
            )
    if "SamplingInterval[ms]" in fields:
        fields["SamplingInterval[ms]"] = float(fields["SamplingInterval[ms]"])
    else:
        raise RuntimeError(
            'No "SamplingInterval[ms]" field present. Could '
            "not determine sampling frequency."
        )
    if "BeginSweep[ms]" in fields:
        fields["BeginSweep[ms]"] = float(fields["BeginSweep[ms]"])
    else:
        fields["BeginSweep[ms]"] = 0.0
    if "Bins/uV" in fields:
        fields["Bins/uV"] = float(fields["Bins/uV"])
    else:
        fields["Bins/uV"] = 1
    if "SegmentName" not in fields:
        fields["SegmentName"] = ""

    # Build the Evoked object based on the header fields.
    info = create_info(
        ch_names, sfreq=1000 / fields["SamplingInterval[ms]"], ch_types=ch_types
    )
    return EvokedArray(
        data.T / fields["Bins/uV"] / 1e6,
        info,
        tmin=fields["BeginSweep[ms]"] / 1000,
        comment=fields["SegmentName"],
        verbose=verbose,
    )


def _parse_header(header):
    """Parse an .avr or .mul header string into name/val pairs.

    The header line looks like:
        Npts= 256 TSB= 0.000 DI= 4.000000 SB= 1.000 SC= 200.0 Nchan= 27
    No consistent use of separation chars, so parsing this is a bit iffy.

    Parameters
    ----------
    header : str
        The first line of the file.

    Returns
    -------
    fields : dict
        The parsed header fields.
    """
    parts = header.split()  # Splits on one or more spaces
    name_val_pairs = zip(parts[::2], parts[1::2])
    return dict((name.replace("=", ""), val) for name, val in name_val_pairs)
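
# e.g. (illustrative): _parse_header("Npts= 256 TSB= 0.000 DI= 4.000000")
# returns {"Npts": "256", "TSB": "0.000", "DI": "4.000000"}; values stay
# strings until the readers above coerce them.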


def _read_elp_sidecar(fname):
    """Read a possible .elp sidecar file with electrode information.

    The reason we don't use the read_custom_montage for this is that we are
    interested in the channel types, which a DigMontage object does not
    provide us.

    Parameters
    ----------
    fname : Path
        The path of the .avr or .mul file. The corresponding .elp file will be
        derived from this path.

    Returns
    -------
    ch_types : OrderedDict | None
        If the sidecar file exists, return a dictionary mapping channel names
        to channel types. Otherwise returns ``None``.
    """
    fname_elp = fname.parent / (fname.stem + ".elp")
    if not fname_elp.exists():
        logger.info(f"No {fname_elp} file present containing electrode information.")
        return None

    logger.info(f"Reading electrode names and types from {fname_elp}")
    ch_types = OrderedDict()
    with open(fname_elp) as f:
        lines = f.readlines()
        if len(lines[0].split()) > 3:
            # Channel types present
            for line in lines:
                ch_type, ch_name = line.split()[:2]
                ch_types[ch_name] = ch_type.lower()
        else:
            # No channel types present
            logger.info(
                "No channel types present in .elp file. Marking all channels as EEG."
            )
            for line in lines:
                ch_name = line.split()[0]
                ch_types[ch_name] = "eeg"
    return ch_types
7
mne/io/boxy/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""fNIRS module for conversion to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .boxy import read_raw_boxy
283
mne/io/boxy/boxy.py
Normal file
@@ -0,0 +1,283 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import re as re

import numpy as np

from ..._fiff.meas_info import create_info
from ..._fiff.utils import _mult_cal_one
from ...annotations import Annotations
from ...utils import _check_fname, fill_doc, logger, verbose
from ..base import BaseRaw


@fill_doc
def read_raw_boxy(fname, preload=False, verbose=None) -> "RawBOXY":
    """Reader for an optical imaging recording.

    This function has been tested using the ISS Imagent I and II systems
    and versions 0.40/0.84 of the BOXY recording software.

    Parameters
    ----------
    fname : path-like
        Path to the BOXY data file.
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawBOXY
        A Raw object containing BOXY data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawBOXY.
    """
    return RawBOXY(fname, preload, verbose)


@fill_doc
class RawBOXY(BaseRaw):
    """Raw object from a BOXY optical imaging file.

    Parameters
    ----------
    fname : path-like
        Path to the BOXY data file.
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """

    @verbose
    def __init__(self, fname, preload=False, verbose=None):
        logger.info(f"Loading {fname}")

        # Read header file and grab some info.
        start_line = np.inf
        col_names = mrk_col = filetype = mrk_data = end_line = None
        raw_extras = dict()
        raw_extras["offsets"] = list()  # keep track of our offsets
        sfreq = None
        fname = str(_check_fname(fname, "read", True, "fname"))
        with open(fname) as fid:
            line_num = 0
            i_line = fid.readline()
            while i_line:
                # most of our lines will be data lines, so check that first
                if line_num >= start_line:
                    assert col_names is not None
                    assert filetype is not None
                    if "#DATA ENDS" in i_line:
                        # Data ends just before this.
                        end_line = line_num
                        break
                    if mrk_col is not None:
                        if filetype == "non-parsed":
                            # Non-parsed files have different line lengths.
                            crnt_line = i_line.rsplit(" ")[0]
                            temp_data = re.findall(r"[-+]?\d*\.?\d+", crnt_line)
                            if len(temp_data) == len(col_names):
                                mrk_data.append(
                                    float(
                                        re.findall(r"[-+]?\d*\.?\d+", crnt_line)[
                                            mrk_col
                                        ]
                                    )
                                )
                        else:
                            crnt_line = i_line.rsplit(" ")[0]
                            mrk_data.append(
                                float(re.findall(r"[-+]?\d*\.?\d+", crnt_line)[mrk_col])
                            )
                    raw_extras["offsets"].append(fid.tell())
                # now proceed with more standard header parsing
                elif "BOXY.EXE:" in i_line:
                    boxy_ver = re.findall(r"\d*\.\d+", i_line.rsplit(" ")[-1])[0]
                    # Check that the BOXY version is supported
                    if boxy_ver not in ["0.40", "0.84"]:
                        raise RuntimeError(
                            f"MNE has not been tested with BOXY version ({boxy_ver})"
                        )
                elif "Detector Channels" in i_line:
                    raw_extras["detect_num"] = int(i_line.rsplit(" ")[0])
                elif "External MUX Channels" in i_line:
                    raw_extras["source_num"] = int(i_line.rsplit(" ")[0])
                elif "Update Rate (Hz)" in i_line or "Updata Rate (Hz)" in i_line:
                    # Version 0.40 of the BOXY recording software
                    # (and possibly other versions lower than 0.84) contains a
                    # typo in the raw data file where 'Update Rate' is spelled
                    # 'Updata Rate'. This will account for this typo.
                    sfreq = float(i_line.rsplit(" ")[0])
                elif "#DATA BEGINS" in i_line:
                    # Data should start a couple lines later.
                    start_line = line_num + 3
                elif line_num == start_line - 2:
                    # Grab names for each column of data.
                    raw_extras["col_names"] = col_names = re.findall(
                        r"\w+\-\w+|\w+\-\d+|\w+", i_line.rsplit(" ")[0]
                    )
                    if "exmux" in col_names:
                        # Change filetype based on data organisation.
                        filetype = "non-parsed"
                    else:
                        filetype = "parsed"
                    if "digaux" in col_names:
                        mrk_col = col_names.index("digaux")
                        mrk_data = list()
                    # raw_extras['offsets'].append(fid.tell())
                elif line_num == start_line - 1:
                    raw_extras["offsets"].append(fid.tell())
                line_num += 1
                i_line = fid.readline()
        assert sfreq is not None
        raw_extras.update(filetype=filetype, start_line=start_line, end_line=end_line)

        # Label each channel in our data, for each data type (DC, AC, Ph).
        # Data is organised by channels x timepoint, where the first
        # 'source_num' rows correspond to the first detector, the next
        # 'source_num' rows correspond to the second detector, and so on.
        ch_names = list()
        ch_types = list()
        cals = list()
        for det_num in range(raw_extras["detect_num"]):
            for src_num in range(raw_extras["source_num"]):
                for i_type, ch_type in [
                    ("DC", "fnirs_cw_amplitude"),
                    ("AC", "fnirs_fd_ac_amplitude"),
                    ("Ph", "fnirs_fd_phase"),
                ]:
                    ch_names.append(f"S{src_num + 1}_D{det_num + 1} {i_type}")
                    ch_types.append(ch_type)
                    cals.append(np.pi / 180.0 if i_type == "Ph" else 1.0)
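        # e.g. with source_num=2 and detect_num=2 the resulting order is:
        # S1_D1 DC/AC/Ph, S2_D1 DC/AC/Ph, S1_D2 DC/AC/Ph, S2_D2 DC/AC/Ph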

        # Create info structure.
        info = create_info(ch_names, sfreq, ch_types)
        for ch, cal in zip(info["chs"], cals):
            ch["cal"] = cal

        # Determine how long our data is.
        delta = end_line - start_line
        assert len(raw_extras["offsets"]) == delta + 1
        if filetype == "non-parsed":
            delta //= raw_extras["source_num"]
        super().__init__(
            info,
            preload,
            filenames=[fname],
            first_samps=[0],
            last_samps=[delta - 1],
            raw_extras=[raw_extras],
            verbose=verbose,
        )

        # Now let's grab our markers, if they are present.
        if mrk_data is not None:
            mrk_data = np.array(mrk_data, float)
            # We only want the first instance of each trigger.
            prev_mrk = 0
            mrk_idx = list()
            duration = list()
            tmp_dur = 0
            for i_num, i_mrk in enumerate(mrk_data):
                if i_mrk != 0 and i_mrk != prev_mrk:
                    mrk_idx.append(i_num)
                if i_mrk != 0 and i_mrk == prev_mrk:
                    tmp_dur += 1
                if i_mrk == 0 and i_mrk != prev_mrk:
                    duration.append((tmp_dur + 1) / sfreq)
                    tmp_dur = 0
                prev_mrk = i_mrk
            onset = np.array(mrk_idx) / sfreq
            description = mrk_data[mrk_idx]
            annot = Annotations(onset, duration, description)
            self.set_annotations(annot)

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a segment of data from a file.

        Boxy file organises data in two ways, parsed or un-parsed.
        Regardless of type, output has (n_montages x n_sources x n_detectors
        + n_marker_channels) rows, and (n_timepoints x n_blocks) columns.
        """
        source_num = self._raw_extras[fi]["source_num"]
        detect_num = self._raw_extras[fi]["detect_num"]
        start_line = self._raw_extras[fi]["start_line"]
        end_line = self._raw_extras[fi]["end_line"]
        filetype = self._raw_extras[fi]["filetype"]
        col_names = self._raw_extras[fi]["col_names"]
        offsets = self._raw_extras[fi]["offsets"]
        boxy_file = self.filenames[fi]

        # Non-parsed multiplexes sources, so we need source_num times as many
        # lines in that case
        if filetype == "parsed":
            start_read = start_line + start
            stop_read = start_read + (stop - start)
        else:
            assert filetype == "non-parsed"
            start_read = start_line + start * source_num
            stop_read = start_read + (stop - start) * source_num
        assert start_read >= start_line
        assert stop_read <= end_line

        # Possible detector names.
        detectors = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"[:detect_num]

        # Loop through our data.
        one = np.zeros((len(col_names), stop_read - start_read))
        with open(boxy_file) as fid:
            # Just a more efficient version of this:
            # ii = 0
            # for line_num, i_line in enumerate(fid):
            #     if line_num >= start_read:
            #         if line_num >= stop_read:
            #             break
            #         # Grab actual data.
            #         i_data = i_line.strip().split()
            #         one[:len(i_data), ii] = i_data
            #         ii += 1
            fid.seek(offsets[start_read - start_line], 0)
            for oo in one.T:
                i_data = fid.readline().strip().split()
                oo[: len(i_data)] = i_data

        # in theory we could index in the loop above, but it's painfully slow,
        # so let's just take a hopefully minor memory hit
        if filetype == "non-parsed":
            ch_idxs = [
                col_names.index(f"{det}-{i_type}")
                for det in detectors
                for i_type in ["DC", "AC", "Ph"]
            ]
            one = (
                one[ch_idxs]
                .reshape(  # each "time point" multiplexes srcs
                    len(detectors), 3, -1, source_num
                )
                .transpose(  # reorganize into (det, source, DC/AC/Ph, t) order
                    0, 3, 1, 2
                )
                .reshape(  # reshape the way we store it (det x source x DAP, t)
                    len(detectors) * source_num * 3, -1
                )
            )
        else:
            assert filetype == "parsed"
            ch_idxs = [
                col_names.index(f"{det}-{i_type}{si + 1}")
                for det in detectors
                for si in range(source_num)
                for i_type in ["DC", "AC", "Ph"]
            ]
            one = one[ch_idxs]

        # Place our data into the data object in place.
        _mult_cal_one(data, one, idx, cals, mult)
7
mne/io/brainvision/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""BrainVision module for conversion to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .brainvision import read_raw_brainvision
1144
mne/io/brainvision/brainvision.py
Normal file
File diff suppressed because it is too large
7
mne/io/bti/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""BTi module for conversion to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .bti import read_raw_bti
1414
mne/io/bti/bti.py
Normal file
File diff suppressed because it is too large
99
mne/io/bti/constants.py
Normal file
@@ -0,0 +1,99 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from ...utils import BunchConst

BTI = BunchConst()

BTI.ELEC_STATE_NOT_COLLECTED = 0
BTI.ELEC_STATE_COLLECTED = 1
BTI.ELEC_STATE_SKIPPED = 2
BTI.ELEC_STATE_NOT_APPLICABLE = 3
#
## Byte offsets and data sizes for different files
#
BTI.FILE_MASK = 2147483647
BTI.FILE_CURPOS = 8
BTI.FILE_END = -8

BTI.FILE_HS_VERSION = 0
BTI.FILE_HS_TIMESTAMP = 4
BTI.FILE_HS_CHECKSUM = 8
BTI.FILE_HS_N_DIGPOINTS = 12
BTI.FILE_HS_N_INDEXPOINTS = 16

BTI.FILE_PDF_H_ENTER = 1
BTI.FILE_PDF_H_FTYPE = 5
BTI.FILE_PDF_H_XLABEL = 16
BTI.FILE_PDF_H_NEXT = 2
BTI.FILE_PDF_H_EXIT = 20

BTI.FILE_PDF_EPOCH_EXIT = 28

BTI.FILE_PDF_CH_NEXT = 6
BTI.FILE_PDF_CH_LABELSIZE = 16
BTI.FILE_PDF_CH_YLABEL = 16
BTI.FILE_PDF_CH_OFF_FLAG = 16
BTI.FILE_PDF_CH_EXIT = 12

BTI.FILE_PDF_EVENT_NAME = 16
BTI.FILE_PDF_EVENT_EXIT = 32

BTI.FILE_PDF_PROCESS_BLOCKTYPE = 20
BTI.FILE_PDF_PROCESS_USER = 32
BTI.FILE_PDF_PROCESS_FNAME = 256
BTI.FILE_PDF_PROCESS_EXIT = 32

BTI.FILE_PDF_ASSOC_NEXT = 32

BTI.FILE_PDFED_NAME = 17
BTI.FILE_PDFED_NEXT = 9
BTI.FILE_PDFED_EXIT = 8

#
## General data constants
#
BTI.DATA_N_IDX_POINTS = 5
BTI.DATA_ROT_N_ROW = 3
BTI.DATA_ROT_N_COL = 3
BTI.DATA_XFM_N_COL = 4
BTI.DATA_XFM_N_ROW = 4
BTI.FIFF_LOGNO = 111
#
## Channel Types
#
BTI.CHTYPE_MEG = 1
BTI.CHTYPE_EEG = 2
BTI.CHTYPE_REFERENCE = 3
BTI.CHTYPE_EXTERNAL = 4
BTI.CHTYPE_TRIGGER = 5
BTI.CHTYPE_UTILITY = 6
BTI.CHTYPE_DERIVED = 7
BTI.CHTYPE_SHORTED = 8
#
## Processes
#
BTI.PROC_DEFAULTS = "BTi_defaults"
BTI.PROC_FILTER = "b_filt_hp,b_filt_lp,b_filt_notch"
BTI.PROC_BPFILTER = "b_filt_b_pass,b_filt_b_reject"
#
## User blocks
#
BTI.UB_B_MAG_INFO = "B_Mag_Info"
BTI.UB_B_COH_POINTS = "B_COH_Points"
BTI.UB_B_CCP_XFM_BLOCK = "b_ccp_xfm_block"
BTI.UB_B_EEG_LOCS = "b_eeg_elec_locs"
BTI.UB_B_WHC_CHAN_MAP_VER = "B_WHChanMapVer"
BTI.UB_B_WHC_CHAN_MAP = "B_WHChanMap"
BTI.UB_B_WHS_SUBSYS_VER = "B_WHSubsysVer"
BTI.UB_B_WHS_SUBSYS = "B_WHSubsys"
BTI.UB_B_CH_LABELS = "B_ch_labels"
BTI.UB_B_CALIBRATION = "B_Calibration"
BTI.UB_B_SYS_CONFIG_TIME = "B_SysConfigTime"
BTI.UB_B_DELTA_ENABLED = "B_DELTA_ENABLED"
BTI.UB_B_E_TABLE_USED = "B_E_table_used"
BTI.UB_B_E_TABLE = "B_E_TABLE"
BTI.UB_B_WEIGHTS_USED = "B_weights_used"
BTI.UB_B_TRIG_MASK = "B_trig_mask"
BTI.UB_B_WEIGHT_TABLE = "BWT_"
98
mne/io/bti/read.py
Normal file
@@ -0,0 +1,98 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import numpy as np

from ..._fiff.utils import read_str
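
# Note: BTi files are big-endian, hence the ">" byte-order prefix in every
# dtype used below.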
|
||||
|
||||
|
||||
def _unpack_matrix(fid, rows, cols, dtype, out_dtype):
|
||||
"""Unpack matrix."""
|
||||
dtype = np.dtype(dtype)
|
||||
|
||||
string = fid.read(int(dtype.itemsize * rows * cols))
|
||||
out = np.frombuffer(string, dtype=dtype).reshape(rows, cols).astype(out_dtype)
|
||||
return out
|
||||
|
||||
|
||||
def _unpack_simple(fid, dtype, out_dtype):
|
||||
"""Unpack a NumPy type."""
|
||||
dtype = np.dtype(dtype)
|
||||
string = fid.read(dtype.itemsize)
|
||||
out = np.frombuffer(string, dtype=dtype).astype(out_dtype)
|
||||
|
||||
if len(out) > 0:
|
||||
out = out[0]
|
||||
return out
|
||||
|
||||
|
||||
def read_char(fid, count=1):
|
||||
"""Read character from bti file."""
|
||||
return _unpack_simple(fid, f">S{count}", "S")
|
||||
|
||||
|
||||
def read_uint16(fid):
|
||||
"""Read unsigned 16bit integer from bti file."""
|
||||
return _unpack_simple(fid, ">u2", np.uint32)
|
||||
|
||||
|
||||
def read_int16(fid):
|
||||
"""Read 16bit integer from bti file."""
|
||||
return _unpack_simple(fid, ">i2", np.int32)
|
||||
|
||||
|
||||
def read_uint32(fid):
|
||||
"""Read unsigned 32bit integer from bti file."""
|
||||
return _unpack_simple(fid, ">u4", np.uint32)
|
||||
|
||||
|
||||
def read_int32(fid):
|
||||
"""Read 32bit integer from bti file."""
|
||||
return _unpack_simple(fid, ">i4", np.int32)
|
||||
|
||||
|
||||
def read_int64(fid):
|
||||
"""Read 64bit integer from bti file."""
|
||||
return _unpack_simple(fid, ">u8", np.int64)
|
||||
|
||||
|
||||
def read_float(fid):
|
||||
"""Read 32bit float from bti file."""
|
||||
return _unpack_simple(fid, ">f4", np.float32)
|
||||
|
||||
|
||||
def read_double(fid):
|
||||
"""Read 64bit float from bti file."""
|
||||
return _unpack_simple(fid, ">f8", np.float64)
|
||||
|
||||
|
||||
def read_int16_matrix(fid, rows, cols):
|
||||
"""Read 16bit integer matrix from bti file."""
|
||||
return _unpack_matrix(
|
||||
fid,
|
||||
rows,
|
||||
cols,
|
||||
dtype=">i2",
|
||||
out_dtype=np.int32,
|
||||
)
|
||||
|
||||
|
||||
def read_float_matrix(fid, rows, cols):
|
||||
"""Read 32bit float matrix from bti file."""
|
||||
return _unpack_matrix(fid, rows, cols, dtype=">f4", out_dtype=np.float32)
|
||||
|
||||
|
||||
def read_double_matrix(fid, rows, cols):
|
||||
"""Read 64bit float matrix from bti file."""
|
||||
return _unpack_matrix(fid, rows, cols, dtype=">f8", out_dtype=np.float64)
|
||||
|
||||
|
||||
def read_transform(fid):
|
||||
"""Read 64bit float matrix transform from bti file."""
|
||||
return read_double_matrix(fid, rows=4, cols=4)
|
||||
|
||||
|
||||
def read_dev_header(x):
|
||||
"""Create a dev header."""
|
||||
return dict(size=read_int32(x), checksum=read_int32(x), reserved=read_str(x, 32))
|
||||
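A minimal round-trip sketch (not part of the commit) of how these big-endian helpers compose; it emulates a BTI file with an in-memory buffer. The import path is an assumption based on the file location above.

import io

import numpy as np

from mne.io.bti.read import read_transform  # assumed path per this commit

trans = np.arange(16, dtype=">f8").reshape(4, 4)  # big-endian doubles on "disk"
fid = io.BytesIO(trans.tobytes())
out = read_transform(fid)  # -> 4x4 float64 matrix
assert out.dtype == np.float64 and np.allclose(out, trans)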
7
mne/io/cnt/__init__.py
Normal file
@@ -0,0 +1,7 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

"""CNT data reader."""

from .cnt import read_raw_cnt
150
mne/io/cnt/_utils.py
Normal file
@@ -0,0 +1,150 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from collections import namedtuple
from datetime import datetime
from math import modf
from os import SEEK_END
from struct import Struct

import numpy as np

from ...utils import warn


def _read_teeg(f, teeg_offset):
    """
    Read TEEG structure from an open CNT file.

    # from TEEG structure in http://paulbourke.net/dataformats/eeg/
    typedef struct {
        char Teeg;    /* Either 1 or 2 */
        long Size;    /* Total length of all the events */
        long Offset;  /* Hopefully always 0 */
    } TEEG;
    """
    # we use more descriptive names based on the TEEG doc comments
    Teeg = namedtuple("Teeg", "event_type total_length offset")
    teeg_parser = Struct("<Bll")

    f.seek(teeg_offset)
    return Teeg(*teeg_parser.unpack(f.read(teeg_parser.size)))


CNTEventType1 = namedtuple("CNTEventType1", ("StimType KeyBoard KeyPad_Accept Offset"))
# typedef struct {
#    unsigned short StimType;  /* range 0-65535 */
#    unsigned char KeyBoard;   /* range 0-11 corresponding to fcn keys +1 */
#    char KeyPad_Accept;       /* 0->3 range 0-15 bit coded response pad */
#                              /* 4->7 values 0xd=Accept 0xc=Reject */
#    long Offset;              /* file offset of event */
# } EVENT1;


CNTEventType2 = namedtuple(
    "CNTEventType2",
    (
        "StimType KeyBoard KeyPad_Accept Offset Type "
        "Code Latency EpochEvent Accept2 Accuracy"
    ),
)
# unsigned short StimType;  /* range 0-65535 */
# unsigned char KeyBoard;   /* range 0-11 corresponding to fcn keys +1 */
# char KeyPad_Accept;       /* 0->3 range 0-15 bit coded response pad */
#                           /* 4->7 values 0xd=Accept 0xc=Reject */
# long Offset;              /* file offset of event */
# short Type;
# short Code;
# float Latency;
# char EpochEvent;
# char Accept2;
# char Accuracy;


# needed for backward compat: EVENT type 3 has the same structure as type 2
CNTEventType3 = namedtuple(
    "CNTEventType3",
    (
        "StimType KeyBoard KeyPad_Accept Offset Type "
        "Code Latency EpochEvent Accept2 Accuracy"
    ),
)


def _get_event_parser(event_type):
    if event_type == 1:
        event_maker = CNTEventType1
        struct_pattern = "<HBcl"
    elif event_type == 2:
        event_maker = CNTEventType2
        struct_pattern = "<HBclhhfccc"
    elif event_type == 3:
        event_maker = CNTEventType3
        struct_pattern = "<HBclhhfccc"  # Same as event type 2
    else:
        raise ValueError(f"unknown CNT event type {event_type}")

    def parser(buffer):
        struct = Struct(struct_pattern)
        for chunk in struct.iter_unpack(buffer):
            yield event_maker(*chunk)

    return parser
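A small usage sketch (assuming the definitions above are in scope): pack one type-1 event with the same little-endian layout, then parse it back.

from struct import pack

buf = pack("<HBcl", 7, 0, b"\x00", 900)  # StimType=7, Offset=900
parser = _get_event_parser(event_type=1)
(event,) = tuple(parser(buf))
assert event.StimType == 7 and event.Offset == 900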

def _session_date_2_meas_date(session_date, date_format):
    try:
        frac_part, int_part = modf(
            datetime.strptime(session_date, date_format).timestamp()
        )
    except ValueError:
        warn(" Could not parse meas date from the header. Setting to None.")
        return None
    else:
        return (int_part, frac_part)


def _compute_robust_event_table_position(fid, data_format="int32"):
    """Compute `event_table_position`.

    During the recording, ``event_table_position`` is computed as an
    accumulation. If the recording is large, this value overflows and ends up
    pointing somewhere else (see gh-6535).

    If the file is smaller than 2 GB, the value in the SETUP header is
    returned. Otherwise, the address of the table position is computed from
    n_samples, n_channels, and the sample size in bytes.
    """
    SETUP_NCHANNELS_OFFSET = 370
    SETUP_NSAMPLES_OFFSET = 864
    SETUP_EVENTTABLEPOS_OFFSET = 886

    fid_origin = fid.tell()  # save the state

    if fid.seek(0, SEEK_END) < 2e9:
        fid.seek(SETUP_EVENTTABLEPOS_OFFSET)
        (event_table_pos,) = np.frombuffer(fid.read(4), dtype="<i4")

    else:
        if data_format == "auto":
            warn(
                "Using data_format='auto' for a CNT file larger than 2 GB is "
                "not guaranteed to work. Please pass 'int16' or 'int32' "
                "explicitly (assuming 'int32')."
            )

        n_bytes = 2 if data_format == "int16" else 4

        fid.seek(SETUP_NSAMPLES_OFFSET)
        (n_samples,) = np.frombuffer(fid.read(4), dtype="<i4")

        fid.seek(SETUP_NCHANNELS_OFFSET)
        (n_channels,) = np.frombuffer(fid.read(2), dtype="<u2")

        event_table_pos = (
            900 + 75 * int(n_channels) + n_bytes * int(n_channels) * int(n_samples)
        )

    fid.seek(fid_origin)  # restore the state
    return event_table_pos
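A worked example of the fallback arithmetic above, with assumed values (an int16 recording with 64 channels and ten million samples per channel):

n_channels, n_samples, n_bytes = 64, 10_000_000, 2
event_table_pos = 900 + 75 * n_channels + n_bytes * n_channels * n_samples
print(event_table_pos)  # 1280005700: SETUP header + channel table + data block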
642
mne/io/cnt/cnt.py
Normal file
@@ -0,0 +1,642 @@
"""Conversion tool from Neuroscan CNT to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from os import path

import numpy as np

from ..._fiff._digitization import _make_dig_points
from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _empty_info
from ..._fiff.utils import _create_chs, _find_channels, _mult_cal_one, read_str
from ...annotations import Annotations
from ...channels.layout import _topo_to_sphere
from ...utils import _check_option, _explain_exception, _validate_type, fill_doc, warn
from ..base import BaseRaw
from ._utils import (
    CNTEventType3,
    _compute_robust_event_table_position,
    _get_event_parser,
    _read_teeg,
    _session_date_2_meas_date,
)


def _read_annotations_cnt(fname, data_format="int16"):
    """CNT Annotation File Reader.

    This method opens the .cnt files, searches all the metadata to construct
    the annotations and parses the event table. Notice that CNT files can
    point to a different file containing the events; this case, where the
    event table is separate from the main .cnt file, is not supported.

    Parameters
    ----------
    fname : path-like
        Path to CNT file containing the annotations.
    data_format : 'int16' | 'int32'
        Defines the data format the data is read in.

    Returns
    -------
    annot : instance of Annotations
        The annotations.
    """
    # Offsets from SETUP structure in http://paulbourke.net/dataformats/eeg/
    SETUP_NCHANNELS_OFFSET = 370
    SETUP_RATE_OFFSET = 376

    def _accept_reject_function(keypad_accept):
        accept_list = []
        for code in keypad_accept:
            if "xd0" in str(code):
                accept_list.append("good")
            elif "xc0" in str(code):
                accept_list.append("bad")
            else:
                accept_list.append("NA")
        return np.array(accept_list)

    def _translating_function(offset, n_channels, event_type, data_format=data_format):
        n_bytes = 2 if data_format == "int16" else 4
        if event_type == CNTEventType3:
            offset *= n_bytes * n_channels
        event_time = offset - 900 - (75 * n_channels)
        event_time //= n_channels * n_bytes
        event_time = event_time - 1
        # Prevent negative event times
        np.clip(event_time, 0, None, out=event_time)
        return event_time

    def _update_bad_span_onset(accept_reject, onset, duration, description):
        accept_reject = accept_reject.tolist()
        onset = onset.tolist()
        duration = duration.tolist()
        description = description.tolist()
        # If there are no bad spans, return original parameters
        if "bad" not in accept_reject:
            return np.array(onset), np.array(duration), np.array(description)
        # Create lists of bad and good span markers and onsets
        bad_good_span_markers = [i for i in accept_reject if i in ["bad", "good"]]
        bad_good_onset = [
            onset[i]
            for i, value in enumerate(accept_reject)
            if value in ["bad", "good"]
        ]
        # Calculate the duration of each bad span
        first_bad_index = bad_good_span_markers.index("bad")
        duration_list = [
            bad_good_onset[i + 1] - bad_good_onset[i]
            for i in range(first_bad_index, len(bad_good_span_markers), 2)
        ]
        # Add bad event marker duration and description
        duration_list_index = 0
        for i in range(len(onset)):
            if accept_reject[i] == "bad":
                duration[i] = duration_list[duration_list_index]
                description[i] = "BAD_" + description[i]
                duration_list_index += 1
        # Remove good span markers
        final_onset, final_duration, final_description = [], [], []
        for i in range(len(accept_reject)):
            if accept_reject[i] != "good":
                final_onset.append(onset[i])
                final_duration.append(duration[i])
                final_description.append(description[i])
        return (
            np.array(final_onset),
            np.array(final_duration),
            np.array(final_description),
        )

    with open(fname, "rb") as fid:
        fid.seek(SETUP_NCHANNELS_OFFSET)
        (n_channels,) = np.frombuffer(fid.read(2), dtype="<u2")

        fid.seek(SETUP_RATE_OFFSET)
        (sfreq,) = np.frombuffer(fid.read(2), dtype="<u2")

        event_table_pos = _compute_robust_event_table_position(
            fid=fid, data_format=data_format
        )

    with open(fname, "rb") as fid:
        teeg = _read_teeg(fid, teeg_offset=event_table_pos)

    event_parser = _get_event_parser(event_type=teeg.event_type)

    with open(fname, "rb") as fid:
        fid.seek(event_table_pos + 9)  # the real table starts at +9
        buffer = fid.read(teeg.total_length)

    my_events = list(event_parser(buffer))

    if not my_events:
        return Annotations(list(), list(), list(), None)
    else:
        onset = _translating_function(
            np.array([e.Offset for e in my_events], dtype=float),
            n_channels=n_channels,
            event_type=type(my_events[0]),
            data_format=data_format,
        )
        # There is a Latency field but it's not useful for durations, see
        # https://github.com/mne-tools/mne-python/pull/11828
        duration = np.zeros(len(my_events), dtype=float)
        accept_reject = _accept_reject_function(
            np.array([e.KeyPad_Accept for e in my_events])
        )

        # Check to see if there are any button presses
        description = []
        for event in my_events:
            # Extract the 4-bit fields
            # Upper nibble (4 bits) currently not used
            # accept = (event.KeyPad_Accept[0] & 0xF0) >> 4
            # Lower nibble (4 bits) keypad button press
            keypad = event.KeyPad_Accept[0] & 0x0F
            if str(keypad) != "0":
                description.append(f"KeyPad Response {keypad}")
            elif event.KeyBoard != 0:
                description.append(f"Keyboard Response {event.KeyBoard}")
            else:
                description.append(str(event.StimType))

        description = np.array(description)

        onset, duration, description = _update_bad_span_onset(
            accept_reject, onset / sfreq, duration, description
        )
        return Annotations(
            onset=onset, duration=duration, description=description, orig_time=None
        )
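To see what _translating_function does for type-1/2 events, here is the same arithmetic on one assumed byte offset: the event table stores the byte position of the event in the file, and the sample index falls out after removing the 900-byte SETUP header and the 75-byte-per-channel ELECTLOC block.

n_channels, n_bytes = 32, 2  # assumed int16 recording
sample = 1000
byte_offset = 900 + 75 * n_channels + (sample + 1) * n_channels * n_bytes
event_time = (byte_offset - 900 - 75 * n_channels) // (n_channels * n_bytes) - 1
assert event_time == sample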
@fill_doc
def read_raw_cnt(
    input_fname,
    eog=(),
    misc=(),
    ecg=(),
    emg=(),
    data_format="auto",
    date_format="mm/dd/yy",
    *,
    header="auto",
    preload=False,
    verbose=None,
) -> "RawCNT":
    """Read CNT data as raw object.

    .. note::
        2D spatial coordinates (x, y) for EEG channels are read from the file
        header and fit to a sphere to compute corresponding z-coordinates.
        If channels assigned as EEG channels have locations
        far away from the head (i.e. x and y coordinates don't fit to a
        sphere), all the channel locations will be distorted
        (all channels that are not assigned with keywords ``eog``, ``ecg``,
        ``emg`` and ``misc`` are assigned as EEG channels). If you are not
        sure that the channel locations in the header are correct, it is
        probably safer to replace them with :meth:`mne.io.Raw.set_montage`.
        Montages can be created/imported with:

        - Standard montages with :func:`mne.channels.make_standard_montage`
        - Montages for `Compumedics systems
          <https://compumedicsneuroscan.com>`__ with
          :func:`mne.channels.read_dig_dat`
        - Other reader functions are listed under *See Also* at
          :class:`mne.channels.DigMontage`

    Parameters
    ----------
    input_fname : path-like
        Path to the data file.
    eog : list | tuple | ``'auto'`` | ``'header'``
        Names of channels or list of indices that should be designated
        EOG channels. If ``'header'``, VEOG and HEOG channels assigned in the
        file header are used. If ``'auto'``, channel names containing
        ``'EOG'`` are used. Defaults to empty tuple.
    misc : list | tuple
        Names of channels or list of indices that should be designated
        MISC channels. Defaults to empty tuple.
    ecg : list | tuple | ``'auto'``
        Names of channels or list of indices that should be designated
        ECG channels. If ``'auto'``, the channel names containing ``'ECG'``
        are used. Defaults to empty tuple.
    emg : list | tuple
        Names of channels or list of indices that should be designated
        EMG channels. If ``'auto'``, the channel names containing ``'EMG'``
        are used. Defaults to empty tuple.
    data_format : ``'auto'`` | ``'int16'`` | ``'int32'``
        Defines the data format the data is read in. If ``'auto'``, it is
        determined from the file header using the ``numsamples`` field.
        Defaults to ``'auto'``.
    date_format : ``'mm/dd/yy'`` | ``'dd/mm/yy'``
        Format of date in the header. Defaults to ``'mm/dd/yy'``.
    header : ``'auto'`` | ``'new'`` | ``'old'``
        Defines the header format. Used to describe how bad channels
        are formatted. If ``'auto'``, the file is read using both the old and
        new header formats, and a channel is marked bad if either format
        marks it bad. Defaults to ``'auto'``.

        .. versionadded:: 1.6
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawCNT
        The raw data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawCNT.

    Notes
    -----
    .. versionadded:: 0.12
    """
    return RawCNT(
        input_fname,
        eog=eog,
        misc=misc,
        ecg=ecg,
        emg=emg,
        data_format=data_format,
        date_format=date_format,
        header=header,
        preload=preload,
        verbose=verbose,
    )
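A typical call, sketched with a hypothetical path ("data.cnt" is a placeholder):

import mne

raw = mne.io.read_raw_cnt("data.cnt", data_format="auto", preload=True)
# Per the note above, a standard montage is often safer than header coords:
montage = mne.channels.make_standard_montage("standard_1020")
raw.set_montage(montage, on_missing="warn")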
def _get_cnt_info(input_fname, eog, ecg, emg, misc, data_format, date_format, header):
    """Read the cnt header."""
    data_offset = 900  # Size of the 'SETUP' header.
    cnt_info = dict()
    # Reading only the fields of interest. Structure of the whole header at
    # http://paulbourke.net/dataformats/eeg/
    with open(input_fname, "rb", buffering=0) as fid:
        fid.seek(21)
        patient_id = read_str(fid, 20)
        patient_id = int(patient_id) if patient_id.isdigit() else 0
        fid.seek(121)
        patient_name = read_str(fid, 20).split()
        last_name = patient_name[0] if len(patient_name) > 0 else ""
        first_name = patient_name[-1] if len(patient_name) > 0 else ""
        fid.seek(2, 1)
        sex = read_str(fid, 1)
        if sex == "M":
            sex = FIFF.FIFFV_SUBJ_SEX_MALE
        elif sex == "F":
            sex = FIFF.FIFFV_SUBJ_SEX_FEMALE
        else:  # can be 'U'
            sex = FIFF.FIFFV_SUBJ_SEX_UNKNOWN
        hand = read_str(fid, 1)
        if hand == "R":
            hand = FIFF.FIFFV_SUBJ_HAND_RIGHT
        elif hand == "L":
            hand = FIFF.FIFFV_SUBJ_HAND_LEFT
        else:  # can be 'M' for mixed or 'U'
            hand = None
        fid.seek(205)
        session_label = read_str(fid, 20)

        session_date = f"{read_str(fid, 10)} {read_str(fid, 12)}"
        meas_date = _session_date_2_meas_date(session_date, date_format)

        fid.seek(370)
        n_channels = np.fromfile(fid, dtype="<u2", count=1).item()
        fid.seek(376)
        sfreq = np.fromfile(fid, dtype="<u2", count=1).item()
        if eog == "header":
            fid.seek(402)
            eog = [idx for idx in np.fromfile(fid, dtype="i2", count=2) if idx >= 0]
        fid.seek(438)
        lowpass_toggle = np.fromfile(fid, "i1", count=1).item()
        highpass_toggle = np.fromfile(fid, "i1", count=1).item()

        # The header has a field for the number of samples, but it does not
        # seem to be very reliable. That's why we have the option of setting
        # n_bytes manually.
        fid.seek(864)
        n_samples = np.fromfile(fid, dtype="<u4", count=1).item()
        n_samples_header = n_samples
        fid.seek(869)
        lowcutoff = np.fromfile(fid, dtype="f4", count=1).item()
        fid.seek(2, 1)
        highcutoff = np.fromfile(fid, dtype="f4", count=1).item()

        event_offset = _compute_robust_event_table_position(
            fid=fid, data_format=data_format
        )
        fid.seek(890)
        cnt_info["continuous_seconds"] = np.fromfile(fid, dtype="<f4", count=1).item()

        if event_offset < data_offset:  # no events
            data_size = n_samples * n_channels
        else:
            data_size = event_offset - (data_offset + 75 * n_channels)

        _check_option("data_format", data_format, ["auto", "int16", "int32"])
        if data_format == "auto":
            if n_samples == 0 or data_size // (n_samples * n_channels) not in [2, 4]:
                warn(
                    "Could not define the number of bytes automatically. "
                    "Defaulting to 2."
                )
                n_bytes = 2
                n_samples = data_size // (n_bytes * n_channels)
                # See: PR #12393
                annotations = _read_annotations_cnt(input_fname, data_format="int16")
                # See: PR #12986
                if len(annotations) and annotations.onset[-1] * sfreq > n_samples:
                    n_bytes = 4
                    n_samples = n_samples_header
                    warn(
                        "Annotations are outside data range. "
                        "Changing data format to 'int32'."
                    )
            else:
                n_bytes = data_size // (n_samples * n_channels)
        else:
            n_bytes = 2 if data_format == "int16" else 4
            n_samples = data_size // (n_bytes * n_channels)

            # See PR #12393
            if n_samples_header != 0:
                n_samples = n_samples_header
        # Channel offset refers to the size of blocks per channel in the file.
        cnt_info["channel_offset"] = np.fromfile(fid, dtype="<i4", count=1).item()
        if cnt_info["channel_offset"] > 1:
            cnt_info["channel_offset"] //= n_bytes
        else:
            cnt_info["channel_offset"] = 1

        ch_names, cals, baselines, chs, pos = (list(), list(), list(), list(), list())

        bads = list()
        _validate_type(header, str, "header")
        _check_option("header", header, ("auto", "new", "old"))
        for ch_idx in range(n_channels):  # ELECTLOC fields
            fid.seek(data_offset + 75 * ch_idx)
            ch_name = read_str(fid, 10)
            ch_names.append(ch_name)

            # Some files have bad channels marked differently in the header.
            if header in ("new", "auto"):
                fid.seek(data_offset + 75 * ch_idx + 14)
                if np.fromfile(fid, dtype="u1", count=1).item():
                    bads.append(ch_name)
            if header in ("old", "auto"):
                fid.seek(data_offset + 75 * ch_idx + 4)
                if np.fromfile(fid, dtype="u1", count=1).item():
                    bads.append(ch_name)

            fid.seek(data_offset + 75 * ch_idx + 19)
            xy = np.fromfile(fid, dtype="f4", count=2)
            xy[1] *= -1  # invert y-axis
            pos.append(xy)
            fid.seek(data_offset + 75 * ch_idx + 47)
            # Baselines are subtracted before scaling the data.
            baselines.append(np.fromfile(fid, dtype="i2", count=1).item())
            fid.seek(data_offset + 75 * ch_idx + 59)
            sensitivity = np.fromfile(fid, dtype="f4", count=1).item()
            fid.seek(data_offset + 75 * ch_idx + 71)
            cal = np.fromfile(fid, dtype="f4", count=1).item()
            cals.append(cal * sensitivity * 1e-6 / 204.8)

    info = _empty_info(sfreq)
    if lowpass_toggle == 1:
        info["lowpass"] = highcutoff
    if highpass_toggle == 1:
        info["highpass"] = lowcutoff
    subject_info = {
        "hand": hand,
        "id": patient_id,
        "sex": sex,
        "first_name": first_name,
        "last_name": last_name,
    }
    subject_info = {key: val for key, val in subject_info.items() if val is not None}

    if eog == "auto":
        eog = _find_channels(ch_names, "EOG")
    if ecg == "auto":
        ecg = _find_channels(ch_names, "ECG")
    if emg == "auto":
        emg = _find_channels(ch_names, "EMG")

    chs = _create_chs(
        ch_names, cals, FIFF.FIFFV_COIL_EEG, FIFF.FIFFV_EEG_CH, eog, ecg, emg, misc
    )
    eegs = [idx for idx, ch in enumerate(chs) if ch["coil_type"] == FIFF.FIFFV_COIL_EEG]
    coords = _topo_to_sphere(pos, eegs)
    locs = np.full((len(chs), 12), np.nan)
    locs[:, :3] = coords
    dig = _make_dig_points(
        dig_ch_pos=dict(zip(ch_names, coords)),
        coord_frame="head",
        add_missing_fiducials=True,
    )
    for ch, loc in zip(chs, locs):
        ch.update(loc=loc)

    cnt_info.update(baselines=np.array(baselines), n_samples=n_samples, n_bytes=n_bytes)

    session_label = None if str(session_label) == "" else str(session_label)
    info.update(
        meas_date=meas_date,
        dig=dig,
        description=session_label,
        subject_info=subject_info,
        chs=chs,
    )
    info._unlocked = False
    info._update_redundant()
    info["bads"] = bads
    return info, cnt_info
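The ``'auto'`` branch above infers the sample width purely from sizes; schematically, with assumed values:

n_channels, n_samples = 40, 250_000
data_size = 4 * n_channels * n_samples  # bytes between header and event table
n_bytes = data_size // (n_samples * n_channels)  # -> 4, i.e. int32
assert n_bytes in (2, 4)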
@fill_doc
class RawCNT(BaseRaw):
    """Raw object from Neuroscan CNT file.

    .. note::

        The channel positions are read from the file header. Channels that are
        not assigned with keywords ``eog``, ``ecg``, ``emg`` and ``misc`` are
        assigned as EEG channels. All the EEG channel locations are fit to a
        sphere when computing the z-coordinates for the channels. If channels
        assigned as EEG channels have locations far away from the head (i.e.
        x and y coordinates don't fit to a sphere), all the channel locations
        will be distorted. If you are not sure that the channel locations in
        the header are correct, it is probably safer to use a (standard)
        montage. See :func:`mne.channels.make_standard_montage`.

    .. note::

        A CNT file can also come from the EEG manufacturer ANT Neuro, in which case the
        function :func:`mne.io.read_raw_ant` should be used.

    Parameters
    ----------
    input_fname : path-like
        Path to the Neuroscan CNT file.
    eog : list | tuple
        Names of channels or list of indices that should be designated
        EOG channels. If ``'auto'``, the channel names beginning with
        ``EOG`` are used. Defaults to empty tuple.
    misc : list | tuple
        Names of channels or list of indices that should be designated
        MISC channels. Defaults to empty tuple.
    ecg : list | tuple
        Names of channels or list of indices that should be designated
        ECG channels. If ``'auto'``, the channel names beginning with
        ``ECG`` are used. Defaults to empty tuple.
    emg : list | tuple
        Names of channels or list of indices that should be designated
        EMG channels. If ``'auto'``, the channel names beginning with
        ``EMG`` are used. Defaults to empty tuple.
    data_format : ``'auto'`` | ``'int16'`` | ``'int32'``
        Defines the data format the data is read in. If ``'auto'``, it is
        determined from the file header using the ``numsamples`` field.
        Defaults to ``'auto'``.
    date_format : ``'mm/dd/yy'`` | ``'dd/mm/yy'``
        Format of date in the header. Defaults to ``'mm/dd/yy'``.
    header : ``'auto'`` | ``'new'`` | ``'old'``
        Defines the header format. Used to describe how bad channels
        are formatted. If ``'auto'``, the file is read using both the old and
        new header formats, and a channel is marked bad if either format
        marks it bad. Defaults to ``'auto'``.
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """

    def __init__(
        self,
        input_fname,
        eog=(),
        misc=(),
        ecg=(),
        emg=(),
        data_format="auto",
        date_format="mm/dd/yy",
        *,
        header="auto",
        preload=False,
        verbose=None,
    ):
        _check_option("date_format", date_format, ["mm/dd/yy", "dd/mm/yy"])
        if date_format == "dd/mm/yy":
            _date_format = "%d/%m/%y %H:%M:%S"
        else:
            _date_format = "%m/%d/%y %H:%M:%S"

        input_fname = path.abspath(input_fname)
        try:
            info, cnt_info = _get_cnt_info(
                input_fname, eog, ecg, emg, misc, data_format, _date_format, header
            )
        except Exception:
            raise RuntimeError(
                f"{_explain_exception()}\n"
                "WARNING: mne.io.read_raw_cnt "
                "supports Neuroscan CNT files only. If this file is an ANT Neuro CNT, "
                "please use mne.io.read_raw_ant instead."
            )
        last_samps = [cnt_info["n_samples"] - 1]
        super().__init__(
            info,
            preload,
            filenames=[input_fname],
            raw_extras=[cnt_info],
            last_samps=last_samps,
            orig_format="int",
            verbose=verbose,
        )

        data_format = "int32" if cnt_info["n_bytes"] == 4 else "int16"
        self.set_annotations(
            _read_annotations_cnt(input_fname, data_format=data_format)
        )

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Take a chunk of raw data, multiply by mult or cals, and store."""
        n_channels = self._raw_extras[fi]["orig_nchan"]
        if "stim_channel" in self._raw_extras[fi]:
            f_channels = n_channels - 1  # Stim channel already read.
            stim_ch = self._raw_extras[fi]["stim_channel"]
        else:
            f_channels = n_channels
            stim_ch = None

        channel_offset = self._raw_extras[fi]["channel_offset"]
        baselines = self._raw_extras[fi]["baselines"]
        n_bytes = self._raw_extras[fi]["n_bytes"]
        n_samples = self._raw_extras[fi]["n_samples"]
        dtype = "<i4" if n_bytes == 4 else "<i2"
        chunk_size = channel_offset * f_channels  # Size of chunks in file.
        # The data is divided into blocks of samples / channel.
        # channel_offset determines the amount of successive samples.
        # Here we use sample offset to align the data because start can be in
        # the middle of these blocks.
        data_left = (stop - start) * f_channels
        # Read up to 100 MB of data at a time, block_size is in data samples
        block_size = ((int(100e6) // n_bytes) // chunk_size) * chunk_size
        block_size = min(data_left, block_size)
        s_offset = start % channel_offset
        with open(self.filenames[fi], "rb", buffering=0) as fid:
            fid.seek(900 + f_channels * (75 + (start - s_offset) * n_bytes))
            for sample_start in np.arange(0, data_left, block_size) // f_channels:
                # The earlier comment says n_samples is unreliable, but that
                # seems to be because it needed to be read as an unsigned int.
                # See: PR #12393
                sample_stop = sample_start + min(
                    (
                        n_samples,
                        block_size // f_channels,
                        data_left // f_channels - sample_start,
                    )
                )
                n_samps = sample_stop - sample_start
                one = np.zeros((n_channels, n_samps))

                # In case channel offset and start time do not align perfectly,
                # extra sample sets are read here to cover the desired time
                # window. The whole (up to 100 MB) block is read at once and
                # then reshaped to (n_channels, n_samples).
                extra_samps = (
                    chunk_size
                    if (s_offset != 0 or n_samps % channel_offset != 0)
                    else 0
                )
                if s_offset >= (channel_offset / 2):  # Extend at the end.
                    extra_samps += chunk_size
                count = n_samps // channel_offset * chunk_size + extra_samps
                n_chunks = count // chunk_size
                samps = np.fromfile(fid, dtype=dtype, count=count)
                samps = samps.reshape((n_chunks, f_channels, channel_offset), order="C")

                # Intermediate shaping to chunk sizes.
                block = np.zeros((n_channels, channel_offset * n_chunks))
                for set_idx, row in enumerate(samps):  # Final shape.
                    block_slice = slice(
                        set_idx * channel_offset, (set_idx + 1) * channel_offset
                    )
                    block[:f_channels, block_slice] = row
                if "stim_channel" in self._raw_extras[fi]:
                    _data_start = start + sample_start
                    _data_stop = start + sample_stop
                    block[-1] = stim_ch[_data_start:_data_stop]
                one[idx] = block[idx, s_offset : n_samps + s_offset]

                one[idx] -= baselines[idx][:, None]
                _mult_cal_one(data[:, sample_start:sample_stop], one, idx, cals, mult)
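The block layout that _read_segment_file undoes can be shown with a toy array: on disk, each chunk holds channel_offset consecutive samples per channel, channel by channel. A sketch with assumed sizes:

import numpy as np

f_channels, channel_offset, n_chunks = 3, 4, 2
samps = np.arange(f_channels * channel_offset * n_chunks)  # fake file content
samps = samps.reshape((n_chunks, f_channels, channel_offset), order="C")
block = np.zeros((f_channels, channel_offset * n_chunks))
for set_idx, row in enumerate(samps):  # same reshaping as in the reader
    block[:, set_idx * channel_offset : (set_idx + 1) * channel_offset] = row
assert block[1, :4].tolist() == [4.0, 5.0, 6.0, 7.0]  # channel 1, samples 0-3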
7
mne/io/constants.py
Normal file
@@ -0,0 +1,7 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .._fiff.constants import FIFF

__all__ = ["FIFF"]
7
mne/io/ctf/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""CTF module for conversion to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .ctf import read_raw_ctf, RawCTF
38
mne/io/ctf/constants.py
Normal file
@@ -0,0 +1,38 @@
"""CTF constants."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from ...utils import BunchConst


CTF = BunchConst()

# ctf_types.h
CTF.CTFV_MAX_AVERAGE_BINS = 8
CTF.CTFV_MAX_COILS = 8
CTF.CTFV_MAX_BALANCING = 50
CTF.CTFV_SENSOR_LABEL = 31

CTF.CTFV_COIL_LPA = 1
CTF.CTFV_COIL_RPA = 2
CTF.CTFV_COIL_NAS = 3
CTF.CTFV_COIL_SPARE = 4

CTF.CTFV_REF_MAG_CH = 0
CTF.CTFV_REF_GRAD_CH = 1
CTF.CTFV_MEG_CH = 5
CTF.CTFV_EEG_CH = 9
CTF.CTFV_STIM_CH = 11

CTF.CTFV_FILTER_LOWPASS = 1
CTF.CTFV_FILTER_HIGHPASS = 2

# read_res4.c
CTF.FUNNY_POS = 1844

# read_write_data.c
CTF.HEADER_SIZE = 8
CTF.BLOCK_SIZE = 2000
CTF.SYSTEM_CLOCK_CH = "SCLK01-177"
303
mne/io/ctf/ctf.py
Normal file
@@ -0,0 +1,303 @@
"""Conversion tool from CTF to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os

import numpy as np

from ..._fiff._digitization import _format_dig_points
from ..._fiff.utils import _blk_read_lims, _mult_cal_one
from ...utils import (
    _check_fname,
    _check_option,
    _clean_names,
    fill_doc,
    logger,
    verbose,
)
from ..base import BaseRaw
from .constants import CTF
from .eeg import _read_eeg, _read_pos
from .hc import _read_hc
from .info import _annotate_bad_segments, _compose_meas_info, _read_bad_chans
from .markers import _read_annotations_ctf_call
from .res4 import _make_ctf_name, _read_res4
from .trans import _make_ctf_coord_trans_set


@fill_doc
def read_raw_ctf(
    directory, system_clock="truncate", preload=False, clean_names=False, verbose=None
) -> "RawCTF":
    """Raw object from CTF directory.

    Parameters
    ----------
    directory : path-like
        Path to the CTF data (ending in ``'.ds'``).
    system_clock : str
        How to treat the system clock. Use "truncate" (default) to truncate
        the data file when the system clock drops to zero, and use "ignore"
        to ignore the system clock (e.g., if head positions are measured
        multiple times during a recording).
    %(preload)s
    clean_names : bool, optional
        If True, main channel names and compensation channel names will
        be stripped of CTF suffixes. The default is False.
    %(verbose)s

    Returns
    -------
    raw : instance of RawCTF
        The raw data.

    Notes
    -----
    .. versionadded:: 0.11

    To read in the Polhemus digitization data (for example, from
    a .pos file), include the file in the CTF directory. The
    points will then automatically be read into the `mne.io.Raw`
    instance via `mne.io.read_raw_ctf`.
    """
    return RawCTF(
        directory,
        system_clock,
        preload=preload,
        clean_names=clean_names,
        verbose=verbose,
    )
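A typical call, sketched with a hypothetical path:

import mne

raw = mne.io.read_raw_ctf("recording.ds", system_clock="truncate")
print(raw.info["sfreq"], len(raw.ch_names))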
@fill_doc
class RawCTF(BaseRaw):
    """Raw object from CTF directory.

    Parameters
    ----------
    directory : path-like
        Path to the CTF data (ending in ``'.ds'``).
    system_clock : str
        How to treat the system clock. Use ``"truncate"`` (default) to truncate
        the data file when the system clock drops to zero, and use ``"ignore"``
        to ignore the system clock (e.g., if head positions are measured
        multiple times during a recording).
    %(preload)s
    clean_names : bool, optional
        If True, main channel names and compensation channel names will
        be stripped of CTF suffixes. The default is False.
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """

    @verbose
    def __init__(
        self,
        directory,
        system_clock="truncate",
        preload=False,
        verbose=None,
        clean_names=False,
    ):
        # adapted from mne_ctf2fiff.c
        directory = str(
            _check_fname(directory, "read", True, "directory", need_dir=True)
        )
        if not directory.endswith(".ds"):
            raise TypeError(
                f'directory must be a directory ending with ".ds", got {directory}'
            )
        _check_option("system_clock", system_clock, ["ignore", "truncate"])
        logger.info(f"ds directory : {directory}")
        res4 = _read_res4(directory)  # Read the magical res4 file
        coils = _read_hc(directory)  # Read the coil locations
        eeg = _read_eeg(directory)  # Read the EEG electrode loc info

        # Investigate the coil location data to get the coordinate trans
        coord_trans = _make_ctf_coord_trans_set(res4, coils)

        digs = _read_pos(directory, coord_trans)

        # Compose a structure which makes fiff writing a piece of cake
        info = _compose_meas_info(res4, coils, coord_trans, eeg)
        with info._unlock():
            info["dig"] += digs
            info["dig"] = _format_dig_points(info["dig"])
            info["bads"] += _read_bad_chans(directory, info)

        # Determine how our data is distributed across files
        fnames = list()
        last_samps = list()
        raw_extras = list()
        missing_names = list()
        no_samps = list()
        while True:
            suffix = "meg4" if len(fnames) == 0 else f"{len(fnames)}_meg4"
            meg4_name, found = _make_ctf_name(directory, suffix, raise_error=False)
            if not found:
                missing_names.append(os.path.relpath(meg4_name, directory))
                break
            # check how much data is in the file
            sample_info = _get_sample_info(meg4_name, res4, system_clock)
            if sample_info["n_samp"] == 0:
                no_samps.append(os.path.relpath(meg4_name, directory))
                break
            if len(fnames) == 0:
                buffer_size_sec = sample_info["block_size"] / info["sfreq"]
            else:
                buffer_size_sec = 1.0
            fnames.append(meg4_name)
            last_samps.append(sample_info["n_samp"] - 1)
            raw_extras.append(sample_info)
        first_samps = [0] * len(last_samps)
        if len(fnames) == 0:
            raise OSError(
                f"Could not find any data, could not find the following "
                f"file(s): {missing_names}, and the following file(s) had no "
                f"valid samples: {no_samps}"
            )
        super().__init__(
            info,
            preload,
            first_samps=first_samps,
            last_samps=last_samps,
            filenames=fnames,
            raw_extras=raw_extras,
            orig_format="int",
            buffer_size_sec=buffer_size_sec,
            verbose=verbose,
        )

        # Add bad segments as Annotations (correct for start time)
        start_time = -res4["pre_trig_pts"] / float(info["sfreq"])
        annot = _annotate_bad_segments(directory, start_time, info["meas_date"])
        marker_annot = _read_annotations_ctf_call(
            directory=directory,
            total_offset=(res4["pre_trig_pts"] / res4["sfreq"]),
            trial_duration=(res4["nsamp"] / res4["sfreq"]),
            meas_date=info["meas_date"],
        )
        annot = marker_annot if annot is None else annot + marker_annot
        self.set_annotations(annot)
        if clean_names:
            _clean_names_inst(self)

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        si = self._raw_extras[fi]
        offset = 0
        trial_start_idx, r_lims, d_lims = _blk_read_lims(
            start, stop, int(si["block_size"])
        )
        with open(self.filenames[fi], "rb") as fid:
            for bi in range(len(r_lims)):
                samp_offset = (bi + trial_start_idx) * si["res4_nsamp"]
                n_read = min(si["n_samp_tot"] - samp_offset, si["block_size"])
                # read the chunk of data
                # have to be careful on Windows and make sure we are using
                # 64-bit integers here
                with np.errstate(over="raise"):
                    pos = np.int64(CTF.HEADER_SIZE)
                    pos += np.int64(samp_offset) * si["n_chan"] * 4
                fid.seek(pos, 0)
                this_data = np.fromfile(fid, ">i4", count=si["n_chan"] * n_read)
                this_data.shape = (si["n_chan"], n_read)
                this_data = this_data[:, r_lims[bi, 0] : r_lims[bi, 1]]
                data_view = data[:, d_lims[bi, 0] : d_lims[bi, 1]]
                _mult_cal_one(data_view, this_data, idx, cals, mult)
                offset += n_read


def _clean_names_inst(inst):
    """Clean up CTF suffixes from channel names."""
    mapping = dict(zip(inst.ch_names, _clean_names(inst.ch_names)))
    inst.rename_channels(mapping)
    for comp in inst.info["comps"]:
        for key in ("row_names", "col_names"):
            comp["data"][key] = _clean_names(comp["data"][key])
def _get_sample_info(fname, res4, system_clock):
    """Determine the number of valid samples."""
    logger.info(f"Finding samples for {fname}: ")
    if CTF.SYSTEM_CLOCK_CH in res4["ch_names"]:
        clock_ch = res4["ch_names"].index(CTF.SYSTEM_CLOCK_CH)
    else:
        clock_ch = None
    for k, ch in enumerate(res4["chs"]):
        if ch["ch_name"] == CTF.SYSTEM_CLOCK_CH:
            clock_ch = k
            break
    with open(fname, "rb") as fid:
        fid.seek(0, os.SEEK_END)
        st_size = fid.tell()
        fid.seek(0, 0)
        if (st_size - CTF.HEADER_SIZE) % (4 * res4["nsamp"] * res4["nchan"]) != 0:
            raise RuntimeError(
                "The number of samples is not an even multiple of the trial size"
            )
        n_samp_tot = (st_size - CTF.HEADER_SIZE) // (4 * res4["nchan"])
        n_trial = n_samp_tot // res4["nsamp"]
        n_samp = n_samp_tot
        if clock_ch is None:
            logger.info(
                " System clock channel is not available, assuming "
                "all samples to be valid."
            )
        elif system_clock == "ignore":
            logger.info(" System clock channel is available, but ignored.")
        else:  # use it
            logger.info(
                " System clock channel is available, checking "
                "which samples are valid."
            )
            for t in range(n_trial):
                # Skip to the correct trial
                samp_offset = t * res4["nsamp"]
                offset = (
                    CTF.HEADER_SIZE
                    + (samp_offset * res4["nchan"] + (clock_ch * res4["nsamp"])) * 4
                )
                fid.seek(offset, 0)
                this_data = np.fromfile(fid, ">i4", res4["nsamp"])
                if len(this_data) != res4["nsamp"]:
                    raise RuntimeError(f"Cannot read data for trial {t+1}.")
                end = np.where(this_data == 0)[0]
                if len(end) > 0:
                    n_samp = samp_offset + end[0]
                    break
    if n_samp < res4["nsamp"]:
        n_trial = 1
        logger.info(
            " %d x %d = %d samples from %d chs",
            n_trial,
            n_samp,
            n_samp,
            res4["nchan"],
        )
    else:
        n_trial = n_samp // res4["nsamp"]
        n_omit = n_samp_tot - n_samp
        logger.info(
            " %d x %d = %d samples from %d chs",
            n_trial,
            res4["nsamp"],
            n_samp,
            res4["nchan"],
        )
        if n_omit != 0:
            logger.info(" %d samples omitted at the end", n_omit)

    return dict(
        n_samp=n_samp,
        n_samp_tot=n_samp_tot,
        block_size=res4["nsamp"],
        res4_nsamp=res4["nsamp"],
        n_chan=res4["nchan"],
    )
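The size check and sample count above, as a worked example with assumed res4 values; each sample of each channel occupies 4 bytes after the 8-byte header:

HEADER_SIZE = 8  # CTF.HEADER_SIZE
nchan, nsamp, n_trial = 275, 1200, 10
st_size = HEADER_SIZE + 4 * nchan * nsamp * n_trial
assert (st_size - HEADER_SIZE) % (4 * nsamp * nchan) == 0
n_samp_tot = (st_size - HEADER_SIZE) // (4 * nchan)
assert n_samp_tot == nsamp * n_trial  # 12000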
108
mne/io/ctf/eeg.py
Normal file
@@ -0,0 +1,108 @@
"""Read .eeg files."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from os import listdir
from os.path import join

import numpy as np

from ..._fiff.constants import FIFF
from ...transforms import apply_trans
from ...utils import logger, warn
from .res4 import _make_ctf_name

_cardinal_dict = dict(
    nasion=FIFF.FIFFV_POINT_NASION,
    lpa=FIFF.FIFFV_POINT_LPA,
    left=FIFF.FIFFV_POINT_LPA,
    rpa=FIFF.FIFFV_POINT_RPA,
    right=FIFF.FIFFV_POINT_RPA,
)


def _read_eeg(directory):
    """Read the .eeg file."""
    # Missing file is ok
    fname, found = _make_ctf_name(directory, "eeg", raise_error=False)
    if not found:
        logger.info(" Separate EEG position data file not present.")
        return
    eeg = dict(
        labels=list(),
        kinds=list(),
        ids=list(),
        rr=list(),
        np=0,
        assign_to_chs=True,
        coord_frame=FIFF.FIFFV_MNE_COORD_CTF_HEAD,
    )
    with open(fname, "rb") as fid:
        for line in fid:
            line = line.strip()
            if len(line) > 0:
                parts = line.decode("utf-8").split()
                if len(parts) != 5:
                    raise RuntimeError(f"Illegal data in EEG position file: {line}")
                r = np.array([float(p) for p in parts[2:]]) / 100.0
                if (r * r).sum() > 1e-4:
                    label = parts[1]
                    eeg["labels"].append(label)
                    eeg["rr"].append(r)
                    id_ = _cardinal_dict.get(label.lower(), int(parts[0]))
                    if label.lower() in _cardinal_dict:
                        kind = FIFF.FIFFV_POINT_CARDINAL
                    else:
                        kind = FIFF.FIFFV_POINT_EXTRA
                    eeg["ids"].append(id_)
                    eeg["kinds"].append(kind)
                    eeg["np"] += 1
    logger.info(" Separate EEG position data file read.")
    return eeg


def _read_pos(directory, transformations):
    """Read the .pos file and return eeg positions as dig extra points."""
    fname = [join(directory, f) for f in listdir(directory) if f.endswith(".pos")]
    if len(fname) < 1:
        return list()
    elif len(fname) > 1:
        warn(" Found multiple pos files. Extra digitizer points not added.")
        return list()
    fname = fname[0]  # unpack first so the log shows the path, not a list
    logger.info(f" Reading digitizer points from {fname}...")
    if transformations["t_ctf_head_head"] is None:
        warn(" No transformation found. Extra digitizer points not added.")
        return list()
    digs = list()
    i = 2000
    with open(fname) as fid:
        for line in fid:
            line = line.strip()
            if len(line) > 0:
                parts = line.split()
                # The lines can have 4 or 5 parts. The first part is the id,
                # which can be an int or a string. The last three are the xyz
                # coordinates. The extra part is for additional info
                # (e.g. 'Pz', 'Cz') and is ignored.
                if len(parts) not in [4, 5]:
                    continue
                try:
                    ident = int(parts[0]) + 1000
                except ValueError:  # if id is not an int
                    ident = i
                    i += 1
                dig = dict(
                    kind=FIFF.FIFFV_POINT_EXTRA,
                    ident=ident,
                    r=list(),
                    coord_frame=FIFF.FIFFV_COORD_HEAD,
                )
                r = np.array([float(p) for p in parts[-3:]]) / 100.0  # cm to m
                if (r * r).sum() > 1e-4:
                    r = apply_trans(transformations["t_ctf_head_head"], r)
                    dig["r"] = r
                    digs.append(dig)
    return digs
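A sketch of the per-line parsing in _read_pos (line content assumed): 4 or 5 whitespace-separated fields, the last three being centimeter coordinates.

line = "17 Cz 0.5 1.0 9.5"  # hypothetical 5-field .pos line
parts = line.split()
ident = int(parts[0]) + 1000  # numeric ids are shifted by 1000
r_m = [float(p) / 100.0 for p in parts[-3:]]  # cm -> m, as in the reader
assert ident == 1017 and r_m == [0.005, 0.01, 0.095]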
89
mne/io/ctf/hc.py
Normal file
@@ -0,0 +1,89 @@
"""Read .hc files."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import numpy as np

from ..._fiff.constants import FIFF
from ...utils import logger
from .constants import CTF
from .res4 import _make_ctf_name

_kind_dict = {
    "nasion": CTF.CTFV_COIL_NAS,
    "left ear": CTF.CTFV_COIL_LPA,
    "right ear": CTF.CTFV_COIL_RPA,
    "spare": CTF.CTFV_COIL_SPARE,
}

_coord_dict = {
    "relative to dewar": FIFF.FIFFV_MNE_COORD_CTF_DEVICE,
    "relative to head": FIFF.FIFFV_MNE_COORD_CTF_HEAD,
}


def _read_one_coil_point(fid):
    """Read coil coordinate information from the hc file."""
    # Descriptor
    one = "#"
    while len(one) > 0 and one[0] == "#":
        one = fid.readline()
        if len(one) == 0:
            return None
        one = one.strip().decode("utf-8")
    if "Unable" in one:
        raise RuntimeError("HPI information not available")

    # Hopefully this is an unambiguous interpretation
    p = dict()
    p["valid"] = "measured" in one
    for key, val in _coord_dict.items():
        if key in one:
            p["coord_frame"] = val
            break
    else:
        p["coord_frame"] = -1

    for key, val in _kind_dict.items():
        if key in one:
            p["kind"] = val
            break
    else:
        p["kind"] = -1

    # Three coordinates
    p["r"] = np.empty(3)
    for ii, coord in enumerate("xyz"):
        sp = fid.readline().decode("utf-8").strip()
        if len(sp) == 0:  # blank line
            continue
        sp = sp.split(" ")
        if len(sp) != 3 or sp[0] != coord or sp[1] != "=":
            raise RuntimeError(f"Bad line: {one}")
        # Convert centimeters to meters
        p["r"][ii] = float(sp[2]) / 100.0
    return p


def _read_hc(directory):
    """Read the hc file to get the HPI info and to prepare for coord trans."""
    fname, found = _make_ctf_name(directory, "hc", raise_error=False)
    if not found:
        logger.info(" hc data not present")
        return None
    s = list()
    with open(fname, "rb") as fid:
        while True:
            p = _read_one_coil_point(fid)
            if p is None:
                # A missing first point indicates that the file is empty
                if len(s) == 0:
                    logger.info("hc file empty, no data present")
                    return None
                # _read_one_coil_point returns None at EOF
                logger.info(" hc data read.")
                return s
            if p["valid"]:
                s.append(p)
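A sketch feeding _read_one_coil_point a synthetic descriptor plus three coordinate lines through an in-memory buffer (the line content mimics an .hc file and is an assumption):

import io

sample = (
    b"measured nasion coil position relative to head (cm):\n"
    b"x = 10.0\n"
    b"y = 0.0\n"
    b"z = 5.0\n"
)
p = _read_one_coil_point(io.BytesIO(sample))
assert p["valid"] and p["r"].tolist() == [0.1, 0.0, 0.05]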
561
mne/io/ctf/info.py
Normal file
@@ -0,0 +1,561 @@
"""Populate measurement info."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os.path as op
from calendar import timegm
from time import strptime

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.ctf_comp import _add_kind, _calibrate_comp
from ..._fiff.meas_info import _empty_info
from ..._fiff.write import get_new_file_id
from ...annotations import Annotations
from ...transforms import (
    _coord_frame_name,
    apply_trans,
    combine_transforms,
    invert_transform,
)
from ...utils import _clean_names, logger, warn
from .constants import CTF

_ctf_to_fiff = {
    CTF.CTFV_COIL_LPA: FIFF.FIFFV_POINT_LPA,
    CTF.CTFV_COIL_RPA: FIFF.FIFFV_POINT_RPA,
    CTF.CTFV_COIL_NAS: FIFF.FIFFV_POINT_NASION,
}


def _pick_isotrak_and_hpi_coils(res4, coils, t):
    """Pick the HPI coil locations given in device coordinates."""
    if coils is None:
        return list(), list()
    dig = list()
    hpi_result = dict(dig_points=list())
    n_coil_dev = 0
    n_coil_head = 0
    for p in coils:
        if p["valid"]:
            if p["kind"] in [CTF.CTFV_COIL_LPA, CTF.CTFV_COIL_RPA, CTF.CTFV_COIL_NAS]:
                kind = FIFF.FIFFV_POINT_CARDINAL
                ident = _ctf_to_fiff[p["kind"]]
            else:  # CTF.CTFV_COIL_SPARE
                kind = FIFF.FIFFV_POINT_HPI
                ident = p["kind"]
            if p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE:
                if t is None or t["t_ctf_dev_dev"] is None:
                    raise RuntimeError(
                        "No coordinate transformation "
                        "available for HPI coil locations"
                    )
                d = dict(
                    kind=kind,
                    ident=ident,
                    r=apply_trans(t["t_ctf_dev_dev"], p["r"]),
                    coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
                )
                hpi_result["dig_points"].append(d)
                n_coil_dev += 1
            elif p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
                if t is None or t["t_ctf_head_head"] is None:
                    raise RuntimeError(
                        "No coordinate transformation "
                        "available for (virtual) Polhemus data"
                    )
                d = dict(
                    kind=kind,
                    ident=ident,
                    r=apply_trans(t["t_ctf_head_head"], p["r"]),
                    coord_frame=FIFF.FIFFV_COORD_HEAD,
                )
                dig.append(d)
                n_coil_head += 1
    if n_coil_head > 0:
        logger.info(" Polhemus data for %d HPI coils added", n_coil_head)
    if n_coil_dev > 0:
        logger.info(
            " Device coordinate locations for %d HPI coils added", n_coil_dev
        )
    return dig, [hpi_result]
def _convert_time(date_str, time_str):
    """Convert date and time strings to float time."""
    if date_str == time_str == "":
        date_str = "01/01/1970"
        time_str = "00:00:00"
        logger.info(
            "No date or time found, setting to the start of the "
            "POSIX epoch (1970/01/01 midnight)"
        )

    for fmt in ("%d/%m/%Y", "%d-%b-%Y", "%a, %b %d, %Y", "%Y/%m/%d"):
        try:
            date = strptime(date_str.strip(), fmt)
        except ValueError:
            pass
        else:
            break
    else:
        raise RuntimeError(
            f"Illegal date: {date_str}.\nIf the language of the date does not "
            "correspond to your local machine's language try to set the "
            "locale to the language of the date string:\n"
            'locale.setlocale(locale.LC_ALL, "en_US")'
        )

    for fmt in ("%H:%M:%S", "%H:%M"):
        try:
            time = strptime(time_str, fmt)
        except ValueError:
            pass
        else:
            break
    else:
        raise RuntimeError(f"Illegal time: {time_str}")
    # MNE-C uses mktime, which uses local time; here we instead decouple the
    # conversion from the local machine and assume that the acquisition was in
    # GMT. This will be wrong for most sites, but at least the value we obtain
    # here won't depend on the geographical location where the file was
    # converted.
    res = timegm(
        (
            date.tm_year,
            date.tm_mon,
            date.tm_mday,
            time.tm_hour,
            time.tm_min,
            time.tm_sec,
            date.tm_wday,
            date.tm_yday,
            date.tm_isdst,
        )
    )
    return res
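For example (assumed strings), the GMT-based conversion yields a value that is independent of the local timezone:

from calendar import timegm
from time import strptime

date = strptime("11/12/2013", "%d/%m/%Y")
time = strptime("14:22:00", "%H:%M:%S")
res = timegm(
    (date.tm_year, date.tm_mon, date.tm_mday, time.tm_hour, time.tm_min,
     time.tm_sec, date.tm_wday, date.tm_yday, date.tm_isdst)
)
print(res)  # 1386771720 seconds since the POSIX epoch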
def _get_plane_vectors(ez):
    """Get two orthogonal vectors orthogonal to ez (ez will be modified)."""
    assert ez.shape == (3,)
    ez_len = np.sqrt(np.sum(ez * ez))
    if ez_len == 0:
        raise RuntimeError("Zero length normal. Cannot proceed.")
    if np.abs(ez_len - np.abs(ez[2])) < 1e-5:  # ez already in z-direction
        ex = np.array([1.0, 0.0, 0.0])
    else:
        ex = np.zeros(3)
        if ez[1] < ez[2]:
            ex[0 if ez[0] < ez[1] else 1] = 1.0
        else:
            ex[0 if ez[0] < ez[2] else 2] = 1.0
    ez /= ez_len
    ex -= np.dot(ez, ex) * ez
    ex /= np.sqrt(np.sum(ex * ex))
    ey = np.cross(ez, ex)
    return ex, ey
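A quick numerical check (sketch; assumes _get_plane_vectors is in scope) that the returned frame is orthonormal:

import numpy as np

ez = np.array([1.0, 2.0, 2.0])  # arbitrary non-zero normal
ex, ey = _get_plane_vectors(ez)  # note: ez is normalized in place
for a, b in ((ex, ey), (ex, ez), (ey, ez)):
    assert abs(np.dot(a, b)) < 1e-12
assert np.allclose([np.linalg.norm(v) for v in (ex, ey, ez)], 1.0)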
def _at_origin(x):
|
||||
"""Determine if a vector is at the origin."""
|
||||
return np.sum(x * x) < 1e-8
|
||||
|
||||
|
||||
def _check_comp_ch(cch, kind, desired=None):
|
||||
if desired is None:
|
||||
desired = cch["grad_order_no"]
|
||||
if cch["grad_order_no"] != desired:
|
||||
raise RuntimeError(
|
||||
f"{kind} channel with inconsistent compensation "
|
||||
f"grade {cch['grad_order_no']}, should be {desired}"
|
||||
)
|
||||
return desired
|
||||
|
||||
|
||||
def _convert_channel_info(res4, t, use_eeg_pos):
    """Convert CTF channel information to fif format."""
    nmeg = neeg = nstim = nmisc = nref = 0
    chs = list()
    this_comp = None
    for k, cch in enumerate(res4["chs"]):
        cal = float(1.0 / (cch["proper_gain"] * cch["qgain"]))
        ch = dict(
            scanno=k + 1,
            range=1.0,
            cal=cal,
            loc=np.full(12, np.nan),
            unit_mul=FIFF.FIFF_UNITM_NONE,
            ch_name=cch["ch_name"][:15],
            coil_type=FIFF.FIFFV_COIL_NONE,
        )
        del k
        chs.append(ch)
        # Create the channel position information
        if cch["sensor_type_index"] in (
            CTF.CTFV_REF_MAG_CH,
            CTF.CTFV_REF_GRAD_CH,
            CTF.CTFV_MEG_CH,
        ):
            # Extra check for a valid MEG channel
            if (
                np.sum(cch["coil"]["pos"][0] ** 2) < 1e-6
                or np.sum(cch["coil"]["norm"][0] ** 2) < 1e-6
            ):
                nmisc += 1
                ch.update(
                    logno=nmisc,
                    coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
                    kind=FIFF.FIFFV_MISC_CH,
                    unit=FIFF.FIFF_UNIT_V,
                )
                text = "MEG"
                if cch["sensor_type_index"] != CTF.CTFV_MEG_CH:
                    text += " ref"
                warn(
                    f"{text} channel {ch['ch_name']} did not have position "
                    "assigned, so it was changed to a MISC channel"
                )
                continue
            ch["unit"] = FIFF.FIFF_UNIT_T
            # Set up the local coordinate frame
            r0 = cch["coil"]["pos"][0].copy()
            ez = cch["coil"]["norm"][0].copy()
            # It turns out that positive proper_gain requires swapping
            # of the normal direction
            if cch["proper_gain"] > 0.0:
                ez *= -1
            # Check how the other vectors should be defined
            off_diag = False
            # Default: ex and ey are arbitrary in the plane normal to ez
            if cch["sensor_type_index"] == CTF.CTFV_REF_GRAD_CH:
                # The off-diagonal gradiometers are an exception:
                #
                # We use the same convention for ex as for Neuromag planar
                # gradiometers: ex pointing in the positive gradient direction
                diff = cch["coil"]["pos"][0] - cch["coil"]["pos"][1]
                size = np.sqrt(np.sum(diff * diff))
                if size > 0.0:
                    diff /= size
                # Is ez normal to the line joining the coils?
                if np.abs(np.dot(diff, ez)) < 1e-3:
                    off_diag = True
                    # Handle the off-diagonal gradiometer coordinate system
                    r0 -= size * diff / 2.0
                    ex = diff
                    ey = np.cross(ez, ex)
                else:
                    ex, ey = _get_plane_vectors(ez)
            else:
                ex, ey = _get_plane_vectors(ez)
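            # FIFF convention: ch["loc"] holds 12 values, the coil center r0
            # followed by the basis unit vectors ex, ey, and ez.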
            # Transform into a Neuromag-like device coordinate system
            ch["loc"] = np.concatenate(
                [
                    apply_trans(t["t_ctf_dev_dev"], r0),
                    apply_trans(t["t_ctf_dev_dev"], ex, move=False),
                    apply_trans(t["t_ctf_dev_dev"], ey, move=False),
                    apply_trans(t["t_ctf_dev_dev"], ez, move=False),
                ]
            )
            del r0, ex, ey, ez
            # Set the coil type
            if cch["sensor_type_index"] == CTF.CTFV_REF_MAG_CH:
                ch["kind"] = FIFF.FIFFV_REF_MEG_CH
                ch["coil_type"] = FIFF.FIFFV_COIL_CTF_REF_MAG
                nref += 1
                ch["logno"] = nref
            elif cch["sensor_type_index"] == CTF.CTFV_REF_GRAD_CH:
                ch["kind"] = FIFF.FIFFV_REF_MEG_CH
                if off_diag:
                    ch["coil_type"] = FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD
                else:
                    ch["coil_type"] = FIFF.FIFFV_COIL_CTF_REF_GRAD
                nref += 1
                ch["logno"] = nref
            else:
                this_comp = _check_comp_ch(cch, "Gradiometer", this_comp)
                ch["kind"] = FIFF.FIFFV_MEG_CH
                ch["coil_type"] = FIFF.FIFFV_COIL_CTF_GRAD
                nmeg += 1
                ch["logno"] = nmeg
            # Encode the software gradiometer order
            ch["coil_type"] = int(ch["coil_type"] | (cch["grad_order_no"] << 16))
            ch["coord_frame"] = FIFF.FIFFV_COORD_DEVICE
        elif cch["sensor_type_index"] == CTF.CTFV_EEG_CH:
            coord_frame = FIFF.FIFFV_COORD_HEAD
            if use_eeg_pos:
                # EEG electrode coordinates may be present but in the
                # CTF head frame
                ch["loc"][:3] = cch["coil"]["pos"][0]
                if not _at_origin(ch["loc"][:3]):
                    if t["t_ctf_head_head"] is None:
                        warn(
                            f"EEG electrode ({ch['ch_name']}) location omitted "
                            "because of missing HPI information"
                        )
                        ch["loc"].fill(np.nan)
                        coord_frame = FIFF.FIFFV_MNE_COORD_CTF_HEAD
                    else:
                        ch["loc"][:3] = apply_trans(t["t_ctf_head_head"], ch["loc"][:3])
            neeg += 1
            ch.update(
                logno=neeg,
                kind=FIFF.FIFFV_EEG_CH,
                unit=FIFF.FIFF_UNIT_V,
                coord_frame=coord_frame,
                coil_type=FIFF.FIFFV_COIL_EEG,
            )
        elif cch["sensor_type_index"] == CTF.CTFV_STIM_CH:
            nstim += 1
            ch.update(
                logno=nstim,
                coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
                kind=FIFF.FIFFV_STIM_CH,
                unit=FIFF.FIFF_UNIT_V,
            )
        else:
            nmisc += 1
            ch.update(
                logno=nmisc,
                coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
                kind=FIFF.FIFFV_MISC_CH,
                unit=FIFF.FIFF_UNIT_V,
            )
    return chs


def _comp_sort_keys(c):
    """Sort the compensation data."""
    return (int(c["coeff_type"]), int(c["scanno"]))


def _check_comp(comp):
    """Check that conversion to named matrices is possible."""
    ref_sens = None
    kind = -1
    for k, c_k in enumerate(comp):
        if c_k["coeff_type"] != kind:
            c_ref = c_k
            ref_sens = c_ref["sensors"]
            kind = c_k["coeff_type"]
        elif c_k["sensors"] != ref_sens:
            raise RuntimeError("Cannot use an uneven compensation matrix")


def _conv_comp(comp, first, last, chs):
    """Add a new converted compensation data item."""
    ch_names = [c["ch_name"] for c in chs]
    n_col = comp[first]["ncoeff"]
    col_names = comp[first]["sensors"][:n_col]
    row_names = [comp[p]["sensor_name"] for p in range(first, last + 1)]
    mask = np.isin(col_names, ch_names)  # missing channels excluded
    col_names = np.array(col_names)[mask].tolist()
    n_col = len(col_names)
    n_row = len(row_names)
    ccomp = dict(ctfkind=comp[first]["coeff_type"], save_calibrated=False)
    _add_kind(ccomp)
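
    # One row per compensated channel (its sensor_name), one column per
    # reference sensor listed in the coefficient record.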
    data = np.empty((n_row, n_col))
    for ii, coeffs in enumerate(comp[first : last + 1]):
        # Pick the elements into the matrix
        data[ii, :] = coeffs["coeffs"][mask]
    ccomp["data"] = dict(
        row_names=row_names,
        col_names=col_names,
        data=data,
        nrow=len(row_names),
        ncol=len(col_names),
    )
    mk = ("proper_gain", "qgain")
    _calibrate_comp(ccomp, chs, row_names, col_names, mult_keys=mk, flip=True)
    return ccomp


def _convert_comp_data(res4):
    """Convert the compensation data into named matrices."""
    if res4["ncomp"] == 0:
        return
    # Sort the coefficients in our favorite order
    res4["comp"] = sorted(res4["comp"], key=_comp_sort_keys)
    # Check that all items for a given compensation type have the correct
    # number of channels
    _check_comp(res4["comp"])
    # Create named matrices
    first = 0
    kind = -1
    comps = list()
    for k in range(len(res4["comp"])):
        if res4["comp"][k]["coeff_type"] != kind:
            if k > 0:
                comps.append(_conv_comp(res4["comp"], first, k - 1, res4["chs"]))
            kind = res4["comp"][k]["coeff_type"]
            first = k
    comps.append(_conv_comp(res4["comp"], first, k, res4["chs"]))
    return comps


def _pick_eeg_pos(c):
    """Pick EEG positions."""
    eeg = dict(
        coord_frame=FIFF.FIFFV_COORD_HEAD,
        assign_to_chs=False,
        labels=list(),
        ids=list(),
        rr=list(),
        kinds=list(),
        np=0,
    )
    for ch in c["chs"]:
        if ch["kind"] == FIFF.FIFFV_EEG_CH and not _at_origin(ch["loc"][:3]):
            eeg["labels"].append(ch["ch_name"])
            eeg["ids"].append(ch["logno"])
            eeg["rr"].append(ch["loc"][:3])
            eeg["kinds"].append(FIFF.FIFFV_POINT_EEG)
            eeg["np"] += 1
    if eeg["np"] == 0:
        return None
    logger.info("Picked positions of %d EEG channels from channel info", eeg["np"])
    return eeg


def _add_eeg_pos(eeg, t, c):
    """Pick the (virtual) EEG position data."""
    if eeg is None:
        return
    if t is None or t["t_ctf_head_head"] is None:
        raise RuntimeError(
            "No coordinate transformation available for EEG position data"
        )
    eeg_assigned = 0
    if eeg["assign_to_chs"]:
        for k in range(eeg["np"]):
            # Look for a channel name match
            for ch in c["chs"]:
                if ch["ch_name"].lower() == eeg["labels"][k].lower():
                    r0 = ch["loc"][:3]
                    r0[:] = eeg["rr"][k]
                    if eeg["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
                        r0[:] = apply_trans(t["t_ctf_head_head"], r0)
                    elif eeg["coord_frame"] != FIFF.FIFFV_COORD_HEAD:
                        raise RuntimeError(
                            "Illegal coordinate frame for EEG electrode "
                            f"positions: {_coord_frame_name(eeg['coord_frame'])}"
                        )
                    # Use the logical channel number as an identifier
                    eeg["ids"][k] = ch["logno"]
                    eeg["kinds"][k] = FIFF.FIFFV_POINT_EEG
                    eeg_assigned += 1
                    break

    # Add these to the Polhemus data
    fid_count = eeg_count = extra_count = 0
    for k in range(eeg["np"]):
        d = dict(
            r=eeg["rr"][k].copy(),
            kind=eeg["kinds"][k],
            ident=eeg["ids"][k],
            coord_frame=FIFF.FIFFV_COORD_HEAD,
        )
        c["dig"].append(d)
        if eeg["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
            d["r"] = apply_trans(t["t_ctf_head_head"], d["r"])
        elif eeg["coord_frame"] != FIFF.FIFFV_COORD_HEAD:
            raise RuntimeError(
                "Illegal coordinate frame for EEG electrode positions: "
                + _coord_frame_name(eeg["coord_frame"])
            )
        if eeg["kinds"][k] == FIFF.FIFFV_POINT_CARDINAL:
            fid_count += 1
        elif eeg["kinds"][k] == FIFF.FIFFV_POINT_EEG:
            eeg_count += 1
        else:
            extra_count += 1
    if eeg_assigned > 0:
        logger.info(
            " %d EEG electrode locations assigned to channel info.", eeg_assigned
        )
    for count, kind in zip(
        (fid_count, eeg_count, extra_count),
        ("fiducials", "EEG locations", "extra points"),
    ):
        if count > 0:
            logger.info(" %d %s added to Polhemus data.", count, kind)


_filt_map = {CTF.CTFV_FILTER_LOWPASS: "lowpass", CTF.CTFV_FILTER_HIGHPASS: "highpass"}


def _compose_meas_info(res4, coils, trans, eeg):
    """Create meas info from CTF data."""
    info = _empty_info(res4["sfreq"])

    # Collect all the necessary data from the structures read
    info["meas_id"] = get_new_file_id()
    info["meas_id"]["usecs"] = 0
    info["meas_id"]["secs"] = _convert_time(res4["data_date"], res4["data_time"])
    info["meas_date"] = (info["meas_id"]["secs"], info["meas_id"]["usecs"])
    info["experimenter"] = res4["nf_operator"]
    info["subject_info"] = dict(his_id=res4["nf_subject_id"])
    for filt in res4["filters"]:
        if filt["type"] in _filt_map:
            info[_filt_map[filt["type"]]] = filt["freq"]
    info["dig"], info["hpi_results"] = _pick_isotrak_and_hpi_coils(res4, coils, trans)
    if trans is not None:
        if len(info["hpi_results"]) > 0:
            info["hpi_results"][0]["coord_trans"] = trans["t_ctf_head_head"]
        if trans["t_dev_head"] is not None:
            info["dev_head_t"] = trans["t_dev_head"]
            info["dev_ctf_t"] = combine_transforms(
                trans["t_dev_head"],
                invert_transform(trans["t_ctf_head_head"]),
                FIFF.FIFFV_COORD_DEVICE,
                FIFF.FIFFV_MNE_COORD_CTF_HEAD,
            )
        if trans["t_ctf_head_head"] is not None:
            info["ctf_head_t"] = trans["t_ctf_head_head"]
    info["chs"] = _convert_channel_info(res4, trans, eeg is None)
    info["comps"] = _convert_comp_data(res4)
    if eeg is None:
        # Pick EEG locations from chan info if not read from a separate file
        eeg = _pick_eeg_pos(info)
    _add_eeg_pos(eeg, trans, info)
    logger.info(" Measurement info composed.")
    info._unlocked = False
    info._update_redundant()
    return info


def _read_bad_chans(directory, info):
    """Read the bad channel list and match it to internal names."""
    fname = op.join(directory, "BadChannels")
    if not op.exists(fname):
        return []
    mapping = dict(zip(_clean_names(info["ch_names"]), info["ch_names"]))
    with open(fname) as fid:
        bad_chans = [mapping[f.strip()] for f in fid.readlines()]
    return bad_chans


def _annotate_bad_segments(directory, start_time, meas_date):
    fname = op.join(directory, "bad.segments")
    if not op.exists(fname):
        return None

    # read in the bad segment file
    onsets = []
    durations = []
    desc = []
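    # each line holds three whitespace-separated fields: a label (used as
    # the annotation description "bad_<label>") followed by the segment
    # start and end times in seconds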
    with open(fname) as fid:
        for f in fid.readlines():
            tmp = f.strip().split()
            desc.append(f"bad_{tmp[0]}")
            onsets.append(np.float64(tmp[1]) - start_time)
            durations.append(np.float64(tmp[2]) - np.float64(tmp[1]))
    # return None if there are no bad segments
    if len(onsets) == 0:
        return None

    return Annotations(onsets, durations, desc, meas_date)
89
mne/io/ctf/markers.py
Normal file
@@ -0,0 +1,89 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os.path as op
from io import BytesIO

import numpy as np

from ...annotations import Annotations
from .info import _convert_time
from .res4 import _read_res4


def _get_markers(fname):
    def consume(fid, predicate):  # just a consumer to move around conveniently
        while predicate(fid.readline()):
            pass

    def parse_marker(string):  # XXX: there should be a nicer way to do that
        data = np.genfromtxt(
            BytesIO(string.encode()), dtype=[("trial", int), ("sync", float)]
        )
        return int(data["trial"]), float(data["sync"])

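    # A MarkerFile.mrk is plain text: a "NUMBER OF MARKERS:" count, then for
    # each marker a "NAME:" line, a "NUMBER OF SAMPLES:" count, and a
    # "LIST OF SAMPLES:" table whose rows hold (trial number, sync time).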
    markers = dict()
    with open(fname) as fid:
        consume(fid, lambda line: not line.startswith("NUMBER OF MARKERS:"))
        num_of_markers = int(fid.readline())

        for _ in range(num_of_markers):
            consume(fid, lambda line: not line.startswith("NAME:"))
            label = fid.readline().strip("\n")

            consume(fid, lambda line: not line.startswith("NUMBER OF SAMPLES:"))
            n_markers = int(fid.readline())

            consume(fid, lambda line: not line.startswith("LIST OF SAMPLES:"))
            next(fid)  # skip the samples header
            markers[label] = [parse_marker(next(fid)) for _ in range(n_markers)]

    return markers


def _get_res4_info_needed_by_markers(directory):
    """Get required information from CTF res4 information file."""
    # we only need a few values from res4. Maybe we can read them directly
    # instead of parsing the entire res4 file.
    res4 = _read_res4(directory)

    total_offset_duration = res4["pre_trig_pts"] / res4["sfreq"]
    trial_duration = res4["nsamp"] / res4["sfreq"]

    meas_date = (_convert_time(res4["data_date"], res4["data_time"]), 0)
    return total_offset_duration, trial_duration, meas_date


def _read_annotations_ctf(directory):
    total_offset, trial_duration, meas_date = _get_res4_info_needed_by_markers(
        directory
    )
    return _read_annotations_ctf_call(
        directory, total_offset, trial_duration, meas_date
    )


def _read_annotations_ctf_call(directory, total_offset, trial_duration, meas_date):
    fname = op.join(directory, "MarkerFile.mrk")
    if not op.exists(fname):
        return Annotations(list(), list(), list(), orig_time=meas_date)
    markers = _get_markers(fname)

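    # Sync times are relative to each trial's time zero, so convert them to
    # continuous recording time by adding the trial offset plus the
    # pre-trigger offset.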
    onset = [
        synctime + (trialnum * trial_duration) + total_offset
        for m in markers.values()
        for (trialnum, synctime) in m
    ]

    description = np.concatenate(
        [np.repeat(label, len(m)) for label, m in markers.items()]
    )

    return Annotations(
        onset=onset,
        duration=np.zeros_like(onset),
        description=description,
        orig_time=meas_date,
    )
232
mne/io/ctf/res4.py
Normal file
@@ -0,0 +1,232 @@
"""Read .res4 files."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os.path as op

import numpy as np

from ...utils import logger
from .constants import CTF


def _make_ctf_name(directory, extra, raise_error=True):
    """Make a CTF name."""
    fname = op.join(directory, op.basename(directory)[:-3] + "." + extra)
    found = True
    if not op.isfile(fname):
        if raise_error:
            raise OSError(f"Standard file {fname} not found")
        found = False
    return fname, found


def _read_double(fid, n=1):
    """Read a double."""
    return np.fromfile(fid, ">f8", n)


def _read_string(fid, n_bytes, decode=True):
    """Read string."""
    s0 = fid.read(n_bytes)
    s = s0.split(b"\x00")[0]
    return s.decode("utf-8") if decode else s


def _read_ustring(fid, n_bytes):
    """Read unsigned character string."""
    return np.fromfile(fid, ">B", n_bytes)


def _read_int2(fid):
    """Read int from short."""
    return _auto_cast(np.fromfile(fid, ">i2", 1)[0])


def _read_int(fid):
    """Read a 32-bit integer."""
    return np.fromfile(fid, ">i4", 1)[0]


def _move_to_next(fid, byte=8):
    """Move to next byte boundary."""
    now = fid.tell()
    if now % byte != 0:
        now = now - (now % byte) + byte
        fid.seek(now, 0)
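        # e.g. an offset of 13 with byte=8 moves the file position to 16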


def _read_filter(fid):
    """Read filter information."""
    f = dict()
    f["freq"] = _read_double(fid)[0]
    f["class"] = _read_int(fid)
    f["type"] = _read_int(fid)
    f["npar"] = _read_int2(fid)
    f["pars"] = _read_double(fid, f["npar"])
    return f


def _read_comp_coeff(fid, d):
    """Read compensation coefficients."""
    # Read the coefficients and initialize
    d["ncomp"] = _read_int2(fid)
    d["comp"] = list()
    # Read each record
    dt = np.dtype(
        [
            ("sensor_name", "S32"),
            ("coeff_type", ">i4"),
            ("d0", ">i4"),
            ("ncoeff", ">i2"),
            ("sensors", f"S{CTF.CTFV_SENSOR_LABEL}", CTF.CTFV_MAX_BALANCING),
            ("coeffs", ">f8", CTF.CTFV_MAX_BALANCING),
        ]
    )
    comps = np.fromfile(fid, dt, d["ncomp"])
    for k in range(d["ncomp"]):
        comp = dict()
        d["comp"].append(comp)
        comp["sensor_name"] = comps["sensor_name"][k].split(b"\x00")[0].decode("utf-8")
        comp["coeff_type"] = comps["coeff_type"][k].item()
        comp["ncoeff"] = comps["ncoeff"][k].item()
        comp["sensors"] = [
            s.split(b"\x00")[0].decode("utf-8")
            for s in comps["sensors"][k][: comp["ncoeff"]]
        ]
        comp["coeffs"] = comps["coeffs"][k][: comp["ncoeff"]]
        comp["scanno"] = d["ch_names"].index(comp["sensor_name"])


def _read_res4(dsdir):
    """Read the magical res4 file."""
    # adapted from read_res4.c
    name, _ = _make_ctf_name(dsdir, "res4")
    res = dict()
    with open(name, "rb") as fid:
        # Read the fields
        res["head"] = _read_string(fid, 8)
        res["appname"] = _read_string(fid, 256)
        res["origin"] = _read_string(fid, 256)
        res["desc"] = _read_string(fid, 256)
        res["nave"] = _read_int2(fid)
        res["data_time"] = _read_string(fid, 255)
        res["data_date"] = _read_string(fid, 255)
        # Seems that date and time can be swapped
        # (are they entered manually?!)
        if "/" in res["data_time"] and ":" in res["data_date"]:
            data_date = res["data_date"]
            res["data_date"] = res["data_time"]
            res["data_time"] = data_date
        res["nsamp"] = _read_int(fid)
        res["nchan"] = _read_int2(fid)
        _move_to_next(fid, 8)
        res["sfreq"] = _read_double(fid)[0]
        res["epoch_time"] = _read_double(fid)[0]
        res["no_trials"] = _read_int2(fid)
        _move_to_next(fid, 4)
        res["pre_trig_pts"] = _read_int(fid)
        res["no_trials_done"] = _read_int2(fid)
        res["no_trials_bst_message_windowlay"] = _read_int2(fid)
        _move_to_next(fid, 4)
        res["save_trials"] = _read_int(fid)
        res["primary_trigger"] = fid.read(1)
        res["secondary_trigger"] = [
            fid.read(1) for k in range(CTF.CTFV_MAX_AVERAGE_BINS)
        ]
        res["trigger_polarity_mask"] = fid.read(1)
        res["trigger_mode"] = _read_int2(fid)
        _move_to_next(fid, 4)
        res["accept_reject"] = _read_int(fid)
        res["run_time_bst_message_windowlay"] = _read_int2(fid)
        _move_to_next(fid, 4)
        res["zero_head"] = _read_int(fid)
        _move_to_next(fid, 4)
        res["artifact_mode"] = _read_int(fid)
        _read_int(fid)  # padding
        res["nf_run_name"] = _read_string(fid, 32)
        res["nf_run_title"] = _read_string(fid, 256)
        res["nf_instruments"] = _read_string(fid, 32)
        res["nf_collect_descriptor"] = _read_string(fid, 32)
        res["nf_subject_id"] = _read_string(fid, 32)
        res["nf_operator"] = _read_string(fid, 32)
        if len(res["nf_operator"]) == 0:
            res["nf_operator"] = None
        res["nf_sensor_file_name"] = _read_ustring(fid, 60)
        _move_to_next(fid, 4)
        res["rdlen"] = _read_int(fid)
        fid.seek(CTF.FUNNY_POS, 0)

        if res["rdlen"] > 0:
            res["run_desc"] = _read_string(fid, res["rdlen"])

        # Filters
        res["nfilt"] = _read_int2(fid)
        res["filters"] = list()
        for k in range(res["nfilt"]):
            res["filters"].append(_read_filter(fid))

        # Channel information (names, then data)
        res["ch_names"] = list()
        for k in range(res["nchan"]):
            ch_name = _read_string(fid, 32)
            res["ch_names"].append(ch_name)
        _coil_dt = np.dtype(
            [
                ("pos", ">f8", 3),
                ("d0", ">f8"),
                ("norm", ">f8", 3),
                ("d1", ">f8"),
                ("turns", ">i2"),
                ("d2", ">i4"),
                ("d3", ">i2"),
                ("area", ">f8"),
            ]
        )
        _ch_dt = np.dtype(
            [
                ("sensor_type_index", ">i2"),
                ("original_run_no", ">i2"),
                ("coil_type", ">i4"),
                ("proper_gain", ">f8"),
                ("qgain", ">f8"),
                ("io_gain", ">f8"),
                ("io_offset", ">f8"),
                ("num_coils", ">i2"),
                ("grad_order_no", ">i2"),
                ("d0", ">i4"),
                ("coil", _coil_dt, CTF.CTFV_MAX_COILS),
                ("head_coil", _coil_dt, CTF.CTFV_MAX_COILS),
            ]
        )
        chs = np.fromfile(fid, _ch_dt, res["nchan"])
        for coil in (chs["coil"], chs["head_coil"]):
            coil["pos"] /= 100.0
            coil["area"] *= 1e-4
        # convert to dict
        chs = [dict(zip(chs.dtype.names, x)) for x in chs]
        for ch in chs:
            for key, val in ch.items():
                ch[key] = _auto_cast(val)
        res["chs"] = chs
        for k in range(res["nchan"]):
            res["chs"][k]["ch_name"] = res["ch_names"][k]

        # The compensation coefficients
        _read_comp_coeff(fid, res)
    logger.info(" res4 data read.")
    return res


def _auto_cast(x):
    # Upcast scalars
    if isinstance(x, np.ScalarType):
        if x.dtype.kind == "i":
            if x.dtype != np.int64:
                x = x.astype(np.int64)
        elif x.dtype.kind == "f":
            if x.dtype != np.float64:
                x = x.astype(np.float64)
    return x
132
mne/io/ctf/trans.py
Normal file
@@ -0,0 +1,132 @@
"""Create coordinate transforms."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import numpy as np

from ..._fiff.constants import FIFF
from ...transforms import (
    Transform,
    _fit_matched_points,
    _quat_to_affine,
    apply_trans,
    combine_transforms,
    get_ras_to_neuromag_trans,
    invert_transform,
)
from ...utils import logger
from .constants import CTF


def _make_transform_card(fro, to, r_lpa, r_nasion, r_rpa):
    """Make a transform from cardinal landmarks."""
    return invert_transform(
        Transform(to, fro, get_ras_to_neuromag_trans(r_nasion, r_lpa, r_rpa))
    )


def _quaternion_align(from_frame, to_frame, from_pts, to_pts, diff_tol=1e-4):
    """Perform an alignment using the unit quaternions (modifies points)."""
    assert from_pts.shape[1] == to_pts.shape[1] == 3
    trans = _quat_to_affine(_fit_matched_points(from_pts, to_pts)[0])

    # Test the transformation and print the results
    logger.info(" Quaternion matching (desired vs. transformed):")
    for fro, to in zip(from_pts, to_pts):
        rr = apply_trans(trans, fro)
        diff = np.linalg.norm(to - rr)
        logger.info(
            " %7.2f %7.2f %7.2f mm <-> %7.2f %7.2f %7.2f mm "
            "(orig : %7.2f %7.2f %7.2f mm) diff = %8.3f mm"
            % (tuple(1000 * to) + tuple(1000 * rr) + tuple(1000 * fro) + (1000 * diff,))
        )
        if diff > diff_tol:
            raise RuntimeError(
                "Something is wrong: quaternion matching did not work (see above)"
            )
    return Transform(from_frame, to_frame, trans)


def _make_ctf_coord_trans_set(res4, coils):
    """Figure out the necessary coordinate transforms."""
    # CTF head > Neuromag head
    lpa = rpa = nas = T1 = T2 = T3 = T5 = None
    if coils is not None:
        for p in coils:
            if p["valid"] and (p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD):
                if lpa is None and p["kind"] == CTF.CTFV_COIL_LPA:
                    lpa = p
                elif rpa is None and p["kind"] == CTF.CTFV_COIL_RPA:
                    rpa = p
                elif nas is None and p["kind"] == CTF.CTFV_COIL_NAS:
                    nas = p
        if lpa is None or rpa is None or nas is None:
            raise RuntimeError(
                "Some of the mandatory HPI head-coordinate info was not there."
            )
        t = _make_transform_card("head", "ctf_head", lpa["r"], nas["r"], rpa["r"])
        T3 = invert_transform(t)

    # CTF device -> Neuromag device
    #
    # Rotate the CTF coordinate frame by 45 degrees and shift by 190 mm
    # in the z direction to get a coordinate system comparable to the
    # Neuromag one
    #
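    # The rotation block below is
    #     [[cos45, -sin45, 0],
    #      [sin45,  cos45, 0],
    #      [0,      0,     1]]
    # i.e. a +45 degree rotation about z, with cos45 = sin45 = sqrt(2) / 2.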
    R = np.eye(4)
    R[:3, 3] = [0.0, 0.0, 0.19]
    val = 0.5 * np.sqrt(2.0)
    R[0, 0] = val
    R[0, 1] = -val
    R[1, 0] = val
    R[1, 1] = val
    T4 = Transform("ctf_meg", "meg", R)

    # CTF device -> CTF head
    # We need to make the implicit transform explicit!
    h_pts = dict()
    d_pts = dict()
    kinds = (
        CTF.CTFV_COIL_LPA,
        CTF.CTFV_COIL_RPA,
        CTF.CTFV_COIL_NAS,
        CTF.CTFV_COIL_SPARE,
    )
    if coils is not None:
        for p in coils:
            if p["valid"]:
                if p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
                    for kind in kinds:
                        if kind not in h_pts and p["kind"] == kind:
                            h_pts[kind] = p["r"]
                elif p["coord_frame"] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE:
                    for kind in kinds:
                        if kind not in d_pts and p["kind"] == kind:
                            d_pts[kind] = p["r"]
        if any(kind not in h_pts for kind in kinds[:-1]):
            raise RuntimeError(
                "Some of the mandatory HPI head-coordinate info was not there."
            )
        if any(kind not in d_pts for kind in kinds[:-1]):
            raise RuntimeError(
                "Some of the mandatory HPI device-coordinate info was not there."
            )
        use_kinds = [kind for kind in kinds if (kind in h_pts and kind in d_pts)]
        r_head = np.array([h_pts[kind] for kind in use_kinds])
        r_dev = np.array([d_pts[kind] for kind in use_kinds])
        T2 = _quaternion_align("ctf_meg", "ctf_head", r_dev, r_head)

    # The final missing transform
    if T3 is not None and T2 is not None:
        T5 = combine_transforms(T2, T3, "ctf_meg", "head")
        T1 = combine_transforms(invert_transform(T4), T5, "meg", "head")
    s = dict(
        t_dev_head=T1,
        t_ctf_dev_ctf_head=T2,
        t_ctf_head_head=T3,
        t_ctf_dev_dev=T4,
        t_ctf_dev_head=T5,
    )
    logger.info(" Coordinate transformations established.")
    return s
7
mne/io/curry/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""Reader for CURRY data."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .curry import read_raw_curry
631
mne/io/curry/curry.py
Normal file
@@ -0,0 +1,631 @@
#
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os.path as op
import re
from collections import namedtuple
from datetime import datetime, timezone
from pathlib import Path

import numpy as np

from ..._fiff._digitization import _make_dig_points
from ..._fiff.constants import FIFF
from ..._fiff.meas_info import create_info
from ..._fiff.tag import _coil_trans_to_loc
from ..._fiff.utils import _mult_cal_one, _read_segments_file
from ...annotations import Annotations
from ...surface import _normal_orth
from ...transforms import (
    Transform,
    _angle_between_quats,
    apply_trans,
    combine_transforms,
    get_ras_to_neuromag_trans,
    invert_transform,
    rot_to_quat,
)
from ...utils import _check_fname, check_fname, logger, verbose
from ..base import BaseRaw
from ..ctf.trans import _quaternion_align

FILE_EXTENSIONS = {
    "Curry 7": {
        "info": ".dap",
        "data": ".dat",
        "labels": ".rs3",
        "events_cef": ".cef",
        "events_ceo": ".ceo",
        "hpi": ".hpi",
    },
    "Curry 8": {
        "info": ".cdt.dpa",
        "data": ".cdt",
        "labels": ".cdt.dpa",
        "events_cef": ".cdt.cef",
        "events_ceo": ".cdt.ceo",
        "hpi": ".cdt.hpi",
    },
}
CHANTYPES = {"meg": "_MAG1", "eeg": "", "misc": "_OTHERS"}
FIFFV_CHANTYPES = {
    "meg": FIFF.FIFFV_MEG_CH,
    "eeg": FIFF.FIFFV_EEG_CH,
    "misc": FIFF.FIFFV_MISC_CH,
}
FIFFV_COILTYPES = {
    "meg": FIFF.FIFFV_COIL_CTF_GRAD,
    "eeg": FIFF.FIFFV_COIL_EEG,
    "misc": FIFF.FIFFV_COIL_NONE,
}
SI_UNITS = dict(V=FIFF.FIFF_UNIT_V, T=FIFF.FIFF_UNIT_T)
SI_UNIT_SCALE = dict(c=1e-2, m=1e-3, u=1e-6, µ=1e-6, n=1e-9, p=1e-12, f=1e-15)

CurryParameters = namedtuple(
    "CurryParameters",
    "n_samples, sfreq, is_ascii, unit_dict, n_chans, dt_start, chanidx_in_file",
)


def _get_curry_version(file_extension):
    """Check out the curry file version."""
    return "Curry 8" if "cdt" in file_extension else "Curry 7"


def _get_curry_file_structure(fname, required=()):
    """Store paths to a dict and check for required files."""
    _msg = (
        "The following required files cannot be found: {0}.\nPlease make "
        "sure all required files are located in the same directory as {1}."
    )
    fname = Path(_check_fname(fname, "read", True, "fname"))

    # we don't use os.path.splitext to also handle extensions like .cdt.dpa;
    # this won't handle a dot in the filename, but it should handle one in
    # the parent directories
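    # e.g. "sample.cdt.dpa" -> base "sample", ext ".cdt.dpa" (-> "Curry 8")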
    fname_base = fname.name.split(".", maxsplit=1)[0]
    ext = fname.name[len(fname_base) :]
    fname_base = str(fname)
    fname_base = fname_base[: len(fname_base) - len(ext)]
    del fname
    version = _get_curry_version(ext)
    my_curry = dict()
    for key in ("info", "data", "labels", "events_cef", "events_ceo", "hpi"):
        fname = fname_base + FILE_EXTENSIONS[version][key]
        if op.isfile(fname):
            _key = "events" if key.startswith("events") else key
            my_curry[_key] = fname

    missing = [field for field in required if field not in my_curry]
    if missing:
        raise FileNotFoundError(_msg.format(np.unique(missing), fname))

    return my_curry


def _read_curry_lines(fname, regex_list):
    """Read through the lines of a curry parameter file and save data.

    Parameters
    ----------
    fname : path-like
        Path to a curry file.
    regex_list : list of str
        A list of strings or regular expressions to search within the file.
        Each element `regex` in `regex_list` must be formulated so that
        `regex + " START_LIST"` initiates the start and `regex + " END_LIST"`
        initiates the end of the elements that should be saved.

    Returns
    -------
    data_dict : dict
        A dictionary containing the extracted data. For each element `regex`
        in `regex_list` a dictionary key `data_dict[regex]` is created, which
        contains a list of the corresponding data.
    """
    save_lines = {}
    data_dict = {}

    for regex in regex_list:
        save_lines[regex] = False
        data_dict[regex] = []

    with open(fname) as fid:
        for line in fid:
            for regex in regex_list:
                if re.match(regex + " END_LIST", line):
                    save_lines[regex] = False

                if save_lines[regex] and line != "\n":
                    result = line.replace("\n", "")
                    if "\t" in result:
                        result = result.split("\t")
                    data_dict[regex].append(result)

                if re.match(regex + " START_LIST", line):
                    save_lines[regex] = True

    return data_dict


def _read_curry_parameters(fname):
    """Extract Curry params from a Curry info file."""
    _msg_match = (
        "The sampling frequency and the time steps extracted from "
        "the parameter file do not match."
    )
    _msg_invalid = "sfreq must be greater than 0. Got sfreq = {0}"

    var_names = [
        "NumSamples",
        "SampleFreqHz",
        "DataFormat",
        "SampleTimeUsec",
        "NumChannels",
        "StartYear",
        "StartMonth",
        "StartDay",
        "StartHour",
        "StartMin",
        "StartSec",
        "StartMillisec",
        "NUM_SAMPLES",
        "SAMPLE_FREQ_HZ",
        "DATA_FORMAT",
        "SAMPLE_TIME_USEC",
        "NUM_CHANNELS",
        "START_YEAR",
        "START_MONTH",
        "START_DAY",
        "START_HOUR",
        "START_MIN",
        "START_SEC",
        "START_MILLISEC",
    ]

    param_dict = dict()
    unit_dict = dict()

    with open(fname) as fid:
        for line in fid:
            if any(var_name in line for var_name in var_names):
                key, val = line.replace(" ", "").replace("\n", "").split("=")
                param_dict[key.lower().replace("_", "")] = val
            for key, type_ in CHANTYPES.items():
                if f"DEVICE_PARAMETERS{type_} START" in line:
                    data_unit = next(fid)
                    unit_dict[key] = (
                        data_unit.replace(" ", "").replace("\n", "").split("=")[-1]
                    )

    # look for CHAN_IN_FILE sections, which may or may not exist; issue #8391
    types = ["meg", "eeg", "misc"]
    chanidx_in_file = _read_curry_lines(
        fname, ["CHAN_IN_FILE" + CHANTYPES[key] for key in types]
    )

    n_samples = int(param_dict["numsamples"])
    sfreq = float(param_dict["samplefreqhz"])
    time_step = float(param_dict["sampletimeusec"]) * 1e-6
    is_ascii = param_dict["dataformat"] == "ASCII"
    n_channels = int(param_dict["numchannels"])
    try:
        dt_start = datetime(
            int(param_dict["startyear"]),
            int(param_dict["startmonth"]),
            int(param_dict["startday"]),
            int(param_dict["starthour"]),
            int(param_dict["startmin"]),
            int(param_dict["startsec"]),
            int(param_dict["startmillisec"]) * 1000,
            timezone.utc,
        )
        # Note that the time zone information is not stored in the Curry info
        # file, and it seems the start time info is in the local timezone
        # of the acquisition system (which is unknown); therefore, just set
        # the timezone to be UTC. If the user knows otherwise, they can
        # change it later. (Some Curry files might include StartOffsetUTCMin,
        # but its presence is unpredictable, so we won't rely on it.)
    except (ValueError, KeyError):
        dt_start = None  # if missing keywords or illegal values, don't set

    if time_step == 0:
        true_sfreq = sfreq
    elif sfreq == 0:
        true_sfreq = 1 / time_step
    elif not np.isclose(sfreq, 1 / time_step):
        raise ValueError(_msg_match)
    else:  # they're equal and != 0
        true_sfreq = sfreq
    if true_sfreq <= 0:
        raise ValueError(_msg_invalid.format(true_sfreq))

    return CurryParameters(
        n_samples,
        true_sfreq,
        is_ascii,
        unit_dict,
        n_channels,
        dt_start,
        chanidx_in_file,
    )


def _read_curry_info(curry_paths):
    """Extract info from curry parameter files."""
    curry_params = _read_curry_parameters(curry_paths["info"])
    R = np.eye(4)
    R[[0, 1], [0, 1]] = -1  # rotate 180 deg
    # shift down and back
    # (chosen by eyeballing to make the CTF helmet look roughly correct)
    R[:3, 3] = [0.0, -0.015, -0.12]
    curry_dev_dev_t = Transform("ctf_meg", "meg", R)

    # read labels from label files
    label_fname = curry_paths["labels"]
    types = ["meg", "eeg", "misc"]
    labels = _read_curry_lines(
        label_fname, ["LABELS" + CHANTYPES[key] for key in types]
    )
    sensors = _read_curry_lines(
        label_fname, ["SENSORS" + CHANTYPES[key] for key in types]
    )
    normals = _read_curry_lines(
        label_fname, ["NORMALS" + CHANTYPES[key] for key in types]
    )
    assert len(labels) == len(sensors) == len(normals)

    all_chans = list()
    dig_ch_pos = dict()
    for key in ["meg", "eeg", "misc"]:
        # The channel index position in the data file may or may not be
        # explicitly declared, based on the CHAN_IN_FILE section in the
        # info file.
        chanidx_is_explicit = (
            len(curry_params.chanidx_in_file["CHAN_IN_FILE" + CHANTYPES[key]]) > 0
        )
        for ind, chan in enumerate(labels["LABELS" + CHANTYPES[key]]):
            # By default, assume the channel index in the data file follows
            # the order of the channel names as we found them in the labels
            # file ...
            chanidx = len(all_chans) + 1
            if chanidx_is_explicit:
                # ... but if explicitly declared, use that index number.
                chanidx = int(
                    curry_params.chanidx_in_file["CHAN_IN_FILE" + CHANTYPES[key]][ind]
                )
            if chanidx <= 0:
                # If chanidx was explicitly declared to be "0", the channel
                # is not actually saved in the data file (e.g. the "Ref"
                # channel), so don't add it to our list (see gh-8391).
                continue
            ch = {
                "ch_name": chan,
                "unit": curry_params.unit_dict[key],
                "kind": FIFFV_CHANTYPES[key],
                "coil_type": FIFFV_COILTYPES[key],
                "ch_idx": chanidx,
            }
            if key == "eeg":
                loc = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float)
                # XXX just the sensor, where is ref (next 3)?
                assert loc.shape == (3,)
                loc /= 1000.0  # to meters
                loc = np.concatenate([loc, np.zeros(9)])
                ch["loc"] = loc
                # XXX need to check/ensure this
                ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD
                dig_ch_pos[chan] = loc[:3]
            elif key == "meg":
                pos = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float)
                pos /= 1000.0  # to meters
                pos = pos[:3]  # just the inner coil
                pos = apply_trans(curry_dev_dev_t, pos)
                nn = np.array(normals["NORMALS" + CHANTYPES[key]][ind], float)
                assert np.isclose(np.linalg.norm(nn), 1.0, atol=1e-4)
                nn /= np.linalg.norm(nn)
                nn = apply_trans(curry_dev_dev_t, nn, move=False)
                trans = np.eye(4)
                trans[:3, 3] = pos
                trans[:3, :3] = _normal_orth(nn).T
                ch["loc"] = _coil_trans_to_loc(trans)
                ch["coord_frame"] = FIFF.FIFFV_COORD_DEVICE
            all_chans.append(ch)
    dig = _make_dig_points(
        dig_ch_pos=dig_ch_pos, coord_frame="head", add_missing_fiducials=True
    )
    del dig_ch_pos

    # Ensure that we have assembled the same number of channels as declared
    # in the DATA_PARAMETERS section of the info (.DAP) file (see gh-8391).
    ch_count = len(all_chans)
    assert ch_count == curry_params.n_chans

    # Sort the channels to ensure they match the order in which they were
    # recorded in the data file. In general they most likely are already in
    # the correct order, but if the channel index in the data file was
    # explicitly declared we might as well use it.
    all_chans = sorted(all_chans, key=lambda ch: ch["ch_idx"])

    ch_names = [chan["ch_name"] for chan in all_chans]
    info = create_info(ch_names, curry_params.sfreq)
    with info._unlock():
        info["meas_date"] = curry_params.dt_start  # see gh-8398
        info["dig"] = dig
    _make_trans_dig(curry_paths, info, curry_dev_dev_t)

    for ind, ch_dict in enumerate(info["chs"]):
        all_chans[ind].pop("ch_idx")
        ch_dict.update(all_chans[ind])
        assert ch_dict["loc"].shape == (12,)
        ch_dict["unit"] = SI_UNITS[all_chans[ind]["unit"][1]]
        ch_dict["cal"] = SI_UNIT_SCALE[all_chans[ind]["unit"][0]]

    return info, curry_params.n_samples, curry_params.is_ascii


_card_dict = {
    "Left ear": FIFF.FIFFV_POINT_LPA,
    "Nasion": FIFF.FIFFV_POINT_NASION,
    "Right ear": FIFF.FIFFV_POINT_RPA,
}


def _make_trans_dig(curry_paths, info, curry_dev_dev_t):
    # Coordinate frame transformations and definitions
    no_msg = "Leaving device<->head transform as None"
    info["dev_head_t"] = None
    label_fname = curry_paths["labels"]
    key = "LANDMARKS" + CHANTYPES["meg"]
    lm = _read_curry_lines(label_fname, [key])[key]
    lm = np.array(lm, float)
    lm.shape = (-1, 3)
    if len(lm) == 0:
        # no dig
        logger.info(no_msg + " (no landmarks found)")
        return
    lm /= 1000.0
    key = "LM_REMARKS" + CHANTYPES["meg"]
    remarks = _read_curry_lines(label_fname, [key])[key]
    assert len(remarks) == len(lm)
    with info._unlock():
        info["dig"] = list()
    cards = dict()
    for remark, r in zip(remarks, lm):
        kind = ident = None
        if remark in _card_dict:
            kind = FIFF.FIFFV_POINT_CARDINAL
            ident = _card_dict[remark]
            cards[ident] = r
        elif remark.startswith("HPI"):
            kind = FIFF.FIFFV_POINT_HPI
            ident = int(remark[3:]) - 1
        if kind is not None:
            info["dig"].append(
                dict(kind=kind, ident=ident, r=r, coord_frame=FIFF.FIFFV_COORD_UNKNOWN)
            )
    with info._unlock():
        info["dig"].sort(key=lambda x: (x["kind"], x["ident"]))
    has_cards = len(cards) == 3
    has_hpi = "hpi" in curry_paths
    if has_cards and has_hpi:  # have all three
        logger.info("Composing device<->head transformation from dig points")
        hpi_u = np.array(
            [d["r"] for d in info["dig"] if d["kind"] == FIFF.FIFFV_POINT_HPI], float
        )
        hpi_c = np.ascontiguousarray(_first_hpi(curry_paths["hpi"])[: len(hpi_u), 1:4])
        unknown_curry_t = _quaternion_align("unknown", "ctf_meg", hpi_u, hpi_c, 1e-2)
        angle = np.rad2deg(
            _angle_between_quats(
                np.zeros(3), rot_to_quat(unknown_curry_t["trans"][:3, :3])
            )
        )
        dist = 1000 * np.linalg.norm(unknown_curry_t["trans"][:3, 3])
        logger.info(f" Fit a {angle:0.1f}° rotation, {dist:0.1f} mm translation")
        unknown_dev_t = combine_transforms(
            unknown_curry_t, curry_dev_dev_t, "unknown", "meg"
        )
        unknown_head_t = Transform(
            "unknown",
            "head",
            get_ras_to_neuromag_trans(
                *(
                    cards[key]
                    for key in (
                        FIFF.FIFFV_POINT_NASION,
                        FIFF.FIFFV_POINT_LPA,
                        FIFF.FIFFV_POINT_RPA,
                    )
                )
            ),
        )
        with info._unlock():
            info["dev_head_t"] = combine_transforms(
                invert_transform(unknown_dev_t), unknown_head_t, "meg", "head"
            )
        for d in info["dig"]:
            d.update(
                coord_frame=FIFF.FIFFV_COORD_HEAD,
                r=apply_trans(unknown_head_t, d["r"]),
            )
    else:
        if has_cards:
            no_msg += " (no .hpi file found)"
        elif has_hpi:
            no_msg += " (not all cardinal points found)"
        else:
            no_msg += " (neither cardinal points nor .hpi file found)"
        logger.info(no_msg)


def _first_hpi(fname):
    # Get the first HPI result
    with open(fname) as fid:
        for line in fid:
            line = line.strip()
            if any(x in line for x in ("FileVersion", "NumCoils")) or not line:
                continue
            hpi = np.array(line.split(), float)
            break
        else:
            raise RuntimeError(f"Could not find valid HPI in {fname}")
    # the time stamp is the first entry; drop it
    assert hpi.ndim == 1
    hpi = hpi[1:]
    hpi.shape = (-1, 5)
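    # five values per coil; columns 1:4 hold the coil position (in mm here,
    # converted to meters below), which _make_trans_dig aligns to the dig
    # points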
    hpi /= 1000.0
    return hpi


def _read_events_curry(fname):
    """Read events from Curry event files.

    Parameters
    ----------
    fname : path-like
        Path to a curry event file with extensions .cef, .ceo,
        .cdt.cef, or .cdt.ceo.

    Returns
    -------
    events : ndarray, shape (n_events, 3)
        The array of events.
    """
    check_fname(
        fname,
        "curry event",
        (".cef", ".ceo", ".cdt.cef", ".cdt.ceo"),
        endings_err=(".cef", ".ceo", ".cdt.cef", ".cdt.ceo"),
    )

    events_dict = _read_curry_lines(fname, ["NUMBER_LIST"])
    # The first 3 columns seem to contain the event information
    curry_events = np.array(events_dict["NUMBER_LIST"], dtype=int)[:, 0:3]

    return curry_events


def _read_annotations_curry(fname, sfreq="auto"):
    r"""Read annotations from Curry event files.

    Parameters
    ----------
    fname : str
        The filename.
    sfreq : float | 'auto'
        The sampling frequency in the file. If set to 'auto' then the
        ``sfreq`` is taken from the respective info file of the same name with
        the corresponding file extension (\*.dap for Curry 7; \*.cdt.dpa for
        Curry 8). So data.cef looks in data.dap and data.cdt.cef looks in
        data.cdt.dpa.

    Returns
    -------
    annot : instance of Annotations | None
        The annotations.
    """
    required = ["events", "info"] if sfreq == "auto" else ["events"]
    curry_paths = _get_curry_file_structure(fname, required)
    events = _read_events_curry(curry_paths["events"])

    if sfreq == "auto":
        sfreq = _read_curry_parameters(curry_paths["info"]).sfreq

    onset = events[:, 0] / sfreq
    duration = np.zeros(events.shape[0])
    description = events[:, 2]

    return Annotations(onset, duration, description)


@verbose
def read_raw_curry(fname, preload=False, verbose=None) -> "RawCurry":
    """Read raw data from Curry files.

    Parameters
    ----------
    fname : path-like
        Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``,
        ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``.
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawCurry
        A Raw object containing Curry data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawCurry.
    """
    return RawCurry(fname, preload, verbose)


class RawCurry(BaseRaw):
    """Raw object from Curry file.

    Parameters
    ----------
    fname : path-like
        Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``,
        ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``.
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """

    @verbose
    def __init__(self, fname, preload=False, verbose=None):
        curry_paths = _get_curry_file_structure(
            fname, required=["info", "data", "labels"]
        )

        data_fname = op.abspath(curry_paths["data"])

        info, n_samples, is_ascii = _read_curry_info(curry_paths)

        last_samps = [n_samples - 1]
        raw_extras = dict(is_ascii=is_ascii)

        super().__init__(
            info,
            preload,
            filenames=[data_fname],
            last_samps=last_samps,
            orig_format="int",
            raw_extras=[raw_extras],
            verbose=verbose,
        )

        if "events" in curry_paths:
            logger.info(
                "Event file found. Extracting Annotations from "
                f"{curry_paths['events']}..."
            )
            annots = _read_annotations_curry(
                curry_paths["events"], sfreq=self.info["sfreq"]
            )
            self.set_annotations(annots)
        else:
            logger.info("Event file not found. No Annotations set.")

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        if self._raw_extras[fi]["is_ascii"]:
            if isinstance(idx, slice):
                idx = np.arange(idx.start, idx.stop)
            block = np.loadtxt(
                self.filenames[0], skiprows=start, max_rows=stop - start, ndmin=2
            ).T
            _mult_cal_one(data, block, idx, cals, mult)

        else:
            _read_segments_file(
                self, data, idx, fi, start, stop, cals, mult, dtype="<f4"
            )
7
mne/io/edf/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""EDF+/BDF module for conversion to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .edf import read_raw_edf, read_raw_bdf, read_raw_gdf
2038
mne/io/edf/edf.py
Normal file
File diff suppressed because it is too large
7
mne/io/eeglab/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""EEGLAB module for conversion to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .eeglab import read_raw_eeglab, read_epochs_eeglab
83
mne/io/eeglab/_eeglab.py
Normal file
@@ -0,0 +1,83 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import numpy as np

try:
    from scipy.io.matlab import MatlabFunction, MatlabOpaque
except ImportError:  # scipy < 1.8
    from scipy.io.matlab.mio5 import MatlabFunction
    from scipy.io.matlab.mio5_params import MatlabOpaque
from scipy.io import loadmat

from ...utils import _import_pymatreader_funcs


def _todict_from_np_struct(data):  # taken from pymatreader.utils
    data_dict = {}

    for cur_field_name in data.dtype.names:
        try:
            n_items = len(data[cur_field_name])
            cur_list = []

            for idx in np.arange(n_items):
                cur_value = data[cur_field_name].item(idx)
                cur_value = _check_for_scipy_mat_struct(cur_value)
                cur_list.append(cur_value)

            data_dict[cur_field_name] = cur_list
        except TypeError:
            cur_value = data[cur_field_name].item(0)
            cur_value = _check_for_scipy_mat_struct(cur_value)
            data_dict[cur_field_name] = cur_value

    return data_dict


def _handle_scipy_ndarray(data):  # taken from pymatreader.utils
    if data.dtype == np.dtype("object") and not isinstance(data, MatlabFunction):
        as_list = []
        for element in data:
            as_list.append(_check_for_scipy_mat_struct(element))
        data = as_list
    elif isinstance(data.dtype.names, tuple):
        data = _todict_from_np_struct(data)
        data = _check_for_scipy_mat_struct(data)

    if isinstance(data, np.ndarray):
        data = np.array(data)

    return data


def _check_for_scipy_mat_struct(data):  # taken from pymatreader.utils
    """Convert all scipy.io.matlab.mio5_params.mat_struct elements."""
    if isinstance(data, dict):
        for key in data:
            data[key] = _check_for_scipy_mat_struct(data[key])

    if isinstance(data, MatlabOpaque):
        try:
            if data[0][2] == b"string":
                return None
        except IndexError:
            pass

    if isinstance(data, np.ndarray):
        data = _handle_scipy_ndarray(data)

    return data


def _readmat(fname, uint16_codec=None):
    try:
        read_mat = _import_pymatreader_funcs("EEGLAB I/O")
    except RuntimeError:  # pymatreader not installed
        eeg = loadmat(
            fname, squeeze_me=True, mat_dtype=False, uint16_codec=uint16_codec
        )
        return _check_for_scipy_mat_struct(eeg)
    else:
        return read_mat(fname, uint16_codec=uint16_codec)
825
mne/io/eeglab/eeglab.py
Normal file
@@ -0,0 +1,825 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os.path as op
from os import PathLike
from pathlib import Path

import numpy as np

from mne.utils.check import _check_option

from ..._fiff._digitization import _ensure_fiducials_head
from ..._fiff.constants import FIFF
from ..._fiff.meas_info import create_info
from ..._fiff.pick import _PICK_TYPES_KEYS
from ..._fiff.utils import _find_channels, _read_segments_file
from ...annotations import Annotations, read_annotations
from ...channels import make_dig_montage
from ...defaults import DEFAULTS
from ...epochs import BaseEpochs
from ...event import read_events
from ...utils import (
    Bunch,
    _check_fname,
    _check_head_radius,
    fill_doc,
    logger,
    verbose,
    warn,
)
from ..base import BaseRaw
from ._eeglab import _readmat

# just fix the scaling for now, EEGLAB doesn't seem to provide this info
CAL = 1e-6


def _check_eeglab_fname(fname, dataname):
    """Check whether the filename is valid.

    Check if the file extension is ``.fdt`` (the older ``.dat`` is invalid) or
    whether the ``EEG.data`` filename exists. If the ``EEG.data`` file is
    absent, the set file name with ``.set`` changed to ``.fdt`` is checked.
    """
    fmt = str(op.splitext(dataname)[-1])
    if fmt == ".dat":
        raise NotImplementedError(
            "Old data format .dat detected. Please update your EEGLAB "
            "version and resave the data in .fdt format"
        )

    basedir = op.dirname(fname)
    data_fname = op.join(basedir, dataname)
    if not op.exists(data_fname):
        fdt_from_set_fname = op.splitext(fname)[0] + ".fdt"
        if op.exists(fdt_from_set_fname):
            data_fname = fdt_from_set_fname
            msg = (
                "Data file name in EEG.data ({}) is incorrect, the file "
                "name must have changed on disk, using the correct file "
                "name ({})."
            )
            warn(msg.format(dataname, op.basename(fdt_from_set_fname)))
        elif data_fname != fdt_from_set_fname:
            msg = "Could not find the .fdt data file, tried {} and {}."
            raise FileNotFoundError(msg.format(data_fname, fdt_from_set_fname))
    return data_fname


def _check_load_mat(fname, uint16_codec):
    """Check if the mat struct contains 'EEG'."""
    fname = _check_fname(fname, "read", True)
    eeg = _readmat(fname, uint16_codec=uint16_codec)
    if "ALLEEG" in eeg:
        raise NotImplementedError(
            "Loading an ALLEEG array is not supported. Please contact "
            "mne-python developers for more information."
        )
    if "EEG" in eeg:  # fields are contained in EEG structure
        eeg = eeg["EEG"]
    eeg = eeg.get("EEG", eeg)  # handle nested EEG structure
    eeg = Bunch(**eeg)
    eeg.trials = int(eeg.trials)
    eeg.nbchan = int(eeg.nbchan)
    eeg.pnts = int(eeg.pnts)
    return eeg


def _to_loc(ll):
    """Check if location exists."""
    if isinstance(ll, int | float) or len(ll) > 0:
        return ll
    else:
        return np.nan


def _eeg_has_montage_information(eeg):
    try:
        from scipy.io.matlab import mat_struct
    except ImportError:  # SciPy < 1.8
        from scipy.io.matlab.mio5_params import mat_struct
    if not len(eeg.chanlocs):
        has_pos = False
    else:
        pos_fields = ["X", "Y", "Z"]
        if isinstance(eeg.chanlocs[0], mat_struct):
            has_pos = all(hasattr(eeg.chanlocs[0], fld) for fld in pos_fields)
        elif isinstance(eeg.chanlocs[0], np.ndarray):
            # Old files
            has_pos = all(fld in eeg.chanlocs[0].dtype.names for fld in pos_fields)
        elif isinstance(eeg.chanlocs[0], dict):
            # new files
            has_pos = all(fld in eeg.chanlocs[0] for fld in pos_fields)
        else:
            has_pos = False  # unknown (sometimes we get [0, 0])

    return has_pos


def _get_montage_information(eeg, get_pos, *, montage_units):
    """Get channel name, type and montage information from ['chanlocs']."""
    ch_names, ch_types, pos_ch_names, pos = list(), list(), list(), list()
    unknown_types = dict()
    for chanloc in eeg.chanlocs:
        # channel name
        ch_names.append(chanloc["labels"])

        # channel type
        ch_type = "eeg"
        try_type = chanloc.get("type", None)
        if isinstance(try_type, str):
            try_type = try_type.strip().lower()
            if try_type in _PICK_TYPES_KEYS:
                ch_type = try_type
            else:
                if try_type in unknown_types:
                    unknown_types[try_type].append(chanloc["labels"])
                else:
                    unknown_types[try_type] = [chanloc["labels"]]
        ch_types.append(ch_type)

        # channel loc
        if get_pos:
            loc_x = _to_loc(chanloc["X"])
            loc_y = _to_loc(chanloc["Y"])
            loc_z = _to_loc(chanloc["Z"])
            locs = np.r_[-loc_y, loc_x, loc_z]
|
||||
pos_ch_names.append(chanloc["labels"])
|
||||
pos.append(locs)
|
||||
|
||||
# warn if unknown types were provided
|
||||
if len(unknown_types):
|
||||
warn(
|
||||
"Unknown types found, setting as type EEG:\n"
|
||||
+ "\n".join(
|
||||
[
|
||||
f"{key}: {sorted(unknown_types[key])}"
|
||||
for key in sorted(unknown_types)
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
lpa, rpa, nasion = None, None, None
|
||||
if hasattr(eeg, "chaninfo") and isinstance(eeg.chaninfo["nodatchans"], dict):
|
||||
nodatchans = eeg.chaninfo["nodatchans"]
|
||||
types = nodatchans.get("type", [])
|
||||
descriptions = nodatchans.get("description", [])
|
||||
xs = nodatchans.get("X", [])
|
||||
ys = nodatchans.get("Y", [])
|
||||
zs = nodatchans.get("Z", [])
|
||||
|
||||
for type_, description, x, y, z in zip(types, descriptions, xs, ys, zs):
|
||||
if type_ != "FID":
|
||||
continue
|
||||
if description == "Nasion":
|
||||
nasion = np.array([x, y, z])
|
||||
elif description == "Right periauricular point":
|
||||
rpa = np.array([x, y, z])
|
||||
elif description == "Left periauricular point":
|
||||
lpa = np.array([x, y, z])
|
||||
|
||||
# Always check this even if it's not used
|
||||
_check_option("montage_units", montage_units, ("m", "dm", "cm", "mm", "auto"))
|
||||
if pos_ch_names:
|
||||
pos_array = np.array(pos, float)
|
||||
pos_array.shape = (-1, 3)
|
||||
|
||||
# roughly estimate head radius and check if its reasonable
|
||||
is_nan_pos = np.isnan(pos).any(axis=1)
|
||||
if not is_nan_pos.all():
|
||||
mean_radius = np.mean(np.linalg.norm(pos_array[~is_nan_pos], axis=1))
|
||||
scale_units = _handle_montage_units(montage_units, mean_radius)
|
||||
mean_radius *= scale_units
|
||||
pos_array *= scale_units
|
||||
additional_info = (
|
||||
" Check if the montage_units argument is correct (the default "
|
||||
'is "mm", but your channel positions may be in different units'
|
||||
")."
|
||||
)
|
||||
_check_head_radius(mean_radius, add_info=additional_info)
|
||||
|
||||
montage = make_dig_montage(
|
||||
ch_pos=dict(zip(ch_names, pos_array)),
|
||||
coord_frame="head",
|
||||
lpa=lpa,
|
||||
rpa=rpa,
|
||||
nasion=nasion,
|
||||
)
|
||||
_ensure_fiducials_head(montage.dig)
|
||||
else:
|
||||
montage = None
|
||||
|
||||
return ch_names, ch_types, montage
|
||||
|
||||
|
||||
def _get_info(eeg, *, eog, montage_units):
|
||||
"""Get measurement info."""
|
||||
# add the ch_names and info['chs'][idx]['loc']
|
||||
if not isinstance(eeg.chanlocs, np.ndarray) and eeg.nbchan == 1:
|
||||
eeg.chanlocs = [eeg.chanlocs]
|
||||
|
||||
if isinstance(eeg.chanlocs, dict):
|
||||
eeg.chanlocs = _dol_to_lod(eeg.chanlocs)
|
||||
|
||||
eeg_has_ch_names_info = len(eeg.chanlocs) > 0
|
||||
|
||||
if eeg_has_ch_names_info:
|
||||
has_pos = _eeg_has_montage_information(eeg)
|
||||
ch_names, ch_types, eeg_montage = _get_montage_information(
|
||||
eeg, has_pos, montage_units=montage_units
|
||||
)
|
||||
update_ch_names = False
|
||||
else: # if eeg.chanlocs is empty, we still need default chan names
|
||||
ch_names = [f"EEG {ii:03d}" for ii in range(eeg.nbchan)]
|
||||
ch_types = "eeg"
|
||||
eeg_montage = None
|
||||
update_ch_names = True
|
||||
|
||||
info = create_info(ch_names, sfreq=eeg.srate, ch_types=ch_types)
|
||||
|
||||
eog = _find_channels(ch_names, ch_type="EOG") if eog == "auto" else eog
|
||||
for idx, ch in enumerate(info["chs"]):
|
||||
ch["cal"] = CAL
|
||||
if ch["ch_name"] in eog or idx in eog:
|
||||
ch["coil_type"] = FIFF.FIFFV_COIL_NONE
|
||||
ch["kind"] = FIFF.FIFFV_EOG_CH
|
||||
|
||||
return info, eeg_montage, update_ch_names
|
||||
|
||||
|
||||
def _set_dig_montage_in_init(self, montage):
|
||||
"""Set EEG sensor configuration and head digitization from when init.
|
||||
|
||||
This is done from the information within fname when
|
||||
read_raw_eeglab(fname) or read_epochs_eeglab(fname).
|
||||
"""
|
||||
if montage is None:
|
||||
self.set_montage(None)
|
||||
else:
|
||||
missing_channels = set(self.ch_names) - set(montage.ch_names)
|
||||
ch_pos = dict(
|
||||
zip(list(missing_channels), np.full((len(missing_channels), 3), np.nan))
|
||||
)
|
||||
self.set_montage(montage + make_dig_montage(ch_pos=ch_pos, coord_frame="head"))
|
||||
|
||||
|
||||
def _handle_montage_units(montage_units, mean_radius):
|
||||
if montage_units == "auto":
|
||||
# radius should be between 0.05 and 0.11 meters
|
||||
if mean_radius < 0.25:
|
||||
montage_units = "m"
|
||||
elif mean_radius < 2.5:
|
||||
montage_units = "dm"
|
||||
elif mean_radius < 25:
|
||||
montage_units = "cm"
|
||||
else: # mean_radius >= 25
|
||||
montage_units = "mm"
|
||||
prefix = montage_units[:-1]
|
||||
scale_units = 1 / DEFAULTS["prefixes"][prefix]
|
||||
return scale_units
|
||||
|
||||
|
||||
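

# For illustration, the "auto" detection above picks the unit whose order of
# magnitude makes the mean sensor radius plausible for a human head
# (~0.05-0.11 m), then returns the factor that rescales positions to meters
# (via DEFAULTS["prefixes"]). Roughly:
#
#     >>> _handle_montage_units("auto", 0.095)  # positions already in meters
#     1.0
#     >>> _handle_montage_units("auto", 95.0)   # positions in millimeters
#     0.001
#
# (Illustrative values only; the exact scale factors and return types depend
# on DEFAULTS["prefixes"].)

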
@fill_doc
def read_raw_eeglab(
    input_fname,
    eog=(),
    preload=False,
    uint16_codec=None,
    montage_units="auto",
    verbose=None,
) -> "RawEEGLAB":
    r"""Read an EEGLAB .set file.

    Parameters
    ----------
    input_fname : path-like
        Path to the ``.set`` file. If the data is stored in a separate ``.fdt``
        file, it is expected to be in the same folder as the ``.set`` file.
    eog : list | tuple | ``'auto'``
        Names or indices of channels that should be designated EOG channels.
        If 'auto', the channel names containing ``EOG`` or ``EYE`` are used.
        Defaults to empty tuple.
    %(preload)s
        Note that ``preload=False`` will be effective only if the data is
        stored in a separate binary file.
    %(uint16_codec)s
    %(montage_units)s

        .. versionchanged:: 1.6
           Support for ``'auto'`` was added and is the new default.
    %(verbose)s

    Returns
    -------
    raw : instance of RawEEGLAB
        A Raw object containing EEGLAB .set data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawEEGLAB.

    Notes
    -----
    .. versionadded:: 0.11.0
    """
    return RawEEGLAB(
        input_fname=input_fname,
        preload=preload,
        eog=eog,
        uint16_codec=uint16_codec,
        montage_units=montage_units,
        verbose=verbose,
    )
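

# Illustrative usage (hypothetical file names; when the data are stored
# separately, the matching .fdt file must sit next to the .set file):
#
#     raw = read_raw_eeglab("subject01.set", preload=True)
#     raw = read_raw_eeglab("subject01.set", eog=["HEOG", "VEOG"],
#                           montage_units="mm")

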
@fill_doc
def read_epochs_eeglab(
    input_fname,
    events=None,
    event_id=None,
    eog=(),
    *,
    uint16_codec=None,
    montage_units="auto",
    verbose=None,
) -> "EpochsEEGLAB":
    r"""Reader function for EEGLAB epochs files.

    Parameters
    ----------
    input_fname : path-like
        Path to the ``.set`` file. If the data is stored in a separate ``.fdt``
        file, it is expected to be in the same folder as the ``.set`` file.
    events : path-like | array, shape (n_events, 3) | None
        Path to events file. If array, it is the events typically returned
        by the read_events function. If some events don't match the events
        of interest as specified by event_id, they will be marked as 'IGNORED'
        in the drop log. If None, it is constructed from the EEGLAB (.set) file
        with each unique event encoded with a different integer.
    event_id : int | list of int | dict | None
        The id of the event to consider. If dict, the keys can later be used
        to access associated events.
        Example::

            {"auditory": 1, "visual": 3}

        If int, a dict will be created with
        the id as string. If a list, all events with the IDs specified
        in the list are used. If None, the event_id is constructed from the
        EEGLAB (.set) file with each description copied from ``eventtype``.
    eog : list | tuple | 'auto'
        Names or indices of channels that should be designated EOG channels.
        If 'auto', the channel names containing ``EOG`` or ``EYE`` are used.
        Defaults to empty tuple.
    %(uint16_codec)s
    %(montage_units)s

        .. versionchanged:: 1.6
           Support for ``'auto'`` was added and is the new default.
    %(verbose)s

    Returns
    -------
    EpochsEEGLAB : instance of BaseEpochs
        The epochs.

    See Also
    --------
    mne.Epochs : Documentation of attributes and methods.

    Notes
    -----
    .. versionadded:: 0.11.0
    """
    epochs = EpochsEEGLAB(
        input_fname=input_fname,
        events=events,
        eog=eog,
        event_id=event_id,
        uint16_codec=uint16_codec,
        montage_units=montage_units,
        verbose=verbose,
    )
    return epochs


@fill_doc
class RawEEGLAB(BaseRaw):
    r"""Raw object from EEGLAB .set file.

    Parameters
    ----------
    input_fname : path-like
        Path to the ``.set`` file. If the data is stored in a separate ``.fdt``
        file, it is expected to be in the same folder as the ``.set`` file.
    eog : list | tuple | 'auto'
        Names or indices of channels that should be designated EOG channels.
        If 'auto', the channel names containing ``EOG`` or ``EYE`` are used.
        Defaults to empty tuple.
    %(preload)s
        Note that ``preload=False`` will be effective only if the data is
        stored in a separate binary file.
    %(uint16_codec)s
    %(montage_units)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.

    Notes
    -----
    .. versionadded:: 0.11.0
    """

    @verbose
    def __init__(
        self,
        input_fname,
        eog=(),
        preload=False,
        *,
        uint16_codec=None,
        montage_units="auto",
        verbose=None,
    ):
        input_fname = str(_check_fname(input_fname, "read", True, "input_fname"))
        eeg = _check_load_mat(input_fname, uint16_codec)
        if eeg.trials != 1:
            raise TypeError(
                f"The number of trials is {eeg.trials:d}. It must be 1 for raw"
                " files. Please use `mne.io.read_epochs_eeglab` if"
                " the .set file contains epochs."
            )

        last_samps = [eeg.pnts - 1]
        info, eeg_montage, _ = _get_info(eeg, eog=eog, montage_units=montage_units)

        # read the data
        if isinstance(eeg.data, str):
            data_fname = _check_eeglab_fname(input_fname, eeg.data)
            logger.info(f"Reading {data_fname}")

            super().__init__(
                info,
                preload,
                filenames=[data_fname],
                last_samps=last_samps,
                orig_format="double",
                verbose=verbose,
            )
        else:
            if preload is False or isinstance(preload, str):
                warn(
                    "Data will be preloaded. preload=False or a string "
                    "preload is not supported when the data is stored in "
                    "the .set file"
                )
            # can't be done in standard way with preload=True because of
            # different reading path (.set file)
            if eeg.nbchan == 1 and len(eeg.data.shape) == 1:
                n_chan, n_times = [1, eeg.data.shape[0]]
            else:
                n_chan, n_times = eeg.data.shape
            data = np.empty((n_chan, n_times), dtype=float)
            data[:n_chan] = eeg.data
            data *= CAL
            super().__init__(
                info,
                data,
                filenames=[input_fname],
                last_samps=last_samps,
                orig_format="double",
                verbose=verbose,
            )

        # create event_ch from annotations
        annot = read_annotations(input_fname, uint16_codec=uint16_codec)
        self.set_annotations(annot)
        _check_boundary(annot, None)

        _set_dig_montage_in_init(self, eeg_montage)

        latencies = np.round(annot.onset * self.info["sfreq"])
        _check_latencies(latencies)

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype="<f4")


class EpochsEEGLAB(BaseEpochs):
    r"""Epochs from EEGLAB .set file.

    Parameters
    ----------
    input_fname : path-like
        Path to the ``.set`` file. If the data is stored in a separate ``.fdt``
        file, it is expected to be in the same folder as the ``.set`` file.
    events : path-like | array, shape (n_events, 3) | None
        Path to events file. If array, it is the events typically returned
        by the read_events function. If some events don't match the events
        of interest as specified by event_id, they will be marked as 'IGNORED'
        in the drop log. If None, it is constructed from the EEGLAB (.set) file
        with each unique event encoded with a different integer.
    event_id : int | list of int | dict | None
        The id of the event to consider. If dict,
        the keys can later be used to access associated events. Example:
        dict(auditory=1, visual=3). If int, a dict will be created with
        the id as string. If a list, all events with the IDs specified
        in the list are used. If None, the event_id is constructed from the
        EEGLAB (.set) file with each description copied from ``eventtype``.
    tmin : float
        Start time before event.
    baseline : None or tuple of length 2 (default (None, 0))
        The time interval to apply baseline correction.
        If None do not apply it. If baseline is (a, b)
        the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used
        and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
        interval is used.
        The baseline (a, b) includes both endpoints, i.e. all
        timepoints t such that a <= t <= b.
    reject : dict | None
        Rejection parameters based on peak-to-peak amplitude.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
        If reject is None then no rejection is done. Example::

            reject = dict(grad=4000e-13, # T / m (gradiometers)
                          mag=4e-12, # T (magnetometers)
                          eeg=40e-6, # V (EEG channels)
                          eog=250e-6 # V (EOG channels)
                          )
    flat : dict | None
        Rejection parameters based on flatness of signal.
        Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
        are floats that set the minimum acceptable peak-to-peak amplitude.
        If flat is None then no rejection is done.
    reject_tmin : scalar | None
        Start of the time window used to reject epochs (with the default None,
        the window will start with tmin).
    reject_tmax : scalar | None
        End of the time window used to reject epochs (with the default None,
        the window will end with tmax).
    eog : list | tuple | 'auto'
        Names or indices of channels that should be designated EOG channels.
        If 'auto', the channel names containing ``EOG`` or ``EYE`` are used.
        Defaults to empty tuple.
    %(uint16_codec)s
    %(montage_units)s
    %(verbose)s

    See Also
    --------
    mne.Epochs : Documentation of attributes and methods.

    Notes
    -----
    .. versionadded:: 0.11.0
    """

    @verbose
    def __init__(
        self,
        input_fname,
        events=None,
        event_id=None,
        tmin=0,
        baseline=None,
        reject=None,
        flat=None,
        reject_tmin=None,
        reject_tmax=None,
        eog=(),
        uint16_codec=None,
        montage_units="auto",
        verbose=None,
    ):
        input_fname = str(
            _check_fname(fname=input_fname, must_exist=True, overwrite="read")
        )
        eeg = _check_load_mat(input_fname, uint16_codec)

        if not (
            (events is None and event_id is None)
            or (events is not None and event_id is not None)
        ):
            raise ValueError("Both `events` and `event_id` must be None or not None")

        if eeg.trials <= 1:
            raise ValueError(
                "The file does not seem to contain epochs "
                "(trials less than 2). "
                "You should try using the read_raw_eeglab function."
            )

        if events is None and eeg.trials > 1:
            # first extract the events and construct an event_id dict
            event_name, event_latencies, unique_ev = list(), list(), list()
            ev_idx = 0
            warn_multiple_events = False
            epochs = _bunchify(eeg.epoch)
            events = _bunchify(eeg.event)
            for ep in epochs:
                if isinstance(ep.eventtype, int | float):
                    ep.eventtype = str(ep.eventtype)
                if not isinstance(ep.eventtype, str):
                    event_type = "/".join([str(et) for et in ep.eventtype])
                    event_name.append(event_type)
                    # store latency of only first event
                    event_latencies.append(events[ev_idx].latency)
                    ev_idx += len(ep.eventtype)
                    warn_multiple_events = True
                else:
                    event_type = ep.eventtype
                    event_name.append(ep.eventtype)
                    event_latencies.append(events[ev_idx].latency)
                    ev_idx += 1

                if event_type not in unique_ev:
                    unique_ev.append(event_type)

                # invent event dict but use id > 0 so you know it's a trigger
                event_id = {ev: idx + 1 for idx, ev in enumerate(unique_ev)}

            # warn about multiple events in epoch if necessary
            if warn_multiple_events:
                warn(
                    "At least one epoch has multiple events. Only the latency"
                    " of the first event will be retained."
                )

            # now fill up the event array
            events = np.zeros((eeg.trials, 3), dtype=int)
            for idx in range(0, eeg.trials):
                if idx == 0:
                    prev_stim = 0
                elif idx > 0 and event_latencies[idx] - event_latencies[idx - 1] == 1:
                    prev_stim = event_id[event_name[idx - 1]]
                events[idx, 0] = event_latencies[idx]
                events[idx, 1] = prev_stim
                events[idx, 2] = event_id[event_name[idx]]
        elif isinstance(events, str | Path | PathLike):
            events = read_events(events)

        logger.info(f"Extracting parameters from {input_fname}...")
        info, eeg_montage, _ = _get_info(eeg, eog=eog, montage_units=montage_units)

        for key, val in event_id.items():
            if val not in events[:, 2]:
                raise ValueError(f"No matching events found for {key} (event id {val})")

        if isinstance(eeg.data, str):
            data_fname = _check_eeglab_fname(input_fname, eeg.data)
            with open(data_fname, "rb") as data_fid:
                data = np.fromfile(data_fid, dtype=np.float32)
                data = data.reshape((eeg.nbchan, eeg.pnts, eeg.trials), order="F")
        else:
            data = eeg.data

        if eeg.nbchan == 1 and len(data.shape) == 2:
            data = data[np.newaxis, :]
        data = data.transpose((2, 0, 1)).astype("double")
        data *= CAL
        assert data.shape == (eeg.trials, eeg.nbchan, eeg.pnts)
        tmin, tmax = eeg.xmin, eeg.xmax

        super().__init__(
            info,
            data,
            events,
            event_id,
            tmin,
            tmax,
            baseline,
            reject=reject,
            flat=flat,
            reject_tmin=reject_tmin,
            reject_tmax=reject_tmax,
            filename=input_fname,
            verbose=verbose,
        )

        # data are preloaded but _bad_dropped is not set so we do it here:
        self._bad_dropped = True

        _set_dig_montage_in_init(self, eeg_montage)

        logger.info("Ready.")


def _check_boundary(annot, event_id):
    if event_id is None:
        event_id = dict()
    if "boundary" in annot.description and "boundary" not in event_id:
        warn(
            "The data contains 'boundary' events, indicating data "
            "discontinuities. Be cautious of filtering and epoching around "
            "these events."
        )


def _check_latencies(latencies):
    if (latencies < -1).any():
        raise ValueError(
            "At least one event sample index is negative. Please"
            " check if EEG.event.sample values are correct."
        )
    if (latencies == -1).any():
        warn(
            "At least one event has a sample index of -1. This usually is "
            "a consequence of how EEGLAB handles event latency after "
            "resampling - especially when you had a boundary event at the "
            "beginning of the file. Please make sure that the events at "
            "the very beginning of your EEGLAB file can be safely dropped "
            "(e.g., because they are boundary events)."
        )


def _bunchify(items):
    if isinstance(items, dict):
        items = _dol_to_lod(items)
    if len(items) > 0 and isinstance(items[0], dict):
        items = [Bunch(**item) for item in items]
    return items


def _read_annotations_eeglab(eeg, uint16_codec=None):
    r"""Create Annotations from EEGLAB file.

    This function reads the event attribute from the EEGLAB
    structure and makes an :class:`mne.Annotations` object.

    Parameters
    ----------
    eeg : object | str | Path
        'EEG' struct or the path to the (EEGLAB) .set file.
    uint16_codec : str | None
        If your \*.set file contains non-ascii characters, sometimes reading
        it may fail and give rise to an error message stating that "buffer is
        too small". ``uint16_codec`` allows you to specify what codec (for
        example: 'latin1' or 'utf-8') should be used when reading character
        arrays and can therefore help you solve this problem.

    Returns
    -------
    annotations : instance of Annotations
        The annotations present in the file.
    """
    if isinstance(eeg, str | Path | PathLike):
        eeg = _check_load_mat(eeg, uint16_codec=uint16_codec)

    if not hasattr(eeg, "event"):
        events = []
    elif isinstance(eeg.event, dict) and np.array(eeg.event["latency"]).ndim > 0:
        events = _dol_to_lod(eeg.event)
    elif not isinstance(eeg.event, np.ndarray | list):
        events = [eeg.event]
    else:
        events = eeg.event
    events = _bunchify(events)
    description = [str(event.type) for event in events]
    onset = [event.latency - 1 for event in events]
    duration = np.zeros(len(onset))
    if len(events) > 0 and hasattr(events[0], "duration"):
        for idx, event in enumerate(events):
            # empty duration fields are read as empty arrays
            is_empty_array = (
                isinstance(event.duration, np.ndarray) and len(event.duration) == 0
            )
            duration[idx] = np.nan if is_empty_array else event.duration

    # Drop events with NaN onset; see PR #12484
    valid_indices = [
        idx for idx, onset_idx in enumerate(onset) if not np.isnan(onset_idx)
    ]
    n_dropped = len(onset) - len(valid_indices)
    if len(valid_indices) != len(onset):
        warn(
            f"{n_dropped} events have an onset that is NaN. These values are "
            "usually ignored by EEGLAB and will be dropped from the "
            "annotations."
        )

    onset = np.array([onset[idx] for idx in valid_indices])
    duration = np.array([duration[idx] for idx in valid_indices])
    description = [description[idx] for idx in valid_indices]

    return Annotations(
        onset=np.array(onset) / eeg.srate,
        duration=duration / eeg.srate,
        description=description,
        orig_time=None,
    )


def _dol_to_lod(dol):
    """Convert a dict of lists to a list of dicts."""
    return [
        {key: dol[key][ii] for key in dol.keys()}
        for ii in range(len(dol[list(dol.keys())[0]]))
    ]
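

# For illustration, a hypothetical EEGLAB event struct read back as a dict of
# parallel lists becomes one dict per event:
#
#     >>> _dol_to_lod({"type": ["stim", "resp"], "latency": [10, 42]})
#     [{'type': 'stim', 'latency': 10}, {'type': 'resp', 'latency': 42}]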
8
mne/io/egi/__init__.py
Normal file
8
mne/io/egi/__init__.py
Normal file
@@ -0,0 +1,8 @@
"""EGI module for conversion to FIF."""
|
||||
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
from .egi import read_raw_egi
|
||||
from .egimff import read_evokeds_mff
|
||||
332
mne/io/egi/egi.py
Normal file
332
mne/io/egi/egi.py
Normal file
@@ -0,0 +1,332 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import datetime
import time

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _empty_info
from ..._fiff.utils import _create_chs, _read_segments_file
from ...annotations import Annotations
from ...utils import _check_fname, _validate_type, logger, verbose
from ..base import BaseRaw
from .egimff import _read_raw_egi_mff
from .events import _combine_triggers, _triage_include_exclude


def _read_header(fid):
    """Read EGI binary header."""
    version = np.fromfile(fid, "<i4", 1)[0]

    if version > 6 & ~np.bitwise_and(version, 6):
        version = version.byteswap().astype(np.uint32)
    else:
        raise ValueError("Watch out. This does not seem to be a simple binary EGI file.")

    def my_fread(*x, **y):
        return int(np.fromfile(*x, **y)[0])

    info = dict(
        version=version,
        year=my_fread(fid, ">i2", 1),
        month=my_fread(fid, ">i2", 1),
        day=my_fread(fid, ">i2", 1),
        hour=my_fread(fid, ">i2", 1),
        minute=my_fread(fid, ">i2", 1),
        second=my_fread(fid, ">i2", 1),
        millisecond=my_fread(fid, ">i4", 1),
        samp_rate=my_fread(fid, ">i2", 1),
        n_channels=my_fread(fid, ">i2", 1),
        gain=my_fread(fid, ">i2", 1),
        bits=my_fread(fid, ">i2", 1),
        value_range=my_fread(fid, ">i2", 1),
    )

    unsegmented = 1 if np.bitwise_and(version, 1) == 0 else 0
    precision = np.bitwise_and(version, 6)
    if precision == 0:
        raise RuntimeError("Floating point precision is undefined.")

    if unsegmented:
        info.update(
            dict(
                n_categories=0,
                n_segments=1,
                n_samples=int(np.fromfile(fid, ">i4", 1)[0]),
                n_events=int(np.fromfile(fid, ">i2", 1)[0]),
                event_codes=[],
                category_names=[],
                category_lengths=[],
                pre_baseline=0,
            )
        )
        for event in range(info["n_events"]):
            event_codes = "".join(np.fromfile(fid, "S1", 4).astype("U1"))
            info["event_codes"].append(event_codes)
    else:
        raise NotImplementedError("Only continuous files are supported")
    info["unsegmented"] = unsegmented
    info["dtype"], info["orig_format"] = {
        2: (">i2", "short"),
        4: (">f4", "float"),
        6: (">f8", "double"),
    }[precision]
    info["dtype"] = np.dtype(info["dtype"])
    return info
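

# Sketch of the version-word decoding applied above (as implied by the checks
# and the precision map in _read_header):
#
#     unsegmented = (version & 1) == 0  # bit 0 flags segmented files
#     precision = version & 6           # 2 -> int16, 4 -> float32, 6 -> float64

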
def _read_events(fid, info):
    """Read events."""
    events = np.zeros([info["n_events"], info["n_segments"] * info["n_samples"]])
    fid.seek(36 + info["n_events"] * 4, 0)  # skip header
    for si in range(info["n_samples"]):
        # skip data channels
        fid.seek(info["n_channels"] * info["dtype"].itemsize, 1)
        # read event channels
        events[:, si] = np.fromfile(fid, info["dtype"], info["n_events"])
    return events
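

# Layout assumed by _read_events: after the 36-byte header plus 4 bytes per
# event code, each time point stores the data channels first and then the
# event channels, i.e.
#
#     [ch_1 ... ch_N, ev_1 ... ev_M][ch_1 ... ch_N, ev_1 ... ev_M] ...
#
# so the reader seeks past n_channels values and reads n_events values per
# sample.

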
@verbose
def read_raw_egi(
    input_fname,
    eog=None,
    misc=None,
    include=None,
    exclude=None,
    preload=False,
    channel_naming="E%d",
    *,
    events_as_annotations=True,
    verbose=None,
) -> "RawEGI":
    """Read EGI simple binary as raw object.

    Parameters
    ----------
    input_fname : path-like
        Path to the raw file. Files with an extension ``.mff`` are
        automatically considered to be EGI's native MFF format files.
    eog : list or tuple
        Names of channels or list of indices that should be designated
        EOG channels. Default is None.
    misc : list or tuple
        Names of channels or list of indices that should be designated
        MISC channels. Default is None.
    include : None | list
        The event channels to be included when creating the synthetic
        trigger or annotations. Defaults to None.
        Note. Overrides ``exclude`` parameter.
    exclude : None | list
        The event channels to be ignored when creating the synthetic
        trigger or annotations. Defaults to None. If None, the ``sync`` and ``TREV``
        channels will be ignored. This is ignored when ``include`` is not None.
    %(preload)s

        .. versionadded:: 0.11
    channel_naming : str
        Channel naming convention for the data channels. Defaults to ``'E%%d'``
        (resulting in channel names ``'E1'``, ``'E2'``, ``'E3'``...). The
        effective default prior to 0.14.0 was ``'EEG %%03d'``.

        .. versionadded:: 0.14.0

    events_as_annotations : bool
        If True (default), annotations are created from experiment events. If
        False, a synthetic trigger channel ``STI 014`` is created from
        experiment events. See the Notes section for details.
        The default changed from False to True in version 1.9.

        .. versionadded:: 1.8.0
    %(verbose)s

    Returns
    -------
    raw : instance of RawEGI
        A Raw object containing EGI data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawEGI.

    Notes
    -----
    When ``events_as_annotations=True``, event codes on stimulus channels like
    ``DIN1`` are stored as annotations with the ``description`` set to the stimulus
    channel name.

    When ``events_as_annotations=False`` and events are present on the included
    stimulus channels, a new stim channel ``STI 014`` will be synthesized from the
    events. It will contain 1-sample pulses where the Netstation file had event
    timestamps. A ``raw.event_id`` dictionary is added to the raw object that will have
    arbitrary sequential integer IDs for the events. This will fail if any timestamps
    are duplicated. The ``event_id`` will also not survive a save/load roundtrip.

    For these reasons, it is recommended to use ``events_as_annotations=True``.
    """
    _validate_type(input_fname, "path-like", "input_fname")
    input_fname = str(input_fname)
    _validate_type(events_as_annotations, bool, "events_as_annotations")

    if input_fname.rstrip("/\\").endswith(".mff"):  # allows .mff or .mff/
        return _read_raw_egi_mff(
            input_fname,
            eog,
            misc,
            include,
            exclude,
            preload,
            channel_naming,
            events_as_annotations=events_as_annotations,
            verbose=verbose,
        )
    return RawEGI(
        input_fname,
        eog,
        misc,
        include,
        exclude,
        preload,
        channel_naming,
        events_as_annotations=events_as_annotations,
        verbose=verbose,
    )
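

# Illustrative usage (hypothetical file names):
#
#     raw = read_raw_egi("recording.raw", preload=True)  # EGI simple binary
#     raw = read_raw_egi("recording.mff")                # EGI native MFF
#     events, event_id = mne.events_from_annotations(raw)

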
class RawEGI(BaseRaw):
    """Raw object from EGI simple binary file."""

    _extra_attributes = ("event_id",)

    @verbose
    def __init__(
        self,
        input_fname,
        eog=None,
        misc=None,
        include=None,
        exclude=None,
        preload=False,
        channel_naming="E%d",
        *,
        events_as_annotations=True,
        verbose=None,
    ):
        input_fname = str(_check_fname(input_fname, "read", True, "input_fname"))
        if eog is None:
            eog = []
        if misc is None:
            misc = []
        with open(input_fname, "rb") as fid:  # 'rb' important for py3k
            logger.info(f"Reading EGI header from {input_fname}...")
            egi_info = _read_header(fid)
            logger.info("    Reading events ...")
            egi_events = _read_events(fid, egi_info)  # update info + jump
            if egi_info["value_range"] != 0 and egi_info["bits"] != 0:
                cal = egi_info["value_range"] / 2.0 ** egi_info["bits"]
            else:
                cal = 1e-6

        logger.info("    Assembling measurement info ...")

        event_codes = egi_info["event_codes"]
        include = _triage_include_exclude(include, exclude, egi_events, egi_info)
        if egi_info["n_events"] > 0 and not events_as_annotations:
            event_ids = np.arange(len(include)) + 1
            logger.info('    Synthesizing trigger channel "STI 014" ...')
            egi_info["new_trigger"] = _combine_triggers(
                egi_events[[e in include for e in event_codes]], remapping=event_ids
            )
            self.event_id = dict(
                zip([e for e in event_codes if e in include], event_ids)
            )
        else:
            self.event_id = None
            egi_info["new_trigger"] = None
        info = _empty_info(egi_info["samp_rate"])
        my_time = datetime.datetime(
            egi_info["year"],
            egi_info["month"],
            egi_info["day"],
            egi_info["hour"],
            egi_info["minute"],
            egi_info["second"],
        )
        my_timestamp = time.mktime(my_time.timetuple())
        info["meas_date"] = (my_timestamp, 0)
        ch_names = [channel_naming % (i + 1) for i in range(egi_info["n_channels"])]
        cals = np.repeat(cal, len(ch_names))
        ch_names.extend(list(event_codes))
        cals = np.concatenate([cals, np.ones(egi_info["n_events"])])
        if egi_info["new_trigger"] is not None:
            ch_names.append("STI 014")  # our new_trigger
            cals = np.concatenate([cals, [1.0]])
        ch_coil = FIFF.FIFFV_COIL_EEG
        ch_kind = FIFF.FIFFV_EEG_CH
        chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc)
        sti_ch_idx = [
            i
            for i, name in enumerate(ch_names)
            if name.startswith("STI") or name in event_codes
        ]
        for idx in sti_ch_idx:
            chs[idx].update(
                {
                    "unit_mul": FIFF.FIFF_UNITM_NONE,
                    "kind": FIFF.FIFFV_STIM_CH,
                    "coil_type": FIFF.FIFFV_COIL_NONE,
                    "unit": FIFF.FIFF_UNIT_NONE,
                    "loc": np.zeros(12),
                }
            )
        info["chs"] = chs
        info._unlocked = False
        info._update_redundant()
        orig_format = (
            egi_info["orig_format"] if egi_info["orig_format"] != "float" else "single"
        )
        super().__init__(
            info,
            preload,
            orig_format=orig_format,
            filenames=[input_fname],
            last_samps=[egi_info["n_samples"] - 1],
            raw_extras=[egi_info],
            verbose=verbose,
        )
        if events_as_annotations:
            annot = dict(onset=list(), duration=list(), description=list())
            for code, row in zip(egi_info["event_codes"], egi_events):
                if code not in include:
                    continue
                onset = np.where(row)[0] / self.info["sfreq"]
                annot["onset"].extend(onset)
                annot["duration"].extend([0.0] * len(onset))
                annot["description"].extend([code] * len(onset))
            # guard on actual events found (the dict itself is always truthy)
            if len(annot["onset"]):
                self.set_annotations(Annotations(**annot))

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a segment of data from a file."""
        egi_info = self._raw_extras[fi]
        dtype = egi_info["dtype"]
        n_chan_read = egi_info["n_channels"] + egi_info["n_events"]
        offset = 36 + egi_info["n_events"] * 4
        trigger_ch = egi_info["new_trigger"]
        _read_segments_file(
            self,
            data,
            idx,
            fi,
            start,
            stop,
            cals,
            mult,
            dtype=dtype,
            n_channels=n_chan_read,
            offset=offset,
            trigger_ch=trigger_ch,
        )
974
mne/io/egi/egimff.py
Normal file
974
mne/io/egi/egimff.py
Normal file
@@ -0,0 +1,974 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

"""EGI NetStation Load Function."""

import datetime
import math
import os.path as op
import re
from collections import OrderedDict
from pathlib import Path

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _empty_info, _ensure_meas_date_none_or_dt, create_info
from ..._fiff.proj import setup_proj
from ..._fiff.utils import _create_chs, _mult_cal_one
from ...annotations import Annotations
from ...channels.montage import make_dig_montage
from ...evoked import EvokedArray
from ...utils import _check_fname, _check_option, _soft_import, logger, verbose, warn
from ..base import BaseRaw
from .events import _combine_triggers, _read_events, _triage_include_exclude
from .general import (
    _block_r,
    _extract,
    _get_blocks,
    _get_ep_info,
    _get_gains,
    _get_signalfname,
)

REFERENCE_NAMES = ("VREF", "Vertex Reference")


def _read_mff_header(filepath):
    """Read mff header."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    all_files = _get_signalfname(filepath)
    eeg_file = all_files["EEG"]["signal"]
    eeg_info_file = all_files["EEG"]["info"]

    info_filepath = op.join(filepath, "info.xml")  # add with filepath
    tags = ["mffVersion", "recordTime"]
    version_and_date = _extract(tags, filepath=info_filepath)
    version = ""
    if len(version_and_date["mffVersion"]):
        version = version_and_date["mffVersion"][0]

    fname = op.join(filepath, eeg_file)
    signal_blocks = _get_blocks(fname)
    epochs = _get_ep_info(filepath)
    summaryinfo = dict(eeg_fname=eeg_file, info_fname=eeg_info_file)
    summaryinfo.update(signal_blocks)
    # sanity check and update relevant values
    record_time = version_and_date["recordTime"][0]
    # e.g.,
    # 2018-07-30T10:47:01.021673-04:00
    # 2017-09-20T09:55:44.072000000+01:00
    g = re.match(
        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.(\d{6}(?:\d{3})?)[+-]\d{2}:\d{2}",  # noqa: E501
        record_time,
    )
    if g is None:
        raise RuntimeError(f"Could not parse recordTime {repr(record_time)}")
    frac = g.groups()[0]
    assert len(frac) in (6, 9) and all(f.isnumeric() for f in frac)  # regex
    div = 1000 if len(frac) == 6 else 1000000
    for key in ("last_samps", "first_samps"):
        # convert from times in µs to samples
        for ei, e in enumerate(epochs[key]):
            if e % div != 0:
                raise RuntimeError(f"Could not parse epoch time {e}")
            epochs[key][ei] = e // div
        epochs[key] = np.array(epochs[key], np.uint64)
        # I guess they refer to times in milliseconds?
        # What we really need to do here is:
        # epochs[key] *= signal_blocks['sfreq']
        # epochs[key] //= 1000
        # But that multiplication risks an overflow, so let's only multiply
        # by what we need to (e.g., a sample rate of 500 means we can multiply
        # by 1 and divide by 2 rather than multiplying by 500 and dividing by
        # 1000)
        numerator = int(signal_blocks["sfreq"])
        denominator = 1000
        this_gcd = math.gcd(numerator, denominator)
        numerator = numerator // this_gcd
        denominator = denominator // this_gcd
        with np.errstate(over="raise"):
            epochs[key] *= numerator
            epochs[key] //= denominator
        # Should be safe to cast to int now, which makes things later not
        # upbroadcast to float
        epochs[key] = epochs[key].astype(np.int64)
    n_samps_block = signal_blocks["samples_block"].sum()
    n_samps_epochs = (epochs["last_samps"] - epochs["first_samps"]).sum()
    bad = (
        n_samps_epochs != n_samps_block
        or not (epochs["first_samps"] < epochs["last_samps"]).all()
        or not (epochs["first_samps"][1:] >= epochs["last_samps"][:-1]).all()
    )
    if bad:
        raise RuntimeError(
            "EGI epoch first/last samps could not be parsed:\n"
            f'{list(epochs["first_samps"])}\n{list(epochs["last_samps"])}'
        )
    summaryinfo.update(epochs)
    # index which samples in raw are actually readable from disk (i.e., not
    # in a skip)
    disk_samps = np.full(epochs["last_samps"][-1], -1)
    offset = 0
    for first, last in zip(epochs["first_samps"], epochs["last_samps"]):
        n_this = last - first
        disk_samps[first:last] = np.arange(offset, offset + n_this)
        offset += n_this
    summaryinfo["disk_samps"] = disk_samps

    # Add the sensor info.
    sensor_layout_file = op.join(filepath, "sensorLayout.xml")
    sensor_layout_obj = parse(sensor_layout_file)
    summaryinfo["device"] = sensor_layout_obj.getElementsByTagName("name")[
        0
    ].firstChild.data
    sensors = sensor_layout_obj.getElementsByTagName("sensor")
    chan_type = list()
    chan_unit = list()
    n_chans = 0
    numbers = list()  # used for identification
    for sensor in sensors:
        sensortype = int(sensor.getElementsByTagName("type")[0].firstChild.data)
        if sensortype in [0, 1]:
            sn = sensor.getElementsByTagName("number")[0].firstChild.data
            sn = sn.encode()
            numbers.append(sn)
            chan_type.append("eeg")
            chan_unit.append("uV")
            n_chans = n_chans + 1
    if n_chans != summaryinfo["n_channels"]:
        raise RuntimeError(
            f"Number of defined channels ({n_chans}) did not match the "
            f"expected channels ({summaryinfo['n_channels']})."
        )

    # Check presence of PNS data
    pns_names = []
    if "PNS" in all_files:
        pns_fpath = op.join(filepath, all_files["PNS"]["signal"])
        pns_blocks = _get_blocks(pns_fpath)
        pns_samples = pns_blocks["samples_block"]
        signal_samples = signal_blocks["samples_block"]
        same_blocks = np.array_equal(
            pns_samples[:-1], signal_samples[:-1]
        ) and pns_samples[-1] in (signal_samples[-1] - np.arange(2))
        if not same_blocks:
            raise RuntimeError(
                "PNS and signals samples did not match:\n"
                f"{list(pns_samples)}\nvs\n{list(signal_samples)}"
            )

        pns_file = op.join(filepath, "pnsSet.xml")
        pns_obj = parse(pns_file)
        sensors = pns_obj.getElementsByTagName("sensor")
        pns_types = []
        pns_units = []
        for sensor in sensors:
            # sensor number:
            # sensor.getElementsByTagName('number')[0].firstChild.data
            name = sensor.getElementsByTagName("name")[0].firstChild.data
            unit_elem = sensor.getElementsByTagName("unit")[0].firstChild
            unit = ""
            if unit_elem is not None:
                unit = unit_elem.data

            if name == "ECG":
                ch_type = "ecg"
            elif "EMG" in name:
                ch_type = "emg"
            else:
                ch_type = "bio"
            pns_types.append(ch_type)
            pns_units.append(unit)
            pns_names.append(name)

        summaryinfo.update(
            pns_types=pns_types,
            pns_units=pns_units,
            pns_fname=all_files["PNS"]["signal"],
            pns_sample_blocks=pns_blocks,
        )
    summaryinfo.update(
        pns_names=pns_names,
        version=version,
        date=version_and_date["recordTime"][0],
        chan_type=chan_type,
        chan_unit=chan_unit,
        numbers=numbers,
    )

    return summaryinfo
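

# Worked example of the overflow-safe ms -> samples conversion above: with
# sfreq=500 Hz the reduced fraction is
#
#     >>> import math
#     >>> 500 // math.gcd(500, 1000), 1000 // math.gcd(500, 1000)
#     (1, 2)
#
# so an epoch boundary at 2000 ms becomes 2000 * 1 // 2 == 1000 samples
# without ever forming the overflow-prone intermediate 2000 * 500.

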
def _read_header(input_fname):
    """Obtain the headers from the file package mff.

    Parameters
    ----------
    input_fname : path-like
        Path for the file.

    Returns
    -------
    info : dict
        Main headers set.
    """
    input_fname = str(input_fname)  # cast any Paths to str
    mff_hdr = _read_mff_header(input_fname)
    with open(input_fname + "/signal1.bin", "rb") as fid:
        version = np.fromfile(fid, np.int32, 1)[0]

    """
    the datetime.strptime .f directive (milliseconds)
    will only accept up to 6 digits. if there are more than
    six millisecond digits in the provided timestamp string
    (i.e. because of trailing zeros, as in test_egi_pns.mff)
    then slice both the first 26 elements and the last 6
    elements of the timestamp string to truncate the
    milliseconds to 6 digits and extract the timezone,
    and then piece these together and assign back to mff_hdr['date']
    """
    if len(mff_hdr["date"]) > 32:
        dt, tz = [mff_hdr["date"][:26], mff_hdr["date"][-6:]]
        mff_hdr["date"] = dt + tz

    time_n = datetime.datetime.strptime(mff_hdr["date"], "%Y-%m-%dT%H:%M:%S.%f%z")

    info = dict(
        version=version,
        meas_dt_local=time_n,
        utc_offset=time_n.strftime("%z"),
        gain=0,
        bits=0,
        value_range=0,
    )
    info.update(
        n_categories=0,
        n_segments=1,
        n_events=0,
        event_codes=[],
        category_names=[],
        category_lengths=[],
        pre_baseline=0,
    )
    info.update(mff_hdr)
    return info
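

# For illustration, the truncation above turns a 9-digit fractional-seconds
# stamp (which the %f directive cannot parse) into a 6-digit one, using the
# example timestamp from the _read_mff_header comment:
#
#     >>> s = "2017-09-20T09:55:44.072000000+01:00"
#     >>> s[:26] + s[-6:]
#     '2017-09-20T09:55:44.072000+01:00'

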
def _get_eeg_calibration_info(filepath, egi_info):
    """Calculate calibration info for EEG channels."""
    gains = _get_gains(op.join(filepath, egi_info["info_fname"]))
    if egi_info["value_range"] != 0 and egi_info["bits"] != 0:
        cals = [egi_info["value_range"] / 2 ** egi_info["bits"]] * len(
            egi_info["chan_type"]
        )
    else:
        cal_scales = {"uV": 1e-6, "V": 1}
        cals = [cal_scales[t] for t in egi_info["chan_unit"]]
    if "gcal" in gains:
        cals *= gains["gcal"]
    return cals


def _read_locs(filepath, egi_info, channel_naming):
    """Read channel locations."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    fname = op.join(filepath, "coordinates.xml")
    if not op.exists(fname):
        logger.warning("File coordinates.xml not found, not setting channel locations")
        ch_names = [channel_naming % (i + 1) for i in range(egi_info["n_channels"])]
        return ch_names, None
    dig_ident_map = {
        "Left periauricular point": "lpa",
        "Right periauricular point": "rpa",
        "Nasion": "nasion",
    }
    numbers = np.array(egi_info["numbers"])
    coordinates = parse(fname)
    sensors = coordinates.getElementsByTagName("sensor")
    ch_pos = OrderedDict()
    hsp = list()
    nlr = dict()
    ch_names = list()

    for sensor in sensors:
        name_element = sensor.getElementsByTagName("name")[0].firstChild
        num_element = sensor.getElementsByTagName("number")[0].firstChild
        name = (
            channel_naming % int(num_element.data)
            if name_element is None
            else name_element.data
        )
        nr = num_element.data.encode()
        coords = [
            float(sensor.getElementsByTagName(coord)[0].firstChild.data)
            for coord in "xyz"
        ]
        loc = np.array(coords) / 100  # cm -> m
        # create dig entry
        if name in dig_ident_map:
            nlr[dig_ident_map[name]] = loc
        else:
            # id_ is the index of the channel in egi_info['numbers']
            id_ = np.flatnonzero(numbers == nr)
            # if it's not in egi_info['numbers'], it's a headshape point
            if len(id_) == 0:
                hsp.append(loc)
            # not HSP, must be a data or reference channel
            else:
                ch_names.append(name)
                ch_pos[name] = loc
    mon = make_dig_montage(ch_pos=ch_pos, hsp=hsp, **nlr)
    return ch_names, mon


def _add_pns_channel_info(chs, egi_info, ch_names):
    """Add info for PNS channels to channel info dict."""
    for i_ch, ch_name in enumerate(egi_info["pns_names"]):
        idx = ch_names.index(ch_name)
        ch_type = egi_info["pns_types"][i_ch]
        type_to_kind_map = {"ecg": FIFF.FIFFV_ECG_CH, "emg": FIFF.FIFFV_EMG_CH}
        ch_kind = type_to_kind_map.get(ch_type, FIFF.FIFFV_BIO_CH)
        ch_unit = FIFF.FIFF_UNIT_V
        ch_cal = 1e-6
        if egi_info["pns_units"][i_ch] != "uV":
            ch_unit = FIFF.FIFF_UNIT_NONE
            ch_cal = 1.0
        chs[idx].update(
            cal=ch_cal, kind=ch_kind, coil_type=FIFF.FIFFV_COIL_NONE, unit=ch_unit
        )
    return chs


@verbose
def _read_raw_egi_mff(
    input_fname,
    eog=None,
    misc=None,
    include=None,
    exclude=None,
    preload=False,
    channel_naming="E%d",
    *,
    events_as_annotations=True,
    verbose=None,
):
    """Read EGI mff binary as raw object."""
    return RawMff(
        input_fname,
        eog,
        misc,
        include,
        exclude,
        preload,
        channel_naming,
        events_as_annotations=events_as_annotations,
        verbose=verbose,
    )


class RawMff(BaseRaw):
|
||||
"""RawMff class."""
|
||||
|
||||
_extra_attributes = ("event_id",)
|
||||
|
||||
@verbose
|
||||
def __init__(
|
||||
self,
|
||||
input_fname,
|
||||
eog=None,
|
||||
misc=None,
|
||||
include=None,
|
||||
exclude=None,
|
||||
preload=False,
|
||||
channel_naming="E%d",
|
||||
*,
|
||||
events_as_annotations=True,
|
||||
verbose=None,
|
||||
):
|
||||
"""Init the RawMff class."""
|
||||
input_fname = str(
|
||||
_check_fname(
|
||||
input_fname,
|
||||
"read",
|
||||
True,
|
||||
"input_fname",
|
||||
need_dir=True,
|
||||
)
|
||||
)
|
||||
logger.info(f"Reading EGI MFF Header from {input_fname}...")
|
||||
egi_info = _read_header(input_fname)
|
||||
if eog is None:
|
||||
eog = []
|
||||
if misc is None:
|
||||
misc = np.where(np.array(egi_info["chan_type"]) != "eeg")[0].tolist()
|
||||
|
||||
logger.info(" Reading events ...")
|
||||
egi_events, egi_info, mff_events = _read_events(input_fname, egi_info)
|
||||
cals = _get_eeg_calibration_info(input_fname, egi_info)
|
||||
logger.info(" Assembling measurement info ...")
|
||||
event_codes = egi_info["event_codes"]
|
||||
include = _triage_include_exclude(include, exclude, egi_events, egi_info)
|
||||
if egi_info["n_events"] > 0 and not events_as_annotations:
|
||||
logger.info(' Synthesizing trigger channel "STI 014" ...')
|
||||
if all(ch.startswith("D") for ch in include):
|
||||
# support the DIN format DIN1, DIN2, ..., DIN9, DI10, DI11, ... DI99,
|
||||
# D100, D101, ..., D255 that we get when sending 0-255 triggers on a
|
||||
# parallel port.
|
||||
events_ids = list()
|
||||
for ch in include:
|
||||
while not ch[0].isnumeric():
|
||||
ch = ch[1:]
|
||||
events_ids.append(int(ch))
|
||||
else:
|
||||
events_ids = np.arange(len(include)) + 1
|
||||
egi_info["new_trigger"] = _combine_triggers(
|
||||
egi_events[[c in include for c in event_codes]], remapping=events_ids
|
||||
)
|
||||
self.event_id = dict(
|
||||
zip([e for e in event_codes if e in include], events_ids)
|
||||
)
|
||||
if egi_info["new_trigger"] is not None:
|
||||
egi_events = np.vstack([egi_events, egi_info["new_trigger"]])
|
||||
else:
|
||||
self.event_id = None
|
||||
egi_info["new_trigger"] = None
|
||||
assert egi_events.shape[1] == egi_info["last_samps"][-1]
|
||||
|
||||
meas_dt_utc = egi_info["meas_dt_local"].astimezone(datetime.timezone.utc)
|
||||
info = _empty_info(egi_info["sfreq"])
|
||||
info["meas_date"] = _ensure_meas_date_none_or_dt(meas_dt_utc)
|
||||
info["utc_offset"] = egi_info["utc_offset"]
|
||||
info["device_info"] = dict(type=egi_info["device"])
|
||||
|
||||
# read in the montage, if it exists
|
||||
ch_names, mon = _read_locs(input_fname, egi_info, channel_naming)
|
||||
# Second: Stim
|
||||
ch_names.extend(list(egi_info["event_codes"]))
|
||||
n_extra = len(event_codes) + len(misc) + len(eog) + len(egi_info["pns_names"])
|
||||
if egi_info["new_trigger"] is not None:
|
||||
ch_names.append("STI 014") # channel for combined events
|
||||
n_extra += 1
|
||||
|
||||
# Third: PNS
|
||||
ch_names.extend(egi_info["pns_names"])
|
||||
|
||||
cals = np.concatenate([cals, np.ones(n_extra)])
|
||||
assert len(cals) == len(ch_names), (len(cals), len(ch_names))
|
||||
|
||||
# Actually create channels as EEG, then update stim and PNS
|
||||
ch_coil = FIFF.FIFFV_COIL_EEG
|
||||
ch_kind = FIFF.FIFFV_EEG_CH
|
||||
chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc)
|
||||
|
||||
sti_ch_idx = [
|
||||
i
|
||||
for i, name in enumerate(ch_names)
|
||||
if name.startswith("STI") or name in event_codes
|
||||
]
|
||||
for idx in sti_ch_idx:
|
||||
chs[idx].update(
|
||||
{
|
||||
"unit_mul": FIFF.FIFF_UNITM_NONE,
|
||||
"cal": cals[idx],
|
||||
"kind": FIFF.FIFFV_STIM_CH,
|
||||
"coil_type": FIFF.FIFFV_COIL_NONE,
|
||||
"unit": FIFF.FIFF_UNIT_NONE,
|
||||
}
|
||||
)
|
||||
chs = _add_pns_channel_info(chs, egi_info, ch_names)
|
||||
info["chs"] = chs
|
||||
info._unlocked = False
|
||||
info._update_redundant()
|
||||
|
||||
if mon is not None:
|
||||
info.set_montage(mon, on_missing="ignore")
|
||||
|
||||
ref_idx = np.flatnonzero(np.isin(mon.ch_names, REFERENCE_NAMES))
|
||||
if len(ref_idx):
|
||||
ref_idx = ref_idx.item()
|
||||
ref_coords = info["chs"][int(ref_idx)]["loc"][:3]
|
||||
for chan in info["chs"]:
|
||||
if chan["kind"] == FIFF.FIFFV_EEG_CH:
|
||||
chan["loc"][3:6] = ref_coords
|
||||
|
||||
file_bin = op.join(input_fname, egi_info["eeg_fname"])
|
||||
egi_info["egi_events"] = egi_events
|
||||
|
||||
# Check how many channels to read are from EEG
|
||||
keys = ("eeg", "sti", "pns")
|
||||
idx = dict()
|
||||
idx["eeg"] = np.where([ch["kind"] == FIFF.FIFFV_EEG_CH for ch in chs])[0]
|
||||
idx["sti"] = np.where([ch["kind"] == FIFF.FIFFV_STIM_CH for ch in chs])[0]
|
||||
idx["pns"] = np.where(
|
||||
[
|
||||
ch["kind"] in (FIFF.FIFFV_ECG_CH, FIFF.FIFFV_EMG_CH, FIFF.FIFFV_BIO_CH)
|
||||
for ch in chs
|
||||
]
|
||||
)[0]
|
||||
# By construction this should always be true, but check anyway
|
||||
if not np.array_equal(
|
||||
np.concatenate([idx[key] for key in keys]), np.arange(len(chs))
|
||||
):
|
||||
raise ValueError(
|
||||
"Currently interlacing EEG and PNS channels is not supported"
|
||||
)
|
||||
egi_info["kind_bounds"] = [0]
|
||||
for key in keys:
|
||||
egi_info["kind_bounds"].append(len(idx[key]))
|
||||
egi_info["kind_bounds"] = np.cumsum(egi_info["kind_bounds"])
|
||||
assert egi_info["kind_bounds"][0] == 0
|
||||
assert egi_info["kind_bounds"][-1] == info["nchan"]
        first_samps = [0]
        last_samps = [egi_info["last_samps"][-1] - 1]

        annot = dict(onset=list(), duration=list(), description=list())

        if len(idx["pns"]):
            # PNS Data is present and should be read:
            egi_info["pns_filepath"] = op.join(input_fname, egi_info["pns_fname"])
            # Check for PNS bug immediately
            pns_samples = np.sum(egi_info["pns_sample_blocks"]["samples_block"])
            eeg_samples = np.sum(egi_info["samples_block"])
            if pns_samples == eeg_samples - 1:
                warn("This file has the EGI PSG sample bug")
                annot["onset"].append(last_samps[-1] / egi_info["sfreq"])
                annot["duration"].append(1 / egi_info["sfreq"])
                annot["description"].append("BAD_EGI_PSG")
            elif pns_samples != eeg_samples:
                raise RuntimeError(
                    f"PNS samples ({pns_samples}) did not match EEG samples "
                    f"({eeg_samples})."
                )

        super().__init__(
            info,
            preload=preload,
            orig_format="single",
            filenames=[file_bin],
            first_samps=first_samps,
            last_samps=last_samps,
            raw_extras=[egi_info],
            verbose=verbose,
        )

        # Annotate acquisition skips
        for first, prev_last in zip(
            egi_info["first_samps"][1:], egi_info["last_samps"][:-1]
        ):
            gap = first - prev_last
            assert gap >= 0
            if gap:
                annot["onset"].append((prev_last - 0.5) / egi_info["sfreq"])
                annot["duration"].append(gap / egi_info["sfreq"])
                annot["description"].append("BAD_ACQ_SKIP")

        # create events from annotations
        if events_as_annotations:
            for code, samples in mff_events.items():
                if code not in include:
                    continue
                annot["onset"].extend(np.array(samples) / egi_info["sfreq"])
                annot["duration"].extend([0.0] * len(samples))
                annot["description"].extend([code] * len(samples))

        if len(annot["onset"]):
            self.set_annotations(Annotations(**annot))

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of data."""
        logger.debug(f"Reading MFF {start:6d} ... {stop:6d} ...")
        dtype = "<f4"  # Data read in four byte floats.

        egi_info = self._raw_extras[fi]
        one = np.zeros((egi_info["kind_bounds"][-1], stop - start))

        # info about the binary file structure
        n_channels = egi_info["n_channels"]
        samples_block = egi_info["samples_block"]

        # Check how many channels to read are from each type
        bounds = egi_info["kind_bounds"]
        if isinstance(idx, slice):
            idx = np.arange(idx.start, idx.stop)
        eeg_out = np.where(idx < bounds[1])[0]
        eeg_one = idx[eeg_out, np.newaxis]
        eeg_in = idx[eeg_out]
        stim_out = np.where((idx >= bounds[1]) & (idx < bounds[2]))[0]
        stim_one = idx[stim_out]
        stim_in = idx[stim_out] - bounds[1]
        pns_out = np.where((idx >= bounds[2]) & (idx < bounds[3]))[0]
        pns_in = idx[pns_out] - bounds[2]
        pns_one = idx[pns_out, np.newaxis]
        del eeg_out, stim_out, pns_out
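        # Illustrative note (example values assumed, not from the original
        # source): the *_one arrays index rows of the output buffer `one`,
        # while the *_in arrays index rows within each kind's own block on
        # disk. E.g. with bounds = [0, 256, 258, 260] and idx = [255, 256,
        # 259], eeg_in = [255], stim_in = [0], and pns_in = [1].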

        # take into account events (already extended to correct size)
        one[stim_one, :] = egi_info["egi_events"][stim_in, start:stop]

        # Convert start and stop to limits in terms of the data
        # actually on disk, plus an indexer (disk_use_idx) that populates
        # the potentially larger `data` with it, taking skips into account
        disk_samps = egi_info["disk_samps"][start:stop]
        disk_use_idx = np.where(disk_samps > -1)[0]
        # short circuit in case we don't need any samples
        if not len(disk_use_idx):
            _mult_cal_one(data, one, idx, cals, mult)
            return

        start = disk_samps[disk_use_idx[0]]
        stop = disk_samps[disk_use_idx[-1]] + 1
        assert len(disk_use_idx) == stop - start

        # Get starting/stopping block/samples
        block_samples_offset = np.cumsum(samples_block)
        offset_blocks = np.sum(block_samples_offset <= start)
        offset_samples = start - (
            block_samples_offset[offset_blocks - 1] if offset_blocks > 0 else 0
        )

        # TODO: Refactor this reading with the PNS reading in a single function
        # (DRY)
        samples_to_read = stop - start
        with open(self.filenames[fi], "rb", buffering=0) as fid:
            # Go to starting block
            current_block = 0
            current_block_info = None
            current_data_sample = 0
            while current_block < offset_blocks:
                this_block_info = _block_r(fid)
                if this_block_info is not None:
                    current_block_info = this_block_info
                fid.seek(current_block_info["block_size"], 1)
                current_block += 1

            # Start reading samples
            while samples_to_read > 0:
                logger.debug(f"  Reading from block {current_block}")
                this_block_info = _block_r(fid)
                current_block += 1
                if this_block_info is not None:
                    current_block_info = this_block_info

                to_read = current_block_info["nsamples"] * current_block_info["nc"]
                block_data = np.fromfile(fid, dtype, to_read)
                block_data = block_data.reshape(n_channels, -1, order="C")

                # Compute indexes
                samples_read = block_data.shape[1]
                logger.debug(f"  Read {samples_read} samples")
                logger.debug(f"  Offset {offset_samples} samples")
                if offset_samples > 0:
                    # First block read, skip to the offset:
                    block_data = block_data[:, offset_samples:]
                    samples_read = samples_read - offset_samples
                    offset_samples = 0
                if samples_to_read < samples_read:
                    # Last block to read, skip the last samples
                    block_data = block_data[:, :samples_to_read]
                    samples_read = samples_to_read
                logger.debug(f"  Keep {samples_read} samples")

                s_start = current_data_sample
                s_end = s_start + samples_read

                one[eeg_one, disk_use_idx[s_start:s_end]] = block_data[eeg_in]
                samples_to_read = samples_to_read - samples_read
                current_data_sample = current_data_sample + samples_read

        if len(pns_one) > 0:
            # PNS Data is present and should be read:
            pns_filepath = egi_info["pns_filepath"]
            pns_info = egi_info["pns_sample_blocks"]
            n_channels = pns_info["n_channels"]
            samples_block = pns_info["samples_block"]

            # Get starting/stopping block/samples
            block_samples_offset = np.cumsum(samples_block)
            offset_blocks = np.sum(block_samples_offset < start)
            offset_samples = start - (
                block_samples_offset[offset_blocks - 1] if offset_blocks > 0 else 0
            )

            samples_to_read = stop - start
            with open(pns_filepath, "rb", buffering=0) as fid:
                # Check file size
                fid.seek(0, 2)
                file_size = fid.tell()
                fid.seek(0)
                # Go to starting block
                current_block = 0
                current_block_info = None
                current_data_sample = 0
                while current_block < offset_blocks:
                    this_block_info = _block_r(fid)
                    if this_block_info is not None:
                        current_block_info = this_block_info
                    fid.seek(current_block_info["block_size"], 1)
                    current_block += 1

                # Start reading samples
                while samples_to_read > 0:
                    if samples_to_read == 1 and fid.tell() == file_size:
                        # We are in the presence of the EEG bug
                        # fill with zeros and break the loop
                        one[pns_one, -1] = 0
                        break

                    this_block_info = _block_r(fid)
                    if this_block_info is not None:
                        current_block_info = this_block_info

                    to_read = current_block_info["nsamples"] * current_block_info["nc"]
                    block_data = np.fromfile(fid, dtype, to_read)
                    block_data = block_data.reshape(n_channels, -1, order="C")

                    # Compute indexes
                    samples_read = block_data.shape[1]
                    if offset_samples > 0:
                        # First block read, skip to the offset:
                        block_data = block_data[:, offset_samples:]
                        samples_read = samples_read - offset_samples
                        offset_samples = 0

                    if samples_to_read < samples_read:
                        # Last block to read, skip the last samples
                        block_data = block_data[:, :samples_to_read]
                        samples_read = samples_to_read

                    s_start = current_data_sample
                    s_end = s_start + samples_read

                    one[pns_one, disk_use_idx[s_start:s_end]] = block_data[pns_in]
                    samples_to_read = samples_to_read - samples_read
                    current_data_sample = current_data_sample + samples_read

        # do the calibration
        _mult_cal_one(data, one, idx, cals, mult)

@verbose
def read_evokeds_mff(
    fname, condition=None, channel_naming="E%d", baseline=None, verbose=None
):
    """Read averaged MFF file as EvokedArray or list of EvokedArray.

    Parameters
    ----------
    fname : path-like
        File path to averaged MFF file. Should end in ``.mff``.
    condition : int or str | list of int or str | None
        The index (indices) or category (categories) from which to read in
        data. Averaged MFF files can contain separate averages for different
        categories. These can be indexed by the block number or the category
        name. If ``condition`` is a list or None, a list of EvokedArray objects
        is returned.
    channel_naming : str
        Channel naming convention for EEG channels. Defaults to 'E%%d'
        (resulting in channel names 'E1', 'E2', 'E3'...).
    baseline : None (default) or tuple of length 2
        The time interval to apply baseline correction. If None do not apply
        it. If baseline is (a, b) the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used and if b is None then b
        is set to the end of the interval. If baseline is equal to (None, None)
        all the time interval is used. Correction is applied by computing mean
        of the baseline period and subtracting it from the data. The baseline
        (a, b) includes both endpoints, i.e. all timepoints t such that
        a <= t <= b.
    %(verbose)s

    Returns
    -------
    evoked : EvokedArray or list of EvokedArray
        The evoked dataset(s); one EvokedArray if condition is int or str,
        or list of EvokedArray if condition is None or list.

    Raises
    ------
    ValueError
        If ``fname`` has file extension other than '.mff'.
    ValueError
        If the MFF file specified by ``fname`` is not averaged.
    ValueError
        If no categories.xml file in MFF directory specified by ``fname``.

    See Also
    --------
    Evoked, EvokedArray, create_info

    Notes
    -----
    .. versionadded:: 0.22
    """
    mffpy = _import_mffpy()
    # Confirm `fname` is a path to an MFF file
    fname = Path(fname)  # should be replaced with _check_fname
    if not fname.suffix == ".mff":
        raise ValueError('fname must be an MFF file with extension ".mff".')
    # Confirm the input MFF is averaged
    mff = mffpy.Reader(fname)
    try:
        flavor = mff.mff_flavor
    except AttributeError:  # < 6.3
        flavor = mff.flavor
    if flavor not in ("averaged", "segmented"):  # old, new names
        raise ValueError(
            f"{fname} is a {flavor} MFF file. "
            "fname must be the path to an averaged MFF file."
        )
    # Check for categories.xml file
    if "categories.xml" not in mff.directory.listdir():
        raise ValueError(
            "categories.xml not found in MFF directory. "
            f"{fname} may not be an averaged MFF file."
        )
    return_list = True
    if condition is None:
        categories = mff.categories.categories
        condition = list(categories.keys())
    elif not isinstance(condition, list):
        condition = [condition]
        return_list = False
    logger.info(f"Reading {len(condition)} evoked datasets from {fname} ...")
    output = [
        _read_evoked_mff(
            fname, c, channel_naming=channel_naming, verbose=verbose
        ).apply_baseline(baseline)
        for c in condition
    ]
    return output if return_list else output[0]
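
# Usage sketch (hypothetical file path; ``read_evokeds_mff`` is exported from
# ``mne.io``, and ``condition``/``baseline`` behave as documented above):
#
#     from mne.io import read_evokeds_mff
#     evokeds = read_evokeds_mff("subject01_ave.mff")  # list, all categories
#     target = read_evokeds_mff(
#         "subject01_ave.mff", condition="Target", baseline=(None, 0)
#     )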


def _read_evoked_mff(fname, condition, channel_naming="E%d", verbose=None):
    """Read evoked data from MFF file."""
    import mffpy

    egi_info = _read_header(fname)
    mff = mffpy.Reader(fname)
    categories = mff.categories.categories

    if isinstance(condition, str):
        # Condition is interpreted as category name
        category = _check_option(
            "condition", condition, categories, extra="provided as category name"
        )
        epoch = mff.epochs[category]
    elif isinstance(condition, int):
        # Condition is interpreted as epoch index
        try:
            epoch = mff.epochs[condition]
        except IndexError:
            raise ValueError(
                f'"condition" parameter ({condition}), provided '
                "as epoch index, is out of range for available "
                f"epochs ({len(mff.epochs)})."
            )
        category = epoch.name
    else:
        raise TypeError('"condition" parameter must be either int or str.')

    # Read in signals from the target epoch
    data = mff.get_physical_samples_from_epoch(epoch)
    eeg_data, t0 = data["EEG"]
    if "PNSData" in data:
        pns_data, t0 = data["PNSData"]
        all_data = np.vstack((eeg_data, pns_data))
        ch_types = egi_info["chan_type"] + egi_info["pns_types"]
    else:
        all_data = eeg_data
        ch_types = egi_info["chan_type"]
    all_data *= 1e-6  # convert to volts

    # Load metadata into info object
    # Exclude info['meas_date'] because record time info in
    # averaged MFF is the time of the averaging, not true record time.
    ch_names, mon = _read_locs(fname, egi_info, channel_naming)
    ch_names.extend(egi_info["pns_names"])
    info = create_info(ch_names, mff.sampling_rates["EEG"], ch_types)
    with info._unlock():
        info["device_info"] = dict(type=egi_info["device"])
        info["nchan"] = sum(mff.num_channels.values())

    # Add individual channel info
    # Get calibration info for EEG channels
    cals = _get_eeg_calibration_info(fname, egi_info)
    # Initialize calibration for PNS channels, will be updated later
    cals = np.concatenate([cals, np.repeat(1, len(egi_info["pns_names"]))])
    ch_coil = FIFF.FIFFV_COIL_EEG
    ch_kind = FIFF.FIFFV_EEG_CH
    chs = _create_chs(ch_names, cals, ch_coil, ch_kind, (), (), (), ())
    # Update PNS channel info
    chs = _add_pns_channel_info(chs, egi_info, ch_names)
    with info._unlock():
        info["chs"] = chs
    if mon is not None:
        info.set_montage(mon, on_missing="ignore")

    # Add bad channels to info
    info["description"] = category
    try:
        channel_status = categories[category][0]["channelStatus"]
    except KeyError:
        warn(
            f"Channel status data not found for condition {category}. "
            "No channels will be marked as bad.",
            category=UserWarning,
        )
        channel_status = None
    bads = []
    if channel_status:
        for entry in channel_status:
            if entry["exclusion"] == "badChannels":
                if entry["signalBin"] == 1:
                    # Add bad EEG channels
                    for ch in entry["channels"]:
                        bads.append(ch_names[ch - 1])
                elif entry["signalBin"] == 2:
                    # Add bad PNS channels
                    for ch in entry["channels"]:
                        bads.append(egi_info["pns_names"][ch - 1])
    info["bads"] = bads

    # Add EEG reference to info
    try:
        fp = mff.directory.filepointer("history")
    except (ValueError, FileNotFoundError):  # old (<=0.6.3) vs new mffpy
        pass
    else:
        with fp:
            history = mffpy.XML.from_file(fp)
        for entry in history.entries:
            if entry["method"] == "Montage Operations Tool":
                if "Average Reference" in entry["settings"]:
                    # Average reference has been applied
                    _, info = setup_proj(info)

    # Get nave from categories.xml
    try:
        nave = categories[category][0]["keys"]["#seg"]["data"]
    except KeyError:
        warn(
            f"Number of averaged epochs not found for condition {category}. "
            "nave will default to 1.",
            category=UserWarning,
        )
        nave = 1

    # Let tmin default to 0
    return EvokedArray(
        all_data, info, tmin=0.0, comment=category, nave=nave, verbose=verbose
    )


def _import_mffpy(why="read averaged .mff files"):
    """Import and return module mffpy."""
    try:
        import mffpy
    except ImportError as exp:
        msg = f"mffpy is required to {why}, got:\n{exp}"
        raise ImportError(msg)

    return mffpy
207
mne/io/egi/events.py
Normal file
@@ -0,0 +1,207 @@
#
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from datetime import datetime
from glob import glob
from os.path import basename, join, splitext

import numpy as np

from ...utils import _soft_import, _validate_type, logger, warn


def _read_events(input_fname, info):
    """Read events for the record.

    Parameters
    ----------
    input_fname : path-like
        The file path.
    info : dict
        Header info array.
    """
    n_samples = info["last_samps"][-1]
    mff_events, event_codes = _read_mff_events(input_fname, info["sfreq"])
    info["n_events"] = len(event_codes)
    info["event_codes"] = event_codes
    events = np.zeros([info["n_events"], info["n_segments"] * n_samples])
    for n, event in enumerate(event_codes):
        for i in mff_events[event]:
            if (i < 0) or (i >= events.shape[1]):
                continue
            events[n][i] = n + 1
    return events, info, mff_events


def _read_mff_events(filename, sfreq):
    """Extract the events.

    Parameters
    ----------
    filename : path-like
        File path.
    sfreq : float
        The sampling frequency.
    """
    orig = {}
    for xml_file in glob(join(filename, "*.xml")):
        xml_type = splitext(basename(xml_file))[0]
        orig[xml_type] = _parse_xml(xml_file)
    xml_files = orig.keys()
    xml_events = [x for x in xml_files if x[:7] == "Events_"]
    for item in orig["info"]:
        if "recordTime" in item:
            start_time = _ns2py_time(item["recordTime"])
            break
    markers = []
    code = []
    for xml in xml_events:
        for event in orig[xml][2:]:
            event_start = _ns2py_time(event["beginTime"])
            start = (event_start - start_time).total_seconds()
            if event["code"] not in code:
                code.append(event["code"])
            marker = {
                "name": event["code"],
                "start": start,
                "start_sample": int(np.fix(start * sfreq)),
                "end": start + float(event["duration"]) / 1e9,
                "chan": None,
            }
            markers.append(marker)
    events_tims = dict()
    for ev in code:
        trig_samp = list(
            c["start_sample"] for n, c in enumerate(markers) if c["name"] == ev
        )
        events_tims.update({ev: trig_samp})
    return events_tims, code


def _parse_xml(xml_file):
    """Parse XML file."""
    defusedxml = _soft_import("defusedxml", "reading EGI MFF data")
    xml = defusedxml.ElementTree.parse(xml_file)
    root = xml.getroot()
    return _xml2list(root)


def _xml2list(root):
    """Parse XML item."""
    output = []
    for element in root:
        if len(element) > 0:
            if element[0].tag != element[-1].tag:
                output.append(_xml2dict(element))
            else:
                output.append(_xml2list(element))

        elif element.text:
            text = element.text.strip()
            if text:
                tag = _ns(element.tag)
                output.append({tag: text})

    return output


def _ns(s):
    """Remove namespace, but only if there is a namespace to begin with."""
    if "}" in s:
        return "}".join(s.split("}")[1:])
    else:
        return s


def _xml2dict(root):
    """Use functions instead of Class.

    remove namespace based on
    http://stackoverflow.com/questions/2148119
    """
    output = {}
    if root.items():
        output.update(dict(root.items()))

    for element in root:
        if len(element) > 0:
            if len(element) == 1 or element[0].tag != element[1].tag:
                one_dict = _xml2dict(element)
            else:
                one_dict = {_ns(element[0].tag): _xml2list(element)}

            if element.items():
                one_dict.update(dict(element.items()))
            output.update({_ns(element.tag): one_dict})

        elif element.items():
            output.update({_ns(element.tag): dict(element.items())})

        else:
            output.update({_ns(element.tag): element.text})
    return output


def _ns2py_time(nstime):
    """Parse times."""
    nsdate = nstime[0:10]
    nstime0 = nstime[11:26]
    nstime00 = nsdate + " " + nstime0
    pytime = datetime.strptime(nstime00, "%Y-%m-%d %H:%M:%S.%f")
    return pytime
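
# Worked example (illustrative timestamp, not from the original source): an
# MFF time like "2019-07-30T10:45:07.123456+01:00" is sliced into the date
# ("2019-07-30") and the time ("10:45:07.123456"); the UTC offset suffix is
# discarded:
#
#     _ns2py_time("2019-07-30T10:45:07.123456+01:00")
#     # -> datetime.datetime(2019, 7, 30, 10, 45, 7, 123456)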


def _combine_triggers(data, remapping=None):
    """Combine binary triggers."""
    new_trigger = np.zeros(data.shape[1])
    if data.astype(bool).sum(axis=0).max() > 1:  # ensure no overlaps
        logger.info(
            "    Found multiple events at the same time "
            "sample. Cannot create trigger channel."
        )
        return
    if remapping is None:
        # one distinct id per trigger line (np.arange over the row count;
        # passing the array itself to np.arange was a bug)
        remapping = np.arange(len(data)) + 1
    for d, event_id in zip(data, remapping):
        idx = d.nonzero()
        if np.any(idx):
            new_trigger[idx] += event_id
    return new_trigger
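
# Worked example (illustrative): for two non-overlapping binary trigger lines
# data = np.array([[1, 0, 0], [0, 1, 0]]) and the default remapping [1, 2],
# the combined channel is [1., 2., 0.], i.e. each event keeps a distinct id.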


def _triage_include_exclude(include, exclude, egi_events, egi_info):
    """Triage include and exclude."""
    _validate_type(exclude, (list, None), "exclude")
    _validate_type(include, (list, None), "include")
    event_codes = list(egi_info["event_codes"])
    for name, lst in dict(exclude=exclude, include=include).items():
        for ii, item in enumerate(lst or []):
            what = f"{name}[{ii}]"
            _validate_type(item, str, what)
            if item not in event_codes:
                raise ValueError(
                    f"Could not find event channel named {what}={repr(item)}"
                )
    if include is None:
        if exclude is None:
            default_exclude = ["sync", "TREV"]
            exclude = [code for code in default_exclude if code in event_codes]
            for code, event in zip(event_codes, egi_events):
                if event.sum() < 1 and code:
                    exclude.append(code)
            if (
                len(exclude) == len(event_codes)
                and egi_info["n_events"]
                and set(exclude) - set(default_exclude)
            ):
                warn(
                    "Did not find any event code with at least one event.",
                    RuntimeWarning,
                )
        include = [k for k in event_codes if k not in exclude]
    del exclude
    excl_events = ", ".join(k for k in event_codes if k not in include)
    logger.info(f"  Excluding events {{{excl_events}}} ...")
    return include
192
mne/io/egi/general.py
Normal file
@@ -0,0 +1,192 @@
#
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os
import re

import numpy as np

from ...utils import _pl, _soft_import


def _extract(tags, filepath=None, obj=None):
    """Extract info from XML."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    if obj is not None:
        fileobj = obj
    elif filepath is not None:
        fileobj = parse(filepath)
    else:
        raise ValueError("There is no object or file to extract data from")
    infoxml = dict()
    for tag in tags:
        value = fileobj.getElementsByTagName(tag)
        infoxml[tag] = []
        for i in range(len(value)):
            infoxml[tag].append(value[i].firstChild.data)
    return infoxml


def _get_gains(filepath):
    """Parse gains."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    file_obj = parse(filepath)
    objects = file_obj.getElementsByTagName("calibration")
    gains = dict()
    for ob in objects:
        value = ob.getElementsByTagName("type")
        if value[0].firstChild.data == "GCAL":
            data_g = _extract(["ch"], obj=ob)["ch"]
            gains.update(gcal=np.asarray(data_g, dtype=np.float64))
        elif value[0].firstChild.data == "ICAL":
            data_g = _extract(["ch"], obj=ob)["ch"]
            gains.update(ical=np.asarray(data_g, dtype=np.float64))
    return gains


def _get_ep_info(filepath):
    """Get epoch info."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    epochfile = filepath + "/epochs.xml"
    epochlist = parse(epochfile)
    epochs = epochlist.getElementsByTagName("epoch")
    keys = ("first_samps", "last_samps", "first_blocks", "last_blocks")
    epoch_info = {key: list() for key in keys}
    for epoch in epochs:
        ep_begin = int(epoch.getElementsByTagName("beginTime")[0].firstChild.data)
        ep_end = int(epoch.getElementsByTagName("endTime")[0].firstChild.data)
        first_block = int(epoch.getElementsByTagName("firstBlock")[0].firstChild.data)
        last_block = int(epoch.getElementsByTagName("lastBlock")[0].firstChild.data)
        epoch_info["first_samps"].append(ep_begin)
        epoch_info["last_samps"].append(ep_end)
        epoch_info["first_blocks"].append(first_block)
        epoch_info["last_blocks"].append(last_block)
    # Don't turn into ndarray here, keep native int because it can deal with
    # huge numbers (could use np.uint64 but it's more work)
    return epoch_info


def _get_blocks(filepath):
    """Get info from meta data blocks."""
    binfile = os.path.join(filepath)
    n_blocks = 0
    samples_block = []
    header_sizes = []
    n_channels = []
    sfreq = []
    # Meta data consists of int32 fields (see _block_r below):
    # * one flag (1 for meta data, 0 for data)
    # * one header size
    # * one block size
    # * one n_channels
    # * n_channels offsets
    # * n_channels packed sigfreqs (rate and depth)
    with open(binfile, "rb") as fid:
        fid.seek(0, 2)  # go to end of file
        file_length = fid.tell()
        block_size = file_length
        fid.seek(0)
        position = 0
        while position < file_length:
            block = _block_r(fid)
            if block is None:
                samples_block.append(samples_block[n_blocks - 1])
                n_blocks += 1
                fid.seek(block_size, 1)
                position = fid.tell()
                continue
            block_size = block["block_size"]
            header_size = block["header_size"]
            header_sizes.append(header_size)
            samples_block.append(block["nsamples"])
            n_blocks += 1
            fid.seek(block_size, 1)
            sfreq.append(block["sfreq"])
            n_channels.append(block["nc"])
            position = fid.tell()

    if any([n != n_channels[0] for n in n_channels]):
        raise RuntimeError("All the blocks don't have the same amount of channels.")
    if any([f != sfreq[0] for f in sfreq]):
        raise RuntimeError("All the blocks don't have the same sampling frequency.")
    if len(samples_block) < 1:
        raise RuntimeError("There seems to be no data")
    samples_block = np.array(samples_block)
    signal_blocks = dict(
        n_channels=n_channels[0],
        sfreq=sfreq[0],
        n_blocks=n_blocks,
        samples_block=samples_block,
        header_sizes=header_sizes,
    )
    return signal_blocks


def _get_signalfname(filepath):
    """Get filenames."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    listfiles = os.listdir(filepath)
    binfiles = list(
        f for f in listfiles if "signal" in f and f[-4:] == ".bin" and f[0] != "."
    )
    all_files = {}
    infofiles = list()
    for binfile in binfiles:
        bin_num_str = re.search(r"\d+", binfile).group()
        infofile = "info" + bin_num_str + ".xml"
        infofiles.append(infofile)
        infobjfile = os.path.join(filepath, infofile)
        infobj = parse(infobjfile)
        if len(infobj.getElementsByTagName("EEG")):
            signal_type = "EEG"
        elif len(infobj.getElementsByTagName("PNSData")):
            signal_type = "PNS"
        all_files[signal_type] = {
            "signal": f"signal{bin_num_str}.bin",
            "info": infofile,
        }
    if "EEG" not in all_files:
        infofiles_str = "\n".join(infofiles)
        raise FileNotFoundError(
            f"Could not find any EEG data in the {len(infofiles)} file{_pl(infofiles)} "
            f"found in {filepath}:\n{infofiles_str}"
        )
    return all_files


def _block_r(fid):
    """Read meta data."""
    if np.fromfile(fid, dtype=np.dtype("i4"), count=1).item() != 1:  # not meta
        return None
    header_size = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item()
    block_size = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item()
    hl = int(block_size / 4)
    nc = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item()
    nsamples = int(hl / nc)
    np.fromfile(fid, dtype=np.dtype("i4"), count=nc)  # sigoffset
    sigfreq = np.fromfile(fid, dtype=np.dtype("i4"), count=nc)
    depth = sigfreq[0] & 0xFF
    if depth != 32:
        raise ValueError("I do not know how to read this MFF (depth != 32)")
    sfreq = sigfreq[0] >> 8
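    # Illustrative note: rate and depth are bit-packed into one int32 per
    # channel, e.g. a packed value of 128032 decodes as
    # depth = 128032 & 0xFF = 32 bits and sfreq = 128032 >> 8 = 500 Hz.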
    count = int(header_size / 4 - (4 + 2 * nc))
    np.fromfile(fid, dtype=np.dtype("i4"), count=count)  # remaining header
    block = dict(
        nc=nc,
        hl=hl,
        nsamples=nsamples,
        block_size=block_size,
        header_size=header_size,
        sfreq=sfreq,
    )
    return block
7
mne/io/eximia/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""Eximia module for conversion to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .eximia import read_raw_eximia
103
mne/io/eximia/eximia.py
Normal file
@@ -0,0 +1,103 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os.path as op

from ..._fiff.meas_info import create_info
from ..._fiff.utils import _file_size, _read_segments_file
from ...utils import _check_fname, fill_doc, logger, verbose, warn
from ..base import BaseRaw


@fill_doc
def read_raw_eximia(fname, preload=False, verbose=None) -> "RawEximia":
    """Reader for an eXimia EEG file.

    Parameters
    ----------
    fname : path-like
        Path to the eXimia ``.nxe`` data file.
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawEximia
        A Raw object containing eXimia data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawEximia.
    """
    return RawEximia(fname, preload, verbose)
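
# Usage sketch (hypothetical file path; ``read_raw_eximia`` is exported from
# ``mne.io``):
#
#     from mne.io import read_raw_eximia
#     raw = read_raw_eximia("recording.nxe", preload=True)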


@fill_doc
class RawEximia(BaseRaw):
    """Raw object from an Eximia EEG file.

    Parameters
    ----------
    fname : path-like
        Path to the eXimia data file (.nxe).
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """

    @verbose
    def __init__(self, fname, preload=False, verbose=None):
        fname = str(_check_fname(fname, "read", True, "fname"))
        data_name = op.basename(fname)
        logger.info(f"Loading {data_name}")
        # The .nxe format has a fixed layout: 64 channels at 1450 Hz
        n_chan = 64
        sfreq = 1450.0
        # data are multiplexed int16
        ch_names = ["GateIn", "Trig1", "Trig2", "EOG"]
        ch_types = ["stim", "stim", "stim", "eog"]
        cals = [
            0.0015259021896696422,
            0.0015259021896696422,
            0.0015259021896696422,
            0.3814755474174106,
        ]
        ch_names += (
            "Fp1 Fpz Fp2 AF1 AFz AF2 "
            "F7 F3 F1 Fz F2 F4 F8 "
            "FT9 FT7 FC5 FC3 FC1 FCz FC2 FC4 FC6 FT8 FT10 "
            "T7 C5 C3 C1 Cz C2 C4 C6 T8 "
            "TP9 TP7 CP5 CP3 CP1 CPz CP2 CP4 CP6 TP8 TP10 "
            "P9 P7 P3 P1 Pz P2 P4 P8 "
            "P10 PO3 POz PO4 O1 Oz O2 Iz".split()
        )
        n_eeg = len(ch_names) - len(cals)
        cals += [0.07629510948348212] * n_eeg
        ch_types += ["eeg"] * n_eeg
        assert len(ch_names) == n_chan
        info = create_info(ch_names, sfreq, ch_types)
        n_bytes = _file_size(fname)
        n_samples, extra = divmod(n_bytes, (n_chan * 2))
        if extra != 0:
            warn(
                f"Incorrect number of samples in file ({n_samples}), the file is likely"
                " truncated"
            )
        for ch, cal in zip(info["chs"], cals):
            ch["cal"] = cal
        super().__init__(
            info,
            preload=preload,
            last_samps=(n_samples - 1,),
            filenames=[fname],
            orig_format="short",
        )

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype="<i2")
7
mne/io/eyelink/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""Module for loading Eye-Tracker data."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .eyelink import read_raw_eyelink
846
mne/io/eyelink/_utils.py
Normal file
@@ -0,0 +1,846 @@
"""Helper functions for reading eyelink ASCII files."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import re
from datetime import datetime, timedelta, timezone

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import create_info
from ...annotations import Annotations
from ...utils import _check_pandas_installed, logger, warn

EYELINK_COLS = {
    "timestamp": ("time",),
    "pos": {
        "left": ("xpos_left", "ypos_left", "pupil_left"),
        "right": ("xpos_right", "ypos_right", "pupil_right"),
    },
    "velocity": {
        "left": ("xvel_left", "yvel_left"),
        "right": ("xvel_right", "yvel_right"),
    },
    "resolution": ("xres", "yres"),
    "input": ("DIN",),
    "remote": ("x_head", "y_head", "distance"),
    "block_num": ("block",),
    "eye_event": ("eye", "time", "end_time", "duration"),
    "fixation": ("fix_avg_x", "fix_avg_y", "fix_avg_pupil_size"),
    "saccade": (
        "sacc_start_x",
        "sacc_start_y",
        "sacc_end_x",
        "sacc_end_y",
        "sacc_visual_angle",
        "peak_velocity",
    ),
}


def _parse_eyelink_ascii(
    fname, find_overlaps=True, overlap_threshold=0.05, apply_offsets=False
):
    # ======================== Parse ASCII File =========================
    raw_extras = dict()
    raw_extras.update(_parse_recording_blocks(fname))
    raw_extras.update(_get_metadata(raw_extras))
    raw_extras["dt"] = _get_recording_datetime(fname)
    _validate_data(raw_extras)

    # ======================== Create DataFrames ========================
    raw_extras["dfs"] = _create_dataframes(raw_extras, apply_offsets)
    del raw_extras["sample_lines"]  # free up memory
    # add column names to dataframes and set the dtype of each column
    col_names, ch_names = _infer_col_names(raw_extras)
    raw_extras["dfs"] = _assign_col_names(col_names, raw_extras["dfs"])
    raw_extras["dfs"] = _set_df_dtypes(raw_extras["dfs"])  # set dtypes for dataframes
    # if HREF data, convert to radians
    if "HREF" in raw_extras["rec_info"]:
        raw_extras["dfs"]["samples"] = _convert_href_samples(
            raw_extras["dfs"]["samples"]
        )
    # fill in times between recording blocks with BAD_ACQ_SKIP
    if raw_extras["n_blocks"] > 1:
        logger.info(
            f"There are {raw_extras['n_blocks']} recording blocks in this file."
            f" Times between blocks will be annotated with BAD_ACQ_SKIP."
        )
        raw_extras["dfs"]["samples"] = _adjust_times(
            raw_extras["dfs"]["samples"], raw_extras["sfreq"]
        )
    # Convert timestamps to seconds
    for df in raw_extras["dfs"].values():
        df = _convert_times(df, raw_extras["first_samp"])
    # Find overlaps between left and right eye events
    if find_overlaps:
        for key in raw_extras["dfs"]:
            if key not in ["blinks", "fixations", "saccades"]:
                continue
            raw_extras["dfs"][key] = _find_overlaps(
                raw_extras["dfs"][key], max_time=overlap_threshold
            )
    # ======================== Info for BaseRaw ========================
    eye_ch_data = raw_extras["dfs"]["samples"][ch_names].to_numpy().T
    info = _create_info(ch_names, raw_extras)

    return eye_ch_data, info, raw_extras
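
# Usage sketch (hypothetical file path): this helper backs the public reader
# ``mne.io.read_raw_eyelink``, the intended entry point; the
# ``create_annotations`` keyword is assumed from the validation message in
# ``_make_eyelink_annots`` below:
#
#     from mne.io import read_raw_eyelink
#     raw = read_raw_eyelink("session.asc", create_annotations=True)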


def _parse_recording_blocks(fname):
    """Parse Eyelink ASCII file.

    Eyelink samples occur within START and END blocks.
    Sample lines start with a posix-like string,
    and contain eyetracking sample info. Event lines
    start with an upper case string and contain info
    about ocular events (i.e. blink/saccade), or experiment
    messages sent by the stimulus presentation software.
    """
    with fname.open() as file:
        data_dict = dict()
        data_dict["sample_lines"] = []
        data_dict["event_lines"] = {
            "START": [],
            "END": [],
            "SAMPLES": [],
            "EVENTS": [],
            "ESACC": [],
            "EBLINK": [],
            "EFIX": [],
            "MSG": [],
            "INPUT": [],
            "BUTTON": [],
            "PUPIL": [],
        }

        is_recording_block = False
        for line in file:
            if line.startswith("START"):  # start of recording block
                is_recording_block = True
            if is_recording_block:
                tokens = line.split()
                if not tokens:
                    continue  # skip empty lines
                if tokens[0][0].isnumeric():  # Samples
                    data_dict["sample_lines"].append(tokens)
                elif tokens[0] in data_dict["event_lines"].keys():
                    if _is_sys_msg(line):
                        continue  # system messages don't need to be parsed.
                    event_key, event_info = tokens[0], tokens[1:]
                    data_dict["event_lines"][event_key].append(event_info)
                    if tokens[0] == "END":  # end of recording block
                        is_recording_block = False
        if not data_dict["sample_lines"]:  # no samples parsed
            raise ValueError(f"Couldn't find any samples in {fname}")
        return data_dict


def _validate_data(raw_extras):
    """Check the incoming data for some known problems that can occur."""
    # Detect the datatypes that are in file.
    if "GAZE" in raw_extras["rec_info"]:
        logger.info(
            "Pixel coordinate data detected. "
            "Pass `scalings=dict(eyegaze=1e3)` when using plot"
            " method to make traces more legible."
        )

    elif "HREF" in raw_extras["rec_info"]:
        logger.info("Head-referenced eye-angle (HREF) data detected.")
    elif "PUPIL" in raw_extras["rec_info"]:
        warn("Raw eyegaze coordinates detected. Analyze with caution.")
    if "AREA" in raw_extras["pupil_info"]:
        logger.info("Pupil-size area detected.")
    elif "DIAMETER" in raw_extras["pupil_info"]:
        logger.info("Pupil-size diameter detected.")
    # If more than 1 recording period, check whether eye being tracked changed.
    if raw_extras["n_blocks"] > 1:
        if raw_extras["tracking_mode"] == "monocular":
            blocks_list = raw_extras["event_lines"]["SAMPLES"]
            eye_per_block = [block_info[1].lower() for block_info in blocks_list]
            if not all([this_eye == raw_extras["eye"] for this_eye in eye_per_block]):
                warn(
                    "The eye being tracked changed during the"
                    " recording. The channel names will reflect"
                    " the eye that was tracked at the start of"
                    " the recording."
                )


def _get_recording_datetime(fname):
    """Create a datetime object from the datetime in ASCII file."""
    # create a timezone object for UTC
    tz = timezone(timedelta(hours=0))
    in_header = False
    with fname.open() as file:
        for line in file:
            # header lines are at top of file and start with **
            if line.startswith("**"):
                in_header = True
            if in_header:
                if line.startswith("** DATE:"):
                    dt_str = line.replace("** DATE:", "").strip()
                    fmt = "%a %b %d %H:%M:%S %Y"
                    # Eyelink measdate timestamps are timezone naive.
                    # Force datetime to be in UTC.
                    # Even though dt is probably in local time zone.
                    try:
                        dt_naive = datetime.strptime(dt_str, fmt)
                    except ValueError:
                        # date string is missing or in an unexpected format
                        logger.info(
                            "Could not detect date from file with date entry: "
                            f"{repr(dt_str)}"
                        )
                        return
                    else:
                        return dt_naive.replace(tzinfo=tz)  # make it dt aware
    return


def _get_metadata(raw_extras):
    """Get tracking mode, sfreq, eye tracked, pupil metric, etc.

    Don't call this until after _parse_recording_blocks.
    """
    meta_data = dict()
    meta_data["rec_info"] = raw_extras["event_lines"]["SAMPLES"][0]
    if ("LEFT" in meta_data["rec_info"]) and ("RIGHT" in meta_data["rec_info"]):
        meta_data["tracking_mode"] = "binocular"
        meta_data["eye"] = "both"
    else:
        meta_data["tracking_mode"] = "monocular"
        meta_data["eye"] = meta_data["rec_info"][1].lower()
    meta_data["first_samp"] = float(raw_extras["event_lines"]["START"][0][0])
    meta_data["sfreq"] = _get_sfreq_from_ascii(meta_data["rec_info"])
    meta_data["pupil_info"] = raw_extras["event_lines"]["PUPIL"][0]
    meta_data["n_blocks"] = len(raw_extras["event_lines"]["START"])
    return meta_data


def _is_sys_msg(line):
    """Flag lines from eyelink ASCII file that contain a known system message.

    Some lines in eyelink files are system outputs usually
    only meant for Eyelink's DataViewer application to read.
    These shouldn't need to be parsed.

    Parameters
    ----------
    line : string
        single line from Eyelink asc file

    Returns
    -------
    bool :
        True if any of the following strings that are
        known to indicate a system message are in the line

    Notes
    -----
    Examples of eyelink system messages:
    - ;Sess:22Aug22;Tria:1;Tri2:False;ESNT:182BFE4C2F4;
    - ;NTPT:182BFE55C96;SMSG:__NTP_CLOCK_SYNC__;DIFF:-1;
    - !V APLAYSTART 0 1 library/audio
    - !MODE RECORD CR 500 2 1 R
    """
    return "!V" in line or "!MODE" in line or ";" in line


def _get_sfreq_from_ascii(rec_info):
    """Get sampling frequency from Eyelink ASCII file.

    Parameters
    ----------
    rec_info : list
        the first list in raw_extras["event_lines"]['SAMPLES'].
        The sfreq occurs after RATE: i.e. [..., RATE, 1000, ...].

    Returns
    -------
    sfreq : float
    """
    return float(rec_info[rec_info.index("RATE") + 1])
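
# Worked example (illustrative tokens): given a parsed SAMPLES line such as
# rec_info = ["GAZE", "LEFT", "RATE", "1000.00", "TRACKING", "CR"],
# _get_sfreq_from_ascii(rec_info) returns 1000.0.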


def _create_dataframes(raw_extras, apply_offsets):
    """Create pandas.DataFrame for Eyelink samples and events.

    Creates a pandas DataFrame for sample_lines and for each
    non-empty key in event_lines.
    """
    pd = _check_pandas_installed()
    df_dict = dict()

    # dataframe for samples
    df_dict["samples"] = pd.DataFrame(raw_extras["sample_lines"])
    df_dict["samples"] = _drop_status_col(df_dict["samples"])  # drop STATUS col

    # dataframe for each type of ocular event
    for event, label in zip(
        ["EFIX", "ESACC", "EBLINK"], ["fixations", "saccades", "blinks"]
    ):
        if raw_extras["event_lines"][event]:  # an empty list returns False
            df_dict[label] = pd.DataFrame(raw_extras["event_lines"][event])
        else:
            logger.info(
                f"No {label} were found in this file. "
                f"Not returning any info on {label}."
            )

    # make dataframe for experiment messages
    if raw_extras["event_lines"]["MSG"]:
        msgs = []
        for token in raw_extras["event_lines"]["MSG"]:
            if apply_offsets and len(token) == 2:
                ts, msg = token
                offset = np.nan
            elif apply_offsets:
                ts = token[0]
                try:
                    offset = float(token[1])
                    msg = " ".join(str(x) for x in token[2:])
                except ValueError:
                    offset = np.nan
                    msg = " ".join(str(x) for x in token[1:])
            else:
                ts, offset = token[0], np.nan
                msg = " ".join(str(x) for x in token[1:])
            msgs.append([ts, offset, msg])
        df_dict["messages"] = pd.DataFrame(msgs)

    # make dataframe for recording block start, end times
    i = 1
    blocks = list()
    for bgn, end in zip(
        raw_extras["event_lines"]["START"], raw_extras["event_lines"]["END"]
    ):
        blocks.append((float(bgn[0]), float(end[0]), i))
        i += 1
    cols = ["time", "end_time", "block"]
    df_dict["recording_blocks"] = pd.DataFrame(blocks, columns=cols)

    # TODO: Make dataframes for other eyelink events (Buttons)
    return df_dict


def _drop_status_col(samples_df):
    """Drop STATUS column from samples dataframe.

    see https://github.com/mne-tools/mne-python/issues/11809, and section 4.9.2.1 of
    the Eyelink 1000 Plus User Manual, version 1.0.19. We know that the STATUS
    column is either 3, 5, 13, or 17 characters long, i.e. "...", ".....", ".C."
    """
    status_cols = []
    # we know the first 3 columns will be the time, xpos, ypos
    for col in samples_df.columns[3:]:
        if samples_df[col][0][0].isnumeric():
            # if the value is numeric, it's not a status column
            continue
        if len(samples_df[col][0]) in [3, 5, 13, 17]:
            status_cols.append(col)
    return samples_df.drop(columns=status_cols)


def _infer_col_names(raw_extras):
    """Build column and channel names for data from Eyelink ASCII file.

    Returns the expected column names for the sample lines and event
    lines, to be passed into pd.DataFrame. The columns present in an eyelink ASCII
    file can vary. The order that col_names are built below should NOT change.
    """
    col_names = {}
    # initiate the column names for the sample lines
    col_names["samples"] = list(EYELINK_COLS["timestamp"])

    # and for the eye message lines
    col_names["blinks"] = list(EYELINK_COLS["eye_event"])
    col_names["fixations"] = list(EYELINK_COLS["eye_event"] + EYELINK_COLS["fixation"])
    col_names["saccades"] = list(EYELINK_COLS["eye_event"] + EYELINK_COLS["saccade"])

    # Recording was either binocular or monocular
    # If monocular, find out which eye was tracked and append to ch_name
    if raw_extras["tracking_mode"] == "monocular":
        eye = raw_extras["eye"]
        ch_names = list(EYELINK_COLS["pos"][eye])
    elif raw_extras["tracking_mode"] == "binocular":
        ch_names = list(EYELINK_COLS["pos"]["left"] + EYELINK_COLS["pos"]["right"])
    col_names["samples"].extend(ch_names)

    # The order of these if statements should not be changed.
    if "VEL" in raw_extras["rec_info"]:  # If velocity data are reported
        if raw_extras["tracking_mode"] == "monocular":
            ch_names.extend(EYELINK_COLS["velocity"][eye])
            col_names["samples"].extend(EYELINK_COLS["velocity"][eye])
        elif raw_extras["tracking_mode"] == "binocular":
            ch_names.extend(
                EYELINK_COLS["velocity"]["left"] + EYELINK_COLS["velocity"]["right"]
            )
            col_names["samples"].extend(
                EYELINK_COLS["velocity"]["left"] + EYELINK_COLS["velocity"]["right"]
            )
    # if resolution data are reported
    if "RES" in raw_extras["rec_info"]:
        ch_names.extend(EYELINK_COLS["resolution"])
        col_names["samples"].extend(EYELINK_COLS["resolution"])
        col_names["fixations"].extend(EYELINK_COLS["resolution"])
        col_names["saccades"].extend(EYELINK_COLS["resolution"])
    # if digital input port values are reported
    if "INPUT" in raw_extras["rec_info"]:
        ch_names.extend(EYELINK_COLS["input"])
        col_names["samples"].extend(EYELINK_COLS["input"])

    # if head target info was reported, add its cols
    if "HTARGET" in raw_extras["rec_info"]:
        ch_names.extend(EYELINK_COLS["remote"])
        col_names["samples"].extend(EYELINK_COLS["remote"])

    return col_names, ch_names


def _assign_col_names(col_names, df_dict):
    """Assign column names to dataframes.

    Parameters
    ----------
    col_names : dict
        Dictionary of column names for each dataframe.
    """
    for key, df in df_dict.items():
        if key in ("samples", "blinks", "fixations", "saccades"):
            df.columns = col_names[key]
        elif key == "messages":
            cols = ["time", "offset", "event_msg"]
            df.columns = cols
    return df_dict


def _set_df_dtypes(df_dict):
    from mne.utils import _set_pandas_dtype

    for key, df in df_dict.items():
        if key in ["samples"]:
            # convert missing position values to NaN
            _set_missing_values(df, df.columns[1:])
            _set_pandas_dtype(df, df.columns, float, verbose="warning")
        elif key in ["blinks", "fixations", "saccades"]:
            _set_missing_values(df, df.columns[1:])
            _set_pandas_dtype(df, df.columns[1:], float, verbose="warning")
        elif key == "messages":
            _set_pandas_dtype(df, ["time"], float, verbose="warning")  # timestamp
    return df_dict


def _set_missing_values(df, columns):
    """Set missing values to NaN. Operates in-place."""
    missing_vals = (".", "MISSING_DATA")
    for col in columns:
        # we explicitly use numpy instead of pd.replace because it is faster
        df[col] = np.where(df[col].isin(missing_vals), np.nan, df[col])


def _sort_by_time(df, col="time"):
    df.sort_values(col, ascending=True, inplace=True)
    df.reset_index(drop=True, inplace=True)


def _convert_times(df, first_samp, col="time"):
    """Set initial time to 0, converting from ms to seconds in place.

    Parameters
    ----------
    df pandas.DataFrame:
        One of the dataframes in raw_extras["dfs"] dict.

    first_samp int:
        timestamp of the first sample of the recording. This should
        be the first sample of the first recording block.
    col str (default 'time'):
        column name to sort pandas.DataFrame by

    Notes
    -----
    Each sample in an Eyelink file has a posix timestamp string.
    Subtracts the "first" sample's timestamp from each timestamp.
    The "first" sample is inferred to be the first sample of
    the first recording block, i.e. the first "START" line.
    """
    _sort_by_time(df, col)
    for col in df.columns:
        if col.endswith("time"):  # 'time' and 'end_time' cols
            df[col] -= first_samp
            df[col] /= 1000
        if col in ["duration", "offset"]:
            df[col] /= 1000
    return df


def _adjust_times(
    df,
    sfreq,
    time_col="time",
):
    """Fill missing timestamps if there are multiple recording blocks.

    Parameters
    ----------
    df : pandas.DataFrame:
        dataframe of the eyetracking data samples, BEFORE
        _convert_times() is applied to the dataframe

    sfreq : int | float:
        sampling frequency of the data

    time_col : str (default 'time'):
        name of column with the timestamps (e.g. 9511881, 9511882, ...)

    Returns
    -------
    %(df_return)s

    Notes
    -----
    After _parse_recording_blocks, files with multiple recording blocks will
    have missing timestamps for the duration of the period between the blocks.
    This would cause the ocular annotations (i.e. blinks) to not line up with
    the signal.
    """
    pd = _check_pandas_installed()

    first, last = df[time_col].iloc[[0, -1]]
    step = 1000 / sfreq
    df[time_col] = df[time_col].astype(float)
    new_times = pd.DataFrame(
        np.arange(first, last + step / 2, step), columns=[time_col]
    )
    return pd.merge_asof(
        new_times, df, on=time_col, direction="nearest", tolerance=step / 2
    )
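
# Illustrative note: at sfreq = 500 the step is 2 ms and the tolerance 1 ms,
# so timestamps [1000, 1002, 2000, 2002] are reindexed onto a regular 2 ms
# grid from 1000 to 2002; grid rows in the gap (1004 ... 1998) have no sample
# within tolerance and are filled with NaN, which is later annotated as
# BAD_ACQ_SKIP.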


def _find_overlaps(df, max_time=0.05):
    """Merge left/right eye events with onset/offset diffs less than max_time.

    Parameters
    ----------
    df : pandas.DataFrame
        Pandas DataFrame with ocular events (fixations, saccades, blinks)
    max_time : float (default 0.05)
        Time in seconds. Defaults to .05 (50 ms)

    Returns
    -------
    DataFrame: %(df_return)s
        :class:`pandas.DataFrame` specifying overlapped eye events, if any

    Notes
    -----
    The idea is to cumulatively sum the boolean values for rows with onset and
    offset differences (against the previous row) that are greater than the
    max_time. If onset and offset diffs are less than max_time then no_overlap
    will become False. Alternatively, if either the onset or offset diff is
    greater than max_time, no_overlap becomes True. Cumulatively summing over
    these boolean values will leave rows with no_overlap == False unchanged
    and hence with the same group number.
    """
    pd = _check_pandas_installed()

    if not len(df):
        return
    df["overlap_start"] = df.sort_values("time")["time"].diff().lt(max_time)

    df["overlap_end"] = df["end_time"].diff().abs().lt(max_time)

    df["no_overlap"] = ~(df["overlap_end"] & df["overlap_start"])
    df["group"] = df["no_overlap"].cumsum()

    # now use groupby on 'group'. If one left and one right eye in group
    # the new start/end times are the mean of the two eyes
    ovrlp = pd.concat(
        [
            pd.DataFrame(g[1].drop(columns="eye").mean()).T
            if (len(g[1]) == 2) and (len(g[1].eye.unique()) == 2)
            else g[1]  # not an overlap, return group unchanged
            for g in df.groupby("group")
        ]
    )
    # overlapped events get a "both" value in the "eye" col
    if "eye" in ovrlp.columns:
        ovrlp["eye"] = ovrlp["eye"].fillna("both")
    else:
        ovrlp["eye"] = "both"
    tmp_cols = ["overlap_start", "overlap_end", "no_overlap", "group"]
    return ovrlp.drop(columns=tmp_cols).reset_index(drop=True)


def _convert_href_samples(samples_df):
    """Convert HREF eyegaze samples to radians."""
    # grab the xpos and ypos channel names
    pos_names = EYELINK_COLS["pos"]["left"][:-1] + EYELINK_COLS["pos"]["right"][:-1]
    for col in samples_df.columns:
        if col not in pos_names:  # 'xpos_left' ... 'ypos_right'
            continue
        series = _href_to_radian(samples_df[col])
        samples_df[col] = series
    return samples_df


def _href_to_radian(opposite, f=15_000):
    """Convert HREF eyegaze samples to radians.

    Parameters
    ----------
    opposite : int
        The x or y coordinate in an HREF gaze sample.
    f : int (default 15_000)
        distance of plane from the eye. Defaults to 15,000 units, which was taken
        from the Eyelink 1000 plus user manual.

    Returns
    -------
    x or y coordinate in radians

    Notes
    -----
    See section 4.4.2.2 in the Eyelink 1000 Plus User Manual
    (version 1.0.19) for a detailed description of HREF data.
    """
    return np.arcsin(opposite / f)
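
# Worked example (illustrative): an HREF x coordinate of 260 units maps to
# np.arcsin(260 / 15_000) ~= 0.0173 rad, about 0.99 degrees of visual angle.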


def _create_info(ch_names, raw_extras):
    """Create info object for RawEyelink."""
    # assign channel type from ch_name
    pos_names = EYELINK_COLS["pos"]["left"][:-1] + EYELINK_COLS["pos"]["right"][:-1]
    pupil_names = EYELINK_COLS["pos"]["left"][-1] + EYELINK_COLS["pos"]["right"][-1]
    ch_types = [
        "eyegaze"
        if ch in pos_names
        else "pupil"
        if ch in pupil_names
        else "stim"
        if ch == "DIN"
        else "misc"
        for ch in ch_names
    ]
    info = create_info(ch_names, raw_extras["sfreq"], ch_types)
    # set correct loc for eyepos and pupil channels
    for ch_dict in info["chs"]:
        # loc index 3 can indicate left or right eye
        if ch_dict["ch_name"].endswith("left"):  # [x,y,pupil]_left
            ch_dict["loc"][3] = -1  # left eye
        elif ch_dict["ch_name"].endswith("right"):  # [x,y,pupil]_right
            ch_dict["loc"][3] = 1  # right eye
        else:
            logger.debug(
                f"leaving index 3 of loc array as"
                f" {ch_dict['loc'][3]} for {ch_dict['ch_name']}"
            )
        # loc index 4 can indicate x/y coord
        if ch_dict["ch_name"].startswith("x"):
            ch_dict["loc"][4] = -1  # x-coord
        elif ch_dict["ch_name"].startswith("y"):
            ch_dict["loc"][4] = 1  # y-coord
        else:
            logger.debug(
                f"leaving index 4 of loc array as"
                f" {ch_dict['loc'][4]} for {ch_dict['ch_name']}"
            )
        if "HREF" in raw_extras["rec_info"]:
            if ch_dict["ch_name"].startswith(("xpos", "ypos")):
                ch_dict["unit"] = FIFF.FIFF_UNIT_RAD
    return info


def _make_eyelink_annots(df_dict, create_annots, apply_offsets):
    """Create Annotations for each df in raw_extras."""
    eye_ch_map = {
        "L": ("xpos_left", "ypos_left", "pupil_left"),
        "R": ("xpos_right", "ypos_right", "pupil_right"),
        "both": (
            "xpos_left",
            "ypos_left",
            "pupil_left",
            "xpos_right",
            "ypos_right",
            "pupil_right",
        ),
    }
    valid_descs = ["blinks", "saccades", "fixations", "messages"]
    msg = (
        "create_annotations must be True or a list containing one or"
        f" more of {valid_descs}."
    )
    wrong_type = msg + f" Got a {type(create_annots)} instead."
    if create_annots is True:
        descs = valid_descs
    else:
        if not isinstance(create_annots, list):
            raise TypeError(wrong_type)
        for desc in create_annots:
            if desc not in valid_descs:
                raise ValueError(msg + f" Got '{desc}' instead.")
        descs = create_annots

    annots = None
    for key, df in df_dict.items():
        eye_annot_cond = (key in ["blinks", "fixations", "saccades"]) and (key in descs)
        if eye_annot_cond:
            onsets = df["time"]
            durations = df["duration"]
            # Create annotations for both eyes
            descriptions = key[:-1]  # i.e. "blink", "fixation", "saccade"
            if key == "blinks":
                descriptions = "BAD_" + descriptions
            ch_names = df["eye"].map(eye_ch_map).tolist()
            this_annot = Annotations(
                onset=onsets,
                duration=durations,
                description=descriptions,
                ch_names=ch_names,
            )
        elif (key in ["messages"]) and (key in descs):
            if apply_offsets:
                # If df['offset'] is all NaNs, time is not changed
                onsets = df["time"] + df["offset"].fillna(0)
            else:
                onsets = df["time"]
            durations = [0] * len(onsets)
            descriptions = df["event_msg"]
            this_annot = Annotations(
                onset=onsets, duration=durations, description=descriptions
            )
        else:
            continue  # TODO make df and annotations for Buttons
        if not annots:
            annots = this_annot
        elif annots:
            annots += this_annot
    if not annots:
        warn(f"Annotations for {descs} were requested but none could be made.")
        return
    return annots


def _make_gap_annots(raw_extras, key="recording_blocks"):
    """Create Annotations for gap periods between recording blocks."""
    df = raw_extras["dfs"][key]
    onsets = df["end_time"].iloc[:-1]
    diffs = df["time"].shift(-1) - df["end_time"]
    durations = diffs.iloc[:-1]
    descriptions = ["BAD_ACQ_SKIP"] * len(onsets)
    return Annotations(onset=onsets, duration=durations, description=descriptions)


# ======================== Used by read_eyelink-calibration ===========================


def _find_recording_start(lines):
    """Return the first START line in an SR Research EyeLink ASCII file.

    Parameters
    ----------
    lines : list of str
        The lines in an EyeLink ASCII file.

    Returns
    -------
    The line that contains the info on the start of the recording.
    """
    for line in lines:
        if line.startswith("START"):
            return line
    raise ValueError("Could not find the start of the recording.")


def _parse_validation_line(line):
    """Parse a single line of EyeLink validation data.

    Parameters
    ----------
    line : str
        A line of validation data from an EyeLink ASCII file.

    Returns
    -------
    A tuple containing the validation data.
    """
    tokens = line.split()
    xy = tokens[-6].strip("[]").split(",")  # e.g. '960, 540'
    xy_diff = tokens[-2].strip("[]").split(",")  # e.g. '-1.5, -2.8'
    vals = [float(v) for v in [*xy, tokens[-4], *xy_diff]]
    vals[3] += vals[0]  # pos_x + eye_x i.e. 960 + -1.5
    vals[4] += vals[1]  # pos_y + eye_y

    return tuple(vals)
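

# --- Illustrative sketch (not part of the module): parsing a hypothetical
# validation line. The token layout (target x,y at index -6, error magnitude
# at -4, pixel offsets at -2) is an assumption based on the indexing above,
# not a confirmed EyeLink format.
# >>> line = "VALIDATE ... POINT 1 LEFT at 960,540 OFFSET 0.35 deg. -1.5,-2.8 pix."
# >>> _parse_validation_line(line)
# (960.0, 540.0, 0.35, 958.5, 537.2)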


def _parse_calibration(
    lines, screen_size=None, screen_distance=None, screen_resolution=None
):
    """Parse the given lines and return a list of Calibration instances.

    Parameters
    ----------
    lines : list of str
        The lines in an EyeLink ASCII file.

    Returns
    -------
    A list containing one or more Calibration instances,
    one for each calibration that was recorded in the EyeLink ASCII file
    data.
    """
    from ...preprocessing.eyetracking.calibration import Calibration

    regex = re.compile(r"\d+")  # for finding numeric characters
    calibrations = list()
    rec_start = float(_find_recording_start(lines).split()[1])

    for line_number, line in enumerate(lines):
        if (
            "!CAL VALIDATION " in line and "ABORTED" not in line
        ):  # Start of a calibration
            tokens = line.split()
            model = tokens[4]  # e.g. 'HV13'
            this_eye = tokens[6].lower()  # e.g. 'left'
            timestamp = float(tokens[1])
            onset = (timestamp - rec_start) / 1000.0  # in seconds
            avg_error = float(line.split("avg.")[0].split()[-1])  # e.g. 0.3
            max_error = float(line.split("max")[0].split()[-1])  # e.g. 0.9

            n_points = int(regex.search(model).group())  # e.g. 13
            n_points *= 2 if "LR" in line else 1  # one point per eye if "LR"
            # The next n_point lines contain the validation data
            points = []
            for validation_index in range(n_points):
                subline = lines[line_number + validation_index + 1]
                if "!CAL VALIDATION" in subline:
                    continue  # for bino mode, skip the second eye's validation summary
                subline_eye = subline.split("at")[0].split()[-1].lower()  # e.g. 'left'
                if subline_eye != this_eye:
                    continue  # skip the validation lines for the other eye
                point_info = _parse_validation_line(subline)
                points.append(point_info)
            # Convert the list of validation data into a numpy array
            positions = np.array([point[:2] for point in points])
            offsets = np.array([point[2] for point in points])
            gaze = np.array([point[3:] for point in points])
            # create the Calibration instance
            calibration = Calibration(
                onset=onset,
                model=model,
                eye=this_eye,
                avg_error=avg_error,
                max_error=max_error,
                positions=positions,
                offsets=offsets,
                gaze=gaze,
                screen_size=screen_size,
                screen_distance=screen_distance,
                screen_resolution=screen_resolution,
            )
            calibrations.append(calibration)
    return calibrations
133
mne/io/eyelink/eyelink.py
Normal file
133
mne/io/eyelink/eyelink.py
Normal file
@@ -0,0 +1,133 @@
"""SR Research Eyelink Load Function."""
|
||||
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from ...utils import (
|
||||
_check_fname,
|
||||
fill_doc,
|
||||
logger,
|
||||
verbose,
|
||||
)
|
||||
from ..base import BaseRaw
|
||||
from ._utils import _make_eyelink_annots, _make_gap_annots, _parse_eyelink_ascii
|
||||
|
||||
|
||||
@fill_doc
|
||||
def read_raw_eyelink(
|
||||
fname,
|
||||
*,
|
||||
create_annotations=True,
|
||||
apply_offsets=False,
|
||||
find_overlaps=False,
|
||||
overlap_threshold=0.05,
|
||||
verbose=None,
|
||||
) -> "RawEyelink":
|
||||
"""Reader for an Eyelink ``.asc`` file.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
%(eyelink_fname)s
|
||||
%(eyelink_create_annotations)s
|
||||
%(eyelink_apply_offsets)s
|
||||
%(eyelink_find_overlaps)s
|
||||
%(eyelink_overlap_threshold)s
|
||||
%(verbose)s
|
||||
|
||||
Returns
|
||||
-------
|
||||
raw : instance of RawEyelink
|
||||
A Raw object containing eyetracker data.
|
||||
|
||||
See Also
|
||||
--------
|
||||
mne.io.Raw : Documentation of attribute and methods.
|
||||
|
||||
Notes
|
||||
-----
|
||||
It is common for SR Research Eyelink eye trackers to only record data during trials.
|
||||
To avoid frequent data discontinuities and to ensure that the data is continuous
|
||||
so that it can be aligned with EEG and MEG data (if applicable), this reader will
|
||||
preserve the times between recording trials and annotate them with
|
||||
``'BAD_ACQ_SKIP'``.
|
||||
"""
|
||||
fname = _check_fname(fname, overwrite="read", must_exist=True, name="fname")
|
||||
|
||||
raw_eyelink = RawEyelink(
|
||||
fname,
|
||||
create_annotations=create_annotations,
|
||||
apply_offsets=apply_offsets,
|
||||
find_overlaps=find_overlaps,
|
||||
overlap_threshold=overlap_threshold,
|
||||
verbose=verbose,
|
||||
)
|
||||
return raw_eyelink
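

# --- Illustrative usage sketch (not part of the module; the filename is
# hypothetical):
# >>> raw = read_raw_eyelink("sub-01_task-freeview_eyetrack.asc",
# ...                        create_annotations=["blinks", "messages"])
# >>> raw.annotations  # blink periods are marked "BAD_blink"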


@fill_doc
class RawEyelink(BaseRaw):
    """Raw object from an Eyelink ``.asc`` file.

    Parameters
    ----------
    %(eyelink_fname)s
    %(eyelink_create_annotations)s
    %(eyelink_apply_offsets)s
    %(eyelink_find_overlaps)s
    %(eyelink_overlap_threshold)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """

    @verbose
    def __init__(
        self,
        fname,
        *,
        create_annotations=True,
        apply_offsets=False,
        find_overlaps=False,
        overlap_threshold=0.05,
        verbose=None,
    ):
        logger.info(f"Loading {fname}")

        fname = Path(fname)

        # ======================== Parse ASCII file ==========================
        eye_ch_data, info, raw_extras = _parse_eyelink_ascii(
            fname, find_overlaps, overlap_threshold, apply_offsets
        )
        # ======================== Create Raw Object =========================
        super().__init__(
            info,
            preload=eye_ch_data,
            filenames=[fname],
            verbose=verbose,
            raw_extras=[raw_extras],
        )
        self.set_meas_date(self._raw_extras[0]["dt"])

        # ======================== Make Annotations =========================
        gap_annots = None
        if self._raw_extras[0]["n_blocks"] > 1:
            gap_annots = _make_gap_annots(self._raw_extras[0])
        eye_annots = None
        if create_annotations:
            eye_annots = _make_eyelink_annots(
                self._raw_extras[0]["dfs"], create_annotations, apply_offsets
            )
        if gap_annots and eye_annots:  # set both
            self.set_annotations(gap_annots + eye_annots)
        elif gap_annots:
            self.set_annotations(gap_annots)
        elif eye_annots:
            self.set_annotations(eye_annots)
        else:
            logger.info("Not creating any annotations")
5
mne/io/fieldtrip/__init__.py
Normal file
5
mne/io/fieldtrip/__init__.py
Normal file
@@ -0,0 +1,5 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .fieldtrip import read_evoked_fieldtrip, read_epochs_fieldtrip, read_raw_fieldtrip
185
mne/io/fieldtrip/fieldtrip.py
Normal file
185
mne/io/fieldtrip/fieldtrip.py
Normal file
@@ -0,0 +1,185 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import numpy as np

from ...epochs import EpochsArray
from ...evoked import EvokedArray
from ...utils import _check_fname, _import_pymatreader_funcs
from ..array.array import RawArray
from .utils import (
    _create_event_metadata,
    _create_events,
    _create_info,
    _set_tmin,
    _validate_ft_struct,
)


def read_raw_fieldtrip(fname, info, data_name="data") -> RawArray:
    """Load continuous (raw) data from a FieldTrip preprocessing structure.

    This function expects to find single trial raw data (FT_DATATYPE_RAW) in
    the structure data_name is pointing at.

    .. warning:: FieldTrip does not normally store the original information
                 concerning channel location, orientation, type etc. It is
                 therefore **highly recommended** to provide the info field.
                 This can be obtained by reading the original raw data file
                 with MNE functions (without preload). The returned object
                 contains the necessary info field.

    Parameters
    ----------
    fname : path-like
        Path and filename of the ``.mat`` file containing the data.
    info : dict or None
        The info dict of the raw data file corresponding to the data to import.
        If this is set to None, limited information is extracted from the
        FieldTrip structure.
    data_name : str
        Name of heading dict/variable name under which the data was originally
        saved in MATLAB.

    Returns
    -------
    raw : instance of RawArray
        A Raw Object containing the loaded data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawArray.
    """
    read_mat = _import_pymatreader_funcs("FieldTrip I/O")
    fname = _check_fname(fname, overwrite="read", must_exist=True)

    ft_struct = read_mat(fname, ignore_fields=["previous"], variable_names=[data_name])

    # load data and set ft_struct to the heading dictionary
    ft_struct = ft_struct[data_name]

    _validate_ft_struct(ft_struct)

    info = _create_info(ft_struct, info)  # create info structure
    data = np.array(ft_struct["trial"])  # create the main data array

    if data.ndim > 2:
        data = np.squeeze(data)

    if data.ndim == 1:
        data = data[np.newaxis, ...]

    if data.ndim != 2:
        raise RuntimeError(
            "The data you are trying to load does not seem to be raw data"
        )

    raw = RawArray(data, info)  # create an MNE RawArray
    return raw
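

# --- Illustrative usage sketch (not part of the module; filenames are
# hypothetical). The info is best taken from the original raw file, as the
# warning above recommends:
# >>> import mne
# >>> info = mne.io.read_raw_fif("original_raw.fif", preload=False).info
# >>> raw = read_raw_fieldtrip("preprocessed.mat", info=info, data_name="data")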


def read_epochs_fieldtrip(
    fname, info, data_name="data", trialinfo_column=0
) -> EpochsArray:
    """Load epoched data from a FieldTrip preprocessing structure.

    This function expects to find epoched data in the structure data_name is
    pointing at.

    .. warning:: Only epochs with the same amount of channels and samples are
                 supported!

    .. warning:: FieldTrip does not normally store the original information
                 concerning channel location, orientation, type etc. It is
                 therefore **highly recommended** to provide the info field.
                 This can be obtained by reading the original raw data file
                 with MNE functions (without preload). The returned object
                 contains the necessary info field.

    Parameters
    ----------
    fname : path-like
        Path and filename of the ``.mat`` file containing the data.
    info : dict or None
        The info dict of the raw data file corresponding to the data to import.
        If this is set to None, limited information is extracted from the
        FieldTrip structure.
    data_name : str
        Name of heading dict/variable name under which the data was originally
        saved in MATLAB.
    trialinfo_column : int
        Column of the trialinfo matrix to use for the event codes.

    Returns
    -------
    epochs : instance of EpochsArray
        An EpochsArray containing the loaded data.
    """
    read_mat = _import_pymatreader_funcs("FieldTrip I/O")
    ft_struct = read_mat(fname, ignore_fields=["previous"], variable_names=[data_name])

    # load data and set ft_struct to the heading dictionary
    ft_struct = ft_struct[data_name]

    _validate_ft_struct(ft_struct)

    info = _create_info(ft_struct, info)  # create info structure
    data = np.array(ft_struct["trial"])  # create the epochs data array
    events = _create_events(ft_struct, trialinfo_column)
    if events is not None:
        metadata = _create_event_metadata(ft_struct)
    else:
        metadata = None
    tmin = _set_tmin(ft_struct)  # create start time

    epochs = EpochsArray(
        data=data, info=info, tmin=tmin, events=events, metadata=metadata, proj=False
    )
    return epochs
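

# --- Illustrative usage sketch (not part of the module; filenames are
# hypothetical). trialinfo_column selects which trialinfo column becomes the
# event code:
# >>> import mne
# >>> info = mne.io.read_raw_fif("original_raw.fif", preload=False).info
# >>> epochs = read_epochs_fieldtrip("epoched.mat", info=info,
# ...                                trialinfo_column=0)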


def read_evoked_fieldtrip(fname, info, comment=None, data_name="data"):
    """Load evoked data from a FieldTrip timelocked structure.

    This function expects to find timelocked data in the structure data_name is
    pointing at.

    .. warning:: FieldTrip does not normally store the original information
                 concerning channel location, orientation, type etc. It is
                 therefore **highly recommended** to provide the info field.
                 This can be obtained by reading the original raw data file
                 with MNE functions (without preload). The returned object
                 contains the necessary info field.

    Parameters
    ----------
    fname : path-like
        Path and filename of the ``.mat`` file containing the data.
    info : dict or None
        The info dict of the raw data file corresponding to the data to import.
        If this is set to None, limited information is extracted from the
        FieldTrip structure.
    comment : str
        Comment on dataset. Can be the condition.
    data_name : str
        Name of heading dict/variable name under which the data was originally
        saved in MATLAB.

    Returns
    -------
    evoked : instance of EvokedArray
        An EvokedArray containing the loaded data.
    """
    read_mat = _import_pymatreader_funcs("FieldTrip I/O")
    ft_struct = read_mat(fname, ignore_fields=["previous"], variable_names=[data_name])
    ft_struct = ft_struct[data_name]

    _validate_ft_struct(ft_struct)

    info = _create_info(ft_struct, info)  # create info structure
    data_evoked = ft_struct["avg"]  # create evoked data

    evoked = EvokedArray(data_evoked, info, comment=comment)
    return evoked
367
mne/io/fieldtrip/utils.py
Normal file
367
mne/io/fieldtrip/utils.py
Normal file
@@ -0,0 +1,367 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import numpy as np

from ..._fiff._digitization import DigPoint, _ensure_fiducials_head
from ..._fiff.constants import FIFF
from ..._fiff.meas_info import create_info
from ..._fiff.pick import pick_info
from ...transforms import rotation3d_align_z_axis
from ...utils import _check_pandas_installed, warn

_supported_megs = ["neuromag306"]

_unit_dict = {
    "m": 1,
    "cm": 1e-2,
    "mm": 1e-3,
    "V": 1,
    "mV": 1e-3,
    "uV": 1e-6,
    "T": 1,
    "T/m": 1,
    "T/cm": 1e2,
}

NOINFO_WARNING = (
    "Importing FieldTrip data without an info dict from the "
    "original file. Channel locations, orientations and types "
    "will be incorrect. The imported data cannot be used for "
    "source analysis, channel interpolation etc."
)


def _validate_ft_struct(ft_struct):
    """Run validation checks on the ft_structure."""
    if isinstance(ft_struct, list):
        raise RuntimeError("Loading of data in cell arrays is not supported")


def _create_info(ft_struct, raw_info):
    """Create MNE info structure from a FieldTrip structure."""
    if raw_info is None:
        warn(NOINFO_WARNING)

    sfreq = _set_sfreq(ft_struct)
    ch_names = ft_struct["label"]
    if raw_info:
        info = raw_info.copy()
        missing_channels = set(ch_names) - set(info["ch_names"])
        if missing_channels:
            warn(
                "The following channels are present in the FieldTrip data "
                f"but cannot be found in the provided info: {missing_channels}.\n"
                "These channels will be removed from the resulting data!"
            )

            missing_chan_idx = [ch_names.index(ch) for ch in missing_channels]
            new_chs = [ch for ch in ch_names if ch not in missing_channels]
            ch_names = new_chs
            ft_struct["label"] = ch_names

            if "trial" in ft_struct:
                ft_struct["trial"] = _remove_missing_channels_from_trial(
                    ft_struct["trial"], missing_chan_idx
                )

            if "avg" in ft_struct:
                if ft_struct["avg"].ndim == 2:
                    ft_struct["avg"] = np.delete(
                        ft_struct["avg"], missing_chan_idx, axis=0
                    )

        with info._unlock():
            info["sfreq"] = sfreq
        ch_idx = [info["ch_names"].index(ch) for ch in ch_names]
        pick_info(info, ch_idx, copy=False)
    else:
        info = create_info(ch_names, sfreq)
        chs, dig = _create_info_chs_dig(ft_struct)
        with info._unlock(update_redundant=True):
            info.update(chs=chs, dig=dig)

    return info


def _remove_missing_channels_from_trial(trial, missing_chan_idx):
    if isinstance(trial, list):
        for idx_trial in range(len(trial)):
            trial[idx_trial] = _remove_missing_channels_from_trial(
                trial[idx_trial], missing_chan_idx
            )
    elif isinstance(trial, np.ndarray):
        if trial.ndim == 2:
            trial = np.delete(trial, missing_chan_idx, axis=0)
    else:
        raise ValueError(
            '"trial" field of the FieldTrip structure has an unknown format.'
        )

    return trial


def _create_info_chs_dig(ft_struct):
    """Create the chs info field from the FieldTrip structure."""
    all_channels = ft_struct["label"]
    ch_defaults = dict(
        coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
        cal=1.0,
        range=1.0,
        unit_mul=FIFF.FIFF_UNITM_NONE,
        loc=np.array([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1]),
        unit=FIFF.FIFF_UNIT_V,
    )
    try:
        elec = ft_struct["elec"]
    except KeyError:
        elec = None

    try:
        grad = ft_struct["grad"]
    except KeyError:
        grad = None

    if elec is None and grad is None:
        warn(
            "The supplied FieldTrip structure does not have an elec or grad "
            "field. No channel locations will be extracted and the kind of "
            "channel might be inaccurate."
        )
    if "chanpos" not in (elec or grad or {"chanpos": None}):
        raise RuntimeError(
            "This file was created with an old version of FieldTrip. You can "
            "convert the data to the new version by loading it into FieldTrip "
            "and applying ft_selectdata with an empty cfg structure on it. "
            "Otherwise you can supply the Info field."
        )

    chs = list()
    dig = list()
    counter = 0
    for idx_chan, cur_channel_label in enumerate(all_channels):
        cur_ch = ch_defaults.copy()
        cur_ch["ch_name"] = cur_channel_label
        cur_ch["logno"] = idx_chan + 1
        cur_ch["scanno"] = idx_chan + 1
        if elec and cur_channel_label in elec["label"]:
            cur_ch = _process_channel_eeg(cur_ch, elec)
            assert cur_ch["coord_frame"] == FIFF.FIFFV_COORD_HEAD
            # Ref gets ident=0 and we don't have it, so start at 1
            counter += 1
            d = DigPoint(
                r=cur_ch["loc"][:3],
                coord_frame=FIFF.FIFFV_COORD_HEAD,
                kind=FIFF.FIFFV_POINT_EEG,
                ident=counter,
            )
            dig.append(d)
        elif grad and cur_channel_label in grad["label"]:
            cur_ch = _process_channel_meg(cur_ch, grad)
        else:
            if cur_channel_label.startswith("EOG"):
                cur_ch["kind"] = FIFF.FIFFV_EOG_CH
                cur_ch["coil_type"] = FIFF.FIFFV_COIL_EEG
            elif cur_channel_label.startswith("ECG"):
                cur_ch["kind"] = FIFF.FIFFV_ECG_CH
                cur_ch["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR
            elif cur_channel_label.startswith("STI"):
                cur_ch["kind"] = FIFF.FIFFV_STIM_CH
                cur_ch["coil_type"] = FIFF.FIFFV_COIL_NONE
            else:
                warn(
                    f"Cannot guess the correct type of channel {cur_channel_label}. "
                    "Making it a MISC channel."
                )
                cur_ch["kind"] = FIFF.FIFFV_MISC_CH
                cur_ch["coil_type"] = FIFF.FIFFV_COIL_NONE

        chs.append(cur_ch)
    _ensure_fiducials_head(dig)

    return chs, dig


def _set_sfreq(ft_struct):
    """Set the sample frequency."""
    try:
        sfreq = ft_struct["fsample"]
    except KeyError:
        try:
            time = ft_struct["time"]
        except KeyError:
            raise ValueError("No Source for sfreq found")
        else:
            t1, t2 = float(time[0]), float(time[1])
            sfreq = 1 / (t2 - t1)
    try:
        sfreq = float(sfreq)
    except TypeError:
        warn(
            "FieldTrip structure contained multiple sample rates, trying the "
            f"first of:\n{sfreq} Hz"
        )
        sfreq = float(sfreq.ravel()[0])
    return sfreq
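

# --- Illustrative sketch (not part of the module): when "fsample" is absent,
# the rate falls back to the spacing of the first two time points. Minimal
# hypothetical struct:
# >>> import numpy as np
# >>> _set_sfreq({"time": np.array([0.0, 0.002, 0.004])})
# 500.0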


def _set_tmin(ft_struct):
    """Set the start time before the event in evoked data if possible."""
    times = ft_struct["time"]
    time_check = all(times[i][0] == times[i - 1][0] for i, x in enumerate(times))
    if time_check:
        tmin = times[0][0]
    else:
        raise RuntimeError(
            "Loading data with non-uniform times per epoch is not supported"
        )
    return tmin


def _create_events(ft_struct, trialinfo_column):
    """Create an event matrix from the FieldTrip structure."""
    if "trialinfo" not in ft_struct:
        return None

    event_type = ft_struct["trialinfo"]
    event_number = range(len(event_type))

    if trialinfo_column < 0:
        raise ValueError("trialinfo_column must be positive")

    available_ti_cols = 1
    if event_type.ndim == 2:
        available_ti_cols = event_type.shape[1]

    if trialinfo_column > (available_ti_cols - 1):
        raise ValueError(
            "trialinfo_column is higher than the amount of columns in trialinfo."
        )

    event_trans_val = np.zeros(len(event_type))

    if event_type.ndim == 2:
        event_type = event_type[:, trialinfo_column]

    events = (
        np.vstack([np.array(event_number), event_trans_val, event_type]).astype("int").T
    )

    return events
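

# --- Illustrative sketch (not part of the module): the resulting matrix has
# one row per trial with [trial index, previous value, event code].
# Hypothetical trialinfo:
# >>> import numpy as np
# >>> _create_events({"trialinfo": np.array([1, 1, 2])}, 0)
# array([[0, 0, 1],
#        [1, 0, 1],
#        [2, 0, 2]])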


def _create_event_metadata(ft_struct):
    """Create event metadata from trialinfo."""
    pandas = _check_pandas_installed(strict=False)
    if not pandas:
        warn(
            "The Pandas library is not installed. Not returning the original "
            "trialinfo matrix as metadata."
        )
        return None

    metadata = pandas.DataFrame(ft_struct["trialinfo"])

    return metadata


def _process_channel_eeg(cur_ch, elec):
    """Convert EEG channel from FieldTrip to MNE.

    Parameters
    ----------
    cur_ch : dict
        Channel-specific dictionary to populate.
    elec : dict
        The elec dict as loaded from the FieldTrip structure.

    Returns
    -------
    cur_ch : dict
        The original dict (cur_ch) with the added information.
    """
    all_labels = np.asanyarray(elec["label"])
    chan_idx_in_elec = np.where(all_labels == cur_ch["ch_name"])[0][0]
    position = np.squeeze(elec["chanpos"][chan_idx_in_elec, :])
    # chanunit = elec['chanunit'][chan_idx_in_elec]  # not used/needed yet
    position_unit = elec["unit"]

    position = position * _unit_dict[position_unit]
    cur_ch["loc"] = np.hstack((position, np.zeros((9,))))
    cur_ch["unit"] = FIFF.FIFF_UNIT_V
    cur_ch["kind"] = FIFF.FIFFV_EEG_CH
    cur_ch["coil_type"] = FIFF.FIFFV_COIL_EEG
    cur_ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD

    return cur_ch


def _process_channel_meg(cur_ch, grad):
    """Convert MEG channel from FieldTrip to MNE.

    Parameters
    ----------
    cur_ch : dict
        Channel-specific dictionary to populate.
    grad : dict
        The grad dict as loaded from the FieldTrip structure.

    Returns
    -------
    cur_ch : dict
        The original dict (cur_ch) with the added information.
    """
    all_labels = np.asanyarray(grad["label"])
    chan_idx_in_grad = np.where(all_labels == cur_ch["ch_name"])[0][0]
    gradtype = grad["type"]
    chantype = grad["chantype"][chan_idx_in_grad]
    position_unit = grad["unit"]
    position = np.squeeze(grad["chanpos"][chan_idx_in_grad, :])
    position = position * _unit_dict[position_unit]

    if gradtype == "neuromag306" and "tra" in grad and "coilpos" in grad:
        # Try to regenerate original channel pos.
        idx_in_coilpos = np.where(grad["tra"][chan_idx_in_grad, :] != 0)[0]
        cur_coilpos = grad["coilpos"][idx_in_coilpos, :]
        cur_coilpos = cur_coilpos * _unit_dict[position_unit]
        cur_coilori = grad["coilori"][idx_in_coilpos, :]
        if chantype == "megmag":
            position = cur_coilpos[0] - 0.0003 * cur_coilori[0]
        if chantype == "megplanar":
            tmp_pos = cur_coilpos - 0.0003 * cur_coilori
            position = np.average(tmp_pos, axis=0)

    original_orientation = np.squeeze(grad["chanori"][chan_idx_in_grad, :])
    try:
        orientation = rotation3d_align_z_axis(original_orientation).T
    except AssertionError:
        orientation = np.eye(3)
    assert orientation.shape == (3, 3)
    orientation = orientation.flatten()
    # chanunit = grad['chanunit'][chan_idx_in_grad]  # not used/needed yet

    cur_ch["loc"] = np.hstack((position, orientation))
    cur_ch["kind"] = FIFF.FIFFV_MEG_CH
    if chantype == "megmag":
        cur_ch["coil_type"] = FIFF.FIFFV_COIL_POINT_MAGNETOMETER
        cur_ch["unit"] = FIFF.FIFF_UNIT_T
    elif chantype == "megplanar":
        cur_ch["coil_type"] = FIFF.FIFFV_COIL_VV_PLANAR_T1
        cur_ch["unit"] = FIFF.FIFF_UNIT_T_M
    elif chantype == "refmag":
        cur_ch["coil_type"] = FIFF.FIFFV_COIL_MAGNES_REF_MAG
        cur_ch["unit"] = FIFF.FIFF_UNIT_T
    elif chantype == "refgrad":
        cur_ch["coil_type"] = FIFF.FIFFV_COIL_MAGNES_REF_GRAD
        cur_ch["unit"] = FIFF.FIFF_UNIT_T
    elif chantype == "meggrad":
        cur_ch["coil_type"] = FIFF.FIFFV_COIL_AXIAL_GRAD_5CM
        cur_ch["unit"] = FIFF.FIFF_UNIT_T
    else:
        raise RuntimeError(f"Unexpected coil type: {chantype}.")

    cur_ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD

    return cur_ch
10
mne/io/fiff/__init__.py
Normal file
10
mne/io/fiff/__init__.py
Normal file
@@ -0,0 +1,10 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

"""FIF raw data reader."""

from .raw import Raw
from .raw import read_raw_fif

RawFIF = Raw
562
mne/io/fiff/raw.py
Normal file
562
mne/io/fiff/raw.py
Normal file
@@ -0,0 +1,562 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import copy
import os.path as op
from pathlib import Path

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import read_meas_info
from ..._fiff.open import _fiff_get_fid, _get_next_fname, fiff_open
from ..._fiff.tag import _call_dict, read_tag
from ..._fiff.tree import dir_tree_find
from ..._fiff.utils import _mult_cal_one
from ...annotations import Annotations, _read_annotations_fif
from ...channels import fix_mag_coil_types
from ...event import AcqParserFIF
from ...utils import (
    _check_fname,
    _file_like,
    _on_missing,
    check_fname,
    fill_doc,
    logger,
    verbose,
    warn,
)
from ..base import (
    BaseRaw,
    _check_maxshield,
    _check_raw_compatibility,
    _get_fname_rep,
    _RawShell,
)


@fill_doc
class Raw(BaseRaw):
    """Raw data in FIF format.

    Parameters
    ----------
    fname : path-like | file-like
        The raw filename to load. For files that have automatically been split,
        the split part will be automatically loaded. Filenames not ending with
        ``raw.fif``, ``raw_sss.fif``, ``raw_tsss.fif``, ``_meg.fif``,
        ``_eeg.fif``, or ``_ieeg.fif`` (with or without an optional additional
        ``.gz`` extension) will generate a warning. If a file-like object is
        provided, preloading must be used.

        .. versionchanged:: 0.18
           Support for file-like objects.
    allow_maxshield : bool | str (default False)
        If True, allow loading of data that has been recorded with internal
        active compensation (MaxShield). Data recorded with MaxShield should
        generally not be loaded directly, but should first be processed using
        SSS/tSSS to remove the compensation signals that may also affect brain
        activity. Can also be "yes" to load without eliciting a warning.
    %(preload)s
    %(on_split_missing)s
    %(verbose)s

    Attributes
    ----------
    %(info_not_none)s
    ch_names : list of str
        List of channel names.
    n_times : int
        Total number of time points in the raw file.
    times : ndarray
        Time vector in seconds. Starts from 0, independently of `first_samp`
        value. Time interval between consecutive time samples is equal to the
        inverse of the sampling frequency.
    duration : float
        The duration of the raw file in seconds.

        .. versionadded:: 1.9
    preload : bool
        Indicates whether raw data are in memory.
    """

    _extra_attributes = (
        "fix_mag_coil_types",
        "acqparser",
        "_read_raw_file",  # this would be ugly to move, but maybe we should
    )

    @verbose
    def __init__(
        self,
        fname,
        allow_maxshield=False,
        preload=False,
        on_split_missing="raise",
        verbose=None,
    ):
        raws = []
        do_check_ext = not _file_like(fname)
        next_fname = fname
        while next_fname is not None:
            raw, next_fname, buffer_size_sec = self._read_raw_file(
                next_fname, allow_maxshield, preload, do_check_ext
            )
            do_check_ext = False
            raws.append(raw)
            if next_fname is not None:
                if not op.exists(next_fname):
                    msg = (
                        f"Split raw file detected but next file {next_fname} "
                        "does not exist. Ensure all files were transferred "
                        "properly and that split and original files were not "
                        "manually renamed on disk (split files should be "
                        "renamed by loading and re-saving with MNE-Python to "
                        "preserve proper filename linkage)."
                    )
                    _on_missing(on_split_missing, msg, name="on_split_missing")
                    break
        # If using a file-like object, we need to be careful about serialization and
        # types.
        #
        # 1. We must change both the variable named "fname" here so that _get_argvalues
        #    (magic) does not store the file-like object.
        # 2. We need to ensure "filenames" passed to the constructor below gets a list
        #    of Path or None.
        # 3. We need to remove the file-like objects from _raw_extras. This must
        #    be done *after* the super().__init__ call, because the constructor
        #    needs the file-like objects to read the data (which it will do because we
        #    force preloading for file-like objects).

        # Avoid file-like in _get_argvalues (1)
        fname = _path_from_fname(fname)

        _check_raw_compatibility(raws)
        super().__init__(
            copy.deepcopy(raws[0].info),
            preload=False,
            first_samps=[r.first_samp for r in raws],
            last_samps=[r.last_samp for r in raws],
            # Avoid file-like objects in raw.filenames (2)
            filenames=[_path_from_fname(r._raw_extras["filename"]) for r in raws],
            raw_extras=[r._raw_extras for r in raws],
            orig_format=raws[0].orig_format,
            dtype=None,
            buffer_size_sec=buffer_size_sec,
            verbose=verbose,
        )

        # combine annotations
        self.set_annotations(raws[0].annotations, emit_warning=False)

        # Add annotations for in-data skips
        for extra in self._raw_extras:
            mask = [ent is None for ent in extra["ent"]]
            start = extra["bounds"][:-1][mask]
            stop = extra["bounds"][1:][mask] - 1
            duration = (stop - start + 1.0) / self.info["sfreq"]
            annot = Annotations(
                onset=(start / self.info["sfreq"]),
                duration=duration,
                description="BAD_ACQ_SKIP",
                orig_time=self.info["meas_date"],
            )

            self._annotations += annot

        if preload:
            self._preload_data(preload)
        else:
            self.preload = False
        # Avoid file-like objects in _raw_extras (3)
        for extra in self._raw_extras:
            if not isinstance(extra["filename"], Path):
                extra["filename"] = None

    @verbose
    def _read_raw_file(
        self, fname, allow_maxshield, preload, do_check_ext=True, verbose=None
    ):
        """Read in header information from a raw file."""
        logger.info(f"Opening raw data file {fname}...")

        # Read in the whole file if preload is on and .fif.gz (saves time)
        if not _file_like(fname):
            if do_check_ext:
                endings = (
                    "raw.fif",
                    "raw_sss.fif",
                    "raw_tsss.fif",
                    "_meg.fif",
                    "_eeg.fif",
                    "_ieeg.fif",
                )
                endings += tuple([f"{e}.gz" for e in endings])
                check_fname(fname, "raw", endings)
            # filename
            fname = _check_fname(fname, "read", True, "fname")
            whole_file = preload if fname.suffix == ".gz" else False
        else:
            # file-like
            if not preload:
                raise ValueError("preload must be used with file-like objects")
            whole_file = True
        ff, tree, _ = fiff_open(fname, preload=whole_file)
        with ff as fid:
            # Read the measurement info

            info, meas = read_meas_info(fid, tree, clean_bads=True)
            annotations = _read_annotations_fif(fid, tree)

            # Locate the data of interest
            raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA)
            if len(raw_node) == 0:
                raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA)
                if len(raw_node) == 0:
                    raw_node = dir_tree_find(meas, FIFF.FIFFB_IAS_RAW_DATA)
                    if len(raw_node) == 0:
                        raise ValueError(f"No raw data in {_get_fname_rep(fname)}")
                    _check_maxshield(allow_maxshield)
                    with info._unlock():
                        info["maxshield"] = True
            del meas

            if len(raw_node) == 1:
                raw_node = raw_node[0]

            # Process the directory
            directory = raw_node["directory"]
            nent = raw_node["nent"]
            nchan = int(info["nchan"])
            first = 0
            first_samp = 0
            first_skip = 0

            # Get first sample tag if it is there
            if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:
                tag = read_tag(fid, directory[first].pos)
                first_samp = int(tag.data.item())
                first += 1
                _check_entry(first, nent)

            # Omit initial skip
            if directory[first].kind == FIFF.FIFF_DATA_SKIP:
                # This first skip can be applied only after we know the bufsize
                tag = read_tag(fid, directory[first].pos)
                first_skip = int(tag.data.item())
                first += 1
                _check_entry(first, nent)

            raw = _RawShell()
            raw.first_samp = first_samp
            if info["meas_date"] is None and annotations is not None:
                # we need to adjust annotations.onset as when there is no meas
                # date set_annotations considers that the origin of time is the
                # first available sample (ignores first_samp)
                annotations.onset -= first_samp / info["sfreq"]
            raw.set_annotations(annotations)

            # Go through the remaining tags in the directory
            raw_extras = list()
            nskip = 0
            orig_format = None

            _byte_dict = {
                FIFF.FIFFT_DAU_PACK16: 2,
                FIFF.FIFFT_SHORT: 2,
                FIFF.FIFFT_FLOAT: 4,
                FIFF.FIFFT_DOUBLE: 8,
                FIFF.FIFFT_INT: 4,
                FIFF.FIFFT_COMPLEX_FLOAT: 8,
                FIFF.FIFFT_COMPLEX_DOUBLE: 16,
            }
            _orig_format_dict = {
                FIFF.FIFFT_DAU_PACK16: "short",
                FIFF.FIFFT_SHORT: "short",
                FIFF.FIFFT_FLOAT: "single",
                FIFF.FIFFT_DOUBLE: "double",
                FIFF.FIFFT_INT: "int",
                FIFF.FIFFT_COMPLEX_FLOAT: "single",
                FIFF.FIFFT_COMPLEX_DOUBLE: "double",
            }

            for k in range(first, nent):
                ent = directory[k]
                # There can be skips in the data (e.g., if the user unclicked
                # and re-clicked the button)
                if ent.kind == FIFF.FIFF_DATA_BUFFER:
                    # Figure out the number of samples in this buffer
                    try:
                        div = _byte_dict[ent.type]
                    except KeyError:
                        raise RuntimeError(
                            f"Cannot handle data buffers of type {ent.type}"
                        ) from None
                    nsamp = ent.size // (div * nchan)
                    if orig_format is None:
                        orig_format = _orig_format_dict[ent.type]

                    # Do we have an initial skip pending?
                    if first_skip > 0:
                        first_samp += nsamp * first_skip
                        raw.first_samp = first_samp
                        first_skip = 0

                    # Do we have a skip pending?
                    if nskip > 0:
                        raw_extras.append(
                            dict(
                                ent=None,
                                first=first_samp,
                                nsamp=nskip * nsamp,
                                last=first_samp + nskip * nsamp - 1,
                            )
                        )
                        first_samp += nskip * nsamp
                        nskip = 0

                    # Add a data buffer
                    raw_extras.append(
                        dict(
                            ent=ent,
                            first=first_samp,
                            last=first_samp + nsamp - 1,
                            nsamp=nsamp,
                        )
                    )
                    first_samp += nsamp
                elif ent.kind == FIFF.FIFF_DATA_SKIP:
                    tag = read_tag(fid, ent.pos)
                    nskip = int(tag.data.item())

            next_fname = _get_next_fname(fid, _path_from_fname(fname), tree)

        # reformat raw_extras to be a dict of list/ndarray rather than
        # list of dict (faster access)
        raw_extras = {key: [r[key] for r in raw_extras] for key in raw_extras[0]}
        for key in raw_extras:
            if key != "ent":  # dict or None
                raw_extras[key] = np.array(raw_extras[key], int)
        if not np.array_equal(raw_extras["last"][:-1], raw_extras["first"][1:] - 1):
            raise RuntimeError("FIF file appears to be broken")
        bounds = np.cumsum(
            np.concatenate([raw_extras["first"][:1], raw_extras["nsamp"]])
        )
        raw_extras["bounds"] = bounds
        assert len(raw_extras["bounds"]) == len(raw_extras["ent"]) + 1
        # store the original buffer size
        buffer_size_sec = np.median(raw_extras["nsamp"]) / info["sfreq"]
        del raw_extras["first"]
        del raw_extras["last"]
        del raw_extras["nsamp"]
        raw_extras["filename"] = fname

        raw.last_samp = first_samp - 1
        raw.orig_format = orig_format

        # Add the calibration factors
        cals = np.zeros(info["nchan"])
        for k in range(info["nchan"]):
            cals[k] = info["chs"][k]["range"] * info["chs"][k]["cal"]

        raw._cals = cals
        raw._raw_extras = raw_extras
        logger.info(
            "    Range : %d ... %d =  %9.3f ... %9.3f secs",
            raw.first_samp,
            raw.last_samp,
            float(raw.first_samp) / info["sfreq"],
            float(raw.last_samp) / info["sfreq"],
        )

        raw.info = info

        logger.info("Ready.")

        return raw, next_fname, buffer_size_sec

    @property
    def _dtype(self):
        """Get the dtype to use to store data from disk."""
        if self._dtype_ is not None:
            return self._dtype_
        dtype = None
        for raw_extra in self._raw_extras:
            for ent in raw_extra["ent"]:
                if ent is not None:
                    if ent.type in (
                        FIFF.FIFFT_COMPLEX_FLOAT,
                        FIFF.FIFFT_COMPLEX_DOUBLE,
                    ):
                        dtype = np.complex128
                    else:
                        dtype = np.float64
                    break
            if dtype is not None:
                break
        if dtype is None:
            raise RuntimeError("bug in reading")
        self._dtype_ = dtype
        return dtype

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a segment of data from a file."""
        n_bad = 0
        with _fiff_get_fid(self._raw_extras[fi]["filename"]) as fid:
            bounds = self._raw_extras[fi]["bounds"]
            ents = self._raw_extras[fi]["ent"]
            nchan = self._raw_extras[fi]["orig_nchan"]
            use = (stop > bounds[:-1]) & (start < bounds[1:])
            offset = 0
            for ei in np.where(use)[0]:
                first = bounds[ei]
                last = bounds[ei + 1]
                nsamp = last - first
                ent = ents[ei]
                first_pick = max(start - first, 0)
                last_pick = min(nsamp, stop - first)
                picksamp = last_pick - first_pick
                this_start = offset
                offset += picksamp
                this_stop = offset
                # only read data if it exists
                if ent is None:
                    continue  # just use zeros for gaps
                # faster to always read full tag, taking advantage of knowing the
                # header already (cutting out some of read_tag) ...
                fid.seek(ent.pos + 16, 0)
                one = _call_dict[ent.type](fid, ent, shape=None, rlims=None)
                try:
                    one.shape = (nsamp, nchan)
                except AttributeError:  # one is None
                    n_bad += picksamp
                else:
                    # ... then pick samples we want
                    if first_pick != 0 or last_pick != nsamp:
                        one = one[first_pick:last_pick]
                    _mult_cal_one(
                        data[:, this_start:this_stop],
                        one.T,
                        idx,
                        cals,
                        mult,
                    )
        if n_bad:
            warn(
                f"FIF raw buffer could not be read, acquisition error "
                f"likely: {n_bad} samples set to zero"
            )
        assert offset == stop - start

    def fix_mag_coil_types(self):
        """Fix Elekta magnetometer coil types.

        Returns
        -------
        raw : instance of Raw
            The raw object. Operates in place.

        Notes
        -----
        This function changes magnetometer coil types 3022 (T1: SQ20483N) and
        3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
        records in the info structure.

        Neuromag Vectorview systems can contain magnetometers with two
        different coil sizes (3022 and 3023 vs. 3024). The systems
        incorporating coils of type 3024 were introduced last and are used at
        the majority of MEG sites. At some sites with 3024 magnetometers,
        the data files have still defined the magnetometers to be of type
        3022 to ensure compatibility with older versions of Neuromag software.
        In the MNE software as well as in the present version of Neuromag
        software coil type 3024 is fully supported. Therefore, it is now safe
        to upgrade the data files to use the true coil type.

        .. note:: The effect of the difference between the coil sizes on the
                  current estimates computed by the MNE software is very small.
                  Therefore the use of mne_fix_mag_coil_types is not mandatory.
        """
        fix_mag_coil_types(self.info)
        return self

    @property
    def acqparser(self):
        """The AcqParserFIF for the measurement info.

        See Also
        --------
        mne.AcqParserFIF
        """
        if getattr(self, "_acqparser", None) is None:
            self._acqparser = AcqParserFIF(self.info)
        return self._acqparser


def _check_entry(first, nent):
    """Sanity check entries."""
    if first >= nent:
        raise OSError("Could not read data, perhaps this is a corrupt file")


@fill_doc
def read_raw_fif(
    fname, allow_maxshield=False, preload=False, on_split_missing="raise", verbose=None
) -> Raw:
    """Reader function for Raw FIF data.

    Parameters
    ----------
    fname : path-like | file-like
        The raw filename to load. For files that have automatically been split,
        the split part will be automatically loaded. Filenames should end
        with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif,
        raw_tsss.fif.gz, or _meg.fif. If a file-like object is provided,
        preloading must be used.

        .. versionchanged:: 0.18
           Support for file-like objects.
    allow_maxshield : bool | str (default False)
        If True, allow loading of data that has been recorded with internal
        active compensation (MaxShield). Data recorded with MaxShield should
        generally not be loaded directly, but should first be processed using
        SSS/tSSS to remove the compensation signals that may also affect brain
        activity. Can also be "yes" to load without eliciting a warning.
    %(preload)s
    %(on_split_missing)s
    %(verbose)s

    Returns
    -------
    raw : instance of Raw
        A Raw object containing FIF data.

    Notes
    -----
    .. versionadded:: 0.9.0

    When reading a FIF file, note that the first N seconds annotated
    ``BAD_ACQ_SKIP`` are **skipped**. They are removed from ``raw.times`` and
    ``raw.n_times`` parameters but ``raw.first_samp`` and ``raw.first_time``
    are updated accordingly.
    """
    return Raw(
        fname=fname,
        allow_maxshield=allow_maxshield,
        preload=preload,
        verbose=verbose,
        on_split_missing=on_split_missing,
    )
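

# --- Illustrative usage sketch (not part of the module; the filename is
# hypothetical):
# >>> raw = read_raw_fif("sample_audvis_raw.fif", preload=True)
# >>> raw.info["sfreq"]  # sampling frequency read from the file header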


def _path_from_fname(fname) -> Path | None:
    if not isinstance(fname, Path):
        if isinstance(fname, str):
            fname = Path(fname)
        else:
            # Try to get a filename from the file-like object
            try:
                fname = Path(fname.name)
            except Exception:
                fname = None
    return fname
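

# --- Illustrative sketch (not part of the module): strings become Paths,
# file-like objects without a usable ``name`` attribute become None. The
# Path repr shown assumes a POSIX system.
# >>> _path_from_fname("sample_raw.fif")
# PosixPath('sample_raw.fif')
# >>> import io
# >>> _path_from_fname(io.BytesIO(b"")) is None
# True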
5
mne/io/fil/__init__.py
Normal file
5
mne/io/fil/__init__.py
Normal file
@@ -0,0 +1,5 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .fil import read_raw_fil
336
mne/io/fil/fil.py
Normal file
336
mne/io/fil/fil.py
Normal file
@@ -0,0 +1,336 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import json
import pathlib

import numpy as np

from ..._fiff._digitization import _make_dig_points
from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _empty_info
from ..._fiff.utils import _read_segments_file
from ..._fiff.write import get_new_file_id
from ...transforms import Transform, apply_trans, get_ras_to_neuromag_trans
from ...utils import _check_fname, fill_doc, verbose, warn
from ..base import BaseRaw
from .sensors import (
    _get_plane_vectors,
    _get_pos_units,
    _refine_sensor_orientation,
    _size2units,
)


@verbose
def read_raw_fil(
    binfile, precision="single", preload=False, *, verbose=None
) -> "RawFIL":
    """Raw object from FIL-OPMEG formatted data.

    Parameters
    ----------
    binfile : path-like
        Path to the MEG data binary (ending in ``'_meg.bin'``).
    precision : str, optional
        How is the data represented? ``'single'`` if 32-bit or ``'double'`` if
        64-bit (default is single).
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawFIL
        The raw data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawFIL.
    """
    return RawFIL(binfile, precision=precision, preload=preload)
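

# --- Illustrative usage sketch (not part of the module; the filename is
# hypothetical):
# >>> raw = read_raw_fil("sub-01_task-rest_meg.bin", precision="single",
# ...                    preload=True)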


@fill_doc
class RawFIL(BaseRaw):
    """Raw object from FIL-OPMEG formatted data.

    Parameters
    ----------
    binfile : path-like
        Path to the MEG data binary (ending in ``'_meg.bin'``).
    precision : str, optional
        How is the data represented? ``'single'`` if 32-bit or
        ``'double'`` if 64-bit (default is single).
    %(preload)s

    Returns
    -------
    raw : instance of RawFIL
        The raw data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawFIL.
    """

    def __init__(self, binfile, precision="single", preload=False):
        if precision == "single":
            dt = np.dtype(">f")
            bps = 4
        else:
            dt = np.dtype(">d")
            bps = 8

        sample_info = dict()
        sample_info["dt"] = dt
        sample_info["bps"] = bps

        files = _get_file_names(binfile)

        chans = _from_tsv(files["chans"])
        nchans = len(chans["name"])
        nsamples = _determine_nsamples(files["bin"], nchans, precision) - 1
        sample_info["nsamples"] = nsamples

        raw_extras = list()
        raw_extras.append(sample_info)

        chans["pos"] = [None] * nchans
        chans["ori"] = [None] * nchans
        if files["positions"].is_file():
            chanpos = _from_tsv(files["positions"])
            nlocs = len(chanpos["name"])
            for ii in range(0, nlocs):
                idx = chans["name"].index(chanpos["name"][ii])
                tmp = np.array(
                    [chanpos["Px"][ii], chanpos["Py"][ii], chanpos["Pz"][ii]]
                )
                chans["pos"][idx] = tmp.astype(np.float64)
                tmp = np.array(
                    [chanpos["Ox"][ii], chanpos["Oy"][ii], chanpos["Oz"][ii]]
                )
                chans["ori"][idx] = tmp.astype(np.float64)
        else:
            warn("No sensor position information found.")

        with open(files["meg"]) as fid:
            meg = json.load(fid)
        info = _compose_meas_info(meg, chans)

        super().__init__(
            info,
            preload,
            filenames=[files["bin"]],
            raw_extras=raw_extras,
            last_samps=[nsamples],
            orig_format=precision,
        )

        if files["coordsystem"].is_file():
            with open(files["coordsystem"]) as fid:
                csys = json.load(fid)
            hc = csys["HeadCoilCoordinates"]

            for key in hc:
                if key.lower() == "lpa":
                    lpa = np.asarray(hc[key])
                elif key.lower() == "rpa":
                    rpa = np.asarray(hc[key])
                elif key.lower().startswith("nas"):
                    nas = np.asarray(hc[key])
                else:
                    warn(f"{key} is not a valid fiducial name!")

            size = np.linalg.norm(nas - rpa)
            unit, sf = _size2units(size)
            # TODO: These are not guaranteed to exist and could lead to a
            # confusing error message, should fix later
            lpa /= sf
            rpa /= sf
            nas /= sf

            t = get_ras_to_neuromag_trans(nas, lpa, rpa)

            # transform fiducial points
            nas = apply_trans(t, nas)
            lpa = apply_trans(t, lpa)
            rpa = apply_trans(t, rpa)

            with self.info._unlock():
                self.info["dig"] = _make_dig_points(
                    nasion=nas, lpa=lpa, rpa=rpa, coord_frame="meg"
                )
        else:
            warn(
                "No fiducials found in files, defaulting sensor array to "
                "FIFFV_COORD_DEVICE, this may cause problems later!"
            )
            t = np.eye(4)

        with self.info._unlock():
            self.info["dev_head_t"] = Transform(
                FIFF.FIFFV_COORD_DEVICE, FIFF.FIFFV_COORD_HEAD, t
            )

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        si = self._raw_extras[fi]
        _read_segments_file(
            self, data, idx, fi, start, stop, cals, mult, dtype=si["dt"]
        )
|
||||
def _convert_channel_info(chans):
|
||||
"""Convert the imported _channels.tsv into the chs element of raw.info."""
|
||||
nmeg = nstim = nmisc = nref = 0
|
||||
|
||||
if not all(p is None for p in chans["pos"]):
|
||||
_, sf = _get_pos_units(chans["pos"])
|
||||
|
||||
chs = list()
|
||||
for ii in range(len(chans["name"])):
|
||||
ch = dict(
|
||||
scanno=ii + 1,
|
||||
range=1.0,
|
||||
cal=1.0,
|
||||
loc=np.full(12, np.nan),
|
||||
unit_mul=FIFF.FIFF_UNITM_NONE,
|
||||
ch_name=chans["name"][ii],
|
||||
coil_type=FIFF.FIFFV_COIL_NONE,
|
||||
)
|
||||
chs.append(ch)
|
||||
|
||||
# create the channel information
|
||||
if chans["pos"][ii] is not None:
|
||||
r0 = chans["pos"][ii].copy() / sf # mm to m
|
||||
ez = chans["ori"][ii].copy()
|
||||
ez = ez / np.linalg.norm(ez)
|
||||
ex, ey = _get_plane_vectors(ez)
|
||||
ch["loc"] = np.concatenate([r0, ex, ey, ez])
|
||||
|
||||
if chans["type"][ii] == "MEGMAG":
|
||||
nmeg += 1
|
||||
ch.update(
|
||||
logno=nmeg,
|
||||
coord_frame=FIFF.FIFFV_COORD_DEVICE,
|
||||
kind=FIFF.FIFFV_MEG_CH,
|
||||
unit=FIFF.FIFF_UNIT_T,
|
||||
coil_type=FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2,
|
||||
)
|
||||
elif chans["type"][ii] == "MEGREFMAG":
|
||||
nref += 1
|
||||
ch.update(
|
||||
logno=nref,
|
||||
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
|
||||
kind=FIFF.FIFFV_REF_MEG_CH,
|
||||
unit=FIFF.FIFF_UNIT_T,
|
||||
coil_type=FIFF.FIFFV_COIL_QUSPIN_ZFOPM_MAG2,
|
||||
)
|
||||
elif chans["type"][ii] == "TRIG":
|
||||
nstim += 1
|
||||
ch.update(
|
||||
logno=nstim,
|
||||
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
|
||||
kind=FIFF.FIFFV_STIM_CH,
|
||||
unit=FIFF.FIFF_UNIT_V,
|
||||
)
|
||||
else:
|
||||
nmisc += 1
|
||||
ch.update(
|
||||
logno=nmisc,
|
||||
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
|
||||
kind=FIFF.FIFFV_MISC_CH,
|
||||
unit=FIFF.FIFF_UNIT_NONE,
|
||||
)
|
||||
|
||||
# set the calibration based on the units - MNE expects T units for meg
|
||||
# and V for eeg
|
||||
if chans["units"][ii] == "fT":
|
||||
ch.update(cal=1e-15)
|
||||
elif chans["units"][ii] == "pT":
|
||||
ch.update(cal=1e-12)
|
||||
elif chans["units"][ii] == "nT":
|
||||
ch.update(cal=1e-9)
|
||||
elif chans["units"][ii] == "mV":
|
||||
ch.update(cal=1e3)
|
||||
elif chans["units"][ii] == "uV":
|
||||
ch.update(cal=1e6)
|
||||
|
||||
return chs
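

# Worked example of the calibration logic above: a magnetometer whose
# _channels.tsv row reports units of "fT" gets cal=1e-15, so a stored sample
# of 2000.0 (fT) is exposed by MNE as 2000.0 * 1e-15 = 2e-12 T.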


def _compose_meas_info(meg, chans):
    """Create info structure."""
    info = _empty_info(meg["SamplingFrequency"])
    # Collect all the necessary data from the structures read
    info["meas_id"] = get_new_file_id()
    tmp = _convert_channel_info(chans)
    info["chs"] = _refine_sensor_orientation(tmp)
    info["line_freq"] = meg["PowerLineFrequency"]
    info._update_redundant()
    info["bads"] = _read_bad_channels(chans)
    info._unlocked = False
    return info


def _determine_nsamples(bin_fname, nchans, precision):
    """Identify how many temporal samples are in a dataset."""
    bsize = bin_fname.stat().st_size
    if precision == "single":
        bps = 4
    else:
        bps = 8
    nsamples = int(bsize / (nchans * bps))
    return nsamples
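

# Worked example (hypothetical numbers): a 64-channel single-precision
# recording stored in an 8_192_000-byte binary holds
# 8_192_000 / (64 channels * 4 bytes) = 32_000 samples per channel.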


def _read_bad_channels(chans):
    """Check the _channels.tsv file for pre-marked bad channels."""
    bads = list()
    for ii in range(0, len(chans["status"])):
        if chans["status"][ii] == "bad":
            bads.append(chans["name"][ii])
    return bads


def _from_tsv(fname, dtypes=None):
    """Read a tsv file into a dict (which we know is ordered)."""
    data = np.loadtxt(
        fname, dtype=str, delimiter="\t", ndmin=2, comments=None, encoding="utf-8-sig"
    )
    column_names = data[0, :]
    info = data[1:, :]
    data_dict = dict()
    if dtypes is None:
        dtypes = [str] * info.shape[1]
    if not isinstance(dtypes, list | tuple):
        dtypes = [dtypes] * info.shape[1]
    if not len(dtypes) == info.shape[1]:
        raise ValueError(
            f"dtypes length mismatch. Provided: {len(dtypes)}, "
            f"Expected: {info.shape[1]}"
        )
    for i, name in enumerate(column_names):
        data_dict[name] = info[:, i].astype(dtypes[i]).tolist()
    return data_dict


def _get_file_names(binfile):
    """Guess the filenames based on predicted suffixes."""
    binfile = pathlib.Path(
        _check_fname(binfile, overwrite="read", must_exist=True, name="fname")
    )
    if not (binfile.suffix == ".bin" and binfile.stem.endswith("_meg")):
        raise ValueError(f"File must be a filename ending in _meg.bin, got {binfile}")
    files = dict()
    dir_ = binfile.parent
    root = binfile.stem[:-4]  # no _meg
    files["bin"] = dir_ / (root + "_meg.bin")
    files["meg"] = dir_ / (root + "_meg.json")
    files["chans"] = dir_ / (root + "_channels.tsv")
    files["positions"] = dir_ / (root + "_positions.tsv")
    files["coordsystem"] = dir_ / (root + "_coordsystem.json")
    return files
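

# A sketch of the sibling files _get_file_names derives from one binary
# (the subject/task names are hypothetical):
#
#     sub-01_task-rest_meg.bin           # raw samples
#     sub-01_task-rest_meg.json          # SamplingFrequency, PowerLineFrequency
#     sub-01_task-rest_channels.tsv      # name, type, units, status
#     sub-01_task-rest_positions.tsv     # Px/Py/Pz, Ox/Oy/Oz (optional)
#     sub-01_task-rest_coordsystem.json  # HeadCoilCoordinates (optional)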
145
mne/io/fil/sensors.py
Normal file
@@ -0,0 +1,145 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from copy import deepcopy

import numpy as np

from ...utils import logger


def _refine_sensor_orientation(chanin):
    """Improve orientation matrices based on multiaxis measures.

    The ex and ey elements from _convert_channel_info are not oriented
    based on the physical orientation of the sensor. When available, the
    orientation information from multi-axis recordings can be used to
    refine these elements.
    """
    logger.info("Refining sensor orientations...")
    chanout = deepcopy(chanin)
    tmpname = list()
    for ii in range(len(chanin)):
        tmpname.append(chanin[ii]["ch_name"])

    for ii in range(len(chanin)):
        tmploc = deepcopy(chanin[ii]["loc"])
        tmploc = tmploc.reshape(3, 4, order="F")
        if not np.isnan(tmploc.sum()):
            target, flipFlag = _guess_other_chan_axis(tmpname, ii)
            if not np.isnan(target):
                targetloc = deepcopy(chanin[target]["loc"])
                if not np.isnan(targetloc.sum()):
                    targetloc = targetloc.reshape(3, 4, order="F")
                    tmploc[:, 2] = targetloc[:, 3]
                    tmploc[:, 1] = flipFlag * np.cross(tmploc[:, 2], tmploc[:, 3])
                    chanout[ii]["loc"] = tmploc.reshape(12, order="F")
    logger.info("[done]")
    return chanout


def _guess_other_chan_axis(tmpname, seedID):
    """Try to guess the name of another axis of a multiaxis sensor."""
    # see if it's using the old RAD/TAN convention first, otherwise use XYZ
    if tmpname[seedID][-3:] == "RAD":
        prefix1 = "RAD"
        prefix2 = "TAN"
        flipflag = 1.0
    elif tmpname[seedID][-3:] == "TAN":
        prefix1 = "TAN"
        prefix2 = "RAD"
        flipflag = -1.0
    elif tmpname[seedID][-1:] == "Z" or tmpname[seedID][-3:] == "[Z]":
        prefix1 = "Z"
        prefix2 = "Y"
        flipflag = -1.0
    elif tmpname[seedID][-1:] == "Y" or tmpname[seedID][-3:] == "[Y]":
        prefix1 = "Y"
        prefix2 = "Z"
        flipflag = 1.0
    elif tmpname[seedID][-1:] == "X" or tmpname[seedID][-3:] == "[X]":
        prefix1 = "X"
        prefix2 = "Y"
        flipflag = 1.0
    else:
        prefix1 = "?"
        prefix2 = "?"
        flipflag = 1.0

    target_name = tmpname[seedID][: -len(prefix1)] + prefix2

    target_id = np.where([t == target_name for t in tmpname])[0]
    target_id = target_id[0] if len(target_id) else np.nan

    return target_id, flipflag


def _get_pos_units(pos):
    """Get the units of a point cloud.

    Determines the units of a point cloud of sensor positions and provides
    the scale factor required to convert the positions to meters.
    """
    # get rid of None elements
    nppos = np.empty((0, 3))
    for ii in range(0, len(pos)):
        if pos[ii] is not None and sum(np.isnan(pos[ii])) == 0:
            nppos = np.vstack((nppos, pos[ii]))

    idrange = np.empty(shape=(0, 3))
    for ii in range(0, 3):
        q90, q10 = np.percentile(nppos[:, ii], [90, 10])
        idrange = np.append(idrange, q90 - q10)

    size = np.linalg.norm(idrange)

    unit, sf = _size2units(size)

    return unit, sf


def _size2units(size):
    """Convert the size returned from _get_pos_units into a physical unit."""
    if size >= 0.050 and size < 0.500:
        unit = "m"
        sf = 1
    elif size >= 0.50 and size < 5:
        unit = "dm"
        sf = 10
    elif size >= 5 and size < 50:
        unit = "cm"
        sf = 100
    elif size >= 50 and size < 500:
        unit = "mm"
        sf = 1000
    else:
        unit = "unknown"
        sf = 1

    return unit, sf
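

# Worked example of the thresholds above: a human-head-sized sensor cloud has
# an inter-decile extent of roughly 0.15 if stored in metres and 150 if stored
# in millimetres, so:
#
#     assert _size2units(0.15) == ("m", 1)
#     assert _size2units(150.0) == ("mm", 1000)  # divide positions by 1000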


def _get_plane_vectors(ez):
    """Get two orthogonal vectors orthogonal to ez (ez will be modified).

    Note: the ex and ey positions will not be realistic; this can be fixed
    using _refine_sensor_orientation.
    """
    assert ez.shape == (3,)
    ez_len = np.sqrt(np.sum(ez * ez))
    if ez_len == 0:
        raise RuntimeError("Zero length normal. Cannot proceed.")
    if np.abs(ez_len - np.abs(ez[2])) < 1e-5:  # ez already in z-direction
        ex = np.array([1.0, 0.0, 0.0])
    else:
        ex = np.zeros(3)
        if ez[1] < ez[2]:
            ex[0 if ez[0] < ez[1] else 1] = 1.0
        else:
            ex[0 if ez[0] < ez[2] else 2] = 1.0
    ez /= ez_len
    ex -= np.dot(ez, ex) * ez
    ex /= np.sqrt(np.sum(ex * ex))
    ey = np.cross(ez, ex)
    return ex, ey
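

# A minimal sketch of the invariants _get_plane_vectors provides: ex, ey and
# the (normalised in place) ez form a right-handed orthonormal triad.
#
#     ez = np.array([0.0, 3.0, 4.0])
#     ex, ey = _get_plane_vectors(ez)
#     assert abs(np.dot(ex, ez)) < 1e-12
#     assert np.allclose(np.cross(ez, ex), ey)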
7
mne/io/hitachi/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""fNIRS module for conversion to FIF."""
|
||||
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
from .hitachi import read_raw_hitachi
|
||||
342
mne/io/hitachi/hitachi.py
Normal file
342
mne/io/hitachi/hitachi.py
Normal file
@@ -0,0 +1,342 @@
|
||||
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import datetime as dt
import re

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _merge_info, create_info
from ..._fiff.utils import _mult_cal_one
from ...utils import _check_fname, _check_option, fill_doc, logger, verbose, warn
from ..base import BaseRaw
from ..nirx.nirx import _read_csv_rows_cols


@fill_doc
def read_raw_hitachi(fname, preload=False, verbose=None) -> "RawHitachi":
    """Reader for a Hitachi fNIRS recording.

    Parameters
    ----------
    %(hitachi_fname)s
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawHitachi
        A Raw object containing Hitachi data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawHitachi.

    Notes
    -----
    %(hitachi_notes)s
    """
    return RawHitachi(fname, preload, verbose=verbose)
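

# Example (a sketch; the file names are hypothetical): a two-probe session is
# combined channel-wise by passing both measurement files at once:
#
#     raw = read_raw_hitachi(["S01_probe1.csv", "S01_probe2.csv"], preload=True)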


def _check_bad(cond, msg):
    if cond:
        raise RuntimeError(f"Could not parse file: {msg}")


@fill_doc
class RawHitachi(BaseRaw):
    """Raw object from a Hitachi fNIRS file.

    Parameters
    ----------
    %(hitachi_fname)s
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.

    Notes
    -----
    %(hitachi_notes)s
    """

    @verbose
    def __init__(self, fname, preload=False, *, verbose=None):
        if not isinstance(fname, list | tuple):
            fname = [fname]
        fname = list(fname)  # our own list that we can modify
        for fi, this_fname in enumerate(fname):
            fname[fi] = _check_fname(this_fname, "read", True, f"fname[{fi}]")
        infos = list()
        probes = list()
        last_samps = list()
        S_offset = D_offset = 0
        ignore_names = ["Time"]
        for this_fname in fname:
            info, extra, last_samp, offsets = _get_hitachi_info(
                this_fname, S_offset, D_offset, ignore_names
            )
            ignore_names = list(set(ignore_names + info["ch_names"]))
            S_offset += offsets[0]
            D_offset += offsets[1]
            infos.append(info)
            probes.append(extra)
            last_samps.append(last_samp)
        # combine infos
        if len(fname) > 1:
            info = _merge_info(infos)
        else:
            info = infos[0]
        if len(set(last_samps)) != 1:
            raise RuntimeError(
                f"All files must have the same number of samples, got: {last_samps}"
            )
        last_samps = [last_samps[0]]
        raw_extras = [dict(probes=probes)]
        # One representative filename is good enough here
        # (additional filenames indicate temporal concat, not ch concat)
        super().__init__(
            info,
            preload,
            filenames=[fname[0]],
            last_samps=last_samps,
            raw_extras=raw_extras,
            verbose=verbose,
        )

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a segment of data from a file."""
        this_data = list()
        for this_probe in self._raw_extras[fi]["probes"]:
            this_data.append(
                _read_csv_rows_cols(
                    this_probe["fname"],
                    start,
                    stop,
                    this_probe["keep_mask"],
                    this_probe["bounds"],
                    sep=",",
                    replace=lambda x: x.replace("\r", "\n")
                    .replace("\n\n", "\n")
                    .replace("\n", ",")
                    .replace(":", ""),
                ).T
            )
        this_data = np.concatenate(this_data, axis=0)
        _mult_cal_one(data, this_data, idx, cals, mult)
        return data


def _get_hitachi_info(fname, S_offset, D_offset, ignore_names):
    logger.info(f"Loading {fname}")
    raw_extra = dict(fname=fname)
    info_extra = dict()
    subject_info = dict()
    ch_wavelengths = dict()
    fnirs_wavelengths = [None, None]
    meas_date = age = ch_names = sfreq = None
    with open(fname, "rb") as fid:
        lines = fid.read()
    lines = lines.decode("latin-1").rstrip("\r\n")
    oldlen = len(lines)
    assert len(lines) == oldlen
    bounds = [0]
    end = "\n" if "\n" in lines else "\r"
    bounds.extend(a.end() for a in re.finditer(end, lines))
    bounds.append(len(lines))
    lines = lines.split(end)
    assert len(bounds) == len(lines) + 1
    line = lines[0].rstrip(",\r\n")
    _check_bad(line != "Header", "no header found")
    li = 0
    mode = None
    for li, line in enumerate(lines[1:], 1):
        # Newer format has some blank lines
        if len(line) == 0:
            continue
        parts = line.rstrip(",\r\n").split(",")
        if len(parts) == 0:  # some header lines are blank
            continue
        kind, parts = parts[0], parts[1:]
        if len(parts) == 0:
            parts = [""]  # some fields (e.g., Comment) meaningfully blank
        if kind == "File Version":
            logger.info(f"Reading Hitachi fNIRS file version {parts[0]}")
        elif kind == "AnalyzeMode":
            _check_bad(parts != ["Continuous"], f"not continuous data ({parts})")
        elif kind == "Sampling Period[s]":
            sfreq = 1 / float(parts[0])
        elif kind == "Exception":
            raise NotImplementedError(kind)
        elif kind == "Comment":
            info_extra["description"] = parts[0]
        elif kind == "ID":
            subject_info["his_id"] = parts[0]
        elif kind == "Name":
            if len(parts):
                name = parts[0].split(" ")
                if len(name):
                    subject_info["first_name"] = name[0]
                    subject_info["last_name"] = " ".join(name[1:])
        elif kind == "Age":
            age = int(parts[0].rstrip("y"))
        elif kind == "Mode":
            mode = parts[0]
        elif kind in ("HPF[Hz]", "LPF[Hz]"):
            try:
                freq = float(parts[0])
            except ValueError:
                pass
            else:
                info_extra[{"HPF[Hz]": "highpass", "LPF[Hz]": "lowpass"}[kind]] = freq
        elif kind == "Date":
            # 5/17/04 5:14
            try:
                mdy, HM = parts[0].split(" ")
                H, M = HM.split(":")
                if len(H) == 1:
                    H = f"0{H}"
                mdyHM = " ".join([mdy, ":".join([H, M])])
                for fmt in ("%m/%d/%y %H:%M", "%Y/%m/%d %H:%M"):
                    try:
                        meas_date = dt.datetime.strptime(mdyHM, fmt)
                    except Exception:
                        pass
                    else:
                        break
                else:
                    raise RuntimeError  # unknown format
            except Exception:
                warn(
                    "Extraction of measurement date failed. "
                    "Please report this as a github issue. "
                    "The date is being set to January 1st, 2000, "
                    f"instead of {repr(parts[0])}"
                )
        elif kind == "Sex":
            try:
                subject_info["sex"] = dict(
                    female=FIFF.FIFFV_SUBJ_SEX_FEMALE, male=FIFF.FIFFV_SUBJ_SEX_MALE
                )[parts[0].lower()]
            except KeyError:
                pass
        elif kind == "Wave[nm]":
            fnirs_wavelengths[:] = [int(part) for part in parts]
        elif kind == "Wave Length":
            ch_regex = re.compile(r"^(.*)\(([0-9\.]+)\)$")
            for ent in parts:
                _, v = ch_regex.match(ent).groups()
                ch_wavelengths[ent] = float(v)
        elif kind == "Data":
            break
    fnirs_wavelengths = np.array(fnirs_wavelengths, int)
    assert len(fnirs_wavelengths) == 2
    ch_names = lines[li + 1].rstrip(",\r\n").split(",")
    # cull to correct ones
    raw_extra["keep_mask"] = ~np.isin(ch_names, list(ignore_names))
    for ci, ch_name in enumerate(ch_names):
        if re.match("Probe[0-9]+", ch_name):
            raw_extra["keep_mask"][ci] = False
    # set types
    ch_names = [
        ch_name for ci, ch_name in enumerate(ch_names) if raw_extra["keep_mask"][ci]
    ]
    ch_types = [
        "fnirs_cw_amplitude" if ch_name.startswith("CH") else "stim"
        for ch_name in ch_names
    ]
    # get locations
    nirs_names = [
        ch_name
        for ch_name, ch_type in zip(ch_names, ch_types)
        if ch_type == "fnirs_cw_amplitude"
    ]
    n_nirs = len(nirs_names)
    assert n_nirs % 2 == 0
    names = {
        "3x3": "ETG-100",
        "3x5": "ETG-7000",
        "4x4": "ETG-7000",
        "3x11": "ETG-4000",
    }
    _check_option("Hitachi mode", mode, sorted(names))
    n_row, n_col = (int(x) for x in mode.split("x"))
    logger.info(f"Constructing pairing matrix for {names[mode]} ({mode})")
    pairs = _compute_pairs(n_row, n_col, n=1 + (mode == "3x3"))
    assert n_nirs == len(pairs) * 2
    locs = np.zeros((len(ch_names), 12))
    locs[:, :9] = np.nan
    idxs = np.where(np.array(ch_types, "U") == "fnirs_cw_amplitude")[0]
    for ii, idx in enumerate(idxs):
        ch_name = ch_names[idx]
        # Use the actual/accurate wavelength in loc
        acc_freq = ch_wavelengths[ch_name]
        locs[idx][9] = acc_freq
        # Rename channel based on standard naming scheme, using the
        # nominal wavelength
        sidx, didx = pairs[ii // 2]
        nom_freq = fnirs_wavelengths[np.argmin(np.abs(acc_freq - fnirs_wavelengths))]
        ch_names[idx] = f"S{S_offset + sidx + 1}_D{D_offset + didx + 1} {nom_freq}"
    offsets = np.array(pairs, int).max(axis=0) + 1

    # figure out bounds
    bounds = raw_extra["bounds"] = bounds[li + 2 :]
    last_samp = len(bounds) - 2

    if age is not None and meas_date is not None:
        subject_info["birthday"] = dt.date(
            meas_date.year - age,
            meas_date.month,
            meas_date.day,
        )
    if meas_date is None:
        meas_date = dt.datetime(2000, 1, 1, 0, 0, 0)
    meas_date = meas_date.replace(tzinfo=dt.timezone.utc)
    if subject_info:
        info_extra["subject_info"] = subject_info

    # Create mne structure
    info = create_info(ch_names, sfreq, ch_types=ch_types)
    with info._unlock():
        info.update(info_extra)
        info["meas_date"] = meas_date
        for li, loc in enumerate(locs):
            info["chs"][li]["loc"][:] = loc
    return info, raw_extra, last_samp, offsets


def _compute_pairs(n_rows, n_cols, n=1):
    n_tot = n_rows * n_cols
    sd_idx = (np.arange(n_tot) // 2).reshape(n_rows, n_cols)
    d_bool = np.empty((n_rows, n_cols), bool)
    for ri in range(n_rows):
        d_bool[ri] = np.arange(ri, ri + n_cols) % 2
    pairs = list()
    for ri in range(n_rows):
        # First iterate over connections within the row
        for ci in range(n_cols - 1):
            pair = (sd_idx[ri, ci], sd_idx[ri, ci + 1])
            if d_bool[ri, ci]:  # reverse
                pair = pair[::-1]
            pairs.append(pair)
        # Next iterate over row-row connections, if applicable
        if ri >= n_rows - 1:
            continue
        for ci in range(n_cols):
            pair = (sd_idx[ri, ci], sd_idx[ri + 1, ci])
            if d_bool[ri, ci]:
                pair = pair[::-1]
            pairs.append(pair)
    if n > 1:
        assert n == 2  # only one supported for now
        pairs = np.array(pairs, int)
        second = pairs + pairs.max(axis=0) + 1
        pairs = np.r_[pairs, second]
        pairs = tuple(tuple(row) for row in pairs)
    return tuple(pairs)
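

# Sketch of the pairing logic for the smallest layout: in "3x3" mode each
# probe yields 3 * 2 within-row plus 2 * 3 between-row connections, i.e. 12
# source-detector pairs, and n=2 appends an index-offset copy for the second
# probe, so len(_compute_pairs(3, 3, n=2)) == 24.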
8
mne/io/kit/__init__.py
Normal file
@@ -0,0 +1,8 @@
"""KIT module for reading raw data."""
|
||||
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
from .kit import read_raw_kit, read_epochs_kit
|
||||
from .coreg import read_mrk
259
mne/io/kit/constants.py
Normal file
@@ -0,0 +1,259 @@
"""KIT constants."""
|
||||
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
from ..._fiff.constants import FIFF
|
||||
from ...utils import BunchConst
|
||||
|
||||
|
||||
KIT = BunchConst()
|
||||
|
||||
# byte values
|
||||
KIT.SHORT = 2
|
||||
KIT.INT = 4
|
||||
KIT.DOUBLE = 8
|
||||
|
||||
# channel parameters
|
||||
KIT.CALIB_FACTOR = 1.0 # mne_manual p.272
|
||||
KIT.RANGE = 1.0 # mne_manual p.272
|
||||
KIT.UNIT_MUL = FIFF.FIFF_UNITM_NONE # default is 0 mne_manual p.273
|
||||
KIT.GAINS = [1, 2, 5, 10, 20, 50, 100, 200]
|
||||
|
||||
KIT.HPFS = {
|
||||
1: (0, 1, 3, 3),
|
||||
2: (0, 0.03, 0.1, 0.3, 1, 3, 10, 30),
|
||||
3: (0, 0.03, 0.1, 0.3, 1, 3, 10, 30),
|
||||
4: (0, 1, 3, 10, 30, 100, 200, 500),
|
||||
}
|
||||
KIT.LPFS = {
|
||||
1: (10, 20, 50, 100, 200, 500, 1000, 2000),
|
||||
2: (10, 20, 50, 100, 200, 500, 1000, 2000),
|
||||
3: (10, 20, 50, 100, 200, 500, 1000, 10000),
|
||||
4: (10, 30, 100, 300, 1000, 2000, 5000, 10000),
|
||||
}
|
||||
KIT.BEFS = {
|
||||
1: (0, 50, 60, 60),
|
||||
2: (0, 0, 0),
|
||||
3: (0, 60, 50, 50),
|
||||
}
|
||||
|
||||
# Map FLL-Type to filter options (high, low, band)
|
||||
KIT.FLL_SETTINGS = {
|
||||
0: (1, 1, 1), # Hanger Type #1
|
||||
10: (1, 1, 1), # Hanger Type #2
|
||||
20: (1, 1, 1), # Hanger Type #2
|
||||
50: (2, 1, 1), # Hanger Type #3
|
||||
60: (2, 1, 1), # Hanger Type #3
|
||||
100: (3, 3, 3), # Low Band Kapper Type
|
||||
101: (1, 3, 2), # Berlin (DC, 200 Hz, Through)
|
||||
120: (3, 3, 3), # Low Band Kapper Type
|
||||
200: (4, 4, 3), # High Band Kapper Type
|
||||
300: (2, 2, 2), # Kapper Type
|
||||
}
|
||||
|
||||
# channel types
|
||||
KIT.CHANNEL_MAGNETOMETER = 1
|
||||
KIT.CHANNEL_MAGNETOMETER_REFERENCE = 0x101
|
||||
KIT.CHANNEL_AXIAL_GRADIOMETER = 2
|
||||
KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE = 0x102
|
||||
KIT.CHANNEL_PLANAR_GRADIOMETER = 3
|
||||
KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE = 0x103
|
||||
KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER = 4
|
||||
KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE = 0x104
|
||||
KIT.CHANNEL_TRIGGER = -1
|
||||
KIT.CHANNEL_EEG = -2
|
||||
KIT.CHANNEL_ECG = -3
|
||||
KIT.CHANNEL_ETC = -4
|
||||
KIT.CHANNEL_NULL = 0
|
||||
KIT.CHANNELS_MEG = (
|
||||
KIT.CHANNEL_MAGNETOMETER,
|
||||
KIT.CHANNEL_MAGNETOMETER_REFERENCE,
|
||||
KIT.CHANNEL_AXIAL_GRADIOMETER,
|
||||
KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE,
|
||||
KIT.CHANNEL_PLANAR_GRADIOMETER,
|
||||
KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE,
|
||||
KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER,
|
||||
KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE,
|
||||
)
|
||||
KIT.CHANNELS_REFERENCE = (
|
||||
KIT.CHANNEL_MAGNETOMETER_REFERENCE,
|
||||
KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE,
|
||||
KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE,
|
||||
KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE,
|
||||
)
|
||||
KIT.CHANNELS_MISC = (
|
||||
KIT.CHANNEL_TRIGGER,
|
||||
KIT.CHANNEL_EEG,
|
||||
KIT.CHANNEL_ECG,
|
||||
KIT.CHANNEL_ETC,
|
||||
)
|
||||
KIT.CHANNEL_NAME_NCHAR = {
|
||||
KIT.CHANNEL_MAGNETOMETER: 6,
|
||||
KIT.CHANNEL_AXIAL_GRADIOMETER: 6,
|
||||
KIT.CHANNEL_TRIGGER: 32,
|
||||
KIT.CHANNEL_EEG: 8,
|
||||
KIT.CHANNEL_ECG: 32,
|
||||
KIT.CHANNEL_ETC: 32,
|
||||
}
|
||||
KIT.CH_TO_FIFF_COIL = {
|
||||
# KIT.CHANNEL_MAGNETOMETER: FIFF.???,
|
||||
KIT.CHANNEL_MAGNETOMETER_REFERENCE: FIFF.FIFFV_COIL_KIT_REF_MAG,
|
||||
KIT.CHANNEL_AXIAL_GRADIOMETER: FIFF.FIFFV_COIL_KIT_GRAD,
|
||||
# KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE: FIFF.???,
|
||||
# KIT.CHANNEL_PLANAR_GRADIOMETER: FIFF.???,
|
||||
# KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE: FIFF.???,
|
||||
# KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER: FIFF.???,
|
||||
# KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE: FIFF.???,
|
||||
KIT.CHANNEL_TRIGGER: FIFF.FIFFV_COIL_NONE,
|
||||
KIT.CHANNEL_EEG: FIFF.FIFFV_COIL_EEG,
|
||||
KIT.CHANNEL_ECG: FIFF.FIFFV_COIL_NONE,
|
||||
KIT.CHANNEL_ETC: FIFF.FIFFV_COIL_NONE,
|
||||
KIT.CHANNEL_NULL: FIFF.FIFFV_COIL_NONE,
|
||||
}
|
||||
KIT.CH_TO_FIFF_KIND = {
|
||||
KIT.CHANNEL_MAGNETOMETER: FIFF.FIFFV_MEG_CH,
|
||||
KIT.CHANNEL_MAGNETOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH,
|
||||
KIT.CHANNEL_AXIAL_GRADIOMETER: FIFF.FIFFV_MEG_CH,
|
||||
KIT.CHANNEL_AXIAL_GRADIOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH,
|
||||
KIT.CHANNEL_PLANAR_GRADIOMETER: FIFF.FIFFV_MEG_CH,
|
||||
KIT.CHANNEL_PLANAR_GRADIOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH,
|
||||
KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER: FIFF.FIFFV_MEG_CH,
|
||||
KIT.CHANNEL_2ND_ORDER_AXIAL_GRADIOMETER_REFERENCE: FIFF.FIFFV_REF_MEG_CH,
|
||||
KIT.CHANNEL_TRIGGER: FIFF.FIFFV_MISC_CH,
|
||||
KIT.CHANNEL_EEG: FIFF.FIFFV_EEG_CH,
|
||||
KIT.CHANNEL_ECG: FIFF.FIFFV_ECG_CH,
|
||||
KIT.CHANNEL_ETC: FIFF.FIFFV_MISC_CH,
|
||||
KIT.CHANNEL_NULL: FIFF.FIFFV_MISC_CH,
|
||||
}
|
||||
KIT.CH_LABEL = {
|
||||
KIT.CHANNEL_TRIGGER: "TRIGGER",
|
||||
KIT.CHANNEL_EEG: "EEG",
|
||||
KIT.CHANNEL_ECG: "ECG",
|
||||
KIT.CHANNEL_ETC: "MISC",
|
||||
KIT.CHANNEL_NULL: "MISC",
|
||||
}
|
||||
|
||||
# Acquisition modes
|
||||
KIT.CONTINUOUS = 1
|
||||
KIT.EVOKED = 2
|
||||
KIT.EPOCHS = 3
|
||||
|
||||
# coreg constants
|
||||
KIT.DIG_POINTS = 10000
|
||||
|
||||
# Known KIT systems
|
||||
# -----------------
|
||||
# KIT recording system is encoded in the SQD file as integer:
|
||||
KIT.SYSTEM_MQ_ADULT = 345 # Macquarie Dept of Cognitive Science, 2006 -
|
||||
KIT.SYSTEM_MQ_CHILD = 403 # Macquarie Dept of Cognitive Science, 2006 -
|
||||
KIT.SYSTEM_AS = 260 # Academia Sinica at Taiwan
|
||||
KIT.SYSTEM_AS_2008 = 261 # Academia Sinica, 2008 or 2009 -
|
||||
KIT.SYSTEM_NYU_2008 = 32 # NYU-NY, July 7, 2008 -
|
||||
KIT.SYSTEM_NYU_2009 = 33 # NYU-NY, January 24, 2009 -
|
||||
KIT.SYSTEM_NYU_2010 = 34 # NYU-NY, January 22, 2010 -
|
||||
KIT.SYSTEM_NYU_2019 = 35 # NYU-NY, September 18, 2019 -
|
||||
KIT.SYSTEM_NYUAD_2011 = 440 # NYU-AD initial launch May 20, 2011 -
|
||||
KIT.SYSTEM_NYUAD_2012 = 441 # NYU-AD more channels July 11, 2012 -
|
||||
KIT.SYSTEM_NYUAD_2014 = 442 # NYU-AD move to NYUAD campus Nov 20, 2014 -
|
||||
KIT.SYSTEM_UMD_2004 = 51 # UMD Marie Mount Hall, October 1, 2004 -
|
||||
KIT.SYSTEM_UMD_2014_07 = 52 # UMD update to 16 bit ADC, July 4, 2014 -
|
||||
KIT.SYSTEM_UMD_2014_12 = 53 # UMD December 4, 2014 -
|
||||
KIT.SYSTEM_UMD_2019_09 = 54 # UMD September 3, 2019 -
|
||||
KIT.SYSTEM_YOKOGAWA_2017_01 = 1001 # Kanazawa (until 2017)
|
||||
KIT.SYSTEM_YOKOGAWA_2018_01 = 10020 # Kanazawa (since 2018)
|
||||
KIT.SYSTEM_YOKOGAWA_2020_08 = 10021 # Kanazawa (since August 2020)
|
||||
KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008 = 124
|
||||
|
||||
# Sensor layouts for plotting
|
||||
KIT_LAYOUT = {
|
||||
KIT.SYSTEM_AS: None,
|
||||
KIT.SYSTEM_AS_2008: "KIT-AS-2008",
|
||||
KIT.SYSTEM_MQ_ADULT: "KIT-160",
|
||||
KIT.SYSTEM_MQ_CHILD: "KIT-125",
|
||||
KIT.SYSTEM_NYU_2008: "KIT-157",
|
||||
KIT.SYSTEM_NYU_2009: "KIT-157",
|
||||
KIT.SYSTEM_NYU_2010: "KIT-157",
|
||||
KIT.SYSTEM_NYU_2019: None,
|
||||
KIT.SYSTEM_NYUAD_2011: "KIT-AD",
|
||||
KIT.SYSTEM_NYUAD_2012: "KIT-AD",
|
||||
KIT.SYSTEM_NYUAD_2014: "KIT-AD",
|
||||
KIT.SYSTEM_UMD_2004: None,
|
||||
KIT.SYSTEM_UMD_2014_07: None,
|
||||
KIT.SYSTEM_UMD_2014_12: "KIT-UMD-3",
|
||||
KIT.SYSTEM_UMD_2019_09: None,
|
||||
KIT.SYSTEM_YOKOGAWA_2017_01: None,
|
||||
KIT.SYSTEM_YOKOGAWA_2018_01: None,
|
||||
KIT.SYSTEM_YOKOGAWA_2020_08: None,
|
||||
KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008: None,
|
||||
}
|
||||
# Sensor neighbor definitions
|
||||
KIT_NEIGHBORS = {
|
||||
KIT.SYSTEM_AS: None,
|
||||
KIT.SYSTEM_AS_2008: None,
|
||||
KIT.SYSTEM_MQ_ADULT: None,
|
||||
KIT.SYSTEM_MQ_CHILD: None,
|
||||
KIT.SYSTEM_NYU_2008: "KIT-157",
|
||||
KIT.SYSTEM_NYU_2009: "KIT-157",
|
||||
KIT.SYSTEM_NYU_2010: "KIT-157",
|
||||
KIT.SYSTEM_NYU_2019: "KIT-NYU-2019",
|
||||
KIT.SYSTEM_NYUAD_2011: "KIT-208",
|
||||
KIT.SYSTEM_NYUAD_2012: "KIT-208",
|
||||
KIT.SYSTEM_NYUAD_2014: "KIT-208",
|
||||
KIT.SYSTEM_UMD_2004: "KIT-UMD-1",
|
||||
KIT.SYSTEM_UMD_2014_07: "KIT-UMD-2",
|
||||
KIT.SYSTEM_UMD_2014_12: "KIT-UMD-3",
|
||||
KIT.SYSTEM_UMD_2019_09: "KIT-UMD-4",
|
||||
KIT.SYSTEM_YOKOGAWA_2017_01: None,
|
||||
KIT.SYSTEM_YOKOGAWA_2018_01: None,
|
||||
KIT.SYSTEM_YOKOGAWA_2020_08: None,
|
||||
KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008: None,
|
||||
}
|
||||
# Names displayed in the info dict description
|
||||
KIT_SYSNAMES = {
|
||||
KIT.SYSTEM_MQ_ADULT: "Macquarie Dept of Cognitive Science (Adult), 2006-",
|
||||
KIT.SYSTEM_MQ_CHILD: "Macquarie Dept of Cognitive Science (Child), 2006-",
|
||||
KIT.SYSTEM_AS: "Academia Sinica, -2008",
|
||||
KIT.SYSTEM_AS_2008: "Academia Sinica, 2008-",
|
||||
KIT.SYSTEM_NYU_2008: "NYU New York, 2008-9",
|
||||
KIT.SYSTEM_NYU_2009: "NYU New York, 2009-10",
|
||||
KIT.SYSTEM_NYU_2010: "NYU New York, 2010-",
|
||||
KIT.SYSTEM_NYUAD_2011: "New York University Abu Dhabi, 2011-12",
|
||||
KIT.SYSTEM_NYUAD_2012: "New York University Abu Dhabi, 2012-14",
|
||||
KIT.SYSTEM_NYUAD_2014: "New York University Abu Dhabi, 2014-",
|
||||
KIT.SYSTEM_UMD_2004: "University of Maryland, 2004-14",
|
||||
KIT.SYSTEM_UMD_2014_07: "University of Maryland, 2014",
|
||||
KIT.SYSTEM_UMD_2014_12: "University of Maryland, 2014-",
|
||||
KIT.SYSTEM_UMD_2019_09: "University of Maryland, 2019-",
|
||||
KIT.SYSTEM_YOKOGAWA_2017_01: "Yokogawa of Kanazawa (until 2017)",
|
||||
KIT.SYSTEM_YOKOGAWA_2018_01: "Yokogawa of Kanazawa (since 2018)",
|
||||
KIT.SYSTEM_YOKOGAWA_2020_08: "Yokogawa of Kanazawa (since August 2020)",
|
||||
KIT.SYSTEM_EAGLE_TECHNOLOGY_PTB_2008: "Eagle Technology MEG (KIT/Yokogawa style) at PTB (since 2008, software upgrade in 2018)", # noqa: E501
|
||||
}
|
||||
|
||||
LEGACY_AMP_PARAMS = {
|
||||
KIT.SYSTEM_NYU_2008: (5.0, 11.0),
|
||||
KIT.SYSTEM_NYU_2009: (5.0, 11.0),
|
||||
KIT.SYSTEM_NYU_2010: (5.0, 11.0),
|
||||
KIT.SYSTEM_UMD_2004: (5.0, 11.0),
|
||||
}
|
||||
|
||||
# Ones that we don't use are commented out
|
||||
KIT.DIR_INDEX_DIR = 0
|
||||
KIT.DIR_INDEX_SYSTEM = 1
|
||||
KIT.DIR_INDEX_CHANNELS = 4
|
||||
KIT.DIR_INDEX_CALIBRATION = 5
|
||||
# FLL = 6
|
||||
KIT.DIR_INDEX_AMP_FILTER = 7
|
||||
KIT.DIR_INDEX_ACQ_COND = 8
|
||||
KIT.DIR_INDEX_RAW_DATA = 9
|
||||
# AVERAGED_DATA = 10
|
||||
# MRI = 11
|
||||
KIT.DIR_INDEX_COREG = 12
|
||||
# MAGNETIC_SOURCE = 13
|
||||
# TRIGGER = 14
|
||||
# BOOKMARKS = 15
|
||||
# DIGITIZER = 25
|
||||
KIT.DIR_INDEX_DIG_POINTS = 26
|
||||
KIT.DIR_INDEX_CHPI_DATA = 29
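
# Sketch of how the filter tables combine (assuming the amp/filter block of
# the SQD file stores per-channel option indices): an FLL type of 50 maps to
# KIT.FLL_SETTINGS[50] == (2, 1, 1), i.e. highpass options KIT.HPFS[2],
# lowpass options KIT.LPFS[1] and band-elimination options KIT.BEFS[1]; a
# stored highpass option index of 4 would then select KIT.HPFS[2][4] == 1 (Hz).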
233
mne/io/kit/coreg.py
Normal file
@@ -0,0 +1,233 @@
"""Coordinate Point Extractor for KIT system."""
|
||||
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
import re
|
||||
from collections import OrderedDict
|
||||
from os import SEEK_CUR, PathLike
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
|
||||
from ..._fiff._digitization import _make_dig_points
|
||||
from ...channels.montage import (
|
||||
_check_dig_shape,
|
||||
read_custom_montage,
|
||||
read_dig_polhemus_isotrak,
|
||||
read_polhemus_fastscan,
|
||||
)
|
||||
from ...transforms import (
|
||||
Transform,
|
||||
als_ras_trans,
|
||||
apply_trans,
|
||||
get_ras_to_neuromag_trans,
|
||||
)
|
||||
from ...utils import _check_fname, _check_option, warn
|
||||
from .constants import FIFF, KIT
|
||||
|
||||
INT32 = "<i4"
|
||||
FLOAT64 = "<f8"
|
||||
|
||||
|
||||
def read_mrk(fname):
|
||||
r"""Marker Point Extraction in MEG space directly from sqd.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fname : path-like
|
||||
Absolute path to Marker file.
|
||||
File formats allowed: \*.sqd, \*.mrk, \*.txt.
|
||||
|
||||
Returns
|
||||
-------
|
||||
mrk_points : ndarray, shape (n_points, 3)
|
||||
Marker points in MEG space [m].
|
||||
"""
|
||||
from .kit import _read_dirs
|
||||
|
||||
fname = Path(_check_fname(fname, "read", must_exist=True, name="mrk file"))
|
||||
_check_option("file extension", fname.suffix, (".sqd", ".mrk", ".txt"))
|
||||
if fname.suffix in (".sqd", ".mrk"):
|
||||
with open(fname, "rb", buffering=0) as fid:
|
||||
dirs = _read_dirs(fid)
|
||||
fid.seek(dirs[KIT.DIR_INDEX_COREG]["offset"])
|
||||
# skips match_done, meg_to_mri and mri_to_meg
|
||||
fid.seek(KIT.INT + (2 * KIT.DOUBLE * 16), SEEK_CUR)
|
||||
mrk_count = np.fromfile(fid, INT32, 1)[0]
|
||||
pts = []
|
||||
for _ in range(mrk_count):
|
||||
# mri_type, meg_type, mri_done, meg_done
|
||||
_, _, _, meg_done = np.fromfile(fid, INT32, 4)
|
||||
_, meg_pts = np.fromfile(fid, FLOAT64, 6).reshape(2, 3)
|
||||
if meg_done:
|
||||
pts.append(meg_pts)
|
||||
mrk_points = np.array(pts)
|
||||
else:
|
||||
assert fname.suffix == ".txt"
|
||||
mrk_points = _read_dig_kit(fname, unit="m")
|
||||
|
||||
# check output
|
||||
mrk_points = np.asarray(mrk_points)
|
||||
if mrk_points.shape != (5, 3):
|
||||
err = f"{repr(fname)} is no marker file, shape is {mrk_points.shape}"
|
||||
raise ValueError(err)
|
||||
return mrk_points
|
||||
|
||||
|
||||
def read_sns(fname):
|
||||
"""Sensor coordinate extraction in MEG space.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
fname : path-like
|
||||
Absolute path to sensor definition file.
|
||||
|
||||
Returns
|
||||
-------
|
||||
locs : numpy.array, shape = (n_points, 3)
|
||||
Sensor coil location.
|
||||
"""
|
||||
p = re.compile(
|
||||
r"\d,[A-Za-z]*,([\.\-0-9]+),"
|
||||
+ r"([\.\-0-9]+),([\.\-0-9]+),"
|
||||
+ r"([\.\-0-9]+),([\.\-0-9]+)"
|
||||
)
|
||||
with open(fname) as fid:
|
||||
locs = np.array(p.findall(fid.read()), dtype=float)
|
||||
return locs
|
||||
|
||||
|
||||
def _set_dig_kit(mrk, elp, hsp, eeg, *, bad_coils=()):
|
||||
"""Add landmark points and head shape data to the KIT instance.
|
||||
|
||||
Digitizer data (elp and hsp) are represented in [mm] in the Polhemus
|
||||
ALS coordinate system. This is converted to [m].
|
||||
|
||||
Parameters
|
||||
----------
|
||||
mrk : path-like | array_like, shape (5, 3) | None
|
||||
Marker points representing the location of the marker coils with
|
||||
respect to the MEG Sensors, or path to a marker file.
|
||||
elp : path-like | array_like, shape (8, 3) | None
|
||||
Digitizer points representing the location of the fiducials and the
|
||||
marker coils with respect to the digitized head shape, or path to a
|
||||
file containing these points.
|
||||
hsp : path-like | array, shape (n_points, 3) | None
|
||||
Digitizer head shape points, or path to head shape file. If more
|
||||
than 10`000 points are in the head shape, they are automatically
|
||||
decimated.
|
||||
bad_coils : list
|
||||
Indices of bad marker coils (up to two). Bad coils will be excluded
|
||||
when computing the device-head transformation.
|
||||
eeg : dict
|
||||
Ordered dict of EEG dig points.
|
||||
|
||||
Returns
|
||||
-------
|
||||
dig_points : list
|
||||
List of digitizer points for info['dig'].
|
||||
dev_head_t : Transform
|
||||
A dictionary describing the device-head transformation.
|
||||
hpi_results : list
|
||||
The hpi results.
|
||||
"""
|
||||
from ...coreg import _decimate_points, fit_matched_points
|
||||
|
||||
if isinstance(hsp, str | Path | PathLike):
|
||||
hsp = _read_dig_kit(hsp)
|
||||
n_pts = len(hsp)
|
||||
if n_pts > KIT.DIG_POINTS:
|
||||
hsp = _decimate_points(hsp, res=0.005)
|
||||
n_new = len(hsp)
|
||||
warn(
|
||||
f"The selected head shape contained {n_pts} points, which is more than "
|
||||
f"recommended ({KIT.DIG_POINTS}), and was automatically downsampled to "
|
||||
f"{n_new} points. The preferred way to downsample is using FastScan."
|
||||
)
|
||||
|
||||
if isinstance(elp, str | Path | PathLike):
|
||||
elp_points = _read_dig_kit(elp)
|
||||
if len(elp_points) != 8:
|
||||
raise ValueError(
|
||||
f"File {repr(elp)} should contain 8 points; got shape "
|
||||
f"{elp_points.shape}."
|
||||
)
|
||||
elp = elp_points
|
||||
if len(bad_coils) > 0:
|
||||
elp = np.delete(elp, np.array(bad_coils) + 3, 0)
|
||||
# check we have at least 3 marker coils (whether read from file or
|
||||
# passed in directly)
|
||||
if len(elp) not in (6, 7, 8):
|
||||
raise ValueError(f"ELP should contain 6 ~ 8 points; got shape {elp.shape}.")
|
||||
if isinstance(mrk, str | Path | PathLike):
|
||||
mrk = read_mrk(mrk)
|
||||
if len(bad_coils) > 0:
|
||||
mrk = np.delete(mrk, bad_coils, 0)
|
||||
if len(mrk) not in (3, 4, 5):
|
||||
raise ValueError(f"MRK should contain 3 ~ 5 points; got shape {mrk.shape}.")
|
||||
|
||||
mrk = apply_trans(als_ras_trans, mrk)
|
||||
|
||||
nasion, lpa, rpa = elp[:3]
|
||||
nmtrans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
|
||||
elp = apply_trans(nmtrans, elp)
|
||||
hsp = apply_trans(nmtrans, hsp)
|
||||
eeg = OrderedDict((k, apply_trans(nmtrans, p)) for k, p in eeg.items())
|
||||
|
||||
# device head transform
|
||||
trans = fit_matched_points(tgt_pts=elp[3:], src_pts=mrk, out="trans")
|
||||
|
||||
nasion, lpa, rpa = elp[:3]
|
||||
elp = elp[3:]
|
||||
|
||||
dig_points = _make_dig_points(nasion, lpa, rpa, elp, hsp, dig_ch_pos=eeg)
|
||||
dev_head_t = Transform("meg", "head", trans)
|
||||
|
||||
hpi_results = [
|
||||
dict(
|
||||
dig_points=[
|
||||
dict(
|
||||
ident=ci,
|
||||
r=r,
|
||||
kind=FIFF.FIFFV_POINT_HPI,
|
||||
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
|
||||
)
|
||||
for ci, r in enumerate(mrk)
|
||||
],
|
||||
coord_trans=dev_head_t,
|
||||
)
|
||||
]
|
||||
|
||||
return dig_points, dev_head_t, hpi_results
|
||||
|
||||
|
||||
def _read_dig_kit(fname, unit="auto"):
|
||||
# Read dig points from a file and return ndarray, using FastSCAN for .txt
|
||||
fname = _check_fname(fname, "read", must_exist=True, name="hsp or elp file")
|
||||
assert unit in ("auto", "m", "mm")
|
||||
_check_option("file extension", fname.suffix, (".hsp", ".elp", ".mat", ".txt"))
|
||||
if fname.suffix == ".txt":
|
||||
unit = "mm" if unit == "auto" else unit
|
||||
out = read_polhemus_fastscan(fname, unit=unit, on_header_missing="ignore")
|
||||
elif fname.suffix in (".hsp", ".elp"):
|
||||
unit = "m" if unit == "auto" else unit
|
||||
mon = read_dig_polhemus_isotrak(fname, unit=unit)
|
||||
if fname.suffix == ".hsp":
|
||||
dig = [d["r"] for d in mon.dig if d["kind"] != FIFF.FIFFV_POINT_CARDINAL]
|
||||
else:
|
||||
dig = [d["r"] for d in mon.dig]
|
||||
if (
|
||||
dig
|
||||
and mon.dig[0]["kind"] == FIFF.FIFFV_POINT_CARDINAL
|
||||
and mon.dig[0]["ident"] == FIFF.FIFFV_POINT_LPA
|
||||
):
|
||||
# LPA, Nasion, RPA -> NLR
|
||||
dig[:3] = [dig[1], dig[0], dig[2]]
|
||||
out = np.array(dig, float)
|
||||
else:
|
||||
assert fname.suffix == ".mat"
|
||||
out = np.array([d["r"] for d in read_custom_montage(fname).dig])
|
||||
_check_dig_shape(out)
|
||||
return out
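

# Dispatch summary for _read_dig_kit (a sketch; the file names are
# hypothetical):
#
#     _read_dig_kit("shape.txt")   # FastSCAN text export; "auto" assumes mm
#     _read_dig_kit("shape.hsp")   # Polhemus ISOTRAK, cardinal points dropped
#     _read_dig_kit("points.elp")  # Polhemus ISOTRAK, cardinals reordered NLR
#     _read_dig_kit("dig.mat")     # read via read_custom_montage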
1044
mne/io/kit/kit.py
Normal file
File diff suppressed because it is too large
7
mne/io/nedf/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""NEDF file import module."""
|
||||
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
from .nedf import read_raw_nedf, _parse_nedf_header
|
||||
229
mne/io/nedf/nedf.py
Normal file
229
mne/io/nedf/nedf.py
Normal file
@@ -0,0 +1,229 @@
|
||||
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

"""Import NeuroElectrics DataFormat (NEDF) files."""

from copy import deepcopy
from datetime import datetime, timezone

import numpy as np

from ..._fiff.meas_info import create_info
from ..._fiff.utils import _mult_cal_one
from ...utils import _check_fname, _soft_import, verbose, warn
from ..base import BaseRaw


def _getsubnodetext(node, name):
    """Get an element from an XML node, raising an error if it is missing.

    Parameters
    ----------
    node : Element
        XML Element.
    name : str
        Child element name.

    Returns
    -------
    text : str
        Text contents of the child node.
    """
    subnode = node.findtext(name)
    if not subnode:
        raise RuntimeError("NEDF header " + name + " not found")
    return subnode


def _parse_nedf_header(header):
    """Read header information from the first 10kB of an .nedf file.

    Parameters
    ----------
    header : bytes
        Null-terminated header data, typically the file's first 10240 bytes.

    Returns
    -------
    info : dict
        A dictionary with header information.
    dt : numpy.dtype
        Structure of the binary EEG/accelerometer/trigger data in the file
        (one full 5-sample block).
    dt_last : numpy.dtype
        Structure of the trailing, possibly shorter block.
    n_samples : int
        The number of data samples.
    n_full : int
        The number of full 5-sample blocks.
    """
    defusedxml = _soft_import("defusedxml", "reading NEDF data")
    info = {}
    # nedf files have three accelerometer channels sampled at 100Hz followed
    # by five EEG samples + TTL trigger sampled at 500Hz
    # For 32 EEG channels and no stim channels, the data layout may look like
    # [ ('acc', '>u2', (3,)),
    #   ('data', dtype([
    #       ('eeg', 'u1', (32, 3)),
    #       ('trig', '>i4', (1,))
    #   ]), (5,))
    # ]

    dt = []  # dtype for the binary data block
    datadt = []  # dtype for a single EEG sample

    headerend = header.find(b"\0")
    if headerend == -1:
        raise RuntimeError("End of header null not found")
    headerxml = defusedxml.ElementTree.fromstring(header[:headerend])
    nedfversion = headerxml.findtext("NEDFversion", "")
    if nedfversion not in ["1.3", "1.4"]:
        warn("NEDFversion unsupported, use with caution")

    if headerxml.findtext("stepDetails/DeviceClass", "") == "STARSTIM":
        warn("Found Starstim, this hasn't been tested extensively!")

    if headerxml.findtext("AdditionalChannelStatus", "OFF") != "OFF":
        raise RuntimeError("Unknown additional channel, aborting.")

    n_acc = int(headerxml.findtext("NumberOfChannelsOfAccelerometer", 0))
    if n_acc:
        # expect one sample of u16 accelerometer data per block
        dt.append(("acc", ">u2", (n_acc,)))

    eegset = headerxml.find("EEGSettings")
    if eegset is None:
        raise RuntimeError("No EEG channels found")
    nchantotal = int(_getsubnodetext(eegset, "TotalNumberOfChannels"))
    info["nchan"] = nchantotal

    info["sfreq"] = int(_getsubnodetext(eegset, "EEGSamplingRate"))
    info["ch_names"] = [e.text for e in eegset.find("EEGMontage")]
    if nchantotal != len(info["ch_names"]):
        raise RuntimeError(
            f"TotalNumberOfChannels ({nchantotal}) != "
            f"channel count ({len(info['ch_names'])})"
        )
    # expect nchantotal uint24s
    datadt.append(("eeg", "B", (nchantotal, 3)))

    if headerxml.find("STIMSettings") is not None:
        # 2* -> two stim samples per eeg sample
        datadt.append(("stim", "B", (2, nchantotal, 3)))
        warn("stim channels are currently ignored")

    # Trigger data: 4 bytes in newer versions, 1 byte in older versions
    trigger_type = ">i4" if headerxml.findtext("NEDFversion") else "B"
    datadt.append(("trig", trigger_type))
    # 5 data samples per block
    dt.append(("data", np.dtype(datadt), (5,)))

    date = headerxml.findtext("StepDetails/StartDate_firstEEGTimestamp", 0)
    info["meas_date"] = datetime.fromtimestamp(int(date) / 1000, timezone.utc)

    n_samples = int(_getsubnodetext(eegset, "NumberOfRecordsOfEEG"))
    n_full, n_last = divmod(n_samples, 5)
    dt_last = deepcopy(dt)
    assert dt_last[-1][-1] == (5,)
    dt_last[-1] = list(dt_last[-1])
    dt_last[-1][-1] = (n_last,)
    dt_last[-1] = tuple(dt_last[-1])
    return info, np.dtype(dt), np.dtype(dt_last), n_samples, n_full


# the first 10240 bytes are header in XML format, padded with NULL bytes
_HDRLEN = 10240


class RawNedf(BaseRaw):
    """Raw object from a NeuroElectrics nedf file."""

    def __init__(self, filename, preload=False, verbose=None):
        filename = str(_check_fname(filename, "read", True, "filename"))
        with open(filename, mode="rb") as fid:
            header = fid.read(_HDRLEN)
        header, dt, dt_last, n_samp, n_full = _parse_nedf_header(header)
        ch_names = header["ch_names"] + ["STI 014"]
        ch_types = ["eeg"] * len(ch_names)
        ch_types[-1] = "stim"
        info = create_info(ch_names, header["sfreq"], ch_types)
        # scaling factor ADC-values -> volts
        # taken from the NEDF EEGLAB plugin
        # (https://www.neuroelectrics.com/resources/software/):
        for ch in info["chs"][:-1]:
            ch["cal"] = 2.4 / (6.0 * 8388607)
        with info._unlock():
            info["meas_date"] = header["meas_date"]
        raw_extra = dict(dt=dt, dt_last=dt_last, n_full=n_full)
        super().__init__(
            info,
            preload=preload,
            filenames=[filename],
            verbose=verbose,
            raw_extras=[raw_extra],
            last_samps=[n_samp - 1],
        )

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        dt = self._raw_extras[fi]["dt"]
        dt_last = self._raw_extras[fi]["dt_last"]
        n_full = self._raw_extras[fi]["n_full"]
        n_eeg = dt[1].subdtype[0][0].shape[0]
        # data is stored in 5-sample chunks (except maybe the last one!)
        # so we have to do some gymnastics to pick the correct parts to
        # read
        offset = start // 5 * dt.itemsize + _HDRLEN
        start_sl = start % 5
        n_samples = stop - start
        n_samples_full = min(stop, n_full * 5) - start
        last = None
        n_chunks = (n_samples_full - 1) // 5 + 1
        n_tot = n_chunks * 5
        with open(self.filenames[fi], "rb") as fid:
            fid.seek(offset, 0)
            chunks = np.fromfile(fid, dtype=dt, count=n_chunks)
            assert len(chunks) == n_chunks
            if n_samples != n_samples_full:
                last = np.fromfile(fid, dtype=dt_last, count=1)
        eeg = _convert_eeg(chunks, n_eeg, n_tot)
        trig = chunks["data"]["trig"].reshape(1, n_tot)
        if last is not None:
            n_last = dt_last["data"].shape[0]
            eeg = np.concatenate((eeg, _convert_eeg(last, n_eeg, n_last)), axis=-1)
            trig = np.concatenate(
                (trig, last["data"]["trig"].reshape(1, n_last)), axis=-1
            )
        one_ = np.concatenate((eeg, trig))
        one = one_[:, start_sl : n_samples + start_sl]
        _mult_cal_one(data, one, idx, cals, mult)


def _convert_eeg(chunks, n_eeg, n_tot):
    # convert uint8-triplet -> int32
    eeg = chunks["data"]["eeg"] @ np.array([1 << 16, 1 << 8, 1])
    # convert sign if necessary
    eeg[eeg > (1 << 23)] -= 1 << 24
    eeg = eeg.reshape((n_tot, n_eeg)).T
    return eeg
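

# Worked example of the uint24 decoding above: the big-endian byte triplet
# (0xFF, 0xFF, 0xFE) dotted with [1 << 16, 1 << 8, 1] gives 16777214; since
# that exceeds 1 << 23 == 8388608, it is shifted down by 1 << 24, yielding -2.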


@verbose
def read_raw_nedf(filename, preload=False, verbose=None) -> RawNedf:
    """Read NeuroElectrics .nedf files.

    NEDF file versions starting from 1.3 are supported.

    Parameters
    ----------
    filename : path-like
        Path to the ``.nedf`` file.
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawNedf
        A Raw object containing NEDF data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawNedf.
    """
    return RawNedf(filename, preload, verbose)
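

# Example usage (a sketch; the file name is hypothetical):
#
#     raw = read_raw_nedf("recording.nedf", preload=True)
#     print(raw.info["sfreq"], raw.ch_names[-1])  # EEG rate and "STI 014"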
5
mne/io/neuralynx/__init__.py
Normal file
@@ -0,0 +1,5 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .neuralynx import read_raw_neuralynx
426
mne/io/neuralynx/neuralynx.py
Normal file
@@ -0,0 +1,426 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import datetime
import glob
import inspect
import os

import numpy as np

from ..._fiff.meas_info import create_info
from ..._fiff.utils import _mult_cal_one
from ...annotations import Annotations
from ...utils import _check_fname, _soft_import, fill_doc, logger, verbose
from ..base import BaseRaw


@fill_doc
def read_raw_neuralynx(
    fname, *, preload=False, exclude_fname_patterns=None, verbose=None
) -> "RawNeuralynx":
    """Reader for Neuralynx files.

    Parameters
    ----------
    fname : path-like
        Path to a folder with Neuralynx .ncs files.
    %(preload)s
    exclude_fname_patterns : list of str
        List of glob-like string patterns to exclude from the channel list.
        Useful when not all channels have the same number of samples, so that
        channels with matching sample counts can be read into separate
        instances.
    %(verbose)s

    Returns
    -------
    raw : instance of RawNeuralynx
        A Raw object containing Neuralynx data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawNeuralynx.

    Notes
    -----
    Neuralynx files are read from disk using the `Neo package
    <http://neuralensemble.org/neo/>`__.
    Currently, only reading of ``.ncs`` files is supported.

    ``raw.info["meas_date"]`` is read from the ``recording_opened`` property
    of the first ``.ncs`` file (i.e. channel) in the dataset (a warning is
    issued if files have different dates of acquisition).

    Channel-specific high and lowpass frequencies of online filters are
    determined based on the ``DspLowCutFrequency`` and ``DspHighCutFrequency``
    header fields, respectively. If no filters were used for a channel, the
    default lowpass is set to the Nyquist frequency and the default highpass
    is set to 0.
    If channels have different high/low cutoffs, ``raw.info["highpass"]`` and
    ``raw.info["lowpass"]`` are then set to the maximum highpass and minimum
    lowpass values across channels, respectively.

    Other header variables can be inspected using Neo directly. For example::

        from neo.io import NeuralynxIO  # doctest: +SKIP
        fname = 'path/to/your/data'  # doctest: +SKIP
        nlx_reader = NeuralynxIO(dirname=fname)  # doctest: +SKIP
        print(nlx_reader.header)  # doctest: +SKIP
        print(nlx_reader.file_headers.items())  # doctest: +SKIP
    """
    return RawNeuralynx(
        fname,
        preload=preload,
        exclude_fname_patterns=exclude_fname_patterns,
        verbose=verbose,
    )
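

# Example (a sketch; the directory and pattern are hypothetical): exclude
# lower-rate channels so the remaining .ncs files share one sampling rate:
#
#     raw = read_raw_neuralynx(
#         "session_dir/", exclude_fname_patterns=["LFP*.ncs"]
#     )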
|
||||
|
||||
|
||||
# Helper for neo deprecation of exclude_filename -> exclude_filenames in 0.13.2
|
||||
def _exclude_kwarg(exclude_fnames):
|
||||
from neo.io import NeuralynxIO
|
||||
|
||||
key = "exclude_filename"
|
||||
if "exclude_filenames" in inspect.getfullargspec(NeuralynxIO).args:
|
||||
key += "s"
|
||||
return {key: exclude_fnames}


@fill_doc
class RawNeuralynx(BaseRaw):
    """RawNeuralynx class."""

    @verbose
    def __init__(
        self,
        fname,
        *,
        preload=False,
        exclude_fname_patterns=None,
        verbose=None,
    ):
        fname = _check_fname(fname, "read", True, "fname", need_dir=True)

        _soft_import("neo", "Reading NeuralynxIO files", strict=True)
        from neo.io import NeuralynxIO

        logger.info(f"Checking files in {fname}")

        # construct a list of filenames to ignore
        exclude_fnames = None
        if exclude_fname_patterns:
            exclude_fnames = []
            for pattern in exclude_fname_patterns:
                fnames = glob.glob(os.path.join(fname, pattern))
                fnames = [os.path.basename(fname) for fname in fnames]
                exclude_fnames.extend(fnames)

            logger.info("Ignoring .ncs files:\n" + "\n".join(exclude_fnames))

        # get basic file info from header, throw Error if NeuralynxIO can't parse
        try:
            nlx_reader = NeuralynxIO(dirname=fname, **_exclude_kwarg(exclude_fnames))
        except ValueError as e:
            # give a more informative error message explaining what the user
            # can do about it
            if "Incompatible section structures across streams" in str(e):
                raise ValueError(
                    "It seems .ncs channels have different numbers of samples. "
                    + "This is likely due to different sampling rates. "
                    + "Try reading in only channels with uniform sampling rate "
                    + "by excluding other channels with `exclude_fname_patterns` "
                    + "input argument."
                    + f"\nOriginal neo.NeuralynxRawIO ValueError:\n{e}"
                ) from None
            else:
                raise

        info = create_info(
            ch_types="seeg",
            ch_names=nlx_reader.header["signal_channels"]["name"].tolist(),
            sfreq=nlx_reader.get_signal_sampling_rate(),
        )

        ncs_fnames = nlx_reader.ncs_filenames.values()
        ncs_hdrs = [
            hdr
            for hdr_key, hdr in nlx_reader.file_headers.items()
            if hdr_key in ncs_fnames
        ]

        # if all files have the same recording_opened date, write it to info
        meas_dates = np.array([hdr["recording_opened"] for hdr in ncs_hdrs])
        # to be sure, only write if all dates are the same
        meas_diff = []
        for md in meas_dates:
            meas_diff.append((md - meas_dates[0]).total_seconds())

        # tolerate a +/-1 second meas_date difference (arbitrary threshold)
        # else issue a warning
        warn_meas = (np.abs(meas_diff) > 1.0).any()
        if warn_meas:
            logger.warning(
                "Not all .ncs files have the same recording_opened date. "
                + "Writing meas_date based on the first .ncs file."
            )

        # Neuralynx allows channel-specific low/highpass filters
        # if not enabled, assume default lowpass = nyquist, highpass = 0
        default_lowpass = info["sfreq"] / 2  # nyquist
        default_highpass = 0

        has_hp = [hdr["DSPLowCutFilterEnabled"] for hdr in ncs_hdrs]
        has_lp = [hdr["DSPHighCutFilterEnabled"] for hdr in ncs_hdrs]
        if not all(has_hp) or not all(has_lp):
            logger.warning(
                "Not all .ncs files have the same high/lowpass filter settings. "
                + "Assuming default highpass = 0, lowpass = nyquist."
            )

        highpass_freqs = [
            float(hdr["DspLowCutFrequency"])
            if hdr["DSPLowCutFilterEnabled"]
            else default_highpass
            for hdr in ncs_hdrs
        ]

        lowpass_freqs = [
            float(hdr["DspHighCutFrequency"])
            if hdr["DSPHighCutFilterEnabled"]
            else default_lowpass
            for hdr in ncs_hdrs
        ]

        with info._unlock():
            info["meas_date"] = meas_dates[0].astimezone(datetime.timezone.utc)
            info["highpass"] = np.max(highpass_freqs)
            info["lowpass"] = np.min(lowpass_freqs)

        # Neo reads only valid contiguous .ncs samples grouped as segments
        n_segments = nlx_reader.header["nb_segment"][0]
        block_id = 0  # assumes there's only one block of recording

        # get segment start/stop times
        start_times = np.array(
            [nlx_reader.segment_t_start(block_id, i) for i in range(n_segments)]
        )
        stop_times = np.array(
            [nlx_reader.segment_t_stop(block_id, i) for i in range(n_segments)]
        )

        # find discontinuous boundaries (of length n-1)
        next_start_times = start_times[1::]
        previous_stop_times = stop_times[:-1]
        seg_diffs = next_start_times - previous_stop_times

        # mark as discontinuous any two segments that have
        # start/stop delta larger than sampling period (1.5/sampling_rate)
        logger.info("Checking for temporal discontinuities in Neo data segments.")
        delta = 1.5 / info["sfreq"]
        gaps = seg_diffs > delta
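
        # Worked example (illustrative values): with sfreq = 1000 Hz,
        # delta = 1.5 ms, a segment ending at t = 10.0 s followed by one
        # starting at t = 10.5 s gives seg_diff = 0.5 s > delta, so a gap of
        # roughly 500 samples is flagged and later annotated as BAD_ACQ_SKIP.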

        seg_gap_dict = {}

        logger.info(
            f"N = {gaps.sum()} discontinuous Neo segments detected "
            + f"with delta > {delta} sec. "
            + "Annotating gaps as BAD_ACQ_SKIP."
            if gaps.any()
            else "No discontinuities detected."
        )

        gap_starts = stop_times[:-1][gaps]  # gap starts at segment offset
        gap_stops = start_times[1::][gaps]  # gap stops at segment onset

        # (n_gaps,) array of ints giving number of samples per inferred gap
        gap_n_samps = np.array(
            [
                int(round(stop * info["sfreq"])) - int(round(start * info["sfreq"]))
                for start, stop in zip(gap_starts, gap_stops)
            ]
        ).astype(int)  # force an int array (if no gaps, empty array is a float)

        # get sort indices for all segments (valid and gap) in ascending order
        all_starts_ids = np.argsort(np.concatenate([start_times, gap_starts]))

        # variable indicating whether each segment is a gap or not
        gap_indicator = np.concatenate(
            [
                np.full(len(start_times), fill_value=0),
                np.full(len(gap_starts), fill_value=1),
            ]
        )
        gap_indicator = gap_indicator[all_starts_ids].astype(bool)

        # store this in a dict to be passed to _raw_extras
        seg_gap_dict = {
            "gap_n_samps": gap_n_samps,
            "isgap": gap_indicator,  # False (data segment) or True (gap segment)
        }

        valid_segment_sizes = [
            nlx_reader.get_signal_size(block_id, i) for i in range(n_segments)
        ]

        sizes_sorted = np.concatenate([valid_segment_sizes, gap_n_samps])[
            all_starts_ids
        ]

        # now construct an (n_samples,) indicator variable
        sample2segment = np.concatenate(
            [np.full(shape=(n,), fill_value=i) for i, n in enumerate(sizes_sorted)]
        )
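
        # For example (illustrative sizes): sizes_sorted = [3, 2, 4] yields
        # sample2segment = [0, 0, 0, 1, 1, 2, 2, 2, 2], i.e. each sample is
        # labeled with the index of the segment (data or gap) it belongs to.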

        # get the start sample index for each gap segment
        gap_start_ids = np.cumsum(np.hstack([[0], sizes_sorted[:-1]]))[gap_indicator]

        # recreate time axis for gap annotations
        mne_times = np.arange(0, len(sample2segment)) / info["sfreq"]

        assert len(gap_start_ids) == len(gap_n_samps)
        annotations = Annotations(
            onset=[mne_times[onset_id] for onset_id in gap_start_ids],
            duration=[
                mne_times[onset_id + (n - 1)] - mne_times[onset_id]
                for onset_id, n in zip(gap_start_ids, gap_n_samps)
            ],
            description=["BAD_ACQ_SKIP"] * len(gap_start_ids),
        )

        super().__init__(
            info=info,
            last_samps=[sizes_sorted.sum() - 1],
            filenames=[fname],
            preload=preload,
            raw_extras=[
                dict(
                    smp2seg=sample2segment,
                    exclude_fnames=exclude_fnames,
                    segment_sizes=sizes_sorted,
                    seg_gap_dict=seg_gap_dict,
                )
            ],
        )

        self.set_annotations(annotations)

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        from neo import AnalogSignal, Segment
        from neo.io import NeuralynxIO
        from neo.io.proxyobjects import AnalogSignalProxy

        # quantities is a dependency of neo so we are guaranteed it exists
        from quantities import Hz

        nlx_reader = NeuralynxIO(
            dirname=self.filenames[fi],
            **_exclude_kwarg(self._raw_extras[0]["exclude_fnames"]),
        )
        neo_block = nlx_reader.read(lazy=True)

        # check that every segment has 1 associated neo.AnalogSignal() object
        # (not sure what multiple analogsignals per neo.Segment would mean)
        assert sum(
            [len(segment.analogsignals) for segment in neo_block[0].segments]
        ) == len(neo_block[0].segments)

        segment_sizes = self._raw_extras[fi]["segment_sizes"]

        # construct a (n_segments, 2) array of the first and last
        # sample index for each segment relative to the start of the recording
        seg_starts = [0]  # first chunk starts at sample 0
        seg_stops = [segment_sizes[0] - 1]
        for i in range(1, len(segment_sizes)):
            ons_new = (
                seg_stops[i - 1] + 1
            )  # current chunk starts one sample after the previous one
            seg_starts.append(ons_new)
            off_new = (
                seg_stops[i - 1] + segment_sizes[i]
            )  # the last sample is len(chunk) samples after the previous ended
            seg_stops.append(off_new)

        start_stop_samples = np.stack([np.array(seg_starts), np.array(seg_stops)]).T
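
        # For example (illustrative sizes): segment_sizes = [5, 3, 4] gives
        # seg_starts = [0, 5, 8] and seg_stops = [4, 7, 11], so
        # start_stop_samples = [[0, 4], [5, 7], [8, 11]].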

        first_seg = self._raw_extras[0]["smp2seg"][
            start
        ]  # segment containing start sample
        last_seg = self._raw_extras[0]["smp2seg"][
            stop - 1
        ]  # segment containing stop sample

        # select all segments between the one that contains the start sample
        # and the one that contains the stop sample
        sel_samples_global = start_stop_samples[first_seg : last_seg + 1, :]

        # express end samples relative to segment onsets
        # to be used for slicing the arrays below
        sel_samples_local = sel_samples_global.copy()
        sel_samples_local[0:-1, 1] = (
            sel_samples_global[0:-1, 1] - sel_samples_global[0:-1, 0]
        )
        sel_samples_local[1::, 0] = (
            0  # now set the start sample for all segments after the first to 0
        )

        sel_samples_local[0, 0] = (
            start - sel_samples_global[0, 0]
        )  # express start sample relative to segment onset
        sel_samples_local[-1, -1] = (stop - 1) - sel_samples_global[
            -1, 0
        ]  # express stop sample relative to segment onset

        # array containing Segments
        segments_arr = np.array(neo_block[0].segments, dtype=object)

        # if gaps were detected, correctly insert gap Segments in between valid Segments
        gap_samples = self._raw_extras[fi]["seg_gap_dict"]["gap_n_samps"]
        gap_segments = [Segment(f"gap-{i}") for i in range(len(gap_samples))]

        # create AnalogSignal objects representing gap data filled with 0's
        sfreq = nlx_reader.get_signal_sampling_rate()
        n_chans = (
            np.arange(idx.start, idx.stop, idx.step).size
            if type(idx) is slice
            else len(idx)  # idx can be a slice or an np.array so check both
        )

        for seg, n in zip(gap_segments, gap_samples):
            asig = AnalogSignal(
                signal=np.zeros((n, n_chans)), units="uV", sampling_rate=sfreq * Hz
            )
            seg.analogsignals.append(asig)

        n_total_segments = len(neo_block[0].segments + gap_segments)
        segments_arr = np.zeros((n_total_segments,), dtype=object)

        # insert inferred gap segments at the right place in between valid segments
        isgap = self._raw_extras[0]["seg_gap_dict"]["isgap"]
        segments_arr[~isgap] = neo_block[0].segments
        segments_arr[isgap] = gap_segments

        # now load data for selected segments/channels via
        # neo.Segment.AnalogSignalProxy.load() or
        # pad directly as AnalogSignal.magnitude for any gap data
        all_data = np.concatenate(
            [
                signal.load(channel_indexes=idx).magnitude[
                    samples[0] : samples[-1] + 1, :
                ]
                if isinstance(signal, AnalogSignalProxy)
                else signal.magnitude[samples[0] : samples[-1] + 1, :]
                for seg, samples in zip(
                    segments_arr[first_seg : last_seg + 1], sel_samples_local
                )
                for signal in seg.analogsignals
            ]
        ).T

        all_data *= 1e-6  # Convert uV to V
        n_channels = len(nlx_reader.header["signal_channels"]["name"])
        block = np.zeros((n_channels, stop - start), dtype=data.dtype)
        block[idx] = all_data  # shape = (n_channels, n_samples)

        # Then store the result where it needs to go
        _mult_cal_one(data, block, idx, cals, mult)
7
mne/io/nicolet/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""Nicolet module for conversion to FIF."""
|
||||
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
from .nicolet import read_raw_nicolet
|
||||
201
mne/io/nicolet/nicolet.py
Normal file
@@ -0,0 +1,201 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import calendar
import datetime
from os import path

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _empty_info
from ..._fiff.utils import _create_chs, _find_channels, _read_segments_file
from ...utils import fill_doc, logger
from ..base import BaseRaw


@fill_doc
def read_raw_nicolet(
    input_fname, ch_type, eog=(), ecg=(), emg=(), misc=(), preload=False, verbose=None
) -> "RawNicolet":
    """Read Nicolet data as raw object.

    .. note:: This reader takes data files with the extension ``.data`` as an
              input. The header file with the same file name stem and an
              extension ``.head`` is expected to be found in the same
              directory.

    Parameters
    ----------
    input_fname : path-like
        Path to the data file (ending with ``.data`` not ``.head``).
    ch_type : str
        Channel type to designate to the data channels. Supported data types
        include ``'eeg'``, ``'seeg'``.
    eog : list | tuple | ``'auto'``
        Names of channels or list of indices that should be designated
        EOG channels. If ``'auto'``, the channel names beginning with
        ``EOG`` are used. Defaults to empty tuple.
    ecg : list | tuple | ``'auto'``
        Names of channels or list of indices that should be designated
        ECG channels. If ``'auto'``, the channel names beginning with
        ``ECG`` are used. Defaults to empty tuple.
    emg : list | tuple | ``'auto'``
        Names of channels or list of indices that should be designated
        EMG channels. If ``'auto'``, the channel names beginning with
        ``EMG`` are used. Defaults to empty tuple.
    misc : list | tuple
        Names of channels or list of indices that should be designated
        MISC channels. Defaults to empty tuple.
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of Raw
        A Raw object containing the data.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """
    return RawNicolet(
        input_fname,
        ch_type,
        eog=eog,
        ecg=ecg,
        emg=emg,
        misc=misc,
        preload=preload,
        verbose=verbose,
    )
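

# Hypothetical usage (file names are illustrative): reading "rec01.data"
# requires the matching header "rec01.head" in the same directory::
#
#     raw = read_raw_nicolet("rec01.data", ch_type="eeg", eog="auto")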


def _get_nicolet_info(fname, ch_type, eog, ecg, emg, misc):
    """Extract info from Nicolet header files."""
    fname, extension = path.splitext(fname)

    if extension != ".data":
        raise ValueError(f'File name should end with .data, not "{extension}".')

    header = fname + ".head"

    logger.info("Reading header...")
    header_info = dict()
    with open(header) as fid:
        for line in fid:
            var, value = line.split("=")
            if var == "elec_names":
                value = value[1:-2].split(",")  # strip brackets
            elif var == "conversion_factor":
                value = float(value)
            elif var in ["num_channels", "rec_id", "adm_id", "pat_id", "num_samples"]:
                value = int(value)
            elif var != "start_ts":
                value = float(value)
            header_info[var] = value
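            # For example (illustrative lines): "num_samples=3686400\n" is
            # stored as the int 3686400, "conversion_factor=0.179\n" as a
            # float, and "elec_names=[FP1,FP2]\n" as ["FP1", "FP2"].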

    ch_names = header_info["elec_names"]
    if eog == "auto":
        eog = _find_channels(ch_names, "EOG")
    if ecg == "auto":
        ecg = _find_channels(ch_names, "ECG")
    if emg == "auto":
        emg = _find_channels(ch_names, "EMG")

    date, time = header_info["start_ts"].split()
    date = date.split("-")
    time = time.split(":")
    sec, msec = time[2].split(".")
    date = datetime.datetime(
        int(date[0]),
        int(date[1]),
        int(date[2]),
        int(time[0]),
        int(time[1]),
        int(sec),
        int(msec),
    )
    info = _empty_info(header_info["sample_freq"])
    info["meas_date"] = (calendar.timegm(date.utctimetuple()), 0)

    if ch_type == "eeg":
        ch_coil = FIFF.FIFFV_COIL_EEG
        ch_kind = FIFF.FIFFV_EEG_CH
    elif ch_type == "seeg":
        ch_coil = FIFF.FIFFV_COIL_EEG
        ch_kind = FIFF.FIFFV_SEEG_CH
    else:
        raise TypeError(
            "Channel type not recognized. Available types are 'eeg' and 'seeg'."
        )
    cals = np.repeat(header_info["conversion_factor"] * 1e-6, len(ch_names))
    info["chs"] = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc)
    info["highpass"] = 0.0
    info["lowpass"] = info["sfreq"] / 2.0
    info._unlocked = False
    info._update_redundant()
    return info, header_info


class RawNicolet(BaseRaw):
    """Raw object from Nicolet file.

    Parameters
    ----------
    input_fname : path-like
        Path to the Nicolet file.
    ch_type : str
        Channel type to designate to the data channels. Supported data types
        include ``'eeg'``, ``'seeg'``.
    eog : list | tuple | ``'auto'``
        Names of channels or list of indices that should be designated
        EOG channels. If ``'auto'``, the channel names beginning with
        ``EOG`` are used. Defaults to empty tuple.
    ecg : list | tuple | ``'auto'``
        Names of channels or list of indices that should be designated
        ECG channels. If ``'auto'``, the channel names beginning with
        ``ECG`` are used. Defaults to empty tuple.
    emg : list | tuple | ``'auto'``
        Names of channels or list of indices that should be designated
        EMG channels. If ``'auto'``, the channel names beginning with
        ``EMG`` are used. Defaults to empty tuple.
    misc : list | tuple
        Names of channels or list of indices that should be designated
        MISC channels. Defaults to empty tuple.
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """

    def __init__(
        self,
        input_fname,
        ch_type,
        eog=(),
        ecg=(),
        emg=(),
        misc=(),
        preload=False,
        verbose=None,
    ):
        input_fname = path.abspath(input_fname)
        info, header_info = _get_nicolet_info(input_fname, ch_type, eog, ecg, emg, misc)
        last_samps = [header_info["num_samples"] - 1]
        super().__init__(
            info,
            preload,
            filenames=[input_fname],
            raw_extras=[header_info],
            last_samps=last_samps,
            orig_format="int",
            verbose=verbose,
        )

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        _read_segments_file(self, data, idx, fi, start, stop, cals, mult, dtype="<i2")
7
mne/io/nihon/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""Nihon Kohden module for conversion to FIF."""
|
||||
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
from .nihon import read_raw_nihon
|
||||
514
mne/io/nihon/nihon.py
Normal file
@@ -0,0 +1,514 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from collections import OrderedDict
from datetime import datetime, timezone
from pathlib import Path

import numpy as np

from ..._fiff.meas_info import create_info
from ..._fiff.utils import _mult_cal_one
from ...annotations import Annotations
from ...utils import _check_fname, fill_doc, logger, verbose, warn
from ..base import BaseRaw


def _ensure_path(fname):
    out = fname
    if not isinstance(out, Path):
        out = Path(out)
    return out


@fill_doc
def read_raw_nihon(fname, preload=False, verbose=None) -> "RawNihon":
    """Reader for a Nihon Kohden EEG file.

    Parameters
    ----------
    fname : path-like
        Path to the Nihon Kohden data file (``.EEG``).
    preload : bool
        If True, all data are loaded at initialization.
    %(verbose)s

    Returns
    -------
    raw : instance of RawNihon
        A Raw object containing Nihon Kohden data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawNihon.
    """
    return RawNihon(fname, preload, verbose)
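

# Hypothetical usage (file name is illustrative); sibling .PNT, .LOG and .21E
# files are picked up automatically when present::
#
#     raw = read_raw_nihon("recording.EEG", preload=True)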


_valid_headers = [
    "EEG-1100A V01.00",
    "EEG-1100B V01.00",
    "EEG-1100C V01.00",
    "QI-403A V01.00",
    "QI-403A V02.00",
    "EEG-2100 V01.00",
    "EEG-2100 V02.00",
    "DAE-2100D V01.30",
    "DAE-2100D V02.00",
    # 'EEG-1200A V01.00',  # Not working for the moment.
]


def _read_nihon_metadata(fname):
    metadata = {}
    fname = _ensure_path(fname)
    pnt_fname = fname.with_suffix(".PNT")
    if not pnt_fname.exists():
        warn("No PNT file exists. Metadata will be blank.")
        return metadata
    logger.info("Found PNT file, reading metadata.")
    with open(pnt_fname) as fid:
        version = np.fromfile(fid, "|S16", 1).astype("U16")[0]
        if version not in _valid_headers:
            raise ValueError(f"Not a valid Nihon Kohden PNT file ({version})")
        metadata["version"] = version

        # Read timestamp
        fid.seek(0x40)
        meas_str = np.fromfile(fid, "|S14", 1).astype("U14")[0]
        meas_date = datetime.strptime(meas_str, "%Y%m%d%H%M%S")
        meas_date = meas_date.replace(tzinfo=timezone.utc)
        metadata["meas_date"] = meas_date

    return metadata


_default_chan_labels = [
    "FP1",
    "FP2",
    "F3",
    "F4",
    "C3",
    "C4",
    "P3",
    "P4",
    "O1",
    "O2",
    "F7",
    "F8",
    "T3",
    "T4",
    "T5",
    "T6",
    "FZ",
    "CZ",
    "PZ",
    "E",
    "PG1",
    "PG2",
    "A1",
    "A2",
    "T1",
    "T2",
]
_default_chan_labels += [f"X{i}" for i in range(1, 12)]
_default_chan_labels += [f"NA{i}" for i in range(1, 6)]
_default_chan_labels += [f"DC{i:02}" for i in range(1, 33)]
_default_chan_labels += ["BN1", "BN2", "Mark1", "Mark2"]
_default_chan_labels += [f"NA{i}" for i in range(6, 28)]
_default_chan_labels += ["X12/BP1", "X13/BP2", "X14/BP3", "X15/BP4"]
_default_chan_labels += [f"X{i}" for i in range(16, 166)]
_default_chan_labels += ["NA28", "Z"]

_encodings = ("utf-8", "latin1")


def _read_21e_file(fname):
    fname = _ensure_path(fname)
    e_fname = fname.with_suffix(".21E")
    _chan_labels = [x for x in _default_chan_labels]
    if e_fname.exists():
        # Read the 21E file and update the labels accordingly.
        logger.info("Found 21E file, reading channel names.")
        for enc in _encodings:
            try:
                with open(e_fname, encoding=enc) as fid:
                    keep_parsing = False
                    for line in fid:
                        if line.startswith("["):
                            if "ELECTRODE" in line or "REFERENCE" in line:
                                keep_parsing = True
                            else:
                                keep_parsing = False
                        elif keep_parsing is True:
                            idx, name = line.split("=")
                            idx = int(idx)
                            if idx >= len(_chan_labels):
                                n = idx - len(_chan_labels) + 1
                                _chan_labels.extend(["UNK"] * n)
                            _chan_labels[idx] = name.strip()
            except UnicodeDecodeError:
                pass
            else:
                break
        else:
            warn(
                f"Could not decode 21E file as one of {_encodings}; "
                "default channel names will be used."
            )

    return _chan_labels


def _read_nihon_header(fname):
    # Read the Nihon Kohden EEG file header
    fname = _ensure_path(fname)
    _chan_labels = _read_21e_file(fname)
    header = {}
    logger.info(f"Reading header from {fname}")
    with open(fname) as fid:
        version = np.fromfile(fid, "|S16", 1).astype("U16")[0]
        if version not in _valid_headers:
            raise ValueError(f"Not a valid Nihon Kohden EEG file ({version})")

        fid.seek(0x0081)
        control_block = np.fromfile(fid, "|S16", 1).astype("U16")[0]
        if control_block not in _valid_headers:
            raise ValueError(
                f"Not a valid Nihon Kohden EEG file (control block {version})"
            )

        fid.seek(0x17FE)
        waveform_sign = np.fromfile(fid, np.uint8, 1)[0]
        if waveform_sign != 1:
            raise ValueError("Not a valid Nihon Kohden EEG file (waveform block)")
        header["version"] = version

        fid.seek(0x0091)
        n_ctlblocks = np.fromfile(fid, np.uint8, 1)[0]
        header["n_ctlblocks"] = n_ctlblocks
        controlblocks = []
        for i_ctl_block in range(n_ctlblocks):
            t_controlblock = {}
            fid.seek(0x0092 + i_ctl_block * 20)
            t_ctl_address = np.fromfile(fid, np.uint32, 1)[0]
            t_controlblock["address"] = t_ctl_address
            fid.seek(t_ctl_address + 17)
            n_datablocks = np.fromfile(fid, np.uint8, 1)[0]
            t_controlblock["n_datablocks"] = n_datablocks
            t_controlblock["datablocks"] = []
            for i_data_block in range(n_datablocks):
                t_datablock = {}
                fid.seek(t_ctl_address + i_data_block * 20 + 18)
                t_data_address = np.fromfile(fid, np.uint32, 1)[0]
                t_datablock["address"] = t_data_address

                fid.seek(t_data_address + 0x26)
                t_n_channels = np.fromfile(fid, np.uint8, 1)[0].astype(np.int64)
                t_datablock["n_channels"] = t_n_channels

                t_channels = []
                for i_ch in range(t_n_channels):
                    fid.seek(t_data_address + 0x27 + (i_ch * 10))
                    t_idx = np.fromfile(fid, np.uint8, 1)[0]
                    t_channels.append(_chan_labels[t_idx])

                t_datablock["channels"] = t_channels

                fid.seek(t_data_address + 0x1C)
                t_record_duration = np.fromfile(fid, np.uint32, 1)[0].astype(np.int64)
                t_datablock["duration"] = t_record_duration

                fid.seek(t_data_address + 0x1A)
                sfreq = np.fromfile(fid, np.uint16, 1)[0] & 0x3FFF
                t_datablock["sfreq"] = sfreq.astype(np.int64)

                t_datablock["n_samples"] = np.int64(t_record_duration * sfreq // 10)
                t_controlblock["datablocks"].append(t_datablock)
            controlblocks.append(t_controlblock)
        header["controlblocks"] = controlblocks

    # Now check that every data block has the same channels and sfreq
    chans = []
    sfreqs = []
    nsamples = []
    for t_ctl in header["controlblocks"]:
        for t_dtb in t_ctl["datablocks"]:
            chans.append(t_dtb["channels"])
            sfreqs.append(t_dtb["sfreq"])
            nsamples.append(t_dtb["n_samples"])
    for i_elem in range(1, len(chans)):
        if chans[0] != chans[i_elem]:
            raise ValueError("Channel names in datablocks do not match")
        if sfreqs[0] != sfreqs[i_elem]:
            raise ValueError("Sample frequencies in datablocks do not match")
    header["ch_names"] = chans[0]
    header["sfreq"] = sfreqs[0]
    header["n_samples"] = np.sum(nsamples)

    # TODO: Support more than one controlblock and more than one datablock
    if header["n_ctlblocks"] != 1:
        raise NotImplementedError(
            "Reading more than one control block for this type of file "
            "is not supported"
        )
    if header["controlblocks"][0]["n_datablocks"] > 1:
        # Multiple blocks, check that they all have the same kind of data
        datablocks = header["controlblocks"][0]["datablocks"]
        block_0 = datablocks[0]
        for t_block in datablocks[1:]:
            if block_0["n_channels"] != t_block["n_channels"]:
                raise ValueError(
                    "Cannot read NK file with different number of channels "
                    "in each datablock"
                )
            if block_0["channels"] != t_block["channels"]:
                raise ValueError(
                    "Cannot read NK file with different channels in each datablock"
                )
            if block_0["sfreq"] != t_block["sfreq"]:
                raise ValueError(
                    "Cannot read NK file with different sfreq in each datablock"
                )

    return header


def _read_nihon_annotations(fname):
    fname = _ensure_path(fname)
    log_fname = fname.with_suffix(".LOG")
    if not log_fname.exists():
        warn("No LOG file exists. Annotations will not be read.")
        return dict(onset=[], duration=[], description=[])
    logger.info("Found LOG file, reading events.")
    with open(log_fname) as fid:
        version = np.fromfile(fid, "|S16", 1).astype("U16")[0]
        if version not in _valid_headers:
            raise ValueError(f"Not a valid Nihon Kohden LOG file ({version})")

        fid.seek(0x91)
        n_logblocks = np.fromfile(fid, np.uint8, 1)[0]
        all_onsets = []
        all_descriptions = []
        for t_block in range(n_logblocks):
            fid.seek(0x92 + t_block * 20)
            t_blk_address = np.fromfile(fid, np.uint32, 1)[0]
            fid.seek(t_blk_address + 0x12)
            n_logs = np.fromfile(fid, np.uint8, 1)[0]
            fid.seek(t_blk_address + 0x14)
            t_logs = np.fromfile(fid, "|S45", n_logs)
            for t_log in t_logs:
                for enc in _encodings:
                    try:
                        t_log = t_log.decode(enc)
                    except UnicodeDecodeError:
                        pass
                    else:
                        break
                else:
                    warn(f"Could not decode log as one of {_encodings}")
                    continue
                t_desc = t_log[:20].strip("\x00")
                t_onset = datetime.strptime(t_log[20:26], "%H%M%S")
                t_onset = t_onset.hour * 3600 + t_onset.minute * 60 + t_onset.second
                all_onsets.append(t_onset)
                all_descriptions.append(t_desc)

    annots = dict(
        onset=all_onsets,
        duration=[0] * len(all_onsets),
        description=all_descriptions,
    )
    return annots


def _map_ch_to_type(ch_name):
    ch_type_pattern = OrderedDict(
        [("stim", ("Mark",)), ("misc", ("DC", "NA", "Z", "$")), ("bio", ("X",))]
    )
    for key, kinds in ch_type_pattern.items():
        if any(kind in ch_name for kind in kinds):
            return key
    return "eeg"


def _map_ch_to_specs(ch_name):
    unit_mult = 1e-3
    phys_min = -12002.9
    phys_max = 12002.56
    dig_min = -32768
    if ch_name.upper() in _default_chan_labels:
        idx = _default_chan_labels.index(ch_name.upper())
        if (idx < 42 or idx > 73) and idx not in [76, 77]:
            unit_mult = 1e-6
            phys_min = -3200
            phys_max = 3199.902
    t_range = phys_max - phys_min
    cal = t_range / 65535
    offset = phys_min - (dig_min * cal)

    out = dict(
        unit=unit_mult,
        phys_min=phys_min,
        phys_max=phys_max,
        dig_min=dig_min,
        cal=cal,
        offset=offset,
    )
    return out
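

# Worked example using the constants above: for an EEG channel such as "FP1",
# phys_min = -3200 and phys_max = 3199.902, so t_range = 6399.902 and
# cal = 6399.902 / 65535 ≈ 0.09766. Since offset = phys_min - dig_min * cal,
# a digital value of dig_min (-32768) decodes to dig_min * cal + offset =
# phys_min exactly, and dig_min + 65535 decodes to phys_min + t_range = phys_max.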


@fill_doc
class RawNihon(BaseRaw):
    """Raw object from a Nihon Kohden EEG file.

    Parameters
    ----------
    fname : path-like
        Path to the Nihon Kohden data ``.eeg`` file.
    preload : bool
        If True, all data are loaded at initialization.
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """

    @verbose
    def __init__(self, fname, preload=False, verbose=None):
        fname = _check_fname(fname, "read", True, "fname")
        data_name = fname.name
        logger.info(f"Loading {data_name}")

        header = _read_nihon_header(fname)
        metadata = _read_nihon_metadata(fname)

        # n_chan = len(header['ch_names']) + 1
        sfreq = header["sfreq"]
        # data are multiplexed int16
        ch_names = header["ch_names"]
        ch_types = [_map_ch_to_type(x) for x in ch_names]

        info = create_info(ch_names, sfreq, ch_types)
        n_samples = header["n_samples"]

        if "meas_date" in metadata:
            with info._unlock():
                info["meas_date"] = metadata["meas_date"]
        chs = {x: _map_ch_to_specs(x) for x in info["ch_names"]}

        cal = np.array([chs[x]["cal"] for x in info["ch_names"]], float)[:, np.newaxis]
        offsets = np.array([chs[x]["offset"] for x in info["ch_names"]], float)[
            :, np.newaxis
        ]
        gains = np.array([chs[x]["unit"] for x in info["ch_names"]], float)[
            :, np.newaxis
        ]

        raw_extras = dict(cal=cal, offsets=offsets, gains=gains, header=header)
        for i_ch, ch_name in enumerate(info["ch_names"]):
            t_range = chs[ch_name]["phys_max"] - chs[ch_name]["phys_min"]
            info["chs"][i_ch]["range"] = t_range
            info["chs"][i_ch]["cal"] = 1 / t_range

        super().__init__(
            info,
            preload=preload,
            last_samps=(n_samples - 1,),
            filenames=[fname.as_posix()],
            orig_format="short",
            raw_extras=[raw_extras],
        )

        # Get annotations from LOG file
        annots = _read_nihon_annotations(fname)

        # Annotate acquisition skips
        controlblock = header["controlblocks"][0]
        cur_sample = 0
        if controlblock["n_datablocks"] > 1:
            for i_block in range(controlblock["n_datablocks"] - 1):
                t_block = controlblock["datablocks"][i_block]
                cur_sample = cur_sample + t_block["n_samples"]
                cur_tpoint = (cur_sample - 0.5) / t_block["sfreq"]
                # Add annotations as in append raw
                annots["onset"].append(cur_tpoint)
                annots["duration"].append(0.0)
                annots["description"].append("BAD boundary")
                annots["onset"].append(cur_tpoint)
                annots["duration"].append(0.0)
                annots["description"].append("EDGE boundary")

        annotations = Annotations(**annots, orig_time=info["meas_date"])
        self.set_annotations(annotations)

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        # For now we assume one control block
        header = self._raw_extras[fi]["header"]

        # Get the original cal, offsets and gains
        cal = self._raw_extras[fi]["cal"]
        offsets = self._raw_extras[fi]["offsets"]
        gains = self._raw_extras[fi]["gains"]

        # get the right datablock
        datablocks = header["controlblocks"][0]["datablocks"]
        ends = np.cumsum([t["n_samples"] for t in datablocks])

        start_block = np.where(start < ends)[0][0]
        stop_block = np.where(stop <= ends)[0][0]
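
        # Worked example (illustrative sizes): with two datablocks of 1000
        # samples each, ends = [1000, 2000]. Requesting start=500, stop=1500
        # gives start_block=0 and stop_block=1, so the branch below splits
        # the read into per-block recursive calls.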

        if start_block != stop_block:
            # Recursive call for each block independently
            new_start = start
            sample_start = 0
            for t_block_idx in range(start_block, stop_block + 1):
                t_block = datablocks[t_block_idx]
                if t_block_idx == stop_block:
                    # If it's the last block, we stop on the last sample to read
                    new_stop = stop
                else:
                    # Otherwise, stop on the last sample of the block
                    new_stop = t_block["n_samples"] + new_start
                samples_to_read = new_stop - new_start
                sample_stop = sample_start + samples_to_read

                self._read_segment_file(
                    data[:, sample_start:sample_stop],
                    idx,
                    fi,
                    new_start,
                    new_stop,
                    cals,
                    mult,
                )

                # Update variables for next loop
                sample_start = sample_stop
                new_start = new_stop
        else:
            datablock = datablocks[start_block]

            n_channels = datablock["n_channels"] + 1
            datastart = datablock["address"] + 0x27 + (datablock["n_channels"] * 10)

            # Compute start offset based on the beginning of the block
            rel_start = start
            if start_block != 0:
                rel_start = start - ends[start_block - 1]
            start_offset = datastart + rel_start * n_channels * 2

            with open(self.filenames[fi], "rb") as fid:
                to_read = (stop - start) * n_channels
                fid.seek(start_offset)
                block_data = np.fromfile(fid, "<u2", to_read) + 0x8000
                block_data = block_data.astype(np.int16)
                block_data = block_data.reshape(n_channels, -1, order="F")
                block_data = block_data[:-1] * cal  # cast to float64
                block_data += offsets
                block_data *= gains
                _mult_cal_one(data, block_data, idx, cals, mult)
7
mne/io/nirx/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""fNIRS module for conversion to FIF."""
|
||||
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
from .nirx import read_raw_nirx
|
||||
146
mne/io/nirx/_localized_abbr.py
Normal file
@@ -0,0 +1,146 @@
"""Localizations for meas_date extraction."""
|
||||
|
||||
# Authors: The MNE-Python contributors.
|
||||
# License: BSD-3-Clause
|
||||
# Copyright the MNE-Python contributors.
|
||||
|
||||
# This file was generated on 2021/01/31 on an Ubuntu system.
|
||||
# When getting "unsupported locale setting" on Ubuntu (e.g., with localepurge),
|
||||
# use "sudo locale-gen de_DE" etc. then "sudo update-locale".
|
||||
|
||||
"""
|
||||
import datetime
|
||||
import locale
|
||||
print('_localized_abbr = {')
|
||||
for loc in ('en_US.utf8', 'de_DE', 'fr_FR', 'it_IT'):
|
||||
print(f' {repr(loc)}: {{')
|
||||
print(' "month": {', end='')
|
||||
month_abbr = set()
|
||||
for month in range(1, 13): # Month as locale’s abbreviated name
|
||||
locale.setlocale(locale.LC_TIME, "en_US.utf8")
|
||||
dt = datetime.datetime(year=2000, month=month, day=1)
|
||||
val = dt.strftime("%b").lower()
|
||||
locale.setlocale(locale.LC_TIME, loc)
|
||||
key = dt.strftime("%b").lower()
|
||||
month_abbr.add(key)
|
||||
print(f'{repr(key)}: {repr(val)}, ', end='')
|
||||
print('}, # noqa')
|
||||
print(' "weekday": {', end='')
|
||||
weekday_abbr = set()
|
||||
for day in range(1, 8): # Weekday as locale’s abbreviated name.
|
||||
locale.setlocale(locale.LC_TIME, "en_US.utf8")
|
||||
dt = datetime.datetime(year=2000, month=1, day=day)
|
||||
val = dt.strftime("%a").lower()
|
||||
locale.setlocale(locale.LC_TIME, loc)
|
||||
key = dt.strftime("%a").lower()
|
||||
assert key not in weekday_abbr, key
|
||||
weekday_abbr.add(key)
|
||||
print(f'{repr(key)}: {repr(val)}, ', end='')
|
||||
print('}, # noqa')
|
||||
print(' },')
|
||||
print('}\n')
|
||||
"""
|
||||
|
||||
# TODO: this should really be outsourced to a dedicated module like arrow or babel
|
||||
_localized_abbr = {
|
||||
"en_US.utf8": {
|
||||
"month": {
|
||||
"jan": "jan",
|
||||
"feb": "feb",
|
||||
"mar": "mar",
|
||||
"apr": "apr",
|
||||
"may": "may",
|
||||
"jun": "jun",
|
||||
"jul": "jul",
|
||||
"aug": "aug",
|
||||
"sep": "sep",
|
||||
"oct": "oct",
|
||||
"nov": "nov",
|
||||
"dec": "dec",
|
||||
}, # noqa
|
||||
"weekday": {
|
||||
"sat": "sat",
|
||||
"sun": "sun",
|
||||
"mon": "mon",
|
||||
"tue": "tue",
|
||||
"wed": "wed",
|
||||
"thu": "thu",
|
||||
"fri": "fri",
|
||||
}, # noqa
|
||||
},
|
||||
"de_DE": {
|
||||
"month": {
|
||||
"jan": "jan",
|
||||
"feb": "feb",
|
||||
"mär": "mar",
|
||||
"apr": "apr",
|
||||
"mai": "may",
|
||||
"jun": "jun",
|
||||
"jul": "jul",
|
||||
"aug": "aug",
|
||||
"sep": "sep",
|
||||
"okt": "oct",
|
||||
"nov": "nov",
|
||||
"dez": "dec",
|
||||
}, # noqa
|
||||
"weekday": {
|
||||
"sa": "sat",
|
||||
"so": "sun",
|
||||
"mo": "mon",
|
||||
"di": "tue",
|
||||
"mi": "wed",
|
||||
"do": "thu",
|
||||
"fr": "fri",
|
||||
}, # noqa
|
||||
},
|
||||
"fr_FR": {
|
||||
"month": {
|
||||
"janv.": "jan",
|
||||
"févr.": "feb",
|
||||
"mars": "mar",
|
||||
"avril": "apr",
|
||||
"mai": "may",
|
||||
"juin": "jun",
|
||||
"juil.": "jul",
|
||||
"août": "aug",
|
||||
"sept.": "sep",
|
||||
"oct.": "oct",
|
||||
"nov.": "nov",
|
||||
"déc.": "dec",
|
||||
}, # noqa
|
||||
"weekday": {
|
||||
"sam.": "sat",
|
||||
"dim.": "sun",
|
||||
"lun.": "mon",
|
||||
"mar.": "tue",
|
||||
"mer.": "wed",
|
||||
"jeu.": "thu",
|
||||
"ven.": "fri",
|
||||
}, # noqa
|
||||
},
|
||||
"it_IT": {
|
||||
"month": {
|
||||
"gen": "jan",
|
||||
"feb": "feb",
|
||||
"mar": "mar",
|
||||
"apr": "apr",
|
||||
"mag": "may",
|
||||
"giu": "jun",
|
||||
"lug": "jul",
|
||||
"ago": "aug",
|
||||
"set": "sep",
|
||||
"ott": "oct",
|
||||
"nov": "nov",
|
||||
"dic": "dec",
|
||||
}, # noqa
|
||||
"weekday": {
|
||||
"sab": "sat",
|
||||
"dom": "sun",
|
||||
"lun": "mon",
|
||||
"mar": "tue",
|
||||
"mer": "wed",
|
||||
"gio": "thu",
|
||||
"ven": "fri",
|
||||
}, # noqa
|
||||
},
|
||||
}
|
||||
593
mne/io/nirx/nirx.py
Normal file
@@ -0,0 +1,593 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import datetime as dt
import glob as glob
import json
import os.path as op
import re as re
from configparser import ConfigParser, RawConfigParser

import numpy as np
from scipy.io import loadmat

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _format_dig_points, create_info
from ..._fiff.utils import _mult_cal_one
from ..._freesurfer import get_mni_fiducials
from ...annotations import Annotations
from ...transforms import _get_trans, apply_trans
from ...utils import (
    _check_fname,
    _check_option,
    _mask_to_onsets_offsets,
    _validate_type,
    fill_doc,
    logger,
    verbose,
    warn,
)
from ..base import BaseRaw
from ._localized_abbr import _localized_abbr


@fill_doc
def read_raw_nirx(
    fname, saturated="annotate", *, preload=False, encoding="latin-1", verbose=None
) -> "RawNIRX":
    """Reader for a NIRX fNIRS recording.

    Parameters
    ----------
    fname : path-like
        Path to the NIRX data folder or header file.
    %(saturated)s
    %(preload)s
    %(encoding_nirx)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawNIRX
        A Raw object containing NIRX data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawNIRX.

    Notes
    -----
    %(nirx_notes)s
    """
    return RawNIRX(
        fname, saturated, preload=preload, encoding=encoding, verbose=verbose
    )
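

# Hypothetical usage (path is illustrative); ``fname`` may be either the
# recording folder or its .hdr file::
#
#     raw = read_raw_nirx("nirx_folder/recording.hdr", saturated="annotate")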


def _open(fname):
    return open(fname, encoding="latin-1")


@fill_doc
class RawNIRX(BaseRaw):
    """Raw object from a NIRX fNIRS file.

    Parameters
    ----------
    fname : path-like
        Path to the NIRX data folder or header file.
    %(saturated)s
    %(preload)s
    %(encoding_nirx)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.

    Notes
    -----
    %(nirx_notes)s
    """

    @verbose
    def __init__(self, fname, saturated, *, preload=False, encoding=None, verbose=None):
        logger.info(f"Loading {fname}")
        _validate_type(fname, "path-like", "fname")
        _validate_type(saturated, str, "saturated")
        _check_option("saturated", saturated, ("annotate", "nan", "ignore"))
        fname = str(fname)
        if fname.endswith(".hdr"):
            fname = op.dirname(op.abspath(fname))

        fname = str(_check_fname(fname, "read", True, "fname", need_dir=True))

        json_config = glob.glob(f"{fname}/*{'config.json'}")
        is_aurora = len(json_config)

        if is_aurora:
            # NIRSport2 devices using Aurora software
            keys = (
                "hdr",
                "config.json",
                "description.json",
                "wl1",
                "wl2",
                "probeInfo.mat",
                "tri",
            )
        else:
            # NIRScout devices and NIRSport1 devices
            keys = (
                "hdr",
                "inf",
                "set",
                "tpl",
                "wl1",
                "wl2",
                "config.txt",
                "probeInfo.mat",
            )
            n_dat = len(glob.glob(f"{fname}/*{'dat'}"))
            if n_dat != 1:
                warn(
                    "A single dat file was expected in the specified path, "
                    f"but got {n_dat}. This may indicate that the file "
                    "structure has been modified since the measurement "
                    "was saved."
                )

        # Check if required files exist and store names for later use
        files = dict()
        nan_mask = dict()
        for key in keys:
            files[key] = glob.glob(f"{fname}/*{key}")
            fidx = 0
            if len(files[key]) != 1:
                if key not in ("wl1", "wl2"):
                    raise RuntimeError(f"Need one {key} file, got {len(files[key])}")
                noidx = np.where(["nosatflags_" in op.basename(x) for x in files[key]])[
                    0
                ]
                if len(noidx) != 1 or len(files[key]) != 2:
                    raise RuntimeError(
                        f"Need one nosatflags and one standard {key} file, "
                        f"got {len(files[key])}"
                    )
                # Here two files have been found, one that is called
                # no sat flags. The nosatflag file has no NaNs in it.
                noidx = noidx[0]
                if saturated == "ignore":
                    # Ignore NaN and return values
                    fidx = noidx
                elif saturated == "nan":
                    # Return NaN
                    fidx = 0 if noidx == 1 else 1
                else:
                    assert saturated == "annotate"  # guaranteed above
                    fidx = noidx
                    nan_mask[key] = files[key][0 if noidx == 1 else 1]
            files[key] = files[key][fidx]

        # Read number of rows/samples of wavelength data
        with _open(files["wl1"]) as fid:
            last_sample = fid.read().count("\n") - 1

        # Read header file
        # The header file isn't compliant with the configparser. So all the
        # text between comments must be removed before passing to parser
        with open(files["hdr"], encoding=encoding) as f:
            hdr_str_all = f.read()
        hdr_str = re.sub("#.*?#", "", hdr_str_all, flags=re.DOTALL)
        if is_aurora:
            hdr_str = re.sub("(\\[DataStructure].*)", "", hdr_str, flags=re.DOTALL)
        hdr = RawConfigParser()
        hdr.read_string(hdr_str)

        # Check that the file format version is supported
        if is_aurora:
            # We may need to ease this requirement back
            if hdr["GeneralInfo"]["Version"] not in [
                "2021.4.0-34-ge9fdbbc8",
                "2021.9.0-5-g3eb32851",
                "2021.9.0-6-g14ef4a71",
            ]:
                warn(
                    "MNE has not been tested with Aurora version "
                    f"{hdr['GeneralInfo']['Version']}"
                )
        else:
            if hdr["GeneralInfo"]["NIRStar"] not in ['"15.0"', '"15.2"', '"15.3"']:
                raise RuntimeError(
                    "MNE does not support this NIRStar version"
                    f" ({hdr['GeneralInfo']['NIRStar']})"
                )
            if (
                "NIRScout" not in hdr["GeneralInfo"]["Device"]
                and "NIRSport" not in hdr["GeneralInfo"]["Device"]
            ):
                warn(
                    "Only import of data from NIRScout devices has been "
                    f'thoroughly tested. You are using a {hdr["GeneralInfo"]["Device"]}'
                    " device."
                )

        # Parse required header fields

        # Extract measurement date and time
        if is_aurora:
            datetime_str = hdr["GeneralInfo"]["Date"]
        else:
            datetime_str = hdr["GeneralInfo"]["Date"] + hdr["GeneralInfo"]["Time"]

        meas_date = None
        # Several formats have been observed so we try each in turn
        for loc, translations in _localized_abbr.items():
            do_break = False
            # So far we are lucky in that all the formats below, if they
            # include %a (weekday abbr), always come first. Thus we can use
            # a .split(), replace, and rejoin.
            loc_datetime_str = datetime_str.split(" ")
            for key, val in translations["weekday"].items():
                loc_datetime_str[0] = loc_datetime_str[0].replace(key, val)
            for ii in range(1, len(loc_datetime_str)):
                for key, val in translations["month"].items():
                    loc_datetime_str[ii] = loc_datetime_str[ii].replace(key, val)
            loc_datetime_str = " ".join(loc_datetime_str)
            logger.debug(f"Trying {loc} datetime: {loc_datetime_str}")
            for dt_code in [
                '"%a, %b %d, %Y""%H:%M:%S.%f"',
                '"%a %d %b %Y""%H:%M:%S.%f"',
                '"%a, %d %b %Y""%H:%M:%S.%f"',
                "%Y-%m-%d %H:%M:%S.%f",
                '"%Y年%m月%d日""%H:%M:%S.%f"',
            ]:
                try:
                    meas_date = dt.datetime.strptime(loc_datetime_str, dt_code)
                except ValueError:
                    pass
                else:
                    meas_date = meas_date.replace(tzinfo=dt.timezone.utc)
                    do_break = True
                    logger.debug(f"Measurement date language {loc} detected: {dt_code}")
                    break
            if do_break:
                break
        if meas_date is None:
            warn(
                "Extraction of measurement date from NIRX file failed. "
                "This can be caused by files saved in certain locales "
                f"(currently only {list(_localized_abbr)} supported). "
                "Please report this as a GitHub issue. "
                "The date is being set to January 1st, 2000, "
                f"instead of {repr(datetime_str)}."
            )
            meas_date = dt.datetime(2000, 1, 1, 0, 0, 0, tzinfo=dt.timezone.utc)
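
        # Illustrative walk-through: for a (lowercased) German header date
        # such as '"mi, 07 apr 2021""10:14:32.000"', the de_DE table maps
        # "mi" -> "wed", after which the pattern
        # '"%a, %d %b %Y""%H:%M:%S.%f"' can parse it; a plain
        # "2021-04-07 10:14:32.000" matches "%Y-%m-%d %H:%M:%S.%f" directly.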

        # Extract frequencies of light used by machine
        if is_aurora:
            fnirs_wavelengths = [760, 850]
        else:
            fnirs_wavelengths = [
                int(s)
                for s in re.findall(r"(\d+)", hdr["ImagingParameters"]["Wavelengths"])
            ]

        # Extract source-detectors
        if is_aurora:
            sources = re.findall(r"(\d+)-\d+", hdr_str_all.split("\n")[-2])
            detectors = re.findall(r"\d+-(\d+)", hdr_str_all.split("\n")[-2])
            sources = [int(s) + 1 for s in sources]
            detectors = [int(d) + 1 for d in detectors]

        else:
            sources = np.asarray(
                [
                    int(s)
                    for s in re.findall(
                        r"(\d+)-\d+:\d+", hdr["DataStructure"]["S-D-Key"]
                    )
                ],
                int,
            )
            detectors = np.asarray(
                [
                    int(s)
                    for s in re.findall(
                        r"\d+-(\d+):\d+", hdr["DataStructure"]["S-D-Key"]
                    )
                ],
                int,
            )

        # Extract sampling rate
        if is_aurora:
            samplingrate = float(hdr["GeneralInfo"]["Sampling rate"])
        else:
            samplingrate = float(hdr["ImagingParameters"]["SamplingRate"])

        # Read participant information file
        if is_aurora:
            with open(files["description.json"]) as f:
                inf = json.load(f)
        else:
            inf = ConfigParser(allow_no_value=True)
            inf.read(files["inf"])
            inf = inf._sections["Subject Demographics"]

        # Store subject information from inf file in mne format
        # Note: NIRX also records "Study Type", "Experiment History",
        # "Additional Notes", "Contact Information" and this information
        # is currently discarded
        # NIRStar does not record an id, or handedness by default
        # The name field is used to populate the his_id variable.
        subject_info = {}
        if is_aurora:
            names = inf["subject"].split()
        else:
            names = inf["name"].replace('"', "").split()
        subject_info["his_id"] = "_".join(names)
        if len(names) > 0:
            subject_info["first_name"] = names[0].replace('"', "")
        if len(names) > 1:
            subject_info["last_name"] = names[-1].replace('"', "")
        if len(names) > 2:
            subject_info["middle_name"] = names[-2].replace('"', "")
        subject_info["sex"] = inf["gender"].replace('"', "")
        # Recode values
        if subject_info["sex"] in {"M", "Male", "1"}:
            subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_MALE
        elif subject_info["sex"] in {"F", "Female", "2"}:
            subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_FEMALE
        else:
            subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN
        if inf["age"] != "":
            subject_info["birthday"] = dt.date(
                meas_date.year - int(inf["age"]),
                meas_date.month,
                meas_date.day,
            )

        # Read information about probe/montage/optodes
        # A word on terminology used here:
        #   Sources produce light
        #   Detectors measure light
        #   Sources and detectors are both called optodes
        #   Each source - detector pair produces a channel
        #   Channels are defined as the midpoint between source and detector
        mat_data = loadmat(files["probeInfo.mat"])
        probes = mat_data["probeInfo"]["probes"][0, 0]
        requested_channels = probes["index_c"][0, 0]
        src_locs = probes["coords_s3"][0, 0] / 100.0
        det_locs = probes["coords_d3"][0, 0] / 100.0
        ch_locs = probes["coords_c3"][0, 0] / 100.0

        # These are all in MNI coordinates, so let's transform them to
        # the Neuromag head coordinate frame
        src_locs, det_locs, ch_locs, mri_head_t = _convert_fnirs_to_head(
            "fsaverage", "mri", "head", src_locs, det_locs, ch_locs
        )

        # Set up digitization
        dig = get_mni_fiducials("fsaverage", verbose=False)
        for fid in dig:
            fid["r"] = apply_trans(mri_head_t, fid["r"])
            fid["coord_frame"] = FIFF.FIFFV_COORD_HEAD
        for ii, ch_loc in enumerate(ch_locs, 1):
            dig.append(
                dict(
                    kind=FIFF.FIFFV_POINT_EEG,  # misnomer but probably okay
                    r=ch_loc,
                    ident=ii,
                    coord_frame=FIFF.FIFFV_COORD_HEAD,
                )
            )
        dig = _format_dig_points(dig)
        del mri_head_t

        # Determine requested channel indices
        # The wl1 and wl2 files include all possible source - detector pairs.
        # But most of these are not relevant. We want to extract only the
        # subset requested in the probe file
        req_ind = np.array([], int)
        for req_idx in range(requested_channels.shape[0]):
            sd_idx = np.where(
                (sources == requested_channels[req_idx][0])
                & (detectors == requested_channels[req_idx][1])
            )
            req_ind = np.concatenate((req_ind, sd_idx[0]))
        req_ind = req_ind.astype(int)

        snames = [f"S{sources[idx]}" for idx in req_ind]
        dnames = [f"_D{detectors[idx]}" for idx in req_ind]
        sdnames = [m + str(n) for m, n in zip(snames, dnames)]
        sd1 = [s + " " + str(fnirs_wavelengths[0]) for s in sdnames]
        sd2 = [s + " " + str(fnirs_wavelengths[1]) for s in sdnames]
        chnames = [val for pair in zip(sd1, sd2) for val in pair]
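
        # For example (illustrative): source 1 paired with detector 2 at
        # wavelengths 760/850 nm yields the interleaved channel names
        # "S1_D2 760" and "S1_D2 850", matching the wl1/wl2 interleaving
        # performed in _read_segment_file below.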
|
||||
|
||||
# Create mne structure
|
||||
info = create_info(chnames, samplingrate, ch_types="fnirs_cw_amplitude")
|
||||
with info._unlock():
|
||||
info.update(subject_info=subject_info, dig=dig)
|
||||
info["meas_date"] = meas_date
|
||||
|
||||
# Store channel, source, and detector locations
|
||||
# The channel location is stored in the first 3 entries of loc.
|
||||
# The source location is stored in the second 3 entries of loc.
|
||||
# The detector location is stored in the third 3 entries of loc.
|
||||
# NIRx NIRSite uses MNI coordinates.
|
||||
# Also encode the light frequency in the structure.
|
||||
for ch_idx2 in range(requested_channels.shape[0]):
|
||||
# Find source and store location
|
||||
src = int(requested_channels[ch_idx2, 0]) - 1
|
||||
# Find detector and store location
|
||||
det = int(requested_channels[ch_idx2, 1]) - 1
|
||||
# Store channel location as midpoint between source and detector.
|
||||
midpoint = (src_locs[src, :] + det_locs[det, :]) / 2
|
||||
for ii in range(2):
|
||||
ch_idx3 = ch_idx2 * 2 + ii
|
||||
info["chs"][ch_idx3]["loc"][3:6] = src_locs[src, :]
|
||||
info["chs"][ch_idx3]["loc"][6:9] = det_locs[det, :]
|
||||
info["chs"][ch_idx3]["loc"][:3] = midpoint
|
||||
info["chs"][ch_idx3]["loc"][9] = fnirs_wavelengths[ii]
|
||||
info["chs"][ch_idx3]["coord_frame"] = FIFF.FIFFV_COORD_HEAD
|
||||
|
||||
# Extract the start/stop numbers for samples in the CSV. In theory the
|
||||
# sample bounds should just be 10 * the number of channels, but some
|
||||
# files have mixed \n and \n\r endings (!) so we can't rely on it, and
|
||||
# instead make a single pass over the entire file at the beginning so
|
||||
# that we know how to seek and read later.
|
||||
bounds = dict()
|
||||
for key in ("wl1", "wl2"):
|
||||
offset = 0
|
||||
bounds[key] = [offset]
|
||||
with open(files[key], "rb") as fid:
|
||||
for line in fid:
|
||||
offset += len(line)
|
||||
bounds[key].append(offset)
|
||||
assert offset == fid.tell()
|
||||
|
||||
# Extras required for reading data
|
||||
raw_extras = {
|
||||
"sd_index": req_ind,
|
||||
"files": files,
|
||||
"bounds": bounds,
|
||||
"nan_mask": nan_mask,
|
||||
}
|
||||
# Get our saturated mask
|
||||
annot_mask = None
|
||||
for ki, key in enumerate(("wl1", "wl2")):
|
||||
if nan_mask.get(key, None) is None:
|
||||
continue
|
||||
mask = np.isnan(
|
||||
_read_csv_rows_cols(
|
||||
nan_mask[key], 0, last_sample + 1, req_ind, {0: 0, 1: None}
|
||||
).T
|
||||
)
|
||||
if saturated == "nan":
|
||||
nan_mask[key] = mask
|
||||
else:
|
||||
assert saturated == "annotate"
|
||||
if annot_mask is None:
|
||||
annot_mask = np.zeros(
|
||||
(len(info["ch_names"]) // 2, last_sample + 1), bool
|
||||
)
|
||||
annot_mask |= mask
|
||||
nan_mask[key] = None # shouldn't need again
|
||||
|
||||
super().__init__(
|
||||
info,
|
||||
preload,
|
||||
filenames=[fname],
|
||||
last_samps=[last_sample],
|
||||
raw_extras=[raw_extras],
|
||||
verbose=verbose,
|
||||
)
|
||||
|
||||
# make onset/duration/description
|
||||
onset, duration, description, ch_names = list(), list(), list(), list()
|
||||
if annot_mask is not None:
|
||||
for ci, mask in enumerate(annot_mask):
|
||||
on, dur = _mask_to_onsets_offsets(mask)
|
||||
on = on / info["sfreq"]
|
||||
dur = dur / info["sfreq"]
|
||||
dur -= on
|
||||
onset.extend(on)
|
||||
duration.extend(dur)
|
||||
description.extend(["BAD_SATURATED"] * len(on))
|
||||
ch_names.extend([self.ch_names[2 * ci : 2 * ci + 2]] * len(on))
|
||||
|
||||
# Read triggers from event file
|
||||
if not is_aurora:
|
||||
files["tri"] = files["hdr"][:-3] + "evt"
|
||||
if op.isfile(files["tri"]):
|
||||
with _open(files["tri"]) as fid:
|
||||
t = [re.findall(r"(\d+)", line) for line in fid]
|
||||
if is_aurora:
|
||||
tf_idx, desc_idx = _determine_tri_idxs(t[0])
|
||||
for t_ in t:
|
||||
if is_aurora:
|
||||
trigger_frame = float(t_[tf_idx])
|
||||
desc = float(t_[desc_idx])
|
||||
else:
|
||||
binary_value = "".join(t_[1:])[::-1]
|
||||
desc = float(int(binary_value, 2))
|
||||
trigger_frame = float(t_[0])
|
||||
onset.append(trigger_frame / samplingrate)
|
||||
duration.append(1.0) # No duration info stored in files
|
||||
description.append(desc)
|
||||
ch_names.append(list())
|
||||
annot = Annotations(onset, duration, description, ch_names=ch_names)
|
||||
self.set_annotations(annot)
|
||||
|
||||
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
|
||||
"""Read a segment of data from a file.
|
||||
|
||||
The NIRX machine records raw data as two different wavelengths.
|
||||
The returned data interleaves the wavelengths.
|
||||
"""
|
||||
sd_index = self._raw_extras[fi]["sd_index"]
|
||||
|
||||
wls = list()
|
||||
for key in ("wl1", "wl2"):
|
||||
d = _read_csv_rows_cols(
|
||||
self._raw_extras[fi]["files"][key],
|
||||
start,
|
||||
stop,
|
||||
sd_index,
|
||||
self._raw_extras[fi]["bounds"][key],
|
||||
).T
|
||||
nan_mask = self._raw_extras[fi]["nan_mask"].get(key, None)
|
||||
if nan_mask is not None:
|
||||
d[nan_mask[:, start:stop]] = np.nan
|
||||
wls.append(d)
|
||||
|
||||
# TODO: Make this more efficient by only indexing above what we need.
|
||||
# For now let's just construct the full data matrix and index.
|
||||
# Interleave wavelength 1 and 2 to match channel names:
|
||||
this_data = np.zeros((len(wls[0]) * 2, stop - start))
|
||||
this_data[0::2, :] = wls[0]
|
||||
this_data[1::2, :] = wls[1]
|
||||
_mult_cal_one(data, this_data, idx, cals, mult)
|
||||
return data
|
||||
|
||||
|
||||
def _read_csv_rows_cols(fname, start, stop, cols, bounds, sep=" ", replace=None):
|
||||
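    # A sketch of the idea (not executed here): ``bounds`` is the list of
    # cumulative byte offsets built once in ``__init__``, one entry per line,
    # so reading lines start:stop is a single seek plus one read, e.g. with
    # hypothetical 12-byte lines:
    #
    #     bounds = [0, 12, 24, 36]
    #     fid.seek(bounds[1]); fid.read(bounds[3] - bounds[1])  # lines 1-2
    #
    # The {0: 0, 1: None} mapping passed for the saturation mask is a
    # sentinel meaning "start at byte 0 and read to the end of the file".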
    with open(fname, "rb") as fid:
        fid.seek(bounds[start])
        args = list()
        if bounds[1] is not None:
            args.append(bounds[stop] - bounds[start])
        data = fid.read(*args).decode("latin-1")
    if replace is not None:
        data = replace(data)
    x = np.fromstring(data, float, sep=sep)
    x.shape = (stop - start, -1)
    x = x[:, cols]
    return x


def _convert_fnirs_to_head(trans, fro, to, src_locs, det_locs, ch_locs):
    mri_head_t, _ = _get_trans(trans, fro, to)
    src_locs = apply_trans(mri_head_t, src_locs)
    det_locs = apply_trans(mri_head_t, det_locs)
    ch_locs = apply_trans(mri_head_t, ch_locs)
    return src_locs, det_locs, ch_locs, mri_head_t


def _determine_tri_idxs(trigger):
    """Determine tri file indexes for frame and description."""
    if len(trigger) == 12:
        # Aurora version 2021.9.6 or greater
        trigger_frame_idx = 7
        desc_idx = 10
    elif len(trigger) == 9:
        # Aurora version 2021.9.5 or earlier
        trigger_frame_idx = 7
        desc_idx = 8
    else:
        raise RuntimeError("Unable to read trigger file.")

    return trigger_frame_idx, desc_idx
7
mne/io/nsx/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""NSx module for reading Blackrock Microsystems files."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .nsx import read_raw_nsx
537
mne/io/nsx/nsx.py
Normal file
@@ -0,0 +1,537 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os
from datetime import datetime, timezone

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _empty_info
from ..._fiff.utils import _file_size, _read_segments_file
from ...annotations import Annotations
from ...utils import _check_fname, fill_doc, logger, warn
from ..base import BaseRaw, _get_scaling

CH_TYPE_MAPPING = {
    "CC": "SEEG",
}


# See https://blackrockneurotech.com/wp-content/uploads/LB-0023-7.00_NEV_File_Format.pdf
DATA_BYTE_SIZE = 2
ORIG_FORMAT = "short"


nsx_header_dict = {
    "basic": [
        ("file_id", "S8"),  # achFileType
        # file specification split into major and minor version number
        ("ver_major", "uint8"),
        ("ver_minor", "uint8"),
        # bytes of basic & extended header
        ("bytes_in_headers", "uint32"),
        # label of the sampling group (e.g., "1 kS/s" or "LFP low")
        ("label", "S16"),
        ("comment", "S256"),
        ("period", "uint32"),
        ("timestamp_resolution", "uint32"),
        # time origin: 2-byte uint16 values for ...
        ("year", "uint16"),
        ("month", "uint16"),
        ("weekday", "uint16"),
        ("day", "uint16"),
        ("hour", "uint16"),
        ("minute", "uint16"),
        ("second", "uint16"),
        ("millisecond", "uint16"),
        # number of channels; must match the number of extended headers
        ("channel_count", "uint32"),
    ],
    "extended": [
        ("type", "S2"),
        ("electrode_id", "uint16"),
        ("electrode_label", "S16"),
        # used front-end amplifier bank (e.g., A, B, C, D)
        ("physical_connector", "uint8"),
        # used connector pin (e.g., 1-37 on bank A, B, C or D)
        ("connector_pin", "uint8"),
        # digital and analog value ranges of the signal
        ("min_digital_val", "int16"),
        ("max_digital_val", "int16"),
        ("min_analog_val", "int16"),
        ("max_analog_val", "int16"),
        # units of the analog range values ("mV" or "uV")
        ("units", "S16"),
        # filter settings used to create nsx from source signal
        ("hi_freq_corner", "uint32"),
        ("hi_freq_order", "uint32"),
        ("hi_freq_type", "uint16"),  # 0=None, 1=Butterworth
        ("lo_freq_corner", "uint32"),
        ("lo_freq_order", "uint32"),
        ("lo_freq_type", "uint16"),  # 0=None, 1=Butterworth
    ],
    "data>2.1<3": [
        ("header", "uint8"),
        ("timestamp", "uint32"),
        ("nb_data_points", "uint32"),
    ],
    "data>=3": [
        ("header", "uint8"),
        ("timestamp", "uint64"),
        ("nb_data_points", "uint32"),
    ],
}
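
# Illustrative sketch (not used by the reader itself): these structured dtypes
# let a header be inspected directly with NumPy, e.g. for a hypothetical file:
#
#     hdr = np.fromfile("recording.ns3", count=1,
#                       dtype=nsx_header_dict["basic"])[0]
#     print(hdr["label"], hdr["channel_count"], hdr["period"])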


@fill_doc
def read_raw_nsx(
    input_fname, stim_channel=True, eog=None, misc=None, preload=False, *, verbose=None
) -> "RawNSX":
    """Reader function for NSx (Blackrock Microsystems) files.

    Parameters
    ----------
    input_fname : str
        Path to the NSx file.
    stim_channel : ``'auto'`` | str | list of str | int | list of int
        Defaults to ``'auto'``, which means that channels named ``'status'`` or
        ``'trigger'`` (case insensitive) are set to STIM. If str (or list of
        str), all channels matching the name(s) are set to STIM. If int (or
        list of ints), channels corresponding to the indices are set to STIM.
    eog : list or tuple
        Names of channels or list of indices that should be designated EOG
        channels. Values should correspond to the electrodes in the file.
        Default is None.
    misc : list or tuple
        Names of channels or list of indices that should be designated MISC
        channels. Values should correspond to the electrodes in the file.
        Default is None.
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawNSX
        The raw instance.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    Notes
    -----
    NSx files with id NEURALSG, i.e., file version 2.1, are currently not
    supported.

    If channels named 'status' or 'trigger' are present, they are considered
    as STIM channels by default. Use :func:`mne.find_events` to parse events
    encoded in such analog stim channels.
    """
    input_fname = _check_fname(
        input_fname, overwrite="read", must_exist=True, name="input_fname"
    )
    if not input_fname.suffix.lower().startswith(".ns"):
        raise NotImplementedError(
            f"Only NSx files are supported, got {input_fname.suffix}."
        )
    return RawNSX(
        input_fname, stim_channel, eog, misc, preload=preload, verbose=verbose
    )
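
# Example usage (a sketch; "recording.ns3" is a hypothetical path):
#
#     raw = read_raw_nsx("recording.ns3", preload=True)
#     events = mne.find_events(raw)  # parse any detected STIM channel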


@fill_doc
class RawNSX(BaseRaw):
    """Raw object from NSx file from Blackrock Microsystems.

    Parameters
    ----------
    input_fname : str
        Path to the NSx file.
    stim_channel : ``'auto'`` | str | list of str | int | list of int
        Defaults to ``'auto'``, which means that channels named ``'status'`` or
        ``'trigger'`` (case insensitive) are set to STIM. If str (or list of
        str), all channels matching the name(s) are set to STIM. If int (or
        list of ints), channels corresponding to the indices are set to STIM.
    eog : list or tuple
        Names of channels or list of indices that should be designated EOG
        channels. Values should correspond to the electrodes in the file.
        Default is None.
    misc : list or tuple
        Names of channels or list of indices that should be designated MISC
        channels. Values should correspond to the electrodes in the file.
        Default is None.
    %(preload)s
    %(verbose)s

    Notes
    -----
    NSx files with id NEURALSG, i.e., file version 2.1, are currently not
    supported.

    If channels named 'status' or 'trigger' are present, they are considered
    as STIM channels by default. Use :func:`mne.find_events` to parse events
    encoded in such analog stim channels.
    """

    def __init__(
        self,
        input_fname,
        stim_channel="auto",
        eog=None,
        misc=None,
        preload=False,
        verbose=None,
    ):
        logger.info(f"Extracting NSX parameters from {input_fname}...")
        input_fname = os.path.abspath(input_fname)
        (
            info,
            data_fname,
            fmt,
            n_samples,
            orig_format,
            raw_extras,
            orig_units,
        ) = _get_hdr_info(input_fname, stim_channel=stim_channel, eog=eog, misc=misc)
        raw_extras["orig_format"] = orig_format
        first_samps = (raw_extras["timestamp"][0],)
        super().__init__(
            info,
            first_samps=first_samps,
            last_samps=[first_samps[0] + n_samples - 1],
            filenames=[data_fname],
            orig_format=orig_format,
            preload=preload,
            verbose=verbose,
            raw_extras=[raw_extras],
            orig_units=orig_units,
        )

        # Add annotations for in-data skips
        if len(self._raw_extras[0]["timestamp"]) > 1:
            starts = (
                self._raw_extras[0]["timestamp"] + self._raw_extras[0]["nb_data_points"]
            )[:-1] + 1
            stops = self._raw_extras[0]["timestamp"][1:] - 1
            durations = (stops - starts + 1) / self.info["sfreq"]
            annot = Annotations(
                onset=(starts / self.info["sfreq"]),
                duration=durations,
                description="BAD_ACQ_SKIP",
                orig_time=self.info["meas_date"],
            )
            self.set_annotations(annot)
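
        # Worked example (hypothetical numbers): two packets starting at
        # timestamps [0, 5000] with 4000 data points each leave a gap over
        # samples 4001..4999, annotated as BAD_ACQ_SKIP with onset
        # 4001 / sfreq and duration (4999 - 4001 + 1) / sfreq seconds.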

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of raw data."""
        dtype = self._raw_extras[fi]["orig_format"]
        first_samps = self._raw_extras[fi]["timestamp"]
        recording_extents = self._raw_extras[fi]["nb_data_points"]
        offsets = self._raw_extras[fi]["offset_to_data_block"]
        for first_samp, recording_extent, offset in zip(
            first_samps, recording_extents, offsets
        ):
            if start > first_samp + recording_extent or stop < first_samp:
                # There is nothing to read in this chunk
                continue
            i_start = max(start, first_samp)
            i_stop = min(stop, first_samp + recording_extent)
            _read_segments_file(
                self,
                data[:, i_start - start : i_stop - start],
                idx,
                fi,
                i_start - first_samp,
                i_stop - first_samp,
                cals,
                mult,
                dtype,
                n_channels=None,
                offset=offset,
                trigger_ch=None,
            )


def _read_header(fname):
    nsx_file_id = np.fromfile(fname, count=1, dtype=[("file_id", "S8")])[0][
        "file_id"
    ].decode()

    if nsx_file_id in ["NEURALCD", "BRSMPGRP"]:
        basic_header = _read_header_22_and_above(fname)
    elif nsx_file_id == "NEURALSG":
        raise NotImplementedError(
            "NSx file id (= NEURALSG), i.e., file"
            " version 2.1 is currently not supported."
        )
    else:
        raise ValueError(
            f"NSx file id (={nsx_file_id}) does not match"
            " with supported file ids:"
            " ('NEURALCD', 'BRSMPGRP')"
        )

    millisecond = basic_header.pop("millisecond")
    time_origin = datetime(
        *[
            basic_header.pop(xx)
            for xx in (
                "year",
                "month",
                "day",
                "hour",
                "minute",
                "second",
            )
        ],
        # the header stores milliseconds; datetime expects microseconds
        microsecond=int(millisecond) * 1000,
        tzinfo=timezone.utc,
    )
    basic_header["meas_date"] = time_origin
    return basic_header


def _read_header_22_and_above(fname):
    basic_header = {}
    dtype0 = nsx_header_dict["basic"]
    dtype1 = nsx_header_dict["extended"]

    nsx_file_header = np.fromfile(fname, count=1, dtype=dtype0)[0]
    basic_header.update(
        {name: nsx_file_header[name] for name in nsx_file_header.dtype.names}
    )

    offset_dtype0 = np.dtype(dtype0).itemsize
    shape = nsx_file_header["channel_count"]
    basic_header["extended"] = np.memmap(
        fname, shape=shape, offset=offset_dtype0, dtype=dtype1, mode="r"
    )

    # The following values are stored in mHz
    # See:
    # https://blackrockneurotech.com/wp-content/uploads/LB-0023-7.00_NEV_File_Format.pdf
    basic_header["highpass"] = basic_header["extended"]["hi_freq_corner"]
    basic_header["lowpass"] = basic_header["extended"]["lo_freq_corner"]
    for x in ["highpass", "lowpass"]:
        basic_header[x] = basic_header[x] * 1e-3

    ver_major, ver_minor = basic_header.pop("ver_major"), basic_header.pop("ver_minor")
    basic_header["spec"] = f"{ver_major}.{ver_minor}"

    data_header = list()
    index = 0
    offset = basic_header["bytes_in_headers"]
    filesize = _file_size(fname)
    if float(basic_header["spec"]) < 3.0:
        dtype2 = nsx_header_dict["data>2.1<3"]
    else:
        dtype2 = nsx_header_dict["data>=3"]
    while offset < filesize:
        dh = np.memmap(fname, dtype=dtype2, shape=1, offset=offset, mode="r")[0]
        data_header.append(
            {
                "header": dh["header"],
                "timestamp": dh["timestamp"],
                "nb_data_points": dh["nb_data_points"],
                "offset_to_data_block": offset + dh.dtype.itemsize,
            }
        )
        # data size = number of data points * (data_bytes * number of channels)
        # use of `int` avoids overflow problem
        data_size = (
            int(dh["nb_data_points"])
            * int(basic_header["channel_count"])
            * DATA_BYTE_SIZE
        )
        # define new offset (to possible next data block)
        offset = data_header[index]["offset_to_data_block"] + data_size
        index += 1

    basic_header["data_header"] = data_header
    return basic_header
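
# Worked example of the packet walk above (hypothetical numbers): for a
# 10-channel file, a packet holding 1000 data points occupies
# 1000 * 10 * DATA_BYTE_SIZE = 20000 bytes, so the next packet header, if any,
# starts at its offset_to_data_block + 20000.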


def _get_hdr_info(fname, stim_channel=True, eog=None, misc=None):
    """Read header information from an NSx file."""
    eog = eog if eog is not None else []
    misc = misc if misc is not None else []

    nsx_info = _read_header(fname)
    ch_names = list(nsx_info["extended"]["electrode_label"])
    ch_types = list(nsx_info["extended"]["type"])
    ch_units = list(nsx_info["extended"]["units"])
    ch_names, ch_types, ch_units = (
        list(map(bytes.decode, xx)) for xx in (ch_names, ch_types, ch_units)
    )
    max_analog_val = nsx_info["extended"]["max_analog_val"].astype("double")
    min_analog_val = nsx_info["extended"]["min_analog_val"].astype("double")
    max_digital_val = nsx_info["extended"]["max_digital_val"].astype("double")
    min_digital_val = nsx_info["extended"]["min_digital_val"].astype("double")
    cals = (max_analog_val - min_analog_val) / (max_digital_val - min_digital_val)

    stim_channel_idxs, _ = _check_stim_channel(stim_channel, ch_names)

    nchan = int(nsx_info["channel_count"])
    logger.info("Setting channel info structure...")
    chs = list()
    pick_mask = np.ones(len(ch_names))

    orig_units = {}
    for idx, ch_name in enumerate(ch_names):
        chan_info = {}
        chan_info["logno"] = int(nsx_info["extended"]["electrode_id"][idx])
        chan_info["scanno"] = int(nsx_info["extended"]["electrode_id"][idx])
        chan_info["ch_name"] = ch_name
        chan_info["unit_mul"] = FIFF.FIFF_UNITM_NONE
        ch_unit = ch_units[idx]
        chan_info["unit"] = FIFF.FIFF_UNIT_V
        # chan_info["range"] = _unit_range_dict[ch_units[idx]]
        chan_info["range"] = 1 / _get_scaling("eeg", ch_units[idx])
        chan_info["cal"] = cals[idx]
        chan_info["coord_frame"] = FIFF.FIFFV_COORD_HEAD
        chan_info["coil_type"] = FIFF.FIFFV_COIL_EEG
        chan_info["kind"] = FIFF.FIFFV_SEEG_CH
        # montage can't be stored in NSx so channel locs are unknown:
        chan_info["loc"] = np.full(12, np.nan)
        orig_units[ch_name] = ch_unit

        # If the NSx info contains channel type information, set it now;
        # in practice the type field is always 'CC', which maps to SEEG.
        ch_type = ch_types[idx]
        ch_const = getattr(FIFF, f"FIFFV_{CH_TYPE_MAPPING.get(ch_type, 'SEEG')}_CH")
        chan_info["kind"] = ch_const
        # if user passes in explicit mapping for eog, misc and stim
        # channels set them here.
        if ch_name in eog or idx in eog or idx - nchan in eog:
            chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE
            chan_info["kind"] = FIFF.FIFFV_EOG_CH
            pick_mask[idx] = False
        elif ch_name in misc or idx in misc or idx - nchan in misc:
            chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE
            chan_info["kind"] = FIFF.FIFFV_MISC_CH
            pick_mask[idx] = False
        elif idx in stim_channel_idxs:
            chan_info["coil_type"] = FIFF.FIFFV_COIL_NONE
            chan_info["unit"] = FIFF.FIFF_UNIT_NONE
            chan_info["kind"] = FIFF.FIFFV_STIM_CH
            pick_mask[idx] = False
            chan_info["ch_name"] = ch_name
            ch_names[idx] = chan_info["ch_name"]
        chs.append(chan_info)
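
    # Worked example for the line below (hypothetical values): a 30 kHz
    # master clock (timestamp_resolution=30000) sampled every period=30
    # ticks gives sfreq = 30000 / 30 = 1000.0 Hz.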
    sfreq = nsx_info["timestamp_resolution"] / nsx_info["period"]
    info = _empty_info(sfreq)
    info["meas_date"] = nsx_info["meas_date"]
    info["chs"] = chs
    info["ch_names"] = ch_names

    highpass = nsx_info["highpass"][:128]
    lowpass = nsx_info["lowpass"][:128]
    _decode_online_filters(info, highpass, lowpass)

    # Some keys to be consistent with FIF measurement info
    info["description"] = None

    info._unlocked = False
    info._update_redundant()

    orig_format = ORIG_FORMAT

    raw_extras = {
        key: [r[key] for r in nsx_info["data_header"]]
        for key in nsx_info["data_header"][0]
    }
    for key in raw_extras:
        raw_extras[key] = np.array(raw_extras[key], int)
    good_data_packets = raw_extras.pop("header") == 1
    if not good_data_packets.any():
        raise RuntimeError("NSx file appears to be broken")
    raw_extras = {key: raw_extras[key][good_data_packets] for key in raw_extras.keys()}
    raw_extras["timestamp"] = raw_extras["timestamp"] // nsx_info["period"]
    first_samp = raw_extras["timestamp"][0]
    last_samp = raw_extras["timestamp"][-1] + raw_extras["nb_data_points"][-1]
    n_samples = last_samp - first_samp

    return (
        info,
        fname,
        nsx_info["spec"],
        n_samples,
        orig_format,
        raw_extras,
        orig_units,
    )


def _decode_online_filters(info, highpass, lowpass):
    """Decode low/high-pass filters that are applied online."""
    if np.all(highpass == highpass[0]):
        if highpass[0] == "NaN":
            # Placeholder for future use. Highpass set in _empty_info.
            pass
        else:
            hp = float(highpass[0])
            info["highpass"] = hp
    else:
        info["highpass"] = float(np.max(highpass))
        warn(
            "Channels contain different highpass filters. Highest filter "
            "setting will be stored."
        )

    if np.all(lowpass == lowpass[0]):
        if lowpass[0] in ("NaN", "0", "0.0"):
            # Placeholder for future use. Lowpass set in _empty_info.
            pass
        else:
            info["lowpass"] = float(lowpass[0])
    else:
        info["lowpass"] = float(np.min(lowpass))
        warn(
            "Channels contain different lowpass filters. Lowest filter "
            "setting will be stored."
        )


def _check_stim_channel(stim_channel, ch_names):
    """Check that the stimulus channel exists in the current datafile."""
    DEFAULT_STIM_CH_NAMES = ["status", "trigger"]

    if stim_channel is None or stim_channel is False:
        return [], []

    if stim_channel is True:  # convenient aliases
        stim_channel = "auto"

    if isinstance(stim_channel, str):
        if stim_channel == "auto":
            if "auto" in ch_names:
                warn(
                    "Using `stim_channel='auto'` when 'auto' also"
                    " corresponds to a channel name is ambiguous."
                    " Please use `stim_channel=['auto']`.",
                    RuntimeWarning,
                )
            valid_stim_ch_names = DEFAULT_STIM_CH_NAMES
        else:
            valid_stim_ch_names = [stim_channel.lower()]

    elif isinstance(stim_channel, int):
        valid_stim_ch_names = [ch_names[stim_channel].lower()]

    elif isinstance(stim_channel, list):
        if all([isinstance(s, str) for s in stim_channel]):
            valid_stim_ch_names = [s.lower() for s in stim_channel]
        elif all([isinstance(s, int) for s in stim_channel]):
            valid_stim_ch_names = [ch_names[s].lower() for s in stim_channel]
        else:
            raise ValueError("Invalid stim_channel")
    else:
        raise ValueError("Invalid stim_channel")

    ch_names_low = [ch.lower() for ch in ch_names]
    found = list(set(valid_stim_ch_names) & set(ch_names_low))

    stim_channel_idxs = [ch_names_low.index(f) for f in found]
    names = [ch_names[idx] for idx in stim_channel_idxs]
    return stim_channel_idxs, names
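
# Worked example: _check_stim_channel("auto", ["Fp1", "STATUS"]) matches the
# default name "status" case-insensitively and returns ([1], ["STATUS"]).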
7
mne/io/persyst/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""Persyst module for conversion to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .persyst import read_raw_persyst
474
mne/io/persyst/persyst.py
Normal file
@@ -0,0 +1,474 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os
import os.path as op
from collections import OrderedDict
from datetime import datetime, timezone

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import create_info
from ..._fiff.utils import _mult_cal_one
from ...annotations import Annotations
from ...utils import _check_fname, fill_doc, logger, verbose, warn
from ..base import BaseRaw


@fill_doc
def read_raw_persyst(fname, preload=False, verbose=None) -> "RawPersyst":
    """Reader for a Persyst (.lay/.dat) recording.

    Parameters
    ----------
    fname : path-like
        Path to the Persyst header ``.lay`` file.
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawPersyst
        A Raw object containing Persyst data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawPersyst.

    Notes
    -----
    It is assumed that the ``.lay`` and ``.dat`` files
    are in the same directory. To get the correct file path to the
    ``.dat`` file, ``read_raw_persyst`` will get the corresponding dat
    filename from the lay file, and look for that file inside the same
    directory as the lay file.
    """
    return RawPersyst(fname, preload, verbose)
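
# Example usage (a sketch; "recording.lay" is a hypothetical path whose
# companion "recording.dat" sits in the same directory):
#
#     raw = read_raw_persyst("recording.lay", preload=True)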


@fill_doc
class RawPersyst(BaseRaw):
    """Raw object from a Persyst file.

    Parameters
    ----------
    fname : path-like
        Path to the Persyst header (.lay) file.
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """

    @verbose
    def __init__(self, fname, preload=False, verbose=None):
        fname = str(_check_fname(fname, "read", True, "fname"))
        logger.info(f"Loading {fname}")

        # make sure filename is the Lay file
        if not fname.endswith(".lay"):
            fname = fname + ".lay"
        # get the current directory and Lay filename
        curr_path, lay_fname = op.dirname(fname), op.basename(fname)
        if not op.exists(fname):
            raise FileNotFoundError(
                f'The path you specified, "{lay_fname}", does not exist.'
            )

        # sections and subsections currently unused
        keys, data, sections = _read_lay_contents(fname)

        # these are the section headers in the Persyst file layout
        # Note: We do not make use of "SampleTimes" yet
        fileinfo_dict = OrderedDict()
        channelmap_dict = OrderedDict()
        patient_dict = OrderedDict()
        comments_dict = OrderedDict()

        # keep track of total number of comments
        num_comments = 0

        # loop through each line in the lay file
        for key, val, section in zip(keys, data, sections):
            if key == "":
                continue

            # Make sure keys are lowercase for everything but electrodes.
            # We also do not want to lower-case comments because those
            # are free-form text where casing may matter.
            if key is not None and section not in ["channelmap", "comments"]:
                key = key.lower()

            # FileInfo
            if section == "fileinfo":
                # extract the .dat file name
                if key == "file":
                    dat_fname = op.basename(val)
                    dat_fpath = op.join(curr_path, op.basename(dat_fname))

                    # determine if .dat file exists where it should
                    error_msg = (
                        f"The data path you specified "
                        f"does not exist for the lay path, "
                        f"{lay_fname}. Make sure the dat file "
                        f"is in the same directory as the lay "
                        f"file, and the specified dat filename "
                        f"matches."
                    )
                    if not op.exists(dat_fpath):
                        raise FileNotFoundError(error_msg)
                fileinfo_dict[key] = val
            # ChannelMap
            elif section == "channelmap":
                # channel map has <channel_name>=<number> for <key>=<val>
                channelmap_dict[key] = val
            # Patient (All optional)
            elif section == "patient":
                patient_dict[key] = val
            # Comments (turned into mne.Annotations)
            elif section == "comments":
                comments_dict[key] = comments_dict.get(key, list()) + [val]
                num_comments += 1

        # get numerical metadata
        # datatype is either 7 for 32 bit, or 0 for 16 bit
        datatype = fileinfo_dict.get("datatype")
        cal = float(fileinfo_dict.get("calibration"))
        n_chs = int(fileinfo_dict.get("waveformcount"))

        # Store subject information from lay file in mne format
        # Note: Persyst also records "Physician", "Technician",
        # "Medications", "History", and "Comments1" and "Comments2"
        # and this information is currently discarded
        subject_info = _get_subjectinfo(patient_dict)

        # set measurement date
        testdate = patient_dict.get("testdate")
        if testdate is not None:
            # TODO: Persyst may change its internal date schemas
            # without notice
            # These are the 3 "so far" possible datetime storage
            # formats in Persyst .lay
            if "/" in testdate:
                testdate = datetime.strptime(testdate, "%m/%d/%Y")
            elif "-" in testdate:
                testdate = datetime.strptime(testdate, "%d-%m-%Y")
            elif "." in testdate:
                testdate = datetime.strptime(testdate, "%Y.%m.%d")

        if not isinstance(testdate, datetime):
            warn(
                "Cannot read in the measurement date due "
                "to incompatible format. Please set manually "
                f"for {lay_fname} "
            )
            meas_date = None
        else:
            testtime = datetime.strptime(patient_dict.get("testtime"), "%H:%M:%S")
            meas_date = datetime(
                year=testdate.year,
                month=testdate.month,
                day=testdate.day,
                hour=testtime.hour,
                minute=testtime.minute,
                second=testtime.second,
                tzinfo=timezone.utc,
            )

        # Create mne structure
        ch_names = list(channelmap_dict.keys())
        if n_chs != len(ch_names):
            raise RuntimeError(
                "Channels in lay file do not "
                "match the number of channels "
                "in the .dat file."
            )  # noqa
        # get rid of the "-Ref" in channel names
        ch_names = [ch.upper().split("-REF")[0] for ch in ch_names]

        # get the sampling rate and default channel types to EEG
        sfreq = fileinfo_dict.get("samplingrate")
        ch_types = "eeg"
        info = create_info(ch_names, sfreq, ch_types=ch_types)
        info.update(subject_info=subject_info)
        with info._unlock():
            for idx in range(n_chs):
                # calibration brings to uV then 1e-6 brings to V
                info["chs"][idx]["cal"] = cal * 1.0e-6
            info["meas_date"] = meas_date

        # determine number of samples in file
        # Note: We do not use the lay file to do this
        # because clips in time may be generated by Persyst that
        # DO NOT modify the "SampleTimes" section
        with open(dat_fpath, "rb") as f:
            # determine the precision
            if int(datatype) == 7:
                # 32 bit
                dtype = np.dtype("i4")
            elif int(datatype) == 0:
                # 16 bit
                dtype = np.dtype("i2")
            else:
                raise RuntimeError(f"Unknown format: {datatype}")

            # allow offset to occur
            f.seek(0, os.SEEK_END)
            n_samples = f.tell()
            n_samples = n_samples // (dtype.itemsize * n_chs)

        logger.debug(f"Loaded {n_samples} samples for {n_chs} channels.")

        raw_extras = {"dtype": dtype, "n_chs": n_chs, "n_samples": n_samples}
        # create Raw object
        super().__init__(
            info,
            preload,
            filenames=[dat_fpath],
            last_samps=[n_samples - 1],
            raw_extras=[raw_extras],
            verbose=verbose,
        )

        # set annotations based on the comments read in
        onset = np.zeros(num_comments, float)
        duration = np.zeros(num_comments, float)
        description = [""] * num_comments

        # loop through comments dictionary, which may contain
        # multiple events for the same "text" annotation
        t_idx = 0
        for _description, event_tuples in comments_dict.items():
            for _onset, _duration in event_tuples:
                # extract the onset, duration, description to
                # create an Annotations object
                onset[t_idx] = _onset
                duration[t_idx] = _duration
                description[t_idx] = _description
                t_idx += 1
        annot = Annotations(onset, duration, description)
        self.set_annotations(annot)

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a segment of data from a file.

        The Persyst software records raw data in either 16 or 32 bit
        binary files. In addition, it stores the calibration to convert
        data to uV in the lay file.
        """
        dtype = self._raw_extras[fi]["dtype"]
        n_chs = self._raw_extras[fi]["n_chs"]
        dat_fname = self.filenames[fi]

        # compute samples count based on start and stop
        time_length_samps = stop - start

        # read data from .dat file into array of correct size, then calibrate
        # records = recnum rows x inf columns
        count = time_length_samps * n_chs

        # seek the dat file
        with open(dat_fname, "rb") as dat_file_ID:
            # allow offset to occur
            dat_file_ID.seek(n_chs * dtype.itemsize * start, 1)

            # read in the actual record starting at possibly offset
            record = np.fromfile(dat_file_ID, dtype=dtype, count=count)

        # reshape to (n_chs, n_samples); samples are stored interleaved
        # (all channels for one sample, then the next), hence order="F"
        # cast as float32; more than enough precision
        record = np.reshape(record, (n_chs, -1), order="F").astype(np.float32)

        # calibrate to convert to V and handle mult
        _mult_cal_one(data, record, idx, cals, mult)


def _get_subjectinfo(patient_dict):
    # attempt to parse out the birthdate, but if it doesn't
    # meet spec, then it will be set to None
    birthdate = patient_dict.get("birthdate")
    if birthdate and "/" in birthdate:
        try:
            birthdate = datetime.strptime(birthdate, "%m/%d/%y")
        except ValueError:
            print(f"Unable to process birthdate of {birthdate} ")
            birthdate = None
    elif birthdate and "-" in birthdate:
        try:
            birthdate = datetime.strptime(birthdate, "%d-%m-%y")
        except ValueError:
            print(f"Unable to process birthdate of {birthdate} ")
            birthdate = None

    subject_info = {
        "first_name": patient_dict.get("first"),
        "middle_name": patient_dict.get("middle"),
        "last_name": patient_dict.get("last"),
        "sex": patient_dict.get("sex"),
        "hand": patient_dict.get("hand"),
        "his_id": patient_dict.get("id"),
        "birthday": birthdate,
    }
    subject_info = {key: val for key, val in subject_info.items() if val is not None}

    # Recode sex values
    sex_dict = dict(
        m=FIFF.FIFFV_SUBJ_SEX_MALE,
        male=FIFF.FIFFV_SUBJ_SEX_MALE,
        f=FIFF.FIFFV_SUBJ_SEX_FEMALE,
        female=FIFF.FIFFV_SUBJ_SEX_FEMALE,
    )
    subject_info["sex"] = sex_dict.get(
        subject_info.get("sex"), FIFF.FIFFV_SUBJ_SEX_UNKNOWN
    )

    # Recode hand values
    hand_dict = dict(
        r=FIFF.FIFFV_SUBJ_HAND_RIGHT,
        right=FIFF.FIFFV_SUBJ_HAND_RIGHT,
        l=FIFF.FIFFV_SUBJ_HAND_LEFT,
        left=FIFF.FIFFV_SUBJ_HAND_LEFT,
        a=FIFF.FIFFV_SUBJ_HAND_AMBI,
        ambidextrous=FIFF.FIFFV_SUBJ_HAND_AMBI,
        ambi=FIFF.FIFFV_SUBJ_HAND_AMBI,
    )
    # no handedness is set when unknown
    try:
        subject_info["hand"] = hand_dict[subject_info["hand"]]
    except KeyError:
        subject_info.pop("hand", None)

    return subject_info


def _read_lay_contents(fname):
    """Lay files are laid out like an INI file."""
    # keep track of sections, keys and data
    sections = []
    keys, data = [], []

    # initialize the section to an empty str
    section = ""
    with open(fname) as fin:
        for line in fin:
            # break a line into a status, key and value
            status, key, val = _process_lay_line(line, section)

            # handle keys and values if they are
            # Section, Subsections, or Line items
            if status == 1:  # Section was found
                section = val.lower()
                continue

            # keep track of all sections, subsections,
            # keys and the data of the file
            sections.append(section)
            data.append(val)
            keys.append(key)

    return keys, data, sections


def _process_lay_line(line, section):
    """Process a line read from the Lay (INI) file.

    Each line in the .lay file will be processed
    into a structured ``status``, ``key`` and ``value``.

    Parameters
    ----------
    line : str
        The actual line in the Lay file.
    section : str
        The section in the Lay file.

    Returns
    -------
    status : int
        Returns the following integers based on status.
        -1 => unknown string found
        0 => empty line found
        1 => section found
        2 => key-value pair found
    key : str
        The string before the ``'='`` character. If section is "Comments",
        then returns the text comment description.
    value : str
        The string from the line after the ``'='`` character. If section is
        "Comments", then returns the onset and duration as a tuple.

    Notes
    -----
    The lay file comprises multiple "sections" that are marked with
    bracket ``[]`` characters. For example, ``[FileInfo]`` and the lines
    afterward indicate metadata about the data file itself. Within
    each section, there are multiple lines in the format of
    ``<key>=<value>``.

    For ``FileInfo``, ``Patient`` and ``ChannelMap``
    each line will be denoted with a ``key`` and a ``value`` that
    can be represented as a dictionary. The keys describe what sort
    of data that line holds, while the values contain the corresponding
    value. In some cases, the ``value`` may be empty.

    For ``SampleTimes``, the ``key`` and ``value`` pair indicate the
    start and end time in seconds of the original data file.

    For the ``Comments`` section, this denotes an area where users through
    Persyst actually annotate data in time. These are instead
    represented as 5 data points that are ``,`` delimited. These
    data points are ordered as:

    1. time (in seconds) of the annotation
    2. duration (in seconds) of the annotation
    3. state (unused)
    4. variable type (unused)
    5. free-form text describing the annotation
    """
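    # Worked example for a hypothetical Comments line: "12.5,0.0,0,0,Seizure"
    # parses to status=2, key="Seizure", value=("12.5", "0.0").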
key = "" # default; only return value possibly not set
|
||||
line = line.strip() # remove leading and trailing spaces
|
||||
end_idx = len(line) - 1 # get the last index of the line
|
||||
|
||||
# empty sequence evaluates to false
|
||||
if not line:
|
||||
status = 0
|
||||
key = ""
|
||||
value = ""
|
||||
return status, key, value
|
||||
# section found
|
||||
elif (line[0] == "[") and (line[end_idx] == "]") and (end_idx + 1 >= 3):
|
||||
status = 1
|
||||
value = line[1:end_idx].lower()
|
||||
# key found
|
||||
else:
|
||||
# handle Comments section differently from all other sections
|
||||
# TODO: utilize state and var_type in code.
|
||||
# Currently not used
|
||||
if section == "comments":
|
||||
# Persyst Comments output 5 variables "," separated
|
||||
time_sec, duration, state, var_type, text = line.split(",", 4)
|
||||
del var_type, state
|
||||
status = 2
|
||||
key = text
|
||||
value = (time_sec, duration)
|
||||
# all other sections
|
||||
else:
|
||||
if "=" not in line:
|
||||
raise RuntimeError(
|
||||
f"The line {line} does not conform "
|
||||
"to the standards. Please check the "
|
||||
".lay file."
|
||||
) # noqa
|
||||
pos = line.index("=")
|
||||
status = 2
|
||||
|
||||
# the line now is composed of a
|
||||
# <key>=<value>
|
||||
key = line[0:pos]
|
||||
key.strip()
|
||||
value = line[pos + 1 : end_idx + 1]
|
||||
value.strip()
|
||||
return status, key, value
|
||||
18
mne/io/pick.py
Normal file
@@ -0,0 +1,18 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.


from .._fiff.pick import (
    _DATA_CH_TYPES_ORDER_DEFAULT,
    _DATA_CH_TYPES_SPLIT,
    _picks_to_idx,
)

__all__ = [
    # mne-bids, autoreject, mne-connectivity, mne-realtime, mne-nirs
    "_picks_to_idx",
    # mne-qt-browser
    "_DATA_CH_TYPES_ORDER_DEFAULT",
    "_DATA_CH_TYPES_SPLIT",
]
7
mne/io/snirf/__init__.py
Normal file
@@ -0,0 +1,7 @@
"""SNIRF module for conversion to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from ._snirf import read_raw_snirf
585
mne/io/snirf/_snirf.py
Normal file
@@ -0,0 +1,585 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import datetime
import re

import numpy as np

from ..._fiff._digitization import _make_dig_points
from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _format_dig_points, create_info
from ..._fiff.utils import _mult_cal_one
from ..._freesurfer import get_mni_fiducials
from ...annotations import Annotations
from ...transforms import _frame_to_str, apply_trans
from ...utils import _check_fname, _import_h5py, fill_doc, logger, verbose, warn
from ..base import BaseRaw
from ..nirx.nirx import _convert_fnirs_to_head


@fill_doc
def read_raw_snirf(
    fname, optode_frame="unknown", preload=False, verbose=None
) -> "RawSNIRF":
    """Reader for continuous wave SNIRF data.

    .. note:: This reader supports the .snirf file type only,
              not the .jnirs version.
              Files with either 3D or 2D locations can be read.
              However, we strongly recommend using 3D positions.
              If 2D positions are used the behaviour of MNE functions
              cannot be guaranteed.

    Parameters
    ----------
    fname : path-like
        Path to the SNIRF data file.
    optode_frame : str
        Coordinate frame used for the optode positions. The default is unknown,
        in which case the positions are not modified. If a known coordinate
        frame is provided (head, meg, mri), then the positions are transformed
        into the Neuromag head coordinate frame (head).
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawSNIRF
        A Raw object containing fNIRS data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawSNIRF.
    """
    return RawSNIRF(fname, optode_frame, preload, verbose)
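
# Example usage (a sketch; "recording.snirf" is a hypothetical path):
#
#     raw = read_raw_snirf("recording.snirf", optode_frame="mri", preload=True)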


def _open(fname):
    return open(fname, encoding="latin-1")


@fill_doc
class RawSNIRF(BaseRaw):
    """Raw object from a continuous wave SNIRF file.

    Parameters
    ----------
    fname : path-like
        Path to the SNIRF data file.
    optode_frame : str
        Coordinate frame used for the optode positions. The default is unknown,
        in which case the positions are not modified. If a known coordinate
        frame is provided (head, meg, mri), then the positions are transformed
        into the Neuromag head coordinate frame (head).
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods.
    """

    @verbose
    def __init__(self, fname, optode_frame="unknown", preload=False, verbose=None):
        # Must be here due to circular import error
        from ...preprocessing.nirs import _validate_nirs_info

        h5py = _import_h5py()

        fname = str(_check_fname(fname, "read", True, "fname"))
        logger.info(f"Loading {fname}")

        with h5py.File(fname, "r") as dat:
            if "data2" in dat["nirs"]:
                warn(
                    "File contains multiple recordings. "
                    "MNE does not support this feature. "
                    "Only the first dataset will be processed."
                )

            manufacturer = _get_metadata_str(dat, "ManufacturerName")
            if (optode_frame == "unknown") & (manufacturer == "Gowerlabs"):
                optode_frame = "head"

            snirf_data_type = np.array(
                dat.get("nirs/data1/measurementList1/dataType")
            ).item()
            if snirf_data_type not in [1, 99999]:
                # 1 = Continuous Wave
                # 99999 = Processed
                raise RuntimeError(
                    "MNE only supports reading continuous"
                    " wave amplitude and processed haemoglobin"
                    " SNIRF files. Expected type"
                    " code 1 or 99999 but received type "
                    f"code {snirf_data_type}"
                )

            last_samps = dat.get("/nirs/data1/dataTimeSeries").shape[0] - 1

            sampling_rate = _extract_sampling_rate(dat)

            if sampling_rate == 0:
                warn("Unable to extract sample rate from SNIRF file.")

            # Extract wavelengths
            fnirs_wavelengths = np.array(dat.get("nirs/probe/wavelengths"))
            fnirs_wavelengths = [int(w) for w in fnirs_wavelengths]
            if len(fnirs_wavelengths) != 2:
                raise RuntimeError(
                    f"The data contains "
                    f"{len(fnirs_wavelengths)}"
                    f" wavelengths: {fnirs_wavelengths}. "
                    f"MNE only supports reading continuous"
                    " wave amplitude SNIRF files "
                    "with two wavelengths."
                )

            # Extract channels
            def atoi(text):
                return int(text) if text.isdigit() else text

            def natural_keys(text):
                return [atoi(c) for c in re.split(r"(\d+)", text)]
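
            # Worked example: natural_keys("measurementList10") ->
            # ["measurementList", 10, ""], so "measurementList2" sorts before
            # "measurementList10", unlike plain lexicographic sorting.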
|
||||
channels = np.array([name for name in dat["nirs"]["data1"].keys()])
|
||||
channels_idx = np.array(["measurementList" in n for n in channels])
|
||||
channels = channels[channels_idx]
|
||||
channels = sorted(channels, key=natural_keys)
|
||||
|
||||
# Source and detector labels are optional fields.
|
||||
# Use S1, S2, S3, etc if not specified.
|
||||
if "sourceLabels_disabled" in dat["nirs/probe"]:
|
||||
# This is disabled as
|
||||
# MNE-Python does not currently support custom source names.
|
||||
# Instead, sources must be integer values.
|
||||
sources = np.array(dat.get("nirs/probe/sourceLabels"))
|
||||
sources = [s.decode("UTF-8") for s in sources]
|
||||
else:
|
||||
sources = np.unique(
|
||||
[
|
||||
_correct_shape(
|
||||
np.array(dat.get("nirs/data1/" + c + "/sourceIndex"))
|
||||
)[0]
|
||||
for c in channels
|
||||
]
|
||||
)
|
||||
sources = {int(s): f"S{int(s)}" for s in sources}
|
||||
|
||||
if "detectorLabels_disabled" in dat["nirs/probe"]:
|
||||
# This is disabled as
|
||||
# MNE-Python does not currently support custom detector names.
|
||||
# Instead, detector must be integer values.
|
||||
detectors = np.array(dat.get("nirs/probe/detectorLabels"))
|
||||
detectors = [d.decode("UTF-8") for d in detectors]
|
||||
else:
|
||||
detectors = np.unique(
|
||||
[
|
||||
_correct_shape(
|
||||
np.array(dat.get("nirs/data1/" + c + "/detectorIndex"))
|
||||
)[0]
|
||||
for c in channels
|
||||
]
|
||||
)
|
||||
detectors = {int(d): f"D{int(d)}" for d in detectors}
|
||||
|
||||
# Extract source and detector locations
|
||||
# 3D positions are optional in SNIRF,
|
||||
# but highly recommended in MNE.
|
||||
if ("detectorPos3D" in dat["nirs/probe"]) & (
|
||||
"sourcePos3D" in dat["nirs/probe"]
|
||||
):
|
||||
# If 3D positions are available they are used even if 2D exists
|
||||
detPos3D = np.array(dat.get("nirs/probe/detectorPos3D"))
|
||||
srcPos3D = np.array(dat.get("nirs/probe/sourcePos3D"))
|
||||
elif ("detectorPos2D" in dat["nirs/probe"]) & (
|
||||
"sourcePos2D" in dat["nirs/probe"]
|
||||
):
|
||||
warn(
|
||||
"The data only contains 2D location information for the "
|
||||
"optode positions. "
|
||||
"It is highly recommended that data is used "
|
||||
"which contains 3D location information for the "
|
||||
"optode positions. With only 2D locations it can not be "
|
||||
"guaranteed that MNE functions will behave correctly "
|
||||
"and produce accurate results. If it is not possible to "
|
||||
"include 3D positions in your data, please consider "
|
||||
"using the set_montage() function."
|
||||
)
|
||||
|
||||
detPos2D = np.array(dat.get("nirs/probe/detectorPos2D"))
|
||||
srcPos2D = np.array(dat.get("nirs/probe/sourcePos2D"))
|
||||
# Set the third dimension to zero. See gh#9308
|
||||
detPos3D = np.append(detPos2D, np.zeros((detPos2D.shape[0], 1)), axis=1)
|
||||
srcPos3D = np.append(srcPos2D, np.zeros((srcPos2D.shape[0], 1)), axis=1)
|
||||
|
||||
else:
|
||||
raise RuntimeError(
|
||||
"No optode location information is "
|
||||
"provided. MNE requires at least 2D "
|
||||
"location information"
|
||||
)
|
||||
|
||||
chnames = []
|
||||
ch_types = []
|
||||
for chan in channels:
|
||||
src_idx = int(
|
||||
_correct_shape(
|
||||
np.array(dat.get("nirs/data1/" + chan + "/sourceIndex"))
|
||||
)[0]
|
||||
)
|
||||
det_idx = int(
|
||||
_correct_shape(
|
||||
np.array(dat.get("nirs/data1/" + chan + "/detectorIndex"))
|
||||
)[0]
|
||||
)
|
||||
|
||||
if snirf_data_type == 1:
|
||||
wve_idx = int(
|
||||
_correct_shape(
|
||||
np.array(dat.get("nirs/data1/" + chan + "/wavelengthIndex"))
|
||||
)[0]
|
||||
)
|
||||
ch_name = (
|
||||
sources[src_idx]
|
||||
+ "_"
|
||||
+ detectors[det_idx]
|
||||
+ " "
|
||||
+ str(fnirs_wavelengths[wve_idx - 1])
|
||||
)
|
||||
chnames.append(ch_name)
|
||||
ch_types.append("fnirs_cw_amplitude")
|
||||
|
||||
elif snirf_data_type == 99999:
|
||||
dt_id = _correct_shape(
|
||||
np.array(dat.get("nirs/data1/" + chan + "/dataTypeLabel"))
|
||||
)[0].decode("UTF-8")
|
||||
|
||||
# Convert between SNIRF processed names and MNE type names
|
||||
dt_id = dt_id.lower().replace("dod", "fnirs_od")
|
||||
|
||||
ch_name = sources[src_idx] + "_" + detectors[det_idx]
|
||||
|
||||
if dt_id == "fnirs_od":
|
||||
wve_idx = int(
|
||||
_correct_shape(
|
||||
np.array(
|
||||
dat.get("nirs/data1/" + chan + "/wavelengthIndex")
|
||||
)
|
||||
)[0]
|
||||
)
|
||||
suffix = " " + str(fnirs_wavelengths[wve_idx - 1])
|
||||
else:
|
||||
suffix = " " + dt_id.lower()
|
||||
ch_name = ch_name + suffix
|
||||
|
||||
chnames.append(ch_name)
|
||||
ch_types.append(dt_id)
|
||||
|
||||
# Create mne structure
|
||||
info = create_info(chnames, sampling_rate, ch_types=ch_types)
|
||||
|
||||
subject_info = {}
|
||||
names = np.array(dat.get("nirs/metaDataTags/SubjectID"))
|
||||
names = _correct_shape(names)[0].decode("UTF-8")
|
||||
subject_info["his_id"] = names
|
||||
# Read non standard (but allowed) custom metadata tags
|
||||
if "lastName" in dat.get("nirs/metaDataTags/"):
|
||||
ln = dat.get("/nirs/metaDataTags/lastName")[0].decode("UTF-8")
|
||||
subject_info["last_name"] = ln
|
||||
if "middleName" in dat.get("nirs/metaDataTags/"):
|
||||
m = dat.get("/nirs/metaDataTags/middleName")[0].decode("UTF-8")
|
||||
subject_info["middle_name"] = m
|
||||
if "firstName" in dat.get("nirs/metaDataTags/"):
|
||||
fn = dat.get("/nirs/metaDataTags/firstName")[0].decode("UTF-8")
|
||||
subject_info["first_name"] = fn
|
||||
else:
|
||||
# MNE < 1.7 used to not write the firstName tag, so pull it from names
|
||||
subject_info["first_name"] = names.split("_")[0]
|
||||
if "sex" in dat.get("nirs/metaDataTags/"):
|
||||
s = dat.get("/nirs/metaDataTags/sex")[0].decode("UTF-8")
|
||||
if s in {"M", "Male", "1", "m"}:
|
||||
subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_MALE
|
||||
elif s in {"F", "Female", "2", "f"}:
|
||||
subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_FEMALE
|
||||
elif s in {"0", "u"}:
|
||||
subject_info["sex"] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN
|
||||
# End non standard name reading
|
||||
# Update info
|
||||
info.update(subject_info=subject_info)
|
||||
|
||||
length_unit = _get_metadata_str(dat, "LengthUnit")
|
||||
length_scaling = _get_lengthunit_scaling(length_unit)
|
||||
|
||||
srcPos3D /= length_scaling
|
||||
detPos3D /= length_scaling
|
||||
|
||||
if optode_frame in ["mri", "meg"]:
|
||||
# These are all in MNI or MEG coordinates, so let's transform
|
||||
# them to the Neuromag head coordinate frame
|
||||
srcPos3D, detPos3D, _, head_t = _convert_fnirs_to_head(
|
||||
"fsaverage", optode_frame, "head", srcPos3D, detPos3D, []
|
||||
)
|
||||
else:
|
||||
head_t = np.eye(4)
|
||||
|
||||
if optode_frame in ["head", "mri", "meg"]:
|
||||
# Then the transformation to head was performed above
|
||||
coord_frame = FIFF.FIFFV_COORD_HEAD
|
||||
elif "MNE_coordFrame" in dat.get("nirs/metaDataTags/"):
|
||||
coord_frame = int(dat.get("/nirs/metaDataTags/MNE_coordFrame")[0])
|
||||
else:
|
||||
coord_frame = FIFF.FIFFV_COORD_UNKNOWN
|
||||
|
||||
for idx, chan in enumerate(channels):
|
||||
src_idx = int(
|
||||
_correct_shape(
|
||||
np.array(dat.get("nirs/data1/" + chan + "/sourceIndex"))
|
||||
)[0]
|
||||
)
|
||||
det_idx = int(
|
||||
_correct_shape(
|
||||
np.array(dat.get("nirs/data1/" + chan + "/detectorIndex"))
|
||||
)[0]
|
||||
)
|
||||
|
||||
info["chs"][idx]["loc"][3:6] = srcPos3D[src_idx - 1, :]
|
||||
info["chs"][idx]["loc"][6:9] = detPos3D[det_idx - 1, :]
|
||||
# Store channel as mid point
|
||||
midpoint = (
|
||||
info["chs"][idx]["loc"][3:6] + info["chs"][idx]["loc"][6:9]
|
||||
) / 2
|
||||
info["chs"][idx]["loc"][0:3] = midpoint
|
||||
info["chs"][idx]["coord_frame"] = coord_frame
|
||||
|
||||
if (snirf_data_type in [1]) or (
|
||||
(snirf_data_type == 99999) and (ch_types[idx] == "fnirs_od")
|
||||
):
|
||||
wve_idx = int(
|
||||
_correct_shape(
|
||||
np.array(dat.get("nirs/data1/" + chan + "/wavelengthIndex"))
|
||||
)[0]
|
||||
)
|
||||
info["chs"][idx]["loc"][9] = fnirs_wavelengths[wve_idx - 1]
|
||||
|
||||
if "landmarkPos3D" in dat.get("nirs/probe/"):
|
||||
diglocs = np.array(dat.get("/nirs/probe/landmarkPos3D"))
|
||||
diglocs /= length_scaling
|
||||
digname = np.array(dat.get("/nirs/probe/landmarkLabels"))
|
||||
nasion, lpa, rpa, hpi = None, None, None, None
|
||||
extra_ps = dict()
|
||||
for idx, dign in enumerate(digname):
|
||||
dign = dign.lower()
|
||||
if dign in [b"lpa", b"al"]:
|
||||
lpa = diglocs[idx, :3]
|
||||
elif dign in [b"nasion"]:
|
||||
nasion = diglocs[idx, :3]
|
||||
elif dign in [b"rpa", b"ar"]:
|
||||
rpa = diglocs[idx, :3]
|
||||
else:
|
||||
extra_ps[f"EEG{len(extra_ps) + 1:03d}"] = diglocs[idx, :3]
|
||||
add_missing_fiducials = (
|
||||
coord_frame == FIFF.FIFFV_COORD_HEAD
|
||||
and lpa is None
|
||||
and rpa is None
|
||||
and nasion is None
|
||||
)
|
||||
dig = _make_dig_points(
|
||||
nasion=nasion,
|
||||
lpa=lpa,
|
||||
rpa=rpa,
|
||||
hpi=hpi,
|
||||
dig_ch_pos=extra_ps,
|
||||
coord_frame=_frame_to_str[coord_frame],
|
||||
add_missing_fiducials=add_missing_fiducials,
|
||||
)
|
||||
else:
|
||||
ch_locs = [info["chs"][idx]["loc"][0:3] for idx in range(len(channels))]
|
||||
# Set up digitization
|
||||
dig = get_mni_fiducials("fsaverage", verbose=False)
|
||||
for fid in dig:
|
||||
fid["r"] = apply_trans(head_t, fid["r"])
|
||||
fid["coord_frame"] = FIFF.FIFFV_COORD_HEAD
|
||||
for ii, ch_loc in enumerate(ch_locs, 1):
|
||||
dig.append(
|
||||
dict(
|
||||
kind=FIFF.FIFFV_POINT_EEG, # misnomer prob okay
|
||||
r=ch_loc,
|
||||
ident=ii,
|
||||
coord_frame=FIFF.FIFFV_COORD_HEAD,
|
||||
)
|
||||
)
|
||||
dig = _format_dig_points(dig)
|
||||
del head_t
|
||||
with info._unlock():
|
||||
info["dig"] = dig
|
||||
|
||||

            str_date = _correct_shape(
                np.array(dat.get("/nirs/metaDataTags/MeasurementDate"))
            )[0].decode("UTF-8")
            str_time = _correct_shape(
                np.array(dat.get("/nirs/metaDataTags/MeasurementTime"))
            )[0].decode("UTF-8")
            str_datetime = str_date + str_time

            # Several formats have been observed, so we try each in turn
            for dt_code in [
                "%Y-%m-%d%H:%M:%SZ",
                "%Y-%m-%d%H:%M:%S",
                "%Y-%m-%d%H:%M:%S.%f",
                "%Y-%m-%d%H:%M:%S.%f%z",
            ]:
                try:
                    meas_date = datetime.datetime.strptime(str_datetime, dt_code)
                except ValueError:
                    pass
                else:
                    break
            else:
                warn(
                    "Extraction of measurement date from SNIRF file failed. "
                    "The date is being set to January 1st, 2000, "
                    f"instead of {str_datetime}"
                )
                meas_date = datetime.datetime(2000, 1, 1, 0, 0, 0)
            meas_date = meas_date.replace(tzinfo=datetime.timezone.utc)
            with info._unlock():
                info["meas_date"] = meas_date
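
            # Illustration of the fallback above: the for/else reaches the
            # warning only when every format fails. A typical concatenated
            # value such as "2024-01-0212:30:00" parses with the second
            # pattern:
            #
            #     datetime.datetime.strptime("2024-01-0212:30:00",
            #                                "%Y-%m-%d%H:%M:%S")
            #     # -> datetime.datetime(2024, 1, 2, 12, 30)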
if "DateOfBirth" in dat.get("nirs/metaDataTags/"):
|
||||
str_birth = (
|
||||
np.array(dat.get("/nirs/metaDataTags/DateOfBirth")).item().decode()
|
||||
)
|
||||
birth_matched = re.fullmatch(r"(\d+)-(\d+)-(\d+)", str_birth)
|
||||
if birth_matched is not None:
|
||||
birthday = datetime.date(
|
||||
int(birth_matched.groups()[0]),
|
||||
int(birth_matched.groups()[1]),
|
||||
int(birth_matched.groups()[2]),
|
||||
)
|
||||
with info._unlock():
|
||||
info["subject_info"]["birthday"] = birthday
|
||||
|
||||

            super().__init__(
                info,
                preload,
                filenames=[fname],
                last_samps=[last_samps],
                verbose=verbose,
            )

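            # The stim groups handled below follow the SNIRF convention of one
            # [starttime, duration, value] row per event, which maps directly
            # onto Annotations(onset, duration, description).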
            # Extract annotations
            # As described at https://github.com/fNIRS/snirf/
            # blob/master/snirf_specification.md#nirsistimjdata
            annot = Annotations([], [], [])
            for key in dat["nirs"]:
                if "stim" in key:
                    data = np.atleast_2d(np.array(dat.get("/nirs/" + key + "/data")))
                    if data.shape[1] >= 3:
                        desc = _correct_shape(
                            np.array(dat.get("/nirs/" + key + "/name"))
                        )[0]
                        annot.append(data[:, 0], data[:, 1], desc.decode("UTF-8"))
            self.set_annotations(annot, emit_warning=False)

        # Validate that the fNIRS info is correctly formatted
        _validate_nirs_info(self.info)

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a segment of data from a file."""
        import h5py

        with h5py.File(self.filenames[0], "r") as dat:
            one = dat["/nirs/data1/dataTimeSeries"][start:stop].T

        _mult_cal_one(data, one, idx, cals, mult)

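
# A minimal usage sketch (illustrative; assumes a local file named
# "recording.snirf"; read_raw_snirf is the public entry point wrapping the
# class above):
#
#     import mne
#     raw = mne.io.read_raw_snirf("recording.snirf", preload=True)
#     print(raw.info["sfreq"], raw.annotations)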


# Helper function for when the numpy array has shape (), i.e. just one element.
def _correct_shape(arr):
    if arr.shape == ():
        arr = arr[np.newaxis]
    return arr
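
# For instance, a scalar HDF5 dataset round-trips through numpy as a 0-d
# array; _correct_shape makes it safely indexable (illustrative, assuming
# `dat` is an open h5py.File):
#
#     np.array(dat.get("nirs/metaDataTags/LengthUnit")).shape  # ()
#     _correct_shape(np.array(dat.get("nirs/metaDataTags/LengthUnit")))[0]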


def _get_timeunit_scaling(time_unit):
    """MNE expects time in seconds; return the required scaling factor."""
    scalings = {"ms": 1000, "s": 1, "unknown": 1}
    if time_unit in scalings:
        return scalings[time_unit]
    else:
        raise RuntimeError(
            f"The time unit {time_unit} is not supported by "
            "MNE. Please report this error as a GitHub "
            "issue to inform the developers."
        )
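
# Illustration: if /nirs/data1/time is stored in milliseconds, the raw rate
# computed from those values is per-millisecond and must be scaled to Hz:
#
#     1.0 / (time_ms[1] - time_ms[0]) * _get_timeunit_scaling("ms")  # Hz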


def _get_lengthunit_scaling(length_unit):
    """MNE expects distance in meters; return the required scaling factor."""
    scalings = {"m": 1, "cm": 100, "mm": 1000}
    if length_unit in scalings:
        return scalings[length_unit]
    else:
        raise RuntimeError(
            f"The length unit {length_unit} is not supported "
            "by MNE. Please report this error as a GitHub "
            "issue to inform the developers."
        )
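
# Illustration: optode and landmark positions are divided by this factor to
# land in meters, as with `diglocs /= length_scaling` above, e.g.:
#
#     pos_m = pos_mm / _get_lengthunit_scaling("mm")  # mm -> m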


def _extract_sampling_rate(dat):
    """Extract the sample rate from the time field."""
    # This is a workaround to provide support for Artinis data.
    # It allows for a 1% variation in the sampling times relative
    # to the average sampling rate of the file.
    MAXIMUM_ALLOWED_SAMPLING_JITTER_PERCENTAGE = 1.0

    time_data = np.array(dat.get("nirs/data1/time"))
    sampling_rate = 0
    if len(time_data) == 2:
        # specified as onset, samplerate
        sampling_rate = 1.0 / (time_data[1] - time_data[0])
    else:
        # specified as time points
        periods = np.diff(time_data)
        uniq_periods = np.unique(periods.round(decimals=4))
        if uniq_periods.size == 1:
            # Uniformly sampled data
            sampling_rate = 1.0 / uniq_periods.item()
        else:
            # Hopefully uniformly sampled data with some precision issues.
            # This is a workaround to provide support for Artinis data.
            mean_period = np.mean(periods)
            sampling_rate = 1.0 / mean_period
            ideal_times = np.linspace(time_data[0], time_data[-1], time_data.size)
            max_jitter = np.max(np.abs(time_data - ideal_times))
            percent_jitter = 100.0 * max_jitter / mean_period
            msg = (
                f"Found jitter of {percent_jitter:.3f}% in sample times. Sampling "
                f"rate has been set to {sampling_rate:.1f}."
            )
            if percent_jitter > MAXIMUM_ALLOWED_SAMPLING_JITTER_PERCENTAGE:
                warn(
                    f"{msg} Note that MNE-Python does not currently support SNIRF "
                    "files with non-uniformly-sampled data."
                )
            else:
                logger.info(msg)
    time_unit = _get_metadata_str(dat, "TimeUnit")
    time_unit_scaling = _get_timeunit_scaling(time_unit)
    sampling_rate *= time_unit_scaling

    return sampling_rate
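
# A sketch of the jitter tolerance above on synthetic, slightly perturbed
# sample times (the names here are illustrative, not part of the reader):
#
#     times = np.linspace(0.0, 9.9, 100)  # nominal 10 Hz
#     times += np.random.default_rng(0).uniform(-1e-5, 1e-5, 100)
#     periods = np.diff(times)
#     sfreq = 1.0 / periods.mean()
#     ideal = np.linspace(times[0], times[-1], times.size)
#     jitter_pct = 100 * np.abs(times - ideal).max() / periods.mean()
#     # jitter_pct stays far below the 1% threshold, so sfreq is accepted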


def _get_metadata_str(dat, field):
    if field not in np.array(dat.get("nirs/metaDataTags")):
        return None
    data = dat.get(f"/nirs/metaDataTags/{field}")
    data = _correct_shape(np.array(data))
    data = str(data[0], "utf-8")
    return data
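
# For example, _get_metadata_str(dat, "TimeUnit") returns "s" for a file
# whose metaDataTags store b"s", and None when the tag is absent entirely.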