initial commit
mne/io/egi/__init__.py (new file, 8 lines)
@@ -0,0 +1,8 @@
"""EGI module for conversion to FIF."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from .egi import read_raw_egi
from .egimff import read_evokeds_mff

mne/io/egi/egi.py (new file, 332 lines)
@@ -0,0 +1,332 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import datetime
import time

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _empty_info
from ..._fiff.utils import _create_chs, _read_segments_file
from ...annotations import Annotations
from ...utils import _check_fname, _validate_type, logger, verbose
from ..base import BaseRaw
from .egimff import _read_raw_egi_mff
from .events import _combine_triggers, _triage_include_exclude


def _read_header(fid):
    """Read EGI binary header."""
    version = np.fromfile(fid, "<i4", 1)[0]

    if version > 6 & ~np.bitwise_and(version, 6):
        version = version.byteswap().astype(np.uint32)
    else:
        raise ValueError("Watchout. This does not seem to be a simple binary EGI file.")

    def my_fread(*x, **y):
        return int(np.fromfile(*x, **y)[0])

    info = dict(
        version=version,
        year=my_fread(fid, ">i2", 1),
        month=my_fread(fid, ">i2", 1),
        day=my_fread(fid, ">i2", 1),
        hour=my_fread(fid, ">i2", 1),
        minute=my_fread(fid, ">i2", 1),
        second=my_fread(fid, ">i2", 1),
        millisecond=my_fread(fid, ">i4", 1),
        samp_rate=my_fread(fid, ">i2", 1),
        n_channels=my_fread(fid, ">i2", 1),
        gain=my_fread(fid, ">i2", 1),
        bits=my_fread(fid, ">i2", 1),
        value_range=my_fread(fid, ">i2", 1),
    )

    unsegmented = 1 if np.bitwise_and(version, 1) == 0 else 0
    precision = np.bitwise_and(version, 6)
    if precision == 0:
        raise RuntimeError("Floating point precision is undefined.")

    if unsegmented:
        info.update(
            dict(
                n_categories=0,
                n_segments=1,
                n_samples=int(np.fromfile(fid, ">i4", 1)[0]),
                n_events=int(np.fromfile(fid, ">i2", 1)[0]),
                event_codes=[],
                category_names=[],
                category_lengths=[],
                pre_baseline=0,
            )
        )
        for event in range(info["n_events"]):
            event_codes = "".join(np.fromfile(fid, "S1", 4).astype("U1"))
            info["event_codes"].append(event_codes)
    else:
        raise NotImplementedError("Only continuous files are supported")
    info["unsegmented"] = unsegmented
    info["dtype"], info["orig_format"] = {
        2: (">i2", "short"),
        4: (">f4", "float"),
        6: (">f8", "double"),
    }[precision]
    info["dtype"] = np.dtype(info["dtype"])
    return info


def _read_events(fid, info):
    """Read events."""
    events = np.zeros([info["n_events"], info["n_segments"] * info["n_samples"]])
    fid.seek(36 + info["n_events"] * 4, 0)  # skip header
    for si in range(info["n_samples"]):
        # skip data channels
        fid.seek(info["n_channels"] * info["dtype"].itemsize, 1)
        # read event channels
        events[:, si] = np.fromfile(fid, info["dtype"], info["n_events"])
    return events


@verbose
def read_raw_egi(
    input_fname,
    eog=None,
    misc=None,
    include=None,
    exclude=None,
    preload=False,
    channel_naming="E%d",
    *,
    events_as_annotations=True,
    verbose=None,
) -> "RawEGI":
    """Read EGI simple binary as raw object.

    Parameters
    ----------
    input_fname : path-like
        Path to the raw file. Files with an extension ``.mff`` are
        automatically considered to be EGI's native MFF format files.
    eog : list or tuple
        Names of channels or list of indices that should be designated
        EOG channels. Default is None.
    misc : list or tuple
        Names of channels or list of indices that should be designated
        MISC channels. Default is None.
    include : None | list
        The event channels to be included when creating the synthetic
        trigger or annotations. Defaults to None.
        Note. Overrides ``exclude`` parameter.
    exclude : None | list
        The event channels to be ignored when creating the synthetic
        trigger or annotations. Defaults to None. If None, the ``sync`` and ``TREV``
        channels will be ignored. This is ignored when ``include`` is not None.
    %(preload)s

        .. versionadded:: 0.11
    channel_naming : str
        Channel naming convention for the data channels. Defaults to ``'E%%d'``
        (resulting in channel names ``'E1'``, ``'E2'``, ``'E3'``...). The
        effective default prior to 0.14.0 was ``'EEG %%03d'``.

        .. versionadded:: 0.14.0
    events_as_annotations : bool
        If True (default), annotations are created from experiment events. If
        False, a synthetic trigger channel ``STI 014`` is created from experiment
        events. See the Notes section for details. The default changed from
        False to True in version 1.9.

        .. versionadded:: 1.8.0
    %(verbose)s

    Returns
    -------
    raw : instance of RawEGI
        A Raw object containing EGI data.
        See :class:`mne.io.Raw` for documentation of attributes and methods.

    See Also
    --------
    mne.io.Raw : Documentation of attributes and methods of RawEGI.

    Notes
    -----
    When ``events_as_annotations=True``, event codes on stimulus channels like
    ``DIN1`` are stored as annotations with the ``description`` set to the stimulus
    channel name.

    When ``events_as_annotations=False`` and events are present on the included
    stimulus channels, a new stim channel ``STI 014`` will be synthesized from the
    events. It will contain 1-sample pulses where the Netstation file had event
    timestamps. A ``raw.event_id`` dictionary is added to the raw object that will
    have arbitrary sequential integer IDs for the events. This will fail if any
    timestamps are duplicated. The ``event_id`` will also not survive a save/load
    round-trip.

    For these reasons, it is recommended to use ``events_as_annotations=True``.
    """
    _validate_type(input_fname, "path-like", "input_fname")
    input_fname = str(input_fname)
    _validate_type(events_as_annotations, bool, "events_as_annotations")

    if input_fname.rstrip("/\\").endswith(".mff"):  # allows .mff or .mff/
        return _read_raw_egi_mff(
            input_fname,
            eog,
            misc,
            include,
            exclude,
            preload,
            channel_naming,
            events_as_annotations=events_as_annotations,
            verbose=verbose,
        )
    return RawEGI(
        input_fname,
        eog,
        misc,
        include,
        exclude,
        preload,
        channel_naming,
        events_as_annotations=events_as_annotations,
        verbose=verbose,
    )
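
For orientation, a minimal usage sketch of this public entry point (the file name ``sample.raw`` is a hypothetical placeholder): with ``events_as_annotations=True`` the event channels end up as annotations, which can be converted back into an events array.

import mne

# Hypothetical path; a .raw file is read here, .mff dispatches to _read_raw_egi_mff.
raw = mne.io.read_raw_egi("sample.raw", events_as_annotations=True, preload=True)

# Each event code becomes an annotation description.
events, event_id = mne.events_from_annotations(raw)
print(event_id)  # e.g. {'DIN1': 1, 'DIN2': 2}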


class RawEGI(BaseRaw):
    """Raw object from EGI simple binary file."""

    _extra_attributes = ("event_id",)

    @verbose
    def __init__(
        self,
        input_fname,
        eog=None,
        misc=None,
        include=None,
        exclude=None,
        preload=False,
        channel_naming="E%d",
        *,
        events_as_annotations=True,
        verbose=None,
    ):
        input_fname = str(_check_fname(input_fname, "read", True, "input_fname"))
        if eog is None:
            eog = []
        if misc is None:
            misc = []
        with open(input_fname, "rb") as fid:  # 'rb' important for py3k
            logger.info(f"Reading EGI header from {input_fname}...")
            egi_info = _read_header(fid)
            logger.info("    Reading events ...")
            egi_events = _read_events(fid, egi_info)  # update info + jump
            if egi_info["value_range"] != 0 and egi_info["bits"] != 0:
                cal = egi_info["value_range"] / 2.0 ** egi_info["bits"]
            else:
                cal = 1e-6

        logger.info("    Assembling measurement info ...")

        event_codes = egi_info["event_codes"]
        include = _triage_include_exclude(include, exclude, egi_events, egi_info)
        if egi_info["n_events"] > 0 and not events_as_annotations:
            event_ids = np.arange(len(include)) + 1
            logger.info('    Synthesizing trigger channel "STI 014" ...')
            egi_info["new_trigger"] = _combine_triggers(
                egi_events[[e in include for e in event_codes]], remapping=event_ids
            )
            self.event_id = dict(
                zip([e for e in event_codes if e in include], event_ids)
            )
        else:
            self.event_id = None
            egi_info["new_trigger"] = None
        info = _empty_info(egi_info["samp_rate"])
        my_time = datetime.datetime(
            egi_info["year"],
            egi_info["month"],
            egi_info["day"],
            egi_info["hour"],
            egi_info["minute"],
            egi_info["second"],
        )
        my_timestamp = time.mktime(my_time.timetuple())
        info["meas_date"] = (my_timestamp, 0)
        ch_names = [channel_naming % (i + 1) for i in range(egi_info["n_channels"])]
        cals = np.repeat(cal, len(ch_names))
        ch_names.extend(list(event_codes))
        cals = np.concatenate([cals, np.ones(egi_info["n_events"])])
        if egi_info["new_trigger"] is not None:
            ch_names.append("STI 014")  # our new_trigger
            cals = np.concatenate([cals, [1.0]])
        ch_coil = FIFF.FIFFV_COIL_EEG
        ch_kind = FIFF.FIFFV_EEG_CH
        chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc)
        sti_ch_idx = [
            i
            for i, name in enumerate(ch_names)
            if name.startswith("STI") or name in event_codes
        ]
        for idx in sti_ch_idx:
            chs[idx].update(
                {
                    "unit_mul": FIFF.FIFF_UNITM_NONE,
                    "kind": FIFF.FIFFV_STIM_CH,
                    "coil_type": FIFF.FIFFV_COIL_NONE,
                    "unit": FIFF.FIFF_UNIT_NONE,
                    "loc": np.zeros(12),
                }
            )
        info["chs"] = chs
        info._unlocked = False
        info._update_redundant()
        orig_format = (
            egi_info["orig_format"] if egi_info["orig_format"] != "float" else "single"
        )
        super().__init__(
            info,
            preload,
            orig_format=orig_format,
            filenames=[input_fname],
            last_samps=[egi_info["n_samples"] - 1],
            raw_extras=[egi_info],
            verbose=verbose,
        )
        if events_as_annotations:
            annot = dict(onset=list(), duration=list(), description=list())
            for code, row in zip(egi_info["event_codes"], egi_events):
                if code not in include:
                    continue
                onset = np.where(row)[0] / self.info["sfreq"]
                annot["onset"].extend(onset)
                annot["duration"].extend([0.0] * len(onset))
                annot["description"].extend([code] * len(onset))
            if len(annot["onset"]):
                self.set_annotations(Annotations(**annot))

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a segment of data from a file."""
        egi_info = self._raw_extras[fi]
        dtype = egi_info["dtype"]
        n_chan_read = egi_info["n_channels"] + egi_info["n_events"]
        offset = 36 + egi_info["n_events"] * 4
        trigger_ch = egi_info["new_trigger"]
        _read_segments_file(
            self,
            data,
            idx,
            fi,
            start,
            stop,
            cals,
            mult,
            dtype=dtype,
            n_channels=n_chan_read,
            offset=offset,
            trigger_ch=trigger_ch,
        )

mne/io/egi/egimff.py (new file, 974 lines)
@@ -0,0 +1,974 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

"""EGI NetStation Load Function."""

import datetime
import math
import os.path as op
import re
from collections import OrderedDict
from pathlib import Path

import numpy as np

from ..._fiff.constants import FIFF
from ..._fiff.meas_info import _empty_info, _ensure_meas_date_none_or_dt, create_info
from ..._fiff.proj import setup_proj
from ..._fiff.utils import _create_chs, _mult_cal_one
from ...annotations import Annotations
from ...channels.montage import make_dig_montage
from ...evoked import EvokedArray
from ...utils import _check_fname, _check_option, _soft_import, logger, verbose, warn
from ..base import BaseRaw
from .events import _combine_triggers, _read_events, _triage_include_exclude
from .general import (
    _block_r,
    _extract,
    _get_blocks,
    _get_ep_info,
    _get_gains,
    _get_signalfname,
)

REFERENCE_NAMES = ("VREF", "Vertex Reference")


def _read_mff_header(filepath):
    """Read mff header."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    all_files = _get_signalfname(filepath)
    eeg_file = all_files["EEG"]["signal"]
    eeg_info_file = all_files["EEG"]["info"]

    info_filepath = op.join(filepath, "info.xml")  # add with filepath
    tags = ["mffVersion", "recordTime"]
    version_and_date = _extract(tags, filepath=info_filepath)
    version = ""
    if len(version_and_date["mffVersion"]):
        version = version_and_date["mffVersion"][0]

    fname = op.join(filepath, eeg_file)
    signal_blocks = _get_blocks(fname)
    epochs = _get_ep_info(filepath)
    summaryinfo = dict(eeg_fname=eeg_file, info_fname=eeg_info_file)
    summaryinfo.update(signal_blocks)
    # sanity check and update relevant values
    record_time = version_and_date["recordTime"][0]
    # e.g.,
    # 2018-07-30T10:47:01.021673-04:00
    # 2017-09-20T09:55:44.072000000+01:00
    g = re.match(
        r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.(\d{6}(?:\d{3})?)[+-]\d{2}:\d{2}",  # noqa: E501
        record_time,
    )
    if g is None:
        raise RuntimeError(f"Could not parse recordTime {repr(record_time)}")
    frac = g.groups()[0]
    assert len(frac) in (6, 9) and all(f.isnumeric() for f in frac)  # regex
    div = 1000 if len(frac) == 6 else 1000000
    for key in ("last_samps", "first_samps"):
        # convert from times in µs to samples
        for ei, e in enumerate(epochs[key]):
            if e % div != 0:
                raise RuntimeError(f"Could not parse epoch time {e}")
            epochs[key][ei] = e // div
        epochs[key] = np.array(epochs[key], np.uint64)
        # I guess they refer to times in milliseconds?
        # What we really need to do here is:
        # epochs[key] *= signal_blocks['sfreq']
        # epochs[key] //= 1000
        # But that multiplication risks an overflow, so let's only multiply
        # by what we need to (e.g., a sample rate of 500 means we can multiply
        # by 1 and divide by 2 rather than multiplying by 500 and dividing by
        # 1000)
        numerator = int(signal_blocks["sfreq"])
        denominator = 1000
        this_gcd = math.gcd(numerator, denominator)
        numerator = numerator // this_gcd
        denominator = denominator // this_gcd
        with np.errstate(over="raise"):
            epochs[key] *= numerator
        epochs[key] //= denominator
        # Should be safe to cast to int now, which makes things later not
        # upbroadcast to float
        epochs[key] = epochs[key].astype(np.int64)
    n_samps_block = signal_blocks["samples_block"].sum()
    n_samps_epochs = (epochs["last_samps"] - epochs["first_samps"]).sum()
    bad = (
        n_samps_epochs != n_samps_block
        or not (epochs["first_samps"] < epochs["last_samps"]).all()
        or not (epochs["first_samps"][1:] >= epochs["last_samps"][:-1]).all()
    )
    if bad:
        raise RuntimeError(
            "EGI epoch first/last samps could not be parsed:\n"
            f'{list(epochs["first_samps"])}\n{list(epochs["last_samps"])}'
        )
    summaryinfo.update(epochs)
    # index which samples in raw are actually readable from disk (i.e., not
    # in a skip)
    disk_samps = np.full(epochs["last_samps"][-1], -1)
    offset = 0
    for first, last in zip(epochs["first_samps"], epochs["last_samps"]):
        n_this = last - first
        disk_samps[first:last] = np.arange(offset, offset + n_this)
        offset += n_this
    summaryinfo["disk_samps"] = disk_samps

    # Add the sensor info.
    sensor_layout_file = op.join(filepath, "sensorLayout.xml")
    sensor_layout_obj = parse(sensor_layout_file)
    summaryinfo["device"] = sensor_layout_obj.getElementsByTagName("name")[
        0
    ].firstChild.data
    sensors = sensor_layout_obj.getElementsByTagName("sensor")
    chan_type = list()
    chan_unit = list()
    n_chans = 0
    numbers = list()  # used for identification
    for sensor in sensors:
        sensortype = int(sensor.getElementsByTagName("type")[0].firstChild.data)
        if sensortype in [0, 1]:
            sn = sensor.getElementsByTagName("number")[0].firstChild.data
            sn = sn.encode()
            numbers.append(sn)
            chan_type.append("eeg")
            chan_unit.append("uV")
            n_chans = n_chans + 1
    if n_chans != summaryinfo["n_channels"]:
        raise RuntimeError(
            f"Number of defined channels ({n_chans}) did not match the "
            f"expected channels ({summaryinfo['n_channels']})."
        )

    # Check presence of PNS data
    pns_names = []
    if "PNS" in all_files:
        pns_fpath = op.join(filepath, all_files["PNS"]["signal"])
        pns_blocks = _get_blocks(pns_fpath)
        pns_samples = pns_blocks["samples_block"]
        signal_samples = signal_blocks["samples_block"]
        same_blocks = np.array_equal(
            pns_samples[:-1], signal_samples[:-1]
        ) and pns_samples[-1] in (signal_samples[-1] - np.arange(2))
        if not same_blocks:
            raise RuntimeError(
                "PNS and signals samples did not match:\n"
                f"{list(pns_samples)}\nvs\n{list(signal_samples)}"
            )

        pns_file = op.join(filepath, "pnsSet.xml")
        pns_obj = parse(pns_file)
        sensors = pns_obj.getElementsByTagName("sensor")
        pns_types = []
        pns_units = []
        for sensor in sensors:
            # sensor number:
            # sensor.getElementsByTagName('number')[0].firstChild.data
            name = sensor.getElementsByTagName("name")[0].firstChild.data
            unit_elem = sensor.getElementsByTagName("unit")[0].firstChild
            unit = ""
            if unit_elem is not None:
                unit = unit_elem.data

            if name == "ECG":
                ch_type = "ecg"
            elif "EMG" in name:
                ch_type = "emg"
            else:
                ch_type = "bio"
            pns_types.append(ch_type)
            pns_units.append(unit)
            pns_names.append(name)

        summaryinfo.update(
            pns_types=pns_types,
            pns_units=pns_units,
            pns_fname=all_files["PNS"]["signal"],
            pns_sample_blocks=pns_blocks,
        )
    summaryinfo.update(
        pns_names=pns_names,
        version=version,
        date=version_and_date["recordTime"][0],
        chan_type=chan_type,
        chan_unit=chan_unit,
        numbers=numbers,
    )

    return summaryinfo
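
The gcd reduction above is plain overflow-safe rational scaling; a standalone sketch with hypothetical values (a 500 Hz rate and epoch bounds already reduced to milliseconds):

import math

import numpy as np

sfreq, denominator = 500, 1000
ms = np.array([0, 2000, 10000], dtype=np.uint64)  # hypothetical epoch bounds in ms
g = math.gcd(sfreq, denominator)  # 500
samples = ms * (sfreq // g) // (denominator // g)  # multiply by 1, divide by 2
print(samples)  # [   0 1000 5000], same as ms * 500 // 1000 but without overflow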


def _read_header(input_fname):
    """Obtain the headers from the file package mff.

    Parameters
    ----------
    input_fname : path-like
        Path for the file.

    Returns
    -------
    info : dict
        Main headers set.
    """
    input_fname = str(input_fname)  # cast to str any Paths
    mff_hdr = _read_mff_header(input_fname)
    with open(input_fname + "/signal1.bin", "rb") as fid:
        version = np.fromfile(fid, np.int32, 1)[0]

    # The datetime.strptime %f directive (microseconds) will only accept up
    # to 6 digits. If there are more than six sub-second digits in the
    # provided timestamp string (i.e., because of trailing zeros, as in
    # test_egi_pns.mff), then slice out the first 26 characters (date, time,
    # and 6 sub-second digits) and the last 6 characters (the timezone),
    # piece these together, and assign the result back to mff_hdr['date'].
    if len(mff_hdr["date"]) > 32:
        dt, tz = [mff_hdr["date"][:26], mff_hdr["date"][-6:]]
        mff_hdr["date"] = dt + tz

    time_n = datetime.datetime.strptime(mff_hdr["date"], "%Y-%m-%dT%H:%M:%S.%f%z")

    info = dict(
        version=version,
        meas_dt_local=time_n,
        utc_offset=time_n.strftime("%z"),
        gain=0,
        bits=0,
        value_range=0,
    )
    info.update(
        n_categories=0,
        n_segments=1,
        n_events=0,
        event_codes=[],
        category_names=[],
        category_lengths=[],
        pre_baseline=0,
    )
    info.update(mff_hdr)
    return info


def _get_eeg_calibration_info(filepath, egi_info):
    """Calculate calibration info for EEG channels."""
    gains = _get_gains(op.join(filepath, egi_info["info_fname"]))
    if egi_info["value_range"] != 0 and egi_info["bits"] != 0:
        cals = [egi_info["value_range"] / 2 ** egi_info["bits"]] * len(
            egi_info["chan_type"]
        )
    else:
        cal_scales = {"uV": 1e-6, "V": 1}
        cals = [cal_scales[t] for t in egi_info["chan_unit"]]
    if "gcal" in gains:
        cals *= gains["gcal"]
    return cals
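
A worked instance of the calibration formula above, with hypothetical header values; each ADC unit then maps to ``value_range / 2**bits`` physical units:

value_range, bits = 4096, 12  # hypothetical header fields
cal = value_range / 2**bits
print(cal)  # 1.0, i.e. one unit per least-significant bit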


def _read_locs(filepath, egi_info, channel_naming):
    """Read channel locations."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    fname = op.join(filepath, "coordinates.xml")
    if not op.exists(fname):
        logger.warning("File coordinates.xml not found, not setting channel locations")
        ch_names = [channel_naming % (i + 1) for i in range(egi_info["n_channels"])]
        return ch_names, None
    dig_ident_map = {
        "Left periauricular point": "lpa",
        "Right periauricular point": "rpa",
        "Nasion": "nasion",
    }
    numbers = np.array(egi_info["numbers"])
    coordinates = parse(fname)
    sensors = coordinates.getElementsByTagName("sensor")
    ch_pos = OrderedDict()
    hsp = list()
    nlr = dict()
    ch_names = list()

    for sensor in sensors:
        name_element = sensor.getElementsByTagName("name")[0].firstChild
        num_element = sensor.getElementsByTagName("number")[0].firstChild
        name = (
            channel_naming % int(num_element.data)
            if name_element is None
            else name_element.data
        )
        nr = num_element.data.encode()
        coords = [
            float(sensor.getElementsByTagName(coord)[0].firstChild.data)
            for coord in "xyz"
        ]
        loc = np.array(coords) / 100  # cm -> m
        # create dig entry
        if name in dig_ident_map:
            nlr[dig_ident_map[name]] = loc
        else:
            # id_ is the index of the channel in egi_info['numbers']
            id_ = np.flatnonzero(numbers == nr)
            # if it's not in egi_info['numbers'], it's a headshape point
            if len(id_) == 0:
                hsp.append(loc)
            # not HSP, must be a data or reference channel
            else:
                ch_names.append(name)
                ch_pos[name] = loc
    mon = make_dig_montage(ch_pos=ch_pos, hsp=hsp, **nlr)
    return ch_names, mon


def _add_pns_channel_info(chs, egi_info, ch_names):
    """Add info for PNS channels to channel info dict."""
    for i_ch, ch_name in enumerate(egi_info["pns_names"]):
        idx = ch_names.index(ch_name)
        ch_type = egi_info["pns_types"][i_ch]
        type_to_kind_map = {"ecg": FIFF.FIFFV_ECG_CH, "emg": FIFF.FIFFV_EMG_CH}
        ch_kind = type_to_kind_map.get(ch_type, FIFF.FIFFV_BIO_CH)
        ch_unit = FIFF.FIFF_UNIT_V
        ch_cal = 1e-6
        if egi_info["pns_units"][i_ch] != "uV":
            ch_unit = FIFF.FIFF_UNIT_NONE
            ch_cal = 1.0
        chs[idx].update(
            cal=ch_cal, kind=ch_kind, coil_type=FIFF.FIFFV_COIL_NONE, unit=ch_unit
        )
    return chs


@verbose
def _read_raw_egi_mff(
    input_fname,
    eog=None,
    misc=None,
    include=None,
    exclude=None,
    preload=False,
    channel_naming="E%d",
    *,
    events_as_annotations=True,
    verbose=None,
):
    """Read EGI mff binary as raw object."""
    return RawMff(
        input_fname,
        eog,
        misc,
        include,
        exclude,
        preload,
        channel_naming,
        events_as_annotations=events_as_annotations,
        verbose=verbose,
    )


class RawMff(BaseRaw):
    """RawMff class."""

    _extra_attributes = ("event_id",)

    @verbose
    def __init__(
        self,
        input_fname,
        eog=None,
        misc=None,
        include=None,
        exclude=None,
        preload=False,
        channel_naming="E%d",
        *,
        events_as_annotations=True,
        verbose=None,
    ):
        """Init the RawMff class."""
        input_fname = str(
            _check_fname(
                input_fname,
                "read",
                True,
                "input_fname",
                need_dir=True,
            )
        )
        logger.info(f"Reading EGI MFF Header from {input_fname}...")
        egi_info = _read_header(input_fname)
        if eog is None:
            eog = []
        if misc is None:
            misc = np.where(np.array(egi_info["chan_type"]) != "eeg")[0].tolist()

        logger.info("    Reading events ...")
        egi_events, egi_info, mff_events = _read_events(input_fname, egi_info)
        cals = _get_eeg_calibration_info(input_fname, egi_info)
        logger.info("    Assembling measurement info ...")
        event_codes = egi_info["event_codes"]
        include = _triage_include_exclude(include, exclude, egi_events, egi_info)
        if egi_info["n_events"] > 0 and not events_as_annotations:
            logger.info('    Synthesizing trigger channel "STI 014" ...')
            if all(ch.startswith("D") for ch in include):
                # support the DIN format DIN1, DIN2, ..., DIN9, DI10, DI11, ... DI99,
                # D100, D101, ..., D255 that we get when sending 0-255 triggers on a
                # parallel port.
                events_ids = list()
                for ch in include:
                    while not ch[0].isnumeric():
                        ch = ch[1:]
                    events_ids.append(int(ch))
            else:
                events_ids = np.arange(len(include)) + 1
            egi_info["new_trigger"] = _combine_triggers(
                egi_events[[c in include for c in event_codes]], remapping=events_ids
            )
            self.event_id = dict(
                zip([e for e in event_codes if e in include], events_ids)
            )
            if egi_info["new_trigger"] is not None:
                egi_events = np.vstack([egi_events, egi_info["new_trigger"]])
        else:
            self.event_id = None
            egi_info["new_trigger"] = None
        assert egi_events.shape[1] == egi_info["last_samps"][-1]

        meas_dt_utc = egi_info["meas_dt_local"].astimezone(datetime.timezone.utc)
        info = _empty_info(egi_info["sfreq"])
        info["meas_date"] = _ensure_meas_date_none_or_dt(meas_dt_utc)
        info["utc_offset"] = egi_info["utc_offset"]
        info["device_info"] = dict(type=egi_info["device"])

        # read in the montage, if it exists
        ch_names, mon = _read_locs(input_fname, egi_info, channel_naming)
        # Second: Stim
        ch_names.extend(list(egi_info["event_codes"]))
        n_extra = len(event_codes) + len(misc) + len(eog) + len(egi_info["pns_names"])
        if egi_info["new_trigger"] is not None:
            ch_names.append("STI 014")  # channel for combined events
            n_extra += 1

        # Third: PNS
        ch_names.extend(egi_info["pns_names"])

        cals = np.concatenate([cals, np.ones(n_extra)])
        assert len(cals) == len(ch_names), (len(cals), len(ch_names))

        # Actually create channels as EEG, then update stim and PNS
        ch_coil = FIFF.FIFFV_COIL_EEG
        ch_kind = FIFF.FIFFV_EEG_CH
        chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc)

        sti_ch_idx = [
            i
            for i, name in enumerate(ch_names)
            if name.startswith("STI") or name in event_codes
        ]
        for idx in sti_ch_idx:
            chs[idx].update(
                {
                    "unit_mul": FIFF.FIFF_UNITM_NONE,
                    "cal": cals[idx],
                    "kind": FIFF.FIFFV_STIM_CH,
                    "coil_type": FIFF.FIFFV_COIL_NONE,
                    "unit": FIFF.FIFF_UNIT_NONE,
                }
            )
        chs = _add_pns_channel_info(chs, egi_info, ch_names)
        info["chs"] = chs
        info._unlocked = False
        info._update_redundant()

        if mon is not None:
            info.set_montage(mon, on_missing="ignore")

            ref_idx = np.flatnonzero(np.isin(mon.ch_names, REFERENCE_NAMES))
            if len(ref_idx):
                ref_idx = ref_idx.item()
                ref_coords = info["chs"][int(ref_idx)]["loc"][:3]
                for chan in info["chs"]:
                    if chan["kind"] == FIFF.FIFFV_EEG_CH:
                        chan["loc"][3:6] = ref_coords

        file_bin = op.join(input_fname, egi_info["eeg_fname"])
        egi_info["egi_events"] = egi_events

        # Check how many channels to read are from EEG
        keys = ("eeg", "sti", "pns")
        idx = dict()
        idx["eeg"] = np.where([ch["kind"] == FIFF.FIFFV_EEG_CH for ch in chs])[0]
        idx["sti"] = np.where([ch["kind"] == FIFF.FIFFV_STIM_CH for ch in chs])[0]
        idx["pns"] = np.where(
            [
                ch["kind"] in (FIFF.FIFFV_ECG_CH, FIFF.FIFFV_EMG_CH, FIFF.FIFFV_BIO_CH)
                for ch in chs
            ]
        )[0]
        # By construction this should always be true, but check anyway
        if not np.array_equal(
            np.concatenate([idx[key] for key in keys]), np.arange(len(chs))
        ):
            raise ValueError(
                "Currently interlacing EEG and PNS channels is not supported"
            )
        egi_info["kind_bounds"] = [0]
        for key in keys:
            egi_info["kind_bounds"].append(len(idx[key]))
        egi_info["kind_bounds"] = np.cumsum(egi_info["kind_bounds"])
        assert egi_info["kind_bounds"][0] == 0
        assert egi_info["kind_bounds"][-1] == info["nchan"]
        first_samps = [0]
        last_samps = [egi_info["last_samps"][-1] - 1]

        annot = dict(onset=list(), duration=list(), description=list())

        if len(idx["pns"]):
            # PNS Data is present and should be read:
            egi_info["pns_filepath"] = op.join(input_fname, egi_info["pns_fname"])
            # Check for PNS bug immediately
            pns_samples = np.sum(egi_info["pns_sample_blocks"]["samples_block"])
            eeg_samples = np.sum(egi_info["samples_block"])
            if pns_samples == eeg_samples - 1:
                warn("This file has the EGI PSG sample bug")
                annot["onset"].append(last_samps[-1] / egi_info["sfreq"])
                annot["duration"].append(1 / egi_info["sfreq"])
                annot["description"].append("BAD_EGI_PSG")
            elif pns_samples != eeg_samples:
                raise RuntimeError(
                    f"PNS samples ({pns_samples}) did not match EEG samples "
                    f"({eeg_samples})."
                )

        super().__init__(
            info,
            preload=preload,
            orig_format="single",
            filenames=[file_bin],
            first_samps=first_samps,
            last_samps=last_samps,
            raw_extras=[egi_info],
            verbose=verbose,
        )

        # Annotate acquisition skips
        for first, prev_last in zip(
            egi_info["first_samps"][1:], egi_info["last_samps"][:-1]
        ):
            gap = first - prev_last
            assert gap >= 0
            if gap:
                annot["onset"].append((prev_last - 0.5) / egi_info["sfreq"])
                annot["duration"].append(gap / egi_info["sfreq"])
                annot["description"].append("BAD_ACQ_SKIP")

        # create annotations from events
        if events_as_annotations:
            for code, samples in mff_events.items():
                if code not in include:
                    continue
                annot["onset"].extend(np.array(samples) / egi_info["sfreq"])
                annot["duration"].extend([0.0] * len(samples))
                annot["description"].extend([code] * len(samples))

        if len(annot["onset"]):
            self.set_annotations(Annotations(**annot))

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a chunk of data."""
        logger.debug(f"Reading MFF {start:6d} ... {stop:6d} ...")
        dtype = "<f4"  # Data read in four byte floats.

        egi_info = self._raw_extras[fi]
        one = np.zeros((egi_info["kind_bounds"][-1], stop - start))

        # info about the binary file structure
        n_channels = egi_info["n_channels"]
        samples_block = egi_info["samples_block"]

        # Check how many channels to read are from each type
        bounds = egi_info["kind_bounds"]
        if isinstance(idx, slice):
            idx = np.arange(idx.start, idx.stop)
        eeg_out = np.where(idx < bounds[1])[0]
        eeg_one = idx[eeg_out, np.newaxis]
        eeg_in = idx[eeg_out]
        stim_out = np.where((idx >= bounds[1]) & (idx < bounds[2]))[0]
        stim_one = idx[stim_out]
        stim_in = idx[stim_out] - bounds[1]
        pns_out = np.where((idx >= bounds[2]) & (idx < bounds[3]))[0]
        pns_in = idx[pns_out] - bounds[2]
        pns_one = idx[pns_out, np.newaxis]
        del eeg_out, stim_out, pns_out

        # take into account events (already extended to correct size)
        one[stim_one, :] = egi_info["egi_events"][stim_in, start:stop]

        # Convert start and stop to limits in terms of the data
        # actually on disk, plus an indexer (disk_use_idx) that populates
        # the potentially larger `data` with it, taking skips into account
        disk_samps = egi_info["disk_samps"][start:stop]
        disk_use_idx = np.where(disk_samps > -1)[0]
        # short circuit in case we don't need any samples
        if not len(disk_use_idx):
            _mult_cal_one(data, one, idx, cals, mult)
            return

        start = disk_samps[disk_use_idx[0]]
        stop = disk_samps[disk_use_idx[-1]] + 1
        assert len(disk_use_idx) == stop - start

        # Get starting/stopping block/samples
        block_samples_offset = np.cumsum(samples_block)
        offset_blocks = np.sum(block_samples_offset <= start)
        offset_samples = start - (
            block_samples_offset[offset_blocks - 1] if offset_blocks > 0 else 0
        )

        # TODO: Refactor this reading with the PNS reading in a single function
        # (DRY)
        samples_to_read = stop - start
        with open(self.filenames[fi], "rb", buffering=0) as fid:
            # Go to starting block
            current_block = 0
            current_block_info = None
            current_data_sample = 0
            while current_block < offset_blocks:
                this_block_info = _block_r(fid)
                if this_block_info is not None:
                    current_block_info = this_block_info
                fid.seek(current_block_info["block_size"], 1)
                current_block += 1

            # Start reading samples
            while samples_to_read > 0:
                logger.debug(f"    Reading from block {current_block}")
                this_block_info = _block_r(fid)
                current_block += 1
                if this_block_info is not None:
                    current_block_info = this_block_info

                to_read = current_block_info["nsamples"] * current_block_info["nc"]
                block_data = np.fromfile(fid, dtype, to_read)
                block_data = block_data.reshape(n_channels, -1, order="C")

                # Compute indexes
                samples_read = block_data.shape[1]
                logger.debug(f"    Read {samples_read} samples")
                logger.debug(f"    Offset {offset_samples} samples")
                if offset_samples > 0:
                    # First block read, skip to the offset:
                    block_data = block_data[:, offset_samples:]
                    samples_read = samples_read - offset_samples
                    offset_samples = 0
                if samples_to_read < samples_read:
                    # Last block to read, skip the last samples
                    block_data = block_data[:, :samples_to_read]
                    samples_read = samples_to_read
                logger.debug(f"    Keep {samples_read} samples")

                s_start = current_data_sample
                s_end = s_start + samples_read

                one[eeg_one, disk_use_idx[s_start:s_end]] = block_data[eeg_in]
                samples_to_read = samples_to_read - samples_read
                current_data_sample = current_data_sample + samples_read

        if len(pns_one) > 0:
            # PNS Data is present and should be read:
            pns_filepath = egi_info["pns_filepath"]
            pns_info = egi_info["pns_sample_blocks"]
            n_channels = pns_info["n_channels"]
            samples_block = pns_info["samples_block"]

            # Get starting/stopping block/samples
            block_samples_offset = np.cumsum(samples_block)
            offset_blocks = np.sum(block_samples_offset < start)
            offset_samples = start - (
                block_samples_offset[offset_blocks - 1] if offset_blocks > 0 else 0
            )

            samples_to_read = stop - start
            with open(pns_filepath, "rb", buffering=0) as fid:
                # Check file size
                fid.seek(0, 2)
                file_size = fid.tell()
                fid.seek(0)
                # Go to starting block
                current_block = 0
                current_block_info = None
                current_data_sample = 0
                while current_block < offset_blocks:
                    this_block_info = _block_r(fid)
                    if this_block_info is not None:
                        current_block_info = this_block_info
                    fid.seek(current_block_info["block_size"], 1)
                    current_block += 1

                # Start reading samples
                while samples_to_read > 0:
                    if samples_to_read == 1 and fid.tell() == file_size:
                        # We are in the presence of the EEG bug
                        # fill with zeros and break the loop
                        one[pns_one, -1] = 0
                        break

                    this_block_info = _block_r(fid)
                    if this_block_info is not None:
                        current_block_info = this_block_info

                    to_read = current_block_info["nsamples"] * current_block_info["nc"]
                    block_data = np.fromfile(fid, dtype, to_read)
                    block_data = block_data.reshape(n_channels, -1, order="C")

                    # Compute indexes
                    samples_read = block_data.shape[1]
                    if offset_samples > 0:
                        # First block read, skip to the offset:
                        block_data = block_data[:, offset_samples:]
                        samples_read = samples_read - offset_samples
                        offset_samples = 0

                    if samples_to_read < samples_read:
                        # Last block to read, skip the last samples
                        block_data = block_data[:, :samples_to_read]
                        samples_read = samples_to_read

                    s_start = current_data_sample
                    s_end = s_start + samples_read

                    one[pns_one, disk_use_idx[s_start:s_end]] = block_data[pns_in]
                    samples_to_read = samples_to_read - samples_read
                    current_data_sample = current_data_sample + samples_read

        # do the calibration
        _mult_cal_one(data, one, idx, cals, mult)


@verbose
def read_evokeds_mff(
    fname, condition=None, channel_naming="E%d", baseline=None, verbose=None
):
    """Read averaged MFF file as EvokedArray or list of EvokedArray.

    Parameters
    ----------
    fname : path-like
        File path to averaged MFF file. Should end in ``.mff``.
    condition : int or str | list of int or str | None
        The index (indices) or category (categories) from which to read in
        data. Averaged MFF files can contain separate averages for different
        categories. These can be indexed by the block number or the category
        name. If ``condition`` is a list or None, a list of EvokedArray objects
        is returned.
    channel_naming : str
        Channel naming convention for EEG channels. Defaults to 'E%%d'
        (resulting in channel names 'E1', 'E2', 'E3'...).
    baseline : None (default) or tuple of length 2
        The time interval to apply baseline correction. If None do not apply
        it. If baseline is (a, b) the interval is between "a (s)" and "b (s)".
        If a is None the beginning of the data is used and if b is None then b
        is set to the end of the interval. If baseline is equal to (None, None)
        the whole time interval is used. Correction is applied by computing the
        mean of the baseline period and subtracting it from the data. The
        baseline (a, b) includes both endpoints, i.e. all timepoints t such
        that a <= t <= b.
    %(verbose)s

    Returns
    -------
    evoked : EvokedArray or list of EvokedArray
        The evoked dataset(s); one EvokedArray if condition is int or str,
        or list of EvokedArray if condition is None or list.

    Raises
    ------
    ValueError
        If ``fname`` has file extension other than '.mff'.
    ValueError
        If the MFF file specified by ``fname`` is not averaged.
    ValueError
        If no categories.xml file in MFF directory specified by ``fname``.

    See Also
    --------
    Evoked, EvokedArray, create_info

    Notes
    -----
    .. versionadded:: 0.22
    """
    mffpy = _import_mffpy()
    # Confirm `fname` is a path to an MFF file
    fname = Path(fname)  # should be replaced with _check_fname
    if not fname.suffix == ".mff":
        raise ValueError('fname must be an MFF file with extension ".mff".')
    # Confirm the input MFF is averaged
    mff = mffpy.Reader(fname)
    try:
        flavor = mff.mff_flavor
    except AttributeError:  # < 6.3
        flavor = mff.flavor
    if flavor not in ("averaged", "segmented"):  # old, new names
        raise ValueError(
            f"{fname} is a {flavor} MFF file. "
            "fname must be the path to an averaged MFF file."
        )
    # Check for categories.xml file
    if "categories.xml" not in mff.directory.listdir():
        raise ValueError(
            "categories.xml not found in MFF directory. "
            f"{fname} may not be an averaged MFF file."
        )
    return_list = True
    if condition is None:
        categories = mff.categories.categories
        condition = list(categories.keys())
    elif not isinstance(condition, list):
        condition = [condition]
        return_list = False
    logger.info(f"Reading {len(condition)} evoked datasets from {fname} ...")
    output = [
        _read_evoked_mff(
            fname, c, channel_naming=channel_naming, verbose=verbose
        ).apply_baseline(baseline)
        for c in condition
    ]
    return output if return_list else output[0]
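
For orientation, a minimal usage sketch of ``read_evokeds_mff`` (the path ``averaged.mff`` and the category name are hypothetical placeholders):

import mne

# All categories as a list of EvokedArray ...
evokeds = mne.io.read_evokeds_mff("averaged.mff")
# ... or one category by name, with baseline correction up to time zero.
evoked = mne.io.read_evokeds_mff(
    "averaged.mff", condition="Category 1", baseline=(None, 0)
)
print(evoked.comment, evoked.nave)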


def _read_evoked_mff(fname, condition, channel_naming="E%d", verbose=None):
    """Read evoked data from MFF file."""
    import mffpy

    egi_info = _read_header(fname)
    mff = mffpy.Reader(fname)
    categories = mff.categories.categories

    if isinstance(condition, str):
        # Condition is interpreted as category name
        category = _check_option(
            "condition", condition, categories, extra="provided as category name"
        )
        epoch = mff.epochs[category]
    elif isinstance(condition, int):
        # Condition is interpreted as epoch index
        try:
            epoch = mff.epochs[condition]
        except IndexError:
            raise ValueError(
                f'"condition" parameter ({condition}), provided '
                "as epoch index, is out of range for available "
                f"epochs ({len(mff.epochs)})."
            )
        category = epoch.name
    else:
        raise TypeError('"condition" parameter must be either int or str.')

    # Read in signals from the target epoch
    data = mff.get_physical_samples_from_epoch(epoch)
    eeg_data, t0 = data["EEG"]
    if "PNSData" in data:
        pns_data, t0 = data["PNSData"]
        all_data = np.vstack((eeg_data, pns_data))
        ch_types = egi_info["chan_type"] + egi_info["pns_types"]
    else:
        all_data = eeg_data
        ch_types = egi_info["chan_type"]
    all_data *= 1e-6  # convert to volts

    # Load metadata into info object
    # Exclude info['meas_date'] because record time info in
    # averaged MFF is the time of the averaging, not true record time.
    ch_names, mon = _read_locs(fname, egi_info, channel_naming)
    ch_names.extend(egi_info["pns_names"])
    info = create_info(ch_names, mff.sampling_rates["EEG"], ch_types)
    with info._unlock():
        info["device_info"] = dict(type=egi_info["device"])
        info["nchan"] = sum(mff.num_channels.values())

    # Add individual channel info
    # Get calibration info for EEG channels
    cals = _get_eeg_calibration_info(fname, egi_info)
    # Initialize calibration for PNS channels, will be updated later
    cals = np.concatenate([cals, np.repeat(1, len(egi_info["pns_names"]))])
    ch_coil = FIFF.FIFFV_COIL_EEG
    ch_kind = FIFF.FIFFV_EEG_CH
    chs = _create_chs(ch_names, cals, ch_coil, ch_kind, (), (), (), ())
    # Update PNS channel info
    chs = _add_pns_channel_info(chs, egi_info, ch_names)
    with info._unlock():
        info["chs"] = chs
    if mon is not None:
        info.set_montage(mon, on_missing="ignore")

    # Add bad channels to info
    info["description"] = category
    try:
        channel_status = categories[category][0]["channelStatus"]
    except KeyError:
        warn(
            f"Channel status data not found for condition {category}. "
            "No channels will be marked as bad.",
            category=UserWarning,
        )
        channel_status = None
    bads = []
    if channel_status:
        for entry in channel_status:
            if entry["exclusion"] == "badChannels":
                if entry["signalBin"] == 1:
                    # Add bad EEG channels
                    for ch in entry["channels"]:
                        bads.append(ch_names[ch - 1])
                elif entry["signalBin"] == 2:
                    # Add bad PNS channels
                    for ch in entry["channels"]:
                        bads.append(egi_info["pns_names"][ch - 1])
    info["bads"] = bads

    # Add EEG reference to info
    try:
        fp = mff.directory.filepointer("history")
    except (ValueError, FileNotFoundError):  # old (<=0.6.3) vs new mffpy
        pass
    else:
        with fp:
            history = mffpy.XML.from_file(fp)
        for entry in history.entries:
            if entry["method"] == "Montage Operations Tool":
                if "Average Reference" in entry["settings"]:
                    # Average reference has been applied
                    _, info = setup_proj(info)

    # Get nave from categories.xml
    try:
        nave = categories[category][0]["keys"]["#seg"]["data"]
    except KeyError:
        warn(
            f"Number of averaged epochs not found for condition {category}. "
            "nave will default to 1.",
            category=UserWarning,
        )
        nave = 1

    # Let tmin default to 0
    return EvokedArray(
        all_data, info, tmin=0.0, comment=category, nave=nave, verbose=verbose
    )


def _import_mffpy(why="read averaged .mff files"):
    """Import and return module mffpy."""
    try:
        import mffpy
    except ImportError as exp:
        msg = f"mffpy is required to {why}, got:\n{exp}"
        raise ImportError(msg)

    return mffpy

mne/io/egi/events.py (new file, 207 lines)
@@ -0,0 +1,207 @@
#
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from datetime import datetime
from glob import glob
from os.path import basename, join, splitext

import numpy as np

from ...utils import _soft_import, _validate_type, logger, warn


def _read_events(input_fname, info):
    """Read events for the record.

    Parameters
    ----------
    input_fname : path-like
        The file path.
    info : dict
        Header info array.
    """
    n_samples = info["last_samps"][-1]
    mff_events, event_codes = _read_mff_events(input_fname, info["sfreq"])
    info["n_events"] = len(event_codes)
    info["event_codes"] = event_codes
    events = np.zeros([info["n_events"], info["n_segments"] * n_samples])
    for n, event in enumerate(event_codes):
        for i in mff_events[event]:
            if (i < 0) or (i >= events.shape[1]):
                continue
            events[n][i] = n + 1
    return events, info, mff_events


def _read_mff_events(filename, sfreq):
    """Extract the events.

    Parameters
    ----------
    filename : path-like
        File path.
    sfreq : float
        The sampling frequency.
    """
    orig = {}
    for xml_file in glob(join(filename, "*.xml")):
        xml_type = splitext(basename(xml_file))[0]
        orig[xml_type] = _parse_xml(xml_file)
    xml_files = orig.keys()
    xml_events = [x for x in xml_files if x[:7] == "Events_"]
    for item in orig["info"]:
        if "recordTime" in item:
            start_time = _ns2py_time(item["recordTime"])
            break
    markers = []
    code = []
    for xml in xml_events:
        for event in orig[xml][2:]:
            event_start = _ns2py_time(event["beginTime"])
            start = (event_start - start_time).total_seconds()
            if event["code"] not in code:
                code.append(event["code"])
            marker = {
                "name": event["code"],
                "start": start,
                "start_sample": int(np.fix(start * sfreq)),
                "end": start + float(event["duration"]) / 1e9,
                "chan": None,
            }
            markers.append(marker)
    events_tims = dict()
    for ev in code:
        trig_samp = list(
            c["start_sample"] for n, c in enumerate(markers) if c["name"] == ev
        )
        events_tims.update({ev: trig_samp})
    return events_tims, code


def _parse_xml(xml_file):
    """Parse XML file."""
    defusedxml = _soft_import("defusedxml", "reading EGI MFF data")
    xml = defusedxml.ElementTree.parse(xml_file)
    root = xml.getroot()
    return _xml2list(root)


def _xml2list(root):
    """Parse XML item."""
    output = []
    for element in root:
        if len(element) > 0:
            if element[0].tag != element[-1].tag:
                output.append(_xml2dict(element))
            else:
                output.append(_xml2list(element))

        elif element.text:
            text = element.text.strip()
            if text:
                tag = _ns(element.tag)
                output.append({tag: text})

    return output


def _ns(s):
    """Remove namespace, but only if there is a namespace to begin with."""
    if "}" in s:
        return "}".join(s.split("}")[1:])
    else:
        return s


def _xml2dict(root):
    """Use functions instead of Class.

    remove namespace based on
    http://stackoverflow.com/questions/2148119
    """
    output = {}
    if root.items():
        output.update(dict(root.items()))

    for element in root:
        if len(element) > 0:
            if len(element) == 1 or element[0].tag != element[1].tag:
                one_dict = _xml2dict(element)
            else:
                one_dict = {_ns(element[0].tag): _xml2list(element)}

            if element.items():
                one_dict.update(dict(element.items()))
            output.update({_ns(element.tag): one_dict})

        elif element.items():
            output.update({_ns(element.tag): dict(element.items())})

        else:
            output.update({_ns(element.tag): element.text})
    return output


def _ns2py_time(nstime):
    """Parse times."""
    nsdate = nstime[0:10]
    nstime0 = nstime[11:26]
    nstime00 = nsdate + " " + nstime0
    pytime = datetime.strptime(nstime00, "%Y-%m-%d %H:%M:%S.%f")
    return pytime


def _combine_triggers(data, remapping=None):
    """Combine binary triggers."""
    new_trigger = np.zeros(data.shape[1])
    if data.astype(bool).sum(axis=0).max() > 1:  # ensure no overlaps
        logger.info(
            "    Found multiple events at the same time "
            "sample. Cannot create trigger channel."
        )
        return
    if remapping is None:
        remapping = np.arange(len(data)) + 1
    for d, event_id in zip(data, remapping):
        idx = d.nonzero()
        if np.any(idx):
            new_trigger[idx] += event_id
    return new_trigger
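
A tiny worked example of what ``_combine_triggers`` computes, using two hypothetical binary trigger rows with non-overlapping pulses:

import numpy as np

data = np.array(
    [
        [0, 1, 0, 0, 0, 0],  # event channel 1: pulse at sample 1
        [0, 0, 0, 0, 1, 0],  # event channel 2: pulse at sample 4
    ]
)
# With remapping=[10, 20] the combined channel would be:
# [0, 10, 0, 0, 20, 0]
# Overlapping pulses would instead log a message and return None.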


def _triage_include_exclude(include, exclude, egi_events, egi_info):
    """Triage include and exclude."""
    _validate_type(exclude, (list, None), "exclude")
    _validate_type(include, (list, None), "include")
    event_codes = list(egi_info["event_codes"])
    for name, lst in dict(exclude=exclude, include=include).items():
        for ii, item in enumerate(lst or []):
            what = f"{name}[{ii}]"
            _validate_type(item, str, what)
            if item not in event_codes:
                raise ValueError(
                    f"Could not find event channel named {what}={repr(item)}"
                )
    if include is None:
        if exclude is None:
            default_exclude = ["sync", "TREV"]
            exclude = [code for code in default_exclude if code in event_codes]
            for code, event in zip(event_codes, egi_events):
                if event.sum() < 1 and code:
                    exclude.append(code)
            if (
                len(exclude) == len(event_codes)
                and egi_info["n_events"]
                and set(exclude) - set(default_exclude)
            ):
                warn(
                    "Did not find any event code with at least one event.",
                    RuntimeWarning,
                )
        include = [k for k in event_codes if k not in exclude]
        del exclude
    excl_events = ", ".join(k for k in event_codes if k not in include)
    logger.info(f"    Excluding events {{{excl_events}}} ...")
    return include

mne/io/egi/general.py (new file, 192 lines)
@@ -0,0 +1,192 @@
#
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os
import re

import numpy as np

from ...utils import _pl, _soft_import


def _extract(tags, filepath=None, obj=None):
    """Extract info from XML."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    if obj is not None:
        fileobj = obj
    elif filepath is not None:
        fileobj = parse(filepath)
    else:
        raise ValueError("There is no object or file to extract data from")
    infoxml = dict()
    for tag in tags:
        value = fileobj.getElementsByTagName(tag)
        infoxml[tag] = []
        for i in range(len(value)):
            infoxml[tag].append(value[i].firstChild.data)
    return infoxml


def _get_gains(filepath):
    """Parse gains."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    file_obj = parse(filepath)
    objects = file_obj.getElementsByTagName("calibration")
    gains = dict()
    for ob in objects:
        value = ob.getElementsByTagName("type")
        if value[0].firstChild.data == "GCAL":
            data_g = _extract(["ch"], obj=ob)["ch"]
            gains.update(gcal=np.asarray(data_g, dtype=np.float64))
        elif value[0].firstChild.data == "ICAL":
            data_g = _extract(["ch"], obj=ob)["ch"]
            gains.update(ical=np.asarray(data_g, dtype=np.float64))
    return gains


def _get_ep_info(filepath):
    """Get epoch info."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    epochfile = filepath + "/epochs.xml"
    epochlist = parse(epochfile)
    epochs = epochlist.getElementsByTagName("epoch")
    keys = ("first_samps", "last_samps", "first_blocks", "last_blocks")
    epoch_info = {key: list() for key in keys}
    for epoch in epochs:
        ep_begin = int(epoch.getElementsByTagName("beginTime")[0].firstChild.data)
        ep_end = int(epoch.getElementsByTagName("endTime")[0].firstChild.data)
        first_block = int(epoch.getElementsByTagName("firstBlock")[0].firstChild.data)
        last_block = int(epoch.getElementsByTagName("lastBlock")[0].firstChild.data)
        epoch_info["first_samps"].append(ep_begin)
        epoch_info["last_samps"].append(ep_end)
        epoch_info["first_blocks"].append(first_block)
        epoch_info["last_blocks"].append(last_block)
    # Don't turn into ndarray here, keep native int because it can deal with
    # huge numbers (could use np.uint64 but it's more work)
    return epoch_info


def _get_blocks(filepath):
    """Get info from meta data blocks."""
    binfile = os.path.join(filepath)
    n_blocks = 0
    samples_block = []
    header_sizes = []
    n_channels = []
    sfreq = []
    # Meta data consists of (each field a 4-byte int32, see _block_r):
    # * 1 int32 of flag (1 for meta data, 0 for data)
    # * 1 int32 of header size
    # * 1 int32 of block size
    # * 1 int32 of n_channels
    # * n_channels int32s of offsets
    # * n_channels int32s of sigfreqs (sampling rate and depth packed together)
    with open(binfile, "rb") as fid:
        fid.seek(0, 2)  # go to end of file
        file_length = fid.tell()
        block_size = file_length
        fid.seek(0)
        position = 0
        while position < file_length:
            block = _block_r(fid)
            if block is None:
                samples_block.append(samples_block[n_blocks - 1])
                n_blocks += 1
                fid.seek(block_size, 1)
                position = fid.tell()
                continue
            block_size = block["block_size"]
            header_size = block["header_size"]
            header_sizes.append(header_size)
            samples_block.append(block["nsamples"])
            n_blocks += 1
            fid.seek(block_size, 1)
            sfreq.append(block["sfreq"])
            n_channels.append(block["nc"])
            position = fid.tell()

    if any([n != n_channels[0] for n in n_channels]):
        raise RuntimeError("Not all blocks have the same number of channels.")
    if any([f != sfreq[0] for f in sfreq]):
        raise RuntimeError("Not all blocks have the same sampling frequency.")
    if len(samples_block) < 1:
        raise RuntimeError("There seems to be no data")
    samples_block = np.array(samples_block)
    signal_blocks = dict(
        n_channels=n_channels[0],
        sfreq=sfreq[0],
        n_blocks=n_blocks,
        samples_block=samples_block,
        header_sizes=header_sizes,
    )
    return signal_blocks


def _get_signalfname(filepath):
    """Get filenames."""
    _soft_import("defusedxml", "reading EGI MFF data")
    from defusedxml.minidom import parse

    listfiles = os.listdir(filepath)
    binfiles = list(
        f for f in listfiles if "signal" in f and f[-4:] == ".bin" and f[0] != "."
    )
    all_files = {}
    infofiles = list()
    for binfile in binfiles:
        bin_num_str = re.search(r"\d+", binfile).group()
        infofile = "info" + bin_num_str + ".xml"
        infofiles.append(infofile)
        infobjfile = os.path.join(filepath, infofile)
        infobj = parse(infobjfile)
        if len(infobj.getElementsByTagName("EEG")):
            signal_type = "EEG"
        elif len(infobj.getElementsByTagName("PNSData")):
            signal_type = "PNS"
        all_files[signal_type] = {
            "signal": f"signal{bin_num_str}.bin",
            "info": infofile,
        }
    if "EEG" not in all_files:
        infofiles_str = "\n".join(infofiles)
        raise FileNotFoundError(
            f"Could not find any EEG data in the {len(infofiles)} file{_pl(infofiles)} "
            f"found in {filepath}:\n{infofiles_str}"
        )
    return all_files


def _block_r(fid):
    """Read meta data."""
    if np.fromfile(fid, dtype=np.dtype("i4"), count=1).item() != 1:  # not meta
        return None
    header_size = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item()
    block_size = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item()
    hl = int(block_size / 4)
    nc = np.fromfile(fid, dtype=np.dtype("i4"), count=1).item()
    nsamples = int(hl / nc)
    np.fromfile(fid, dtype=np.dtype("i4"), count=nc)  # sigoffset
    sigfreq = np.fromfile(fid, dtype=np.dtype("i4"), count=nc)
    depth = sigfreq[0] & 0xFF
    if depth != 32:
        raise ValueError("I do not know how to read this MFF (depth != 32)")
    sfreq = sigfreq[0] >> 8
    count = int(header_size / 4 - (4 + 2 * nc))
    np.fromfile(fid, dtype=np.dtype("i4"), count=count)  # sigoffset
    block = dict(
        nc=nc,
        hl=hl,
        nsamples=nsamples,
        block_size=block_size,
        header_size=header_size,
        sfreq=sfreq,
    )
    return block
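
Each entry of the ``sigfreqs`` array packs the sampling rate and the bit depth into one int32; the ``& 0xFF`` / ``>> 8`` pair above unpacks it. A worked instance with hypothetical values:

sfreq, depth = 500, 32
packed = (sfreq << 8) | depth  # 128032 as stored in the block header
assert packed & 0xFF == 32  # low byte: bit depth
assert packed >> 8 == 500  # remaining bits: sampling rate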