initial commit
8  mne/export/__init__.py  Normal file
@@ -0,0 +1,8 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

"""Functions for exporting data to non-FIF formats."""
import lazy_loader as lazy

(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__)
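For orientation: `lazy.attach_stub` defers the submodule imports until a name is first accessed, keeping `import mne` cheap. A rough sketch of what the returned `__getattr__` does (simplified; not the actual lazy_loader implementation and not part of this commit):

import importlib

def __getattr__(name):  # simplified sketch of the lazy-loading behavior
    _targets = {
        "export_raw": "mne.export._export",
        "export_epochs": "mne.export._export",
        "export_evokeds": "mne.export._export",
        "export_evokeds_mff": "mne.export._egimff",
    }
    if name in _targets:
        return getattr(importlib.import_module(_targets[name]), name)
    raise AttributeError(f"module 'mne.export' has no attribute {name!r}")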
3  mne/export/__init__.pyi  Normal file
@@ -0,0 +1,3 @@
__all__ = ["export_epochs", "export_evokeds", "export_evokeds_mff", "export_raw"]
from ._egimff import export_evokeds_mff
from ._export import export_epochs, export_evokeds, export_raw
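The stub file is what `attach_stub` reads to learn which names to expose; the same four functions can also be imported eagerly (a usage sketch, not part of the commit):

from mne.export import export_epochs, export_evokeds, export_evokeds_mff, export_raw

# the convenience methods raw.export(...) and epochs.export(...) forward to
# export_raw / export_epochs, so either entry point ends up in this module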
158  mne/export/_brainvision.py  Normal file
@@ -0,0 +1,158 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os
from pathlib import Path

import numpy as np

from mne.channels.channels import _unit2human
from mne.io.constants import FIFF
from mne.utils import _check_pybv_installed, warn

_check_pybv_installed()
from pybv import write_brainvision  # noqa: E402


def _export_mne_raw(*, raw, fname, events=None, overwrite=False):
    """Export raw data from MNE-Python.

    Parameters
    ----------
    raw : mne.io.Raw
        The raw data to export.
    fname : str | pathlib.Path
        The name of the file where raw data will be exported to. Must end with
        ``".vhdr"``, and accompanying *.vmrk* and *.eeg* files will be written inside
        the same directory.
    events : np.ndarray | None
        Events to be written to the marker file (*.vmrk*). If array, must be in
        `MNE-Python format <https://mne.tools/stable/glossary.html#term-events>`_. If
        ``None`` (default), events will be written based on ``raw.annotations``.
    overwrite : bool
        Whether or not to overwrite existing data. Defaults to ``False``.

    """
    # prepare file location
    if not str(fname).endswith(".vhdr"):
        raise ValueError("`fname` must have the '.vhdr' extension for BrainVision.")
    fname = Path(fname)
    folder_out = fname.parents[0]
    fname_base = fname.stem

    # prepare data from raw
    data = raw.get_data()  # gets data starting from raw.first_samp
    sfreq = raw.info["sfreq"]  # in Hz
    meas_date = raw.info["meas_date"]  # datetime.datetime
    ch_names = raw.ch_names

    # write voltage units as micro-volts and all other units without scaling
    # write units that we don't know as n/a
    unit = []
    for ch in raw.info["chs"]:
        if ch["unit"] == FIFF.FIFF_UNIT_V:
            unit.append("µV")
        elif ch["unit"] == FIFF.FIFF_UNIT_CEL:
            unit.append("°C")
        else:
            unit.append(_unit2human.get(ch["unit"], "n/a"))
    unit = [u if u != "NA" else "n/a" for u in unit]

    # enforce conversion to float32 format
    # XXX: Could add a feature that checks data and optimizes `unit`, `resolution`, and
    # `format` so that raw.orig_format could be retained if reasonable.
    if raw.orig_format != "single":
        warn(
            f"Encountered data in '{raw.orig_format}' format. Converting to float32.",
            RuntimeWarning,
        )

    fmt = "binary_float32"
    resolution = 0.1

    # handle events
    # if we got an ndarray, this is in MNE-Python format
    msg = "`events` must be None or array in MNE-Python format."
    if events is not None:
        # subtract raw.first_samp because brainvision marks events starting from the
        # first available data point and ignores the raw.first_samp
        assert isinstance(events, np.ndarray), msg
        assert events.ndim == 2, msg
        assert events.shape[-1] == 3, msg
        events[:, 0] -= raw.first_samp
        events = events[:, [0, 2]]  # reorder for pybv required order
    else:  # else, prepare pybv style events from raw.annotations
        events = _mne_annots2pybv_events(raw)

    # no information about reference channels in mne currently
    ref_ch_names = None

    # write to BrainVision
    write_brainvision(
        data=data,
        sfreq=sfreq,
        ch_names=ch_names,
        ref_ch_names=ref_ch_names,
        fname_base=fname_base,
        folder_out=folder_out,
        overwrite=overwrite,
        events=events,
        resolution=resolution,
        unit=unit,
        fmt=fmt,
        meas_date=meas_date,
    )


def _mne_annots2pybv_events(raw):
    """Convert mne Annotations to pybv events."""
    events = []
    for annot in raw.annotations:
        # handle onset and duration: seconds to sample, relative to
        # raw.first_samp / raw.first_time
        onset = annot["onset"] - raw.first_time
        onset = raw.time_as_index(onset).astype(int)[0]
        duration = int(annot["duration"] * raw.info["sfreq"])

        # triage type and description
        # defaults to type="Comment" and the full description
        etype = "Comment"
        description = annot["description"]
        for start in ["Stimulus/S", "Response/R", "Comment/"]:
            if description.startswith(start):
                etype = start.split("/")[0]
                description = description.replace(start, "")
                break

        if etype in ["Stimulus", "Response"] and description.strip().isdigit():
            description = int(description.strip())
        else:
            # if cannot convert to int, we must use this as "Comment"
            etype = "Comment"

        event_dict = dict(
            onset=onset,  # in samples
            duration=duration,  # in samples
            description=description,
            type=etype,
        )

        if "ch_names" in annot:
            # handle channels
            channels = list(annot["ch_names"])
            event_dict["channels"] = channels

        # add a "pybv" event
        events += [event_dict]

    return events


def _export_raw(fname, raw, overwrite):
    """Export Raw object to BrainVision via pybv."""
    fname = str(fname)
    ext = os.path.splitext(fname)[-1]
    if ext != ".vhdr":
        fname = fname.replace(ext, ".vhdr")
    _export_mne_raw(raw=raw, fname=fname, overwrite=overwrite)
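To make the annotation triage in `_mne_annots2pybv_events` concrete, a small end-to-end sketch (synthetic data and annotation strings; requires the optional pybv dependency):

import numpy as np
import mne

info = mne.create_info(ch_names=["EEG 001"], sfreq=100.0, ch_types="eeg")
raw = mne.io.RawArray(np.zeros((1, 500)), info)
raw.set_annotations(
    mne.Annotations(
        onset=[1.0, 2.0, 3.0],
        duration=[0.0, 0.0, 0.5],
        description=["Stimulus/S  1", "Response/R128", "blink"],
    )
)
# "Stimulus/S  1" -> type="Stimulus", description=1
# "Response/R128" -> type="Response", description=128
# "blink"         -> type="Comment",  description="blink"
raw.export("toy.vhdr", overwrite=True)  # writes toy.vhdr, toy.vmrk, toy.eeg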
226  mne/export/_edf.py  Normal file
@@ -0,0 +1,226 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import datetime as dt
from collections.abc import Callable

import numpy as np

from ..utils import _check_edfio_installed, warn

_check_edfio_installed()
from edfio import Edf, EdfAnnotation, EdfSignal, Patient, Recording  # noqa: E402


# copied from edfio (Apache license)
def _round_float_to_8_characters(
    value: float,
    round_func: Callable[[float], int],
) -> float:
    if isinstance(value, int) or value.is_integer():
        return value
    length = 8
    integer_part_length = str(value).find(".")
    if integer_part_length == length:
        return round_func(value)
    factor = 10 ** (length - 1 - integer_part_length)
    return round_func(value * factor) / factor


def _export_raw(fname, raw, physical_range, add_ch_type):
    """Export Raw objects to EDF files.

    TODO: if in future the Info object supports transducer or technician information,
    allow writing those here.
    """
    # get voltage-based data in uV
    units = dict(
        eeg="uV", ecog="uV", seeg="uV", eog="uV", ecg="uV", emg="uV", bio="uV", dbs="uV"
    )

    digital_min, digital_max = -32767, 32767
    annotations = []

    # load data first
    raw.load_data()

    ch_types = np.array(raw.get_channel_types())
    n_times = raw.n_times

    # get the entire dataset in uV
    data = raw.get_data(units=units)

    # Sampling frequency in EDF only supports integers, so to allow for float sampling
    # rates from Raw, we adjust the output sampling rate for all channels and the data
    # record duration.
    sfreq = raw.info["sfreq"]
    if float(sfreq).is_integer():
        out_sfreq = int(sfreq)
        data_record_duration = None
        # make non-integer second durations work
        if (pad_width := int(np.ceil(n_times / sfreq) * sfreq - n_times)) > 0:
            warn(
                "EDF format requires equal-length data blocks, so "
                f"{pad_width / sfreq:.3g} seconds of edge values were appended to all "
                "channels when writing the final block."
            )
            data = np.pad(data, (0, int(pad_width)), "edge")
            annotations.append(
                EdfAnnotation(
                    raw.times[-1] + 1 / sfreq, pad_width / sfreq, "BAD_ACQ_SKIP"
                )
            )
    else:
        data_record_duration = _round_float_to_8_characters(
            np.floor(sfreq) / sfreq, round
        )
        out_sfreq = np.floor(sfreq) / data_record_duration
        warn(
            f"Data has a non-integer sampling rate of {sfreq}; writing to EDF format "
            "may cause a small change to sample times."
        )

    # get any filter information applied to the data
    lowpass = raw.info["lowpass"]
    highpass = raw.info["highpass"]
    linefreq = raw.info["line_freq"]
    filter_str_info = f"HP:{highpass}Hz LP:{lowpass}Hz"
    if linefreq is not None:
        filter_str_info += f" N:{linefreq}Hz"

    if physical_range == "auto":
        # get max and min for each channel type data
        ch_types_phys_max = dict()
        ch_types_phys_min = dict()

        for _type in np.unique(ch_types):
            _picks = [n for n, t in zip(raw.ch_names, ch_types) if t == _type]
            _data = raw.get_data(units=units, picks=_picks)
            ch_types_phys_max[_type] = _data.max()
            ch_types_phys_min[_type] = _data.min()
    elif physical_range == "channelwise":
        prange = None
    else:
        # get the physical min and max of the data in uV
        # Physical ranges of the data in uV are usually set by the manufacturer and
        # electrode properties. In general, physical min and max should be the clipping
        # levels of the ADC input, and they should be the same for all channels. For
        # example, Nihon Kohden uses ±3200 uV for all EEG channels (corresponding to the
        # actual clipping levels of their input amplifiers & ADC). For a discussion,
        # see https://github.com/sccn/eeglab/issues/246
        pmin, pmax = physical_range[0], physical_range[1]

        # check that physical min and max is not exceeded
        if data.max() > pmax:
            warn(
                f"The maximum μV of the data {data.max()} is more than the physical max"
                f" passed in {pmax}."
            )
        if data.min() < pmin:
            warn(
                f"The minimum μV of the data {data.min()} is less than the physical min"
                f" passed in {pmin}."
            )
        data = np.clip(data, pmin, pmax)
        prange = pmin, pmax
    signals = []
    for idx, ch in enumerate(raw.ch_names):
        ch_type = ch_types[idx]
        signal_label = f"{ch_type.upper()} {ch}" if add_ch_type else ch
        if len(signal_label) > 16:
            raise RuntimeError(
                f"Signal label for {ch} ({ch_type}) is longer than 16 characters, which"
                " is not supported by the EDF standard. Please shorten the channel name"
                " before exporting to EDF."
            )

        if physical_range == "auto":  # per channel type
            pmin = ch_types_phys_min[ch_type]
            pmax = ch_types_phys_max[ch_type]
            if pmax == pmin:
                pmax = pmin + 1
            prange = pmin, pmax

        signals.append(
            EdfSignal(
                data[idx],
                out_sfreq,
                label=signal_label,
                transducer_type="",
                physical_dimension="" if ch_type == "stim" else "uV",
                physical_range=prange,
                digital_range=(digital_min, digital_max),
                prefiltering=filter_str_info,
            )
        )

    # set patient info
    subj_info = raw.info.get("subject_info")
    if subj_info is not None:
        # get the full name of subject if available
        first_name = subj_info.get("first_name", "")
        middle_name = subj_info.get("middle_name", "")
        last_name = subj_info.get("last_name", "")
        name = "_".join(filter(None, [first_name, middle_name, last_name]))

        birthday = subj_info.get("birthday")
        hand = subj_info.get("hand")
        weight = subj_info.get("weight")
        height = subj_info.get("height")
        sex = subj_info.get("sex")

        additional_patient_info = []
        for key, value in [("height", height), ("weight", weight), ("hand", hand)]:
            if value:
                additional_patient_info.append(f"{key}={value}")

        patient = Patient(
            code=subj_info.get("his_id") or "X",
            sex={0: "X", 1: "M", 2: "F", None: "X"}[sex],
            birthdate=birthday,
            name=name or "X",
            additional=additional_patient_info,
        )
    else:
        patient = None

    # set measurement date
    if (meas_date := raw.info["meas_date"]) is not None:
        startdate = dt.date(meas_date.year, meas_date.month, meas_date.day)
        starttime = dt.time(
            meas_date.hour, meas_date.minute, meas_date.second, meas_date.microsecond
        )
    else:
        startdate = None
        starttime = None

    device_info = raw.info.get("device_info")
    if device_info is not None:
        device_type = device_info.get("type") or "X"
        recording = Recording(startdate=startdate, equipment_code=device_type)
    else:
        recording = Recording(startdate=startdate)

    for desc, onset, duration, ch_names in zip(
        raw.annotations.description,
        raw.annotations.onset,
        raw.annotations.duration,
        raw.annotations.ch_names,
    ):
        if ch_names:
            for ch_name in ch_names:
                annotations.append(
                    EdfAnnotation(onset, duration, desc + f"@@{ch_name}")
                )
        else:
            annotations.append(EdfAnnotation(onset, duration, desc))

    Edf(
        signals=signals,
        patient=patient,
        recording=recording,
        starttime=starttime,
        data_record_duration=data_record_duration,
        annotations=annotations,
    ).write(fname)
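A worked example of the non-integer sampling-rate branch above (values are illustrative):

import numpy as np
from mne.export._edf import _round_float_to_8_characters

sfreq = 512.5                                    # hypothetical non-integer rate
value = np.floor(sfreq) / sfreq                  # 0.99902439...
data_record_duration = _round_float_to_8_characters(value, round)  # 0.999024
out_sfreq = np.floor(sfreq) / data_record_duration                 # ~512.5002 Hz
# the ~2e-4 Hz difference is why the code warns about a small change to sample times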
92  mne/export/_eeglab.py  Normal file
@@ -0,0 +1,92 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import numpy as np

from ..utils import _check_eeglabio_installed

_check_eeglabio_installed()
import eeglabio.epochs  # noqa: E402
import eeglabio.raw  # noqa: E402


def _export_raw(fname, raw):
    # load data first
    raw.load_data()

    # remove extra epoc and STI channels
    drop_chs = ["epoc"]
    # filenames attribute of RawArray is filled with None
    if raw.filenames[0] and raw.filenames[0].suffix != ".fif":
        drop_chs.append("STI 014")

    ch_names = [ch for ch in raw.ch_names if ch not in drop_chs]
    cart_coords = _get_als_coords_from_chs(raw.info["chs"], drop_chs)

    annotations = [
        raw.annotations.description,
        raw.annotations.onset,
        raw.annotations.duration,
    ]
    eeglabio.raw.export_set(
        fname,
        data=raw.get_data(picks=ch_names),
        sfreq=raw.info["sfreq"],
        ch_names=ch_names,
        ch_locs=cart_coords,
        annotations=annotations,
    )


def _export_epochs(fname, epochs):
    _check_eeglabio_installed()
    # load data first
    epochs.load_data()

    # remove extra epoc and STI channels
    drop_chs = ["epoc", "STI 014"]
    ch_names = [ch for ch in epochs.ch_names if ch not in drop_chs]
    cart_coords = _get_als_coords_from_chs(epochs.info["chs"], drop_chs)

    if epochs.annotations:
        annot = [
            epochs.annotations.description,
            epochs.annotations.onset,
            epochs.annotations.duration,
        ]
    else:
        annot = None

    eeglabio.epochs.export_set(
        fname,
        data=epochs.get_data(picks=ch_names),
        sfreq=epochs.info["sfreq"],
        events=epochs.events,
        tmin=epochs.tmin,
        tmax=epochs.tmax,
        ch_names=ch_names,
        event_id=epochs.event_id,
        ch_locs=cart_coords,
        annotations=annot,
    )


def _get_als_coords_from_chs(chs, drop_chs=None):
    """Extract channel locations in ALS format (x, y, z) from a chs instance.

    Returns
    -------
    None if no valid coordinates are found (all zeros)
    """
    if drop_chs is None:
        drop_chs = []
    cart_coords = np.array([d["loc"][:3] for d in chs if d["ch_name"] not in drop_chs])
    if cart_coords.any():  # has coordinates
        # (-y x z) to (x y z)
        cart_coords[:, 0] = -cart_coords[:, 0]  # -y to y
        # swap x (1) and y (0)
        cart_coords[:, [0, 1]] = cart_coords[:, [1, 0]]
    else:
        cart_coords = None
    return cart_coords
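A quick numeric check of the coordinate transform in `_get_als_coords_from_chs` (the location is made up; it maps MNE head coordinates, x right / y anterior, onto the orientation eeglabio expects):

import numpy as np

cart_coords = np.array([[0.02, 0.05, 0.08]])     # one channel: x=0.02, y=0.05, z=0.08
cart_coords[:, 0] = -cart_coords[:, 0]           # negate the first column
cart_coords[:, [0, 1]] = cart_coords[:, [1, 0]]  # swap columns 0 and 1
print(cart_coords)                               # [[ 0.05 -0.02  0.08]]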
176  mne/export/_egimff.py  Normal file
@@ -0,0 +1,176 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import datetime
import os
import os.path as op
import shutil

import numpy as np

from .._fiff.pick import pick_channels, pick_types
from ..io.egi.egimff import _import_mffpy
from ..utils import _check_fname, verbose, warn


@verbose
def export_evokeds_mff(fname, evoked, history=None, *, overwrite=False, verbose=None):
    """Export evoked dataset to MFF.

    %(export_warning)s

    Parameters
    ----------
    %(fname_export_params)s
    evoked : list of Evoked instances
        List of evoked datasets to export to one file. Note that the
        measurement info from the first evoked instance is used, so be sure
        that information matches.
    history : None (default) | list of dict
        Optional list of history entries (dictionaries) to be written to
        history.xml. This must adhere to the format described in
        mffpy.xml_files.History.content. If None, no history.xml will be
        written.
    %(overwrite)s

        .. versionadded:: 0.24.1
    %(verbose)s

    Notes
    -----
    .. versionadded:: 0.24

    %(export_warning_note_evoked)s

    Only EEG channels are written to the output file.
    ``info['device_info']['type']`` must be a valid MFF recording device
    (e.g. 'HydroCel GSN 256 1.0'). This field is automatically populated when
    using MFF read functions.
    """
    mffpy = _import_mffpy("Export evokeds to MFF.")

    info = evoked[0].info
    if np.round(info["sfreq"]) != info["sfreq"]:
        raise ValueError(
            f'Sampling frequency must be a whole number. sfreq: {info["sfreq"]}'
        )
    sampling_rate = int(info["sfreq"])

    # check for unapplied projectors
    if any(not proj["active"] for proj in evoked[0].info["projs"]):
        warn(
            "Evoked instance has unapplied projectors. Consider applying "
            "them before exporting with evoked.apply_proj()."
        )

    # Initialize writer
    # Future changes: conditions based on version or mffpy requirement if
    # https://github.com/BEL-Public/mffpy/pull/92 is merged and released.
    fname = str(_check_fname(fname, overwrite=overwrite))
    if op.exists(fname):
        os.remove(fname) if op.isfile(fname) else shutil.rmtree(fname)
    writer = mffpy.Writer(fname)
    current_time = datetime.datetime.now(datetime.timezone.utc)
    writer.addxml("fileInfo", recordTime=current_time)
    try:
        device = info["device_info"]["type"]
    except (TypeError, KeyError):
        raise ValueError("No device type. Cannot determine sensor layout.")
    writer.add_coordinates_and_sensor_layout(device)

    # Add EEG data
    eeg_channels = pick_types(info, eeg=True, exclude=[])
    eeg_bin = mffpy.bin_writer.BinWriter(sampling_rate)
    for ave in evoked:
        # Signals are converted to µV
        block = (ave.data[eeg_channels] * 1e6).astype(np.float32)
        eeg_bin.add_block(block, offset_us=0)
    writer.addbin(eeg_bin)

    # Add categories
    categories_content = _categories_content_from_evokeds(evoked)
    writer.addxml("categories", categories=categories_content)

    # Add history
    if history:
        writer.addxml("historyEntries", entries=history)

    writer.write()


def _categories_content_from_evokeds(evoked):
    """Return categories.xml content for evoked dataset."""
    content = dict()
    begin_time = 0
    for ave in evoked:
        # Times are converted to microseconds
        sfreq = ave.info["sfreq"]
        duration = np.round(len(ave.times) / sfreq * 1e6).astype(int)
        end_time = begin_time + duration
        event_time = begin_time - np.round(ave.tmin * 1e6).astype(int)
        eeg_bads = _get_bad_eeg_channels(ave.info)
        content[ave.comment] = [
            _build_segment_content(
                begin_time,
                end_time,
                event_time,
                eeg_bads,
                name="Average",
                nsegs=ave.nave,
            )
        ]
        begin_time += duration
    return content


def _get_bad_eeg_channels(info):
    """Return a list of bad EEG channels formatted for categories.xml.

    Given a list of only the EEG channels in file, return the indices of this
    list (starting at 1) that correspond to bad channels.
    """
    if len(info["bads"]) == 0:
        return []
    eeg_channels = pick_types(info, eeg=True, exclude=[])
    bad_channels = pick_channels(info["ch_names"], info["bads"])
    bads_elementwise = np.isin(eeg_channels, bad_channels)
    return list(np.flatnonzero(bads_elementwise) + 1)


def _build_segment_content(
    begin_time,
    end_time,
    event_time,
    eeg_bads,
    status="unedited",
    name=None,
    pns_bads=None,
    nsegs=None,
):
    """Build content for a single segment in categories.xml.

    Segments are sorted into categories in categories.xml. In a segmented MFF
    each category can contain multiple segments, but in an averaged MFF each
    category only contains one segment (the average).
    """
    channel_status = [
        {"signalBin": 1, "exclusion": "badChannels", "channels": eeg_bads}
    ]
    if pns_bads:
        channel_status.append(
            {"signalBin": 2, "exclusion": "badChannels", "channels": pns_bads}
        )
    content = {
        "status": status,
        "beginTime": begin_time,
        "endTime": end_time,
        "evtBegin": event_time,
        "evtEnd": event_time,
        "channelStatus": channel_status,
    }
    if name:
        content["name"] = name
    if nsegs:
        content["keys"] = {"#seg": {"type": "long", "data": nsegs}}
    return content
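Typical use of `export_evokeds_mff` (a sketch; the file names are made up, the data must come from an EGI recording so that `info["device_info"]["type"]` names a valid MFF device, and mffpy must be installed):

import mne
from mne.export import export_evokeds_mff

evokeds = mne.read_evokeds("egi_conditions-ave.fif")   # illustrative path
export_evokeds_mff("egi_conditions.mff", evokeds, overwrite=True)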
222  mne/export/_export.py  Normal file
@@ -0,0 +1,222 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

import os.path as op

from ..utils import _check_fname, _validate_type, logger, verbose, warn
from ._egimff import export_evokeds_mff


@verbose
def export_raw(
    fname,
    raw,
    fmt="auto",
    physical_range="auto",
    add_ch_type=False,
    *,
    overwrite=False,
    verbose=None,
):
    """Export Raw to external formats.

    %(export_fmt_support_raw)s

    %(export_warning)s

    Parameters
    ----------
    %(fname_export_params)s
    raw : instance of Raw
        The raw instance to export.
    %(export_fmt_params_raw)s
    %(physical_range_export_params)s
    %(add_ch_type_export_params)s
    %(overwrite)s

        .. versionadded:: 0.24.1
    %(verbose)s

    Notes
    -----
    .. versionadded:: 0.24

    %(export_warning_note_raw)s
    %(export_eeglab_note)s
    %(export_edf_note)s
    """
    fname = str(_check_fname(fname, overwrite=overwrite))
    supported_export_formats = {  # format : (extensions,)
        "eeglab": ("set",),
        "edf": ("edf",),
        "brainvision": (
            "eeg",
            "vmrk",
            "vhdr",
        ),
    }
    fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats)

    # check for unapplied projectors
    if any(not proj["active"] for proj in raw.info["projs"]):
        warn(
            "Raw instance has unapplied projectors. Consider applying "
            "them before exporting with raw.apply_proj()."
        )

    if fmt == "eeglab":
        from ._eeglab import _export_raw

        _export_raw(fname, raw)
    elif fmt == "edf":
        from ._edf import _export_raw

        _export_raw(fname, raw, physical_range, add_ch_type)
    elif fmt == "brainvision":
        from ._brainvision import _export_raw

        _export_raw(fname, raw, overwrite)


@verbose
def export_epochs(fname, epochs, fmt="auto", *, overwrite=False, verbose=None):
    """Export Epochs to external formats.

    %(export_fmt_support_epochs)s

    %(export_warning)s

    Parameters
    ----------
    %(fname_export_params)s
    epochs : instance of Epochs
        The epochs to export.
    %(export_fmt_params_epochs)s
    %(overwrite)s

        .. versionadded:: 0.24.1
    %(verbose)s

    Notes
    -----
    .. versionadded:: 0.24

    %(export_warning_note_epochs)s
    %(export_eeglab_note)s
    """
    fname = str(_check_fname(fname, overwrite=overwrite))
    supported_export_formats = {
        "eeglab": ("set",),
    }
    fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats)

    # check for unapplied projectors
    if any(not proj["active"] for proj in epochs.info["projs"]):
        warn(
            "Epochs instance has unapplied projectors. Consider applying "
            "them before exporting with epochs.apply_proj()."
        )

    if fmt == "eeglab":
        from ._eeglab import _export_epochs

        _export_epochs(fname, epochs)


@verbose
def export_evokeds(fname, evoked, fmt="auto", *, overwrite=False, verbose=None):
    """Export evoked dataset to external formats.

    This function is a wrapper for format-specific export functions. The export
    function is selected based on the inferred file format. For additional
    options, use the format-specific functions.

    %(export_fmt_support_evoked)s

    %(export_warning)s

    Parameters
    ----------
    %(fname_export_params)s
    evoked : Evoked instance, or list of Evoked instances
        The evoked dataset, or list of evoked datasets, to export to one file.
        Note that the measurement info from the first evoked instance is used,
        so be sure that information matches.
    %(export_fmt_params_evoked)s
    %(overwrite)s

        .. versionadded:: 0.24.1
    %(verbose)s

    See Also
    --------
    mne.write_evokeds
    mne.export.export_evokeds_mff

    Notes
    -----
    .. versionadded:: 0.24

    %(export_warning_note_evoked)s
    """
    fname = str(_check_fname(fname, overwrite=overwrite))
    supported_export_formats = {
        "mff": ("mff",),
    }
    fmt = _infer_check_export_fmt(fmt, fname, supported_export_formats)

    if not isinstance(evoked, list):
        evoked = [evoked]

    logger.info(f"Exporting evoked dataset to {fname}...")

    if fmt == "mff":
        export_evokeds_mff(fname, evoked, overwrite=overwrite)


def _infer_check_export_fmt(fmt, fname, supported_formats):
    """Infer export format from filename extension if auto.

    Raises an error if fmt is "auto" and no file extension is found, then checks the
    format against the supported formats and raises an error if it is not supported.

    Parameters
    ----------
    fmt : str
        Format of the export; the format is only inferred from the filename if fmt
        is "auto".
    fname : str
        Name of the target export file, only used when fmt is auto.
    supported_formats : dict of str : tuple/list
        Dictionary containing supported formats (as keys) and each format's
        corresponding file extensions in a tuple (e.g., {'eeglab': ('set',)}).
    """
    _validate_type(fmt, str, "fmt")
    fmt = fmt.lower()
    if fmt == "auto":
        fmt = op.splitext(fname)[1]
        if fmt:
            fmt = fmt[1:].lower()
            # find fmt in supported formats dict's tuples
            fmt = next(
                (k for k, v in supported_formats.items() if fmt in v), fmt
            )  # default to original fmt for raising error later
        else:
            raise ValueError(
                f"Couldn't infer format from filename {fname} (no extension found)"
            )

    if fmt not in supported_formats:
        supported = []
        for supp_format, extensions in supported_formats.items():
            ext_str = ", ".join(f"*.{ext}" for ext in extensions)
            supported.append(f"{supp_format} ({ext_str})")

        supported_str = ", ".join(supported)
        raise ValueError(
            f"Format '{fmt}' is not supported. "
            f"Supported formats are {supported_str}."
        )
    return fmt
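How the extension-based dispatch in `export_raw` plays out in practice (a sketch; file names are illustrative and each target format needs its optional dependency, eeglabio / edfio / pybv):

import mne

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)     # illustrative input
mne.export.export_raw("sample.set", raw, overwrite=True)      # inferred: eeglab
mne.export.export_raw("sample.edf", raw, overwrite=True)      # inferred: edf
mne.export.export_raw("sample.vhdr", raw, fmt="brainvision", overwrite=True)
# an unknown extension with fmt="auto" raises ValueError listing the supported formats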