initial commit
8  mne/minimum_norm/__init__.py  Normal file
@@ -0,0 +1,8 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

"""Linear inverse solvers based on L2 Minimum Norm Estimates (MNE)."""

import lazy_loader as lazy

(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__)
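A quick note on the lazy_loader pattern above: importing mne.minimum_norm stays cheap because attach_stub reads the public names from the accompanying __init__.pyi stub and defers the actual submodule imports until an attribute is first accessed. A minimal sketch of that behaviour (illustrative only, not part of this commit):

# Sketch: lazy loading as wired up by lazy.attach_stub (illustrative, not from this commit).
import mne.minimum_norm as minimum_norm

print(len(minimum_norm.__all__))            # names are taken from __init__.pyi
apply_inverse = minimum_norm.apply_inverse  # first access triggers import of .inverse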
50  mne/minimum_norm/__init__.pyi  Normal file
@@ -0,0 +1,50 @@
__all__ = [
    "INVERSE_METHODS",
    "InverseOperator",
    "apply_inverse",
    "apply_inverse_cov",
    "apply_inverse_epochs",
    "apply_inverse_raw",
    "apply_inverse_tfr_epochs",
    "compute_rank_inverse",
    "compute_source_psd",
    "compute_source_psd_epochs",
    "estimate_snr",
    "get_cross_talk",
    "get_point_spread",
    "make_inverse_operator",
    "make_inverse_resolution_matrix",
    "prepare_inverse_operator",
    "read_inverse_operator",
    "resolution_metrics",
    "source_band_induced_power",
    "source_induced_power",
    "write_inverse_operator",
]
from .inverse import (
    INVERSE_METHODS,
    InverseOperator,
    apply_inverse,
    apply_inverse_cov,
    apply_inverse_epochs,
    apply_inverse_raw,
    apply_inverse_tfr_epochs,
    compute_rank_inverse,
    estimate_snr,
    make_inverse_operator,
    prepare_inverse_operator,
    read_inverse_operator,
    write_inverse_operator,
)
from .resolution_matrix import (
    get_cross_talk,
    get_point_spread,
    make_inverse_resolution_matrix,
)
from .spatial_resolution import resolution_metrics
from .time_frequency import (
    compute_source_psd,
    compute_source_psd_epochs,
    source_band_induced_power,
    source_induced_power,
)
199  mne/minimum_norm/_eloreta.py  Normal file
@@ -0,0 +1,199 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from functools import partial

import numpy as np

from ..defaults import _handle_default
from ..fixes import _safe_svd
from ..utils import eigh, logger, sqrtm_sym, warn

# For the reference implementation of eLORETA (force_equal=False),
# 0 < loose <= 1 all produce solutions that are (more or less)
# the same as free orientation (loose=1) and quite different from
# loose=0 (fixed). If we do force_equal=True, we get a visibly smooth
# transition from 0->1. This is probably because this mode behaves more like
# sLORETA and dSPM in that it weights each orientation for a given source
# uniformly (which is not the case for the reference eLORETA implementation).
#
# If we *reapply the orientation prior* after each eLORETA iteration,
# we can preserve the smooth transition without requiring force_equal=True,
# which is probably more representative of what eLORETA should do. But this
# does not produce results that pass the eye test.

def _compute_eloreta(inv, lambda2, options):
    """Compute the eLORETA solution."""
    from .inverse import _compute_reginv, compute_rank_inverse

    options = _handle_default("eloreta_options", options)
    eps, max_iter = options["eps"], options["max_iter"]
    force_equal = bool(options["force_equal"])  # None means False

    # Reassemble the gain matrix (should be fast enough)
    if inv["eigen_leads_weighted"]:
        # We can probably relax this if we ever need to
        raise RuntimeError("eLORETA cannot be computed with weighted eigen leads")
    G = np.dot(
        inv["eigen_fields"]["data"].T * inv["sing"], inv["eigen_leads"]["data"].T
    )
    del inv["eigen_leads"]["data"]
    del inv["eigen_fields"]["data"]
    del inv["sing"]
    G = G.astype(np.float64)
    n_nzero = compute_rank_inverse(inv)
    G /= np.sqrt(inv["source_cov"]["data"])
    # restore orientation prior
    source_std = np.ones(G.shape[1])
    if inv["orient_prior"] is not None:
        source_std *= np.sqrt(inv["orient_prior"]["data"])
    G *= source_std
    # We do not multiply by the depth prior, as eLORETA should compensate for
    # depth bias.
    n_src = inv["nsource"]
    n_chan, n_orient = G.shape
    n_orient //= n_src
    assert n_orient in (1, 3)
    logger.info(" Computing optimized source covariance (eLORETA)...")
    if n_orient == 3:
        logger.info(
            f" Using {'uniform' if force_equal else 'independent'} "
            "orientation weights"
        )
    # src, sens, 3
    G_3 = _get_G_3(G, n_orient)
    if n_orient != 1 and not force_equal:
        # Outer product
        R_prior = source_std.reshape(n_src, 1, 3) * source_std.reshape(n_src, 3, 1)
    else:
        R_prior = source_std**2

    # The following was adapted under BSD license by permission of Guido Nolte
    if force_equal or n_orient == 1:
        R_shape = (n_src * n_orient,)
        R = np.ones(R_shape)
    else:
        R_shape = (n_src, n_orient, n_orient)
        R = np.empty(R_shape)
        R[:] = np.eye(n_orient)[np.newaxis]
    R *= R_prior
    _this_normalize_R = partial(
        _normalize_R,
        n_nzero=n_nzero,
        force_equal=force_equal,
        n_src=n_src,
        n_orient=n_orient,
    )
    G_R_Gt = _this_normalize_R(G, R, G_3)
    extra = " (this may take a while)" if n_orient == 3 else ""
    logger.info(f" Fitting up to {max_iter} iterations{extra}...")
    for kk in range(max_iter):
        # 1. Compute inverse of the weights (stabilized) and C
        s, u = eigh(G_R_Gt)
        s = abs(s)
        sidx = np.argsort(s)[::-1][:n_nzero]
        s, u = s[sidx], u[:, sidx]
        with np.errstate(invalid="ignore"):
            s = np.where(s > 0, 1 / (s + lambda2), 0)
        N = np.dot(u * s, u.T)
        del s

        # Update the weights
        R_last = R.copy()
        if n_orient == 1:
            R[:] = 1.0 / np.sqrt((np.dot(N, G) * G).sum(0))
        else:
            M = np.matmul(np.matmul(G_3, N[np.newaxis]), G_3.swapaxes(-2, -1))
            if force_equal:
                _, s = sqrtm_sym(M, inv=True)
                R[:] = np.repeat(1.0 / np.mean(s, axis=-1), 3)
            else:
                R[:], _ = sqrtm_sym(M, inv=True)
        R *= R_prior  # reapply our prior, eLORETA undoes it
        G_R_Gt = _this_normalize_R(G, R, G_3)

        # Check for weight convergence
        delta = np.linalg.norm(R.ravel() - R_last.ravel()) / np.linalg.norm(
            R_last.ravel()
        )
        logger.debug(
            f"    Iteration {kk + 1} / {max_iter} ...{extra} ({delta:0.1e})"
        )
        if delta < eps:
            logger.info(
                f" Converged on iteration {kk} ({delta:.2g} < {eps:.2g})"
            )
            break
    else:
        warn(f"eLORETA weight fitting did not converge (>= {eps})")
    del G_R_Gt
    logger.info(" Updating inverse with weighted eigen leads")
    G /= source_std  # undo our biasing
    G_3 = _get_G_3(G, n_orient)
    _this_normalize_R(G, R, G_3)
    del G_3
    if n_orient == 1 or force_equal:
        R_sqrt = np.sqrt(R)
    else:
        R_sqrt = sqrtm_sym(R)[0]
    assert R_sqrt.shape == R_shape
    A = _R_sqrt_mult(G, R_sqrt)
    del R, G  # the rest will be done in terms of R_sqrt and A
    eigen_fields, sing, eigen_leads = _safe_svd(A, full_matrices=False)
    del A
    inv["sing"] = sing
    inv["reginv"] = _compute_reginv(inv, lambda2)
    inv["eigen_leads_weighted"] = True
    inv["eigen_leads"]["data"] = _R_sqrt_mult(eigen_leads, R_sqrt).T
    inv["eigen_fields"]["data"] = eigen_fields.T
    # XXX in theory we should set inv['source_cov'] properly.
    # For fixed ori (or free ori with force_equal=True), we can as these
    # are diagonal matrices. But for free ori without force_equal, it's a
    # block diagonal 3x3 and we have no efficient way of storing this (and
    # storing a covariance matrix with (20484 * 3) ** 2 elements is not going
    # to work). So let's just set it to nan for now.
    # It's not used downstream anyway now that we set
    # eigen_leads_weighted = True.
    inv["source_cov"]["data"].fill(np.nan)
    logger.info("[done]")


def _normalize_R(G, R, G_3, n_nzero, force_equal, n_src, n_orient):
    """Normalize R so that lambda2 is consistent."""
    if n_orient == 1 or force_equal:
        R_Gt = R[:, np.newaxis] * G.T
    else:
        R_Gt = np.matmul(R, G_3).reshape(n_src * 3, -1)
    G_R_Gt = G @ R_Gt
    norm = np.trace(G_R_Gt) / n_nzero
    G_R_Gt /= norm
    R /= norm
    return G_R_Gt


def _get_G_3(G, n_orient):
    if n_orient == 1:
        return None
    else:
        return G.reshape(G.shape[0], -1, n_orient).transpose(1, 2, 0)


def _R_sqrt_mult(other, R_sqrt):
    """Do other @ R ** 0.5."""
    if R_sqrt.ndim == 1:
        assert other.shape[1] == R_sqrt.size
        out = R_sqrt * other
    else:
        assert R_sqrt.shape[1:3] == (3, 3)
        assert other.shape[1] == np.prod(R_sqrt.shape[:2])
        assert other.ndim == 2
        n_src = R_sqrt.shape[0]
        n_chan = other.shape[0]
        out = (
            np.matmul(R_sqrt, other.reshape(n_chan, n_src, 3).transpose(1, 2, 0))
            .reshape(n_src * 3, n_chan)
            .T
        )
    return out
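_compute_eloreta above is internal; users reach it by selecting method="eLORETA" in apply_inverse. A hedged sketch of how the eps/max_iter/force_equal options discussed in the module comment would be supplied, assuming the public method_params route, with evoked and inv loaded elsewhere and illustrative values:

# Hedged sketch: selecting eLORETA and forwarding its options (evoked and inv loaded elsewhere).
from mne.minimum_norm import apply_inverse

stc = apply_inverse(
    evoked,
    inv,
    lambda2=1.0 / 9.0,
    method="eLORETA",
    method_params=dict(eps=1e-6, max_iter=20, force_equal=True),  # keys match eloreta_options above
)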
2250  mne/minimum_norm/inverse.py  Normal file
File diff suppressed because it is too large
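The inverse.py diff is collapsed here, but the __init__.pyi stub above lists what it exports. For orientation, a hedged sketch of the usual workflow built from those names (file names are placeholders, parameter values illustrative):

# Hedged sketch of the standard minimum-norm workflow (placeholder file names).
import mne
from mne.minimum_norm import apply_inverse, make_inverse_operator, write_inverse_operator

evoked = mne.read_evokeds("sample-ave.fif", condition=0)
fwd = mne.read_forward_solution("sample-fwd.fif")
noise_cov = mne.read_cov("sample-cov.fif")

inv = make_inverse_operator(evoked.info, fwd, noise_cov, loose=0.2, depth=0.8)
write_inverse_operator("sample-inv.fif", inv)
stc = apply_inverse(evoked, inv, lambda2=1.0 / 9.0, method="dSPM")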
526  mne/minimum_norm/resolution_matrix.py  Normal file
@@ -0,0 +1,526 @@
"""Compute resolution matrix for linear estimators."""

# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

from copy import deepcopy

import numpy as np

from mne.minimum_norm.inverse import InverseOperator

from .._fiff.constants import FIFF
from .._fiff.pick import pick_channels_forward
from ..evoked import EvokedArray
from ..forward.forward import Forward, convert_forward_solution
from ..label import Label
from ..source_estimate import _get_src_type, _make_stc, _prepare_label_extraction
from ..source_space._source_space import SourceSpaces, _get_vertno
from ..utils import _validate_type, logger, verbose
from .inverse import apply_inverse

@verbose
def make_inverse_resolution_matrix(
    forward, inverse_operator, method="dSPM", lambda2=1.0 / 9.0, verbose=None
):
    """Compute resolution matrix for linear inverse operator.

    Parameters
    ----------
    forward : instance of Forward
        Forward Operator.
    inverse_operator : instance of InverseOperator
        Inverse operator.
    method : 'MNE' | 'dSPM' | 'sLORETA'
        Inverse method to use (MNE, dSPM, sLORETA).
    lambda2 : float
        The regularisation parameter.
    %(verbose)s

    Returns
    -------
    resmat : array, shape (n_orient_inv * n_dipoles, n_orient_fwd * n_dipoles)
        Resolution matrix (inverse operator times forward operator).
        The result of applying the inverse operator to the forward operator.
        If source orientations are not fixed, all source components will be
        computed (i.e. for n_orient_inv > 1 or n_orient_fwd > 1).
        The columns of the resolution matrix are the point-spread functions
        (PSFs) and the rows are the cross-talk functions (CTFs).
    """
    # make sure forward and inverse operator match
    inv = inverse_operator
    fwd = _convert_forward_match_inv(forward, inv)

    # don't include bad channels
    # only use good channels from inverse operator
    bads_inv = inv["info"]["bads"]
    # good channels
    ch_names = [c for c in inv["info"]["ch_names"] if (c not in bads_inv)]
    fwd = pick_channels_forward(fwd, ch_names, ordered=True)

    # get leadfield matrix from forward solution
    leadfield = fwd["sol"]["data"]
    invmat = _get_matrix_from_inverse_operator(inv, fwd, method=method, lambda2=lambda2)
    resmat = invmat.dot(leadfield)
    logger.info(
        f"Dimensions of resolution matrix: {resmat.shape[0]} by {resmat.shape[1]}."
    )
    return resmat

@verbose
def _get_psf_ctf(
    resmat,
    src,
    idx,
    *,
    func,
    mode,
    n_comp,
    norm,
    return_pca_vars,
    vector=False,
    verbose=None,
):
    """Get point-spread (PSFs) or cross-talk (CTFs) functions."""
    # check for consistencies in input parameters
    _check_get_psf_ctf_params(mode, n_comp, return_pca_vars)

    # backward compatibility
    if norm is True:
        norm = "max"

    # get relevant vertices in source space
    src_orig = src
    _validate_type(src_orig, (InverseOperator, Forward, SourceSpaces), "src")
    if not isinstance(src, SourceSpaces):
        src = src["src"]
    verts_all = _vertices_for_get_psf_ctf(idx, src)
    vertno = _get_vertno(src)
    n_verts = sum(len(v) for v in vertno)
    src_type = _get_src_type(src, vertno)
    subject = src._subject
    if vector and src_type == "surface":
        _validate_type(
            src_orig,
            (Forward, InverseOperator),
            "src",
            extra="when creating a vector surface source estimate",
        )
        nn = src_orig["source_nn"]
    else:
        nn = np.repeat(np.eye(3, 3)[np.newaxis], n_verts, 0)

    n_r, n_c = resmat.shape
    if ((n_verts != n_r) and (n_r / 3 != n_verts)) or (
        (n_verts != n_c) and (n_c / 3 != n_verts)
    ):
        msg = (
            f"Number of vertices ({n_verts}) and corresponding dimension of "
            f"resolution matrix ({n_r}, {n_c}) do not match"
        )
        raise ValueError(msg)

    # the following will operate on columns of funcs
    if func == "ctf":
        resmat = resmat.T
        n_r, n_c = n_c, n_r

    # Functions and variances per label
    stcs = []
    pca_vars = []

    # if 3 orientations per vertex, redefine indices to columns of resolution
    # matrix
    if n_verts != n_c:
        # change indices to three indices per vertex
        for [i, verts] in enumerate(verts_all):
            verts_vec = np.empty(3 * len(verts), dtype=int)
            for [j, v] in enumerate(verts):
                verts_vec[3 * j : 3 * j + 3] = 3 * verts[j] + np.array([0, 1, 2])
            verts_all[i] = verts_vec  # use these as indices

    for verts in verts_all:
        # get relevant PSFs or CTFs for specified vertices
        if isinstance(verts, int):
            verts = [verts]  # to keep array dimensions
        funcs = resmat[:, verts]

        # normalise PSFs/CTFs if requested
        if norm is not None:
            funcs = _normalise_psf_ctf(funcs, norm)

        # summarise PSFs/CTFs across vertices if requested
        pca_var = None  # variances computed only if return_pca_vars=True
        if mode is not None:
            funcs, pca_var = _summarise_psf_ctf(
                funcs, mode, n_comp, return_pca_vars, nn
            )

        if not vector:  # if one value per vertex requested
            if n_verts != n_r:  # if 3 orientations per vertex, combine
                funcs_int = np.empty([int(n_r / 3), funcs.shape[1]])
                for i in np.arange(0, n_verts):
                    funcs_vert = funcs[3 * i : 3 * i + 3, :]
                    funcs_int[i, :] = np.sqrt((funcs_vert**2).sum(axis=0))
                funcs = funcs_int

        stc = _make_stc(
            funcs,
            vertno,
            src_type,
            tmin=0.0,
            tstep=1.0,
            subject=subject,
            vector=vector,
            source_nn=nn,
        )
        stcs.append(stc)
        pca_vars.append(pca_var)

    # if just one list or label specified, simplify output
    if len(stcs) == 1:
        stcs = stc
    if len(pca_vars) == 1:
        pca_vars = pca_var
    if pca_var is not None:
        return stcs, pca_vars
    else:
        return stcs

def _check_get_psf_ctf_params(mode, n_comp, return_pca_vars):
    """Check input parameters of _get_psf_ctf() for consistency."""
    if mode in [None, "sum", "mean"] and n_comp > 1:
        msg = f"n_comp must be 1 for mode={mode}."
        raise ValueError(msg)
    if mode != "pca" and return_pca_vars:
        msg = "SVD variances can only be returned if mode=pca."
        raise ValueError(msg)


def _vertices_for_get_psf_ctf(idx, src):
    """Get vertices in source space for PSFs/CTFs in _get_psf_ctf()."""
    # idx must be list
    # if label(s) specified get the indices, otherwise just carry on
    if type(idx[0]) is Label:
        # specify without source time courses, gets indices per label
        verts_labs, _ = _prepare_label_extraction(
            stc=None,
            labels=idx,
            src=src,
            mode="mean",
            allow_empty=False,
            use_sparse=False,
        )
        # verts_labs can be list of lists
        # concatenate indices per label across hemispheres
        # one list item per label
        verts = []

        for v in verts_labs:
            # if two hemispheres present
            if isinstance(v, list):
                # indices for both hemispheres in one list
                this_verts = np.concatenate((v[0], v[1]))
            else:
                this_verts = np.array(v)
            verts.append(this_verts)
    # check if list of list or just list
    else:
        if isinstance(idx[0], list):  # if list of list of integers
            verts = idx
        else:  # if list of integers
            verts = [idx]

    return verts


def _normalise_psf_ctf(funcs, norm):
    """Normalise PSFs/CTFs in _get_psf_ctf()."""
    # normalise PSFs/CTFs if specified
    if norm == "max":
        maxval = max(-funcs.min(), funcs.max())
        funcs = funcs / maxval
    elif norm == "norm":  # normalise to maximum norm across columns
        norms = np.linalg.norm(funcs, axis=0)
        funcs = funcs / norms.max()

    return funcs

def _summarise_psf_ctf(funcs, mode, n_comp, return_pca_vars, nn):
    """Summarise PSFs/CTFs across vertices."""
    s_var = None  # only computed for return_pca_vars=True

    if mode == "maxval":  # pick PSF/CTF with maximum absolute value
        absvals = np.maximum(-np.min(funcs, axis=0), np.max(funcs, axis=0))
        if n_comp > 1:  # only keep requested number of sorted PSFs/CTFs
            sortidx = np.argsort(absvals)
            maxidx = sortidx[-n_comp:]
        else:  # faster if only one required
            maxidx = [absvals.argmax()]
        funcs = funcs[:, maxidx]

    elif mode == "maxnorm":  # pick PSF/CTF with maximum norm
        norms = np.linalg.norm(funcs, axis=0)
        if n_comp > 1:  # only keep requested number of sorted PSFs/CTFs
            sortidx = np.argsort(norms)
            maxidx = sortidx[-n_comp:]
        else:  # faster if only one required
            maxidx = [norms.argmax()]
        funcs = funcs[:, maxidx]

    elif mode == "sum":  # sum across PSFs/CTFs
        funcs = np.sum(funcs, axis=1, keepdims=True)

    elif mode == "mean":  # mean of PSFs/CTFs
        funcs = np.mean(funcs, axis=1, keepdims=True)

    elif mode == "pca":  # SVD across PSFs/CTFs
        # compute SVD of PSFs/CTFs across vertices
        u, s, _ = np.linalg.svd(funcs, full_matrices=False, compute_uv=True)
        if n_comp > 1:
            funcs = u[:, :n_comp]
        else:
            funcs = u[:, 0, np.newaxis]
        # if explained variances for SVD components requested
        if return_pca_vars:
            # explained variance of individual SVD components
            s2 = s * s
            s_var = 100 * s2[:n_comp] / s2.sum()

    return funcs, s_var

@verbose
def get_point_spread(
    resmat,
    src,
    idx,
    mode=None,
    *,
    n_comp=1,
    norm=False,
    return_pca_vars=False,
    vector=False,
    verbose=None,
):
    """Get point-spread (PSFs) functions for vertices.

    Parameters
    ----------
    resmat : array, shape (n_dipoles, n_dipoles)
        Resolution matrix (e.g. from make_inverse_resolution_matrix).
    src : instance of SourceSpaces | instance of InverseOperator | instance of Forward
        Source space used to compute resolution matrix.
        Must be an InverseOperator if ``vector=True`` and a surface
        source space is used.
    %(idx_pctf)s
    %(mode_pctf)s
    %(n_comp_pctf_n)s
    %(norm_pctf)s
    %(return_pca_vars_pctf)s
    %(vector_pctf)s
    %(verbose)s

    Returns
    -------
    %(stcs_pctf)s
    %(pca_vars_pctf)s
    """  # noqa: E501
    return _get_psf_ctf(
        resmat,
        src,
        idx,
        func="psf",
        mode=mode,
        n_comp=n_comp,
        norm=norm,
        return_pca_vars=return_pca_vars,
        vector=vector,
    )


@verbose
def get_cross_talk(
    resmat,
    src,
    idx,
    mode=None,
    *,
    n_comp=1,
    norm=False,
    return_pca_vars=False,
    vector=False,
    verbose=None,
):
    """Get cross-talk (CTFs) function for vertices.

    Parameters
    ----------
    resmat : array, shape (n_dipoles, n_dipoles)
        Resolution matrix (e.g. from make_inverse_resolution_matrix).
    src : instance of SourceSpaces | instance of InverseOperator | instance of Forward
        Source space used to compute resolution matrix.
        Must be an InverseOperator if ``vector=True`` and a surface
        source space is used.
    %(idx_pctf)s
    %(mode_pctf)s
    %(n_comp_pctf_n)s
    %(norm_pctf)s
    %(return_pca_vars_pctf)s
    %(vector_pctf)s
    %(verbose)s

    Returns
    -------
    %(stcs_pctf)s
    %(pca_vars_pctf)s
    """  # noqa: E501
    return _get_psf_ctf(
        resmat,
        src,
        idx,
        func="ctf",
        mode=mode,
        n_comp=n_comp,
        norm=norm,
        return_pca_vars=return_pca_vars,
        vector=vector,
    )

def _convert_forward_match_inv(fwd, inv):
    """Ensure forward and inverse operators match.

    Inverse operator and forward operator must have same surface orientations,
    but can have different source orientation constraints.
    """
    _validate_type(fwd, Forward, "fwd")
    _validate_type(inv, InverseOperator, "inverse_operator")
    # did inverse operator use fixed orientation?
    is_fixed_inv = _check_fixed_ori(inv)
    # did forward operator use fixed orientation?
    is_fixed_fwd = _check_fixed_ori(fwd)

    # if inv or fwd fixed: do nothing
    # if inv loose: surf_ori must be True
    # if inv free: surf_ori must be False
    if not is_fixed_inv and not is_fixed_fwd:
        inv_surf_ori = inv._is_surf_ori
        if inv_surf_ori != fwd["surf_ori"]:
            fwd = convert_forward_solution(
                fwd, surf_ori=inv_surf_ori, force_fixed=False
            )

    return fwd


def _prepare_info(inverse_operator):
    """Get a usable dict."""
    # in order to convert sub-leadfield matrix to evoked data type (pretending
    # it's an epoch, see in loop below), uses 'info' from inverse solution
    # because this has all the correct projector information
    info = deepcopy(inverse_operator["info"])
    with info._unlock():
        info["sfreq"] = 1000.0  # necessary
        info["projs"] = inverse_operator["projs"]
        info["custom_ref_applied"] = False
    return info

def _get_matrix_from_inverse_operator(
    inverse_operator, forward, method="dSPM", lambda2=1.0 / 9.0
):
    """Get inverse matrix from an inverse operator.

    Currently works only for fixed/loose orientation constraints
    For loose orientation constraint, the CTFs are computed for the normal
    component (pick_ori='normal').

    Parameters
    ----------
    inverse_operator : instance of InverseOperator
        The inverse operator.
    forward : instance of Forward
        The forward operator.
    method : 'MNE' | 'dSPM' | 'sLORETA'
        Inverse methods (for apply_inverse).
    lambda2 : float
        The regularization parameter (for apply_inverse).

    Returns
    -------
    invmat : array, shape (n_dipoles, n_channels)
        Inverse matrix associated with inverse operator and specified
        parameters.
    """
    # make sure forward and inverse operators match with respect to
    # surface orientation
    _convert_forward_match_inv(forward, inverse_operator)

    info_inv = _prepare_info(inverse_operator)

    # only use channels that are good for inverse operator and forward sol
    ch_names_inv = info_inv["ch_names"]
    n_chs_inv = len(ch_names_inv)
    bads_inv = inverse_operator["info"]["bads"]

    # indices of bad channels
    ch_idx_bads = [ch_names_inv.index(ch) for ch in bads_inv]

    # create identity matrix as input for inverse operator
    # set elements to zero for non-selected channels
    id_mat = np.eye(n_chs_inv)

    # convert identity matrix to evoked data type (pretending it's an epoch)
    ev_id = EvokedArray(id_mat, info=info_inv, tmin=0.0)

    # apply inverse operator to identity matrix in order to get inverse matrix
    # free orientation constraint not possible because apply_inverse would
    # combine components

    # check if inverse operator uses fixed source orientations
    is_fixed_inv = _check_fixed_ori(inverse_operator)

    # choose pick_ori according to inverse operator
    if is_fixed_inv:
        pick_ori = None
    else:
        pick_ori = "vector"

    # columns for bad channels will be zero
    invmat_op = apply_inverse(
        ev_id, inverse_operator, lambda2=lambda2, method=method, pick_ori=pick_ori
    )

    # turn source estimate into numpy array
    invmat = invmat_op.data

    # remove columns for bad channels
    # take into account it may be 3D array
    invmat = np.delete(invmat, ch_idx_bads, axis=invmat.ndim - 1)

    # if 3D array, i.e. multiple values per location (fixed and loose),
    # reshape into 2D array
    if invmat.ndim == 3:
        v0o1 = invmat[0, 1].copy()
        v3o2 = invmat[3, 2].copy()
        shape = invmat.shape
        invmat = invmat.reshape(shape[0] * shape[1], shape[2])
        # make sure that reshaping worked
        assert np.array_equal(v0o1, invmat[1])
        assert np.array_equal(v3o2, invmat[11])

    logger.info(f"Dimension of Inverse Matrix: {invmat.shape}")

    return invmat


def _check_fixed_ori(inst):
    """Check if inverse or forward was computed for fixed orientations."""
    is_fixed = inst["source_ori"] != FIFF.FIFFV_MNE_FREE_ORI
    return is_fixed
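Taken together, the public functions in this file support a small pipeline: build the resolution matrix, then pull out PSFs (columns) or CTFs (rows) as SourceEstimates. A hedged sketch, reusing fwd and inv from the earlier sketch and assuming labels is a list of mne.Label (argument values are illustrative):

# Hedged sketch: resolution matrix, then PSFs/CTFs for labels (fwd, inv, labels loaded elsewhere).
from mne.minimum_norm import get_cross_talk, get_point_spread, make_inverse_resolution_matrix

resmat = make_inverse_resolution_matrix(fwd, inv, method="dSPM", lambda2=1.0 / 9.0)
stc_psf = get_point_spread(resmat, inv["src"], labels, mode="pca", n_comp=1, norm="max")
stc_ctf = get_cross_talk(resmat, inv["src"], labels, mode="maxval", norm="max")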
341  mne/minimum_norm/spatial_resolution.py  Normal file
@@ -0,0 +1,341 @@
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.

"""Compute resolution metrics from resolution matrix.

Resolution metrics: localisation error, spatial extent, relative amplitude.
Metrics can be computed for point-spread and cross-talk functions (PSFs/CTFs).
"""

import numpy as np

from ..source_estimate import SourceEstimate
from ..utils import _check_option, logger, verbose

@verbose
def resolution_metrics(
    resmat, src, function="psf", metric="peak_err", threshold=0.5, verbose=None
):
    """Compute spatial resolution metrics for linear solvers.

    Parameters
    ----------
    resmat : array, shape (n_orient * n_vertices, n_vertices)
        The resolution matrix.
        If not a square matrix and if the number of rows is a multiple of
        number of columns (e.g. free or loose orientations), then the Euclidean
        length per source location is computed (e.g. if inverse operator with
        free orientations was applied to forward solution with fixed
        orientations).
    src : instance of SourceSpaces
        Source space object from forward or inverse operator.
    function : 'psf' | 'ctf'
        Whether to compute metrics for columns (point-spread functions, PSFs)
        or rows (cross-talk functions, CTFs) of the resolution matrix.
    metric : str
        The resolution metric to compute. Allowed options are:

        Localization-based metrics:

        - ``'peak_err'`` Peak localization error (PLE), Euclidean distance
          between peak and true source location.
        - ``'cog_err'`` Centre-of-gravity localisation error (CoG), Euclidean
          distance between CoG and true source location.

        Spatial-extent-based metrics:

        - ``'sd_ext'`` Spatial deviation
          (e.g. :footcite:`MolinsEtAl2008,HaukEtAl2019`).
        - ``'maxrad_ext'`` Maximum radius to 50%% of max amplitude.

        Amplitude-based metrics:

        - ``'peak_amp'`` Ratio between absolute maximum amplitudes of peaks
          per location and maximum peak across locations.
        - ``'sum_amp'`` Ratio between sums of absolute amplitudes.

    threshold : float
        Amplitude fraction threshold for spatial extent metric 'maxrad_ext'.
        Defaults to 0.5.
    %(verbose)s

    Returns
    -------
    resolution_metric : instance of SourceEstimate
        The resolution metric.

    Notes
    -----
    For details, see :footcite:`MolinsEtAl2008,HaukEtAl2019`.

    .. versionadded:: 0.20

    References
    ----------
    .. footbibliography::
    """
    # Check if input options are valid
    metrics = ("peak_err", "cog_err", "sd_ext", "maxrad_ext", "peak_amp", "sum_amp")
    if metric not in metrics:
        raise ValueError(f'"{metric}" is not a recognized metric.')

    if function not in ["psf", "ctf"]:
        raise ValueError(f"Not a recognised resolution function: {function}.")

    if metric in ("peak_err", "cog_err"):
        resolution_metric = _localisation_error(
            resmat, src, function=function, metric=metric
        )

    elif metric in ("sd_ext", "maxrad_ext"):
        resolution_metric = _spatial_extent(
            resmat, src, function=function, metric=metric, threshold=threshold
        )

    elif metric in ("peak_amp", "sum_amp"):
        resolution_metric = _relative_amplitude(
            resmat, src, function=function, metric=metric
        )

    # get vertices from source space
    vertno_lh = src[0]["vertno"]
    vertno_rh = src[1]["vertno"]
    vertno = [vertno_lh, vertno_rh]

    # Convert array to source estimate
    resolution_metric = SourceEstimate(resolution_metric, vertno, tmin=0.0, tstep=1.0)

    return resolution_metric

def _localisation_error(resmat, src, function, metric):
    """Compute localisation error metrics for resolution matrix.

    Parameters
    ----------
    resmat : array, shape (n_orient * n_locations, n_locations)
        The resolution matrix.
        If not a square matrix and if the number of rows is a multiple of
        number of columns (i.e. n_orient>1), then the Euclidean length per
        source location is computed (e.g. if inverse operator with free
        orientations was applied to forward solution with fixed orientations).
    src : Source Space
        Source space object from forward or inverse operator.
    function : 'psf' | 'ctf'
        Whether to compute metrics for columns (point-spread functions, PSFs)
        or rows (cross-talk functions, CTFs).
    metric : str
        What type of localisation error to compute.

        - 'peak_err': Peak localisation error (PLE), Euclidean distance between
          peak and true source location, in centimeters.
        - 'cog_err': Centre-of-gravity localisation error (CoG), Euclidean
          distance between CoG and true source location, in centimeters.

    Returns
    -------
    locerr : array, shape (n_locations,)
        Localisation error per location (in cm).
    """
    # ensure resolution matrix is square
    # combine rows (Euclidean length) if necessary
    resmat = _rectify_resolution_matrix(resmat)
    locations = _get_src_locations(src)  # locs used in forw. and inv. operator
    locations = 100.0 * locations  # convert to cm (more common)
    # we want to use absolute values, but doing abs() makes a copy and this
    # can be quite expensive in memory. So let's just use abs() in place below.

    # The code below will operate on columns, so transpose if you want CTFs
    if function == "ctf":
        resmat = resmat.T

    # Euclidean distance between true location and maximum
    if metric == "peak_err":
        resmax = [abs(col).argmax() for col in resmat.T]  # max inds along cols
        maxloc = locations[resmax, :]  # locations of maxima
        diffloc = locations - maxloc  # diff btw true locs and maxima locs
        locerr = np.linalg.norm(diffloc, axis=1)  # Euclidean distance

    # centre of gravity
    elif metric == "cog_err":
        locerr = np.empty(locations.shape[0])  # initialise result array
        for ii, rr in enumerate(locations):
            resvec = abs(resmat[:, ii].T)  # corresponding column of resmat
            cog = resvec.dot(locations) / np.sum(resvec)  # centre of gravity
            locerr[ii] = np.sqrt(np.sum((rr - cog) ** 2))  # Euclidean distance

    return locerr

def _spatial_extent(resmat, src, function, metric, threshold=0.5):
    """Compute spatial width metrics for resolution matrix.

    Parameters
    ----------
    resmat : array, shape (n_orient * n_dipoles, n_dipoles)
        The resolution matrix.
        If not a square matrix and if the number of rows is a multiple of
        number of columns (i.e. n_orient>1), then the Euclidean length per
        source location is computed (e.g. if inverse operator with free
        orientations was applied to forward solution with fixed orientations).
    src : Source Space
        Source space object from forward or inverse operator.
    function : 'psf' | 'ctf'
        Whether to compute metrics for columns (PSFs) or rows (CTFs).
    metric : str
        What type of width metric to compute.

        - 'sd_ext': spatial deviation (e.g. Molins et al.), in centimeters.
        - 'maxrad_ext': maximum radius to fraction threshold of max amplitude,
          in centimeters.

    threshold : float
        Amplitude fraction threshold for metric 'maxrad_ext'. Defaults to 0.5.

    Returns
    -------
    width : array, shape (n_dipoles,)
        Spatial width metric per location.
    """
    locations = _get_src_locations(src)  # locs used in forw. and inv. operator
    locations = 100.0 * locations  # convert to cm (more common)

    # The code below will operate on columns, so transpose if you want CTFs
    if function == "ctf":
        resmat = resmat.T

    width = np.empty(resmat.shape[1])  # initialise output array

    # spatial deviation as in Molins et al.
    if metric == "sd_ext":
        for ii in range(locations.shape[0]):
            diffloc = locations - locations[ii, :]  # locs w/r/t true source
            locerr = np.sum(diffloc**2, 1)  # squared Eucl dists to true source
            resvec = abs(resmat[:, ii]) ** 2  # pick current column
            # spatial deviation (Molins et al, NI 2008, eq. 12)
            width[ii] = np.sqrt(np.sum(np.multiply(locerr, resvec)) / np.sum(resvec))

    # maximum radius to 50% of max amplitude
    elif metric == "maxrad_ext":
        for ii, resvec in enumerate(resmat.T):  # iterate over columns
            resvec = abs(resvec)  # operate on absolute values
            amps = resvec.max()
            # indices of elements with values larger than fraction threshold
            # of peak amplitude
            thresh_idx = np.where(resvec > threshold * amps)
            # get distances for those indices from true source position
            locs_thresh = locations[thresh_idx, :] - locations[ii, :]
            # get maximum distance
            width[ii] = np.sqrt(np.sum(locs_thresh**2, 1).max())

    return width

def _relative_amplitude(resmat, src, function, metric):
    """Compute relative amplitude metrics for resolution matrix.

    Parameters
    ----------
    resmat : array, shape (n_orient * n_dipoles, n_dipoles)
        The resolution matrix.
        If not a square matrix and if the number of rows is a multiple of
        number of columns (i.e. n_orient>1), then the Euclidean length per
        source location is computed (e.g. if inverse operator with free
        orientations was applied to forward solution with fixed orientations).
    src : Source Space
        Source space object from forward or inverse operator.
    function : 'psf' | 'ctf'
        Whether to compute metrics for columns (PSFs) or rows (CTFs).
    metric : str
        Which amplitudes to use.

        - 'peak_amp': Ratio between absolute maximum amplitudes of peaks per
          location and maximum peak across locations.
        - 'sum_amp': Ratio between sums of absolute amplitudes.

    Returns
    -------
    relamp : array, shape (n_dipoles,)
        Relative amplitude metric per location.
    """
    # The code below will operate on columns, so transpose if you want CTFs
    if function == "ctf":
        resmat = resmat.T

    # Ratio between amplitude at peak and global peak maximum
    if metric == "peak_amp":
        # maximum amplitudes per column
        maxamps = np.array([abs(col).max() for col in resmat.T])
        maxmaxamps = maxamps.max()  # global absolute maximum
        relamp = maxamps / maxmaxamps

    # ratio between sums of absolute amplitudes
    elif metric == "sum_amp":
        # sum of amplitudes per column
        sumamps = np.array([abs(col).sum() for col in resmat.T])
        sumampsmax = sumamps.max()  # maximum of summed amplitudes
        relamp = sumamps / sumampsmax

    return relamp

def _get_src_locations(src):
    """Get source positions from src object."""
    # vertices used in forward and inverse operator
    # for now let's just support surface source spaces
    _check_option("source space kind", src.kind, ("surface",))
    vertno_lh = src[0]["vertno"]
    vertno_rh = src[1]["vertno"]

    # locations corresponding to vertices for both hemispheres
    locations_lh = src[0]["rr"][vertno_lh, :]
    locations_rh = src[1]["rr"][vertno_rh, :]
    locations = np.vstack([locations_lh, locations_rh])

    return locations

def _rectify_resolution_matrix(resmat):
    """Ensure resolution matrix is a square matrix.

    If resmat is not a square matrix, it is assumed that the inverse operator
    had free or loose orientation constraint, i.e. multiple values per source
    location. The Euclidean length for values at each location is computed to
    make resmat a square matrix.
    """
    shape = resmat.shape
    if not shape[0] == shape[1]:
        if shape[0] < shape[1]:
            raise ValueError(
                f"Number of target sources ({shape[0]}) cannot be lower "
                f"than number of input sources ({shape[1]})"
            )

        if np.mod(shape[0], shape[1]):  # if ratio not integer
            raise ValueError(
                f"Number of target sources ({shape[0]}) must be a "
                f"multiple of the number of input sources ({shape[1]})"
            )

        ns = shape[0] // shape[1]  # number of source components per vertex

        # Combine rows of resolution matrix
        resmatl = [
            np.sqrt((resmat[ns * i : ns * (i + 1), :] ** 2).sum(axis=0))
            for i in np.arange(0, shape[1], dtype=int)
        ]

        resmat = np.array(resmatl)

        logger.info(
            "Rectified resolution matrix from (%d, %d) to (%d, %d).",
            shape[0],
            shape[1],
            resmat.shape[0],
            resmat.shape[1],
        )

    return resmat
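A hedged sketch of how resolution_metrics condenses the resolution matrix from the previous file into one value per source location (resmat and inv as in the earlier sketches):

# Hedged sketch: per-vertex resolution metrics from a resolution matrix (inputs from earlier sketches).
from mne.minimum_norm import resolution_metrics

src = inv["src"]  # SourceSpaces stored in the inverse operator
ple_psf = resolution_metrics(resmat, src, function="psf", metric="peak_err")
ext_ctf = resolution_metrics(resmat, src, function="ctf", metric="maxrad_ext", threshold=0.5)
# each result is a SourceEstimate with one value (in cm) per source location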
1217  mne/minimum_norm/time_frequency.py  Normal file
File diff suppressed because it is too large
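time_frequency.py is also collapsed, but per the __init__.pyi stub it provides compute_source_psd(_epochs), source_(band_)induced_power and apply_inverse_tfr_epochs. A heavily hedged sketch of typical calls, assuming epochs and inv are loaded elsewhere and that the signatures follow the public MNE API (band limits are illustrative):

# Heavily hedged sketch: source-space power estimates (epochs and inv loaded elsewhere).
from mne.minimum_norm import compute_source_psd_epochs, source_band_induced_power

bands = dict(alpha=[9, 11], beta=[18, 22])
stcs_band = source_band_induced_power(epochs, inv, bands, lambda2=1.0 / 9.0, method="dSPM")
stcs_psd = compute_source_psd_epochs(epochs, inv, lambda2=1.0 / 9.0, fmin=2.0, fmax=40.0)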