def test_ndvar_timeseries_methods():
    "Test NDVar time-series methods"
    ds = datasets.get_uts(True)
    x = ds['utsnd']
    xs = NDVar(x.x.swapaxes(1, 2), ('case', x.dims[2], x.dims[1]),
               x.info.copy(), x.name)

    # envelope
    env = x.envelope()
    assert_array_equal(env.x >= 0, True)
    envs = xs.envelope()
    assert_array_equal(env.x, envs.x.swapaxes(1, 2))

    # indexing
    eq_(len(ds[0, 'uts'][0.01:0.1].time), 9)

    # FFT
    x = ds['uts'].mean('case')
    np.sin(2 * np.pi * x.time.times, x.x)
    f = x.fft()
    assert_array_almost_equal(f.x, (f.frequency.x == 1) * (len(f) - 1))
    np.sin(4 * np.pi * x.time.times, x.x)
    f = x.fft()
    assert_array_almost_equal(f.x, (f.frequency.x == 2) * (len(f) - 1))

    # update tmin
    eq_(x.time.times[0], -0.2)
    x.time.set_tmin(3.2)
    eq_(x.time.times[0], 3.2)

def test_ndvar_binning():
    "Test NDVar.bin()"
    x = np.arange(10)
    time = UTS(-0.1, 0.1, 10)
    x_dst = x.reshape((5, 2)).mean(1)
    time_dst = np.arange(0., 0.9, 0.2)

    # 1-d
    ndvar = NDVar(x, (time,))
    b = ndvar.bin(0.2)
    assert_array_equal(b.x, x_dst, "Binned data")
    assert_array_equal(b.time.x, time_dst, "Bin times")
    b = ndvar.sub(time=(0, 0.8)).bin(0.4)
    eq_(b.shape, (2,))

    # 2-d
    ndvar = NDVar(np.vstack((x, x, x)), ('case', time))
    b = ndvar.bin(0.2)
    assert_array_equal(b.x, np.vstack((x_dst, x_dst, x_dst)), "Binned data")
    assert_array_equal(b.time.x, time_dst, "Bin times")

    # time:
    x = np.ones((5, 70))
    ndvar = NDVar(x, ('case', UTS(0.45000000000000007, 0.005, 70)))
    binned_ndvar = ndvar.bin(0.05)
    assert_array_equal(binned_ndvar.x, 1.)
    eq_(binned_ndvar.shape, (5, 7))

def test_ndvar_timeseries_methods():
    "Test NDVar time-series methods"
    ds = datasets.get_uts(True)
    x = ds['utsnd']
    xs = NDVar(x.x.swapaxes(1, 2), ('case', x.dims[2], x.dims[1]),
               x.info.copy(), x.name)

    # envelope
    env = x.envelope()
    assert_array_equal(env.x >= 0, True)
    envs = xs.envelope()
    assert_array_equal(env.x, envs.x.swapaxes(1, 2))

    # indexing
    eq_(len(ds[0, 'uts'][-10:-1].time), 9)

    # FFT
    x = ds['uts'].mean('case')
    np.sin(2 * np.pi * x.time.times, x.x)
    f = x.fft()
    assert_array_almost_equal(f.x, (f.frequency.x == 1) * (len(f) - 1))
    np.sin(4 * np.pi * x.time.times, x.x)
    f = x.fft()
    assert_array_almost_equal(f.x, (f.frequency.x == 2) * (len(f) - 1))

    # update tmin
    eq_(x.time.times[0], -0.2)
    x.time.set_tmin(3.2)
    eq_(x.time.times[0], 3.2)

def test_find_intervals():
    time = UTS(-5, 1, 10)
    x = NDVar([0, 1, 0, 1, 1, 0, 1, 1, 1, 0], (time,))
    eq_(find_intervals(x), ((-4, -3), (-2, 0), (1, 4)))
    x = NDVar([0, 1, 0, 1, 1, 0, 1, 1, 1, 1], (time,))
    eq_(find_intervals(x), ((-4, -3), (-2, 0), (1, 5)))
    x = NDVar([1, 1, 0, 1, 1, 0, 1, 1, 1, 1], (time,))
    eq_(find_intervals(x), ((-5, -3), (-2, 0), (1, 5)))

def test_glassbrain():
    ndvar = datasets.get_mne_stc(True, True)

    # source space only
    p = plot.GlassBrain(ndvar.source)
    p.close()

    # single time points
    ndvar_30 = ndvar.sub(time=0.030)
    p = plot.GlassBrain(ndvar_30)
    p.close()
    # without arrows
    p = plot.GlassBrain(ndvar_30, draw_arrows=False)
    p.close()

    # time series
    p = plot.GlassBrain(ndvar)
    p.set_time(.03)
    p.close()

    # masked data
    import numpy as np
    h = ndvar.sub(time=0.030)
    c = 6.15459575929912e-10  # precomputed _fast_abs_percentile(h)
    mask = h.norm('space') < c
    mask_x = np.repeat(h._ialign(mask), 3, h.get_axis('space'))
    mask = NDVar(mask_x, h.dims)
    y = h.mask(mask)
    p = plot.GlassBrain(y)
    p.close()

def test_smoothing():
    x = get_ndvar(2)
    xt = NDVar(x.x.swapaxes(1, 2), [x.dims[i] for i in [0, 2, 1]], x.name,
               x.info)

    # smoothing across time
    ma = x.smooth('time', 0.2, 'blackman')
    assert_dataobj_equal(x.smooth('time', window='blackman', window_samples=20), ma)
    with pytest.raises(TypeError):
        x.smooth('time')
    with pytest.raises(TypeError):
        x.smooth('time', 0.2, 'blackman', window_samples=20)
    mas = xt.smooth('time', 0.2, 'blackman')
    assert_allclose(ma.x, mas.x.swapaxes(1, 2), 1e-10)
    ma_mean = x.mean('case').smooth('time', 0.2, 'blackman')
    assert_allclose(ma.mean('case').x, ma_mean.x)
    # against raw scipy.signal
    window = signal.get_window('blackman', 20, False)
    window /= window.sum()
    window.shape = (1, 20, 1)
    assert_array_equal(ma.x[:, 10:-10], signal.convolve(x.x, window, 'same')[:, 10:-10])
    # mode parameter
    full = signal.convolve(x.x, window, 'full')
    ma = x.smooth('time', 0.2, 'blackman', mode='left')
    assert_array_equal(ma.x[:], full[:, :100])
    ma = x.smooth('time', 0.2, 'blackman', mode='right')
    assert_array_equal(ma.x[:], full[:, 19:])

    # fix_edges: smooth with constant sum
    xs = x.smooth('frequency', window_samples=1, fix_edges=True)
    assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'))
    xs = x.smooth('frequency', window_samples=2, fix_edges=True)
    assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)
    xs = x.smooth('frequency', window_samples=3, fix_edges=True)
    assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)
    xs = x.smooth('frequency', window_samples=5, fix_edges=True)
    assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)
    xs = x.smooth('frequency', window_samples=4, fix_edges=True)
    assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)

    # gaussian
    x = get_ndvar(2, frequency=0, sensor=5)
    x.smooth('sensor', 0.1, 'gaussian')
    x = get_ndvar(2, sensor=5)
    x.smooth('sensor', 0.1, 'gaussian')

def func(x: MUV, name: str = None, info: dict = None):
    if isinstance(x, NDVar):
        return NDVar(numpy_func(x.x), x.dims, name, info)
    elif isinstance(x, numpy.ndarray):
        return numpy_func(x)
    elif isinstance(x, Sequence):
        return [element_func(xi) for xi in x]
    else:
        return element_func(x)

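# Usage sketch for the dispatch template above: a minimal factory that
# supplies the ``numpy_func`` and ``element_func`` the template closes over.
# ``make_elementwise``, ``numpy.abs``, and the builtin ``abs`` are
# illustrative stand-ins, not part of the source.
import numpy
from collections.abc import Sequence

from eelbrain import NDVar


def make_elementwise(numpy_func, element_func):
    def func(x, name=None, info=None):
        if isinstance(x, NDVar):
            # apply the array function to the data, keep the dimensions
            return NDVar(numpy_func(x.x), x.dims, name, info)
        elif isinstance(x, numpy.ndarray):
            return numpy_func(x)
        elif isinstance(x, Sequence):
            return [element_func(xi) for xi in x]
        else:
            return element_func(x)
    return func


abs_func = make_elementwise(numpy.abs, abs)
assert abs_func([-1.5, 2.0]) == [1.5, 2.0]  # Sequence branch
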
def _load(self, path, tmin, tstep, n_samples, code, seed):
    x = load.unpickle(path)
    # allow for pre-computed resampled versions
    if isinstance(x, list):
        xs = x
        for x in xs:
            if x.time.tstep == tstep:
                break
        else:
            raise IOError(f"{os.path.basename(path)} does not contain tstep={tstep!r}")
    # continuous UTS
    if isinstance(x, NDVar):
        if x.time.tstep == tstep:
            pass
        elif self.resample == 'bin':
            x = x.bin(tstep, label='start')
        elif self.resample == 'resample':
            srate = 1 / tstep
            int_srate = int(round(srate))
            srate = int_srate if abs(int_srate - srate) < .001 else srate
            x = resample(x, srate)
        elif self.resample is None:
            raise RuntimeError(f"{os.path.basename(path)} has tstep={x.time.tstep}, not {tstep}")
        else:
            raise RuntimeError(f"resample={self.resample!r}")
        x = pad(x, tmin, nsamples=n_samples)
    # NUTS
    elif isinstance(x, Dataset):
        ds = x
        if code.shuffle in ('permute', 'relocate'):
            rng = numpy.random.RandomState(seed)
            if code.shuffle == 'permute':
                index = ds['permute'].x
                assert index.dtype.kind == 'b'
                values = ds[index, 'value'].x
                rng.shuffle(values)
                ds[index, 'value'] = values
            else:
                rng.shuffle(ds['value'].x)
            code.register_shuffle()
        x = NDVar(numpy.zeros(n_samples), UTS(tmin, tstep, n_samples),
                  name=code.code_with_rand)
        ds = ds[ds['time'] < x.time.tstop]
        for t, v in ds.zip('time', 'value'):
            x[t] = v
    else:
        raise TypeError(f'{x!r} at {path}')
    if code.shuffle in NDVAR_SHUFFLE_METHODS:
        x = shuffle(x, code.shuffle, code.shuffle_band, code.shuffle_angle)
        code.register_shuffle()
    return x

def run_as_ndanova(y, x, ds):
    yt = ds.eval(y).x[:, None]
    y2 = np.concatenate((yt, yt * 2), 1)
    ndvar = NDVar(y2, ('case', UTS(0, 0.1, 2)))
    res = testnd.anova(ndvar, x, ds=ds)
    f1 = [fmap.x[0] for fmap in res.f]
    f2 = [fmap.x[1] for fmap in res.f]
    for f1_, f2_ in zip(f1, f2):
        assert f1_ == f2_
    return f1

def test_resample():
    x = NDVar([0.0, 1.0, 1.4, 1.0, 0.0], UTS(0, 0.1, 5)).mask(
        [True, False, False, False, True])
    y = resample(x, 20)
    assert_array_equal(y.x.mask, [True, False, False, False, False,
                                  False, False, False, True, True])
    y = resample(x, 20, npad=0)
    assert_array_equal(y.x.mask, [True, False, False, False, False,
                                  False, False, False, True, True])

def test_find_peaks():
    scalar = Scalar('scalar', range(9))
    time = UTS(0, .1, 12)
    v = NDVar(np.zeros((9, 12)), (scalar, time))
    wsize = [0, 0, 1, 2, 3, 2, 1, 0, 0]
    for i, s in enumerate(wsize):
        if s:
            v.x[i, 5 - s: 5 + s] += np.hamming(2 * s)

    peaks = find_peaks(v)
    x, y = np.where(peaks.x)
    assert_array_equal(x, [4])
    assert_array_equal(y, [5])

def test_frequency_response():
    b_array = signal.firwin(80, 0.5, window=('kaiser', 8))
    freqs_array, fresp_array = signal.freqz(b_array)
    hz_to_rad = 2 * np.pi * 0.01
    b = NDVar(b_array, (UTS(0, 0.01, 80),))
    fresp = frequency_response(b)
    assert_array_equal(fresp.x, fresp_array)
    assert_array_equal(fresp.frequency.values * hz_to_rad, freqs_array)
    b2d = concatenate((b, b), Case)
    fresp = frequency_response(b2d)
    assert_array_equal(fresp.x[0], fresp_array)
    assert_array_equal(fresp.x[1], fresp_array)
    assert_array_equal(fresp.frequency.values * hz_to_rad, freqs_array)

def apply_receptive_field(stimulus, rf, clip=True, name=None):
    """Temporal suppression

    Two ways of conceptualizing:

    - STRF with center-surround field, edge detector
    - Envelope input exerts suppression of responses in the future
    """
    if name is None:
        name = stimulus.name
    tdim = stimulus.get_dim('time')
    fdim = stimulus.get_dim('frequency')
    assert tdim.tstep == 0.001
    stim_data = stimulus.get_data(('frequency', 'time'))
    out = _apply_rf_array(stim_data, rf, clip)
    return NDVar(out, (fdim, tdim), name, stimulus.info)

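# Hypothetical usage sketch for apply_receptive_field(). The spectrogram,
# the ``rf`` kernel, and its shape are illustrative assumptions; the shape
# ``rf`` actually requires depends on ``_apply_rf_array``, which is not
# shown here.
import numpy as np
from eelbrain import NDVar, UTS, Scalar

frequency = Scalar('frequency', [500, 1000, 2000], 'Hz')
time = UTS(0, 0.001, 1000)  # the function asserts tstep == 0.001 (1 ms)
stimulus = NDVar(np.random.rand(3, 1000), (frequency, time), name='spectrogram')
rf = np.array([1.0, -0.5, -0.25])  # assumed suppressive kernel over time lags
out = apply_receptive_field(stimulus, rf)  # NDVar with (frequency, time) dims
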
def _generate_continuous(
        self,
        uts: UTS,  # time axis for the output
        ds: Dataset,  # events
        stim_var: str,
        code: Code,
        directory: Path,
):
    # place multiple input files into a continuous predictor
    cache = {
        stim: self._load(uts.tstep, code.with_stim(stim).nuts_file_name(self.columns), directory)
        for stim in ds[stim_var].cells
    }
    # determine type
    stim_type = {type(s) for s in cache.values()}
    assert len(stim_type) == 1
    stim_type = stim_type.pop()
    # generate x
    if stim_type is Dataset:
        dss = []
        for t, stim in ds.zip('T_relative', stim_var):
            x = cache[stim].copy()
            x['time'] += t
            dss.append(x)
            if code.nuts_method:
                x_stop_ds = t_stop_ds(x, t)
                dss.append(x_stop_ds)
        x = self._ds_to_ndvar(combine(dss), uts, code)
    elif stim_type is NDVar:
        v = cache[ds[0, stim_var]]
        dimnames = v.get_dimnames(first='time')
        dims = (uts, *v.get_dims(dimnames[1:]))
        x = NDVar.zeros(dims, code.key)
        for t, stim in ds.zip('T_relative', stim_var):
            x_stim = cache[stim]
            i_start = uts._array_index(t + x_stim.time.tmin)
            i_stop = i_start + len(x_stim.time)
            if i_stop > len(uts):
                raise ValueError(f"{code.string_without_rand} for {stim} is longer than the data")
            x.x[i_start:i_stop] = x_stim.get_data(dimnames)
    else:
        raise RuntimeError(f"stim_type={stim_type!r}")
    return x

def h(self):
    """The spatio-temporal response function as (list of) NDVar"""
    n_vars = sum(len(dim) if dim else 1 for dim in self._stim_dims)
    if n_vars > 1:
        shape = (self.theta.shape[0], n_vars, -1)
        trf = self.theta.reshape(shape)
        trf = trf.swapaxes(1, 0)
    else:
        trf = self.theta[np.newaxis, :]

    trf = np.dot(trf, self._basis.T) / self.lead_field_scaling
    time = UTS(self.tstart, self.tstep, trf.shape[-1])
    if self.space:
        shared_dims = (self.source, self.space, time)
    else:
        shared_dims = (self.source, time)
    trf = trf.reshape((-1, *(map(len, shared_dims))))

    h = []
    i = 0
    for dim, name in zip(self._stim_dims, self._stim_names):
        if dim:
            dims = (dim, *shared_dims)
            i1 = i + len(dim)
            x = trf[i:i1]
            i = i1
        else:
            dims = shared_dims
            x = trf[i]
            i += 1
        h.append(NDVar(x, dims, name=name))

    if self._stim_is_single:
        return h[0]
    else:
        return h

def test_mask():
    ds = datasets.get_uts(True)

    x = NDVar([1, 2, 3], Case)
    assert x.mean() == 2.0
    y = x.mask([True, False, False])
    assert y.mean() == 2.5

    # multi-dimensional
    y = ds[:2, 'utsnd'].copy()
    mask_x = y.time.times >= 0.500
    mask_ndvar = NDVar(mask_x, y.time)
    y_masked = y.mask(mask_ndvar)
    assert_array_equal(y_masked.x.mask[:, :, 70:], True)
    assert_array_equal(y_masked.x.mask[:, :, :70], False)

    # mask that is smaller than array
    mask = mask_ndvar.sub(time=(0.100, None))
    with pytest.raises(TypeError):
        y.mask(mask)
    y_masked = y.mask(mask, missing=True)
    assert_array_equal(y_masked.x.mask[:, :, 70:], True)
    assert_array_equal(y_masked.x.mask[:, :, 30:70], False)
    assert_array_equal(y_masked.x.mask[:, :, :30], True)

def test_clusterdist():
    "Test _ClusterDist class"
    shape = (10, 6, 6, 4)
    locs = [[0, 0, 0],
            [1, 0, 0],
            [1, 1, 0],
            [0, 1, 0]]
    x = np.random.normal(0, 1, shape)
    dims = ('case', UTS(-0.1, 0.1, 6), Ordered('dim2', range(6), 'unit'),
            Sensor(locs, ['0', '1', '2', '3'], connect_dist=1.1))
    Y = NDVar(x, dims)

    # test connecting sensors
    logger.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, :2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(Y, 0, 1.5)
    print(repr(cdist))
    cdist.add_original(pmap)
    print(repr(cdist))
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))
    assert_equal(cdist.parameter_map.dims, Y.dims[1:])

    # test connecting many sensors
    logger.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(Y, 0, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))

    # test keeping sensors separate
    logger.info("TEST: keeping sensors separate")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, 0] = True
    bin_map[:3, :3, 2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(Y, 1, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 2)

    # TFCE
    logger.info("TEST: TFCE")
    dims = ('case', UTS(-0.1, 0.1, 4),
            Sensor(locs, ['0', '1', '2', '3'], connect_dist=1.1),
            Ordered('dim2', range(10), 'unit'))
    Y = NDVar(np.random.normal(0, 1, (10, 4, 4, 10)), dims)
    cdist = _ClusterDist(Y, 3, None)
    cdist.add_original(Y.x[0])
    for i in range(1, 4):
        cdist.add_perm(Y.x[i])
    assert_equal(cdist.dist.shape, (3,))
    # I/O
    string = pickle.dumps(cdist, pickle.HIGHEST_PROTOCOL)
    cdist_ = pickle.loads(string)
    assert_equal(repr(cdist_), repr(cdist))

    # find peaks
    x = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [7, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [5, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 7, 5, 5, 0, 0],
                   [0, 0, 0, 0, 5, 4, 4, 4, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 4, 0, 0],
                   [0, 0, 0, 0, 7, 0, 0, 3, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
    tgt = np.equal(x, 7)
    peaks = cdist._find_peaks(x)
    logging.debug(' detected: \n%s' % (peaks.astype(int)))
    logging.debug(' target: \n%s' % (tgt.astype(int)))
    assert_array_equal(peaks, tgt)

    mps = False, True
    thresholds = (None, 'tfce')
    for mp, threshold in product(mps, thresholds):
        logger.info("TEST: multiprocessing=%r, threshold=%r" % (mp, threshold))
        _testnd.multiprocessing = mp

        # test keeping dimension
        cdist = _ClusterDist(Y, 5, threshold, dist_dim='sensor')
        print(repr(cdist))
        cdist.add_original(Y.x[0])
        print(repr(cdist))
        for i in range(1, 6):
            cdist.add_perm(Y.x[i])
        print(repr(cdist))
        assert_equal(cdist.dist.shape, (5, 4))

        # test keeping time bins
        cdist = _ClusterDist(Y, 5, threshold, dist_tstep=0.2)
        cdist.add_original(Y.x[0])
        for i in range(1, 6):
            cdist.add_perm(Y.x[i])
        assert_equal(cdist.dist.shape, (5, 2))
        assert_raises(ValueError, _ClusterDist, Y, 5, threshold, dist_tstep=0.3)

        # test keeping dimension and time bins
        cdist = _ClusterDist(Y, 5, threshold, dist_dim='sensor', dist_tstep=0.2)
        cdist.add_original(Y.x[0])
        for i in range(1, 6):
            cdist.add_perm(Y.x[i])
        assert_equal(cdist.dist.shape, (5, 4, 2))

        # test keeping 2 dimensions and time bins
        cdist = _ClusterDist(Y, 5, threshold, dist_dim=('sensor', 'dim2'),
                             dist_tstep=0.2)
        cdist.add_original(Y.x[0])
        for i in range(1, 6):
            cdist.add_perm(Y.x[i])
        assert_equal(cdist.dist.shape, (5, 4, 2, 10))

def pad(
        ndvar: NDVar,
        tstart: float = None,
        tstop: float = None,
        nsamples: int = None,
        set_tmin: bool = False,
        name: str = None,
) -> NDVar:
    """Pad (or crop) an NDVar in time

    Parameters
    ----------
    ndvar
        NDVar to pad.
    tstart
        New tstart.
    tstop
        New tstop.
    nsamples
        New number of samples.
    set_tmin
        Reset ``tmin`` to be exactly equal to ``tstart``.
    name
        Name for the new NDVar.
    """
    axis = ndvar.get_axis('time')
    time: UTS = ndvar.dims[axis]
    if name is None:
        name = ndvar.name
    # start
    if tstart is None:
        if set_tmin:
            raise ValueError("set_tmin without defining tstart")
        if nsamples is not None:
            raise NotImplementedError("nsamples without tstart")
        n_add_start = 0
    elif tstart < time.tmin:
        n_add_start = int(ceil((time.tmin - tstart) / time.tstep))
    elif tstart > time.tmin:
        n_add_start = -time._array_index(tstart)
    else:
        n_add_start = 0
    # end
    if nsamples is None and tstop is None:
        n_add_end = 0
    elif nsamples is None:
        n_add_end = int((tstop - time.tstop) // time.tstep)
    elif tstop is None:
        n_add_end = nsamples - n_add_start - time.nsamples
    else:
        raise TypeError("Can only specify one of tstop and nsamples")
    # need to pad?
    if not n_add_start and not n_add_end:
        return ndvar
    # construct padded data
    xs = [ndvar.x]
    shape = ndvar.x.shape
    # start
    if n_add_start > 0:
        shape_start = shape[:axis] + (n_add_start,) + shape[axis + 1:]
        xs.insert(0, np.zeros(shape_start))
    elif n_add_start < 0:
        xs[0] = xs[0][index(slice(-n_add_start, None), axis)]
    # end
    if n_add_end > 0:
        shape_end = shape[:axis] + (n_add_end,) + shape[axis + 1:]
        xs += (np.zeros(shape_end),)
    elif n_add_end < 0:
        xs[-1] = xs[-1][index(slice(None, n_add_end), axis)]
    x = np.concatenate(xs, axis)
    if set_tmin:
        new_tmin = tstart
    else:
        new_tmin = time.tmin - (time.tstep * n_add_start)
    new_time = UTS(new_tmin, time.tstep, x.shape[axis])
    dims = (*ndvar.dims[:axis], new_time, *ndvar.dims[axis + 1:])
    return NDVar(x, dims, name, ndvar.info)

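# Usage sketch for pad() (a sketch; the values are illustrative): a
# ``tstart`` before the existing time axis prepends zeros, while a
# ``tstart`` inside the existing time axis crops.
import numpy as np
from eelbrain import NDVar, UTS

x = NDVar(np.ones(10), UTS(0, 0.1, 10))
padded = pad(x, tstart=-0.5)   # 5 zero samples prepended
assert padded.time.tmin == -0.5 and padded.x[:5].sum() == 0
cropped = pad(x, tstart=0.3)   # negative padding crops the start
assert cropped.time.tmin == 0.3 and len(cropped.time) == 7
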
def _ds_to_ndvar(self, ds: Dataset, uts: UTS, code: Code):
    if self.columns:
        column_key, mask_key = code.nuts_columns
        if column_key is None:
            column_key = 'value'
            ds[:, column_key] = 1
    else:
        column_key = 'value'
        mask_key = 'mask' if 'mask' in ds else None

    if mask_key:
        mask = ds[mask_key].x
        assert mask.dtype.kind == 'b', "'mask' must be boolean"
    else:
        mask = None

    if code.shuffle_index:
        shuffle_mask = ds[code.shuffle_index].x
        if shuffle_mask.dtype.kind != 'b':
            raise code.error("shuffle index must be boolean", -1)
        elif code.shuffle == 'permute' and mask is not None:
            assert not numpy.any(shuffle_mask[~mask])
    elif code.shuffle == 'permute':
        shuffle_mask = mask
    else:
        shuffle_mask = None

    if code.shuffle == 'remask':
        if mask is None:
            raise code.error("$remask for predictor without mask", -1)
        rng = code._get_rng()
        if shuffle_mask is None:
            rng.shuffle(mask)
        else:
            remask = mask[shuffle_mask]
            rng.shuffle(remask)
            mask[shuffle_mask] = remask
        code.register_shuffle(index=True)

    if mask is not None:
        ds[column_key] *= mask

    if code.shuffle == 'permute':
        rng = code._get_rng()
        if shuffle_mask is None:
            rng.shuffle(ds[column_key].x)
        else:
            values = ds[column_key].x[shuffle_mask]
            rng.shuffle(values)
            ds[column_key].x[shuffle_mask] = values
        code.register_shuffle(index=True)

    # prepare output NDVar
    if code.nuts_method == 'is':
        dim = Categorial('representation', ('step', 'impulse'))
        x = NDVar(numpy.zeros((2, len(uts))), (dim, uts), name=code.key)
        x_step, x_impulse = x
    else:
        x = NDVar(numpy.zeros(len(uts)), uts, name=code.key)
        if code.nuts_method == 'step':
            x_step, x_impulse = x, None
        elif not code.nuts_method:
            x_step, x_impulse = None, x
        else:
            raise code.error(f"NUTS-method={code.nuts_method!r}")

    # fill in values
    ds = ds[ds['time'] < uts.tstop]
    if x_impulse is not None:
        for t, v in ds.zip('time', column_key):
            x_impulse[t] = v
    if x_step is not None:
        t_stops = ds[1:, 'time']
        if ds[-1, column_key] != 0:
            if 'tstop' not in ds.info:
                raise code.error("For step representation, the predictor dataset needs to contain ds.info['tstop'] to determine the end of the last step", -1)
            t_stops = chain(t_stops, [ds.info['tstop']])
        for t0, t1, v in zip(ds['time'], t_stops, ds[column_key]):
            x_step[t0:t1] = v
    return x

def test_clusterdist():
    "Test _ClusterDist class"
    shape = (10, 6, 6, 4)
    locs = [[0, 0, 0],
            [1, 0, 0],
            [1, 1, 0],
            [0, 1, 0]]
    x = np.random.normal(0, 1, shape)
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 6), Ordered('dim2', range(6), 'unit'), sensor)
    y = NDVar(x, dims)

    # test connecting sensors
    logger.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, :2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    print(repr(cdist))
    cdist.add_original(pmap)
    print(repr(cdist))
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))
    assert_equal(cdist.parameter_map.dims, y.dims[1:])

    # test connecting many sensors
    logger.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))

    # test keeping sensors separate
    logger.info("TEST: keeping sensors separate")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, 0] = True
    bin_map[:3, :3, 2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 1, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 2)

    # criteria
    ds = datasets.get_uts(True)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05)
    assert_less(res.clusters['duration'].min(), 0.01)
    eq_(res.clusters['n_sensors'].min(), 1)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05,
                           mintime=0.02, minsensor=2)
    assert_greater_equal(res.clusters['duration'].min(), 0.02)
    eq_(res.clusters['n_sensors'].min(), 2)

    # TFCE
    logger.info("TEST: TFCE")
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 4), sensor, Ordered('dim2', range(10), 'unit'))
    y = NDVar(np.random.normal(0, 1, (10, 4, 4, 10)), dims)
    cdist = _ClusterDist(y, 3, None)
    cdist.add_original(y.x[0])
    cdist.finalize()
    assert_equal(cdist.dist.shape, (3,))
    # I/O
    string = pickle.dumps(cdist, pickle.HIGHEST_PROTOCOL)
    cdist_ = pickle.loads(string)
    assert_equal(repr(cdist_), repr(cdist))

    # find peaks
    x = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [7, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [5, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 7, 5, 5, 0, 0],
                   [0, 0, 0, 0, 5, 4, 4, 4, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 4, 0, 0],
                   [0, 0, 0, 0, 7, 0, 0, 3, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
    tgt = np.equal(x, 7)
    peaks = cdist._find_peaks(x)
    logging.debug(' detected: \n%s' % (peaks.astype(int)))
    logging.debug(' target: \n%s' % (tgt.astype(int)))
    assert_array_equal(peaks, tgt)

    mps = False, True
    thresholds = (None, 'tfce')
    for mp, threshold in product(mps, thresholds):
        logger.info("TEST: multiprocessing=%r, threshold=%r" % (mp, threshold))
        _testnd.multiprocessing = mp

        # test keeping dimension
        cdist = _ClusterDist(y, 5, threshold, dist_dim='sensor')
        print(repr(cdist))
        cdist.add_original(y.x[0])
        print(repr(cdist))
        assert_equal(cdist.dist.shape, (5, 4))

        # test keeping time bins
        cdist = _ClusterDist(y, 5, threshold, dist_tstep=0.2)
        cdist.add_original(y.x[0])
        assert_equal(cdist.dist.shape, (5, 2))
        assert_raises(ValueError, _ClusterDist, y, 5, threshold, dist_tstep=0.3)

        # test keeping dimension and time bins
        cdist = _ClusterDist(y, 5, threshold, dist_dim='sensor', dist_tstep=0.2)
        cdist.add_original(y.x[0])
        assert_equal(cdist.dist.shape, (5, 4, 2))

        # test keeping 2 dimensions and time bins
        cdist = _ClusterDist(y, 5, threshold, dist_dim=('sensor', 'dim2'),
                             dist_tstep=0.2)
        cdist.add_original(y.x[0])
        assert_equal(cdist.dist.shape, (5, 4, 2, 10))

def saturate(x, c=10):
    x_out = 2 / (1 + np.e ** (-x.x / c)) - 1
    return NDVar(x_out, x.dims, name=f'c={c}')

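# Numeric sanity check for saturate() (a sketch; assumes the eelbrain
# imports used throughout this module). Note that 2 / (1 + e**(-x/c)) - 1
# equals tanh(x / (2 * c)), so output values lie in (-1, 1).
import numpy as np
from eelbrain import NDVar, UTS

x = NDVar(np.array([0.0, 10.0, 100.0]), UTS(0, 0.1, 3))
y = saturate(x, c=10)
print(y.x)     # approximately [0.0, 0.4621, 0.9999]
print(y.name)  # 'c=10'
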
def test_ndvar():
    "Test the NDVar class"
    ds = datasets.get_uts(utsnd=True)
    x = ds['utsnd']

    # meaningful slicing
    assert_raises(KeyError, x.sub, sensor='5')
    assert_equal(x.sub(sensor='4'), x.x[:, 4])
    assert_equal(x.sub(sensor=['4', '3', '2']), x.x[:, [4, 3, 2]])
    assert_equal(x.sub(sensor=['4']), x.x[:, [4]])
    assert_equal(x.sub(case=1, sensor='4'), x.x[1, 4])

    # setup indices
    s_case = slice(10, 13)
    s_sensor = slice('2', '4')
    s_time = slice(0.1, 0.2)
    b_case = np.bincount([10, 11, 12], minlength=len(x)).astype(bool)
    b_sensor = np.array([False, False, True, True, False])
    b_time = np.bincount(range(30, 40), minlength=len(x.time)).astype(bool)
    a_case = np.arange(10, 13)
    a_sensor = ['2', '3']
    a_time = np.arange(0.1, 0.2, 0.01)

    # slicing with different index kinds
    tgt = x.x[s_case, 2:4, 30:40]
    eq_(tgt.shape, (3, 2, 10))
    # single
    assert_equal(x.sub(case=s_case, sensor=s_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=a_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=b_sensor, time=b_time), tgt)
    # bool & slice
    assert_equal(x.sub(case=b_case, sensor=s_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=b_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=s_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=b_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=b_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=s_sensor, time=b_time), tgt)
    # bool & array
    assert_equal(x.sub(case=b_case, sensor=a_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=b_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=a_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=b_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=b_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=a_sensor, time=b_time), tgt)
    # slice & array
    assert_equal(x.sub(case=s_case, sensor=a_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=s_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=a_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=s_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=s_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=a_sensor, time=s_time), tgt)
    # all three
    assert_equal(x.sub(case=a_case, sensor=b_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=a_case, sensor=s_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=a_sensor, time=s_time), tgt)
    assert_equal(x.sub(case=b_case, sensor=s_sensor, time=a_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=a_sensor, time=b_time), tgt)
    assert_equal(x.sub(case=s_case, sensor=b_sensor, time=a_time), tgt)

    # norm
    y = x / x.norm('sensor')
    assert_allclose(y.norm('sensor'), 1.)
    y = ds['uts'].mean('case').norm('time')
    assert_is_instance(y, float)

    # Var
    v_case = Var(b_case)
    assert_equal(x.sub(case=v_case, sensor=b_sensor, time=a_time), tgt)

    # univariate result
    assert_dataobj_equal(x.sub(sensor='2', time=0.1), Var(x.x[:, 2, 30], x.name))
    eq_(x.sub(case=0, sensor='2', time=0.1), x.x[0, 2, 30])

    # baseline correction
    x_bl = x - x.summary(time=(None, 0))
    # assert that the baseline is 0
    bl = x_bl.summary('case', 'sensor', time=(None, 0))
    ok_(abs(bl) < 1e-10, "Baseline correction")

    # NDVar as index
    sens_mean = x.mean(('case', 'time'))
    idx = sens_mean > 0
    pos = sens_mean[idx]
    assert_array_equal(pos.x > 0, True)

    # NDVar as index along one dimension
    x_tc = x.sub(sensor='1')
    x_time = NDVar(x_tc.time.times >= 0.3, dims=(x_tc.time,))
    assert_dataobj_equal(x_tc[x_time], x_tc.sub(time=(0.3, None)))

    # out of range index
    assert_raises(ValueError, x.sub, time=(0.1, 0.81))
    assert_raises(IndexError, x.sub, time=(-0.25, 0.1))

    # iteration
    for i, xi in enumerate(x):
        assert_dataobj_equal(xi, x[i])
        if i > 4:
            break

def gammatone_bank(wav: NDVar, f_min: float, f_max: float, n: int,
                   integration_window: float = 0.010, tstep: float = None,
                   location: str = 'right', pad: bool = True,
                   name: str = None) -> NDVar:
    """Gammatone filterbank response

    Parameters
    ----------
    wav : NDVar
        Sound input.
    f_min : scalar
        Lower frequency cutoff.
    f_max : scalar
        Upper frequency cutoff.
    n : int
        Number of filter channels.
    integration_window : scalar
        Integration time window in seconds (default 10 ms).
    tstep : scalar
        Time step size in the output (default is same as ``wav``).
    location : str
        Location of the output relative to the input time axis:

        - ``right``: gammatone sample at end of integration window (default)
        - ``left``: gammatone sample at beginning of integration window
        - ``center``: gammatone sample at center of integration window

        Since the gammatone filter response depends on ``integration_window``,
        the filter response will be delayed relative to the analytic envelope.
        To ignore this delay, use ``location='left'``.
    pad : bool
        Pad output to match time axis of input.
    name : str
        NDVar name (default is ``wav.name``).

    Notes
    -----
    Requires the ``fmax`` branch of the gammatone library to be installed::

        $ pip install https://github.com/christianbrodbeck/gammatone/archive/fmax.zip
    """
    from gammatone.filters import centre_freqs
    from gammatone.gtgram import gtgram

    tmin = wav.time.tmin
    wav_ = wav
    if location == 'left':
        if pad:
            wav_ = _pad_func(wav, wav.time.tmin - integration_window)
    elif location == 'right':
        # tmin += window_time
        if pad:
            wav_ = _pad_func(wav, tstop=wav.time.tstop + integration_window)
    elif location == 'center':
        dt = integration_window / 2
        # tmin += dt
        if pad:
            wav_ = _pad_func(wav, wav.time.tmin - dt, wav.time.tstop + dt)
    else:
        raise ValueError(f"location={location!r}")
    sfreq = 1 / wav.time.tstep
    if tstep is None:
        tstep = wav.time.tstep
    x = gtgram(wav_.get_data('time'), sfreq, integration_window, tstep, n, f_min, f_max)
    freqs = centre_freqs(sfreq, n, f_min, f_max)
    # freqs = np.round(freqs, out=freqs).astype(int)
    freq_dim = Scalar('frequency', freqs[::-1], 'Hz')
    time_dim = UTS(tmin, tstep, x.shape[1])
    return NDVar(x, (freq_dim, time_dim), name or wav.name)

def test_ndvar_indexing():
    ds = datasets.get_uts(utsnd=True)
    x = ds['utsnd']

    # case
    test_ndvar_index(x, 'case', 1, 1)
    test_ndvar_index(x, 'case', [0, 3], [0, 3])
    test_ndvar_index(x, 'case', slice(0, 10, 2), slice(0, 10, 2))

    # sensor
    test_ndvar_index(x, 'sensor', '0', 0)
    test_ndvar_index(x, 'sensor', ['0', '2'], [0, 2])
    test_ndvar_index(x, 'sensor', slice('0', '2'), slice(0, 2))
    test_ndvar_index(x, 'sensor', 0, 0, False)
    test_ndvar_index(x, 'sensor', [0, 2], [0, 2], False)
    test_ndvar_index(x, 'sensor', slice(0, 2), slice(0, 2), False)

    # time
    test_ndvar_index(x, 'time', 0, 20)
    test_ndvar_index(x, 'time', 0.1, 30)
    test_ndvar_index(x, 'time', 0.102, 30, False)
    test_ndvar_index(x, 'time', [0, 0.1, 0.2], [20, 30, 40])
    test_ndvar_index(x, 'time', slice(0.1, None), slice(30, None))
    test_ndvar_index(x, 'time', slice(0.2), slice(40))
    test_ndvar_index(x, 'time', slice(0.202), slice(41), False)
    test_ndvar_index(x, 'time', slice(0.1, 0.2), slice(30, 40))
    test_ndvar_index(x, 'time', slice(0.102, 0.2), slice(31, 40), False)
    test_ndvar_index(x, 'time', slice(0.1, None, 0.1), slice(30, None, 10))
    test_ndvar_index(x, 'time', slice(0.1, None, 1), slice(30, None, 100))

    # Ordered
    x = cwt_morlet(ds['uts'], [8, 10, 13, 17])
    assert_raises(IndexError, x.__getitem__, (full_slice, 9))
    assert_raises(IndexError, x.__getitem__, (full_slice, 6))
    test_ndvar_index(x, 'frequency', 10, 1)
    test_ndvar_index(x, 'frequency', 10.1, 1, False)
    test_ndvar_index(x, 'frequency', 9.9, 1, False)
    test_ndvar_index(x, 'frequency', [8.1, 10.1], [0, 1], False)
    test_ndvar_index(x, 'frequency', slice(8, 13), slice(0, 2))
    test_ndvar_index(x, 'frequency', slice(8, 13.1), slice(0, 3), False)
    test_ndvar_index(x, 'frequency', slice(8, 13.1, 2), slice(0, 3, 2), False)

    # Categorial
    x = NDVar(x.x, ('case', Categorial('cat', ['8', '10', '13', '17']), x.time))
    assert_raises(TypeError, x.__getitem__, (full_slice, 9))
    assert_raises(IndexError, x.__getitem__, (full_slice, '9'))
    test_ndvar_index(x, 'cat', '13', 2)
    test_ndvar_index(x, 'cat', ['8', '13'], [0, 2])
    test_ndvar_index(x, 'cat', slice('8', '13'), slice(0, 2))
    test_ndvar_index(x, 'cat', slice('8', None, 2), slice(0, None, 2))

    # SourceSpace
    x = datasets.get_mne_stc(True)
    assert_raises(TypeError, x.__getitem__, slice('insula-rh'))
    assert_raises(TypeError, x.__getitem__, slice('insula-lh', 'insula-rh'))
    assert_raises(TypeError, x.__getitem__, ('insula-lh', 'insula-rh'))
    test_ndvar_index(x, 'source', 'L90', 90)
    test_ndvar_index(x, 'source', 'R90', 642 + 90)
    test_ndvar_index(x, 'source', ['L90', 'R90'], [90, 642 + 90])
    test_ndvar_index(x, 'source', slice('L90', 'R90'), slice(90, 642 + 90))
    test_ndvar_index(x, 'source', 90, 90, False)
    test_ndvar_index(x, 'source', [90, 95], [90, 95], False)
    test_ndvar_index(x, 'source', slice(90, 95), slice(90, 95), False)
    test_ndvar_index(x, 'source', 'insula-lh', x.source.parc == 'insula-lh', False)
    test_ndvar_index(x, 'source', ('insula-lh', 'insula-rh'),
                     x.source.parc.isin(('insula-lh', 'insula-rh')), False)
    n_lh = x.source.parc.endswith('lh').sum()
    test_ndvar_index(x, 'source', 'lh', slice(n_lh), False)
    test_ndvar_index(x, 'source', 'rh', slice(n_lh, None), False)

    # argmax
    x.x[10, 10] = 20
    eq_(x.argmax(), ('L10', 0.1))
    eq_(x[('L10', 0.1)], 20)
    eq_(x.sub(source='L10').argmax(), 0.1)
    eq_(x.sub(time=0.1).argmax(), 'L10')

def gammatone_bank(
        wav: NDVar,
        f_min: float,
        f_max: float,
        n: int,
        integration_window: float = 0.010,
        tstep: float = None,
        location: str = 'right',
        pad: bool = True,
        name: str = None,
) -> NDVar:
    """Gammatone filterbank response

    Parameters
    ----------
    wav : NDVar
        Sound input.
    f_min : scalar
        Lower frequency cutoff.
    f_max : scalar
        Upper frequency cutoff.
    n : int
        Number of filter channels.
    integration_window : scalar
        Integration time window in seconds (default 10 ms).
    tstep : scalar
        Time step size in the output (default is same as ``wav``).
    location : str
        Location of the output relative to the input time axis:

        - ``right``: gammatone sample at end of integration window (default)
        - ``left``: gammatone sample at beginning of integration window
        - ``center``: gammatone sample at center of integration window

        Since the gammatone filter response depends on ``integration_window``,
        the filter response will be delayed relative to the analytic envelope.
        To ignore this delay, use ``location='left'``.
    pad : bool
        Pad output to match time axis of input.
    name : str
        NDVar name (default is ``wav.name``).

    Notes
    -----
    Requires the ``fmax`` branch of the gammatone library to be installed::

        $ pip install https://github.com/christianbrodbeck/gammatone/archive/fmax.zip
    """
    from gammatone.filters import centre_freqs, erb_filterbank
    from gammatone.gtgram import make_erb_filters

    wav_ = wav
    if location == 'left':
        if pad:
            wav_ = _pad_func(wav, wav.time.tmin - integration_window)
    elif location == 'right':
        # tmin += window_time
        if pad:
            wav_ = _pad_func(wav, tstop=wav.time.tstop + integration_window)
    elif location == 'center':
        dt = integration_window / 2
        # tmin += dt
        if pad:
            wav_ = _pad_func(wav, wav.time.tmin - dt, wav.time.tstop + dt)
    else:
        raise ValueError(f"location={location!r}")
    fs = 1 / wav.time.tstep
    if tstep is None:
        tstep = wav.time.tstep
    wave = wav_.get_data('time')
    # based on the gammatone library, rewritten to reduce memory footprint
    cfs = centre_freqs(fs, n, f_min, f_max)
    integration_window_len = int(round(integration_window * fs))
    output_n_samples = floor((len(wave) - integration_window_len) * wav.time.tstep / tstep)
    output_step = tstep / wav.time.tstep
    results = []
    for i, cf in tqdm(enumerate(reversed(cfs)), "Gammatone spectrogram",
                      total=len(cfs), unit='band'):
        fcoefs = np.flipud(make_erb_filters(fs, cf))
        xf = erb_filterbank(wave, fcoefs)
        results.append(aggregate(xf[0], output_n_samples, output_step,
                                 integration_window_len))
    result = np.sqrt(results)
    # package output
    freq_dim = Scalar('frequency', cfs[::-1], 'Hz')
    time_dim = UTS(wav.time.tmin, tstep, output_n_samples)
    if name is None:
        name = wav.name
    return NDVar(result, (freq_dim, time_dim), name)

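# Hypothetical usage of gammatone_bank(); assumes the patched gammatone
# library is installed and that the sound is loaded as an NDVar with a time
# dimension (the file name and parameter values are illustrative).
from eelbrain import load

wav = load.wav('stimulus.wav')
gt = gammatone_bank(wav, f_min=80, f_max=8000, n=32, tstep=0.010)
print(gt.get_dim('frequency'))  # 32 center frequencies between 80 and 8000 Hz
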
def test_clusterdist():
    "Test _ClusterDist class"
    shape = (10, 6, 6, 4)
    locs = [[0, 0, 0],
            [1, 0, 0],
            [1, 1, 0],
            [0, 1, 0]]
    x = np.random.normal(0, 1, shape)
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 6), Scalar('dim2', range(6), 'unit'), sensor)
    y = NDVar(x, dims)

    # test connecting sensors
    logging.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, :2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    print(repr(cdist))
    cdist.add_original(pmap)
    print(repr(cdist))
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))
    assert_equal(cdist.parameter_map.dims, y.dims[1:])

    # test connecting many sensors
    logging.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))

    # test keeping sensors separate
    logging.info("TEST: keeping sensors separate")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, 0] = True
    bin_map[:3, :3, 2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 1, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 2)

    # criteria
    ds = datasets.get_uts(True)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05)
    assert_less(res.clusters['duration'].min(), 0.01)
    eq_(res.clusters['n_sensors'].min(), 1)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05,
                           mintime=0.02, minsensor=2)
    assert_greater_equal(res.clusters['duration'].min(), 0.02)
    eq_(res.clusters['n_sensors'].min(), 2)

    # 1d
    res1d = testnd.ttest_rel('utsnd.sub(time=0.1)', 'A', match='rm', ds=ds,
                             samples=0, pmin=0.05)
    assert_dataobj_equal(res1d.p_uncorrected, res.p_uncorrected.sub(time=0.1))

    # TFCE
    logging.info("TEST: TFCE")
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    time = UTS(-0.1, 0.1, 4)
    scalar = Scalar('scalar', range(10), 'unit')
    dims = ('case', time, sensor, scalar)
    np.random.seed(0)
    y = NDVar(np.random.normal(0, 1, (10, 4, 4, 10)), dims)
    cdist = _ClusterDist(y, 3, None)
    cdist.add_original(y.x[0])
    cdist.finalize()
    assert_equal(cdist.dist.shape, (3,))
    # I/O
    string = pickle.dumps(cdist, pickle.HIGHEST_PROTOCOL)
    cdist_ = pickle.loads(string)
    assert_equal(repr(cdist_), repr(cdist))

    # find peaks
    x = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [7, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [5, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 7, 5, 5, 0, 0],
                   [0, 0, 0, 0, 5, 4, 4, 4, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 4, 0, 0],
                   [0, 0, 0, 0, 7, 0, 0, 3, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
    tgt = np.equal(x, 7)
    peaks = find_peaks(x, cdist._connectivity)
    logging.debug(' detected: \n%s' % (peaks.astype(int)))
    logging.debug(' target: \n%s' % (tgt.astype(int)))
    assert_array_equal(peaks, tgt)

    # testnd permutation result
    res = testnd.ttest_1samp(y, tfce=True, samples=3)
    assert_allclose(np.sort(res._cdist.dist),
                    [77.5852307, 119.1976153, 217.6270428])

    # parc with TFCE on unconnected dimension
    configure(False)
    x = np.random.normal(0, 1, (10, 5, 2, 4))
    time = UTS(-0.1, 0.1, 5)
    categorial = Categorial('categorial', ('a', 'b'))
    y = NDVar(x, ('case', time, categorial, sensor))
    y0 = NDVar(x[:, :, 0], ('case', time, sensor))
    y1 = NDVar(x[:, :, 1], ('case', time, sensor))
    res = testnd.ttest_1samp(y, tfce=True, samples=3)
    res_parc = testnd.ttest_1samp(y, tfce=True, samples=3, parc='categorial')
    res0 = testnd.ttest_1samp(y0, tfce=True, samples=3)
    res1 = testnd.ttest_1samp(y1, tfce=True, samples=3)
    # cdist
    eq_(res._cdist.shape, (4, 2, 5))
    # T-maps don't depend on connectivity
    assert_array_equal(res.t.x[:, 0], res0.t.x)
    assert_array_equal(res.t.x[:, 1], res1.t.x)
    assert_array_equal(res_parc.t.x[:, 0], res0.t.x)
    assert_array_equal(res_parc.t.x[:, 1], res1.t.x)
    # TFCE-maps should always be the same because they're unconnected
    assert_array_equal(res.tfce_map.x[:, 0], res0.tfce_map.x)
    assert_array_equal(res.tfce_map.x[:, 1], res1.tfce_map.x)
    assert_array_equal(res_parc.tfce_map.x[:, 0], res0.tfce_map.x)
    assert_array_equal(res_parc.tfce_map.x[:, 1], res1.tfce_map.x)
    # Probability-maps should depend on what is taken into account
    p_a = res0.compute_probability_map().x
    p_b = res1.compute_probability_map().x
    assert_array_equal(res_parc.compute_probability_map(categorial='a').x, p_a)
    assert_array_equal(res_parc.compute_probability_map(categorial='b').x, p_b)
    p_parc = res_parc.compute_probability_map()
    assert_array_equal(p_parc.x, res.compute_probability_map().x)
    ok_(np.all(p_parc.sub(categorial='a').x >= p_a))
    ok_(np.all(p_parc.sub(categorial='b').x >= p_b))
    configure(True)

def test_clusterdist():
    "Test _ClusterDist class"
    shape = (10, 6, 6, 4)
    locs = [[0, 0, 0],
            [1, 0, 0],
            [1, 1, 0],
            [0, 1, 0]]
    x = np.random.normal(0, 1, shape)
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 6), Ordered('dim2', list(range(6)), 'unit'), sensor)
    y = NDVar(x, dims)

    # test connecting sensors
    logging.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, :2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    print(repr(cdist))
    cdist.add_original(pmap)
    print(repr(cdist))
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))
    assert_equal(cdist.parameter_map.dims, y.dims[1:])

    # test connecting many sensors
    logging.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))

    # test keeping sensors separate
    logging.info("TEST: keeping sensors separate")
    bin_map = np.zeros(shape[1:], dtype=np.bool8)
    bin_map[:3, :3, 0] = True
    bin_map[:3, :3, 2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 1, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 2)

    # criteria
    ds = datasets.get_uts(True)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05)
    assert_less(res.clusters['duration'].min(), 0.01)
    eq_(res.clusters['n_sensors'].min(), 1)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05,
                           mintime=0.02, minsensor=2)
    assert_greater_equal(res.clusters['duration'].min(), 0.02)
    eq_(res.clusters['n_sensors'].min(), 2)

    # 1d
    res1d = testnd.ttest_rel('utsnd.sub(time=0.1)', 'A', match='rm', ds=ds,
                             samples=0, pmin=0.05)
    assert_dataobj_equal(res1d.p_uncorrected, res.p_uncorrected.sub(time=0.1))

    # TFCE
    logging.info("TEST: TFCE")
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 4), sensor, Ordered('dim2', list(range(10)), 'unit'))
    y = NDVar(np.random.normal(0, 1, (10, 4, 4, 10)), dims)
    cdist = _ClusterDist(y, 3, None)
    cdist.add_original(y.x[0])
    cdist.finalize()
    assert_equal(cdist.dist.shape, (3,))
    # I/O
    string = pickle.dumps(cdist, pickle.HIGHEST_PROTOCOL)
    cdist_ = pickle.loads(string)
    assert_equal(repr(cdist_), repr(cdist))

    # find peaks
    x = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [7, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [5, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 7, 5, 5, 0, 0],
                   [0, 0, 0, 0, 5, 4, 4, 4, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 4, 0, 0],
                   [0, 0, 0, 0, 7, 0, 0, 3, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
    tgt = np.equal(x, 7)
    peaks = cdist._find_peaks(x)
    logging.debug(' detected: \n%s' % (peaks.astype(int)))
    logging.debug(' target: \n%s' % (tgt.astype(int)))
    assert_array_equal(peaks, tgt)
