def test_cast_to_ndvar():
    """Exercise ``table.cast_to_ndvar()`` with categorial, scalar and time dims."""
    src = datasets.get_uv()
    src['scalar'] = src['A'] == 'a2'
    src['time'] = src.eval('A%B').as_var({
        ('a1', 'b1'): 0.,
        ('a1', 'b2'): 0.1,
        ('a2', 'b1'): 0.2,
        ('a2', 'b2'): 0.3,
    })
    # cast over a categorial factor: collapses pairs of cases into one NDVar dim
    cat_ds = table.cast_to_ndvar('fltvar', 'A', 'B%rm', ds=src, name='new')
    assert cat_ds.n_cases == src.n_cases / 2
    assert cat_ds['new'].A == Categorial('A', ('a1', 'a2'))
    # cast over a boolean Var as a scalar dimension; values must match the categorial cast
    scalar_ds = table.cast_to_ndvar('fltvar', 'scalar', 'B%rm', ds=src, dim='newdim', name='new')
    assert scalar_ds.n_cases == src.n_cases / 2
    assert scalar_ds['new'].newdim == Scalar('newdim', [False, True])
    assert_array_equal(cat_ds['new'].x, scalar_ds['new'].x)
    # cast over evenly spaced values as a uniform time series (UTS) dimension
    time_ds = table.cast_to_ndvar('fltvar', 'time', 'rm', ds=src, dim='uts', name='y')
    assert time_ds.n_cases == src.n_cases / 4
    assert time_ds['y'].time == UTS(0, 0.1, 4)
def test_ncrf():
    """Test fit_ncrf() on the sample 'meg'/'stim' data.

    NOTE(review): another ``def test_ncrf()`` appears later in this file; if
    both definitions live in the same module, the later one shadows this one
    at import time and this test never runs -- confirm whether this copy
    should be removed or renamed.
    """
    meg = load('meg').sub(time=(0, 5))
    stim = load('stim').sub(time=(0, 5))
    fwd = load('fwd_sol')
    emptyroom = load('emptyroom')
    # 1 stimulus
    model = fit_ncrf(meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1',
                     mu=0.0019444, n_iter=3, n_iterc=3, n_iterf=10)
    # check residual
    assert model.residual == pytest.approx(172.714, 0.001)
    # check scaling
    stim_baseline = stim.mean()
    assert model._stim_baseline[0] == stim_baseline
    assert model._stim_scaling[0] == (stim - stim_baseline).abs().mean()
    assert model.h.norm('time').norm('source').norm('space') == pytest.approx(6.043e-10, rel=0.001)
    # test persistence
    model_2 = pickle.loads(pickle.dumps(model, pickle.HIGHEST_PROTOCOL))
    assert_dataobj_equal(model_2.h, model.h)
    assert_dataobj_equal(model_2.h_scaled, model.h_scaled)
    assert model_2.residual == model.residual
    # 2 stimuli, one of them 2-d, normalize='l2'
    diff = stim.diff('time')
    stim2 = concatenate([diff.clip(0), diff.clip(max=0)], Categorial('rep', ['on', 'off']))
    model = fit_ncrf(meg, [stim, stim2], fwd, emptyroom, tstop=0.2, normalize='l2',
                     mu=0.0019444, n_iter=3, n_iterc=3, n_iterf=10)
    # check scaling
    assert model._stim_baseline[0] == stim.mean()
    assert model._stim_scaling[0] == stim.std()
    assert model.h[0].norm('time').norm('source').norm('space') == pytest.approx(4.732e-10, 0.001)
    # cross-validation
    model = fit_ncrf(meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1',
                     mu='auto', n_iter=1, n_iterc=2, n_iterf=2, n_workers=1)
    assert model.mu == pytest.approx(0.0203, 0.001)
    model.cv_info()
def test_clusterdist():
    """Test the _ClusterDist class: cluster formation, criteria, TFCE, parc.

    Fix: ``np.bool8`` is a deprecated alias that was removed in NumPy 1.24;
    replaced with the builtin ``bool`` dtype (identical behavior).
    """
    shape = (10, 6, 6, 4)
    locs = [[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]]
    x = np.random.normal(0, 1, shape)
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    dims = ('case', UTS(-0.1, 0.1, 6), Scalar('dim2', range(6), 'unit'), sensor)
    y = NDVar(x, dims)

    # test connecting sensors: one contiguous supra-threshold region -> 1 cluster
    logging.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=bool)  # was np.bool8 (removed in NumPy 1.24)
    bin_map[:3, :3, :2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    print(repr(cdist))
    cdist.add_original(pmap)
    print(repr(cdist))
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))
    assert_equal(cdist.parameter_map.dims, y.dims[1:])

    # test connecting many sensors
    logging.info("TEST: connecting sensors")
    bin_map = np.zeros(shape[1:], dtype=bool)  # was np.bool8
    bin_map[:3, :3] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 0, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 1)
    assert_array_equal(cdist._original_cluster_map == cdist._cids[0],
                       cdist._crop(bin_map).swapaxes(0, cdist._nad_ax))

    # test keeping sensors separate: regions on unconnected sensors -> 2 clusters
    logging.info("TEST: keeping sensors separate")
    bin_map = np.zeros(shape[1:], dtype=bool)  # was np.bool8
    bin_map[:3, :3, 0] = True
    bin_map[:3, :3, 2] = True
    pmap = np.random.normal(0, 1, shape[1:])
    np.clip(pmap, -1, 1, pmap)
    pmap[bin_map] = 2
    cdist = _ClusterDist(y, 1, 1.5)
    cdist.add_original(pmap)
    assert_equal(cdist.n_clusters, 2)

    # criteria (mintime / minsensor filter out small clusters)
    ds = datasets.get_uts(True)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05)
    assert_less(res.clusters['duration'].min(), 0.01)
    eq_(res.clusters['n_sensors'].min(), 1)
    res = testnd.ttest_rel('utsnd', 'A', match='rm', ds=ds, samples=0, pmin=0.05,
                           mintime=0.02, minsensor=2)
    assert_greater_equal(res.clusters['duration'].min(), 0.02)
    eq_(res.clusters['n_sensors'].min(), 2)

    # 1d: a time-slice of the nd result should match the 1d test
    res1d = testnd.ttest_rel('utsnd.sub(time=0.1)', 'A', match='rm', ds=ds,
                             samples=0, pmin=0.05)
    assert_dataobj_equal(res1d.p_uncorrected, res.p_uncorrected.sub(time=0.1))

    # TFCE
    logging.info("TEST: TFCE")
    sensor = Sensor(locs, ['0', '1', '2', '3'])
    sensor.set_connectivity(connect_dist=1.1)
    time = UTS(-0.1, 0.1, 4)
    scalar = Scalar('scalar', range(10), 'unit')
    dims = ('case', time, sensor, scalar)
    np.random.seed(0)
    y = NDVar(np.random.normal(0, 1, (10, 4, 4, 10)), dims)
    cdist = _ClusterDist(y, 3, None)
    cdist.add_original(y.x[0])
    cdist.finalize()
    assert_equal(cdist.dist.shape, (3,))
    # I/O
    string = pickle.dumps(cdist, pickle.HIGHEST_PROTOCOL)
    cdist_ = pickle.loads(string)
    assert_equal(repr(cdist_), repr(cdist))
    # find peaks: every 7 is a local maximum given the connectivity
    x = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [7, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [5, 7, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 7, 5, 5, 0, 0],
                   [0, 0, 0, 0, 5, 4, 4, 4, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                  [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 4, 0, 0],
                   [0, 0, 0, 0, 7, 0, 0, 3, 0, 0],
                   [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])
    tgt = np.equal(x, 7)
    peaks = find_peaks(x, cdist._connectivity)
    logging.debug(' detected: \n%s' % (peaks.astype(int)))
    logging.debug(' target: \n%s' % (tgt.astype(int)))
    assert_array_equal(peaks, tgt)
    # testnd permutation result
    res = testnd.ttest_1samp(y, tfce=True, samples=3)
    assert_allclose(np.sort(res._cdist.dist),
                    [77.5852307, 119.1976153, 217.6270428])

    # parc with TFCE on unconnected dimension
    configure(False)
    x = np.random.normal(0, 1, (10, 5, 2, 4))
    time = UTS(-0.1, 0.1, 5)
    categorial = Categorial('categorial', ('a', 'b'))
    y = NDVar(x, ('case', time, categorial, sensor))
    y0 = NDVar(x[:, :, 0], ('case', time, sensor))
    y1 = NDVar(x[:, :, 1], ('case', time, sensor))
    res = testnd.ttest_1samp(y, tfce=True, samples=3)
    res_parc = testnd.ttest_1samp(y, tfce=True, samples=3, parc='categorial')
    res0 = testnd.ttest_1samp(y0, tfce=True, samples=3)
    res1 = testnd.ttest_1samp(y1, tfce=True, samples=3)
    # cdist
    eq_(res._cdist.shape, (4, 2, 5))
    # T-maps don't depend on connectivity
    assert_array_equal(res.t.x[:, 0], res0.t.x)
    assert_array_equal(res.t.x[:, 1], res1.t.x)
    assert_array_equal(res_parc.t.x[:, 0], res0.t.x)
    assert_array_equal(res_parc.t.x[:, 1], res1.t.x)
    # TFCE-maps should always be the same because they're unconnected
    assert_array_equal(res.tfce_map.x[:, 0], res0.tfce_map.x)
    assert_array_equal(res.tfce_map.x[:, 1], res1.tfce_map.x)
    assert_array_equal(res_parc.tfce_map.x[:, 0], res0.tfce_map.x)
    assert_array_equal(res_parc.tfce_map.x[:, 1], res1.tfce_map.x)
    # Probability-maps should depend on what is taken into account
    p_a = res0.compute_probability_map().x
    p_b = res1.compute_probability_map().x
    assert_array_equal(res_parc.compute_probability_map(categorial='a').x, p_a)
    assert_array_equal(res_parc.compute_probability_map(categorial='b').x, p_b)
    p_parc = res_parc.compute_probability_map()
    assert_array_equal(p_parc.x, res.compute_probability_map().x)
    ok_(np.all(p_parc.sub(categorial='a').x >= p_a))
    ok_(np.all(p_parc.sub(categorial='b').x >= p_b))
    configure(True)
def test_ncrf():
    """Fit NCRF models on the sample data and verify results and persistence."""
    meg = load('meg').sub(time=(0, 5))
    stim = load('stim').sub(time=(0, 5))
    fwd = load('fwd_sol')
    emptyroom = load('emptyroom')

    # single stimulus, l1 normalization
    m = fit_ncrf(meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1',
                 mu=0.0019444, n_iter=3, n_iterc=3, n_iterf=10,
                 do_post_normalization=False)
    # residual and explained variance
    assert m.explained_var == pytest.approx(0.00641890144769941, rel=0.001)
    assert m.voxelwise_explained_variance.sum() == pytest.approx(
        0.08261162457414245, rel=0.001)
    assert m.residual == pytest.approx(178.512, 0.001)
    # stimulus scaling
    baseline = stim.mean()
    assert m._stim_baseline[0] == baseline
    assert m._stim_scaling[0] == (stim - baseline).abs().mean()
    assert m.h.norm('time').norm('source').norm('space') == pytest.approx(
        6.601677e-10, rel=0.001)
    # round-trip through pickle
    m_restored = pickle.loads(pickle.dumps(m, pickle.HIGHEST_PROTOCOL))
    assert_dataobj_equal(m_restored.h, m.h)
    assert_dataobj_equal(m_restored.h_scaled, m.h_scaled)
    assert m_restored.residual == m.residual
    assert m_restored.gaussian_fwhm == m.gaussian_fwhm
    # explicit gaussian fwhm
    m = fit_ncrf(meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1',
                 mu=0.0019444, n_iter=1, n_iterc=1, n_iterf=1,
                 gaussian_fwhm=50.0)
    assert m.gaussian_fwhm == 50.0

    # two stimuli, one of them 2-d, l2 normalization
    onset = stim.diff('time')
    stim_2d = concatenate([onset.clip(0), onset.clip(max=0)],
                          Categorial('rep', ['on', 'off']))
    m = fit_ncrf(meg, [stim, stim_2d], fwd, emptyroom, tstop=[0.2, 0.2],
                 normalize='l2', mu=0.0019444, n_iter=3, n_iterc=3,
                 n_iterf=10, do_post_normalization=False)
    # stimulus scaling under l2
    assert m._stim_baseline[0] == stim.mean()
    assert m._stim_scaling[0] == stim.std()
    assert m.h[0].norm('time').norm('source').norm(
        'space') == pytest.approx(7.0088e-10, rel=0.001)

    # cross-validated regularization parameter
    m = fit_ncrf(meg, stim, fwd, emptyroom, tstop=0.2, normalize='l1',
                 mu='auto', n_iter=1, n_iterc=2, n_iterf=2, n_workers=1,
                 do_post_normalization=False)
    assert m.mu == pytest.approx(0.0203, 0.001)
    m.cv_info()
def _ds_to_ndvar(self, ds: Dataset, uts: UTS, code: Code):
    """Convert an event table into a continuous predictor NDVar.

    Builds an impulse and/or step time course on ``uts`` from the events in
    ``ds``, applying the mask and shuffling options encoded in ``code``.

    Parameters
    ----------
    ds : Dataset
        Event table; must contain a 'time' column and, depending on
        ``self.columns``/``code``, a value column and an optional boolean
        'mask' column.
    uts : UTS
        Time axis for the output predictor.
    code : Code
        Predictor specification (value/mask column keys, shuffle method and
        index, NUTS method, output key).

    Returns
    -------
    NDVar
        Predictor over ``uts``; for NUTS-method 'is', a 2-d NDVar with a
        'representation' dimension ('step', 'impulse').
    """
    # determine which columns hold the event values and the optional mask
    if self.columns:
        column_key, mask_key = code.nuts_columns
        if column_key is None:
            # no value column specified: use a constant 1 for every event
            column_key = 'value'
            ds[:, column_key] = 1
    else:
        column_key = 'value'
        mask_key = 'mask' if 'mask' in ds else None
    if mask_key:
        mask = ds[mask_key].x
        assert mask.dtype.kind == 'b', "'mask' must be boolean"
    else:
        mask = None
    # resolve the boolean index restricting shuffling to a subset of events
    if code.shuffle_index:
        shuffle_mask = ds[code.shuffle_index].x
        if shuffle_mask.dtype.kind != 'b':
            raise code.error("shuffle index must be boolean", -1)
        elif code.shuffle == 'permute' and mask is not None:
            # permuting within an explicit index requires the index to lie
            # entirely inside the mask
            assert not numpy.any(shuffle_mask[~mask])
    elif code.shuffle == 'permute':
        shuffle_mask = mask
    else:
        shuffle_mask = None
    # $remask: randomly reassign the mask (within shuffle_mask if given);
    # NOTE: must run before the mask is applied to the values below
    if code.shuffle == 'remask':
        if mask is None:
            raise code.error("$remask for predictor without mask", -1)
        rng = code._get_rng()
        if shuffle_mask is None:
            rng.shuffle(mask)
        else:
            remask = mask[shuffle_mask]
            rng.shuffle(remask)
            mask[shuffle_mask] = remask
        code.register_shuffle(index=True)
    # zero out masked-out events (boolean mask multiplies the value column)
    if mask is not None:
        ds[column_key] *= mask
    # $permute: shuffle the event values (after masking)
    if code.shuffle == 'permute':
        rng = code._get_rng()
        if shuffle_mask is None:
            rng.shuffle(ds[column_key].x)
        else:
            values = ds[column_key].x[shuffle_mask]
            rng.shuffle(values)
            ds[column_key].x[shuffle_mask] = values
        code.register_shuffle(index=True)
    # prepare output NDVar
    if code.nuts_method == 'is':
        # 'is' = impulse + step: both representations stacked in one NDVar
        dim = Categorial('representation', ('step', 'impulse'))
        x = NDVar(numpy.zeros((2, len(uts))), (dim, uts), name=code.key)
        x_step, x_impulse = x
    else:
        x = NDVar(numpy.zeros(len(uts)), uts, name=code.key)
        if code.nuts_method == 'step':
            x_step, x_impulse = x, None
        elif not code.nuts_method:
            x_step, x_impulse = None, x
        else:
            raise code.error(f"NUTS-method={code.nuts_method!r}")
    # fill in values; drop events starting after the end of the time axis
    ds = ds[ds['time'] < uts.tstop]
    if x_impulse is not None:
        # one impulse of height v at each event time
        for t, v in ds.zip('time', column_key):
            x_impulse[t] = v
    if x_step is not None:
        # each value holds from its event time until the next event's time;
        # a non-zero final value needs ds.info['tstop'] to know where to end
        t_stops = ds[1:, 'time']
        if ds[-1, column_key] != 0:
            if 'tstop' not in ds.info:
                raise code.error("For step representation, the predictor datasets needs to contain ds.info['tstop'] to determine the end of the last step", -1)
            t_stops = chain(t_stops, [ds.info['tstop']])
        for t0, t1, v in zip(ds['time'], t_stops, ds[column_key]):
            x_step[t0:t1] = v
    return x