def test_rate_arrays():
    P = PoissonGroup(2, np.array([0, 1./defaultclock.dt])*Hz)
    spikes = SpikeMonitor(P)
    net = Network(P, spikes)
    net.run(2*defaultclock.dt)
    assert_equal(spikes.count, np.array([0, 2]))

def test_multinomial_elementwise_vector(self):
    '''Test creating multinomial variables (r=1).'''
    (m, n) = (20, 5)
    p = statutil.random_row_stochastic((m, n))
    x = statutil.multinomial_elementwise(p)
    assert_equal(x,
                 [3, 1, 3, 4, 0, 2, 2, 3, 1, 4, 2, 3, 2, 1, 4, 2, 4, 3, 4, 3],
                 'Wrong random multinomial generated')

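# Note: the hard-coded expected vector above can only be stable if the global
# RNG is seeded by the surrounding test fixture (not shown in this excerpt).
# A minimal sketch of the setup this presumably relies on -- the seed value is
# a placeholder, not taken from the source:
#
#     import numpy as np
#     np.random.seed(0)  # hypothetical seed; the actual fixture is not shown
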
def test_file(file_name, d=None):
    n = 50
    for d in [d] if d else xrange(1, n + 5):
        fast = sum_from_left(read_digit_columns(open(file_name, 'rb'), n), d)
        exact = int(str(sum(int(x.rstrip('\n'))
                            for x in open(file_name, 'rb')))[0:d])
        # print d, fast, exact
        assert_equal(fast, exact,
                     'Approximate sum is wrong, file %s, d %d' % (file_name, d))

def test_spike_monitor():
    language_before = brian_prefs.codegen.target
    for language in languages:
        brian_prefs.codegen.target = language
        defaultclock.t = 0*second
        G = NeuronGroup(2, '''dv/dt = rate : 1
                              rate : Hz''', threshold='v>1', reset='v=0')
        # We don't use 100 and 1000Hz, because then the membrane potential
        # would be exactly at 1 after 10 resp. 100 timesteps. Due to floating
        # point issues this will not be exact.
        G.rate = [101, 1001] * Hz

        mon = SpikeMonitor(G)
        net = Network(G, mon)
        net.run(10*ms)

        assert_allclose(mon.t[mon.i == 0], [9.9]*ms)
        assert_allclose(mon.t[mon.i == 1], np.arange(10)*ms + 0.9*ms)
        assert_allclose(mon.t_[mon.i == 0], np.array([9.9*float(ms)]))
        assert_allclose(mon.t_[mon.i == 1], (np.arange(10) + 0.9)*float(ms))
        assert_equal(mon.count, np.array([1, 10]))

        i, t = mon.it
        i_, t_ = mon.it_
        assert_equal(i, mon.i)
        assert_equal(i, i_)
        assert_equal(t, mon.t)
        assert_equal(t_, mon.t_)

    brian_prefs.codegen.target = language_before

def test_changed_dt_spikes_in_queue():
    for codeobj_class in codeobj_classes:
        defaultclock.dt = .5*ms
        G1 = NeuronGroup(1, 'v:1', threshold='v>1', reset='v=0',
                         codeobj_class=codeobj_class)
        G1.v = 1.1
        G2 = NeuronGroup(10, 'v:1', threshold='v>1', reset='v=0',
                         codeobj_class=codeobj_class)
        S = Synapses(G1, G2, pre='v+=1.1', codeobj_class=codeobj_class)
        S.connect(True)
        S.delay = 'j*ms'
        mon = SpikeMonitor(G2)
        net = Network(G1, G2, S, mon)
        net.run(5*ms)
        defaultclock.dt = 1*ms
        net.run(3*ms)
        defaultclock.dt = 0.1*ms
        net.run(2*ms)
        # Spikes should have delays of 0, 1, 2, ... ms and always
        # trigger a spike one dt later
        expected = [0.5, 1.5, 2.5, 3.5, 4.5,  # dt=0.5ms
                    6, 7, 8,                  # dt=1ms
                    8.1, 9.1                  # dt=0.1ms
                    ] * ms
        assert_equal(mon.t, expected)

def test_LinearConstraint():
    from numpy.testing.utils import assert_equal
    lc = LinearConstraint(["foo", "bar"], [1, 1])
    assert lc.variable_names == ["foo", "bar"]
    assert_equal(lc.coefs, [[1, 1]])
    assert_equal(lc.constants, [[0]])

    lc = LinearConstraint(["foo", "bar"], [[1, 1], [2, 3]], [10, 20])
    assert_equal(lc.coefs, [[1, 1], [2, 3]])
    assert_equal(lc.constants, [[10], [20]])

    assert lc.coefs.dtype == np.dtype(float)
    assert lc.constants.dtype == np.dtype(float)

    # statsmodels wants to be able to create degenerate constraints like this,
    # see:
    #     https://github.com/pydata/patsy/issues/89
    # We used to forbid it, but I guess it's harmless, so why not.
    lc = LinearConstraint(["a"], [[0]])
    assert_equal(lc.coefs, [[0]])

    from nose.tools import assert_raises
    assert_raises(ValueError, LinearConstraint, ["a"], [[1, 2]])
    assert_raises(ValueError, LinearConstraint, ["a"], [[[1]]])
    assert_raises(ValueError, LinearConstraint, ["a"], [[1, 2]], [3, 4])
    assert_raises(ValueError, LinearConstraint, ["a", "b"], [[1, 2]], [3, 4])
    assert_raises(ValueError, LinearConstraint, ["a"], [[1]], [[]])
    assert_raises(ValueError, LinearConstraint, ["a", "b"], [])
    assert_raises(ValueError, LinearConstraint, ["a", "b"], np.zeros((0, 2)))

    assert_no_pickling(lc)

def test_timedarray_with_units():
    ta = TimedArray(np.arange(10)*amp, dt=0.1*ms)
    G = NeuronGroup(1, 'value = ta(t) + 2*nA: amp', dt=0.1*ms)
    mon = StateMonitor(G, 'value', record=True, dt=0.1*ms)
    net = Network(G, mon)
    net.run(1.1*ms)
    assert_equal(mon[0].value,
                 np.clip(np.arange(len(mon[0].t)), 0, 9)*amp + 2*nA)

def test_spikegenerator_rounding():
    # all spikes should fall into the first time bin
    indices = np.arange(100)
    times = np.linspace(0, 0.1, 100, endpoint=False)*ms
    SG = SpikeGeneratorGroup(100, indices, times, dt=0.1*ms)
    mon = SpikeMonitor(SG)
    net = Network(SG, mon)
    net.run(0.1*ms)
    assert_equal(mon.count, np.ones(100))

    # all spikes should fall in separate bins
    dt = 0.1*ms
    indices = np.zeros(10000)
    times = np.arange(10000)*dt
    SG = SpikeGeneratorGroup(1, indices, times, dt=dt)
    target = NeuronGroup(1, 'count : 1', threshold='True',
                         reset='count=0')  # set count to zero at every time step
    syn = Synapses(SG, target, on_pre='count+=1')
    syn.connect()
    mon = StateMonitor(target, 'count', record=0, when='end')
    net = Network(SG, target, syn, mon)
    # change the schedule so that resets are processed before synapses
    net.schedule = ['start', 'groups', 'thresholds', 'resets', 'synapses', 'end']
    net.run(10000*dt)
    assert_equal(mon[0].count, np.ones(10000))

def test_no_reference_1():
    '''
    Using subgroups without keeping an explicit reference. Basic access.
    '''
    G = NeuronGroup(10, 'v:1')
    G.v = np.arange(10)
    assert_equal(G[:5].v[:], G.v[:5])

def testDirectedIntersectionFromInside(self):
    # line origin is inside sphere/ellipsoid
    for ellipsoidLineIntersection, ellipsoidLineIntersects in \
            zip(ellipsoidLineIntersectionFns, ellipsoidLineIntersectsFns):
        r = 2
        origin = [1, 0, 0]
        direction = [[1, 0, 0]]
        intersection = [[2, 0, 0]]

        res = sphereLineIntersection(r, origin, direction, directed=False)
        assert_array_equal(res, intersection)
        res = sphereLineIntersection(r, origin, direction, directed=True)
        assert_array_equal(res, intersection)

        res = ellipsoidLineIntersection(r, r, origin, direction, directed=False)
        assert_array_equal(res, intersection)
        intersects = ellipsoidLineIntersects(r, r, origin, direction, directed=False)
        assert_equal(intersects, [True])
        res = ellipsoidLineIntersection(r, r, origin, direction, directed=True)
        assert_array_equal(res, intersection)
        intersects = ellipsoidLineIntersects(r, r, origin, direction, directed=True)
        assert_equal(intersects, [True])

        intersection2 = [[-2, 0, 0]]
        direction2 = [[-1, 0, 0]]
        res = sphereLineIntersection(r, origin, direction2, directed=False)
        assert_array_equal(res, intersection)
        res = sphereLineIntersection(r, origin, direction2, directed=True)
        assert_array_equal(res, intersection2)

def test_decompose_pentagon(self):
    # Make sure decompose returns correct thing for a pentagon.
    result = decompose_to_triangles(self.pentagon)
    benchmark = array([[[0, 0], [2, 0], [2, 1]],
                       [[0, 0], [2, 1], [1, 2]],
                       [[0, 0], [1, 2], [0, 2]]], dtype='float')
    assert_equal(result, benchmark)

def test_polygon1():
    vert = [(2, 1), (3, 5), (6, 6), (3, 8), (0, 4), (2, 1)]
    pol = region.Polygon('1', vert)
    mask = np.zeros((9, 9), dtype=np.int)
    mask = pol.scan(mask)
    pol1 = polygon1()
    utils.assert_equal(mask, pol1)

def testSphereLineIntersection(self):
    sphereRadius = 2
    lineOrigin = [0, 3, 0]
    lineDirection = [0, -1, 0]
    point = sphereLineIntersection(sphereRadius, lineOrigin, lineDirection)
    assert_equal(point, [0, 2, 0])

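# Worked check of the assertion above: points on the line are o + t*d with
# o = (0, 3, 0) and d = (0, -1, 0), and the sphere is |p| = 2, so
# |o + t*d|**2 = t**2 - 6*t + 9 = 4 gives t = 1 or t = 5; the nearer hit
# t = 1 yields (0, 2, 0). (That the function returns the nearer intersection
# is inferred from the assertion, not from its documentation.)
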
def testSphereLineIntersectionArray(self):
    sphereRadius = 2
    lineOrigin = [0, 3, 0]
    lineDirection = unitVectors([[0, -1, 0], [-1, -1, 0]])
    points = sphereLineIntersection(sphereRadius, lineOrigin, lineDirection)
    assert_equal(points, [[0, 2, 0], [np.nan, np.nan, np.nan]])

def test_scalar_parameter_access():
    for codeobj_class in codeobj_classes:
        G = NeuronGroup(10, '''dv/dt = freq : 1
                               freq : Hz (shared)
                               number : 1 (shared)
                               array : 1''',
                        codeobj_class=codeobj_class)

        # Try setting a scalar variable
        G.freq = 100*Hz
        assert_equal(G.freq[:], 100*Hz)
        G.freq[:] = 200*Hz
        assert_equal(G.freq[:], 200*Hz)
        G.freq = 'freq - 50*Hz + number*Hz'
        assert_equal(G.freq[:], 150*Hz)
        G.freq[:] = '50*Hz'
        assert_equal(G.freq[:], 50*Hz)

        # Check the second method of accessing that works
        assert_equal(np.asanyarray(G.freq), 50*Hz)

        # Check error messages
        assert_raises(IndexError, lambda: G.freq[0])
        assert_raises(IndexError, lambda: G.freq[1])
        assert_raises(IndexError, lambda: G.freq[0:1])
        assert_raises(IndexError, lambda: G.freq['i>5'])
        assert_raises(ValueError, lambda: G.freq.set_item(slice(None), [0, 1]*Hz))
        assert_raises(IndexError, lambda: G.freq.set_item(0, 100*Hz))
        assert_raises(IndexError, lambda: G.freq.set_item(1, 100*Hz))
        assert_raises(IndexError, lambda: G.freq.set_item('i>5', 100*Hz))

def test_par_slicing(self):
    """Test assigning to a parameter slice"""
    p1 = models.Polynomial1D(3, n_models=3)
    p1.c0[:2] = [10, 10]
    utils.assert_equal(p1.parameters,
                       [10.0, 10.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

def test_synapse_creation_generator_complex_ranges():
    G1 = NeuronGroup(10, 'v:1', threshold='False')
    G2 = NeuronGroup(20, 'v:1', threshold='False')
    G1.v = 'i'
    G2.v = '10 + i'
    SG1 = G1[:5]
    SG2 = G2[10:]
    S = Synapses(SG1, SG2, 'w:1', on_pre='v+=w')
    S.connect(j='i+k for k in range(N_post-i)')  # Connect to all j>=i

    # connect based on pre-/postsynaptic state variables
    S2 = Synapses(SG1, SG2, 'w:1', on_pre='v+=w')
    S2.connect(j='k for k in range(N_post * int(v_pre > 2))')

    # connect based on pre-/postsynaptic state variables
    S3 = Synapses(SG2, SG1, 'w:1', on_pre='v+=w')
    S3.connect(j='k for k in range(N_post * int(v_pre > 22))')

    run(0*ms)  # for standalone

    for syn_source in xrange(5):
        # Internally, the "real" neuron indices should be used
        assert_equal(S._synaptic_post[syn_source, :],
                     10 + syn_source + np.arange(10 - syn_source))
        # For the user, the subgroup-relative indices should be presented
        assert_equal(S.j[syn_source, :],
                     syn_source + np.arange(10 - syn_source))

    assert len(S2) == 2 * len(SG2), str(len(S2))
    assert all(S2.v_pre[:] > 2)
    assert len(S3) == 7 * len(SG1), str(len(S3))
    assert all(S3.v_pre[:] > 22)

def test_compute_ts_map(tmpdir):
    """Minimal test of compute_ts_map"""
    data = load_poisson_stats_image(extra_info=True)
    kernel = Gaussian2DKernel(2.5)
    data['exposure'] = np.ones(data['counts'].shape) * 1E12
    for name, func in zip(['counts', 'background', 'exposure'],
                          [np.nansum, np.nansum, np.mean]):
        data[name] = downsample_2N(data[name], 2, func)

    result = compute_ts_map(data['counts'], data['background'],
                            data['exposure'], kernel)
    for name, order in zip(['ts', 'amplitude', 'niter'], [2, 5, 0]):
        result[name] = np.nan_to_num(result[name])
        result[name] = upsample_2N(result[name], 2, order=order)

    assert_allclose(1705.840212274973, result.ts[99, 99], rtol=1e-3)
    assert_allclose([[99], [99]], np.where(result.ts == result.ts.max()))
    assert_allclose(6, result.niter[99, 99])
    assert_allclose(1.0227934338735763e-09, result.amplitude[99, 99], rtol=1e-3)

    # test write method
    filename = str(tmpdir.join('ts_test.fits'))
    result.write(filename, header=data['header'])
    read_result = TSMapResult.read(filename)
    for name in ['ts', 'sqrt_ts', 'amplitude', 'niter']:
        assert result[name].dtype == read_result[name].dtype
        assert_equal(result[name], read_result[name])

def read_pedigree_from_test_file(file_name, genotyped_id_file=None):
    '''Load a pedigree from a PLINK TFAM file.'''
    data = np.genfromtxt(file_name, np.dtype(int))
    p = io_pedigree.read(file_name, genotyped_id_file=genotyped_id_file)
    assert_equal(p._graph.number_of_nodes(), data.shape[0],
                 'Incorrect number of nodes')
    assert nx.is_directed_acyclic_graph(p._graph), 'Pedigree is not a DAG'
    return p

def test_timedarray_no_upsampling():
    # Test a TimedArray where no upsampling is necessary because the monitor's
    # dt is bigger than the TimedArray's
    ta = TimedArray(np.arange(10), dt=0.01*ms)
    G = NeuronGroup(1, 'value = ta(t): 1', dt=0.1*ms)
    mon = StateMonitor(G, 'value', record=True, dt=1*ms)
    run(2.1*ms)
    assert_equal(mon[0].value, [0, 9, 9])

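# Worked check of the [0, 9, 9] expectation, assuming (as the assertion
# implies) that a TimedArray clamps to its last value beyond its range: the
# monitor samples at t = 0, 1, 2 ms; the array only covers t in [0, 0.1) ms,
# so t = 0 reads ta(0) = 0 and both later samples read the final entry 9.
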
def test_state_variables_group_as_index():
    G = NeuronGroup(10, 'v : 1')
    SG = G[4:9]
    G.v[SG] = 1
    assert_equal(G.v[:], np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0]))
    G.v = 1
    G.v[SG] = '2*v'
    assert_equal(G.v[:], np.array([1, 1, 1, 1, 2, 2, 2, 2, 2, 1]))

def __test_segment_range(self, n, step, c):
    '''Test getting the start and end of equidistant intervals in an item
    collection.'''
    items = np.arange(0, n, c)
    segments = iu.segmentrange(items, step)
    assert_equal(segments[:, 0], range(0, n, c * step), 'Wrong start array')
    assert_equal(segments[:, 1],
                 range(c * (step - 1), n, c * step) +
                 ([n - c] if np.mod(n, step) else []),
                 'Wrong end array')

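# Worked example of the expected arrays above (segmentrange semantics inferred
# from the assertions, not from its documentation): with n=12, c=2, step=3 the
# items are [0, 2, 4, 6, 8, 10]; grouping 3 consecutive items per segment
# gives [0, 2, 4] and [6, 8, 10], so the start column is [0, 6]
# (= range(0, 12, 6)) and the end column is [4, 10] (= range(4, 12, 6)),
# with no leftover tail because 12 % 3 == 0.
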
def test_poly1d_multiple_sets(self):
    p1 = models.Polynomial1D(3, n_models=3)
    utils.assert_equal(p1.parameters,
                       [0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    utils.assert_array_equal(p1.c0, [0, 0, 0])
    p1.c0 = [10, 10, 10]
    utils.assert_equal(p1.parameters,
                       [10.0, 10.0, 10.0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

def test_propagation():
    # Using a PoissonGroup as a source for Synapses should work as expected
    P = PoissonGroup(2, np.array([0, 1.0 / defaultclock.dt]) * Hz)
    G = NeuronGroup(2, "v:1")
    S = Synapses(P, G, pre="v+=1", connect="i==j")
    run(2 * defaultclock.dt)
    assert_equal(G.v[:], np.array([0.0, 2.0]))

def test_par_slicing(self):
    """Test assigning to a parameter slice"""
    p1 = models.Poly1DModel(3, param_dim=3)
    p1.c0[:2] = [10, 10]
    utils.assert_equal(p1.parameters,
                       [10.0, 10.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

def test_timedarray_with_units():
    ta = TimedArray(np.arange(10)*amp, defaultclock.dt)
    for codeobj_class in codeobj_classes:
        G = NeuronGroup(1, 'value = ta(t) + 2*nA: amp',
                        codeobj_class=codeobj_class)
        mon = StateMonitor(G, 'value', record=True)
        net = Network(G, mon)
        net.run(11*ms)
        assert_equal(mon[0].value,
                     np.clip(np.arange(len(mon[0].t)), 0, 9)*amp + 2*nA)

def test_propagation():
    # Using a PoissonGroup as a source for Synapses should work as expected
    P = PoissonGroup(2, np.array([0, 1./defaultclock.dt])*Hz)
    G = NeuronGroup(2, 'v:1')
    S = Synapses(P, G, pre='v+=1', connect='i==j')
    run(2*defaultclock.dt)
    assert_equal(G.v[:], np.array([0., 2.]))

def test_subexpression():
    G = NeuronGroup(10, '''dv/dt = freq : 1
                           freq : Hz
                           array : 1
                           expr = 2*freq + array*Hz : Hz''')
    G.freq = '10*i*Hz'
    G.array = 5
    assert_equal(G.expr[:], 2*10*np.arange(10)*Hz + 5*Hz)

def test_non_linear_NYset(self):
    """
    This case covers: N param sets, 1 set of 1D x --> N sets of 1D y data
    """
    g1 = models.Gaussian1DModel([10, 10], [3, 3], [.2, .2])
    y1 = g1(self.x1)
    utils.assert_equal((y1[:, 0] - y1[:, 1]).nonzero(), (np.array([]),))

def test_alpha(self):
    """This test checks if RidgeCV finds the optimal `alpha`."""
    self.var.fit(self.x)
    # Currently we simply *know* empirically that from the three
    # candidate alphas 100 is closest to the optimum.
    # TODO: programmatically derive the optimum from the data
    assert_equal(self.var.fitting_model.alpha_, 100)

def test_refractoriness_variables():
    # Try a string evaluating to a quantity and an explicit boolean
    # condition -- all should do the same thing
    for ref_time in ['5*ms', '(t-lastspike) <= 5*ms',
                     'time_since_spike <= 5*ms', 'ref_subexpression',
                     '(t-lastspike) <= ref', 'ref', 'ref_no_unit*ms']:
        G = NeuronGroup(1, '''
                        dv/dt = 100*Hz : 1 (unless refractory)
                        dw/dt = 100*Hz : 1
                        ref : second
                        ref_no_unit : 1
                        time_since_spike = t - lastspike : second
                        ref_subexpression = (t - lastspike) <= ref : boolean
                        ''',
                        threshold='v>1', reset='v=0;w=0',
                        refractory=ref_time)
        G.ref = 5 * ms
        G.ref_no_unit = 5
        # It should take 10ms to reach the threshold, then v should stay at 0
        # for 5ms, while w continues to increase
        mon = StateMonitor(G, ['v', 'w'], record=True)
        net = Network(G, mon)
        net.run(20 * ms)
        try:
            # No difference before the spike
            assert_equal(mon[0].v[mon.t < 10 * ms], mon[0].w[mon.t < 10 * ms])
            # v is not updated during refractoriness
            in_refractoriness = mon[0].v[(mon.t >= 10 * ms) & (mon.t < 15 * ms)]
            assert_equal(in_refractoriness, np.zeros_like(in_refractoriness))
            # w should evolve as before
            assert_equal(mon[0].w[mon.t < 5 * ms],
                         mon[0].w[(mon.t >= 10 * ms) & (mon.t < 15 * ms)])
            assert np.all(mon[0].w[(mon.t >= 10 * ms) & (mon.t < 15 * ms)] > 0)
            # After refractoriness, v should increase again
            assert np.all(mon[0].v[(mon.t >= 15 * ms) & (mon.t < 20 * ms)] > 0)
        except AssertionError as ex:
            raise AssertionError('Assertion failed when using %r as '
                                 'refractory argument:\n%s' % (ref_time, ex))

def test_get_features_gamma():
    model_spikes = [
        [np.array([1, 5, 8]), np.array([2, 3, 8, 9])],   # correct rate
        [np.array([1, 5]), np.array([0, 2, 3, 8, 9])],   # wrong rate
    ]
    data_spikes = [np.array([0, 5, 9]), np.array([1, 3, 5, 6])]
    gf = GammaFactor(delta=0.5 * ms, time=10 * ms)
    features = gf.get_features(model_spikes, data_spikes, 0.1 * ms)
    assert_equal(np.shape(features), (2, 2))
    assert (np.all(np.array(features) > -1))

    normed_gf = GammaFactor(delta=0.5 * ms, time=10 * ms, normalization=1 / 2.)
    normed_features = normed_gf.get_features(model_spikes, data_spikes, 0.1 * ms)
    assert_equal(normed_features, 2 * features)

    features = gf.get_features([data_spikes] * 3, data_spikes, 0.1 * ms)
    assert_equal(np.shape(features), (3, 2))
    assert_almost_equal(features, np.zeros((3, 2)))

def test_poissoninput():
    # Test extreme cases and do a very basic test of an intermediate case;
    # we don't want tests to be stochastic
    G = NeuronGroup(10, '''x : volt
                           y : volt
                           y2 : volt
                           z : volt
                           z2 : volt
                           w : 1''')
    G.w = 0.5

    never_update = PoissonInput(G, 'x', 100, 0 * Hz, weight=1 * volt)
    always_update = PoissonInput(G, 'y', 50, 1 / defaultclock.dt,
                                 weight=2 * volt)
    always_update2 = PoissonInput(G, 'y2', 50, 1 / defaultclock.dt,
                                  weight='1*volt + 1*volt')
    sometimes_update = PoissonInput(G, 'z', 10000, 50 * Hz, weight=0.5 * volt)
    sometimes_update2 = PoissonInput(G, 'z2', 10000, 50 * Hz, weight='w*volt')

    mon = StateMonitor(G, ['x', 'y', 'y2', 'z', 'z2'], record=True, when='end')
    run(1 * ms)
    assert_equal(0, mon.x[:])
    assert_equal(np.tile((1 + np.arange(mon.y[:].shape[1])) * 50 * 2 * volt,
                         (10, 1)),
                 mon.y[:])
    assert_equal(np.tile((1 + np.arange(mon.y[:].shape[1])) * 50 * 2 * volt,
                         (10, 1)),
                 mon.y2[:])
    assert all(np.var(mon.z[:], axis=1) > 0)   # variability over time
    assert all(np.var(mon.z[:], axis=0) > 0)   # variability over neurons
    assert all(np.var(mon.z2[:], axis=1) > 0)  # variability over time
    assert all(np.var(mon.z2[:], axis=0) > 0)  # variability over neurons

def test_refractoriness_basic():
    G = NeuronGroup(1, '''
                    dv/dt = 100*Hz : 1 (unless refractory)
                    dw/dt = 100*Hz : 1
                    ''',
                    threshold='v>1', reset='v=0;w=0', refractory=5 * ms)
    # It should take 10ms to reach the threshold, then v should stay at 0
    # for 5ms, while w continues to increase
    mon = StateMonitor(G, ['v', 'w'], record=True, when='end')
    run(20 * ms)
    # No difference before the spike
    assert_equal(mon[0].v[mon.t < 10 * ms], mon[0].w[mon.t < 10 * ms])
    # v is not updated during refractoriness
    in_refractoriness = mon[0].v[(mon.t >= 10 * ms) & (mon.t < 15 * ms)]
    assert_equal(in_refractoriness, np.zeros_like(in_refractoriness))
    # w should evolve as before
    assert_equal(mon[0].w[mon.t < 5 * ms],
                 mon[0].w[(mon.t >= 10 * ms) & (mon.t < 15 * ms)])
    assert np.all(mon[0].w[(mon.t >= 10 * ms) & (mon.t < 15 * ms)] > 0)
    # After refractoriness, v should increase again
    assert np.all(mon[0].v[(mon.t >= 15 * ms) & (mon.t < 20 * ms)] > 0)

def test_unbalanced_sinkhorn_transport_class():
    """test_unbalanced_sinkhorn_transport_class"""
    ns = 150
    nt = 200

    Xs, ys = make_data_classif('3gauss', ns)
    Xt, yt = make_data_classif('3gauss2', nt)

    otda = ot.da.UnbalancedSinkhornTransport()

    # test its computed
    otda.fit(Xs=Xs, Xt=Xt)
    assert hasattr(otda, "cost_")
    assert hasattr(otda, "coupling_")
    assert hasattr(otda, "log_")

    # test dimensions of coupling
    assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0])))
    assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))

    # test transform
    transp_Xs = otda.transform(Xs=Xs)
    assert_equal(transp_Xs.shape, Xs.shape)

    Xs_new, _ = make_data_classif('3gauss', ns + 1)
    transp_Xs_new = otda.transform(Xs_new)
    # check that the oos method is working
    assert_equal(transp_Xs_new.shape, Xs_new.shape)

    # test inverse transform
    transp_Xt = otda.inverse_transform(Xt=Xt)
    assert_equal(transp_Xt.shape, Xt.shape)

    Xt_new, _ = make_data_classif('3gauss2', nt + 1)
    transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
    # check that the oos method is working
    assert_equal(transp_Xt_new.shape, Xt_new.shape)

    # test fit_transform
    transp_Xs = otda.fit_transform(Xs=Xs, Xt=Xt)
    assert_equal(transp_Xs.shape, Xs.shape)

    # test unsupervised vs semi-supervised mode
    otda_unsup = ot.da.SinkhornTransport()
    otda_unsup.fit(Xs=Xs, Xt=Xt)
    n_unsup = np.sum(otda_unsup.cost_)

    otda_semi = ot.da.SinkhornTransport()
    otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)
    assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))
    n_semisup = np.sum(otda_semi.cost_)

    # check that the cost matrix norms are indeed different
    assert n_unsup != n_semisup, "semisupervised mode not working"

    # check everything runs well with log=True
    otda = ot.da.SinkhornTransport(log=True)
    otda.fit(Xs=Xs, ys=ys, Xt=Xt)
    assert len(otda.log_.keys()) != 0

def _compare(synapses, expected):
    conn_matrix = np.zeros((len(synapses.source), len(synapses.target)))
    for i, j in zip(synapses.i[:], synapses.j[:]):
        conn_matrix[i, j] += 1
    assert_equal(conn_matrix, expected)

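# Minimal usage sketch for the helper above (the NeuronGroup/Synapses setup is
# illustrative, not taken from a specific test in this collection):
#
#     G = NeuronGroup(3, 'v:1')
#     S = Synapses(G, G)
#     S.connect(j='i')       # one synapse per neuron, on the diagonal
#     _compare(S, np.eye(3))
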
def test_state_variable_indexing():
    G1 = NeuronGroup(5, 'v:1')
    G2 = NeuronGroup(7, 'v:1')
    S = Synapses(G1, G2, 'w:1')
    S.connect(True, n=2)
    S.w[:, :, 0] = '5*i + j'
    S.w[:, :, 1] = '35 + 5*i + j'

    # Slicing
    assert len(S.w[:]) == len(S.w[:, :]) == len(S.w[:, :, :]) == len(G1)*len(G2)*2
    assert len(S.w[0:]) == len(S.w[0:, 0:]) == len(S.w[0:, 0:, 0:]) == len(G1)*len(G2)*2
    assert len(S.w[0]) == len(S.w[0, :]) == len(S.w[0, :, :]) == len(G2)*2
    assert len(S.w[0:2]) == len(S.w[0:2, :]) == len(S.w[0:2, :, :]) == 2*len(G2)*2
    assert len(S.w[:, 0]) == len(S.w[:, 0, :]) == len(G1)*2
    assert len(S.w[:, 0:2]) == len(S.w[:, 0:2, :]) == 2*len(G1)*2
    assert len(S.w[:, :2]) == len(S.w[:, :2, :]) == 2*len(G1)*2
    assert len(S.w[:, :, 0]) == len(G1)*len(G2)
    assert len(S.w[:, :, 0:2]) == len(G1)*len(G2)*2
    assert len(S.w[:, :, :2]) == len(G1)*len(G2)*2

    # Array-indexing (not yet supported for the synapse index)
    assert_equal(S.w[0:3], S.w[[0, 1, 2]])
    assert_equal(S.w[0:3], S.w[[0, 1, 2], np.arange(len(G2))])
    assert_equal(S.w[:, 0:3], S.w[:, [0, 1, 2]])
    assert_equal(S.w[:, 0:3], S.w[np.arange(len(G1)), [0, 1, 2]])

    # String-based indexing
    assert_equal(S.w[0:3], S.w['i<3'])
    assert_equal(S.w[:, 0:3], S.w['j<3'])
    assert_equal(S.w[:, :, 0], S.w['k==0'])

    # Invalid indices
    assert_raises(IndexError, lambda: S.w.__getitem__((1, 2, 3, 4)))
    assert_raises(IndexError, lambda: S.w.__getitem__(object()))

def test_spike_monitor():
    G = NeuronGroup(10, 'v:1', threshold='v>1', reset='v=0')
    G.v[0] = 1.1
    G.v[2] = 1.1
    G.v[5] = 1.1

    SG = G[3:]
    SG2 = G[:3]
    s_mon = SpikeMonitor(G)
    sub_s_mon = SpikeMonitor(SG)
    sub_s_mon2 = SpikeMonitor(SG2)
    run(defaultclock.dt)

    assert_equal(s_mon.i, np.array([0, 2, 5]))
    assert_equal(s_mon.t_, np.zeros(3))
    assert_equal(sub_s_mon.i, np.array([2]))
    assert_equal(sub_s_mon.t_, np.zeros(1))
    assert_equal(sub_s_mon2.i, np.array([0, 2]))
    assert_equal(sub_s_mon2.t_, np.zeros(2))

    expected = np.zeros(10, dtype=int)
    expected[[0, 2, 5]] = 1
    assert_equal(s_mon.count, expected)
    expected = np.zeros(7, dtype=int)
    expected[[2]] = 1
    assert_equal(sub_s_mon.count, expected)
    assert_equal(sub_s_mon2.count, np.array([1, 0, 1]))

def testHalfSizeParameter():
    raw = rawpy.imread(rawTestPath)
    s = raw.sizes
    rgb = raw.postprocess(half_size=True)
    assert_equal(rgb.shape[0], s.height // 2)
    assert_equal(rgb.shape[1], s.width // 2)

def test_synapse_access():
    G1 = NeuronGroup(10, 'v:1', threshold='False')
    G1.v = 'i'
    G2 = NeuronGroup(20, 'v:1', threshold='False')
    G2.v = 'i'
    SG1 = G1[:5]
    SG2 = G2[10:]
    S = Synapses(SG1, SG2, 'w:1', on_pre='v+=w')
    S.connect(True)

    S.w['j == 0'] = 5
    assert all(S.w['j==0'] == 5)
    S.w[2, 2] = 7
    assert all(S.w['i==2 and j==2'] == 7)
    S.w = '2*j'
    assert all(S.w[:, 1] == 2)

    assert len(S.w[:, 10]) == 0
    assert len(S.w['j==10']) == 0

    # Test referencing pre- and postsynaptic variables
    assert_equal(S.w[2:, :], S.w['v_pre >= 2'])
    assert_equal(S.w[:, :5], S.w['v_post < 15'])
    S.w = 'v_post'
    assert_equal(S.w[:], S.j[:] + 10)
    S.w = 'v_post + v_pre'
    assert_equal(S.w[:], S.j[:] + 10 + S.i[:])

    # Test using subgroups as indices
    assert len(S) == len(S.w[SG1, SG2])
    assert_equal(S.w[SG1, 1], S.w[:, 1])
    assert_equal(S.w[1, SG2], S.w[1, :])
    assert len(S.w[SG1, 10]) == 0

def test_poly2d(self):
    p2 = models.Polynomial2D(degree=3)
    p2.c0_0 = 5
    utils.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])

def test_state_variable_access_strings():
    for codeobj_class in codeobj_classes:
        G = NeuronGroup(10, 'v:volt', codeobj_class=codeobj_class)
        G.v = np.arange(10) * volt

        # Indexing with strings
        assert G.v['i==2'] == G.v[2]
        assert G.v_['i==2'] == G.v_[2]
        assert_equal(G.v['v >= 3*volt'], G.v[3:])
        assert_equal(G.v_['v >= 3*volt'], G.v_[3:])
        # Should also check for units
        assert_raises(DimensionMismatchError, lambda: G.v['v >= 3'])
        assert_raises(DimensionMismatchError, lambda: G.v['v >= 3*second'])

        # Setting with strings
        # --------------------
        # String value referring to i
        G.v = '2*i*volt'
        assert_equal(G.v[:], 2*np.arange(10)*volt)
        G.v[:5] = '3*i*volt'
        assert_equal(G.v[:],
                     np.array([0, 3, 6, 9, 12, 10, 12, 14, 16, 18])*volt)

        G.v = np.arange(10) * volt
        # String value referring to a state variable
        G.v = '2*v'
        assert_equal(G.v[:], 2*np.arange(10)*volt)
        G.v[:5] = '2*v'
        assert_equal(G.v[:],
                     np.array([0, 4, 8, 12, 16, 10, 12, 14, 16, 18])*volt)

        G.v = np.arange(10) * volt
        # String value referring to state variables, i, and an external variable
        ext = 5*volt
        G.v = 'v + ext + (N + i)*volt'
        assert_equal(G.v[:], 2*np.arange(10)*volt + 15*volt)

        G.v = np.arange(10) * volt
        G.v[:5] = 'v + ext + (N + i)*volt'
        assert_equal(G.v[:],
                     np.array([15, 17, 19, 21, 23, 5, 6, 7, 8, 9])*volt)

        G.v = 'v + randn()*volt'      # only check that it doesn't raise an error
        G.v[:5] = 'v + randn()*volt'  # only check that it doesn't raise an error

        G.v = np.arange(10) * volt
        # String index using a random number
        G.v['rand() <= 1'] = 0*mV
        assert_equal(G.v[:], np.zeros(10)*volt)

        G.v = np.arange(10) * volt
        # String index referring to i and setting to a scalar value
        G.v['i>=5'] = 0*mV
        assert_equal(G.v[:], np.array([0, 1, 2, 3, 4, 0, 0, 0, 0, 0])*volt)
        # String index referring to a state variable
        G.v['v<3*volt'] = 0*mV
        assert_equal(G.v[:], np.array([0, 0, 0, 3, 4, 0, 0, 0, 0, 0])*volt)
        # String index referring to state variables, i, and an external variable
        ext = 2*volt
        G.v['v>=ext and i==(N-6)'] = 0*mV
        assert_equal(G.v[:], np.array([0, 0, 0, 3, 0, 0, 0, 0, 0, 0])*volt)

        G.v = np.arange(10) * volt
        # Strings for both condition and values
        G.v['i>=5'] = 'v*2'
        assert_equal(G.v[:], np.array([0, 1, 2, 3, 4, 10, 12, 14, 16, 18])*volt)
        G.v['v>=5*volt'] = 'i*volt'
        assert_equal(G.v[:], np.arange(10)*volt)

def test_subexpression_with_constant():
    g = 2
    G = NeuronGroup(1, '''I = 1*g : 1''')
    assert_equal(G.I[:], np.array([2]))

def test_state_variable_access():
    for codeobj_class in codeobj_classes:
        G = NeuronGroup(10, 'v:volt', codeobj_class=codeobj_class)
        G.v = np.arange(10) * volt

        assert_equal(np.asarray(G.v[:]), np.arange(10))
        assert have_same_dimensions(G.v[:], volt)
        assert_equal(np.asarray(G.v[:]), G.v_[:])

        # Accessing single elements, slices and arrays
        assert G.v[5] == 5 * volt
        assert G.v_[5] == 5
        assert_equal(G.v[:5], np.arange(5) * volt)
        assert_equal(G.v_[:5], np.arange(5))
        assert_equal(G.v[[0, 5]], [0, 5] * volt)
        assert_equal(G.v_[[0, 5]], np.array([0, 5]))

        # Illegal indexing
        assert_raises(IndexError, lambda: G.v[0, 0])
        assert_raises(IndexError, lambda: G.v_[0, 0])
        assert_raises(TypeError, lambda: G.v[object()])
        assert_raises(TypeError, lambda: G.v_[object()])

        # A string representation should not raise any error
        assert len(str(G.v))
        assert len(repr(G.v))
        assert len(str(G.v_))
        assert len(repr(G.v_))

def test_state_variables():
    '''
    Test the setting and accessing of state variables.
    '''
    for codeobj_class in codeobj_classes:
        G = NeuronGroup(10, 'v : volt', codeobj_class=codeobj_class)

        # The variable N should be always present
        assert G.N == 10
        # But it should be read-only
        assert_raises(TypeError, lambda: G.__setattr__('N', 20))
        assert_raises(TypeError, lambda: G.__setattr__('N_', 20))

        G.v = -70*mV
        assert_raises(DimensionMismatchError, lambda: G.__setattr__('v', -70))
        G.v_ = float(-70*mV)
        assert_allclose(G.v[:], -70*mV)
        G.v = -70*mV + np.arange(10)*mV
        assert_allclose(G.v[:], -70*mV + np.arange(10)*mV)
        G.v = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] * volt
        assert_allclose(G.v[:], np.arange(10) * volt)
        # incorrect size
        assert_raises(ValueError, lambda: G.__setattr__('v', [0, 1]*volt))
        assert_raises(ValueError, lambda: G.__setattr__('v', np.arange(11)*volt))

        G.v = -70*mV
        # Numpy methods should be able to deal with state variables
        # (discarding units)
        assert_allclose(np.mean(G.v), float(-70*mV))
        # Getting the content should return a Quantity object which then
        # natively supports numpy functions that access a method
        assert_allclose(np.mean(G.v[:]), -70*mV)

        # You should also be able to set variables with a string
        G.v = '-70*mV + i*mV'
        assert_allclose(G.v[0], -70*mV)
        assert_allclose(G.v[9], -61*mV)
        assert_allclose(G.v[:], -70*mV + np.arange(10)*mV)
        # And it should raise a unit error if the units are incorrect
        assert_raises(DimensionMismatchError,
                      lambda: G.__setattr__('v', '70 + i'))
        assert_raises(DimensionMismatchError,
                      lambda: G.__setattr__('v', '70 + i*mV'))

        # Calculating with state variables should work too
        # With units
        assert all(G.v - G.v == 0)
        assert all(G.v - G.v[:] == 0*mV)
        assert all(G.v[:] - G.v == 0*mV)
        assert all(G.v + 70*mV == G.v[:] + 70*mV)
        assert all(70*mV + G.v == G.v[:] + 70*mV)
        assert all(G.v + G.v == 2*G.v)
        assert all(G.v / 2.0 == 0.5*G.v)
        assert all(1.0 / G.v == 1.0 / G.v[:])
        assert_equal((-G.v)[:], -G.v[:])
        assert_equal((+G.v)[:], G.v[:])

        # Without units
        assert all(G.v_ - G.v_ == 0)
        assert all(G.v_ - G.v_[:] == 0)
        assert all(G.v_[:] - G.v_ == 0)
        assert all(G.v_ + float(70*mV) == G.v_[:] + float(70*mV))
        assert all(float(70*mV) + G.v_ == G.v_[:] + float(70*mV))
        assert all(G.v_ + G.v_ == 2*G.v_)
        assert all(G.v_ / 2.0 == 0.5*G.v_)
        assert all(1.0 / G.v_ == 1.0 / G.v_[:])
        assert_equal((-G.v)[:], -G.v[:])
        assert_equal((+G.v)[:], G.v[:])

        # And in-place modification should work as well
        G.v += 10*mV
        G.v -= 10*mV
        G.v *= 2
        G.v /= 2.0

        # with unit checking
        assert_raises(DimensionMismatchError, lambda: G.v.__iadd__(3*second))
        assert_raises(DimensionMismatchError, lambda: G.v.__iadd__(3))
        assert_raises(DimensionMismatchError, lambda: G.v.__imul__(3*second))

        # in-place modification with strings should not work
        assert_raises(TypeError, lambda: G.v.__iadd__('string'))
        assert_raises(TypeError, lambda: G.v.__imul__('string'))
        assert_raises(TypeError, lambda: G.v.__idiv__('string'))
        assert_raises(TypeError, lambda: G.v.__isub__('string'))

def compare_faster():
    variables = collections.OrderedDict()
    variables['alpha'] = dict(min=-180, max=180, description="angle",
                              resolution=5, units='deg', units_display='deg')
    variables['r'] = dict(min=3, max=5, description="distance",
                          resolution=0.1, units='m', units_display='cm')
    # this will fail if precision is float32
    gh = GridHelper(variables, precision='float64')
    val_fast = gh.create_new()
    val_fast.fill(0)
    val_slow = gh.create_new()
    val_slow.fill(0)

    od = dtu.get_output_dir_for_test()

    F = 1
    alpha0 = 7
    # r0 = 4
    r0 = 4.1
    w0 = 1.

    value = dict(alpha=alpha0, r=r0)
    gh.add_vote(val_slow, value, w0, F)
    assert_equal(np.sum(val_slow > 0), 9)

    values = np.zeros((2, 1))
    values[0, 0] = alpha0
    values[1, 0] = r0
    weights = np.zeros(1)
    weights[0] = w0
    gh.add_vote_faster(val_fast, values, weights, F)
    assert_equal(np.sum(val_fast > 0), 9)

    d = grid_helper_plot(gh, val_slow)
    fn = os.path.join(od, 'compare_faster_slow.jpg')
    dtu.write_data_to_file(d.get_png(), fn)

    d = grid_helper_plot(gh, val_fast)
    fn = os.path.join(od, 'compare_faster_fast.jpg')
    dtu.write_data_to_file(d.get_png(), fn)

    D = val_fast - val_slow
    diff = np.max(np.abs(D))
    print('diff: %r' % diff)
    if diff > 1e-8:
        print(dtu.indent(array_as_string_sign(val_fast), 'val_fast '))
        print(dtu.indent(array_as_string_sign(val_slow), 'val_slow '))
        print(dtu.indent(array_as_string_sign(D), 'Diff '))
        print('non zero val_fast: %s' % val_fast[val_fast > 0])
        print('non zero val_slow: %s' % val_slow[val_slow > 0])
    assert_almost_equal(val_fast, val_slow)

def test_synapse_creation_generator_multiple_synapses():
    G1 = NeuronGroup(10, 'v:1', threshold='False')
    G2 = NeuronGroup(20, 'v:1', threshold='False')
    G1.v = 'i'
    G2.v = '10 + i'
    SG1 = G1[:5]
    SG2 = G2[10:]
    S1 = Synapses(SG1, SG2, 'w:1', on_pre='v+=w')
    S1.connect(j='k for k in range(N_post)', n='i')
    S2 = Synapses(SG1, SG2, 'w:1', on_pre='v+=w')
    S2.connect(j='k for k in range(N_post)', n='j')
    S3 = Synapses(SG2, SG1, 'w:1', on_pre='v+=w')
    S3.connect(j='k for k in range(N_post)', n='i')
    S4 = Synapses(SG2, SG1, 'w:1', on_pre='v+=w')
    S4.connect(j='k for k in range(N_post)', n='j')
    S5 = Synapses(SG1, SG2, 'w:1', on_pre='v+=w')
    S5.connect(j='k for k in range(N_post)', n='i+j')
    S6 = Synapses(SG2, SG1, 'w:1', on_pre='v+=w')
    S6.connect(j='k for k in range(N_post)', n='i+j')
    S7 = Synapses(SG1, SG2, 'w:1', on_pre='v+=w')
    S7.connect(j='k for k in range(N_post)', n='int(v_pre>2)*2')
    S8 = Synapses(SG2, SG1, 'w:1', on_pre='v+=w')
    S8.connect(j='k for k in range(N_post)', n='int(v_post>2)*2')
    S9 = Synapses(SG1, SG2, 'w:1', on_pre='v+=w')
    S9.connect(j='k for k in range(N_post)', n='int(v_post>22)*2')
    S10 = Synapses(SG2, SG1, 'w:1', on_pre='v+=w')
    S10.connect(j='k for k in range(N_post)', n='int(v_pre>22)*2')

    run(0*ms)  # for standalone

    # straightforward loop instead of doing something clever...
    for source in xrange(len(SG1)):
        assert_equal(S1.j[source, :], np.arange(len(SG2)).repeat(source))
        assert_equal(S2.j[source, :],
                     np.arange(len(SG2)).repeat(np.arange(len(SG2))))
        assert_equal(S3.i[:, source],
                     np.arange(len(SG2)).repeat(np.arange(len(SG2))))
        assert_equal(S4.i[:, source], np.arange(len(SG2)).repeat(source))
        assert_equal(S5.j[source, :],
                     np.arange(len(SG2)).repeat(np.arange(len(SG2)) + source))
        assert_equal(S6.i[:, source],
                     np.arange(len(SG2)).repeat(np.arange(len(SG2)) + source))
        if source > 2:
            assert_equal(S7.j[source, :], np.arange(len(SG2)).repeat(2))
            assert_equal(S8.i[:, source], np.arange(len(SG2)).repeat(2))
        else:
            assert len(S7.j[source, :]) == 0
            assert len(S8.i[:, source]) == 0
        assert_equal(S9.j[source, :], np.arange(3, len(SG2)).repeat(2))
        assert_equal(S10.i[:, source], np.arange(3, len(SG2)).repeat(2))

def test_subexpression_references():
    '''
    Assure that subexpressions in targeted groups are handled correctly.
    '''
    G = NeuronGroup(10, '''v : 1
                           v2 = 2*v : 1''')
    G.v = np.arange(10)
    SG1 = G[:5]
    SG2 = G[5:]

    S1 = Synapses(SG1, SG2, '''w : 1
                               u = v2_post + 1 : 1
                               x = v2_pre + 1 : 1''')
    S1.connect('i==(5-1-j)')
    assert_equal(S1.i[:], np.arange(5))
    assert_equal(S1.j[:], np.arange(5)[::-1])
    assert_equal(S1.u[:], np.arange(10)[:-6:-1]*2+1)
    assert_equal(S1.x[:], np.arange(5)*2+1)

    S2 = Synapses(G, SG2, '''w : 1
                             u = v2_post + 1 : 1
                             x = v2_pre + 1 : 1''')
    S2.connect('i==(5-1-j)')
    assert_equal(S2.i[:], np.arange(5))
    assert_equal(S2.j[:], np.arange(5)[::-1])
    assert_equal(S2.u[:], np.arange(10)[:-6:-1]*2+1)
    assert_equal(S2.x[:], np.arange(5)*2+1)

    S3 = Synapses(SG1, G, '''w : 1
                             u = v2_post + 1 : 1
                             x = v2_pre + 1 : 1''')
    S3.connect('i==(10-1-j)')
    assert_equal(S3.i[:], np.arange(5))
    assert_equal(S3.j[:], np.arange(10)[:-6:-1])
    assert_equal(S3.u[:], np.arange(10)[:-6:-1]*2+1)
    assert_equal(S3.x[:], np.arange(5)*2+1)

def test_conditional_gaussian_dependency_matrix(self):
    length = 100
    n_samples = 1000
    X = array([sample_markov_chain(length) for _ in range(n_samples)])

    # Next two should be equal
    s0 = AnomalyDetector(
        P_ConditionalGaussianDependencyMatrix(range(length), length)
    ).fit(X).anomaly_score(X)
    ad1 = AnomalyDetector(
        P_ConditionalGaussianCombiner(
            [P_ConditionalGaussian([i + 1], [i]) for i in range(length - 1)] +
            [P_ConditionalGaussian([0], [])]),
        cr_plus).fit(X)
    s1 = ad1.anomaly_score(X)
    assert_allclose(s0, s1, rtol=0.0001)  # OK

    # Most likely, these two are not equal but highly correlated
    ad2 = AnomalyDetector(
        [P_ConditionalGaussian([i], []) for i in range(length)],
        cr_plus).fit(X)
    s2 = ad2.anomaly_score(X)
    ad3 = AnomalyDetector(
        P_ConditionalGaussianCombiner(
            [P_ConditionalGaussian([i], []) for i in range(length)]),
        cr_plus).fit(X)
    s3 = ad3.anomaly_score(X)
    assert_equal(pearsonr(s2, s3)[0] > 0.985, True)

    # Test classification
    Y = array([sample_markov_chain(length, 0.2) for _ in range(n_samples)])
    Z = array([sample_markov_chain(length, 0.3) for _ in range(n_samples)])
    data = r_[X, Y, Z]
    labels = r_[['X'] * len(X), ['Y'] * len(Y), ['Z'] * len(Z)]
    data_index = shuffle(range(len(data)))
    training_set = data_index[:n_samples * 2]
    test_set = data_index[n_samples * 2:]

    models = {
        'independent gaussian':
            AnomalyDetector([P_Gaussian([i]) for i in range(length)], cr_plus),
        'independent conditional gaussian':
            AnomalyDetector(
                [P_ConditionalGaussian([i], []) for i in range(length)],
                cr_plus),
        'independent conditional gaussian with combiner':
            AnomalyDetector(
                P_ConditionalGaussianCombiner(
                    [P_ConditionalGaussian([i], []) for i in range(length)])),
        'single conditional gaussian with combiner':
            AnomalyDetector(
                P_ConditionalGaussianCombiner(
                    [P_ConditionalGaussian([i], [i - 1])
                     for i in range(1, length)] +
                    [P_ConditionalGaussian([0], [])])),
        'dependency matrix':
            AnomalyDetector(
                P_ConditionalGaussianDependencyMatrix(range(length), length)),
    }

    all_acc = {}
    for key in models:
        ad = models[key].fit(data[training_set], labels[training_set])
        adclf = SklearnClassifier.clf(ad)
        labels_predicted = adclf.predict(data[test_set])
        accuracy = sum(labels[test_set] == labels_predicted) / float(len(test_set))
        all_acc[key] = accuracy
        print key, "accuracy = ", accuracy

    assert_close(all_acc['independent gaussian'],
                 all_acc['independent conditional gaussian'], decimal=2)
    assert_close(all_acc['independent gaussian'],
                 all_acc['independent conditional gaussian with combiner'],
                 decimal=2)
    assert_close(all_acc['single conditional gaussian with combiner'],
                 all_acc['dependency matrix'], decimal=2)

def test_rate_arrays():
    P = PoissonGroup(2, np.array([0, 1. / defaultclock.dt]) * Hz)
    spikes = SpikeMonitor(P)
    run(2 * defaultclock.dt)
    assert_equal(spikes.count, np.array([0, 2]))

def test_synapses_access_subgroups():
    G1 = NeuronGroup(5, 'x:1')
    G2 = NeuronGroup(10, 'y:1')
    SG1 = G1[2:5]
    SG2 = G2[4:9]
    S = Synapses(G1, G2, 'w:1')
    S.connect()

    S.w[SG1, SG2] = 1
    assert_equal(S.w['(i>=2 and i<5) and (j>=4 and j<9)'], 1)
    assert_equal(S.w['not ((i>=2 and i<5) and (j>=4 and j<9))'], 0)

    S.w = 0
    S.w[SG1, :] = 1
    assert_equal(S.w['i>=2 and i<5'], 1)
    assert_equal(S.w['not (i>=2 and i<5)'], 0)

    S.w = 0
    S.w[:, SG2] = 1
    assert_equal(S.w['j>=4 and j<9'], 1)
    assert_equal(S.w['not (j>=4 and j<9)'], 0)

def test_sinkhorn_l1l2_transport_class():
    """test_sinkhorn_l1l2_transport_class"""
    ns = 150
    nt = 200

    Xs, ys = make_data_classif('3gauss', ns)
    Xt, yt = make_data_classif('3gauss2', nt)

    otda = ot.da.SinkhornL1l2Transport()

    # test its computed
    otda.fit(Xs=Xs, ys=ys, Xt=Xt)
    assert hasattr(otda, "cost_")
    assert hasattr(otda, "coupling_")
    assert hasattr(otda, "log_")

    # test dimensions of coupling
    assert_equal(otda.cost_.shape, ((Xs.shape[0], Xt.shape[0])))
    assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))

    # test margin constraints
    mu_s = unif(ns)
    mu_t = unif(nt)
    assert_allclose(np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
    assert_allclose(np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)

    # test transform
    transp_Xs = otda.transform(Xs=Xs)
    assert_equal(transp_Xs.shape, Xs.shape)

    Xs_new, _ = make_data_classif('3gauss', ns + 1)
    transp_Xs_new = otda.transform(Xs_new)
    # check that the oos method is working
    assert_equal(transp_Xs_new.shape, Xs_new.shape)

    # test inverse transform
    transp_Xt = otda.inverse_transform(Xt=Xt)
    assert_equal(transp_Xt.shape, Xt.shape)

    Xt_new, _ = make_data_classif('3gauss2', nt + 1)
    transp_Xt_new = otda.inverse_transform(Xt=Xt_new)
    # check that the oos method is working
    assert_equal(transp_Xt_new.shape, Xt_new.shape)

    # test fit_transform
    transp_Xs = otda.fit_transform(Xs=Xs, ys=ys, Xt=Xt)
    assert_equal(transp_Xs.shape, Xs.shape)

    # test unsupervised vs semi-supervised mode
    otda_unsup = ot.da.SinkhornL1l2Transport()
    otda_unsup.fit(Xs=Xs, ys=ys, Xt=Xt)
    n_unsup = np.sum(otda_unsup.cost_)

    otda_semi = ot.da.SinkhornL1l2Transport()
    otda_semi.fit(Xs=Xs, ys=ys, Xt=Xt, yt=yt)
    assert_equal(otda_semi.cost_.shape, ((Xs.shape[0], Xt.shape[0])))
    n_semisup = np.sum(otda_semi.cost_)

    # check that the cost matrix norms are indeed different
    assert n_unsup != n_semisup, "semisupervised mode not working"

    # check that the coupling forbids mass transport between labeled source
    # and labeled target samples
    mass_semi = otda_semi.coupling_[otda_semi.cost_ == otda_semi.limit_max]
    assert_allclose(mass_semi, np.zeros_like(mass_semi), rtol=1e-9, atol=1e-9)

    # check everything runs well with log=True
    otda = ot.da.SinkhornL1l2Transport(log=True)
    otda.fit(Xs=Xs, ys=ys, Xt=Xt)
    assert len(otda.log_.keys()) != 0

def test_subexpression_no_references():
    '''
    Assure that subexpressions are handled correctly, even when the subgroups
    are created on-the-fly.
    '''
    G = NeuronGroup(10, '''v : 1
                           v2 = 2*v : 1''')
    G.v = np.arange(10)

    assert_equal(G[5:].v2, np.arange(5, 10)*2)

    S1 = Synapses(G[:5], G[5:], '''w : 1
                                   u = v2_post + 1 : 1
                                   x = v2_pre + 1 : 1''')
    S1.connect('i==(5-1-j)')
    assert_equal(S1.i[:], np.arange(5))
    assert_equal(S1.j[:], np.arange(5)[::-1])
    assert_equal(S1.u[:], np.arange(10)[:-6:-1]*2+1)
    assert_equal(S1.x[:], np.arange(5)*2+1)

    S2 = Synapses(G, G[5:], '''w : 1
                               u = v2_post + 1 : 1
                               x = v2_pre + 1 : 1''')
    S2.connect('i==(5-1-j)')
    assert_equal(S2.i[:], np.arange(5))
    assert_equal(S2.j[:], np.arange(5)[::-1])
    assert_equal(S2.u[:], np.arange(10)[:-6:-1]*2+1)
    assert_equal(S2.x[:], np.arange(5)*2+1)

    S3 = Synapses(G[:5], G, '''w : 1
                               u = v2_post + 1 : 1
                               x = v2_pre + 1 : 1''')
    S3.connect('i==(10-1-j)')
    assert_equal(S3.i[:], np.arange(5))
    assert_equal(S3.j[:], np.arange(10)[:-6:-1])
    assert_equal(S3.u[:], np.arange(10)[:-6:-1]*2+1)
    assert_equal(S3.x[:], np.arange(5)*2+1)

def assert_equal(actual, desired, err_msg='', verbose=True):
    """Alternative naming for assertArrayEqual."""
    return nptu.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)

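# Minimal usage sketch for the wrapper above (nptu is assumed to be
# numpy.testing.utils, matching the import style used elsewhere in this
# collection):
#
#     assert_equal([1, 2, 3], np.array([1, 2, 3]))           # passes
#     assert_equal([1, 2], [1, 3], err_msg='values differ')  # raises AssertionError
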
def test_alternative_indexing():
    G = NeuronGroup(10, 'v : integer')
    G.v = 'i'
    assert_equal(G[-3:].v, np.array([7, 8, 9]))
    assert_equal(G[3].v, np.array([3]))
    assert_equal(G[[3, 4, 5]].v, np.array([3, 4, 5]))

def test_mapping_transport_class():
    """test_mapping_transport_class"""
    ns = 60
    nt = 120

    Xs, ys = make_data_classif('3gauss', ns)
    Xt, yt = make_data_classif('3gauss2', nt)
    Xs_new, _ = make_data_classif('3gauss', ns + 1)

    ##########################################################################
    # kernel == linear mapping tests
    ##########################################################################

    # check computation and dimensions if bias == False
    otda = ot.da.MappingTransport(kernel="linear", bias=False)
    otda.fit(Xs=Xs, Xt=Xt)
    assert hasattr(otda, "coupling_")
    assert hasattr(otda, "mapping_")
    assert hasattr(otda, "log_")

    assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))
    assert_equal(otda.mapping_.shape, ((Xs.shape[1], Xt.shape[1])))

    # test margin constraints
    mu_s = unif(ns)
    mu_t = unif(nt)
    assert_allclose(np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
    assert_allclose(np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)

    # test transform
    transp_Xs = otda.transform(Xs=Xs)
    assert_equal(transp_Xs.shape, Xs.shape)

    transp_Xs_new = otda.transform(Xs_new)
    # check that the oos method is working
    assert_equal(transp_Xs_new.shape, Xs_new.shape)

    # check computation and dimensions if bias == True
    otda = ot.da.MappingTransport(kernel="linear", bias=True)
    otda.fit(Xs=Xs, Xt=Xt)
    assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))
    assert_equal(otda.mapping_.shape, ((Xs.shape[1] + 1, Xt.shape[1])))

    # test margin constraints
    mu_s = unif(ns)
    mu_t = unif(nt)
    assert_allclose(np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
    assert_allclose(np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)

    # test transform
    transp_Xs = otda.transform(Xs=Xs)
    assert_equal(transp_Xs.shape, Xs.shape)

    transp_Xs_new = otda.transform(Xs_new)
    # check that the oos method is working
    assert_equal(transp_Xs_new.shape, Xs_new.shape)

    ##########################################################################
    # kernel == gaussian mapping tests
    ##########################################################################

    # check computation and dimensions if bias == False
    otda = ot.da.MappingTransport(kernel="gaussian", bias=False)
    otda.fit(Xs=Xs, Xt=Xt)
    assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))
    assert_equal(otda.mapping_.shape, ((Xs.shape[0], Xt.shape[1])))

    # test margin constraints
    mu_s = unif(ns)
    mu_t = unif(nt)
    assert_allclose(np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
    assert_allclose(np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)

    # test transform
    transp_Xs = otda.transform(Xs=Xs)
    assert_equal(transp_Xs.shape, Xs.shape)

    transp_Xs_new = otda.transform(Xs_new)
    # check that the oos method is working
    assert_equal(transp_Xs_new.shape, Xs_new.shape)

    # check computation and dimensions if bias == True
    otda = ot.da.MappingTransport(kernel="gaussian", bias=True)
    otda.fit(Xs=Xs, Xt=Xt)
    assert_equal(otda.coupling_.shape, ((Xs.shape[0], Xt.shape[0])))
    assert_equal(otda.mapping_.shape, ((Xs.shape[0] + 1, Xt.shape[1])))

    # test margin constraints
    mu_s = unif(ns)
    mu_t = unif(nt)
    assert_allclose(np.sum(otda.coupling_, axis=0), mu_t, rtol=1e-3, atol=1e-3)
    assert_allclose(np.sum(otda.coupling_, axis=1), mu_s, rtol=1e-3, atol=1e-3)

    # test transform
    transp_Xs = otda.transform(Xs=Xs)
    assert_equal(transp_Xs.shape, Xs.shape)

    transp_Xs_new = otda.transform(Xs_new)
    # check that the oos method is working
    assert_equal(transp_Xs_new.shape, Xs_new.shape)

    # check everything runs well with log=True
    otda = ot.da.MappingTransport(kernel="gaussian", log=True)
    otda.fit(Xs=Xs, Xt=Xt)
    assert len(otda.log_.keys()) != 0

def testPolynomial1D(self):
    d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14}
    p1 = models.Polynomial1D(3, **d)
    utils.assert_equal(p1.parameters, [11, 12, 13, 14])

def testarith(self):
    adder = 3
    array_adder = array([1, 2, 3])

    # add
    b = self.mix + adder
    self.assertIsInstance(b, OrderedDPMixture,
                          'integer addition returned wrong type')
    assert_equal(b.mus[0], self.mix.mus[0] + adder,
                 'integer addition returned wrong value')
    c = self.mix + array_adder
    self.assertIsInstance(c, OrderedDPMixture,
                          'array addition returned wrong type')
    assert_array_equal(c.mus[0], self.mix.mus[0] + array_adder,
                       'array addition returned wrong value')

    # radd
    b = adder + self.mix
    self.assertIsInstance(b, OrderedDPMixture,
                          'integer addition returned wrong type')
    assert_array_equal(b.mus[0], adder + self.mix.mus[0],
                       'integer addition returned wrong value')
    c = array_adder + self.mix
    self.assertIsInstance(c, OrderedDPMixture,
                          'array addition returned wrong type')
    assert_array_equal(c.mus[0], array_adder + self.mix.mus[0],
                       'array addition returned wrong value')

    # sub
    b = self.mix - adder
    self.assertIsInstance(b, OrderedDPMixture,
                          'integer subtraction returned wrong type')
    assert_array_equal(b.mus[0], self.mix.mus[0] - adder,
                       'integer subtraction returned wrong value')
    c = self.mix - array_adder
    self.assertIsInstance(c, OrderedDPMixture,
                          'array subtraction returned wrong type')
    assert_array_equal(c.mus[0], self.mix.mus[0] - array_adder,
                       'array subtraction returned wrong value')

    # rsub
    b = adder - self.mix
    self.assertIsInstance(b, OrderedDPMixture,
                          'integer subtraction returned wrong type')
    assert_array_equal(b.mus[0], adder - self.mix.mus[0],
                       'integer subtraction returned wrong value')
    c = array_adder - self.mix
    self.assertIsInstance(c, OrderedDPMixture,
                          'array subtraction returned wrong type')
    assert_array_equal(c.mus[0], array_adder - self.mix.mus[0],
                       'array subtraction returned wrong value')

    # mul
    b = self.mix * adder
    self.assertIsInstance(b, OrderedDPMixture,
                          'integer multiplication returned wrong type')
    assert_array_equal(b.mus[0], self.mix.mus[0] * adder,
                       'integer multiplication returned wrong value')
    c = self.mix * array_adder
    self.assertIsInstance(c, OrderedDPMixture,
                          'array multiplication returned wrong type')
    assert_array_equal(c.mus[0], dot(self.mix.mus[0], array_adder),
                       'array multiplication returned wrong value')

    # rmul
    b = adder * self.mix
    self.assertIsInstance(b, OrderedDPMixture,
                          'integer multiplication returned wrong type')
    assert_array_equal(b.mus[0], adder * self.mix.mus[0],
                       'integer multiplication returned wrong value')
    c = array_adder * self.mix
    self.assertIsInstance(c, OrderedDPMixture,
                          'array multiplication returned wrong type')
    assert_array_equal(c.mus[0], dot(array_adder, self.mix.mus[0]),
                       'array multiplication returned wrong value')

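# Note on the 'mul'/'rmul' cases above: a scalar factor scales the means
# elementwise, while an array factor is checked against dot(...), i.e. the
# mixture apparently applies an array factor as a linear map on the means
# rather than an elementwise scale (inferred from the assertions, not from
# the OrderedDPMixture documentation).
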
def test_timedarray_no_units():
    ta = TimedArray(np.arange(10), dt=0.1 * ms)
    G = NeuronGroup(1, 'value = ta(t) + 1: 1', dt=0.1 * ms)
    mon = StateMonitor(G, 'value', record=True, dt=0.1 * ms)
    run(1.1 * ms)
    assert_equal(mon[0].value_, np.clip(np.arange(len(mon[0].t)), 0, 9) + 1)

def testgetitem(self):
    assert_equal(self.mu1, self.mix[0].mu, 'getitem failed')
    self.mix[0] = self.clust2
    assert_equal(self.mu2, self.mix[0].mu, 'setitem failed')
    self.mix[0] = self.clust1