def test_returns_nan_if_one_spike_train_is_empty(self):
    """Any matrix entry pairing an empty spike train must be NaN."""
    empty = create_empty_spike_train()
    non_empty = neo.SpikeTrain(sp.array([1.0]) * pq.s, t_stop=2.0 * pq.s)
    kernel = sigproc.GaussianKernel()
    with warnings.catch_warnings():
        # The empty train provokes a warning inside the metric; silence it
        # so the test output stays clean.
        warnings.simplefilter('ignore')
        actual = stm.schreiber_similarity((empty, non_empty), kernel)
        # Row/column 0 corresponds to the empty train; each entry touching
        # it is expected to be NaN.  The (1, 1) self-similarity entry is
        # deliberately not checked here.
        for row, col in ((0, 0), (0, 1), (1, 0)):
            self.assertTrue(sp.isnan(actual[row, col]))
def test_returns_correct_spike_train_schreiber_similarity(self):
    """Pairwise Schreiber similarity matches precomputed reference values."""
    train_a = neo.SpikeTrain(
        sp.array([1.0]) * pq.s, t_start=0.6 * pq.s, t_stop=1.4 * pq.s)
    train_b = neo.SpikeTrain(sp.array([0.5, 1.5]) * pq.s, t_stop=2.0 * pq.s)
    # NOTE(review): spike times given out of order -- presumably this
    # exercises internal sorting of the metric; confirm before relying on it.
    train_c = neo.SpikeTrain(
        sp.array([2.0, 1.0]) * pq.s, t_start=0.6 * pq.s, t_stop=2.4 * pq.s)
    kernel = sigproc.GaussianKernel(sp.sqrt(2.0) * pq.s)
    # Reference matrix: symmetric with a unit diagonal (self-similarity).
    expected = sp.array([
        [1.0, 0.9961114, 0.9430803],
        [0.9961114, 1.0, 0.9523332],
        [0.9430803, 0.9523332, 1.0]])
    actual = stm.schreiber_similarity((train_a, train_b, train_c), kernel)
    assert_array_almost_equal(expected, actual)
tau = 5.0 * pq.ms
sampling_rate = 1000 * pq.Hz

# Metric registry: key -> (mathtext plot label, callable).  Each callable
# takes a sequence of spike trains and returns the pairwise metric matrix.
metrics = {
    "cs": (
        r"$D_{\mathrm{CS}}$",
        lambda trains: stm.cs_dist(
            trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate),
    ),
    "es": (
        r"$S_{\mathrm{ES}}$",
        lambda trains: stm.event_synchronization(trains, tau, sort=False)),
    "hm": (
        r"$S_{\mathrm{HM}}$",
        lambda trains: stm.hunter_milton_similarity(trains, tau)),
    "norm": (
        r"$D_{\mathrm{ND}}$",
        lambda trains: stm.norm_dist(
            trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate),
    ),
    # BUG FIX: label was r"$S_{S}}$" -- the unbalanced closing brace is
    # invalid mathtext and breaks rendering; restyled to match the
    # \mathrm subscript form used by every other label.
    "ss": (
        r"$S_{\mathrm{S}}$",
        lambda trains: stm.schreiber_similarity(
            trains, sigproc.LaplacianKernel(tau), sort=False)),
    "vr": (
        r"$D_{\mathrm{R}}$",
        lambda trains: stm.van_rossum_dist(trains, tau, sort=False)),
    "vp": (
        r"$D_{\mathrm{V}}$",
        lambda trains: stm.victor_purpura_dist(trains, 2.0 / tau)),
}


def print_available_metrics():
    """Print one "key (label)" line per registered metric."""
    for key in metrics:
        # Single parenthesized argument: identical output under Python 2's
        # print statement, and forward-compatible with Python 3.
        print("%s (%s)" % (key, metrics[key][0]))


class BenchmarkData(object):
    def __init__(self, spike_count_range, train_count_range,
                 firing_rate=50 * pq.Hz):
        # NOTE(review): firing_rate is accepted but not stored in the
        # visible portion of __init__ -- the body may continue beyond
        # this chunk; confirm against the full file.
        self.spike_count_range = spike_count_range
        self.train_count_range = train_count_range
        # One column of trains per spike count: as many as the largest
        # requested train count.
        self.num_trains_per_spike_count = sp.amax(train_count_range)
def calc_similarity(self, trains):
    """Return the pairwise Schreiber similarity matrix for *trains*.

    Uses a Gaussian kernel with its default parameters.
    """
    gaussian = sigproc.GaussianKernel()
    return stm.schreiber_similarity(trains, gaussian)
return {0: trains[:half], 1: trains[half:2 * half]} metrics = { 'cs': ('Cauchy-Schwarz distance', lambda trains: stm.cs_dist( trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate)), 'es': ('event synchronization', lambda trains: stm.event_synchronization(trains, tau, sort=False)), 'hm': ('Hunter-Milton similarity measure', lambda trains: stm.hunter_milton_similarity(trains, tau)), 'norm': ('norm distance', lambda trains: stm.norm_dist( trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate)), 'ss': ('Schreiber et al. similarity measure', lambda trains: stm.schreiber_similarity( trains, sigproc.GaussianKernel(tau), sort=False)), 'vr': ('van Rossum distance', lambda trains: stm.van_rossum_dist(trains, tau, sort=False)), 'vp': ('Victor-Purpura\'s distance', lambda trains: stm.victor_purpura_dist(trains, 2.0 / tau)), 'vr_mu': ('van Rossum multi-unit distance', lambda trains: stm.van_rossum_multiunit_dist( trains_as_multiunits(trains), 0.5, tau)), 'vp_mu': ('Victor-Purpura\'s multi-unit distance', lambda trains: stm.victor_purpura_multiunit_dist( trains_as_multiunits(trains), 0.3, 2.0 / tau))} def print_available_metrics(): for key in metrics: print "%s (%s)" % (key, metrics[key][0])