def test_allows_to_set_constant_tau(self):
    """Passing an explicit tau must be honored by event_synchronization."""
    train_one = neo.SpikeTrain(
        sp.array([1.0, 2.5, 6.5]) * pq.s, t_stop=7.0 * pq.s)
    train_two = neo.SpikeTrain(
        sp.array([1.0, 5.7]) * pq.s, t_stop=10.0 * pq.s)
    tau = 0.5 * pq.s
    # Reference matrix; off-diagonal value precomputed for this train pair.
    expected = sp.array([
        [1.0, 0.40824829046386307],
        [0.40824829046386307, 1.0]])
    actual = stm.event_synchronization([train_one, train_two], tau)
    assert_array_almost_equal(expected, actual)
def test_allows_use_of_different_kernel(self):
    """A non-default (unnormalized Laplacian) kernel must be accepted."""
    train_one = neo.SpikeTrain(
        sp.array([1.0, 2.5, 6.5]) * pq.s, t_stop=7.0 * pq.s)
    train_two = neo.SpikeTrain(
        sp.array([1.0, 5.7]) * pq.s, t_stop=10.0 * pq.s)
    tau = 1.0 * pq.s
    kernel = sigproc.LaplacianKernel(1.0, normalize=False)
    # Reference matrix; off-diagonal value precomputed for this kernel/tau.
    expected = sp.array([
        [1.0, 0.70480122722318095],
        [0.70480122722318095, 1.0]])
    actual = stm.event_synchronization([train_one, train_two], tau, kernel=kernel)
    assert_array_almost_equal(expected, actual)
def test_returns_correct_event_synchronization(self):
    """Default call (no tau, no kernel) must produce the reference matrix."""
    train_a = neo.SpikeTrain(
        sp.array([1.0, 2.5, 6.5]) * pq.s, t_stop=7.0 * pq.s)
    # NOTE: spike times intentionally out of order — exercises the default
    # sorting behavior of event_synchronization (cf. sort=False elsewhere).
    train_b = neo.SpikeTrain(
        sp.array([5.7, 1.0]) * pq.s, t_stop=10.0 * pq.s)
    train_c = neo.SpikeTrain(
        sp.array([2.0, 2.1, 5.0]) * pq.s, t_stop=10.0 * pq.s)
    # Precomputed pairwise synchronization values for the three trains.
    expected = sp.array([
        [1.0, 0.81649658092772615, 0.0],
        [0.81649658092772615, 1.0, 0.4082482904638631],
        [0.0, 0.4082482904638631, 1.0]])
    actual = stm.event_synchronization([train_a, train_b, train_c])
    assert_array_almost_equal(expected, actual)
* 'hm': :func:`Hunter-Milton similarity measure <.spike_train_metrics.hunter_milton_similarity>` subtracted from 1 * 'vp': :func:`Victor Purpura's distance <.spike_train_metrics.victor_purpura_dist>` with :math:`q = 2/\\tau` * 'vr': :func:`Van Rossum distance <.spike_train_metrics.van_rossum_dist>` """ import quantities as pq import scipy as sp import sklearn.base import spykeutils.spike_train_metrics as stm metric_defs = { 'es': ("Event synchronization", lambda trains, tau: 1.0 - stm.event_synchronization(trains, tau)), 'hm': ("Hunter-Milton similarity measure", lambda trains, tau: 1.0 - stm.hunter_milton_similarity(trains, tau)), 'vp': ("Victor-Purpura\'s distance", lambda trains, tau: stm.victor_purpura_dist(trains, 2.0 / tau)), 'vr': ("Van Rossum distance", lambda trains, tau: stm.van_rossum_dist(trains, tau)) } class PrecomputedSpikeTrainMetricApplier( sklearn.base.BaseEstimator, sklearn.base.TransformerMixin): """ Precomputes a spike train metric on spike trains and retrieves the corresponding Gram matrix (the matrix of all pairwise distances) for requested sets of the initial data.
def calc_similarity(self, trains):
    """Delegate to spykeutils' event synchronization with default settings.

    :param trains: sequence of spike trains to compare pairwise.
    :returns: the matrix produced by :func:`stm.event_synchronization`.
    """
    return stm.event_synchronization(trains)
import spykeutils.spike_train_generation as stg
import spykeutils.spike_train_metrics as stm
import sys
import timeit

# Shared parameters for all metrics below.
tau = 5.0 * pq.ms
sampling_rate = 1000 * pq.Hz

# key -> (LaTeX plot label, callable computing the metric on a list of trains)
metrics = {
    "cs": (
        r"$D_{\mathrm{CS}}$",
        lambda trains: stm.cs_dist(
            trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate),
    ),
    "es": (r"$S_{\mathrm{ES}}$",
           lambda trains: stm.event_synchronization(trains, tau, sort=False)),
    "hm": (r"$S_{\mathrm{HM}}$",
           lambda trains: stm.hunter_milton_similarity(trains, tau)),
    "norm": (
        r"$D_{\mathrm{ND}}$",
        lambda trains: stm.norm_dist(
            trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate),
    ),
    # BUG FIX: label was r"$S_{S}}$" — the stray closing brace is invalid
    # mathtext; balanced and written like the other labels.
    "ss": (r"$S_{\mathrm{S}}$",
           lambda trains: stm.schreiber_similarity(
               trains, sigproc.LaplacianKernel(tau), sort=False)),
    "vr": (r"$D_{\mathrm{R}}$",
           lambda trains: stm.van_rossum_dist(trains, tau, sort=False)),
    "vp": (r"$D_{\mathrm{V}}$",
           lambda trains: stm.victor_purpura_dist(trains, 2.0 / tau)),
}


def print_available_metrics():
    """Print one 'key (label)' line per registered metric."""
    for key in metrics:
        # Parenthesized form works identically under Python 2 and 3.
        print("%s (%s)" % (key, metrics[key][0]))
tau = 5.0 * pq.ms sampling_rate = 1000 * pq.Hz def trains_as_multiunits(trains): half = len(trains) // 2 return {0: trains[:half], 1: trains[half:2 * half]} metrics = { 'cs': ('Cauchy-Schwarz distance', lambda trains: stm.cs_dist( trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate)), 'es': ('event synchronization', lambda trains: stm.event_synchronization(trains, tau, sort=False)), 'hm': ('Hunter-Milton similarity measure', lambda trains: stm.hunter_milton_similarity(trains, tau)), 'norm': ('norm distance', lambda trains: stm.norm_dist( trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate)), 'ss': ('Schreiber et al. similarity measure', lambda trains: stm.schreiber_similarity( trains, sigproc.GaussianKernel(tau), sort=False)), 'vr': ('van Rossum distance', lambda trains: stm.van_rossum_dist(trains, tau, sort=False)), 'vp': ('Victor-Purpura\'s distance', lambda trains: stm.victor_purpura_dist(trains, 2.0 / tau)), 'vr_mu': ('van Rossum multi-unit distance', lambda trains: stm.van_rossum_multiunit_dist( trains_as_multiunits(trains), 0.5, tau)),
for j in xrange(i, len(trains)): d[i, j] = d[j, i] = sp.sum( sp.absolute(binned[0][i] - binned[0][j]) ** exponent) return d def normalized_vp_dist(trains, tau): num_spikes = sp.atleast_2d(sp.asarray([st.size for st in trains])) normalization = num_spikes + num_spikes.T normalization[normalization == 0.0] = 1.0 return sp.asfarray( stm.victor_purpura_dist(trains, 2.0 / tau, sort=False)) / normalization metrics = { 'D_V': lambda trains, tau: stm.victor_purpura_dist( trains, 2.0 / tau, sort=False), 'D_V^*': normalized_vp_dist, 'D_R': lambda trains, tau: stm.van_rossum_dist( trains, tau, sort=False) ** 2 / 2.0, 'D_{R*}': lambda trains, tau: stm.van_rossum_dist( trains, kernel=sigproc.TriangularKernel( sp.sqrt(3) * tau, normalize=False), sort=False), 'D_B': binning_distance, 'D_{B*}': lambda trains, tau: binning_distance(trains, tau, 1), 'D_{ES}': lambda trains, tau: stm.event_synchronization( trains, tau, sort=False), 'D_{HM}': lambda trains, tau: 1.0 - stm.hunter_milton_similarity(trains, tau) }