Code example #1
 def test_does_not_fail_with_kernel_not_allowing_spike_trains_as_argument(
         self):
     # Compare <https://neuralensemble.org/trac/neo/ticket/65>
     a = neo.SpikeTrain(sp.array([1.0, 2.0]) * pq.s, t_stop=3.0 * pq.s)
     b = neo.SpikeTrain(sp.array([1.5]) * pq.s, t_stop=2.0 * pq.s)
     k = sigproc.TriangularKernel(1.0 * pq.s, normalize=False)
     stm.van_rossum_dist((a, b), kernel=k)
Code example #2
 def test_allows_tau_equal_to_infinity(self):
     a = neo.SpikeTrain(sp.array([1.0, 1.9, 2.0]) * pq.s, t_stop=3.0 * pq.s)
     b = neo.SpikeTrain(sp.array([1.5]) * pq.s, t_stop=2.0 * pq.s)
     tau = sp.inf * pq.s
     expected = sp.array([
         [0.0, 4.0],
         [4.0, 0.0]])
     actual = stm.van_rossum_dist((a, b), tau)
     assert_array_almost_equal(expected, actual)
Code example #3
 def test_allows_use_of_different_kernel(self):
     a = neo.SpikeTrain(sp.array([1.0, 2.0]) * pq.s, t_stop=3.0 * pq.s)
     b = neo.SpikeTrain(sp.array([1.5]) * pq.s, t_stop=2.0 * pq.s)
     k = sigproc.GaussianKernel(1.0 * pq.s, normalize=False)
     expected = sp.array([
         [0.0, 0.8264827],
         [0.8264827, 0.0]])
     actual = stm.van_rossum_dist((a, b), kernel=k)
     assert_array_almost_equal(expected, actual)
Code example #4
 def test_return_correct_distance(self):
     a = neo.SpikeTrain(
         sp.array([1.0, 4.0, 5.0, 6.0, 9.0, 11.0]) * pq.s,
         t_stop=12.0 * pq.s)
     b = neo.SpikeTrain(
         sp.array([2.0, 4.0, 7.0, 10.0]) * pq.s, t_stop=12.0 * pq.s)
     c = neo.SpikeTrain(sp.array([4.0, 3.0]) * pq.s, t_stop=12.0 * pq.s)
     tau = 3.0 * pq.s
     expected = sp.array([
         [0.0, 1.895846644204, 2.878796160479],
         [1.895846644204, 0.0, 1.760192079676],
         [2.878796160479, 1.760192079676, 0.0]])
     actual = stm.van_rossum_dist((a, b, c), tau)
     assert_array_almost_equal(expected, actual)
Code example #5
import quantities as pq
import scipy as sp
import sklearn.base
import spykeutils.spike_train_metrics as stm


metric_defs = {
    'es': ("Event synchronization",
           lambda trains, tau: 1.0 - stm.event_synchronization(trains, tau)),
    'hm': ("Hunter-Milton similarity measure",
           lambda trains, tau: 1.0 - stm.hunter_milton_similarity(trains, tau)),
    'vp': ("Victor-Purpura\'s distance",
           lambda trains, tau: stm.victor_purpura_dist(trains, 2.0 / tau)),
    'vr': ("Van Rossum distance",
           lambda trains, tau: stm.van_rossum_dist(trains, tau))
}


class PrecomputedSpikeTrainMetricApplier(
        sklearn.base.BaseEstimator, sklearn.base.TransformerMixin):
    """ Precomputes a spike train metric on spike trains and retrieves the
    corresponding Gram matrix (the matrix of all pairwise distances) for
    requested sets of the initial data.

    The spike trains will be passed to this class only once, when it is
    constructed. At that point the 1D array attribute :attr:`x_in` will be
    created, indexing the spike trains. That attribute, or a slice of it,
    then has to be used in all further method calls that require input data,
    such as :meth:`fit` and :meth:`transform`.
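
As a quick, self-contained illustration of the idea described in the docstring above (this sketch is not part of the original source; the spike trains, the choice of the van Rossum metric, and the time constant are assumptions made for the example), the full Gram matrix of pairwise distances is computed once, and any later request for a subset of the data only slices that precomputed matrix through an index array analogous to :attr:`x_in`:

import neo
import quantities as pq
import scipy as sp
import spykeutils.spike_train_metrics as stm

# Illustrative spike trains (assumed data, not taken from the original source).
trains = [
    neo.SpikeTrain(sp.array([1.0, 2.0]) * pq.s, t_stop=3.0 * pq.s),
    neo.SpikeTrain(sp.array([1.5]) * pq.s, t_stop=3.0 * pq.s),
    neo.SpikeTrain(sp.array([0.5, 2.5]) * pq.s, t_stop=3.0 * pq.s)]

# Compute the Gram matrix (all pairwise van Rossum distances) exactly once.
gram = stm.van_rossum_dist(trains, 2.0 * pq.s)

# Index the spike trains by position, as the x_in attribute does, and slice
# the precomputed matrix for a requested subset instead of recomputing the
# metric.
x_in = sp.arange(len(trains))
subset = x_in[:2]
gram_subset = gram[subset][:, subset]
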
Code example #6
 def test_distance_of_empty_spiketrain_and_single_spike_equals_one(self):
     a = neo.SpikeTrain(sp.array([]) * pq.s, t_stop=2.0 * pq.s)
     b = neo.SpikeTrain(sp.array([1.0]) * pq.s, t_stop=2.0 * pq.s)
     expected = sp.array([[0.0, 1.0], [1.0, 0.0]])
     actual = stm.van_rossum_dist((a, b), 3.0 * pq.s)
     assert_array_almost_equal(expected, actual)
Code example #7
 def calc_metric(self, trains):
     return stm.van_rossum_dist(trains)
Code example #8
sampling_rate = 1000 * pq.Hz


metrics = {
    "cs": (
        r"$D_{\mathrm{CS}}$",
        lambda trains: stm.cs_dist(trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate),
    ),
    "es": (r"$S_{\mathrm{ES}}$", lambda trains: stm.event_synchronization(trains, tau, sort=False)),
    "hm": (r"$S_{\mathrm{HM}}$", lambda trains: stm.hunter_milton_similarity(trains, tau)),
    "norm": (
        r"$D_{\mathrm{ND}}$",
        lambda trains: stm.norm_dist(trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate),
    ),
    "ss": (r"$S_{S}}$", lambda trains: stm.schreiber_similarity(trains, sigproc.LaplacianKernel(tau), sort=False)),
    "vr": (r"$D_{\mathrm{R}}$", lambda trains: stm.van_rossum_dist(trains, tau, sort=False)),
    "vp": (r"$D_{\mathrm{V}}$", lambda trains: stm.victor_purpura_dist(trains, 2.0 / tau)),
}


def print_available_metrics():
    for key in metrics:
        print "%s  (%s)" % (key, metrics[key][0])


class BenchmarkData(object):
    def __init__(self, spike_count_range, train_count_range, firing_rate=50 * pq.Hz):
        self.spike_count_range = spike_count_range
        self.train_count_range = train_count_range
        self.num_trains_per_spike_count = sp.amax(train_count_range)
        self.trains = [
Code example #9
metrics = {
    'cs': ('Cauchy-Schwarz distance',
           lambda trains: stm.cs_dist(
               trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate)),
    'es': ('event synchronization',
           lambda trains: stm.event_synchronization(trains, tau, sort=False)),
    'hm': ('Hunter-Milton similarity measure',
           lambda trains: stm.hunter_milton_similarity(trains, tau)),
    'norm': ('norm distance',
             lambda trains: stm.norm_dist(
                 trains, sigproc.CausalDecayingExpKernel(tau), sampling_rate)),
    'ss': ('Schreiber et al. similarity measure',
           lambda trains: stm.schreiber_similarity(
               trains, sigproc.GaussianKernel(tau), sort=False)),
    'vr': ('van Rossum distance',
           lambda trains: stm.van_rossum_dist(trains, tau, sort=False)),
    'vp': ('Victor-Purpura\'s distance',
           lambda trains: stm.victor_purpura_dist(trains, 2.0 / tau)),
    'vr_mu': ('van Rossum multi-unit distance',
              lambda trains: stm.van_rossum_multiunit_dist(
                  trains_as_multiunits(trains), 0.5, tau)),
    'vp_mu': ('Victor-Purpura\'s multi-unit distance',
              lambda trains: stm.victor_purpura_multiunit_dist(
                  trains_as_multiunits(trains), 0.3, 2.0 / tau))}


def print_available_metrics():
    for key in metrics:
        print "%s  (%s)" % (key, metrics[key][0])

Code example #10
        for j in xrange(i, len(trains)):
            d[i, j] = d[j, i] = sp.sum(
                sp.absolute(binned[0][i] - binned[0][j]) ** exponent)
    return d


def normalized_vp_dist(trains, tau):
    num_spikes = sp.atleast_2d(sp.asarray([st.size for st in trains]))
    normalization = num_spikes + num_spikes.T
    normalization[normalization == 0.0] = 1.0
    return sp.asfarray(
        stm.victor_purpura_dist(trains, 2.0 / tau, sort=False)) / normalization


metrics = {
    'D_V': lambda trains, tau: stm.victor_purpura_dist(
        trains, 2.0 / tau, sort=False),
    'D_V^*': normalized_vp_dist,
    'D_R': lambda trains, tau: stm.van_rossum_dist(
        trains, tau, sort=False) ** 2 / 2.0,
    'D_{R*}': lambda trains, tau: stm.van_rossum_dist(
        trains, kernel=sigproc.TriangularKernel(
            sp.sqrt(3) * tau, normalize=False),
        sort=False),
    'D_B': binning_distance,
    'D_{B*}': lambda trains, tau: binning_distance(trains, tau, 1),
    'D_{ES}': lambda trains, tau: stm.event_synchronization(
        trains, tau, sort=False),
    'D_{HM}': lambda trains, tau: 1.0 - stm.hunter_milton_similarity(trains, tau)
}