def test3(self):
        btg = BTG(
            gencode_path='src/tests/gencode_example.csv', extend=None)

        bin_names = np.array([
            'chr1:0-100',  # 0 -> 0
            'chr2:201-300',  # 5 -> 1
            'chr2:301-400',  # 6 -> 2
            'chr1:101-200',  # 1 -> 3
            'chr3:0-100',  # 7 -> 4
            'chr3:101-200',  # 8 -> 5
            'chr2:0-100',  # 3 -> 6
            'chr2:101-200',  # 4 -> 7
            'chr3:201-300',  # 9 -> 8
            'chr3:301-400',  # 10 -> 9
            'chr1:201-300',  # 2 -> 10
            'chr3:401-500',  # 11 -> 11
            'chr3:501-600'  # 12 -> 12
        ])

        x = np.random.random((1000, len(bin_names)))

        counts, ids = btg.convert(x, bin_names, prefix='chr')
        ae(ids, ["gene" + str(i) for i in range(1, 8)])

        ground_counts = np.zeros((1000, 7))
        ground_counts[:, 0] += x[:, 0]
        ground_counts[:, 1] += x[:, 3]
        ground_counts[:, 2] += x[:, 7] + x[:, 1]
        ground_counts[:, 3] += x[:, 6] + x[:, 7] + x[:, 1]
        ground_counts[:, 4] += x[:, 5] + x[:, 8]
        ground_counts[:, 5] += x[:, 8] + x[:, 9] + x[:, 11] + x[:, 12]
        ground_counts[:, 6] += x[:, 8] + x[:, 9] + x[:, 11] + x[:, 12]

        aae(ground_counts, counts)
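(A note on helpers: ae, aae and ac are used throughout these examples but never imported. A preamble along the following lines makes the snippets self-contained; the exact aliases are an assumption, not something the examples confirm.)

import numpy as np
import numpy.random as npr
from numpy.testing import assert_array_equal as ae
from numpy.testing import assert_array_almost_equal as aae
from numpy.testing import assert_allclose as ac
from pytest import raises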
Example #2
def test8(self):
        btg = BTG(
            gencode_path='src/tests/gencode_example.csv',
            extend=None, n_jobs=4)

        bin_names = np.array([
            'chr1:0-100',  # 0
            'chr3:501-600',  # 1
            'chr2:201-300',  # 2
            'chr2:301-400',  # 3
            'chr1:101-200',  # 4
            'chr2:601-700',  # 5
            'chr3:700-900'  # 6
        ])

        x = np.random.random((1000, len(bin_names)))

        counts, ids = btg.convert(x, bin_names, prefix='chr')
        ae(ids, ["gene1", "gene2", "gene3", "gene4", "gene6", "gene7"])

        ground_counts = np.zeros((1000, 6))
        ground_counts[:, 0] += x[:, 0]
        ground_counts[:, 1] += x[:, 4]
        ground_counts[:, 2] += x[:, 2]
        ground_counts[:, 3] += x[:, 2]
        ground_counts[:, 4] += x[:, 1]
        ground_counts[:, 5] += x[:, 1] + x[:, 6]

        aae(ground_counts, counts)
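Compared with test3, this variant runs with n_jobs=4 and a sparser, partly non-overlapping set of bins; gene5 is missing from the returned ids, presumably because none of the supplied bins overlaps that gene in gencode_example.csv.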
Example #3
def test_h5_copy():
    with TemporaryDirectory() as tempdir:
        # Create the test HDF5 file in the temporary directory.
        filename = _create_test_file(tempdir)

        with open_h5(filename, 'a') as f:

            # Test dataset copy.
            assert f.exists('ds1')
            arr = f.read('ds1')[:]
            assert len(arr) == 10
            f.copy('ds1', 'ds1_new')
            assert f.exists('ds1')
            assert f.exists('ds1_new')
            arr_new = f.read('ds1_new')[:]
            assert len(arr_new) == 10
            ae(arr, arr_new)

            # Test group copy.
            assert f.exists('mygroup/ds2')
            arr = f.read('mygroup/ds2')
            f.copy('mygroup', 'g/mynewgroup')
            assert f.exists('mygroup')
            assert f.exists('g/mynewgroup')
            assert f.exists('g/mynewgroup/ds2')
            arr_new = f.read('g/mynewgroup/ds2')
            ae(arr, arr_new)
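For reference, a fixture consistent with the assertions above might look as follows; this is a sketch that assumes open_h5 wraps h5py, not the project's actual _create_test_file.

import os.path as op
import numpy as np
import h5py

def _create_test_file(tempdir):
    # A 10-element dataset 'ds1' and a dataset inside a group ('mygroup/ds2').
    filename = op.join(tempdir, 'test.h5')
    with h5py.File(filename, 'w') as f:
        f.create_dataset('ds1', data=np.arange(10))
        f.create_group('mygroup').create_dataset('ds2', data=np.random.rand(10))
    return filename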
Example #4
def test_h5_copy(tempdir):
    # Create the test HDF5 file in the temporary directory.
    filename = _create_test_file(tempdir)

    with open_h5(filename, "a") as f:

        # Test dataset copy.
        assert f.exists("ds1")
        arr = f.read("ds1")[:]
        assert len(arr) == 10
        f.copy("ds1", "ds1_new")
        assert f.exists("ds1")
        assert f.exists("ds1_new")
        arr_new = f.read("ds1_new")[:]
        assert len(arr_new) == 10
        ae(arr, arr_new)

        # Test group copy.
        assert f.exists("mygroup/ds2")
        arr = f.read("mygroup/ds2")
        f.copy("mygroup", "g/mynewgroup")
        assert f.exists("mygroup")
        assert f.exists("g/mynewgroup")
        assert f.exists("g/mynewgroup/ds2")
        arr_new = f.read("g/mynewgroup/ds2")
        ae(arr, arr_new)
Example #5
def _check_to_generate(cs, clusters):
    item = cs.items['my item']
    ae(item.to_generate(), clusters)
    ae(item.to_generate(None), clusters)
    ae(item.to_generate('default'), clusters)
    ae(item.to_generate('force'), np.arange(n_clusters))
    ae(item.to_generate('read-only'), [])
Example #6
def test_supervisor_edge_cases(supervisor):
    mc = supervisor

    # Empty selection at first.
    ae(mc.clustering.cluster_ids, [0, 1, 2, 10, 11, 20, 30])

    mc.select([0])
    assert mc.selected == [0]

    mc.undo()
    mc.redo()

    # Merge.
    mc.merge()
    assert mc.selected == [0]

    mc.merge([])
    assert mc.selected == [0]

    mc.merge([10])
    assert mc.selected == [0]

    # Split.
    mc.split([])
    assert mc.selected == [0]

    # Move.
    mc.move('ignored', [])

    mc.save()
Example #7
def test_transform_chain_empty(array):
    t = TransformChain()

    assert t.cpu_transforms == []
    assert t.gpu_transforms == []

    ae(t.apply(array), array)
Example #8
def test_gui_wizard(qtbot):
    gui = _start_manual_clustering()
    n = gui.n_clusters
    qtbot.addWidget(gui.main_window)
    gui.show()

    clusters = np.arange(gui.n_clusters)
    best_clusters = gui.wizard.best_clusters()

    # assert gui.wizard.best_clusters(1)[0] == best_clusters[0]
    ae(np.unique(best_clusters), clusters)
    assert len(gui.wizard.most_similar_clusters()) == n - 1

    assert len(gui.wizard.most_similar_clusters(0, n_max=3)) == 3

    clusters = gui.cluster_ids[:2]
    up = gui.merge(clusters)
    new = up.added[0]
    assert np.all(
        np.in1d(gui.wizard.best_clusters(), np.arange(clusters[-1] + 1,
                                                      new + 1)))
    assert np.all(
        np.in1d(gui.wizard.most_similar_clusters(new),
                np.arange(clusters[-1] + 1, new)))

    gui.close()
Example #9
def test_loader():
    n_samples_trace, n_channels = 10000, 100
    n_samples = 40
    n_spikes = n_samples_trace // (2 * n_samples)

    traces = artificial_traces(n_samples_trace, n_channels)
    spike_samples = np.cumsum(
        npr.randint(low=0, high=2 * n_samples, size=n_spikes))

    with raises(ValueError):
        WaveformLoader(traces)

    # Create a loader.
    loader = WaveformLoader(traces, n_samples=n_samples)
    assert id(loader.traces) == id(traces)
    loader.traces = traces

    # Extract a waveform.
    t = spike_samples[10]
    waveform = loader._load_at(t)
    assert waveform.shape == (n_samples, n_channels)
    ae(waveform, traces[t - 20:t + 20, :])

    waveforms = loader[spike_samples[10:20]]
    assert waveforms.shape == (10, n_samples, n_channels)
    t = spike_samples[15]
    w1 = waveforms[5, ...]
    w2 = traces[t - 20:t + 20, :]
    assert np.allclose(w1, w2)
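With n_samples=40, the loader extracts a symmetric window of 20 samples on each side of the spike time, which is what the traces[t - 20:t + 20, :] comparisons rely on.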
Example #10
def test_firing_rate_1():
    spike_clusters = np.tile(np.arange(10), 100)
    fr = firing_rate(spike_clusters,
                     cluster_ids=np.arange(10),
                     bin_size=.1,
                     duration=1.)
    ae(fr, np.ones((10, 10)) * 1000)
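The expected value can be checked by hand: each of the 10 clusters fires 100 spikes in a 1-second recording, so each pairwise 0.1 s bin is expected to hold 100 * 100 * 0.1 / 1.0 = 1000, hence the 10 x 10 matrix filled with 1000.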
Example #11
def test_positions():
    probe = staggered_positions(31)
    assert probe.shape == (31, 2)
    ae(probe[-1], (0, 0))

    probe = linear_positions(29)
    assert probe.shape == (29, 2)
Example #12
def test_manual_clustering_edge_cases(manual_clustering):
    mc = manual_clustering

    # Empty selection at first.
    ae(mc.clustering.cluster_ids, [0, 1, 2, 10, 11, 20, 30])

    mc.select([0])
    assert mc.selected == [0]

    mc.undo()
    mc.redo()

    # Merge.
    mc.merge()
    assert mc.selected == [0]

    mc.merge([])
    assert mc.selected == [0]

    mc.merge([10])
    assert mc.selected == [0]

    # Split.
    mc.split([])
    assert mc.selected == [0]

    # Move.
    mc.move('ignored', [])

    mc.save()
Example #13
def test_probe():
    probe = {'channel_groups': {
             0: {'channels': [0, 3, 1],
                 'graph': [[0, 3], [1, 0]],
                 'geometry': {0: (10, 10), 1: (10, 20), 3: (20, 30)},
                 },
             1: {'channels': [7],
                 'graph': [],
                 },
             }}
    adjacency = {0: set([1, 3]),
                 1: set([0]),
                 3: set([0]),
                 }
    assert _probe_channels(probe, 0) == [0, 3, 1]
    ae(_probe_positions(probe, 0), [(10, 10), (20, 30), (10, 20)])
    assert _probe_adjacency_list(probe) == adjacency

    mea = MEA(probe=probe)

    assert mea.adjacency == adjacency
    assert mea.channels_per_group == {0: [0, 3, 1], 1: [7]}
    assert mea.channels == [0, 3, 1]
    assert mea.n_channels == 3
    ae(mea.positions, [(10, 10), (20, 30), (10, 20)])
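Note that positions follow the order of the 'channels' list ([0, 3, 1]) rather than the key order of the geometry dictionary, which is why the expected positions come back as [(10, 10), (20, 30), (10, 20)].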
Example #15
def test_mea():

    n_channels = 10
    channels = np.arange(n_channels)
    positions = np.random.randn(n_channels, 2)

    mea = MEA(channels)
    mea.positions = positions
    ae(mea.positions, positions)
    assert mea.adjacency is None

    mea = MEA(channels, positions=positions)
    assert mea.n_channels == n_channels

    mea = MEA(channels, positions=positions)
    assert mea.n_channels == n_channels

    with raises(AssertionError):
        MEA(channels=np.arange(n_channels + 1), positions=positions)

    with raises(AssertionError):
        MEA(channels=channels, positions=positions[:-1, :])

    mea = MEA(channels=channels)
    assert mea.n_channels == n_channels
    mea.positions = positions
    with raises(ValueError):
        mea.positions = positions[:-1, :]
Example #16
def _test_download_file(checksum=None):
    with TemporaryDirectory() as tmpdir:
        path = op.join(tmpdir, "test.kwik")
        download_file(_URL, path, checksum=checksum)
        with open(path, "rb") as f:
            data = f.read()
        ae(np.fromstring(data, np.float32), _DATA)
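A modernization note: np.fromstring is deprecated for binary input in current NumPy; np.frombuffer(data, np.float32) is the drop-in replacement here and in the similar download test further down (where _DATA.tostring() would likewise become _DATA.tobytes()).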
Example #17
def test_h5_copy(tempdir):
    # Create the test HDF5 file in the temporary directory.
    filename = _create_test_file(tempdir)

    with open_h5(filename, 'a') as f:

        # Test dataset copy.
        assert f.exists('ds1')
        arr = f.read('ds1')[:]
        assert len(arr) == 10
        f.copy('ds1', 'ds1_new')
        assert f.exists('ds1')
        assert f.exists('ds1_new')
        arr_new = f.read('ds1_new')[:]
        assert len(arr_new) == 10
        ae(arr, arr_new)

        # Test group copy.
        assert f.exists('mygroup/ds2')
        arr = f.read('mygroup/ds2')
        f.copy('mygroup', 'g/mynewgroup')
        assert f.exists('mygroup')
        assert f.exists('g/mynewgroup')
        assert f.exists('g/mynewgroup/ds2')
        arr_new = f.read('g/mynewgroup/ds2')
        ae(arr, arr_new)
Example #18
def test_loader():
    n_samples_trace, n_channels = 10000, 100
    n_samples = 40
    n_spikes = n_samples_trace // (2 * n_samples)

    traces = artificial_traces(n_samples_trace, n_channels)
    spike_samples = np.cumsum(npr.randint(low=0, high=2 * n_samples,
                                          size=n_spikes))

    with raises(ValueError):
        WaveformLoader(traces)

    # Create a loader.
    loader = WaveformLoader(traces, n_samples=n_samples)
    assert id(loader.traces) == id(traces)
    loader.traces = traces

    # Extract a waveform.
    t = spike_samples[10]
    waveform = loader._load_at(t)
    assert waveform.shape == (n_samples, n_channels)
    ae(waveform, traces[t - 20:t + 20, :])

    waveforms = loader[spike_samples[10:20]]
    assert waveforms.shape == (10, n_samples, n_channels)
    t = spike_samples[15]
    w1 = waveforms[5, ...]
    w2 = traces[t - 20:t + 20, :]
    assert np.allclose(w1, w2)
Example #19
def test_kwik_clusterings(tempdir):

    # Create the test HDF5 file in the temporary directory.
    filename = create_mock_kwik(tempdir,
                                n_clusters=_N_CLUSTERS,
                                n_spikes=_N_SPIKES,
                                n_channels=_N_CHANNELS,
                                n_features_per_channel=_N_FETS,
                                n_samples_traces=_N_SAMPLES_TRACES)

    kwik = KwikModel(filename)
    assert kwik.clusterings == ['main', 'original']

    # The default clustering is 'main'.
    assert kwik.n_spikes == _N_SPIKES
    assert kwik.n_clusters == _N_CLUSTERS
    assert kwik.cluster_groups[_N_CLUSTERS - 1] == 3
    ae(kwik.cluster_ids, np.arange(_N_CLUSTERS))

    # Change clustering.
    kwik.clustering = 'original'
    n_clu = kwik.n_clusters
    assert kwik.n_spikes == _N_SPIKES
    # With a small number of spikes, as here, some clusters may be empty.
    assert _N_CLUSTERS * 2 - 4 <= n_clu <= _N_CLUSTERS * 2
    assert kwik.cluster_groups[n_clu - 1] == 3
    assert len(kwik.cluster_ids) == n_clu
Example #20
def test_kwik_empty(tempdir):

    channels = [0, 3, 1]
    graph = [[0, 3], [1, 0]]
    probe = {'channel_groups': {
             0: {'channels': channels,
                 'graph': graph,
                 'geometry': {0: (10, 10)},
                 }}}
    sample_rate = 20000

    kwik_path = op.join(tempdir, 'test.kwik')
    create_kwik(kwik_path=kwik_path, probe=probe, sample_rate=sample_rate)

    model = KwikModel(kwik_path)
    ae(model.channels, sorted(channels))
    ae(model.channel_order, channels)

    assert model.sample_rate == sample_rate
    assert model.n_channels == 3
    assert model.spike_samples is None
    assert model.has_kwx()
    assert model.n_spikes == 0
    assert model.n_clusters == 0
    model.describe()
Example #21
def test_extend_assignment():
    spike_clusters = np.array([3, 5, 2, 9, 5, 5, 2])
    spike_ids = np.array([0, 2])

    # These spikes belong to the following clusters.
    clusters = np.unique(spike_clusters[spike_ids])
    ae(clusters, [2, 3])

    # First case: assigning our two spikes to a new cluster.
    # This should not depend on the index chosen.
    for to in (123, 0, 1, 2, 3):
        clusters_rel = [to] * len(spike_ids)
        new_spike_ids, new_cluster_ids = _extend_assignment(spike_ids,
                                                            spike_clusters,
                                                            clusters_rel,
                                                            10,
                                                            )
        ae(new_spike_ids, [0, 2, 6])
        ae(new_cluster_ids, [10, 10, 11])

    # Second case: we assign the spikes to different clusters.
    clusters_rel = [0, 1]
    new_spike_ids, new_cluster_ids = _extend_assignment(spike_ids,
                                                        spike_clusters,
                                                        clusters_rel,
                                                        10,
                                                        )
    ae(new_spike_ids, [0, 2, 6])
    ae(new_cluster_ids, [10, 11, 12])
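What _extend_assignment is doing here: spikes 0 and 2 belong to clusters 3 and 2, and spike 6 is the only other spike in those clusters, so it is pulled into the assignment as well; the resulting groups then receive fresh cluster ids counting up from the supplied value 10.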
Example #22
def test_concatenate_spike_clusters():
    spikes, clusters = _concatenate_spike_clusters(([1, 5, 4],
                                                    [10, 50, 40]),
                                                   ([2, 0, 3, 6],
                                                    [20, 0, 30, 60]))
    ae(spikes, np.arange(7))
    ae(clusters, np.arange(0, 60 + 1, 10))
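The concatenated pairs come back sorted by spike id, which interleaves the two inputs into spikes 0..6 with clusters 0, 10, ..., 60.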
Example #23
def test_lasso_simple(qtbot):
    view = BaseCanvas()

    x = .25 * np.random.randn(N)
    y = .25 * np.random.randn(N)

    scatter = ScatterVisual()
    view.add_visual(scatter)
    scatter.set_data(x=x, y=y)

    l = Lasso()
    l.attach(view)
    l.create_lasso_visual()

    view.show()
    #qtbot.waitForWindowShown(view)

    l.add((-.5, -.5))
    l.add((+.5, -.5))
    l.add((+.5, +.5))
    l.add((-.5, +.5))
    assert l.count == 4
    assert l.polygon.shape == (4, 2)
    b = [[-.5, -.5], [+.5, -.5], [+.5, +.5], [-.5, +.5]]
    ae(l.in_polygon(b), [False, False, True, True])
    assert str(l)

    # qtbot.stop()
    view.close()
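The four probe points in b are exactly the lasso's corners. Points lying on a polygon's boundary are an edge case whose classification is implementation-defined; this assertion pins down the convention used here (the two top corners count as inside, the two bottom ones do not).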
Example #24
def _test_artificial(n_spikes=None, n_clusters=None):
    n_samples_waveforms = 32
    n_samples_traces = 50
    n_channels = 35
    n_features = n_channels * 2

    # Waveforms.
    waveforms = artificial_waveforms(n_spikes=n_spikes,
                                     n_samples=n_samples_waveforms,
                                     n_channels=n_channels)
    assert waveforms.shape == (n_spikes, n_samples_waveforms, n_channels)

    # Traces.
    traces = artificial_traces(n_samples=n_samples_traces,
                               n_channels=n_channels)
    assert traces.shape == (n_samples_traces, n_channels)

    # Spike clusters.
    spike_clusters = artificial_spike_clusters(n_spikes=n_spikes,
                                               n_clusters=n_clusters)
    assert spike_clusters.shape == (n_spikes,)
    if n_clusters >= 1:
        assert spike_clusters.min() in (0, 1)
        assert spike_clusters.max() in (n_clusters - 1, n_clusters - 2)
    ae(np.unique(spike_clusters), np.arange(n_clusters))

    # Features.
    features = artificial_features(n_spikes, n_features)
    assert features.shape == (n_spikes, n_features)

    # Masks.
    masks = artificial_masks(n_spikes, n_channels)
    assert masks.shape == (n_spikes, n_channels)
Example #26
def test_manual_clustering_edge_cases(manual_clustering):
    mc = manual_clustering

    # Empty selection at first.
    ae(mc.clustering.cluster_ids, [0, 1, 2, 10, 11, 20, 30])

    mc.select([0])
    assert mc.selected == [0]

    mc.undo()
    mc.redo()

    # Merge.
    mc.merge()
    assert mc.selected == [0]

    mc.merge([])
    assert mc.selected == [0]

    mc.merge([10])
    assert mc.selected == [0]

    # Split.
    mc.split([])
    assert mc.selected == [0]

    # Move.
    mc.move([], 'ignored')

    mc.save()
Example #27
def test_in_polygon():
    polygon = [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]
    points = np.random.uniform(size=(100, 2), low=-1, high=1)
    idx_expected = np.nonzero((points[:, 0] > 0) & (points[:, 1] > 0)
                              & (points[:, 0] < 1) & (points[:, 1] < 1))[0]
    idx = np.nonzero(_in_polygon(points, polygon))[0]
    ae(idx, idx_expected)
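One plausible way to implement _in_polygon is via matplotlib's Path; this is a sketch consistent with the test above, not necessarily the project's actual code.

import numpy as np
from matplotlib.path import Path

def _in_polygon(points, polygon):
    # Boolean mask: True for each point falling inside the polygon
    # (boundary handling is implementation-defined).
    return Path(np.asarray(polygon), closed=True).contains_points(np.atleast_2d(points))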
Example #29
def test_mea():

    n_channels = 10
    channels = np.arange(n_channels)
    positions = np.random.randn(n_channels, 2)

    mea = MEA(channels)
    mea.positions = positions
    ae(mea.positions, positions)
    assert mea.adjacency is None

    mea = MEA(channels, positions=positions)
    assert mea.n_channels == n_channels

    mea = MEA(channels, positions=positions)
    assert mea.n_channels == n_channels

    with raises(AssertionError):
        MEA(channels=np.arange(n_channels + 1), positions=positions)

    with raises(AssertionError):
        MEA(channels=channels, positions=positions[:-1, :])

    mea = MEA(channels=channels)
    assert mea.n_channels == n_channels
    mea.positions = positions
    with raises(ValueError):
        mea.positions = positions[:-1, :]
Example #30
def test_disk_store():

    dtype = np.float32
    sha = (2, 4)
    shb = (3, 5)
    a = np.random.rand(*sha).astype(dtype)
    b = np.random.rand(*shb).astype(dtype)

    def _assert_equal(d_0, d_1):
        """Test the equality of two dictionaries containing NumPy arrays."""
        assert sorted(d_0.keys()) == sorted(d_1.keys())
        for key in d_0.keys():
            ac(d_0[key], d_1[key])

    with TemporaryDirectory() as tempdir:
        ds = DiskStore(tempdir)

        ds.register_file_extensions(['key', 'key_bis'])
        assert ds.cluster_ids == []

        ds.store(3, key=a)
        _assert_equal(ds.load(3,
                              ['key'],
                              dtype=dtype,
                              shape=sha,
                              ),
                      {'key': a})
        loaded = ds.load(3, 'key', dtype=dtype, shape=sha)
        ac(loaded, a)

        # Loading a non-existing key returns None.
        assert ds.load(3, 'key_bis') is None
        assert ds.cluster_ids == [3]

        ds.store(3, key_bis=b)
        _assert_equal(ds.load(3, ['key'], dtype=dtype, shape=sha), {'key': a})
        _assert_equal(ds.load(3, ['key_bis'],
                              dtype=dtype,
                              shape=shb,
                              ),
                      {'key_bis': b})
        _assert_equal(ds.load(3,
                              ['key', 'key_bis'],
                              dtype=dtype,
                              ),
                      {'key': a.ravel(), 'key_bis': b.ravel()})
        ac(ds.load(3, 'key_bis', dtype=dtype, shape=shb), b)
        assert ds.cluster_ids == [3]

        ds.erase([2, 3])
        assert ds.load(3, ['key']) == {'key': None}
        assert ds.cluster_ids == []

        # Test load/save file.
        ds.save_file('test', {'a': a})
        ds = DiskStore(tempdir)
        data = ds.load_file('test')
        ae(data['a'], a)
        assert ds.load_file('test2') is None
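Note the shape handling: when load() receives a dtype but no shape, the arrays come back flat, which is why the combined ['key', 'key_bis'] load is compared against a.ravel() and b.ravel().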
Example #31
def test_transform_chain_add():
    tc = TransformChain()
    tc.add_on_cpu([Scale(.5)])

    tc_2 = TransformChain()
    tc_2.add_on_cpu([Scale(2.)])

    ae((tc + tc_2).apply([3.]), [[3.]])
Example #32
def test_clustering_new_id():
    spike_clusters = 10 * np.ones(6, dtype=np.int32)
    spike_clusters[2:4] = 20
    spike_clusters[4:6] = 30
    clustering = Clustering(spike_clusters)
    clustering.split(list(range(1, 5)))
    ae(clustering.spike_clusters, [32, 31, 31, 31, 31, 33])
    assert clustering.new_cluster_id() == 34
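Unpacking the expected result: the split moves spikes 1-4 into one new cluster (31), and the leftover spikes of the clusters it touched are relabelled as well (spike 0 from cluster 10 becomes 32, spike 5 from cluster 30 becomes 33), so the next available id is 34.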
Example #33
def test_transform_chain_one(array):
    translate = Translate([1, 2])
    t = TransformChain()
    t.add([translate])

    assert t.transforms == [translate]

    ae(t.apply(array), [[0, 2], [2, 4]])
Example #35
def test_disk_store(tempdir):

    dtype = np.float32
    sha = (2, 4)
    shb = (3, 5)
    a = np.random.rand(*sha).astype(dtype)
    b = np.random.rand(*shb).astype(dtype)

    def _assert_equal(d_0, d_1):
        """Test the equality of two dictionaries containing NumPy arrays."""
        assert sorted(d_0.keys()) == sorted(d_1.keys())
        for key in d_0.keys():
            ac(d_0[key], d_1[key])

    ds = DiskStore(tempdir)

    ds.register_file_extensions(['key', 'key_bis'])
    assert ds.cluster_ids == []

    ds.store(3, key=a)
    _assert_equal(ds.load(3,
                          ['key'],
                          dtype=dtype,
                          shape=sha,
                          ),
                  {'key': a})
    loaded = ds.load(3, 'key', dtype=dtype, shape=sha)
    ac(loaded, a)

    # Loading a non-existing key returns None.
    assert ds.load(3, 'key_bis') is None
    assert ds.cluster_ids == [3]

    ds.store(3, key_bis=b)
    _assert_equal(ds.load(3, ['key'], dtype=dtype, shape=sha), {'key': a})
    _assert_equal(ds.load(3, ['key_bis'],
                          dtype=dtype,
                          shape=shb,
                          ),
                  {'key_bis': b})
    _assert_equal(ds.load(3,
                          ['key', 'key_bis'],
                          dtype=dtype,
                          ),
                  {'key': a.ravel(), 'key_bis': b.ravel()})
    ac(ds.load(3, 'key_bis', dtype=dtype, shape=shb), b)
    assert ds.cluster_ids == [3]

    ds.erase([2, 3])
    assert ds.load(3, ['key']) == {'key': None}
    assert ds.cluster_ids == []

    # Test load/save file.
    ds.save_file('test', {'a': a})
    ds = DiskStore(tempdir)
    data = ds.load_file('test')
    ae(data['a'], a)
    assert ds.load_file('test2') is None
Example #36
def test_add_selected_clusters_colors():
    cluster_colors = np.tile(np.c_[np.arange(3)], (1, 3))
    cluster_colors = add_alpha(cluster_colors)
    cluster_colors_sel = _add_selected_clusters_colors([1], [0, 1, 3], cluster_colors)
    ae(cluster_colors_sel[[0]], add_alpha(np.zeros((1, 3))))
    ae(cluster_colors_sel[[2]], add_alpha(2 * np.ones((1, 3))))
    # Cluster 1 (at index 1 in cluster_ids) is selected; it should be shown in blue.
    r, g, b, _ = cluster_colors_sel[1]
    assert b > g > r
Example #37
def test_transform_chain_one(array):
    translate = Translate([1, 2])
    t = TransformChain()
    t.add_on_cpu([translate])

    assert t.cpu_transforms == [translate]
    assert t.gpu_transforms == []

    ae(t.apply(array), [[0, 2], [2, 4]])
Example #38
def test_unmasked_channels(masks, n_channels):
    # Mask many values in the masks array.
    threshold = .05
    masks[:, 1::2] *= threshold
    # Compute the mean masks.
    mean_masks = mean(masks)
    # Find the unmasked channels.
    channels = get_unmasked_channels(mean_masks, threshold)
    # These are 0, 2, 4, etc.
    ae(channels, np.arange(0, n_channels, 2))
Example #40
def test_concat(self):
    file_in = os.path.join(test_files_dir, '100_CH9.continuous')
    file_in_2 = os.path.join(test_files_dir, '100_CH9_2.continuous')
    file_out = os.path.join(test_files_dir, 'write_test')
    ch_data = loadContinuous(file_in, dtype=np.int16, trim_last_record=False)
    ch_data_2 = loadContinuous(file_in_2, dtype=np.int16, trim_last_record=False)
    hs = get_header_string(file_in)
    write_continuous(file_out, (ch_data, ch_data_2), hs)
    ch_data_out = loadContinuous(file_out, dtype=np.int16, trim_last_record=False)
    ae(np.concatenate((ch_data['timestamps'], ch_data_2['timestamps'])),
       ch_data_out['timestamps'])
Example #41
def test_creator_simple(tempdir):
    basename = op.join(tempdir, 'my_file')

    creator = KwikCreator(basename)

    # Test create empty files.
    creator.create_empty()
    assert op.exists(basename + '.kwik')
    assert op.exists(basename + '.kwx')

    # Test metadata.
    creator.set_metadata('/application_data/spikedetekt', a=1, b=2., c=[0, 1])

    with open_h5(creator.kwik_path, 'r') as f:
        assert f.read_attr('/application_data/spikedetekt', 'a') == 1
        assert f.read_attr('/application_data/spikedetekt', 'b') == 2.
        ae(f.read_attr('/application_data/spikedetekt', 'c'), [0, 1])

    # Test add spikes in one block.
    n_spikes = 100
    n_channels = 8
    n_features = 3

    spike_samples = artificial_spike_samples(n_spikes)
    features = artificial_features(n_spikes, n_channels, n_features)
    masks = artificial_masks(n_spikes, n_channels)

    creator.add_spikes(
        group=0,
        spike_samples=spike_samples,
        features=features.astype(np.float32),
        masks=masks.astype(np.float32),
        n_channels=n_channels,
        n_features=n_features,
    )

    # Test the spike samples.
    with open_h5(creator.kwik_path, 'r') as f:
        s = f.read('/channel_groups/0/spikes/time_samples')[...]
        assert s.dtype == np.uint64
        ac(s, spike_samples)

    # Test the features and masks.
    with open_h5(creator.kwx_path, 'r') as f:
        fm = f.read('/channel_groups/0/features_masks')[...]
        assert fm.dtype == np.float32
        ac(fm[:, :, 0], features.reshape((-1, n_channels * n_features)))
        ac(fm[:, ::n_features, 1], masks)

    # Spikes can only be added once.
    with raises(RuntimeError):
        creator.add_spikes(group=0,
                           spike_samples=spike_samples,
                           n_channels=n_channels,
                           n_features=n_features)
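The layout being verified: features_masks is an (n_spikes, n_channels * n_features, 2) array with the flattened features in component 0 and each channel's mask repeated n_features times in component 1, hence the fm[:, ::n_features, 1] stride in the mask check.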
Example #42
def test_creator_simple(tempdir):
    basename = op.join(tempdir, 'my_file')

    creator = KwikCreator(basename)

    # Test create empty files.
    creator.create_empty()
    assert op.exists(basename + '.kwik')
    assert op.exists(basename + '.kwx')

    # Test metadata.
    creator.set_metadata('/application_data/spikedetekt',
                         a=1, b=2., c=[0, 1])

    with open_h5(creator.kwik_path, 'r') as f:
        assert f.read_attr('/application_data/spikedetekt', 'a') == 1
        assert f.read_attr('/application_data/spikedetekt', 'b') == 2.
        ae(f.read_attr('/application_data/spikedetekt', 'c'), [0, 1])

    # Test add spikes in one block.
    n_spikes = 100
    n_channels = 8
    n_features = 3

    spike_samples = artificial_spike_samples(n_spikes)
    features = artificial_features(n_spikes, n_channels, n_features)
    masks = artificial_masks(n_spikes, n_channels)

    creator.add_spikes(group=0,
                       spike_samples=spike_samples,
                       features=features.astype(np.float32),
                       masks=masks.astype(np.float32),
                       n_channels=n_channels,
                       n_features=n_features,
                       )

    # Test the spike samples.
    with open_h5(creator.kwik_path, 'r') as f:
        s = f.read('/channel_groups/0/spikes/time_samples')[...]
        assert s.dtype == np.uint64
        ac(s, spike_samples)

    # Test the features and masks.
    with open_h5(creator.kwx_path, 'r') as f:
        fm = f.read('/channel_groups/0/features_masks')[...]
        assert fm.dtype == np.float32
        ac(fm[:, :, 0], features.reshape((-1, n_channels * n_features)))
        ac(fm[:, ::n_features, 1], masks)

    # Spikes can only be added once.
    with raises(RuntimeError):
        creator.add_spikes(group=0,
                           spike_samples=spike_samples,
                           n_channels=n_channels,
                           n_features=n_features)
Example #43
def test_cluster_color_selector_1():
    cluster_ids = [1, 2, 3]
    c = ClusterColorSelector(lambda cid: cid * .1, cluster_ids=cluster_ids)

    assert len(c.get(1, alpha=.5)) == 4
    ae(c.get_values([0, 0]), np.zeros(2))

    for colormap in ('linear', 'rainbow', 'categorical', 'diverging'):
        c.set_color_mapping(colormap=colormap)
        colors = c.get_colors(cluster_ids)
        assert colors.shape == (3, 4)
Example #44
def test_transform_chain_complete(array):
    t = TransformChain()
    t.add_on_cpu([Scale(.5), Scale(2.)])
    t.add_on_cpu(Range([-3, -3, 1, 1]))
    t.add_on_gpu(Clip())
    t.add_on_gpu([Subplot('u_shape', 'a_box_index')])

    assert len(t.cpu_transforms) == 3
    assert len(t.gpu_transforms) == 2

    ae(t.apply(array), [[0, .5], [1, 1.5]])
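To verify the expected output by hand, take the array fixture to be [[-1, 0], [1, 2]] (consistent with the Translate example above): the two Scales cancel each other, and Range([-3, -3, 1, 1]) maps the square [-3, 1]^2 onto [-1, 1]^2 via x -> (x + 3) / 2 - 1, giving [[0, .5], [1, 1.5]]. The GPU transforms (Clip, Subplot) are registered but not executed by apply().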
Example #45
def test_cluster_store_load():
    with TemporaryDirectory() as tempdir:

        # We define some data and a model.
        n_spikes = 100
        n_clusters = 10

        spike_ids = np.arange(n_spikes)
        spike_clusters = np.random.randint(size=n_spikes,
                                           low=0, high=n_clusters)
        spikes_per_cluster = _spikes_per_cluster(spike_ids, spike_clusters)
        model = {'spike_clusters': spike_clusters}

        # We initialize the ClusterStore.
        cs = ClusterStore(model=model,
                          spikes_per_cluster=spikes_per_cluster,
                          path=tempdir,
                          )

        # We create a variable-size item (one value per spike) to be stored
        # on disk, and we define how to generate it for a given cluster.
        class MyItem(VariableSizeItem):
            name = 'my item'
            fields = ['spikes_square']

            def store(self, cluster):
                spikes = spikes_per_cluster[cluster]
                data = (spikes ** 2).astype(np.int32)
                self.disk_store.store(cluster, spikes_square=data)

            def load(self, cluster, name):
                return self.disk_store.load(cluster, name, np.int32)

            def load_spikes(self, spikes, name):
                return (spikes ** 2).astype(np.int32)

        cs.register_item(MyItem)
        cs.generate()

        # All spikes in cluster 1.
        cluster = 1
        spikes = spikes_per_cluster[cluster]
        ae(cs.load('spikes_square', clusters=[cluster]), spikes ** 2)

        # Some spikes in several clusters.
        clusters = [2, 3, 5]
        spikes = np.concatenate([spikes_per_cluster[cl][::3]
                                 for cl in clusters])
        ae(cs.load('spikes_square', spikes=spikes), np.unique(spikes) ** 2)

        # Empty selection.
        assert len(cs.load('spikes_square', clusters=[])) == 0
        assert len(cs.load('spikes_square', spikes=[])) == 0
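Two access paths are exercised here: loading by clusters reads the arrays stored on disk, while loading by spikes falls back on load_spikes and computes values on the fly; note the spike-based result is compared against np.unique(spikes) ** 2, i.e. it comes back in sorted spike order.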
Example #46
def test_extend_spikes_simple():
    spike_clusters = np.array([3, 5, 2, 9, 5, 5, 2])
    spike_ids = np.array([2, 4, 0])

    # These spikes belong to the following clusters.
    clusters = np.unique(spike_clusters[spike_ids])
    ae(clusters, [2, 3, 5])

    # These are the spikes belonging to those clusters, but not in the
    # originally-specified spikes.
    extended = _extend_spikes(spike_ids, spike_clusters)
    ae(extended, [1, 5, 6])
Example #47
def test_spikedetekt_store(tempdir):
    groups = [0, 2]
    chunk_keys = [10, 20, 30]

    n_channels = 4
    npc = 2

    _keys = [(0, 10), (0, 20), (2, 10), (2, 30)]
    _counts = [100, 200, 1, 300]

    # Generate random spike samples, features, and masks.
    s = {k: np.arange(c) for k, c in zip(_keys, _counts)}
    f = {k: np.random.rand(c, n_channels, npc)
         for k, c in zip(_keys, _counts)}
    m = {k: np.random.rand(c, n_channels)
         for k, c in zip(_keys, _counts)}

    store = SpikeDetektStore(tempdir, groups=groups, chunk_keys=chunk_keys)

    # Save data.
    for group, chunk_key in _keys:
        spike_samples = s.get((group, chunk_key), None)
        features = f.get((group, chunk_key), None)
        masks = m.get((group, chunk_key), None)

        store.append(group=group, chunk_key=chunk_key,
                     spike_samples=spike_samples,
                     features=features,
                     masks=masks,
                     spike_offset=chunk_key,
                     )

    # Load data.
    for group in groups:
        # Check spike samples.
        ae(store.spike_samples(group),
           np.hstack([s[key] + key[1] for key in _keys if key[0] == group]))

        # Check features and masks.
        for name in ('features', 'masks'):
            # Actual data.
            data_dict = f if name == 'features' else m
            # Stored data (generator).
            data_gen = getattr(store, name)(group)
            # Go through all chunks.
            for chunk_key, data in zip(chunk_keys, data_gen):
                if (group, chunk_key) in _keys:
                    ae(data_dict[group, chunk_key], data)
                else:
                    assert data is None

    # Test spike counts.
    test_spike_counts(store.spike_counts)
Example #48
def test_spikedetekt_store(tempdir):
    groups = [0, 2]
    chunk_keys = [10, 20, 30]

    n_channels = 4
    npc = 2

    _keys = [(0, 10), (0, 20), (2, 10), (2, 30)]
    _counts = [100, 200, 1, 300]

    # Generate random spike samples, features, and masks.
    s = {k: np.arange(c) for k, c in zip(_keys, _counts)}
    f = {k: np.random.rand(c, n_channels, npc) for k, c in zip(_keys, _counts)}
    m = {k: np.random.rand(c, n_channels) for k, c in zip(_keys, _counts)}

    store = SpikeDetektStore(tempdir, groups=groups, chunk_keys=chunk_keys)

    # Save data.
    for group, chunk_key in _keys:
        spike_samples = s.get((group, chunk_key), None)
        features = f.get((group, chunk_key), None)
        masks = m.get((group, chunk_key), None)

        store.append(
            group=group,
            chunk_key=chunk_key,
            spike_samples=spike_samples,
            features=features,
            masks=masks,
            spike_offset=chunk_key,
        )

    # Load data.
    for group in groups:
        # Check spike samples.
        ae(store.spike_samples(group),
           np.hstack([s[key] + key[1] for key in _keys if key[0] == group]))

        # Check features and masks.
        for name in ('features', 'masks'):
            # Actual data.
            data_dict = f if name == 'features' else m
            # Stored data (generator).
            data_gen = getattr(store, name)(group)
            # Go through all chunks.
            for chunk_key, data in zip(chunk_keys, data_gen):
                if (group, chunk_key) in _keys:
                    ae(data_dict[group, chunk_key], data)
                else:
                    assert data is None

    # Test spike counts.
    test_spike_counts(store.spike_counts)
Example #49
def test_transform_chain_two(array):
    translate = Translate([1, 2])
    scale = Scale([.5, .5])
    t = TransformChain()
    t.add_on_cpu([translate, scale])

    assert t.cpu_transforms == [translate, scale]
    assert t.gpu_transforms == []

    assert isinstance(t.get('Translate'), Translate)
    assert t.get('Unknown') is None

    ae(t.apply(array), [[0, 1], [1, 2]])
Example #50
def test_transform_chain_complete(array):
    t = TransformChain()
    t.add_on_cpu([Scale(.5), Scale(2.)])
    t.add_on_cpu(Range([-3, -3, 1, 1]))
    t.add_on_gpu(Clip())
    t.add_on_gpu([Subplot('u_shape', 'a_box_index')])

    assert len(t.cpu_transforms) == 3
    assert len(t.gpu_transforms) == 2

    ae(t.apply(array), [[0, .5], [1, 1.5]])

    assert len(t.remove('Scale').cpu_transforms) == len(t.cpu_transforms) - 2
Example #51
def test_download_sample_data(tempdir):
    name = 'hybrid_10sec.dat'
    url = _BASE_URL['cortexlab'] + name

    _add_mock_response(url, _DATA.tostring())
    _add_mock_response(url + '.md5', _CHECKSUM)

    download_sample_data(name, tempdir)
    with open(op.join(tempdir, name), 'rb') as f:
        data = f.read()
    ae(np.fromstring(data, np.float32), _DATA)

    responses.reset()
Example #52
def test_read_dat(tempdir):
    n_samples = 100
    n_channels = 10

    arr = artificial_traces(n_samples, n_channels)

    path = op.join(tempdir, 'test')
    arr.tofile(path)
    assert _dat_n_samples(path, dtype=np.float64,
                          n_channels=n_channels) == n_samples
    data = read_dat(path, dtype=arr.dtype, shape=arr.shape)
    ae(arr, data)
    data = read_dat(path, dtype=arr.dtype, n_channels=n_channels)
    ae(arr, data)
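A plausible definition of _dat_n_samples, inferring the sample count from the file size; this is a sketch under assumptions, not the library's actual code.

import os
import numpy as np

def _dat_n_samples(path, dtype=None, n_channels=None):
    # Number of samples = file size / (bytes per item * number of channels).
    item_size = np.dtype(dtype).itemsize
    return os.path.getsize(path) // (item_size * n_channels)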
Example #53
def test_kwik_save(tempdir):

    # Create the test HDF5 file in the temporary directory.
    filename = create_mock_kwik(tempdir,
                                n_clusters=_N_CLUSTERS,
                                n_spikes=_N_SPIKES,
                                n_channels=_N_CHANNELS,
                                n_features_per_channel=_N_FETS,
                                n_samples_traces=_N_SAMPLES_TRACES)

    kwik = KwikModel(filename)

    cluster_groups = {cluster: kwik.cluster_metadata.group(cluster)
                      for cluster in range(_N_CLUSTERS)}
    sc_0 = kwik.spike_clusters.copy()
    sc_1 = sc_0.copy()
    new_cluster = _N_CLUSTERS + 10
    sc_1[_N_SPIKES // 2:] = new_cluster
    cluster_groups[new_cluster] = 7
    ae(kwik.spike_clusters, sc_0)

    assert kwik.cluster_metadata.group(new_cluster) == 3
    kwik.save(sc_1, cluster_groups, {'test': (1, 2.)})
    ae(kwik.spike_clusters, sc_1)
    assert kwik.cluster_metadata.group(new_cluster) == 7

    kwik.close()

    kwik = KwikModel(filename)
    ae(kwik.spike_clusters, sc_1)
    assert kwik.cluster_metadata.group(new_cluster) == 7
    ae(kwik.clustering_metadata['test'], [1, 2])
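The round trip being tested: a cluster created outside the file defaults to group 3 (unsorted, in the kwik convention) until save() writes the custom group 7, and the metadata tuple (1, 2.) comes back as the list [1, 2].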