Example #1
def import_callback(self, checked=None):
    folder = SETTINGS['main_window.last_data_dir']
    path = QtGui.QFileDialog.getOpenFileName(self,
        "Open a .kwik file", folder, ".kwik file")[0]
    # If a file has been selected, open it.
    if path and self.loader is not None:
        clu = read_clusters(path)
        # TODO
        self.open_done()
Example #2
def import_callback(self, checked=None):
    folder = SETTINGS['main_window.last_data_dir']
    path = QtGui.QFileDialog.getOpenFileName(self,
        "Open a .clu file", folder, "CLU file (*.clu.* *.clu_original.*)")[0]
    # If a file has been selected, open it.
    if path and self.loader is not None:
        clu = read_clusters(path)
        # TODO
        self.open_done()
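
Both callbacks index the result of getOpenFileName with [0], which assumes a PySide-style return value of (path, selected_filter); PyQt4's default API returns just the path. A small defensive helper, purely illustrative and not part of the original code, could normalize the two conventions:

try:
    from PySide import QtGui      # getOpenFileName returns a (path, filter) tuple
except ImportError:
    from PyQt4 import QtGui       # getOpenFileName returns a plain string/QString

def ask_open_path(parent, caption, folder, name_filter):
    # Hypothetical helper: accept either return convention.
    result = QtGui.QFileDialog.getOpenFileName(parent, caption, folder,
                                               name_filter)
    if isinstance(result, tuple):
        return result[0]
    return result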
Example #3
def test_clusters():
    dir = TEST_FOLDER
    clufile = os.path.join(dir, 'test.aclu.1')
    clufile2 = os.path.join(dir, 'test.aclu.1.saved')
    clusters = read_clusters(clufile)
    
    assert clusters.dtype == np.int32
    assert clusters.shape == (1000,)
    
    # Save.
    save_clusters(clufile2, clusters)
    
    # Open again.
    clusters2 = read_clusters(clufile2)
    
    assert np.array_equal(clusters, clusters2)
    
    # Check the headers.
    clusters_with_header = load_text(clufile, np.int32, skiprows=0)
    clusters2_with_header = load_text(clufile2, np.int32, skiprows=0)
    
    assert np.array_equal(clusters_with_header, clusters2_with_header)
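
read_clusters and save_clusters themselves are not shown in these examples. A minimal sketch of how such helpers could work, assuming the usual Klusters .clu layout (a one-line header holding the number of clusters, then one cluster id per spike); the _sketch names are illustrative, not the library's API:

import numpy as np

def read_clusters_sketch(filename):
    # Load the whole file as int32; the first line is assumed to be the
    # header (number of clusters), the rest one cluster id per spike.
    data = np.loadtxt(filename, dtype=np.int32)
    return data[1:]

def save_clusters_sketch(filename, clusters):
    # Write the header back (number of distinct clusters), then the ids,
    # so that a header-included comparison like the one above round-trips.
    with open(filename, 'w') as f:
        f.write('%d\n' % len(np.unique(clusters)))
        np.savetxt(f, clusters, fmt='%d')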
Example #4
def test_hdf5():
    dir = TEST_FOLDER
    filename = os.path.join(dir, 'test.xml')
    
    # Convert in HDF5.
    with HDF5Writer(filename) as writer:
        writer.convert()
    
    # Open the HDF5 file.
    filename = os.path.join(dir, 'test.kwik')
    with tables.openFile(filename) as file:
        # Shank 1.
        # --------
        spikes = file.root.shanks.shank1.spikes
        clusters_hdf5 = spikes.col('cluster_manual')
        features_hdf5 = spikes.col('features')
    
        # Check that the arrays correspond to the original values.
        clusters = read_clusters(os.path.join(dir, 'test.clu.1'))
        features = read_features(os.path.join(dir, 'test.fet.1'), 
            nchannels, fetdim, freq, do_process=False)
        
        np.testing.assert_equal(clusters_hdf5, clusters)
        np.testing.assert_equal(features_hdf5, features)
        
        
        # Shank 2.
        # --------
        spikes = file.root.shanks.shank2.spikes
        clusters_hdf5 = spikes.col('cluster_manual')
        features_hdf5 = spikes.col('features')
    
        # Check that the arrays correspond to the original values.
        clusters = read_clusters(os.path.join(dir, 'test.clu.2'))
        features = read_features(os.path.join(dir, 'test.fet.2'), 
            nchannels, fetdim, freq, do_process=False)
        
        np.testing.assert_equal(clusters_hdf5, clusters)
        np.testing.assert_equal(features_hdf5, features)
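
This test uses the PyTables 2.x camelCase call tables.openFile. Under PyTables 3.x the same access pattern is spelled open_file; a minimal sketch (TEST_FOLDER and the /shanks/shank1/spikes layout are taken from the test above):

import os
import tables

filename = os.path.join(TEST_FOLDER, 'test.kwik')  # TEST_FOLDER as in the tests
with tables.open_file(filename) as f:
    spikes = f.root.shanks.shank1.spikes
    # Table.col() reads a whole column into a NumPy array.
    clusters_hdf5 = spikes.col('cluster_manual')
    features_hdf5 = spikes.col('features')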
Example #5
def test_klusters_save():
    """WARNING: this test should occur at the end of the module since it
    changes the mock data sets."""
    # Open the mock data.
    dir = TEST_FOLDER
    xmlfile = os.path.join(dir, 'test.xml')
    l = KlustersLoader(filename=xmlfile)
    
    clusters = l.get_clusters()
    cluster_colors = l.get_cluster_colors()
    cluster_groups = l.get_cluster_groups()
    group_colors = l.get_group_colors()
    group_names = l.get_group_names()
    
    # Set clusters.
    indices = get_indices(clusters)
    l.set_cluster(indices[::2], 2)
    l.set_cluster(indices[1::2], 3)
    
    # Set cluster info.
    cluster_indices = l.get_clusters_unique()
    l.set_cluster_colors(cluster_indices[::2], 10)
    l.set_cluster_colors(cluster_indices[1::2], 20)
    l.set_cluster_groups(cluster_indices[::2], 1)
    l.set_cluster_groups(cluster_indices[1::2], 0)
    
    # Save.
    l.remove_empty_clusters()
    l.save()
    
    clusters = read_clusters(l.filename_aclu)
    cluster_info = read_cluster_info(l.filename_acluinfo)
    
    assert np.all(clusters[::2] == 2)
    assert np.all(clusters[1::2] == 3)
    
    assert np.array_equal(cluster_info.index, cluster_indices)
    assert np.all(cluster_info.values[::2, 0] == 10)
    assert np.all(cluster_info.values[1::2, 0] == 20)
    assert np.all(cluster_info.values[::2, 1] == 1)
    assert np.all(cluster_info.values[1::2, 1] == 0)

    l.close()

Example #6
def test_cluster_info():
    dir = TEST_FOLDER
    clufile = os.path.join(dir, 'test.aclu.1')
    cluinfofile = os.path.join(dir, 'test.acluinfo.1')

    clusters = read_clusters(clufile)
    
    indices = np.unique(clusters)
    colors = np.random.randint(low=0, high=10, size=len(indices))
    groups = np.random.randint(low=0, high=2, size=len(indices))
    cluster_info = pd.DataFrame({'color': pd.Series(colors, index=indices),
        'group': pd.Series(groups, index=indices)})
    
    save_cluster_info(cluinfofile, cluster_info)
    cluster_info2 = read_cluster_info(cluinfofile)
    
    assert np.array_equal(cluster_info.values, cluster_info2.values)
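
read_cluster_info and save_cluster_info are likewise not shown. A plausible sketch of such a round-trip, assuming a plain space-separated text layout of "cluster color group" rows (the library's actual format may differ; the _sketch names are illustrative):

import pandas as pd

def save_cluster_info_sketch(filename, cluster_info):
    # One "cluster color group" row per cluster, no header line.
    cluster_info.to_csv(filename, sep=' ', header=False)

def read_cluster_info_sketch(filename):
    # Read it back, restoring the cluster index and the column names.
    return pd.read_csv(filename, sep=' ', header=None,
                       names=['cluster', 'color', 'group'],
                       index_col='cluster')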
Example #7
def test_conversion_1():
    
    # Convert klusters data to kwik.
    klusters_to_kwik(filename='test', dir=TEST_FOLDER)
    
    fet = read_features(os.path.join(TEST_FOLDER, 'test.fet.1'), 
                        nchannels, fetdim, freq, do_process=False)
    
    clu = read_clusters(os.path.join(TEST_FOLDER, 'test.clu.1'))
    
    with Experiment('test', dir=TEST_FOLDER, mode='r') as exp:
        
        # Check cluster / cluster group metadata.
        assert np.allclose(sorted(exp.channel_groups[1].clusters.main.keys()),
            range(2, 22))
        assert np.allclose(exp.channel_groups[1].clusters.main.color[:],
            range(1, 21))
        assert np.all(exp.channel_groups[1].clusters.main.group[:] == 3)
        
        # Check original == main.
        assert np.allclose(
            sorted(exp.channel_groups[1].clusters.main.keys()),
            sorted(exp.channel_groups[1].clusters.original.keys()),
            )
        assert np.allclose(
            exp.channel_groups[1].clusters.main.color[:],
            exp.channel_groups[1].clusters.original.color[:],
            )
        assert np.allclose(
            exp.channel_groups[1].clusters.main.group[:],
            exp.channel_groups[1].clusters.original.group[:],
        )
        
        # Test spike clusters.
        assert np.allclose(
            exp.channel_groups[1].spikes.clusters.main[:],
            clu)
        assert np.allclose(
            exp.channel_groups[1].spikes.clusters.main[:],
            exp.channel_groups[1].spikes.clusters.original[:])
        
        # Ensure features_masks is a contiguous array (a plain Array, not an extendable EArray).
        assert isinstance(exp.channel_groups[1].spikes.features_masks, tb.Array)
        assert not isinstance(exp.channel_groups[1].spikes.features_masks, tb.EArray)
        
        # Check features and waveforms.
        nspikes = len(exp.channel_groups[1].spikes.clusters.main[:])
        assert exp.channel_groups[1].spikes.features_masks.shape[0] == nspikes
        # No uspk file ==> no waveforms_raw
        assert exp.channel_groups[1].spikes.waveforms_raw.shape[0] == 0
        assert exp.channel_groups[1].spikes.waveforms_filtered.shape[0] == nspikes
        
        assert exp.channel_groups[1].spikes.time_samples[:].sum() > 0
        assert exp.channel_groups[1].spikes.features_masks[:].sum() > 0
        assert exp.channel_groups[1].spikes.waveforms_filtered[:].sum() > 0
        
        fet_kwik = exp.channel_groups[1].spikes.features[:]
        
        # Check that the original and kwik (normalized) features agree up to
        # a constant scale factor: the ratio of non-zero entries is ~constant.
        fet = fet.ravel()
        fet_kwik = fet_kwik.ravel()
        ind = fet != 0
        d = fet[ind] / fet_kwik[ind]
        assert d.max() - d.min() <= .1
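
The last block asserts that the .fet features and the kwik (normalized) features differ only by a roughly constant scale factor: the ratio of corresponding non-zero entries stays within a 0.1 band. The same check, factored into a hypothetical helper for clarity:

import numpy as np

def proportional(a, b, tol=.1):
    # True when the non-zero entries of a and b differ only by an
    # (approximately) constant scale factor.
    a = np.asarray(a, dtype=float).ravel()
    b = np.asarray(b, dtype=float).ravel()
    ind = a != 0
    ratio = a[ind] / b[ind]
    return ratio.max() - ratio.min() <= tol

# Equivalent to the assertion above (hypothetical usage):
# assert proportional(fet, fet_kwik)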
        