Пример #1
0
def readkwikinfo(kwik, grupete=3):
    """Read spike times for clusters of a given group from a .kwik file.

    Parameters
    ----------
    kwik : path to the .kwik file, passed to KwikModel.
    grupete : int, cluster group to keep
        0 = noise, 1 = MUA, 2 = GOOD, 3 = unsorted.

    Returns
    -------
    (spikedata, sample_rate)
        spikedata maps cluster id -> AttrDict with an attribute-accessible
        'spike_times' array (spikedata[c].spike_times works, unlike a plain
        dict which needs spikedata[c]['spike_times']); sample_rate is the
        recording's sampling frequency.
    """
    model = KwikModel(kwik)  # load kwik model from file
    spiketimes = model.spike_times  # absolute spike times
    clusters = model.cluster_groups  # cluster id -> group
    sample_rate = model.sample_rate  # sampling frequency

    spikedata = {}
    for cluster, clustergroup in clusters.items():
        if clustergroup == grupete:  # only keep the requested group
            # Compute the selection mask once; the original built a
            # same-length np.zeros array only to overwrite it immediately,
            # and evaluated the where(...) mask twice per cluster.
            times = spiketimes[where(model.spike_clusters == cluster)]
            spikedata[cluster] = AttrDict({'spike_times': times})

    model.close()

    return spikedata, sample_rate
Пример #2
0
def spike_clusters_into_kwik(scname, kwikname, clustering_name):
    """Load spike-cluster assignments from a text file and store them in a
    .kwik file under the given clustering name.

    Parameters
    ----------
    scname : path to a text file of 1-based cluster ids, one per spike.
    kwikname : path to the .kwik file to modify.
    clustering_name : name under which to store the clustering; an
        existing clustering with this name is overwritten.

    Returns None; returns early (after printing an error) if the spike
    cluster file cannot be read.
    """
    try:
        print("Loading {0}".format(scname))
        # File holds 1-based cluster ids; convert to 0-based.
        sc = np.loadtxt(scname, dtype=int) - 1
        print("Loaded {0} spikes".format(len(sc)))
    except (IOError, OSError) as e:
        # BUG FIX: the original used "{1}" with a single format argument,
        # which raised IndexError instead of printing the error.
        print("Error opening file: {0}".format(e))
        return

    model = KwikModel(kwikname)
    # Report the actual arguments rather than sys.argv[1..3], so the
    # message is correct when the function is called programmatically.
    print("Adding {0} into {1}, clustering {2}".format(scname, kwikname,
                                                       clustering_name))
    try:
        model.add_clustering(clustering_name, sc)
    except ValueError:
        print("Overwriting old clustering!")
        time.sleep(5)
        # Hack to switch then overwrite clustering — presumably the model
        # refuses to delete the currently active clustering, so switch to
        # a temporary one first, then copy it back over the old name.
        # TODO confirm against KwikModel's semantics.
        model.add_clustering('123_interim_temp', sc)
        model.clustering = '123_interim_temp'
        model.delete_clustering(clustering_name)
        model.copy_clustering('123_interim_temp', clustering_name)
Пример #3
0
def readkwikinfo(kwik, grupete=3):
    """Read spike times for clusters of a given group from a .kwik file.

    Parameters
    ----------
    kwik : path to the .kwik file, passed to KwikModel.
    grupete : int, cluster group to keep
        0 = noise, 1 = MUA, 2 = GOOD, 3 = unsorted.

    Returns
    -------
    (spikedata, sample_rate)
        spikedata maps cluster id -> AttrDict exposing an
        attribute-accessible 'spike_times' array (spikedata[c].spike_times,
        rather than the plain-dict spikedata[c]['spike_times']);
        sample_rate is the recording's sampling frequency.
    """
    model = KwikModel(kwik)  # load kwik model from file
    spiketimes = model.spike_times  # absolute spike times
    clusters = model.cluster_groups  # cluster id -> group
    sample_rate = model.sample_rate  # sampling frequency

    spikedata = {}
    for cluster, clustergroup in clusters.items():
        if clustergroup == grupete:  # only keep the requested group
            # Evaluate the where(...) selection once per cluster; the
            # original computed it twice and also allocated an np.zeros
            # array of the same length only to discard it immediately.
            times = spiketimes[where(model.spike_clusters == cluster)]
            spikedata[cluster] = AttrDict({'spike_times': times})

    model.close()

    return spikedata, sample_rate
def add_clustering_to_kwik(kwik_path_dir, filename, clustering_name, clu_array):
    """Open the .kwik file at kwik_path_dir/filename and store clu_array
    in it as a clustering named clustering_name."""
    kwik_file = os.path.join(kwik_path_dir, filename)
    KwikModel(kwik_file).add_clustering(clustering_name, clu_array)
Пример #5
0
def spike_clusters_into_kwik(scname, kwikname, clustering_name):
    """Load spike-cluster assignments from a text file and store them in a
    .kwik file under the given clustering name.

    Parameters
    ----------
    scname : path to a text file of 1-based cluster ids, one per spike.
    kwikname : path to the .kwik file to modify.
    clustering_name : name under which to store the clustering; an
        existing clustering with this name is overwritten.

    Returns None; returns early (after printing an error) if the spike
    cluster file cannot be read.
    """
    try:
        print("Loading {0}".format(scname))
        # File holds 1-based cluster ids; convert to 0-based.
        sc = np.loadtxt(scname, dtype=int) - 1
        print("Loaded {0} spikes".format(len(sc)))
    except (IOError, OSError) as e:
        # BUG FIX: "{1}" with a single format argument raised IndexError
        # in the original, masking the real I/O error.
        print("Error opening file: {0}".format(e))
        return

    model = KwikModel(kwikname)
    # Use the function's own arguments instead of sys.argv[1..3] so the
    # message stays correct outside of command-line invocation.
    print("Adding {0} into {1}, clustering {2}".format(scname, kwikname,
                                                       clustering_name))
    try:
        model.add_clustering(clustering_name, sc)
    except ValueError:
        print("Overwriting old clustering!")
        time.sleep(5)
        # Hack to switch then overwrite clustering — presumably the model
        # refuses to delete the currently active clustering, so switch to
        # a temporary one first, then copy it back over the old name.
        # TODO confirm against KwikModel's semantics.
        model.add_clustering('123_interim_temp', sc)
        model.clustering = '123_interim_temp'
        model.delete_clustering(clustering_name)
        model.copy_clustering('123_interim_temp', clustering_name)