Example #1
0
    def __init__(self, file_names, chan_group=0):
        """Load spike-sorting results from either MountainSort or Kilosort files.

        Parameters
        ----------
        file_names : dict
            Maps file roles to paths. Keys used here: 'mda', 'param'
            (MountainSort branch) or 'clu', 'temp', 'spk', 'par'
            (Kilosort branch), plus optional 'grp'.
        chan_group : int, optional
            Channel group index stored for later kwf creation.

        Raises
        ------
        IOError
            If neither spike_clusters.npy nor spike_templates.npy is available
            in the Kilosort branch.
        """
        self.file_names = file_names

        if file_names['mda']:
            # MountainSort output: row 1 holds spike sample times, row 2 cluster ids.
            spikes = readmda(file_names['mda']).astype(int)
            self.spk = spikes[1,:] - 1 # MountainSort sample indices are 1-based; convert to 0-based
            self.clu = spikes[2,:]
            with open(file_names['param'], 'r') as f:
                params = json.load(f)
            self.s_f = params['samplerate']

        else: # its a kilosort conversion
            if file_names['clu']:
                self.clu = np.squeeze(np.load(file_names['clu']))
            elif file_names['temp']:
                self.clu = np.squeeze(np.load(file_names['temp']))
            else:
                raise IOError('both spike_clusters.npy and spike_templates.npy weren\'t found')
            self.spk = np.load(file_names['spk'])

            # BUG FIX: in Python 3, exec() cannot create function-local names,
            # so reading `sample_rate` after a bare exec(f.read()) raised
            # NameError. Execute the params file into an explicit namespace
            # dict and read the value from there instead.
            # NOTE(review): exec of a params file is unsafe on untrusted
            # input — consider json or ast.literal_eval if the format allows.
            param_ns = {}
            with open(file_names['par'], 'r') as f:
                exec(f.read(), param_ns)
            self.s_f = param_ns['sample_rate']

        if 'grp' in file_names and file_names['grp']:
            self.grp = load_grp_file(file_names['grp'])
        else:
            # No group file: mark every cluster id as 'unsorted'.
            self.grp = [(i, 'unsorted') for i in np.unique(self.clu)]

        self.rec_kwik = None
        self.spk_kwik = None
        self.kwf = None
        self.chan_group = chan_group
        self.create_kwf()
	def run(self, args):
		"""Scale an .mda array by ``args['factor']`` and write it back out.

		Reads ``args['input']``, multiplies every element by the float
		factor, and writes the result to ``args['output']`` as float64.
		Always returns True.
		"""
		scale = float(args['factor'])
		data = readmda(args['input'])  # read
		scaled = data * scale  # scale the array
		writemda64(scaled, args['output'])  # write
		return True
def get_firing_info(file_path, prm):
    """Load the MountainSort firings.mda for a recording.

    Parameters
    ----------
    file_path : str
        Root folder of the recording.
    prm : object
        Parameter object providing ``get_sorter_name()``.

    Returns
    -------
    tuple
        (units_list, firing_info): unique cluster ids and the raw firings
        array, or (None, None) when the firings file is missing.
    """
    firings_path = (file_path + '/Electrophysiology'
                    + prm.get_sorter_name() + '/firings.mda')

    if not os.path.exists(firings_path):
        print('I could not find the MountainSort output [firing.mda] file.')
        return None, None

    firing_info = mdaio.readmda(firings_path)
    units_list = np.unique(firing_info[2])
    return units_list, firing_info
	def run(self, args):
		"""Normalize each row of an .mda array to unit standard deviation.

		Reads ``args['input']``, divides every row by its own standard
		deviation, and writes the result to ``args['output']`` as float64.
		Always returns True.
		"""
		data = readmda(args['input'])  # read
		n_rows = data.shape[0]
		print(data.shape)
		for r in range(0, n_rows):
			trace = data[r, :]
			print(trace.shape)
			sd = np.std(trace)
			data[r, :] = trace * (1 / sd)
		writemda64(data, args['output'])  # write
		return True
Example #5
0
def get_firing_info(file_path, prm):
    """Load MountainSort firings, falling back to earlier sorted results.

    Parameters
    ----------
    file_path : str
        Root folder of the recording.
    prm : object
        Parameter object providing ``get_sorter_name()``.

    Returns
    -------
    tuple
        (units_list, firing_info, spatial_firing_or_False). The third item
        is the pickled spatial_firing DataFrame when only the earlier
        sorting results exist; otherwise False.
    """
    firings_path = file_path + '/Electrophysiology' + prm.get_sorter_name() + '/firings.mda'
    units_list = None
    firing_info = None

    if os.path.exists(firings_path):
        firing_info = mdaio.readmda(firings_path)
        units_list = np.unique(firing_info[2])
        return units_list, firing_info, False

    print('I could not find the MountainSort output [firing.mda] file. I will check if the data was sorted earlier.')
    pickle_path = file_path + '/MountainSort/DataFrames/spatial_firing.pkl'
    if os.path.exists(pickle_path):
        spatial_firing = pd.read_pickle(pickle_path)
        # Leave a marker file so callers can detect pre-sorted data.
        os.mknod(file_path + '/sorted_data_exists.txt')
        return units_list, firing_info, spatial_firing

    print('There are no sorting results available for this recording.')
    return units_list, firing_info, False
Example #6
0
def get_snippets(firing_data, prm):
    """Attach 50 random waveform snippets per cluster to ``firing_data``.

    Reads the filtered recording (filt.mda) for the current recording
    folder and, for each cluster, extracts random snippets around its
    firing times. When the filtered file is missing, ``firing_data`` is
    returned unchanged.
    """
    print('I will get some random snippets now for each cluster.')
    recording_dir = prm.get_local_recording_folder_path()
    filt_path = (recording_dir + '/Electrophysiology'
                 + prm.get_sorter_name() + '/filt.mda')

    if os.path.exists(filt_path):
        filtered = mdaio.readmda(filt_path)
        snippets_per_cluster = []
        for idx in range(len(firing_data)):
            #TODO: cluster ID problems
            # cluster = firing_data.cluster_id.values[cluster] - 1
            times = firing_data.firing_times[idx]
            snippets = extract_random_snippets(filtered, times,
                                               firing_data.tetrode[idx],
                                               50, prm)
            snippets_per_cluster.append(snippets)
        firing_data['random_snippets'] = snippets_per_cluster

    #plt.plot(firing_data.random_snippets[4][3,:,:])
    return firing_data
Example #7
0
# Scratch/analysis script with hard-coded local paths — exploratory work,
# not a reusable function. Left as-is; only comments added.

# Write a 2-channel electrode geometry file for MountainSort.
a = np.array([[1,0],[2,0]]).astype(np.int16)
np.savetxt("geom.csv", a, delimiter=",")
#np.readtxt('/home/nel/Code/VolPy/mountainsort_examples-master/bash_examples/001_ms4_bash_example/dataset/geom.csv',delimiter=',')


# Load ROI masks from an ImageJ RoiSet zip and show their summed footprint.
# NOTE(review): import mid-script (not top-of-file) — kept to preserve the
# script's original execution order.
name = '/home/nel/Dropbox_old/SimultaneousEphys/09212017Fish1-1/RoiSet.zip'
dims=(44,96)
from caiman.base.rois import nf_read_roi_zip
img = nf_read_roi_zip(name,dims)
plt.figure();plt.imshow(img.sum(axis=0))

# NOTE(review): `path` is reassigned twice below — only the last assignment
# before each readmda call takes effect; earlier ones are dead.
path='/tmp/mountainlab-tmp/output_47670870cb9afee72b43d0c03d477d36581a055e_timeseries_out.mda'

path = '/home/nel/Dropbox_old/SimultaneousEphys/MountainSort/dataset/raw.mda'
path = '/tmp/mountainlab-tmp/output_47670870cb9afee72b43d0c03d477d36581a055e_timeseries_out.mda'
X = readmda(path)  # raw/filtered timeseries

path = '/home/nel/Dropbox_old/SimultaneousEphys/MountainSort/output/firings.mda'
Y = readmda(path)  # sorting output; row 1 presumably holds spike times — TODO confirm

# Overlay detected spike times on channel 0 of the timeseries.
plt.figure()
plt.plot(X[0])
plt.scatter(Y[1],np.repeat(0.3, len(Y[1])))

#%% Yichun
# Load ground-truth whole-cell voltage from a MATLAB v7.3 (HDF5) file.
fname = '/home/nel/Dropbox_old/Voltron_fly_data/PPL1_ap2a2/axon/180319-1voltron/Data3/Data3_mini.mat'
import h5py
import numpy as np
arrays = {}  # NOTE(review): never populated below — appears unused
f = h5py.File(fname)
gt = f['wholeCell']['voltage'][0]
def get_firing_info(prm):
    """Read firings.mda from the sorting_out_t2 subfolder of the recording.

    Returns
    -------
    tuple
        (units_list, firing_info): unique cluster ids (row 2 of the
        firings array) and the raw firings array. No existence check is
        performed; readmda will fail if the file is missing.
    """
    firings_path = prm.get_filepath() + '\\sorting_out_t2\\firings.mda'
    firing_info = mdaio.readmda(firings_path)
    return np.unique(firing_info[2]), firing_info
Example #9
0
# Round-trip smoke test: read a firings .mda file and rewrite it as float64.
from mdaio import readmda, writemda64

X = readmda("test_firings.mda")

writemda64(X, "test2.mda")
    return filtered_trace


if __name__ == "__main__":

    #tet4_firings = readmda('/Volumes/coxfs01/guitchounts/MountainSortCluster-master/TetrodeData/Tetrode4/data/firings2-4.mda')

    ### starting in a dir like GRat54/636xxxxx/0/
    #tetrode_num = 1

    waveform_stats = {}

    for tetrode_num in range(1, 17):
        waveform_stats[tetrode_num] = {}

        tet_raw = readmda('./TetrodeData/Tetrode%d/data/rawT%d.mda' %
                          (tetrode_num, tetrode_num))
        ## ^^ can also load from original RHD file.

        #### this stuff will actually be on /n/regal (or wherever you put it)
        clust_dir = './Tetrode%d/mountainlab/prvbucket/_mountainprocess/' % tetrode_num

        firings_files = [
            out_file for out_file in os.listdir(clust_dir)
            if out_file.startswith('output_firings_out')
        ]
        output_firings_out = firings_files[np.argmax(
            [os.path.getsize(clust_dir + file) for file in firings_files])]
        cluster_times_ids = readmda(clust_dir + output_firings_out)

        metrics_files = [
            out_file for out_file in os.listdir(clust_dir)
Example #11
0
# Round-trip smoke test: read a firings .mda file and rewrite it as float64.
from mdaio import readmda,writemda64

X = readmda("test_firings.mda")

writemda64(X,"test2.mda")