def setup():
    create_files('myexperiment', dir=DIRPATH, prm=prm, prb=prb)
    # Open the files.
    files = open_files('myexperiment', dir=DIRPATH, mode='a')
    # Add data.
    add_recording(files, sample_rate=sample_rate, nchannels=nchannels)
    add_cluster_group(files, channel_group_id='0', id='0', name='Noise')
    add_cluster(files, channel_group_id='0')
    # Close the files.
    close_files(files)
def create_files_Experiment(filename, DIRPATH, prm, prb):
    # files_exist is a function in SD2.dataio.kwik
    if not files_exist(filename, dir=DIRPATH):
        create_files(filename, dir=DIRPATH, prm=prm, prb=prb)
        # Open the files.
        files = open_files(filename, dir=DIRPATH, mode='a')
        # Add data.
        add_recording(files, sample_rate=prm['sample_rate'],
                      nchannels=prm['nchannels'])
        add_cluster_group(files, channel_group_id='0', id='0', name='Noise')
        add_cluster(files, channel_group_id='0')
        # Close the files.
        close_files(files)
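# Illustrative usage sketch for create_files_Experiment (not called anywhere in
# this module). The directory and the prm/prb dictionaries below are
# hypothetical placeholders: only 'sample_rate' and 'nchannels' are read
# directly above, and the real create_files()/probe format may expect further
# keys taken from your .prm/.prb files.
def _example_create_files_Experiment():
    example_dirpath = '/tmp/hybrid_output'                  # placeholder output directory
    example_prm = {'sample_rate': 20000,                    # Hz (placeholder)
                   'nchannels': 32}                         # placeholder channel count
    example_prb = {'channel_groups': {0: {'channels': list(range(32))}}}  # minimal probe sketch
    create_files_Experiment('myexperiment', example_dirpath, example_prm, example_prb)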
def learn_data_grid_general(hybdatadict, SDparams, prb, detectioncrit, supervised_params, addtokwik):
    '''If addtokwik == True, the clusterings are also stored in the .kwik file.
    Calls learn_data() for the various values of the grids and also the function compute_errors().
    Writes the output as clusterings labelled by Hash(svmparams) of the grid in
    Hash(hybdatadict)_Hash(sdparams)_Hash(detectioncrit)_Hash(supervised_params).kwik
    using write_kwik(hybdatadict, sdparams, detectioncrit, svmparams, confusion_test, confusion_train);
    the new .kwik format can store multiple clusterings.
    supervised_params consists of the following quantities, e.g.:
    supervised_params = {'numfirstspikes': 200000, 'kernel': 'rbf',
                         'grid_C': [1, 100000, 0.00001],
                         'grid_weights': listofweights,
                         'gammagrid': [1e-5, 0.001, 0.1, 1, 10, 1000, 100000],
                         'cross_param': 2, 'PCAS': 3, 'subvector': None}
    '''
    # ----------------------------------------------------------
    argPLDG = [hybdatadict, SDparams, prb, detectioncrit, supervised_params]
    if ju.is_cached(pre_learn_data_grid, *argPLDG):
        print 'Yes, pre_learn_data_grid has been run \n'
    else:
        print 'Running pre_learn_data_grid(hybdatadict, SDparams, prb, detectioncrit, supervised_params), \n you have not run it yet'
    hash_hyb_SD, classweights, scaled_fets, target = pre_learn_data_grid(hybdatadict, SDparams, prb, detectioncrit, supervised_params)

    DIRPATH = hybdatadict['output_path']
    number_of_weights = len(classweights)
    numspikes = scaled_fets.shape[0]
    cross_valid = do_cross_validation_shuffle(numspikes, supervised_params['cross_param'])
    #print cross_valid

    #do_supervised(supervised_params,
    # e.g. 'grid_C': [1, 100000, 0.00001] gives number_cvalues = 3
    number_cvalues = len(supervised_params['grid_C'])
    #number_support_vectors = {}
    weights_clu_test = np.zeros((number_cvalues, number_of_weights, numspikes, 2), dtype=np.int32)
    weights_clu_train = np.zeros((number_cvalues, number_of_weights, numspikes, 2), dtype=np.int32)
    # (prediction, ground truth): (0,0) TN, (0,1) FN, (1,0) FP, (1,1) TP
    cludict = {(0, 0): 1, (0, 1): 2, (1, 0): 3, (1, 1): 4}
    testclu = np.zeros((number_cvalues, number_of_weights, numspikes), dtype=np.int32)
    trainclu = np.zeros((number_cvalues, number_of_weights, numspikes), dtype=np.int32)

    supervisedparamshash = None  # defined up front so the return below does not crash when addtokwik is False
    for c, Cval in enumerate(supervised_params['grid_C']):
        preds = {}
        preds_train = {}
        ## Defined to avoid: TypeError: unhashable type: 'numpy.ndarray' (something about dictionaries)
        #testclu_pre = np.zeros((number_of_weights, numspikes), dtype=np.int32)
        #trainclu_pre = np.zeros((number_of_weights, numspikes), dtype=np.int32)
        for i, weights in enumerate(classweights):
            for j, (train, test) in enumerate(cross_valid):
                if supervised_params['kernel'] == 'poly':
                    preds[i, j], preds_train[i, j] = do_supervised_learning(test, train, Cval,
                        supervised_params['kernel'], scaled_fets, target, classweights[i])
                else:  # radial kernel, only allow a single gamma value at a time
                    preds[i, j], preds_train[i, j] = do_supervised_learning_radial(test, train, Cval,
                        supervised_params['kernel'], supervised_params['gamma'],
                        scaled_fets, target, classweights[i])
                print 'Computed ', classweights[i]
                # Used later to make the equivalent of a "4 seasons" .clu file (test set)
                weights_clu_test[c, i, test, 0] = preds[i, j]
                weights_clu_test[c, i, test, 1] = target[test]
                # The same, but for the training set
                weights_clu_train[c, i, train, 0] = preds_train[i, j]
                weights_clu_train[c, i, train, 1] = target[train]
            # Make the "4 seasons" .clu file equivalent
            for k in np.arange(numspikes):
                testclu[c, i, k] = cludict[tuple(weights_clu_test[c, i, k, :])]
                trainclu[c, i, k] = cludict[tuple(weights_clu_train[c, i, k, :])]
            # supervisedinputdict = {'test': test, 'train': train, 'Cval': Cval,
            #                        'kernel': supervised_params['kernel'],
            #                        'scaled_fets': scaled_fets, 'target': target,
            #                        'classweights': classweights}
            # Add clusterings to the .kwik file
            if addtokwik:
                kwikfiles = open_files(hash_hyb_SD, dir=DIRPATH, mode='a')
                supervisedparamshash = hash_utils.hash_dictionary_md5(supervised_params)
                supervisedhashname = supervisedparamshash + '_' + repr(c) + '_' + repr(i)
                add_clustering(kwikfiles, name=supervisedhashname + 'test', spike_clusters=testclu[c, i, :])
                add_clustering(kwikfiles, name=supervisedhashname + 'train', spike_clusters=trainclu[c, i, :])
                close_files(kwikfiles)
            #print 'testclu[', c, ',', i, ',', k, ']=', testclu[c, i, k]

    # for c, Cval in enumerate(supervised_params['grid_C']):
    #     kwikfilename = DIRPATH + hash_hyb_SD + '.kwik'
    #     supervisedhashname = hash_utils.hash_dictionary_md5(detectioncrit)
    #     add_clustering_kwik(kwikfilename, detectedgroundtruth, detectionhashname)

    #### For 2-fold cross-validation and 200 spikes, train and test look like this:
    # j = 0: train = [100 101 ... 199], test = [0 1 ... 99]
    # j = 1: train = [0 1 ... 99],      test = [100 101 ... 199]
    return supervisedparamshash, classweights, testclu, trainclu
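# Illustrative call sketch for learn_data_grid_general. Every value in
# example_supervised_params is a hypothetical placeholder chosen to match the
# docstring above; the entries of 'grid_weights' are written in scikit-learn's
# {class_label: weight} style, which is an assumption, and a single 'gamma' is
# supplied because the radial branch above reads supervised_params['gamma'].
# hybdatadict, SDparams, prb and detectioncrit are assumed to come from the
# hybrid-creation, SpikeDetekt and detection steps.
def _example_learn_data_grid_general(hybdatadict, SDparams, prb, detectioncrit):
    example_supervised_params = {
        'numfirstspikes': 200000,                        # use at most this many spikes
        'kernel': 'rbf',                                 # 'rbf' or 'poly'
        'grid_C': [1, 100000, 0.00001],                  # SVM regularisation grid
        'grid_weights': [{0: 1, 1: 1}, {0: 1, 1: 10}],   # placeholder class-weight grid
        'gamma': 0.001,                                  # single RBF gamma value
        'cross_param': 2,                                # 2-fold cross-validation
        'PCAS': 3,
        'subvector': None,
    }
    return learn_data_grid_general(hybdatadict, SDparams, prb, detectioncrit,
                                   example_supervised_params, addtokwik=True)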
def test_detection_algorithm(hybdatadict, SDparams, prb, detectioncrit):
    '''Queries whether the cached function hybridata_creation_lib.create_hybrid_kwdfile(hybdatadict)
    has already been called with these arguments, using `joblib_utils.is_cached`.
    If it has, it calls it to obtain creation_groundtruth; if the hybrid dataset
    does not exist, an error will be raised.
    creation_groundtruth is the equivalent of the old GroundtruthResfile, GroundtruthClufile, ...
    (i.e. the times and the cluster labels of the added hybrid spikes).
    The detection criteria include: allowed_discrepancy, CSthreshold.
    This function calls SpikeSimilarityMeasure(a, b) and writes to the file
    Hash(hybdatadict)_Hash(sdparams)_Hash(detectioncrit).kwik.
    It returns detcrit_groundtruth, the ground truth relative to the criteria detectioncrit,
    as an ordered dictionary so that hashing can work.'''
    if ju.is_cached(hcl.create_hybrid_kwdfile, hybdatadict):
        print 'Yes, this hybrid dataset exists, I shall now check if you have run SD \n'
        meanwaveform, kwdoutputname, creation_groundtruth, amplitude = hcl.create_hybrid_kwdfile(hybdatadict)

    # Take the mean of the binary donor masks of the donor cluster
    binmeanmask = hcl.make_average_datamask_from_mean(hybdatadict, fmask=False)

    argSD = [hybdatadict, SDparams, prb]
    if ju.is_cached(rsd.run_spikedetekt, *argSD):
        print 'Yes, SD has been run \n'
        hash_hyb_SD = rsd.run_spikedetekt(hybdatadict, SDparams, prb)
    else:
        print 'You need to run Spikedetekt before attempting to analyse results '

    DIRPATH = hybdatadict['output_path']
    with Experiment(hash_hyb_SD, dir=DIRPATH, mode='a') as expt:
        res_int = expt.channel_groups[0].spikes.time_samples
        res_frac = expt.channel_groups[0].spikes.time_fractional
        res_int_arr = res_int[:]
        res_frac_arr = res_frac[:]
        detected_times = res_int_arr + res_frac_arr

        # Masks
        fmask = expt.channel_groups[0].spikes.features_masks

        # Spikes within the time window
        existencewin = np.zeros_like(creation_groundtruth)

        # Mean binary mask for the hybrid cluster
        if 'manual_meanmask' in detectioncrit.keys():
            binmeanmask = detectioncrit['manual_meanmask']
        else:
            binmeanmask = hcl.make_average_datamask_from_mean(hybdatadict, fmask=False)

        indices_in_timewindow = hash_utils.order_dictionary({})
        # indices_in_timewindow = {0 (the 1st hybrid spike): (array([0, 1, 3]),),
        #                          1: (array([89, 90]),),
        #                          2: (array([154, 156, 157]),),
        #                          3: (array([191]),),
        #                          4: (array([259, 260, 261]),), ...
        num_spikes_in_timewindow = hash_utils.order_dictionary({})
        CauchySchwarz = hash_utils.order_dictionary({})
        detected = hash_utils.order_dictionary({})
        NumHybSpikes = creation_groundtruth.shape[0]
        trivialmainclustering = np.zeros_like(detected_times)
        detectedgroundtruth = np.zeros_like(detected_times)
        print detectedgroundtruth.shape
        for k in np.arange(NumHybSpikes):
            list_of_differences = np.zeros((detected_times.shape[0]))
            list_of_differences[:] = detected_times[:] - creation_groundtruth[k]
            indices_in_timewindow[k] = np.nonzero(np.absolute(list_of_differences) <= detectioncrit['allowed_discrepancy'])
            num_spikes_in_timewindow[k] = indices_in_timewindow[k][0].shape[0]
            for j in np.arange(num_spikes_in_timewindow[k]):
                CauchySchwarz[k, j] = SpikeSimilarityMeasure(fmask[indices_in_timewindow[k][0][j], :, 1],
                                                             binmeanmask[0:3 * hybdatadict['numchannels']])
                if CauchySchwarz[k, j] > detectioncrit['CSthreshold']:
                    detected[k, j] = 1
                else:
                    detected[k, j] = 0
                detectedgroundtruth[indices_in_timewindow[k][0][j]] = detected[k, j]

    # Store detectedgroundtruth in a clustering
    detectionhashname = hash_utils.hash_dictionary_md5(detectioncrit)
    #kwikfilename = DIRPATH + hash_hyb_SD #+ '.kwik'
    #add_clustering_kwik(kwikfilename, detectedgroundtruth, detectionhashname)
    kwikfiles = open_files(hash_hyb_SD, dir=DIRPATH, mode='a')
    add_clustering(kwikfiles, name=detectionhashname, spike_clusters=detectedgroundtruth, overwrite=True)
    print 'Added a clustering called ', detectionhashname
    add_clustering(kwikfiles, name='main', spike_clusters=trivialmainclustering, overwrite=True)
    close_files(kwikfiles)

    #clusters = '/channel_groups[0]/spikes/clusters'
    #detectionhash = hash_dictionary_md5(detectioncrit)
    #expt.createEArray(clusters, detectionhash, tb.UInt32Atom(), (0,),
    #                  expectedrows=1000000)

    #percentage_detected = float(sum(detected.values()))/NumHybSpikes
    detcrit_groundtruth_pre = {'detection_hashname': detectionhashname,
                               'binmeanmask': binmeanmask,
                               'indices_in_timewindow': indices_in_timewindow,
                               'numspikes_in_timeswindow': num_spikes_in_timewindow,
                               'Cauchy_Schwarz': CauchySchwarz,
                               'detected': detected,
                               'detected_groundtruth': detectedgroundtruth}
    detcrit_groundtruth = hash_utils.order_dictionary(detcrit_groundtruth_pre)
    return detcrit_groundtruth
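# Illustrative call sketch for test_detection_algorithm. The numbers in
# example_detectioncrit are hypothetical placeholders: 'allowed_discrepancy' is
# the +/- window in samples around each ground-truth spike time and
# 'CSthreshold' is the Cauchy-Schwarz similarity cut-off used in the loop
# above. The dictionary is passed through hash_utils.order_dictionary so that
# hash_dictionary_md5 gives a reproducible clustering name.
def _example_test_detection_algorithm(hybdatadict, SDparams, prb):
    example_detectioncrit = hash_utils.order_dictionary(
        {'allowed_discrepancy': 10,    # placeholder, in samples
         'CSthreshold': 0.8})          # placeholder similarity threshold in [0, 1]
    detcrit_groundtruth = test_detection_algorithm(hybdatadict, SDparams, prb,
                                                   example_detectioncrit)
    # e.g. how many candidate detections were flagged as matching hybrid spikes:
    print 'Detected flags set:', sum(detcrit_groundtruth['detected'].values())
    return detcrit_groundtruth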