def make_pca_features_from_spk(SpkFileName):
    """Make PCA features from a .spk file and save them to features.txt.

    Acquisition parameters (channel count, sample rate, samples before/after
    the spike peak) are read from the .xml file found next to *SpkFileName*.
    Only channel 0 of each waveform is used for the PCA projection.
    """
    xml_path = find_file_with_ext(os.path.dirname(SpkFileName), "xml", True)
    n_ch, sample_rate, s_before, s_after = get_pars_from_xml2(xml_path)
    raw = np.fromfile(SpkFileName, dtype=np.int16)
    # one row per spike: (n_spikes, samples_per_spike), first channel only
    first_chan = raw.reshape(-1, s_before + s_after, n_ch)[:, :, 0]
    pcs = compute_pcs(first_chan)
    save_feature_info(pcs, s_before, s_after, sample_rate)
def make_pca_features_from_spk(SpkFileName):
    """Make PCA features from a .spk file and save them to features.txt."""
    spk_dir = os.path.dirname(SpkFileName)
    n_ch, sample_rate, s_before, s_after = get_pars_from_xml2(
        find_file_with_ext(spk_dir, "xml", True))
    samples_per_spike = s_before + s_after
    # int16 waveforms laid out as (spike, sample, channel); keep channel 0
    X_ns = np.fromfile(SpkFileName, dtype=np.int16).reshape(
        -1, samples_per_spike, n_ch)[:, :, 0]
    save_feature_info(compute_pcs(X_ns), s_before, s_after, sample_rate)
def combine_h5s(*dirs): "combine the data from a bunch of h5 files. Also make klusters files" outdirname = common_start(dirs) + "COMBINED" print dirs print os.path.abspath(os.curdir) h5files = [ tables.openFile(find_file_with_ext(dir, ".h5"), mode="r") for dir in dirs ] spike_tables = [h5file.root.SpikeTable for h5file in h5files] with indir(outdirname): global N_CH, S_TOTAL, FPC, SAMPLE_RATE num_ch = [table.cols.st.shape[1] for table in spike_tables] N_CH = max(num_ch) S_TOTAL = table.cols.wave.shape[1] FPC = table.cols.fet.shape[2] new_file = tables.openFile(outdirname + ".h5", mode="w") new_table = new_file.createTable("/", "SpikeTable", spike_dtype()) clu_start = np.arange(0, 5000, 100) SAMPLE_RATE = 25000. # Doesn't actually matter for this script, but write_xml wants it # files in same order as tables clu2DatChannels = {} for (i_file, h5file) in enumerate(h5files): for clu in xrange(clu_start[i_file], clu_start[i_file + 1]): clu2DatChannels[clu] = list(h5file.root.DatChannels) dump("clu2DatChannels.pickle", clu2DatChannels) triples = [(row["time"], i_spike, i_table) for (i_table, table) in enumerate(spike_tables) for (i_spike, row) in enumerate(table)] for time, i_spike, i_table in sorted(triples, key=lambda tup: tup[0]): oldrow = spike_tables[i_table][i_spike] new_table.row["time"] = time new_table.row["fet"] = zero_pad(oldrow["fet"], N_CH) new_table.row["st"] = zero_pad(oldrow["st"], N_CH) new_table.row["wave"] = zero_pad(oldrow["wave"], N_CH) new_table.row["clu"] = oldrow["clu"] + clu_start[i_table] new_table.row.append() new_table.flush() klusters_files(new_table, outdirname) new_file.close()
def combine_h5s(*dirs): "combine the data from a bunch of h5 files. Also make klusters files" outdirname = common_start(dirs) + "COMBINED" print dirs print os.path.abspath(os.curdir) h5files = [tables.openFile(find_file_with_ext(dir,".h5"),mode="r") for dir in dirs] spike_tables = [h5file.root.SpikeTable for h5file in h5files] with indir(outdirname): global N_CH,S_TOTAL,FPC,SAMPLE_RATE num_ch =[table.cols.st.shape[1] for table in spike_tables] N_CH = max(num_ch) S_TOTAL = table.cols.wave.shape[1] FPC = table.cols.fet.shape[2] new_file = tables.openFile(outdirname+".h5",mode="w") new_table = new_file.createTable("/","SpikeTable",spike_dtype()) clu_start = np.arange(0,5000,100) SAMPLE_RATE = 25000. # Doesn't actually matter for this script, but write_xml wants it # files in same order as tables clu2DatChannels = {} for (i_file,h5file) in enumerate(h5files): for clu in xrange(clu_start[i_file],clu_start[i_file+1]): clu2DatChannels[clu] = list(h5file.root.DatChannels) dump("clu2DatChannels.pickle",clu2DatChannels) triples = [(row["time"],i_spike,i_table) for (i_table,table) in enumerate(spike_tables) for (i_spike,row) in enumerate(table)] for time,i_spike,i_table in sorted(triples, key = lambda tup: tup[0]): oldrow = spike_tables[i_table][i_spike] new_table.row["time"] = time new_table.row["fet"]= zero_pad(oldrow["fet"],N_CH) new_table.row["st"] = zero_pad(oldrow["st"],N_CH) new_table.row["wave"] = zero_pad(oldrow["wave"],N_CH) new_table.row["clu"] = oldrow["clu"]+clu_start[i_table] new_table.row.append() new_table.flush() klusters_files(new_table,outdirname) new_file.close()