def open(self, filename=None):
    """Open everything: locate the dataset files, convert legacy Klusters
    files to the Kwik format if needed, back up the .clu file, and load
    the experiment.

    If `filename` is None, reuse `self.filename`; otherwise remember the
    given filename for later re-opens.
    """
    if filename is None:
        filename = self.filename
    else:
        self.filename = filename
    # `dirname` rather than `dir` to avoid shadowing the builtin.
    dirname, basename = os.path.split(filename)
    # Locate every file belonging to this dataset.
    self._filenames = find_filenames(filename)
    kwik = find_filename(basename, 'kwik', dir=dirname)
    xml = self._filenames['xml']
    clu = self._filenames['clu']
    self.log_filename = find_filename_or_new(filename, 'kvlog', dir=dirname)
    # Back up the .clu file exactly once: never clobber an existing backup.
    clu_original = find_filename_or_new(filename, 'clu_original')
    if os.path.exists(clu) and not os.path.exists(clu_original):
        shutil.copyfile(clu, clu_original)
    # Convert from the Klusters format if no .kwik file exists yet.
    if not kwik:
        # BUGFIX: was `assert xml, ValueError(...)` — the assert is
        # stripped under `python -O` and the ValueError was only the
        # assert *message*, never raised. Raise it explicitly.
        if not xml:
            raise ValueError("I need the .xml file!")
        klusters_to_kwik(filename=xml, dir=dirname,
                         progress_report=self._report_progress_open)
    self.experiment = Experiment(basename, dir=dirname, mode='a')
    # CONSISTENCY CHECK: add missing clusters.
    add_missing_clusters(self.experiment)
    # Load the similarity measure chosen by the user in the preferences
    # file: 'gaussian' or 'kl'.
    self.similarity_measure = (self.userpref['similarity_measure']
                               or 'gaussian')
    debug("Similarity measure: {0:s}.".format(self.similarity_measure))
    info("Opening {0:s}.".format(self.experiment.name))
    # Cache frequently-used experiment metadata on the controller.
    self.shanks = sorted(self.experiment.channel_groups.keys())
    self.freq = self.experiment.application_data.spikedetekt.sample_rate
    self.fetdim = \
        self.experiment.application_data.spikedetekt.nfeatures_per_channel
    self.nsamples = \
        self.experiment.application_data.spikedetekt.waveforms_nsamples
    self.set_shank(self.shanks[0])
def open(self, filename=None):
    """Open everything."""
    # Resolve which filename to use: the argument wins and is remembered;
    # otherwise fall back to the previously stored one.
    if filename is None:
        filename = self.filename
    else:
        self.filename = filename
    folder, base = os.path.split(filename)
    # Gather all dataset-related filenames up front.
    self._filenames = find_filenames(filename)
    kwik = find_filename(base, 'kwik', dir=folder)
    xml = self._filenames['xml']
    clu = self._filenames['clu']
    self.log_filename = find_filename_or_new(filename, 'kvlog', dir=folder)
    # Backup the .clu file (only once: an existing backup is kept).
    clu_original = find_filename_or_new(filename, 'clu_original')
    if os.path.exists(clu) and not os.path.exists(clu_original):
        shutil.copyfile(clu, clu_original)
    # No .kwik yet: convert the legacy Klusters files first.
    if not kwik:
        assert xml, ValueError("I need the .xml file!")
        klusters_to_kwik(filename=xml, dir=folder,
                         progress_report=self._report_progress_open)
    self.experiment = Experiment(base, dir=folder, mode='a')
    # CONSISTENCY CHECK: add missing clusters.
    add_missing_clusters(self.experiment)
    # Load the similarity measure chosen by the user in the preferences
    # file: 'gaussian' or 'kl'.
    measure = self.userpref['similarity_measure']
    self.similarity_measure = measure if measure else 'gaussian'
    debug("Similarity measure: {0:s}.".format(self.similarity_measure))
    info("Opening {0:s}.".format(self.experiment.name))
    # Cache experiment-level metadata used throughout the session.
    self.shanks = sorted(self.experiment.channel_groups.keys())
    spikedetekt = self.experiment.application_data.spikedetekt
    self.freq = spikedetekt.sample_rate
    self.fetdim = spikedetekt.nfeatures_per_channel
    self.nsamples = spikedetekt.waveforms_nsamples
    self.set_shank(self.shanks[0])
def open(self, filename=None):
    """Open a Klusters dataset and convert it to the Kwik format.

    Loads per-shank Klusters data, backs up the original .clu file,
    builds the probe (PRB) and parameters (PRM) dictionaries, and
    creates/opens the Kwik files in append mode.
    """
    if filename is not None:
        self.filename = filename
    self.klusters_data = open_klusters(self.filename)
    self.filenames = self.klusters_data['filenames']
    self.name = self.klusters_data['name']
    # Back up the original CLU file — but only once. BUGFIX: the backup
    # was previously copied unconditionally, clobbering an existing
    # `clu_original` on every open (the sibling `open` guards this the
    # same way).
    filename_clu_original = find_filename_or_new(self.filename,
                                                 'clu_original')
    clu = self.filenames['clu']
    if os.path.exists(clu) and not os.path.exists(filename_clu_original):
        shutil.copyfile(clu, filename_clu_original)
    # The probe either comes from a .probe file (converted to PRB) or was
    # auto-generated by open_klusters (already in PRB form).
    if 'probe' in self.klusters_data:
        prb = probe_to_prb(self.klusters_data['probe'])
    else:
        prb = self.klusters_data['prb']
    prm = metadata_to_prm(self.klusters_data['metadata'])
    # Record the number of feature columns for each channel group.
    for chgrp in prb.keys():
        prb[chgrp]['nfeatures'] = self.klusters_data[chgrp]['fetcol']
    self.filenames_kwik = create_files(self.name, prm=prm, prb=prb)
    self.files = open_files(self.name, mode='a')
    # The integer keys of klusters_data are the shank (channel group)
    # indices; the string keys are metadata entries.
    self.shanks = sorted([key for key in self.klusters_data.keys()
                          if isinstance(key, (int, long))])
    self.shank = self.shanks[0]
    self.spike = 0
def open_klusters(filename):
    """Open a Klusters dataset and return all of its data in one dict.

    The returned dict maps each integer shank index to that shank's data
    (from `open_klusters_oneshank`), plus the string keys 'name',
    'metadata', 'shanks', 'filenames', and either 'prb' (auto-generated
    probe dict) or 'probe' (namespace executed from a .probe file).
    """
    indices = find_indices(filename)
    triplet = filename_to_triplet(filename)
    # One Klusters file set per shank index.
    filenames_shanks = {}
    for index in indices:
        filenames_shanks[index] = triplet_to_filename(triplet[:2] + (index,))
    # `fn` rather than `filename`: avoid shadowing the parameter.
    klusters_data = {index: open_klusters_oneshank(fn)
                     for index, fn in filenames_shanks.iteritems()}
    shanks = filenames_shanks.keys()
    # Find the dataset filenames and load the metadata.
    filenames = find_filenames(filename)
    # Metadata common to all shanks (read from shank 1 of the .xml file).
    metadata = read_xml(filenames['xml'], 1)
    # Metadata specific to each shank.
    metadata.update({shank: read_xml(filenames['xml'], shank)
                     for shank in shanks})
    metadata['shanks'] = sorted(shanks)
    metadata['has_masks'] = (
        ('mask' in filenames and filenames['mask'] is not None) or
        ('fmask' in filenames and filenames['fmask'] is not None))
    klusters_data['name'] = triplet[0]
    klusters_data['metadata'] = metadata
    klusters_data['shanks'] = shanks
    klusters_data['filenames'] = filenames
    # Load the probe file. If no probe file exists, generate a default,
    # linear ('complete') probe with the right number of channels per
    # shank.
    filename_probe = filenames['probe']
    if not filename_probe:
        # Renamed from `shanks` to avoid shadowing the shank-index list
        # above; the dead `find_filename_or_new(...)` call (its only
        # consumer was commented out) has been removed.
        nchannels_per_shank = {shank: klusters_data[shank]['nchannels']
                               for shank in filenames_shanks.keys()}
        klusters_data['prb'] = generate_probe(nchannels_per_shank,
                                              'complete')
    else:
        # SECURITY NOTE: the probe file is executed as Python code —
        # only open probe files from trusted sources.
        probe_ns = {}
        execfile(filename_probe, {}, probe_ns)
        klusters_data['probe'] = probe_ns
    return klusters_data
def open_klusters(filename):
    """Load a whole Klusters dataset (every shank) into a single dict.

    Integer keys hold per-shank data; string keys hold 'name',
    'metadata', 'shanks', 'filenames', and 'prb' or 'probe'.
    """
    indices = find_indices(filename)
    triplet = filename_to_triplet(filename)
    # Build the per-shank filename map, then load each shank.
    filenames_shanks = dict(
        (i, triplet_to_filename(triplet[:2] + (i,))) for i in indices)
    klusters_data = {}
    for index, fn in filenames_shanks.iteritems():
        klusters_data[index] = open_klusters_oneshank(fn)
    shanks = filenames_shanks.keys()
    # Locate the dataset files and load the metadata.
    filenames = find_filenames(filename)
    # Shank-independent metadata is read from shank 1 of the .xml file.
    metadata = read_xml(filenames['xml'], 1)
    # Then add the shank-specific metadata, one entry per shank.
    for shank in shanks:
        metadata[shank] = read_xml(filenames['xml'], shank)
    metadata['shanks'] = sorted(shanks)
    has_mask = 'mask' in filenames and filenames['mask'] is not None
    has_fmask = 'fmask' in filenames and filenames['fmask'] is not None
    metadata['has_masks'] = has_mask or has_fmask
    klusters_data['name'] = triplet[0]
    klusters_data['metadata'] = metadata
    klusters_data['shanks'] = shanks
    klusters_data['filenames'] = filenames
    # Load the probe file; if none exists, generate a default, linear
    # probe with the right number of channels per shank.
    filename_probe = filenames['probe']
    if filename_probe:
        probe_ns = {}
        execfile(filename_probe, {}, probe_ns)
        klusters_data['probe'] = probe_ns
    else:
        # Generate a probe filename.
        filename_probe = find_filename_or_new(filename, 'default.probe',
                                              have_file_index=False)
        channel_counts = dict((shank, klusters_data[shank]['nchannels'])
                              for shank in filenames_shanks.keys())
        klusters_data['prb'] = generate_probe(channel_counts, 'complete')
    return klusters_data