Example #1
import os

import numpy as np

# find_filenames, find_index, read_xml, MemMappedText, etc. are helpers from
# the surrounding module and are assumed to be in scope.
def open_klusters_oneshank(filename):
    filenames = find_filenames(filename)
    fileindex = find_index(filename)

    # Open small Klusters files.
    data = {}
    metadata = read_xml(filenames['xml'], fileindex)
    data['clu'] = read_clusters(filenames['clu'])

    # Read .aclu data.
    if 'aclu' in filenames and os.path.exists(filenames['aclu']):
        data['aclu'] = read_clusters(filenames['aclu'])
    else:
        data['aclu'] = data['clu']

    # Read .acluinfo data.
    if 'acluinfo' in filenames and os.path.exists(filenames['acluinfo']):
        data['acluinfo'] = read_cluster_info(filenames['acluinfo'])
    # If the ACLUINFO does not exist, try CLUINFO (older file extension)
    elif 'cluinfo' in filenames and os.path.exists(filenames['cluinfo']):
        data['acluinfo'] = read_cluster_info(filenames['cluinfo'])
    else:
        data['acluinfo'] = default_cluster_info(np.unique(data['aclu']))

    # Read group info.
    if 'groupinfo' in filenames and os.path.exists(filenames['groupinfo']):
        data['groupinfo'] = read_group_info(filenames['groupinfo'])
    else:
        data['groupinfo'] = default_group_info()

    # Find out the number of columns in the .fet file.
    with open(filenames['fet'], 'r') as f:
        f.readline()
        # Get the number of non-empty columns in the .fet file.
        data['fetcol'] = len(
            [col for col in f.readline().split(' ') if col.strip() != ''])

    metadata['nspikes'] = len(data['clu'])
    data['fileindex'] = fileindex

    # Open big Klusters files.
    data['fet'] = MemMappedText(filenames['fet'], np.int64, skiprows=1)
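    # `or ''` below guards os.path.exists against a None entry in filenames.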
    if 'spk' in filenames and os.path.exists(filenames['spk'] or ''):
        data['spk'] = MemMappedBinary(filenames['spk'],
                                      np.int16,
                                      rowsize=metadata['nchannels'] *
                                      metadata['nsamples'])
    if 'uspk' in filenames and os.path.exists(filenames['uspk'] or ''):
        data['uspk'] = MemMappedBinary(filenames['uspk'],
                                       np.int16,
                                       rowsize=metadata['nchannels'] *
                                       metadata['nsamples'])
    if 'mask' in filenames and os.path.exists(filenames['mask'] or ''):
        data['mask'] = MemMappedText(filenames['mask'], np.float32, skiprows=1)

    # data['metadata'] = metadata
    data.update(metadata)

    return data
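
A minimal usage sketch for this function, assuming a single-shank Klusters
dataset on disk; the `mydata.fet.1` path is hypothetical:

data = open_klusters_oneshank('mydata.fet.1')  # hypothetical dataset path
print(data['nspikes'])   # spike count, derived from the .clu file
print(data['fetcol'])    # number of feature columns in the .fet file
# data['fet'] is a lazy, memory-mapped view of the (large) .fet file.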
Example #2
    def set_filenames(self, filename):
        filenames = find_filenames(filename)
        self.filename_kwik = filenames['hdf5_kwik']
        self.filename_log = filenames['kvwlg']
        self.filename_clu = filenames['clu']
        self.filename_kwa = filenames['hdf5_kwa']
        self.filename_raw_kwd = filenames['hdf5_raw']
        self.filename = self.filename_kwik
        self.filenames = filenames
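
This setter resolves every sibling file of a dataset from a single input path.
A sketch of the effect, with `loader` standing in for an instance of the class
this method belongs to (instance and path are hypothetical):

loader.set_filenames('mydata.kwik')   # hypothetical dataset path
loader.filename_kwik                  # resolved .kwik path
loader.filename_clu                   # resolved .clu path
loader.filenames                      # the full dict returned by find_filenames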
Example #3
File: kwikloader.py  Project: fiath/test
    def open(self, filename=None):
        """Open everything."""
        if filename is None:
            filename = self.filename
        else:
            self.filename = filename
        dir, basename = os.path.split(filename)

        # Converting to kwik if needed
        # kwik = find_filename(basename, 'kwik', dir=dir)
        # xml = find_filename(basename, 'xml', dir=dir)
        # self.filename_clu = find_filename(basename, 'clu', dir=dir)
        self._filenames = find_filenames(filename)
        kwik = find_filename(basename, 'kwik', dir=dir)
        xml = self._filenames['xml']
        clu = self._filenames['clu']

        self.log_filename = find_filename_or_new(filename, 'kvlog', dir=dir)

        # Backup the .clu file.
        clu_original = find_filename_or_new(filename, 'clu_original')
        if os.path.exists(clu) and not os.path.exists(clu_original):
            shutil.copyfile(clu, clu_original)

        if not kwik:
            if not xml:
                raise ValueError("I need the .xml file!")
            klusters_to_kwik(filename=xml,
                             dir=dir,
                             progress_report=self._report_progress_open)

        self.experiment = Experiment(basename, dir=dir, mode='a')

        # CONSISTENCY CHECK
        # add missing clusters
        add_missing_clusters(self.experiment)

        # TODO
        # self.initialize_logfile()
        # Load the similarity measure chosen by the user in the preferences
        # file: 'gaussian' or 'kl'.
        # Refresh the preferences file when a new file is opened.
        # USERPREF.refresh()
        self.similarity_measure = self.userpref[
            'similarity_measure'] or 'gaussian'
        debug("Similarity measure: {0:s}.".format(self.similarity_measure))
        info("Opening {0:s}.".format(self.experiment.name))
        self.shanks = sorted(self.experiment.channel_groups.keys())

        self.freq = self.experiment.application_data.spikedetekt.sample_rate

        self.fetdim = self.experiment.application_data.spikedetekt.nfeatures_per_channel
        self.nsamples = self.experiment.application_data.spikedetekt.waveforms_nsamples

        self.set_shank(self.shanks[0])
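
A sketch of driving this loader, assuming the class is named KwikLoader (the
file name kwikloader.py suggests it, but the class name is an assumption) and
that it can be constructed without arguments; the `mydata` dataset is
hypothetical:

loader = KwikLoader()
loader.open('mydata.kwik')  # converts the Klusters files to .kwik first if needed
print(loader.shanks)        # sorted channel-group ids
print(loader.freq)          # sample rate from the spikedetekt metadata
print(loader.fetdim, loader.nsamples)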
Example #4
def open_klusters(filename):
    indices = find_indices(filename)
    triplet = filename_to_triplet(filename)
    filenames_shanks = {}
    for index in indices:
        filenames_shanks[index] = triplet_to_filename(triplet[:2] + (index, ))
    klusters_data = {
        index: open_klusters_oneshank(filename)
        for index, filename in filenames_shanks.items()
    }
    shanks = list(filenames_shanks.keys())

    # Find the dataset filenames and load the metadata.
    filenames = find_filenames(filename)
    # Metadata common to all shanks.
    metadata = read_xml(filenames['xml'], 1)
    # Metadata specific to each shank.
    metadata.update(
        {shank: read_xml(filenames['xml'], shank)
         for shank in shanks})
    metadata['shanks'] = sorted(shanks)
    metadata['has_masks'] = (
        ('mask' in filenames and filenames['mask'] is not None)
        or ('fmask' in filenames and filenames['fmask'] is not None))

    klusters_data['name'] = triplet[0]
    klusters_data['metadata'] = metadata
    klusters_data['shanks'] = shanks
    klusters_data['filenames'] = filenames

    # Load probe file.
    filename_probe = filenames['probe']
    # If no probe file exists, create a default, linear probe with the right
    # number of channels per shank.
    if not filename_probe:
        # Generate a probe filename.
        filename_probe = find_filename_or_new(filename,
                                              'default.probe',
                                              have_file_index=False)
        shanks = {
            shank: klusters_data[shank]['nchannels']
            for shank in filenames_shanks.keys()
        }
        probe_python = generate_probe(shanks, 'complete')
        # with open(filename_probe, 'w') as f:
        # f.write(probe_python)
        # save_probe(filename_probe, probe_python)
        klusters_data['prb'] = probe_python
    else:
        probe_ns = {}
        # Run the probe file (plain Python) in an isolated namespace.
        with open(filename_probe) as f:
            exec(f.read(), {}, probe_ns)
        klusters_data['probe'] = probe_ns

    return klusters_data
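
A sketch tying both loaders together for a multi-shank dataset (the path is
hypothetical):

data = open_klusters('mydata.fet.1')
for shank in data['shanks']:
    print(shank, data[shank]['nspikes'])   # per-shank dict from open_klusters_oneshank
print(data['metadata']['has_masks'])       # True if a .mask or .fmask file was found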