Example #1
 def read_probe(self, probe):
     try:
         self.probe = process_probe(probe)
     except Exception as e:
         info(("There was an error while loading the probe: "
                   "'{0:s}'").format(e.message))
         self.probe = None
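process_probe is expected to validate the probe description and raise on malformed input, which is what the except branch above catches. A minimal, hypothetical stand-in (the real helper lives in the library and may differ):

    def process_probe(probe):
        # Hypothetical validator: accept a dict mapping shank index to a
        # list of channel indices, and reject anything else.
        if not isinstance(probe, dict):
            raise ValueError("probe must be a dict of shanks")
        return probe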
Example #2
 def read(self):
     self.initialize_logfile()
     # Load the similarity measure chosen by the user in the preferences
     # file: 'gaussian' or 'kl'.
     # Refresh the preferences file when a new file is opened.
     # USERPREF.refresh()
     self.similarity_measure = self.userpref['similarity_measure'] or 'gaussian'
     debug("Similarity measure: {0:s}.".format(self.similarity_measure))
     info("Opening {0:s}.".format(self.filename))
     self.report_progress(0, 5)
     self.read_metadata()
     self.read_probe()
     self.report_progress(1, 5)
     self.read_features()
     self.report_progress(2, 5)
     self.read_res()
     self.read_clusters()
     self.report_progress(3, 5)
     self.read_cluster_info()
     self.read_group_info()
     self.read_masks()
     self.report_progress(4, 5)
     self.read_waveforms()
     self.report_progress(5, 5)
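read() drives a six-step progress report through report_progress(step, nsteps). A hedged sketch of a compatible callback, assuming the GUI only needs a percentage (the project's real signature may differ):

    def report_progress(self, step, nsteps):
        # Map the (step, nsteps) pair onto a 0-100 integer for a progress bar.
        percent = int(100.0 * step / nsteps)
        self.progress_bar.setValue(percent)  # hypothetical GUI hook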
Example #4
 def open_spikes(self):
     """Open a HDF5 kwik file."""
     
     if not os.path.exists(self.filename_kwik) and os.path.exists(self.filenames['fet']):
         klusters_to_hdf5(self.filename, self.klusters_to_hdf5_progress_report)
     
     self.initialize_logfile()
     # Load the similarity measure chosen by the user in the preferences
     # file: 'gaussian' or 'kl'.
     # Refresh the preferences file when a new file is opened.
     # USERPREF.refresh()
     self.similarity_measure = self.userpref['similarity_measure'] or 'gaussian'
     debug("Similarity measure: {0:s}.".format(self.similarity_measure))
     info("Opening {0:s}.".format(self.filename))
         
     if os.path.exists(self.filename):
         self.kwik = tb.openFile(self.filename, mode='r+')
         self.read_metadata(self.kwik)
         # Get the list of shanks.
         # WARNING: the introspection below detects the shank indices from
         # the "shanks" group; it becomes unnecessary once the metadata
         # contains a "SHANKS" attribute with the list of shanks.
         self.shanks = [int(re.match("shank([0-9]+)", shank._v_name).group(1))
                        for shank in self.kwik.listNodes('/shanks')]
         print self.shanks
         # By default, read the first available shank.
         self.set_shank(self.shanks[0])
         self.read_shank()
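The shank indices are recovered from node names such as 'shank0' or 'shank12' with a regular expression. The extraction in isolation:

    import re
    names = ['shank0', 'shank1', 'shank12']  # hypothetical node names
    shanks = [int(re.match("shank([0-9]+)", name).group(1)) for name in names]
    assert shanks == [0, 1, 12]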
Example #5
 def read_group_info(self):
     try:
         self.group_info = read_group_info(self.filename_groupinfo)
         info("Successfully loaded {0:s}".format(self.filename_groupinfo))
     except IOError:
         info("The GROUPINFO file is missing, generating a default one.")
         self.group_info = default_group_info()
     
     # Convert to Pandas.
     self.group_colors = self.group_info['color'].astype(np.int32)
     self.group_names = self.group_info['name']
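When the GROUPINFO file is absent, default_group_info() must return a table carrying at least the 'color' and 'name' columns accessed just below. A hypothetical sketch (the library's actual defaults may use other groups):

    import numpy as np
    import pandas as pd

    def default_group_info():
        # Hypothetical defaults: two groups with integer colors.
        return pd.DataFrame({'color': np.array([0, 1], dtype=np.int32),
                             'name': ['Noise', 'Good']})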
Example #7
    def open(self, filename=None):
        """Open everything."""
        if filename is None:
            filename = self.filename
        else:
            self.filename = filename
        dir, basename = os.path.split(filename)

        # Converting to kwik if needed
        # kwik = find_filename(basename, 'kwik', dir=dir)
        # xml = find_filename(basename, 'xml', dir=dir)
        # self.filename_clu = find_filename(basename, 'clu', dir=dir)
        self._filenames = find_filenames(filename)
        kwik = find_filename(basename, 'kwik', dir=dir)
        xml = self._filenames['xml']
        clu = self._filenames['clu']

        self.log_filename = find_filename_or_new(filename, 'kvlog', dir=dir)

        # Backup the .clu file.
        clu_original = find_filename_or_new(filename, 'clu_original')
        if os.path.exists(clu) and not os.path.exists(clu_original):
            shutil.copyfile(clu, clu_original)

        if not kwik:
            if not xml:
                raise ValueError("I need the .xml file!")
            klusters_to_kwik(filename=xml,
                             dir=dir,
                             progress_report=self._report_progress_open)

        self.experiment = Experiment(basename, dir=dir, mode='a')

        # CONSISTENCY CHECK
        # add missing clusters
        add_missing_clusters(self.experiment)

        # TODO
        # self.initialize_logfile()
        # Load the similarity measure chosen by the user in the preferences
        # file: 'gaussian' or 'kl'.
        # Refresh the preferences file when a new file is opened.
        # USERPREF.refresh()
        self.similarity_measure = self.userpref['similarity_measure'] or 'gaussian'
        debug("Similarity measure: {0:s}.".format(self.similarity_measure))
        info("Opening {0:s}.".format(self.experiment.name))
        self.shanks = sorted(self.experiment.channel_groups.keys())

        self.freq = self.experiment.application_data.spikedetekt.sample_rate

        self.fetdim = self.experiment.application_data.spikedetekt.nfeatures_per_channel
        self.nsamples = self.experiment.application_data.spikedetekt.waveforms_nsamples

        self.set_shank(self.shanks[0])
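Backing up the .clu file uses a copy-once pattern: the original is preserved the first time the file is opened and never overwritten afterwards. The pattern in isolation:

    import os
    import shutil

    def backup_once(path, backup_path):
        # Copy only if no backup exists yet, so repeated opens never
        # clobber the pristine original.
        if os.path.exists(path) and not os.path.exists(backup_path):
            shutil.copyfile(path, backup_path)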
Example #9
 def read_waveforms(self):
     try:
         self.waveforms = read_waveforms(self.filename_spk, self.nsamples,
                                         self.nchannels)
         info("Successfully loaded {0:s}".format(self.filename_spk))
     except IOError:
         warn("The SPK file is missing.")
         self.waveforms = np.zeros((self.nspikes, self.nsamples, 
             self.nchannels))
     # Convert to Pandas.
     self.waveforms = pd.Panel(self.waveforms, dtype=np.float32)
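When the SPK file is missing, the fallback is a zero array shaped (nspikes, nsamples, nchannels), i.e. one nsamples x nchannels waveform per spike. A toy check of that layout (hypothetical sizes):

    import numpy as np
    nspikes, nsamples, nchannels = 100, 32, 4
    waveforms = np.zeros((nspikes, nsamples, nchannels))
    assert waveforms[0].shape == (nsamples, nchannels)  # one spike's waveform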
Example #11
def test_experiment_repr_nokwd():
    kwd = os.path.join(DIRPATH, 'myexperiment.raw.kwd')
    kwd2 = os.path.join(DIRPATH, 'myexperiment2.raw.kwd')

    # Move a KWD file and test if Experiment works without KWD.
    os.rename(kwd, kwd2)

    info("The following error message is expected (part of the unit test)")
    with Experiment('myexperiment', dir=DIRPATH) as exp:
        s = str(exp)

    os.rename(kwd2, kwd)
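The test renames the KWD file and renames it back at the end; if str(exp) raised, the restore would never run. A more defensive variant of the same test body, using try/finally (a sketch, not the project's code):

    os.rename(kwd, kwd2)
    try:
        with Experiment('myexperiment', dir=DIRPATH) as exp:
            s = str(exp)
    finally:
        # Restore the KWD file even if the block above raises.
        os.rename(kwd2, kwd)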
Example #13
 def read_masks(self):
     try:
         self.masks, self.masks_full = read_masks(self.filename_mask,
                                                  self.fetdim)
         info("Successfully loaded {0:s}".format(self.filename_mask))
     except IOError:
         warn("The MASKS/FMASKS file is missing.")
         # Default masks if the MASK/FMASK file is not available.
         self.masks = np.ones((self.nspikes, self.nchannels))
         self.masks_full = np.ones(self.features.shape)
     self.masks = pd.DataFrame(self.masks)
     self.masks_full = pd.DataFrame(self.masks_full)
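masks stores one value per channel while masks_full stores one per feature column, so with fetdim features per channel the two arrays differ in width. Shape illustration (hypothetical sizes; the extra column stands for any non-PCA feature such as time):

    import numpy as np
    nspikes, nchannels, fetdim = 10, 4, 3
    masks = np.ones((nspikes, nchannels))                    # per channel
    masks_full = np.ones((nspikes, nchannels * fetdim + 1))  # per feature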
Example #15
def get_header_size(filename_raw, ext):
    if ext == 'dat':
        return 0
    elif ext == 'ns5':

        f = open(filename_raw, 'rb')
        file_total_size = os.path.getsize(filename_raw)
        
        sample_width = 2  # int16 samples
    
        # Read File_Type_ID and check compatibility
        # If v2.2 is used, this value will be 'NEURALCD', which uses a slightly
        # more complex header. Currently unsupported.
        File_Type_ID = f.read(8)
        if File_Type_ID != 'NEURALSG':
            log.info("Incompatible ns5 file format. Only v2.1 is supported. "
                     "This will probably not work.")

        # Skip the next field.
        f.read(16)

        # Read Period.
        period, = struct.unpack('<I', f.read(4))
        freq = period * 30000.0

        # Read Channel_Count and Channel_ID
        Channel_Count, = struct.unpack('<I', f.read(4))
        
        Channel_ID = [struct.unpack('<I', f.read(4))[0]
            for n in xrange(Channel_Count)]
            
        # Compute total header length
        Header = 8 + 16 + 4 + 4 + \
            4*Channel_Count # in bytes

        # determine length of file
        n_samples = (file_total_size - Header) // (Channel_Count * sample_width)
        # Length = np.float64(n_samples) / Channel_Count
        file_total_size2 = sample_width * Channel_Count * n_samples + Header
    
        # Sanity check.
        if file_total_size != file_total_size2:
            fields = ["{0:s}={1:s}".format(key, str(locals()[key])) 
                for key in ('period', 'freq', 'Channel_Count', 'Channel_ID',
                    'n_samples')]
            raise ValueError("The file seems corrupted: " + ", ".join(fields))
    
        f.close()
        return Header
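Each header field is decoded with struct.unpack('<I', ...), which reads one little-endian unsigned 32-bit integer and returns a 1-tuple, hence the trailing commas in the assignments above. Round-trip illustration:

    import struct
    raw = struct.pack('<I', 30000)     # encode one little-endian uint32
    value, = struct.unpack('<I', raw)  # 1-tuple unpacking
    assert value == 30000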
Example #17
 def read_features(self):
     try:
         self.features, self.spiketimes = read_features(self.filename_fet,
             self.nchannels, self.fetdim, self.freq)
         info("Successfully loaded {0:s}".format(self.filename_fet))
     except IOError:
         raise IOError("The FET file is missing.")
     # Convert to Pandas.
     self.features = pd.DataFrame(self.features, dtype=np.float32)
     self.duration = self.spiketimes[-1]
     self.spiketimes = pd.Series(self.spiketimes, dtype=np.float32)
     
     # Count the number of spikes and save it in the metadata.
     self.nspikes = self.features.shape[0]
     self.metadata['nspikes'] = self.nspikes
     self.nextrafet = self.features.shape[1] - self.nchannels * self.fetdim
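nextrafet is whatever remains once the nchannels * fetdim principal-component columns are accounted for. With hypothetical sizes:

    nchannels, fetdim = 4, 3
    nfeatures = nchannels * fetdim + 1          # e.g. one extra time column
    nextrafet = nfeatures - nchannels * fetdim
    assert nextrafet == 1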
Example #19
def add_missing_clusters(exp):

    shanks = sorted(exp.channel_groups.keys())

    for shank in shanks:
        cg = exp.channel_groups[shank]
        clusters = cg.clusters.main.keys()
        clusters_unique = np.unique(cg.spikes.clusters.main[:])
        # Find missing clusters in the kwik file.
        missing = sorted(set(clusters_unique) - set(clusters))

        # Add all missing clusters with a default color and "Unsorted" cluster group (group #3).
        for idx in missing:
            info("Adding missing cluster %d in shank %d." % (idx, shank))
            add_cluster(exp._files, channel_group_id='%d' % shank,
                        id=str(idx),
                        clustering='main',
                        cluster_group=3)
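The missing clusters are the ids referenced by spikes but not declared in the kwik file, computed as a sorted set difference:

    import numpy as np
    declared = [0, 1, 2]                               # ids in the kwik file
    referenced = np.unique(np.array([0, 1, 3, 3, 5]))  # ids used by spikes
    missing = sorted(set(referenced) - set(declared))
    assert missing == [3, 5]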
Example #20
 def read_clusters(self):
     try:
         # Try reading the ACLU file, or fallback on the CLU file.
         if os.path.exists(self.filename_aclu):
             self.clusters = read_clusters(self.filename_aclu)
             info("Successfully loaded {0:s}".format(self.filename_aclu))
         else:
             self.clusters = read_clusters(self.filename_clu)
             info("Successfully loaded {0:s}".format(self.filename_clu))
     except IOError:
         warn("The CLU file is missing.")
         # Default clusters if the CLU file is not available.
         self.clusters = np.zeros(self.nspikes, dtype=np.int32)
     # Convert to Pandas.
     self.clusters = pd.Series(self.clusters, dtype=np.int32)
     
     # Count clusters.
     self._update_data()
Example #23
 def read_probe(self):
     if self.filename_probe is None:
         info("No probe file has been found.")
         self.probe = None
     else:
         try:
             self.probe = read_probe(self.filename_probe, self.fileindex)
             info("Successfully loaded {0:s}".format(self.filename_probe))
         except Exception as e:
             info(("There was an error while loading the probe file "
                       "'{0:s}' : {1:s}").format(self.filename_probe,
                         e.message))
             self.probe = None
Example #25
 def read_cluster_info(self):
     try:
         self.cluster_info = read_cluster_info(self.filename_acluinfo)
         info("Successfully loaded {0:s}".format(self.filename_acluinfo))
     except IOError:
         info("The CLUINFO file is missing, generating a default one.")
         self.cluster_info = default_cluster_info(self.clusters_unique)
             
     if not np.array_equal(self.cluster_info.index, self.clusters_unique):
         info("The CLUINFO file does not correspond to the loaded CLU file.")
         self.cluster_info = default_cluster_info(self.clusters_unique)
         
     self.cluster_colors = self.cluster_info['color'].astype(np.int32)
     self.cluster_groups = self.cluster_info['group'].astype(np.int32)
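The consistency check relies on np.array_equal, which compares both shape and contents, so any stale or reordered cluster info triggers the regeneration above:

    import numpy as np
    saved_index = np.array([2, 3, 5])      # index of the loaded cluster info
    clusters_unique = np.array([2, 3, 5])  # clusters actually in the CLU file
    assert np.array_equal(saved_index, clusters_unique)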
Example #27
def log_action(action, prefix=''):
    method_name, args, kwargs = action
    description = kwargs.get('_description', get_pretty_action(*action))
    log.info(prefix + description)
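Note that dict.get evaluates its default eagerly, so get_pretty_action runs even when kwargs already carries a '_description'. If that call were expensive, a guarded variant would avoid it (a sketch, not the project's code):

    def log_action(action, prefix=''):
        method_name, args, kwargs = action
        description = kwargs.get('_description')
        if description is None:
            # Build the pretty description only when none was provided.
            description = get_pretty_action(*action)
        log.info(prefix + description)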
Example #28
# Trying to load the Cython version.
try:
    log.debug(("Trying to load the compiled Cython version of the "
               "correlograms computations..."))
    from correlograms_cython import compute_correlograms_cython as compute_correlograms
except Exception as e:
    log.debug(str(e))
    try:
        log.debug("Failed. Trying to use Cython directly...")
        import pyximport
        pyximport.install(setup_args={'include_dirs': np.get_include()})
        from correlograms_cython import compute_correlograms_cython as compute_correlograms
    except Exception as e:
        log.debug(str(e))
        log.info(("Unable to load the fast Cython version of the correlograms "
                  "computations, so falling back to the pure Python version."))

        # Pure Python version.
        # --------------------
        def compute_correlograms(spiketimes,
                                 clusters,
                                 clusters_to_update=None,
                                 ncorrbins=None,
                                 corrbin=None):

            if ncorrbins is None:
                ncorrbins = NCORRBINS_DEFAULT
            if corrbin is None:
                corrbin = CORRBIN_DEFAULT

            # Ensure ncorrbins is an even number.
            assert ncorrbins % 2 == 0
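pyximport.install() makes 'import correlograms_cython' compile the .pyx source on the fly; passing include_dirs through setup_args lets the generated C code find the NumPy headers. Minimal usage:

    import numpy as np
    import pyximport
    pyximport.install(setup_args={'include_dirs': np.get_include()})
    # From here on, importing a .pyx module triggers compilation.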