Example #1
    def chomp(self):
        """
        Missing documentation
        
        Returns
        -------
        Value : Type
            Description
        """
        hdlog.debug("Chomping samples from model")
        self._raw_patterns = PatternsRaw(save_sequence=True)
        self._raw_patterns.chomp_spikes(spikes=self._sample_spikes)
        hdlog.info("Raw: %d-bit, %d patterns" %
                   (self._sample_spikes.N, len(self._raw_patterns)))

        hdlog.debug(
            "Chomping dynamics (from network learned on the samples) applied to samples"
        )
        self._hopfield_patterns = PatternsHopfield(learner=self._learner,
                                                   save_sequence=True)
        self._hopfield_patterns.chomp_spikes(spikes=self._sample_spikes)
        hdlog.info("Hopfield: %d-bit, %d patterns" %
                   (self._sample_spikes.N, len(self._hopfield_patterns)))

        # print "Before dynamics:"
        # print self.sample_spikes.spikes
        # print "Applied dynamics:"
        self._hopfield_spikes = self._hopfield_patterns.apply_dynamics(
            spikes=self._sample_spikes, reshape=True)
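
For context, a usage sketch of how this method is typically driven (the SpikeModel import path and constructor are assumptions inferred from the attribute names, not confirmed by the snippet):

import numpy as np
from hdnet.spikes import Spikes
from hdnet.spikes_model import SpikeModel

# toy binary raster: 10 cells x 1000 time bins, ~5% firing probability
raster = (np.random.rand(10, 1000) < 0.05).astype(int)
spikes = Spikes(spikes=raster)

model = SpikeModel(spikes=spikes)  # assumed owning class of chomp()
model.fit()    # learn Hopfield couplings on the sampled spikes
model.chomp()  # populate _raw_patterns / _hopfield_patterns as above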
Example #2
 def objective_gradient_batched(self,
                                sampler,
                                sample_size,
                                batch_size,
                                randstate,
                                J=None,
                                return_K=False):
     """
     Missing documentation
     
     Parameters
     ----------
     sampler : Type
         Description
     sample_size : Type
         Description
     batch_size : Type
         Description
     randstate : Type
         Description
     J : numpy array, optional
         Coupling matrix of size N x N, where N denotes the number
         of nodes in the network (default None)
     return_K : bool, optional
         Description (default False)
     
     Returns
     -------
     Value : Type
         Description
     """
     np.random.set_state(randstate)
     # integer division so that range() below receives an int
     nbatch = sample_size // batch_size
     if J is None:
         J = self._J.copy()
         J[np.eye(self._N, dtype=bool)] = -2 * self._theta
     Ksum = 0
     dJ = np.zeros((self._N, self._N), dtype=float)
     for batch in range(nbatch):
         hdlog.debug("batch %i/%i" % (batch + 1, nbatch))
         X = sampler(batch_size)
         S = 2 * X - 1  # map {0,1} samples to {-1,+1} spins
         Kfull = np.exp(-S * np.dot(X, J.T) + .5 * np.diag(J)[None, :])
         dJ += -np.dot(X.T, Kfull * S) + .5 * np.diag(Kfull.sum(0))
         Ksum += Kfull.sum()
     if self._symmetric:
         dJ = .5 * (dJ + dJ.T)
     M = nbatch * batch_size
     if return_K:
         return Ksum / M, dJ / M
     else:
         return dJ / M
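
For context, a sketch of the sampler contract this method assumes: a callable that, given a batch size, returns a binary sample matrix of shape (batch_size, N). The pre-drawn data and the commented call are illustrative assumptions:

import numpy as np

N = 20
data = (np.random.rand(10000, N) < 0.1).astype(float)  # fake pre-drawn samples
state = {'pos': 0}

def sampler(batch_size):
    # hand out the next batch_size rows of the pre-drawn samples
    start = state['pos']
    state['pos'] += batch_size
    return data[start:start + batch_size]

randstate = np.random.get_state()
# with a learner instance in scope (assumption):
# K, dJ = learner.objective_gradient_batched(sampler, 10000, 1000,
#                                            randstate, return_K=True)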
Example #3
 def _load_v1(self, contents, load_extra=False):
     # internal function to load v1 file format
     hdlog.debug('Loading HopfieldNet, format version 1')
     return Restoreable._load_attributes(self, contents, self._SAVE_ATTRIBUTES_V1)
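
Versioned loaders like this are usually selected by a small dispatch method; a minimal sketch of that pattern (the _load method name and the 'version' key are assumptions, not part of the snippet):

def _load(self, contents, load_extra=False):
    # route saved contents to the matching versioned loader (_load_v1, ...)
    version = contents.get('version', 1)
    loader = getattr(self, '_load_v%d' % version)
    return loader(contents, load_extra=load_extra)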
Example #4
    def read_spikes(path_or_files, rate, first_cluster=2, filter_silent=True, return_status=False):
        """
        Reader for `KlustaKwik <https://github.com/klusta-team/klustakwik>`_ files.
        
        Parameters
        ----------
        path_or_files : string or list of strings
            path of data set directory or list of \*.res.\* files to load
        rate : float
            sampling rate [in Hz]
        first_cluster : int, optional
            index of the first cluster to load; lower-numbered clusters,
            commonly used for unclassified spikes, are discarded (default 2)
        filter_silent : boolean, optional
            filter out clusters that have no spikes (default True)
        return_status : boolean, optional
            if True returns a status dictionary along with data as second return value (default False)

        Returns
        -------
        spike_times : list of numpy arrays
            list of spike time arrays, one per cluster. Float values
            represent spike times in seconds (i.e. a value of 1.0
            represents a spike at time 1 s)
        """

        if isinstance(path_or_files, str):
            # glob all res files in the given directory
            hdlog.info('Loading KlustaKwik data from %s' % os.path.abspath(path_or_files))
            import glob
            res_files = glob.glob(os.path.join(path_or_files, '*.res.*'))
        else:
            res_files = path_or_files
            hdlog.info('Loading KlustaKwik data from files %s' % str(path_or_files))

        hdlog.info('Processing %d electrode files' % len(res_files))

        spike_times = []
        num_clusters = 0
        num_spikes = 0
        t_min = np.inf
        t_max = -np.inf
        cells_filtered = 0
        electrodes = []

        for fn_res in res_files:
            hdlog.debug('Processing electrode file "%s"..' % fn_res)
            electrodes.append(int(fn_res[fn_res.rindex('.') + 1:]))

            fn_clu = fn_res.replace('.res.', '.clu.')
            if not os.path.exists(fn_clu):
                raise IOError('Cluster file "%s" not found!' % fn_clu)

            # load time stamps (convert sample indices to seconds)
            times = np.loadtxt(fn_res) * (1. / float(rate))

            # load cluster data: first entry is the cluster count,
            # the rest assigns a cluster to each spike time
            clusters = np.loadtxt(fn_clu).astype(int)
            n_clusters = clusters[0]
            cluster_seq = clusters[1:]

            if cluster_seq.shape[0] != times.shape[0]:
                raise ValueError('Data inconsistent for files %s, %s: lengths differ!' % (fn_res, fn_clu))

            hdlog.debug('%d clusters, %d spikes' % (n_clusters, cluster_seq.shape[0]))

            spike_times_electrode = [times[np.where(cluster_seq == c)[0]]
                                     for c in range(first_cluster, n_clusters)]

            if filter_silent:
                c_orig = len(spike_times_electrode)
                spike_times_electrode = [x for x in spike_times_electrode if len(x) > 0]
                c_filtered = c_orig - len(spike_times_electrode)
                cells_filtered += c_filtered

            spike_times.extend(spike_times_electrode)

            num_clusters += n_clusters - first_cluster
            num_spikes += sum(map(len, spike_times_electrode))
            t_min = min(t_min, min(times))
            t_max = max(t_max, max(times))

        status = {
            'clusters': num_clusters,
            'discarded_clusters': first_cluster * len(res_files),
            'filtered': cells_filtered,
            't_min': t_min,
            't_max': t_max,
            'num_spikes': num_spikes,
            'electrodes': electrodes
        }

        hdlog.info('Processed %d clusters (%d discarded), %d cells (%d silent discarded), %d spikes total, t_min=%f s, t_max=%f s, delta=%f s' %
                   (num_clusters, first_cluster * len(res_files), num_clusters - cells_filtered, cells_filtered,
                    num_spikes, t_min, t_max, t_max - t_min))

        if return_status:
            return spike_times, status
        else:
            return spike_times
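
A minimal usage sketch (the dataset path and 20 kHz sampling rate are placeholders; in the library this reader may be exposed as a static method of a reader class):

# load all *.res.* / *.clu.* pairs from a KlustaKwik dataset directory
spike_times, status = read_spikes('/path/to/dataset', rate=20000.0,
                                  return_status=True)
print('%d cells, %d spikes, %.1f s of recording' %
      (len(spike_times), status['num_spikes'],
       status['t_max'] - status['t_min']))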
Example #5
 def _load_v1(self, contents, load_extra=False):
     # internal function to load v1 file format
     hdlog.debug('Loading Stimulus, format version 1')
     return Restoreable._load_attributes(self, contents,
                                         self._SAVE_ATTRIBUTES_V1)
Example #6
 def _load_v2(self, contents, load_extra=False):
     # internal function to load v2 file format
     hdlog.debug('Loading PatternsHopfield patterns, format version 2')
     return Restoreable._load_attributes(self, contents,
                                         self._SAVE_ATTRIBUTES_V2)