Example #1
    def compute_decoder(self):
        srng = RandomStreams(seed=self.ensemble.seed)

        #TODO: have this be more for higher dimensions?  5000 maximum (like Nengo)?
        S = 500

        samples = make_samples(S, self.ensemble.dimensions, srng)

        # compute the target values (which are the same as the sample points for the 'X' origin)
        if self.func is None:
            values = samples
        else:
            values = numpy.array([self.func(s) for s in samples.T])
            if len(values.shape) < 2: values.shape = values.shape[0], 1
            values = values.T

        # compute the input current for every neuron and every sample point
        J = numpy.dot(self.ensemble.encoders, samples)
        J += numpy.array([self.ensemble.bias]).T

        # generate an array of ensembles, one ensemble per sample point
        neurons = self.ensemble.neuron.__class__(
            (self.ensemble.neurons, S),
            t_rc=self.ensemble.neuron.t_rc,
            t_ref=self.ensemble.neuron.t_ref)

        # run the neuron model for 1 second, accumulating spikes to get a spike rate
        #  TODO: is this enough?  Should it be less?  If we do less, we may get a good noise approximation!
        A = neuron.accumulate(J, neurons)

        # compute Gamma and Upsilon
        G = numpy.dot(A, A.T)
        U = numpy.dot(A, values.T)

        #TODO: optimize this so we're not doing the full eigenvalue decomposition
        #TODO: add NxS method for large N?

        w, v = numpy.linalg.eigh(G)
        limit = 0.1 * 0.1 * max(w)
        for i in range(len(w)):
            if w[i] < limit: w[i] = 0
            else: w[i] = 1.0 / w[i]
        Ginv = numpy.dot(v, numpy.multiply(w[:, numpy.newaxis], v.T))

        #Ginv=numpy.linalg.pinv(G)

        # compute decoder
        decoder = numpy.dot(Ginv, U) / (self.ensemble.neuron.dt)
        return decoder.astype('float32')
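At its core, this example solves a regularized least-squares problem for the decoders: Gamma = A A^T and Upsilon = A values^T are accumulated from the sampled activities, and the decoder is Ginv Upsilon. Below is a minimal, self-contained sketch of that solve using synthetic activity data in place of the simulated ensemble; the names rates, targets, n_neurons, and n_samples are illustrative stand-ins, not part of the original code.

import numpy

rng = numpy.random.RandomState(0)
n_neurons, n_samples, dims = 50, 500, 1
rates = rng.rand(n_neurons, n_samples)            # stand-in for A: activity of each neuron at each sample point
targets = rng.uniform(-1, 1, (dims, n_samples))   # stand-in for values: desired decoded output at each sample

G = numpy.dot(rates, rates.T)    # Gamma = A A^T
U = numpy.dot(rates, targets.T)  # Upsilon = A values^T

# regularized pseudoinverse of Gamma, mirroring the eigenvalue thresholding above
w, v = numpy.linalg.eigh(G)
limit = 0.1 * 0.1 * max(w)
winv = numpy.zeros_like(w)
winv[w >= limit] = 1.0 / w[w >= limit]
Ginv = numpy.dot(v, winv[:, numpy.newaxis] * v.T)

decoder = numpy.dot(Ginv, U)  # the original additionally divides by dt to convert accumulated spikes to rates
print(decoder.shape)          # (n_neurons, dims)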
Example #2
    def compute_decoder(self, eval_points=None):
        """Calculate the scaling values to apply to the output to each of the neurons in the attached 
        population such that the weighted summation of their output generates the desired decoded output.
        Decoder values computed as D = (A'A)^-1 A'X_f where A is the matrix of activity values of each 
        neuron over sampled X values, and X_f is the vector of desired f(x) values across sampled points
        
        :param list eval_points: specific set of points to optimize decoders over 
        """

        #TODO: have num_samples be more for higher dimensions?  5000 maximum (like Nengo)?
        num_samples = 500

        if eval_points is None:
            # randomly generate sample points from the state space over which to minimize decoder error
            srng = RandomStreams(
                seed=self.ensemble.seed)  # theano random number generator
            eval_points = make_samples(num_samples, self.ensemble.dimensions,
                                       srng)
        else:
            # otherwise reset num_samples, and make sure eval_points is in the right form
            # (rows are input dimensions, columns are different samples)
            eval_points = numpy.array(eval_points)
            if len(eval_points.shape) == 1:
                eval_points.shape = [1, eval_points.shape[0]]
            num_samples = eval_points.shape[1]

        # compute the target_values at the sampled points
        # (for the 'X' origin the target values are just the sample points themselves)
        if self.func is None:  # if no function provided, use identity function as default
            target_values = eval_points
        else:  # otherwise calculate target_values using provided function
            # scale all our sample points by ensemble radius, calculate function value, then scale back to unit length
            # this ensures that we accurately capture the shape of the function when the radius is > 1 (think for example func=x**2)
            target_values = numpy.array(
                [self.func(s * self.ensemble.radius)
                 for s in eval_points.T]) / self.ensemble.radius
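            # worked example of the scaling (illustrative): with func=x**2 and radius=2,
            # the sample point s=0.5 stands for x=1.0, so the stored target is f(1.0)/2 = 0.5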
            if len(target_values.shape) < 2:
                target_values.shape = target_values.shape[0], 1
            target_values = target_values.T

        # compute the input current for every neuron and every sample point
        J = numpy.dot(self.ensemble.encoders, eval_points)
        J += numpy.array([self.ensemble.bias]).T

        # duplicate attached population of neurons into array of ensembles, one ensemble per sample point
        # so in parallel we can calculate the activity of all of the neurons at each sample point
        neurons = self.ensemble.neurons.__class__(
            (self.ensemble.neurons_num, num_samples),
            tau_rc=self.ensemble.neurons.tau_rc,
            tau_ref=self.ensemble.neurons.tau_ref)

        # run the neuron model for 1 second, accumulating spikes to get a spike rate
        #  TODO: is this long enough?  Should it be less?  If we do less, we may get a good noise approximation!
        A = neuron.accumulate(J, neurons)

        # compute Gamma and Upsilon
        G = numpy.dot(A, A.T)
        U = numpy.dot(A, target_values.T)

        #TODO: optimize this so we're not doing the full eigenvalue decomposition
        #TODO: add NxS method for large N?

        #TODO: compare below with pinv rcond
        # eigh is for symmetric matrices; it returns eigenvalues w and normalized eigenvectors v
        w, v = numpy.linalg.eigh(G)
        limit = .01 * max(w)  # formerly 0.1 * 0.1 * max(w); set threshold
        for i in range(len(w)):
            if w[i] < limit: w[i] = 0  # zero out eigenvalues below the threshold
            else: w[i] = 1.0 / w[i]  # invert the rest in preparation for the Ginv calculation
        # w[:, numpy.newaxis] makes w a column vector; numpy.multiply is fast element-wise multiplication
        Ginv = numpy.dot(v, numpy.multiply(w[:, numpy.newaxis], v.T))

        #Ginv=numpy.linalg.pinv(G, rcond=.01)

        # compute decoder - least squares method
        decoder = numpy.dot(Ginv, U) / (self.ensemble.neurons.dt)
        return decoder.astype('float32')
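The commented-out alternative and the "compare below with pinv rcond" TODO point at numpy.linalg.pinv. Since G = A A^T is symmetric positive semidefinite, its singular values coincide with its eigenvalues, so pinv(G, rcond=.01) applies essentially the same cutoff (singular values at or below .01 times the largest are dropped) as the explicit loop above. A quick sanity check under that assumption, with an illustrative synthetic G:

import numpy

rng = numpy.random.RandomState(1)
A = rng.rand(20, 100)   # illustrative stand-in activity matrix
G = numpy.dot(A, A.T)   # symmetric positive semidefinite Gamma

# eigendecomposition route, as in the example above
w, v = numpy.linalg.eigh(G)
winv = numpy.zeros_like(w)
keep = w >= .01 * w.max()
winv[keep] = 1.0 / w[keep]
Ginv_eig = numpy.dot(v, winv[:, numpy.newaxis] * v.T)

# library route from the commented-out line
Ginv_pinv = numpy.linalg.pinv(G, rcond=.01)

print(numpy.allclose(Ginv_eig, Ginv_pinv))  # expected True when no eigenvalue sits exactly at the cutoff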