Example #1 (score: 0)
def gen_eval_points(ens, eval_points, rng, scale_eval_points=True):
    """Return concrete evaluation points for *ens*.

    If ``eval_points`` is a ``Distribution`` it is sampled — using
    ``ens.n_eval_points`` samples, or a default count derived from the
    neuron count and dimensionality. Otherwise it is treated as an
    array of points and copied to float64. When ``scale_eval_points``
    is true, the points are scaled in place by ``ens.radius``.
    """
    if not isinstance(eval_points, Distribution):
        # Explicit points supplied by the user; warn if an inconsistent
        # n_eval_points was also set (the array wins).
        mismatch = (ens.n_eval_points is not None
                    and eval_points.shape[0] != ens.n_eval_points)
        if mismatch:
            warnings.warn("Number of eval_points doesn't match "
                          "n_eval_points. Ignoring n_eval_points.")
        points = np.array(eval_points, dtype=np.float64)
    else:
        n_points = ens.n_eval_points
        if n_points is None:
            n_points = default_n_eval_points(ens.n_neurons, ens.dimensions)
        points = eval_points.sample(n_points, ens.dimensions, rng)

    if scale_eval_points:
        points *= ens.radius  # scale by ensemble radius
    return points
Example #2 (score: 0)
File: ensemble.py — Project: CamZHU/nengo
def gen_eval_points(ens, eval_points, rng, scale_eval_points=True):
    """Produce the evaluation-point array for an ensemble.

    Distribution inputs are sampled (``ens.n_eval_points`` samples, or a
    heuristic default based on neuron count and dimensionality);
    array-like inputs are copied to float64 as-is. When
    ``scale_eval_points`` is true, the result is scaled in place by
    ``ens.radius``.
    """
    requested = ens.n_eval_points
    if isinstance(eval_points, Distribution):
        if requested is None:
            requested = default_n_eval_points(ens.n_neurons, ens.dimensions)
        result = eval_points.sample(requested, ens.dimensions, rng)
    else:
        if requested is not None and eval_points.shape[0] != requested:
            warnings.warn("Number of eval_points doesn't match "
                          "n_eval_points. Ignoring n_eval_points.")
        result = np.array(eval_points, dtype=np.float64)

    if scale_eval_points:
        result *= ens.radius  # scale by ensemble radius
    return result
Example #3 (score: 0)
    def add_output(self,
                   function=lambda x: x,
                   eval_points=nengo.dists.UniformHypersphere(surface=False),
                   solver=nengo.solvers.LstsqL2(),
                   dt=0.001,
                   rng=np.random):
        """Decode ``function`` from the pooled ensembles into a new Node.

        Samples ``eval_points``, evaluates ``function`` at each point,
        computes the concatenated neural activities across all ensembles,
        solves for a single decoder matrix, and wires each ensemble's
        neurons into a new output Node with the corresponding slice of
        those decoders. Returns ``(output_node, solver_info)``.
        """
        if not isinstance(eval_points, nengo.dists.Distribution):
            raise TypeError("eval_points (%r) must be a "
                            "nengo.dists.Distribution" % eval_points)

        total_neurons = self.n_ensembles * self.n_neurons_per_ensemble
        n_points = default_n_eval_points(total_neurons, self.dimensions)
        points = eval_points.sample(n_points, self.dimensions, rng=rng)

        # Target values of the decoded function at each evaluation point.
        targets = np.asarray([np.atleast_1d(function(p)) for p in points])
        size_out = targets.shape[1]

        # Pooled activity matrix: one contiguous column block per ensemble.
        blocks = []
        for ens in self._ensembles:
            drive = np.dot(points, ens.encoders.T / ens.radius)
            blocks.append(
                loihi_rates(ens.neuron_type, drive, ens.gain, ens.bias, dt))
        activity = np.hstack(blocks)

        decoders, info = solver(activity, targets, rng=rng)  # A @ D ~= Y
        assert decoders.shape == (total_neurons, size_out)

        with self:
            output = nengo.Node(size_in=size_out)
            for i, ens in enumerate(self._ensembles):
                # Connect from .neurons with an explicit transform rather
                # than an Ensemble -> Node connection using NoSolver; see
                # https://github.com/nengo/nengo-loihi/issues/152.
                # Embedding these weights in NoSolver showed odd behaviour
                # (the function was not respected) when used to form a
                # recurrent passthrough.
                block = slice(i * ens.n_neurons, (i + 1) * ens.n_neurons)
                nengo.Connection(ens.neurons, output, synapse=None,
                                 transform=decoders[block, :].T)

        return output, info
Example #4 (score: 0)
def gen_eval_points(ens, eval_points, rng, scale_eval_points=True, dtype=None):
    """Return evaluation points for *ens* in the requested dtype.

    ``dtype`` defaults to ``rc.float_dtype``. Distribution inputs are
    sampled and cast; array-like inputs are copied (and must be 2-D).
    When ``scale_eval_points`` is true, the result is scaled in place by
    ``ens.radius``.
    """
    if dtype is None:
        dtype = rc.float_dtype

    if not isinstance(eval_points, Distribution):
        n_expected = ens.n_eval_points
        if n_expected is not None and eval_points.shape[0] != n_expected:
            warnings.warn("Number of eval_points doesn't match "
                          "n_eval_points. Ignoring n_eval_points.")
        eval_points = np.array(eval_points, dtype=dtype)
        assert eval_points.ndim == 2
    else:
        n_points = ens.n_eval_points
        if n_points is None:
            n_points = default_n_eval_points(ens.n_neurons, ens.dimensions)
        sampled = eval_points.sample(n_points, ens.dimensions, rng)
        eval_points = sampled.astype(dtype)

    if scale_eval_points:
        eval_points *= ens.radius  # scale by ensemble radius
    return eval_points
Example #5 (score: 0)
File: ensemble.py — Project: Ocode/nengo
def build_ensemble(model, ens):
    """Build an Ensemble into *model*.

    Samples evaluation points, encoders, max_rates/intercepts and
    gain/bias, creates the input/output signals and the encoding DotInc
    operator, builds the neuron model, and records the results in
    ``model.params[ens]`` as a ``BuiltEnsemble``.
    """
    # Create random number generator (seeded per-ensemble for determinism)
    rng = np.random.RandomState(model.seeds[ens])

    # Generate eval points
    if isinstance(ens.eval_points, Distribution):
        n_points = ens.n_eval_points
        if n_points is None:
            n_points = default_n_eval_points(ens.n_neurons, ens.dimensions)
        eval_points = ens.eval_points.sample(n_points, ens.dimensions, rng)
        # eval_points should be in the ensemble's representational range
        eval_points *= ens.radius
    else:
        if (ens.n_eval_points is not None
                and ens.eval_points.shape[0] != ens.n_eval_points):
            warnings.warn("Number of eval_points doesn't match "
                          "n_eval_points. Ignoring n_eval_points.")
        # NOTE: user-supplied points are used as-is (no radius scaling here)
        eval_points = np.array(ens.eval_points, dtype=np.float64)

    # Set up signal: the ensemble's represented (decoded-space) input
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        # Direct mode bypasses neurons; identity encoders pass values through
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
    else:
        # Explicit encoders: normalize each row to unit length
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Determine max_rates and intercepts
    max_rates = sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons: use explicit gain/bias if both given, otherwise
    # solve for them from max_rates/intercepts
    if ens.gain is not None and ens.bias is not None:
        gain = sample(ens.gain, ens.n_neurons, rng=rng)
        bias = sample(ens.bias, ens.n_neurons, rng=rng)
    elif ens.gain is not None or ens.bias is not None:
        # TODO: handle this instead of error
        raise NotImplementedError("gain or bias set for %s, but not both. "
                                  "Solving for one given the other is not "
                                  "implemented yet." % ens)
    else:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)

    if isinstance(ens.neuron_type, Direct):
        # Direct mode: neuron "output" is just the represented value
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        # Bias current is injected by copying it into neuron input each step
        model.add_op(Copy(src=Signal(bias, name="%s.bias" % ens),
                          dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders: fold gain and 1/radius into the encoding matrix
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    # Record everything sampled/derived here for later inspection
    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
Example #6 (score: 0)
        dx = dr*np.cos(theta) - r*np.sin(theta)*dtheta
        dy = dr*np.sin(theta) + r*np.cos(theta)*dtheta

        return [x[0] + tau*dx, x[1] + tau*dy]

    nengo.Connection(system, system[:2], synapse=tau, function=cycle)

    rate = nengo.Node([1])
    nengo.Connection(rate, system[2])

    bump = nengo.Node(lambda t: 1 if t < 0.5 else 0)
    nengo.Connection(bump, system[0])

    n_neurons = 1000
    n_points = default_n_eval_points(n_neurons, 2)
    e_p1 = nengo.dists.UniformHypersphere(surface=True).sample(n_points, 2)
    n_points = default_n_eval_points(n_neurons, 1)
    e_p2 = nengo.dists.Uniform(0, 1).sample(n_points, 1)
    readout = nengo.Ensemble(n_neurons=n_neurons, dimensions=3,
                             eval_points=np.concatenate((e_p1, e_p2), axis=1),
                             label="readout")
    def read_func(x):
        """Read a pattern sample based on the angle of (x[0], x[1]).

        The angle selects an index into the pattern, x[2] (rounded)
        selects which pattern, and the point's radius scales the result.
        """
        angle = np.arctan2(x[1], x[0])
        if angle < 0:
            angle += 2 * np.pi
        radius = np.sqrt(x[0] ** 2 + x[1] ** 2)
        idx = int(len(patterns[0]) * angle / (2 * np.pi))
        return patterns[int(np.round(x[2]))][idx] * radius
Example #7 (score: 0)
    def __init__(
        self,
        n_neurons=1000,
        n_neurons_out=1000,
        dimensions=None,
        vocab=None,
        intercepts_mem=nengo.dists.Uniform(0, 0),
        intercepts_out=nengo.dists.Uniform(0, 0),
        voja_tau=0.005,
        voja2_rate=None,  # encoder learning rate (e.g. 1e-3); None/0 disables
        voja2_bias=1,  # 1 is no bias
        pes_rate=None,  # e.g. 1e-3; None means no output-layer PES learning, 0 no learning
        bcm_rate=None,  # e.g. 1e-10; None means no recurrent connections, 0 no learning
        bcm_theta=1,
        bcm_max_weight=1e-5,
        bcm_diagonal0=True,
        label=None,
        dec_ini=0,
        output_radius=1,
        seed=None,
        load_from=None,
        fwd_dens=.05,
        fwd_multi=0,  # e.g. 1; >0 forwards mean memory activity to the output layer
        fb=0,
    ):
        """Two-layer memory with Voja2 encoder learning, PES decoder
        learning, and optional BCM/Hopfield-style recurrence.

        Either pass ``n_neurons``/``dimensions`` to initialize fresh
        weights, or ``load_from`` (path prefix of an ``.npz`` file) to
        restore encoders, decoders, recurrent weights, and related
        parameters saved earlier.
        """
        super(Mem_Voja2_Pes_Hop_TwoLayers, self).__init__(label=label)

        #print('SEED %i' % seed)

        if (n_neurons is None or dimensions is None) and load_from is None:
            # NOTE(review): `error` is not defined in this block — this
            # probably should raise ValueError; confirm against the module.
            error('Either provide load_from or n_neurons and dimensions.')

        if load_from is not None:

            # Restore previously saved weights and parameters.
            data = np.load(load_from + '.npz', allow_pickle=True)
            encoders = data['enc']
            decoders = data['dec']
            hop_weights = data['hop']
            intercepts_mem = data['intercepts_mem']
            #intercepts_out = data['intercepts_out']
            # Derive sizes from the stored decoder matrix (dimensions x n_neurons).
            dimensions = decoders.shape[0]
            n_neurons = decoders.shape[1]
            n_neurons_out = int(data['n_neurons_out'])
            output_radius = data['output_radius']
            fwd_multi = data['fwd_multi']
            fwd_matrices = data['fwd_matrices']

            if seed is None:
                seed = data['seed']
            else:
                # A caller-supplied seed must agree with the saved one.
                assert seed == int(data['seed'])

        else:
            # Fresh initialization: random encoders, zero (or small random)
            # decoders, zero recurrent weights.
            rng = np.random
            rng.seed(seed)
            dist = nengo.dists.UniformHypersphere()
            encoders = dist.sample(n_neurons, dimensions, rng=rng)
            if dec_ini == 0:
                decoders = np.zeros((dimensions, n_neurons), dtype=float)
            else:
                #decoders = np.random.normal(0, dec_ini, size=(dimensions, n_neurons))
                decoders = np.random.uniform(-dec_ini,
                                             dec_ini,
                                             size=(dimensions, n_neurons))
            hop_weights = np.zeros((n_neurons, n_neurons))
            fwd_matrices = None

        # Store configuration/weights on the instance for later saving.
        self.seed = seed
        self.voja2_rate = voja2_rate
        self.pes_rate = pes_rate
        self.bcm_rate = bcm_rate
        self.decoders = decoders
        self.hop_weights = hop_weights
        self.n_neurons_out = n_neurons_out
        self.intercepts_out = intercepts_out
        self.output_radius = output_radius
        self.fwd_multi = fwd_multi
        self.fwd_matrices = fwd_matrices

        with self:

            self.input = nengo.Node(None, size_in=dimensions)

            #initialize memory
            self.mem = nengo.Ensemble(n_neurons=n_neurons,
                                      dimensions=dimensions,
                                      intercepts=intercepts_mem,
                                      encoders=encoders,
                                      seed=seed)

            #build output layer

            rad = output_radius
            if fb > 0:
                # Output state with self-feedback.
                self.output_layer = spa.State(
                    dimensions,
                    vocab=vocab,
                    neurons_per_dimension=n_neurons_out,
                    label='retrieval_out',
                    seed=seed,
                    feedback=fb,
                    feedback_synapse=.05)
            else:
                self.output_layer = spa.State(
                    dimensions,
                    vocab=vocab,
                    neurons_per_dimension=n_neurons_out,
                    label='retrieval_out',
                    seed=seed)  #n_neurons_out,

            #set intercepts and radius on every ensemble of the output state
            for ens in self.output_layer.all_ensembles:
                ens.intercepts = intercepts_out
                ens.radius *= rad

            # Resample eval points (scaled down by rad) on connections that
            # feed the output node, to match the enlarged radii above.
            for c in self.output_layer.all_connections:
                if c.post_obj is self.output_layer.output:
                    #c.scale_eval_points=False
                    ens = c.pre_obj
                    n_eval_points = default_n_eval_points(
                        ens.n_neurons, ens.dimensions)
                    c.eval_points = ens.eval_points.sample(
                        n_eval_points, ens.dimensions) / rad

            #current forwarding to output layer.
            if fwd_multi > 0:

                #via node: mean memory activity, scaled by fwd_multi
                self.act_node_in = nengo.Node(None, size_in=1)
                nengo.Connection(self.mem.neurons,
                                 self.act_node_in,
                                 transform=np.ones((1, self.mem.n_neurons)) /
                                 self.mem.n_neurons * fwd_multi,
                                 synapse=None)

                # Sparse random (or restored) 0/1 forwarding matrices into
                # each output ensemble's neurons.
                density = fwd_dens
                conn_matrices = []
                for ens_out in self.output_layer.all_ensembles:
                    if fwd_matrices is None:
                        connection_matrix = scipy.sparse.random(
                            ens_out.n_neurons, 1, density=density)
                        # Binarize: keep only the sparsity pattern.
                        connection_matrix = connection_matrix != 0
                        nengo.Connection(self.act_node_in,
                                         ens_out.neurons,
                                         transform=connection_matrix.toarray())
                        conn_matrices.append(connection_matrix.toarray())
                    else:
                        nengo.Connection(self.act_node_in,
                                         ens_out.neurons,
                                         transform=fwd_matrices.pop(0))
                self.fwd_matrices = conn_matrices

            #encoder learning
            if voja2_rate is None or voja2_rate == 0:  #if no encoder learning, make default connection
                self.conn_in = nengo.Connection(self.input,
                                                self.mem,
                                                synapse=0)
            else:

                #voja 2 rule version
                print('Voja 2 - rule!')
                learning_rule_type = voja2_rule.Voja2(post_tau=voja_tau,
                                                      learning_rate=voja2_rate,
                                                      bias=voja2_bias)

                self.conn_in = nengo.Connection(
                    self.input,
                    self.mem,
                    learning_rule_type=learning_rule_type,
                    synapse=0)

            #decoder learning: memory neurons -> output layer input
            if pes_rate is not None and pes_rate > 0:

                self.conn_out = nengo.Connection(
                    self.mem.neurons,
                    self.output_layer.input,
                    transform=decoders,
                    learning_rule_type=nengo.PES(learning_rate=pes_rate))
            else:
                self.conn_out = nengo.Connection(self.mem.neurons,
                                                 self.output_layer.input,
                                                 transform=decoders)

            #if pes_rate is not None and pes_rate > 0:
            self.correct = nengo.Node(None, size_in=dimensions)

            # Error signal gate: passes (output - correct) through while the
            # last input element (stop_pes) is < 0.5, otherwise zeroes it.
            self.learn_control = nengo.Node(lambda t, x: x[:-1]
                                            if x[-1] < 0.5 else x[:-1] * 0,
                                            size_in=dimensions + 1)
            if pes_rate is not None and pes_rate > 0:
                nengo.Connection(
                    self.learn_control,
                    self.conn_out.learning_rule,
                )
            nengo.Connection(self.output_layer.output,
                             self.learn_control[:-1],
                             synapse=None)
            nengo.Connection(self.correct,
                             self.learn_control[:-1],
                             transform=-1,
                             synapse=None)
            self.stop_pes = nengo.Node(None, size_in=1)
            nengo.Connection(self.stop_pes,
                             self.learn_control[-1],
                             synapse=None)

            #hopfield learning/BCM
            if bcm_rate is not None:  #recurrent connection on memory neurons
                self.conn_hop = nengo.Connection(self.mem.neurons,
                                                 self.mem.neurons,
                                                 transform=hop_weights,
                                                 synapse=.05)

                if bcm_rate > 0:
                    self.conn_hop.learning_rule_type = bcm2_rule.BCM2(
                        learning_rate=bcm_rate,
                        theta_tau=bcm_theta,
                        max_weight=bcm_max_weight,
                        diagonal0=bcm_diagonal0)