print("%s+%s=%s" %(val[0], val[1], number_list[ans_val-1]))

# TESTING: make the first two questions and answers identical to the third
q_list[0] = q_list[2]
ans_list[0] = ans_list[2]
q_norm_list[0] = q_norm_list[2]
q_list[1] = q_list[2]
ans_list[1] = ans_list[2]
q_norm_list[1] = q_norm_list[2]

## Generate specialised vocabs
state_vocab = spa.Vocabulary(less_D)
state_vocab.parse("RUN+NONE")  # pre-populate the vocabulary with the RUN and NONE pointers

with nengo.Network(label="Root Net", seed=0) as model:
    env = create_adder_env(q_list, q_norm_list, ans_list, state_vocab.parse("NONE").v, vocab)

    with spa.SPA(vocabs=[vocab], label="Fast Net", seed=0) as fast_net:

        ## Generate hetero mem
        K = 400
        # c is usually calculated rather than hard-coded
        c = 0.51
        e = encoders(np.array(q_norm_list), K, rng)
        fast_net.het_mem = build_hetero_mem(D*2, D, e, c)
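        # build_hetero_mem presumably builds a D*2 -> D heteroassociative memory,
        # using the normalised question vectors in e as encoders and c as the
        # neurons' intercept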

        ## Calculate the error from the environment and use it to drive the decoder learning
        # Create the error population
        error = nengo.Ensemble(n_neurons*8, D)
        nengo.Connection(env.learning, error.neurons, transform=[[10.0]]*n_neurons*8,
                         synapse=None)
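        # In the usual Nengo pattern, this error ensemble would feed a PES learning
        # rule on the memory's output connection; env.learning, broadcast strongly
        # onto every error neuron above, presumably gates the error signal (and
        # hence the decoder learning) on and off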
            ans_list.append(
                vocab.parse(number_list[ans_val-1]).v
            )
            print("%s+%s=%s" %(val[0], val[1], number_list[ans_val-1]))

# TESTING: make the first question and answer identical to the fourth
q_list[0] = q_list[3]
ans_list[0] = ans_list[3]

## Generate specialised vocabs
state_vocab = spa.Vocabulary(less_D)
state_vocab.parse("RUN+NONE")

with nengo.Network(label="Root Net", seed=0) as model:
    env = create_adder_env(q_list, ans_list, state_vocab.parse("NONE").v, vocab, ans_dur=0.1)
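    # ans_dur presumably sets how long (in seconds) an answer is presented/held
    # by the environment before it moves on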

    with spa.SPA(vocabs=[vocab, state_vocab], label="Count Net", seed=0) as slow_net:
        slow_net.q1 = spa.State(D, vocab=vocab)
        slow_net.q2 = spa.State(D, vocab=vocab)

        slow_net.answer = spa.State(D, vocab=vocab)

        slow_net.op_state = MemNet(less_D, state_vocab, label="op_state")
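        # MemNet is presumably a gated working-memory module from this codebase;
        # op_state holds the current operation state (RUN or NONE)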

        input_keys = number_list[:-1]
        output_keys = number_list[1:]
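        # each input key maps to its successor (number_list[i] -> number_list[i+1]),
        # which gives the associative memory below its incrementing behaviour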

        ### Result circuit
        ## Incrementing memory
        # the input/output key arguments below are assumed from the lists defined above
        slow_net.res_assoc = spa.AssociativeMemory(input_vocab=vocab, output_vocab=vocab,
                                                   input_keys=input_keys,
                                                   output_keys=output_keys)

Example #3

import nengo
import numpy as np
import ipdb

from adder_env import create_adder_env
from constants import *

# four one-hot question vectors, each paired with a two-dimensional +/-1 answer
q_list = [(0, 0, 0, 1), (0, 0, 1, 0), (0, 1, 0, 0), (1, 0, 0, 0)]
ans_list = [(-1, 1), (1, -1), (1, 1), (-1, -1)]

with nengo.Network(label="test") as model:
    env = create_adder_env(q_list, ans_list, (1,))
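    # the third argument presumably stands in for the op-state vector that the
    # full models pass as state_vocab.parse("NONE").v; here a 1-D (1,) suffices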

    # questions and op_state are given at the correct interval
    # and are swapped once an answer is given
    q_node = nengo.Node(size_in=D * 2)
    op_node = nengo.Node(size_in=1)

    # input is given constantly throughout
    in_node = nengo.Node(size_in=D * 2)
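
    # ans_func presumably mimics the answer signal: silent at first, then each
    # answer vector is presented for ~0.35 s with a short gap in between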

    def ans_func(t):
        if t < 0.1:
            return (0, 0)
        elif t < 0.45:
            return (-1, 1)
        elif t < 0.6:
            return (0, 0)
        elif t < 0.95:
            return (1, -1)
        elif t < 1.0: