Example #1
def test_constructed_objects_are_accessible():
    with spa.Network() as model:
        model.config[spa.State].vocab = 16
        model.state1 = spa.State()
        model.state2 = spa.State()
        model.state3 = spa.State()

        with spa.ActionSelection() as actions:
            spa.ifmax(spa.dot(model.state1, spa.sym.A),
                      model.state3 >> model.state2)
            spa.ifmax(0.5, spa.sym.B >> model.state2)
        bg = actions.bg
        thalamus = actions.thalamus

        assert isinstance(thalamus.gates[0], nengo.Ensemble)
        assert isinstance(thalamus.gate_in_connections[0], nengo.Connection)
        assert isinstance(thalamus.gate_out_connections[0], nengo.Connection)
        assert isinstance(thalamus.channels[0], spa.State)
        assert isinstance(thalamus.channel_out_connections[0],
                          nengo.Connection)

        assert isinstance(thalamus.fixed_connections[1], nengo.Connection)

        assert thalamus.bg_connection.pre is bg.output
        assert thalamus.bg_connection.post is thalamus.input
Example #2
def test_errors():
    # motor does not exist
    with pytest.raises(AttributeError):
        with spa.Network() as model:
            model.vision = spa.State(vocab=16)
            with spa.ActionSelection():
                spa.ifmax(0.5, spa.sym.A >> model.motor)
Example #3
def test_action_selection_is_not_built_on_exception():
    with spa.Network():
        state1 = spa.State(16)
        state2 = spa.State(32)
        with pytest.raises(SpaTypeError):
            with ActionSelection() as action_sel:
                spa.ifmax(1.0, state1 >> state2)
    assert not action_sel.built
Example #4
def test_action_selection_keys_corner_cases():
    with spa.Network():
        with ActionSelection() as action_sel:
            pass
    assert list(action_sel.keys()) == []

    with spa.Network():
        with ActionSelection() as action_sel:
            spa.ifmax(0.0)
    assert list(action_sel.keys()) == [0]
Example #5
def test_constructed_input_connections_are_accessible():
    with spa.Network() as model:
        model.config[spa.State].vocab = 16
        model.state1 = spa.State()
        model.state2 = spa.State()

        with spa.ActionSelection() as actions:
            spa.ifmax(spa.dot(model.state1, spa.sym.A), spa.sym.A >> model.state2)
        bg = actions.bg

        assert isinstance(bg.input_connections[0], nengo.Connection)
Example #6
def test_routing_recurrency_compilation(Simulator, seed):
    model = spa.Network(seed=seed)
    model.config[spa.State].vocab = 2
    model.config[spa.State].subdimensions = 2
    with model:
        model.buff1 = spa.State(label="buff1")
        model.buff2 = spa.State(label="buff2")
        with spa.ActionSelection():
            spa.ifmax(0.5, model.buff1 >> model.buff2, model.buff2 >> model.buff1)

    with Simulator(model) as sim:
        assert sim
Example #7
def test_action_selection_is_side_effect_free_if_exception_is_raised():
    with spa.Network():
        state_a = spa.State(32)
        state_b = spa.State(32)
        state_c = spa.State(64)

        with pytest.raises(SpaTypeError):
            with ActionSelection():
                spa.ifmax(1, state_a >> state_b, state_a >> state_c)

        with ActionSelection():
            pass
Example #8
def test_access_thal_and_bg_objects():
    with spa.Network() as m:
        m.a = spa.Scalar()
        m.b = spa.Scalar()

        m.c = spa.Scalar()
        m.d = spa.Scalar()

        with spa.ActionSelection() as actions:
            spa.ifmax(m.a, 0 >> m.c)
            spa.ifmax(m.b, 1 >> m.c)

    assert isinstance(actions.bg, spa.BasalGanglia)
    assert isinstance(actions.thalamus, spa.Thalamus)
Example #9
def test_naming_of_actions():
    with spa.Network():
        state1 = spa.State(16)
        state2 = spa.State(16)
        with ActionSelection() as action_sel:
            u0 = spa.ifmax("name0", 0.0, state1 >> state2)
            u1 = spa.ifmax(0.0, state1 >> state2)
            u2 = spa.ifmax("name2", 0.0, state1 >> state2)

    assert tuple(action_sel.keys()) == ("name0", 1, "name2")
    assert action_sel["name0"] is u0
    assert action_sel["name2"] is u2
    for i, u in enumerate((u0, u1, u2)):
        assert action_sel[i] is u
Example #10
def test_action_selection(Simulator, rng):
    vocab = spa.Vocabulary(64)
    vocab.populate("A; B; C; D; E; F")

    with spa.Network() as model:
        state = spa.Transcode(lambda t: "ABCDEF"[min(5, int(t / 0.5))],
                              output_vocab=vocab)
        scalar = spa.Scalar()
        pointer = spa.State(vocab)
        with ActionSelection():
            spa.ifmax(spa.dot(state, PointerSymbol("A")), 0.5 >> scalar)
            spa.ifmax(spa.dot(state, PointerSymbol("B")),
                      PointerSymbol("B") >> pointer)
            spa.ifmax(spa.dot(state, PointerSymbol("C")), state >> pointer)
            d_utility = spa.ifmax(0, PointerSymbol("D") >> pointer)
            spa.ifmax(
                spa.dot(state, PointerSymbol("E")),
                0.25 >> scalar,
                PointerSymbol("E") >> pointer,
            )
        nengo.Connection(nengo.Node(lambda t: 1.5 < t <= 2.0), d_utility)
        p_scalar = nengo.Probe(scalar.output, synapse=0.03)
        p_pointer = nengo.Probe(pointer.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(3.0)

    t = sim.trange()
    assert_allclose(sim.data[p_scalar][(0.3 < t) & (t <= 0.5)], 0.5, atol=0.2)
    assert_sp_close(sim.trange(),
                    sim.data[p_pointer],
                    vocab["B"],
                    skip=0.8,
                    duration=0.2)
    assert_sp_close(sim.trange(),
                    sim.data[p_pointer],
                    vocab["C"],
                    skip=1.3,
                    duration=0.2)
    assert_sp_close(sim.trange(),
                    sim.data[p_pointer],
                    vocab["D"],
                    skip=1.8,
                    duration=0.2)
    assert_allclose(sim.data[p_scalar][(2.3 < t) & (t <= 2.5)], 0.25, atol=0.2)
    assert_sp_close(sim.trange(),
                    sim.data[p_pointer],
                    vocab["E"],
                    skip=2.3,
                    duration=0.2)
Example #11
def test_routing(Simulator, seed, plt):
    model = spa.Network(seed=seed)
    model.config[spa.State].vocab = 3
    model.config[spa.State].subdimensions = 3
    with model:
        model.ctrl = spa.State(16, subdimensions=16, label='ctrl')

        def input_func(t):
            if t < 0.2:
                return 'A'
            elif t < 0.4:
                return 'B'
            else:
                return 'C'

        model.input = spa.Transcode(input_func, output_vocab=16)

        model.buff1 = spa.State(label='buff1')
        model.buff2 = spa.State(label='buff2')
        model.buff3 = spa.State(label='buff3')

        node1 = nengo.Node([0, 1, 0])
        node2 = nengo.Node([0, 0, 1])

        nengo.Connection(node1, model.buff1.input)
        nengo.Connection(node2, model.buff2.input)

        model.input >> model.ctrl
        with spa.ActionSelection():
            spa.ifmax(spa.dot(model.ctrl, spa.sym.A),
                      model.buff1 >> model.buff3)
            spa.ifmax(spa.dot(model.ctrl, spa.sym.B),
                      model.buff2 >> model.buff3)
            spa.ifmax(spa.dot(model.ctrl, spa.sym.C),
                      model.buff1 * model.buff2 >> model.buff3)

        buff3_probe = nengo.Probe(model.buff3.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.6)

    data = sim.data[buff3_probe]

    plt.plot(sim.trange(), data)

    valueA = np.mean(data[150:200], axis=0)  # should be [0, 1, 0]
    valueB = np.mean(data[350:400], axis=0)  # should be [0, 0, 1]
    valueC = np.mean(data[550:600], axis=0)  # should be [1, 0, 0]

    assert valueA[0] < 0.2
    assert valueA[1] > 0.7
    assert valueA[2] < 0.2

    assert valueB[0] < 0.2
    assert valueB[1] < 0.2
    assert valueB[2] > 0.7

    assert valueC[0] > 0.7
    assert valueC[1] < 0.2
    assert valueC[2] < 0.2
Example #12
def test_nondefault_routing(Simulator, seed):
    m = spa.Network(seed=seed)
    m.config[spa.State].vocab = 3
    m.config[spa.State].subdimensions = 3
    with m:
        m.ctrl = spa.State(16, subdimensions=16, label="ctrl")

        def input_func(t):
            if t < 0.2:
                return "A"
            elif t < 0.4:
                return "B"
            else:
                return "C"

        m.input = spa.Transcode(input_func, output_vocab=16)

        m.buff1 = spa.State(label="buff1")
        m.buff2 = spa.State(label="buff2")
        m.cmp = spa.Compare(3)

        node1 = nengo.Node([0, 1, 0])
        node2 = nengo.Node([0, 0, 1])

        nengo.Connection(node1, m.buff1.input)
        nengo.Connection(node2, m.buff2.input)

        m.input >> m.ctrl
        with spa.ActionSelection():
            spa.ifmax(
                spa.dot(m.ctrl, spa.sym.A),
                m.buff1 >> m.cmp.input_a,
                m.buff1 >> m.cmp.input_b,
            )
            spa.ifmax(
                spa.dot(m.ctrl, spa.sym.B),
                m.buff1 >> m.cmp.input_a,
                m.buff2 >> m.cmp.input_b,
            )
            spa.ifmax(
                spa.dot(m.ctrl, spa.sym.C),
                m.buff2 >> m.cmp.input_a,
                m.buff2 >> m.cmp.input_b,
            )

        compare_probe = nengo.Probe(m.cmp.output, synapse=0.03)

    with Simulator(m) as sim:
        sim.run(0.6)

    similarity = sim.data[compare_probe]

    valueA = np.mean(similarity[150:200], axis=0)  # should be [1]
    valueB = np.mean(similarity[350:400], axis=0)  # should be [0]
    valueC = np.mean(similarity[550:600], axis=0)  # should be [1]

    assert valueA > 0.6
    assert valueB < 0.3
    assert valueC > 0.6
Example #13
def test_dummy_action():
    with spa.Network():
        with spa.ActionSelection():
            spa.ifmax(0)
            spa.ifmax("named-dummy", 0)

            with pytest.raises(ValueError):
                spa.ifmax("must-provide-condition")
Example #14
    def __init__(self,
                 vocab,
                 label,
                 n_neurons_per_dim,
                 rng,
                 BG_bias,
                 BG_thr,
                 feedback,
                 sender=True,
                 receiver=True,
                 **kwargs):

        super(ADDProcessor, self).__init__(input_vocab=vocab,
                                           output_vocab=vocab,
                                           label=label,
                                           n_neurons_per_dim=n_neurons_per_dim,
                                           mapping=['D2', 'D4', 'D6', 'D8'],
                                           threshold=0,
                                           feedback=feedback,
                                           sender=sender,
                                           receiver=receiver,
                                           **kwargs)

        with self:
            # Domain specific processing
            self.bind = spa.Bind(vocab, n_neurons_per_dim['Bind'])

            with spa.Network() as self.result_controller:
                self.result_controller.labels = []
                with spa.ActionSelection() as self.result_controller.AS:
                    self.result_controller.labels.append("D0 -> D8")
                    spa.ifmax(self.result_controller.labels[-1],
                              BG_bias + spa.dot(s.D0, self.bind.output),
                              s.D8 >> self.AM.input)
                    self.result_controller.labels.append("D10 -> D2")
                    spa.ifmax(self.result_controller.labels[-1],
                              BG_bias + spa.dot(s.D10, self.bind.output),
                              s.D2 >> self.AM.input)
                    self.result_controller.labels.append("no cycle")
                    spa.ifmax(self.result_controller.labels[-1],
                              BG_bias + BG_thr, self.bind >> self.AM.input)

            nengo.Connection(self.input.output, self.bind.input_left)
Example #15
def test_thalamus(Simulator, plt, seed):
    with spa.Network(seed=seed) as m:
        m.vision = spa.State(vocab=16, neurons_per_dimension=80)
        m.motor = spa.State(vocab=16, neurons_per_dimension=80)

        with spa.ActionSelection():
            spa.ifmax(spa.dot(m.vision, spa.sym.A), spa.sym.A >> m.motor)
            spa.ifmax(spa.dot(m.vision, spa.sym.B), m.vision >> m.motor)
            spa.ifmax(spa.dot(m.vision, ~spa.sym.A), ~m.vision >> m.motor)

        def input_f(t):
            if t < 0.1:
                return 'A'
            elif t < 0.3:
                return 'B'
            elif t < 0.5:
                return '~A'
            else:
                return '0'

        m.input = spa.Transcode(input_f, output_vocab=16)
        m.input >> m.vision

        p = nengo.Probe(m.motor.output, synapse=0.03)

    with Simulator(m) as sim:
        sim.run(0.5)

    t = sim.trange()
    data = m.motor.vocab.dot(sim.data[p].T)

    plt.plot(t, data.T)

    # Action 1
    assert data[0, t == 0.1] > 0.8
    assert data[1, t == 0.1] < 0.2
    # Action 2
    assert data[0, t == 0.3] < 0.2
    assert data[1, t == 0.3] > 0.8
    # Action 3
    assert data[0, t == 0.5] > 0.8
    assert data[1, t == 0.5] < 0.2
Example #16
        else:
            return "0"


with spa.Network() as model:
    
    utility = nengo.Node([0])
    
    trigger = TwoStepsTrigger(50)

    
    output_cmd = spa.Transcode(input_vocab=vocab, output_vocab=vocab)
    
    with spa.ActionSelection():
        spa.ifmax(
            utility,
            1 >> trigger.input
        )
        
        spa.ifmax(
            .5,
        )

    POSITION = WM(100, vocab, label='position')
    INCREMENT = WM(100, vocab, label='added')
    nengo.Connection(POSITION.output, INCREMENT.input, transform=vocab.parse('D1').get_binding_matrix())
    nengo.Connection(INCREMENT.output, POSITION.input)

    with spa.ActionSelection():
        
        spa.ifmax(trigger.output[0], 
            1 >> POSITION.gate,
Example #17
    def evaluate(self, p, plt):
        stimuli = []
        for i in range(p.n_stims):
            stimuli.append(('NEUTRAL%d' % i, 'COLOR%d' % i, 'neutral'))
        for i in range(p.n_stims):
            stimuli.append(('COLOR%d' % i, 'COLOR%d' % i, 'congruent'))
        for i in range(p.n_stims):
            stimuli.append(('COLOR%d' % ((i + 1) % p.n_stims), 'COLOR%d' % i,
                            'incongruent'))

        vocab = spa.Vocabulary(p.D,
                               pointer_gen=np.random.RandomState(seed=p.seed))
        for i in range(p.n_stims):
            vocab.populate('NEUTRAL%d' % i)
            vocab.populate('COLOR%d' % i)
        vocab.populate('COLOR; WORD')

        model = spa.Network(seed=p.seed)
        with model:

            def word_func(t):
                index = int(t / (p.t_stim + p.t_isi))
                t = t % (p.t_stim + p.t_isi)
                if t < p.t_isi:
                    return '0'
                else:
                    return stimuli[index % len(stimuli)][0]

            def color_func(t):
                index = int(t / (p.t_stim + p.t_isi))
                t = t % (p.t_stim + p.t_isi)
                if t < p.t_isi:
                    return '0'
                else:
                    return stimuli[index % len(stimuli)][1]

            stim_w = spa.Transcode(word_func, output_vocab=vocab)
            stim_c = spa.Transcode(color_func, output_vocab=vocab)
            stim_a = spa.Transcode('(1-%g)*COLOR + %g*WORD' %
                                   (p.attention_error, p.attention_error),
                                   output_vocab=vocab)

            wm = spa.State(vocab)

            (spa.sym.COLOR * stim_c + spa.sym.WORD * stim_w) * ~stim_a >> wm

            if p.auto_direct != 0:
                stim_w * p.auto_direct >> wm

            speech = spa.State(vocab)

            if p.decision == 'bg':
                with spa.ActionSelection() as action_sel:
                    for i in range(p.n_stims):
                        spa.ifmax(spa.dot(wm, spa.sym('COLOR%d' % i)),
                                  spa.sym('COLOR%d' % i) >> speech)
                    spa.ifmax(0.35, spa.sym('0') >> speech)
            elif p.decision == 'ia':

                def reset_func(t):
                    index = int(t / (p.t_stim + p.t_isi))
                    t = t % (p.t_stim + p.t_isi)
                    if t < p.t_isi:
                        return 1
                    else:
                        return 0

                reset = nengo.Node(reset_func)

                decision = spa.IAAssocMem(
                    vocab,
                    mapping=['COLOR%d' % i for i in range(p.n_stims)],
                    accum_timescale=p.ia_accum_timescale)
                wm >> decision

                nengo.Connection(reset, decision.input_reset)
            else:
                raise Exception('Unknown decision param: %s' % p.decision)
            if not p.use_neurons:
                for ens in model.all_ensembles:
                    ens.neuron_type = nengo.Direct()

            p_output = nengo.Probe(wm.output, synapse=0.02)
            p_correct = nengo.Probe(stim_c.output)
            p_speech = nengo.Probe(speech.output, synapse=0.02)

            if p.decision == 'bg':
                p_act = nengo.Probe(action_sel.thalamus.output, synapse=0.01)
            elif p.decision == 'ia':
                p_act = nengo.Probe(decision.selection.output, synapse=0.01)

        sim = nengo.Simulator(model, progress_bar=p.verbose)
        with sim:
            sim.run(p.n_stims * (p.t_isi + p.t_stim) * 3)

        v = np.einsum('ij,ij->i', sim.data[p_correct], sim.data[p_output])
        steps = int((p.t_isi + p.t_stim) / sim.dt)
        scores = v[steps - 2::steps]

        data = sim.data[p_act]
        rts = []
        accuracy = []
        for condition in range(3):
            for i in range(p.n_stims):
                t_start = (p.t_isi + p.t_stim) * i + p.t_isi + condition * (
                    p.t_isi + p.t_stim) * p.n_stims
                t_end = t_start + p.t_stim
                d = data[int(t_start / sim.dt):int(t_end / sim.dt), i]

                correct = np.max(d) > p.output_threshold
                if correct:
                    rt = np.where(d > p.output_threshold)[0][0] * sim.dt
                else:
                    rt = None

                rts.append(rt)
                accuracy.append(correct)

        if plt:
            plt.subplot(2, 1, 1)
            plt.plot(sim.trange(), sim.data[p_output].dot(vocab.vectors.T))
            plt.subplot(2, 1, 2)
            if p.decision == 'bg':
                plt.plot(sim.trange(), sim.data[p_act][:, :-1])
            elif p.decision == 'ia':
                plt.plot(sim.trange(), sim.data[p_act])

            for i in range(p.n_stims * 3):
                plt.axvline(i * (p.t_isi + p.t_stim) + p.t_isi, ls='--')

        acc_neutral = sum([0.0 if r is None else 1.0
                           for r in rts[:p.n_stims]]) / p.n_stims
        acc_congruent = sum(
            [0.0 if r is None else 1.0
             for r in rts[p.n_stims:p.n_stims * 2]]) / p.n_stims
        acc_incongruent = sum(
            [0.0 if r is None else 1.0
             for r in rts[p.n_stims * 2:]]) / p.n_stims

        if acc_neutral == 0:
            rt_neutral = None
        else:
            rt_neutral = np.mean([r for r in rts[:p.n_stims] if r is not None])

        if acc_congruent == 0:
            rt_congruent = None
        else:
            rt_congruent = np.mean(
                [r for r in rts[p.n_stims:p.n_stims * 2] if r is not None])
        if acc_incongruent == 0:
            rt_incongruent = None
        else:
            rt_incongruent = np.mean(
                [r for r in rts[p.n_stims * 2:] if r is not None])

        return dict(
            scores=scores,
            stimuli=stimuli,
            neutral=np.mean(scores[:p.n_stims]),
            congruent=np.mean(scores[p.n_stims:p.n_stims * 2]),
            incongruent=np.mean(scores[p.n_stims * 2:]),
            rts=rts,
            accuracy=accuracy,
            rt_neutral=rt_neutral,
            rt_congruent=rt_congruent,
            rt_incongruent=rt_incongruent,
            acc_neutral=acc_neutral,
            acc_congruent=acc_congruent,
            acc_incongruent=acc_incongruent,
        )
Example #18
     # the vector associated with the currently requested vector
     model.memory.recall = spa.State(vocab=vocab_dim)
     
 with nengo.Network('motor') as model.motor:
     # tells the motor system which disk to move (A, B, C)
     model.motor.move_disk = spa.State(vocab=vocab_dim)
     # tells the motor system where to move the disk to (A, B, C)
     model.motor.move_peg = spa.State(vocab=vocab_dim)
     
 # Table E.2 is used to define the spa rules for the Tower of Hanoi model
 with nengo.Network('TOH Rules') as model.rules:
     with spa.ActionSelection() as model.rules.action_sel:
         spa.ifmax('LookDone',
             -spa.dot(model.buffer.focus, spa.sym.D0) +
             spa.dot(model.buffer.goal, model.buffer.focus) + 
             spa.dot(model.sensory.goal_current, model.buffer.goal_target) + 
             spa.dot(model.buffer.state, spa.sym.STORE),
             model.buffer.goal * spa.sym.NEXT >> model.buffer.focus,
             model.buffer.goal * spa.sym.NEXT >> model.buffer.goal,
             model.sensory.goal_final >> model.buffer.goal_target)
         spa.ifmax('LookNotDone',
             -spa.dot(model.buffer.focus, spa.sym.D0) +
             spa.dot(model.buffer.goal, model.buffer.focus) + 
             -spa.dot(model.sensory.goal_current, model.buffer.goal_target) + 
             spa.dot(model.buffer.state, spa.sym.STORE),
             model.buffer.goal * spa.sym.NEXT >> model.buffer.focus)
         spa.ifmax('InTheWay1', 
             -spa.dot(model.buffer.focus, model.buffer.goal) + 
             spa.dot(model.sensory.focus_peg, model.sensory.goal_current) + 
             -spa.dot(model.sensory.focus_peg, model.buffer.goal_target) + 
             -spa.dot(model.buffer.state, spa.sym.STORE),
             model.buffer.goal * spa.sym.NEXT >> model.buffer.focus)
Example #19
import nengo_spa as spa
s = spa.sym

model = spa.Network()
with model:
    D = 32
    memory = spa.State(D, label='memory', feedback=1)

    with spa.ActionSelection():
        spa.ifmax(memory @ s.A, s.B >> memory)
        spa.ifmax(memory @ s.B, s.C >> memory)
        spa.ifmax(memory @ s.C, s.D >> memory)
        spa.ifmax(memory @ s.D, s.E >> memory)
        spa.ifmax(memory @ s.E, s.A >> memory)

print('number of neurons:', model.n_neurons)
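
A minimal follow-up sketch (not part of the original example) for actually running this sequence model: the kick-start stimulus, probe, synapse, and run time are illustrative assumptions, and nengo must be installed alongside nengo_spa.

import nengo

with model:
    # briefly present A to start the cycle, then release the input
    kick = spa.Transcode(lambda t: 'A' if t < 0.1 else '0',
                         output_vocab=memory.vocab)
    kick >> memory
    p_memory = nengo.Probe(memory.output, synapse=0.03)

with nengo.Simulator(model) as sim:
    sim.run(1.0)

# similarity of the final memory state to each vocabulary pointer
print(spa.similarity(sim.data[p_memory][-1], memory.vocab))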
Example #20
def toh_agent():
    model = nengo.Network('ToH Agent')
    with model:
        env = nengo.Node()
        # Table E.1 is used to define the spa states and subnetworks
        # of the cortical elements for the Tower of Hanoi model
        with nengo.Network('buffer') as model.buffer:
            # used to control the different stages of the problem-solving algorithm
            model.buffer.state = spa.State(vocab=vocab_dim)
            # stores the disk currently being attended to (D0, D1, D2, D3)
            model.buffer.focus = spa.State(vocab=vocab_dim)
            # stores the disk we are trying to move (D0, D1, D2, D3)
            model.buffer.goal = spa.State(vocab=vocab_dim)
            # stores the location we want to move the goal disk to (A, B, C)
            model.buffer.goal_target = spa.State(vocab=vocab_dim)

        with nengo.Network('sensory') as model.sensory:
            # automatically contains the location of the focus disk (A, B, C)
            model.sensory.focus_peg = spa.State(vocab=vocab_dim)
            # automatically contains the location of the goal disk (A, B, C)
            model.sensory.goal_current = spa.State(vocab=vocab_dim)
            # automatically contains the final desired location of the goal disk (A, B, C)
            model.sensory.goal_final = spa.State(vocab=vocab_dim)
            # automatically contains the largest visible disk (D3)
            model.sensory.largest = spa.State(vocab=vocab_dim)
            # automatically contains DONE if the motor action is finished
            model.sensory.motor = spa.State(vocab=vocab_dim)

        with nengo.Network('memory') as model.memory:
            # stores an association between mem1 and mem2 in working memory
            model.memory.mem_1 = spa.State(vocab=vocab_dim)
            # stores an association between mem1 and mem2 in working memory
            model.memory.mem_2 = spa.State(vocab=vocab_dim)
            # indicates one element of a pair to attempt to recall from working memory
            model.memory.request = spa.State(vocab=vocab_dim)
            # the vector associated with the currently requested vector
            model.memory.recall = spa.State(vocab=vocab_dim)

        with nengo.Network('motor') as model.motor:
            # tells the motor system which disk to move (A, B, C)
            model.motor.move_disk = spa.State(vocab=vocab_dim)
            # tells the motor system where to move the disk to (A, B, C)
            model.motor.move_peg = spa.State(vocab=vocab_dim)

        # Table E.2 is used to define the spa rules for the Tower of Hanoi model
        with nengo.Network('TOH Rules') as model.rules:
            with spa.ActionSelection() as model.rules.action_sel:
                spa.ifmax(
                    'LookDone', -spa.dot(model.buffer.focus, spa.sym.D0) +
                    spa.dot(model.buffer.goal, model.buffer.focus) + spa.dot(
                        model.sensory.goal_current, model.buffer.goal_target) +
                    spa.dot(model.buffer.state, spa.sym.STORE),
                    model.buffer.goal * spa.sym.NEXT >> model.buffer.focus,
                    model.buffer.goal * spa.sym.NEXT >> model.buffer.goal,
                    model.sensory.goal_final >> model.buffer.goal_target)
                spa.ifmax(
                    'LookNotDone', -spa.dot(model.buffer.focus, spa.sym.D0) +
                    spa.dot(model.buffer.goal, model.buffer.focus) + -spa.dot(
                        model.sensory.goal_current, model.buffer.goal_target) +
                    spa.dot(model.buffer.state, spa.sym.STORE),
                    model.buffer.goal * spa.sym.NEXT >> model.buffer.focus)
                spa.ifmax(
                    'InTheWay1',
                    -spa.dot(model.buffer.focus, model.buffer.goal) + spa.dot(
                        model.sensory.focus_peg, model.sensory.goal_current) +
                    -spa.dot(model.sensory.focus_peg, model.buffer.goal_target)
                    + -spa.dot(model.buffer.state, spa.sym.STORE),
                    model.buffer.goal * spa.sym.NEXT >> model.buffer.focus)
                spa.ifmax(
                    'InTheWay2',
                    -spa.dot(model.buffer.focus, model.buffer.goal) + -spa.dot(
                        model.sensory.focus_peg, model.sensory.goal_current) +
                    spa.dot(model.sensory.focus_peg, model.buffer.goal_target)
                    + -spa.dot(model.buffer.state, spa.sym.STORE),
                    model.buffer.goal * spa.sym.NEXT >> model.buffer.focus)
                spa.ifmax(
                    'NotInTheWay',
                    -spa.dot(model.buffer.focus, model.buffer.goal) + -spa.dot(
                        model.sensory.focus_peg, model.sensory.goal_current) +
                    -spa.dot(model.sensory.focus_peg, model.buffer.goal_target)
                    + -spa.dot(model.buffer.focus, spa.sym.D0),
                    model.buffer.goal * spa.sym.NEXT >> model.buffer.focus)

                spa.ifmax(
                    'MoveD0',
                    spa.dot(model.buffer.focus, spa.sym.D0) +
                    spa.dot(model.buffer.goal, spa.sym.D0) + -spa.dot(
                        model.sensory.goal_current, model.buffer.goal_target),
                    spa.sym.D0 >> model.motor.move_disk,
                    model.buffer.goal_target >> model.motor.move_peg)
                spa.ifmax(
                    'MoveGoal',
                    spa.dot(model.buffer.focus, spa.sym.D0) +
                    -spa.dot(model.buffer.goal, spa.sym.D0) +
                    -spa.dot(model.sensory.focus_peg, model.buffer.goal_target)
                    + -spa.dot(model.buffer.goal_target,
                               model.sensory.goal_current) +
                    -spa.dot(model.sensory.focus_peg,
                             model.sensory.goal_current),
                    model.buffer.goal >> model.motor.move_disk,
                    model.buffer.goal_target >> model.motor.move_peg)
                spa.ifmax(
                    'MoveDone',
                    spa.dot(model.sensory.motor, spa.sym.DONE) +
                    -spa.dot(model.buffer.goal, model.sensory.largest) +
                    -spa.dot(model.buffer.state, spa.sym.RECALL),
                    spa.sym.RECALL >> model.buffer.state,
                    model.buffer.goal * ~spa.sym.NEXT >> model.buffer.goal)
                spa.ifmax(
                    'MoveDone2',
                    spa.dot(model.sensory.motor, spa.sym.DONE) +
                    spa.dot(model.buffer.goal, model.sensory.largest) +
                    -spa.dot(model.buffer.state, spa.sym.RECALL),
                    model.sensory.largest * ~spa.sym.NEXT >>
                    model.buffer.focus,
                    model.sensory.largest * ~spa.sym.NEXT >> model.buffer.goal,
                    model.sensory.goal_final >> model.buffer.goal_target,
                    spa.sym.HANOI >> model.buffer.state)

                spa.ifmax(
                    'Store',
                    spa.dot(model.buffer.state, spa.sym.STORE) +
                    -spa.dot(model.memory.recall, model.buffer.goal_target),
                    model.buffer.goal >> model.memory.mem_1,
                    model.buffer.goal_target >> model.memory.mem_2,
                    model.buffer.goal >> model.memory.request)
                spa.ifmax(
                    'StoreDone',
                    spa.dot(model.buffer.state, spa.sym.STORE) +
                    spa.dot(model.memory.recall, model.buffer.goal_target),
                    spa.sym.FIND >> model.buffer.state)

                spa.ifmax(
                    'FindFree1',
                    spa.dot(model.buffer.state, spa.sym.FIND) +
                    -spa.dot(model.buffer.focus, model.buffer.goal) + spa.dot(
                        model.sensory.focus_peg, model.sensory.goal_current) +
                    -spa.dot(model.sensory.focus_peg,
                             model.buffer.goal_target),
                    spa.sym.A + spa.sym.B + spa.sym.C - model.sensory.focus_peg
                    - model.buffer.goal_target >> model.buffer.goal_target,
                    model.buffer.focus >> model.buffer.goal,
                    spa.sym.HANOI >> model.buffer.state)
                spa.ifmax(
                    'FindFree2',
                    spa.dot(model.buffer.state, spa.sym.FIND) +
                    -spa.dot(model.buffer.focus, model.buffer.goal) + -spa.dot(
                        model.sensory.focus_peg, model.sensory.goal_current) +
                    spa.dot(model.sensory.focus_peg, model.buffer.goal_target),
                    spa.sym.A + spa.sym.B + spa.sym.C -
                    model.sensory.goal_current - model.buffer.goal_target >>
                    model.buffer.goal_target,
                    model.buffer.focus >> model.buffer.goal,
                    spa.sym.HANOI >> model.buffer.state)

                spa.ifmax(
                    'Recall',
                    spa.dot(model.buffer.state, spa.sym.RECALL) +
                    -spa.dot(model.memory.recall,
                             spa.sym.A + spa.sym.B + spa.sym.C),
                    model.buffer.goal >> model.memory.request)
                spa.ifmax(
                    'RecallDo',
                    spa.dot(model.buffer.state, spa.sym.RECALL) +
                    spa.dot(model.memory.recall,
                            spa.sym.A + spa.sym.B + spa.sym.C) +
                    -spa.dot(model.memory.recall, model.sensory.goal_current),
                    spa.sym.HANOI >> model.buffer.state,
                    model.buffer.goal >> model.buffer.focus,
                    4 * model.memory.recall >> model.buffer.goal_target)
                spa.ifmax(
                    'RecallNext',
                    spa.dot(model.buffer.state, spa.sym.RECALL) +
                    spa.dot(model.memory.recall,
                            spa.sym.A + spa.sym.B + spa.sym.C) +
                    spa.dot(model.memory.recall, model.sensory.goal_current),
                    spa.sym.HANOI >> model.buffer.state,
                    model.buffer.goal * ~spa.sym.NEXT >> model.buffer.goal,
                    model.buffer.goal >> model.memory.request)
    return model
Example #21
import nengo_spa as spa
s = spa.sym

model = spa.Network()
with model:
    D = 32
    vision = spa.State(D, label='vision')
    speech = spa.State(D, label='speech')

    with spa.ActionSelection():
        spa.ifmax(vision @ s.DOG, s.BARK >> speech)
        spa.ifmax(vision @ s.CAT, s.MEOW >> speech)
        spa.ifmax(vision @ s.RAT, s.SQUEAK >> speech)
        spa.ifmax(vision @ s.COW, s.MOO >> speech)
        spa.ifmax(0.5, s.UNKNOWN >> speech)

print('number of neurons:', model.n_neurons)
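
A usage sketch for this snippet (again not part of the original): drive vision with a fixed stimulus and decode what the rules route to speech. The Transcode input, probe, synapse, and run time are illustrative assumptions; with DOG presented, the BARK pointer is expected to dominate.

import nengo

with model:
    # present DOG to vision for the whole run
    stimulus = spa.Transcode(lambda t: 'DOG', output_vocab=vision.vocab)
    stimulus >> vision
    p_speech = nengo.Probe(speech.output, synapse=0.03)

with nengo.Simulator(model) as sim:
    sim.run(0.5)

# similarity of the final speech state to each known pointer
print(spa.similarity(sim.data[p_speech][-1], speech.vocab))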
Example #22
def test_scalar_product():
    with spa.Network() as model:
        model.scalar = spa.Scalar()
        with spa.ActionSelection():
            spa.ifmax(model.scalar * model.scalar, 1 >> model.scalar)
Example #23
def test_spa_connections_to_utilities():
    with spa.Network():
        with ActionSelection():
            utility = spa.ifmax(0.0)
        0.1 >> utility
Example #24
def test_basal_ganglia(Simulator, seed, plt):
    d = 64
    with spa.Network(seed=seed) as m:
        m.vision = spa.State(vocab=d)
        m.motor = spa.State(vocab=d)
        m.compare = spa.Compare(vocab=d)

        def input(t):
            if t < 0.1:
                return '0'
            elif t < 0.2:
                return 'CAT'
            elif t < 0.3:
                return 'DOG*~CAT'
            elif t < 0.4:
                return 'PARROT'
            elif t < 0.5:
                return 'MOUSE'
            else:
                return '0'

        m.encode = spa.Transcode(input, output_vocab=d)

        # test all acceptable condition formats
        with spa.ActionSelection() as actions:
            spa.ifmax(0.5, spa.sym.A >> m.motor)
            spa.ifmax(spa.dot(m.vision, spa.sym.CAT), spa.sym.B >> m.motor)
            spa.ifmax(spa.dot(m.vision * spa.sym.CAT, spa.sym.DOG),
                      spa.sym.C >> m.motor)
            spa.ifmax(2 * spa.dot(m.vision, spa.sym.CAT * 0.5),
                      spa.sym.D >> m.motor)
            spa.ifmax(
                spa.dot(m.vision, spa.sym.CAT) + 0.5 -
                spa.dot(m.vision, spa.sym.CAT), spa.sym.E >> m.motor)
            spa.ifmax(
                spa.dot(m.vision, spa.sym.PARROT) + m.compare,
                spa.sym.F >> m.motor)
            spa.ifmax(0.5 * spa.dot(m.vision, spa.sym.MOUSE) + 0.5 * m.compare,
                      spa.sym.G >> m.motor)
            spa.ifmax((spa.dot(m.vision, spa.sym.MOUSE) - m.compare) * 0.5,
                      spa.sym.H >> m.motor)

        m.encode >> m.vision
        spa.sym.SHOOP >> m.compare.input_a
        spa.sym.SHOOP >> m.compare.input_b
        bg = actions.bg

        p = nengo.Probe(bg.input, 'output', synapse=0.03)

    with Simulator(m) as sim:
        sim.run(0.5)
    t = sim.trange()

    plt.plot(t, sim.data[p])
    plt.legend(["A", "B", "C", "D", "E", "F", "G", "H"])
    plt.title('Basal Ganglia output')

    # assert the basal ganglia is prioritizing things correctly
    # Motor F
    assert sim.data[p][t == 0.4, 5] > 0.8
    # Motor G
    assert sim.data[p][t == 0.5, 6] > 0.8
    # Motor A
    assert 0.6 > sim.data[p][t == 0.1, 0] > 0.4
    # Motor B
    assert sim.data[p][t == 0.2, 1] > 0.8
    # Motor C
    assert sim.data[p][t == 0.3, 2] > 0.5

    # Motor B should be the same as Motor D
    assert np.allclose(sim.data[p][:, 1], sim.data[p][:, 3], atol=0.2)
    # Motor A should be the same as Motor E
    assert np.allclose(sim.data[p][:, 0], sim.data[p][:, 4], atol=0.2)
Example #25
    PRIM >> SET_PRIM

    input_INSTRUCTIONS >> PRIM.input_left
    spa.translate(clean_POS, vocab) >> PRIM.input_right

    SET_exec = spa.Transcode(input_vocab=vocab, output_vocab=vocab)
    GET_exec = spa.Transcode(input_vocab=vocab, output_vocab=vocab)

    # GET selector
    with spa.Network(label='GET selector') as GET_selector:
        GET_selector.labels = []
        with spa.ActionSelection() as GET_selector.AS:

            GET_selector.labels.append("GET V (FIXATE)")
            spa.ifmax(GET_selector.labels[-1], BG_bias + FIXATE_detector,
                      V.preconscious >> GW.AMs[V].input, s.D1 >> POS.input,
                      s.D1 * clean_POS >> INCREMENT.input)

            # GET_selector.labels.append("GET V")
            # spa.ifmax(GET_selector.labels[-1], BG_bias + spa.dot(GET_PRIM, s.GET_V) * (1-spa.dot(GET_exec, s.GET_V)),
            #     s.GET_V >> GET_exec,
            #     1 >> POS.gate,
            # )

            GET_selector.labels.append("GET ADD")
            spa.ifmax(
                GET_selector.labels[-1], BG_bias +
                spa.dot(GET_PRIM, s.GET_ADD) * (1 - GW.detectors[ADD]),
                ADD.preconscious >> GW.AMs[ADD].input, 1 >> POS.gate,
                s.D1 * clean_POS >> INCREMENT.input)
Example #26
def test_new_action_syntax(Simulator, seed, plt, rng):
    model = spa.Network(seed=seed)
    model.config[spa.State].vocab = 3
    model.config[spa.State].subdimensions = 3
    with model:
        model.ctrl = spa.State(16, subdimensions=16, label="ctrl")

        def input_func(t):
            if t < 0.2:
                return "A"
            elif t < 0.4:
                return "B"
            else:
                return "C"

        model.input = spa.Transcode(input_func, output_vocab=16)

        model.state = spa.State(label="state")
        model.buff1 = spa.State(label="buff1")
        model.buff2 = spa.State(label="buff2")
        model.buff3 = spa.State(label="buff3")

        node1 = nengo.Node([0, 1, 0])
        node2 = nengo.Node([0, 0, 1])

        nengo.Connection(node1, model.buff1.input)
        nengo.Connection(node2, model.buff2.input)

        model.input >> model.ctrl
        model.buff1 >> model.state
        with spa.ActionSelection():
            spa.ifmax(spa.dot(model.ctrl, spa.sym.A),
                      model.buff1 >> model.buff3)
            spa.ifmax(spa.dot(model.ctrl, spa.sym.B),
                      model.buff2 >> model.buff3)
            spa.ifmax(spa.dot(model.ctrl, spa.sym.C),
                      model.buff1 * model.buff2 >> model.buff3)

        state_probe = nengo.Probe(model.state.output, synapse=0.03)
        buff3_probe = nengo.Probe(model.buff3.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.6)

    data = sim.data[buff3_probe]

    plt.plot(sim.trange(), data)

    state_val = np.mean(sim.data[state_probe][200:], axis=0)
    assert state_val[0] < 0.2
    assert state_val[1] > 0.8
    assert state_val[2] < 0.2

    valueA = np.mean(data[150:200], axis=0)  # should be [0, 1, 0]
    valueB = np.mean(data[350:400], axis=0)  # should be [0, 0, 1]
    valueC = np.mean(data[550:600], axis=0)  # should be [1, 0, 0]

    assert valueA[0] < 0.2
    assert valueA[1] > 0.75
    assert valueA[2] < 0.2

    assert valueB[0] < 0.2
    assert valueB[1] < 0.2
    assert valueB[2] > 0.75

    assert valueC[0] > 0.75
    assert valueC[1] < 0.2
    assert valueC[2] < 0.2
Example #27
    stim_cue >> pfcCUE

    motorClean = spa.WTAAssocMem(threshold=0.1,
                                 input_vocab=vocab,
                                 output_vocab=vocab,
                                 mapping=movesMap,
                                 function=lambda x: 1 if x > 0.1 else 0,
                                 label='motorClean')

    nengo.Connection(mdRULE.output,
                     pfcRULEmemo.input,
                     transform=5,
                     synapse=0.05)

    with spa.ActionSelection() as act_sel:
        spa.ifmax(spa.dot(pfcRULEasso, s.AUD), s.AUD >> mdRULE)
        spa.ifmax(spa.dot(pfcRULEasso, s.VIS), s.VIS >> mdRULE)
        spa.ifmax(spa.dot(stim_target, s.AUD * s.RIGHT + s.VIS * s.LEFT),
                  stim_target * ~pfcRULEmemo >> motorClean)
        spa.ifmax(spa.dot(stim_target, s.VIS * s.RIGHT + s.AUD * s.LEFT),
                  stim_target * ~pfcRULEmemo >> motorClean)

    # learning
    mdCUE_ens = list(mdCUE.all_ensembles)
    mdRULE_ens = list(mdRULE.all_ensembles)
    ppc_ens = list(ppc.all_ensembles)
    errorPPC_ens = list(errorPPC.all_ensembles)

    -ppc >> errorPPC
    mdCUE >> errorPPC
    for i, ppc_e in enumerate(ppc_ens):
Example #28
    # state representation of input
    target  = spa.State(D, label='Target')
    cue     = spa.State(D, label='Cue')
    motor = spa.State(dims, feedback = 1, label = 'motor')


    # Connect to representation of inputs
    target_in   >> target
    cue_in      >> cue

    # pfc
    pfc  = spa.State(dims, feedback = pfcFB,  label = 'pfc')
    vocab = pfc.vocab
    vocab.add('RULE1', vocab.parse('CUE_A+CUE_C'))
    vocab.add('RULE2', vocab.parse('CUE_B+CUE_D'))
    vocab.add('CONTEXT1', vocab.parse('CUE_A+CUE_B'))
    vocab.add('CONTEXT2', vocab.parse('CUE_C+CUE_D'))

    cue >> pfc

    with spa.ActionSelection():
        spa.ifmax( 0.5, s.X*0 >> pfc)
        spa.ifmax( 0.8*spa.dot(pfc, s.RULE1) +  0.8*spa.dot(target, s.LIGHT*(s.RIGHT+s.LEFT)),
                           target*~s.LIGHT >> motor )
        spa.ifmax( 0.8*spa.dot(pfc, s.RULE2) +  0.8*spa.dot(target, s.SOUND*(s.RIGHT+s.LEFT)),
                           target*~s.SOUND >> motor )

    md = spa.WTAAssocMem(threshold = 0.5, input_vocab = pfc.vocab, mapping = map, label = 'MD')
    pfc >> md
    md*0.8 >> pfc
    vocab2.add( 'CONTEXT1', vocab2.parse('CUE_A+CUE_B') )
    vocab2.add( 'CONTEXT2', vocab2.parse('CUE_C+CUE_D') )

    vocab = pfc.vocab
    vocab.add( 'RULE1', vocab.parse('CUE_A+CUE_C') )
    vocab.add( 'RULE2', vocab.parse('CUE_B+CUE_D') )


    # # ppc has cue history (perfect memory), assume direct sensory inputs
    # stim_cues >> ppc
    # # pfc gets inputs from cue responding "population"
    cues >> pfc
    stim_cues >> ppc

    with spa.ActionSelection():
          spa.ifmax( 0.5, s.X*0 >> pfc)
          spa.ifmax( 0.8*spa.dot(pfc, s.RULE1) +  0.8*spa.dot(targets, s.VIS*(s.RIGHT+s.LEFT)),
                       targets*~s.VIS >> motor )
          spa.ifmax( 0.8*spa.dot(pfc, s.RULE2) +  0.8*spa.dot(targets, s.AUD*(s.RIGHT+s.LEFT)),
                       targets*~s.AUD >> motor )

    # md = spa.WTAAssocMem(threshold = 0.5,mapping=map, input_vocab = pfc.vocab, label = 'MD')

    # learn pfc --> md computation
    md >> error
    -pfc >> error

    md_ens = list(md.all_ensembles)
    # pfc_ens = list(pfc.all_ensembles)
    error_ens = list(error.all_ensembles)
    error2_ens = list(error2.all_ensembles)
Example #30
    stim_cue >> ppc
    stim_cue >> pfcCUE

    motorClean = spa.WTAAssocMem(threshold=0.1,
                                 input_vocab=vocab,
                                 mapping=movesMap,
                                 function=lambda x: x > 0,
                                 label='motorClean')

    nengo.Connection(mdRULE.output,
                     pfcRULEmemo.input,
                     transform=5,
                     synapse=0.05)

    with spa.ActionSelection():
        spa.ifmax(0.3 * spa.dot(s.NOTHING, pfcCUE), s.NOTHING >> pfcRULEmemo)

        spa.ifmax(spa.dot(s.CUE_A + s.CUE_C, pfcCUE), s.VIS >> pfcRULE)
        spa.ifmax(spa.dot(s.CUE_B + s.CUE_D, pfcCUE), s.AUD >> pfcRULE)
        spa.ifmax(spa.dot(pfcRULEasso, s.AUD), s.AUD >> mdRULE)
        spa.ifmax(spa.dot(pfcRULEasso, s.VIS), s.VIS >> mdRULE)
        spa.ifmax(spa.dot(stim_target, s.AUD * s.RIGHT + s.VIS * s.LEFT),
                  stim_target * ~pfcRULEmemo >> motorClean)
        spa.ifmax(spa.dot(stim_target, s.VIS * s.RIGHT + s.AUD * s.LEFT),
                  stim_target * ~pfcRULEmemo >> motorClean)

    # learning

    mdCUE_ens = list(mdCUE.all_ensembles)
    mdRULE_ens = list(mdRULE.all_ensembles)
    ppc_ens = list(ppc.all_ensembles)