def test_run(Simulator, algebra, seed):
    """Bind should output the binding of its two (time-varying) inputs."""
    rng = np.random.RandomState(seed)
    vocab = spa.Vocabulary(16, pointer_gen=rng, algebra=algebra)
    vocab.populate('A; B')

    with spa.Network(seed=seed) as model:
        model.bind = spa.Bind(vocab)

        def stimulus(t):
            # Present 'A' for the first 100 ms, then 'B'.
            return 'A' if 0 <= t < 0.1 else 'B'

        model.input = spa.Transcode(stimulus, output_vocab=vocab)
        model.input >> model.bind.input_left
        spa.sym.A >> model.bind.input_right

    with model:
        probe = nengo.Probe(model.bind.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.2)

    # By the end of the run the bound pair is B*A ...
    err = rmse(vocab.parse("(B*A).normalized()").v, sim.data[probe][-1])
    assert err < 0.15
    # ... while at 100 ms it was still A*A.
    err = rmse(vocab.parse("(A*A).normalized()").v, sim.data[probe][100])
    assert err < 0.15
def test_routing(Simulator, seed, plt):
    """Action selection should route the correct buffer into buff3."""
    model = spa.Network(seed=seed)
    model.config[spa.State].vocab = 3
    model.config[spa.State].subdimensions = 3
    with model:
        model.ctrl = spa.State(16, subdimensions=16, label='ctrl')

        def input_func(t):
            # Step through the control pointers A, B, C.
            if t < 0.2:
                return 'A'
            if t < 0.4:
                return 'B'
            return 'C'

        model.input = spa.Transcode(input_func, output_vocab=16)

        model.buff1 = spa.State(label='buff1')
        model.buff2 = spa.State(label='buff2')
        model.buff3 = spa.State(label='buff3')

        nengo.Connection(nengo.Node([0, 1, 0]), model.buff1.input)
        nengo.Connection(nengo.Node([0, 0, 1]), model.buff2.input)

        model.input >> model.ctrl
        with spa.ActionSelection():
            spa.ifmax(spa.dot(model.ctrl, spa.sym.A),
                      model.buff1 >> model.buff3)
            spa.ifmax(spa.dot(model.ctrl, spa.sym.B),
                      model.buff2 >> model.buff3)
            spa.ifmax(spa.dot(model.ctrl, spa.sym.C),
                      model.buff1 * model.buff2 >> model.buff3)

        buff3_probe = nengo.Probe(model.buff3.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.6)

    output = sim.data[buff3_probe]
    plt.plot(sim.trange(), output)

    # Mean output over the tail of each 200 ms control period.
    valueA = np.mean(output[150:200], axis=0)  # should be [0, 1, 0]
    valueB = np.mean(output[350:400], axis=0)  # should be [0, 0, 1]
    valueC = np.mean(output[550:600], axis=0)  # should be [1, 0, 0]

    assert valueA[0] < 0.2
    assert valueA[1] > 0.7
    assert valueA[2] < 0.2

    assert valueB[0] < 0.2
    assert valueB[1] < 0.2
    assert valueB[2] > 0.7

    assert valueC[0] > 0.7
    assert valueC[1] < 0.2
    assert valueC[2] < 0.2
def test_memory_run(Simulator, seed, plt):
    """A state with unity feedback should retain a briefly presented value."""
    with spa.Network(seed=seed) as model:
        memory = spa.State(vocab=32, feedback=1.0, feedback_synapse=0.01)

        def stimulus(t):
            # Present 'A' for 50 ms, then nothing.
            return "A" if 0 <= t < 0.05 else "0"

        stim = spa.Transcode(stimulus, output_vocab=32)
        stim >> memory

    with model:
        p = nengo.Probe(memory.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.5)
    t = sim.trange()

    similarity = np.dot(sim.data[p], memory.vocab.vectors.T)
    plt.plot(t, similarity)
    plt.ylabel("Similarity to 'A'")
    plt.xlabel("Time (s)")

    # value should peak above 1.0, then decay down to near 1.0
    assert np.mean(similarity[(t > 0.05) & (t < 0.1)]) > 1.0
    assert np.mean(similarity[(t > 0.2) & (t < 0.3)]) > 0.7
    assert np.mean(similarity[t > 0.49]) > 0.5
def test_memory_run_decay(Simulator, plt, seed):
    """A state with sub-unity feedback should gradually forget its value."""
    with spa.Network(seed=seed) as model:
        memory = spa.State(
            vocab=32, feedback=(1.0 - 0.01 / 0.05), feedback_synapse=0.01)

        def stimulus(t):
            return "A" if 0 <= t < 0.05 else "0"

        stim = spa.Transcode(stimulus, output_vocab=32)
        stim >> memory

    with model:
        p = nengo.Probe(memory.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.3)

    similarity = np.dot(sim.data[p], memory.vocab.vectors.T)
    t = sim.trange()
    plt.plot(t, similarity)

    # Strong right at stimulus offset, mostly decayed by the end of the run.
    assert similarity[t == 0.05, 0] > 1.0
    assert similarity[t == 0.299, 0] < 0.4
def test_encode_with_input(Simulator, seed):
    """A Transcode with size_in should scale its pointer by the node input."""
    with spa.Network(seed=seed) as model:
        buf = spa.State(vocab=16)

        def stimulus(t, x):
            # Scale the fixed pointer 'A' by the scalar control signal.
            return x[0] * buf.vocab.parse('A')

        ctrl = nengo.Node(lambda t: t > 0.2)
        encode = spa.Transcode(stimulus, output_vocab=16, size_in=1)
        nengo.Connection(ctrl, encode.input)
        encode >> buf
        p = nengo.Probe(buf.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.4)

    vocab = buf.vocab
    # Zero while the control signal is off, then 'A' once it switches on.
    assert_sp_close(sim.trange(), sim.data[p], vocab.parse('0'),
                    duration=0.2)
    assert_sp_close(sim.trange(), sim.data[p], vocab.parse('A'), skip=0.38,
                    duration=0.02)
def test_no_feedback_run(Simulator, plt, seed):
    """Without feedback the state should simply track its current input."""
    with spa.Network(seed=seed) as model:
        state = spa.State(vocab=32, feedback=0.0)

        def stimulus(t):
            if 0 <= t < 0.3:
                return "A"
            if 0.2 <= t < 0.6:
                return "B"
            return "0"

        stim = spa.Transcode(stimulus, output_vocab=32)
        stim >> state

    with model:
        p = nengo.Probe(state.output, synapse=0.05)

    with Simulator(model) as sim:
        sim.run(0.8)

    similarity = np.dot(sim.data[p], state.vocab.vectors.T)
    plt.plot(sim.trange(), similarity)

    # A dominates at 0.3 s, B at 0.6 s, neither at the end.
    assert similarity[299, 0] > 0.9
    assert similarity[299, 1] < 0.2
    assert similarity[599, 0] < 0.2
    assert similarity[599, 1] > 0.9
    assert similarity[799, 0] < 0.2
    assert similarity[799, 1] < 0.2
def test_binary_operation_on_modules_with_fixed_pointer(
        Simulator, algebra, op, order, rng):
    """Binary operators between a module and a fixed SemanticPointer."""
    vocab = spa.Vocabulary(16, pointer_gen=rng, algebra=algebra)
    vocab.populate("A; B")
    b = SemanticPointer(vocab["B"].v)  # noqa: F841

    with spa.Network() as model:
        a = spa.Transcode("A", output_vocab=vocab)  # noqa: F841
        # `a` and `b` are referenced by name inside the eval'd expression.
        if order not in ("AB", "BA"):
            raise ValueError("Invalid order argument.")
        expr = "a" + op + "b" if order == "AB" else "b" + op + "a"
        x = eval(expr)
        p = nengo.Probe(x.construct(), synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.3)

    assert_sp_close(
        sim.trange(),
        sim.data[p],
        vocab.parse(order[0] + op + order[1]),
        skip=0.2,
        atol=0.3,
    )
def test_run(Simulator, algebra, seed):
    """Superposition should output the (normalized) sum of its inputs."""
    rng = np.random.RandomState(seed)
    vocab = spa.Vocabulary(32, pointer_gen=rng, algebra=algebra)
    vocab.populate('A; B')

    with spa.Network(seed=seed, vocabs=VocabularyMap([vocab])) as model:
        model.superpos = spa.Superposition(2, vocab=32)

        def stimulus(t):
            # 'A' for the first 100 ms, 'B' afterwards.
            return 'A' if 0 <= t < 0.1 else 'B'

        model.input = spa.Transcode(stimulus, output_vocab=vocab)
        model.input >> model.superpos.inputs[0]
        spa.sym.A >> model.superpos.inputs[1]

    with model:
        p = nengo.Probe(model.superpos.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.2)

    # Final inputs are B and A ...
    assert rmse(vocab.parse("(B+A).normalized()").v, sim.data[p][-1]) < 0.1
    # ... while at 100 ms they were both A.
    assert rmse(vocab.parse("(A+A).normalized()").v, sim.data[p][100]) < 0.2
def test_nondefault_routing(Simulator, seed):
    """Actions should be able to route into named (non-default) inputs."""
    model = spa.Network(seed=seed)
    model.config[spa.State].vocab = 3
    model.config[spa.State].subdimensions = 3
    with model:
        model.ctrl = spa.State(16, subdimensions=16, label="ctrl")

        def input_func(t):
            if t < 0.2:
                return "A"
            if t < 0.4:
                return "B"
            return "C"

        model.input = spa.Transcode(input_func, output_vocab=16)

        model.buff1 = spa.State(label="buff1")
        model.buff2 = spa.State(label="buff2")
        model.cmp = spa.Compare(3)

        nengo.Connection(nengo.Node([0, 1, 0]), model.buff1.input)
        nengo.Connection(nengo.Node([0, 0, 1]), model.buff2.input)

        model.input >> model.ctrl
        with spa.ActionSelection():
            # A: compare buff1 with itself (similar);
            # B: buff1 vs buff2 (dissimilar); C: buff2 with itself.
            spa.ifmax(
                spa.dot(model.ctrl, spa.sym.A),
                model.buff1 >> model.cmp.input_a,
                model.buff1 >> model.cmp.input_b,
            )
            spa.ifmax(
                spa.dot(model.ctrl, spa.sym.B),
                model.buff1 >> model.cmp.input_a,
                model.buff2 >> model.cmp.input_b,
            )
            spa.ifmax(
                spa.dot(model.ctrl, spa.sym.C),
                model.buff2 >> model.cmp.input_a,
                model.buff2 >> model.cmp.input_b,
            )

        compare_probe = nengo.Probe(model.cmp.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.6)

    similarity = sim.data[compare_probe]
    valueA = np.mean(similarity[150:200], axis=0)  # should be [1]
    valueB = np.mean(similarity[350:400], axis=0)  # should be [0]
    valueC = np.mean(similarity[550:600], axis=0)  # should be [1]

    assert valueA > 0.6
    assert valueB < 0.3
    assert valueC > 0.6
def test_dot(Simulator, rng):
    """spa.dot of two modules should track their similarity over time.

    Fixed: the test requested the ``Simulator`` fixture but instantiated
    ``nengo.Simulator`` directly, bypassing the backend under test.
    """
    vocab = spa.Vocabulary(16, pointer_gen=rng)
    vocab.populate("A; B")

    with spa.Network() as model:
        a = spa.Transcode("A", output_vocab=vocab)
        b = spa.Transcode(lambda t: "A" if t <= 0.5 else "B",
                          output_vocab=vocab)
        x = spa.dot(a, b)
        p = nengo.Probe(x.construct(), synapse=0.03)

    with Simulator(model) as sim:
        sim.run(1.0)

    t = sim.trange()
    # A . A == 1 while the inputs agree; A . B ~= 0 afterwards.
    assert_allclose(sim.data[p][(0.3 < t) & (t <= 0.5)], 1.0, atol=0.2)
    assert np.all(sim.data[p][0.8 < t] < 0.2)
def test_output_types(Simulator, value):
    """Transcode should accept every parametrized output type."""
    with spa.Network() as model:
        source = spa.Transcode(value, output_vocab=32)
        sink = spa.State(32)
        source >> sink

    # Building and briefly running the model must not raise.
    with Simulator(model) as sim:
        sim.run(0.01)
def test_transformed_and_network(Simulator, algebra, rng):
    """A PointerSymbol-transformed module bound with another module.

    Fixed: use the injected ``Simulator`` fixture instead of hard-coded
    ``nengo.Simulator`` so alternative backends are exercised.
    """
    vocab = spa.Vocabulary(16, pointer_gen=rng, algebra=algebra)
    vocab.populate("A; B.unitary()")

    with spa.Network() as model:
        a = spa.Transcode("A", output_vocab=vocab)
        b = spa.Transcode("B", output_vocab=vocab)
        x = (a * PointerSymbol("~B")) * b
        p = nengo.Probe(x.construct(), synapse=0.3)

    with Simulator(model) as sim:
        sim.run(0.3)

    assert_sp_close(sim.trange(), sim.data[p], vocab.parse("A * ~B * B"),
                    skip=0.2, normalized=True)
def test_binary_operation_on_modules(Simulator, algebra, op, suffix, rng):
    """Binary operators between two modules (optionally via a suffix)."""
    vocab = spa.Vocabulary(16, pointer_gen=rng, algebra=algebra)
    vocab.populate("A; B")

    with spa.Network() as model:
        a = spa.Transcode("A", output_vocab=vocab)  # noqa: F841
        b = spa.Transcode("B", output_vocab=vocab)  # noqa: F841
        # `a` and `b` are referenced by name inside the eval'd expression.
        x = eval("a" + suffix + op + "b" + suffix)
        p = nengo.Probe(x.construct(), synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.3)

    assert_sp_close(sim.trange(), sim.data[p], vocab.parse("A" + op + "B"),
                    skip=0.2, atol=0.3)
def test_dot_matmul(Simulator, rng):
    """The ``@`` operator between modules should behave like ``spa.dot``.

    Fixed: use the injected ``Simulator`` fixture instead of hard-coded
    ``nengo.Simulator`` so alternative backends are exercised.
    """
    vocab = spa.Vocabulary(16, pointer_gen=rng)
    vocab.populate('A; B')

    with spa.Network() as model:
        a = spa.Transcode('A', output_vocab=vocab)  # noqa: F841
        b = spa.Transcode(  # noqa: F841
            lambda t: 'A' if t <= 0.5 else 'B', output_vocab=vocab)
        # `a` and `b` are referenced by name inside the eval'd expression.
        x = eval('a @ b')
        p = nengo.Probe(x.construct(), synapse=0.03)

    with Simulator(model) as sim:
        sim.run(1.)

    t = sim.trange()
    # A . A == 1 while the inputs agree; A . B ~= 0 afterwards.
    assert_allclose(sim.data[p][(0.3 < t) & (t <= 0.5)], 1., atol=.2)
    assert np.all(sim.data[p][0.8 < t] < 0.2)
def test_eval(Simulator):
    """A scaled pointer symbol routed into a Transcode is evaluated."""
    with spa.Network() as net:
        sink = spa.Transcode(input_vocab=16)
        0.5 * spa.sym.A >> sink
        p = nengo.Probe(sink.output)

    with Simulator(net) as sim:
        sim.run(1.0)

    assert np.allclose(sim.data[p][-1], net.vocabs[16].parse("0.5*A").v)
def test_transformed_and_transformed(Simulator, algebra, rng):
    """The product of two PointerSymbol-transformed modules.

    Fixed: use the injected ``Simulator`` fixture instead of hard-coded
    ``nengo.Simulator`` so alternative backends are exercised.
    """
    vocab = spa.Vocabulary(16, pointer_gen=rng, algebra=algebra)
    vocab.populate('A; B.unitary(); C')

    with spa.Network() as model:
        a = spa.Transcode('A', output_vocab=vocab)
        c = spa.Transcode('C', output_vocab=vocab)
        x = (PointerSymbol('B') * a) * (PointerSymbol('~B') * c)
        p = nengo.Probe(x.construct(), synapse=0.3)

    with Simulator(model) as sim:
        sim.run(0.3)

    assert_sp_close(sim.trange(), sim.data[p],
                    vocab.parse('(B * A) * (~B * C)'), skip=0.2,
                    normalized=True, atol=0.3)
def create_model(protocol, vocab, seed, n_neurons=5000):
    """Build the AML association network for the given protocol.

    Returns the constructed nengo model and a spike probe on the *post*
    ensemble.
    """
    d = vocab.dimensions
    with nengo.Network(seed=seed) as model:
        model.config[nengo.Ensemble].max_rates = nengo.dists.Uniform(10, 20)
        model.config[nengo.Ensemble].intercepts = nengo.dists.Uniform(0.1, 1.)

        pre_person = nengo.Ensemble(n_neurons, d)
        pre_landmark = nengo.Ensemble(n_neurons, d)
        post = nengo.Ensemble(
            n_neurons, d, radius=1.,
            noise=nengo.processes.FilteredNoise(
                synapse=nengo.Lowpass(0.1),
                dist=nengo.dists.Gaussian(0.01, .05)))

        # Static feedforward connections into post.
        nengo.Connection(pre_person, post)
        nengo.Connection(pre_landmark, post)

        # Error signal layout: [learn gate, constant 1, summed pre activity].
        err = nengo.Node(size_in=d + 2)
        nengo.Connection(pre_person, err[2:])
        nengo.Connection(pre_landmark, err[2:])
        nengo.Connection(nengo.Node(protocol.learn), err[0])
        nengo.Connection(nengo.Node(1.), err[1])

        # Learned connections start at zero output and are driven by the
        # AML rule via the shared error node.
        for pre in (pre_person, pre_landmark):
            learned = nengo.Connection(
                pre, post, learning_rule_type=AML(d, 0.9),
                function=lambda x: np.zeros(d))
            nengo.Connection(err, learned.learning_rule)

        # Stimulus inputs for person ('P') and landmark ('L').
        stim_p = spa.Transcode(
            lambda t: protocol.stimulus('P', t), output_vocab=vocab)
        nengo.Connection(stim_p.output, pre_person)
        stim_l = spa.Transcode(
            lambda t: protocol.stimulus('L', t), output_vocab=vocab)
        nengo.Connection(stim_l.output, pre_landmark)

        p_post_spikes = nengo.Probe(post.neurons, 'spikes')
    return model, p_post_spikes
def test_non_default_input_and_output(Simulator, rng):
    """Explicit .output attributes should route into Bind's named inputs."""
    vocab = spa.Vocabulary(32, pointer_gen=rng)
    vocab.populate("A; B")

    with spa.Network() as model:
        a = spa.Transcode("A", output_vocab=vocab)
        b = spa.Transcode("B", output_vocab=vocab)
        bind = spa.Bind(vocab)
        a.output >> bind.input_left
        b.output >> bind.input_right
        p = nengo.Probe(bind.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.5)

    assert_sp_close(sim.trange(), sim.data[p], vocab.parse("A*B"), skip=0.3,
                    atol=0.3)
def test_complex_rule(Simulator, algebra, rng):
    """A compound expression mixing scaled modules and pointer symbols.

    Fixed: use the injected ``Simulator`` fixture instead of hard-coded
    ``nengo.Simulator`` so alternative backends are exercised.
    """
    vocab = spa.Vocabulary(16, pointer_gen=rng, algebra=algebra)
    vocab.populate('A; B; C; D')

    with spa.Network() as model:
        a = spa.Transcode('A', output_vocab=vocab)
        b = spa.Transcode('B', output_vocab=vocab)
        x = (0.5 * PointerSymbol('C') * a + 0.5 * PointerSymbol('D')) * (
            0.5 * b + a * 0.5)
        p = nengo.Probe(x.construct(), synapse=0.3)

    with Simulator(model) as sim:
        sim.run(0.3)

    assert_sp_close(
        sim.trange(), sim.data[p],
        vocab.parse('(0.5 * C * A + 0.5 * D) * (0.5 * B + 0.5 * A)'),
        skip=0.2, normalized=True)
def test_fixed(Simulator, seed):
    """Fixed Transcode pointers should drive states of differing vocabs."""
    with spa.Network(seed=seed) as model:
        model.buffer1 = spa.State(vocab=16)
        model.buffer2 = spa.State(vocab=8, subdimensions=8)
        model.input1 = spa.Transcode('A', output_vocab=16)
        model.input2 = spa.Transcode('B', output_vocab=8)
        model.input1 >> model.buffer1
        model.input2 >> model.buffer2
        p1 = nengo.Probe(model.buffer1.output, synapse=0.03)
        p2 = nengo.Probe(model.buffer2.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.1)

    # Each buffer should settle on its own fixed pointer.
    for probe, state, expected in ((p1, model.buffer1, 'A'),
                                   (p2, model.buffer2, 'B')):
        assert_sp_close(sim.trange(), sim.data[probe],
                        state.vocab.parse(expected), skip=0.08)
def test_unary_operation_on_module(Simulator, algebra, op, suffix, rng):
    """Unary operators applied to a module's output."""
    vocab = spa.Vocabulary(16, pointer_gen=rng, algebra=algebra)
    vocab.populate('A')

    with spa.Network() as model:
        stimulus = spa.Transcode('A', output_vocab=vocab)  # noqa: F841
        # `stimulus` is referenced by name inside the eval'd expression.
        x = eval(op + 'stimulus' + suffix)
        p = nengo.Probe(x.construct(), synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.3)

    assert_sp_close(sim.trange(), sim.data[p], vocab.parse(op + 'A'),
                    skip=0.2)
def test_action_selection(Simulator, rng):
    """End-to-end ActionSelection routing of scalars and pointers.

    Fixed: the vocabulary now uses the ``rng`` fixture
    (``pointer_gen=rng``) as the test signature requests, making the
    generated pointers deterministic across runs.
    """
    vocab = spa.Vocabulary(64, pointer_gen=rng)
    vocab.populate("A; B; C; D; E; F")

    with spa.Network() as model:
        # Present A..F for 0.5 s each.
        state = spa.Transcode(lambda t: "ABCDEF"[min(5, int(t / 0.5))],
                              output_vocab=vocab)
        scalar = spa.Scalar()
        pointer = spa.State(vocab)
        with ActionSelection():
            spa.ifmax(spa.dot(state, PointerSymbol("A")), 0.5 >> scalar)
            spa.ifmax(spa.dot(state, PointerSymbol("B")),
                      PointerSymbol("B") >> pointer)
            spa.ifmax(spa.dot(state, PointerSymbol("C")), state >> pointer)
            # This action's utility is driven externally while D is shown.
            d_utility = spa.ifmax(0, PointerSymbol("D") >> pointer)
            spa.ifmax(
                spa.dot(state, PointerSymbol("E")),
                0.25 >> scalar,
                PointerSymbol("E") >> pointer,
            )
        nengo.Connection(nengo.Node(lambda t: 1.5 < t <= 2.0), d_utility)
        p_scalar = nengo.Probe(scalar.output, synapse=0.03)
        p_pointer = nengo.Probe(pointer.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(3.0)
    t = sim.trange()

    assert_allclose(sim.data[p_scalar][(0.3 < t) & (t <= 0.5)], 0.5,
                    atol=0.2)
    assert_sp_close(sim.trange(), sim.data[p_pointer], vocab["B"], skip=0.8,
                    duration=0.2)
    assert_sp_close(sim.trange(), sim.data[p_pointer], vocab["C"], skip=1.3,
                    duration=0.2)
    assert_sp_close(sim.trange(), sim.data[p_pointer], vocab["D"], skip=1.8,
                    duration=0.2)
    assert_allclose(sim.data[p_scalar][(2.3 < t) & (t <= 2.5)], 0.25,
                    atol=0.2)
    assert_sp_close(sim.trange(), sim.data[p_pointer], vocab["E"], skip=2.3,
                    duration=0.2)
def test_assignment_of_dynamic_pointer(Simulator, rng):
    """Routing a module output into a State should appear on its output."""
    vocab = spa.Vocabulary(16, pointer_gen=rng)
    vocab.populate("A")

    with spa.Network() as model:
        source = spa.Transcode("A", output_vocab=vocab)
        sink = spa.State(vocab)
        source >> sink
        p = nengo.Probe(sink.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.5)

    assert_sp_close(sim.trange(), sim.data[p], vocab["A"], skip=0.3)
def test_dynamic_translate(Simulator, rng):
    """spa.translate should map a pointer between two vocabularies.

    Fixed: use the injected ``Simulator`` fixture instead of hard-coded
    ``nengo.Simulator`` so alternative backends are exercised.
    """
    v1 = spa.Vocabulary(64, pointer_gen=rng)
    v1.populate("A; B")
    v2 = spa.Vocabulary(64, pointer_gen=rng)
    v2.populate("A; B")

    with spa.Network() as model:
        source = spa.Transcode("A", output_vocab=v1)
        x = spa.translate(source, v2)
        p = nengo.Probe(x.construct(), synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.5)

    assert_sp_close(sim.trange(), sim.data[p], v2["A"], skip=0.3, atol=0.2)
def test_am_wta(Simulator, plt, seed, rng):
    """Test the winner-take-all ability of the associative memory."""
    d = 64
    vocab = Vocabulary(d, pointer_gen=rng)
    vocab.populate("A; B; C; D")

    def input_func(t):
        # Favour A first, present nothing briefly, then favour B.
        if t < 0.2:
            return "A + 0.8 * B"
        if t < 0.3:
            return "0"
        return "0.8 * A + B"

    with spa.Network("model", seed=seed) as m:
        m.am = WTAAssocMem(
            threshold=0.3,
            input_vocab=vocab,
            mapping=vocab.keys(),
            function=filtered_step_fn,
        )
        m.stimulus = spa.Transcode(input_func, output_vocab=vocab)
        m.stimulus >> m.am

        in_p = nengo.Probe(m.am.input)
        out_p = nengo.Probe(m.am.output, synapse=0.03)

    with Simulator(m) as sim:
        sim.run(0.5)
    t = sim.trange()

    more_a = (t > 0.15) & (t < 0.2)
    more_b = t > 0.45

    plt.subplot(2, 1, 1)
    plt.plot(t, similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.subplot(2, 1, 2)
    plt.plot(t, similarity(sim.data[out_p], vocab))
    plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.9, c="g", lw=2)
    plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.9, c="g", lw=2)
    plt.ylabel("Output")

    # The stronger pointer should win outright in each phase.
    assert_sp_close(t, sim.data[out_p], vocab["A"], skip=0.15, duration=0.05)
    assert_sp_close(t, sim.data[out_p], vocab["B"], skip=0.45, duration=0.05)
def test_transformed_and_pointer_symbol(Simulator, algebra, rng):
    """A transformed module multiplied by a further PointerSymbol.

    Fixed: use the injected ``Simulator`` fixture instead of hard-coded
    ``nengo.Simulator`` so alternative backends are exercised.
    """
    vocab = spa.Vocabulary(16, pointer_gen=rng, algebra=algebra)
    vocab.populate('A; B')

    with spa.Network() as model:
        a = spa.Transcode('A', output_vocab=vocab)
        x = (a * PointerSymbol('B')) * PointerSymbol('~B')
        p = nengo.Probe(x.construct(), synapse=0.3)

    with Simulator(model) as sim:
        sim.run(0.3)

    assert_sp_close(sim.trange(), sim.data[p], vocab.parse('A * B * ~B'),
                    skip=0.2, normalized=True)
def test_dot_with_fixed_matmul(Simulator, rng):
    """The ``@`` operator between a PointerSymbol and a module.

    Fixed: use the injected ``Simulator`` fixture instead of hard-coded
    ``nengo.Simulator`` so alternative backends are exercised.
    """
    vocab = spa.Vocabulary(16, pointer_gen=rng)
    vocab.populate("A; B")

    with spa.Network() as model:
        a = PointerSymbol("A")  # noqa: F841
        b = spa.Transcode(  # noqa: F841
            lambda t: "A" if t <= 0.5 else "B", output_vocab=vocab)
        # `a` and `b` are referenced by name inside the eval'd expression.
        x = eval("a @ b")
        p = nengo.Probe(x.construct(), synapse=0.03)

    with Simulator(model) as sim:
        sim.run(1.0)

    t = sim.trange()
    # A . A == 1 while the inputs agree; A . B ~= 0 afterwards.
    assert_allclose(sim.data[p][(0.3 < t) & (t <= 0.5)], 1.0, atol=0.2)
    assert np.all(sim.data[p][0.8 < t] < 0.2)
def test_time_varying_encode(Simulator, seed):
    """Transcode functions may return names, pointers, or raw vectors.

    Fixed: the simulation ran for only 0.3 s while the final assertion
    inspects the window starting at 0.38 s; run for 0.4 s so all four
    assertions have data to check.
    """
    with spa.Network(seed=seed) as model:
        model.buffer = spa.State(vocab=16)

        def stimulus(t):
            # Exercise all three return types: key string, SemanticPointer,
            # and raw vector; finally the zero pointer.
            if t < 0.1:
                return 'A'
            elif t < 0.2:
                return model.buffer.vocab.parse('B')
            elif t < 0.3:
                return model.buffer.vocab.parse('C').v
            else:
                return '0'

        model.encode = spa.Transcode(stimulus, output_vocab=16)
        model.encode >> model.buffer
        p = nengo.Probe(model.buffer.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.4)

    vocab = model.buffer.vocab
    assert_sp_close(sim.trange(), sim.data[p], vocab.parse('A'), skip=0.08,
                    duration=0.02)
    assert_sp_close(sim.trange(), sim.data[p], vocab.parse('B'), skip=0.18,
                    duration=0.02)
    assert_sp_close(sim.trange(), sim.data[p], vocab.parse('C'), skip=0.28,
                    duration=0.02)
    assert_sp_close(sim.trange(), sim.data[p], vocab.parse('0'), skip=0.38,
                    duration=0.02)
def test_am_ia(Simulator, plt, seed, rng):
    """Test the winner-take-all ability of the IA memory.

    Fixed: use the injected ``Simulator`` fixture instead of hard-coded
    ``nengo.Simulator`` so alternative backends are exercised.
    """
    d = 64
    vocab = Vocabulary(d, pointer_gen=rng)
    vocab.populate("A; B; C; D")

    def input_func(t):
        # A is favoured first; after the reset pulse B is favoured.
        if t < 0.2:
            return "A + 0.8 * B"
        else:
            return "0.6 * A + B"

    with spa.Network("model", seed=seed) as m:
        m.am = IAAssocMem(input_vocab=vocab, mapping=vocab.keys())
        m.stimulus = spa.Transcode(input_func, output_vocab=vocab)
        # Reset pulse allows the IA memory to switch winners.
        m.reset = nengo.Node(lambda t: 0.2 < t < 0.4)

        m.stimulus >> m.am
        nengo.Connection(m.reset, m.am.input_reset, synapse=0.1)

        in_p = nengo.Probe(m.am.input)
        reset_p = nengo.Probe(m.reset)
        out_p = nengo.Probe(m.am.output, synapse=0.03)

    with Simulator(m) as sim:
        sim.run(0.7)
    t = sim.trange()
    more_a = (t > 0.15) & (t < 0.2)
    more_b = t > 0.65

    plt.subplot(2, 1, 1)
    plt.plot(t, similarity(sim.data[in_p], vocab))
    plt.plot(t, sim.data[reset_p], c="k", linestyle="--")
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.subplot(2, 1, 2)
    plt.plot(t, similarity(sim.data[out_p], vocab))
    plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.9, c="tab:blue", lw=2)
    plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.9, c="tab:orange", lw=2)
    plt.ylabel("Output")

    assert_sp_close(t, sim.data[out_p], vocab["A"], skip=0.15, duration=0.05)
    assert_sp_close(t, sim.data[out_p], vocab["B"], skip=0.65, duration=0.05)
def test_am_threshold(Simulator, plt, seed, rng):
    """Associative memory thresholding with differing input/output vocabs."""
    d = 64
    vocab = Vocabulary(d, pointer_gen=rng)
    vocab.populate("A; B; C; D")

    d2 = int(d / 2)
    vocab2 = Vocabulary(d2, pointer_gen=rng)
    vocab2.populate("A; B; C; D")

    def input_func(t):
        # Sub-threshold A first, then supra-threshold B.
        return "0.49 * A" if t < 0.1 else "0.8 * B"

    with spa.Network("model", seed=seed) as m:
        m.am = ThresholdingAssocMem(
            threshold=0.5,
            input_vocab=vocab,
            output_vocab=vocab2,
            function=filtered_step_fn,
            mapping="by-key",
        )
        m.stimulus = spa.Transcode(input_func, output_vocab=vocab)
        m.stimulus >> m.am

        in_p = nengo.Probe(m.am.input)
        out_p = nengo.Probe(m.am.output, synapse=0.03)

    with Simulator(m) as sim:
        sim.run(0.3)
    t = sim.trange()
    below_th = t < 0.1
    above_th = t > 0.25

    plt.subplot(2, 1, 1)
    plt.plot(t, similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.subplot(2, 1, 2)
    plt.plot(t, similarity(sim.data[out_p], vocab2))
    plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.9, c="g", lw=2)
    plt.ylabel("Output")

    # No output for sub-threshold input; 0.8*B is cleaned up to B in vocab2.
    assert np.mean(sim.data[out_p][below_th]) < 0.01
    assert_sp_close(t, sim.data[out_p], vocab2["B"], skip=0.25, duration=0.05)