def test_translate(Simulator, seed):
    with spa.Network(seed=seed) as model:
        model.buffer1 = spa.State(vocab=16)
        model.buffer2 = spa.State(vocab=32)
        spa.sym.A >> model.buffer1
        spa.translate(model.buffer1, model.buffer2.vocab, populate=True) >> model.buffer2

    with model:
        p = nengo.Probe(model.buffer2.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.2)

    match = np.dot(sim.data[p], model.buffer2.vocab.parse('A').v)
    assert match[199] > 0.8
def test_translate(rng):
    v1 = spa.Vocabulary(16, pointer_gen=rng)
    v1.populate("A; B")
    v2 = spa.Vocabulary(16, pointer_gen=rng)
    v2.populate("A; B")

    assert_allclose(
        spa.translate(PointerSymbol("A", TVocabulary(v1)), v2).evaluate().dot(v2["A"]),
        1.0,
        atol=0.2,
    )
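# Aside (not part of the snippets above): a rough sketch of the same check done
# offline, without building or evaluating a SPA expression. It assumes
# `import numpy as np` and `import nengo_spa as spa`, and that
# Vocabulary.transform_to is available to compute the least-squares mapping
# that translate applies between two vocabularies sharing the same keys.
rng = np.random.RandomState(0)
v1 = spa.Vocabulary(16, pointer_gen=rng)
v1.populate("A; B")
v2 = spa.Vocabulary(16, pointer_gen=rng)
v2.populate("A; B")

# transform_to returns a (v2.dimensions x v1.dimensions) matrix that maps
# pointers in v1 onto the pointers with the same keys in v2.
tr = v1.transform_to(v2)
assert np.dot(tr, v1["A"].v).dot(v2["A"].v) > 0.8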
def v_sent(self):
    """ create the vocabulary of short sentences, combining names with propositions """
    self.vsent = spa.Vocabulary(dimensions=n_class)
    for p in self.pre:
        wp = spa.translate(self.vpre[p], self.words, populate=False)
        for o1 in self.obj1:
            wo1 = self.words[o1] * wp
            for o2 in self.obj2:
                s = wo1 + self.words[o2]
                k = "{}_{}_{}".format(o1, p, o2)
                self.vsent.add(k, s.v)
def __init__( self, seed=1 ):
    """ build the nengo network """
    self.probes = {}
    self.nodes = {}

    self.net = spa.Network( seed=seed )
    with self.net:
        self.node( Vision(), "cnn" )
        self.probe( self.nodes[ "cnn" ], "cnn_result" )

        c = nengo.Config( nengo.Ensemble )
        c[ nengo.Ensemble ].neuron_type = nengo.Direct()
        with c:
            # integer division so subdimensions stays an int under Python 3
            o1 = spa.State( vocab=vocabs.words, subdimensions=n_class // 2, label="obj1_state" )
            o2 = spa.State( vocab=vocabs.words, subdimensions=n_class // 2, label="obj2_state" )
            s_on = spa.State( vocab=vocabs.vsent, subdimensions=n_class // 2, label="sentence_ON" )

            nengo.Connection( self.nodes[ "cnn" ][ : n_class ], o1.input )
            nengo.Connection( self.nodes[ "cnn" ][ n_class : ], o2.input )

            on = spa.translate( vocabs.vpre[ 'ON' ], vocabs.words, populate=False )
            spa.translate( o1 * on + o2, vocabs.vsent, populate=False ) >> s_on

            self.probe( o1.output, "obj1_result" )
            self.probe( o2.output, "obj2_result" )
            self.probe( s_on.output, "s_ON_result" )
def test_dynamic_translate(Simulator, rng):
    v1 = spa.Vocabulary(64, pointer_gen=rng)
    v1.populate("A; B")
    v2 = spa.Vocabulary(64, pointer_gen=rng)
    v2.populate("A; B")

    with spa.Network() as model:
        source = spa.Transcode("A", output_vocab=v1)
        x = spa.translate(source, v2)
        p = nengo.Probe(x.construct(), synapse=0.03)

    # use the injected Simulator fixture instead of hard-coding nengo.Simulator
    with Simulator(model) as sim:
        sim.run(0.5)

    assert_sp_close(sim.trange(), sim.data[p], v2["A"], skip=0.3, atol=0.2)
PRIM = spa.Bind(neurons_per_dimension=200, vocab=vocab, unbind_right=True)
GET_PRIM = spa.WTAAssocMem(
    threshold=.5,
    input_vocab=PRIM.vocab,
    mapping=['GET_V', 'GET_COM', 'GET_ADD'],
    n_neurons=50,
    function=lambda x: x > 0,
)
SET_PRIM = spa.WTAAssocMem(
    threshold=.5,
    input_vocab=PRIM.vocab,
    mapping=['SET_COM', 'SET_ADD', 'SET_M'],
    n_neurons=50,
    function=lambda x: x > 0,
)

PRIM >> GET_PRIM
PRIM >> SET_PRIM
input_INSTRUCTIONS >> PRIM.input_left
spa.translate(clean_POS, vocab) >> PRIM.input_right

SET_exec = spa.Transcode(input_vocab=vocab, output_vocab=vocab)
GET_exec = spa.Transcode(input_vocab=vocab, output_vocab=vocab)

# GET selector
with spa.Network(label='GET selector') as GET_selector:
    GET_selector.labels = []
    with spa.ActionSelection() as GET_selector.AS:

        GET_selector.labels.append("GET V (FIXATE)")
        spa.ifmax(
            GET_selector.labels[-1], BG_bias + FIXATE_detector,
            V.preconscious >> GW.AMs[V].input,
            s.D1 >> POS.input,
            s.D1 * clean_POS >> INCREMENT.input,
        )

        # GET_selector.labels.append("GET V")
###########
# Model 9 #
###########

# translating between vocabularies

v1 = spa.Vocabulary(dim)
v1.populate('A; B')
v2 = spa.Vocabulary(dim)
v2.populate('A; B')

with spa.Network() as model:
    state_1 = spa.State(v1)
    state_2 = spa.State(v2)
    spa.translate(state_2, v1) >> state_1
    spa.sym.A >> state_2
    probe = nengo.Probe(state_1.output, synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(0.5)

plt.plot(sim.trange(), v1['A'].dot(sim.data[probe].T))
plt.plot(sim.trange(), v2['A'].dot(sim.data[probe].T))
plt.xlabel("Time [s]")
plt.ylabel("Similarity")
plt.legend(["v1['A']", "v2['A']"])
plt.show()

print(model.vocabs)
plt.ylabel("Similarity") plt.legend(loc='best') plt.show() """ d1 = 16 d2 = 32 vocab1 = spa.Vocabulary(d1) vocab1.populate('A') vocab2 = spa.Vocabulary(d2) vocab2.populate('A') with spa.Network() as model: state1 = spa.State(vocab=vocab1) state2 = spa.State(vocab=vocab2) spa.sym.A >> state1 spa.translate(state1, vocab2) >> state2 p = nengo.Probe(state2.output, synapse=0.03) with nengo.Simulator(model) as sim: sim.run(0.5) #plt.plot(sim.trange(), spa.similarity(sim.data[p], vocab1), label='vocab1') plt.plot(sim.trange(), spa.similarity(sim.data[p], vocab2), label='vocab2') plt.xlabel("Time [s]") plt.ylabel("Similarity") plt.legend(loc='best') plt.show()