def model_out_to_responses(recall_vocab, t, model_out, pos_out, proto):
    similarity = spa.similarity(model_out, recall_vocab)
    responses = []
    positions = np.arange(proto.n_items)
    last_recall = -1
    if proto.serial:
        for i in positions:
            recall_phase = t > proto.pres_phase_duration
            s = recall_phase & (pos_out[:, i] > 0.8)
            if np.any(s):
                recall_for_pos = similarity[s][-1, :]
            else:
                recall_for_pos = np.array([0.])
            if np.any(recall_for_pos > 0.6):
                recalled = float(np.argmax(recall_for_pos))
                if len(responses) == 0 or recalled != last_recall:
                    responses.append(recalled)
                    last_recall = recalled
                else:
                    responses.append(np.nan)
            else:
                responses.append(np.nan)
    else:
        above_threshold = similarity[np.max(similarity, axis=1) > 0.8, :]
        for x in np.argmax(above_threshold, axis=1):
            if x not in responses:
                responses.append(float(x))
    responses = responses + (proto.n_items - len(responses)) * [np.nan]
    return responses
def goal_in_func(t, x):
    """Sets the goals for both the pegs and the disk."""
    disks = spa.similarity(x, toh.disks)
    pegs = toh.goal_peg_data
    print('target_peg = {}'.format(toh.target_peg))
    if np.max(pegs) > threshold and np.max(disks) > threshold:
        # similarity returns numpy arrays, so use argmax rather than list.index
        toh.goal = np.argmax(disks)
        toh.target_peg = 'ABC'[np.argmax(pegs)]
def model_out_to_timings(recall_vocab, t, model_out, proto):
    recall_output = spa.similarity(model_out, recall_vocab) > 0.8
    recall_times = []
    for x in recall_output.T:
        nz = np.nonzero(x)[0]
        if len(nz) > 0:
            recall_times.append(t[nz[0]] - proto.duration)
    return recall_times + (proto.n_items - len(recall_times)) * [np.nan]
def pr_sent(sim_data):
    """Print the most likely sentence associated with two images."""
    probe = nn.probes["s_ON_result"]
    simil = spa.similarity(sim_data[probe][-1], vocabs.vsent)
    idx = simil.argsort()
    print("most likely sentence {:20s}".format(vocabs.vsent.keys()[idx[-1]]))
    print("second possibility is {:20s}".format(vocabs.vsent.keys()[idx[-2]]))
def find_ambigue(sim_data, comp, amb):
    """Find the best match for WITH, given the complement comp and the two
    ambiguous categories amb.
    """
    s_comp = sim_data[nn.get_probe(comp + '_where', net='spa')][-1]
    s_amb0 = sim_data[nn.get_probe(amb[0] + '_where', net='spa')][-1]
    s_amb1 = sim_data[nn.get_probe(amb[1] + '_where', net='spa')][-1]
    if method == 'SIMILARITY':
        r = spa.similarity(s_comp, [s_amb0, s_amb1], normalize=True)
        return r.argmax()
    if method == 'CLOSENESS':
        return closest(s_comp, s_amb0, s_amb1)
def move_func(t, x):
    disks = spa.similarity(x, toh.disks)
    disk = np.argmax(disks)
    pegs = toh.move_peg_data
    peg = 'ABC'[np.argmax(pegs)]  # index into the peg labels 'A', 'B', 'C'
    if np.max(pegs) > threshold and np.max(disks) > threshold:
        if peg != toh.peg(disk):
            if toh.can_move(disk, peg):
                toh.move(disk, peg)
                print('Moving D{} to {}'.format(disk, peg))
            else:
                print('Cannot move D{} to {}'.format(disk, peg))
def test_casting_vocabs(d1, d2, method, lookup, Simulator, plt, rng):
    v1 = spa.Vocabulary(d1, pointer_gen=rng)
    v1.populate("A")
    v2 = spa.Vocabulary(d2, pointer_gen=rng)
    v2.populate("A")

    with spa.Network() as model:
        a = spa.State(vocab=v1)
        b = spa.State(vocab=v2)
        spa.sym.A >> a
        eval("spa.%s" % method) >> b
        p = nengo.Probe(b.output, synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.5)

    t = sim.trange() > 0.2
    v = locals()[lookup].parse("A").v

    plt.plot(sim.trange(), spa.similarity(sim.data[p], v))
    plt.xlabel("t [s]")
    plt.ylabel("Similarity")

    assert np.mean(spa.similarity(sim.data[p][t], v)) > 0.8
with model:
    model.config[nengo.Probe].synapse = nengo.Lowpass(0.03)
    p_color_in = nengo.Probe(color_in.output)
    p_shape_in = nengo.Probe(shape_in.output)
    p_cue = nengo.Probe(cue.output)
    p_conv = nengo.Probe(conv.output)
    p_out = nengo.Probe(out.output)

with nengo.Simulator(model) as sim:
    sim.run(2.5)

plt.figure(figsize=(10, 10))
vocab = model.vocabs[dimensions]

plt.subplot(5, 1, 1)
plt.plot(sim.trange(), spa.similarity(sim.data[p_color_in], vocab))
plt.legend(vocab.keys(), fontsize='x-small')
plt.ylabel("question")

plt.subplot(5, 1, 2)
plt.plot(sim.trange(), spa.similarity(sim.data[p_shape_in], vocab))
plt.legend(vocab.keys(), fontsize='x-small')
plt.ylabel("correspond")

plt.subplot(5, 1, 3)
plt.plot(sim.trange(), spa.similarity(sim.data[p_cue], vocab))
plt.legend(vocab.keys(), fontsize='x-small')
plt.ylabel("cue")

plt.subplot(5, 1, 4)
for pointer in ['WHEN * JANUARY2016', 'WHO * BNPPARIBAS', 'WHAT * EXCELLENCEPROGRAM', \
def __call__(self, t, x):
    #######
    # Input
    #######
    focus_in = x[:self.D]
    goal_peg = x[self.D:2 * self.D]
    goal_in = x[2 * self.D:3 * self.D]
    # motor cortex input
    move_peg = x[3 * self.D:4 * self.D]
    move = x[4 * self.D:5 * self.D]

    ############
    # Processing
    ############
    self.focus = np.argmax(spa.similarity(focus_in, self.disks))

    self.goal_peg_data = spa.similarity(goal_peg, self.disks)

    disks = spa.similarity(goal_in, self.disks)
    pegs = self.goal_peg_data
    if np.max(pegs) > threshold and np.max(disks) > threshold:
        self.goal = np.argmax(disks)
        self.target_peg = 'ABC'[np.argmax(pegs)]

    self.move_peg_data = spa.similarity(move_peg, self.pegs)

    disks = spa.similarity(move, self.disks)
    disk = np.argmax(disks)
    pegs = self.move_peg_data
    peg = 'ABC'[np.argmax(pegs)]  # index into the peg labels 'A', 'B', 'C'
    if np.max(pegs) > threshold and np.max(disks) > threshold:
        if peg != self.peg(disk):
            if self.can_move(disk, peg):
                self.move(disk, peg)
                print('Moving D{} to {}'.format(disk, peg))
            else:
                print('Cannot move D{} to {}'.format(disk, peg))

    ########
    # Output
    ########
    # define output array
    out = [0] * 7 * self.D
    out[:self.D] = self.disks[self.largest].v  # largest
    out[self.D:2 * self.D] = self.disks[self.focus].v  # focus_out
    out[2 * self.D:3 * self.D] = self.vocab.parse(self.peg(self.goal)).v  # goal_peg_out
    out[3 * self.D:4 * self.D] = self.vocab.parse(self.target_peg).v  # target_peg
    # visual cortex output
    out[4 * self.D:5 * self.D] = self.disks[self.goal].v  # goal_out
    out[5 * self.D:6 * self.D] = self.vocab.parse(self.target[self.goal]).v  # goal_peg_final
    out[6 * self.D:7 * self.D] = (
        self.zero if self.focus >= self.disk_count
        else self.vocab.parse(self.peg(self.focus)).v)  # focus_peg

    out_viz = [0] * (3 + len(self.location))
    out_viz[0] = self.focus  # focus_viz
    out_viz[1] = self.goal  # goal_viz
    out_viz[2] = self.location_dict[self.target_peg]  # peg_viz
    for idx, loc in enumerate(self.location):
        out_viz[3 + idx] = self.location_dict[loc]  # pos_viz

    out += out_viz
    return out
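# The __call__ above is written as a nengo.Node output callable (signature
# f(t, x)). A minimal, hypothetical wiring sketch, not taken from the source
# project: it assumes an instance named `toh` exposing the attributes
# referenced above (D, location), and derives the Node sizes from the slicing
# in __call__ (five D-dimensional inputs; 7*D pointer dimensions plus the
# visualization scalars as output).
import nengo

D = toh.D  # assumed attribute; mirrors self.D used in the slices above
with nengo.Network() as net:
    toh_node = nengo.Node(toh, size_in=5 * D,
                          size_out=7 * D + 3 + len(toh.location))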
stim = spa.Transcode('Hello', output_vocab=dim)
state = spa.State(dim)
nengo.Connection(stim.output, state.input)
probe = nengo.Probe(state.output, synapse=0.01)

sim = nengo.Simulator(model)
sim.run(0.5)

# plots raw vector dimensions
plt.plot(sim.trange(), sim.data[probe])
plt.xlabel('time (s)')
plt.show()

# plots vocab similarity, with legend of vocab keys
plt.plot(sim.trange(), spa.similarity(sim.data[probe], state.vocab))
plt.xlabel('time (s)')
plt.ylabel('similarity')
plt.legend(state.vocab.keys())
plt.show()

###########
# Model 2 #
###########
# illustrates providing input via Transcode
with spa.Network() as model:
    stim = spa.Transcode('RED*CIRCLE+BLUE*SQUARE', output_vocab=dim)
    query = spa.Transcode(lambda t: 'CIRCLE' if t < 0.25 else 'SQUARE',
                          output_vocab=dim)
    state = spa.State(dim)
plt.ylabel("Similarity") plt.legend(loc='best') plt.show() """ d1 = 16 d2 = 32 vocab1 = spa.Vocabulary(d1) vocab1.populate('A') vocab2 = spa.Vocabulary(d2) vocab2.populate('A') with spa.Network() as model: state1 = spa.State(vocab=vocab1) state2 = spa.State(vocab=vocab2) spa.sym.A >> state1 spa.translate(state1, vocab2) >> state2 p = nengo.Probe(state2.output, synapse=0.03) with nengo.Simulator(model) as sim: sim.run(0.5) #plt.plot(sim.trange(), spa.similarity(sim.data[p], vocab1), label='vocab1') plt.plot(sim.trange(), spa.similarity(sim.data[p], vocab2), label='vocab2') plt.xlabel("Time [s]") plt.ylabel("Similarity") plt.legend(loc='best') plt.show()
    p = nengo.Probe(result.output, synapse=0.01)

    with nengo.Simulator(model) as sim:
        sim.run(0.5)

    plt.plot(sim.trange(), spa.similarity(sim.data[p], result.vocab))
    plt.xlabel("Time [s]")
    plt.ylabel("Similarity")
    plt.legend(result.vocab, loc="best")
    plt.show()
    """

with spa.Network() as model:
    stimulus = spa.Transcode('Hello', output_vocab=d)
    state = spa.State(vocab=d)
    nengo.Connection(stimulus.output, state.input)
    p = nengo.Probe(state.output, synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(0.5)

plt.plot(sim.trange(), spa.similarity(sim.data[p], state.vocab))
plt.xlabel("Time [s]")
plt.ylabel("Similarity")
plt.legend(state.vocab, loc="best")
plt.show()
def move_peg_func(t, x):
    toh.move_peg_data = spa.similarity(x, toh.pegs)
def goal_peg_func(t, x):
    toh.goal_peg_data = spa.similarity(x, toh.disks)
def focus_in_func(t, x):
    toh.focus = np.argmax(spa.similarity(x, toh.disks))
    print(toh)
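# A minimal, self-contained sketch of the spa.similarity pattern that all of
# the snippets above rely on, assuming nengo_spa is imported as `spa`. The
# vocabulary keys and dimensionality here are illustrative only, not taken
# from any of the projects above.
import numpy as np
import nengo_spa as spa

vocab = spa.Vocabulary(64)
vocab.populate('RED; BLUE; CIRCLE')

# Stand-in for probed model output: a noisy copy of one vocabulary vector.
data = vocab['RED'].v + 0.1 * np.random.randn(64)

# Dot product of the data with every vocabulary vector; 'RED' should score
# highest. Pass normalize=True for cosine similarity instead.
sims = spa.similarity(data, vocab)
print(dict(zip(vocab.keys(), np.round(sims, 2))))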