def plot_spikes_inf(self):
    plot = nengo_plot.Time(self.sim.trange())
    plot.add('Visual Buffer', spa.similarity(self.sim.data, self.vision),
             overlays=[(0.06, 'Lin Task'), (0.25, 'Test Stimulus')])
    plot.add_spikes('', self.sim.data[self.visSpikes],
                    sample_by_variance=64, cluster=True)
    plot.add('Memory - Context', spa.similarity(self.sim.data, self.context),
             overlays=[(0.085, 'Inferential Evaluation'), (0.14, 'Rule 1'),
                       (0.19, 'Rule 2'), (0.24, 'Rule 3'),
                       (0.29, 'Rule 4'), (0.35, 'Motor Routing')])
    plot.add_spikes('', self.sim.data[self.conSpikes],
                    sample_by_variance=64, cluster=True)
    plot.add('Applications', spa.similarity(self.sim.data, self.apps),
             overlays=[(0.2, 'Feature 1'), (0.25, 'Feature 2'),
                       (0.3, 'Feature 3'), (0.35, 'Feature 4')])
    plot.add_spikes('', self.sim.data[self.appSpikes],
                    sample_by_variance=64, cluster=True)
    plot.add('Coherence', self.sim.data[self.score],
             overlays=[(0.35, 'Coherence Value')])
    plot.add_spikes('', self.sim.data[self.scoSpikes], sample_by_variance=64)
    plot.add('Motor Buffer', spa.similarity(self.sim.data, self.motor),
             overlays=[(0.42, 'Positive Judgment')])
    plot.add_spikes('', self.sim.data[self.motSpikes],
                    sample_by_variance=64, cluster=True)
    plot.save('inf_raster.png')

def output_func(t, x):
    # Update the global motion command with the best-matching vocab key,
    # or clear it when nothing is similar enough; also record a heartbeat.
    global g_motion_out, g_simulator_alive
    similarity = spa.similarity(x, out_vocab.vectors)
    if np.any(similarity > 0.5):
        g_motion_out = out_vocab.keys[np.argmax(similarity)]
    else:
        g_motion_out = ''
    g_simulator_alive = time.time()

def plot(self):
    plots = 6
    fig = plt.figure(figsize=(20, 10))

    p1 = fig.add_subplot(plots, 1, 1)
    p1.plot(self.sim.trange(), spa.similarity(self.sim.data, self.vision))
    p1.set_title('Visual Buffer', fontsize='15')

    p2 = fig.add_subplot(plots, 1, 2)
    p2.plot(self.sim.trange(), spa.similarity(self.sim.data, self.context))
    p2.set_title('Task Context', fontsize='15')

    p3 = fig.add_subplot(plots, 1, 3)
    p3.plot(self.sim.trange(), self.sim.data[self.memory])
    p3.set_title('Memory', fontsize='15')

    p4 = fig.add_subplot(plots, 1, 4)
    p4.plot(self.sim.trange(), self.sim.data[self.score])
    p4.set_title('Coherence Score', fontsize='15')

    p5 = fig.add_subplot(plots, 1, 5)
    p5.plot(self.sim.trange(), spa.similarity(self.sim.data, self.decision))
    p5.set_title('Decision Buffer', fontsize='15')
    p5.set_ylim([-0.5, 2])

    p6 = fig.add_subplot(plots, 1, 6)
    p6.plot(self.sim.trange(), spa.similarity(self.sim.data, self.motor))
    p6.set_title('Motor', fontsize='15')
    p6.legend(self.motor.target.vocab.keys, fontsize=12, loc='upper center',
              bbox_to_anchor=(0.5, -0.2), ncol=14)

    fig.subplots_adjust(hspace=0.65)
    fig.savefig(os.path.join('results', str(self.probe)))
    plt.close(fig)

def plot_spikes_vis(self):
    plot = nengo_plot.Time(self.sim.trange())
    plot.add('Visual Buffer', spa.similarity(self.sim.data, self.vision),
             overlays=[(0.06, 'Posner Task'), (0.25, 'Test Stimulus')])
    plot.add_spikes('', self.sim.data[self.visSpikes],
                    sample_by_variance=64, cluster=True)
    plot.add('Memory - Context', spa.similarity(self.sim.data, self.context),
             overlays=[(0.085, 'Perceptual Evaluation'),
                       (0.14, 'Motor Routing')])
    plot.add_spikes('', self.sim.data[self.conSpikes],
                    sample_by_variance=64, cluster=True)
    plot.add('Memory - SP', spa.similarity(self.sim.data, self.memory),
             overlays=[(0.25, 'Semantic Pointer')])
    plot.add_spikes('', self.sim.data[self.memSpikes],
                    sample_by_variance=64, cluster=True)
    plot.add('Motor Buffer', spa.similarity(self.sim.data, self.motor),
             overlays=[(0.25, 'Category Label A')])
    plot.add_spikes('', self.sim.data[self.motSpikes],
                    sample_by_variance=64, cluster=True)
    plot.save('vis_raster.png')

def run_model(self):
    res = RecognitionResult()
    rng = np.random.RandomState(self.seed)
    paths, freqs = analysis.get_syllables(
        self.n_syllables, self.minfreq, self.maxfreq, rng)
    for path, freq in zip(paths, freqs):
        traj = vtl.parse_ges(path).trajectory(self.model.trial.dt)
        self.model.add_syllable(label=path2label(path), freq=freq,
                                trajectory=traj)

    # Determine and save syllable sequence
    if self.model.trial.repeat:
        seq_ix = rng.randint(len(paths), size=self.sequence_len)
    else:
        seq_ix = rng.permutation(len(paths))[:self.sequence_len]
    seq = [path2label(paths[i]) for i in seq_ix]
    res.seq = np.array(seq)

    # Determine how long to run
    simt = 0.0
    tgt_time = []
    for label in res.seq:
        syllable = self.model.syllable_dict[label]
        simt += 1. / syllable.freq
        tgt_time.append(simt)

    # Set that sequence in the model
    traj = ideal_traj(self.model, seq)
    self.model.trial.trajectory = traj

    # Save frequencies for that sequence
    res.freqs = np.array([self.model.syllables[i].freq for i in seq_ix])

    # -- Run the model
    net = self.model.build(nengo.Network(seed=self.seed))
    with net:
        p_dmps = [nengo.Probe(dmp.state[0], synapse=0.01)
                  for dmp in net.syllables]
        p_class = nengo.Probe(net.classifier, synapse=0.01)
        p_mem = nengo.Probe(net.memory.output, synapse=0.01)
    sim = nengo.Simulator(net)
    sim.run(simt)

    # Save iDMP system states
    res.dmps = np.hstack([sim.data[p_d] for p_d in p_dmps])
    res.dmp_labels = np.array([s.label for s in self.model.syllables])

    # Save working memory similarities
    res.memory = spa.similarity(sim.data[p_mem], net.vocab, True)

    # Determine classification times and labels
    t_ix, class_ix = analysis.classinfo(sim.data[p_class], res.dmps)
    res.class_time = sim.trange()[t_ix]
    res.class_labels = np.array([path2label(paths[ix]) for ix in class_ix])

    # Calculate accuracy / timing metrics
    recinfo = [(t, l) for t, l in zip(res.class_time, res.class_labels)]
    tgtinfo = [(t, l) for t, l in zip(tgt_time, res.seq)]
    res.acc, res.n_sub, res.n_del, res.n_ins = (
        analysis.cl_accuracy(recinfo, tgtinfo))
    res.tdiff_mean, res.tdiff_var = analysis.cl_timing(recinfo, tgtinfo)
    log("Accuracy: %.3f" % res.acc)

    # Determine if memory representation is correct
    tgt_time = np.asarray(tgt_time)
    mem_times = (tgt_time[1:] + tgt_time[:-1]) * 0.5
    mem_ix = (mem_times / self.model.trial.dt).astype(int)
    mem_class = np.argmax(res.memory[mem_ix], axis=1)
    slabels = [s.label for s in self.model.syllables]
    actual = np.array([slabels.index(lbl) for lbl in res.seq[:-1]])
    res.memory_acc = np.mean(mem_class == actual)
    return res

plt.subplot(5, 1, 1)
plt.plot(sim.trange(), model.similarity(sim.data, color_in))
plt.legend(model.get_output_vocab('color_in').keys, fontsize='x-small')
plt.ylabel("color")

plt.subplot(5, 1, 2)
plt.plot(sim.trange(), model.similarity(sim.data, shape_in))
plt.legend(model.get_output_vocab('shape_in').keys, fontsize='x-small')
plt.ylabel("shape")

plt.subplot(5, 1, 3)
plt.plot(sim.trange(), model.similarity(sim.data, cue))
plt.legend(model.get_output_vocab('cue').keys, fontsize='x-small')
plt.ylabel("cue")

plt.subplot(5, 1, 4)
for pointer in ['RED * CIRCLE', 'BLUE * SQUARE']:
    plt.plot(sim.trange(), vocab.parse(pointer).dot(sim.data[conv].T),
             label=pointer)
plt.legend(fontsize='x-small')
plt.ylabel("convolved")

plt.subplot(5, 1, 5)
plt.plot(sim.trange(), spa.similarity(sim.data[out], vocab))
plt.legend(model.get_output_vocab('out').keys, fontsize='x-small')
plt.ylabel("output")
plt.xlabel("time [s]");

# The last plot shows that the output is most similar to the semantic pointer
# bound to the current cue. For example, when `RED` and `CIRCLE` are being
# convolved and the cue is `CIRCLE`, the output is most similar to `RED`.

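# A minimal stand-alone sketch (not from the source; the vector names and FFT
# helpers below are illustrative) of the binding/unbinding algebra behind that
# observation: binding is circular convolution, and convolving the bound pair
# with the approximate inverse (involution) of the cue recovers the other item.
import numpy as np


def cconv(a, b):
    """Circular convolution via FFT."""
    return np.fft.irfft(np.fft.rfft(a) * np.fft.rfft(b), n=len(a))


def involution(a):
    """Approximate inverse used for unbinding: [a[0], a[-1], a[-2], ...]."""
    return np.concatenate(([a[0]], a[:0:-1]))


rng = np.random.RandomState(0)
D = 64
red = rng.randn(D) / np.sqrt(D)
circle = rng.randn(D) / np.sqrt(D)

bound = cconv(red, circle)                    # RED * CIRCLE
recovered = cconv(bound, involution(circle))  # (RED * CIRCLE) * ~CIRCLE

print(np.dot(recovered, red))     # high: output is most similar to RED
print(np.dot(recovered, circle))  # low: not similar to the cue itself
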
sim = nengo.Simulator(model, dt=dt)
sim.run(T)
t = sim.trange()

if plot_res:
    import matplotlib.pyplot as plt

    # figure out how to put these into a subplot
    plt.figure()
    plt.title("Error")
    plt.plot(t, np.linalg.norm(sim.data[p_error], axis=1))

    plt.figure()
    plt.title("Keys_1")
    plt.plot(t, spa.similarity(sim.data[p_keys][:, :D], vocab))
    plt.legend(vocab.keys, loc='best')

    plt.figure()
    plt.title("Keys_2")
    plt.plot(t, spa.similarity(sim.data[p_keys][:, D:], vocab))
    plt.legend(vocab.keys, loc='best')

    plt.figure()
    plt.title("Result")
    plt.plot(t, spa.similarity(sim.data[p_recall], vocab))
    plt.legend(vocab.keys, loc='best')
    plt.ylim(-1.5, 1.5)

    plt.figure()
    plt.title("Actual Answer")

val_res[seed_val] = sim.data[p_values]
error_res[seed_val] = np.sum(np.abs(sim.data[p_error]), axis=1)
recall_res[seed_val] = sim.data[p_recall]

if plot_res:
    import matplotlib.pyplot as plt

    # figure out how to put these into a subplot
    # (see the subplot sketch after this snippet)
    plt.figure()
    plt.title("Error")
    plt.plot(np.linalg.norm(sim.data[p_error], axis=1))

    plt.figure()
    plt.title("Result")
    plt.plot(spa.similarity(sim.data[p_recall], vocab))
    plt.legend(vocab.keys, loc='best')
    plt.ylim(-0.5, 1.1)

    plt.figure()
    plt.title("Actual Answer")
    plt.plot(spa.similarity(sim.data[p_values], vocab))
    plt.ylim(-0.5, 1.1)

    plt.show()
    ipdb.set_trace()

# I should make a wrapper for doing this quickly
base_name = "multpred2"
np.savez_compressed("data/%s_learning_data" % base_name,
                    p_keys=key_res, p_recall=recall_res,
                    p_error=error_res, p_values=val_res)
np.savez_compressed("data/%s_learning_vocab" % base_name,
                    keys=vocab.keys, vecs=vocab.vectors)

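# A minimal sketch (stand-in arrays, not the probed data from the source) of
# the subplot layout the TODO above asks for: the separate plt.figure() calls
# can become one figure with a shared time axis via plt.subplots.
import matplotlib.pyplot as plt
import numpy as np

t = np.linspace(0, 1, 1000)            # stand-in for sim.trange()
error_norm = np.exp(-3 * t)            # stand-in for the probed error norm
recall_sim = np.random.rand(1000, 3)   # stand-in for spa.similarity(...)

fig, (ax_err, ax_rec) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
ax_err.plot(t, error_norm)
ax_err.set_title("Error")
ax_rec.plot(t, recall_sim)
ax_rec.set_title("Result")
ax_rec.set_ylim(-0.5, 1.1)
ax_rec.set_xlabel("time [s]")
fig.tight_layout()
plt.show()
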
# Calculate the error and use it to drive the PES rule
# (a stand-alone PES sketch follows this snippet)
nengo.Connection(env_value, error, transform=-1, synapse=None)
nengo.Connection(recall, error, synapse=None)
nengo.Connection(error, het_mem.out_conn.learning_rule)

# Setup probes
p_keys = nengo.Probe(env_key, synapse=None, sample_every=sample_every)
p_values = nengo.Probe(env_value, synapse=None, sample_every=sample_every)
p_error = nengo.Probe(error, synapse=0.01)
p_out = nengo.Probe(het_mem.output, synapse=0.01, sample_every=sample_every)
p_recall = nengo.Probe(recall, synapse=None, sample_every=sample_every)

sim = nengo.Simulator(model, dt=dt)
sim.run(period)

pd_res.append([
    get_q_text(sim.data[p_keys][-1], vocab),
    number_dict[sp_text(sim.data[p_values][-1], vocab)],
    np.sum(np.abs(sim.data[p_error]), axis=1)[-1],
    np.max(spa.similarity(sim.data[p_recall], vocab)),
])
print("Finished run %s of test %s" % (seed_val, t_n))

# Save as Pandas dataframe
base_name = "multpred2"
df = pd.DataFrame(pd_res, columns=pd_columns)
hdf = pd.HDFStore("results/%s_%s.h5"
                  % (base_name, datetime.datetime.now().strftime("%I_%M_%S")))
df.to_hdf(hdf, base_name)
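
# A minimal, self-contained sketch (assumed network; het_mem and the probes
# above are not reproduced here) of the PES pattern used above: an error
# population projects into conn.learning_rule, and the decoders of the learned
# connection are adjusted online to reduce that error.
import numpy as np
import nengo

with nengo.Network(seed=0) as net:
    stim = nengo.Node(lambda t: np.sin(2 * np.pi * t))
    pre = nengo.Ensemble(100, dimensions=1)
    post = nengo.Ensemble(100, dimensions=1)
    error = nengo.Ensemble(100, dimensions=1)

    nengo.Connection(stim, pre)
    # Start from the zero function; PES learns to reproduce the stimulus.
    conn = nengo.Connection(pre, post, function=lambda x: 0.0,
                            learning_rule_type=nengo.PES(learning_rate=1e-4))

    # error = post - target, and the error drives the learning rule
    nengo.Connection(post, error)
    nengo.Connection(stim, error, transform=-1)
    nengo.Connection(error, conn.learning_rule)

with nengo.Simulator(net) as sim:
    sim.run(1.0)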