def __init__(self, dimensions, similarity, seed=None):
    self.items = []
    self.rng = np.random.RandomState(seed=seed)
    self.vocab = spa.Vocabulary(dimensions)
    self.dimensions = dimensions
    self.similarity = similarity
    self.zero = np.zeros(dimensions)
def test_run(Simulator, seed):
    rng = np.random.RandomState(seed)
    vocab = spa.Vocabulary(16, rng=rng)

    with spa.SPA(seed=seed, vocabs=[vocab]) as model:
        model.bind = spa.Bind(dimensions=16)

        def inputA(t):
            if 0 <= t < 0.1:
                return "A"
            else:
                return "B"

        model.input = spa.Input(bind_A=inputA, bind_B="A")

    bind, vocab = model.get_module_output("bind")

    with model:
        p = nengo.Probe(bind, "output", synapse=0.03)

    with Simulator(model) as sim:
        sim.run(0.2)

    error = rmse(vocab.parse("B*A").v, sim.data[p][-1])
    assert error < 0.1

    error = rmse(vocab.parse("A*A").v, sim.data[p][100])
    assert error < 0.1
def add_vocabularies(vocab, name1, name2):
    """Return a new vocabulary with the semantic pointers of two vocabularies.

    vocab: mapping from vocabulary names to vocabularies
    name1: name of the first vocabulary
    name2: name of the second vocabulary
    """
    vocab1, vocab2 = vocab[name1], vocab[name2]
    d1, d2 = vocab1.dimensions, vocab2.dimensions
    assert d1 == d2
    new_vocab = spa.Vocabulary(d1)  # TODO

    # first add items from vocab1, prefixed with the vocabulary name
    for key1 in vocab1.keys:
        vec1 = vocab1[key1].v
        sp1 = name1.capitalize() + '_' + key1
        new_vocab.add(sp1, vec1)

    # and then from vocab2
    for key2 in vocab2.keys:
        vec2 = vocab2[key2].v
        sp2 = name2.capitalize() + '_' + key2
        new_vocab.add(sp2, vec2)

    return new_vocab
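# A minimal usage sketch for add_vocabularies (the `vocabs` dict and its keys
# below are hypothetical, not from the original code): both source
# vocabularies must share the same dimensionality, and the merged keys come
# back prefixed with the capitalized vocabulary name.
import nengo.spa as spa

vocabs = {'colors': spa.Vocabulary(32), 'shapes': spa.Vocabulary(32)}
vocabs['colors'].parse('RED+BLUE')
vocabs['shapes'].parse('CIRCLE')

merged = add_vocabularies(vocabs, 'colors', 'shapes')
assert 'Colors_RED' in merged.keys and 'Shapes_CIRCLE' in merged.keys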
def create_spa_vocabulary(experiment, randomize=True, D=128):
    """Read the experiment specific data from a dictionary `experiment` and
    create a dictionary with a SPA vocabulary for every ensemble. If randomize
    is True, random vectors instead of orthogonal ones will be created.
    """
    networks = experiment.vocab.keys()
    vocab = {}

    for network in networks:
        try:
            words = experiment.vocab[network]
        except KeyError:
            raise Exception(("Vocabulary for network %s undefined in the "
                             "experiment %s.") % (network, experiment['name']))

        # orthogonal vectors E, P and A for the affect network
        rand = True
        if 'affect' in network:
            rand = False

        vocab[network] = spa.Vocabulary(D, randomize=rand)
        for word in words:
            vocab[network].parse(word)
        vocab[network].add(
            'NO_EMOTION' if network == 'executive' else 'EMPTY', np.zeros(D))

    return vocab
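# A hypothetical usage sketch (none of these names are from the original
# code): create_spa_vocabulary reads `experiment.vocab` as an attribute but
# `experiment['name']` by indexing, so a stand-in must support both.
class Experiment(dict):
    """Dict with a .vocab attribute, matching what the function expects."""

    def __init__(self, name, vocab):
        super(Experiment, self).__init__(name=name)
        self.vocab = vocab


exp = Experiment('demo', {'affect': ['E', 'P', 'A'], 'executive': ['TASK']})
vocabs = create_spa_vocabulary(exp, D=64)
assert 'NO_EMOTION' in vocabs['executive'].keys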
def test_dimension_exception():
    with pytest.raises(Exception):
        with spa.SPA() as model:
            vocab = spa.Vocabulary(16)
            model.state = spa.State(dimensions=12, vocab=vocab)

    # 12 dimensions with subdimensions=3 is valid, so this must not raise
    with spa.SPA() as model:
        model.state = spa.State(dimensions=12, subdimensions=3)
def test_exception():
    with pytest.raises(Exception):
        with spa.SPA() as model:
            vocab = spa.Vocabulary(16)
            model.buffer = spa.Buffer(dimensions=12, vocab=vocab)

    # 12 dimensions with subdimensions=3 is valid, so this must not raise
    with spa.SPA() as model:
        model.buffer = spa.Buffer(dimensions=12, subdimensions=3)
def test_predefined_vocabs(allclose):
    D = 64
    with spa.SPA() as model:
        model.vocab1 = spa.Vocabulary(D)
        model.vocab1.parse("A+B+C")
        model.vocab2 = spa.Vocabulary(D)
        model.vocab2.parse("A+B+C")
        model.buffer1 = spa.Buffer(dimensions=D, vocab=model.vocab1)
        model.buffer2 = spa.Buffer(dimensions=D, vocab=model.vocab2)

        def input(t):
            if t < 0.1:
                return "A"
            elif t < 0.2:
                return "B"
            else:
                return "C"

        model.input = spa.Input(buffer1=input, buffer2=input)

    a1 = model.input.input_nodes["buffer1"].output(t=0.0)
    b1 = model.input.input_nodes["buffer1"].output(t=0.1)
    c1 = model.input.input_nodes["buffer1"].output(t=0.2)
    a2 = model.input.input_nodes["buffer2"].output(t=0.0)
    b2 = model.input.input_nodes["buffer2"].output(t=0.1)
    c2 = model.input.input_nodes["buffer2"].output(t=0.2)

    assert allclose(a1, model.vocab1.parse("A").v)
    assert allclose(b1, model.vocab1.parse("B").v)
    assert allclose(c1, model.vocab1.parse("C").v)
    assert allclose(a2, model.vocab2.parse("A").v)
    assert allclose(b2, model.vocab2.parse("B").v)
    assert allclose(c2, model.vocab2.parse("C").v)

    # the two vocabularies are independent, so their pointers should differ
    assert np.dot(a1, a2) < 0.95
    assert np.dot(b1, b2) < 0.95
    assert np.dot(c1, c2) < 0.95
def posner(cls, dimensions, probe, stimuli, seed, raster=False):
    # Initializes an instance of the model that performs the Posner task
    main_voc = spa.Vocabulary(dimensions)
    feat_voc = spa.Vocabulary(dimensions)
    weight_voc = spa.Vocabulary(dimensions)
    label_voc = spa.Vocabulary(dimensions)
    stimuli.threshold = 0.5

    if probe not in stimuli.train_vectors:
        main_voc.add(probe, stimuli.test_vectors[probe])

    ConceptModel.add_to(main_voc, stimuli.train_vectors)
    ConceptModel.add_to(main_voc, stimuli.label_vectors)
    ConceptModel.add_to(feat_voc, stimuli.label_vectors)
    ConceptModel.add_to(weight_voc, stimuli.label_vectors)
    ConceptModel.add_to(label_voc, stimuli.label_vectors)

    return ConceptModel(probe, stimuli, main_voc, feat_voc, weight_voc,
                        label_voc, seed, raster)
def model(self, p):
    vocab = spa.Vocabulary(p.D)
    for i in range(p.M):
        vocab.parse('M%d' % i)

    order = np.arange(p.n_tests)
    np.random.shuffle(order)

    model = spa.SPA()
    with model:
        model.cue = spa.State(p.D, vocab=vocab)
        for ens in model.cue.all_ensembles:
            ens.neuron_type = nengo.Direct()

        model.accum = spa.State(p.D, vocab=vocab, feedback=p.feedback)
        model.recall = spa.AssociativeMemory(vocab, wta_output=True,
                                             threshold_output=True)
        model.recalled = spa.State(p.D, vocab=vocab)
        for ens in model.recalled.all_ensembles:
            ens.neuron_type = nengo.Direct()

        nengo.Connection(model.cue.output, model.accum.input,
                         transform=p.accum)
        nengo.Connection(model.recall.output, model.recalled.input)
        nengo.Connection(model.accum.output, model.recall.input)

        model.same = nengo.Ensemble(n_neurons=100, dimensions=1,
                                    encoders=nengo.dists.Choice([[1]]),
                                    intercepts=nengo.dists.Uniform(0.3, 1))

        # the Product network computes the elementwise product of cue and
        # recalled; summing its D outputs via [[1] * D] yields the dot product
        model.dot = nengo.networks.Product(n_neurons=200, dimensions=p.D)
        nengo.Connection(model.cue.output, model.dot.A)
        nengo.Connection(model.recalled.output, model.dot.B)
        nengo.Connection(model.dot.output, model.same,
                         transform=[[1] * p.D])

        def stim(t):
            index = int(t / p.T_test)
            index2 = order[index % len(order)]
            if index % 2 == 0:
                return 'X%d' % (index2 % p.M)
            else:
                return 'M%d' % (index2 % p.M)

        model.input = spa.Input(cue=stim)

        self.p_same = nengo.Probe(model.same, synapse=0.01)
    return model
def build_cleanup(self, net):
    net.vocab = spa.Vocabulary(dimensions=self.cleanup.dimensions)
    net.vocab.parse(" + ".join(s.label for s in self.syllables))
    net.cleanup = spa.AssociativeMemory(net.vocab,
                                        wta_output=True,
                                        threshold_output=True,
                                        **self.cleanup.kwargs())
    net.memory = spa.State(dimensions=self.cleanup.dimensions,
                           vocab=net.vocab,
                           **self.memory.kwargs())
    nengo.Connection(net.cleanup.output, net.memory.input)
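# A standalone sketch (names here are illustrative, not from the original
# code) of what the cleanup above does: an associative memory with
# wta_output=True maps a noisy input pointer onto the single best-matching
# stored pointer before it is fed into the memory state.
import nengo
import nengo.spa as spa

demo_vocab = spa.Vocabulary(32)
demo_vocab.parse('BA + DA + GA')
with spa.SPA() as demo:
    demo.cleanup = spa.AssociativeMemory(demo_vocab, wta_output=True)
    demo.memory = spa.State(32, vocab=demo_vocab, feedback=1.0)
    nengo.Connection(demo.cleanup.output, demo.memory.input)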
def __init__(self):
    D = 64
    self.vocab1 = spa.Vocabulary(D)
    self.vocab1.parse('A+B+C')
    self.vocab2 = spa.Vocabulary(D)
    self.vocab2.parse('A+B+C')
    self.buffer1 = spa.Buffer(dimensions=D, vocab=self.vocab1)
    self.buffer2 = spa.Buffer(dimensions=D, vocab=self.vocab2)

    def input(t):
        if t < 0.1:
            return 'A'
        elif t < 0.2:
            return 'B'
        else:
            return 'C'

    self.input = spa.Input(buffer1=input, buffer2=input)
def test_spa_vocab():
    # create a model without a vocab and check that it is empty
    model = spa.SPA()
    assert model._default_vocabs == {}

    # create a model with a vocab and check that it's filled
    va = spa.Vocabulary(16)
    va.parse("PANTS")
    vb = spa.Vocabulary(32)
    vb.parse("SHOES")
    model = spa.SPA(vocabs=[va, vb])
    assert model._default_vocabs[16].keys == ["PANTS"]
    assert model._default_vocabs[32].keys == ["SHOES"]

    # warning on vocabs with duplicate dimensions
    vc = spa.Vocabulary(16)
    vc.parse("SOCKS")
    with warns(UserWarning):
        model = spa.SPA(vocabs=[va, vb, vc])
    assert model._default_vocabs[16].keys == ["SOCKS"]
    assert model._default_vocabs[32].keys == ["SHOES"]
def get_binding_data(n_inputs, n_pairs, dims, seed, t_int, t_mem, dt=0.001):
    int_steps = int(t_int / dt)
    mem_steps = int(t_mem / dt)
    n_steps = int_steps * n_pairs + mem_steps

    rng = np.random.RandomState(seed)
    vocab = spa.Vocabulary(dimensions=dims, rng=rng, max_similarity=1)

    # initialize arrays for input and output trajectories
    roles = np.zeros((n_inputs, n_steps, dims))
    fills = np.zeros((n_inputs, n_steps, dims))
    cues = np.zeros((n_inputs, n_steps, dims))
    binding = np.zeros((n_inputs, n_steps, dims))
    memory = np.zeros((n_inputs, n_steps, dims))
    output = np.zeros((n_inputs, n_steps, dims))

    # iterate through examples to be generated, fill arrays
    for n in range(n_inputs):
        role_names = ["ROLE_%d_%d" % (n, i) for i in range(n_pairs)]
        filler_names = ["FILLER_%d_%d" % (n, i) for i in range(n_pairs)]

        # each role/filler pair is presented for t_int seconds
        for i in range(n_pairs):
            roles[n, i * int_steps:(i + 1) * int_steps] = vocab.parse(
                role_names[i]).v
            fills[n, i * int_steps:(i + 1) * int_steps] = vocab.parse(
                filler_names[i]).v
            binding[n, i * int_steps:(i + 1) * int_steps] = vocab.parse(
                "%s*%s" % (role_names[i], filler_names[i])).v

        # randomly select a cue
        cue_idx = rng.randint(n_pairs)

        # cue is presented during the memorization period
        cues[n, -mem_steps:, :] = vocab[role_names[cue_idx]].v

        # the goal is to output the associated filler during the
        # memorization phase
        # note: we use nan for the target prior to the memorization phase,
        # to indicate that it doesn't matter what the network output is
        output[n, -mem_steps:, :] = vocab[filler_names[cue_idx]].v
        output[n, :-mem_steps, :] = np.nan

    memory[...] = np.cumsum(binding, axis=1) * dt / t_int

    return roles, fills, cues, binding, memory, output, vocab
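# A minimal usage sketch (values below are arbitrary): two training examples,
# each presenting three role/filler pairs for 0.1 s followed by a 0.2 s
# memorization period, so every trajectory is 100 * 3 + 200 = 500 steps long.
roles, fills, cues, binding, memory, output, vocab = get_binding_data(
    n_inputs=2, n_pairs=3, dims=64, seed=0, t_int=0.1, t_mem=0.2)
assert roles.shape == (2, 500, 64)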
def murphy(cls, dimensions, probe, stimuli, weights, count, seed,
           raster=False):
    # Initializes an instance of the model that performs the Murphy task
    features = sorted(
        [w for w in stimuli.features.keys() if str(count) in w])
    probe = '%d*A%d+%d*B%d+%d*C%d+%d*D%d' % \
        (probe[0], count, probe[1], count, probe[2], count, probe[3], count)
    stimuli.memory = 'R1*A%d+R2*B%d+R3*C%d+R4*D%d' % \
        (count, count, count, count)

    main_voc = spa.Vocabulary(dimensions)
    feat_voc = spa.Vocabulary(dimensions)
    weight_voc = spa.Vocabulary(dimensions)
    label_voc = spa.Vocabulary(dimensions)

    for label in stimuli.label_vectors:
        main_voc.add(label, stimuli.label_vectors[label])
    for feature in features:
        main_voc.add(feature, stimuli.features[feature])
        feat_voc.add(feature, stimuli.features[feature])
        weight_voc.add(
            feature,
            stimuli.features[feature] * 1.5 * weights[features.index(feature)])
    ConceptModel.add_to(label_voc, stimuli.label_vectors)
    stimuli.threshold = 0.724

    return ConceptModel(probe, stimuli, main_voc, feat_voc, weight_voc,
                        label_voc, seed, raster)
def brooks(cls, dimensions, probe, stimuli, seed, raster=False):
    # Initializes an instance of the model that performs the Brooks task
    main_voc = spa.Vocabulary(dimensions)
    feat_voc = spa.Vocabulary(dimensions)
    weight_voc = spa.Vocabulary(dimensions)
    label_voc = spa.Vocabulary(dimensions)
    stimuli.threshold = 0.5

    ConceptModel.add_to(main_voc, stimuli.stimuli_A)
    ConceptModel.add_to(main_voc, stimuli.label_vectors)
    ConceptModel.add_to(feat_voc, stimuli.label_vectors)
    ConceptModel.add_to(weight_voc, stimuli.label_vectors)
    ConceptModel.add_to(label_voc, stimuli.label_vectors)

    if probe not in stimuli.stimuli_A.keys():
        main_voc.add(probe, stimuli.test_vectors[probe])

    x = stimuli.labelled_stimuli[probe]
    print('Correct Labels')
    print(x[0], x[1])

    return ConceptModel(probe, stimuli, main_voc, feat_voc, weight_voc,
                        label_voc, seed, raster)
def __init__(self, seed, d=64, beta=0.6, beta_stim=1., n=10, dt=0.001,
             stimulus_gen=None):
    self.seed = seed
    self.d = d
    self.beta = beta
    self.n = n
    self.dt = dt
    self.init_phase = 0.5

    self.vocab = spa.Vocabulary(d, rng=np.random.RandomState(seed=seed))
    # take the first n context vectors from the stimulus generator
    self.recalled_ctxs = [
        x for i, x in zip(range(n), stimulus_vectors(self.vocab, beta_stim))
    ]
# first item, and classify it using the accumulator. If it is not recognized,
# it will send NO to the motor system. If it is recognized, then it will
# change attention to W2, reset the accumulator, and classify the next word.
# If that word is recognized, it will output YES, and if it is not recognized
# it will output NO.
#
# You can change what words are presented by changing the "words" variable.

import nengo
import nengo.spa as spa
import numpy as np

D = 16

# define the known words
mem_vocab = spa.Vocabulary(D)
mem_vocab.parse('A+B+C+D+E+F')

# we'll use this to reset the accumulated evidence to zero
reset_vocab = spa.Vocabulary(D)
reset_vocab.parse('EVIDENCE')

# here are the valid motor responses
motor_vocab = spa.Vocabulary(D)
motor_vocab.parse('YES+NO+WAIT')

# and the valid attention signals
attend_vocab = spa.Vocabulary(D)
attend_vocab.parse('W1+W2')

# These are the two words to be presented
import nengo
import nengo.spa as spa

D = 16
N = 500

model = spa.SPA(label="circular convolution")
with model:
    vocab = spa.Vocabulary(D)
    vocab.parse('BLUE')

    a = nengo.Ensemble(N, D)
    b = nengo.Ensemble(N, D)
    c = nengo.Ensemble(N, D)
    d = nengo.Ensemble(N, D)
    e = nengo.Ensemble(N, D)
    model.config[a].vocab = vocab
    model.config[b].vocab = vocab

    circonv = nengo.networks.CircularConvolution(100, D)
    circonv2 = nengo.networks.CircularConvolution(100, D, invert_b=True)

    nengo.Connection(a, circonv.A)
    nengo.Connection(b, circonv.B)
    nengo.Connection(circonv.output, d)
    nengo.Connection(d, circonv2.A)
    nengo.Connection(c, circonv2.B)
    nengo.Connection(circonv2.output, e)
    angle, dist = self.compute_angle_and_distance(obj)
    color = color_map[obj.color]
    if dist < 1:
        dist = 1.0
    r = 1.0 / dist
    where.extend([np.sin(angle) * r, np.cos(angle) * r])
    what.extend([color])
    self.n_objects += 1
    return what + where


D = 64
dim = 16

vocab = spa.Vocabulary(D)
food_vocab = spa.Vocabulary(D)
goal_vocab = spa.Vocabulary(D)
inter_vocab = spa.Vocabulary(D)
turn_vocab = spa.Vocabulary(dim)

model = spa.SPA(label="Find Food")
with model:

    def starter(t):
        if t < 0.05:
            return "START_FIND_FOOD"
        else:
            return "0"
import nengo.spa as spa

vocab = spa.Vocabulary(512)

data = []
for i in range(100):
    data.extend(vocab.parse('A%d' % i).v)

import pylab
pylab.hist(data, 50)
pylab.show()
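# What to expect from the histogram above: each element of a random
# unit-length 512-D semantic pointer is approximately Gaussian with mean 0
# and variance 1/D, so the sample standard deviation should be close to
# 1 / sqrt(512) ~= 0.044. A quick check of that expectation:
import numpy as np
print(np.std(data), 1.0 / np.sqrt(512))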
import nengo
import nengo.spa as spa

D = 256
D_space = 256

vocab_space = spa.Vocabulary(D_space)

model = spa.SPA()
with model:
    model.rule = spa.State(D)
    model.objs = spa.State(D_space, feedback=1, vocab=vocab_space)
    model.status = spa.State(D)

    actions = spa.Actions(
        'dot(rule, OBJ1*S+BELOW*V+OBJ2*O)*0.4 +'
        '(dot(objs, OBJ1*Y) - dot(objs, OBJ2*Y))+0.5 --> '
        'status=BAD, '
        'objs=-0.1*Y*OBJ1 + 0.1*Y*OBJ2',

        'dot(rule, OBJ1*S+BELOW*V+OBJ2*O)*0.4 - '
        '(dot(objs, OBJ1*Y) - dot(objs, OBJ2*Y))-0.5 --> '
        'status=GOOD',

        'dot(rule, OBJ1*S+LEFT*V+OBJ2*O)*0.4 +'
        '(dot(objs, OBJ1*X) - dot(objs, OBJ2*X))+0.3 --> '
        'status=BAD, '
        'objs=-0.1*X*OBJ1 + 0.1*X*OBJ2',

        'dot(rule, OBJ1*S+LEFT*V+OBJ2*O)*0.4 - '
        '(dot(objs, OBJ1*X) - dot(objs, OBJ2*X))-0.3 --> '
def __init__(self, mapping, D_category=16, D_items=64, threshold=0.4,
             learning_rate=1e-4):
    # The related items are 0-1, 2-3, 4-5, 6-7
    sim_const = 0.9
    sim_matrices = []
    for m in mapping:
        s = len(mapping[m])
        mat = np.random.rand(s, s) / 1000
        for r in range(0, s):
            mat[r][r] = 1
        for i in range(0, s - 1, 2):
            mat[i][i + 1] = sim_const
            mat[i + 1][i] = sim_const
        sim_matrices.append(mat)

    model = spa.SPA()
    self.model = model
    self.mapping = mapping
    self.vocab_category = spa.Vocabulary(D_category)
    self.vocab_items = spa.Vocabulary(D_items)

    # allocate vectors for the category names and their items
    cat_keys = list(mapping.keys())
    for cat in range(len(cat_keys)):
        self.vocab_category.parse(cat_keys[cat])
        items = mapping[cat_keys[cat]]
        for val in range(len(items)):
            item = items[val]
            if val % 2 == 0:
                Temp = self.vocab_items.parse(item)
            else:
                # look up the similarity to the paired item in sim_matrices
                related = sim_matrices[cat][val][val - 1]
                w = 1.0 / related - 1
                v = Temp + w * self.vocab_items.create_pointer()
                v.normalize()
                self.vocab_items.add(item, v)

    with model:
        model.category = spa.State(D_category, vocab=self.vocab_category)
        model.items = spa.State(D_items, vocab=self.vocab_items)

        def learned(x):
            cats = np.dot(self.vocab_category.vectors, x)
            # take the category onto which x projects most strongly
            best_index = np.argmax(cats)
            if cats[best_index] < threshold:
                return self.vocab_items.parse('0').v
            else:
                # generate the sum vector of the category's items
                k = self.vocab_category.keys[best_index]
                total = '+'.join(self.mapping[k])
                v = self.vocab_items.parse(total).v
                return v / (2 * np.linalg.norm(v))

        c = nengo.Connection(
            model.category.all_ensembles[0],
            model.items.input,
            function=learned,
            learning_rule_type=nengo.PES(learning_rate=learning_rate))

        model.error = spa.State(D_items, vocab=self.vocab_items)
        nengo.Connection(model.items.output, model.error.input)
        nengo.Connection(model.error.output, c.learning_rule)

        # nengo.Node(self.stim_category) wraps what is presumably a method
        # defined on this class; the assignment then rebinds the name to
        # the Node itself
        self.stim_category_value = np.zeros(D_category)
        self.stim_category = nengo.Node(self.stim_category)
        nengo.Connection(self.stim_category, model.category.input,
                         synapse=None)

        self.stim_correct_value = np.zeros(D_items)
        self.stim_correct = nengo.Node(self.stim_correct)
        nengo.Connection(self.stim_correct, model.error.input,
                         synapse=None, transform=-1)

        self.stim_stoplearn_value = np.zeros(1)
        self.stim_stoplearn = nengo.Node(self.stim_stoplearn)
        for ens in model.error.all_ensembles:
            nengo.Connection(self.stim_stoplearn, ens.neurons,
                             synapse=None,
                             transform=-10 * np.ones((ens.n_neurons, 1)))

        self.stim_justmemorize_value = np.zeros(1)
        self.stim_justmemorize = nengo.Node(self.stim_justmemorize)
        for ens in model.items.all_ensembles:
            nengo.Connection(self.stim_justmemorize, ens.neurons,
                             synapse=None,
                             transform=-10 * np.ones((ens.n_neurons, 1)))

        self.probe_items = nengo.Probe(model.items.output, synapse=0.01)

    self.sim = nengo.Simulator(self.model)
learning = True
initialized = False  # True
learning_rate = 1e-5  # 5e-6

# Time between state transitions
time_interval = 0.1  # 0.5

states = ['S0', 'S1', 'S2']
actions = ['L', 'R']

n_sa_neurons = DIM * 2 * 15  # number of neurons in the state+action population
n_prod_neurons = DIM * 50  # number of neurons in the product network

# Set all vectors to be orthogonal for now (easy debugging)
vocab = spa.Vocabulary(dimensions=DIM, randomize=False)
# TODO: these vectors might need to be chosen in a smarter way
for sp in states + actions:
    vocab.parse(sp)


class AreaIntercepts(nengo.dists.Distribution):
    dimensions = nengo.params.NumberParam('dimensions')
    base = nengo.dists.DistributionParam('base')

    def __init__(self, dimensions, base=nengo.dists.Uniform(-1, 1)):
        super(AreaIntercepts, self).__init__()
        self.dimensions = dimensions
        self.base = base
def model(self, p):
    vis_items = ['FATIGUE', 'WHISKEY']
    vis_vocab = spa.Vocabulary(p.D)
    self.vis_items = vis_items
    self.vis_vocab = vis_vocab

    result_vocab_items = ['SAME']
    input_items = ['PUSH']
    action_items = ['F1']
    self.result_vocab_items = result_vocab_items
    self.input_items = input_items
    self.action_items = action_items

    ##### Vision and motor system #########
    import vision_system as v
    import motor_system as m
    reload(v)
    reload(m)

    directory = '/home/stacy/github/visual-categorization/assoc_recog_s/images/'
    image_list = v.load_images(directory, items=vis_items)
    output_list = v.vector_gen_function(vis_items, vocab=vis_vocab)
    self.directory = directory
    self.image_list = image_list
    self.output_list = output_list

    model = spa.SPA(label='MAIN')
    with model:
        model.vision_system = v.make_vision_system(
            image_list,
            output_list,
            n_neurons=500,
            AIT_V1_strength=p.AIT_V1_strength,
            AIT_r_transform=p.AIT_r_transform,
            V1_r_transform=p.V1_r_transform)

        model.concept = spa.State(p.D, vocab=vis_vocab)
        nengo.Connection(model.vision_system.AIT, model.concept.input)

        model.compare = spa.Compare(p.D)
        model.wm = spa.State(p.D, vocab=vis_vocab)
        model.result = spa.State(p.D, feedback=p.result_feedback)

        nengo.Connection(model.concept.output, model.compare.inputA,
                         synapse=0.01)
        nengo.Connection(model.wm.output, model.compare.inputB)

        vocab = model.get_input_vocab('result')
        nengo.Connection(
            model.compare.output,
            model.result.input,
            transform=p.compare_to_result_strength *
            np.array([vocab.parse('SAME').v]).T)

        def result_to_motor(in_vocab, out_vocab):
            mapping = np.zeros((p.D, p.D))
            for i in range(len(input_items)):
                mapping += np.outer(
                    in_vocab.parse(result_vocab_items[i]).v,
                    out_vocab.parse(input_items[i]).v)
            transform = mapping.T
            return transform

        model.motor_system = m.make_motor_system(
            input_items,
            action_items,
            motor_feedback=p.motor_feedback,
            motor_transform=p.motor_transform,
            finger_feedback=p.finger_feedback,
            motor_to_fingers_strength=p.motor_to_fingers_strength)

        nengo.Connection(model.result.output,
                         model.motor_system.motor_input.input,
                         transform=result_to_motor(
                             vocab, model.motor_system.motor_vocab))

        def present_func(t):
            if t < 1:
                index = 0
            else:
                index = 1
            return image_list[index]

        stim = nengo.Node(present_func)
        nengo.Connection(stim, model.vision_system.presentation_node)

        stim_wm = nengo.Node(model.get_input_vocab('wm').parse('FATIGUE').v)
        nengo.Connection(stim_wm, model.wm.input)

        self.V1_probe = nengo.Probe(model.vision_system.V1)
        self.AIT_probe = nengo.Probe(model.vision_system.AIT, synapse=0.005)
        self.PFC_probe = nengo.Probe(model.compare.output, synapse=0.005)
        self.PMC_probe = nengo.Probe(model.result.output, synapse=0.005)
        self.MC_probe = nengo.Probe(model.motor_system.motor.output,
                                    synapse=0.005)
        self.finger_probe = nengo.Probe(model.motor_system.fingers.output,
                                        synapse=0.005)
        self.final_probe = nengo.Probe(model.motor_system.finger_pos.output,
                                       synapse=0.005)

    self.mymodel = model
    return model
        self.cellcolor = 5


world = grid.World(Cell, map=mymap, directions=4)

body = grid.ContinuousAgent()
world.add(body, x=1, y=2, dir=2)

import nengo
import nengo.spa as spa
import numpy as np

from config import *

# Colour vocabulary
COLOURS = ["GREEN", "RED", "BLUE", "MAGENTA", "YELLOW"]
colour_vocab = spa.Vocabulary(D)
colour_vocab.parse('+'.join(COLOURS))

# Cooldown vocab
cooldown_vocab = spa.Vocabulary(D)
cooldown_vocab.parse('COOLDOWN')


### FUNCTIONS ###
def detect(t):
    """
    Returns distance from wall for each sensor (maximally 4)
    Angles of sensors are -0.5, 0, 0.5 relative to the body's angle
    Angles are in range [0,4) (1=east, 2=south, 3=west)
    """
    angles = (np.linspace(-0.5, 0.5, 3) + body.dir) % world.directions
def model(self, p):
    vocab = spa.Vocabulary(p.D)
    self.stim = FingStim(vocab, p.T_reset, p.T_stim, p.T_answer,
                         p.n_states, max_diff=p.max_diff,
                         mask_base=p.mask_base)

    model = nengo.Network()
    with model:
        array = StateArray(p.D, p.n_states, vocab=vocab,
                           channel_clarity=p.channel_clarity)

        reset = nengo.Node(self.stim.reset)
        nengo.Connection(reset, array.reset, synapse=None)

        mask = nengo.Node(self.stim.mask)
        nengo.Connection(mask, array.mask, synapse=None)

        value = nengo.Node(self.stim.value)
        nengo.Connection(value, array.input, synapse=None)

        correct = nengo.Node(self.stim.answer)

        collect = nengo.Ensemble(n_neurons=p.n_collect,
                                 dimensions=p.n_states)
        readout = nengo.Node(None, size_in=p.n_states)

        # sample noisy two-hot vectors as evaluation points and targets
        pts = []
        target = []
        while len(pts) < p.n_samples:
            i = np.random.choice([1, 2, 3, 4, 5])
            j = np.random.choice([1, 2, 3, 4, 5])
            if i == j:
                continue
            v = np.zeros(5, dtype=float)
            v[i - 1] = 1
            v[j - 1] = 1
            pts.append(v)
            target.append(v)
        target = np.array(target, dtype=float)
        target += np.random.randn(*target.shape) * p.sample_noise
        pts = np.array(pts, dtype=float)
        pts += np.random.randn(*pts.shape) * p.sample_noise

        nengo.Connection(collect, readout, eval_points=pts, function=target,
                         scale_eval_points=False)

        for i in range(p.n_states):
            nengo.Connection(array.states[i].output, collect[i],
                             transform=np.array(
                                 [vocab.parse('TOUCHED').v / 1.5]))

        self.p_answer = nengo.Probe(collect, synapse=0.01)

        '''
        no_report = nengo.Node(self.stim.no_report)
        report = nengo.networks.EnsembleArray(
            100, p.n_states,
            encoders=nengo.dists.Choice([[1]]),
            intercepts=nengo.dists.Uniform(0.6, 0.9),
            radius=1)
        for i in range(p.n_states):
            nengo.Connection(array.states[i].output, report.input[i],
                             transform=np.array([vocab.parse('TOUCHED').v/1.5]))
        for ens in report.all_ensembles:
            nengo.Connection(no_report, ens.neurons,
                             transform=-2*np.ones((ens.n_neurons, 1)))

        reported = nengo.networks.EnsembleArray(
            100, p.n_states, radius=1,
            encoders=nengo.dists.Choice([[1]]),
            intercepts=nengo.dists.Uniform(0.05, 0.9))

        m = [[-10]*p.n_states for i in range(p.n_states)]
        for i in range(p.n_states):
            m[i][i] = 0
        nengo.Connection(report.output, report.input, transform=m,
                         synapse=0.01)
        nengo.Connection(report.output, reported.input, transform=1,
                         synapse=0.2)
        nengo.Connection(reported.output, report.input, transform=-1)
        nengo.Connection(reported.output, reported.input, transform=1.2)

        for ens in report.all_ensembles + reported.all_ensembles:
            nengo.Connection(reset, ens.neurons,
                             transform=-2*np.ones((ens.n_neurons, 1)))

        #self.p_answer = nengo.Probe(answer, synapse=0.01)
        '''

    self.locals = locals()
    return model
# Set dimensions of semantic pointers. Higher dimensions will make the model
# more reliable at the cost of more neurons. Must be divisible by 16 (the
# default number of subdimensions of a spa.State).
D = 272  # 304 #272 #368
Dlow = 112  # for goal and motor
print('\tDimensions: %i, %i' % (D, Dlow))

####### Vocabularies #######
rng_vocabs = np.random.RandomState(seed=fseed)

letters = ['A', 'B', 'C', 'D']  # , 'E', 'F', 'G', 'H', 'I', 'J', 'K'
numbers = ['ZERO', 'ONE', 'TWO']  # , 'THREE', 'FOUR', 'FIVE'

vocab_concepts = spa.Vocabulary(D, rng=rng_vocabs)
for letter in letters:
    vocab_concepts.parse(letter)
for number in numbers:
    vocab_concepts.parse(number)

# The model also needs to know some slot names:
vocab_concepts.parse('ITEM1')
vocab_concepts.parse('ITEM2')
vocab_concepts.parse('RESULT')
vocab_concepts.parse('IDENTITY')
vocab_concepts.parse('NEXT')

# 2. Vocab with stored (correct) problem/answer combinations
def __init__(self, mapping, D_category=16, D_items=64, threshold=0.4,
             learning_rate=1e-4):
    model = spa.SPA()
    self.model = model
    self.mapping = mapping
    self.vocab_category = spa.Vocabulary(D_category)
    self.vocab_items = spa.Vocabulary(D_items)

    # allocate vectors for the category names and their items
    for k in sorted(mapping.keys()):
        self.vocab_category.parse(k)
        for v in mapping[k]:
            self.vocab_items.parse(v)

    with model:
        model.category = spa.State(D_category, vocab=self.vocab_category)
        model.items = spa.State(D_items, vocab=self.vocab_items)

        def learned(x):
            # cats = np.dot(self.vocab_category.vectors, x)
            # best_index = np.argmax(cats)  # category with the largest
            #                               # projection of x
            # if cats[best_index] < threshold:
            #     return self.vocab_items.parse('0').v
            # else:  # generate the sum vector
            #     k = self.vocab_category.keys[best_index]
            #     total = '+'.join(self.mapping[k])
            #     v = self.vocab_items.parse(total).v
            #     return v / (2 * np.linalg.norm(v))
            return self.vocab_items.parse('0').v

        c = nengo.Connection(
            model.category.all_ensembles[0],
            model.items.input,
            function=learned,
            learning_rule_type=nengo.PES(learning_rate=learning_rate))

        model.error = spa.State(D_items, vocab=self.vocab_items)
        nengo.Connection(model.items.output, model.error.input)
        nengo.Connection(model.error.output, c.learning_rule)

        self.stim_category_value = np.zeros(D_category)
        self.stim_category = nengo.Node(self.stim_category)
        nengo.Connection(self.stim_category, model.category.input,
                         synapse=None)

        self.stim_correct_value = np.zeros(D_items)
        self.stim_correct = nengo.Node(self.stim_correct)
        nengo.Connection(self.stim_correct, model.error.input,
                         synapse=None, transform=-1)

        self.stim_stoplearn_value = np.zeros(1)
        self.stim_stoplearn = nengo.Node(self.stim_stoplearn)
        for ens in model.error.all_ensembles:
            nengo.Connection(self.stim_stoplearn, ens.neurons,
                             synapse=None,
                             transform=-10 * np.ones((ens.n_neurons, 1)))

        self.stim_justmemorize_value = np.zeros(1)
        self.stim_justmemorize = nengo.Node(self.stim_justmemorize)
        for ens in model.items.all_ensembles:
            nengo.Connection(self.stim_justmemorize, ens.neurons,
                             synapse=None,
                             transform=-10 * np.ones((ens.n_neurons, 1)))

        self.probe_items = nengo.Probe(model.items.output, synapse=0.01)

    self.sim = nengo.Simulator(self.model)
import nengo
import numpy as np
import nengo.spa as spa

vocab = spa.Vocabulary(32)
vocab2 = spa.Vocabulary(32)

model = nengo.Network()
with model:
    state = spa.State(32, vocab=vocab)

    bg = nengo.networks.actionselection.BasalGanglia(4)
    nengo.Connection(state.output, bg.input, transform=[
        vocab.parse('DOG').v,
        vocab.parse('CAT').v,
        vocab.parse('RAT').v,
        vocab.parse('COW').v,
    ])

    thal = nengo.networks.actionselection.Thalamus(4)
    nengo.Connection(bg.output, thal.input)

    act = spa.State(32, vocab=vocab2)
    channel = spa.State(32, vocab=vocab2)
    motor = spa.State(32, vocab=vocab2)

    nengo.Connection(act.output, channel.input)
    nengo.Connection(channel.output, motor.input)
    nengo.Connection(thal.output, motor.input,