def make_vision_system(images, outputs, n_neurons=1000,
                       AIT_V1_strength=0.06848695023305285,
                       V1_r_transform=0.11090645719111913,
                       AIT_r_transform=0.8079719992231219):
    vision_system = nengo.Network(label='vision_system')
    with vision_system:
        # represent currently attended item
        presentation_node = nengo.Node(None, size_in=images.shape[1],
                                       label='presentation_node')
        vision_system.presentation_node = presentation_node

        rng = np.random.RandomState(9)
        # Gabor encoders; 11x11 reportedly works best, though why is unclear
        encoders = Gabor().generate(n_neurons, (11, 11), rng=rng)
        encoders = Mask((14, 90)).populate(encoders, rng=rng, flatten=True)

        V1 = nengo.Ensemble(n_neurons, images.shape[1],
                            eval_points=images,
                            neuron_type=nengo.LIFRate(),
                            intercepts=nengo.dists.Choice([-0.5]),  # can switch these off
                            max_rates=nengo.dists.Choice([100]),
                            encoders=encoders,
                            label='V1')  # n_neurons neurons, nr of pixels = dimensions

        # visual_representation = nengo.Node(size_in=Dmid)  # output, in this case 466 outputs
        AIT = nengo.Ensemble(n_neurons, dimensions=outputs.shape[1],
                             label='AIT')  # output, in this case 466 outputs

        visconn = nengo.Connection(V1, AIT, synapse=0.005,
                                   eval_points=images, function=outputs,
                                   solver=nengo.solvers.LstsqL2(reg=0.01))
        # the transform makes this connection much weaker than the forward connection
        Ait_V1_backwardsconn = nengo.Connection(
            AIT, V1, synapse=0.005, eval_points=outputs, function=images,
            solver=nengo.solvers.LstsqL2(reg=0.01),
            transform=AIT_V1_strength)

        nengo.Connection(presentation_node, V1, synapse=None)
        nengo.Connection(AIT, AIT, synapse=0.1, transform=AIT_r_transform)
        nengo.Connection(V1, V1, synapse=0.1, transform=V1_r_transform)

        # display attended item
        display_node = nengo.Node(display_func,
                                  size_in=presentation_node.size_out,
                                  label='display_node')  # to show input
        nengo.Connection(presentation_node, display_node, synapse=None)

    # expose the ensembles on the network object; downstream code relies on
    # these attributes
    vision_system.AIT = AIT
    vision_system.V1 = V1

    return vision_system
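# Hypothetical usage sketch of make_vision_system(): `images` is assumed to be
# an (n_images, n_pixels) array, `outputs` an (n_images, n_out) array, and
# `display_func` must be defined at module level for the display node.
with nengo.Network() as net:
    vision = make_vision_system(images, outputs)
    # present each image for 100 ms (duration is an arbitrary choice here)
    stim = nengo.Node(nengo.processes.PresentInput(images, 0.1))
    nengo.Connection(stim, vision.presentation_node, synapse=None)
    ait_probe = nengo.Probe(vision.AIT, synapse=0.01)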
def evaluate(self, p, plt):
    files = []
    for f in os.listdir(p.dataset_dir):
        if f.endswith('events'):
            files.append(os.path.join(p.dataset_dir, f))

    if p.test_set == 'one':
        test_file = random.sample(files, 1)[0]
        files.remove(test_file)

    if p.n_data != -1:
        files = random.sample(files, p.n_data)

    inputs = []
    targets = []
    for f in files:
        print(f)
        times, imgs, targs = davis_track.load_data(
            f, dt=p.dt, decay_time=p.decay_time,
            separate_channels=p.separate_channels,
            saturation=p.saturation, merge=p.merge)
        inputs.append(imgs)
        targets.append(targs[:, :2])
    inputs_all = np.vstack(inputs)
    targets_all = np.vstack(targets)

    if p.test_set == 'odd':
        inputs_train = inputs_all[::2]
        inputs_test = inputs_all[1::2]
        targets_train = targets_all[::2]
        targets_test = targets_all[1::2]
    elif p.test_set == 'one':
        times, imgs, targs = davis_track.load_data(
            test_file, dt=p.dt_test, decay_time=p.decay_time,
            separate_channels=p.separate_channels,
            saturation=p.saturation)
        inputs_test = imgs
        targets_test = targs[:, :2]
        inputs_train = inputs_all
        targets_train = targets_all

    if p.augment:
        inputs_train, targets_train = davis_track.augment(
            inputs_train, targets_train,
            separate_channels=p.separate_channels)

    if p.separate_channels:
        shape = (360 // p.merge, 240 // p.merge)
    else:
        shape = (180 // p.merge, 240 // p.merge)
    dimensions = shape[0] * shape[1]

    eval_points_train = inputs_train.reshape(-1, dimensions)
    eval_points_test = inputs_test.reshape(-1, dimensions)

    model = nengo.Network()
    with model:
        from nengo_extras.vision import Gabor, Mask
        encoders = Gabor().generate(p.n_neurons,
                                    (p.gabor_size, p.gabor_size))
        encoders = Mask(shape).populate(encoders, flatten=True)

        ens = nengo.Ensemble(
            n_neurons=p.n_neurons,
            dimensions=dimensions,
            encoders=encoders,
            neuron_type=nengo.RectifiedLinear(),
            intercepts=nengo.dists.CosineSimilarity(p.gabor_size**2 + 2))

        result = nengo.Node(None, size_in=targets_all.shape[1])

        c = nengo.Connection(
            ens, result,
            eval_points=eval_points_train,
            function=targets_train,
            solver=nengo.solvers.LstsqL2(reg=p.reg),
        )

    sim = nengo.Simulator(model)

    error_train = sim.data[c].solver_info['rmses']

    _, a_train = nengo.utils.ensemble.tuning_curves(
        ens, sim, inputs=eval_points_train)
    outputs_train = np.dot(a_train, sim.data[c].weights.T)
    rmse_train = np.sqrt(np.mean((targets_train - outputs_train)**2, axis=0))

    _, a_test = nengo.utils.ensemble.tuning_curves(
        ens, sim, inputs=eval_points_test)
    outputs_test = np.dot(a_test, sim.data[c].weights.T)

    filt = nengo.synapses.Lowpass(p.output_filter)
    outputs_test = filt.filt(outputs_test, dt=p.dt_test)
    targets_test = filt.filt(targets_test, dt=p.dt_test)

    rmse_test = np.sqrt(np.mean(
        (targets_test - outputs_test)**2, axis=0)) * p.merge

    if plt:
        plt.subplot(2, 1, 1)
        plt.plot(targets_train, ls='--')
        plt.plot(outputs_train)
        plt.title('train\nrmse=%1.4f,%1.4f' % tuple(rmse_train))

        plt.subplot(2, 1, 2)
        plt.plot(targets_test, ls='--')
        plt.plot(outputs_test)
        plt.title('test\nrmse=%1.4f,%1.4f' % tuple(rmse_test))

    return dict(
        rmse_train=rmse_train,
        rmse_test=rmse_test,
    )
X_train = 2 * X_train - 1  # normalize to -1 to 1
X_test = 2 * X_test - 1  # normalize to -1 to 1

train_targets = one_hot(y_train, 10)
test_targets = one_hot(y_test, 10)

# --- set up network parameters
n_vis = X_train.shape[1]
n_out = train_targets.shape[1]
# n_hid = 300
n_hid = 1000
# n_hid = 3000

# encoders = rng.normal(size=(n_hid, 11, 11))
encoders = Gabor().generate(n_hid, (11, 11), rng=rng)
encoders = Mask((28, 28)).populate(encoders, rng=rng, flatten=True)

ens_params = dict(
    eval_points=X_train,
    neuron_type=nengo.LIFRate(),
    intercepts=nengo.dists.Choice([-0.5]),
    max_rates=nengo.dists.Choice([100]),
    encoders=encoders,
)

solver = nengo.solvers.LstsqL2(reg=0.01)
# solver = nengo.solvers.LstsqL2(reg=0.0001)

with nengo.Network(seed=3) as model:
    a = nengo.Ensemble(n_hid, n_vis, **ens_params)
    v = nengo.Node(size_in=n_out)
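# `one_hot` is assumed to be defined elsewhere; a minimal sketch consistent
# with its use above (equivalent to nengo_extras.data.one_hot_from_labels
# for dense integer labels 0..n_classes-1):
import numpy as np

def one_hot(labels, n_classes):
    """Return an (n_samples, n_classes) one-hot encoding of integer labels."""
    out = np.zeros((len(labels), n_classes))
    out[np.arange(len(labels)), labels] = 1
    return out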
def generate_gabors(load_gabors_svd=False, uncued=False, Ns=None, D=None):
    # to speed things up, load previously generated ones
    if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_cued.npz'):
        # load stimuli if previously generated
        gabors_svd_cued = np.load('Stimuli/gabors_svd_cued.npz')
        e_cued = gabors_svd_cued['e_cued']
        U_cued = gabors_svd_cued['U_cued']
        compressed_im_cued = gabors_svd_cued['compressed_im_cued']
        if not uncued:
            return e_cued, U_cued, compressed_im_cued
        print("SVD cued loaded")
    else:  # or generate and save
        # cued module
        # for each neuron in the sensory layer, generate a Gabor of 1/3 of
        # the image size; some Gabor properties are randomly sampled each
        # time they are generated (cast to int for Python 3 division)
        gabors_cued = Gabor().generate(Ns, (int(col / 3), int(row / 3)))
        # put gabors on image and make them the same shape as the stimuli
        gabors_cued = Mask((col, row)).populate(gabors_cued,
                                                flatten=True).reshape(Ns, -1)
        # normalize
        gabors_cued = gabors_cued / abs(
            max(np.amax(gabors_cued), abs(np.amin(gabors_cued))))
        # gabors are added to imagearr for SVD
        x_cued = np.vstack((imagearr, gabors_cued))

        # SVD
        print("SVD cued started...")
        U_cued, S_cued, V_cued = np.linalg.svd(x_cued.T)
        print("SVD cued done")

        # use result of SVD to create encoders
        e_cued = np.dot(gabors_cued, U_cued[:, :D])  # encoders
        # D-dimensional vector representations of the images
        compressed_im_cued = np.dot(imagearr[:1800, :] / 100, U_cued[:, :D])
        compressed_im_cued = np.vstack(
            (compressed_im_cued,
             np.dot(imagearr[-1, :] / 50, U_cued[:, :D])))

        np.savez('Stimuli/gabors_svd_cued.npz',
                 e_cued=e_cued,
                 U_cued=U_cued,
                 compressed_im_cued=compressed_im_cued)
        if not uncued:
            return e_cued, U_cued, compressed_im_cued

    # same for uncued module
    if uncued:
        if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_uncued.npz'):
            # load stimuli if previously generated
            gabors_svd_uncued = np.load('Stimuli/gabors_svd_uncued.npz')
            e_uncued = gabors_svd_uncued['e_uncued']
            U_uncued = gabors_svd_uncued['U_uncued']
            compressed_im_uncued = gabors_svd_uncued['compressed_im_uncued']
            print("SVD uncued loaded")
            return (e_cued, U_cued, compressed_im_cued,
                    e_uncued, U_uncued, compressed_im_uncued)
        else:
            gabors_uncued = Gabor().generate(Ns, (int(col / 3), int(row / 3)))
            gabors_uncued = Mask((col, row)).populate(
                gabors_uncued, flatten=True).reshape(Ns, -1)
            gabors_uncued = gabors_uncued / abs(
                max(np.amax(gabors_uncued), abs(np.amin(gabors_uncued))))
            x_uncued = np.vstack((imagearr, gabors_uncued))

            print("SVD uncued started...")
            U_uncued, S_uncued, V_uncued = np.linalg.svd(x_uncued.T)
            print("SVD uncued done")

            # indexing up to D limits the representations to D dimensions;
            # the model later uses them in the same way
            e_uncued = np.dot(gabors_uncued, U_uncued[:, :D])
            compressed_im_uncued = np.dot(imagearr[:1800, :] / 100,
                                          U_uncued[:, :D])
            compressed_im_uncued = np.vstack(
                (compressed_im_uncued,
                 np.dot(imagearr[-1, :] / 50, U_uncued[:, :D])))

            np.savez('Stimuli/gabors_svd_uncued.npz',
                     e_uncued=e_uncued,
                     U_uncued=U_uncued,
                     compressed_im_uncued=compressed_im_uncued)
            return (e_cued, U_cued, compressed_im_cued,
                    e_uncued, U_uncued, compressed_im_uncued)
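# The SVD-compression pattern above in miniature: stack the stimuli and the
# Gabor filters, take the SVD, and project both into the top-D singular-vector
# basis so encoders and image representations live in the same D-dimensional
# space. A standalone sketch with random stand-ins for `imagearr` and the
# filters (all shapes here are hypothetical):
import numpy as np

rng = np.random.RandomState(0)
imagearr = rng.randn(100, 784)   # 100 "images" of 784 pixels
gabors = rng.randn(50, 784)      # 50 "gabor" filters, same pixel count
D = 24                           # compressed dimensionality

U, S, V = np.linalg.svd(np.vstack((imagearr, gabors)).T)
encoders = np.dot(gabors, U[:, :D])          # D-dimensional encoders
compressed_im = np.dot(imagearr, U[:, :D])   # D-dimensional image reps
print(encoders.shape, compressed_im.shape)   # (50, 24) (100, 24)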
def create_model():
    # print(trial_info)
    print('---- INITIALIZING MODEL ----')
    global model
    model = spa.SPA()
    with model:

        # display current stimulus pair (not part of model)
        if nengo_gui_on:
            model.pair_input = nengo.Node(present_pair)
            model.pair_display = nengo.Node(
                display_func,
                size_in=model.pair_input.size_out)  # to show input
            nengo.Connection(model.pair_input, model.pair_display,
                             synapse=None)

        # control
        model.control_net = nengo.Network()
        with model.control_net:
            # assuming the model knows which hand to use (which was blocked)
            model.hand_input = nengo.Node(get_hand)
            model.target_hand = spa.State(Dmid, vocab=vocab_motor, feedback=1)
            nengo.Connection(model.hand_input, model.target_hand.input,
                             synapse=None)

            model.attend = spa.State(D, vocab=vocab_attend, feedback=.5)
            model.goal = spa.State(D, vocab=vocab_goal,
                                   feedback=1)  # current goal

        ### vision ###

        # set up network parameters
        n_vis = X_train.shape[1]  # nr of pixels, dimensions of network
        n_hid = 1000  # nr of gabor encoders/neurons

        # random state to start
        rng = np.random.RandomState(9)
        # gabor encoders; 11x11 reportedly works better, trying smaller here
        encoders = -1 * Gabor().generate(n_hid, (9, 9), rng=rng)
        # use them on part of the image
        encoders = Mask((14, 90)).populate(encoders, rng=rng, flatten=True)

        model.visual_net = nengo.Network()
        with model.visual_net:

            # represent currently attended item
            model.attended_item = nengo.Node(present_item, size_in=D)
            nengo.Connection(model.attend.output, model.attended_item)

            model.vision_gabor = nengo.Ensemble(
                n_hid, n_vis,
                eval_points=X_train,
                # neuron_type=nengo.LIFRate(),
                neuron_type=nengo.LIF(),
                # intercepts=nengo.dists.Choice([-0.5]),  # should we comment this out? not sure what's happening
                intercepts=nengo.dists.Uniform(-0.1, 0.1),
                # max_rates=nengo.dists.Choice([100]),
                encoders=encoders)
            model.visual_representation = nengo.Ensemble(n_hid,
                                                         dimensions=Dmid)

            model.visconn = nengo.Connection(
                model.vision_gabor,
                model.visual_representation,
                synapse=0.01,  # was .005
                eval_points=X_train,
                function=train_targets,
                solver=nengo.solvers.LstsqL2(reg=0.01))
            nengo.Connection(model.attended_item, model.vision_gabor,
                             synapse=None)  # synapse?

            # display attended item, only in gui
            if nengo_gui_on:
                model.display_attended = nengo.Node(
                    display_func,
                    size_in=model.attended_item.size_out)  # to show input
                nengo.Connection(model.attended_item, model.display_attended,
                                 synapse=None)

            # print(model.vision_gabor.neurons.probeable)

        ### central cognition ###

        # concepts
        # model.concepts = spa.AssociativeMemory(
        #     vocab_concepts,
        #     wta_output=True,
        #     wta_inhibit_scale=1,  # was 1
        #     default_output_key='NONE',  # what to say if input doesn't match
        #     threshold=0.3)  # how strong the input must be to be recognized
        # nengo.Connection(model.visual_representation, model.concepts.input,
        #                  transform=.8 * vision_mapping)
        #     # not too fast to concepts; might have to be increased to make
        #     # the model react faster to the first word

        # concepts accumulator
        # model.concepts_evidence = spa.State(1, feedback=1,
        #     feedback_synapse=0.03)  # the lower the synapse, the faster it accumulates (was .1)
        # concepts_evidence_scale = 2.5
        # nengo.Connection(model.concepts.am.elem_output,
        #     model.concepts_evidence.input,
        #     transform=concepts_evidence_scale * np.ones(
        #         (1, model.concepts.am.elem_output.size_out)),
        #     synapse=0.005)

        # reset if concepts is NONE (default)
        # nengo.Connection(model.concepts.am.ensembles[-1],
        #     model.concepts_evidence.all_ensembles[0].neurons,
        #     transform=np.ones(
        #         (model.concepts_evidence.all_ensembles[0].n_neurons, 1)) * -40,  # was -10
        #     synapse=0.005)  # lower synapse gives shorter impact of reset - makes the reaction a little slower

        # pair representation
        # model.vis_pair = spa.State(D, vocab=vocab_concepts, feedback=1.4)
        #     # was 2; 1.6 works ok, but everything gets activated

        # model.dm_learned_words = spa.AssociativeMemory(vocab_learned_words,
        #     default_output_key='NONE', threshold=.3)
        #     # familiarity should be continuous over all items, so no wta
        # nengo.Connection(model.dm_learned_words.output,
        #     model.dm_learned_words.input, transform=.4, synapse=.01)

        # this stores the accumulated evidence for or against familiarity
        # model.familiarity = spa.State(1, feedback=1, feedback_synapse=0.1)
        #     # fb syn influences speed of accumulation
        # familiarity_scale = 0.2
        # nengo.Connection(model.dm_learned_words.am.ensembles[-1],
        #     model.familiarity.input,
        #     transform=-familiarity_scale)  # accumulate to -1
        # nengo.Connection(model.dm_learned_words.am.elem_output,
        #     model.familiarity.input,  # am.elem_output == all outputs, we sum
        #     transform=familiarity_scale * np.ones(
        #         (1, model.dm_learned_words.am.elem_output.size_out)))  # accumulate to 1

        # model.do_fam = spa.AssociativeMemory(vocab_reset,
        #     default_output_key='CLEAR', threshold=.2)
        # nengo.Connection(model.do_fam.am.ensembles[-1],
        #     model.familiarity.all_ensembles[0].neurons,
        #     transform=np.ones(
        #         (model.familiarity.all_ensembles[0].n_neurons, 1)) * -10,
        #     synapse=0.005)

        #fam model.dm_pairs = spa.AssociativeMemory(vocab_learned_pairs, input_keys=list_of_pairs, wta_output=True)
        #fam nengo.Connection(model.dm_pairs.output, model.dm_pairs.input, transform=.5)

        # this works:
        #fam model.representation = spa.AssociativeMemory(vocab_learned_pairs, input_keys=list_of_pairs, wta_output=True)
        #fam nengo.Connection(model.representation.output, model.representation.input, transform=2)
        #fam model.rep_filled = spa.State(1, feedback_synapse=.005)  # no fb syn specified
        #fam nengo.Connection(model.representation.am.elem_output, model.rep_filled.input,  # am.elem_output == all outputs, we sum
        #fam     transform=.8*np.ones((1, model.representation.am.elem_output.size_out)), synapse=0)

        # this doesn't:
        # model.representation = spa.State(D, feedback=1)
        # model.rep_filled = spa.State(1, feedback_synapse=.005)  # no fb syn specified
        # nengo.Connection(model.representation.output, model.rep_filled.input,
        #     transform=.8*np.ones((1, model.representation.output.size_out)), synapse=0)

        # this shouldn't really be fixed, I think
        #fam model.comparison = spa.Compare(D, vocab=vocab_concepts)

        # motor
        #motor model.motor_net = nengo.Network()
        #motor with model.motor_net:
        #motor     # input multiplier
        #motor     model.motor_input = spa.State(Dmid, vocab=vocab_motor)
        #motor     # higher motor area (SMA?)
        #motor     model.motor = spa.State(Dmid, vocab=vocab_motor, feedback=.7)
        #motor     # connect input multiplier with higher motor area
        #motor     nengo.Connection(model.motor_input.output, model.motor.input, synapse=.1, transform=2)
        #motor     # finger area
        #motor     model.fingers = spa.AssociativeMemory(vocab_fingers, input_keys=['L1', 'L2', 'R1', 'R2'], wta_output=True)
        #motor     # connection between higher order area (hand, finger) and lower area
        #motor     nengo.Connection(model.motor.output, model.fingers.input, transform=.2*motor_mapping)
        #motor     # finger position (spinal?)
        #motor     model.finger_pos = nengo.networks.EnsembleArray(n_neurons=50, n_ensembles=4)
        #motor     nengo.Connection(model.finger_pos.output, model.finger_pos.input, synapse=0.1, transform=0.3)  # feedback
        #motor     # connection between finger area and finger position
        #motor     nengo.Connection(model.fingers.am.elem_output, model.finger_pos.input, transform=1.5*np.diag([0.55, .54, .56, .55]))  # fix these

        # model.bg = spa.BasalGanglia(
        #     spa.Actions(
        #         a_attend_item1='dot(goal,DO_TASK) - dot(attend,ITEM1) --> goal=RECOG, attend=ITEM1',
        #         b_attending_item1='dot(goal,RECOG) + dot(attend,ITEM1) - concepts_evidence - .2 --> goal=RECOG, attend=ITEM1, vis_pair=2*attend*concepts+2*concepts',  # , dm_learned_words=vis_pair',
        #         c_attend_item2='dot(goal,RECOG) + dot(attend,ITEM1) + concepts_evidence - 1.8 --> goal=RECOG, attend=ITEM2, vis_pair=2*attend*concepts+2*concepts, dm_learned_words=vis_pair',
        #         d_attending_item2='dot(goal,RECOG) + dot(attend,ITEM2) - concepts_evidence - .3 --> goal=RECOG, attend=ITEM2, vis_pair=2*attend*concepts+2*concepts, dm_learned_words=vis_pair',
        #         e_judge_familiarity='dot(goal,RECOG) + dot(attend,ITEM2) + concepts_evidence - 2.1 --> goal=FAMILIARITY, attend=ITEM2, vis_pair=2*attend*concepts+2*concepts, dm_learned_words=vis_pair, do_fam=GO',
        #         fa_judge_familiarityA='dot(goal,FAMILIARITY) - .0 --> goal=FAMILIARITY, dm_learned_words=vis_pair, do_fam=GO',
        #motor         g_respond_unfamiliar='dot(goal,FAMILIARITY+RESPOND) - familiarity - .9 --> goal=RESPOND, dm_learned_words=vis_pair, do_fam=GO, motor_input=1.5*target_hand+MIDDLE',
        #motor         h_respond_familiar='dot(goal,FAMILIARITY+RESPOND) + familiarity - .9 --> goal=RESPOND, dm_learned_words=vis_pair, do_fam=GO, motor_input=1.5*target_hand+INDEX, vis_pair=dm_learned_words',
        #fam         'dot(goal,RECOG2)+dot(attend,ITEM2)+familiarity-1.3 --> goal=RECOLLECTION, dm_pairs=2*vis_pair, representation=3*dm_pairs',  # vis_pair=ITEM2*concepts',
        #fam         'dot(goal,RECOLLECTION) - .5 --> goal=RECOLLECTION, representation=2*dm_pairs',
        #fam         'dot(goal,RECOLLECTION) + 2*rep_filled - 1.3 --> goal=COMPARE_ITEM1, attend=ITEM1, comparison_A=2*vis_pair, comparison_B=2*representation*~attend',
        #fam         'dot(goal,COMPARE_ITEM1) + rep_filled + comparison - 1 --> goal=COMPARE_ITEM2, attend=ITEM2, comparison_A=2*vis_pair',  # comparison_B=2*representation*~attend',
        #fam         'dot(goal,COMPARE_ITEM1) + rep_filled + (1-comparison) - 1 --> goal=RESPOND, motor_input=1.0*target_hand+MIDDLE',  # comparison_A=2*vis_pair, comparison_B=2*representation*~attend',
        #fam         'dot(goal,COMPARE_ITEM2) + rep_filled + comparison - 1 --> goal=RESPOND, motor_input=1.0*target_hand+INDEX',  # comparison_A=2*vis_pair, comparison_B=2*representation*~attend',
        #fam         'dot(goal,COMPARE_ITEM2) + rep_filled + (1-comparison) - 1 --> goal=RESPOND, motor_input=1.0*target_hand+MIDDLE',  # comparison_A=2*vis_pair, comparison_B=2*representation*~attend',
        #fam         'dot(goal,RESPOND) + comparison - 1 --> goal=RESPOND, motor_input=1.0*target_hand+INDEX',  # comparison_A=2*vis_pair, comparison_B=2*representation*~attend',
        #fam         'dot(goal,RESPOND) + (1-comparison) - 1 --> goal=RESPOND, motor_input=1.0*target_hand+MIDDLE',  # comparison_A=2*vis_pair, comparison_B=2*representation*~attend',
        #         'dot(goal,RECOLLECTION) + (1 - dot(representation,vis_pair)) - 1.3 --> goal=RESPOND, motor_input=1.0*target_hand+MIDDLE',
        #motor         x_response_done='dot(goal,RESPOND) + dot(motor,MIDDLE+INDEX) - .5 --> goal=END',
        #motor         y_end='dot(goal,END)-1 --> goal=END',
        #         z_threshold='.1 -->'
        #         # possible to match complete buffer, i.e. is representation filled?
        #         # motor_input=1.5*target_hand+MIDDLE,
        #     ))

        # 'dot(attention, W1) - evidence - 0.8 --> motor=NO, attention=W1',
        # 'dot(attention, W1) + evidence - 0.8 --> attention=W2, reset=EVIDENCE',
        # 'dot(attention, W1) --> attention=W1',  # if we don't set attention it goes back to 0
        # 'dot(attention, W2) - evidence - 0.8 --> motor=NO, attention=W2',
        # 'dot(attention, W2) + evidence - 0.8 --> motor=YES, attention=W2',
        # 'dot(attention, W2) --> attention=W2',
        # option might be feedback on attention; then no rule 3/6 but a default rule

        # model.thalamus = spa.Thalamus(model.bg)

        # model.cortical = spa.Cortical(
        #     # cortical connection: shorthand for doing everything with states and connections
        #     spa.Actions(
        #         'motor_input = .04*target_hand',
        #         'dm_learned_words = .8*concepts',  # .5
        #         'dm_pairs = 2*stimulus',
        #         'vis_pair = 2*attend*concepts+concepts',
        #fam         'comparison_A = 2*vis_pair',
        #fam         'comparison_B = 2*representation*~attend',
        #     ))

        # probes
        # model.pr_goal = nengo.Probe(model.goal.output, synapse=.01)
        #motor model.pr_motor_pos = nengo.Probe(model.finger_pos.output, synapse=.01)  # raw vector (dimensions x time)
        #motor model.pr_motor = nengo.Probe(model.fingers.output, synapse=.01)
        # model.pr_motor1 = nengo.Probe(model.motor.output, synapse=.01)
        # model.pr_target = nengo.Probe(model.target_hand.output, synapse=.01)
        # model.pr_attend = nengo.Probe(model.attend.output, synapse=.01)
        # do we need a synapse, or should we do something with the spikes?
        model.pr_vision_gabor = nengo.Probe(model.vision_gabor.neurons)  # , synapse=.01
        # model.pr_familiarity = nengo.Probe(model.dm_learned_words.am.elem_output, synapse=.01)
        #     # element output, don't include default
        # multiply spikes with the connection weights

        # input
        model.input = spa.Input(goal=lambda t: 'DO_TASK' if t < 0.02 else '0')
                # tail of a batches() generator: crop random patches and
                # yield flattened batches with their targets
                images[k, :, i[k]:i[k] + shape[1], j[k]:j[k] + shape[2]]
                for k in range(per_batch)
            ])
            yield cropped.reshape((per_batch, n_vis)), targets

# --- set up network parameters
method = 'gabor'
print("Encoders (n_hid = %d, method=%r)" % (n_hid, method))

if method == 'full':
    encoders = rng.normal(size=(n_hid,) + shape).reshape(n_hid, -1)
elif method == 'mask':
    encoders = Mask(shape).populate(rng.normal(size=(n_hid, c, 9, 9)),
                                    rng=rng, flatten=True)
elif method == 'gabor':
    gabors = Gabor().generate(n_hid, (13, 13), rng=rng)
    colors = nengo.dists.UniformHypersphere(surface=True).sample(
        n_hid, c, rng=rng)
    gabors = gabors[:, None, :, :] * colors[:, :, None, None]
    encoders = Mask(shape).populate(gabors, rng=rng, flatten=True)
else:
    raise ValueError(method)

encoded = np.dot(next(batches())[0], encoders.T)  # next(), not .next() (Python 3)
neuron_type = nengo.LIFRate()
intercepts = np.percentile(encoded, 50, axis=0)
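# The percentile trick above sets each neuron's intercept at the median of its
# encoded inputs, so each neuron is active for roughly half the stimuli. A
# standalone sketch of the idea with stand-in data (shapes are hypothetical):
import numpy as np

rng = np.random.RandomState(0)
data = rng.randn(500, 64)       # stand-in "images", 64 dimensions
encs = rng.randn(100, 64)       # stand-in encoders for 100 neurons
encs /= np.linalg.norm(encs, axis=1, keepdims=True)

encoded_sim = np.dot(data, encs.T)                  # (500, 100) similarities
intercepts_sim = np.percentile(encoded_sim, 50, axis=0)  # one per neuron
active = encoded_sim > intercepts_sim               # boolean activity pattern
print(active.mean())                                # ~0.5 by construction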
def generate_gabors():
    global e_cued
    global U_cued
    global compressed_im_cued
    global e_uncued
    global U_uncued
    global compressed_im_uncued

    # to speed things up, load previously generated ones
    if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_cued.npz'):
        # load stimuli if previously generated
        gabors_svd_cued = np.load('Stimuli/gabors_svd_cued.npz')
        e_cued = gabors_svd_cued['e_cued']
        U_cued = gabors_svd_cued['U_cued']
        compressed_im_cued = gabors_svd_cued['compressed_im_cued']
        print("SVD cued loaded")
    else:  # or generate and save
        # cued module
        # for each neuron in the sensory layer, generate a Gabor of 1/3 of
        # the image size (cast to int for Python 3 division)
        gabors_cued = Gabor().generate(Ns, (int(col / 3), int(row / 3)))
        # put gabors on image and make them the same shape as the stimuli
        gabors_cued = Mask((col, row)).populate(gabors_cued,
                                                flatten=True).reshape(Ns, -1)
        # normalize
        gabors_cued = gabors_cued / abs(
            max(np.amax(gabors_cued), abs(np.amin(gabors_cued))))
        # gabors are added to imagearr for SVD
        x_cued = np.vstack((imagearr, gabors_cued))

        # SVD
        print("SVD cued started...")
        U_cued, S_cued, V_cued = np.linalg.svd(x_cued.T)
        print("SVD cued done")

        # use result of SVD to create encoders
        e_cued = np.dot(gabors_cued, U_cued[:, :D])  # encoders
        # D-dimensional vector representations of the images
        compressed_im_cued = np.dot(imagearr[:1800, :] / 100, U_cued[:, :D])
        compressed_im_cued = np.vstack(
            (compressed_im_cued,
             np.dot(imagearr[-1, :] / 50, U_cued[:, :D])))

        np.savez('Stimuli/gabors_svd_cued.npz',
                 e_cued=e_cued,
                 U_cued=U_cued,
                 compressed_im_cued=compressed_im_cued)

    # same for uncued module
    if uncued:
        if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_uncued.npz'):
            # load stimuli if previously generated
            gabors_svd_uncued = np.load('Stimuli/gabors_svd_uncued.npz')
            e_uncued = gabors_svd_uncued['e_uncued']
            U_uncued = gabors_svd_uncued['U_uncued']
            compressed_im_uncued = gabors_svd_uncued['compressed_im_uncued']
            print("SVD uncued loaded")
        else:
            gabors_uncued = Gabor().generate(Ns, (int(col / 3), int(row / 3)))
            gabors_uncued = Mask((col, row)).populate(
                gabors_uncued, flatten=True).reshape(Ns, -1)
            gabors_uncued = gabors_uncued / abs(
                max(np.amax(gabors_uncued), abs(np.amin(gabors_uncued))))
            x_uncued = np.vstack((imagearr, gabors_uncued))

            print("SVD uncued started...")
            U_uncued, S_uncued, V_uncued = np.linalg.svd(x_uncued.T)
            print("SVD uncued done")

            e_uncued = np.dot(gabors_uncued, U_uncued[:, :D])
            compressed_im_uncued = np.dot(imagearr[:1800, :] / 100,
                                          U_uncued[:, :D])
            compressed_im_uncued = np.vstack(
                (compressed_im_uncued,
                 np.dot(imagearr[-1, :] / 50, U_uncued[:, :D])))

            np.savez('Stimuli/gabors_svd_uncued.npz',
                     e_uncued=e_uncued,
                     U_uncued=U_uncued,
                     compressed_im_uncued=compressed_im_uncued)
def initialize_vocabs():
    #global encoders
    global train_targets
    global vocab_concepts
    global vocab_goal
    global vocab_motor
    global vision_mapping
    global vocab_items
    global vocab_fingers
    global motor_mapping
    global vocab_attend  # used by create_model
    global list_of_pairs  # used by create_model

    if extended_visual:
        # low level vision
        vocab_vision = nengo.spa.Vocabulary(Dmid, max_similarity=.5)
        for name in y_train_words:
            vocab_vision.parse(name)
        train_targets = vocab_vision.vectors

    # word concepts - should have all concepts, including new foils
    vocab_concepts = spa.Vocabulary(D, max_similarity=0.2)
    if extended_visual:
        for i in y_train_words:
            vocab_concepts.parse(i)
    else:
        for i in items:
            if i not in vocab_concepts.keys:
                vocab_concepts.parse(i)

    # vision-concept mapping
    if extended_visual:
        vision_mapping = np.zeros((D, Dmid))
        for word in y_train_words:
            vision_mapping += np.outer(
                vocab_vision.parse(word).v,
                vocab_concepts.parse(word).v).T

    # experimental items
    vocab_items = spa.Vocabulary(D, max_similarity=.2)
    for item1, item2 in pairs:
        vocab_items.parse(item1)
        vocab_items.parse(item2)
    print(vocab_concepts.keys)

    # experimental pairs
    vocab_pairs = spa.Vocabulary(D, max_similarity=.2)
    list_of_pairs = []
    for item1, item2 in pairs:
        vocab_pairs.parse('%s*ITEM1 + %s*ITEM2' % (item1, item2))
        vocab_pairs.add(
            '%s_%s' % (item1, item2),
            vocab_pairs.parse('%s*ITEM1 + %s*ITEM2' % (item1, item2)))
        vocab_concepts.add(
            '%s_%s' % (item1, item2),
            vocab_concepts.parse('%s*ITEM1 + %s*ITEM2' % (item1, item2)))
        list_of_pairs.append('%s_%s' % (item1, item2))

    # motor vocab, just for similarity calculations
    # different dimensionality to be sure; upper motor hierarchy
    vocab_motor = spa.Vocabulary(Dmid)
    vocab_motor.parse('LEFT+RIGHT+INDEX+MIDDLE')

    vocab_fingers = spa.Vocabulary(Dlow)  # direct finger activation
    vocab_fingers.parse('L1+L2+R1+R2')

    # map higher and lower motor areas
    motor_mapping = np.zeros((Dlow, Dmid))
    motor_mapping += np.outer(
        vocab_motor.parse('LEFT+INDEX').v, vocab_fingers.parse('L1').v).T
    motor_mapping += np.outer(
        vocab_motor.parse('LEFT+MIDDLE').v, vocab_fingers.parse('L2').v).T
    motor_mapping += np.outer(
        vocab_motor.parse('RIGHT+INDEX').v, vocab_fingers.parse('R1').v).T
    motor_mapping += np.outer(
        vocab_motor.parse('RIGHT+MIDDLE').v, vocab_fingers.parse('R2').v).T
    # motor_mapping *= 0.5

    # goal vocab
    vocab_goal = spa.Vocabulary(Dlow)
    vocab_goal.parse('DO_TASK')
    vocab_goal.parse('RECOG')
    vocab_goal.parse('RESPOND')
    vocab_goal.parse('END')

    # attend vocab
    vocab_attend = spa.Vocabulary(D, max_similarity=.2)
    vocab_attend.parse('ITEM1')
    vocab_attend.parse('ITEM2')

    # --- set up network parameters
    if extended_visual:
        global n_vis
        global n_out
        global n_hid
        n_vis = X_train.shape[1]  # nr of pixels, dimensions of network
        n_out = train_targets.shape[1]  # nr of items
        n_hid = 1000  # nr of gabor encoders/neurons, one neuron per encoder

        # random state to start
        rng = np.random.RandomState(9)
        global encoders
        # gabor encoders; 11x11 reportedly works best, though why is unclear
        encoders = Gabor().generate(n_hid, (11, 11), rng=rng)
        # use the encoders on part of the image (the 14x90 stimulus strip)
        encoders = Mask((14, 90)).populate(encoders, rng=rng, flatten=True)
# Generate the MNIST training and test data
train_targets = one_hot(y_train, 10)
test_targets = one_hot(y_test, 10)

# Set up the vision network parameters
n_vis = x_train.shape[1]  # Number of pixels per image
n_out = train_targets.shape[1]  # Number of output classes
n_hid = 16000 // (im_size**2)  # Number of neurons to use
# Note: the number of neurons is limited such that N x D <= 16000, where
# D = im_size * im_size and N is the number of neurons to use

gabor_size = (int(im_size / 2.5), int(im_size / 2.5))  # Size of the gabor filter

# Generate the encoders for the neural ensemble
encoders = Gabor().generate(n_hid, gabor_size, rng=rng)
encoders = Mask((im_size, im_size)).populate(encoders, rng=rng, flatten=True)

# Ensemble parameters
max_firing_rates = 100
ens_neuron_type = nengo.neurons.RectifiedLinear()
ens_intercepts = nengo.dists.Choice([-0.5])
ens_max_rates = nengo.dists.Choice([max_firing_rates])

# Output connection parameters
conn_synapse = None
conn_eval_points = x_train
conn_function = train_targets
conn_solver = nengo.solvers.LstsqL2(reg=0.01)

# Visual input process parameters
presentation_time = 0.25
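# A sketch of how these parameters could be assembled into a network; this is
# one plausible wiring, not the author's exact model. PresentInput and an
# `x_test` array are assumed from context.
import nengo
from nengo.processes import PresentInput

with nengo.Network(seed=0) as net:
    # present each test image for `presentation_time` seconds
    input_node = nengo.Node(PresentInput(x_test, presentation_time))

    ens = nengo.Ensemble(n_hid, n_vis,
                         neuron_type=ens_neuron_type,
                         intercepts=ens_intercepts,
                         max_rates=ens_max_rates,
                         encoders=encoders)
    out = nengo.Node(size_in=n_out)

    nengo.Connection(input_node, ens, synapse=None)
    # decode one-hot class targets from the ensemble activities
    nengo.Connection(ens, out,
                     synapse=conn_synapse,
                     eval_points=conn_eval_points,
                     function=conn_function,
                     solver=conn_solver)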
def create_model():
    # print(trial_info)
    print('---- INITIALIZING MODEL ----')
    global model
    model = spa.SPA()
    with model:

        # display current stimulus pair (not part of model)
        if nengo_gui_on:
            model.pair_input = nengo.Node(present_pair)
            model.pair_display = nengo.Node(
                display_func,
                size_in=model.pair_input.size_out)  # to show input
            nengo.Connection(model.pair_input, model.pair_display,
                             synapse=None)

        # control
        model.control_net = nengo.Network()
        with model.control_net:
            # assuming the model knows which hand to use (which was blocked)
            model.hand_input = nengo.Node(get_hand)
            model.target_hand = spa.State(Dmid, vocab=vocab_motor, feedback=1)
            nengo.Connection(model.hand_input, model.target_hand.input,
                             synapse=None)

            model.attend = spa.State(D, vocab=vocab_attend, feedback=.5)
            model.goal = spa.State(Dlow, vocab=vocab_goal,
                                   feedback=.7)  # current goal

        ### vision ###

        # set up network parameters
        n_vis = X_train.shape[1]  # nr of pixels, dimensions of network
        n_hid = 1000  # nr of gabor encoders/neurons

        # random state to start
        rng = np.random.RandomState(9)
        # gabor encoders; 11x11 reportedly works better, but 4x4 is used here
        encoders = Gabor().generate(n_hid, (4, 4), rng=rng)
        # use them on part of the image
        encoders = Mask((14, 90)).populate(encoders, rng=rng, flatten=True)

        model.visual_net = nengo.Network()
        with model.visual_net:

            # represent currently attended item
            model.attended_item = nengo.Node(present_item2, size_in=D)
            nengo.Connection(model.attend.output, model.attended_item)

            model.vision_gabor = nengo.Ensemble(
                n_hid, n_vis,
                eval_points=X_train,
                # neuron_type=nengo.LIF(),
                # to get a better fit, use more realistic neurons that adapt
                # to their input
                neuron_type=nengo.AdaptiveLIF(tau_n=.01, inc_n=.05),
                intercepts=nengo.dists.Uniform(-0.1, 0.1),
                # intercepts=nengo.dists.Choice([-0.5]),  # should we comment this out? not sure what's happening
                # max_rates=nengo.dists.Choice([100]),
                encoders=encoders)

            # recurrent connection (time constant 500 ms)
            # strength = 1 - (100/500) = .8
            zeros = np.zeros_like(X_train)
            nengo.Connection(
                model.vision_gabor,
                model.vision_gabor,
                synapse=0.005,  # .1
                eval_points=np.vstack(
                    [X_train, zeros, np.random.randn(*X_train.shape)]),
                transform=.5)

            model.visual_representation = nengo.Ensemble(n_hid,
                                                         dimensions=Dmid)

            model.visconn = nengo.Connection(
                model.vision_gabor,
                model.visual_representation,
                synapse=0.005,  # was .005
                eval_points=X_train,
                function=train_targets,
                solver=nengo.solvers.LstsqL2(reg=0.01))
            nengo.Connection(model.attended_item, model.vision_gabor,
                             synapse=.02)  # .03)  # synapse?

            # display attended item, only in gui
            if nengo_gui_on:
                # show what's being looked at
                model.display_attended = nengo.Node(
                    display_func,
                    size_in=model.attended_item.size_out)  # to show input
                nengo.Connection(model.attended_item, model.display_attended,
                                 synapse=None)

                # add node to plot total visual activity
                model.visual_activation = nengo.Node(None, size_in=1)
                nengo.Connection(model.vision_gabor.neurons,
                                 model.visual_activation,
                                 transform=np.ones((1, n_hid)),
                                 synapse=None)

        ### central cognition ###

        ##### Concepts #####
        model.concepts = spa.AssociativeMemory(
            vocab_all_words,  # vocab_concepts,
            wta_output=True,
            wta_inhibit_scale=1,  # was 1
            # default_output_key='NONE',  # what to say if input doesn't match
            threshold=0.3)  # how strong the input must be to be recognized
        # not too fast to concepts; might have to be increased to make the
        # model react faster to the first word
        nengo.Connection(model.visual_representation, model.concepts.input,
                         transform=.8 * vision_mapping)

        # concepts accumulator
        # the lower the synapse, the faster it accumulates (was .1)
        model.concepts_evidence = spa.State(1, feedback=1,
                                            feedback_synapse=0.005)
        concepts_evidence_scale = 2.5
        nengo.Connection(model.concepts.am.elem_output,
                         model.concepts_evidence.input,
                         transform=concepts_evidence_scale * np.ones(
                             (1, model.concepts.am.elem_output.size_out)),
                         synapse=0.005)

        # concepts switch
        model.do_concepts = spa.AssociativeMemory(vocab_reset,
                                                  default_output_key='CLEAR',
                                                  threshold=.2)
        nengo.Connection(
            model.do_concepts.am.ensembles[-1],
            model.concepts_evidence.all_ensembles[0].neurons,
            transform=np.ones(
                (model.concepts_evidence.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        ##### Visual Representation #####
        # was 2; 1.6 works ok, but everything gets activated
        model.vis_pair = spa.State(D, vocab=vocab_all_words, feedback=1.0,
                                   feedback_synapse=.05)

        ##### Familiarity #####

        # Assoc Mem with Learned Words
        # - familiarity signal should be continuous over all items, so no wta
        model.dm_learned_words = spa.AssociativeMemory(vocab_learned_words,
                                                       threshold=.2)
        nengo.Connection(model.dm_learned_words.output,
                         model.dm_learned_words.input,
                         transform=.4, synapse=.02)

        # Familiarity Accumulator
        # feedback synapse influences speed of accumulation
        model.familiarity = spa.State(1, feedback=.9, feedback_synapse=0.1)
        # familiarity_scale = 0.2  # keep stable for negative fam

        # familiarity accumulator switch
        model.do_fam = spa.AssociativeMemory(vocab_reset,
                                             default_output_key='CLEAR',
                                             threshold=.2)
        # reset
        nengo.Connection(
            model.do_fam.am.ensembles[-1],
            model.familiarity.all_ensembles[0].neurons,
            transform=np.ones(
                (model.familiarity.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        # first a sum to represent summed similarity
        model.summed_similarity = nengo.Ensemble(n_neurons=100, dimensions=1)
        nengo.Connection(  # take the sum
            model.dm_learned_words.am.elem_output,
            model.summed_similarity,
            transform=np.ones(
                (1, model.dm_learned_words.am.elem_output.size_out)))

        # then a connection to accumulate this summed similarity
        def familiarity_acc_transform(summed_sim):
            fam_scale = .5
            fam_threshold = 0  # actually, kind of a bias
            fam_max = 1
            return fam_scale * (2 * ((summed_sim - fam_threshold) /
                                     (fam_max - fam_threshold)) - 1)

        nengo.Connection(model.summed_similarity, model.familiarity.input,
                         function=familiarity_acc_transform)

        ##### Recollection & Representation #####

        model.dm_pairs = spa.AssociativeMemory(
            vocab_learned_pairs, wta_output=True)  # input_keys=list_of_pairs
        nengo.Connection(model.dm_pairs.output, model.dm_pairs.input,
                         transform=.5, synapse=.05)

        # representation
        rep_scale = 0.5
        model.representation = spa.State(D, vocab=vocab_all_words,
                                         feedback=1.0)
        model.rep_filled = spa.State(
            1, feedback=.9,
            feedback_synapse=.1)  # fb syn influences speed of acc
        model.do_rep = spa.AssociativeMemory(vocab_reset,
                                             default_output_key='CLEAR',
                                             threshold=.2)
        nengo.Connection(
            model.do_rep.am.ensembles[-1],
            model.rep_filled.all_ensembles[0].neurons,
            transform=np.ones(
                (model.rep_filled.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        nengo.Connection(model.representation.output, model.rep_filled.input,
                         transform=rep_scale * np.reshape(
                             sum(vocab_learned_pairs.vectors), (1, D)))

        ##### Comparison #####

        model.comparison = spa.Compare(D, vocab=vocab_all_words,
                                       neurons_per_multiply=500,
                                       input_magnitude=.3)

        # it turns out comparison is not an accumulator - we also need one
        # of those
        model.comparison_accumulator = spa.State(
            1, feedback=.9,
            feedback_synapse=0.05)  # fb syn influences speed of acc
        model.do_compare = spa.AssociativeMemory(vocab_reset,
                                                 default_output_key='CLEAR',
                                                 threshold=.2)

        # reset
        nengo.Connection(
            model.do_compare.am.ensembles[-1],
            model.comparison_accumulator.all_ensembles[0].neurons,
            transform=np.ones(
                (model.comparison_accumulator.all_ensembles[0].n_neurons,
                 1)) * -10,
            synapse=0.005)

        # applying a function to a 'passthrough' node raises an error, so we
        # put an ensemble in between as a solution:
        model.comparison_result = nengo.Ensemble(n_neurons=100, dimensions=1)
        nengo.Connection(model.comparison.output, model.comparison_result)

        def comparison_acc_transform(comparison):
            comparison_scale = .6
            comparison_threshold = 0  # actually, kind of a bias
            comparison_max = .6
            return comparison_scale * (2 * (
                (comparison - comparison_threshold) /
                (comparison_max - comparison_threshold)) - 1)

        nengo.Connection(model.comparison_result,
                         model.comparison_accumulator.input,
                         function=comparison_acc_transform)

        # motor
        model.motor_net = nengo.Network()
        with model.motor_net:

            # input multiplier
            model.motor_input = spa.State(Dmid, vocab=vocab_motor)

            # higher motor area (SMA?)
            model.motor = spa.State(Dmid, vocab=vocab_motor, feedback=.7)

            # connect input multiplier with higher motor area
            nengo.Connection(model.motor_input.output, model.motor.input,
                             synapse=.1, transform=2)

            # finger area
            model.fingers = spa.AssociativeMemory(
                vocab_fingers, input_keys=['L1', 'L2', 'R1', 'R2'],
                wta_output=True)
            nengo.Connection(model.fingers.output, model.fingers.input,
                             synapse=0.1, transform=0.3)  # feedback

            # connection between higher order area (hand, finger) and lower area
            nengo.Connection(model.motor.output, model.fingers.input,
                             transform=.25 * motor_mapping)  # was .2

            # finger position (spinal?)
            model.finger_pos = nengo.networks.EnsembleArray(n_neurons=50,
                                                            n_ensembles=4)
            nengo.Connection(model.finger_pos.output, model.finger_pos.input,
                             synapse=0.1, transform=0.8)  # feedback

            # connection between finger area and finger position
            nengo.Connection(model.fingers.am.elem_output,
                             model.finger_pos.input,
                             transform=1.0 * np.diag(
                                 [0.55, .54, .56, .55]))  # fix these

        model.bg = spa.BasalGanglia(
            spa.Actions(
                # wait & start
                a_aa_wait='dot(goal,WAIT) - .9 --> goal=0',
                a_attend_item1='dot(goal,DO_TASK) - .0 --> goal=RECOG, attend=ITEM1, do_concepts=GO',

                # attend words
                b_attending_item1='dot(goal,RECOG) + dot(attend,ITEM1) - concepts_evidence - .3 --> goal=RECOG, attend=ITEM1, do_concepts=GO',  # vis_pair=2.5*(ITEM1*concepts)',
                c_attend_item2='dot(goal,RECOG) + dot(attend,ITEM1) + concepts_evidence - 1.6 --> goal=RECOG2, attend=ITEM2, vis_pair=3*(ITEM1*concepts)',
                d_attending_item2='dot(goal,RECOG2+RECOG) + dot(attend,ITEM2) - concepts_evidence - .4 --> goal=RECOG2, attend=ITEM2, do_concepts=GO, dm_learned_words=1.0*(~ITEM1*vis_pair)',  # vis_pair=1.2*(ITEM2*concepts)
                e_start_familiarity='dot(goal,RECOG2) + dot(attend,ITEM2) + concepts_evidence - 1.8 --> goal=FAMILIARITY, do_fam=GO, vis_pair=1.9*(ITEM2*concepts), dm_learned_words=2.0*(~ITEM1*vis_pair+~ITEM2*vis_pair)',

                # judge familiarity
                f_accumulate_familiarity='1.1*dot(goal,FAMILIARITY) - 0.2 --> goal=FAMILIARITY-RECOG2, do_fam=GO, dm_learned_words=.8*(~ITEM1*vis_pair+~ITEM2*vis_pair)',
                g_respond_unfamiliar='dot(goal,FAMILIARITY) - familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RESPOND_MISMATCH-FAMILIARITY, do_fam=GO, motor_input=1.6*(target_hand+MIDDLE)',
                # g2_respond_familiar='dot(goal,FAMILIARITY) + familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RESPOND, do_fam=GO, motor_input=1.6*(target_hand+INDEX)',

                # recollection & representation
                h_recollection='dot(goal,FAMILIARITY) + familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RECOLLECTION-FAMILIARITY, dm_pairs=vis_pair',
                i_representation='dot(goal,RECOLLECTION) - rep_filled - .1 --> goal=RECOLLECTION, dm_pairs=vis_pair, representation=3*dm_pairs, do_rep=GO',

                # comparison & respond
                j_10_compare_word1='dot(goal,RECOLLECTION+1.4*COMPARE_ITEM1) + rep_filled - .9 --> goal=COMPARE_ITEM1-RECOLLECTION, do_rep=GO, do_compare=GO, comparison_A=~ITEM1*vis_pair, comparison_B=~ITEM1*representation',
                k_11_match_word1='dot(goal,COMPARE_ITEM1) + comparison_accumulator - .7 --> goal=COMPARE_ITEM2-COMPARE_ITEM1, do_rep=GO, comparison_A=~ITEM1*vis_pair, comparison_B=~ITEM1*representation',
                l_12_mismatch_word1='dot(goal,COMPARE_ITEM1) + .4*dot(goal,RESPOND_MISMATCH) - comparison_accumulator - .7 --> goal=RESPOND_MISMATCH-COMPARE_ITEM1, do_rep=GO, motor_input=1.6*(target_hand+MIDDLE), do_compare=GO, comparison_A=~ITEM1*vis_pair, comparison_B=~ITEM1*representation',
                compare_word2='dot(goal,COMPARE_ITEM2) - .5 --> goal=COMPARE_ITEM2, do_compare=GO, comparison_A=~ITEM2*vis_pair, comparison_B=~ITEM2*representation',
                m_match_word2='dot(goal,COMPARE_ITEM2) + comparison_accumulator - .7 --> goal=RESPOND_MATCH-COMPARE_ITEM2, motor_input=1.6*(target_hand+INDEX), do_compare=GO, comparison_A=~ITEM2*vis_pair, comparison_B=~ITEM2*representation',
                n_mismatch_word2='dot(goal,COMPARE_ITEM2) - comparison_accumulator - dot(fingers,L1+L2+R1+R2) - .7 --> goal=RESPOND_MISMATCH-COMPARE_ITEM2, motor_input=1.6*(target_hand+MIDDLE), do_compare=GO, comparison_A=~ITEM2*vis_pair, comparison_B=~ITEM2*representation',

                # respond
                o_respond_match='dot(goal,RESPOND_MATCH) - .1 --> goal=RESPOND_MATCH, motor_input=1.6*(target_hand+INDEX)',
                p_respond_mismatch='dot(goal,RESPOND_MISMATCH) - .1 --> goal=RESPOND_MISMATCH, motor_input=1.6*(target_hand+MIDDLE)',

                # finish
                x_response_done='dot(goal,RESPOND_MATCH) + dot(goal,RESPOND_MISMATCH) + 2*dot(fingers,L1+L2+R1+R2) - .7 --> goal=2*END',
                y_end='dot(goal,END) - .1 --> goal=END-RESPOND_MATCH-RESPOND_MISMATCH',
                z_threshold='.05 --> goal=0'

                # possible to match complete buffer, i.e. is representation filled?
                # motor_input=1.5*target_hand+MIDDLE,
            ))
        print(model.bg.actions.count)
        # print(model.bg.dimensions)

        model.thalamus = spa.Thalamus(model.bg)

        model.cortical = spa.Cortical(
            # cortical connection: shorthand for doing everything with
            # states and connections
            spa.Actions(
                # 'motor_input = .04*target_hand',
                # 'dm_learned_words = .1*vis_pair',
                # 'dm_pairs = 2*stimulus',
                # 'vis_pair = 2*attend*concepts+concepts',
                #fam 'comparison_A = 2*vis_pair',
                #fam 'comparison_B = 2*representation*~attend',
            ))

        # probes
        model.pr_motor_pos = nengo.Probe(
            model.finger_pos.output,
            synapse=.01)  # raw vector (dimensions x time)
        model.pr_motor = nengo.Probe(model.fingers.output, synapse=.01)
        # model.pr_motor1 = nengo.Probe(model.motor.output, synapse=.01)

        if not nengo_gui_on:
            # do we need a synapse, or should we do something with the spikes?
            model.pr_vision_gabor = nengo.Probe(model.vision_gabor.neurons,
                                                synapse=.005)
            # element output, don't include default
            model.pr_familiarity = nengo.Probe(
                model.dm_learned_words.am.elem_output, synapse=.01)
            model.pr_concepts = nengo.Probe(
                model.concepts.am.elem_output, synapse=.01)
            # multiply spikes with the connection weights

        # input
        model.input = spa.Input(goal=goal_func)
        # print(sum(ens.n_neurons for ens in model.all_ensembles))

        # return model

        # to show selected BG rules in the GUI, get the rule names
        if nengo_gui_on:
            vocab_actions = spa.Vocabulary(model.bg.output.size_out)
            for i, action in enumerate(model.bg.actions.actions):
                vocab_actions.add(action.name.upper(),
                                  np.eye(model.bg.output.size_out)[i])
            model.actions = spa.State(model.bg.output.size_out,
                                      subdimensions=model.bg.output.size_out,
                                      vocab=vocab_actions)
            nengo.Connection(model.thalamus.output, model.actions.input)

            for net in model.networks:
                if net.label is not None and net.label.startswith('channel'):
                    net.label = ''
train_targets = one_hot_from_labels(train_labels, classes=10)
test_targets = one_hot_from_labels(test_labels, classes=10)
assert train_images.shape[1] == n_in
assert train_targets.shape[1] == n_out

# --- network
neuron_type = nengo.LIF()
n_hids = [1000, 1000]

print("Encoders")
max_rates0 = 100 * np.ones(n_hids[0])
intercepts0 = 0.1 * np.ones(n_hids[0])
gain0, bias0 = neuron_type.gain_bias(max_rates0, intercepts0)
encoders0 = Mask(s_in).populate(
    Gabor().generate(n_hids[0], (11, 11), rng=rng), rng=rng, flatten=True)
h0 = neuron_type.rates(np.dot(train_images, encoders0.T), gain0, bias0)

max_rates1 = 100 * np.ones(n_hids[1])
intercepts1 = 0.1 * np.ones(n_hids[1])
gain1, bias1 = neuron_type.gain_bias(max_rates1, intercepts1)
encoders1 = ciw_encoders(n_hids[1], h0, train_labels, rng=rng)
# encoders1 *= Mask(s_in).generate(n_hid, rf_shape, rng=rng, flatten=True)
encoders1 /= npext.norm(encoders1, axis=1, keepdims=True)
h1 = neuron_type.rates(np.dot(h0, encoders1.T), gain1, bias1)

print("Solving")
solver = hunse_thesis.solvers.LstsqClassifier(reg=0.01)
decoders, solver_info = solver(h1, train_targets, rng=rng)
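# Presumably the solved decoders are then evaluated on held-out data. A
# minimal sketch reusing the names above; `test_images`/`test_labels` are
# assumed to exist, and `decoders` is assumed to have shape
# (n_hids[1], n_out) as returned by the solver.
h0_test = neuron_type.rates(np.dot(test_images, encoders0.T), gain0, bias0)
h1_test = neuron_type.rates(np.dot(h0_test, encoders1.T), gain1, bias1)

# decode class scores and compare the argmax to the true labels
outputs_test = np.dot(h1_test, decoders)
accuracy = np.mean(np.argmax(outputs_test, axis=1) == test_labels)
print("test accuracy: %0.3f" % accuracy)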
model = nengo.Network()
with model:
    sensor = nengo.Node(env_iface.sensor)
    sensor_net = nengo.Ensemble(n_neurons=n_input,
                                dimensions=np.prod(env_iface.state_dim),
                                radius=sensor_radius)

    gabor_size = (5, 5)  # size of the gabor filter

    # generate the encoders for the sensory ensemble
    sensor_encoders = Gabor().generate(n_input, gabor_size, rng=rng)
    sensor_encoders = Mask(image_shape).populate(sensor_encoders, rng=rng,
                                                 flatten=True)
    sensor_net.encoders = sensor_encoders

    srf_net = PRF(n_excitatory=n_input,
                  n_inhibitory=n_inhibitory,
                  connect_exc_inh_input=True,
                  n_outputs=n_place,
                  dimensions=env_iface.n_actions,
                  label="Spatial receptive field network",
                  seed=seed,
                  **srf_params)

    actor_net = nengo.Ensemble(n_neurons=n_actor,
                               dimensions=env_iface.n_actions,
                               radius=actor_radius)
def generate_gabors(load_gabors_svd=False, Ns=None, D=None):
    # to speed things up, load previously generated ones
    if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_first_exp2.npz'):
        # load stimuli if previously generated
        gabors_svd_first = np.load('Stimuli/gabors_svd_first_exp2.npz')
        e_first = gabors_svd_first['e_first']
        U_first = gabors_svd_first['U_first']
        compressed_im_first = gabors_svd_first['compressed_im_first']
        print("SVD first loaded")
    else:  # or generate and save
        # first module
        # for each neuron in the sensory layer, generate a Gabor of 1/3 of
        # the image size
        gabors_first = Gabor().generate(Ns, (int(col / 3), int(row / 3)))
        # put gabors on image and make them the same shape as the stimuli
        gabors_first = Mask((col, row)).populate(gabors_first,
                                                 flatten=True).reshape(Ns, -1)
        # normalize
        gabors_first = gabors_first / abs(
            max(np.amax(gabors_first), abs(np.amin(gabors_first))))
        # gabors are added to imagearr for SVD
        x_first = np.vstack((imagearr, gabors_first))

        # SVD
        print("SVD first started...")
        U_first, S_first, V_first = np.linalg.svd(x_first.T)
        print("SVD first done")

        # use result of SVD to create encoders
        e_first = np.dot(gabors_first, U_first[:, :D])  # encoders
        # D-dimensional vector representations of the images
        compressed_im_first = np.dot(imagearr[:1800, :] / 100,
                                     U_first[:, :D])
        compressed_im_first = np.vstack(
            (compressed_im_first,
             np.dot(imagearr[-1, :] / 50, U_first[:, :D])))

        np.savez('Stimuli/gabors_svd_first_exp2.npz',
                 e_first=e_first,
                 U_first=U_first,
                 compressed_im_first=compressed_im_first)

    # same for the second module
    if load_gabors_svd and os.path.isfile('Stimuli/gabors_svd_second_exp2.npz'):
        gabors_svd_second = np.load('Stimuli/gabors_svd_second_exp2.npz')
        e_second = gabors_svd_second['e_second']
        U_second = gabors_svd_second['U_second']
        compressed_im_second = gabors_svd_second['compressed_im_second']
        print("SVD second loaded")
    else:
        gabors_second = Gabor().generate(Ns, (int(col / 3), int(row / 3)))
        gabors_second = Mask((col, row)).populate(
            gabors_second, flatten=True).reshape(Ns, -1)
        gabors_second = gabors_second / abs(
            max(np.amax(gabors_second), abs(np.amin(gabors_second))))
        x_second = np.vstack((imagearr, gabors_second))

        print("SVD second started...")
        U_second, S_second, V_second = np.linalg.svd(x_second.T)
        print("SVD second done")

        e_second = np.dot(gabors_second, U_second[:, :D])
        compressed_im_second = np.dot(imagearr[:1800, :] / 100,
                                      U_second[:, :D])
        compressed_im_second = np.vstack(
            (compressed_im_second,
             np.dot(imagearr[-1, :] / 50, U_second[:, :D])))

        np.savez('Stimuli/gabors_svd_second_exp2.npz',
                 e_second=e_second,
                 U_second=U_second,
                 compressed_im_second=compressed_im_second)

    return (e_first, U_first, compressed_im_first,
            e_second, U_second, compressed_im_second)
def create_model():
    # print(trial_info)
    print('---- INITIALIZING MODEL ----')
    global model
    model = spa.SPA()
    with model:

        # display current stimulus pair (not part of model)
        if nengo_gui_on:
            model.pair_input = nengo.Node(present_pair)
            model.pair_display = nengo.Node(
                display_func,
                size_in=model.pair_input.size_out)  # to show input
            nengo.Connection(model.pair_input, model.pair_display,
                             synapse=None)

        # control
        model.control_net = nengo.Network()
        with model.control_net:
            # assuming the model knows which hand to use (which was blocked)
            model.hand_input = nengo.Node(get_hand)
            model.target_hand = spa.State(Dmid, vocab=vocab_motor, feedback=1)
            nengo.Connection(model.hand_input, model.target_hand.input,
                             synapse=None)

            model.attend = spa.State(D, vocab=vocab_attend, feedback=.5)
            model.goal = spa.State(Dlow, vocab=vocab_goal,
                                   feedback=.7)  # current goal

        ### vision ###

        # set up network parameters
        n_vis = X_train.shape[1]  # nr of pixels, dimensions of network
        n_hid = 1000  # nr of gabor encoders/neurons

        # random state to start
        rng = np.random.RandomState(9)
        # gabor encoders; 11x11 reportedly works better, but 4x4 is used here
        encoders = Gabor().generate(n_hid, (4, 4), rng=rng)
        # use them on part of the image
        encoders = Mask((14, 90)).populate(encoders, rng=rng, flatten=True)

        model.visual_net = nengo.Network()
        with model.visual_net:

            # represent currently attended item
            model.attended_item = nengo.Node(present_item2, size_in=D)
            nengo.Connection(model.attend.output, model.attended_item)

            model.vision_gabor = nengo.Ensemble(
                n_hid, n_vis,
                eval_points=X_train,
                neuron_type=nengo.LIF(),
                intercepts=nengo.dists.Uniform(-0.1, 0.1),
                # intercepts=nengo.dists.Choice([-0.5]),  # should we comment this out? not sure what's happening
                # max_rates=nengo.dists.Choice([100]),
                encoders=encoders)

            # recurrent connection (time constant 500 ms)
            # strength = 1 - (100/500) = .8
            zeros = np.zeros_like(X_train)
            nengo.Connection(
                model.vision_gabor,
                model.vision_gabor,
                synapse=0.005,  # .1
                eval_points=np.vstack(
                    [X_train, zeros, np.random.randn(*X_train.shape)]),
                transform=.5)

            model.visual_representation = nengo.Ensemble(n_hid,
                                                         dimensions=Dmid)

            model.visconn = nengo.Connection(
                model.vision_gabor,
                model.visual_representation,
                synapse=0.005,  # was .005
                eval_points=X_train,
                function=train_targets,
                solver=nengo.solvers.LstsqL2(reg=0.01))
            nengo.Connection(model.attended_item, model.vision_gabor,
                             synapse=.02)  # .03)  # synapse?

            # display attended item, only in gui
            if nengo_gui_on:
                # show what's being looked at
                model.display_attended = nengo.Node(
                    display_func,
                    size_in=model.attended_item.size_out)  # to show input
                nengo.Connection(model.attended_item, model.display_attended,
                                 synapse=None)

                # add node to plot total visual activity
                model.visual_activation = nengo.Node(None, size_in=1)
                nengo.Connection(model.vision_gabor.neurons,
                                 model.visual_activation,
                                 transform=np.ones((1, n_hid)),
                                 synapse=None)

        ### central cognition ###

        # concepts
        model.concepts = spa.AssociativeMemory(
            vocab_all_words,  # vocab_concepts,
            wta_output=True,
            wta_inhibit_scale=1,  # was 1
            # default_output_key='NONE',  # what to say if input doesn't match
            threshold=0.3)  # how strong the input must be to be recognized
        # not too fast to concepts; might have to be increased to make the
        # model react faster to the first word
        nengo.Connection(model.visual_representation, model.concepts.input,
                         transform=.8 * vision_mapping)

        # concepts accumulator
        # the lower the synapse, the faster it accumulates (was .1)
        model.concepts_evidence = spa.State(1, feedback=1,
                                            feedback_synapse=0.005)
        concepts_evidence_scale = 2.5
        nengo.Connection(model.concepts.am.elem_output,
                         model.concepts_evidence.input,
                         transform=concepts_evidence_scale * np.ones(
                             (1, model.concepts.am.elem_output.size_out)),
                         synapse=0.005)

        # concepts switch
        model.do_concepts = spa.AssociativeMemory(vocab_reset,
                                                  default_output_key='CLEAR',
                                                  threshold=.2)
        nengo.Connection(
            model.do_concepts.am.ensembles[-1],
            model.concepts_evidence.all_ensembles[0].neurons,
            transform=np.ones(
                (model.concepts_evidence.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        # pair representation
        # was 2; 1.6 works ok, but everything gets activated
        model.vis_pair = spa.State(D, vocab=vocab_all_words, feedback=1.0,
                                   feedback_synapse=.05)

        # learned words
        # default_output_key='NONE'; familiarity should be continuous over
        # all items, so no wta
        model.dm_learned_words = spa.AssociativeMemory(vocab_learned_words,
                                                       threshold=.2)
        nengo.Connection(model.dm_learned_words.output,
                         model.dm_learned_words.input,
                         transform=.4, synapse=.02)

        # this stores the accumulated evidence for or against familiarity
        model.familiarity = spa.State(
            1, feedback=.9,
            feedback_synapse=0.1)  # fb syn influences speed of acc
        familiarity_scale = 0.2  # keep stable for negative fam
        # nengo.Connection(model.dm_learned_words.am.ensembles[-1],
        #     model.familiarity.input,
        #     transform=-(familiarity_scale + 0.8))  # accumulate to -1
        nengo.Connection(
            model.dm_learned_words.am.elem_output,
            model.familiarity.input,  # am.elem_output == all outputs, we sum
            transform=(familiarity_scale + .1) * np.ones(
                (1, model.dm_learned_words.am.elem_output.size_out))
        )  # accumulate to 1

        model.do_fam = spa.AssociativeMemory(vocab_reset,
                                             default_output_key='CLEAR',
                                             threshold=.2)
        nengo.Connection(
            model.do_fam.am.ensembles[-1],
            model.familiarity.all_ensembles[0].neurons,
            transform=np.ones(
                (model.familiarity.all_ensembles[0].n_neurons, 1)) * -10,
            synapse=0.005)

        # negative accumulator
        nengo.Connection(
            model.do_fam.am.elem_output,
            model.familiarity.input,
            transform=-familiarity_scale * np.ones(
                (1, model.do_fam.am.elem_output.size_out)))  # accumulate to -1

        #fam model.dm_pairs = spa.AssociativeMemory(vocab_learned_pairs, input_keys=list_of_pairs, wta_output=True)
        #fam nengo.Connection(model.dm_pairs.output, model.dm_pairs.input, transform=.5)

        # this works:
        #fam model.representation = spa.AssociativeMemory(vocab_learned_pairs, input_keys=list_of_pairs, wta_output=True)
        #fam nengo.Connection(model.representation.output, model.representation.input, transform=2)
        #fam model.rep_filled = spa.State(1, feedback_synapse=.005)  # no fb syn specified
        #fam nengo.Connection(model.representation.am.elem_output, model.rep_filled.input,  # am.elem_output == all outputs, we sum
        #fam     transform=.8*np.ones((1, model.representation.am.elem_output.size_out)), synapse=0)

        # this doesn't:
        # model.representation = spa.State(D, feedback=1)
        # model.rep_filled = spa.State(1, feedback_synapse=.005)  # no fb syn specified
        # nengo.Connection(model.representation.output, model.rep_filled.input,
        #     transform=.8*np.ones((1, model.representation.output.size_out)), synapse=0)

        # this shouldn't really be fixed, I think
        #fam model.comparison = spa.Compare(D, vocab=vocab_concepts)

        # motor
        model.motor_net = nengo.Network()
        with model.motor_net:

            # input multiplier
            model.motor_input = spa.State(Dmid, vocab=vocab_motor)

            # higher motor area (SMA?)
            model.motor = spa.State(Dmid, vocab=vocab_motor, feedback=.7)

            # connect input multiplier with higher motor area
            nengo.Connection(model.motor_input.output, model.motor.input,
                             synapse=.1, transform=2)

            # finger area
            model.fingers = spa.AssociativeMemory(
                vocab_fingers, input_keys=['L1', 'L2', 'R1', 'R2'],
                wta_output=True)
            nengo.Connection(model.fingers.output, model.fingers.input,
                             synapse=0.1, transform=0.3)  # feedback

            # connection between higher order area (hand, finger) and lower area
            nengo.Connection(model.motor.output, model.fingers.input,
                             transform=.25 * motor_mapping)  # was .2

            # finger position (spinal?)
            model.finger_pos = nengo.networks.EnsembleArray(n_neurons=50,
                                                            n_ensembles=4)
            nengo.Connection(model.finger_pos.output, model.finger_pos.input,
                             synapse=0.1, transform=0.8)  # feedback

            # connection between finger area and finger position
            nengo.Connection(model.fingers.am.elem_output,
                             model.finger_pos.input,
                             transform=1.0 * np.diag(
                                 [0.55, .54, .56, .55]))  # fix these

        model.bg = spa.BasalGanglia(
            spa.Actions(
                # wait & start
                a_aa_wait='dot(goal,WAIT) - .9 --> goal=0',
                a_attend_item1='dot(goal,DO_TASK) - .1 --> goal=RECOG, attend=ITEM1, do_concepts=GO',

                # attend words
                b_attending_item1='dot(goal,RECOG) + dot(attend,ITEM1) - concepts_evidence - .3 --> goal=RECOG, attend=ITEM1, do_concepts=GO',  # vis_pair=2.5*(ITEM1*concepts)',
                c_attend_item2='dot(goal,RECOG) + dot(attend,ITEM1) + concepts_evidence - 1.6 --> goal=RECOG2, attend=ITEM2, vis_pair=3*(ITEM1*concepts)',
                d_attending_item2='dot(goal,RECOG2+RECOG) + dot(attend,ITEM2) - concepts_evidence - .4 --> goal=RECOG2, attend=ITEM2, do_concepts=GO, dm_learned_words=1.0*(~ITEM1*vis_pair)',  # vis_pair=1.2*(ITEM2*concepts)
                e_judge_familiarity='dot(goal,RECOG2) + dot(attend,ITEM2) + concepts_evidence - 1.8 --> goal=FAMILIARITY, do_fam=GO, vis_pair=1.9*(ITEM2*concepts), dm_learned_words=2.0*(~ITEM1*vis_pair+~ITEM2*vis_pair)',

                # judge familiarity
                f_judge_familiarity='dot(goal,FAMILIARITY) - .1 --> goal=FAMILIARITY, do_fam=GO, dm_learned_words=.8*(~ITEM1*vis_pair+~ITEM2*vis_pair)',
                g_respond_unfamiliar='dot(goal,FAMILIARITY) - familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RESPOND, do_fam=GO, motor_input=1.6*(target_hand+MIDDLE)',
                h_respond_familiar='dot(goal,FAMILIARITY) + familiarity - .5*dot(fingers,L1+L2+R1+R2) - .6 --> goal=RESPOND, do_fam=GO, motor_input=1.6*(target_hand+INDEX)',

                # finish
                x_response_done='1.1*dot(goal,RESPOND) + 1.5*dot(fingers,L1+L2+R1+R2) - .7 --> goal=2*END',
                y_end='dot(goal,END) - .1 --> goal=END',
                z_threshold='.05 --> goal=0'

                # possible to match complete buffer, i.e. is representation filled?
                # motor_input=1.5*target_hand+MIDDLE,
            ))

        # 'dot(attention, W1) - evidence - 0.8 --> motor=NO, attention=W1',
        # 'dot(attention, W1) + evidence - 0.8 --> attention=W2, reset=EVIDENCE',
        # 'dot(attention, W1) --> attention=W1',  # if we don't set attention it goes back to 0
        # 'dot(attention, W2) - evidence - 0.8 --> motor=NO, attention=W2',
        # 'dot(attention, W2) + evidence - 0.8 --> motor=YES, attention=W2',
        # 'dot(attention, W2) --> attention=W2',
        # option might be feedback on attention; then no rule 3/6 but a default rule

        model.thalamus = spa.Thalamus(model.bg)

        model.cortical = spa.Cortical(
            # cortical connection: shorthand for doing everything with
            # states and connections
            spa.Actions(
                # 'motor_input = .04*target_hand',
                # 'dm_learned_words = .1*vis_pair',
                # 'dm_pairs = 2*stimulus',
                # 'vis_pair = 2*attend*concepts+concepts',
                #fam 'comparison_A = 2*vis_pair',
                #fam 'comparison_B = 2*representation*~attend',
            ))

        # probes
        model.pr_motor_pos = nengo.Probe(
            model.finger_pos.output,
            synapse=.01)  # raw vector (dimensions x time)
        model.pr_motor = nengo.Probe(model.fingers.output, synapse=.01)
        # model.pr_motor1 = nengo.Probe(model.motor.output, synapse=.01)

        if not nengo_gui_on:
            # do we need a synapse, or should we do something with the spikes?
            model.pr_vision_gabor = nengo.Probe(model.vision_gabor.neurons,
                                                synapse=.005)
            # element output, don't include default
            model.pr_familiarity = nengo.Probe(
                model.dm_learned_words.am.elem_output, synapse=.01)
            model.pr_concepts = nengo.Probe(
                model.concepts.am.elem_output, synapse=.01)
            # multiply spikes with the connection weights

        # input
        model.input = spa.Input(goal=goal_func)
        # print(sum(ens.n_neurons for ens in model.all_ensembles))

        # return model

        # to show selected BG rules in the GUI, get the rule names
        # (currently disabled via `and False`)
        if nengo_gui_on and False:
            vocab_actions = spa.Vocabulary(model.bg.output.size_out)
            for i, action in enumerate(model.bg.actions.actions):
                vocab_actions.add(action.name.upper(),
                                  np.eye(model.bg.output.size_out)[i])
            model.actions = spa.State(model.bg.output.size_out,
                                      vocab=vocab_actions)
            nengo.Connection(model.thalamus.output, model.actions.input)

            for net in model.networks:
                if net.label is not None and net.label.startswith('channel'):
                    net.label = ''
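# A minimal driver sketch for running such a create_model() outside the GUI.
# This is hypothetical: it assumes the globals the function relies on
# (vocabularies, stimuli, goal_func, nengo_gui_on=False) are already set up.
initialize_vocabs()
create_model()
with nengo.Simulator(model) as sim:
    sim.run(1.0)  # simulate one second of the trial
finger_data = sim.data[model.pr_motor_pos]  # probed finger positions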