model.arg2 = spa.State(D, vocab=vocab_concepts, feedback=0)  #argument 2 from input
model.answer = spa.State(D, vocab=vocab_concepts, feedback=1, feedback_synapse=.05)  #result from retrieval (or counting in the full model)

#set the inputs to the model (bypassing the need for a visual system)
model.input = spa.Input(goal=goal_func, target=target_func, arg1=arg1_func, arg2=arg2_func)

#Declarative Memory
model.declarative = assoc_mem_acc.AssociativeMemoryAccumulator(
    input_vocab=vocab_problems, wta_output=True, wta_inhibit_scale=10, threshold=.5)

#Comparison
model.comparison = compare_acc_zbrodoff.CompareAccumulator(
    vocab_compare=vocab_concepts, status_scale=.6, status_feedback=.6)

#Motor
model.motor = spa.State(dimensions=Dlow, vocab=vocab_motor)

#Basal Ganglia & Thalamus
actions = spa.Actions(
    #encode and retrieve
    a_retrieve=
#Imaginal: a network with three slots: arg1, arg2, and answer.
model.imaginal = nengo.Network(seed=fseed)
with model.imaginal:
    model.arg1 = spa.State(D, vocab=vocab_concepts, feedback=0)  #argument 1 from input /for count feedback=1
    model.arg2 = spa.State(D, vocab=vocab_concepts, feedback=0)  #argument 2 from input /for count feedback=1
    model.answer = spa.State(D, vocab=vocab_concepts, feedback=1, feedback_synapse=.05)  # /for count feedback=.8
    #model.answer = spa.State(D, vocab=vocab_concepts, feedback=1, feedback_synapse=.05)  # /for count feedback=.8

#set the inputs to the model (bypassing the need for a visual system)
#model.input = spa.Input(goal=goal_func, target=target_func, arg1=arg1_func, arg2=arg2_func)  #from count model
model.input = spa.Input(vision=vision_input_func, goal=goal_input_func)

#Number memory
model.number_memory = assoc_mem_acc.AssociativeMemoryAccumulator(
    input_vocab=vocab_numbers, wta_output=True, status_scale=.7, threshold=.1, status_feedback=.3)

#Alphabet memory
model.letter_memory = assoc_mem_acc.AssociativeMemoryAccumulator(
    input_vocab=vocab_letters, wta_output=True, status_scale=.7, threshold=.2, status_feedback=.3)

#Comparison
model.comparison = compare_acc_zbrodoff.CompareAccumulator(
    vocab_compare=vocab_concepts, status_scale=.4, status_feedback=.2,
    status_feedback_synapse=.05, threshold_cleanup=.1)

#final Comparison
model.comparison2 = compare_acc_zbrodoff.CompareAccumulator(
    vocab_compare=vocab_concepts, status_scale=.6, status_feedback=.2,
    status_feedback_synapse=.05, threshold_cleanup=.1)

#Motor
model.motor = spa.State(Dlow, vocab=vocab_motor, feedback=1)  #motor state

#bcm_model
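#--------------------------------------------------------------------------
# Hedged sketch (not part of the original model code): vision_input_func and
# goal_input_func are defined elsewhere in the source. In nengo.spa, the
# callables passed to spa.Input take the simulation time t and return a
# semantic-pointer expression as a string, where '0' denotes the zero vector.
# The names and pointer labels below (example_vision_input, example_goal_input,
# ITEM1, ITEM2, A, TWO, START) are illustrative assumptions about the task
# vocabulary, not the authors' actual stimulus functions.
def example_vision_input(t):
    # present a hypothetical problem "A + 2" for the first part of the trial
    if t < 1.6:
        return 'ITEM1*A + ITEM2*TWO'
    return '0'

def example_goal_input(t):
    # briefly cue the goal state, then let the model's own dynamics take over
    return 'START' if t < 0.05 else '0'
#--------------------------------------------------------------------------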
model.arg2 = spa.State(D, vocab=vocab_concepts, feedback=0)  #argument 2 from input /for count feedback=1
model.answer = spa.State(D, vocab=vocab_concepts, feedback=1, feedback_synapse=.05)  # /for count feedback=.8

#set the inputs to the model (bypassing the need for a visual system)
#model.input = spa.Input(goal=goal_func, target=target_func, arg1=arg1_func, arg2=arg2_func)  #from count model
model.input = spa.Input(vision=vision_input_func, goal=goal_input_func)

#Number memory
model.number_memory = assoc_mem_acc.AssociativeMemoryAccumulator(
    input_vocab=vocab_numbers, wta_output=True, status_scale=.7, threshold=.1, status_feedback=.3)
#nengo.Connection(model.number_memory.output, model.number_memory.input, transform=.2)  #feedback on number memory

#Alphabet memory
model.letter_memory = assoc_mem_acc.AssociativeMemoryAccumulator(
    input_vocab=vocab_letters, wta_output=True, status_scale=.7, threshold=.2, status_feedback=.3)
#nengo.Connection(model.letter_memory.output, model.letter_memory.input, transform=.2)  #feedback on letter memory

#Declarative Memory
model.declarative = assoc_mem_acc.AssociativeMemoryAccumulator(