def test_formats_for_control_specification_for_mechanism_and_function_params(self):
    control_spec_list = [
        pnl.CONTROL,
        pnl.CONTROL_SIGNAL,
        pnl.CONTROL_PROJECTION,
        pnl.ControlSignal,
        pnl.ControlSignal(),
        pnl.ControlProjection,
        "CP_OBJECT",
        pnl.ControlMechanism,
        pnl.ControlMechanism(),
        (0.3, pnl.CONTROL),
        (0.3, pnl.CONTROL_SIGNAL),
        (0.3, pnl.CONTROL_PROJECTION),
        (0.3, pnl.ControlSignal),
        (0.3, pnl.ControlSignal()),
        (0.3, pnl.ControlProjection),
        (0.3, "CP_OBJECT"),
        (0.3, pnl.ControlMechanism),
        (0.3, pnl.ControlMechanism())
    ]
    for i, ctl_tuple in enumerate(zip(control_spec_list, reversed(control_spec_list))):
        C1, C2 = ctl_tuple
        # Avoid assigning the same instantiated ControlProjection more than once
        # (compare the string sentinel with == rather than `is`)
        if C1 == 'CP_OBJECT':
            C1 = pnl.ControlProjection()
        elif isinstance(C1, tuple) and C1[1] == 'CP_OBJECT':
            C1 = (C1[0], pnl.ControlProjection())
        if C2 == 'CP_OBJECT':
            C2 = pnl.ControlProjection()
        elif isinstance(C2, tuple) and C2[1] == 'CP_OBJECT':
            C2 = (C2[0], pnl.ControlProjection())

        R = pnl.RecurrentTransferMechanism(noise=C1,
                                           function=pnl.Logistic(gain=C2))
        assert R.parameter_states[pnl.NOISE].mod_afferents[0].name in \
            'ControlProjection for RecurrentTransferMechanism-{}[noise]'.format(i)
        assert R.parameter_states[pnl.GAIN].mod_afferents[0].name in \
            'ControlProjection for RecurrentTransferMechanism-{}[gain]'.format(i)
def test_simple_hebbian(self):
    Hebb_C = pnl.Composition()
    size = 9

    Hebb2 = pnl.RecurrentTransferMechanism(
        size=size,
        function=pnl.Linear,
        enable_learning=True,
        hetero=0.,
        auto=0.,
        name='Hebb2',
    )

    Hebb_C.add_node(Hebb2)

    src = [1, 0, 0, 1, 0, 0, 1, 0, 0]

    inputs_dict = {Hebb2: np.array(src)}

    Hebb_C.run(num_trials=5, inputs=inputs_dict)
    activity = Hebb2.value

    assert np.allclose(
        activity,
        [[1.86643089, 0., 0., 1.86643089, 0., 0., 1.86643089, 0., 0.]])
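# Note (editorial, hedged): with enable_learning=True, each of the 5 trials applies a
# Hebbian update to the recurrent matrix, strengthening connections among the three
# co-active units in src; that is why their asserted activity ends up above the input
# value of 1 while the inactive units remain at 0.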
class TestSharedParameters:
    recurrent_mech = pnl.RecurrentTransferMechanism(default_variable=[0, 0],
                                                    enable_learning=True)
    recurrent_mech_no_learning = pnl.RecurrentTransferMechanism(
        default_variable=[0, 0])
    transfer_with_costs = pnl.TransferWithCosts(default_variable=[0, 0])

    test_values = [
        (recurrent_mech,
         'learning_function',
         recurrent_mech.learning_mechanism.parameters.function),
        (recurrent_mech,
         'learning_rate',
         recurrent_mech.learning_mechanism.parameters.learning_rate),
        (transfer_with_costs,
         'transfer_fct_mult_param',
         transfer_with_costs.transfer_fct.parameters.multiplicative_param)
    ]

    @pytest.mark.parametrize('obj, parameter_name, source', test_values + [
        (recurrent_mech_no_learning, 'learning_function', None),
    ])
    def test_sources(self, obj, parameter_name, source):
        assert getattr(obj.parameters, parameter_name).source is source

    @pytest.mark.parametrize('obj, parameter_name, source', test_values)
    def test_values(self, obj, parameter_name, source):
        obj_param = getattr(obj.parameters, parameter_name)
        eids = range(5)

        for eid in eids:
            obj.execute(np.array([eid, eid]), context=eid)

        assert all([obj_param.get(eid) is source.get(eid) for eid in eids])

    @pytest.mark.parametrize('obj, parameter_name, attr_name', [
        (transfer_with_costs, 'intensity_cost_fct_mult_param', 'modulable'),
        (recurrent_mech, 'learning_function', 'stateful'),
        (recurrent_mech, 'learning_function', 'loggable'),
        (recurrent_mech.recurrent_projection, 'auto', 'modulable'),
        (recurrent_mech, 'integration_rate', 'modulable'),
        (recurrent_mech, 'noise', 'modulable'),
    ])
    def test_param_attrs_match(self, obj, parameter_name, attr_name):
        shared_param = getattr(obj.parameters, parameter_name)
        source_param = shared_param.source

        assert getattr(shared_param, attr_name) == getattr(source_param, attr_name)
def test_formats_for_control_specification_for_mechanism_and_function_params(self, noise, gain):
    # Avoid assigning the same instantiated ControlProjection more than once
    if noise == 'CP_OBJECT':
        noise = pnl.ControlProjection()
    elif isinstance(noise, tuple) and noise[1] == 'CP_OBJECT':
        noise = (noise[0], pnl.ControlProjection())
    if gain == 'CP_OBJECT':
        gain = pnl.ControlProjection()
    elif isinstance(gain, tuple) and gain[1] == 'CP_OBJECT':
        gain = (gain[0], pnl.ControlProjection())

    R = pnl.RecurrentTransferMechanism(
        # NOTE: fixed name prevents failures due to registry naming
        # for parallel test runs
        name='R-CONTROL',
        noise=noise,
        function=psyneulink.core.components.functions.transferfunctions.Logistic(gain=gain))

    assert R.parameter_ports[pnl.NOISE].mod_afferents[0].name in \
        'ControlProjection for R-CONTROL[noise]'
    assert R.parameter_ports[pnl.GAIN].mod_afferents[0].name in \
        'ControlProjection for R-CONTROL[gain]'
words_input_layer = pnl.TransferMechanism(
    size=3,
    function=psyneulink.core.components.functions.transferfunctions.Linear,
    name='WORDS_INPUT')

task_input_layer = pnl.TransferMechanism(
    size=2,
    function=psyneulink.core.components.functions.transferfunctions.Linear,
    name='TASK_INPUT')

# Task layer, tasks: ('name the color', 'read the word')
task_layer = pnl.RecurrentTransferMechanism(
    size=2,
    function=psyneulink.core.components.functions.transferfunctions.Logistic(),
    hetero=-2,
    integrator_mode=True,
    integration_rate=0.1,
    name='TASK')

# Hidden layer units, colors: ('red','green') words: ('RED','GREEN')
colors_hidden_layer = pnl.RecurrentTransferMechanism(
    size=3,
    function=psyneulink.core.components.functions.transferfunctions.Logistic(x_0=4.0),
    integrator_mode=True,
    hetero=-2.0,
    # noise=pnl.NormalDist(mean=0.0, standard_deviation=.0).function,
    integration_rate=0.1,  # cohen-huston text says 0.01
    name='COLORS HIDDEN')
def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200,
                                epochs=10, learning_rate=20, attach_LCA=True,
                                competition=0.2, self_excitation=0.2, leak=0.4,
                                threshold=1e-4, exec_limit=EXEC_LIMIT):
    # Get all tasks from bipartite graph (edges) and strip 'i/o' suffix
    all_tasks = get_all_tasks(bipartite_graph)

    # Analyze bipartite graph for network properties
    onodes = [n for n, d in bipartite_graph.nodes(data=True) if d['bipartite'] == 0]
    inodes = [n for n, d in bipartite_graph.nodes(data=True) if d['bipartite'] == 1]
    input_dims = len(inodes)
    output_dims = len(onodes)
    num_tasks = len(all_tasks)

    # Start building network as PsyNeuLink object
    # Layer parameters
    nh = num_hidden
    D_i = num_features * input_dims
    D_c = num_tasks
    D_h = nh
    D_o = num_features * output_dims

    # Weight matrices (defaults provided by Dillon)
    wih = np.random.rand(D_i, D_h) * 0.02 - 0.01
    wch = np.random.rand(D_c, D_h) * 0.02 - 0.01
    wco = np.random.rand(D_c, D_o) * 0.02 - 0.01
    who = np.random.rand(D_h, D_o) * 0.02 - 0.01

    # Training params (defaults provided by Dillon)
    patience = 10
    min_delt = 0.00001
    lr = learning_rate

    # Instantiate layers and projections
    il = pnl.TransferMechanism(size=D_i, name='input')
    cl = pnl.TransferMechanism(size=D_c, name='control')
    hl = pnl.TransferMechanism(size=D_h, name='hidden',
                               function=pnl.Logistic(bias=-2))
    ol = pnl.TransferMechanism(size=D_o, name='output',
                               function=pnl.Logistic(bias=-2))

    pih = pnl.MappingProjection(matrix=wih)
    pch = pnl.MappingProjection(matrix=wch)
    pco = pnl.MappingProjection(matrix=wco)
    pho = pnl.MappingProjection(matrix=who)

    # Create training data for network
    # We train across all possible inputs, one task at a time
    input_examples, output_examples, control_examples = generate_training_data(
        all_tasks, num_features, input_dims, output_dims)

    # Training parameter set
    input_set = {
        'inputs': {
            il: input_examples.tolist(),
            cl: control_examples.tolist()
        },
        'targets': {
            ol: output_examples.tolist()
        },
        'epochs': 10  # epochs  # LCA doesn't settle for 1000 epochs
    }

    # Build network
    mnet = pnl.AutodiffComposition(learning_rate=learning_rate, name='mnet')
    mnet.output_CIM.parameters.value._set_history_max_length(1000)
    mnet.add_node(il)
    mnet.add_node(cl)
    mnet.add_node(hl)
    mnet.add_node(ol)
    mnet.add_projection(projection=pih, sender=il, receiver=hl)
    mnet.add_projection(projection=pch, sender=cl, receiver=hl)
    mnet.add_projection(projection=pco, sender=cl, receiver=ol)
    mnet.add_projection(projection=pho, sender=hl, receiver=ol)

    # Train network
    print("training 2:", MNET_BIN_EXECUTE)
    t1 = time.time()
    mnet.learn(
        inputs=input_set,
        minibatch_size=input_set['epochs'],
        bin_execute=MNET_BIN_EXECUTE,
        patience=patience,
        min_delta=min_delt,
    )
    t2 = time.time()
    print("training 2:", MNET_BIN_EXECUTE, t2 - t1)

    for projection in mnet.projections:
        if hasattr(projection.parameters, 'matrix'):
            weights = projection.parameters.matrix.get(mnet)
            projection.parameters.matrix.set(weights, None)

    # Apply LCA transform (values from Sebastian's code -- supposedly taken
    # from the original LCA paper from Marius & Jay)
    if attach_LCA:
        lci = pnl.LeakyCompetingIntegrator(rate=leak, time_step_size=0.01)
        lca_matrix = get_LCA_matrix(output_dims, num_features,
                                    self_excitation, competition)

        lca = pnl.RecurrentTransferMechanism(size=D_o,
                                             matrix=lca_matrix,
                                             integrator_mode=True,
                                             integrator_function=lci,
                                             name='lca',
                                             termination_threshold=threshold,
                                             reset_stateful_function_when=pnl.AtTrialStart())

        # Wrapper composition used to pass values between mnet
        # (AutodiffComposition) and lca (LCAMechanism)
        wrapper_composition = pnl.Composition()

        # Add mnet and lca to outer_composition
        wrapper_composition.add_linear_processing_pathway([mnet, lca])

        # Dummy to save mnet results
        if str(LCA_BIN_EXECUTE).startswith("LLVM"):
            dummy = pnl.TransferMechanism(size=D_o, name="MNET_OUT")
            wrapper_composition.add_linear_processing_pathway([mnet, dummy])

        # Set execution limit
        lca.parameters.max_executions_before_finished.set(exec_limit, wrapper_composition)

        # # Logging/Debugging
        # lca.set_log_conditions('value', pnl.LogCondition.EXECUTION)

        return wrapper_composition

    return mnet
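# Hypothetical usage sketch (editorial addition; assumes a networkx bipartite task graph
# whose output nodes carry the node attribute bipartite=0 and input nodes bipartite=1,
# and that get_all_tasks / generate_training_data / get_LCA_matrix are defined in this
# module):
# comp = get_trained_network_multLCA(bipartite_graph, num_features=3, attach_LCA=True)
# comp.run(inputs={...})  # inputs keyed by the trained mnet AutodiffComposition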
# first element is color task attendance, second element is motion task attendance
inputLayer = pnl.TransferMechanism(
    # default_variable=[[0.0, 0.0]],
    size=2,
    function=pnl.Linear(slope=1, intercept=0),
    output_ports=[pnl.RESULT],
    name='Input')
inputLayer.set_log_conditions([pnl.RESULT])

# Recurrent Transfer Mechanism that models the recurrence in the activation between the two stimulus and action
# dimensions. Positive self excitation and negative opposite inhibition with an integrator rate = tau
# Modulated variable in simulations is the GAIN variable of this mechanism
activation = pnl.RecurrentTransferMechanism(
    default_variable=[[0.0, 0.0]],
    function=pnl.Logistic(gain=1.0),
    matrix=[[1.0, -1.0], [-1.0, 1.0]],
    integrator_mode=True,
    integrator_function=pnl.AdaptiveIntegrator(rate=tau),
    initial_value=np.array([[0.0, 0.0]]),
    output_ports=[pnl.RESULT],
    name='Activity')
activation.set_log_conditions([pnl.RESULT, "mod_gain"])

stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                     size=2,
                                     function=pnl.Linear(slope=1, intercept=0),
                                     output_ports=[pnl.RESULT],
                                     name="Stimulus Info")
stimulusInfo.set_log_conditions([pnl.RESULT])
# Response layer, responses: ('red', 'green')
# tau = 0.1 (here, smoothing factor)
# should be randomly distributed noise to the net input of each unit (except input unit)
# Now a RecurrentTransferMechanism compared to Lauda's Stroop model!
response_layer = pnl.RecurrentTransferMechanism(
    size=2,  # Recurrent
    function=psyneulink.core.components.functions.transferfunctions.Logistic,
    # pnl.Stability(matrix=np.matrix([[0.0, -1.0], [-1.0, 0.0]])),
    name='RESPONSE',
    output_ports=[
        pnl.RESULT,
        {
            pnl.NAME: 'DECISION_ENERGY',
            pnl.VARIABLE: (pnl.OWNER_VALUE, 0),
            pnl.FUNCTION: psyneulink.core.components.functions.objectivefunctions.Stability(
                default_variable=np.array([0.0, -1.0]),
                metric=pnl.ENERGY,
                matrix=np.array([[0.0, -1.0], [-1.0, 0.0]]))
        }
    ],
    integrator_mode=True,
    # noise=pnl.NormalDist(mean=0.0, standard_deviation=.01).function)
    integration_rate=0.1)

# response_layer.set_log_conditions('value')
# response_layer.set_log_conditions('gain')

# SET UP CONNECTIONS
# rows correspond to sender
import psyneulink as pnl

comp = pnl.Composition(name="comp")
inner_comp = pnl.Composition(name="Inner Composition")

A = pnl.TransferMechanism(function=pnl.Linear(slope=5.0, intercept=2.0), name="A")
B = pnl.TransferMechanism(function=pnl.Logistic, name="B")
C = pnl.RecurrentTransferMechanism(name="C")
D = pnl.IntegratorMechanism(function=pnl.SimpleIntegrator, name="D")
E = pnl.TransferMechanism(name="E")
F = pnl.TransferMechanism(name="F")

for m in [E, F]:
    inner_comp.add_node(m)

for m in [A, B, C, D, inner_comp]:
    comp.add_node(m)

comp.add_projection(pnl.MappingProjection(), A, B)
comp.add_projection(pnl.MappingProjection(), A, C)
comp.add_projection(pnl.MappingProjection(), B, D)
comp.add_projection(pnl.MappingProjection(), C, D)
comp.add_projection(pnl.MappingProjection(), C, inner_comp)
inner_comp.add_projection(pnl.MappingProjection(), E, F)

comp.scheduler.add_condition_set({
    A: pnl.EveryNPasses(1),
    B: pnl.EveryNCalls(A, 2),
    C: pnl.EveryNCalls(B, 2)
    termination_measure=pnl.Distance(
        metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
    ),
)
B = pnl.TransferMechanism(
    name="B",
    function=pnl.Logistic(default_variable=[[0]]),
    termination_measure=pnl.Distance(
        metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
    ),
)
C = pnl.RecurrentTransferMechanism(
    name="C",
    function=pnl.Linear(default_variable=[[0]]),
    initial_value=[[0]],
    output_ports=["RESULTS"],
    termination_measure=pnl.Distance(
        metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
    ),
)
D = pnl.IntegratorMechanism(
    name="D",
    function=pnl.SimpleIntegrator(initializer=[[0]], default_variable=[[0]]),
)
Inner_Composition = pnl.Composition(name="Inner Composition")
E = pnl.TransferMechanism(
    name="E",
    function=pnl.Linear(default_variable=[[0]]),
    termination_measure=pnl.Distance(
# Response layer, responses: ('red', 'green')
# tau = 0.1 (here, smoothing factor)
# should be randomly distributed noise to the net input of each unit (except input unit)
# Now a RecurrentTransferMechanism compared to Lauda's Stroop model!
response_layer = pnl.RecurrentTransferMechanism(
    size=2,  # Recurrent
    function=pnl.Logistic,
    # pnl.Stability(matrix=np.matrix([[0.0, -1.0], [-1.0, 0.0]])),
    name='RESPONSE',
    output_states=[
        pnl.RECURRENT_OUTPUT.RESULT,
        {
            pnl.NAME: 'DECISION_ENERGY',
            pnl.VARIABLE: (pnl.OWNER_VALUE, 0),
            pnl.FUNCTION: pnl.Stability(default_variable=np.array([0.0, -1.0]),
                                        metric=pnl.ENERGY,
                                        matrix=np.array([[0.0, -1.0], [-1.0, 0.0]]))
        }
    ],
    integrator_mode=True,
    # noise=pnl.NormalDist(mean=0.0, standard_dev=.01).function)
    smoothing_factor=0.1)

# response_layer.set_log_conditions('value')
# response_layer.set_log_conditions('gain')

# SET UP CONNECTIONS
# rows correspond to sender
def test_botvinick_model(benchmark, mode, reps): if reps > 1 and not pytest.config.getoption("--stress"): pytest.skip("not stressed") benchmark.group = "Botvinick (scale " + str(reps / 100) + ")" # SET UP MECHANISMS ---------------------------------------------------------------------------------------------------- # Linear input layer # colors: ('red', 'green'), words: ('RED','GREEN') colors_input_layer = pnl.TransferMechanism( size=3, function=psyneulink.core.components.functions.transferfunctions.Linear, name='COLORS_INPUT') words_input_layer = pnl.TransferMechanism( size=3, function=psyneulink.core.components.functions.transferfunctions.Linear, name='WORDS_INPUT') task_input_layer = pnl.TransferMechanism( size=2, function=psyneulink.core.components.functions.transferfunctions.Linear, name='TASK_INPUT') # Task layer, tasks: ('name the color', 'read the word') task_layer = pnl.RecurrentTransferMechanism( size=2, function=psyneulink.core.components.functions.transferfunctions. Logistic(), hetero=-2, integrator_mode=True, integration_rate=0.01, name='TASK_LAYER') # Hidden layer # colors: ('red','green', 'neutral') words: ('RED','GREEN', 'NEUTRAL') colors_hidden_layer = pnl.RecurrentTransferMechanism( size=3, function=psyneulink.core.components.functions.transferfunctions. Logistic( x_0=4.0), # bias 4.0 is -4.0 in the paper see Docs for description integrator_mode=True, hetero=-2, integration_rate=0.01, # cohen-huston text says 0.01 name='COLORS_HIDDEN') words_hidden_layer = pnl.RecurrentTransferMechanism( size=3, function=psyneulink.core.components.functions.transferfunctions. Logistic(x_0=4.0), integrator_mode=True, hetero=-2, integration_rate=0.01, name='WORDS_HIDDEN') # Response layer, responses: ('red', 'green') response_layer = pnl.RecurrentTransferMechanism( size=2, function=psyneulink.core.components.functions.transferfunctions. Logistic(), hetero=-2.0, integrator_mode=True, integration_rate=0.01, output_states=[ pnl.RECURRENT_OUTPUT.RESULT, { pnl.NAME: 'DECISION_ENERGY', pnl.VARIABLE: (pnl.OWNER_VALUE, 0), pnl.FUNCTION: psyneulink.core.components.functions.objectivefunctions. 
Stability(default_variable=np.array([0.0, 0.0]), metric=pnl.ENERGY, matrix=np.array([[0.0, -4.0], [-4.0, 0.0]])) } ], name='RESPONSE', ) # Mapping projections--------------------------------------------------------------------------------------------------- color_input_weights = pnl.MappingProjection( matrix=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])) word_input_weights = pnl.MappingProjection( matrix=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])) task_input_weights = pnl.MappingProjection( matrix=np.array([[1.0, 0.0], [0.0, 1.0]])) color_task_weights = pnl.MappingProjection( matrix=np.array([[4.0, 0.0], [4.0, 0.0], [4.0, 0.0]])) task_color_weights = pnl.MappingProjection( matrix=np.array([[4.0, 4.0, 4.0], [0.0, 0.0, 0.0]])) response_color_weights = pnl.MappingProjection( matrix=np.array([[1.5, 0.0, 0.0], [0.0, 1.5, 0.0]])) response_word_weights = pnl.MappingProjection( matrix=np.array([[2.5, 0.0, 0.0], [0.0, 2.5, 0.0]])) color_response_weights = pnl.MappingProjection( matrix=np.array([[1.5, 0.0], [0.0, 1.5], [0.0, 0.0]])) word_response_weights = pnl.MappingProjection( matrix=np.array([[2.5, 0.0], [0.0, 2.5], [0.0, 0.0]])) word_task_weights = pnl.MappingProjection( matrix=np.array([[0.0, 4.0], [0.0, 4.0], [0.0, 4.0]])) task_word_weights = pnl.MappingProjection( matrix=np.array([[0.0, 0.0, 0.0], [4.0, 4.0, 4.0]])) # CREATE Composition comp = pnl.Composition() # Add mechanisms comp.add_node(colors_input_layer) comp.add_node(colors_hidden_layer) comp.add_node(words_input_layer) comp.add_node(words_hidden_layer) comp.add_node(task_input_layer) comp.add_node(task_layer) comp.add_node(response_layer) # Add projections comp.add_projection(task_input_weights, task_input_layer, task_layer) # Color process comp.add_projection(color_input_weights, colors_input_layer, colors_hidden_layer) comp.add_projection(color_response_weights, colors_hidden_layer, response_layer) comp.add_projection(response_color_weights, response_layer, colors_hidden_layer) # Word process comp.add_projection(word_input_weights, words_input_layer, words_hidden_layer) comp.add_projection(word_response_weights, words_hidden_layer, response_layer) comp.add_projection(response_word_weights, response_layer, words_hidden_layer) # Color task process comp.add_projection(task_color_weights, task_layer, colors_hidden_layer) comp.add_projection(color_task_weights, colors_hidden_layer, task_layer) # Word task process comp.add_projection(task_word_weights, task_layer, words_hidden_layer) comp.add_projection(word_task_weights, words_hidden_layer, task_layer) def trial_dict(red_color, green_color, neutral_color, red_word, green_word, neutral_word, CN, WR): trialdict = { colors_input_layer: [red_color, green_color, neutral_color], words_input_layer: [red_word, green_word, neutral_word], task_input_layer: [CN, WR] } return trialdict # Define initialization trials separately CN_trial_initialize_input = trial_dict( 0, 0, 0, 0, 0, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR CN_incongruent_trial_input = trial_dict( 1, 0, 0, 0, 1, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR CN_congruent_trial_input = trial_dict( 1, 0, 0, 1, 0, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR CN_control_trial_input = trial_dict( 1, 0, 0, 0, 0, 1, 1, 0) #red_color, green color, red_word, green word, CN, WR Stimulus = [[CN_trial_initialize_input, CN_congruent_trial_input], [CN_trial_initialize_input, CN_incongruent_trial_input], [CN_trial_initialize_input, CN_control_trial_input]] # 
should be 500 and 1000 ntrials0 = 5 * reps ntrials = 10 * reps comp._analyze_graph() def run(bin_execute): results = [] for stim in Stimulus: # RUN the SYSTEM to initialize ---------------------------------------------------------------------------------------- comp.run(inputs=stim[0], num_trials=ntrials0, bin_execute=bin_execute) comp.run(inputs=stim[1], num_trials=ntrials, bin_execute=bin_execute) # reinitialize after condition was run colors_hidden_layer.reinitialize([[0, 0, 0]], execution_context=comp) words_hidden_layer.reinitialize([[0, 0, 0]], execution_context=comp) response_layer.reinitialize([[0, 0]], execution_context=comp) task_layer.reinitialize([[0, 0]], execution_context=comp) # Comp results include concatenation of both the above runs results.append(comp.results.copy()) comp.reinitialize() comp.results = [] return results res = benchmark(run, mode) if reps == 1: res2d = [x[0] for r in res for x in r] assert np.allclose(res2d, [[0.4976852381289525, 0.4976852381289525], [0.4954107346393883, 0.4954107346393883], [0.493176053709877, 0.493176053709877], [0.49098075641903416, 0.49098075641903416], [0.48882440125362586, 0.48882440125362586], [0.4867086398433437, 0.4867065445883512], [0.48463311528336894, 0.4846267297743475], [0.48259746969966644, 0.4825844985226081], [0.4806013445692439, 0.4805793913088515], [0.4786443810171789, 0.478610947754902], [0.47672622009168814, 0.47667870698765635], [0.4748465030184808, 0.4747822079766836], [0.47300487143558895, 0.47292098985146575], [0.4712009676098337, 0.47109459219926386], [0.4694344346360397, 0.469302555344549], [0.4976852381289525, 0.4976852381289525], [0.4954107346393883, 0.4954107346393883], [0.493176053709877, 0.493176053709877], [0.49098075641903416, 0.49098075641903416], [0.48882440125362586, 0.48882440125362586], [0.4867073174570701, 0.48670786697451607], [0.4846290813861633, 0.4846307636703061], [0.4825892679847457, 0.4825927002315042], [0.4805874511719694, 0.4805932846864651], [0.4786232042071176, 0.47863212451395776], [0.4766961000320741, 0.47670882693361377], [0.47480571159250756, 0.47482299917547827], [0.47295161213880393, 0.4729742487298337], [0.47113337550774326, 0.47116218357843975], [0.46935057638588656, 0.4693864124082814], [0.4976852381289525, 0.4976852381289525], [0.4954107346393883, 0.4954107346393883], [0.493176053709877, 0.493176053709877], [0.49098075641903416, 0.49098075641903416], [0.48882440125362586, 0.48882440125362586], [0.4867073174570701, 0.4867065445883512], [0.484629088030232, 0.4846267364184149], [0.4825892948040597, 0.4825845253419109], [0.4805875188258231, 0.48057945896265514], [0.4786233407217324, 0.4786110842693559], [0.47669634103703906, 0.4766789479921973], [0.4748061005546799, 0.4747825969378818], [0.47295220059350046, 0.47292157830414155], [0.471134223287054, 0.47109543997470166], [0.4693517518439444, 0.4693037307956372]]) # MODIFIED 5/23/19 OLD: # # FIXME: for some reason numpy adds another layer of array # if mode == 'Python': # # res1d = [x[1][0] for r in res for x in r] # res1d = [x[1] for r in res for x in r] # else: # MODIFIED 5/23/19 NEW: [JDC] res1d = [x[1] for r in res for x in r] # MODIFIED 5/23/19 END assert np.allclose( res1d, [[0.9907623850058885], [0.9817271839837536], [0.9728904798113899], [0.9642484126952278], [0.9557971810438632], [0.9475371212778005], [0.9394646472005338], [0.9315762316131724], [0.9238684065412114], [0.9163377633447616], [0.9089809527216751], [0.901794684612485], [0.8947757280155361], [0.8879209107202126], [0.8812271189656683], [0.9907623850058885], 
[0.9817271839837536], [0.9728904798113899], [0.9642484126952278], [0.9557971810438632], [0.9475371212816769], [0.9394646472360609], [0.9315762317580137], [0.9238684069513318], [0.9163377642853222], [0.9089809546004746], [0.9017946880160064], [0.894775733747658], [0.8879209198436373], [0.8812271328461213], [0.9907623850058885], [0.9817271839837536], [0.9728904798113899], [0.9642484126952278], [0.9557971810438632], [0.9475345468215851], [0.9394568532220963], [0.9315605030724186], [0.9238419591260757], [0.9162977442377989], [0.9089244414290619], [0.9017186938531998], [0.8946772046683807], [0.8877967368262162], [0.8810741127833248]]) if reps == 10: assert np.allclose(res[0][ntrials0 - 1][0], [0.42505118, 0.42505118]) assert np.allclose(res[0][-1][0], [0.43621363, 0.40023224]) assert np.allclose(res[1][ntrials0 - 1][0], [0.42505118, 0.42505118]) assert np.allclose(res[1][-1][0], [0.41420086, 0.42196304]) assert np.allclose(res[2][ntrials0 - 1][0], [0.42505118, 0.42505118]) assert np.allclose(res[2][-1][0], [0.41689666, 0.40291293]) assert np.allclose(res[0][ntrials0 - 1][1], [0.72267401]) assert np.allclose(res[0][-1][1], [0.69834703]) assert np.allclose(res[1][ntrials0 - 1][1], [0.72267401]) assert np.allclose(res[1][-1][1], [0.69910981]) assert np.allclose(res[2][ntrials0 - 1][1], [0.72267401]) assert np.allclose(res[2][-1][1], [0.67189222]) if reps == 100: assert np.allclose(res[0][ntrials0 - 1][0], [0.48611807, 0.48611807]) assert np.allclose(res[0][-1][0], [0.95970536, 0.21425063]) assert np.allclose(res[1][ntrials0 - 1][0], [0.48611807, 0.48611807]) assert np.allclose(res[1][-1][0], [0.55802971, 0.83844741]) assert np.allclose(res[2][ntrials0 - 1][0], [0.48611807, 0.48611807]) assert np.allclose(res[2][-1][0], [0.89746087, 0.25060644]) assert np.allclose(res[0][ntrials0 - 1][1], [0.94524311]) assert np.allclose(res[0][-1][1], [0.82246989]) assert np.allclose(res[1][ntrials0 - 1][1], [0.94524311]) assert np.allclose(res[1][-1][1], [1.87151424]) assert np.allclose(res[2][ntrials0 - 1][1], [0.94524311]) assert np.allclose(res[2][-1][1], [0.89963791])
def test_stability_flexibility_susan_and_sebastian(self): # computeAccuracy(trialInformation) # Inputs: trialInformation[0, 1, 2, 3] # trialInformation[0] - Task Dimension : [0, 1] or [1, 0] # trialInformation[1] - Stimulus Dimension: Congruent {[1, 1] or [-1, -1]} // Incongruent {[-1, 1] or [1, -1]} # trialInformation[2] - Upper Threshold: Probability of DDM choosing upper bound # trialInformation[3] - Lower Threshold: Probability of DDM choosing lower bound def computeAccuracy(trialInformation): # Unload contents of trialInformation # Origin Node Inputs taskInputs = trialInformation[0] stimulusInputs = trialInformation[1] # DDM Outputs upperThreshold = trialInformation[2] lowerThreshold = trialInformation[3] # Keep Track of Accuracy accuracy = [] # Beginning of Accuracy Calculation colorTrial = (taskInputs[0] == 1) motionTrial = (taskInputs[1] == 1) # Based on the task dimension information, decide which response is "correct" # Obtain accuracy probability from DDM thresholds in "correct" direction if colorTrial: if stimulusInputs[0] == 1: accuracy.append(upperThreshold) elif stimulusInputs[0] == -1: accuracy.append(lowerThreshold) if motionTrial: if stimulusInputs[1] == 1: accuracy.append(upperThreshold) elif stimulusInputs[1] == -1: accuracy.append(lowerThreshold) # Accounts for initialization runs that have no variable input if len(accuracy) == 0: accuracy = [0] # print("Accuracy: ", accuracy[0]) # print() return [accuracy] # BEGIN: Composition Construction # Constants as defined in Musslick et al. 2018 tau = 0.9 # Time Constant DRIFT = 1 # Drift Rate STARTING_POINT = 0.0 # Starting Point THRESHOLD = 0.0475 # Threshold NOISE = 0.04 # Noise T0 = 0.2 # T0 # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive # Origin Node taskLayer = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], size=2, function=pnl.Linear(slope=1, intercept=0), output_states=[pnl.RESULT], name='Task Input [I1, I2]') # Stimulus Layer: [Color Stimulus, Motion Stimulus] # Origin Node stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], size=2, function=pnl.Linear(slope=1, intercept=0), output_states=[pnl.RESULT], name="Stimulus Input [S1, S2]") # Activation Layer: [Color Activation, Motion Activation] # Recurrent: Self Excitation, Mutual Inhibition # Controlled: Gain Parameter activation = pnl.RecurrentTransferMechanism( default_variable=[[0.0, 0.0]], function=pnl.Logistic(gain=1.0), matrix=[[1.0, -1.0], [-1.0, 1.0]], integrator_mode=True, integrator_function=pnl.AdaptiveIntegrator(rate=(tau)), initial_value=np.array([[0.0, 0.0]]), output_states=[pnl.RESULT], name='Task Activations [Act 1, Act 2]') # Hadamard product of Activation and Stimulus Information nonAutomaticComponent = pnl.TransferMechanism( default_variable=[[0.0, 0.0]], size=2, function=pnl.Linear(slope=1, intercept=0), input_states=pnl.InputState(combine=pnl.PRODUCT), output_states=[pnl.RESULT], name='Non-Automatic Component [S1*Activity1, S2*Activity2]') # Summation of nonAutomatic and Automatic Components ddmCombination = pnl.TransferMechanism( size=1, function=pnl.Linear(slope=1, intercept=0), input_states=pnl.InputState(combine=pnl.SUM), output_states=[pnl.RESULT], name="Drift = (S1 + S2) + (S1*Activity1 + S2*Activity2)") decisionMaker = pnl.DDM(function=pnl.DriftDiffusionAnalytical( drift_rate=DRIFT, starting_point=STARTING_POINT, threshold=THRESHOLD, noise=NOISE, t0=T0), output_states=[ pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME, pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD ], name='DDM') 
taskLayer.set_log_conditions([pnl.RESULT]) stimulusInfo.set_log_conditions([pnl.RESULT]) activation.set_log_conditions([pnl.RESULT, "mod_gain"]) nonAutomaticComponent.set_log_conditions([pnl.RESULT]) ddmCombination.set_log_conditions([pnl.RESULT]) decisionMaker.set_log_conditions([ pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD, pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME ]) # Composition Creation stabilityFlexibility = pnl.Composition(controller_mode=pnl.BEFORE) # Node Creation stabilityFlexibility.add_node(taskLayer) stabilityFlexibility.add_node(activation) stabilityFlexibility.add_node(nonAutomaticComponent) stabilityFlexibility.add_node(stimulusInfo) stabilityFlexibility.add_node(ddmCombination) stabilityFlexibility.add_node(decisionMaker) # Projection Creation stabilityFlexibility.add_projection(sender=taskLayer, receiver=activation) stabilityFlexibility.add_projection(sender=activation, receiver=nonAutomaticComponent) stabilityFlexibility.add_projection(sender=stimulusInfo, receiver=nonAutomaticComponent) stabilityFlexibility.add_projection(sender=stimulusInfo, receiver=ddmCombination) stabilityFlexibility.add_projection(sender=nonAutomaticComponent, receiver=ddmCombination) stabilityFlexibility.add_projection(sender=ddmCombination, receiver=decisionMaker) # Beginning of Controller # Grid Search Range searchRange = pnl.SampleSpec(start=1.0, stop=1.9, num=10) # Modulate the GAIN parameter from activation layer # Initalize cost function as 0 signal = pnl.ControlSignal( projections=[(pnl.GAIN, activation)], function=pnl.Linear, variable=1.0, intensity_cost_function=pnl.Linear(slope=0.0), allocation_samples=searchRange) # Use the computeAccuracy function to obtain selection values # Pass in 4 arguments whenever computeRewardRate is called objectiveMechanism = pnl.ObjectiveMechanism( monitor=[ taskLayer, stimulusInfo, (pnl.PROBABILITY_UPPER_THRESHOLD, decisionMaker), (pnl.PROBABILITY_LOWER_THRESHOLD, decisionMaker) ], function=computeAccuracy, name="Controller Objective Mechanism") # Sets trial history for simulations over specified signal search parameters metaController = pnl.OptimizationControlMechanism( agent_rep=stabilityFlexibility, features=[taskLayer.input_state, stimulusInfo.input_state], feature_function=pnl.Buffer(history=10), name="Controller", objective_mechanism=objectiveMechanism, function=pnl.GridSearch(), control_signals=[signal]) stabilityFlexibility.add_controller(metaController) stabilityFlexibility.enable_controller = True # stabilityFlexibility.model_based_optimizer_mode = pnl.BEFORE for i in range(1, len(stabilityFlexibility.controller.input_states)): stabilityFlexibility.controller.input_states[ i].function.reinitialize() # Origin Node Inputs taskTrain = [[1, 0], [0, 1], [1, 0], [0, 1]] stimulusTrain = [[1, -1], [-1, 1], [1, -1], [-1, 1]] inputs = {taskLayer: taskTrain, stimulusInfo: stimulusTrain} stabilityFlexibility.run(inputs)
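# Note (editorial, hedged): with controller_mode=pnl.BEFORE and GridSearch over the 10
# gain samples in searchRange, the OptimizationControlMechanism simulates each candidate
# gain on agent_rep before every trial and applies the one that maximizes the value
# reported by the Controller Objective Mechanism (the computeAccuracy function above).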
def runStabilityFlexibility(tasks, stimuli, gain):

    integrationConstant = 0.8   # time constant
    DRIFT = 0.25                # Drift Rate
    STARTING_POINT = 0.0        # Starting Point
    THRESHOLD = 0.05            # Threshold
    NOISE = 0.1                 # Noise
    T0 = 0.2                    # T0
    wa = 0.2
    g = gain

    # first element is color task attendance, second element is motion task attendance
    inputLayer = pnl.TransferMechanism(
        # default_variable=[[0.0, 0.0]],
        size=2,
        function=pnl.Linear(slope=1, intercept=0),
        output_states=[pnl.RESULT],
        name='Input')
    inputLayer.set_log_conditions([pnl.RESULT])

    # Recurrent Transfer Mechanism that models the recurrence in the activation between the two stimulus and action
    # dimensions. Positive self excitation and negative opposite inhibition with an integrator rate = tau
    # Modulated variable in simulations is the GAIN variable of this mechanism
    activation = pnl.RecurrentTransferMechanism(
        default_variable=[[0.0, 0.0]],
        function=pnl.Logistic(gain=g),
        matrix=[[1.0, -1.0], [-1.0, 1.0]],
        integrator_mode=True,
        integrator_function=pnl.AdaptiveIntegrator(rate=integrationConstant),
        initial_value=np.array([[0.0, 0.0]]),
        output_states=[pnl.RESULT],
        name='Activity')
    activation.set_log_conditions([pnl.RESULT, "mod_gain"])

    stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                         size=2,
                                         function=pnl.Linear(slope=1, intercept=0),
                                         output_states=[pnl.RESULT],
                                         name="Stimulus Info")
    stimulusInfo.set_log_conditions([pnl.RESULT])

    congruenceWeighting = pnl.TransferMechanism(
        default_variable=[[0.0, 0.0]],
        size=2,
        function=pnl.Linear(slope=wa, intercept=0),
        name='Congruence * Automatic Component')

    controlledElement = pnl.TransferMechanism(
        default_variable=[[0.0, 0.0]],
        size=2,
        function=pnl.Linear(slope=1, intercept=0),
        input_states=pnl.InputState(combine=pnl.PRODUCT),
        output_states=[pnl.RESULT],
        name='Stimulus Info * Activity')
    controlledElement.set_log_conditions([pnl.RESULT])

    ddmCombination = pnl.TransferMechanism(size=1,
                                           function=pnl.Linear(slope=1, intercept=0),
                                           output_states=[pnl.RESULT],
                                           name="DDM Integrator")
    ddmCombination.set_log_conditions([pnl.RESULT])

    decisionMaker = pnl.DDM(
        function=pnl.DriftDiffusionAnalytical(drift_rate=DRIFT,
                                              starting_point=STARTING_POINT,
                                              threshold=THRESHOLD,
                                              noise=NOISE,
                                              t0=T0),
        output_states=[
            pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
            pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD
        ],
        name='DDM')
    decisionMaker.set_log_conditions([
        pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD,
        pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME
    ])

    ########### Composition
    stabilityFlexibility = pnl.Composition()

    ### NODE CREATION
    stabilityFlexibility.add_node(inputLayer)
    stabilityFlexibility.add_node(activation)
    stabilityFlexibility.add_node(congruenceWeighting)
    stabilityFlexibility.add_node(controlledElement)
    stabilityFlexibility.add_node(stimulusInfo)
    stabilityFlexibility.add_node(ddmCombination)
    stabilityFlexibility.add_node(decisionMaker)

    stabilityFlexibility.add_projection(sender=inputLayer, receiver=activation)
    stabilityFlexibility.add_projection(sender=activation, receiver=controlledElement)
    stabilityFlexibility.add_projection(sender=stimulusInfo, receiver=congruenceWeighting)
    stabilityFlexibility.add_projection(sender=stimulusInfo, receiver=controlledElement)
    stabilityFlexibility.add_projection(sender=congruenceWeighting, receiver=ddmCombination)
    stabilityFlexibility.add_projection(sender=controlledElement, receiver=ddmCombination)
    stabilityFlexibility.add_projection(sender=ddmCombination, receiver=decisionMaker)

    runs = len(tasks)
    inputs = {inputLayer: tasks, stimulusInfo: stimuli}

    stabilityFlexibility.run(inputs)

    decisions = decisionMaker.log.nparray()
    upper, lower = extractValues(decisions)
    modelResults = [tasks, stimuli, upper, lower]
    accuracies = computeAccuracy(modelResults)

    activations = activation.log.nparray()
    activity1 = []
    activity2 = []
    for i in range(0, runs):
        activity1.append(activations[1][1][4][i + 1][0])
        activity2.append(activations[1][1][4][i + 1][1])

    return accuracies, activity1, activity2
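# Hypothetical invocation sketch (editorial addition; assumes computeAccuracy and
# extractValues are defined elsewhere in this script):
# taskTrain = [[1, 0], [0, 1]]
# stimTrain = [[1, 1], [-1, -1]]
# accuracies, act1, act2 = runStabilityFlexibility(taskTrain, stimTrain, gain=1.0)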
colors_input_layer = pnl.TransferMechanism(size=3,
                                           function=pnl.Linear,
                                           name='COLORS_INPUT')

words_input_layer = pnl.TransferMechanism(size=3,
                                          function=pnl.Linear,
                                          name='WORDS_INPUT')

task_input_layer = pnl.TransferMechanism(size=2,
                                         function=pnl.Linear,
                                         name='TASK_INPUT')

# Task layer, tasks: ('name the color', 'read the word')
task_layer = pnl.RecurrentTransferMechanism(size=2,
                                            function=pnl.Logistic(),
                                            hetero=-2,
                                            integrator_mode=True,
                                            integration_rate=0.01,
                                            name='TASK_LAYER')

# Hidden layer
# colors: ('red','green', 'neutral') words: ('RED','GREEN', 'NEUTRAL')
colors_hidden_layer = pnl.RecurrentTransferMechanism(
    size=3,
    function=pnl.Logistic(x_0=4.0),  # bias 4.0 is -4.0 in the paper see Docs for description
    integrator_mode=True,
    hetero=-2,
    integration_rate=0.01,  # cohen-huston text says 0.01
    name='COLORS_HIDDEN')

words_hidden_layer = pnl.RecurrentTransferMechanism(
    size=3,
    function=pnl.Logistic(x_0=4.0),
    integrator_mode=True,
def test_botvinick_model(benchmark, mode, reps): benchmark.group = "Botvinick (scale " + str(reps / 100) + ")" # SET UP MECHANISMS ---------------------------------------------------------------------------------------------------- # Linear input layer # colors: ('red', 'green'), words: ('RED','GREEN') colors_input_layer = pnl.TransferMechanism( size=3, function=psyneulink.core.components.Linear, name='COLORS_INPUT') words_input_layer = pnl.TransferMechanism( size=3, function=psyneulink.core.components.Linear, name='WORDS_INPUT') task_input_layer = pnl.TransferMechanism( size=2, function=psyneulink.core.components.Linear, name='TASK_INPUT') # Task layer, tasks: ('name the color', 'read the word') task_layer = pnl.RecurrentTransferMechanism( size=2, function=psyneulink.core.components.Logistic, hetero=-2, integrator_mode=True, integration_rate=0.01, name='TASK_LAYER') # Hidden layer # colors: ('red','green', 'neutral') words: ('RED','GREEN', 'NEUTRAL') colors_hidden_layer = pnl.RecurrentTransferMechanism( size=3, function=psyneulink.core.components.Logistic( x_0=4.0), # bias 4.0 is -4.0 in the paper see Docs for description integrator_mode=True, hetero=-2, integration_rate=0.01, # cohen-huston text says 0.01 name='COLORS_HIDDEN') words_hidden_layer = pnl.RecurrentTransferMechanism( size=3, function=psyneulink.core.components.Logistic(x_0=4.0), integrator_mode=True, hetero=-2, integration_rate=0.01, name='WORDS_HIDDEN') # Response layer, responses: ('red', 'green') response_layer = pnl.RecurrentTransferMechanism( size=2, function=psyneulink.core.components.Logistic, hetero=-2.0, integrator_mode=True, integration_rate=0.01, output_ports=[ pnl.RESULT, { pnl.NAME: 'DECISION_ENERGY', pnl.VARIABLE: (pnl.OWNER_VALUE, 0), pnl.FUNCTION: psyneulink.core.components.Stability( default_variable=np.array([0.0, 0.0]), metric=pnl.ENERGY, matrix=np.array([[0.0, -4.0], [-4.0, 0.0]])) } ], name='RESPONSE', ) # Mapping projections--------------------------------------------------------------------------------------------------- color_input_weights = pnl.MappingProjection( matrix=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])) word_input_weights = pnl.MappingProjection( matrix=np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])) task_input_weights = pnl.MappingProjection( matrix=np.array([[1.0, 0.0], [0.0, 1.0]])) color_task_weights = pnl.MappingProjection( matrix=np.array([[4.0, 0.0], [4.0, 0.0], [4.0, 0.0]])) task_color_weights = pnl.MappingProjection( matrix=np.array([[4.0, 4.0, 4.0], [0.0, 0.0, 0.0]])) response_color_weights = pnl.MappingProjection( matrix=np.array([[1.5, 0.0, 0.0], [0.0, 1.5, 0.0]])) response_word_weights = pnl.MappingProjection( matrix=np.array([[2.5, 0.0, 0.0], [0.0, 2.5, 0.0]])) color_response_weights = pnl.MappingProjection( matrix=np.array([[1.5, 0.0], [0.0, 1.5], [0.0, 0.0]])) word_response_weights = pnl.MappingProjection( matrix=np.array([[2.5, 0.0], [0.0, 2.5], [0.0, 0.0]])) word_task_weights = pnl.MappingProjection( matrix=np.array([[0.0, 4.0], [0.0, 4.0], [0.0, 4.0]])) task_word_weights = pnl.MappingProjection( matrix=np.array([[0.0, 0.0, 0.0], [4.0, 4.0, 4.0]])) # CREATE Composition comp = pnl.Composition() # Add mechanisms comp.add_node(colors_input_layer) comp.add_node(colors_hidden_layer) comp.add_node(words_input_layer) comp.add_node(words_hidden_layer) comp.add_node(task_input_layer) comp.add_node(task_layer) comp.add_node(response_layer) # Add projections comp.add_projection(task_input_weights, task_input_layer, task_layer) # Color process 
comp.add_projection(color_input_weights, colors_input_layer, colors_hidden_layer) comp.add_projection(color_response_weights, colors_hidden_layer, response_layer) comp.add_projection(response_color_weights, response_layer, colors_hidden_layer) # Word process comp.add_projection(word_input_weights, words_input_layer, words_hidden_layer) comp.add_projection(word_response_weights, words_hidden_layer, response_layer) comp.add_projection(response_word_weights, response_layer, words_hidden_layer) # Color task process comp.add_projection(task_color_weights, task_layer, colors_hidden_layer) comp.add_projection(color_task_weights, colors_hidden_layer, task_layer) # Word task process comp.add_projection(task_word_weights, task_layer, words_hidden_layer) comp.add_projection(word_task_weights, words_hidden_layer, task_layer) def trial_dict(red_color, green_color, neutral_color, red_word, green_word, neutral_word, CN, WR): trialdict = { colors_input_layer: [red_color, green_color, neutral_color], words_input_layer: [red_word, green_word, neutral_word], task_input_layer: [CN, WR] } return trialdict # Define initialization trials separately CN_trial_initialize_input = trial_dict( 0, 0, 0, 0, 0, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR CN_incongruent_trial_input = trial_dict( 1, 0, 0, 0, 1, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR CN_congruent_trial_input = trial_dict( 1, 0, 0, 1, 0, 0, 1, 0) #red_color, green color, red_word, green word, CN, WR CN_control_trial_input = trial_dict( 1, 0, 0, 0, 0, 1, 1, 0) #red_color, green color, red_word, green word, CN, WR Stimulus = [[CN_trial_initialize_input, CN_congruent_trial_input], [CN_trial_initialize_input, CN_incongruent_trial_input], [CN_trial_initialize_input, CN_control_trial_input]] # should be 500 and 1000 ntrials0 = 5 * reps ntrials = 10 * reps def run(bin_execute): results = [] for i, stim in enumerate(Stimulus): # RUN the COMPOSITION to initialize -------------------------------- exec_id = "exec_" + str(i) comp.run(inputs=stim[0], num_trials=ntrials0, bin_execute=bin_execute, context=exec_id) comp.run(inputs=stim[1], num_trials=ntrials, bin_execute=bin_execute, context=exec_id) # Comp results include concatenation of both the above runs results.append(comp.results) return results res = run(mode) # the corresponding output port indices in composition results # these were 0 and 1 in the prior version of the test response_results_index = 3 response_decision_energy_index = 4 if reps == 1: res2d = [x[response_results_index] for r in res for x in r] # NOTE: The formatting below provides visual split between # initialization and runs. Please do not change it. 
assert np.allclose(res2d, [[0.497679878752004, 0.497679878752004], [0.4954000154631831, 0.4954000154631831], [0.4931599760310996, 0.4931599760310996], [0.4909593232354856, 0.4909593232354856], [0.4887976172454234, 0.4887976172454234], [0.4866744160981826, 0.4866744160981826], [0.4845913708928323, 0.4845892761509014], [0.4825481248817388, 0.4825417411478152], [0.4805443207349158, 0.4805313535747981], [0.4785796008355799, 0.4785576550393446], [0.4766536075535322, 0.4766201866281457], [0.4747659834976122, 0.4747184892433102], [0.4729163717484405, 0.4728521039182508], [0.471104416072617, 0.4710205721142167], [0.4693297611195044, 0.4692234359984253], [0.497679878752004, 0.497679878752004], [0.4954000154631831, 0.4954000154631831], [0.4931599760310996, 0.4931599760310996], [0.4909593232354856, 0.4909593232354856], [0.4887976172454234, 0.4887976172454234], [0.4866744160981826, 0.4866744160981826], [0.4845900488307297, 0.484590598212878], [0.4825440921072959, 0.4825457739209331], [0.4805361215634117, 0.4805395527400902], [0.478565712169312, 0.4785715436856292], [0.4766324385709689, 0.4766413555592825], [0.4747358754098249, 0.4747485972170732], [0.4728755976222633, 0.4728928778172651], [0.4710511807198248, 0.4710738070495766], [0.4692622010511354, 0.4692909953467661], [0.497679878752004, 0.497679878752004], [0.4954000154631831, 0.4954000154631831], [0.4931599760310996, 0.4931599760310996], [0.4909593232354856, 0.4909593232354856], [0.4887976172454234, 0.4887976172454234], [0.4866744160981826, 0.4866744160981826], [0.4845900488307297, 0.4845892761509014], [0.4825440987479238, 0.4825417477884414], [0.4805361483673633, 0.480531380378737], [0.4785657797808562, 0.4785577226508331], [0.4766325749933464, 0.476620323050344], [0.4747361162403673, 0.4747187300733827], [0.4728759862850626, 0.4728524925799762], [0.4710517686957791, 0.4710211600879554], [0.469263048105203, 0.469224283048266]]) res1d = [x[response_decision_energy_index] for r in res for x in r] assert np.allclose( res1d, [[0.9907410468584376], [0.9816847012836883], [0.9728270478359791], [0.964164228287384], [0.9556924424992138], [0.9474079491380278], [0.9393111265997224], [0.9313984494721904], [0.9236664515817239], [0.9161117261021626], [0.9087309255565738], [0.9015207617204032], [0.8944780054345429], [0.8875994863362322], [0.880882092515256], [0.9907410468584376], [0.9816847012836883], [0.9728270478359791], [0.964164228287384], [0.9556924424992138], [0.9474079491380278], [0.9393111266035642], [0.9313984495075565], [0.9236664517261579], [0.9161117265115205], [0.9087309264959722], [0.9015207635977346], [0.8944780088366047], [0.887599492067544], [0.8808821016396065], [0.9907410468584376], [0.9816847012836883], [0.9728270478359791], [0.964164228287384], [0.9556924424992138], [0.9474079491380278], [0.939308563971253], [0.9313906911792855], [0.9236507947874026], [0.9160853988421866], [0.9086910874785843], [0.901464504886388], [0.894402355184426], [0.8875014022102764], [0.8807584692328312]]) if reps == 10: assert np.allclose(res[0][ntrials0 - 1][response_results_index], [0.42481045, 0.42481045]) assert np.allclose(res[0][-1][response_results_index], [0.43512335, 0.39995991]) assert np.allclose(res[1][ntrials0 - 1][response_results_index], [0.42481045, 0.42481045]) assert np.allclose(res[1][-1][response_results_index], [0.41360321, 0.42121262]) assert np.allclose(res[2][ntrials0 - 1][response_results_index], [0.42481045, 0.42481045]) assert np.allclose(res[2][-1][response_results_index], [0.41621778, 0.40255998]) assert np.allclose( res[0][ntrials0 - 
1][response_decision_energy_index], [0.72185566]) assert np.allclose(res[0][-1][response_decision_energy_index], [0.69612758]) assert np.allclose( res[1][ntrials0 - 1][response_decision_energy_index], [0.72185566]) assert np.allclose(res[1][-1][response_decision_energy_index], [0.69685957]) assert np.allclose( res[2][ntrials0 - 1][response_decision_energy_index], [0.72185566]) assert np.allclose(res[2][-1][response_decision_energy_index], [0.67021047]) if reps == 100: assert np.allclose(res[0][ntrials0 - 1][response_results_index], [0.48590224, 0.48590224]) assert np.allclose(res[0][-1][response_results_index], [0.95967791, 0.21434208]) assert np.allclose(res[1][ntrials0 - 1][response_results_index], [0.48590224, 0.48590224]) assert np.allclose(res[1][-1][response_results_index], [0.55847666, 0.83814112]) assert np.allclose(res[2][ntrials0 - 1][response_results_index], [0.48590224, 0.48590224]) assert np.allclose(res[2][-1][response_results_index], [0.89673726, 0.25100269]) assert np.allclose( res[0][ntrials0 - 1][response_decision_energy_index], [0.94440397]) assert np.allclose(res[0][-1][response_decision_energy_index], [0.82279743]) assert np.allclose( res[1][ntrials0 - 1][response_decision_energy_index], [0.94440397]) assert np.allclose(res[1][-1][response_decision_energy_index], [1.87232903]) assert np.allclose( res[2][ntrials0 - 1][response_decision_energy_index], [0.94440397]) assert np.allclose(res[2][-1][response_decision_energy_index], [0.90033387]) if benchmark.enabled: benchmark(run, mode)
def my_conflict_function(variable):
    # Conflict is the product of the two (thresholded) unit activities, scaled by 500;
    # activities at or below 0.0180 contribute zero conflict.
    maxi = variable - 0.0180
    new = np.fmax([0], maxi)
    out = [new[0] * new[1] * 500]
    return out

# Create color feature layer, word feature layer, task demand layer and response layer
color_feature_layer = pnl.RecurrentTransferMechanism(
    size=2,                                # Define unit size
    function=pnl.Logistic(gain=4, x_0=1),  # to 4 & bias to 1
    integrator_mode=True,                  # Set IntegratorFunction mode to True
    integration_rate=Lambda,               # smoothing factor == integration rate
    hetero=inhibition,                     # Inhibition among units within a layer
    output_ports=[{                        # Create new OutputPort by applying
        pnl.NAME: 'SPECIAL_LOGISTIC',      # the "my_special_Logistic" function
        pnl.VARIABLE: (pnl.OWNER_VALUE, 0),
        pnl.FUNCTION: my_special_Logistic
    }],
    name='COLOR_LAYER')

# The word_feature_layer is set up as the color_feature_layer
word_feature_layer = pnl.RecurrentTransferMechanism(
    size=2,                                # Define unit size
    function=pnl.Logistic(gain=4, x_0=1),  # to 4 & bias to 1
    integrator_mode=True,                  # Set IntegratorFunction mode to True
    integration_rate=Lambda,               # smoothing factor == integration rate
    hetero=inhibition,                     # Inhibition among units within a layer
    output_ports=[{                        # Create new OutputPort by applying
        pnl.NAME: 'SPECIAL_LOGISTIC',      # the "my_special_Logistic" function
FeatureNames = ['small', 'medium', 'large', 'red', 'yellow', 'blue', 'circle', 'rectangle', 'triangle']

# create a variable that corresponds to the size of our feature space
sizeF = len(FeatureNames)
small_red_circle = [1, 0, 0, 1, 0, 0, 1, 0, 0]
src = small_red_circle

Hebb_comp = pnl.Composition()

Hebb_mech = pnl.RecurrentTransferMechanism(
    size=sizeF,
    function=pnl.Linear,
    # integrator_mode=True,
    # integration_rate=0.5,
    enable_learning=True,
    learning_rate=.1,
    name='Hebb_mech',
    # matrix=pnl.AutoAssociativeProjection,
    auto=0,
    hetero=0
)

Hebb_comp.add_node(Hebb_mech)

Hebb_comp.execution_id = 1

# Use print_info to show numerical values and vis_info to show graphs of the changing values
def print_info():
    print('\nWeight matrix:\n', Hebb_mech.matrix.base, '\nActivity: ', Hebb_mech.value)
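# Note (editorial, hedged): with enable_learning=True and learning_rate=.1, each execution
# applies a Hebbian update that strengthens recurrent connections between co-active
# features -- here 'small', 'red' and 'circle' for the src pattern -- while auto=0 and
# hetero=0 start the matrix with no self- or cross-connections.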
colors_input_layer = pnl.TransferMechanism(size=3,
                                           function=pnl.Linear,
                                           name='COLORS_INPUT')

words_input_layer = pnl.TransferMechanism(size=3,
                                          function=pnl.Linear,
                                          name='WORDS_INPUT')

task_input_layer = pnl.TransferMechanism(size=2,
                                         function=pnl.Linear,
                                         name='TASK_INPUT')

# Task layer, tasks: ('name the color', 'read the word')
task_layer = pnl.RecurrentTransferMechanism(size=2,
                                            function=pnl.Logistic(),
                                            hetero=inhibition,
                                            integrator_mode=True,
                                            integration_rate=rate,
                                            name='TASK')

# Hidden layer units, colors: ('red','green') words: ('RED','GREEN')
colors_hidden_layer = pnl.RecurrentTransferMechanism(
    size=3,
    function=pnl.Logistic(x_0=bias),
    integrator_mode=True,
    hetero=inhibition,
    # noise=pnl.NormalDist(mean=0.0, standard_deviation=.0),
    integration_rate=rate,  # cohen-huston text says 0.01
    name='COLORS HIDDEN')

words_hidden_layer = pnl.RecurrentTransferMechanism(
    size=3,
name="Stimulus Input [S1, S2]") congruenceWeighting = pnl.TransferMechanism( default_variable=[[0.0, 0.0]], size=2, function=pnl.Linear(slope=congruentWeight, intercept=0), name='Congruence * Automatic Component') # Activation Layer: [Color Activation, Motion Activation] # Recurrent: Self Excitation, Mutual Inhibition # Controlled: Gain Parameter activation = pnl.RecurrentTransferMechanism( default_variable=[[0.0, 0.0]], function=pnl.Logistic(gain=1.0), matrix=[[1.0, -1.0], [-1.0, 1.0]], integrator_mode=True, integrator_function=pnl.AdaptiveIntegrator(rate=integrationConstant), initial_value=np.array([[0.0, 0.0]]), output_states=[pnl.RESULT], name='Task Activations [Act 1, Act 2]') # Hadamard product of Activation and Stimulus Information nonAutomaticComponent = pnl.TransferMechanism( default_variable=[[0.0, 0.0]], size=2, function=pnl.Linear(slope=1, intercept=0), input_states=pnl.InputState(combine=pnl.PRODUCT), output_states=[pnl.RESULT], name='Non-Automatic Component [S1*Activity1, S2*Activity2]') # Summation of nonAutomatic and Automatic Components