def figure_5a():
    """
    This creates the plot for figure 5A in the Montague paper. Figure 5A is
    a 'plot of ∂(t) over time for three trials during training (1, 30, and 50).'
    """

    # Create Processing Components
    sample_mechanism = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             name=pnl.SAMPLE)

    action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             function=pnl.Linear(slope=1.0, intercept=0.01),
                                             name='Action Selection')

    sample_to_action_selection = pnl.MappingProjection(sender=sample_mechanism,
                                                       receiver=action_selection,
                                                       matrix=np.zeros((60, 60)))

    # Create Composition
    composition_name = 'TD_Learning_Figure_5A'
    comp = pnl.Composition(name=composition_name)

    # Add Processing Components to the Composition
    pathway = [sample_mechanism, sample_to_action_selection, action_selection]

    # Add Learning Components to the Composition
    learning_related_components = comp.add_td_learning_pathway(pathway, learning_rate=0.3).learning_components

    # Unpack Relevant Learning Components
    prediction_error_mechanism = learning_related_components[pnl.OBJECTIVE_MECHANISM]
    target_mechanism = learning_related_components[pnl.TARGET_MECHANISM]

    # Create Log
    prediction_error_mechanism.log.set_log_conditions(pnl.VALUE)

    # Create Stimulus Dictionary
    no_reward_trials = {14, 29, 44, 59, 74, 89}
    inputs = build_stimulus_dictionary(sample_mechanism, target_mechanism, no_reward_trials)

    # Run Composition
    comp.learn(inputs=inputs)

    if args.enable_plot:
        # Get Delta Values from Log
        delta_vals = prediction_error_mechanism.log.nparray_dictionary()[composition_name][pnl.VALUE]

        # Plot Delta Values from trials 1, 30, and 50
        with plt.style.context('seaborn'):
            plt.plot(delta_vals[0][0], "-o", label="Trial 1")
            plt.plot(delta_vals[29][0], "-s", label="Trial 30")
            plt.plot(delta_vals[49][0], "-o", label="Trial 50")
            plt.title("Montague et al. (1996) -- Figure 5A")
            plt.xlabel("Timestep")
            plt.ylabel("∂")
            plt.legend()
            plt.xlim(xmin=35)
            plt.xticks()
            plt.show(block=not pnl._called_from_pytest)

    return comp
def test_model_based_ocm_no_simulations(self):
    A = pnl.ProcessingMechanism(name='A')
    B = pnl.ProcessingMechanism(name='B', function=pnl.SimpleIntegrator(rate=1))

    comp = pnl.Composition(name='comp')
    comp.add_linear_processing_pathway([A, B])

    control_signal = pnl.ControlSignal(
        projections=[(pnl.SLOPE, A)],
        function=pnl.Linear,
        variable=1.0,
        allocation_samples=[1, 2, 3],
        intensity_cost_function=pnl.Linear(slope=0.))

    objective_mech = pnl.ObjectiveMechanism(monitor=[B])
    ocm = pnl.OptimizationControlMechanism(
        agent_rep=comp,
        features=[A.input_state],
        objective_mechanism=objective_mech,
        function=pnl.GridSearch(),
        num_estimates=1,
        control_signals=[control_signal],
        search_statefulness=False,
    )

    comp.add_controller(ocm)

    inputs = {A: [[[1.0]]]}

    comp.run(inputs=inputs, num_trials=1)

    # initial 1 + each allocation sample (1, 2, 3) integrated
    assert B.parameters.value.get(comp) == 7
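# Worked arithmetic for the assertion above (an illustrative check, not part of
# the original test): per the comment, B accumulates an initial 1 plus one pass
# per allocation sample (slopes 1, 2, 3), so the expected total is
# 1 + 1 + 2 + 3 == 7.
expected_value = 1 + sum([1, 2, 3])
assert expected_value == 7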
def test_DDM_threshold_modulation(mode):
    M = pnl.DDM(
        name='DDM',
        function=pnl.DriftDiffusionAnalytical(
            threshold=20.0,
        ),
    )
    monitor = pnl.TransferMechanism(default_variable=[[0.0]],
                                    size=1,
                                    function=pnl.Linear(slope=1, intercept=0),
                                    output_ports=[pnl.RESULT],
                                    name='monitor')

    control = pnl.ControlMechanism(
        monitor_for_control=monitor,
        control_signals=[(pnl.THRESHOLD, M)])

    C = pnl.Composition()
    C.add_node(M, required_roles=[pnl.NodeRole.ORIGIN, pnl.NodeRole.TERMINAL])
    C.add_node(monitor)
    C.add_node(control)

    inputs = {M: [1], monitor: [3]}
    val = C.run(inputs, num_trials=1, bin_execute=mode)
    # FIXME: Python version returns dtype=object
    val = np.asfarray(val)
    assert np.allclose(val[0], [60.0])
    assert np.allclose(val[1], [60.2])
def test_parameter_setter():
    f = pnl.Linear()
    f.parameters.slope.setter = lambda x: x**2

    f.parameters.slope.set(3)

    assert f.parameters.slope.get() == 9
def test_gating(benchmark, comp_mode):
    Input_Layer = pnl.TransferMechanism(
        name='Input_Layer',
        default_variable=np.zeros((2,)),
        function=pnl.Logistic()
    )

    Output_Layer = pnl.TransferMechanism(
        name='Output_Layer',
        default_variable=[0, 0, 0],
        function=pnl.Linear(),
        output_ports={
            pnl.NAME: 'RESULTS USING UDF',
            pnl.FUNCTION: pnl.Linear(slope=pnl.GATING)
        }
    )

    Gating_Mechanism = pnl.GatingMechanism(
        size=[1],
        gating_signals=[Output_Layer.output_port]
    )

    p_pathway = [Input_Layer, Output_Layer]

    stim_list = {
        Input_Layer: [[-1, 30], [-1, 30], [-1, 30], [-1, 30]],
        Gating_Mechanism: [[0.0], [0.5], [1.0], [2.0]]
    }

    comp = pnl.Composition(name="comp")
    comp.add_linear_processing_pathway(p_pathway)
    comp.add_node(Gating_Mechanism)

    comp.run(num_trials=4, inputs=stim_list, execution_mode=comp_mode)

    expected_results = [
        [np.array([0., 0., 0.])],
        [np.array([0.63447071, 0.63447071, 0.63447071])],
        [np.array([1.26894142, 1.26894142, 1.26894142])],
        [np.array([2.53788284, 2.53788284, 2.53788284])]
    ]

    np.testing.assert_allclose(comp.results, expected_results)

    if benchmark.enabled:
        benchmark(comp.run, num_trials=4, inputs=stim_list, execution_mode=comp_mode)
def test_log_dictionary_with_scheduler(self):
    T1 = pnl.TransferMechanism(name='log_test_T1',
                               integrator_mode=True,
                               integration_rate=0.5)
    T2 = pnl.TransferMechanism(name='log_test_T2',
                               function=pnl.Linear(slope=6.0))
    PS = pnl.Process(name='log_test_PS', pathway=[T1, T2])
    SYS = pnl.System(name='log_test_SYS', processes=[PS])

    def pass_threshold(mech, thresh):
        results = mech.output_states[0].value
        for val in results:
            if abs(val) >= thresh:
                return True
        return False

    terminate_trial = {
        pnl.TimeScale.TRIAL: pnl.While(pass_threshold, T2, 5.0)
    }

    T1.set_log_conditions(pnl.VALUE)
    T1.set_log_conditions(pnl.SLOPE)
    T1.set_log_conditions(pnl.RESULTS)
    T2.set_log_conditions(pnl.VALUE)
    T2.set_log_conditions(pnl.SLOPE)

    SYS.run(inputs={T1: [[1.0]]}, termination_processing=terminate_trial)

    log_dict_T1 = T1.log.nparray_dictionary(entries=['RESULTS', 'slope', 'value'])
    log_dict_T2 = T2.log.nparray_dictionary(entries=['value', 'slope'])

    # Check order of keys (must match order of specification)
    assert list(log_dict_T1.keys()) == [
        'Run', 'Trial', 'Pass', 'Time_step', 'RESULTS', 'slope', 'value'
    ]
    assert list(log_dict_T2.keys()) == [
        'Run', 'Trial', 'Pass', 'Time_step', 'value', 'slope'
    ]

    # Check values T1
    assert np.allclose(log_dict_T1["Run"], [[0], [0], [0]])
    assert np.allclose(log_dict_T1["Trial"], [[0], [0], [0]])
    assert np.allclose(log_dict_T1["Time_step"], [[0], [0], [0]])
    assert np.allclose(log_dict_T1["RESULTS"], [[0.5], [0.75], [0.875]])
    assert np.allclose(log_dict_T1["value"], [[[0.5]], [[0.75]], [[0.875]]])
    assert np.allclose(log_dict_T1["slope"], [[1], [1], [1]])

    # Check values T2
    assert np.allclose(log_dict_T2["Run"], [[0], [0], [0]])
    assert np.allclose(log_dict_T2["Trial"], [[0], [0], [0]])
    assert np.allclose(log_dict_T2["Time_step"], [[1], [1], [1]])
    assert np.allclose(log_dict_T2["value"], [[[3]], [[4.5]], [[5.25]]])
    assert np.allclose(log_dict_T2["slope"], [[6], [6], [6]])
def test_model_based_ocm_with_buffer(self):
    A = pnl.ProcessingMechanism(name='A')
    B = pnl.ProcessingMechanism(name='B')

    comp = pnl.Composition(name='comp', controller_mode=pnl.BEFORE)
    comp.add_linear_processing_pathway([A, B])

    search_range = pnl.SampleSpec(start=0.25, stop=0.75, step=0.25)
    control_signal = pnl.ControlSignal(
        projections=[(pnl.SLOPE, A)],
        function=pnl.Linear,
        variable=1.0,
        allocation_samples=search_range,
        intensity_cost_function=pnl.Linear(slope=0.))

    objective_mech = pnl.ObjectiveMechanism(monitor=[B])
    ocm = pnl.OptimizationControlMechanism(
        agent_rep=comp,
        features=[A.input_state],
        feature_function=pnl.Buffer(history=2),
        objective_mechanism=objective_mech,
        function=pnl.GridSearch(),
        control_signals=[control_signal])
    objective_mech.log.set_log_conditions(pnl.OUTCOME)

    comp.add_controller(ocm)

    inputs = {A: [[[1.0]], [[2.0]], [[3.0]]]}

    for i in range(1, len(ocm.input_states)):
        ocm.input_states[i].function.reinitialize()

    comp.run(inputs=inputs, retain_old_simulation_data=True)

    log = objective_mech.log.nparray_dictionary()

    # "outer" composition
    assert np.allclose(log["comp"][pnl.OUTCOME], [[0.75], [1.5], [2.25]])

    # preprocess to ignore control allocations
    log_parsed = {}
    for key, value in log.items():
        cleaned_key = re.sub(r'comp-sim-(\d).*', r'\1', key)
        log_parsed[cleaned_key] = value

    # First round of simulations is only one trial.
    # (Even though the feature fn is a Buffer, there is no history yet)
    for i in range(0, 3):
        assert len(log_parsed[str(i)]["Trial"]) == 1

    # Second and third rounds of simulations are two trials.
    # (The buffer has history = 2)
    for i in range(3, 9):
        assert len(log_parsed[str(i)]["Trial"]) == 2
def get_node(percept, node_id):
    # helper function for creating a node
    tm_function = pnl.Linear(slope=1, intercept=0)
    tm_integrator_mode = True
    tm_integration_rate = .5
    node_ = pnl.TransferMechanism(
        name=f'{percept}-{node_id}',
        function=tm_function,
        integrator_mode=tm_integrator_mode,
        integration_rate=tm_integration_rate,
        default_variable=np.zeros((1,)),
    )
    return node_
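# Illustrative use of the get_node() helper above (not part of the original
# script; the percept labels here are made up for the example). Each call
# yields a one-unit, integrating TransferMechanism whose name combines the
# percept label and index.
example_node_a = get_node('color', 0)
example_node_b = get_node('motion', 1)
print(example_node_a.name, example_node_b.name)  # -> color-0 motion-1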
def get_model(learning_rate=.3, n_time_steps=60):
    """get a model, described in Montague, Dayan, and Sejnowski (1996)

    Parameters
    ----------
    learning_rate : float
        learning rate, defaults to .3
    n_time_steps : int
        number of time steps per trial

    Returns
    -------
    pnl.Composition, list
        the model and a list of its key nodes
    """
    # Create Processing Components
    sample_mechanism = pnl.TransferMechanism(
        default_variable=np.zeros(n_time_steps),
        name=pnl.SAMPLE)

    action_func = pnl.Linear(slope=1.0, intercept=0.01)
    action_selection = pnl.TransferMechanism(
        default_variable=np.zeros(n_time_steps),
        function=action_func,
        name='Action Selection')

    sample_to_action_selection = pnl.MappingProjection(
        sender=sample_mechanism,
        receiver=action_selection,
        matrix=np.zeros((n_time_steps, n_time_steps)))

    # Create Composition
    comp = pnl.Composition()

    # Add Processing Components to the Composition
    pathway = [sample_mechanism, sample_to_action_selection, action_selection]

    # Add Learning Components to the Composition
    learning_related_components = comp.add_td_learning_pathway(
        pathway, learning_rate=learning_rate)

    # Unpack Relevant Learning Components
    prediction_error_mechanism = learning_related_components[
        pnl.COMPARATOR_MECHANISM]
    target_mechanism = learning_related_components[pnl.TARGET_MECHANISM]

    # Create Log
    prediction_error_mechanism.log.set_log_conditions(pnl.VALUE)

    nodes = [sample_mechanism, prediction_error_mechanism, target_mechanism]
    return comp, nodes
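# Minimal sketch of calling get_model() as defined above (illustrative only;
# constructing trial-by-trial inputs and running learning are not shown here).
comp, (sample_mech, prediction_error_mech, target_mech) = get_model(
    learning_rate=.3, n_time_steps=60)
print([node.name for node in (sample_mech, prediction_error_mech, target_mech)])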
def test_grid_search_random_selection(self):
    A = pnl.ProcessingMechanism(name='A')
    A.log.set_log_conditions(items="mod_slope")
    B = pnl.ProcessingMechanism(name='B', function=pnl.Logistic())

    comp = pnl.Composition(name='comp')
    comp.add_linear_processing_pathway([A, B])

    search_range = pnl.SampleSpec(start=15., stop=35., step=5)
    control_signal = pnl.ControlSignal(
        projections=[(pnl.SLOPE, A)],
        function=pnl.Linear,
        variable=1.0,
        allocation_samples=search_range,
        intensity_cost_function=pnl.Linear(slope=0.))

    objective_mech = pnl.ObjectiveMechanism(monitor=[B])
    ocm = pnl.OptimizationControlMechanism(
        agent_rep=comp,
        features=[A.input_state],
        objective_mechanism=objective_mech,
        function=pnl.GridSearch(select_randomly_from_optimal_values=True),
        control_signals=[control_signal])

    comp.add_controller(ocm)

    inputs = {A: [[[1.0]]]}

    comp.run(inputs=inputs, num_trials=10, execution_id='outer_comp')

    log_arr = A.log.nparray_dictionary()

    # control signal value (mod slope) is chosen randomly from all of the
    # control signal values that correspond to a net outcome of 1
    assert np.allclose([[1.], [15.], [15.], [20.], [20.], [15.], [20.], [25.], [15.], [35.]],
                       log_arr['outer_comp']['mod_slope'])
def test_model_based_num_estimates(self):
    A = pnl.ProcessingMechanism(name='A')
    B = pnl.ProcessingMechanism(name='B', function=pnl.SimpleIntegrator(rate=1))

    comp = pnl.Composition(name='comp')
    comp.add_linear_processing_pathway([A, B])

    search_range = pnl.SampleSpec(start=0.25, stop=0.75, step=0.25)
    control_signal = pnl.ControlSignal(
        projections=[(pnl.SLOPE, A)],
        function=pnl.Linear,
        variable=1.0,
        allocation_samples=search_range,
        intensity_cost_function=pnl.Linear(slope=0.))

    objective_mech = pnl.ObjectiveMechanism(monitor=[B])
    ocm = pnl.OptimizationControlMechanism(
        agent_rep=comp,
        features=[A.input_state],
        objective_mechanism=objective_mech,
        function=pnl.GridSearch(),
        num_estimates=5,
        control_signals=[control_signal])

    comp.add_controller(ocm)

    inputs = {A: [[[1.0]]]}

    comp.run(inputs=inputs, num_trials=2)

    assert np.allclose(
        comp.simulation_results,
        [[np.array([2.25])], [np.array([3.5])], [np.array([4.75])],
         [np.array([3.])], [np.array([4.25])], [np.array([5.5])]])
    assert np.allclose(comp.results, [[np.array([1.])], [np.array([1.75])]])
def test_model_based_ocm_before(self, benchmark, mode):
    A = pnl.ProcessingMechanism(name='A')
    B = pnl.ProcessingMechanism(name='B')

    comp = pnl.Composition(name='comp', controller_mode=pnl.BEFORE)
    comp.add_linear_processing_pathway([A, B])

    search_range = pnl.SampleSpec(start=0.25, stop=0.75, step=0.25)
    control_signal = pnl.ControlSignal(
        projections=[(pnl.SLOPE, A)],
        function=pnl.Linear,
        variable=1.0,
        allocation_samples=search_range,
        intensity_cost_function=pnl.Linear(slope=0.))

    objective_mech = pnl.ObjectiveMechanism(monitor=[B])
    ocm = pnl.OptimizationControlMechanism(
        agent_rep=comp,
        features=[A.input_state],
        objective_mechanism=objective_mech,
        function=pnl.GridSearch(),
        control_signals=[control_signal])
    # objective_mech.log.set_log_conditions(pnl.OUTCOME)

    comp.add_controller(ocm)

    inputs = {A: [[[1.0]], [[2.0]], [[3.0]]]}

    comp.run(inputs=inputs, bin_execute=mode)

    # objective_mech.log.print_entries(pnl.OUTCOME)
    assert np.allclose(
        comp.results,
        [[np.array([0.75])], [np.array([1.5])], [np.array([2.25])]])

    benchmark(comp.run, inputs, bin_execute=mode)
def test_parameter_getter():
    f = pnl.Linear()
    f.parameters.slope.getter = lambda x: x**2

    assert f.parameters.slope.get(x=3) == 9
import numpy as np
import psyneulink as pnl

# Control Parameters
signalSearchRange = np.arange(1.0, 3.1, 0.5)  # why 0.8 to 2.0 in increments of 0.2

test_mech = pnl.TransferMechanism(size=1)

# Stimulus Mechanisms
Target_Stim = pnl.TransferMechanism(name='Target Stimulus', function=pnl.Linear(slope=0.3324))
Target_Stim.set_log_conditions('value')

Flanker_Stim = pnl.TransferMechanism(name='Flanker Stimulus', function=pnl.Linear(slope=0.3545))
Flanker_Stim.set_log_conditions('value')

# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(
    name='Target Representation',
    function=pnl.Linear(
        slope=(1.0, pnl.ControlProjection(
            control_signal_params={
                pnl.ALLOCATION_SAMPLES: signalSearchRange}))),
    prefs={pnl.LOG_PREF: pnl.PreferenceEntry(pnl.LogCondition.INITIALIZATION, pnl.PreferenceLevel.INSTANCE)})

Target_Rep.set_log_conditions('value')  # Log Target_Rep
Target_Rep.set_log_conditions('slope')  # Log Target_Rep
Target_Rep.loggable_items

# log initialization
Target_Rep.log.LogCondition = 2

Flanker_Rep = pnl.TransferMechanism(name='Flanker Representation',
# In[1]:

import psyneulink as pnl
import numpy as np


# In[2]:

# ECin = pnl.KWTA(size=8, function=pnl.Linear)
# DG = pnl.KWTA(size=400, function=pnl.Linear)
# CA3 = pnl.KWTA(size=80, function=pnl.Linear)
# CA1 = pnl.KWTA(size=100, function=pnl.Linear)
# ECout = pnl.KWTA(size=8, function=pnl.Linear)

ECin = pnl.TransferMechanism(size=8, function=pnl.Linear(), name='ECin')
DG = pnl.TransferMechanism(size=400, function=pnl.Logistic(), name='DG')
CA3 = pnl.TransferMechanism(size=80, function=pnl.Logistic(), name='CA3')
CA1 = pnl.TransferMechanism(size=100, function=pnl.Linear(), name='CA1')
ECout = pnl.TransferMechanism(size=8, function=pnl.Logistic(), name='ECout')


# In[3]:

def make_mask(in_features, out_features, connectivity):
    mask = np.zeros((in_features, out_features))
    rand = np.random.random(mask.shape)
    idxs = np.where(rand < connectivity)
    mask[idxs[0], idxs[1]] = 1
    return mask
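# Illustrative use of make_mask() above (not part of the original notebook):
# a binary mask with roughly 25% of entries set to 1, e.g. for sparsifying the
# 8 x 400 ECin -> DG projection matrix. The exact density varies because the
# mask is drawn randomly.
example_mask = make_mask(8, 400, connectivity=0.25)
print(example_mask.shape)   # (8, 400)
print(example_mask.mean())  # approximately 0.25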
prefs={
    pnl.VERBOSE_PREF: pnl.PreferenceEntry(False, pnl.PreferenceLevel.INSTANCE),
    # pnl.REPORT_OUTPUT_PREF: pnl.PreferenceEntry(True, pnl.PreferenceLevel.INSTANCE)
    })

process_prefs = pnl.ComponentPreferenceSet(
    reportOutput_pref=pnl.PreferenceEntry(False, pnl.PreferenceLevel.INSTANCE),
    verbose_pref=pnl.PreferenceEntry(True, pnl.PreferenceLevel.INSTANCE))

# Control Parameters
signalSearchRange = np.arange(0.8, 2.0, 0.2)

# Stimulus Mechanisms
Target_Stim = pnl.TransferMechanism(name='Target Stimulus', function=pnl.Linear(slope=0.3324))
Flanker_Stim = pnl.TransferMechanism(name='Flanker Stimulus', function=pnl.Linear(slope=0.3545221843))

# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(
    name='Target Representation',
    function=pnl.Linear(slope=(
        1.0,
        pnl.ControlProjection(
            function=pnl.Linear,
            control_signal_params={pnl.ALLOCATION_SAMPLES: signalSearchRange})
    )),
    prefs=mechanism_prefs)

Flanker_Rep = pnl.TransferMechanism(
    name='Flanker Representation',
import psyneulink as pnl

cueInterval = pnl.TransferMechanism(default_variable=[[0.0]],
                                    size=1,
                                    function=pnl.Linear(slope=1, intercept=0),
                                    output_ports=[pnl.RESULT],
                                    name='Cue-Stimulus Interval')

taskLayer = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                  size=2,
                                  function=pnl.Linear(slope=1, intercept=0),
                                  output_ports=[pnl.RESULT],
                                  name='Task Input [I1, I2]')

activation = pnl.LCAMechanism(default_variable=[[0.0, 0.0]],
                              size=2,
                              function=pnl.Logistic(gain=1),
                              leak=.5,
                              competition=2,
                              noise=0,
                              time_step_size=.1,
                              termination_measure=pnl.TimeScale.TRIAL,
                              termination_threshold=3,
                              name='Task Activations [Act 1, Act 2]')

# response = pnl.ProcessingMechanism()

# Create controller
csiController = pnl.ControlMechanism(monitor_for_control=cueInterval,
                                     control_signals=[
                                         (pnl.TERMINATION_THRESHOLD,
def test_combine_param_conflicting_function_spec(self):
    with pytest.raises(pnl.InputStateError) as error_text:
        t = pnl.TransferMechanism(input_states=pnl.InputState(function=pnl.Linear(),
                                                              combine=pnl.PRODUCT))
    assert "Specification of 'combine' argument (PRODUCT) conflicts with Function specified " \
           "in 'function' argument (Linear Function" in str(error_text.value)
runs = len(INPUT)

excitatoryWeight = np.asarray([[1]])
inhibitoryWeight = np.asarray([[-1]])
gain = np.asarray([[g]])

DRIFT = 1              # Drift Rate
STARTING_POINT = 0.0   # Starting Point
THRESHOLD = 0.0475     # Threshold
NOISE = 0.04           # Noise
T0 = 0.2               # T0

# first element is color task attendance, second element is motion task attendance
inputLayer = pnl.TransferMechanism(
    # default_variable=[[0.0, 0.0]],
    size=2,
    function=pnl.Linear(slope=1, intercept=0),
    output_ports=[pnl.RESULT],
    name='Input')
inputLayer.set_log_conditions([pnl.RESULT])

# Recurrent Transfer Mechanism that models the recurrence in the activation between the two stimulus and action
# dimensions. Positive self excitation and negative opposite inhibition with an integrator rate = tau
# Modulated variable in simulations is the GAIN variable of this mechanism
activation = pnl.RecurrentTransferMechanism(
    default_variable=[[0.0, 0.0]],
    function=pnl.Logistic(gain=1.0),
    matrix=[[1.0, -1.0], [-1.0, 1.0]],
    integrator_mode=True,
    integrator_function=pnl.AdaptiveIntegrator(rate=(tau)),
    initial_value=np.array([[0.0, 0.0]]),
    output_ports=[pnl.RESULT],
def test_copy():
    f = pnl.Linear()
    g = copy.deepcopy(f)

    assert isinstance(g.parameters.additive_param, pnl.ParameterAlias)
    assert g.parameters.additive_param.source is g.parameters.intercept
def figure_5c():
    """
    This creates the plot for Figure 5C in the Montague paper. Figure 5C shows
    'extinction of response to the sensory cue.' The setup is the same as
    Figure 5A, except that reward delivery stops at trial 70.
    """

    # Create Processing Components
    sample_mechanism = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             name=pnl.SAMPLE)

    action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             function=pnl.Linear(slope=1.0, intercept=1.0),
                                             name='Action Selection')

    sample_to_action_selection = pnl.MappingProjection(sender=sample_mechanism,
                                                       receiver=action_selection,
                                                       matrix=np.zeros((60, 60)))

    # Create Composition
    composition_name = 'TD_Learning_Figure_5C'
    comp = pnl.Composition(name=composition_name)

    # Add Processing Components to the Composition
    pathway = [sample_mechanism, sample_to_action_selection, action_selection]

    # Add Learning Components to the Composition
    learning_related_components = comp.add_td_learning_pathway(pathway, learning_rate=0.3)

    # Unpack Relevant Learning Components
    prediction_error_mechanism = learning_related_components[pnl.COMPARATOR_MECHANISM]
    target_mechanism = learning_related_components[pnl.TARGET_MECHANISM]

    # Create Log
    prediction_error_mechanism.log.set_log_conditions(pnl.VALUE)

    # Create Stimulus Dictionary
    inputs = build_stimulus_dictionary_figure_5c(sample_mechanism, target_mechanism)

    # Run Composition
    comp.run(inputs=inputs)
    # comp.show_graph()

    # Get Delta Values from Log
    delta_vals = prediction_error_mechanism.log.nparray_dictionary()[composition_name][pnl.VALUE]

    with plt.style.context('seaborn'):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x_vals, y_vals = np.meshgrid(np.arange(150), np.arange(40, 60, step=1))
        d_vals = np.array([d[0][40:60] for d in delta_vals]).transpose()
        ax.plot_surface(x_vals, y_vals, d_vals)
        ax.invert_yaxis()
        ax.set_xlabel("Trial")
        ax.set_ylabel("Timestep")
        ax.set_zlabel("∂")
        ax.set_title("Montague et al. (1996) -- Figure 5C")
        plt.show()
import psyneulink as pnl

comp = pnl.Composition(name='comp')
A = pnl.TransferMechanism(function=pnl.Linear(slope=5.0, intercept=2.0), name='A')
B = pnl.TransferMechanism(function=pnl.Logistic, name='B')
C = pnl.TransferMechanism(function=pnl.Exponential, name='C')

for m in [A, B, C]:
    comp.add_node(m)

comp.add_projection(pnl.MappingProjection(), A, B)
comp.add_projection(pnl.MappingProjection(), A, C)

comp.run(inputs={A: 0}, log=True, num_trials=1)

print('Finished running model')

print(comp.results)
for node in comp.nodes:
    print(f'{node} {node.name}: {node.parameters.value.get(comp)}')

with open('model_with_simple_graph.json', 'w') as outfi:
    outfi.write(comp.json_summary)

with open('model_with_simple_graph.converted.py', 'w') as outfi:
    outfi.write(pnl.generate_script_from_json(comp.json_summary))
    outfi.write('\ncomp.show_graph()')

comp.show_graph()
# Decision Mechanisms
Decision = pnl.DDM(
    function=pnl.DriftDiffusionAnalytical(
        # drift_rate=(0.1170),
        threshold=(thresh),
        noise=(c),
        starting_point=(x_0),
        t0=t0),
    name='Decision',
    output_ports=[
        pnl.DECISION_VARIABLE,
        pnl.RESPONSE_TIME,
        pnl.PROBABILITY_UPPER_THRESHOLD,
        {
            pnl.NAME: 'OFFSET RT',
            pnl.VARIABLE: (pnl.OWNER_VALUE, 2),
            pnl.FUNCTION: pnl.Linear(0, slope=1.0, intercept=1)
        }
    ],
)  # drift_rate=(1.0), threshold=(0.2645), noise=(0.5), starting_point=(0), t0=0.15

Decision.set_log_conditions('InputPort-0')  # , log_condition=pnl.PROCESSING)
Decision.set_log_conditions('PROBABILITY_UPPER_THRESHOLD')

print(Decision.loggable_items)

# Outcome Mechanisms:
Reward = pnl.TransferMechanism(name='Reward')

# Composition
Umemoto_comp = pnl.Composition(name="Umemoto_System")

# weights
import psyneulink as pnl

comp = pnl.Composition(name='comp')
inner_comp = pnl.Composition(name='Inner Composition')

A = pnl.TransferMechanism(function=pnl.Linear(slope=5.0, intercept=2.0), name='A')
B = pnl.TransferMechanism(function=pnl.Logistic, name='B')
C = pnl.TransferMechanism(function=pnl.Exponential, name='C')
E = pnl.TransferMechanism(name='E', function=pnl.Linear(slope=2.0))
F = pnl.TransferMechanism(name='F', function=pnl.Linear(intercept=2.0))

for m in [E, F]:
    inner_comp.add_node(m)

for m in [A, B, C, inner_comp]:
    comp.add_node(m)

comp.add_projection(pnl.MappingProjection(), A, B)
comp.add_projection(pnl.MappingProjection(), A, C)
comp.add_projection(pnl.MappingProjection(), C, inner_comp)
inner_comp.add_projection(pnl.MappingProjection(), E, F)

comp.run(inputs={A: 1}, log=True)

print(comp.results)
for node in comp.nodes + inner_comp.nodes:
    print(f'{node.name}: {node.parameters.value.get(comp)}')
0.2)  # why 0.8 to 2.0 in increments of 0.2
# test_mech = pnl.TransferMechanism(size=3)

# Stimulus Mechanisms
Target_Stim = pnl.TransferMechanism(name='Target Stimulus', function=pnl.Linear)
Flanker_Stim = pnl.TransferMechanism(name='Flanker Stimulus', function=pnl.Linear)

# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(
    name='Target Representation',
    function=pnl.Linear(slope=(
        1.0,
        pnl.ControlProjection(
            control_signal_params={pnl.ALLOCATION_SAMPLES: signalSearchRange})
    )))
Target_Rep.set_log_conditions('value')  # Log Target_Rep
Target_Rep.loggable_items

Flanker_Rep = pnl.TransferMechanism(
    name='Flanker Representation',
    function=pnl.Linear(slope=(
        1.0,
        pnl.ControlProjection(
            control_signal_params={pnl.ALLOCATION_SAMPLES: signalSearchRange})
    )))
Flanker_Rep.set_log_conditions('value')  # Log Flanker_Rep
Flanker_Rep.loggable_items

# Processing Mechanism (Automatic)
def model_training():
    """
    This creates the plot for figure 5A in the Montague paper. Figure 5A is
    a 'plot of ∂(t) over time for three trials during training (1, 30, and 50).'
    """
    sample = pnl.TransferMechanism(default_variable=np.zeros(60),
                                   name=pnl.SAMPLE)

    action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             function=pnl.Linear(slope=1.0, intercept=0.01),
                                             name='Action Selection')

    stimulus_onset = 41
    reward_delivery = 54

    samples = np.zeros(60)
    samples[stimulus_onset:] = 1
    samples = np.tile(samples, (120, 1))

    targets = np.zeros(60)
    targets[reward_delivery] = 1
    targets = np.tile(targets, (120, 1))

    # no reward given every 15 trials to simulate a wrong response
    targets[14][reward_delivery] = 0
    targets[29][reward_delivery] = 0
    targets[44][reward_delivery] = 0
    targets[59][reward_delivery] = 0
    targets[74][reward_delivery] = 0
    targets[89][reward_delivery] = 0

    pnl.MappingProjection(sender=sample,
                          receiver=action_selection,
                          matrix=np.full((60, 60), 0.0))
    learning_projection = pnl.LearningProjection(
        learning_function=pnl.TDLearning(learning_rate=0.3))

    p = pnl.Process(default_variable=np.zeros(60),
                    pathway=[sample, action_selection],
                    learning=learning_projection,
                    size=60,
                    target=np.zeros(60))

    trial = 0

    def print_header():
        nonlocal trial
        print("\n\n*** EPISODE: {}".format(trial))

    def store_delta_vals():
        nonlocal trial
        delta_vals[trial] = s.mechanisms[2].value
        trial += 1

        print('Delta values: \n{0}'.format(s.mechanisms[2].value))

    input_list = {sample: samples}
    target_list = {action_selection: targets}

    s = pnl.System(processes=[p])

    delta_vals = np.zeros((120, 60))

    s.run(num_trials=120,
          inputs=input_list,
          targets=target_list,
          learning=True,
          call_before_trial=print_header,
          call_after_trial=store_delta_vals)

    with plt.style.context('seaborn'):
        plt.plot(delta_vals[0], "-o", label="Trial 1")
        plt.plot(delta_vals[29], "-s", label="Trial 30")
        plt.plot(delta_vals[49], "-o", label="Trial 50")
        plt.title("Montague et al. (1996) -- Figure 5A")
        plt.xlabel("Timestep")
        plt.ylabel("∂")
        plt.legend()
        plt.xlim(xmin=35)
        plt.xticks()
        plt.show()
import psyneulink as pnl
import numpy as np

target_stim = pnl.TransferMechanism(name='Target Stimulus',
                                    function=pnl.Linear(slope=0.3324))
flanker_stim = pnl.TransferMechanism(name='Flanker Stimulus',
                                     function=pnl.Linear(slope=0.3545221843))

# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(name='Target Representation')
Flanker_Rep = pnl.TransferMechanism(name='Flanker Representation')

# Processing Mechanism (Automatic)
Automatic_Component = pnl.TransferMechanism(name='Automatic Component')

# Decision Mechanism
Decision = pnl.DDM(name='Decision',
                   function=pnl.DriftDiffusionAnalytical(drift_rate=(1.0),
                                                         threshold=(0.2645),
                                                         noise=(0.5),
                                                         starting_point=(0),
                                                         t0=0.15),
                   output_ports=[
                       pnl.DECISION_VARIABLE,
                       pnl.RESPONSE_TIME,
                       pnl.PROBABILITY_UPPER_THRESHOLD
                   ])

# Outcome Mechanism
reward = pnl.TransferMechanism(name='reward')

# Pathways
def model_training_response_extinction():
    """
    This creates the plot for Figure 5C in the Montague paper. Figure 5C shows
    'extinction of response to the sensory cue.' The setup is the same as
    Figure 5A, except that reward delivery stops at trial 70.
    """
    sample = pnl.TransferMechanism(default_variable=np.zeros(60),
                                   name=pnl.SAMPLE)

    action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             function=pnl.Linear(slope=1.0, intercept=1.0),
                                             name='Action Selection')

    stimulus_onset = 42
    reward_delivery = 54

    samples = np.zeros(60)
    samples[stimulus_onset:] = 1
    samples = np.tile(samples, (150, 1))

    targets = np.zeros(60)
    targets[reward_delivery] = 1
    targets = np.tile(targets, (150, 1))

    # stop delivering reward after trial 70
    for i in range(71, 150):
        targets[i][reward_delivery] = 0

    pnl.MappingProjection(sender=sample,
                          receiver=action_selection,
                          matrix=np.zeros((60, 60)))
    learning_projection = pnl.LearningProjection(
        learning_function=pnl.TDLearning(learning_rate=0.3))

    p = pnl.Process(default_variable=np.zeros(60),
                    pathway=[sample, action_selection],
                    learning=learning_projection,
                    size=60,
                    target=np.zeros(60))

    trial = 0

    def print_header():
        nonlocal trial
        print("\n\n*** EPISODE: {}".format(trial))

    input_list = {sample: samples}
    target_list = {action_selection: targets}

    s = pnl.System(processes=[p])

    delta_vals = np.zeros((150, 60))

    trial = 0

    def store_delta_vals():
        nonlocal trial
        delta_vals[trial] = s.mechanisms[2].value
        trial += 1

    s.run(num_trials=150,
          inputs=input_list,
          targets=target_list,
          learning=True,
          call_before_trial=print_header,
          call_after_trial=store_delta_vals)

    with plt.style.context('seaborn'):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x_vals, y_vals = np.meshgrid(np.arange(150), np.arange(40, 60, step=1))
        ax.plot_surface(x_vals, y_vals, delta_vals[:, 40:60].transpose())
        ax.invert_yaxis()
        ax.set_xlabel("Trial")
        ax.set_ylabel("Timestep")
        ax.set_zlabel("∂")
        ax.set_title("Montague et al. (1996) -- Figure 5C")
        plt.show()
# set up inner comp controller and add to comp
icomp.add_controller(
    pnl.OptimizationControlMechanism(
        agent_rep=icomp,
        features=[ia.input_port, ib.input_port],
        name="Controller",
        objective_mechanism=pnl.ObjectiveMechanism(
            monitor=ic.output_port,
            function=pnl.SimpleIntegrator,
            name="iController Objective Mechanism"
        ),
        function=pnl.GridSearch(direction=pnl.MAXIMIZE),
        control_signals=[pnl.ControlSignal(projections=[(pnl.SLOPE, ia)],
                                           variable=1.0,
                                           intensity_cost_function=pnl.Linear(slope=0.0),
                                           allocation_samples=pnl.SampleSpec(start=1.0,
                                                                             stop=5.0,
                                                                             num=5))])
)

# instantiate outer comp
ocomp = pnl.Composition(name='ocomp', controller_mode=pnl.BEFORE)

# setup structure for outer comp
ocomp.add_node(icomp)

# add controller to outer comp
ocomp.add_controller(
    pnl.OptimizationControlMechanism(
        agent_rep=ocomp,
def model_training_full_experiment():
    """
    This creates the plot for figure 5B in the Montague paper. Figure 5B shows
    the 'entire time course of model responses (trials 1-150).' The setup is
    the same as in Figure 5A, except that training begins at trial 10.
    """
    sample = pnl.TransferMechanism(default_variable=np.zeros(60),
                                   name=pnl.SAMPLE)

    action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             function=pnl.Linear(slope=1.0, intercept=1.0),
                                             name='Action Selection')

    stimulus_onset = 41
    reward_delivery = 54

    samples = np.zeros(60)
    samples[stimulus_onset:] = 1
    samples = np.tile(samples, (120, 1))

    targets = np.zeros(60)
    targets[reward_delivery] = 1
    targets = np.tile(targets, (120, 1))

    # training begins at trial 11
    # no reward given every 15 trials to simulate a wrong response
    no_reward_trials = [
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 29, 44, 59, 74, 89, 104, 119
    ]
    for t in no_reward_trials:
        targets[t][reward_delivery] = 0

    pnl.MappingProjection(sender=sample,
                          receiver=action_selection,
                          matrix=np.zeros((60, 60)))
    learning_projection = pnl.LearningProjection(
        learning_function=pnl.TDLearning(learning_rate=0.3))

    p = pnl.Process(default_variable=np.zeros(60),
                    pathway=[sample, action_selection],
                    learning=learning_projection,
                    size=60,
                    target=np.zeros(60))

    trial = 0

    def print_header():
        nonlocal trial
        print("\n\n*** EPISODE: {}".format(trial))

    def store_delta_vals():
        nonlocal trial
        delta_vals[trial] = s.mechanisms[2].value
        trial += 1

    input_list = {sample: samples}
    target_list = {action_selection: targets}

    s = pnl.System(processes=[p])

    delta_vals = np.zeros((120, 60))

    s.run(num_trials=120,
          inputs=input_list,
          targets=target_list,
          learning=True,
          call_before_trial=print_header,
          call_after_trial=store_delta_vals)

    with plt.style.context('seaborn'):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x_vals, y_vals = np.meshgrid(np.arange(120), np.arange(40, 60, step=1))
        ax.plot_surface(x_vals, y_vals, delta_vals[:, 40:60].transpose())
        ax.invert_yaxis()
        ax.set_xlabel("Trial")
        ax.set_ylabel("Timestep")
        ax.set_zlabel("∂")
        ax.set_title("Montague et al. (1996) -- Figure 5B")
        plt.show()