def test_lc_control_mech_basic(self, benchmark, mode):
    """Execute a standalone LCControlMechanism in each compilation mode and
    check that all four ControlSignal values match the expected gain."""
    LC = pnl.LCControlMechanism(base_level_gain=3.0,
                                scaling_factor_gain=0.5,
                                default_variable=10.0)

    # Build an execute callable appropriate to the requested mode.
    if mode == 'Python':
        def EX(variable):
            LC.execute(variable)
            return LC.output_values
    elif mode == 'LLVM':
        mech_exec = pnlvm.execution.MechExecution(LC)
        EX = mech_exec.execute
    elif mode == 'PTX':
        mech_exec = pnlvm.execution.MechExecution(LC)
        EX = mech_exec.cuda_execute

    val = EX([10.0])

    # All values are the same because LCControlMechanism assigns all of its
    # ControlSignals to the same value (the 1st item of its function's value).
    # FIX: 6/6/19 - Python returns 3d array but LLVM returns 2d array
    # (np.allclose bizarrely passes for LLVM because all the values are the same)
    expected = [[[3.00139776]], [[3.00139776]], [[3.00139776]], [[3.00139776]]]
    assert np.allclose(val, expected)

    if benchmark.enabled:
        benchmark(EX, [10.0])
def test_lc_control_mech_basic(self, benchmark, mode):
    """Execute an LCControlMechanism in each mode and compare the combined
    output-state values against the expected gain value."""
    LC = pnl.LCControlMechanism(
        base_level_gain=3.0,
        scaling_factor_gain=0.5,
        default_variable=10.0,
    )

    # Select the execute callable for the requested compilation mode.
    if mode == 'Python':
        EX = LC.execute
    elif mode == 'LLVM':
        mech_exec = pnlvm.execution.MechExecution(LC)
        EX = mech_exec.execute
    elif mode == 'PTX':
        mech_exec = pnlvm.execution.MechExecution(LC)
        EX = mech_exec.cuda_execute

    val = EX([10.0])

    # LLVM returns combination of all output states so let's do that for
    # Python as well
    if mode == 'Python':
        val = [s.value for s in LC.output_states]

    benchmark(EX, [10.0])

    assert np.allclose(val, [3.00139776, 3.00139776, 3.00139776, 3.00139776])
def test_lc_control_mech_basic(self, benchmark, mode):
    """Execute a standalone LCControlMechanism and check all four items of
    its result (gain signal plus FitzHugh-Nagumo state values), then benchmark.

    Fix: ``np.asfarray`` was removed in NumPy 2.0; ``np.asarray(..., dtype=float)``
    is the documented equivalent and behaves identically here.
    """
    LC = pnl.LCControlMechanism(
        base_level_gain=3.0,
        scaling_factor_gain=0.5
    )
    val = LC.execute([[10.0]])
    # Flatten the (possibly nested) result to compare all four scalar values.
    assert np.allclose(np.asarray(val, dtype=float).flatten(),
                       [3.00139776, 0.512152259, .00279552477, 0.05000])
    # Benchmark the execute path; the returned value is intentionally unchecked
    # (it was validated above on the first call).
    val = benchmark(LC.execute, [[10.0]])
def test_lc_control_modulated_mechanisms_all(self):
    """With modulated_mechanisms=pnl.ALL, a single ControlSignal should
    project to the SLOPE parameter of every mechanism in the System."""
    T_1 = pnl.TransferMechanism(name='T_1')
    T_2 = pnl.TransferMechanism(name='T_2')
    LC = pnl.LCControlMechanism(monitor_for_control=[T_1, T_2],
                                modulated_mechanisms=pnl.ALL)
    # Building the System triggers assignment of the LC's modulatory projections.
    S = pnl.System(processes=[pnl.proc(T_1, T_2, LC)])

    signal = LC.control_signals[0]
    assert len(LC.control_signals) == 1
    assert len(signal.efferents) == 2
    # Each TransferMechanism's slope should receive one of the LC's projections.
    assert T_1.parameter_states[pnl.SLOPE].mod_afferents[0] in signal.efferents
    assert T_2.parameter_states[pnl.SLOPE].mod_afferents[0] in signal.efferents
def test_lc_control_modulated_mechanisms_all(self):
    """With a Composition passed as modulated_mechanisms, a single
    ControlSignal should project to the SLOPE of every node in it."""
    T_1 = pnl.TransferMechanism(name='T_1')
    T_2 = pnl.TransferMechanism(name='T_2')

    # S = pnl.System(processes=[pnl.proc(T_1, T_2, LC)])
    C = pnl.Composition(pathways=[T_1, T_2])
    LC = pnl.LCControlMechanism(monitor_for_control=[T_1, T_2],
                                modulated_mechanisms=C)
    C.add_node(LC)

    signal = LC.control_signals[0]
    assert len(LC.control_signals) == 1
    assert len(signal.efferents) == 2
    # Each TransferMechanism's slope should receive one of the LC's projections.
    assert T_1.parameter_ports[pnl.SLOPE].mod_afferents[0] in signal.efferents
    assert T_2.parameter_ports[pnl.SLOPE].mod_afferents[0] in signal.efferents
def test_lc_control_mech_basic(self, benchmark, mech_mode):
    """Execute a standalone LCControlMechanism via the shared mech-execution
    helper and check that all four ControlSignal values match."""
    LC = pnl.LCControlMechanism(base_level_gain=3.0,
                                scaling_factor_gain=0.5,
                                default_variable=10.0)
    # Helper returns the right execute callable for the given mech_mode.
    EX = pytest.helpers.get_mech_execution(LC, mech_mode)

    val = EX([10.0])

    # All values are the same because LCControlMechanism assigns all of its
    # ControlSignals to the same value (the 1st item of its function's value).
    # FIX: 6/6/19 - Python returns 3d array but LLVM returns 2d array
    # (np.allclose bizarrely passes for LLVM because all the values are the same)
    expected = [[[3.00139776]], [[3.00139776]], [[3.00139776]], [[3.00139776]]]
    assert np.allclose(val, expected)

    if benchmark.enabled:
        benchmark(EX, [10.0])
# LC mechanism integrating a FitzHugh-Nagumo system (Euler method, as in the
# paper) and computing gain from it via base_level_gain (G) and
# scaling_factor_gain (k).
LC = pnl.LCControlMechanism(
    integration_method="EULER",
    # FitzHugh-Nagumo system parameters:
    threshold_FitzHughNagumo=a,
    uncorrelated_activity_FitzHughNagumo=d,
    time_step_size_FitzHughNagumo=dt,
    mode_FitzHughNagumo=C,
    time_constant_v_FitzHughNagumo=tau_v,
    time_constant_w_FitzHughNagumo=tau_u,
    a_v_FitzHughNagumo=-1.0,
    b_v_FitzHughNagumo=1.0,
    c_v_FitzHughNagumo=1.0,
    d_v_FitzHughNagumo=0.0,
    e_v_FitzHughNagumo=-1.0,
    f_v_FitzHughNagumo=1.0,
    a_w_FitzHughNagumo=1.0,
    b_w_FitzHughNagumo=-1.0,
    c_w_FitzHughNagumo=0.0,
    t_0_FitzHughNagumo=0.0,
    # Gain equation parameters G and k:
    base_level_gain=G,
    scaling_factor_gain=k,
    # Initial values of v and w:
    initial_v_FitzHughNagumo=initial_v,
    initial_w_FitzHughNagumo=initial_w,
    objective_mechanism=pnl.ObjectiveMechanism(
        function=psyneulink.core.components.functions.transferfunctions.Linear,
        # Project output of T1 and T2 (but not the distractor) from the
        # decision layer to the LC, weighted by lcwt.
        monitored_output_states=[(
            decision_layer,
            np.array([[lcwt], [lcwt], [0.0]])
        )],
        name='Combine values'
    ),
    # Modulate gain of decision & response layers.
    modulated_mechanisms=[decision_layer, response_layer],
    name='LC'
)
# LC mechanism: FitzHugh-Nagumo system integrated with the Euler method;
# gain is computed from base_level_gain (G) and scaling_factor_gain (k).
LC = pnl.LCControlMechanism(
    integration_method="EULER",
    threshold_FitzHughNagumo=a,
    uncorrelated_activity_FitzHughNagumo=d,
    # Gain equation parameters:
    base_level_gain=G,
    scaling_factor_gain=k,
    # FitzHugh-Nagumo system parameters:
    time_step_size_FitzHughNagumo=dt,
    mode_FitzHughNagumo=C,
    time_constant_v_FitzHughNagumo=tau_v,
    time_constant_w_FitzHughNagumo=tau_u,
    a_v_FitzHughNagumo=-1.0,
    b_v_FitzHughNagumo=1.0,
    c_v_FitzHughNagumo=1.0,
    d_v_FitzHughNagumo=0.0,
    e_v_FitzHughNagumo=-1.0,
    f_v_FitzHughNagumo=1.0,
    a_w_FitzHughNagumo=1.0,
    b_w_FitzHughNagumo=-1.0,
    c_w_FitzHughNagumo=0.0,
    t_0_FitzHughNagumo=0.0,
    # Initial values of v and w:
    initial_v_FitzHughNagumo=initial_v,
    initial_w_FitzHughNagumo=initial_u,
    objective_mechanism=pnl.ObjectiveMechanism(
        function=psyneulink.core.components.functions.transferfunctions.Linear,
        # Only the first unit of the decision layer projects to the LC.
        monitored_output_ports=[(decision_layer, None, None,
                                 np.array([[w_vX1], [0.0]]))],
        name='LC ObjectiveMechanism'),
    # Modulate gain of decision & response layers.
    modulated_mechanisms=[decision_layer, response_layer],
    name='LC')
# NOTE(review): whitespace-collapsed script fragment. The trailing
# `updateC = pnl.ControlMechanism(...)` call is truncated in this view (its
# remaining arguments and closing paren are missing), so the code is left
# byte-identical here — confirm the full statement against the original script.
# What is visible: a Process over [action_selection, conflicts]; an LC-NE
# LCControlMechanism (Euler-integrated FitzHugh-Nagumo, gain params G/k)
# monitoring and modulating `action_selection`; and the start of a
# ControlMechanism whose ObjectiveMechanism monitors action_selection's second
# output state and the conflicts mechanism's output state.
conflict_process = pnl.Process(pathway=[action_selection, conflicts]) LC_NE = pnl.LCControlMechanism(objective_mechanism=pnl.ObjectiveMechanism(monitored_output_states=[action_selection], name='LC-NE ObjectiveMech'), modulated_mechanisms=[action_selection], integration_method='EULER', initial_w_FitzHughNagumo=initial_u, initial_v_FitzHughNagumo=initial_v, time_step_size_FitzHughNagumo=dt, t_0_FitzHughNagumo=0.0, a_v_FitzHughNagumo=-1.0, b_v_FitzHughNagumo=1.0, c_v_FitzHughNagumo=1.0, d_v_FitzHughNagumo=0.0, e_v_FitzHughNagumo=-1.0, f_v_FitzHughNagumo=1.0, time_constant_v_FitzHughNagumo=tau_v, a_w_FitzHughNagumo=1.0, b_w_FitzHughNagumo=-1.0, c_w_FitzHughNagumo=0.0, threshold_FitzHughNagumo=a, time_constant_w_FitzHughNagumo=tau_u, mode_FitzHughNagumo=C, uncorrelated_activity_FitzHughNagumo=d, base_level_gain=G, scaling_factor_gain=k, name='LC-NE') updateC = pnl.ControlMechanism(objective_mechanism=pnl.ObjectiveMechanism( monitor_for_control=[action_selection.output_states[1], conflicts.output_state]),
def test_default_lc_control_mechanism(self, benchmark, mode):
    """Run a System with an LCControlMechanism modulating the gain of two
    Logistic TransferMechanisms, and verify the lag-one relation between the
    LC's output and the mod_gain applied to A and B."""
    G = 1.0
    k = 0.5
    starting_value_LC = 2.0
    user_specified_gain = 1.0

    A = pnl.TransferMechanism(function=pnl.Logistic(gain=user_specified_gain), name='A')
    B = pnl.TransferMechanism(function=pnl.Logistic(gain=user_specified_gain), name='B')
    # B.output_states[0].value *= 0.0  # Reset after init | Doesn't matter here b/c default var = zero, no intercept

    LC = pnl.LCControlMechanism(
        modulated_mechanisms=[A, B],
        base_level_gain=G,
        scaling_factor_gain=k,
        objective_mechanism=pnl.ObjectiveMechanism(
            function=pnl.Linear,
            monitored_output_states=[B],
            name='LC ObjectiveMechanism'
        )
    )
    # Hard-code the LC's starting output so trial 0's gain is known.
    for output_state in LC.output_states:
        output_state.value *= starting_value_LC

    P = pnl.Process(pathway=[A, B, LC])
    S = pnl.System(processes=[P])
    LC.reinitialize_when = pnl.Never()
    # THIS CURRENTLY DOES NOT WORK:
    # P = pnl.Process(pathway=[A, B])
    # P2 = pnl.Process(pathway=[LC])
    # S = pnl.System(processes=[P, P2])
    # S.show_graph()

    gain_created_by_LC_output_state_1 = []
    mod_gain_assigned_to_A = []
    base_gain_assigned_to_A = []
    mod_gain_assigned_to_B = []
    base_gain_assigned_to_B = []

    def report_trial():
        # Record per-trial LC output plus the modulated and base gains of A/B.
        gain_created_by_LC_output_state_1.append(LC.output_states[0].value[0])
        mod_gain_assigned_to_A.append(A.mod_gain)
        mod_gain_assigned_to_B.append(B.mod_gain)
        base_gain_assigned_to_A.append(A.function_object.gain)
        base_gain_assigned_to_B.append(B.function_object.gain)

    benchmark(S.run,
              inputs={A: [[1.0], [1.0], [1.0], [1.0], [1.0]]},
              call_after_trial=report_trial)

    # (1) First value of gain in mechanisms A and B must be whatever we hardcoded for LC starting value
    assert mod_gain_assigned_to_A[0] == starting_value_LC

    # (2) _gain should always be set to user-specified value
    for i in range(5):
        assert base_gain_assigned_to_A[i] == user_specified_gain
        assert base_gain_assigned_to_B[i] == user_specified_gain

    # (3) LC output on trial n becomes gain of A and B on trial n + 1
    assert np.allclose(mod_gain_assigned_to_A[1:],
                       gain_created_by_LC_output_state_1[0:-1])

    # (4) mechanisms A and B should always have the same gain values (b/c they are identical)
    assert np.allclose(mod_gain_assigned_to_A, mod_gain_assigned_to_B)
def test_default_lc_control_mechanism(self):
    """Composition version: LCControlMechanism modulates the gain of two
    Logistic TransferMechanisms; verify the lag-one relation between the LC's
    output and the mod_gain applied to A and B."""
    G = 1.0
    k = 0.5
    starting_value_LC = 2.0
    user_specified_gain = 1.0

    A = pnl.TransferMechanism(
        function=pnl.Logistic(gain=user_specified_gain), name='A')
    B = pnl.TransferMechanism(
        function=pnl.Logistic(gain=user_specified_gain), name='B')
    # B.output_states[0].value *= 0.0  # Reset after init | Doesn't matter here b/c default var = zero, no intercept

    LC = pnl.LCControlMechanism(modulated_mechanisms=[A, B],
                                base_level_gain=G,
                                scaling_factor_gain=k,
                                objective_mechanism=pnl.ObjectiveMechanism(
                                    function=pnl.Linear,
                                    monitor=[B],
                                    name='LC ObjectiveMechanism'))
    # Hard-code the LC's starting output so trial 0's gain is known.
    for output_state in LC.output_states:
        output_state.value *= starting_value_LC

    path = [A, B, LC]
    S = pnl.Composition()
    S.add_node(A, required_roles=pnl.NodeRole.INPUT)
    S.add_linear_processing_pathway(pathway=path)
    S.add_node(LC, required_roles=pnl.NodeRole.OUTPUT)
    LC.reinitialize_when = pnl.Never()

    gain_created_by_LC_output_state_1 = []
    mod_gain_assigned_to_A = []
    base_gain_assigned_to_A = []
    mod_gain_assigned_to_B = []
    base_gain_assigned_to_B = []
    A_value = []
    B_value = []
    LC_value = []

    def report_trial(system):
        # Record per-trial LC output, modulated/base gains, and raw values.
        gain_created_by_LC_output_state_1.append(
            LC.output_state.parameters.value.get(system)[0])
        mod_gain_assigned_to_A.append(A.get_mod_gain(system))
        mod_gain_assigned_to_B.append(B.get_mod_gain(system))
        base_gain_assigned_to_A.append(A.function.parameters.gain.get())
        base_gain_assigned_to_B.append(B.function.parameters.gain.get())
        A_value.append(A.parameters.value.get(system))
        B_value.append(B.parameters.value.get(system))
        LC_value.append(LC.parameters.value.get(system))

    result = S.run(inputs={A: [[1.0], [1.0], [1.0], [1.0], [1.0]]},
                   call_after_trial=functools.partial(report_trial, S))

    # (1) First value of gain in mechanisms A and B must be whatever we hardcoded for LC starting value
    assert mod_gain_assigned_to_A[0] == starting_value_LC

    # (2) _gain should always be set to user-specified value
    for i in range(5):
        assert base_gain_assigned_to_A[i] == user_specified_gain
        assert base_gain_assigned_to_B[i] == user_specified_gain

    # (3) LC output on trial n becomes gain of A and B on trial n + 1
    assert np.allclose(mod_gain_assigned_to_A[1:],
                       gain_created_by_LC_output_state_1[0:-1])

    # (4) mechanisms A and B should always have the same gain values (b/c they are identical)
    assert np.allclose(mod_gain_assigned_to_A, mod_gain_assigned_to_B)
def test_lc_control_mechanism_as_controller(self, benchmark, mode):
    """Install an LCControlMechanism as a Composition controller and verify
    the lag-one relation between its output and the mod_gain of A and B."""
    G = 1.0
    k = 0.5
    starting_value_LC = 2.0
    user_specified_gain = 1.0

    A = pnl.TransferMechanism(
        function=psyneulink.core.components.functions.transferfunctions.Logistic(
            gain=user_specified_gain),
        name='A')
    B = pnl.TransferMechanism(
        function=psyneulink.core.components.functions.transferfunctions.Logistic(
            gain=user_specified_gain),
        name='B')
    C = pnl.Composition()
    LC = pnl.LCControlMechanism(
        modulated_mechanisms=[A, B],
        base_level_gain=G,
        scaling_factor_gain=k,
        objective_mechanism=pnl.ObjectiveMechanism(
            function=psyneulink.core.components.functions.transferfunctions.Linear,
            monitor=[B],
            name='LC ObjectiveMechanism'))
    C.add_linear_processing_pathway([A, B])
    C.add_controller(LC)

    # Hard-code the LC's starting output so trial 0's gain is known.
    for output_port in LC.output_ports:
        output_port.parameters.value.set(output_port.value * starting_value_LC,
                                         C, override=True)
    LC.reset_stateful_function_when = pnl.Never()

    gain_created_by_LC_output_port_1 = []
    mod_gain_assigned_to_A = []
    base_gain_assigned_to_A = []
    mod_gain_assigned_to_B = []
    base_gain_assigned_to_B = []

    def report_trial(composition):
        # Record per-trial LC output plus the modulated and base gains of A/B.
        from psyneulink import parse_context
        context = parse_context(composition)
        gain_created_by_LC_output_port_1.append(
            LC.output_ports[0].parameters.value.get(context))
        mod_gain_assigned_to_A.append([A.get_mod_gain(composition)])
        mod_gain_assigned_to_B.append([B.get_mod_gain(composition)])
        base_gain_assigned_to_A.append(A.function.gain)
        base_gain_assigned_to_B.append(B.function.gain)

    C._analyze_graph()
    benchmark(C.run,
              inputs={A: [[1.0], [1.0], [1.0], [1.0], [1.0]]},
              call_after_trial=functools.partial(report_trial, C))

    # (1) First value of gain in mechanisms A and B must be whatever we hardcoded for LC starting value
    assert mod_gain_assigned_to_A[0] == [starting_value_LC]

    # (2) _gain should always be set to user-specified value
    for i in range(5):
        assert base_gain_assigned_to_A[i] == user_specified_gain
        assert base_gain_assigned_to_B[i] == user_specified_gain

    # (3) LC output on trial n becomes gain of A and B on trial n + 1
    assert np.allclose(mod_gain_assigned_to_A[1:],
                       gain_created_by_LC_output_port_1[0:-1])

    # (4) mechanisms A and B should always have the same gain values (b/c they are identical)
    assert np.allclose(mod_gain_assigned_to_A, mod_gain_assigned_to_B)