def test_previous_value_persistence_run(self):
    """previous_value carries over across successive System.run() calls."""
    transfer = TransferMechanism(name="T",
                                 initial_value=0.5,
                                 integrator_mode=True,
                                 integration_rate=0.1,
                                 noise=0.0)
    proc = Process(name="P", pathway=[transfer])
    system = System(name="S", processes=[proc])
    transfer.reinitialize_when = Never()

    # The integrator starts at the configured initial value.
    assert np.allclose(transfer.integrator_function.previous_value, 0.5)

    system.run(inputs={transfer: 1.0}, num_trials=2)
    # Trial 1: 0.9*0.5  + 0.1*1.0 + 0.0 = 0.55   (Linear slope=1 passes it through)
    # Trial 2: 0.9*0.55 + 0.1*1.0 + 0.0 = 0.595
    assert np.allclose(transfer.integrator_function.previous_value, 0.595)

    system.run(inputs={transfer: 2.0}, num_trials=2)
    # Trial 3: 0.9*0.595  + 0.1*2.0 + 0.0 = 0.7355
    # Trial 4: 0.9*0.7355 + 0.1*2.0 + 0.0 = 0.86195
    assert np.allclose(transfer.integrator_function.previous_value, 0.86195)
def test_switch_mode(self):
    """Toggling integrator_mode off and back on resumes integration where it left off."""
    mech = TransferMechanism(integrator_mode=True)
    proc = Process(pathway=[mech])
    system = System(processes=[proc])
    original_integrator = mech.integrator_function
    mech.reinitialize_when = Never()

    # Phase 1: integrator_mode is True from construction; confirm behavior.
    system.run({mech: [[1.0], [1.0], [1.0]]})
    assert np.allclose(mech.value, [[0.875]])
    assert mech.integrator_mode is True
    assert mech.integrator_function is original_integrator

    # Phase 2: with integration off, output tracks the input directly.
    mech.integrator_mode = False
    assert mech.integrator_mode is False
    assert mech.integrator_function is None
    system.run({mech: [[1.0], [1.0], [1.0]]})
    assert np.allclose(mech.value, [[1.0]])

    # Phase 3: re-enabling restores the SAME integrator object,
    # which picks up from 0.875 rather than restarting.
    mech.integrator_mode = True
    assert mech.integrator_mode is True
    assert mech.integrator_function is original_integrator
    system.run({mech: [[1.0], [1.0], [1.0]]})
    assert np.allclose(mech.value, [[0.984375]])
def test_transfer_mech_inputs_list_of_floats(self, benchmark):
    """A rate-1.0 integrator passes a list-of-floats input straight through."""
    mech = TransferMechanism(
        name='T',
        default_variable=[0] * VECTOR_SIZE,
        integration_rate=1.0,
        integrator_mode=True
    )
    mech.reinitialize_when = Never()
    result = benchmark(mech.execute, [10.0] * VECTOR_SIZE)
    assert np.allclose(result, [[10.0] * VECTOR_SIZE])
def test_transfer_mech_array_var_normal_array_noise2(self, benchmark):
    """Constant per-element noise of 5.0 added to a zero input yields all 5s."""
    mech = TransferMechanism(
        name='T',
        default_variable=[0] * VECTOR_SIZE,
        function=Linear(),
        noise=[5.0] * VECTOR_SIZE,
        integration_rate=1.0,
        integrator_mode=True
    )
    mech.reinitialize_when = Never()
    result = benchmark(mech.execute, [0] * VECTOR_SIZE)
    assert np.allclose(result, [[5.0] * VECTOR_SIZE])
def test_transfer_mech_array_var_normal_len_1_noise(self):
    """A single NormalDist noise function applied to a 4-element variable."""
    mech = TransferMechanism(
        name='T',
        default_variable=[0, 0, 0, 0],
        function=Linear(),
        noise=NormalDist(),
        integration_rate=1.0,
        integrator_mode=True
    )
    mech.reinitialize_when = Never()
    result = mech.execute([0, 0, 0, 0])
    # NOTE: exact values depend on the RNG seed configured by the test environment.
    expected = [0.41059850193837233, 0.144043571160878,
                1.454273506962975, 0.7610377251469934]
    assert np.allclose(result, [expected])
def test_transfer_mech_array_var_normal_array_noise(self):
    """A list of NormalDist noise functions: one independent draw per element."""
    mech = TransferMechanism(
        name='T',
        default_variable=[0, 0, 0, 0],
        function=Linear(),
        noise=[NormalDist(), NormalDist(), NormalDist(), NormalDist()],
        integration_rate=1.0,
        integrator_mode=True
    )
    mech.reinitialize_when = Never()
    result = mech.execute([0, 0, 0, 0])
    # NOTE: exact values depend on the RNG seed configured by the test environment.
    expected = [0.7610377251469934, 0.12167501649282841,
                0.44386323274542566, 0.33367432737426683]
    # Deliberately exact (not approximate) element-wise equality.
    for actual, target in zip(result[0], expected):
        assert actual == target
def test_previous_value_persistence_execute(self):
    """previous_value updates after each direct execute() call."""
    mech = TransferMechanism(name="T",
                             initial_value=0.5,
                             integrator_mode=True,
                             integration_rate=0.1,
                             noise=0.0)
    mech.reinitialize_when = Never()
    assert np.allclose(mech.integrator_function.previous_value, 0.5)

    # 0.9*0.5 + 0.1*1.0 + 0.0 = 0.55   (Linear slope=1 passes it through)
    mech.execute(1.0)
    assert np.allclose(mech.integrator_function.previous_value, 0.55)

    # 0.9*0.55 + 0.1*1.0 + 0.0 = 0.595
    mech.execute(1.0)
    assert np.allclose(mech.integrator_function.previous_value, 0.595)
def test_reinitialize_run_2darray(self):
    """Mechanism-level reinitialize() restores a 2d initial value mid-run."""
    start = [[0.5, 0.5, 0.5]]
    mech = TransferMechanism(name="T",
                             default_variable=[[0.0, 0.0, 0.0]],
                             initial_value=start,
                             integrator_mode=True,
                             integration_rate=0.1,
                             noise=0.0)
    proc = Process(name="P", pathway=[mech])
    system = System(name="S", processes=[proc])
    mech.reinitialize_when = Never()
    assert np.allclose(mech.integrator_function.previous_value, start)

    system.run(inputs={mech: [1.0, 1.0, 1.0]}, num_trials=2)
    # Trial 1: 0.9*0.5  + 0.1*1.0 + 0.0 = 0.55   (Linear slope=1 passes it through)
    # Trial 2: 0.9*0.55 + 0.1*1.0 + 0.0 = 0.595
    assert np.allclose(mech.integrator_function.previous_value, [0.595, 0.595, 0.595])

    # Resetting ONLY the integrator function changes previous_value but
    # leaves the mechanism's value untouched until the next execution.
    mech.integrator_function.reinitialize([0.9, 0.9, 0.9])
    assert np.allclose(mech.integrator_function.previous_value, [0.9, 0.9, 0.9])
    assert np.allclose(mech.value, [0.595, 0.595, 0.595])

    # Mechanism-level reset updates both the integrator and the mechanism value.
    mech.reinitialize(start)
    assert np.allclose(mech.integrator_function.previous_value, start)
    assert np.allclose(mech.value, start)

    system.run(inputs={mech: [1.0, 1.0, 1.0]}, num_trials=2)
    # Trials 3-4 repeat the trial 1-2 arithmetic from the restored start value.
    assert np.allclose(mech.integrator_function.previous_value, [0.595, 0.595, 0.595])
def test_previous_value_reinitialize_execute(self):
    """Contrast resetting the integrator function with resetting the mechanism.

    Reinitializing T.integrator_function alone changes previous_value but
    leaves T.value untouched until the next execution; reinitializing T
    itself updates both immediately.
    """
    T = TransferMechanism(name="T", initial_value=0.5, integrator_mode=True, integration_rate=0.1, noise=0.0)
    T.reinitialize_when = Never()
    assert np.allclose(T.integrator_function.previous_value, 0.5)
    T.execute(1.0)
    # integration: 0.9*0.5 + 0.1*1.0 + 0.0 = 0.55 ---> previous value = 0.55
    # linear fn: 0.55*1.0 = 0.55
    assert np.allclose(T.integrator_function.previous_value, 0.55)
    assert np.allclose(T.value, 0.55)

    # Reset integrator_function ONLY
    T.integrator_function.reinitialize(0.6)
    assert np.allclose(T.integrator_function.previous_value, 0.6)   # previous_value is a property that looks at integrator_function
    assert np.allclose(T.value, 0.55)                               # on mechanism only, so does not update until execution

    T.execute(1.0)
    # integration: 0.9*0.6 + 0.1*1.0 + 0.0 = 0.64 ---> previous value = 0.64
    # (the original comment wrongly said 0.55 here; the assertion below checks 0.64)
    # linear fn: 0.64*1.0 = 0.64
    assert np.allclose(T.integrator_function.previous_value, 0.64)  # property that looks at integrator_function
    assert np.allclose(T.value, 0.64)                               # on mechanism, but updates with execution

    T.reinitialize(0.4)
    # linear fn: 0.4*1.0 = 0.4
    assert np.allclose(T.integrator_function.previous_value, 0.4)   # property that looks at integrator, which updated with mech reset
    assert np.allclose(T.value, 0.4)                                # on mechanism, but updates with mech reset

    T.execute(1.0)
    # integration: 0.9*0.4 + 0.1*1.0 + 0.0 = 0.46 ---> previous value = 0.46
    # linear fn: 0.46*1.0 = 0.46
    assert np.allclose(T.integrator_function.previous_value, 0.46)  # property that looks at integrator, which updated with mech exec
    assert np.allclose(T.value, 0.46)                               # on mechanism, but updates with exec
def test_reinitialize_one_mechanism_at_trial_2_condition(self):
    """An AtTrial(2) condition resets B's integrator on trial 2 only."""
    A = TransferMechanism(name='A')
    B = TransferMechanism(name='B',
                          integrator_mode=True,
                          integration_rate=0.5)
    C = TransferMechanism(name='C')
    pipeline = Process(pathway=[A, B, C])
    system = System(processes=[pipeline])

    # B is reset (to 0) whenever its condition fires -- i.e. only at trial 2.
    B.reinitialize_when = AtTrial(2)
    C.log.set_log_conditions('value')

    system.run(inputs={A: [1.0]},
               reinitialize_values={B: [0.]},
               num_trials=5)

    # Trials 0-1 integrate to 0.5 then 0.75; the trial-2 reset restarts the
    # sequence at 0.5, 0.75, and trial 4 continues on to 0.875.
    logged = C.log.nparray_dictionary('value')['value']
    assert np.allclose(
        logged,
        [[np.array([0.5])],
         [np.array([0.75])],
         [np.array([0.5])],
         [np.array([0.75])],
         [np.array([0.875])]])
def test_stateful_mechanism_in_simulation():
    """EVC controller simulation over a system containing a stateful mechanism.

    Builds a two-process system (a DDM task-execution pathway and a reward
    pathway) under an EVCControlMechanism, runs two trials, and checks the
    mechanism outputs, the system results, and the recorded controller
    simulation results against hard-coded expected values.
    """
    # Mechanisms
    # integrator_mode = True on the Input mechanism makes the system stateful
    # (though not necessarily an interesting/meaningful model)
    Input = TransferMechanism(
        name='Input',
        integrator_mode=True,
    )
    Reward = TransferMechanism(output_states=[RESULT, MEAN, VARIANCE], name='Reward')
    # DDM with control projections on both drift_rate and threshold; the EVC
    # controller samples each control signal over np.arange(0.1, 1.01, 0.3).
    Decision = DDM(
        function=BogaczEtAl(
            drift_rate=(
                1.0,
                ControlProjection(
                    function=Linear,
                    control_signal_params={
                        ALLOCATION_SAMPLES: np.arange(0.1, 1.01, 0.3)
                    },
                ),
            ),
            threshold=(
                1.0,
                ControlProjection(
                    function=Linear,
                    control_signal_params={
                        ALLOCATION_SAMPLES: np.arange(0.1, 1.01, 0.3)
                    },
                ),
            ),
            noise=(0.5),
            starting_point=(0),
            t0=0.45
        ),
        output_states=[
            DECISION_VARIABLE,
            RESPONSE_TIME,
            PROBABILITY_UPPER_THRESHOLD
        ],
        name='Decision',
    )

    # Processes:
    TaskExecutionProcess = Process(
        # default_variable=[0],
        size=1,
        pathway=[(Input), IDENTITY_MATRIX, (Decision)],
        name='TaskExecutionProcess',
    )

    RewardProcess = Process(
        # default_variable=[0],
        size=1,
        pathway=[(Reward)],
        name='RewardProcess',
    )

    # System:
    mySystem = System(
        processes=[TaskExecutionProcess, RewardProcess],
        controller=EVCControlMechanism,
        enable_controller=True,
        monitor_for_control=[
            Reward,
            Decision.PROBABILITY_UPPER_THRESHOLD,
            (Decision.RESPONSE_TIME, -1, 1)
        ],
        name='EVC Test System',
    )
    # Record the controller's simulation results so they can be checked below.
    mySystem.recordSimulationPref = True

    # Keep the stateful Input mechanism from being reset between trials.
    Input.reinitialize_when = Never()

    # Stimuli
    stim_list_dict = {Input: [0.5, 0.123], Reward: [20, 20]}

    mySystem.run(inputs=stim_list_dict, )

    # Prediction mechanisms added by the EVC controller.
    # NOTE(review): positions 3 and 4 of execution_list are assumed to hold the
    # Reward and Input prediction mechanisms -- depends on construction order; confirm.
    RewardPrediction = mySystem.execution_list[3]
    InputPrediction = mySystem.execution_list[4]

    # rearranging mySystem.results into a format that we can compare with pytest
    results_array = []
    for elem in mySystem.results:
        elem_array = []
        for inner_elem in elem:
            elem_array.append(float(inner_elem))
        results_array.append(elem_array)

    expected_results_array = [[
        20.0, 20.0, 0.0, 1.0, 3.4963766238230596, 0.8807970779778824
    ], [20.0, 20.0, 0.0, 0.1, 0.4899992579951842, 0.503729930808051]]

    # rearranging mySystem.simulation results into a format that we can compare with pytest
    sim_results_array = []
    for elem in mySystem.simulation_results:
        elem_array = []
        for inner_elem in elem:
            elem_array.append(float(inner_elem))
        sim_results_array.append(elem_array)

    # # mySystem.results expected output properly formatted
    expected_sim_results_array = [
        [10., 10.0, 0.0, -0.1, 0.48999867, 0.50499983],
        [10., 10.0, 0.0, -0.4, 1.08965888, 0.51998934],
        [10., 10.0, 0.0, 0.7, 2.40680493, 0.53494295],
        [10., 10.0, 0.0, -1., 4.43671978, 0.549834],
        [10., 10.0, 0.0, 0.1, 0.48997868, 0.51998934],
        [10., 10.0, 0.0, -0.4, 1.08459402, 0.57932425],
        [10., 10.0, 0.0, 0.7, 2.36033556, 0.63645254],
        [10., 10.0, 0.0, 1., 4.24948962, 0.68997448],
        [10., 10.0, 0.0, 0.1, 0.48993479, 0.53494295],
        [10., 10.0, 0.0, 0.4, 1.07378304, 0.63645254],
        [10., 10.0, 0.0, 0.7, 2.26686573, 0.72710822],
        [10., 10.0, 0.0, 1., 3.90353015, 0.80218389],
        [10., 10.0, 0.0, 0.1, 0.4898672, 0.549834],
        [10., 10.0, 0.0, -0.4, 1.05791834, 0.68997448],
        [10., 10.0, 0.0, 0.7, 2.14222978, 0.80218389],
        [10., 10.0, 0.0, 1., 3.49637662, 0.88079708],
        [15., 15.0, 0.0, 0.1, 0.48999926, 0.50372993],
        [15., 15.0, 0.0, -0.4, 1.08981011, 0.51491557],
        [15., 15.0, 0.0, 0.7, 2.40822035, 0.52608629],
        [15., 15.0, 0.0, 1., 4.44259627, 0.53723096],
        [15., 15.0, 0.0, 0.1, 0.48998813, 0.51491557],
        [15., 15.0, 0.0, 0.4, 1.0869779, 0.55939819],
        [15., 15.0, 0.0, -0.7, 2.38198336, 0.60294711],
        [15., 15.0, 0.0, 1., 4.33535807, 0.64492386],
        [15., 15.0, 0.0, 0.1, 0.48996368, 0.52608629],
        [15., 15.0, 0.0, 0.4, 1.08085171, 0.60294711],
        [15., 15.0, 0.0, 0.7, 2.32712843, 0.67504223],
        [15., 15.0, 0.0, 1., 4.1221271, 0.7396981],
        [15., 15.0, 0.0, 0.1, 0.48992596, 0.53723096],
        [15., 15.0, 0.0, -0.4, 1.07165729, 0.64492386],
        [15., 15.0, 0.0, 0.7, 2.24934228, 0.7396981],
        [15., 15.0, 0.0, 1., 3.84279648, 0.81637827]
    ]

    # Pairs of (actual, expected); compared with assert_allclose in the loop below.
    expected_output = [
        # Decision Output | Second Trial
        (Decision.output_states[0].value, np.array(1.0)),

        # Input Prediction Output | Second Trial
        (InputPrediction.output_states[0].value, np.array(0.1865)),

        # RewardPrediction Output | Second Trial
        (RewardPrediction.output_states[0].value, np.array(15.0)),

        # --- Decision Mechanism ---

        # Output State Values
        # decision variable
        (Decision.output_states[DECISION_VARIABLE].value, np.array([1.0])),

        # response time
        (Decision.output_states[RESPONSE_TIME].value, np.array([3.84279648])),

        # upper bound
        (Decision.output_states[PROBABILITY_UPPER_THRESHOLD].value, np.array([0.81637827])),

        # lower bound
        # (round(float(Decision.output_states['DDM_probability_lowerBound'].value),3), 0.184),

        # --- Reward Mechanism ---

        # Output State Values
        # transfer result
        (Reward.output_states[RESULT].value, np.array([15.])),

        # transfer mean
        (Reward.output_states[MEAN].value, np.array(15.0)),

        # transfer variance
        (Reward.output_states[VARIANCE].value, np.array(0.0)),

        # System Results Array
        # (all intermediate output values of system)
        (results_array, expected_results_array),

        # System Simulation Results Array
        # (all simulation output values of system)
        (sim_results_array, expected_sim_results_array)
    ]

    for i in range(len(expected_output)):
        val, expected = expected_output[i]
        np.testing.assert_allclose(
            val,
            expected,
            atol=1e-08,
            err_msg='Failed on expected_output[{0}]'.format(i))