import numpy as np
import psyneulink as pnl


def test_reset_state_integrator_mechanism(self):
    A = pnl.IntegratorMechanism(name='A', function=pnl.DriftDiffusionIntegrator())

    # Execute A twice
    #  [0] saves the decision variable only (not time)
    original_output = [A.execute(1.0)[0], A.execute(1.0)[0]]

    # SAVING STATE  - - - - - - - - - - - - - - - - - - - - - - - - -
    reset_stateful_functions_to = {}
    for attr in A.function.stateful_attributes:
        reset_stateful_functions_to[attr] = getattr(A.function, attr)

    print(reset_stateful_functions_to)

    # Execute A twice AFTER saving the state so that it continues accumulating.
    # We expect the next two outputs to repeat once we reset the state,
    # because the reset will return A to the state saved above.
    output_after_saving_state = [A.execute(1.0)[0], A.execute(1.0)[0]]

    # RESETTING STATE - - - - - - - - - - - - - - - - - - - - - - - -
    A.reset(**reset_stateful_functions_to)

    # We expect these results to match the results from immediately after saving the state
    output_after_reinitialization = [A.execute(1.0)[0], A.execute(1.0)[0]]

    assert np.allclose(output_after_saving_state, output_after_reinitialization)
    assert np.allclose(original_output, [np.array([[1.0]]), np.array([[2.0]])])
    assert np.allclose(output_after_reinitialization, [np.array([[3.0]]), np.array([[4.0]])])
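
# A minimal sketch (not part of the original tests) of the save/restore pattern used
# above. The helper name `_snapshot_stateful_attributes` is hypothetical; it relies only
# on the `function.stateful_attributes` and `reset(**saved)` calls shown in the test.
def _snapshot_stateful_attributes(mech):
    """Capture the current values of a mechanism function's stateful attributes.

    The returned dict can later be passed back as ``mech.reset(**saved)`` so that
    subsequent executions repeat from the saved state.
    """
    return {attr: getattr(mech.function, attr) for attr in mech.function.stateful_attributes}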
def test_DDM_in_composition(benchmark, mode):
    M = pnl.DDM(
        name='DDM',
        function=pnl.DriftDiffusionIntegrator(
            rate=1,
            noise=0.0,
            offset=0.0,
            starting_point=0.0,
            time_step_size=0.1,
        ),
    )
    C = pnl.Composition()
    C.add_linear_processing_pathway([M])
    inputs = {M: [10]}

    val = C.run(inputs, num_trials=2, bin_execute=mode)

    # FIXME: Python version returns dtype=object
    val = np.asfarray(val)

    # Each trial advances the integrator by rate * input * time_step_size = 1 * 10 * 0.1 = 1.0
    # and advances time by 0.1, so after two trials the decision variable is 2.0 and the
    # elapsed time is 0.2.
    assert np.allclose(val[0], [2.0])
    assert np.allclose(val[1], [0.2])

    if benchmark.enabled:
        benchmark(C.run, inputs, num_trials=2, bin_execute=mode)