Exemplo n.º 1
0
    def test_configure_learning(self):
        """Enabling learning before configure_learning() must warn (there is no
        LearningMechanism yet); after configuring, the system runs and the
        results match the expected trajectory."""
        o = pnl.TransferMechanism()
        m = pnl.ContrastiveHebbianMechanism(input_size=2,
                                            hidden_size=0,
                                            target_size=2,
                                            mode=pnl.SIMPLE_HEBBIAN,
                                            separated=False,
                                            matrix=[[0, -.5], [-.5, 0]])

        # Use pytest.warns(match=...) instead of manually scanning the warning
        # record — same check, less code, and consistent with the
        # Composition-based version of this test elsewhere in the file.
        regexp = r"Learning cannot be enabled for .* because it has no LearningMechanism"
        with pytest.warns(UserWarning, match=regexp):
            m.learning_enabled = True

        m.configure_learning()
        m.reinitialize_when = pnl.Never()
        s = pnl.sys(m, o)

        # o runs only after m has finished settling.
        ms = pnl.Scheduler(system=s)
        ms.add_condition(o, pnl.WhenFinished(m))
        s.scheduler_processing = ms
        results = s.run(inputs=[2, 2], num_trials=4)

        np.testing.assert_allclose(
            results,
            [[[2.671875]], [[2.84093837]], [[3.0510183]], [[3.35234623]]])
    def test_configure_learning(self):
        """Enabling learning before configure_learning() must warn; once
        configured, the Composition learns and reproduces the expected
        per-trial results."""
        out_mech = pnl.TransferMechanism()
        chm = pnl.ContrastiveHebbianMechanism(input_size=2,
                                              hidden_size=0,
                                              target_size=2,
                                              mode=pnl.SIMPLE_HEBBIAN,
                                              separated=False,
                                              matrix=[[0, -.5], [-.5, 0]])

        # Premature enable: no LearningMechanism exists yet, so a warning is due.
        regexp = r"Learning cannot be enabled for .* because it has no LearningMechanism"
        with pytest.warns(UserWarning, match=regexp):
            chm.learning_enabled = True

        chm.configure_learning()
        chm.reset_stateful_function_when = pnl.Never()

        comp = pnl.Composition()
        comp.add_linear_processing_pathway([chm, out_mech])
        # The output mechanism fires only once chm has finished settling.
        comp.scheduler.add_condition(out_mech, pnl.WhenFinished(chm))
        comp.learn(inputs={chm: [2, 2]}, num_trials=4)

        trial_results = comp.parameters.results.get(comp)
        expected = [[[2.671875]], [[2.84093837]], [[3.0510183]], [[3.35234623]]]
        np.testing.assert_allclose(trial_results, expected)
Exemplo n.º 3
0
# Two halves of the recurrent task <-> word pathway: forward (task input ->
# task layer -> word hidden) and feedback (word hidden -> task layer).
# The mechanisms and projection matrices referenced here (task_input_layer,
# task_layer, task_word_weights, ...) are defined earlier in the file.
task_word_response_process_1 = pnl.Pathway(pathway=[
    task_input_layer, task_layer, task_word_weights, words_hidden_layer
])

task_word_response_process_2 = pnl.Pathway(
    pathway=[words_hidden_layer, word_task_weights, task_layer])

# Create Composition --------------------------------------------------------------------------------------------------
# Assemble all forward and feedback pathways into a single bidirectional model.
Bidirectional_Stroop = pnl.Composition(
    pathways=[
        color_response_process_1, word_response_process_1,
        task_color_response_process_1, task_word_response_process_1,
        color_response_process_2, word_response_process_2,
        task_color_response_process_2, task_word_response_process_2
    ],
    reinitialize_mechanisms_when=pnl.Never(),  # never reset mechanism state between trials
    name='Bidirectional Stroop Model')

# One trial with no color/word input and the second task unit active.
input_dict = {
    colors_input_layer: [0, 0, 0],
    words_input_layer: [0, 0, 0],
    task_input_layer: [0, 1]
}
print("\n\n\n\n")
print(Bidirectional_Stroop.run(inputs=input_dict))

# Dump every mechanism's output values for this composition's context.
for node in Bidirectional_Stroop.mechanisms:
    print(node.name, " Value: ", node.get_output_values(Bidirectional_Stroop))

# # LOGGING:
colors_hidden_layer.set_log_conditions('value')
                             function=pnl.FitzHughNagumoIntegrator(
                                 name='FitzHughNagumoIntegrator Function-0',
                                 d_v=1,
                                 initial_v=-1,
                                 initializer=[[0]],
                                 default_variable=[[0]]))
# Leaky-integrator readout node (AdaptiveIntegrator, rate 0.5).
im = pnl.IntegratorMechanism(name='im',
                             function=pnl.AdaptiveIntegrator(
                                 initializer=[[0]],
                                 rate=0.5,
                                 default_variable=[[0]]))

comp.add_node(fn)
comp.add_node(im)

# 1x1 identity projection carrying fn's output into im.
comp.add_projection(projection=pnl.MappingProjection(
    name='MappingProjection from fn[OutputPort-0] to im[InputPort-0]',
    function=pnl.LinearMatrix(matrix=[[1.0]], default_variable=[-1.0])),
                    sender=fn,
                    receiver=im)

# fn runs on every pass; im runs on every 20th fn call, but only once fn has
# executed at least 1600 times.
# NOTE(review): the counts are passed as floats (20.0 / 1600.0) where
# EveryNCalls/AfterNCalls conventionally take ints — confirm this is intended.
comp.scheduler.add_condition(fn, pnl.Always())
comp.scheduler.add_condition(
    im, pnl.All(pnl.EveryNCalls(fn, 20.0), pnl.AfterNCalls(fn, 1600.0)))

# A trial ends after 2000 fn executions; the run itself never self-terminates.
comp.scheduler.termination_conds = {
    pnl.TimeScale.RUN: pnl.Never(),
    pnl.TimeScale.TRIAL: pnl.AfterNCalls(fn, 2000)
}
comp.show_graph()
Exemplo n.º 5
0
# FitzHugh-Nagumo node driven by the `fhn` function defined earlier in the file.
fn = pnl.IntegratorMechanism(name="fn", function=fhn)

comp = pnl.Composition(name="comp")

im = pnl.IntegratorMechanism(name="im")  # only used to demonstrate conditions
comp.add_linear_processing_pathway([fn, im])
# Absolute-time scheduling: fn fires every 0.05 ms of simulated time; im fires
# every 1 ms, starting at 80 ms.
comp.scheduler.add_condition_set({
    fn:
    pnl.TimeInterval(repeat=0.05, unit="ms"),
    im:
    pnl.TimeInterval(start=80, repeat=1, unit="ms"),
})

comp.termination_processing = {
    pnl.TimeScale.RUN: pnl.Never(
    ),  # default, "Never" for early termination - ends when all trials finished
    pnl.TimeScale.TRIAL: pnl.TimeTermination(100, unit="ms"),  # each trial simulates 100 ms
}

print("Running the SimpleFN model...")

# EXACT_TIME mode honors the TimeInterval conditions on an absolute-time basis.
comp.run(inputs={fn: 0},
         log=True,
         scheduling_mode=pnl.SchedulingMode.EXACT_TIME)
print("\n".join([
    "{:~}: {}".format(
        comp.scheduler.execution_timestamps[comp.default_execution_id]
        [i].absolute,
        {node.name
         for node in time_step},
)
# Projection from TASK's result into the word hidden layer; the 2x2 matrix
# maps TASK's two outputs onto word_hidden's two inputs (only the second row
# contributes — presumably the "word" task unit; confirm against the model).
Stroop_model.add_projection(
    projection=pnl.MappingProjection(
        name="MappingProjection_from_TASK_RESULT__to_word_hidden_InputPort_0",
        function=pnl.LinearMatrix(default_variable=[0.5, 0.5],
                                  matrix=[[0.0, 0.0], [4.0, 4.0]]),
    ),
    sender=TASK,
    receiver=word_hidden,
)
Stroop_model.add_controller(CONTROL)

# Hidden layers update only on every 10th TASK execution (letting TASK settle
# first); OUTPUT waits for both hidden layers, and DECISION waits for OUTPUT.
Stroop_model.scheduler.add_condition(word_hidden,
                                     pnl.EveryNCalls(dependency=TASK, n=10))
Stroop_model.scheduler.add_condition(color_hidden,
                                     pnl.EveryNCalls(dependency=TASK, n=10))
Stroop_model.scheduler.add_condition(
    OUTPUT,
    pnl.All(
        pnl.EveryNCalls(dependency=color_hidden, n=1),
        pnl.EveryNCalls(dependency=word_hidden, n=1),
    ),
)
Stroop_model.scheduler.add_condition(DECISION,
                                     pnl.EveryNCalls(dependency=OUTPUT, n=1))

# A state update ends when every node has run; the sequence never self-terminates.
Stroop_model.scheduler.termination_conds = {
    pnl.TimeScale.ENVIRONMENT_SEQUENCE: pnl.Never(),
    pnl.TimeScale.ENVIRONMENT_STATE_UPDATE: pnl.AllHaveRun(),
}
    def test_default_lc_control_mechanism(self, benchmark, mode):
        """LC gain modulation (System API): the LC's output on trial n becomes
        the modulatory gain of A and B on trial n + 1, while their base
        (user-specified) gain is never overwritten."""
        base_level = 1.0
        scaling = 0.5
        lc_start_value = 2.0
        configured_gain = 1.0

        A = pnl.TransferMechanism(function=pnl.Logistic(gain=configured_gain), name='A')
        B = pnl.TransferMechanism(function=pnl.Logistic(gain=configured_gain), name='B')

        LC = pnl.LCControlMechanism(
            modulated_mechanisms=[A, B],
            base_level_gain=base_level,
            scaling_factor_gain=scaling,
            objective_mechanism=pnl.ObjectiveMechanism(
                function=pnl.Linear,
                monitored_output_states=[B],
                name='LC ObjectiveMechanism'
            )
        )
        # Seed LC's outputs so the first trial's modulatory gain is known.
        for out_state in LC.output_states:
            out_state.value *= lc_start_value

        P = pnl.Process(pathway=[A, B, LC])
        S = pnl.System(processes=[P])
        LC.reinitialize_when = pnl.Never()

        # Per-trial traces filled in by the callback below.
        lc_outputs = []
        a_mod_gains = []
        a_base_gains = []
        b_mod_gains = []
        b_base_gains = []

        def report_trial():
            lc_outputs.append(LC.output_states[0].value[0])
            a_mod_gains.append(A.mod_gain)
            b_mod_gains.append(B.mod_gain)
            a_base_gains.append(A.function_object.gain)
            b_base_gains.append(B.function_object.gain)

        benchmark(S.run, inputs={A: [[1.0], [1.0], [1.0], [1.0], [1.0]]},
                  call_after_trial=report_trial)

        # (1) trial 0 must use the hard-coded LC starting value
        assert a_mod_gains[0] == lc_start_value

        # (2) the base gain stays at the user-specified value on every trial
        for trial in range(5):
            assert a_base_gains[trial] == configured_gain
            assert b_base_gains[trial] == configured_gain

        # (3) LC output on trial n becomes the modulatory gain on trial n + 1
        assert np.allclose(a_mod_gains[1:], lc_outputs[:-1])

        # (4) identical mechanisms A and B always share the same gain
        assert np.allclose(a_mod_gains, b_mod_gains)
Exemplo n.º 8
0
import psyneulink as pnl

# Minimal single-node composition: one FitzHugh-Nagumo integrator, scheduled
# to run every pass until all nodes have executed, then rendered as a graph.
comp = pnl.Composition(name='comp')

fn = pnl.IntegratorMechanism(
    name='fn',
    function=pnl.FitzHughNagumoIntegrator(
        name='FitzHughNagumoIntegrator Function-0',
        d_v=1,
        initial_v=-1,
        initializer=[1.0],
        default_variable=[[0]],
    ),
)

comp.add_node(fn)

comp.scheduler.add_condition(fn, pnl.Always())

comp.scheduler.termination_conds = {
    pnl.TimeScale.RUN: pnl.Never(),
    pnl.TimeScale.TRIAL: pnl.AllHaveRun(),
}
comp.show_graph()
Exemplo n.º 9
0
    def test_default_lc_control_mechanism(self):
        """Composition-API version of the LC gain-modulation test.

        Verifies that (1) the seeded LC starting value is A/B's first
        modulatory gain, (2) the user-specified base gain is never changed,
        (3) LC output on trial n becomes the gain on trial n + 1, and
        (4) the identical mechanisms A and B always share the same gain.
        """
        G = 1.0                      # base_level_gain for the LC
        k = 0.5                      # scaling_factor_gain for the LC
        starting_value_LC = 2.0      # hard-coded seed for LC's outputs
        user_specified_gain = 1.0    # base gain of A and B

        A = pnl.TransferMechanism(
            function=pnl.Logistic(gain=user_specified_gain), name='A')
        B = pnl.TransferMechanism(
            function=pnl.Logistic(gain=user_specified_gain), name='B')
        # B.output_states[0].value *= 0.0  # Reset after init | Doesn't matter here b/c default var = zero, no intercept

        LC = pnl.LCControlMechanism(modulated_mechanisms=[A, B],
                                    base_level_gain=G,
                                    scaling_factor_gain=k,
                                    objective_mechanism=pnl.ObjectiveMechanism(
                                        function=pnl.Linear,
                                        monitor=[B],
                                        name='LC ObjectiveMechanism'))
        # Seed LC's outputs so the first trial's modulatory gain is known.
        for output_state in LC.output_states:
            output_state.value *= starting_value_LC

        path = [A, B, LC]
        S = pnl.Composition()
        S.add_node(A, required_roles=pnl.NodeRole.INPUT)
        S.add_linear_processing_pathway(pathway=path)
        S.add_node(LC, required_roles=pnl.NodeRole.OUTPUT)
        LC.reinitialize_when = pnl.Never()

        # Per-trial traces collected by report_trial.
        gain_created_by_LC_output_state_1 = []
        mod_gain_assigned_to_A = []
        base_gain_assigned_to_A = []
        mod_gain_assigned_to_B = []
        base_gain_assigned_to_B = []
        A_value = []
        B_value = []
        LC_value = []

        def report_trial(system):
            # Record LC output plus the gains/values of A and B for this
            # execution context.
            gain_created_by_LC_output_state_1.append(
                LC.output_state.parameters.value.get(system)[0])
            mod_gain_assigned_to_A.append(A.get_mod_gain(system))
            mod_gain_assigned_to_B.append(B.get_mod_gain(system))
            base_gain_assigned_to_A.append(A.function.parameters.gain.get())
            base_gain_assigned_to_B.append(B.function.parameters.gain.get())
            A_value.append(A.parameters.value.get(system))
            B_value.append(B.parameters.value.get(system))
            LC_value.append(LC.parameters.value.get(system))

        result = S.run(inputs={A: [[1.0], [1.0], [1.0], [1.0], [1.0]]},
                       call_after_trial=functools.partial(report_trial, S))

        # (1) First value of gain in mechanisms A and B must be whatever we hardcoded for LC starting value
        assert mod_gain_assigned_to_A[0] == starting_value_LC

        # (2) _gain should always be set to user-specified value
        for i in range(5):
            assert base_gain_assigned_to_A[i] == user_specified_gain
            assert base_gain_assigned_to_B[i] == user_specified_gain

        # (3) LC output on trial n becomes gain of A and B on trial n + 1
        assert np.allclose(mod_gain_assigned_to_A[1:],
                           gain_created_by_LC_output_state_1[0:-1])

        # (4) mechanisms A and B should always have the same gain values (b/c they are identical)
        assert np.allclose(mod_gain_assigned_to_A, mod_gain_assigned_to_B)
Exemplo n.º 10
0
    def test_lc_control_mechanism_as_controller(self, benchmark, mode):
        """Same LC gain-modulation invariants as the default-LC test, but with
        the LC installed as the Composition's controller rather than as an
        ordinary node in the pathway."""
        G = 1.0                      # base_level_gain for the LC
        k = 0.5                      # scaling_factor_gain for the LC
        starting_value_LC = 2.0      # seed for LC's output ports
        user_specified_gain = 1.0    # base gain of A and B

        A = pnl.TransferMechanism(
            function=psyneulink.core.components.functions.transferfunctions.
            Logistic(gain=user_specified_gain),
            name='A')
        B = pnl.TransferMechanism(
            function=psyneulink.core.components.functions.transferfunctions.
            Logistic(gain=user_specified_gain),
            name='B')
        C = pnl.Composition()
        LC = pnl.LCControlMechanism(modulated_mechanisms=[A, B],
                                    base_level_gain=G,
                                    scaling_factor_gain=k,
                                    objective_mechanism=pnl.ObjectiveMechanism(
                                        function=psyneulink.core.components.
                                        functions.transferfunctions.Linear,
                                        monitor=[B],
                                        name='LC ObjectiveMechanism'))
        C.add_linear_processing_pathway([A, B])
        C.add_controller(LC)

        # Seed LC's output ports in C's context so the first trial's
        # modulatory gain is the hard-coded starting value.
        for output_port in LC.output_ports:
            output_port.parameters.value.set(output_port.value *
                                             starting_value_LC,
                                             C,
                                             override=True)

        LC.reset_stateful_function_when = pnl.Never()

        # Per-trial traces collected by report_trial.
        gain_created_by_LC_output_port_1 = []
        mod_gain_assigned_to_A = []
        base_gain_assigned_to_A = []
        mod_gain_assigned_to_B = []
        base_gain_assigned_to_B = []

        def report_trial(composition):
            # Resolve the composition to an execution context before reading
            # context-scoped parameter values.
            from psyneulink import parse_context
            context = parse_context(composition)
            gain_created_by_LC_output_port_1.append(
                LC.output_ports[0].parameters.value.get(context))
            mod_gain_assigned_to_A.append([A.get_mod_gain(composition)])
            mod_gain_assigned_to_B.append([B.get_mod_gain(composition)])
            base_gain_assigned_to_A.append(A.function.gain)
            base_gain_assigned_to_B.append(B.function.gain)

        C._analyze_graph()
        benchmark(C.run,
                  inputs={A: [[1.0], [1.0], [1.0], [1.0], [1.0]]},
                  call_after_trial=functools.partial(report_trial, C))

        # (1) First value of gain in mechanisms A and B must be whatever we hardcoded for LC starting value
        assert mod_gain_assigned_to_A[0] == [starting_value_LC]

        # (2) _gain should always be set to user-specified value
        for i in range(5):
            assert base_gain_assigned_to_A[i] == user_specified_gain
            assert base_gain_assigned_to_B[i] == user_specified_gain

        # (3) LC output on trial n becomes gain of A and B on trial n + 1
        assert np.allclose(mod_gain_assigned_to_A[1:],
                           gain_created_by_LC_output_port_1[0:-1])

        # (4) mechanisms A and B should always have the same gain values (b/c they are identical)
        assert np.allclose(mod_gain_assigned_to_A, mod_gain_assigned_to_B)
Exemplo n.º 11
0
    fn,
    pnl.TimeInterval(
        repeat="50 microsecond",
        start=None,
        end=None,
        unit="ms",
        start_inclusive=True,
        end_inclusive=True,
    ),
)
# im fires every 1 ms of simulated time, starting at 80 ms (both endpoints
# inclusive).
comp.scheduler.add_condition(
    im,
    pnl.TimeInterval(
        repeat="1 millisecond",
        start="80 millisecond",
        end=None,
        unit="ms",
        start_inclusive=True,
        end_inclusive=True,
    ),
)

# Each state update stops at 100 ms (inclusive); the overall sequence never
# self-terminates.
comp.scheduler.termination_conds = {
    pnl.TimeScale.ENVIRONMENT_SEQUENCE:
    pnl.Never(),
    pnl.TimeScale.ENVIRONMENT_STATE_UPDATE:
    pnl.TimeTermination(t="100 millisecond",
                        inclusive=True,
                        unit="millisecond"),
}