def test_formats_for_control_specification_for_mechanism_and_function_params(
            self):

        control_spec_list = [
            pnl.CONTROL, pnl.CONTROL_SIGNAL, pnl.CONTROL_PROJECTION,
            pnl.ControlSignal,
            pnl.ControlSignal(), pnl.ControlProjection, "CP_OBJECT",
            pnl.ControlMechanism,
            pnl.ControlMechanism(), (0.3, pnl.CONTROL),
            (0.3, pnl.CONTROL_SIGNAL), (0.3, pnl.CONTROL_PROJECTION),
            (0.3, pnl.ControlSignal), (0.3, pnl.ControlSignal()),
            (0.3, pnl.ControlProjection), (0.3, "CP_OBJECT"),
            (0.3, pnl.ControlMechanism), (0.3, pnl.ControlMechanism())
        ]
        for i, ctl_tuple in enumerate(
                zip(control_spec_list, reversed(control_spec_list))):
            C1, C2 = ctl_tuple

            # These shenanigans are to avoid assigning the same instantiated ControlProjection more than once
            if C1 == 'CP_OBJECT':
                C1 = pnl.ControlProjection()
            elif isinstance(C1, tuple) and C1[1] == 'CP_OBJECT':
                C1 = (C1[0], pnl.ControlProjection())
            if C2 == 'CP_OBJECT':
                C2 = pnl.ControlProjection()
            elif isinstance(C2, tuple) and C2[1] == 'CP_OBJECT':
                C2 = (C2[0], pnl.ControlProjection())

            R = pnl.RecurrentTransferMechanism(noise=C1,
                                               function=pnl.Logistic(gain=C2))
            assert R.parameter_states[pnl.NOISE].mod_afferents[0].name in \
                   'ControlProjection for RecurrentTransferMechanism-{}[noise]'.format(i)
            assert R.parameter_states[pnl.GAIN].mod_afferents[0].name in \
                   'ControlProjection for RecurrentTransferMechanism-{}[gain]'.format(i)
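For orientation, here is a minimal standalone sketch of the 2-item tuple format exercised above; the mechanism name R-SKETCH is illustrative, and parameter_ports is the newer spelling of parameter_states.

import psyneulink as pnl

# Hedged sketch: a 2-item tuple pairs a default parameter value with a control
# specification, so noise starts at 0.5 and is opened to control.
R = pnl.RecurrentTransferMechanism(
    name='R-SKETCH',              # illustrative name, not from the test above
    noise=(0.5, pnl.CONTROL))

# Per the assertions above, the noise ParameterPort now has a ControlProjection
# afferent named after the receiving mechanism and parameter.
print(R.parameter_ports[pnl.NOISE].mod_afferents[0].name)
# expected form: 'ControlProjection for R-SKETCH[noise]'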
Example #2
    def test_deferred_init_default_ModulatoryProjection_names(self):
        LP1 = pnl.LearningProjection()
        LP2 = pnl.LearningProjection()
        assert LP1.name == 'Deferred Init LearningProjection'
        assert LP2.name == 'Deferred Init LearningProjection-1'

        CP1 = pnl.ControlProjection()
        CP2 = pnl.ControlProjection()
        assert CP1.name == 'Deferred Init ControlProjection'
        assert CP2.name == 'Deferred Init ControlProjection-1'

        GP1 = pnl.GatingProjection()
        GP2 = pnl.GatingProjection()
        assert GP1.name == 'Deferred Init GatingProjection'
        assert GP2.name == 'Deferred Init GatingProjection-1'
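    # Hedged addition (not part of the original suite): a sketch of how the
    # deferred-init default name is expected to resolve once the projection is
    # assigned as a parameter's control spec, mirroring the 'CP_OBJECT'
    # handling and the name assertions in the neighboring tests. The name
    # 'R-DEFERRED-SKETCH' is illustrative; 'parameter_ports' is the newer
    # spelling of 'parameter_states'.
    def test_deferred_init_name_resolution_sketch(self):
        cp = pnl.ControlProjection()   # named 'Deferred Init ControlProjection...'
        R = pnl.RecurrentTransferMechanism(name='R-DEFERRED-SKETCH',
                                           noise=(0.3, cp))
        # substring check, as in the tests above
        assert R.parameter_ports[pnl.NOISE].mod_afferents[0].name in \
               'ControlProjection for R-DEFERRED-SKETCH[noise]'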
Example #3
    def test_formats_for_control_specification_for_mechanism_and_function_params(
            self, noise, gain):
        # These shenanigans are to avoid assigning the same instantiated ControlProjection more than once
        if noise == 'CP_OBJECT':
            noise = pnl.ControlProjection()
        elif isinstance(noise, tuple) and noise[1] == 'CP_OBJECT':
            noise = (noise[0], pnl.ControlProjection())
        if gain == 'CP_OBJECT':
            gain = pnl.ControlProjection()
        elif isinstance(gain, tuple) and gain[1] == 'CP_OBJECT':
            gain = (gain[0], pnl.ControlProjection())

        R = pnl.RecurrentTransferMechanism(
            # NOTE: fixed name prevents failures due to registry naming
            # for parallel test runs
            name='R-CONTROL',
            noise=noise,
            function=psyneulink.core.components.functions.transferfunctions.Logistic(
                gain=gain))
        assert R.parameter_ports[pnl.NOISE].mod_afferents[0].name in \
                'ControlProjection for R-CONTROL[noise]'
        assert R.parameter_ports[pnl.GAIN].mod_afferents[0].name in \
                'ControlProjection for R-CONTROL[gain]'
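The method above receives noise and gain as test parameters, but the snippet omits the parametrization that supplies them. Below is a hedged sketch of what such a parametrization could look like, reusing the specification formats from control_spec_list in the first example; the decorator arguments, the standalone-function form, and the omission of instantiated specs (such as pnl.ControlSignal(), left out here to avoid sharing one instance across cases) are assumptions rather than details taken from the test suite.

import pytest
import psyneulink as pnl

control_spec_formats = [
    pnl.CONTROL, pnl.CONTROL_SIGNAL, pnl.CONTROL_PROJECTION,
    pnl.ControlSignal, pnl.ControlProjection, "CP_OBJECT",
    pnl.ControlMechanism,
    (0.3, pnl.CONTROL), (0.3, pnl.CONTROL_SIGNAL), (0.3, "CP_OBJECT"),
]

@pytest.mark.parametrize("gain", control_spec_formats)
@pytest.mark.parametrize("noise", control_spec_formats)
def test_control_spec_formats_sketch(noise, gain):
    # Body as in the method above: swap "CP_OBJECT" placeholders for fresh
    # pnl.ControlProjection() instances, build the mechanism, and assert on
    # the resulting ControlProjection names.
    ...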
Example #4
# Control Parameters
signalSearchRange = np.arange(0.8, 2.0, 0.2)
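# Added commentary (not in the original script): signalSearchRange is the set
# of candidate allocations offered to each ControlSignal below via
# ALLOCATION_SAMPLES; np.arange(0.8, 2.0, 0.2) yields approximately
# [0.8, 1.0, 1.2, 1.4, 1.6, 1.8].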

# Stimulus Mechanisms
Target_Stim = pnl.TransferMechanism(name='Target Stimulus',
                                    function=pnl.Linear(slope=0.3324))
Flanker_Stim = pnl.TransferMechanism(name='Flanker Stimulus',
                                     function=pnl.Linear(slope=0.3545221843))

# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(
    name='Target Representation',
    function=pnl.Linear(slope=(
        1.0,
        pnl.ControlProjection(
            function=pnl.Linear,
            control_signal_params={pnl.ALLOCATION_SAMPLES: signalSearchRange})
    )),
    prefs=mechanism_prefs)
Flanker_Rep = pnl.TransferMechanism(
    name='Flanker Representation',
    function=pnl.Linear(slope=(
        1.0,
        pnl.ControlProjection(
            function=pnl.Linear,
            control_signal_params={pnl.ALLOCATION_SAMPLES: signalSearchRange})
    )),
    prefs=mechanism_prefs)

# Processing Mechanism (Automatic)
Automatic_Component = pnl.TransferMechanism(name='Automatic Component',
Example #5
Target_Stim = pnl.TransferMechanism(
    name='Target Stimulus',
    function=psyneulink.core.components.functions.transferfunctions.Linear(
        slope=0.3324))
Flanker_Stim = pnl.TransferMechanism(
    name='Flanker Stimulus',
    function=psyneulink.core.components.functions.transferfunctions.Linear(
        slope=0.3545221843))

# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(
    name='Target Representation',
    function=psyneulink.core.components.functions.transferfunctions.Linear(
        slope=(
            1.0,
            pnl.ControlProjection(
                function=psyneulink.core.components.functions.transferfunctions.Linear,
                control_signal_params={
                    pnl.ALLOCATION_SAMPLES: signalSearchRange
                }))),
    prefs=mechanism_prefs)
Flanker_Rep = pnl.TransferMechanism(
    name='Flanker Representation',
    function=psyneulink.core.components.functions.transferfunctions.Linear(
        slope=(
            1.0,
            pnl.ControlProjection(
                function=psyneulink.core.components.functions.transferfunctions.Linear,
                control_signal_params={
                    pnl.ALLOCATION_SAMPLES: signalSearchRange
                }))),
    prefs=mechanism_prefs)
Example #6
Target_Stim = pnl.TransferMechanism(
    name='Target Stimulus',
    function=psyneulink.core.components.functions.transferfunctions.Linear(
        slope=0.3324))
Target_Stim.set_log_conditions('value')
Flanker_Stim = pnl.TransferMechanism(
    name='Flanker Stimulus',
    function=psyneulink.core.components.functions.transferfunctions.Linear(
        slope=0.3545))
Flanker_Stim.set_log_conditions('value')

# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(
    name='Target Representation',
    function=psyneulink.core.components.functions.transferfunctions.Linear(
        slope=(1.0,
               pnl.ControlProjection(control_signal_params={
                   pnl.ALLOCATION_SAMPLES: signalSearchRange
               }))),
    prefs={
        pnl.LOG_PREF:
        pnl.PreferenceEntry(pnl.LogCondition.INITIALIZATION,
                            pnl.PreferenceLevel.INSTANCE)
    })
Target_Rep.set_log_conditions('value')  # Log Target_Rep
Target_Rep.set_log_conditions('slope')  # Log Target_Rep
Target_Rep.loggable_items

#log initialization

Target_Rep.log.LogCondition = 2

Flanker_Rep = pnl.TransferMechanism(
Example #7
words_input_layer = pnl.TransferMechanism(
    size=2,
    function=psyneulink.core.components.functions.transferfunctions.Linear,
    name='WORDS_INPUT')

# Specify signalSearchRange for control_signal_params (not sure if needed)
#signalSearchRange = np.array([1.0])#np.arange(1.0,2.1,0.5) # why 0.8 to 2.0 in increments of 0.2 )#

#   Task layer, tasks: ('name the color', 'read the word')
task_layer = pnl.TransferMechanism(
    size=2,
    function=psyneulink.core.components.functions.transferfunctions.Logistic(
        gain=(
            1.0,
            pnl.ControlProjection(  # receiver=response_layer.output_ports[1], 'DECISION_ENERGY'
                # modulation=pnl.ModulationParam.OVERRIDE,  # what to implement here
            ))),
    name='TASK')
task_layer.set_log_conditions('gain')
task_layer.set_log_conditions('value')

task_layer.loggable_items

#   Hidden layer units, colors: ('red','green') words: ('RED','GREEN')
#   Logistic activation function, Gain = 1.0, Bias = -4.0
#should be randomly distributed noise to the net input of each unit (except input unit)
#should have tau = integration_rate = 0.1
colors_hidden_layer = pnl.TransferMechanism(
    size=2,
    function=psyneulink.core.components.functions.transferfunctions.Logistic(
        gain=1.0, x_0=4.0),
Example #8
import numpy as np

import psyneulink as pnl  # the code below uses the pnl. prefix
from psyneulink import *

# TEST GaussianProcessOptimization in context of EVC-Gratton model

# Mechanisms
Input = pnl.TransferMechanism(name='Input', )
Reward = pnl.TransferMechanism(
    output_ports=[pnl.RESULT, pnl.MEAN, pnl.VARIANCE], name='Reward')
Decision = pnl.DDM(
    function=pnl.DriftDiffusionAnalytical(
        drift_rate=(
            1.0,
            pnl.ControlProjection(
                function=pnl.Linear,
                control_signal_params={
                    pnl.ALLOCATION_SAMPLES: np.arange(0.1, 1.01, 0.3)
                },
            ),
        ),
        threshold=(
            1.0,
            pnl.ControlProjection(
                function=pnl.Linear,
                control_signal_params={
                    pnl.ALLOCATION_SAMPLES: np.arange(0.1, 1.01, 0.3)
                },
            ),
        ),
        noise=0.5,
Example #9
    def test_evc(self):
        # Mechanisms
        Input = pnl.TransferMechanism(name='Input')
        reward = pnl.TransferMechanism(
            output_states=[pnl.RESULT, pnl.OUTPUT_MEAN, pnl.OUTPUT_VARIANCE],
            name='reward')
        Decision = pnl.DDM(function=pnl.DriftDiffusionAnalytical(
            drift_rate=(1.0,
                        pnl.ControlProjection(function=pnl.Linear,
                                              control_signal_params={
                                                  pnl.ALLOCATION_SAMPLES:
                                                  np.arange(0.1, 1.01, 0.3)
                                              })),
            threshold=(1.0,
                       pnl.ControlProjection(function=pnl.Linear,
                                             control_signal_params={
                                                 pnl.ALLOCATION_SAMPLES:
                                                 np.arange(0.1, 1.01, 0.3)
                                             })),
            noise=0.5,
            starting_point=0,
            t0=0.45),
                           output_states=[
                               pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
                               pnl.PROBABILITY_UPPER_THRESHOLD
                           ],
                           name='Decision')

        comp = pnl.Composition(name="evc")
        comp.add_node(reward, required_roles=[pnl.NodeRole.OUTPUT])
        comp.add_node(Decision, required_roles=[pnl.NodeRole.OUTPUT])
        task_execution_pathway = [Input, pnl.IDENTITY_MATRIX, Decision]
        comp.add_linear_processing_pathway(task_execution_pathway)

        comp.add_controller(controller=pnl.OptimizationControlMechanism(
            agent_rep=comp,
            features=[Input.input_state, reward.input_state],
            feature_function=pnl.AdaptiveIntegrator(rate=0.5),
            objective_mechanism=pnl.ObjectiveMechanism(
                function=pnl.LinearCombination(operation=pnl.PRODUCT),
                monitor=[
                    reward, Decision.output_states[
                        pnl.PROBABILITY_UPPER_THRESHOLD],
                    (Decision.output_states[pnl.RESPONSE_TIME], -1, 1)
                ]),
            function=pnl.GridSearch(),
            control_signals=[("drift_rate", Decision), ("threshold",
                                                        Decision)]))

        comp.enable_controller = True

        comp._analyze_graph()

        stim_list_dict = {Input: [0.5, 0.123], reward: [20, 20]}

        comp.run(inputs=stim_list_dict, retain_old_simulation_data=True)

        # Note: Removed decision variable OutputState from simulation results because sign is chosen randomly
        expected_sim_results_array = [[[10.], [10.0], [0.0], [0.48999867],
                                       [0.50499983]],
                                      [[10.], [10.0], [0.0], [1.08965888],
                                       [0.51998934]],
                                      [[10.], [10.0], [0.0], [2.40680493],
                                       [0.53494295]],
                                      [[10.], [10.0], [0.0], [4.43671978],
                                       [0.549834]],
                                      [[10.], [10.0], [0.0], [0.48997868],
                                       [0.51998934]],
                                      [[10.], [10.0], [0.0], [1.08459402],
                                       [0.57932425]],
                                      [[10.], [10.0], [0.0], [2.36033556],
                                       [0.63645254]],
                                      [[10.], [10.0], [0.0], [4.24948962],
                                       [0.68997448]],
                                      [[10.], [10.0], [0.0], [0.48993479],
                                       [0.53494295]],
                                      [[10.], [10.0], [0.0], [1.07378304],
                                       [0.63645254]],
                                      [[10.], [10.0], [0.0], [2.26686573],
                                       [0.72710822]],
                                      [[10.], [10.0], [0.0], [3.90353015],
                                       [0.80218389]],
                                      [[10.], [10.0], [0.0], [0.4898672],
                                       [0.549834]],
                                      [[10.], [10.0], [0.0], [1.05791834],
                                       [0.68997448]],
                                      [[10.], [10.0], [0.0], [2.14222978],
                                       [0.80218389]],
                                      [[10.], [10.0], [0.0], [3.49637662],
                                       [0.88079708]],
                                      [[15.], [15.0], [0.0], [0.48999926],
                                       [0.50372993]],
                                      [[15.], [15.0], [0.0], [1.08981011],
                                       [0.51491557]],
                                      [[15.], [15.0], [0.0], [2.40822035],
                                       [0.52608629]],
                                      [[15.], [15.0], [0.0], [4.44259627],
                                       [0.53723096]],
                                      [[15.], [15.0], [0.0], [0.48998813],
                                       [0.51491557]],
                                      [[15.], [15.0], [0.0], [1.0869779],
                                       [0.55939819]],
                                      [[15.], [15.0], [0.0], [2.38198336],
                                       [0.60294711]],
                                      [[15.], [15.0], [0.0], [4.33535807],
                                       [0.64492386]],
                                      [[15.], [15.0], [0.0], [0.48996368],
                                       [0.52608629]],
                                      [[15.], [15.0], [0.0], [1.08085171],
                                       [0.60294711]],
                                      [[15.], [15.0], [0.0], [2.32712843],
                                       [0.67504223]],
                                      [[15.], [15.0], [0.0], [4.1221271],
                                       [0.7396981]],
                                      [[15.], [15.0], [0.0], [0.48992596],
                                       [0.53723096]],
                                      [[15.], [15.0], [0.0], [1.07165729],
                                       [0.64492386]],
                                      [[15.], [15.0], [0.0], [2.24934228],
                                       [0.7396981]],
                                      [[15.], [15.0], [0.0], [3.84279648],
                                       [0.81637827]]]

        for simulation in range(len(expected_sim_results_array)):
            assert np.allclose(
                expected_sim_results_array[simulation],
                # Note: Skip decision variable OutputState
                comp.simulation_results[simulation][0:3] +
                comp.simulation_results[simulation][4:6])

        expected_results_array = [[[20.0], [20.0], [0.0], [1.0],
                                   [2.378055160151634], [0.9820137900379085]],
                                  [[20.0], [20.0], [0.0], [0.1],
                                   [0.48999967725112503],
                                   [0.5024599801509442]]]

        for trial in range(len(expected_results_array)):
            np.testing.assert_allclose(
                comp.results[trial],
                expected_results_array[trial],
                atol=1e-08,
                err_msg='Failed on expected_output[{0}]'.format(trial))
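As an aside on the shape of expected_sim_results_array above: GridSearch evaluates the Cartesian product of the two control signals' ALLOCATION_SAMPLES, so each of the two input trials is simulated once per grid point. A small sanity check on that count (added here, not part of the test):

import numpy as np

samples = np.arange(0.1, 1.01, 0.3)   # the ALLOCATION_SAMPLES used above: [0.1, 0.4, 0.7, 1.0]
grid_points = len(samples) ** 2       # drift_rate allocations x threshold allocations
print(grid_points)                    # 16 simulations per trial
print(2 * grid_points)                # 32 rows in expected_sim_results_array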
Example #10
#   Linear input units, colors: ('red', 'green'), words: ('RED','GREEN')
colors_input_layer = pnl.TransferMechanism(size=2,
                                           function=pnl.Linear,
                                           name='COLORS_INPUT')

words_input_layer = pnl.TransferMechanism(size=2,
                                          function=pnl.Linear,
                                          name='WORDS_INPUT')

# Specify signalSearchRange for control_signal_params (not sure if needed)
#signalSearchRange = np.array([1.0])#np.arange(1.0,2.1,0.5) # why 0.8 to 2.0 in increments of 0.2 )#

#   Task layer, tasks: ('name the color', 'read the word')
task_layer = pnl.TransferMechanism(
    size=2,
    function=pnl.Logistic(gain=(
        1.0,
        pnl.ControlProjection(  # receiver=response_layer.output_states[1], 'DECISION_ENERGY'
            # modulation=pnl.ModulationParam.OVERRIDE,  # what to implement here
        ))),
    name='TASK')
task_layer.set_log_conditions('gain')
task_layer.set_log_conditions('value')

task_layer.loggable_items

#   Hidden layer units, colors: ('red','green') words: ('RED','GREEN')
#   Logistic activation function, Gain = 1.0, Bias = -4.0
#should be randomly distributed noise to the net input of each unit (except input unit)
#should have tau = integration_rate = 0.1
colors_hidden_layer = pnl.TransferMechanism(size=2,
                                            function=pnl.Logistic(gain=1.0, bias=4.0),
                                            integrator_mode=True,
                                          #  noise=pnl.NormalDist(mean=0.0, standard_dev=.005).function,