def test_formats_for_control_specification_for_mechanism_and_function_params(
            self):

        clear_registry()

        control_spec_list = [
            pnl.CONTROL, pnl.CONTROL_SIGNAL, pnl.CONTROL_PROJECTION,
            pnl.ControlSignal,
            pnl.ControlSignal(), pnl.ControlProjection, "CP_OBJECT",
            pnl.ControlMechanism,
            pnl.ControlMechanism(), (0.3, pnl.CONTROL),
            (0.3, pnl.CONTROL_SIGNAL), (0.3, pnl.CONTROL_PROJECTION),
            (0.3, pnl.ControlSignal), (0.3, pnl.ControlSignal()),
            (0.3, pnl.ControlProjection), (0.3, "CP_OBJECT"),
            (0.3, pnl.ControlMechanism), (0.3, pnl.ControlMechanism())
        ]
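        # Each entry above is an accepted control specification: a keyword
        # (CONTROL, CONTROL_SIGNAL, CONTROL_PROJECTION), a ControlSignal,
        # ControlProjection or ControlMechanism class or instance, or a
        # (value, control spec) tuple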
        for i, ctl_tuple in enumerate(
                zip(control_spec_list, reversed(control_spec_list))):
            C1, C2 = ctl_tuple

            # These shenanigans avoid assigning the same instantiated ControlProjection more than once
            if C1 == 'CP_OBJECT':
                C1 = pnl.ControlProjection()
            elif isinstance(C1, tuple) and C1[1] == 'CP_OBJECT':
                C1 = (C1[0], pnl.ControlProjection())
            if C2 == 'CP_OBJECT':
                C2 = pnl.ControlProjection()
            elif isinstance(C2, tuple) and C2[1] == 'CP_OBJECT':
                C2 = (C2[0], pnl.ControlProjection())

            R = pnl.RecurrentTransferMechanism(noise=C1,
                                               function=pnl.Logistic(gain=C2))
            assert R.parameter_states[pnl.NOISE].mod_afferents[0].name in \
                   'ControlProjection for RecurrentTransferMechanism-{}[noise]'.format(i)
            assert R.parameter_states[pnl.GAIN].mod_afferents[0].name in \
                   'ControlProjection for RecurrentTransferMechanism-{}[gain]'.format(i)
Example #2
    def test_control_modulation(self):
        Tx = pnl.TransferMechanism(name='Tx')
        Ty = pnl.TransferMechanism(name='Ty')
        Tz = pnl.TransferMechanism(name='Tz')
        C = pnl.ControlMechanism(
            # function=pnl.Linear,
            default_variable=[1],
            monitor_for_control=Ty,
            objective_mechanism=True,
            control_signals=pnl.ControlSignal(modulation=pnl.OVERRIDE,
                                              modulates=(pnl.SLOPE, Tz)))
        comp = pnl.Composition(pathways=[[Tx, Tz], [Ty, C]])
        # comp.show_graph()

        assert Tz.parameter_ports[pnl.SLOPE].mod_afferents[0].sender.owner == C
        result = comp.run(inputs={Tx: [1, 1], Ty: [4, 4]})
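        # OVERRIDE modulation replaces Tz's slope with the ControlSignal's value
        # (driven by Ty's input of 4), so Tz outputs 1 * 4 = 4 (asserted below)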
        assert comp.results == [[[4.], [4.]], [[4.], [4.]]]
Example #3
    def test_control_modulation(self):
        Tx = pnl.TransferMechanism(name='Tx')
        Ty = pnl.TransferMechanism(name='Ty')
        Tz = pnl.TransferMechanism(name='Tz')
        C = pnl.ControlMechanism(
                # function=pnl.Linear,
                default_variable=[1],
                monitor_for_control=Ty,
                control_signals=pnl.ControlSignal(modulation=pnl.OVERRIDE,
                                                  projections=(pnl.SLOPE, Tz)))
        P1 = pnl.Process(pathway=[Tx, Tz])
        P2 = pnl.Process(pathway=[Ty, C])
        S = pnl.System(processes=[P1, P2])

        assert Tz.parameter_states[pnl.SLOPE].mod_afferents[0].sender.owner == C
        result = S.run(inputs={Tx: [1, 1], Ty: [4, 4]})
        assert result == [[[4.], [4.]], [[4.], [4.]]]
Example #4
    def test_grid_search_random_selection(self):
        A = pnl.ProcessingMechanism(name='A')

        A.log.set_log_conditions(items="mod_slope")
        B = pnl.ProcessingMechanism(name='B', function=pnl.Logistic())

        comp = pnl.Composition(name='comp')
        comp.add_linear_processing_pathway([A, B])

        search_range = pnl.SampleSpec(start=15., stop=35., step=5)
        control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, A)],
            function=pnl.Linear,
            variable=1.0,
            allocation_samples=search_range,
            intensity_cost_function=pnl.Linear(slope=0.))
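        # Zero-slope intensity cost keeps the control cost at 0, so the net
        # outcome reduces to B's monitored output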

        objective_mech = pnl.ObjectiveMechanism(monitor=[B])
        ocm = pnl.OptimizationControlMechanism(
            agent_rep=comp,
            features=[A.input_state],
            objective_mechanism=objective_mech,
            function=pnl.GridSearch(select_randomly_from_optimal_values=True),
            control_signals=[control_signal])

        comp.add_controller(ocm)

        inputs = {A: [[[1.0]]]}

        comp.run(inputs=inputs, num_trials=10, execution_id='outer_comp')

        log_arr = A.log.nparray_dictionary()

        # control signal value (mod slope) is chosen randomly from all of the control signal values
        # that correspond to a net outcome of 1
        assert np.allclose([[1.], [15.], [15.], [20.], [20.], [15.], [20.],
                            [25.], [15.], [35.]],
                           log_arr['outer_comp']['mod_slope'])
Example #5
    def test_model_based_num_estimates(self):

        A = pnl.ProcessingMechanism(name='A')
        B = pnl.ProcessingMechanism(name='B',
                                    function=pnl.SimpleIntegrator(rate=1))

        comp = pnl.Composition(name='comp')
        comp.add_linear_processing_pathway([A, B])

        search_range = pnl.SampleSpec(start=0.25, stop=0.75, step=0.25)
        control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, A)],
            function=pnl.Linear,
            variable=1.0,
            allocation_samples=search_range,
            intensity_cost_function=pnl.Linear(slope=0.))

        objective_mech = pnl.ObjectiveMechanism(monitor=[B])
        ocm = pnl.OptimizationControlMechanism(
            agent_rep=comp,
            features=[A.input_state],
            objective_mechanism=objective_mech,
            function=pnl.GridSearch(),
            num_estimates=5,
            control_signals=[control_signal])

        comp.add_controller(ocm)

        inputs = {A: [[[1.0]]]}

        comp.run(inputs=inputs, num_trials=2)

        assert np.allclose(
            comp.simulation_results,
            [[np.array([2.25])], [np.array([3.5])], [np.array([4.75])],
             [np.array([3.])], [np.array([4.25])], [np.array([5.5])]])
        assert np.allclose(comp.results,
                           [[np.array([1.])], [np.array([1.75])]])
Example #6
    def test_model_based_ocm_before(self, benchmark, mode):

        A = pnl.ProcessingMechanism(name='A')
        B = pnl.ProcessingMechanism(name='B')

        comp = pnl.Composition(name='comp', controller_mode=pnl.BEFORE)
        comp.add_linear_processing_pathway([A, B])

        search_range = pnl.SampleSpec(start=0.25, stop=0.75, step=0.25)
        control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, A)],
            function=pnl.Linear,
            variable=1.0,
            allocation_samples=search_range,
            intensity_cost_function=pnl.Linear(slope=0.))

        objective_mech = pnl.ObjectiveMechanism(monitor=[B])
        ocm = pnl.OptimizationControlMechanism(
            agent_rep=comp,
            features=[A.input_state],
            objective_mechanism=objective_mech,
            function=pnl.GridSearch(),
            control_signals=[control_signal])
        # objective_mech.log.set_log_conditions(pnl.OUTCOME)

        comp.add_controller(ocm)

        inputs = {A: [[[1.0]], [[2.0]], [[3.0]]]}

        comp.run(inputs=inputs, bin_execute=mode)

        # objective_mech.log.print_entries(pnl.OUTCOME)
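        # GridSearch maximizes B's output, so the largest sampled slope (0.75) is
        # selected; with controller_mode=BEFORE it is applied to every trial,
        # giving 1*0.75, 2*0.75 and 3*0.75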
        assert np.allclose(
            comp.results,
            [[np.array([0.75])], [np.array([1.5])], [np.array([2.25])]])
        benchmark(comp.run, inputs, bin_execute=mode)
Example #7
TargetAutomatic_pathway = [Target_Stim, Automatic_Component, Decision]
Umemoto_comp.add_linear_processing_pathway(TargetAutomatic_pathway)

FlankerAutomatic_pathway = [Distractor_Stim, Automatic_Component, Decision]
Umemoto_comp.add_linear_processing_pathway(FlankerAutomatic_pathway)

Reward_pathway = [Reward]
Umemoto_comp.add_linear_processing_pathway(Reward_pathway)

Umemoto_comp.add_node(Decision, required_roles=pnl.NodeRole.TERMINAL)

# COMPOSITION
Target_Rep_Control_Signal = pnl.ControlSignal(modulates=[(pnl.SLOPE, Target_Rep)],
                                              function=pnl.Linear,
                                              variable=1.0,
                                              cost_options=[pnl.CostFunctions.INTENSITY, pnl.CostFunctions.ADJUSTMENT],
                                              intensity_cost_function=pnl.Exponential(scale=1, rate=1),
                                              adjustment_cost_function=pnl.Exponential(scale=1, rate=1, offset=-1),
                                              allocation_samples=signalSearchRange)

Distractor_Rep_Control_Signal = pnl.ControlSignal(modulates=[(pnl.SLOPE, Distractor_Rep)],
                                                  function=pnl.Linear,
                                                  variable=1.0,
                                                  cost_options=[pnl.CostFunctions.INTENSITY, pnl.CostFunctions.ADJUSTMENT],
                                                  intensity_cost_function=pnl.Exponential(rate=0.8046),
                                                  adjustment_cost_function=pnl.Exponential(scale=1, rate=1, offset=-1),
                                                  allocation_samples=signalSearchRange)

Umemoto_comp.add_model_based_optimizer(optimizer=pnl.OptimizationControlMechanism(agent_rep=Umemoto_comp,
                                                                                  features={pnl.SHADOW_EXTERNAL_INPUTS: [Target_Stim, Distractor_Stim, Reward]},
                                                                                  feature_function=pnl.AdaptiveIntegrator(rate=1.0),
Example #8
RewardProcess = pnl.Process(
    pathway=[Reward],
    name='RewardProcess'
)

# System:
mySystem = pnl.System(processes=[TargetControlProcess,
                                 FlankerControlProcess,
                                 TargetAutomaticProcess,
                                 FlankerAutomaticProcess,
                                 RewardProcess],
                      controller=pnl.EVCControlMechanism(
                              control_signals=pnl.ControlSignal(projections=[(pnl.SLOPE, Target_Rep),
                                                                              (pnl.SLOPE, Distractor_Rep)
                                                                              ],
                                                                function=psyneulink.core.components.functions.transferfunctions.Logistic,
                                                                cost_options=[pnl.ControlSignalCosts.INTENSITY,
                                                                               pnl.ControlSignalCosts.ADJUSTMENT],
                                                                allocation_samples=signalSearchRange
                                                                )),
                      enable_controller=True,
                      monitor_for_control=[
                          # (None, None, np.ones((2,1))), # what the **** is this for? Markus October 25 2018
                          Reward,
                          Decision.PROBABILITY_UPPER_THRESHOLD,
                          ('OFFSET RT', 1, -1),
                      ],
                      name='EVC Markus System')

# log controller

mySystem.loggable_items
Example #9
                                    receiver=controlledElement)
stabilityFlexibility.add_projection(sender=stimulusInfo,
                                    receiver=controlledElement)
stabilityFlexibility.add_projection(sender=stimulusInfo,
                                    receiver=ddmCombination)
stabilityFlexibility.add_projection(sender=controlledElement,
                                    receiver=ddmCombination)
stabilityFlexibility.add_projection(sender=ddmCombination,
                                    receiver=decisionMaker)

# beginning of Controller

search_range = pnl.SampleSpec(start=1.0, stop=1.3, num=3)

signal = pnl.ControlSignal(modulates=[(pnl.GAIN, activation)],
                           function=pnl.Linear,
                           variable=1.0,
                           allocation_samples=search_range)

objective_mech = pnl.ObjectiveMechanism(monitor=[
    inputLayer, stimulusInfo, (pnl.PROBABILITY_UPPER_THRESHOLD, decisionMaker),
    (pnl.PROBABILITY_LOWER_THRESHOLD, decisionMaker)
],
                                        function=computeAccuracy)

meta_controller = pnl.OptimizationControlMechanism(
    agent_rep=stabilityFlexibility,
    state_features=[inputLayer.input_port, stimulusInfo.input_port],
    # state_features = {pnl.SHADOW_INPUTS: [inputLayer, stimulusInfo]},
    # state_features = [(inputLayer, pnl.SHADOW_INPUTS),
    #             (stimulusInfo, pnl.SHADOW_INPUTS)],
    objective_mechanism=objective_mech,
Example #10
    default_variable=[0],
    pathway=[Distractor_Stim, Automatic_Component_Flanker, Decision],  #
    name='Flanker1 Automatic Process')

RewardProcess = pnl.Process(pathway=[Reward], name='RewardProcess')

# System:
mySystem = pnl.System(
    processes=[
        TargetControlProcess, FlankerControlProcess, TargetAutomaticProcess,
        FlankerAutomaticProcess, RewardProcess
    ],
    controller=pnl.EVCControlMechanism(control_signals=pnl.ControlSignal(
        modulates=[(pnl.SLOPE, Target_Rep), (pnl.SLOPE, Distractor_Rep)],
        function=psyneulink.core.components.functions.nonstateful.
        transferfunctions.Logistic,
        cost_options=[
            pnl.CostFunctions.INTENSITY, pnl.CostFunctions.ADJUSTMENT
        ],
        allocation_samples=signalSearchRange)),
    enable_controller=True,
    monitor_for_control=[
        # (None, None, np.ones((2,1))), # what the **** is this for? Markus October 25 2018
        Reward,
        Decision.PROBABILITY_UPPER_THRESHOLD,
        ('OFFSET RT', 1, -1),
    ],
    name='EVC Markus System')

# log controller

mySystem.loggable_items
Example #11
            mu_0=-0.17,
            sigma_0=9.0909),  # -0.17, 9.0909 precision = 0.11; 1/p = v
        prediction_terms=[pnl.PV.FC, pnl.PV.COST]),
    function=pnl.GradientOptimization(
        convergence_criterion=pnl.VALUE,
        convergence_threshold=0.001,  #0.001
        step_size=1,  #1
        annealing_function=lambda x, y: x / np.sqrt(y)
        # direction=pnl.ASCENT
    ),
    control_signals=pnl.ControlSignal(
        projections=[(pnl.SLOPE, color_task), ('color_control', word_task)],
        # function=pnl.ReLU,
        function=pnl.Logistic,
        cost_options=[
            pnl.ControlSignalCosts.INTENSITY, pnl.ControlSignalCosts.ADJUSTMENT
        ],
        intensity_cost_function=pnl.Exponential(rate=0.25, bias=-3),
        adjustment_cost_function=pnl.Exponential(rate=0.25, bias=-3),
        # allocation_samples=[i / 2 for i in list(range(0, 50, 1))]
    ))

# print(lvoc.loggable_items)
lvoc.set_log_conditions('value')
# print(lvoc.loggable_items)
# lvoc.set_log_conditions('variable')
# lvoc.agent_rep.set_log_conditions('regression_weights')

lvoc.reportOutputPref = True

c.add_node(lvoc)
Example #12
    flanker_automatic_pathway
]

# Composition
evc_gratton = pnl.Composition(name="EVCGratton")
evc_gratton.add_node(Decision, required_roles=pnl.NodeRole.OUTPUT)
for path in pathways:
    evc_gratton.add_linear_processing_pathway(path)
evc_gratton.add_node(reward, required_roles=pnl.NodeRole.OUTPUT)

# Control Signals
signalSearchRange = pnl.SampleSpec(start=1.0, stop=1.8, step=0.2)

target_rep_control_signal = pnl.ControlSignal(
    projections=[(pnl.SLOPE, Target_Rep)],
    variable=1.0,
    intensity_cost_function=pnl.Exponential(rate=0.8046),
    allocation_samples=signalSearchRange)

flanker_rep_control_signal = pnl.ControlSignal(
    projections=[(pnl.SLOPE, Flanker_Rep)],
    variable=1.0,
    intensity_cost_function=pnl.Exponential(rate=0.8046),
    allocation_samples=signalSearchRange)

objective_mech = pnl.ObjectiveMechanism(
    function=pnl.LinearCombination(operation=pnl.PRODUCT),
    monitor=[
        reward, (Decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD], 1, -1)
    ])
# Model Based OCM (formerly controller)
Example #13
    def test_stability_flexibility_susan_and_sebastian(self):

        # computeAccuracy(trialInformation)
        # Inputs: trialInformation[0, 1, 2, 3]
        # trialInformation[0] - Task Dimension : [0, 1] or [1, 0]
        # trialInformation[1] - Stimulus Dimension: Congruent {[1, 1] or [-1, -1]} // Incongruent {[-1, 1] or [1, -1]}
        # trialInformation[2] - Upper Threshold: Probability of DDM choosing upper bound
        # trialInformation[3] - Lower Threshold: Probability of DDM choosing lower bound

        def computeAccuracy(trialInformation):

            # Unload contents of trialInformation
            # Origin Node Inputs
            taskInputs = trialInformation[0]
            stimulusInputs = trialInformation[1]

            # DDM Outputs
            upperThreshold = trialInformation[2]
            lowerThreshold = trialInformation[3]

            # Keep Track of Accuracy
            accuracy = []

            # Beginning of Accuracy Calculation
            colorTrial = (taskInputs[0] == 1)
            motionTrial = (taskInputs[1] == 1)

            # Based on the task dimension information, decide which response is "correct"
            # Obtain accuracy probability from DDM thresholds in "correct" direction
            if colorTrial:
                if stimulusInputs[0] == 1:
                    accuracy.append(upperThreshold)
                elif stimulusInputs[0] == -1:
                    accuracy.append(lowerThreshold)

            if motionTrial:
                if stimulusInputs[1] == 1:
                    accuracy.append(upperThreshold)
                elif stimulusInputs[1] == -1:
                    accuracy.append(lowerThreshold)

            # Accounts for initialization runs that have no variable input
            if len(accuracy) == 0:
                accuracy = [0]

            # print("Accuracy: ", accuracy[0])
            # print()

            return [accuracy]
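        # For example, per the logic above, a color trial with a congruent stimulus
        # and an upper-threshold probability of 0.7 returns [[0.7]]:
        #     computeAccuracy([[1, 0], [1, 1], 0.7, 0.3]) -> [[0.7]]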

        # BEGIN: Composition Construction

        # Constants as defined in Musslick et al. 2018
        tau = 0.9  # Time Constant
        DRIFT = 1  # Drift Rate
        STARTING_POINT = 0.0  # Starting Point
        THRESHOLD = 0.0475  # Threshold
        NOISE = 0.04  # Noise
        T0 = 0.2  # T0

        # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive
        # Origin Node
        taskLayer = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                          size=2,
                                          function=pnl.Linear(slope=1,
                                                              intercept=0),
                                          output_states=[pnl.RESULT],
                                          name='Task Input [I1, I2]')

        # Stimulus Layer: [Color Stimulus, Motion Stimulus]
        # Origin Node
        stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                             size=2,
                                             function=pnl.Linear(slope=1,
                                                                 intercept=0),
                                             output_states=[pnl.RESULT],
                                             name="Stimulus Input [S1, S2]")

        # Activation Layer: [Color Activation, Motion Activation]
        # Recurrent: Self Excitation, Mutual Inhibition
        # Controlled: Gain Parameter
        activation = pnl.RecurrentTransferMechanism(
            default_variable=[[0.0, 0.0]],
            function=pnl.Logistic(gain=1.0),
            matrix=[[1.0, -1.0], [-1.0, 1.0]],
            integrator_mode=True,
            integrator_function=pnl.AdaptiveIntegrator(rate=(tau)),
            initial_value=np.array([[0.0, 0.0]]),
            output_states=[pnl.RESULT],
            name='Task Activations [Act 1, Act 2]')

        # Hadamard product of Activation and Stimulus Information
        nonAutomaticComponent = pnl.TransferMechanism(
            default_variable=[[0.0, 0.0]],
            size=2,
            function=pnl.Linear(slope=1, intercept=0),
            input_states=pnl.InputState(combine=pnl.PRODUCT),
            output_states=[pnl.RESULT],
            name='Non-Automatic Component [S1*Activity1, S2*Activity2]')

        # Summation of nonAutomatic and Automatic Components
        ddmCombination = pnl.TransferMechanism(
            size=1,
            function=pnl.Linear(slope=1, intercept=0),
            input_states=pnl.InputState(combine=pnl.SUM),
            output_states=[pnl.RESULT],
            name="Drift = (S1 + S2) + (S1*Activity1 + S2*Activity2)")

        decisionMaker = pnl.DDM(function=pnl.DriftDiffusionAnalytical(
            drift_rate=DRIFT,
            starting_point=STARTING_POINT,
            threshold=THRESHOLD,
            noise=NOISE,
            t0=T0),
                                output_states=[
                                    pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
                                    pnl.PROBABILITY_UPPER_THRESHOLD,
                                    pnl.PROBABILITY_LOWER_THRESHOLD
                                ],
                                name='DDM')

        taskLayer.set_log_conditions([pnl.RESULT])
        stimulusInfo.set_log_conditions([pnl.RESULT])
        activation.set_log_conditions([pnl.RESULT, "mod_gain"])
        nonAutomaticComponent.set_log_conditions([pnl.RESULT])
        ddmCombination.set_log_conditions([pnl.RESULT])
        decisionMaker.set_log_conditions([
            pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD,
            pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME
        ])

        # Composition Creation

        stabilityFlexibility = pnl.Composition(controller_mode=pnl.BEFORE)

        # Node Creation
        stabilityFlexibility.add_node(taskLayer)
        stabilityFlexibility.add_node(activation)
        stabilityFlexibility.add_node(nonAutomaticComponent)
        stabilityFlexibility.add_node(stimulusInfo)
        stabilityFlexibility.add_node(ddmCombination)
        stabilityFlexibility.add_node(decisionMaker)

        # Projection Creation
        stabilityFlexibility.add_projection(sender=taskLayer,
                                            receiver=activation)
        stabilityFlexibility.add_projection(sender=activation,
                                            receiver=nonAutomaticComponent)
        stabilityFlexibility.add_projection(sender=stimulusInfo,
                                            receiver=nonAutomaticComponent)
        stabilityFlexibility.add_projection(sender=stimulusInfo,
                                            receiver=ddmCombination)
        stabilityFlexibility.add_projection(sender=nonAutomaticComponent,
                                            receiver=ddmCombination)
        stabilityFlexibility.add_projection(sender=ddmCombination,
                                            receiver=decisionMaker)

        # Beginning of Controller

        # Grid Search Range
        searchRange = pnl.SampleSpec(start=1.0, stop=1.9, num=10)

        # Modulate the GAIN parameter of the activation layer
        # Initialize cost function as 0
        signal = pnl.ControlSignal(
            projections=[(pnl.GAIN, activation)],
            function=pnl.Linear,
            variable=1.0,
            intensity_cost_function=pnl.Linear(slope=0.0),
            allocation_samples=searchRange)

        # Use the computeAccuracy function to obtain selection values
        # Pass in 4 arguments whenever computeRewardRate is called
        objectiveMechanism = pnl.ObjectiveMechanism(
            monitor=[
                taskLayer, stimulusInfo,
                (pnl.PROBABILITY_UPPER_THRESHOLD, decisionMaker),
                (pnl.PROBABILITY_LOWER_THRESHOLD, decisionMaker)
            ],
            function=computeAccuracy,
            name="Controller Objective Mechanism")

        #  Sets trial history for simulations over specified signal search parameters
        metaController = pnl.OptimizationControlMechanism(
            agent_rep=stabilityFlexibility,
            features=[taskLayer.input_state, stimulusInfo.input_state],
            feature_function=pnl.Buffer(history=10),
            name="Controller",
            objective_mechanism=objectiveMechanism,
            function=pnl.GridSearch(),
            control_signals=[signal])

        stabilityFlexibility.add_controller(metaController)
        stabilityFlexibility.enable_controller = True
        # stabilityFlexibility.model_based_optimizer_mode = pnl.BEFORE

        for i in range(1, len(stabilityFlexibility.controller.input_states)):
            stabilityFlexibility.controller.input_states[
                i].function.reinitialize()
        # Origin Node Inputs
        taskTrain = [[1, 0], [0, 1], [1, 0], [0, 1]]
        stimulusTrain = [[1, -1], [-1, 1], [1, -1], [-1, 1]]

        inputs = {taskLayer: taskTrain, stimulusInfo: stimulusTrain}
        stabilityFlexibility.run(inputs)
Example #14
    def test_evc_gratton(self):
        # Stimulus Mechanisms
        target_stim = pnl.TransferMechanism(name='Target Stimulus',
                                            function=pnl.Linear(slope=0.3324))
        flanker_stim = pnl.TransferMechanism(
            name='Flanker Stimulus', function=pnl.Linear(slope=0.3545221843))

        # Processing Mechanisms (Control)
        Target_Rep = pnl.TransferMechanism(name='Target Representation')
        Flanker_Rep = pnl.TransferMechanism(name='Flanker Representation')

        # Processing Mechanism (Automatic)
        Automatic_Component = pnl.TransferMechanism(name='Automatic Component')

        # Decision Mechanism
        Decision = pnl.DDM(name='Decision',
                           function=pnl.DriftDiffusionAnalytical(
                               drift_rate=(1.0),
                               threshold=(0.2645),
                               noise=(0.5),
                               starting_point=(0),
                               t0=0.15),
                           output_states=[
                               pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
                               pnl.PROBABILITY_UPPER_THRESHOLD
                           ])

        # Outcome Mechanism
        reward = pnl.TransferMechanism(name='reward')

        # Pathways
        target_control_pathway = [target_stim, Target_Rep, Decision]
        flanker_control_pathway = [flanker_stim, Flanker_Rep, Decision]
        target_automatic_pathway = [target_stim, Automatic_Component, Decision]
        flanker_automatic_pathway = [
            flanker_stim, Automatic_Component, Decision
        ]
        pathways = [
            target_control_pathway, flanker_control_pathway,
            target_automatic_pathway, flanker_automatic_pathway
        ]

        # Composition
        evc_gratton = pnl.Composition(name="EVCGratton")
        evc_gratton.add_node(Decision, required_roles=pnl.NodeRole.OUTPUT)
        for path in pathways:
            evc_gratton.add_linear_processing_pathway(path)
        evc_gratton.add_node(reward, required_roles=pnl.NodeRole.OUTPUT)

        # Control Signals
        signalSearchRange = pnl.SampleSpec(start=1.0, stop=1.8, step=0.2)

        target_rep_control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, Target_Rep)],
            function=pnl.Linear,
            variable=1.0,
            intensity_cost_function=pnl.Exponential(rate=0.8046),
            allocation_samples=signalSearchRange)

        flanker_rep_control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, Flanker_Rep)],
            function=pnl.Linear,
            variable=1.0,
            intensity_cost_function=pnl.Exponential(rate=0.8046),
            allocation_samples=signalSearchRange)

        objective_mech = pnl.ObjectiveMechanism(
            function=pnl.LinearCombination(operation=pnl.PRODUCT),
            monitor=[
                reward,
                (Decision.output_states[pnl.PROBABILITY_UPPER_THRESHOLD], 1,
                 -1)
            ])
        # Model Based OCM (formerly controller)
        evc_gratton.add_controller(controller=pnl.OptimizationControlMechanism(
            agent_rep=evc_gratton,
            features=[
                target_stim.input_state, flanker_stim.input_state,
                reward.input_state
            ],
            feature_function=pnl.AdaptiveIntegrator(rate=1.0),
            objective_mechanism=objective_mech,
            function=pnl.GridSearch(),
            control_signals=[
                target_rep_control_signal, flanker_rep_control_signal
            ]))
        evc_gratton.enable_controller = True

        targetFeatures = [1, 1, 1]
        flankerFeatures = [1, -1, 1]
        rewardValues = [100, 100, 100]

        stim_list_dict = {
            target_stim: targetFeatures,
            flanker_stim: flankerFeatures,
            reward: rewardValues
        }

        evc_gratton.run(inputs=stim_list_dict)

        expected_results_array = [[[0.32257752863413636], [0.9481940753514433],
                                   [100.]],
                                  [[0.42963678062444666],
                                   [0.47661180945923376], [100.]],
                                  [[0.300291026852769], [0.97089165101931],
                                   [100.]]]

        expected_sim_results_array = [
            [[0.32257753], [0.94819408], [100.]],
            [[0.31663196], [0.95508757], [100.]],
            [[0.31093566], [0.96110142], [100.]],
            [[0.30548947], [0.96633839], [100.]],
            [[0.30029103], [0.97089165], [100.]],
            [[0.3169957], [0.95468427], [100.]],
            [[0.31128378], [0.9607499], [100.]],
            [[0.30582202], [0.96603252], [100.]],
            [[0.30060824], [0.9706259], [100.]],
            [[0.29563774], [0.97461444], [100.]],
            [[0.31163288], [0.96039533], [100.]],
            [[0.30615555], [0.96572397], [100.]],
            [[0.30092641], [0.97035779], [100.]],
            [[0.2959409], [0.97438178], [100.]],
            [[0.29119255], [0.97787196], [100.]],
            [[0.30649004], [0.96541272], [100.]],
            [[0.30124552], [0.97008732], [100.]],
            [[0.29624499], [0.97414704], [100.]],
            [[0.29148205], [0.97766847], [100.]],
            [[0.28694892], [0.98071974], [100.]],
            [[0.30156558], [0.96981445], [100.]],
            [[0.29654999], [0.97391021], [100.]],
            [[0.29177245], [0.97746315], [100.]],
            [[0.28722523], [0.98054192], [100.]],
            [[0.28289958], [0.98320731], [100.]],
            [[0.42963678], [0.47661181], [100.]],
            [[0.42846471], [0.43938586], [100.]],
            [[0.42628176], [0.40282965], [100.]],
            [[0.42314468], [0.36732207], [100.]],
            [[0.41913221], [0.333198], [100.]],
            [[0.42978939], [0.51176048], [100.]],
            [[0.42959394], [0.47427693], [100.]],
            [[0.4283576], [0.43708106], [100.]],
            [[0.4261132], [0.40057958], [100.]],
            [[0.422919], [0.36514906], [100.]],
            [[0.42902209], [0.54679323], [100.]],
            [[0.42980788], [0.50942101], [100.]],
            [[0.42954704], [0.47194318], [100.]],
            [[0.42824656], [0.43477897], [100.]],
            [[0.42594094], [0.3983337], [100.]],
            [[0.42735293], [0.58136855], [100.]],
            [[0.42910149], [0.54447221], [100.]],
            [[0.42982229], [0.50708112], [100.]],
            [[0.42949608], [0.46961065], [100.]],
            [[0.42813159], [0.43247968], [100.]],
            [[0.42482049], [0.61516258], [100.]],
            [[0.42749136], [0.57908829], [100.]],
            [[0.42917687], [0.54214925], [100.]],
            [[0.42983261], [0.50474093], [100.]],
            [[0.42944107], [0.46727945], [100.]],
            [[0.32257753], [0.94819408], [100.]],
            [[0.31663196], [0.95508757], [100.]],
            [[0.31093566], [0.96110142], [100.]],
            [[0.30548947], [0.96633839], [100.]],
            [[0.30029103], [0.97089165], [100.]],
            [[0.3169957], [0.95468427], [100.]],
            [[0.31128378], [0.9607499], [100.]],
            [[0.30582202], [0.96603252], [100.]],
            [[0.30060824], [0.9706259], [100.]],
            [[0.29563774], [0.97461444], [100.]],
            [[0.31163288], [0.96039533], [100.]],
            [[0.30615555], [0.96572397], [100.]],
            [[0.30092641], [0.97035779], [100.]],
            [[0.2959409], [0.97438178], [100.]],
            [[0.29119255], [0.97787196], [100.]],
            [[0.30649004], [0.96541272], [100.]],
            [[0.30124552], [0.97008732], [100.]],
            [[0.29624499], [0.97414704], [100.]],
            [[0.29148205], [0.97766847], [100.]],
            [[0.28694892], [0.98071974], [100.]],
            [[0.30156558], [0.96981445], [100.]],
            [[0.29654999], [0.97391021], [100.]],
            [[0.29177245], [0.97746315], [100.]],
            [[0.28722523], [0.98054192], [100.]],
            [[0.28289958], [0.98320731], [100.]],
        ]

        for trial in range(len(evc_gratton.results)):
            assert np.allclose(
                expected_results_array[trial],
                # Note: Skip decision variable OutputState
                evc_gratton.results[trial][1:])
        for simulation in range(len(evc_gratton.simulation_results)):
            assert np.allclose(
                expected_sim_results_array[simulation],
                # Note: Skip decision variable OutputState
                evc_gratton.simulation_results[simulation][1:])
Example #15
    def test_control_signal_default_allocation_specification(self):

        m1 = pnl.ProcessingMechanism()
        m2 = pnl.ProcessingMechanism()
        m3 = pnl.ProcessingMechanism()

        # default_allocation not specified in constructor of pnl.ControlMechanism,
        #     so should be set to defaultControlAllocation (=[1]) if not specified in pnl.ControlSignal constructor
        c1 = pnl.ControlMechanism(
            name='C1',
            default_variable=[10],
            control_signals=[
                pnl.ControlSignal(
                    modulates=(pnl.SLOPE, m1)),  # test for assignment to defaultControlAllocation
                pnl.ControlSignal(
                    default_allocation=2,  # test for scalar assignment
                    modulates=(pnl.SLOPE, m2)),
                pnl.ControlSignal(
                    default_allocation=[3],  # test for array assignment
                    modulates=(pnl.SLOPE, m3))
            ])
        comp = pnl.Composition()
        comp.add_nodes([m1, m2, m3])
        comp.add_controller(c1)
        # defaultControlAllocation should be assigned
        # (as no default_allocation from pnl.ControlMechanism)
        assert c1.control_signals[0].value == [10]
        assert m1.parameter_ports[pnl.SLOPE].value == [1]
        # default_allocation from pnl.ControlSignal (converted scalar)
        assert c1.control_signals[1].value == [2]
        assert m2.parameter_ports[pnl.SLOPE].value == [1]
        # default_allocation from pnl.ControlSignal
        assert c1.control_signals[2].value == [3]
        assert m3.parameter_ports[pnl.SLOPE].value == [1]
        result = comp.run(inputs={m1: [2], m2: [3], m3: [4]})
        assert np.allclose(result, [[20.], [6.], [12.]])
        assert c1.control_signals[0].value == [10]
        assert m1.parameter_ports[pnl.SLOPE].value == [10]
        assert c1.control_signals[1].value == [10]
        assert m2.parameter_ports[pnl.SLOPE].value == [2]
        assert c1.control_signals[2].value == [10]
        assert m3.parameter_ports[pnl.SLOPE].value == [3]
        result = comp.run(inputs={m1: [2], m2: [3], m3: [4]})
        assert np.allclose(result, [[20.], [30.], [40.]])
        assert c1.control_signals[0].value == [10]
        assert m1.parameter_ports[pnl.SLOPE].value == [10]
        assert c1.control_signals[1].value == [10]
        assert m2.parameter_ports[pnl.SLOPE].value == [10]
        assert c1.control_signals[2].value == [10]
        assert m3.parameter_ports[pnl.SLOPE].value == [10]

        # default_allocation *is* specified in constructor of pnl.ControlMechanism,
        #     so should be used unless specified in pnl.ControlSignal constructor
        c2 = pnl.ControlMechanism(
            name='C3',
            default_variable=[10],
            default_allocation=[4],
            control_signals=[
                pnl.ControlSignal(
                    modulates=(pnl.SLOPE, m1)),  # tests for assignment to default_allocation
                pnl.ControlSignal(
                    default_allocation=5,  # tests for override of default_allocation
                    modulates=(pnl.SLOPE, m2)),
                pnl.ControlSignal(
                    default_allocation=[6],  # as above, but with an array
                    modulates=(pnl.SLOPE, m3))
            ])
        comp = pnl.Composition()
        comp.add_nodes([m1, m2, m3])
        comp.add_controller(c2)
        # default_allocation from pnl.ControlMechanism assigned
        assert c2.control_signals[0].value == [4]
        # has not yet received pnl.ControlSignal value
        assert m1.parameter_ports[pnl.SLOPE].value == [10]
        # default_allocation from pnl.ControlSignal assigned (converted scalar)
        assert c2.control_signals[1].value == [5]
        assert m2.parameter_ports[pnl.SLOPE].value == [10]
        # default_allocation from pnl.ControlSignal assigned
        assert c2.control_signals[2].value == [6]
        assert m3.parameter_ports[pnl.SLOPE].value == [10]
        result = comp.run(inputs={m1: [2], m2: [3], m3: [4]})
        assert np.allclose(result, [[8.], [15.], [24.]])
        assert c2.control_signals[0].value == [10]
        assert m1.parameter_ports[pnl.SLOPE].value == [4]
        assert c2.control_signals[1].value == [10]
        assert m2.parameter_ports[pnl.SLOPE].value == [5]
        assert c2.control_signals[2].value == [10]
        assert m3.parameter_ports[pnl.SLOPE].value == [6]
        result = comp.run(inputs={m1: [2], m2: [3], m3: [4]})
        assert np.allclose(result, [[20.], [30.], [40.]])
        assert c2.control_signals[0].value == [10]
        assert m1.parameter_ports[pnl.SLOPE].value == [10]
        assert c2.control_signals[1].value == [10]
        assert m2.parameter_ports[pnl.SLOPE].value == [10]
        assert c2.control_signals[2].value == [10]
        assert m3.parameter_ports[pnl.SLOPE].value == [10]
Example #16
stabilityFlexibility.add_projection(sender=stimulusInfo,
                                    receiver=controlledElement)
stabilityFlexibility.add_projection(sender=stimulusInfo,
                                    receiver=ddmCombination)
stabilityFlexibility.add_projection(sender=controlledElement,
                                    receiver=ddmCombination)
stabilityFlexibility.add_projection(sender=ddmCombination,
                                    receiver=decisionMaker)

# beginning of Controller

search_range = pnl.SampleSpec(start=0.1, stop=0.3, num=3)

signal = pnl.ControlSignal(modulates=[(pnl.GAIN, activation)],
                           function=pnl.Linear,
                           variable=1.0,
                           intensity_cost_function=pnl.Linear(slope=0.),
                           allocation_samples=search_range)

objective_mech = pnl.ObjectiveMechanism(monitor=[
    inputLayer, stimulusInfo, (pnl.PROBABILITY_UPPER_THRESHOLD, decisionMaker),
    (pnl.PROBABILITY_LOWER_THRESHOLD, decisionMaker)
],
                                        function=computeAccuracy)

meta_controller = pnl.OptimizationControlMechanism(
    agent_rep=stabilityFlexibility,
    features=[inputLayer.input_port, stimulusInfo.input_port],
    feature_function=pnl.Buffer(history=3),
    objective_mechanism=objective_mech,
    function=pnl.GridSearch(),
Example #17
FlankerAutomatic_pathway = [Distractor_Stim, Distactor_Component]
Umemoto_comp.add_linear_processing_pathway(FlankerAutomatic_pathway)

Reward_pathway = [Reward]
Umemoto_comp.add_linear_processing_pathway(Reward_pathway)

Umemoto_comp.add_node(Decision,
                      # required_roles=pnl.NodeRole.OUTPUT
                      )

# COMPOSITION
Target_Rep_Control_Signal = pnl.ControlSignal(
    modulates=[(pnl.SLOPE, Target_Rep)],
    function=pnl.Linear,
    variable=1.0,
    cost_options=[pnl.CostFunctions.INTENSITY, pnl.CostFunctions.ADJUSTMENT],
    intensity_cost_function=pnl.Exponential(scale=1, rate=1),
    # compute_reconfiguration_cost=pnl.Distance(metric=pnl.EUCLIDEAN),
    # adjustment_cost_function=pnl.Exponential(scale=1, rate=1, offset=-1),#offset = -1
    allocation_samples=signalSearchRange)

Distractor_Rep_Control_Signal = pnl.ControlSignal(
    modulates=[(pnl.SLOPE, Distractor_Rep)],
    function=pnl.Linear,
    variable=1.0,
    cost_options=[pnl.CostFunctions.INTENSITY, pnl.CostFunctions.ADJUSTMENT],
    intensity_cost_function=pnl.Exponential(scale=1, rate=1),
    # adjustment_cost_function=pnl.Exponential(scale=1, rate=1, offset=-1),
    #offset = -1
    allocation_samples=signalSearchRange)
        # save_samples=True,
        # save_values=True,
        # direction=pnl.ASCENT
    ),
    # opt control alloc used to compute ctrl sigs
    control_signals=[
        pnl.ControlSignal(
            default_allocation=default_control_signal,
            modulates=[(pnl.SLOPE, color_task), ('color_control', word_task)],
            # function=pnl.ReLU,
            # function=pnl.Logistic,
            cost_options=[
                pnl.CostFunctions.INTENSITY, pnl.CostFunctions.ADJUSTMENT
            ],
            intensity_cost_function=pnl.Exponential(rate=0.25,
                                                    bias=-1),  # 0.25, -3
            # adjustment_cost_function=pnl.Exponential(rate=.25, bias=-1), # 0.25, -3
            # adjustment_cost_function=lambda x: np.exp(.25 * np.abs(x) - 1),
            adjustment_cost_function=adj_cost_fct,
            # intensity_cost_function=pnl.Linear(slope=0, intercept=0), # 0.25, -3
            # adjustment_cost_function=pnl.Linear(slope=0, intercept=0), # 0.25, -3
            allocation_samples=control_signal_range
            # allocation_samples = np.arange(0.1, 1.01, 0.3)
            # allocation_samples=[i / 2 for i in list(range(0, 50, 1))]
        )
    ])

lvoc.set_log_conditions('value')
# lvoc.set_log_conditions('features')
# print("LVOC loggable: ", lvoc.loggable_items)
# lvoc.set_log_conditions('variable')
class TestProjectionSpecificationFormats:
    def test_projection_specification_formats(self):
        """Test various matrix and Projection specifications
        Also tests assignment of Projections to pathway of Composition using add_linear_processing_pathway:
        - Projection explicitly specified in sequence (M1_M2_proj)
        - Projection pre-constructed and assigned to Mechanisms, but not specified in pathway (M2_M3_proj)
        - Projection specified in pathway that duplicates one pre-constructed and assigned to Mechanisms (M3_M4_proj)
          (currently it should be ignored; in the future, if/when Projections between the same sender and receiver
           in different Compositions are allowed, then it should be used)
        """
        M1 = pnl.ProcessingMechanism(size=2)
        M2 = pnl.ProcessingMechanism(size=5)
        M3 = pnl.ProcessingMechanism(size=4)
        M4 = pnl.ProcessingMechanism(size=3)

        M1_M2_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5)
        M2_M3_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4)
        M3_M4_matrix_A = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 5)
        M3_M4_matrix_B = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3)

        M1_M2_proj = pnl.MappingProjection(matrix=M1_M2_matrix)
        M2_M3_proj = pnl.MappingProjection(sender=M2,
                                           receiver=M3,
                                           matrix={
                                               pnl.VALUE: M2_M3_matrix,
                                               pnl.FUNCTION:
                                               pnl.AccumulatorIntegrator,
                                               pnl.FUNCTION_PARAMS: {
                                                   pnl.DEFAULT_VARIABLE:
                                                   M2_M3_matrix,
                                                   pnl.INITIALIZER:
                                                   M2_M3_matrix
                                               }
                                           })
        M3_M4_proj_A = pnl.MappingProjection(sender=M3,
                                             receiver=M4,
                                             matrix=M3_M4_matrix_A)
        c = pnl.Composition()
        c.add_linear_processing_pathway(
            pathway=[M1, M1_M2_proj, M2, M3, M3_M4_matrix_B, M4])
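        # In the pathway above, M1_M2_proj is used for M1->M2; M2->M3 uses the
        # pre-constructed M2_M3_proj even though it is not listed; and
        # M3_M4_matrix_B is ignored in favor of the pre-assigned M3_M4_proj_A
        # (see the assertions below)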

        assert np.allclose(M2_M3_proj.matrix.base, M2_M3_matrix)
        assert M2.efferents[0] is M2_M3_proj
        assert np.allclose(M3.efferents[0].matrix.base, M3_M4_matrix_A)
        # This is if different Projections are allowed between the same sender and receiver in different Compositions:
        # assert np.allclose(M3.efferents[1].matrix, M3_M4_matrix_B)
        c.run(inputs={M1: [2, -30]})
        # assert np.allclose(c.results, [[-130.19166667, -152.53333333, -174.875]])
        assert np.allclose(c.results, [[-78.115, -91.52, -104.925]])

    def test_multiple_modulatory_projection_specs(self):

        M = pnl.DDM(name='MY DDM')
        C = pnl.ControlMechanism(control_signals=[{
            pnl.PROJECTIONS: [
                M.parameter_ports[psyneulink.core.components.functions.
                                  distributionfunctions.DRIFT_RATE],
                M.parameter_ports[psyneulink.core.globals.keywords.THRESHOLD]
            ]
        }])
        G = pnl.GatingMechanism(gating_signals=[{
            pnl.PROJECTIONS: [
                M.output_ports[pnl.DECISION_VARIABLE], M.output_ports[
                    pnl.RESPONSE_TIME]
            ]
        }])
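        # A dict spec with a PROJECTIONS entry creates a single signal with one
        # efferent projection per listed port, as the assertions below check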
        assert len(C.control_signals) == 1
        assert len(C.control_signals[0].efferents) == 2
        assert M.parameter_ports[
            psyneulink.core.components.functions.distributionfunctions.
            DRIFT_RATE].mod_afferents[0] == C.control_signals[0].efferents[0]
        assert M.parameter_ports[
            psyneulink.core.globals.keywords.
            THRESHOLD].mod_afferents[0] == C.control_signals[0].efferents[1]
        assert len(G.gating_signals) == 1
        assert len(G.gating_signals[0].efferents) == 2
        assert M.output_ports[pnl.DECISION_VARIABLE].mod_afferents[
            0] == G.gating_signals[0].efferents[0]
        assert M.output_ports[pnl.RESPONSE_TIME].mod_afferents[
            0] == G.gating_signals[0].efferents[1]

    def test_multiple_modulatory_projections_with_port_Name(self):

        M = pnl.DDM(name='MY DDM')
        C = pnl.ControlMechanism(control_signals=[{
            'DECISION_CONTROL': [
                M.parameter_ports[psyneulink.core.components.functions.
                                  distributionfunctions.DRIFT_RATE],
                M.parameter_ports[psyneulink.core.globals.keywords.THRESHOLD]
            ]
        }])
        G = pnl.GatingMechanism(gating_signals=[{
            'DDM_OUTPUT_GATE': [
                M.output_ports[pnl.DECISION_VARIABLE], M.output_ports[
                    pnl.RESPONSE_TIME]
            ]
        }])
        assert len(C.control_signals) == 1
        assert C.control_signals[0].name == 'DECISION_CONTROL'
        assert len(C.control_signals[0].efferents) == 2
        assert M.parameter_ports[
            psyneulink.core.components.functions.distributionfunctions.
            DRIFT_RATE].mod_afferents[0] == C.control_signals[0].efferents[0]
        assert M.parameter_ports[
            psyneulink.core.globals.keywords.
            THRESHOLD].mod_afferents[0] == C.control_signals[0].efferents[1]
        assert len(G.gating_signals) == 1
        assert G.gating_signals[0].name == 'DDM_OUTPUT_GATE'
        assert len(G.gating_signals[0].efferents) == 2
        assert M.output_ports[pnl.DECISION_VARIABLE].mod_afferents[
            0] == G.gating_signals[0].efferents[0]
        assert M.output_ports[pnl.RESPONSE_TIME].mod_afferents[
            0] == G.gating_signals[0].efferents[1]

    def test_multiple_modulatory_projections_with_mech_and_port_Name_specs(
            self):

        M = pnl.DDM(name='MY DDM')
        C = pnl.ControlMechanism(control_signals=[{
            pnl.MECHANISM:
            M,
            pnl.PARAMETER_PORTS: [
                psyneulink.core.components.functions.distributionfunctions.
                DRIFT_RATE, psyneulink.core.globals.keywords.THRESHOLD
            ]
        }])
        G = pnl.GatingMechanism(gating_signals=[{
            pnl.MECHANISM:
            M,
            pnl.OUTPUT_PORTS: [pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME]
        }])
        assert len(C.control_signals) == 1
        assert len(C.control_signals[0].efferents) == 2
        assert M.parameter_ports[
            psyneulink.core.components.functions.distributionfunctions.
            DRIFT_RATE].mod_afferents[0] == C.control_signals[0].efferents[0]
        assert M.parameter_ports[
            psyneulink.core.globals.keywords.
            THRESHOLD].mod_afferents[0] == C.control_signals[0].efferents[1]
        assert len(G.gating_signals) == 1
        assert len(G.gating_signals[0].efferents) == 2
        assert M.output_ports[pnl.DECISION_VARIABLE].mod_afferents[
            0] == G.gating_signals[0].efferents[0]
        assert M.output_ports[pnl.RESPONSE_TIME].mod_afferents[
            0] == G.gating_signals[0].efferents[1]

    def test_mapping_projection_with_mech_and_port_Name_specs(self):
        R1 = pnl.TransferMechanism(output_ports=['OUTPUT_1', 'OUTPUT_2'])
        R2 = pnl.TransferMechanism(default_variable=[[0], [0]],
                                   input_ports=['INPUT_1', 'INPUT_2'])
        T = pnl.TransferMechanism(input_ports=[{
            pnl.MECHANISM:
            R1,
            pnl.OUTPUT_PORTS: ['OUTPUT_1', 'OUTPUT_2']
        }],
                                  output_ports=[{
                                      pnl.MECHANISM:
                                      R2,
                                      pnl.INPUT_PORTS: ['INPUT_1', 'INPUT_2']
                                  }])
        assert len(R1.output_ports) == 2
        assert len(R2.input_ports) == 2
        assert len(T.input_ports) == 1
        for input_port in T.input_ports:
            for projection in input_port.path_afferents:
                assert projection.sender.owner is R1
        assert len(T.output_ports) == 1
        for output_port in T.output_ports:
            for projection in output_port.efferents:
                assert projection.receiver.owner is R2

    def test_mapping_projection_using_2_item_tuple_with_list_of_port_Names(
            self):

        T1 = pnl.TransferMechanism(name='T1', input_ports=[[0, 0], [0, 0, 0]])
        T2 = pnl.TransferMechanism(name='T2',
                                   output_ports=[
                                       (['InputPort-0', 'InputPort-1'], T1)
                                   ])
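        # A ([port names], Mechanism) tuple creates one OutputPort with a
        # MappingProjection to each named InputPort, each matrix shaped to its
        # receiver (asserted below)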
        assert len(T2.output_ports) == 1
        assert T2.output_ports[0].efferents[0].receiver.name == 'InputPort-0'
        assert T2.output_ports[0].efferents[0].matrix.base.shape == (1, 2)
        assert T2.output_ports[0].efferents[1].receiver.name == 'InputPort-1'
        assert T2.output_ports[0].efferents[1].matrix.base.shape == (1, 3)

    def test_mapping_projection_using_2_item_tuple_and_3_item_tuples_with_index_specs(
            self):

        T1 = pnl.TransferMechanism(name='T1', input_ports=[[0, 0], [0, 0, 0]])
        T2 = pnl.TransferMechanism(name='T2',
                                   input_ports=['a', 'b', 'c'],
                                   output_ports=[
                                       (['InputPort-0', 'InputPort-1'], T1),
                                       ('InputPort-0', (pnl.OWNER_VALUE, 2),
                                        T1),
                                       (['InputPort-0', 'InputPort-1'], 1, T1)
                                   ])
        assert len(T2.output_ports) == 3
        assert T2.output_ports[0].efferents[0].receiver.name == 'InputPort-0'
        assert T2.output_ports[0].efferents[0].matrix.base.shape == (1, 2)
        assert T2.output_ports[0].efferents[1].receiver.name == 'InputPort-1'
        assert T2.output_ports[0].efferents[1].matrix.base.shape == (1, 3)
        assert T2.output_ports[1].owner_value_index == 2
        assert T2.output_ports[2].owner_value_index == 1

    def test_2_item_tuple_from_control_signal_to_parameter_port(self):

        D = pnl.DDM(name='D')

        # Single name
        C = pnl.ControlMechanism(
            control_signals=[(psyneulink.core.components.functions.
                              distributionfunctions.DRIFT_RATE, D)])
        assert C.control_signals[0].name == 'D[drift_rate] ControlSignal'
        assert C.control_signals[0].efferents[0].receiver.name == 'drift_rate'

        # List of names
        C = pnl.ControlMechanism(control_signals=[([
            psyneulink.core.components.functions.distributionfunctions.
            DRIFT_RATE, psyneulink.core.globals.keywords.THRESHOLD
        ], D)])
        assert C.control_signals[
            0].name == 'D[drift_rate, threshold] ControlSignal'
        assert C.control_signals[0].efferents[0].receiver.name == 'drift_rate'
        assert C.control_signals[0].efferents[1].receiver.name == 'threshold'

    def test_2_item_tuple_from_parameter_port_to_control_signals(self):

        C = pnl.ControlMechanism(control_signals=['a', 'b'])
        D = pnl.DDM(name='D3',
                    function=psyneulink.core.components.functions.
                    distributionfunctions.DriftDiffusionAnalytical(
                        drift_rate=(3, C),
                        threshold=(2, C.control_signals['b'])))
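        # Tuple specs of the form (value, ControlMechanism) or (value, ControlSignal)
        # attach a ControlProjection for the parameter from the given source,
        # as checked via mod_afferents below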
        assert D.parameter_ports[
            psyneulink.core.components.functions.distributionfunctions.
            DRIFT_RATE].mod_afferents[0].sender == C.control_signals[0]
        assert D.parameter_ports[
            psyneulink.core.globals.keywords.
            THRESHOLD].mod_afferents[0].sender == C.control_signals[1]

    def test_2_item_tuple_from_gating_signal_to_output_ports(self):

        D4 = pnl.DDM(name='D4')

        # Single name
        G = pnl.GatingMechanism(gating_signals=[(pnl.DECISION_VARIABLE, D4)])
        assert G.gating_signals[0].name == 'D4[DECISION_VARIABLE] GatingSignal'
        assert G.gating_signals[0].efferents[
            0].receiver.name == 'DECISION_VARIABLE'

        # List of names
        G = pnl.GatingMechanism(
            gating_signals=[([pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME], D4)])
        assert G.gating_signals[
            0].name == 'D4[DECISION_VARIABLE, RESPONSE_TIME] GatingSignal'
        assert G.gating_signals[0].efferents[
            0].receiver.name == 'DECISION_VARIABLE'
        assert G.gating_signals[0].efferents[
            1].receiver.name == 'RESPONSE_TIME'

    def test_2_item_tuple_from_input_and_output_ports_to_gating_signals(self):

        G = pnl.GatingMechanism(gating_signals=['a', 'b'])
        T = pnl.TransferMechanism(name='T',
                                  input_ports=[(3, G)],
                                  output_ports=[(2, G.gating_signals['b'])])
        assert T.input_ports[0].mod_afferents[0].sender == G.gating_signals[0]
        assert T.output_ports[0].mod_afferents[0].sender == G.gating_signals[1]

    control_spec_list = [
        pnl.CONTROL, pnl.CONTROL_SIGNAL, pnl.CONTROL_PROJECTION,
        pnl.ControlSignal,
        pnl.ControlSignal(), pnl.ControlProjection, "CP_OBJECT",
        pnl.ControlMechanism,
        pnl.ControlMechanism(), pnl.ControlMechanism, (0.3, pnl.CONTROL),
        (0.3, pnl.CONTROL_SIGNAL), (0.3, pnl.CONTROL_PROJECTION),
        (0.3, pnl.ControlSignal), (0.3, pnl.ControlSignal()),
        (0.3, pnl.ControlProjection), (0.3, "CP_OBJECT"),
        (0.3, pnl.ControlMechanism), (0.3, pnl.ControlMechanism()),
        (0.3, pnl.ControlMechanism)
    ]

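    # Pair each control specification with the entry at the mirrored position in the
    # list, so every format is exercised both as a Mechanism parameter (noise) and as
    # a function parameter (gain) in a single pass over the list.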
    @pytest.mark.parametrize(
        'noise, gain',
        list(zip(control_spec_list, reversed(control_spec_list))))
    def test_formats_for_control_specification_for_mechanism_and_function_params(
            self, noise, gain):
        # These shenanigans avoid assigning the same instantiated ControlProjection more than once
        if noise == 'CP_OBJECT':
            noise = pnl.ControlProjection()
        elif isinstance(noise, tuple) and noise[1] == 'CP_OBJECT':
            noise = (noise[0], pnl.ControlProjection())
        if gain == 'CP_OBJECT':
            gain = pnl.ControlProjection()
        elif isinstance(gain, tuple) and gain[1] == 'CP_OBJECT':
            gain = (gain[0], pnl.ControlProjection())

        R = pnl.RecurrentTransferMechanism(
            # NOTE: fixed name prevents failures due to registry naming
            # for parallel test runs
            name='R-CONTROL',
            noise=noise,
            function=psyneulink.core.components.functions.transferfunctions.
            Logistic(gain=gain))
        assert R.parameter_ports[pnl.NOISE].mod_afferents[0].name in \
                'ControlProjection for R-CONTROL[noise]'
        assert R.parameter_ports[pnl.GAIN].mod_afferents[0].name in \
                'ControlProjection for R-CONTROL[gain]'

    gating_spec_list = [
        pnl.GATING, pnl.CONTROL, pnl.GATING_SIGNAL, pnl.CONTROL_SIGNAL,
        pnl.GATING_PROJECTION, pnl.CONTROL_PROJECTION, pnl.GatingSignal,
        pnl.ControlSignal,
        pnl.GatingSignal(),
        pnl.ControlSignal(), pnl.GatingProjection, "GP_OBJECT",
        pnl.GatingMechanism, pnl.ControlMechanism,
        pnl.GatingMechanism(),
        pnl.ControlMechanism(), (0.3, pnl.GATING), (0.3, pnl.CONTROL),
        (0.3, pnl.GATING_SIGNAL), (0.3, pnl.CONTROL_SIGNAL),
        (0.3, pnl.GATING_PROJECTION), (0.3, pnl.CONTROL_PROJECTION),
        (0.3, pnl.GatingSignal), (0.3, pnl.ControlSignal),
        (0.3, pnl.GatingSignal()), (0.3, pnl.ControlSignal()),
        (0.3, pnl.GatingProjection), (0.3, pnl.ControlProjection),
        (0.3, "GP_OBJECT"), (0.3, pnl.GatingMechanism),
        (0.3, pnl.ControlMechanism), (0.3, pnl.GatingMechanism()),
        (0.3, pnl.ControlMechanism())
    ]

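    # As above, pair each gating/control specification with its mirror-image entry so
    # every format is exercised on both an InputPort and an OutputPort.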
    @pytest.mark.parametrize(
        'input_port, output_port',
        list(zip(gating_spec_list, reversed(gating_spec_list))))
    def test_formats_for_gating_specification_of_input_and_output_ports(
            self, input_port, output_port):
        G_IN, G_OUT = input_port, output_port

        # These shenanigans avoid assigning the same instantiated GatingProjection more than once
        if G_IN == 'GP_OBJECT':
            G_IN = pnl.GatingProjection()
        elif isinstance(G_IN, tuple) and G_IN[1] == 'GP_OBJECT':
            G_IN = (G_IN[0], pnl.GatingProjection())
        if G_OUT == 'GP_OBJECT':
            G_OUT = pnl.GatingProjection()
        elif isinstance(G_OUT, tuple) and G_OUT[1] == 'GP_OBJECT':
            G_OUT = (G_OUT[0], pnl.GatingProjection())

        if isinstance(G_IN, tuple):
            IN_NAME = G_IN[1]
        else:
            IN_NAME = G_IN
        IN_CONTROL = pnl.CONTROL in repr(IN_NAME).split(".")[-1].upper()
        if isinstance(G_OUT, tuple):
            OUT_NAME = G_OUT[1]
        else:
            OUT_NAME = G_OUT
        OUT_CONTROL = pnl.CONTROL in repr(OUT_NAME).split(".")[-1].upper()

        T = pnl.TransferMechanism(name='T-GATING',
                                  input_ports=[G_IN],
                                  output_ports=[G_OUT])

        if IN_CONTROL:
            assert T.input_ports[0].mod_afferents[0].name in \
                    'ControlProjection for T-GATING[InputPort-0]'
        else:
            assert T.input_ports[0].mod_afferents[0].name in \
                    'GatingProjection for T-GATING[InputPort-0]'

        if OUT_CONTROL:
            assert T.output_ports[0].mod_afferents[0].name in \
                    'ControlProjection for T-GATING[OutputPort-0]'
        else:
            assert T.output_ports[0].mod_afferents[0].name in \
                    'GatingProjection for T-GATING[OutputPort-0]'

        # with pytest.raises(pnl.ProjectionError) as error_text:
        #     T1 = pnl.ProcessingMechanism(name='T1', input_ports=[pnl.ControlMechanism()])
        # assert 'Primary OutputPort of ControlMechanism-0 (ControlSignal-0) ' \
        #        'cannot be used as a sender of a Projection to InputPort of T1' in error_text.value.args[0]
        #
        # with pytest.raises(pnl.ProjectionError) as error_text:
        #     T2 = pnl.ProcessingMechanism(name='T2', output_ports=[pnl.ControlMechanism()])
        # assert 'Primary OutputPort of ControlMechanism-1 (ControlSignal-0) ' \
        #        'cannot be used as a sender of a Projection to OutputPort of T2' in error_text.value.args[0]

    def test_no_warning_when_matrix_specified(self):

        with pytest.warns(None) as w:
            c = pnl.Composition()
            m0 = pnl.ProcessingMechanism(default_variable=[0, 0, 0, 0])
            p0 = pnl.MappingProjection(matrix=[[0, 0, 0, 0], [0, 0, 0, 0],
                                               [0, 0, 0, 0], [0, 0, 0, 0]])
            m1 = pnl.TransferMechanism(default_variable=[0, 0, 0, 0])
            c.add_linear_processing_pathway([m0, p0, m1])
            for warn in w:
                if r'elementwise comparison failed; returning scalar instead' in warn.message.args[0]:
                    pytest.fail('unexpected numpy elementwise-comparison warning was raised')

    # KDM: this is a good candidate for pytest.parametrize
    def test_masked_mapping_projection(self):

        t1 = pnl.TransferMechanism(size=2)
        t2 = pnl.TransferMechanism(size=2)
        proj = pnl.MaskedMappingProjection(sender=t1,
                                           receiver=t2,
                                           matrix=[[1, 2], [3, 4]],
                                           mask=[[1, 0], [0, 1]],
                                           mask_operation=pnl.ADD)
        c = pnl.Composition(pathways=[[t1, proj, t2]])
        val = c.execute(inputs={t1: [1, 2]})
        assert np.allclose(val, [[8, 12]])

        t1 = pnl.TransferMechanism(size=2)
        t2 = pnl.TransferMechanism(size=2)
        proj = pnl.MaskedMappingProjection(sender=t1,
                                           receiver=t2,
                                           matrix=[[1, 2], [3, 4]],
                                           mask=[[1, 0], [0, 1]],
                                           mask_operation=pnl.MULTIPLY)
        c = pnl.Composition(pathways=[[t1, proj, t2]])
        val = c.execute(inputs={t1: [1, 2]})
        assert np.allclose(val, [[1, 8]])

        t1 = pnl.TransferMechanism(size=2)
        t2 = pnl.TransferMechanism(size=2)
        proj = pnl.MaskedMappingProjection(sender=t1,
                                           receiver=t2,
                                           mask=[[1, 2], [3, 4]],
                                           mask_operation=pnl.MULTIPLY)
        c = pnl.Composition(pathways=[[t1, proj, t2]])
        val = c.execute(inputs={t1: [1, 2]})
        assert np.allclose(val, [[1, 8]])

    def test_masked_mapping_projection_mask_conflicts_with_matrix(self):

        with pytest.raises(pnl.MaskedMappingProjectionError) as error_text:

            t1 = pnl.TransferMechanism(size=2)
            t2 = pnl.TransferMechanism(size=2)
            pnl.MaskedMappingProjection(sender=t1,
                                        receiver=t2,
                                        mask=[[1, 2, 3], [4, 5, 6]],
                                        mask_operation=pnl.MULTIPLY)
        assert "Shape of the 'mask'" in str(error_text.value)
        assert "((2, 3)) must be the same as its 'matrix' ((2, 2))" in str(
            error_text.value)

    # FIX 7/22/15 [JDC] - REPLACE WITH MORE ELABORATE TESTS OF DUPLICATE PROJECTIONS:
    #                     SAME FROM OutputPort;  SAME TO InputPort
    #                     TEST ERROR MESSAGES GENERATED BY VARIOUS _check_for_duplicates METHODS
    # def test_duplicate_projection_detection_and_warning(self):
    #
    #     with pytest.warns(UserWarning) as record:
    #         T1 = pnl.TransferMechanism(name='T1')
    #         T2 = pnl.TransferMechanism(name='T2')
    #         T3 = pnl.TransferMechanism(name='T3')
    #         T4 = pnl.TransferMechanism(name='T4')
    #
    #         MP1 = pnl.MappingProjection(sender=T1,receiver=T2,name='MP1')
    #         MP2 = pnl.MappingProjection(sender=T1,receiver=T2,name='MP2')
    #         pnl.proc(T1,MP1,T2,T3)
    #         pnl.proc(T1,MP2,T2,T4)
    #
    #     # hack to find a specific warning (other warnings may be generated by the Process construction)
    #     correct_message_found = False
    #     for warning in record:
    #         if "that already has an identical Projection" in str(warning.message):
    #             correct_message_found = True
    #             break
    #
    #     assert len(T2.afferents)==1
    #     assert correct_message_found

    def test_duplicate_projection_creation_error(self):

        from psyneulink.core.components.projections.projection import DuplicateProjectionError
        with pytest.raises(DuplicateProjectionError) as record:
            T1 = pnl.TransferMechanism(name='T1')
            T2 = pnl.TransferMechanism(name='T2')
            pnl.MappingProjection(sender=T1, receiver=T2, name='MP1')
            pnl.MappingProjection(sender=T1, receiver=T2, name='MP2')
        assert 'Attempt to assign Projection to InputPort-0 of T2 that already has an identical Projection.' \
               in record.value.args[0]
Example #20
def get_stroop_model(unit_noise_std=.01, dec_noise_std=.1):
    # model params
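    # NOTE: N_UNITS is assumed to be a module-level constant defined earlier in the
    # source script (presumably 2, to match the 2x2 projection matrices below).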
    integration_rate = 1

    hidden_func = pnl.Logistic(gain=1.0, x_0=4.0)

    # input layer, color and word
    reward = pnl.TransferMechanism(name='reward')

    punish = pnl.TransferMechanism(name='punish')

    inp_clr = pnl.TransferMechanism(
        size=N_UNITS, function=pnl.Linear, name='COLOR INPUT'
    )
    inp_wrd = pnl.TransferMechanism(
        size=N_UNITS, function=pnl.Linear, name='WORD INPUT'
    )
    # task layer, represent the task instruction; color naming / word reading
    inp_task = pnl.TransferMechanism(
        size=N_UNITS, function=pnl.Linear, name='TASK'
    )
    # hidden layer for color and word
    hid_clr = pnl.TransferMechanism(
        size=N_UNITS,
        function=hidden_func,
        integrator_mode=True,
        integration_rate=integration_rate,
        # noise=pnl.NormalDist(standard_deviation=unit_noise_std).function,
        noise=pnl.NormalDist(standard_deviation=unit_noise_std),
        name='COLORS HIDDEN'
    )
    hid_wrd = pnl.TransferMechanism(
        size=N_UNITS,
        function=hidden_func,
        integrator_mode=True,
        integration_rate=integration_rate,
        # noise=pnl.NormalDist(standard_deviation=unit_noise_std).function,
        noise=pnl.NormalDist(standard_deviation=unit_noise_std),
        name='WORDS HIDDEN'
    )
    # output layer
    output = pnl.TransferMechanism(
        size=N_UNITS,
        function=pnl.Logistic,
        integrator_mode=True,
        integration_rate=integration_rate,
        # noise=pnl.NormalDist(standard_deviation=unit_noise_std).function,
        noise=pnl.NormalDist(standard_deviation=unit_noise_std),
        name='OUTPUT'
    )
    # decision layer, some accumulator

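    # grid of candidate control-signal intensities searched by the controller's GridSearch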
    signalSearchRange = pnl.SampleSpec(start=0.05, stop=5, step=0.05)

    decision = pnl.DDM(name='Decision',
                       input_format=pnl.ARRAY,
                       function=pnl.DriftDiffusionAnalytical(drift_rate=1,
                                                             threshold=1,
                                                             noise=1,
                                                             starting_point=0,
                                                             t0=0.35),
                       output_ports=[pnl.RESPONSE_TIME,
                                     pnl.PROBABILITY_UPPER_THRESHOLD,
                                     pnl.PROBABILITY_LOWER_THRESHOLD]
                       )

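    # Control signal over the SLOPE of the color-input mechanism; scaling the color
    # input presumably serves to modulate the effective drift rate of the decision
    # process (hence the name driftrate_control_signal).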
    driftrate_control_signal = pnl.ControlSignal(projections=[(pnl.SLOPE, inp_clr)],
                                                 variable=1.0,
                                                 intensity_cost_function=pnl.Exponential(rate=1),  # alternative: pnl.Exponential(rate=0.8)
                                                 allocation_samples=signalSearchRange)


    threshold_control_signal = pnl.ControlSignal(projections=[(pnl.THRESHOLD, decision)],
                                                 variable=1.0,
                                                 intensity_cost_function=pnl.Linear(slope=0),
                                                 allocation_samples=signalSearchRange)


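    # reward rate = reward x P(upper threshold) / response time
    # (PRODUCT over the three monitored values with exponents [1, 1, -1])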
    reward_rate = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.PRODUCT,
                                                                        exponents=[[1],[1],[-1]]),
                                         monitor=[reward,
                                                  decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD],
                                                  decision.output_ports[pnl.RESPONSE_TIME]])

    punish_rate = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.PRODUCT,
                                                                        exponents=[[1],[1],[-1]]),
                                         monitor=[punish,
                                                  decision.output_ports[pnl.PROBABILITY_LOWER_THRESHOLD],
                                                  decision.output_ports[pnl.RESPONSE_TIME]])

    objective_mech = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.SUM,
                                                                           weights=[[1],[-1]]),
                                            monitor=[reward_rate, punish_rate])
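    # net outcome fed to the controller: reward rate minus punishment rate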

    # objective_mech = pnl.ObjectiveMechanism(function=object_function,
    #                                         monitor=[reward,
    #                                                  punish,
    #                                                  decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD],
    #                                                  decision.output_ports[pnl.PROBABILITY_LOWER_THRESHOLD],
    #                                                  (decision.output_ports[pnl.RESPONSE_TIME])])



    # PROJECTIONS, weights copied from cohen et al (1990)
    wts_clr_ih = pnl.MappingProjection(
        matrix=[[2.2, -2.2], [-2.2, 2.2]], name='COLOR INPUT TO HIDDEN')
    wts_wrd_ih = pnl.MappingProjection(
        matrix=[[2.6, -2.6], [-2.6, 2.6]], name='WORD INPUT TO HIDDEN')
    wts_clr_ho = pnl.MappingProjection(
        matrix=[[1.3, -1.3], [-1.3, 1.3]], name='COLOR HIDDEN TO OUTPUT')
    wts_wrd_ho = pnl.MappingProjection(
        matrix=[[2.5, -2.5], [-2.5, 2.5]], name='WORD HIDDEN TO OUTPUT')
    wts_tc = pnl.MappingProjection(
        matrix=[[4.0, 4.0], [0, 0]], name='COLOR NAMING')
    wts_tw = pnl.MappingProjection(
        matrix=[[0, 0], [4.0, 4.0]], name='WORD READING')


    # build the model
    model = pnl.Composition(name='STROOP model')

    model.add_node(decision, required_roles=pnl.NodeRole.OUTPUT)
    model.add_node(reward, required_roles=pnl.NodeRole.OUTPUT)
    model.add_node(punish, required_roles=pnl.NodeRole.OUTPUT)


    model.add_linear_processing_pathway([inp_clr, wts_clr_ih, hid_clr])
    model.add_linear_processing_pathway([inp_wrd, wts_wrd_ih, hid_wrd])
    model.add_linear_processing_pathway([hid_clr, wts_clr_ho, output])
    model.add_linear_processing_pathway([hid_wrd, wts_wrd_ho, output])
    model.add_linear_processing_pathway([inp_task, wts_tc, hid_clr])
    model.add_linear_processing_pathway([inp_task, wts_tw, hid_wrd])
    model.add_linear_processing_pathway([output, pnl.IDENTITY_MATRIX, decision])  # 3/15/20
    # model.add_linear_processing_pathway([output, [[1,-1]], (decision, pnl.NodeRole.OUTPUT)])   # 3/15/20
    # model.add_linear_processing_pathway([output, [[1],[-1]], decision])   # 3/15/20

    model.add_nodes([reward_rate, punish_rate])

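    # Model-based controller: simulates the composition (agent_rep=model) for each
    # combination of control-signal intensities in the allocation grid and applies
    # the combination that optimizes (by default, maximizes) the objective
    # mechanism's output.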
    controller = pnl.OptimizationControlMechanism(agent_rep=model,
                                                  features=[inp_clr.input_port,
                                                            inp_wrd.input_port,
                                                            inp_task.input_port,
                                                            reward.input_port,
                                                            punish.input_port],
                                                  feature_function=pnl.AdaptiveIntegrator(rate=0.1),
                                                  objective_mechanism=objective_mech,
                                                  function=pnl.GridSearch(),
                                                  control_signals=[driftrate_control_signal,
                                                                   threshold_control_signal])

    model.add_controller(controller=controller)

    # collect the node handles
    nodes = [inp_clr, inp_wrd, inp_task, hid_clr, hid_wrd, output, decision, reward, punish, controller]
    metadata = [integration_rate, dec_noise_std, unit_noise_std]
    return model, nodes, metadata

Example #21

# set up inner comp controller and add to comp
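# NOTE: icomp, ia, ib and ic are assumed to be defined earlier in the source script
# (an inner Composition containing mechanisms ia, ib and ic).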
icomp.add_controller(
        pnl.OptimizationControlMechanism(
                agent_rep=icomp,
                features=[ia.input_port, ib.input_port],
                name="Controller",
                objective_mechanism=pnl.ObjectiveMechanism(
                        monitor=ic.output_port,
                        function=pnl.SimpleIntegrator,
                        name="iController Objective Mechanism"
                ),
                function=pnl.GridSearch(direction=pnl.MAXIMIZE),
                control_signals=[pnl.ControlSignal(projections=[(pnl.SLOPE, ia)],
                                                   variable=1.0,
                                                   intensity_cost_function=pnl.Linear(slope=0.0),
                                                   allocation_samples=pnl.SampleSpec(start=1.0,
                                                                                     stop=5.0,
                                                                                     num=5))])
)

# instantiate outer comp
ocomp = pnl.Composition(name='ocomp', controller_mode=pnl.BEFORE)

# setup structure for outer comp
ocomp.add_node(icomp)

# add controller to outer comp
ocomp.add_controller(
        pnl.OptimizationControlMechanism(
                agent_rep=ocomp,
                features=[ia.input_port, ib.input_port],
Example #22
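# NOTE: stabilityFlexibility, ddmCombination, decisionMaker, activation, taskLayer
# and stimulusInfo are assumed to be defined earlier in the source script from which
# this excerpt was taken.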
stabilityFlexibility.add_projection(sender=ddmCombination,
                                    receiver=decisionMaker)

# Beginning of Controller

# Grid Search Range
# searchRange = pnl.SampleSpec(start=0.25, stop=4.0, num=16)
# searchRange = pnl.SampleSpec(start=1.0, stop=1.9, num=10)

searchRange = pnl.SampleSpec(start=0.5, stop=5.0, num=10)

# Modulate the GAIN parameter of the activation layer
# Initialize the intensity cost function to 0
signal = pnl.ControlSignal(projections=[(pnl.GAIN, activation)],
                           function=pnl.Linear,
                           variable=1.0,
                           intensity_cost_function=pnl.Linear(slope=0.0),
                           allocation_samples=searchRange)

# Use the computeAccuracy function to obtain selection values
# Pass in 4 arguments whenever computeRewardRate is called
objectiveMechanism = pnl.ObjectiveMechanism(
    monitor=[
        taskLayer, stimulusInfo,
        (pnl.PROBABILITY_UPPER_THRESHOLD, decisionMaker),
        (pnl.PROBABILITY_LOWER_THRESHOLD, decisionMaker)
    ],
    function=pnl.AccuracyIntegrator,
    name="Controller Objective Mechanism")
objectiveMechanism.set_log_conditions(items=pnl.VALUE)