Example #1
Umemoto_comp.add_linear_processing_pathway(FlankerAutomatic_pathway)

Reward_pathway = [Reward]
Umemoto_comp.add_linear_processing_pathway(Reward_pathway)

Umemoto_comp.add_node(Decision,
                      # required_roles=pnl.NodeRole.OUTPUT
                      )

# CONTROL SIGNALS
Target_Rep_Control_Signal = pnl.ControlSignal(
    modulates=[(pnl.SLOPE, Target_Rep)],
    function=pnl.Linear,
    variable=1.0,
    cost_options=[pnl.CostFunctions.INTENSITY, pnl.CostFunctions.ADJUSTMENT],
    intensity_cost_function=pnl.Exponential(scale=1, rate=1),
    # compute_reconfiguration_cost=pnl.Distance(metric=pnl.EUCLIDEAN),
    # adjustment_cost_function=pnl.Exponential(scale=1, rate=1, offset=-1),#offset = -1
    allocation_samples=signalSearchRange)

Distractor_Rep_Control_Signal = pnl.ControlSignal(
    modulates=[(pnl.SLOPE, Distractor_Rep)],
    function=pnl.Linear,
    variable=1.0,
    cost_options=[pnl.CostFunctions.INTENSITY, pnl.CostFunctions.ADJUSTMENT],
    intensity_cost_function=pnl.Exponential(scale=1, rate=1),
    # adjustment_cost_function=pnl.Exponential(scale=1, rate=1, offset=-1),  # offset = -1
    allocation_samples=signalSearchRange)
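
# Both signals modulate the SLOPE of their target mechanism's Linear function;
# with the INTENSITY and ADJUSTMENT cost options enabled, each signal is charged
# both for how intense it is and for how much it changes between allocations.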

Umemoto_comp.add_model_based_optimizer(
        max_iterations=100
        # save_samples=True,
        # save_values=True,
        # direction=pnl.ASCENT
    ),
    # opt control alloc used to compute ctrl sigs
    control_signals=[
        pnl.ControlSignal(
            default_allocation=default_control_signal,
            modulates=[(pnl.SLOPE, color_task), ('color_control', word_task)],
            # function=pnl.ReLU,
            # function=pnl.Logistic,
            cost_options=[
                pnl.CostFunctions.INTENSITY, pnl.CostFunctions.ADJUSTMENT
            ],
            intensity_cost_function=pnl.Exponential(rate=0.25,
                                                    bias=-1),  # 0.25, -3
            # adjustment_cost_function=pnl.Exponential(rate=.25, bias=-1), # 0.25, -3
            # adjustment_cost_function=lambda x: np.exp(.25 * np.abs(x) - 1),
            adjustment_cost_function=adj_cost_fct,
            # intensity_cost_function=pnl.Linear(slope=0, intercept=0), # 0.25, -3
            # adjustment_cost_function=pnl.Linear(slope=0, intercept=0), # 0.25, -3
            allocation_samples=control_signal_range
            # allocation_samples = np.arange(0.1, 1.01, 0.3)
            # allocation_samples=[i / 2 for i in list(range(0, 50, 1))]
        )
    ])

lvoc.set_log_conditions('value')
# lvoc.set_log_conditions('features')
# print("LVOC loggable: ", lvoc.loggable_items)
# lvoc.set_log_conditions('variable')
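
# A minimal follow-up sketch, assuming the composition containing lvoc has
# already been run (the run call is not shown in this excerpt):
lvoc.log.print_entries()                        # human-readable dump of the logged 'value' entries
logged_values = lvoc.log.nparray_dictionary()   # same entries as arrays, keyed by execution context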
Example #3
def get_stroop_model(unit_noise_std=.01, dec_noise_std=.1):
    # model params
    integration_rate = 1

    hidden_func = pnl.Logistic(gain=1.0, x_0=4.0)

    # reward and punishment signals
    reward = pnl.TransferMechanism(name='reward')

    punish = pnl.TransferMechanism(name='punish')

    # input layer, color and word
    inp_clr = pnl.TransferMechanism(
        size=N_UNITS, function=pnl.Linear, name='COLOR INPUT'
    )
    inp_wrd = pnl.TransferMechanism(
        size=N_UNITS, function=pnl.Linear, name='WORD INPUT'
    )
    # task layer, represent the task instruction; color naming / word reading
    inp_task = pnl.TransferMechanism(
        size=N_UNITS, function=pnl.Linear, name='TASK'
    )
    # hidden layer for color and word
    hid_clr = pnl.TransferMechanism(
        size=N_UNITS,
        function=hidden_func,
        integrator_mode=True,
        integration_rate=integration_rate,
        # noise=pnl.NormalDist(standard_deviation=unit_noise_std).function,
        noise=pnl.NormalDist(standard_deviation=unit_noise_std),
        name='COLORS HIDDEN'
    )
    hid_wrd = pnl.TransferMechanism(
        size=N_UNITS,
        function=hidden_func,
        integrator_mode=True,
        integration_rate=integration_rate,
        # noise=pnl.NormalDist(standard_deviation=unit_noise_std).function,
        noise=pnl.NormalDist(standard_deviation=unit_noise_std),
        name='WORDS HIDDEN'
    )
    # output layer
    output = pnl.TransferMechanism(
        size=N_UNITS,
        function=pnl.Logistic,
        integrator_mode=True,
        integration_rate=integration_rate,
        # noise=pnl.NormalDist(standard_deviation=unit_noise_std).function,
        noise=pnl.NormalDist(standard_deviation=unit_noise_std),
        name='OUTPUT'
    )
    # decision layer, some accumulator

    signalSearchRange = pnl.SampleSpec(start=0.05, stop=5, step=0.05)

    decision = pnl.DDM(name='Decision',
                       input_format=pnl.ARRAY,
                       function=pnl.DriftDiffusionAnalytical(drift_rate=1,
                                                             threshold=1,
                                                             noise=1,
                                                             starting_point=0,
                                                             t0=0.35),
                       output_ports=[pnl.RESPONSE_TIME,
                                     pnl.PROBABILITY_UPPER_THRESHOLD,
                                     pnl.PROBABILITY_LOWER_THRESHOLD]
                       )

    driftrate_control_signal = pnl.ControlSignal(projections=[(pnl.SLOPE, inp_clr)],
                                                 variable=1.0,
                                                 intensity_cost_function=pnl.Exponential(rate=1),  # alternative tried: pnl.Exponential(rate=0.8)
                                                 allocation_samples=signalSearchRange)


    threshold_control_signal = pnl.ControlSignal(projections=[(pnl.THRESHOLD, decision)],
                                                 variable=1.0,
                                                 intensity_cost_function=pnl.Linear(slope=0),
                                                 allocation_samples=signalSearchRange)
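
    # The controller added below uses GridSearch over signalSearchRange to
    # evaluate allocations for both of these control signals jointly.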


    reward_rate = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.PRODUCT,
                                                                        exponents=[[1],[1],[-1]]),
                                         monitor=[reward,
                                                  decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD],
                                                  decision.output_ports[pnl.RESPONSE_TIME]])

    punish_rate = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.PRODUCT,
                                                                        exponents=[[1],[1],[-1]]),
                                         monitor=[punish,
                                                  decision.output_ports[pnl.PROBABILITY_LOWER_THRESHOLD],
                                                  decision.output_ports[pnl.RESPONSE_TIME]])
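
    # With a PRODUCT combination and exponents [1, 1, -1], reward_rate and punish_rate
    # compute reward * P(upper threshold) / RT and punish * P(lower threshold) / RT, respectively.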

    objective_mech = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.SUM,
                                                                           weights=[[1],[-1]]),
                                            monitor=[reward_rate, punish_rate])

    # objective_mech = pnl.ObjectiveMechanism(function=object_function,
    #                                         monitor=[reward,
    #                                                  punish,
    #                                                  decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD],
    #                                                  decision.output_ports[pnl.PROBABILITY_LOWER_THRESHOLD],
    #                                                  (decision.output_ports[pnl.RESPONSE_TIME])])



    # PROJECTIONS, weights copied from cohen et al (1990)
    wts_clr_ih = pnl.MappingProjection(
        matrix=[[2.2, -2.2], [-2.2, 2.2]], name='COLOR INPUT TO HIDDEN')
    wts_wrd_ih = pnl.MappingProjection(
        matrix=[[2.6, -2.6], [-2.6, 2.6]], name='WORD INPUT TO HIDDEN')
    wts_clr_ho = pnl.MappingProjection(
        matrix=[[1.3, -1.3], [-1.3, 1.3]], name='COLOR HIDDEN TO OUTPUT')
    wts_wrd_ho = pnl.MappingProjection(
        matrix=[[2.5, -2.5], [-2.5, 2.5]], name='WORD HIDDEN TO OUTPUT')
    wts_tc = pnl.MappingProjection(
        matrix=[[4.0, 4.0], [0, 0]], name='COLOR NAMING')
    wts_tw = pnl.MappingProjection(
        matrix=[[0, 0], [4.0, 4.0]], name='WORD READING')


    # build the model
    model = pnl.Composition(name='STROOP model')

    model.add_node(decision, required_roles=pnl.NodeRole.OUTPUT)
    model.add_node(reward, required_roles=pnl.NodeRole.OUTPUT)
    model.add_node(punish, required_roles=pnl.NodeRole.OUTPUT)


    model.add_linear_processing_pathway([inp_clr, wts_clr_ih, hid_clr])
    model.add_linear_processing_pathway([inp_wrd, wts_wrd_ih, hid_wrd])
    model.add_linear_processing_pathway([hid_clr, wts_clr_ho, output])
    model.add_linear_processing_pathway([hid_wrd, wts_wrd_ho, output])
    model.add_linear_processing_pathway([inp_task, wts_tc, hid_clr])
    model.add_linear_processing_pathway([inp_task, wts_tw, hid_wrd])
    model.add_linear_processing_pathway([output, pnl.IDENTITY_MATRIX, decision])  # 3/15/20
    # model.add_linear_processing_pathway([output, [[1,-1]], (decision, pnl.NodeRole.OUTPUT)])   # 3/15/20
    # model.add_linear_processing_pathway([output, [[1],[-1]], decision])   # 3/15/20

    model.add_nodes([reward_rate, punish_rate])

    controller = pnl.OptimizationControlMechanism(agent_rep=model,
                                                  features=[inp_clr.input_port,
                                                            inp_wrd.input_port,
                                                            inp_task.input_port,
                                                            reward.input_port,
                                                            punish.input_port],
                                                  feature_function=pnl.AdaptiveIntegrator(rate=0.1),
                                                  objective_mechanism=objective_mech,
                                                  function=pnl.GridSearch(),
                                                  control_signals=[driftrate_control_signal,
                                                                   threshold_control_signal])

    model.add_controller(controller=controller)

    # collect the node handles
    nodes = [inp_clr, inp_wrd, inp_task, hid_clr, hid_wrd, output, decision, reward, punish, controller]
    metadata = [integration_rate, dec_noise_std, unit_noise_std]
    return model, nodes, metadata
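
# A minimal usage sketch for the function above. The input values below are
# illustrative assumptions (N_UNITS is assumed to be 2), not taken from any
# particular experiment.
model, nodes, metadata = get_stroop_model()
(inp_clr, inp_wrd, inp_task,
 hid_clr, hid_wrd, output, decision, reward, punish, controller) = nodes

input_dict = {
    inp_clr: [[1, 0]],    # hypothetical color input ("red" unit on)
    inp_wrd: [[0, 1]],    # hypothetical word input (incongruent word)
    inp_task: [[1, 0]],   # hypothetical task cue (color naming)
    reward: [[1]],        # hypothetical reward signal
    punish: [[0]],        # hypothetical punishment signal
}
model.run(inputs=input_dict, num_trials=1)
print(model.results)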
Example #4
Reward_pathway = [Reward]
Umemoto_comp.add_linear_processing_pathway(Reward_pathway)

Umemoto_comp.add_node(Decision,
                      # required_roles=pnl.NodeRole.OUTPUT
                      )

# CONTROL SIGNALS
Target_Rep_Control_Signal = pnl.ControlSignal(
    projections=[(pnl.SLOPE, Target_Rep)],
    function=pnl.Linear,
    variable=1.0,
    cost_options=[
        pnl.ControlSignalCosts.INTENSITY, pnl.ControlSignalCosts.ADJUSTMENT
    ],
    intensity_cost_function=pnl.Exponential(scale=1, rate=1),
    adjustment_cost_function=pnl.Exponential(scale=1, rate=1, offset=-1),
    allocation_samples=signalSearchRange)

Distractor_Rep_Control_Signal = pnl.ControlSignal(
    projections=[(pnl.SLOPE, Distractor_Rep)],
    function=pnl.Linear,
    variable=1.0,
    cost_options=[
        pnl.ControlSignalCosts.INTENSITY, pnl.ControlSignalCosts.ADJUSTMENT
    ],
    intensity_cost_function=pnl.Exponential(rate=0.8046),
    adjustment_cost_function=pnl.Exponential(scale=1, rate=1, offset=-1),
    allocation_samples=signalSearchRange)

Umemoto_comp.add_model_based_optimizer(
Example #5
A = pnl.TransferMechanism(name='A',
                          function=pnl.Linear(intercept=2.0,
                                              slope=5.0,
                                              default_variable=[[0]]),
                          initial_value=[[0]],
                          termination_measure=pnl.Distance(
                              metric=pnl.MAX_ABS_DIFF,
                              default_variable=[[[0]], [[0]]]))
B = pnl.TransferMechanism(name='B',
                          function=pnl.Logistic(default_variable=[[0]]),
                          initial_value=[[0]],
                          termination_measure=pnl.Distance(
                              metric=pnl.MAX_ABS_DIFF,
                              default_variable=[[[0]], [[0]]]))
C = pnl.TransferMechanism(name='C',
                          function=pnl.Exponential(default_variable=[[0]]),
                          initial_value=[[0]],
                          termination_measure=pnl.Distance(
                              metric=pnl.MAX_ABS_DIFF,
                              default_variable=[[[0]], [[0]]]))

comp.add_node(A)
comp.add_node(B)
comp.add_node(C)

comp.add_projection(projection=pnl.MappingProjection(
    name='MappingProjection from A[RESULT] to B[InputPort-0]',
    function=pnl.LinearMatrix(matrix=[[1.0]], default_variable=[2.0])),
                    sender=A,
                    receiver=B)
comp.add_projection(projection=pnl.MappingProjection(
Example #6
    name='EVC Gratton System')

# Show characteristics of system:
mySystem.show()
mySystem.controller.show()

# Show graph of system (with control components)
# mySystem.show_graph(show_dimensions=pnl.ALL, show_projection_labels=True)
mySystem.show_graph(show_control=True, show_projection_labels=False)
# mySystem.show_graph(show_control=True, show_processes=True, show_headers=False)
# mySystem.show_graph(show_control=True, show_mechanism_structure=True, show_headers=False)
# mySystem.show_graph(show_control=True, show_mechanism_structure=True, show_headers=False, show_processes=True)

# configure EVC components
mySystem.controller.control_signals[
    0].intensity_cost_function = pnl.Exponential(rate=0.8046).function
mySystem.controller.control_signals[
    1].intensity_cost_function = pnl.Exponential(rate=0.8046).function

for mech in mySystem.controller.prediction_mechanisms.mechanisms:
    if mech.name == 'Flanker Stimulus Prediction Mechanism' or mech.name == 'Target Stimulus Prediction Mechanism':
        # when a prediction mechanism with one of these names is found, print it and set its rate
        print(mech.name)
        mech.function_object.rate = 1.0

    if 'Reward' in mech.name:
        print(mech.name)
        mech.function_object.rate = 0.8
        # mySystem.controller.prediction_mechanisms[mech].parameterStates['rate'].base_value = 1.0

print('new rate of integration mechanisms before System execution:')
Example #7
    def test_evc_gratton(self):
        # Stimulus Mechanisms
        target_stim = pnl.TransferMechanism(name='Target Stimulus',
                                            function=pnl.Linear(slope=0.3324))
        flanker_stim = pnl.TransferMechanism(
            name='Flanker Stimulus', function=pnl.Linear(slope=0.3545221843))

        # Processing Mechanisms (Control)
        Target_Rep = pnl.TransferMechanism(name='Target Representation')
        Flanker_Rep = pnl.TransferMechanism(name='Flanker Representation')

        # Processing Mechanism (Automatic)
        Automatic_Component = pnl.TransferMechanism(name='Automatic Component')

        # Decision Mechanism
        Decision = pnl.DDM(name='Decision',
                           function=pnl.DriftDiffusionAnalytical(
                               drift_rate=(1.0),
                               threshold=(0.2645),
                               noise=(0.5),
                               starting_point=(0),
                               t0=0.15),
                           output_states=[
                               pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
                               pnl.PROBABILITY_UPPER_THRESHOLD
                           ])

        # Outcome Mechanism
        reward = pnl.TransferMechanism(name='reward')

        # Pathways
        target_control_pathway = [target_stim, Target_Rep, Decision]
        flanker_control_pathway = [flanker_stim, Flanker_Rep, Decision]
        target_automatic_pathway = [target_stim, Automatic_Component, Decision]
        flanker_automatic_pathway = [
            flanker_stim, Automatic_Component, Decision
        ]
        pathways = [
            target_control_pathway, flanker_control_pathway,
            target_automatic_pathway, flanker_automatic_pathway
        ]

        # Composition
        evc_gratton = pnl.Composition(name="EVCGratton")
        evc_gratton.add_node(Decision, required_roles=pnl.NodeRole.OUTPUT)
        for path in pathways:
            evc_gratton.add_linear_processing_pathway(path)
        evc_gratton.add_node(reward, required_roles=pnl.NodeRole.OUTPUT)

        # Control Signals
        signalSearchRange = pnl.SampleSpec(start=1.0, stop=1.8, step=0.2)

        target_rep_control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, Target_Rep)],
            function=pnl.Linear,
            variable=1.0,
            intensity_cost_function=pnl.Exponential(rate=0.8046),
            allocation_samples=signalSearchRange)

        flanker_rep_control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, Flanker_Rep)],
            function=pnl.Linear,
            variable=1.0,
            intensity_cost_function=pnl.Exponential(rate=0.8046),
            allocation_samples=signalSearchRange)

        objective_mech = pnl.ObjectiveMechanism(
            function=pnl.LinearCombination(operation=pnl.PRODUCT),
            monitor=[
                reward,
                (Decision.output_states[pnl.PROBABILITY_UPPER_THRESHOLD], 1,
                 -1)
            ])
        # Model Based OCM (formerly controller)
        evc_gratton.add_controller(controller=pnl.OptimizationControlMechanism(
            agent_rep=evc_gratton,
            features=[
                target_stim.input_state, flanker_stim.input_state,
                reward.input_state
            ],
            feature_function=pnl.AdaptiveIntegrator(rate=1.0),
            objective_mechanism=objective_mech,
            function=pnl.GridSearch(),
            control_signals=[
                target_rep_control_signal, flanker_rep_control_signal
            ]))
        evc_gratton.enable_controller = True

        targetFeatures = [1, 1, 1]
        flankerFeatures = [1, -1, 1]
        rewardValues = [100, 100, 100]

        stim_list_dict = {
            target_stim: targetFeatures,
            flanker_stim: flankerFeatures,
            reward: rewardValues
        }

        evc_gratton.run(inputs=stim_list_dict)

        expected_results_array = [[[0.32257752863413636], [0.9481940753514433],
                                   [100.]],
                                  [[0.42963678062444666],
                                   [0.47661180945923376], [100.]],
                                  [[0.300291026852769], [0.97089165101931],
                                   [100.]]]
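
        # 2 control signals x 5 allocation samples each (1.0-1.8, step 0.2)
        # => 25 simulations per trial; 3 trials => the 75 rows below.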

        expected_sim_results_array = [
            [[0.32257753], [0.94819408], [100.]],
            [[0.31663196], [0.95508757], [100.]],
            [[0.31093566], [0.96110142], [100.]],
            [[0.30548947], [0.96633839], [100.]],
            [[0.30029103], [0.97089165], [100.]],
            [[0.3169957], [0.95468427], [100.]],
            [[0.31128378], [0.9607499], [100.]],
            [[0.30582202], [0.96603252], [100.]],
            [[0.30060824], [0.9706259], [100.]],
            [[0.29563774], [0.97461444], [100.]],
            [[0.31163288], [0.96039533], [100.]],
            [[0.30615555], [0.96572397], [100.]],
            [[0.30092641], [0.97035779], [100.]],
            [[0.2959409], [0.97438178], [100.]],
            [[0.29119255], [0.97787196], [100.]],
            [[0.30649004], [0.96541272], [100.]],
            [[0.30124552], [0.97008732], [100.]],
            [[0.29624499], [0.97414704], [100.]],
            [[0.29148205], [0.97766847], [100.]],
            [[0.28694892], [0.98071974], [100.]],
            [[0.30156558], [0.96981445], [100.]],
            [[0.29654999], [0.97391021], [100.]],
            [[0.29177245], [0.97746315], [100.]],
            [[0.28722523], [0.98054192], [100.]],
            [[0.28289958], [0.98320731], [100.]],
            [[0.42963678], [0.47661181], [100.]],
            [[0.42846471], [0.43938586], [100.]],
            [[0.42628176], [0.40282965], [100.]],
            [[0.42314468], [0.36732207], [100.]],
            [[0.41913221], [0.333198], [100.]],
            [[0.42978939], [0.51176048], [100.]],
            [[0.42959394], [0.47427693], [100.]],
            [[0.4283576], [0.43708106], [100.]],
            [[0.4261132], [0.40057958], [100.]],
            [[0.422919], [0.36514906], [100.]],
            [[0.42902209], [0.54679323], [100.]],
            [[0.42980788], [0.50942101], [100.]],
            [[0.42954704], [0.47194318], [100.]],
            [[0.42824656], [0.43477897], [100.]],
            [[0.42594094], [0.3983337], [100.]],
            [[0.42735293], [0.58136855], [100.]],
            [[0.42910149], [0.54447221], [100.]],
            [[0.42982229], [0.50708112], [100.]],
            [[0.42949608], [0.46961065], [100.]],
            [[0.42813159], [0.43247968], [100.]],
            [[0.42482049], [0.61516258], [100.]],
            [[0.42749136], [0.57908829], [100.]],
            [[0.42917687], [0.54214925], [100.]],
            [[0.42983261], [0.50474093], [100.]],
            [[0.42944107], [0.46727945], [100.]],
            [[0.32257753], [0.94819408], [100.]],
            [[0.31663196], [0.95508757], [100.]],
            [[0.31093566], [0.96110142], [100.]],
            [[0.30548947], [0.96633839], [100.]],
            [[0.30029103], [0.97089165], [100.]],
            [[0.3169957], [0.95468427], [100.]],
            [[0.31128378], [0.9607499], [100.]],
            [[0.30582202], [0.96603252], [100.]],
            [[0.30060824], [0.9706259], [100.]],
            [[0.29563774], [0.97461444], [100.]],
            [[0.31163288], [0.96039533], [100.]],
            [[0.30615555], [0.96572397], [100.]],
            [[0.30092641], [0.97035779], [100.]],
            [[0.2959409], [0.97438178], [100.]],
            [[0.29119255], [0.97787196], [100.]],
            [[0.30649004], [0.96541272], [100.]],
            [[0.30124552], [0.97008732], [100.]],
            [[0.29624499], [0.97414704], [100.]],
            [[0.29148205], [0.97766847], [100.]],
            [[0.28694892], [0.98071974], [100.]],
            [[0.30156558], [0.96981445], [100.]],
            [[0.29654999], [0.97391021], [100.]],
            [[0.29177245], [0.97746315], [100.]],
            [[0.28722523], [0.98054192], [100.]],
            [[0.28289958], [0.98320731], [100.]],
        ]

        for trial in range(len(evc_gratton.results)):
            assert np.allclose(
                expected_results_array[trial],
                # Note: Skip decision variable OutputState
                evc_gratton.results[trial][1:])
        for simulation in range(len(evc_gratton.simulation_results)):
            assert np.allclose(
                expected_sim_results_array[simulation],
                # Note: Skip decision variable OutputState
                evc_gratton.simulation_results[simulation][1:])
        prediction_terms=[pnl.PV.FC, pnl.PV.COST]),
    function=pnl.GradientOptimization(
        convergence_criterion=pnl.VALUE,
        convergence_threshold=0.001,  #0.001
        step_size=1,  #1
        annealing_function=lambda x, y: x / np.sqrt(y)
        # direction=pnl.ASCENT
    ),
    control_signals=pnl.ControlSignal(
        modulates=[(pnl.SLOPE, color_task), ('color_control', word_task)],
        # function=pnl.ReLU,
        function=pnl.Logistic,
        cost_options=[
            pnl.CostFunctions.INTENSITY, pnl.CostFunctions.ADJUSTMENT
        ],
        intensity_cost_function=pnl.Exponential(rate=0.25, bias=-3),
        adjustment_cost_function=pnl.Exponential(rate=0.25, bias=-3),
        # allocation_samples=[i / 2 for i in list(range(0, 50, 1))]
    ))

# print(lvoc.loggable_items)
lvoc.set_log_conditions('value')
# print(lvoc.loggable_items)
# lvoc.set_log_conditions('variable')
# lvoc.agent_rep.set_log_conditions('regression_weights')

lvoc.reportOutputPref = True

c.add_node(lvoc)

# c.show_graph(show_node_structure=pnl.ALL)
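
# A minimal follow-up sketch, assuming `input_dict` maps each INPUT node of c
# to its trial inputs (the actual inputs are not shown in this excerpt):
# c.run(inputs=input_dict)
# print(c.results)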