Example #1
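Parametrized benchmark test that wraps LinearCombination in a ProcessingMechanism and compares SUM/PRODUCT results against a NumPy reference across Python, LLVM, and PTX execution modes.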
# Imports assumed by this snippet:
import numpy as np
import psyneulink as pnl
import psyneulink.core.llvm as pnlvm


def test_linear_combination_function_in_mechanism(operation, input,
                                                  input_ports, scale, offset,
                                                  benchmark, mode):
    f = pnl.LinearCombination(default_variable=input,
                              operation=operation,
                              scale=scale,
                              offset=offset)
    p = pnl.ProcessingMechanism(size=[len(input[0])] * len(input),
                                function=f,
                                input_ports=input_ports)

    if mode == 'Python':
        EX = p.execute
    elif mode == 'LLVM':
        e = pnlvm.execution.MechExecution(p)
        EX = e.execute
    elif mode == 'PTX':
        e = pnlvm.execution.MechExecution(p)
        EX = e.cuda_execute

    res = benchmark(EX, input)

    scale = 1.0 if scale is None else scale
    offset = 0.0 if offset is None else offset
    if operation == pnl.SUM:
        expected = np.sum(input, axis=0) * scale + offset
    if operation == pnl.PRODUCT:
        expected = np.prod(input, axis=0) * scale + offset

    assert np.allclose(res, expected)
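For orientation, LinearCombination can also be exercised standalone; a minimal sketch (illustrative values, assuming only that psyneulink and numpy are installed):

import numpy as np
import psyneulink as pnl

f = pnl.LinearCombination(operation=pnl.SUM, scale=2.0, offset=1.0)
res = f.execute([[1.0, 2.0], [3.0, 4.0]])  # sum across the outer axis: [4., 6.]
assert np.allclose(res, [9.0, 13.0])       # 2 * [4, 6] + 1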
Example #2
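Builds a two-mechanism Composition and checks that an OptimizationControlMechanism's feature_function (a LinearCombination with offset=10.0) is applied to each of its feature input states.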
    def test_lvoc_features_function(self):
        m1 = pnl.TransferMechanism(
            input_states=["InputState A", "InputState B"])
        m2 = pnl.TransferMechanism()
        c = pnl.Composition()
        c.add_node(m1, required_roles=pnl.NodeRole.INPUT)
        c.add_node(m2, required_roles=pnl.NodeRole.INPUT)
        c._analyze_graph()
        lvoc = pnl.OptimizationControlMechanism(
            agent_rep=pnl.RegressionCFA,
            features=[
                m1.input_states[0], m1.input_states[1], m2.input_state, m2
            ],
            feature_function=pnl.LinearCombination(offset=10.0),
            objective_mechanism=pnl.ObjectiveMechanism(monitor=[m1, m2]),
            function=pnl.GradientOptimization(max_iterations=1),
            control_signals=[(pnl.SLOPE, m1), (pnl.SLOPE, m2)])
        c.add_node(lvoc)
        input_dict = {m1: [[1], [1]], m2: [1]}

        c.run(inputs=input_dict)

        assert len(lvoc.input_states) == 5

        for i in range(1, 5):
            assert lvoc.input_states[i].function.offset == 10.0
Example #3
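Tests LinearCombination directly as a Function, exercising weights, exponents, scale, and offset against an equivalent NumPy computation; func_mode selects the execution backend via the suite's pytest helpers.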
# Imports assumed by this snippet (pytest.helpers comes from the suite's helper plugin):
import numpy as np
import psyneulink as pnl
import pytest


def test_linear_combination_function(variable, operation, exponents, weights,
                                     scale, offset, func_mode, benchmark):
    if weights == 'V':
        weights = [[(-1) ** i] for i, v in enumerate(variable)]  # alternating signs; bare -1**i is always -1 by precedence
    if exponents == 'V':
        exponents = [[v[0]] for v in variable]

    f = pnl.LinearCombination(default_variable=variable,
                              operation=operation,
                              exponents=exponents,
                              weights=weights,
                              scale=scale,
                              offset=offset)
    EX = pytest.helpers.get_func_execution(f, func_mode)
    res = benchmark(EX, variable)

    scale = 1.0 if scale is None else scale
    offset = 0.0 if offset is None else offset
    exponent = 1.0 if exponents is None else exponents
    weights = 1.0 if weights is None else weights

    tmp = (variable**exponent) * weights
    if operation == pnl.SUM:
        expected = np.sum(tmp, axis=0) * scale + offset
    if operation == pnl.PRODUCT:
        expected = np.prod(tmp, axis=0) * scale + offset

    assert np.allclose(res, expected)
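The reference computation above is plain NumPy; a small worked instance of the SUM branch (illustrative values only):

import numpy as np

variable = np.array([[1., 2.], [3., 4.]])
weights = np.array([[1.], [-1.]])       # one weight per outer row
tmp = (variable ** 2.0) * weights       # [[1., 4.], [-9., -16.]]
print(np.sum(tmp, axis=0))              # [-8., -12.] (scale=1, offset=0 leave it unchanged)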
Example #4
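Verifies that a 'combine' argument that conflicts with the operation of an InputState's LinearCombination function raises InputStateError.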
    def test_combine_param_conflicting_fct_operation_spec(self):
        with pytest.raises(pnl.InputStateError) as error_text:
            t = pnl.TransferMechanism(
                input_states=pnl.InputState(
                    function=pnl.LinearCombination(operation=pnl.SUM),
                    combine=pnl.PRODUCT))
        assert "Specification of 'combine' argument (PRODUCT) conflicts with specification of 'operation' (SUM) " \
               "for LinearCombination in 'function' argument for InputState" in str(error_text.value)
Example #5
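Shows that a 'combine' specification matching the function's operation is accepted: the two converging pathways are multiplied elementwise, [1, 2] * [3, 4] == [3, 8].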
    def test_combine_param_redundant_fct_constructor_spec(self):
        t1 = pnl.TransferMechanism(size=2)
        t2 = pnl.TransferMechanism(size=2)
        t3 = pnl.TransferMechanism(
            size=2,
            input_states=pnl.InputState(
                function=pnl.LinearCombination(operation=pnl.PRODUCT),
                combine=pnl.PRODUCT))
        p1 = pnl.Process(pathway=[t1, t3])
        p2 = pnl.Process(pathway=[t2, t3])
        s = pnl.System(processes=[p1, p2])
        input_dict = {t1: [1, 2], t2: [3, 4]}
        val = s.run(inputs=input_dict)
        assert np.allclose(val, [[3, 8]])
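Taken together, Examples #4 and #5 suggest that passing combine alone (with no function argument) is the non-redundant way to get the same behavior; a sketch under that assumption:

t3 = pnl.TransferMechanism(
    size=2,
    input_states=pnl.InputState(combine=pnl.PRODUCT))  # operation inferred from combine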
Example #6
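Earlier variant of the in-mechanism test using the pre-port API (input_states); expected values either come from the parametrization or are derived from the NumPy reference.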
# Imports assumed by this snippet:
import numpy as np
import psyneulink as pnl


def test_linear_combination_function_in_mechanism(operation, input, size,
                                                  input_states, scale, offset,
                                                  expected, benchmark):
    f = pnl.LinearCombination(default_variable=input,
                              operation=operation,
                              scale=scale,
                              offset=offset)
    p = pnl.ProcessingMechanism(size=[size] * len(input_states),
                                function=f,
                                input_states=input_states)
    benchmark.group = "CombinationFunction " + pnl.LinearCombination.componentName + " in Mechanism"
    res = benchmark(p.execute, input)  # execute through the mechanism (its function is f)
    if expected is None:
        if operation == pnl.SUM:
            expected = np.sum(input, axis=0) * scale + offset
        if operation == pnl.PRODUCT:
            expected = np.prod(input, axis=0) * scale + offset

    assert np.allclose(res, expected)
Example #7
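Same in-mechanism test as Example #1, but delegating execution-mode selection to the suite's pytest helpers (mech_mode).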
# Imports assumed by this snippet (pytest.helpers comes from the suite's helper plugin):
import numpy as np
import psyneulink as pnl
import pytest


def test_linear_combination_function_in_mechanism(operation, input,
                                                  input_ports, scale, offset,
                                                  benchmark, mech_mode):
    f = pnl.LinearCombination(default_variable=input,
                              operation=operation,
                              scale=scale,
                              offset=offset)
    p = pnl.ProcessingMechanism(size=[len(input[0])] * len(input),
                                function=f,
                                input_ports=input_ports)

    EX = pytest.helpers.get_mech_execution(p, mech_mode)

    res = benchmark(EX, input)

    scale = 1.0 if scale is None else scale
    offset = 0.0 if offset is None else offset
    if operation == pnl.SUM:
        expected = np.sum(input, axis=0) * scale + offset
    if operation == pnl.PRODUCT:
        expected = np.prod(input, axis=0) * scale + offset

    assert np.allclose(res, expected)
Example #8
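Earlier variant of Example #3 that dispatches the Python/LLVM/PTX execution modes by hand instead of through pytest.helpers.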
# Imports assumed by this snippet:
import numpy as np
import psyneulink as pnl
import psyneulink.core.llvm as pnlvm


def test_linear_combination_function(variable, operation, exponents, weights,
                                     scale, offset, mode, benchmark):
    if weights == 'V':
        weights = [[(-1) ** i] for i, v in enumerate(variable)]  # alternating signs; bare -1**i is always -1 by precedence
    if exponents == 'V':
        exponents = [[v[0]] for v in variable]

    f = pnl.LinearCombination(default_variable=variable,
                              operation=operation,
                              exponents=exponents,
                              weights=weights,
                              scale=scale,
                              offset=offset)
    if mode == 'Python':
        EX = f.function
    elif mode == 'LLVM':
        e = pnlvm.execution.FuncExecution(f)
        EX = e.execute
    elif mode == 'PTX':
        e = pnlvm.execution.FuncExecution(f)
        EX = e.cuda_execute

    res = benchmark(EX, variable)

    scale = 1.0 if scale is None else scale
    offset = 0.0 if offset is None else offset
    exponent = 1.0 if exponents is None else exponents
    weights = 1.0 if weights is None else weights

    tmp = (variable**exponent) * weights
    if operation == pnl.SUM:
        expected = np.sum(tmp, axis=0) * scale + offset
    if operation == pnl.PRODUCT:
        expected = np.prod(tmp, axis=0) * scale + offset

    assert np.allclose(res, expected)
Example #9
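Script fragment from an EVC Gratton model: control signals for the target and flanker representations, an ObjectiveMechanism that multiplies reward with the upper-threshold probability (the monitor tuple assigns it weight 1 and exponent -1), and a grid-search OptimizationControlMechanism added as the composition's controller. Target_Rep, Flanker_Rep, Decision, reward, target_stim, flanker_stim, and evc_gratton are defined earlier in the source script.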
signalSearchRange = pnl.SampleSpec(start=1.0, stop=1.8, step=0.2)

target_rep_control_signal = pnl.ControlSignal(
    projections=[(pnl.SLOPE, Target_Rep)],
    variable=1.0,
    intensity_cost_function=pnl.Exponential(rate=0.8046),
    allocation_samples=signalSearchRange)

flanker_rep_control_signal = pnl.ControlSignal(
    projections=[(pnl.SLOPE, Flanker_Rep)],
    variable=1.0,
    intensity_cost_function=pnl.Exponential(rate=0.8046),
    allocation_samples=signalSearchRange)

objective_mech = pnl.ObjectiveMechanism(
    function=pnl.LinearCombination(operation=pnl.PRODUCT),
    monitor=[
        reward, (Decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD], 1, -1)
    ])
# Model Based OCM (formerly controller)
evc_gratton.add_controller(controller=pnl.OptimizationControlMechanism(
    agent_rep=evc_gratton,
    features=[
        target_stim.input_port, flanker_stim.input_port, reward.input_port
    ],
    feature_function=pnl.AdaptiveIntegrator(rate=1.0),
    objective_mechanism=objective_mech,
    function=pnl.GridSearch(),
    control_signals=[target_rep_control_signal, flanker_rep_control_signal]))

evc_gratton.show_graph(show_controller=True)
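Example #10
Node definitions from a Stroop conflict-monitoring model script (linear input layers, a Logistic hidden layer, an LCAMechanism task layer, and a DDM decision stage). The snippet is truncated at both ends by extraction; the opening of the first definition is reconstructed below and flagged in a comment.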
word_input = pnl.ProcessingMechanism(  # opening reconstructed from the parallel
    name="word_input",                 # task_input definition below; original lines lost
    function=pnl.Linear(default_variable=[[0.0, 0.0]]),
    default_variable=[[0.0, 0.0]],
)
word_hidden = pnl.ProcessingMechanism(
    name="word_hidden",
    function=pnl.Logistic(bias=-4.0, default_variable=[[0.0, 0.0]]),
    default_variable=[[0.0, 0.0]],
)
task_input = pnl.ProcessingMechanism(
    name="task_input",
    function=pnl.Linear(default_variable=[[0.0, 0.0]]),
    default_variable=[[0.0, 0.0]],
)
TASK = pnl.LCAMechanism(
    name="TASK",
    combination_function=pnl.LinearCombination(default_variable=[[0.0, 0.0]]),
    function=pnl.Logistic(default_variable=[[0.0, 0.0]]),
    integrator_function=pnl.LeakyCompetingIntegrator(
        name="LeakyCompetingIntegrator_Function_0",
        initializer=[[0.5, 0.5]],
        rate=0.5,
        default_variable=[[0.0, 0.0]],
    ),
    output_ports=["RESULTS"],
    termination_comparison_op=">=",
    default_variable=[[0.0, 0.0]],
)
DECISION = pnl.DDM(
    name="DECISION",
    function=pnl.DriftDiffusionAnalytical(default_variable=[[0.0]]),
    input_ports=[{
Example #11
# Imports assumed by this snippet; N_UNITS is a module-level constant defined
# elsewhere in the script (the 2x2 projection matrices below imply N_UNITS == 2).
import psyneulink as pnl


def get_stroop_model(unit_noise_std=.01, dec_noise_std=.1):
    # model params
    integration_rate = 1

    hidden_func = pnl.Logistic(gain=1.0, x_0=4.0)

    # input layer, color and word
    reward = pnl.TransferMechanism(name='reward')

    punish = pnl.TransferMechanism(name='punish')

    inp_clr = pnl.TransferMechanism(
        size=N_UNITS, function=pnl.Linear, name='COLOR INPUT'
    )
    inp_wrd = pnl.TransferMechanism(
        size=N_UNITS, function=pnl.Linear, name='WORD INPUT'
    )
    # task layer, represent the task instruction; color naming / word reading
    inp_task = pnl.TransferMechanism(
        size=N_UNITS, function=pnl.Linear, name='TASK'
    )
    # hidden layer for color and word
    hid_clr = pnl.TransferMechanism(
        size=N_UNITS,
        function=hidden_func,
        integrator_mode=True,
        integration_rate=integration_rate,
        noise=pnl.NormalDist(standard_deviation=unit_noise_std),
        name='COLORS HIDDEN'
    )
    hid_wrd = pnl.TransferMechanism(
        size=N_UNITS,
        function=hidden_func,
        integrator_mode=True,
        integration_rate=integration_rate,
        noise=pnl.NormalDist(standard_deviation=unit_noise_std),
        name='WORDS HIDDEN'
    )
    # output layer
    output = pnl.TransferMechanism(
        size=N_UNITS,
        function=pnl.Logistic,
        integrator_mode=True,
        integration_rate=integration_rate,
        noise=pnl.NormalDist(standard_deviation=unit_noise_std),
        name='OUTPUT'
    )
    # decision layer, some accumulator

    signalSearchRange = pnl.SampleSpec(start=0.05, stop=5, step=0.05)

    decision = pnl.DDM(name='Decision',
                       input_format=pnl.ARRAY,
                       function=pnl.DriftDiffusionAnalytical(drift_rate=1,
                                                             threshold=1,
                                                             noise=1,
                                                             starting_point=0,
                                                             t0=0.35),
                       output_ports=[pnl.RESPONSE_TIME,
                                     pnl.PROBABILITY_UPPER_THRESHOLD,
                                     pnl.PROBABILITY_LOWER_THRESHOLD]
                       )

    driftrate_control_signal = pnl.ControlSignal(projections=[(pnl.SLOPE, inp_clr)],
                                                 variable=1.0,
                                                 intensity_cost_function=pnl.Exponential(rate=1),
                                                 allocation_samples=signalSearchRange)


    threshold_control_signal = pnl.ControlSignal(projections=[(pnl.THRESHOLD, decision)],
                                                 variable=1.0,
                                                 intensity_cost_function=pnl.Linear(slope=0),
                                                 allocation_samples=signalSearchRange)


    reward_rate = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.PRODUCT,
                                                                        exponents=[[1],[1],[-1]]),
                                         monitor=[reward,
                                                  decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD],
                                                  decision.output_ports[pnl.RESPONSE_TIME]])

    punish_rate = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.PRODUCT,
                                                                        exponents=[[1],[1],[-1]]),
                                         monitor=[punish,
                                                  decision.output_ports[pnl.PROBABILITY_LOWER_THRESHOLD],
                                                  decision.output_ports[pnl.RESPONSE_TIME]])

    objective_mech = pnl.ObjectiveMechanism(function=pnl.LinearCombination(operation=pnl.SUM,
                                                                           weights=[[1],[-1]]),
                                            monitor=[reward_rate, punish_rate])

    # PROJECTIONS, weights copied from cohen et al (1990)
    wts_clr_ih = pnl.MappingProjection(
        matrix=[[2.2, -2.2], [-2.2, 2.2]], name='COLOR INPUT TO HIDDEN')
    wts_wrd_ih = pnl.MappingProjection(
        matrix=[[2.6, -2.6], [-2.6, 2.6]], name='WORD INPUT TO HIDDEN')
    wts_clr_ho = pnl.MappingProjection(
        matrix=[[1.3, -1.3], [-1.3, 1.3]], name='COLOR HIDDEN TO OUTPUT')
    wts_wrd_ho = pnl.MappingProjection(
        matrix=[[2.5, -2.5], [-2.5, 2.5]], name='WORD HIDDEN TO OUTPUT')
    wts_tc = pnl.MappingProjection(
        matrix=[[4.0, 4.0], [0, 0]], name='COLOR NAMING')
    wts_tw = pnl.MappingProjection(
        matrix=[[0, 0], [4.0, 4.0]], name='WORD READING')


    # build the model
    model = pnl.Composition(name='STROOP model')

    model.add_node(decision, required_roles=pnl.NodeRole.OUTPUT)
    model.add_node(reward, required_roles=pnl.NodeRole.OUTPUT)
    model.add_node(punish, required_roles=pnl.NodeRole.OUTPUT)


    model.add_linear_processing_pathway([inp_clr, wts_clr_ih, hid_clr])
    model.add_linear_processing_pathway([inp_wrd, wts_wrd_ih, hid_wrd])
    model.add_linear_processing_pathway([hid_clr, wts_clr_ho, output])
    model.add_linear_processing_pathway([hid_wrd, wts_wrd_ho, output])
    model.add_linear_processing_pathway([inp_task, wts_tc, hid_clr])
    model.add_linear_processing_pathway([inp_task, wts_tw, hid_wrd])
    model.add_linear_processing_pathway([output, pnl.IDENTITY_MATRIX, decision])

    model.add_nodes([reward_rate, punish_rate])

    controller = pnl.OptimizationControlMechanism(agent_rep=model,
                                                  features=[inp_clr.input_port,
                                                            inp_wrd.input_port,
                                                            inp_task.input_port,
                                                            reward.input_port,
                                                            punish.input_port],
                                                  feature_function=pnl.AdaptiveIntegrator(rate=0.1),
                                                  objective_mechanism=objective_mech,
                                                  function=pnl.GridSearch(),
                                                  control_signals=[driftrate_control_signal,
                                                                   threshold_control_signal])

    model.add_controller(controller=controller)

    # collect the node handles
    nodes = [inp_clr, inp_wrd, inp_task, hid_clr, hid_wrd, output, decision, reward, punish, controller]
    metadata = [integration_rate, dec_noise_std, unit_noise_std]
    return model, nodes, metadata
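A hypothetical call of the factory above (input values illustrative; indices follow the return order of nodes):

model, nodes, metadata = get_stroop_model()
inp_clr, inp_wrd, inp_task = nodes[0:3]
reward, punish = nodes[7:9]
model.run(inputs={inp_clr: [[1, 0]], inp_wrd: [[0, 1]], inp_task: [[1, 0]],
                  reward: [[1]], punish: [[0]]})  # one color-naming trial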
Example #12
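Full regression test of the EVC Gratton model: builds the composition, enables the controller, runs three trials, and checks trial and simulation outputs against stored reference values.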
    def test_evc_gratton(self):
        # Stimulus Mechanisms
        target_stim = pnl.TransferMechanism(name='Target Stimulus',
                                            function=pnl.Linear(slope=0.3324))
        flanker_stim = pnl.TransferMechanism(
            name='Flanker Stimulus', function=pnl.Linear(slope=0.3545221843))

        # Processing Mechanisms (Control)
        Target_Rep = pnl.TransferMechanism(name='Target Representation')
        Flanker_Rep = pnl.TransferMechanism(name='Flanker Representation')

        # Processing Mechanism (Automatic)
        Automatic_Component = pnl.TransferMechanism(name='Automatic Component')

        # Decision Mechanism
        Decision = pnl.DDM(name='Decision',
                           function=pnl.DriftDiffusionAnalytical(
                               drift_rate=(1.0),
                               threshold=(0.2645),
                               noise=(0.5),
                               starting_point=(0),
                               t0=0.15),
                           output_states=[
                               pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
                               pnl.PROBABILITY_UPPER_THRESHOLD
                           ])

        # Outcome Mechanism
        reward = pnl.TransferMechanism(name='reward')

        # Pathways
        target_control_pathway = [target_stim, Target_Rep, Decision]
        flanker_control_pathway = [flanker_stim, Flanker_Rep, Decision]
        target_automatic_pathway = [target_stim, Automatic_Component, Decision]
        flanker_automatic_pathway = [
            flanker_stim, Automatic_Component, Decision
        ]
        pathways = [
            target_control_pathway, flanker_control_pathway,
            target_automatic_pathway, flanker_automatic_pathway
        ]

        # Composition
        evc_gratton = pnl.Composition(name="EVCGratton")
        evc_gratton.add_node(Decision, required_roles=pnl.NodeRole.OUTPUT)
        for path in pathways:
            evc_gratton.add_linear_processing_pathway(path)
        evc_gratton.add_node(reward, required_roles=pnl.NodeRole.OUTPUT)

        # Control Signals
        signalSearchRange = pnl.SampleSpec(start=1.0, stop=1.8, step=0.2)

        target_rep_control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, Target_Rep)],
            function=pnl.Linear,
            variable=1.0,
            intensity_cost_function=pnl.Exponential(rate=0.8046),
            allocation_samples=signalSearchRange)

        flanker_rep_control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, Flanker_Rep)],
            function=pnl.Linear,
            variable=1.0,
            intensity_cost_function=pnl.Exponential(rate=0.8046),
            allocation_samples=signalSearchRange)

        objective_mech = pnl.ObjectiveMechanism(
            function=pnl.LinearCombination(operation=pnl.PRODUCT),
            monitor=[
                reward,
                (Decision.output_states[pnl.PROBABILITY_UPPER_THRESHOLD], 1,
                 -1)
            ])
        # Model Based OCM (formerly controller)
        evc_gratton.add_controller(controller=pnl.OptimizationControlMechanism(
            agent_rep=evc_gratton,
            features=[
                target_stim.input_state, flanker_stim.input_state,
                reward.input_state
            ],
            feature_function=pnl.AdaptiveIntegrator(rate=1.0),
            objective_mechanism=objective_mech,
            function=pnl.GridSearch(),
            control_signals=[
                target_rep_control_signal, flanker_rep_control_signal
            ]))
        evc_gratton.enable_controller = True

        targetFeatures = [1, 1, 1]
        flankerFeatures = [1, -1, 1]
        rewardValues = [100, 100, 100]

        stim_list_dict = {
            target_stim: targetFeatures,
            flanker_stim: flankerFeatures,
            reward: rewardValues
        }

        evc_gratton.run(inputs=stim_list_dict)

        expected_results_array = [[[0.32257752863413636], [0.9481940753514433],
                                   [100.]],
                                  [[0.42963678062444666],
                                   [0.47661180945923376], [100.]],
                                  [[0.300291026852769], [0.97089165101931],
                                   [100.]]]

        expected_sim_results_array = [
            [[0.32257753], [0.94819408], [100.]],
            [[0.31663196], [0.95508757], [100.]],
            [[0.31093566], [0.96110142], [100.]],
            [[0.30548947], [0.96633839], [100.]],
            [[0.30029103], [0.97089165], [100.]],
            [[0.3169957], [0.95468427], [100.]],
            [[0.31128378], [0.9607499], [100.]],
            [[0.30582202], [0.96603252], [100.]],
            [[0.30060824], [0.9706259], [100.]],
            [[0.29563774], [0.97461444], [100.]],
            [[0.31163288], [0.96039533], [100.]],
            [[0.30615555], [0.96572397], [100.]],
            [[0.30092641], [0.97035779], [100.]],
            [[0.2959409], [0.97438178], [100.]],
            [[0.29119255], [0.97787196], [100.]],
            [[0.30649004], [0.96541272], [100.]],
            [[0.30124552], [0.97008732], [100.]],
            [[0.29624499], [0.97414704], [100.]],
            [[0.29148205], [0.97766847], [100.]],
            [[0.28694892], [0.98071974], [100.]],
            [[0.30156558], [0.96981445], [100.]],
            [[0.29654999], [0.97391021], [100.]],
            [[0.29177245], [0.97746315], [100.]],
            [[0.28722523], [0.98054192], [100.]],
            [[0.28289958], [0.98320731], [100.]],
            [[0.42963678], [0.47661181], [100.]],
            [[0.42846471], [0.43938586], [100.]],
            [[0.42628176], [0.40282965], [100.]],
            [[0.42314468], [0.36732207], [100.]],
            [[0.41913221], [0.333198], [100.]],
            [[0.42978939], [0.51176048], [100.]],
            [[0.42959394], [0.47427693], [100.]],
            [[0.4283576], [0.43708106], [100.]],
            [[0.4261132], [0.40057958], [100.]],
            [[0.422919], [0.36514906], [100.]],
            [[0.42902209], [0.54679323], [100.]],
            [[0.42980788], [0.50942101], [100.]],
            [[0.42954704], [0.47194318], [100.]],
            [[0.42824656], [0.43477897], [100.]],
            [[0.42594094], [0.3983337], [100.]],
            [[0.42735293], [0.58136855], [100.]],
            [[0.42910149], [0.54447221], [100.]],
            [[0.42982229], [0.50708112], [100.]],
            [[0.42949608], [0.46961065], [100.]],
            [[0.42813159], [0.43247968], [100.]],
            [[0.42482049], [0.61516258], [100.]],
            [[0.42749136], [0.57908829], [100.]],
            [[0.42917687], [0.54214925], [100.]],
            [[0.42983261], [0.50474093], [100.]],
            [[0.42944107], [0.46727945], [100.]],
            [[0.32257753], [0.94819408], [100.]],
            [[0.31663196], [0.95508757], [100.]],
            [[0.31093566], [0.96110142], [100.]],
            [[0.30548947], [0.96633839], [100.]],
            [[0.30029103], [0.97089165], [100.]],
            [[0.3169957], [0.95468427], [100.]],
            [[0.31128378], [0.9607499], [100.]],
            [[0.30582202], [0.96603252], [100.]],
            [[0.30060824], [0.9706259], [100.]],
            [[0.29563774], [0.97461444], [100.]],
            [[0.31163288], [0.96039533], [100.]],
            [[0.30615555], [0.96572397], [100.]],
            [[0.30092641], [0.97035779], [100.]],
            [[0.2959409], [0.97438178], [100.]],
            [[0.29119255], [0.97787196], [100.]],
            [[0.30649004], [0.96541272], [100.]],
            [[0.30124552], [0.97008732], [100.]],
            [[0.29624499], [0.97414704], [100.]],
            [[0.29148205], [0.97766847], [100.]],
            [[0.28694892], [0.98071974], [100.]],
            [[0.30156558], [0.96981445], [100.]],
            [[0.29654999], [0.97391021], [100.]],
            [[0.29177245], [0.97746315], [100.]],
            [[0.28722523], [0.98054192], [100.]],
            [[0.28289958], [0.98320731], [100.]],
        ]

        for trial in range(len(evc_gratton.results)):
            assert np.allclose(
                expected_results_array[trial],
                # Note: Skip decision variable OutputState
                evc_gratton.results[trial][1:])
        for simulation in range(len(evc_gratton.simulation_results)):
            assert np.allclose(
                expected_sim_results_array[simulation],
                # Note: Skip decision variable OutputState
                evc_gratton.simulation_results[simulation][1:])
Example #13
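EVC test attaching ControlProjections to the DDM's drift rate and threshold; grid-search simulation results and final trial results are checked against stored reference values.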
    def test_evc(self):
        # Mechanisms
        Input = pnl.TransferMechanism(name='Input')
        reward = pnl.TransferMechanism(
            output_states=[pnl.RESULT, pnl.OUTPUT_MEAN, pnl.OUTPUT_VARIANCE],
            name='reward')
        Decision = pnl.DDM(function=pnl.DriftDiffusionAnalytical(
            drift_rate=(1.0,
                        pnl.ControlProjection(function=pnl.Linear,
                                              control_signal_params={
                                                  pnl.ALLOCATION_SAMPLES:
                                                  np.arange(0.1, 1.01, 0.3)
                                              })),
            threshold=(1.0,
                       pnl.ControlProjection(function=pnl.Linear,
                                             control_signal_params={
                                                 pnl.ALLOCATION_SAMPLES:
                                                 np.arange(0.1, 1.01, 0.3)
                                             })),
            noise=0.5,
            starting_point=0,
            t0=0.45),
                           output_states=[
                               pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
                               pnl.PROBABILITY_UPPER_THRESHOLD
                           ],
                           name='Decision')

        comp = pnl.Composition(name="evc")
        comp.add_node(reward, required_roles=[pnl.NodeRole.OUTPUT])
        comp.add_node(Decision, required_roles=[pnl.NodeRole.OUTPUT])
        task_execution_pathway = [Input, pnl.IDENTITY_MATRIX, Decision]
        comp.add_linear_processing_pathway(task_execution_pathway)

        comp.add_controller(controller=pnl.OptimizationControlMechanism(
            agent_rep=comp,
            features=[Input.input_state, reward.input_state],
            feature_function=pnl.AdaptiveIntegrator(rate=0.5),
            objective_mechanism=pnl.ObjectiveMechanism(
                function=pnl.LinearCombination(operation=pnl.PRODUCT),
                monitor=[
                    reward, Decision.output_states[
                        pnl.PROBABILITY_UPPER_THRESHOLD],
                    (Decision.output_states[pnl.RESPONSE_TIME], -1, 1)
                ]),
            function=pnl.GridSearch(),
            control_signals=[("drift_rate", Decision), ("threshold",
                                                        Decision)]))

        comp.enable_controller = True

        comp._analyze_graph()

        stim_list_dict = {Input: [0.5, 0.123], reward: [20, 20]}

        comp.run(inputs=stim_list_dict, retain_old_simulation_data=True)

        # Note: Removed decision variable OutputState from simulation results because sign is chosen randomly
        expected_sim_results_array = [[[10.], [10.0], [0.0], [0.48999867],
                                       [0.50499983]],
                                      [[10.], [10.0], [0.0], [1.08965888],
                                       [0.51998934]],
                                      [[10.], [10.0], [0.0], [2.40680493],
                                       [0.53494295]],
                                      [[10.], [10.0], [0.0], [4.43671978],
                                       [0.549834]],
                                      [[10.], [10.0], [0.0], [0.48997868],
                                       [0.51998934]],
                                      [[10.], [10.0], [0.0], [1.08459402],
                                       [0.57932425]],
                                      [[10.], [10.0], [0.0], [2.36033556],
                                       [0.63645254]],
                                      [[10.], [10.0], [0.0], [4.24948962],
                                       [0.68997448]],
                                      [[10.], [10.0], [0.0], [0.48993479],
                                       [0.53494295]],
                                      [[10.], [10.0], [0.0], [1.07378304],
                                       [0.63645254]],
                                      [[10.], [10.0], [0.0], [2.26686573],
                                       [0.72710822]],
                                      [[10.], [10.0], [0.0], [3.90353015],
                                       [0.80218389]],
                                      [[10.], [10.0], [0.0], [0.4898672],
                                       [0.549834]],
                                      [[10.], [10.0], [0.0], [1.05791834],
                                       [0.68997448]],
                                      [[10.], [10.0], [0.0], [2.14222978],
                                       [0.80218389]],
                                      [[10.], [10.0], [0.0], [3.49637662],
                                       [0.88079708]],
                                      [[15.], [15.0], [0.0], [0.48999926],
                                       [0.50372993]],
                                      [[15.], [15.0], [0.0], [1.08981011],
                                       [0.51491557]],
                                      [[15.], [15.0], [0.0], [2.40822035],
                                       [0.52608629]],
                                      [[15.], [15.0], [0.0], [4.44259627],
                                       [0.53723096]],
                                      [[15.], [15.0], [0.0], [0.48998813],
                                       [0.51491557]],
                                      [[15.], [15.0], [0.0], [1.0869779],
                                       [0.55939819]],
                                      [[15.], [15.0], [0.0], [2.38198336],
                                       [0.60294711]],
                                      [[15.], [15.0], [0.0], [4.33535807],
                                       [0.64492386]],
                                      [[15.], [15.0], [0.0], [0.48996368],
                                       [0.52608629]],
                                      [[15.], [15.0], [0.0], [1.08085171],
                                       [0.60294711]],
                                      [[15.], [15.0], [0.0], [2.32712843],
                                       [0.67504223]],
                                      [[15.], [15.0], [0.0], [4.1221271],
                                       [0.7396981]],
                                      [[15.], [15.0], [0.0], [0.48992596],
                                       [0.53723096]],
                                      [[15.], [15.0], [0.0], [1.07165729],
                                       [0.64492386]],
                                      [[15.], [15.0], [0.0], [2.24934228],
                                       [0.7396981]],
                                      [[15.], [15.0], [0.0], [3.84279648],
                                       [0.81637827]]]

        for simulation in range(len(expected_sim_results_array)):
            assert np.allclose(
                expected_sim_results_array[simulation],
                # Note: Skip decision variable OutputState
                comp.simulation_results[simulation][0:3] +
                comp.simulation_results[simulation][4:6])

        expected_results_array = [[[20.0], [20.0], [0.0], [1.0],
                                   [2.378055160151634], [0.9820137900379085]],
                                  [[20.0], [20.0], [0.0], [0.1],
                                   [0.48999967725112503],
                                   [0.5024599801509442]]]

        for trial in range(len(expected_results_array)):
            np.testing.assert_allclose(
                comp.results[trial],
                expected_results_array[trial],
                atol=1e-08,
                err_msg='Failed on expected_output[{0}]'.format(trial))