Example #1
    def test_lvoc_features_function(self):
        m1 = pnl.TransferMechanism(
            input_states=["InputState A", "InputState B"])
        m2 = pnl.TransferMechanism()
        c = pnl.Composition()
        c.add_node(m1, required_roles=pnl.NodeRole.INPUT)
        c.add_node(m2, required_roles=pnl.NodeRole.INPUT)
        c._analyze_graph()
        lvoc = pnl.OptimizationControlMechanism(
            agent_rep=pnl.RegressionCFA,
            features=[
                m1.input_states[0], m1.input_states[1], m2.input_state, m2
            ],
            feature_function=pnl.LinearCombination(offset=10.0),
            objective_mechanism=pnl.ObjectiveMechanism(monitor=[m1, m2]),
            function=pnl.GradientOptimization(max_iterations=1),
            control_signals=[(pnl.SLOPE, m1), (pnl.SLOPE, m2)])
        c.add_node(lvoc)
        input_dict = {m1: [[1], [1]], m2: [1]}

        c.run(inputs=input_dict)

        # 1 OUTCOME InputState (from the ObjectiveMechanism) + 4 feature InputStates
        assert len(lvoc.input_states) == 5

        # each feature InputState (indices 1-4) should have been assigned the
        # feature_function, including its offset
        for i in range(1, 5):
            assert lvoc.input_states[i].function.offset == 10.0
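A quick sketch of what the feature_function above contributes (assuming, per PsyNeuLink's Function API, that a Function instance can be called directly on a variable):

import psyneulink as pnl

f = pnl.LinearCombination(offset=10.0)
print(f([1.0]))  # ~[11.], i.e. the combined input plus the 10.0 offset --
                 # the same offset the assertions above check on each
                 # feature InputState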
Example #2 (excerpt)

import numpy as np
import psyneulink as pnl

# (Reconstructed opening: this excerpt begins mid-call in its original source;
#  the 'lvoc' name is illustrative, and task_decision, reward,
#  objective_function, color_task and word_task are defined earlier there.)
lvoc = pnl.OptimizationControlMechanism(
    objective_mechanism=pnl.ObjectiveMechanism(
        monitor=[
            task_decision.output_states[pnl.PROBABILITY_UPPER_THRESHOLD],
            task_decision.output_states[pnl.PROBABILITY_LOWER_THRESHOLD],
            reward
        ],
        # monitored_output_states=[task_decision, reward],
        function=objective_function),
    agent_rep=pnl.RegressionCFA(
        update_weights=pnl.BayesGLM(
            mu_0=-0.17,
            sigma_0=9.0909),  # precision p = 0.11; prior variance v = 1/p ≈ 9.0909
        prediction_terms=[pnl.PV.FC, pnl.PV.COST]),
    function=pnl.GradientOptimization(
        convergence_criterion=pnl.VALUE,
        convergence_threshold=0.001,
        step_size=1,
        # decay the step size as 1/sqrt(sample number); note this yields an
        # unbounded step if the sample number starts at 0 (cf. the 11 + y
        # variant in Example #3)
        annealing_function=lambda x, y: x / np.sqrt(y)
        # direction=pnl.ASCENT
    ),
    control_signals=pnl.ControlSignal(
        projections=[(pnl.SLOPE, color_task), ('color_control', word_task)],
        # function=pnl.ReLU,
        function=pnl.Logistic,
        cost_options=[
            pnl.ControlSignalCosts.INTENSITY, pnl.ControlSignalCosts.ADJUSTMENT
        ],
        intensity_cost_function=pnl.Exponential(rate=0.25, bias=-3),
        adjustment_cost_function=pnl.Exponential(rate=0.25, bias=-3),
        # allocation_samples=[i / 2 for i in list(range(0, 50, 1))]
    ))
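The annealing_function above rescales the gradient step on each sample. A minimal sketch in plain numpy (not PsyNeuLink; x stands for the current step size and y for the sample number) comparing this schedule with the 11 + y variant used in the next example:

import numpy as np

x = 1.0  # nominal step size
for y in range(1, 6):
    print(y, x / np.sqrt(y), x / np.sqrt(11 + y))
# x / sqrt(y) starts at the full step size and decays quickly;
# x / sqrt(11 + y) starts much smaller and decays more gently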
Example #3 (excerpt)

# (Reconstructed opening: this excerpt also begins mid-call; the 'lvoc' name
#  is illustrative, and the ObjectiveMechanism's monitor argument, along with
#  task_decision, reward, objective_function, color_task, word_task and
#  default_control_signal, is assumed to be defined earlier in its original
#  source.)
lvoc = pnl.OptimizationControlMechanism(
    objective_mechanism=pnl.ObjectiveMechanism(
        # ...
        function=objective_function),
    # the agent_rep maintains the posterior weight distribution
    agent_rep=pnl.RegressionCFA(
        # update_weights=pnl.BayesGLM(mu_0=-0.17, sigma_0=0.11),  # sigma_0=math.sqrt(0.11)
        update_weights=pnl.BayesGLM(
            mu_0=-0.17, sigma_0=0.0000000000000001),  # sigma_0=math.sqrt(0.11)
        # update_weights=pnl.BayesGLM(mu_0=+0.17, sigma_0=0.11),  # sigma_0=math.sqrt(0.11)
        prediction_terms=[pnl.PV.C, pnl.PV.FC, pnl.PV.FF, pnl.PV.COST]),
    # sample control allocations and return the best one;
    # evaluate() computes the outcome (from the objective mechanism) minus
    # costs, given the state (features) and a sampled control allocation
    function=pnl.GradientOptimization(
        convergence_criterion=pnl.VALUE,
        convergence_threshold=0.001,
        step_size=2,  # (was 1)
        # Note: Falk used 10 in the denominator below but indexed sample
        # numbers from 1; the sample_num passed to _follow_gradient is indexed
        # from 0, so 11 is used here.
        annealing_function=lambda x, y: x / np.sqrt(11 + y),
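        # (Illustration, not from the original source: at the first sample,
        #  Falk's x / np.sqrt(10 + 1) and the x / np.sqrt(11 + 0) used here
        #  both equal x / sqrt(11), so the two conventions give the same
        #  schedule.)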
        max_iterations=100
        # save_samples=True,
        # save_values=True,
        # direction=pnl.ASCENT
    ),
    # the optimal control allocation is used to compute the control signals
    control_signals=[
        pnl.ControlSignal(
            default_allocation=default_control_signal,
            modulates=[(pnl.SLOPE, color_task), ('color_control', word_task)],
            # function=pnl.ReLU,
            # function=pnl.Logistic,
            cost_options=[
                pnl.CostFunctions.INTENSITY, pnl.CostFunctions.ADJUSTMENT
            ],