Example #1
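The examples below come from PsyNeuLink's test suite and use the library's classes and keyword constants (NOISE, INTERCEPT, SLOPE, VARIANCE, SHADOW_INPUTS, MINIMIZE, NORMED_L0_SIMILARITY) without showing their imports. A minimal sketch of the imports they rely on, assuming a wildcard import of the public API (the repository's tests may instead import names explicitly or use import psyneulink as pnl):

import numpy as np
import pytest

# Assumption: the top-level psyneulink package re-exports the Mechanism and
# Composition classes as well as the keyword constants used in these examples.
from psyneulink import *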
    def test_nested_control_projection_count_controller(self):
        # Inner Composition
        ia = TransferMechanism(name='ia')
        icomp = Composition(name='icomp', pathways=[ia])
        # Outer Composition
        ocomp = Composition(name='ocomp', pathways=[icomp])
        ocm = OptimizationControlMechanism(
            name='ocm',
            agent_rep=ocomp,
            control_signals=[
                ControlSignal(projections=[(NOISE, ia)]),
                ControlSignal(projections=[(INTERCEPT, ia)]),
                ControlSignal(projections=[(SLOPE, ia)]),
            ],
            search_space=[[1], [1], [1]])
        ocomp.add_controller(ocm)
        assert len(ocm.efferents) == 3
        assert all([
            proj.receiver.owner == icomp.parameter_CIM
            for proj in ocm.efferents
        ])
        assert len(ia.mod_afferents) == 3
        assert all([
            proj.sender.owner == icomp.parameter_CIM
            for proj in ia.mod_afferents
        ])
Example #2
    def test_parameter_CIM_port_order(self):
        # Note:  CIM_port order is also tested in TestNodes and test_simplified_necker_cube()

        # Inner Composition
        ia = TransferMechanism(name='ia')
        icomp = Composition(name='icomp', pathways=[ia])

        # Outer Composition
        ocomp = Composition(name='ocomp', pathways=[icomp])
        ocm = OptimizationControlMechanism(
            name='ic',
            agent_rep=ocomp,
            control_signals=[
                ControlSignal(projections=[(NOISE, ia)]),
                ControlSignal(projections=[(INTERCEPT, ia)]),
                ControlSignal(projections=[(SLOPE, ia)]),
            ])
        ocomp.add_controller(ocm)

        assert INTERCEPT in icomp.parameter_CIM.output_ports.names[0]
        assert NOISE in icomp.parameter_CIM.output_ports.names[1]
        assert SLOPE in icomp.parameter_CIM.output_ports.names[2]
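The port-order assertions above depend on the parameter_CIM exposing one output port per modulated parameter and ordering them deterministically (here, alphabetically by parameter name), independent of the order in which the ControlSignals were specified. A quick interactive check, assuming the setup above (a sketch, not part of the original test):

# Expected to list the ports relaying INTERCEPT, NOISE and SLOPE of 'ia', in that order.
print(icomp.parameter_CIM.output_ports.names)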
Example #3
def test_predator_prey(benchmark, mode, samples):
    if len(samples) > 10 and mode not in {"LLVM", "LLVMRun", "Python-PTX"}:
        pytest.skip("This test takes too long")
    # OCM default mode is Python
    mode, ocm_mode = (mode + "-Python").split('-')[0:2]
    benchmark.group = "Predator-Prey " + str(len(samples))
    obs_len = 3
    obs_coords = 2
    player_idx = 0
    player_obs_start_idx = player_idx * obs_len
    player_value_idx = player_idx * obs_len + obs_coords
    player_coord_slice = slice(player_obs_start_idx, player_value_idx)
    predator_idx = 1
    predator_obs_start_idx = predator_idx * obs_len
    predator_value_idx = predator_idx * obs_len + obs_coords
    predator_coord_slice = slice(predator_obs_start_idx, predator_value_idx)
    prey_idx = 2
    prey_obs_start_idx = prey_idx * obs_len
    prey_value_idx = prey_idx * obs_len + obs_coords
    prey_coord_slice = slice(prey_obs_start_idx, prey_value_idx)

    player_len = prey_len = predator_len = obs_coords

    # Perceptual Mechanisms
    player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS")
    prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS")
    predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS")

    # Action Mechanism
    #    Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey:
    #    note: unitization is done in main loop, to allow compilation of LinearCombination function (TBI)
    greedy_action_mech = ComparatorMechanism(name='ACTION', sample=player_obs, target=prey_obs)

    # Create Composition
    agent_comp = Composition(name='PREDATOR-PREY COMPOSITION')
    agent_comp.add_node(player_obs)
    agent_comp.add_node(predator_obs)
    agent_comp.add_node(prey_obs)
    agent_comp.add_node(greedy_action_mech)
    agent_comp.exclude_node_roles(predator_obs, NodeRole.OUTPUT)

    ocm = OptimizationControlMechanism(features={SHADOW_INPUTS: [player_obs, predator_obs, prey_obs]},
                                       agent_rep=agent_comp,
                                       function=GridSearch(direction=MINIMIZE,
                                                           save_values=True),

                                       objective_mechanism=ObjectiveMechanism(function=Distance(metric=NORMED_L0_SIMILARITY),
                                                                              monitor=[
                                                                                  player_obs,
                                                                                  prey_obs
                                                                              ]),
                                       control_signals=[ControlSignal(modulates=(VARIANCE,player_obs),
                                                                      allocation_samples=samples),
                                                        ControlSignal(modulates=(VARIANCE,predator_obs),
                                                                      allocation_samples=samples),
                                                        ControlSignal(modulates=(VARIANCE,prey_obs),
                                                                      allocation_samples=samples)
                                                        ],
                                       )
    agent_comp.add_controller(ocm)
    agent_comp.enable_controller = True
    ocm.comp_execution_mode = ocm_mode

    input_dict = {player_obs:[[1.1576537,  0.60782117]],
                  predator_obs:[[-0.03479106, -0.47666293]],
                  prey_obs:[[-0.60836214,  0.1760381 ]],
                 }
    run_results = agent_comp.run(inputs=input_dict, num_trials=2, bin_execute=mode)

    if len(samples) == 2:
        # KDM 12/4/19: modified results due to global seed offset of
        # GaussianDistort assignment.
        # To produce the old numbers, run get_global_seed once before
        # creating each Mechanism with GaussianDistort above.
        assert np.allclose(run_results[0], [[-10.06333025,   2.4845505 ]])
        if mode == 'Python':
            assert np.allclose(ocm.feature_values, [[ 1.1576537,   0.60782117],
                                                    [-0.03479106, -0.47666293],
                                                    [-0.60836214,  0.1760381 ]])

    if benchmark.enabled:
        benchmark(agent_comp.run, inputs=input_dict, bin_execute=mode)
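In the first variant above, a single mode string encodes both the Composition's execution mode and the controller's: the line mode, ocm_mode = (mode + "-Python").split('-')[0:2] appends a Python default for the OCM and then splits, so a parametrized value such as "Python-PTX" (an illustrative value; the actual parametrization is not shown on this page) runs the Composition in Python while compiling the controller's simulations with PTX. A small worked check of that parsing:

# "Python-PTX" -> Composition runs in Python, OCM simulations compiled with PTX
mode, ocm_mode = ("Python-PTX" + "-Python").split('-')[0:2]
assert (mode, ocm_mode) == ("Python", "PTX")

# A bare "LLVM" falls back to the Python default for the OCM
mode, ocm_mode = ("LLVM" + "-Python").split('-')[0:2]
assert (mode, ocm_mode) == ("LLVM", "Python")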
Example #4
def test_predator_prey(benchmark, mode, samples):
    benchmark.group = "Predator-Prey " + str(len(samples))
    # These should probably be replaced by reference to ForagerEnv constants:
    obs_len = 3
    obs_coords = 2
    action_len = 2
    player_idx = 0
    player_obs_start_idx = player_idx * obs_len
    player_value_idx = player_idx * obs_len + obs_coords
    player_coord_slice = slice(player_obs_start_idx, player_value_idx)
    predator_idx = 1
    predator_obs_start_idx = predator_idx * obs_len
    predator_value_idx = predator_idx * obs_len + obs_coords
    predator_coord_slice = slice(predator_obs_start_idx, predator_value_idx)
    prey_idx = 2
    prey_obs_start_idx = prey_idx * obs_len
    prey_value_idx = prey_idx * obs_len + obs_coords
    prey_coord_slice = slice(prey_obs_start_idx, prey_value_idx)

    player_len = prey_len = predator_len = obs_coords

    # Perceptual Mechanisms
    player_obs = ProcessingMechanism(size=prey_len,
                                     function=GaussianDistort,
                                     name="PLAYER OBS")
    prey_obs = ProcessingMechanism(size=prey_len,
                                   function=GaussianDistort,
                                   name="PREY OBS")
    predator_obs = TransferMechanism(size=predator_len,
                                     function=GaussianDistort,
                                     name="PREDATOR OBS")

    # Action Mechanism
    #    Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey:
    #    note: unitization is done in main loop, to allow compilation of LinearCombination function (TBI)
    greedy_action_mech = ComparatorMechanism(name='ACTION',
                                             sample=player_obs,
                                             target=prey_obs)

    # Create Composition
    agent_comp = Composition(name='PREDATOR-PREY COMPOSITION')
    agent_comp.add_node(player_obs)
    agent_comp.add_node(predator_obs)
    agent_comp.add_node(prey_obs)
    agent_comp.add_node(greedy_action_mech)

    # ControlMechanism

    ocm = OptimizationControlMechanism(
        features={SHADOW_INPUTS: [player_obs, predator_obs, prey_obs]},
        agent_rep=agent_comp,
        function=GridSearch(direction=MINIMIZE, save_values=True),
        objective_mechanism=ObjectiveMechanism(
            function=Distance(metric=NORMED_L0_SIMILARITY),
            monitor=[player_obs, predator_obs, prey_obs]),
        control_signals=[
            ControlSignal(modulates=(VARIANCE, player_obs),
                          allocation_samples=samples),
            ControlSignal(modulates=(VARIANCE, predator_obs),
                          allocation_samples=samples),
            ControlSignal(modulates=(VARIANCE, prey_obs),
                          allocation_samples=samples),
        ],
    )
    agent_comp.add_controller(ocm)
    agent_comp.enable_controller = True
    ocm.comp_execution_mode = mode

    input_dict = {
        player_obs: [[1.1576537, 0.60782117]],
        predator_obs: [[-0.03479106, -0.47666293]],
        prey_obs: [[-0.60836214, 0.1760381]],
    }
    run_results = agent_comp.run(inputs=input_dict,
                                 num_trials=2,
                                 bin_execute=mode)

    if len(samples) == 2:
        assert np.allclose(run_results[0], [[-19.06547277, 5.47274121]])
        if mode == 'Python':
            assert np.allclose(
                ocm.feature_values,
                [[1.1576537, 0.60782117], [-0.03479106, -0.47666293],
                 [-0.60836214, 0.1760381]])

    benchmark(agent_comp.run, inputs=input_dict, bin_execute=mode)