Code example #1
File: test_interfaces.py Project: gnossen/PsyNeuLink
    def test_non_origin_partial_input_spec(self):
        A = ProcessingMechanism(name='A', function=Linear(slope=2.0))
        B = ProcessingMechanism(name='B', input_ports=[[0.], A.input_port])

        comp = Composition(name='comp')

        comp.add_linear_processing_pathway([A, B])

        comp.run(inputs={A: [[1.23]]})
        assert np.allclose(B.get_input_values(comp), [[2.46], [1.23]])
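
These snippets are excerpted from PsyNeuLink's test suite and omit their imports. A minimal self-contained sketch of the example above, assuming the usual top-level psyneulink exports (ProcessingMechanism, Composition, Linear) and numpy:

import numpy as np
import psyneulink as pnl  # assumption: these classes are exposed at the package top level

A = pnl.ProcessingMechanism(name='A', function=pnl.Linear(slope=2.0))
B = pnl.ProcessingMechanism(name='B', input_ports=[[0.], A.input_port])
comp = pnl.Composition(name='comp')
comp.add_linear_processing_pathway([A, B])
comp.run(inputs={A: [[1.23]]})
# B's second InputPort shadows A's input, so it should receive the raw 1.23,
# while the first InputPort receives A's doubled output (2.46), as asserted above.
print(B.get_input_values(comp))
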
Code example #2
def test_simplified_greedy_agent_random(benchmark, mode):
    # These should probably be replaced by reference to ForagerEnv constants:
    obs_len = 2
    action_len = 2
    player_coord_idx = slice(0, 2)
    predator_coord_idx = slice(3, 5)
    prey_coord_idx = slice(6, 8)
    player_value_idx = 2
    predator_value_idx = 5
    prey_value_idx = 8

    player_len = prey_len = predator_len = obs_len

    player = ProcessingMechanism(size=prey_len,
                                 function=GaussianDistort,
                                 name="PLAYER OBS")
    prey = ProcessingMechanism(size=prey_len,
                               function=GaussianDistort,
                               name="PREY OBS")

    # Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey.
    # Note: unitization is done in the main loop, to allow compilation of the LinearCombination function (TBI).
    greedy_action_mech = ComparatorMechanism(name='MOTOR OUTPUT',
                                             sample=player,
                                             target=prey)

    agent_comp = Composition(name='PREDATOR-PREY COMPOSITION')
    agent_comp.add_node(player)
    agent_comp.add_node(prey)
    agent_comp.add_node(greedy_action_mech)

    # Projections to greedy_action_mech were created by the assignments of the sample and target args in its constructor,
    # so just add them to the Composition.
    for projection in greedy_action_mech.projections:
        agent_comp.add_projection(projection)

    run_results = agent_comp.run(inputs={
        player: [[619, 177]],
        prey: [[419, 69]]
    },
                                 bin_execute=mode)
    # KDM 12/4/19: modified results due to global seed offset of
    # GaussianDistort assignment.
    # to produce old numbers, run get_global_seed once before creating
    # each Mechanism with GaussianDistort above
    assert np.allclose(run_results,
                       [[-199.5484223217141, -107.79361870517444]])
    benchmark(
        agent_comp.run, **{
            'inputs': {
                player: [[619, 177]],
                prey: [[419, 69]],
            },
            'bin_execute': mode
        })
Code example #3
File: test_buffer.py Project: GandoNgola/PsyNeuLink
    def test_buffer_as_function_of_processing_mech(self, benchmark):

        P = ProcessingMechanism(function=Buffer(default_variable=[[0.0]],
                                                initializer=[0.0],
                                                history=3))
        val = P.execute(1.0)

        # NOTE: actual output is [0, [[1]]]
        assert np.allclose(np.asfarray(val), [[0., 1.]])
        if benchmark.enabled:
            benchmark(P.execute, 5.0)
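
For context, Buffer is a stateful function that keeps a rolling history of its inputs. A rough sketch of calling it directly, assuming Buffer is importable from the top-level psyneulink namespace and that history=3 bounds the stored deque:

import psyneulink as pnl  # assumption: Buffer is exposed at the package top level

buf = pnl.Buffer(default_variable=[[0.0]], initializer=[0.0], history=3)
print(buf.execute(1.0))  # roughly [0., 1.]: the initializer plus the new entry
print(buf.execute(2.0))  # roughly [0., 1., 2.]
print(buf.execute(3.0))  # roughly [1., 2., 3.]: the oldest entry is dropped once history is full
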
Code example #4
 def test_processing_mechanism_multiple_input_ports(self):
     PM1 = ProcessingMechanism(size=[4, 4], function=LinearCombination, input_ports=['input_1', 'input_2'])
     PM2 = ProcessingMechanism(size=[2, 2, 2], function=LinearCombination, input_ports=['1', '2', '3'])
     PM1.execute([[1, 2, 3, 4], [5, 4, 2, 2]])
     PM2.execute([[2, 0], [1, 3], [1, 0]])
     assert np.allclose(PM1.value, [6, 6, 5, 6])
     assert np.allclose(PM2.value, [4, 3])
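
For reference, the expected values above are just the elementwise sums of the variables supplied to each InputPort (LinearCombination's default SUM operation). A plain NumPy check of the arithmetic, independent of PsyNeuLink:

import numpy as np

assert np.allclose(np.sum([[1, 2, 3, 4], [5, 4, 2, 2]], axis=0), [6, 6, 5, 6])
assert np.allclose(np.sum([[2, 0], [1, 3], [1, 0]], axis=0), [4, 3])
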
Code example #5
def test_nested_composition_run_trials_inputs(benchmark, executions, mode):
    benchmark.group = "Nested Composition multiple trials/inputs multirun {}".format(
        executions)

    # mechanisms
    A = ProcessingMechanism(name="A", function=AdaptiveIntegrator(rate=0.1))
    B = ProcessingMechanism(name="B", function=Logistic)

    inner_comp = Composition(name="inner_comp")
    inner_comp.add_linear_processing_pathway([A, B])
    inner_comp._analyze_graph()
    sched = Scheduler(composition=inner_comp)

    outer_comp = Composition(name="outer_comp")
    outer_comp.add_node(inner_comp)

    outer_comp._analyze_graph()
    sched = Scheduler(composition=outer_comp)

    # The input dict should assign inputs to origin nodes (inner_comp in this case)
    var = {inner_comp: [[[2.0]], [[3.0]]]}
    expected = [[[0.549833997312478]], [[0.617747874769249]],
                [[0.6529428177055896]], [[0.7044959416252289]]]
    if executions > 1:
        var = [var for _ in range(executions)]
    if mode == 'Python':

        def f(v, num_trials, res=False):
            results = []
            for i in range(executions):
                outer_comp.run(v[i], execution_id=i, num_trials=num_trials)
                if res:  # copy the results immediately, otherwise it's empty
                    results.append(outer_comp.results.copy())
            return results

        res = f(var, 4, True) if executions > 1 else f([var], 4, True)
        benchmark(f if executions > 1 else outer_comp.run, var, num_trials=4)
    elif mode == 'LLVM':
        e = pnlvm.execution.CompExecution(outer_comp,
                                          [None for _ in range(executions)])
        res = e.run(var, 4, 2)
        benchmark(e.run, var, 4, 2)
    elif mode == 'PTX':
        e = pnlvm.execution.CompExecution(outer_comp,
                                          [None for _ in range(executions)])
        res = e.cuda_run(var, 4, 2)
        benchmark(e.cuda_run, var, 4, 2)

    assert np.allclose(res, [expected for _ in range(executions)])
    assert len(res) == executions or executions == 1
Code example #6
    def test_origin_input_source_true_no_input(self):
        A = ProcessingMechanism(name='A')
        B = ProcessingMechanism(name='B')
        C = ProcessingMechanism(name='C', default_variable=[[4.56]])

        comp = Composition(name='comp')

        comp.add_linear_processing_pathway([A, B])
        comp.add_node(C)

        comp.run(inputs={A: [[1.23]]})

        assert np.allclose(A.parameters.value.get(comp), [[1.23]])
        assert np.allclose(B.parameters.value.get(comp), [[1.23]])
        assert np.allclose(C.parameters.value.get(comp), [[4.56]])
Code example #7
    def test_one_to_two(self):
        A = ProcessingMechanism(name='A')
        B = ProcessingMechanism(name='B')
        C = ProcessingMechanism(name='C', input_states=[A.input_state])

        comp = Composition(name='comp')

        comp.add_linear_processing_pathway([A, B])
        comp.add_node(C)

        comp.run(inputs={A: [[1.23]]})

        assert np.allclose(A.parameters.value.get(comp), [[1.23]])
        assert np.allclose(B.parameters.value.get(comp), [[1.23]])
        assert np.allclose(C.parameters.value.get(comp), [[1.23]])
Code example #8
    def test_dict_of_arrays(self):
        input_labels_dict = {
            "red": [1, 0, 0],
            "green": [0, 1, 0],
            "blue": [0, 0, 1]
        }
        M = ProcessingMechanism(default_variable=[[0, 0, 0]],
                                params={INPUT_LABELS_DICT: input_labels_dict})
        P = Process(pathway=[M])
        S = System(processes=[P])

        store_input_labels = []

        def call_after_trial():
            store_input_labels.append(M.get_input_labels(S))

        S.run(inputs=['red', 'green', 'blue', 'red'],
              call_after_trial=call_after_trial)
        assert np.allclose(
            S.results, [[[1, 0, 0]], [[0, 1, 0]], [[0, 0, 1]], [[1, 0, 0]]])
        assert store_input_labels == [['red'], ['green'], ['blue'], ['red']]

        S.run(inputs='red')
        assert np.allclose(
            S.results,
            [[[1, 0, 0]], [[0, 1, 0]], [[0, 0, 1]], [[1, 0, 0]], [[1, 0, 0]]])

        S.run(inputs=['red'])
        assert np.allclose(S.results, [[[1, 0, 0]], [[0, 1, 0]], [[0, 0, 1]],
                                       [[1, 0, 0]], [[1, 0, 0]], [[1, 0, 0]]])
Code example #9
    def test_dict_of_arrays(self):
        input_labels_dict = {"red": [1.0, 0.0], "green": [0.0, 1.0]}
        output_labels_dict = {"red": [1.0, 0.0], "green": [0.0, 1.0]}
        M = ProcessingMechanism(size=2,
                                params={
                                    INPUT_LABELS_DICT: input_labels_dict,
                                    OUTPUT_LABELS_DICT: output_labels_dict
                                })
        P = Process(pathway=[M])
        S = System(processes=[P])

        store_output_labels = []

        def call_after_trial():
            store_output_labels.append(M.get_output_labels(S))

        S.run(inputs=['red', 'green', 'green', 'red'],
              call_after_trial=call_after_trial)
        assert np.allclose(
            S.results,
            [[[1.0, 0.0]], [[0.0, 1.0]], [[0.0, 1.0]], [[1.0, 0.0]]])
        assert store_output_labels == [['red'], ['green'], ['green'], ['red']]

        store_output_labels = []
        S.run(inputs=[[1.0, 0.0], 'green', [0.0, 1.0], 'red'],
              call_after_trial=call_after_trial)
        assert np.allclose(
            S.results,
            [[[1.0, 0.0]], [[0.0, 1.0]], [[0.0, 1.0]], [[1.0, 0.0]],
             [[1.0, 0.0]], [[0.0, 1.0]], [[0.0, 1.0]], [[1.0, 0.0]]])
        assert store_output_labels == [['red'], ['green'], ['green'], ['red']]
Code example #10
    def test_not_all_output_port_values_have_label(self):
        input_labels_dict = {
            "red": [1.0, 0.0],
            "green": [0.0, 1.0],
            "blue": [2.0, 2.0]
        }
        output_labels_dict = {"red": [1.0, 0.0], "green": [0.0, 1.0]}
        M = ProcessingMechanism(size=2,
                                params={
                                    INPUT_LABELS_DICT: input_labels_dict,
                                    OUTPUT_LABELS_DICT: output_labels_dict
                                })
        P = Process(pathway=[M])
        S = System(processes=[P])

        store_output_labels = []

        def call_after_trial():
            store_output_labels.append(M.get_output_labels(S))

        S.run(inputs=['red', 'blue', 'green', 'blue'],
              call_after_trial=call_after_trial)
        assert np.allclose(
            S.results,
            [[[1.0, 0.0]], [[2.0, 2.0]], [[0.0, 1.0]], [[2.0, 2.0]]])

        assert store_output_labels[0] == ['red']
        assert np.allclose(store_output_labels[1], [[2.0, 2.0]])
        assert store_output_labels[2] == ['green']
        assert np.allclose(store_output_labels[3], [[2.0, 2.0]])
Code example #11
    def test_processing_mechanism_linear_function(self):

        PM1 = ProcessingMechanism()
        PM1.execute(1.0)
        assert np.allclose(PM1.value, 1.0)

        PM2 = ProcessingMechanism(function=Linear(slope=2.0, intercept=1.0))
        PM2.execute(1.0)
        assert np.allclose(PM2.value, 3.0)
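
As a quick sanity check of the expected values: Linear computes slope * x + intercept elementwise, and the first assertion reflects the default slope of 1.0 and intercept of 0.0 (plain arithmetic, no PsyNeuLink needed):

assert 1.0 * 1.0 + 0.0 == 1.0  # default Linear: slope=1.0, intercept=0.0
assert 2.0 * 1.0 + 1.0 == 3.0  # Linear(slope=2.0, intercept=1.0)
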
Code example #12
    def test_output_ports(self, mech_mode, op, expected, benchmark):
        benchmark.group = "Output Port Op: {}".format(op)
        PM1 = ProcessingMechanism(default_variable=[0, 0, 0], output_ports=[op])
        var = [1, 2, 4] if op in {MEAN, MEDIAN, STANDARD_DEVIATION, VARIANCE} else [1, 2, -4]
        ex = pytest.helpers.get_mech_execution(PM1, mech_mode)

        res = benchmark(ex, var)
        assert np.allclose(res, expected)
Code example #13
    def test_invalid_mismatched_input_lens(self):
        A = ProcessingMechanism(name="A")
        B = ProcessingMechanism(name="B")
        C = ProcessingMechanism(name="C")

        comp = Composition(name="COMP")

        comp.add_linear_processing_pathway([A, C])
        comp.add_linear_processing_pathway([B, C])

        inputs_to_A = [[1.0], [2.0]]  # 2 input specs
        inputs_to_B = [[1.0], [2.0], [3.0], [4.0]]  # 4 input specs

        with pytest.raises(CompositionError) as error_text:
            comp.run(inputs={A: inputs_to_A, B: inputs_to_B})
        assert "input dictionary for COMP contains input specifications of different lengths" in str(
            error_text.value)
Code example #14
def test_nested_composition_execution(benchmark, executions, mode):
    benchmark.group = "Nested Composition execution multirun {}".format(
        executions)

    # mechanisms
    A = ProcessingMechanism(name="A", function=AdaptiveIntegrator(rate=0.1))
    B = ProcessingMechanism(name="B", function=Logistic)

    inner_comp = Composition(name="inner_comp")
    inner_comp.add_linear_processing_pathway([A, B])
    inner_comp._analyze_graph()
    sched = Scheduler(composition=inner_comp)

    outer_comp = Composition(name="outer_comp")
    outer_comp.add_node(inner_comp)

    outer_comp._analyze_graph()
    sched = Scheduler(composition=outer_comp)

    # The input dict should assign inputs to origin nodes (inner_comp in this case)
    var = {inner_comp: [[1.0]]}
    expected = [[0.52497918747894]]
    if executions > 1:
        var = [var for _ in range(executions)]
    if mode == 'Python':
        f = lambda x: [
            outer_comp.execute(x[i], execution_id=i) for i in range(executions)
        ]
        res = f(var) if executions > 1 else outer_comp.execute(var)
        benchmark(f if executions > 1 else outer_comp.execute, var)
    elif mode == 'LLVM':
        e = pnlvm.execution.CompExecution(outer_comp,
                                          [None for _ in range(executions)])
        e.execute(var)
        res = e.extract_node_output(outer_comp.output_CIM)
        benchmark(e.execute, var)
    elif mode == 'PTX':
        e = pnlvm.execution.CompExecution(outer_comp,
                                          [None for _ in range(executions)])
        e.cuda_execute(var)
        res = e.extract_node_output(outer_comp.output_CIM)
        benchmark(e.cuda_execute, var)

    assert np.allclose(res, [expected for _ in range(executions)])
    assert len(res) == executions
Code example #15
    def test_valid_input_float(self):
        A = ProcessingMechanism(name="A")
        comp = Composition(name="comp")
        comp.add_node(A)

        comp.run(inputs={A: 5.0})
        assert np.allclose(comp.results, [[5.0]])

        comp.run(inputs={A: [5.0, 10.0, 15.0]})
        assert np.allclose(comp.results, [[[5.0]], [[5.0]], [[10.0]], [[15.0]]])
Code example #16
 def test_processing_mechanism_default_function(self, mode, variable, benchmark):
     PM = ProcessingMechanism(default_variable=[0, 0, 0, 0])
     if mode == "Python":
         ex = PM.execute
     elif mode == "LLVM":
         ex = pnlvm.MechExecution(PM).execute
     elif mode == "PTX":
         ex = pnlvm.MechExecution(PM).cuda_execute
     res = benchmark(ex, variable)
     assert np.allclose(res, [[1., 2., 3., 4.]])
Code example #17
    def test_equivalance_of_threshold_and_termination_specifications_just_threshold(self, mode):
        # Note: This tests the equivalence of using LCAMechanism-specific threshold arguments and
        #       generic TransferMechanism termination_<*> arguments

        lca_thresh = LCAMechanism(size=2, leak=0.5, threshold=0.7)  # Note: execute_to_threshold=True by default
        response = ProcessingMechanism(size=2)
        comp = Composition()
        comp.add_linear_processing_pathway([lca_thresh, response])
        result1 = comp.run(inputs={lca_thresh:[1,0]}, bin_execute=mode)

        lca_termination = LCAMechanism(size=2,
                                       leak=0.5,
                                       termination_threshold=0.7,
                                       termination_measure=max,
                                       termination_comparison_op='>=')
        comp2 = Composition()
        response2 = ProcessingMechanism(size=2)
        comp2.add_linear_processing_pathway([lca_termination,response2])
        result2 = comp2.run(inputs={lca_termination:[1,0]}, bin_execute=mode)
        assert np.allclose(result1, result2)
Code example #18
    def test_equivalance_of_threshold_and_termination_specifications_max_vs_next(self):
        # Note: This tests the equivalence of using LCAMechanism-specific threshold arguments and
        #       generic TransferMechanism termination_<*> arguments

        lca_thresh = LCAMechanism(size=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_NEXT)
        response = ProcessingMechanism(size=3)
        comp = Composition()
        comp.add_linear_processing_pathway([lca_thresh, response])
        result1 = comp.run(inputs={lca_thresh:[1,0.5,0]})

        lca_termination = LCAMechanism(size=3,
                                       leak=0.5,
                                       termination_threshold=0.1,
                                       termination_measure=max_vs_next,
                                       termination_comparison_op='>=')
        comp2 = Composition()
        response2 = ProcessingMechanism(size=3)
        comp2.add_linear_processing_pathway([lca_termination,response2])
        result2 = comp2.run(inputs={lca_termination:[1,0.5,0]})
        assert np.allclose(result1, result2)
Code example #19
    def test_mix_and_match_input_sources(self):
        A = ProcessingMechanism(name='A')
        B = ProcessingMechanism(name='B', default_variable=[[0.], [0.]])
        C = ProcessingMechanism(
            name='C',
            input_states=[B.input_states[1], A.input_state, B.input_states[0]])

        input_dict = {A: [[2.0]], B: [[3.0], [1.0]]}

        comp = Composition(name="comp")

        comp.add_node(A)
        comp.add_node(B)
        comp.add_node(C)

        comp.run(inputs=input_dict)

        assert np.allclose(A.parameters.value.get(comp), [[2.]])
        assert np.allclose(B.parameters.value.get(comp), [[3.], [1.]])
        assert np.allclose(C.parameters.value.get(comp), [[1.], [2.], [3.]])
Code example #20
    def test_equivalance_of_threshold_and_when_finished_condition(self):
        # Note: This tests the equivalence of results when:
        #       execute_until_finished is True for the LCAMechanism (by default)
        #           and the call to execute loops until it reaches threshold (1st test)
        #       vs. when execute_until_finished is False and a condition is added to the scheduler
        #           that causes the LCAMechanism to execute until it reaches threshold (2nd test).

        # loop Mechanism's call to execute
        lca_until_thresh = LCAMechanism(size=2, leak=0.5, threshold=0.7)  # Note: execute_to_threshold=True by default
        response = ProcessingMechanism(size=2)
        comp = Composition()
        comp.add_linear_processing_pathway([lca_until_thresh, response])
        result1 = comp.run(inputs={lca_until_thresh:[1,0]})

        # loop Composition's call to Mechanism
        lca_single_step = LCAMechanism(size=2, leak=0.5, threshold=0.7, execute_until_finished=False)
        comp2 = Composition()
        response2 = ProcessingMechanism(size=2)
        comp2.add_linear_processing_pathway([lca_single_step,response2])
        comp2.scheduler.add_condition(response2, WhenFinished(lca_single_step))
        result2 = comp2.run(inputs={lca_single_step:[1,0]})
        assert np.allclose(result1, result2)
Code example #21
    def test_dict_of_floats(self):
        input_labels_dict_M1 = {"red": 1,
                                "green": 0}
        output_labels_dict_M2 = {"red": 0,
                                "green": 1}

        M1 = ProcessingMechanism(params={INPUT_LABELS_DICT: input_labels_dict_M1})
        M2 = ProcessingMechanism(params={OUTPUT_LABELS_DICT: output_labels_dict_M2})
        P = Process(pathway=[M1, M2],
                    learning=ENABLED,
                    learning_rate=0.25)
        S = System(processes=[P])

        learned_matrix = []
        def record_matrix_after_trial():
            learned_matrix.append(M2.path_afferents[0].get_mod_matrix(S))
        S.run(inputs=['red', 'green', 'green', 'red'],
              targets=['red', 'green', 'green', 'red'],
              call_after_trial=record_matrix_after_trial)

        assert np.allclose(S.results, [[[1.]], [[0.]], [[0.]], [[0.75]]])
        assert np.allclose(learned_matrix, [[[0.75]], [[0.75]], [[0.75]], [[0.5625]]])
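
The expected matrix trajectory above is consistent with a simple delta-rule update, delta_w = learning_rate * input * (target - output), starting from an assumed initial 1x1 weight of 1.0. A hand calculation of the numbers (an illustration, not PsyNeuLink's internal learning code):

lr, w = 0.25, 1.0        # assumed initial weight
inputs = [1, 0, 0, 1]    # 'red', 'green', 'green', 'red' via input_labels_dict_M1
targets = [0, 1, 1, 0]   # 'red', 'green', 'green', 'red' via output_labels_dict_M2
outputs, weights = [], []
for x, t in zip(inputs, targets):
    y = w * x
    outputs.append(y)
    w += lr * x * (t - y)  # delta rule
    weights.append(w)
print(outputs)  # [1.0, 0.0, 0.0, 0.75]      -> matches S.results above
print(weights)  # [0.75, 0.75, 0.75, 0.5625] -> matches learned_matrix above
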
Code example #22
    def test_invalid_matrix_specs(self):

        with pytest.raises(FunctionError) as error_text:
            PM_mismatched_float = ProcessingMechanism(function=LinearMatrix(
                default_variable=0.0,
                matrix=[[1.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0],
                        [0.0, 0.0, 3.0, 0.0], [0.0, 0.0, 0.0, 4.0]]),
                                                      default_variable=0.0)
        assert "Specification of matrix and/or default_variable" in str(error_text.value) and \
               "not compatible for multiplication" in str(error_text.value)

        with pytest.raises(FunctionError) as error_text:
            PM_mismatched_matrix = ProcessingMechanism(
                function=LinearMatrix(default_variable=[[0.0, 0.0], [0.0, 0.0],
                                                        [0.0, 0.0]],
                                      matrix=[[1.0, 0.0, 0.0, 0.0],
                                              [0.0, 2.0, 0.0, 0.0],
                                              [0.0, 0.0, 3.0, 0.0],
                                              [0.0, 0.0, 0.0, 4.0]]),
                default_variable=[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
        assert "Specification of matrix and/or default_variable" in str(error_text.value) and \
               "not compatible for multiplication" in str(error_text.value)
Code example #23
 def test_output_ports(self, mode, op, expected, benchmark):
     benchmark.group = "Output Port Op: {}".format(op)
     PM1 = ProcessingMechanism(default_variable=[0, 0, 0], output_ports=[op])
     var = [1, 2, 4] if op in {MEAN, MEDIAN, STANDARD_DEVIATION, VARIANCE} else [1, 2, -4]
     if mode == "Python":
         ex = PM1.execute
     elif mode == "LLVM":
         ex = pnlvm.MechExecution(PM1).execute
     elif mode == "PTX":
         ex = pnlvm.MechExecution(PM1).cuda_execute
     res = benchmark(ex, var)
     res = PM1.output_ports[0].value if mode == "Python" else res
     assert np.allclose(res, expected)
Code example #24
 def test_array_mode(self):
     input_mech = ProcessingMechanism(size=2)
     ddm = DDM(input_format=ARRAY,
               function=DriftDiffusionAnalytical(),
               output_ports=[SELECTED_INPUT_ARRAY, DECISION_VARIABLE_ARRAY],
               name='DDM')
     comp = Composition()
     comp.add_linear_processing_pathway(pathway=[input_mech, ddm])
     result = comp.run(inputs={input_mech: [1, 0]})
     assert np.allclose(ddm.output_ports[0].value, [1, 0])
     assert np.allclose(ddm.output_ports[1].value, [1, 0])
     assert np.allclose(
         ddm.value, [[1.00000000e+00], [1.19932930e+00], [9.99664650e-01],
                     [3.35350130e-04], [1.19932930e+00], [2.48491374e-01],
                     [1.48291009e+00], [1.19932930e+00], [2.48491374e-01],
                     [1.48291009e+00]])
     assert np.allclose(result, [[1., 0.]])
Code example #25
    def test_dict_of_floats(self):
        input_labels_dict = {"red": 1,
                             "green":0}

        M = ProcessingMechanism(params={INPUT_LABELS_DICT: input_labels_dict})
        P = Process(pathway=[M])
        S = System(processes=[P])

        store_input_labels = []

        def call_after_trial():
            store_input_labels.append(M.get_input_labels(S))

        S.run(inputs=['red', 'green', 'green', 'red'],
              call_after_trial=call_after_trial)
        assert np.allclose(S.results, [[[1.]], [[0.]], [[0.]], [[1.]]])
        assert store_input_labels == [['red'], ['green'], ['green'], ['red']]
        S.run(inputs=[1, 'green', 0, 'red'])
        assert np.allclose(S.results, [[[1.]], [[0.]], [[0.]], [[1.]], [[1.]], [[0.]], [[0.]], [[1.]]])
Code example #26
    def test_user_added_ports(self):

        comp = Composition()
        mech = ProcessingMechanism()
        comp.add_node(mech)
        # instantiate custom input and output ports
        inp = InputPort(size=2)
        out = OutputPort(size=2)
        # add custom input and output ports to CIM
        comp.input_CIM.add_ports([inp, out])
        # verify the ports have been added to the user_added_ports set
        # and that no extra ports were added
        assert inp in comp.input_CIM.user_added_ports['input_ports']
        assert len(comp.input_CIM.user_added_ports['input_ports']) == 1
        assert out in comp.input_CIM.user_added_ports['output_ports']
        assert len(comp.input_CIM.user_added_ports['output_ports']) == 1
        comp.input_CIM.remove_ports([inp, out])
        # verify that the call to remove_ports successfully removed the ports from user_added_ports
        assert len(comp.input_CIM.user_added_ports['input_ports']) == 0
        assert len(comp.input_CIM.user_added_ports['output_ports']) == 0
Code example #27
    def test_warning_on_custom_cim_ports(self):

        comp = Composition()
        mech = ProcessingMechanism()
        comp.add_node(mech)
        warning_text = (
            'You are attempting to add custom ports to a CIM, which can result in unpredictable behavior '
            'and is therefore recommended against. If suitable, you should instead add ports to the '
            r'mechanism\(s\) that project to or are projected to from the CIM.'
        )
        with pytest.warns(UserWarning, match=warning_text):
            # KDM 7/22/20: previously was OutputPort, but that produces
            # an invalid CIM state that cannot be executed, and will
            # throw an error due to new _update_default_variable call
            comp.input_CIM.add_ports(InputPort())

        with pytest.warns(None) as w:
            comp._analyze_graph()
            comp.run({mech: [[1]]})

        assert len(w) == 0
Code example #28
def test_moving_average():

    # Set an arbitrary seed and a global random state to keep the randomly generated quantities the same between runs
    seed = 20170530  # this will be separately given to ELFI
    np.random.seed(seed)

    # true parameters
    t1_true = 0.6
    t2_true = 0.2

    # Define a function that simulates a 2nd order moving average, assuming mean zero:
    # y_t = w_t + t1*w_{t-1} + t2*w_{t-2}
    # where t1 and t2 are real and w_k is an i.i.d. white-noise sequence ~ N(0, 1)
    def MA2(input=[0],
            t1=0.5,
            t2=0.5,
            n_obs=100,
            batch_size=1,
            random_state=None):
        # FIXME: Convert arguments to scalars if they are not. Why is this necessary?
        # PsyNeuLink, when creating a user defined function, seems to expect the function
        # to support inputs of type np.ndarray even when they are only allowed to be
        # scalars.
        n_obs = n_obs[0] if (type(n_obs) is np.ndarray) else n_obs
        batch_size = batch_size[0] if (
            type(batch_size) is np.ndarray) else batch_size

        # Make inputs 2d arrays for numpy broadcasting with w
        t1 = np.asanyarray(t1).reshape((-1, 1))
        t2 = np.asanyarray(t2).reshape((-1, 1))
        random_state = random_state or np.random

        w = random_state.randn(int(batch_size),
                               int(n_obs) + 2)  # i.i.d. sequence ~ N(0,1)
        x = w[:, 2:] + t1 * w[:, 1:-1] + t2 * w[:, :-2]
        return x

    # Let's make some observed data. This will be the data we try to fit parameters to.
    y_obs = MA2(t1_true, t2_true)

    # Make a processing mechanism out of our simulator.
    ma_mech = ProcessingMechanism(function=MA2,
                                  size=1,
                                  name='Moving Average (2nd Order)')

    # Now let's add it to a composition.
    comp = Composition(name="Moving_Average")
    comp.add_node(ma_mech)

    # Now let's set up some control signals for the parameters we want to
    # infer. This is where we would like to specify priors.
    signalSearchRange = SampleSpec(start=0.1, stop=2.0, step=0.2)
    t1_control_signal = ControlSignal(projections=[('t1', ma_mech)],
                                      allocation_samples=signalSearchRange,
                                      modulation=OVERRIDE)
    t2_control_signal = ControlSignal(projections=[('t2', ma_mech)],
                                      allocation_samples=signalSearchRange,
                                      modulation=OVERRIDE)

    # A function to calculate the auto-covariance with specific lag for a
    # time series. We will use this function to compute the summary statistics
    # for generated and observed data so that we can compute a metric between the
    # two. In PsyNeuLink terms, this will be part of an ObjectiveMechanism.
    def autocov(agent_rep, x=None, lag=1):
        if x is None:
            return np.asarray(0.0)

        C = np.mean(x[:, lag:] * x[:, :-lag], axis=1)
        return C

    # # Lets make one function that computes all the summary stats in one go because PsyNeuLink
    # # objective mechanism expect a single function.
    # def objective_function(x):
    #     return np.concatenate((autocov(x), autocov(x, lag=2)))
    #
    # # Objective Mechanism and its function currently need to be specified in the script.
    # # (In future versions, this will be set up automatically)
    # objective_mech = ObjectiveMechanism(function=objective_function,
    #                                         monitor=[ma_mech])

    # Setup the controller with the ParamEstimationFunction
    comp.add_controller(controller=OptimizationControlMechanism(
        agent_rep=comp,
        function=ParamEstimationFunction(
            priors={
                't1': (scipy.stats.uniform, 0, 2),
                't2': (scipy.stats.uniform, 0, 2)
            },
            observed=y_obs,
            summary=[(autocov, 1), (autocov, 2)],
            discrepancy='euclidean',
            n_samples=3,
            quantile=0.01,  # Set very small now cause things are slow.
            seed=seed),
        objective_mechanism=False,
        control_signals=[t1_control_signal, t2_control_signal]))

    comp.disable_all_history()

    # Let's set up some input to the mechanism, though it doesn't actually use it for anything.
    stim_list_dict = {ma_mech: [0]}

    # # FIXME: Show graph fails when the controller doesn't have an objective mechanism.
    # comp.show_graph(show_controller=True,
    #                 show_projection_labels=True,
    #                 show_node_structure=True,
    #                 show_cim=True,
    #                 show_dimensions=True)

    comp.run(inputs=stim_list_dict)

    # FIXME: The final test should be to check if the true parameters set above are
    # recovered approximately. Not sure how to get all the samples out from
    # above yet though so just pass for now.
    assert True
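
For context on the summary statistics: an MA(2) process with unit-variance noise has theoretical lag-1 autocovariance t1 + t1*t2 and lag-2 autocovariance t2 (0.72 and 0.2 for the true parameters above). A small NumPy sketch, using a simplified version of the test's autocov, that checks the sample statistics against those values:

import numpy as np

t1, t2, n_obs = 0.6, 0.2, 200000
rng = np.random.RandomState(0)
w = rng.randn(1, n_obs + 2)                      # i.i.d. noise ~ N(0, 1)
x = w[:, 2:] + t1 * w[:, 1:-1] + t2 * w[:, :-2]  # y_t = w_t + t1*w_{t-1} + t2*w_{t-2}

def autocov(x, lag=1):
    return np.mean(x[:, lag:] * x[:, :-lag], axis=1)

print(autocov(x, lag=1), t1 + t1 * t2)  # sample value should be close to 0.72
print(autocov(x, lag=2), t2)            # sample value should be close to 0.2
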
Code example #29
 def test_processing_mechanism_CombineMeans_function(self):
     PM1 = ProcessingMechanism(function=CombineMeans)
     PM1.execute(1.0)
Code example #30
def test_predator_prey(benchmark, mode, samples):
    if len(samples) > 10 and mode not in {"LLVM", "LLVMRun", "Python-PTX"}:
        pytest.skip("This test takes too long")
    # OCM default mode is Python
    mode, ocm_mode = (mode + "-Python").split('-')[0:2]
    benchmark.group = "Predator-Prey " + str(len(samples))
    obs_len = 3
    obs_coords = 2
    player_idx = 0
    player_obs_start_idx = player_idx * obs_len
    player_value_idx = player_idx * obs_len + obs_coords
    player_coord_slice = slice(player_obs_start_idx,player_value_idx)
    predator_idx = 1
    predator_obs_start_idx = predator_idx * obs_len
    predator_value_idx = predator_idx * obs_len + obs_coords
    predator_coord_slice = slice(predator_obs_start_idx,predator_value_idx)
    prey_idx = 2
    prey_obs_start_idx = prey_idx * obs_len
    prey_value_idx = prey_idx * obs_len + obs_coords
    prey_coord_slice = slice(prey_obs_start_idx,prey_value_idx)

    player_len = prey_len = predator_len = obs_coords

    # Perceptual Mechanisms
    player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS")
    prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS")
    predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS")

    # Action Mechanism
    #    Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey.
    #    Note: unitization is done in the main loop, to allow compilation of the LinearCombination function (TBI).
    greedy_action_mech = ComparatorMechanism(name='ACTION',sample=player_obs,target=prey_obs)

    # Create Composition
    agent_comp = Composition(name='PREDATOR-PREY COMPOSITION')
    agent_comp.add_node(player_obs)
    agent_comp.add_node(predator_obs)
    agent_comp.add_node(prey_obs)
    agent_comp.add_node(greedy_action_mech)
    agent_comp.exclude_node_roles(predator_obs, NodeRole.OUTPUT)

    ocm = OptimizationControlMechanism(features={SHADOW_INPUTS: [player_obs, predator_obs, prey_obs]},
                                       agent_rep=agent_comp,
                                       function=GridSearch(direction=MINIMIZE,
                                                           save_values=True),

                                       objective_mechanism=ObjectiveMechanism(function=Distance(metric=NORMED_L0_SIMILARITY),
                                                                              monitor=[
                                                                                  player_obs,
                                                                                  prey_obs
                                                                              ]),
                                       control_signals=[ControlSignal(modulates=(VARIANCE,player_obs),
                                                                      allocation_samples=samples),
                                                        ControlSignal(modulates=(VARIANCE,predator_obs),
                                                                      allocation_samples=samples),
                                                        ControlSignal(modulates=(VARIANCE,prey_obs),
                                                                      allocation_samples=samples)
                                                        ],
                                       )
    agent_comp.add_controller(ocm)
    agent_comp.enable_controller = True
    ocm.comp_execution_mode = ocm_mode

    input_dict = {player_obs:[[1.1576537,  0.60782117]],
                  predator_obs:[[-0.03479106, -0.47666293]],
                  prey_obs:[[-0.60836214,  0.1760381 ]],
                 }
    run_results = agent_comp.run(inputs=input_dict, num_trials=2, bin_execute=mode)

    if len(samples) == 2:
        # KDM 12/4/19: modified results due to global seed offset of
        # GaussianDistort assignment.
        # to produce old numbers, run get_global_seed once before creating
        # each Mechanism with GaussianDistort above
        assert np.allclose(run_results[0], [[-10.06333025,   2.4845505 ]])
        if mode == 'Python':
            assert np.allclose(ocm.feature_values, [[ 1.1576537,   0.60782117],
                                                    [-0.03479106, -0.47666293],
                                                    [-0.60836214,  0.1760381 ]])

    if benchmark.enabled:
        benchmark(agent_comp.run, inputs=input_dict, bin_execute=mode)