Example #1
    def test_model_based_ocm_no_simulations(self):
        A = pnl.ProcessingMechanism(name='A')
        B = pnl.ProcessingMechanism(name='B',
                                    function=pnl.SimpleIntegrator(rate=1))

        comp = pnl.Composition(name='comp')
        comp.add_linear_processing_pathway([A, B])

        control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, A)],
            function=pnl.Linear,
            variable=1.0,
            allocation_samples=[1, 2, 3],
            intensity_cost_function=pnl.Linear(slope=0.))

        objective_mech = pnl.ObjectiveMechanism(monitor=[B])
        ocm = pnl.OptimizationControlMechanism(
            agent_rep=comp,
            features=[A.input_state],
            objective_mechanism=objective_mech,
            function=pnl.GridSearch(),
            num_estimates=1,
            control_signals=[control_signal],
            search_statefulness=False,
        )

        comp.add_controller(ocm)

        inputs = {A: [[[1.0]]]}

        comp.run(inputs=inputs, num_trials=1)

        # initial value of 1 plus each allocation sample (1, 2, 3) integrated: 1 + 1 + 2 + 3 = 7
        assert B.parameters.value.get(comp) == 7
Example #2
    def test_model_based_num_estimates(self):

        A = pnl.ProcessingMechanism(name='A')
        B = pnl.ProcessingMechanism(name='B',
                                    function=pnl.SimpleIntegrator(rate=1))

        comp = pnl.Composition(name='comp')
        comp.add_linear_processing_pathway([A, B])

        search_range = pnl.SampleSpec(start=0.25, stop=0.75, step=0.25)
        control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, A)],
            function=pnl.Linear,
            variable=1.0,
            allocation_samples=search_range,
            intensity_cost_function=pnl.Linear(slope=0.))

        objective_mech = pnl.ObjectiveMechanism(monitor=[B])
        ocm = pnl.OptimizationControlMechanism(
            agent_rep=comp,
            features=[A.input_state],
            objective_mechanism=objective_mech,
            function=pnl.GridSearch(),
            num_estimates=5,
            control_signals=[control_signal])

        comp.add_controller(ocm)

        inputs = {A: [[[1.0]]]}

        comp.run(inputs=inputs, num_trials=2)

        assert np.allclose(
            comp.simulation_results,
            [[np.array([2.25])], [np.array([3.5])], [np.array([4.75])],
             [np.array([3.])], [np.array([4.25])], [np.array([5.5])]])
        assert np.allclose(comp.results,
                           [[np.array([1.])], [np.array([1.75])]])
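        # the 6 simulation_results entries above appear to correspond to the 3 allocation
        # samples from SampleSpec (0.25, 0.5, 0.75) evaluated on each of the 2 trials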
Example #3
# randomly distributed noise should be added to the net input of each unit (except the input units)
response_layer = pnl.TransferMechanism(
    size=2,
    function=pnl.Logistic,
    name='RESPONSE',
    integrator_mode=True,
    noise=pnl.NormalDist(mean=0.0, standard_dev=.01).function,
    smoothing_factor=0.1)
#   Respond red accumulator
# parameters from the paper:
# alpha = rate of evidence accumulation = 0.1
# sigma = noise = 0.1
# noise will be square root(time_step_size * noise) * a random sample from a normal distribution
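# e.g., with time_step_size = 1.0 and noise = 0.1, each step's noise term would be
# sqrt(1.0 * 0.1) ~= 0.316 times a standard normal sample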
respond_red_accumulator = pnl.IntegratorMechanism(
    function=pnl.SimpleIntegrator(noise=0.1, rate=0.1),
    name='respond_red_accumulator')
#   Respond green accumulator
respond_green_accumulator = pnl.IntegratorMechanism(
    function=pnl.SimpleIntegrator(noise=0.1, rate=0.1),
    name='respond_green_accumulator')

#   add logging
response_layer.set_log_conditions('value')
respond_red_accumulator.set_log_conditions('value')
respond_green_accumulator.set_log_conditions('value')

# In[ ]:

#   SET UP CONNECTIONS
#   rows correspond to sender
Example #4
    def test_bustamante_Stroop_model(self):
        #  INPUT UNITS

        #  colors: ('red', 'green'), words: ('RED','GREEN')
        colors_input_layer = pnl.TransferMechanism(
            size=2,
            function=psyneulink.core.components.functions.transferfunctions.
            Linear,
            name='COLORS_INPUT')

        words_input_layer = pnl.TransferMechanism(
            size=2,
            function=psyneulink.core.components.functions.transferfunctions.
            Linear,
            name='WORDS_INPUT')

        #   Task layer, tasks: ('name the color', 'read the word')
        task_layer = pnl.TransferMechanism(size=2,
                                           function=psyneulink.core.components.
                                           functions.transferfunctions.Linear,
                                           name='TASK')

        #   HIDDEN LAYER UNITS

        #   colors_hidden: ('red','green')
        #   Logistic activation function, Gain = 1.0, Bias = -4.0 (in PNL bias is subtracted so enter +4.0 to get negative bias)
        #   randomly distributed noise to the net input
        #   time averaging = integration_rate = 0.1
        unit_noise = 0.005
        colors_hidden_layer = pnl.TransferMechanism(
            size=2,
            function=psyneulink.core.components.functions.transferfunctions.
            Logistic(gain=1.0, x_0=4.0),
            # should be able to get same result with offset = -4.0
            integrator_mode=True,
            noise=psyneulink.core.components.functions.distributionfunctions.
            NormalDist(mean=0, standard_deviation=unit_noise).function,
            integration_rate=0.1,
            name='COLORS HIDDEN')
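        # With x_0 = 4.0 the logistic activation is roughly 1 / (1 + exp(-gain * (net_input - 4.0))),
        # i.e. the effective -4.0 bias noted in the comment above.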
        #    words_hidden: ('RED','GREEN')
        words_hidden_layer = pnl.TransferMechanism(
            size=2,
            function=pnl.Logistic(gain=1.0, x_0=4.0),
            integrator_mode=True,
            noise=pnl.NormalDist(mean=0,
                                 standard_deviation=unit_noise).function,
            integration_rate=0.1,
            name='WORDS HIDDEN')

        #    OUTPUT UNITS

        #   Response layer, provide input to accumulator, responses: ('red', 'green')
        #   time averaging = tau = 0.1
        #   randomly distributed noise to the net input
        response_layer = pnl.TransferMechanism(
            size=2,
            function=psyneulink.core.components.functions.transferfunctions.
            Logistic,
            name='RESPONSE',
            integrator_mode=True,
            noise=psyneulink.core.components.functions.distributionfunctions.
            NormalDist(mean=0, standard_deviation=unit_noise).function,
            integration_rate=0.1)
        #   Respond red accumulator
        #   alpha = rate of evidence accumulation = 0.1
        #   sigma = noise = 0.1
        #   noise will be: square root(time_step_size * noise) * a random sample from a normal distribution
        accumulator_noise = 0.1
        respond_red_accumulator = pnl.IntegratorMechanism(
            function=pnl.SimpleIntegrator(
                noise=pnl.NormalDist(mean=0, standard_deviation=accumulator_noise).function,
                rate=0.1),
            name='respond_red_accumulator')
        #   Respond green accumulator
        respond_green_accumulator = pnl.IntegratorMechanism(
            function=pnl.SimpleIntegrator(
                noise=pnl.NormalDist(mean=0, standard_deviation=accumulator_noise).function,
                rate=0.1),
            name='respond_green_accumulator')

        #   LOGGING
        colors_hidden_layer.set_log_conditions('value')
        words_hidden_layer.set_log_conditions('value')
        response_layer.set_log_conditions('value')
        respond_red_accumulator.set_log_conditions('value')
        respond_green_accumulator.set_log_conditions('value')

        #   SET UP CONNECTIONS

        #   rows correspond to sender
        #   columns correspond to: weighting of the contribution that a given sender makes to the receiver

        #   INPUT TO HIDDEN
        # row 0: input_'red' to hidden_'red', hidden_'green'
        # row 1: input_'green' to hidden_'red', hidden_'green'
        color_weights = pnl.MappingProjection(matrix=np.atleast_2d(
            [[2.2, -2.2], [-2.2, 2.2]]),
                                              name='COLOR_WEIGHTS')
        # row 0: input_'RED' to hidden_'RED', hidden_'GREEN'
        # row 1: input_'GREEN' to hidden_'RED', hidden_'GREEN'
        word_weights = pnl.MappingProjection(
            matrix=np.atleast_2d([[2.6, -2.6], [-2.6, 2.6]]),
            name='WORD_WEIGHTS')

        #   HIDDEN TO RESPONSE
        # row 0: hidden_'red' to response_'red', response_'green'
        # row 1: hidden_'green' to response_'red', response_'green'
        color_response_weights = pnl.MappingProjection(
            matrix=np.atleast_2d([[1.3, -1.3], [-1.3, 1.3]]),
            name='COLOR_RESPONSE_WEIGHTS')
        # row 0: hidden_'RED' to response_'red', response_'green'
        # row 1: hidden_'GREEN' to response_'red', response_'green'
        word_response_weights = pnl.MappingProjection(
            matrix=np.atleast_2d([[2.5, -2.5], [-2.5, 2.5]]),
            name='WORD_RESPONSE_WEIGHTS')

        #   TASK TO HIDDEN LAYER
        #   row 0: task_CN to hidden_'red', hidden_'green'
        #   row 1: task_WR to hidden_'red', hidden_'green'
        task_CN_weights = pnl.MappingProjection(matrix=np.atleast_2d(
            [[4.0, 4.0], [0, 0]]),
                                                name='TASK_CN_WEIGHTS')

        #   row 0: task_CN to hidden_'RED', hidden_'GREEN'
        #   row 1: task_WR to hidden_'RED', hidden_'GREEN'
        task_WR_weights = pnl.MappingProjection(matrix=np.atleast_2d(
            [[0, 0], [4.0, 4.0]]),
                                                name='TASK_WR_WEIGHTS')

        #   RESPONSE UNITS TO ACCUMULATORS
        #   row 0: response_'red' to respond_red_accumulator
        #   row 1: response_'green' to respond_red_accumulator
        respond_red_differencing_weights = pnl.MappingProjection(
            matrix=np.atleast_2d([[1.0], [-1.0]]), name='RESPOND_RED_WEIGHTS')

        #   row 0: response_'red' to respond_green_accumulator
        #   row 1: response_'green' to respond_green_accumulator
        respond_green_differencing_weights = pnl.MappingProjection(
            matrix=np.atleast_2d([[-1.0], [1.0]]),
            name='RESPOND_GREEN_WEIGHTS')

        # CREATE COMPOSITION FROM PATHWAYS
        my_Stroop = pnl.Composition(pathways=[
            {
                'WORD_PATHWAY': [
                    words_input_layer, word_weights, words_hidden_layer,
                    word_response_weights, response_layer
                ]
            },
            {
                'COLOR_PATHWAY': [
                    colors_input_layer, color_weights, colors_hidden_layer,
                    color_response_weights, response_layer
                ]
            },
            {
                'TASK_CN_PATHWAY':
                [task_layer, task_CN_weights, colors_hidden_layer]
            },
            {
                'TASK_WR_PATHWAY':
                [task_layer, task_WR_weights, words_hidden_layer]
            },
            {
                'RESPOND_RED_PATHWAY': [
                    response_layer, respond_red_differencing_weights,
                    respond_red_accumulator
                ]
            },
            {
                'RESPOND_GREEN_PATHWAY': [
                    response_layer, respond_green_differencing_weights,
                    respond_green_accumulator
                ]
            },
        ])

        # my_Stroop.show()
        # my_Stroop.show_graph(show_dimensions=pnl.ALL)

        # Function to create test trials
        # a RED word input is [1,0] to words_input_layer and GREEN word is [0,1]
        # a red color input is [1,0] to colors_input_layer and green color is [0,1]
        # a color-naming trial is [1,0] to task_layer and a word-reading trial is [0,1]

        def trial_dict(red_color, green_color, red_word, green_word, CN, WR):

            trialdict = {
                colors_input_layer: [red_color, green_color],
                words_input_layer: [red_word, green_word],
                task_layer: [CN, WR]
            }
            return trialdict
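        # e.g., trial_dict(0, 1, 1, 0, 1, 0) (used in the run below) encodes a green
        # color input, the word RED, and a color-naming (CN) task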

        #   CREATE THRESHOLD FUNCTION
        # first value of DDM's value is DECISION_VARIABLE
        # context is always passed to Condition functions and is the context
        # in which the function gets called - below, during system execution
        def pass_threshold(mech1, mech2, thresh, context=None):
            results1 = mech1.output_ports[0].parameters.value.get(context)
            results2 = mech2.output_ports[0].parameters.value.get(context)
            for val in results1:
                if val >= thresh:
                    return True
            for val in results2:
                if val >= thresh:
                    return True
            return False

        accumulator_threshold = 1.0

        mechanisms_to_update = [
            colors_hidden_layer, words_hidden_layer, response_layer
        ]

        def switch_integrator_mode(mechanisms, mode):
            for mechanism in mechanisms:
                mechanism.integrator_mode = mode

        def switch_noise(mechanisms, noise):
            for mechanism in mechanisms:
                mechanism.noise.base = noise

        def switch_to_initialization_trial(mechanisms):
            # Turn off accumulation
            switch_integrator_mode(mechanisms, False)
            # Turn off noise
            switch_noise(mechanisms, 0)
            # Execute once per trial
            my_Stroop.termination_processing = {
                pnl.TimeScale.TRIAL: pnl.AllHaveRun()
            }

        def switch_to_processing_trial(mechanisms):
            # Turn on accumulation
            switch_integrator_mode(mechanisms, True)
            # Turn on noise
            switch_noise(
                mechanisms,
                psyneulink.core.components.functions.distributionfunctions.
                NormalDist(mean=0, standard_deviation=unit_noise).function)
            # Execute until one of the accumulators crosses the threshold
            my_Stroop.termination_processing = {
                pnl.TimeScale.TRIAL:
                pnl.While(pass_threshold, respond_red_accumulator,
                          respond_green_accumulator, accumulator_threshold)
            }

        def switch_trial_type():
            # Next trial will be a processing trial
            if isinstance(
                    my_Stroop.termination_processing[pnl.TimeScale.TRIAL],
                    pnl.AllHaveRun):
                switch_to_processing_trial(mechanisms_to_update)
            # Next trial will be an initialization trial
            else:
                switch_to_initialization_trial(mechanisms_to_update)

        CN_trial_initialize_input = trial_dict(0, 0, 0, 0, 1, 0)

        WR_trial_initialize_input = trial_dict(0, 0, 0, 0, 0, 1)

        # Start with an initialization trial
        switch_to_initialization_trial(mechanisms_to_update)

        my_Stroop.run(
            inputs=trial_dict(0, 1, 1, 0, 1, 0),
            # termination_processing=change_termination_processing,
            num_trials=4,
            call_after_trial=switch_trial_type)
Example #5
    termination_measure=pnl.Distance(
        metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
    ),
)
C = pnl.RecurrentTransferMechanism(
    name="C",
    function=pnl.Linear(default_variable=[[0]]),
    initial_value=[[0]],
    output_ports=["RESULTS"],
    termination_measure=pnl.Distance(
        metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
    ),
)
D = pnl.IntegratorMechanism(
    name="D",
    function=pnl.SimpleIntegrator(initializer=[[0]], default_variable=[[0]]),
)

Inner_Composition = pnl.Composition(name="Inner Composition")

E = pnl.TransferMechanism(
    name="E",
    function=pnl.Linear(default_variable=[[0]]),
    termination_measure=pnl.Distance(
        metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
    ),
)
F = pnl.TransferMechanism(
    name="F",
    function=pnl.Linear(default_variable=[[0]]),
    termination_measure=pnl.Distance(
Example #6
import psyneulink as pnl

comp = pnl.Composition(name="ABCD")

A = pnl.TransferMechanism(function=pnl.Linear(slope=2.0, intercept=2.0),
                          name="A")
B = pnl.TransferMechanism(function=pnl.Logistic, name="B")
C = pnl.TransferMechanism(function=pnl.Exponential, name="C")
D = pnl.IntegratorMechanism(function=pnl.SimpleIntegrator(rate=0.05), name="D")

for m in [A, B, C, D]:
    comp.add_node(m)

comp.add_linear_processing_pathway([A, B, D])
comp.add_linear_processing_pathway([A, C, D])

comp.run(inputs={A: 0}, log=True, num_trials=50)

print("Finished running model")

print(comp.results)
for node in comp.nodes:
    print(f"{node} {node.name}: {node.parameters.value.get(comp)}")

# comp.show_graph()

try:
    import matplotlib.pyplot as plt

    def generate_time_array(node, context="ABCD", param="value"):
        return [
Example #7
import psyneulink as pnl
import ABCD

ABCD = pnl.Composition(name='ABCD')

A_0 = pnl.TransferMechanism(name='A_0',
                            function=pnl.Linear(intercept=2, slope=2))
A_input_0 = pnl.TransferMechanism(name='A_input_0',
                                  function=pnl.Linear(default_variable=0))
B_0 = pnl.TransferMechanism(name='B_0', function=pnl.Logistic)
C_0 = pnl.TransferMechanism(name='C_0', function=pnl.Exponential)
D_0 = pnl.IntegratorMechanism(name='D_0',
                              function=pnl.SimpleIntegrator(rate=0.05))

ABCD.add_node(A_0)
ABCD.add_node(A_input_0)
ABCD.add_node(B_0)
ABCD.add_node(C_0)
ABCD.add_node(D_0)

ABCD.add_projection(projection=pnl.MappingProjection(name='Edge A_0 to B_0'),
                    sender=A_0,
                    receiver=B_0)
ABCD.add_projection(
    projection=pnl.MappingProjection(name='Edge A_input_0 to A_0'),
    sender=A_input_0,
    receiver=A_0)
ABCD.add_projection(projection=pnl.MappingProjection(name='Edge A_0 to C_0'),
                    sender=A_0,
                    receiver=C_0)
ABCD.add_projection(projection=pnl.MappingProjection(name='Edge B_0 to D_0'),
Example #8
    def _generate_layers(self):
        """
        Generate the layers for this model. The hidden layers use an integrator mode, rate, and noise function.
        TODO: does the indirect pathway accumulate exactly the same as the hidden and output layers?
        :return: None, saves the layers into a whole bunch of members
        """
        # Inputs
        self.color_input_layer = pnl.TransferMechanism(size=self.num_features, name='color_input')
        self.shape_input_layer = pnl.TransferMechanism(size=self.num_features, name='shape_input')

        # Task units
        self.color_task_layer = pnl.TransferMechanism(size=1, name='color_task')
        self.shape_task_layer = pnl.TransferMechanism(size=1, name='shape_task')

        # Hidden layers
        self.color_hidden_layer = pnl.TransferMechanism(size=self.hidden_layer_size,
                                                        name='color_hidden',
                                                        function=pnl.Logistic(gain=self.hidden_gain,
                                                                              bias=self.hidden_bias),
                                                        integrator_mode=self.integrator_mode,
                                                        integration_rate=self.integration_rate,
                                                        noise=self._generate_noise_function())
        self.shape_hidden_layer = pnl.TransferMechanism(size=self.hidden_layer_size,
                                                        name='shape_hidden',
                                                        function=pnl.Logistic(gain=self.hidden_gain,
                                                                              bias=self.hidden_bias),
                                                        integrator_mode=self.integrator_mode,
                                                        integration_rate=self.integration_rate,
                                                        noise=self._generate_noise_function())

        # self.color_dummy = pnl.TransferMechanism(size=self.hidden_layer_size, name='dummy')

        if self.indirect_path:
            self._generate_indirect_layer()

        # Output layers
        self.output_layer = pnl.TransferMechanism(size=self.num_features,
                                                  name='output',
                                                  function=pnl.Logistic,
                                                  integrator_mode=self.integrator_mode,
                                                  integration_rate=self.integration_rate,
                                                  noise=self._generate_noise_function())

        self.first_accumulator = pnl.IntegratorMechanism(
            function=pnl.SimpleIntegrator(noise=pnl.NormalDist(standard_dev=self.accumulator_noise_std).function,
                                          rate=self.accumulator_rate),
            name='first_response_accumulator')

        self.second_accumulator = pnl.IntegratorMechanism(
            function=pnl.SimpleIntegrator(noise=pnl.NormalDist(standard_dev=self.accumulator_noise_std).function,
                                          rate=self.accumulator_rate),
            name='second_response_accumulator')

        if self.log_values:
            self.log_layers = [self.color_hidden_layer, self.shape_hidden_layer,
                               self.output_layer, self.first_accumulator, self.second_accumulator]
            if self.indirect_path:
                # Inserting there for it to appear in the correct order in output dataframe
                self.log_layers.insert(2, self.indirect_shape_layer)

            for layer in self.log_layers:
                layer.set_log_conditions('value')
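_generate_noise_function is not shown in this excerpt; a minimal sketch consistent with the accumulators above, assuming a hypothetical self.unit_noise_std attribute holds the noise standard deviation for the hidden and output layers, might be:

    def _generate_noise_function(self):
        # hypothetical sketch: per-unit Gaussian noise on the net input,
        # mirroring the pnl.NormalDist usage in the response accumulators above
        return pnl.NormalDist(standard_dev=self.unit_noise_std).function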
Example #9
class TestMechanismFunctionParameters:
    f = pnl.Linear()
    i = pnl.SimpleIntegrator()
    mech_1 = pnl.TransferMechanism(function=f, integrator_function=i)
    mech_2 = pnl.TransferMechanism(function=f, integrator_function=i)
    integrator_mechanism = pnl.IntegratorMechanism(function=i)
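    # the same Linear and SimpleIntegrator instances are deliberately shared across
    # mechanisms; the tests below check which component ends up owning each instance
    # and that defaults and assigned values remain distinct objects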

    @pytest.mark.parametrize(
        "f, g",
        [
            pytest.param(
                mech_1.defaults.function,
                mech_2.defaults.function,
                id="function_defaults",
            ),
            pytest.param(
                mech_1.defaults.function,
                mech_1.parameters.function.get(),
                id="function_default-and-value",
            ),
            pytest.param(
                mech_1.defaults.function,
                mech_2.parameters.function.get(),
                id="function_default-and-other-value",
            ),
            pytest.param(
                mech_1.defaults.integrator_function,
                mech_2.defaults.integrator_function,
                id="integrator_function_defaults",
            ),
            pytest.param(
                mech_1.defaults.integrator_function,
                mech_1.parameters.integrator_function.get(),
                id="integrator_function_default-and-value",
            ),
            pytest.param(
                mech_1.defaults.integrator_function,
                mech_2.parameters.integrator_function.get(),
                id="integrator_function_default-and-other-value",
            ),
        ],
    )
    def test_function_parameter_distinctness(self, f, g):
        assert f is not g

    @pytest.mark.parametrize("f, owner", [
        pytest.param(mech_1.parameters.function.get(), mech_1, id='function'),
        pytest.param(integrator_mechanism.class_defaults.function,
                     integrator_mechanism.class_parameters.function,
                     id="class_default_function"),
        pytest.param(mech_1.defaults.function,
                     mech_1.parameters.function,
                     id="default_function"),
        pytest.param(mech_1.parameters.termination_measure.get(),
                     mech_1,
                     id='termination_measure'),
        pytest.param(mech_1.class_defaults.termination_measure,
                     mech_1.class_parameters.termination_measure,
                     id="class_default_termination_measure"),
        pytest.param(mech_1.defaults.termination_measure,
                     mech_1.parameters.termination_measure,
                     id="default_termination_measure"),
    ])
    def test_function_parameter_ownership(self, f, owner):
        assert f.owner is owner

    @pytest.mark.parametrize('param_name, function', [
        ('function', f),
        ('integrator_function', i),
    ])
    def test_function_parameter_assignment(self, param_name, function):
        # mech_1 should use the exact instances, mech_2 should have copies
        assert getattr(self.mech_1.parameters, param_name).get() is function
        assert getattr(self.mech_2.parameters,
                       param_name).get() is not function
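The last test pins down the sharing behavior asserted above: the first mechanism handed a Function instance keeps that exact object, while later mechanisms receive copies. A minimal standalone illustration of the same assertions (a sketch mirroring mech_1 and mech_2 above):

import psyneulink as pnl

f = pnl.Linear()
m1 = pnl.TransferMechanism(function=f)
m2 = pnl.TransferMechanism(function=f)
assert m1.parameters.function.get() is f       # first owner keeps the original instance
assert m2.parameters.function.get() is not f   # a later mechanism gets a copy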
Example #10
#   Response layer, responses: ('red', 'green')
# tau = 0.1 (here, the integration_rate / smoothing factor)
# randomly distributed noise should be added to the net input of each unit (except the input units)
response_layer = pnl.TransferMechanism(size=2,
                                       function=pnl.Logistic,
                                       name='RESPONSE',
                                       integrator_mode=True,
                                       noise=pnl.NormalDist(mean=0.0, standard_dev=.01).function,
                                       integration_rate=0.1)
#   Respond red accumulator
# parameters from the paper:
# alpha = rate of evidence accumulation = 0.1
# sigma = noise = 0.1
# noise will be square root(time_step_size * noise) * a random sample from a normal distribution
respond_red_accumulator = pnl.IntegratorMechanism(
    function=pnl.SimpleIntegrator(noise=0.1, rate=0.1),
    name='respond_red_accumulator')
#   Respond green accumulator
respond_green_accumulator = pnl.IntegratorMechanism(
    function=pnl.SimpleIntegrator(noise=0.1, rate=0.1),
    name='respond_green_accumulator')

#   add logging
response_layer.set_log_conditions('value')
respond_red_accumulator.set_log_conditions('value')
respond_green_accumulator.set_log_conditions('value')


# In[ ]:

Example #11
                              metric=pnl.MAX_ABS_DIFF,
                              default_variable=[[[0]], [[0]]]))
B = pnl.TransferMechanism(name='B',
                          function=pnl.Logistic(default_variable=[[0]]),
                          initial_value=[[0]],
                          termination_measure=pnl.Distance(
                              metric=pnl.MAX_ABS_DIFF,
                              default_variable=[[[0]], [[0]]]))
C = pnl.TransferMechanism(name='C',
                          function=pnl.Exponential(default_variable=[[0]]),
                          initial_value=[[0]],
                          termination_measure=pnl.Distance(
                              metric=pnl.MAX_ABS_DIFF,
                              default_variable=[[[0]], [[0]]]))
D = pnl.IntegratorMechanism(name='D',
                            function=pnl.SimpleIntegrator(
                                rate=0.05, default_variable=[[0]]))

ABCD.add_node(A)
ABCD.add_node(B)
ABCD.add_node(C)
ABCD.add_node(D)

ABCD.add_projection(projection=pnl.MappingProjection(
    name='MappingProjection from A[RESULT] to B[InputPort-0]',
    function=pnl.LinearMatrix(matrix=[[1.0]], default_variable=[2.0])),
                    sender=A,
                    receiver=B)
ABCD.add_projection(projection=pnl.MappingProjection(
    name='MappingProjection from A[RESULT] to C[InputPort-0]',
    function=pnl.LinearMatrix(matrix=[[1.0]], default_variable=[2.0])),
                    sender=A,