    def test_default_lc_control_mechanism(self, benchmark, mode):
        G = 1.0
        k = 0.5
        starting_value_LC = 2.0
        user_specified_gain = 1.0

        A = pnl.TransferMechanism(function=psyneulink.core.components.functions.transferfunctions.Logistic(gain=user_specified_gain), name='A')
        B = pnl.TransferMechanism(function=psyneulink.core.components.functions.transferfunctions.Logistic(gain=user_specified_gain), name='B')
        # B.output_states[0].value *= 0.0  # Reset after init | Doesn't matter here b/c default var = zero, no intercept

        P = pnl.Process(pathway=[A, B])
        S = pnl.System(processes=[P])

        LC = pnl.LCControlMechanism(
            system=S,
            modulated_mechanisms=[A, B],
            base_level_gain=G,
            scaling_factor_gain=k,
            objective_mechanism=pnl.ObjectiveMechanism(
                function=psyneulink.core.components.functions.transferfunctions.Linear,
                monitor=[B],
                name='LC ObjectiveMechanism'
            )
        )
        for output_state in LC.output_states:
            output_state.parameters.value.set(output_state.value * starting_value_LC, S, override=True)

        LC.reinitialize_when = pnl.Never()
        # THIS CURRENTLY DOES NOT WORK:
        # P = pnl.Process(pathway=[A, B])
        # P2 = pnl.Process(pathway=[LC])
        # S = pnl.System(processes=[P, P2])
        # S.show_graph()

        gain_created_by_LC_output_state_1 = []
        mod_gain_assigned_to_A = []
        base_gain_assigned_to_A = []
        mod_gain_assigned_to_B = []
        base_gain_assigned_to_B = []

        def report_trial(system):
            gain_created_by_LC_output_state_1.append(LC.output_state.parameters.value.get(system))
            mod_gain_assigned_to_A.append(A.get_mod_gain(system))
            mod_gain_assigned_to_B.append(B.get_mod_gain(system))
            base_gain_assigned_to_A.append(A.function.gain)
            base_gain_assigned_to_B.append(B.function.gain)

        benchmark(S.run, inputs={A: [[1.0], [1.0], [1.0], [1.0], [1.0]]},
                  call_after_trial=functools.partial(report_trial, S))

        # (1) First value of gain in mechanisms A and B must be whatever we hardcoded for LC starting value
        assert mod_gain_assigned_to_A[0] == starting_value_LC

        # (2) Base gain should always remain at the user-specified value
        for i in range(5):
            assert base_gain_assigned_to_A[i] == user_specified_gain
            assert base_gain_assigned_to_B[i] == user_specified_gain

        # (3) LC output on trial n becomes gain of A and B on trial n + 1
        assert np.allclose(mod_gain_assigned_to_A[1:], gain_created_by_LC_output_state_1[0:-1])

        # (4) mechanisms A and B should always have the same gain values (b/c they are identical)
        assert np.allclose(mod_gain_assigned_to_A, mod_gain_assigned_to_B)
Example 2
import numpy as np

### building the LeabraMechanism
n_input = 4  # don't change this!
n_output = 2  # don't change this!
n_hidden = 0
Leab = pnl.LeabraMechanism(input_size=n_input, output_size=n_output, hidden_layers=n_hidden,
                        hidden_sizes=None, training_flag=True, quarter_size=20)

### building the PsyNeuLink network
T_input = pnl.TransferMechanism(size=n_input)
T_target = pnl.TransferMechanism(size=n_output)
# target_projection connects T_target to the TARGET input state of Leab
target_projection = pnl.MappingProjection(sender=T_target, receiver = Leab.input_states[1])

p_input = pnl.Process(pathway=[T_input, Leab])
p_target = pnl.Process(pathway=[T_target, target_projection, Leab])

sys = pnl.System(processes=[p_input, p_target])

### building the learning data
n = 1000
inputs = [None] * n
targets = [None] * n
print("here's what the inputs/targets will look like:")
for i in range(n):
    nums = np.random.randint(0, 7, size=2) * 0.4
    a = nums[0]
    b = nums[1]
    inputs[i] = [a, a, b, b]
    if a > b:
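        # Hedged completion (the original snippet is cut off here): with a 2-unit
        # output layer, the targets are presumably one-hot labels encoding which of
        # a and b is larger; the label orientation below is an assumption.
        targets[i] = [1, 0]
    else:
        targets[i] = [0, 1]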
Example 3
                               starting_point=pnl.CONTROL,
                               noise=pnl.CONTROL,
                           ),
                           output_states=[pnl.SELECTED_INPUT_ARRAY],
                           name='DDM')

# Construct Process
# Notes:
#    The np.array specifies the matrix used as the Mapping Projection from input_layer to action_selection,
#        which ensures the left element of the input favors the left action (positive value of DDM decision variable),
#        and the right element favors the right action (negative value of DDM decision variable)
#    The learning argument specifies Reinforcement as the learning function for the Projection
p = pnl.Process(
    default_variable=[0, 0],
    # pathway=[input_layer, np.array([[1],[-1]]), action_selection],
    pathway=[input_layer, pnl.IDENTITY_MATRIX, action_selection],
    learning=pnl.LearningProjection(
        learning_function=psyneulink.core.components.functions.
        learningfunctions.Reinforcement(learning_rate=0.5)),
    target=0)

s = pnl.System(processes=[p],
               controller=pnl.EVCControlMechanism(
                   control_signals=(pnl.LEARNING_RATE,
                                    p.learning_mechanisms[1])))

# EXECUTION:

# Prints initial weight matrix for the Projection from the input_layer to the action_selection Mechanism
print('reward prediction weights: \n',
      action_selection.input_state.path_afferents[0].matrix)
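
# Hedged addendum (not part of the original script): the EVCControlMechanism assigned
# as the System's controller exposes its ControlSignals (here, one for the LEARNING_RATE
# of the learning mechanism), which can be inspected directly.
print('controller control signals: \n', s.controller.control_signals)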
Example 4
    def test_log_array_with_scheduler(self):
        T1 = pnl.TransferMechanism(name='log_test_T1',
                                   integrator_mode=True,
                                   smoothing_factor=0.5)
        T2 = pnl.TransferMechanism(name='log_test_T2',
                                   function=pnl.Linear(slope=6.0))
        PS = pnl.Process(name='log_test_PS', pathway=[T1, T2])
        SYS = pnl.System(name='log_test_SYS', processes=[PS])

        def pass_threshold(mech, thresh):
            results = mech.output_states[0].value
            for val in results:
                if abs(val) >= thresh:
                    return True
            return False

        terminate_trial = {
            pnl.TimeScale.TRIAL: pnl.While(pass_threshold, T2, 5.0)
        }

        T1.set_log_conditions(pnl.VALUE)
        T1.set_log_conditions(pnl.SLOPE)
        T1.set_log_conditions(pnl.RESULTS)
        T2.set_log_conditions(pnl.VALUE)
        T2.set_log_conditions(pnl.SLOPE)

        SYS.run(inputs={T1: [[1.0]]}, termination_processing=terminate_trial)

        log_array_T1 = T1.log.nparray(entries=['RESULTS', 'slope', 'value'])
        log_array_T2 = T2.log.nparray(entries=['value', 'slope'])

        # Check values
        run_results = [["Run"], [0], [0], [0]]
        trial_results = [["Trial"], [0], [0], [0]]
        pass_results = [["Pass"], [0], [1], [2]]
        time_step_results = [["Time_step"], [0], [0], [0]]
        results_results = ["RESULTS", [0.5], [0.75], [0.875]]
        slope_results = ["slope", [1], [1], [1]]
        value_results = ["value", [[0.5]], [[0.75]], [[0.875]]]
        for i in range(4):
            assert log_array_T1[0][i] == run_results[i]
            assert log_array_T1[1][i] == trial_results[i]
            assert log_array_T1[2][i] == pass_results[i]
            assert log_array_T1[3][i] == time_step_results[i]
            assert log_array_T1[4][i] == results_results[i]
            assert log_array_T1[5][i] == slope_results[i]
            assert log_array_T1[6][i] == value_results[i]

        # Check values
        run_results = [["Run"], [0], [0], [0]]
        trial_results = [["Trial"], [0], [0], [0]]
        pass_results = [["Pass"], [0], [1], [2]]
        time_step_results = [["Time_step"], [1], [1], [1]]
        value_results = ["value", [[3]], [[4.5]], [[5.25]]]
        slope_results = ["slope", [6], [6], [6]]
        for i in range(4):
            assert log_array_T2[0][i] == run_results[i]
            assert log_array_T2[1][i] == trial_results[i]
            assert log_array_T2[2][i] == pass_results[i]
            assert log_array_T2[3][i] == time_step_results[i]
            assert log_array_T2[4][i] == value_results[i]
            assert log_array_T2[5][i] == slope_results[i]
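
        # Hedged addendum (not in the original test): the logged entries can also be
        # printed in tabular form for inspection, as other tests in this file do.
        T1.log.print_entries()
        T2.log.print_entries()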
Example 5
    def test_log(self):

        T_1 = pnl.TransferMechanism(name='log_test_T_1', size=2)
        T_2 = pnl.TransferMechanism(name='log_test_T_2', size=2)
        PS = pnl.Process(name='log_test_PS', pathway=[T_1, T_2])
        PJ = T_2.path_afferents[0]

        assert T_1.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'OFF',
                                     'RESULTS': 'OFF',
                                     'intercept': 'OFF',
                                     'noise': 'OFF',
                                     'smoothing_factor': 'OFF',
                                     'value': 'OFF'}
        assert T_2.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'OFF',
                                     'RESULTS': 'OFF',
                                     'intercept': 'OFF',
                                     'noise': 'OFF',
                                     'smoothing_factor': 'OFF',
                                     'value': 'OFF'}
        assert PJ.loggable_items == {'matrix': 'OFF',
                                     'value': 'OFF'}

        T_1.set_log_conditions(pnl.NOISE)
        T_1.set_log_conditions(pnl.RESULTS)
        PJ.set_log_conditions(pnl.MATRIX)

        assert T_1.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'OFF',
                                     'RESULTS': 'EXECUTION',
                                     'intercept': 'OFF',
                                     'noise': 'EXECUTION',
                                     'smoothing_factor': 'OFF',
                                     'value': 'OFF'}
        assert T_2.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'OFF',
                                     'RESULTS': 'OFF',
                                     'intercept': 'OFF',
                                     'noise': 'OFF',
                                     'smoothing_factor': 'OFF',
                                     'value': 'OFF'}
        assert PJ.loggable_items == {'matrix': 'EXECUTION',
                                     'value': 'OFF'}

        PS.execute()
        PS.execute()
        PS.execute()

        assert T_1.logged_items == {'RESULTS': 'EXECUTION', 'noise': 'EXECUTION'}
        assert PJ.logged_items == {'matrix': 'EXECUTION'}

        T_1.log.print_entries()

        # assert T_1.log.print_entries() ==
        # # Log for mech_A:
        # #
        # # Index     Variable:                                          Context                                                                  Value
        # # 0         'RESULTS'.........................................' EXECUTING  PROCESS Process-0'.......................................    0.0
        # # 1         'RESULTS'.........................................' EXECUTING  PROCESS Process-0'.......................................    0.0
        # #
        # #
        # # 0         'noise'...........................................' EXECUTING  PROCESS Process-0'.......................................    0.0
        # # 1         'noise'...........................................' EXECUTING  PROCESS Process-0'.......................................    0.0
        #
        # assert T_2.log.print_entries() ==
        # # Log for mech_A:
        # #
        # # Index     Variable:                                          Context                                                                  Value
        # # 0         'RESULTS'.........................................' EXECUTING  PROCESS Process-0'.......................................    0.0
        # # 1         'RESULTS'.........................................' EXECUTING  PROCESS Process-0'.......................................    0.0
        # #
        # #
        # # 0         'noise'...........................................' EXECUTING  PROCESS Process-0'.......................................    0.0
        # # 1         'noise'...........................................' EXECUTING  PROCESS Process-0'.......................................    0.0

        print(T_1.log.csv(entries=['noise', 'RESULTS'], owner_name=False, quotes=None))
        assert T_1.log.csv(entries=['noise', 'RESULTS'], owner_name=False, quotes=None) == \
                        "\'Index\', \'noise\', \'RESULTS\'\n0, 0.0, 0.0 0.0\n1, 0.0, 0.0 0.0\n2, 0.0, 0.0 0.0\n"

        assert PJ.log.csv(entries='matrix', owner_name=True, quotes=True) == \
               "\'Index\', \'MappingProjection from log_test_T_1 to log_test_T_2[matrix]\'\n" \
               "\'0\', \'1.0 0.0\' \'0.0 1.0\'\n" \
               "\'1\', \'1.0 0.0\' \'0.0 1.0\'\n" \
               "\'2\', \'1.0 0.0\' \'0.0 1.0\'\n"

        result = T_1.log.nparray(entries=['noise', 'RESULTS'], header=False, owner_name=True)
        np.testing.assert_array_equal(result,
                                      np.array([[[0], [1], [2]],
                                                [[ 0.], [ 0.], [ 0.]],
                                                [[ 0.,  0.], [ 0.,  0.],[ 0., 0.]]]))
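
        # Hedged addendum (not in the original test): the same entries can also be
        # retrieved as an ordered dictionary keyed by entry name, as other tests in
        # this file do with Log.nparray_dictionary.
        log_dict_T_1 = T_1.log.nparray_dictionary(entries=['noise', 'RESULTS'])
        assert list(log_dict_T_1.keys()) == ['Index', 'noise', 'RESULTS']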
Example 6
                                                          [0.0, 0.0]]),
                                        name='TASK_CN_WEIGHTS')

# column 0: task_CN to hidden_'RED', hidden_'GREEN'
# column 1: task_WR to hidden_'RED', hidden_'GREEN'
task_WR_weights = pnl.MappingProjection(matrix=np.matrix([[0, 0.0], [4.0,
                                                                     4.0]]),
                                        name='TASK_WR_WEIGHTS')

# In[ ]:

#   CREATE PATHWAYS
#   Words pathway
words_process = pnl.Process(pathway=[
    words_input_layer, word_weights, words_hidden_layer, word_response_weights,
    response_layer
],
                            name='WORDS_PROCESS')

#   Colors pathway
colors_process = pnl.Process(pathway=[
    colors_input_layer, color_weights, colors_hidden_layer,
    color_response_weights, response_layer
],
                             name='COLORS_PROCESS')

#   Task representation pathway
task_CN_process = pnl.Process(
    pathway=[task_layer, task_CN_weights, colors_hidden_layer],
    name='TASK_CN_PROCESS')
Example 7
    def test_log_dictionary_without_time(self):

        T1 = pnl.TransferMechanism(name='log_test_T1',
                                    size=2)
        T2 = pnl.TransferMechanism(name='log_test_T2',
                                    size=2)
        PS = pnl.Process(name='log_test_PS', pathway=[T1, T2])
        PJ = T2.path_afferents[0]

        assert T1.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'OFF',
                                     'RESULTS': 'OFF',
                                     'intercept': 'OFF',
                                     'noise': 'OFF',
                                     'smoothing_factor': 'OFF',
                                     'value': 'OFF'}
        assert T2.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'OFF',
                                     'RESULTS': 'OFF',
                                     'intercept': 'OFF',
                                     'noise': 'OFF',
                                     'smoothing_factor': 'OFF',
                                     'value': 'OFF'}
        assert PJ.loggable_items == {'matrix': 'OFF',
                                     'value': 'OFF'}

        T1.set_log_conditions(pnl.SLOPE)
        T1.set_log_conditions(pnl.RESULTS)
        T1.set_log_conditions(pnl.VALUE)
        PJ.set_log_conditions(pnl.MATRIX)
        T2.set_log_conditions(pnl.SLOPE)
        T2.set_log_conditions(pnl.RESULTS)
        T2.set_log_conditions(pnl.VALUE)

        assert T1.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'EXECUTION',
                                     'RESULTS': 'EXECUTION',
                                     'intercept': 'OFF',
                                     'noise': 'OFF',
                                     'smoothing_factor': 'OFF',
                                     'value': 'EXECUTION'}
        assert T2.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'EXECUTION',
                                     'RESULTS': 'EXECUTION',
                                     'intercept': 'OFF',
                                     'noise': 'OFF',
                                     'smoothing_factor': 'OFF',
                                     'value': 'EXECUTION'}
        assert PJ.loggable_items == {'matrix': 'EXECUTION',
                                     'value': 'OFF'}

        PS.execute([1.0, 2.0])
        PS.execute([3.0, 4.0])
        PS.execute([5.0, 6.0])

        assert T1.logged_items == {'RESULTS': 'EXECUTION',
                                   'slope': 'EXECUTION',
                                   'value': 'EXECUTION'}
        assert T2.logged_items == {'RESULTS': 'EXECUTION',
                                   'slope': 'EXECUTION',
                                   'value': 'EXECUTION'}
        assert PJ.logged_items == {'matrix': 'EXECUTION'}

        log_dict_T1 = T1.log.nparray_dictionary(entries=['value', 'slope', 'RESULTS'])

        expected_values_T1 = [[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]
        expected_slopes_T1 = [[1.0], [1.0], [1.0]]
        expected_results_T1 = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]

        assert np.allclose(expected_values_T1, log_dict_T1['value'])
        assert np.allclose(expected_slopes_T1, log_dict_T1['slope'])
        assert np.allclose(expected_results_T1, log_dict_T1['RESULTS'])

        assert list(log_dict_T1.keys()) == ['Index', 'value', 'slope', 'RESULTS']

        log_dict_T1_reorder = T1.log.nparray_dictionary(entries=['slope', 'value', 'RESULTS'])

        assert list(log_dict_T1_reorder.keys()) == ['Index', 'slope', 'value', 'RESULTS']
Example 8
# In[22]:

# I want to put in a mapping projection that just ensures all our weight matrices between sigs and bins are the identity (I).

mapII = pnl.MappingProjection(matrix=np.eye(len(nouns)), name="mapII")

mapIi = pnl.MappingProjection(matrix=np.eye(len(is_list)), name="mapIi")

mapIh = pnl.MappingProjection(matrix=np.eye(len(has_list)), name="mapIh")

mapIc = pnl.MappingProjection(matrix=np.eye(len(can_list)), name="mapIc")

# In[23]:

#This is where we build the processes.
p11 = pnl.Process(pathway=[nouns_in, h1, h2], learning=pnl.LEARNING)

p12 = pnl.Process(pathway=[rels_in, h2], learning=pnl.LEARNING)

p21 = pnl.Process(pathway=[h2, out_sig_I], learning=pnl.LEARNING)

p22 = pnl.Process(pathway=[h2, out_sig_is], learning=pnl.LEARNING)

p23 = pnl.Process(pathway=[h2, out_sig_has], learning=pnl.LEARNING)

p24 = pnl.Process(pathway=[h2, out_sig_can], learning=pnl.LEARNING)

# In[24]:

#These are the processes that transform sigs to bins
#
Example 9
    def _generate_processes(self):
        self.input_output_processes = []
        for (input_index,
             output_index) in itertools.product(range(self.num_dimensions),
                                                range(self.num_dimensions)):
            # proc = pnl.Process(pathway=[self.input_layers[input_index],
            #                          (pnl.random_matrix(self.num_features, self.hidden_layer_size, 2,
            #                                            -1) * self.weight_init_scale, pnl.LEARNING),
            #                          self.hidden_layer,
            #                          (pnl.random_matrix(self.hidden_layer_size, self.num_features, 2,
            #                                            -1) * self.weight_init_scale, pnl.LEARNING),
            #                          self.output_layers[output_index]],
            #                 name='input-{i}-output-{o}-proc'.format(i=input_index,
            #                                                         o=output_index))
            #                 #learning=pnl.LEARNING))
            #
            # proc = pnl.Process(pathway=[self.input_layers[input_index],
            #                          pnl.MappingProjection(matrix=(pnl.random_matrix(self.num_features, self.hidden_layer_size, 2,
            #                                             -1) * self.weight_init_scale, pnl.LEARNING_PROJECTION)),
            #                          self.hidden_layer,
            #                          pnl.MappingProjection(matrix=(pnl.random_matrix(self.hidden_layer_size, self.num_features, 2,
            #                                             -1) * self.weight_init_scale, pnl.LEARNING_PROJECTION)),
            #                          self.output_layers[output_index]],
            #                 name='input-{i}-output-{o}-proc'.format(i=input_index,
            #                                                         o=output_index),
            #                 learning=pnl.LEARNING)

            input_to_hidden = pnl.MappingProjection(
                name='input-{i}-to-hidden'.format(i=input_index),
                sender=self.input_layers[input_index],
                receiver=self.hidden_layer,
                matrix=pnl.random_matrix(self.num_features,
                                         self.hidden_layer_size, 2, -1) *
                self.weight_init_scale)

            hidden_to_output = pnl.MappingProjection(
                name='hidden-to-output-{o}'.format(o=output_index),
                sender=self.hidden_layer,
                receiver=self.output_layers[output_index],
                matrix=pnl.random_matrix(self.hidden_layer_size,
                                         self.num_features, 2, -1) *
                self.weight_init_scale)

            proc = pnl.Process(pathway=[
                self.input_layers[input_index], input_to_hidden,
                self.hidden_layer, hidden_to_output,
                self.output_layers[output_index]
            ],
                               name='input-{i}-output-{o}-proc'.format(
                                   i=input_index, o=output_index),
                               learning=pnl.ENABLED)

            self.input_output_processes.append(proc)

        self.task_hidden_processes = []
        self.task_output_processes = []
        self.output_bias_processes = []
        for output_index in range(self.num_dimensions):
            self.task_hidden_processes.append(
                pnl.Process(
                    pathway=[
                        self.task_layer,
                        # pnl.random_matrix(self.num_tasks, self.hidden_layer_size, 2,
                        #                   -1) * self.weight_init_scale,
                        self.hidden_layer,
                        # pnl.random_matrix(self.hidden_layer_size, self.num_features, 2,
                        #                   -1) * self.weight_init_scale,
                        self.output_layers[output_index]
                    ],
                    name='task-hidden-proc-{o}'.format(o=output_index),
                    learning=pnl.LEARNING))

            self.task_output_processes.append(
                pnl.Process(
                    pathway=[
                        self.task_layer,
                        # pnl.random_matrix(self.num_tasks, self.num_features, 2,
                        #                   -1) * self.weight_init_scale,
                        self.output_layers[output_index]
                    ],
                    name='task-output-proc-{o}'.format(o=output_index),
                    learning=pnl.LEARNING))

            self.output_bias_processes.append(
                pnl.Process(
                    pathway=[
                        self.output_biases[output_index],
                        self.output_layers[output_index]
                    ],
                    name='output-bias-proc-{o}'.format(o=output_index)))

        self.hidden_bias_process = pnl.Process(
            pathway=[self.hidden_bias, self.hidden_layer],
            name='hidden-bias-proc')
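
    def _generate_system(self):
        # Hedged sketch (not part of the original class): the process lists built above
        # would typically be combined into a single System elsewhere in the model; the
        # self.learning_rate attribute used here is an assumption.
        self.system = pnl.System(
            processes=self.input_output_processes + self.task_hidden_processes
            + self.task_output_processes + self.output_bias_processes
            + [self.hidden_bias_process],
            learning_rate=self.learning_rate)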
Example 10
    ])
)
word_response_weights = pnl.MappingProjection(
    matrix=np.array([
        [2.5, 0.0],
        [0.0, 2.5],
        [0.0, 0.0]
    ])
)
#
# Create pathways -----------------------------------------------------------------------------------------------------
color_response_process_1 = pnl.Process(
    pathway=[
        colors_input_layer,
        color_input_weights,
        colors_hidden_layer,
        color_response_weights,
        response_layer,
    ],
    name='COLORS_RESPONSE_PROCESS_1'
)

color_response_process_2 = pnl.Process(
    pathway=[
        response_layer,
        response_color_weights,
        colors_hidden_layer
    ],
    name='COLORS_RESPONSE_PROCESS_2'
)

word_response_process_1 = pnl.Process(
Example 11
    def test_rumelhart_semantic_network_sequential(self):

        rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
        rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
        rep_hidden = pnl.TransferMechanism(
            size=4,
            function=psyneulink.core.components.functions.transferfunctions.
            Logistic,
            name='REP_HIDDEN')
        rel_hidden = pnl.TransferMechanism(
            size=5,
            function=psyneulink.core.components.functions.transferfunctions.
            Logistic,
            name='REL_HIDDEN')
        rep_out = pnl.TransferMechanism(size=10,
                                        function=psyneulink.core.components.
                                        functions.transferfunctions.Logistic,
                                        name='REP_OUT')
        prop_out = pnl.TransferMechanism(size=12,
                                         function=psyneulink.core.components.
                                         functions.transferfunctions.Logistic,
                                         name='PROP_OUT')
        qual_out = pnl.TransferMechanism(size=13,
                                         function=psyneulink.core.components.
                                         functions.transferfunctions.Logistic,
                                         name='QUAL_OUT')
        act_out = pnl.TransferMechanism(size=14,
                                        function=psyneulink.core.components.
                                        functions.transferfunctions.Logistic,
                                        name='ACT_OUT')

        rep_hidden_proc = pnl.Process(pathway=[rep_in, rep_hidden, rel_hidden],
                                      learning=pnl.LEARNING,
                                      name='REP_HIDDEN_PROC')
        rel_hidden_proc = pnl.Process(pathway=[rel_in, rel_hidden],
                                      learning=pnl.LEARNING,
                                      name='REL_HIDDEN_PROC')
        rel_rep_proc = pnl.Process(pathway=[rel_hidden, rep_out],
                                   learning=pnl.LEARNING,
                                   name='REL_REP_PROC')
        rel_prop_proc = pnl.Process(pathway=[rel_hidden, prop_out],
                                    learning=pnl.LEARNING,
                                    name='REL_PROP_PROC')
        rel_qual_proc = pnl.Process(pathway=[rel_hidden, qual_out],
                                    learning=pnl.LEARNING,
                                    name='REL_QUAL_PROC')
        rel_act_proc = pnl.Process(pathway=[rel_hidden, act_out],
                                   learning=pnl.LEARNING,
                                   name='REL_ACT_PROC')

        S = pnl.System(processes=[
            rep_hidden_proc, rel_hidden_proc, rel_rep_proc, rel_prop_proc,
            rel_qual_proc, rel_act_proc
        ])
        # S.show_graph(show_learning=pnl.ALL, show_dimensions=True)
        validate_learning_mechs(S)

        print(S.origin_mechanisms)
        print(S.terminal_mechanisms)
        S.run(
            learning=True,
            # num_trials=2,
            inputs={
                rel_in:
                [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                rep_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
            },
            # targets={rep_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
            #          prop_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
            #          qual_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
            #          act_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]}
        )
        print(S.results)
Example 12
        # m=pnl.GATING,
        # b=2.0
        # )
    })

Gating_Mechanism = pnl.GatingMechanism(
    # default_gating_policy=0.0,
    size=[1],
    gating_signals=[
        # Output_Layer
        Output_Layer.output_state,
    ])

p = pnl.Process(size=2,
                pathway=[Input_Layer, Output_Layer],
                prefs={
                    pnl.VERBOSE_PREF: False,
                    pnl.REPORT_OUTPUT_PREF: False
                })

g = pnl.Process(default_variable=[1.0], pathway=[Gating_Mechanism])

stim_list = {
    Input_Layer: [[-1, 30], [-1, 30], [-1, 30], [-1, 30]],
    Gating_Mechanism: [[0.0], [0.5], [1.0], [2.0]]
}


def print_header(system):
    print("\n\n**** Time: ", system.scheduler_processing.clock.simple_time)

Example 13
    def test_using_Hebbian_learning_of_orthognal_inputs_with_integrator_mode(
            self):
        """Same as tests/mechanisms/test_recurrent_transfer_mechanism/test_learning_of_orthognal_inputs

        Tests that ContrastiveHebbianMechanism behaves like RecurrentTransferMechanism with Hebbian LearningFunction
        (allowing for epsilon differences due to INTEGRATION and convergence criterion).
        """
        size = 4
        R = pnl.ContrastiveHebbianMechanism(
            input_size=4,
            hidden_size=0,
            target_size=4,
            separated=False,
            mode=pnl.SIMPLE_HEBBIAN,
            enable_learning=True,
            function=psyneulink.core.components.functions.transferfunctions.
            Linear,
            integrator_mode=True,
            integration_rate=0.2,
            learning_function=psyneulink.core.components.functions.
            learningfunctions.Hebbian,
            minus_phase_termination_threshold=.01,
            plus_phase_termination_threshold=.01,
            # auto=0,
            hetero=np.full((size, size), 0.0))
        P = pnl.Process(pathway=[R])
        S = pnl.System(processes=[P])

        inputs_dict = {R: [1, 0, 1, 0]}
        S.run(num_trials=4, inputs=inputs_dict)
        # KDM 10/2/18: removing this test from here, as it's kind of unimportant to this specific test
        #   and the behavior of the scheduler's time can be a bit odd - should hopefully fix that in future
        #   and test in its own module
        # assert S.scheduler.get_clock(S).previous_time.pass_ == 19
        np.testing.assert_allclose(
            R.output_ports[pnl.ACTIVITY_DIFFERENCE].parameters.value.get(S),
            [1.14142296, 0.0, 1.14142296, 0.0])
        np.testing.assert_allclose(R.parameters.plus_phase_activity.get(S),
                                   [1.14142296, 0.0, 1.14142296, 0.0])
        np.testing.assert_allclose(R.parameters.minus_phase_activity.get(S),
                                   [0.0, 0.0, 0.0, 0.0])
        np.testing.assert_allclose(
            R.output_ports[pnl.CURRENT_ACTIVITY].parameters.value.get(S),
            [1.1414229612568625, 0.0, 1.1414229612568625, 0.0])
        np.testing.assert_allclose(
            R.recurrent_projection.get_mod_matrix(S),
            [[0.0, 0.0, 0.22035998, 0.0], [0.0, 0.0, 0.0, 0.0],
             [0.22035998, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]])
        # Reset state so learning of new pattern is "uncontaminated" by activity from previous one
        R.output_port.parameters.value.set([0, 0, 0, 0], S, override=True)
        inputs_dict = {R: [0, 1, 0, 1]}
        S.run(num_trials=4, inputs=inputs_dict)
        np.testing.assert_allclose(
            R.recurrent_projection.get_mod_matrix(S),
            [[0.0, 0.0, 0.22035998, 0.0], [0.0, 0.0, 0.0, 0.22035998],
             [0.22035998, 0.0, 0.0, 0.], [0.0, 0.22035998, 0.0, 0.]])
        np.testing.assert_allclose(
            R.output_ports[pnl.CURRENT_ACTIVITY].parameters.value.get(S),
            [0.0, 1.1414229612568625, 0.0, 1.1414229612568625])
        np.testing.assert_allclose(
            R.output_ports[pnl.ACTIVITY_DIFFERENCE].parameters.value.get(S),
            [0.0, 1.14142296, 0.0, 1.14142296])
        np.testing.assert_allclose(R.parameters.plus_phase_activity.get(S),
                                   [0.0, 1.14142296, 0.0, 1.14142296])
        np.testing.assert_allclose(R.parameters.minus_phase_activity.get(S),
                                   [0.0, 0.0, 0.0, 0.0])
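
        # Hedged sketch (assumed analogue, not copied from the referenced test): the
        # RecurrentTransferMechanism counterpart that this test mirrors would be set up
        # roughly as follows.
        R2 = pnl.RecurrentTransferMechanism(size=size,
                                            function=pnl.Linear,
                                            enable_learning=True,
                                            learning_function=pnl.Hebbian,
                                            integrator_mode=True,
                                            integration_rate=0.2,
                                            hetero=0.0)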
Example 14
# create a LeabraMechanism in PsyNeuLink
L = pnl.LeabraMechanism(
    input_size=input_size,
    output_size=output_size,
    hidden_layers=hidden_layers,
    hidden_sizes=hidden_sizes,
    name='L',
    training_flag=train_flag
)


T1 = pnl.TransferMechanism(name='T1', size=input_size, function=psyneulink.core.components.functions
                           .transferfunctions.Linear)
T2 = pnl.TransferMechanism(name='T2', size=output_size, function=psyneulink.core.components.functions.transferfunctions.Linear)

p1 = pnl.Process(pathway=[T1, L])
proj = pnl.MappingProjection(sender=T2, receiver=L.input_states[1])
p2 = pnl.Process(pathway=[T2, proj, L])
s = pnl.System(processes=[p1, p2])

print('Running Leabra in PsyNeuLink...')
start_time = time.process_time()
outputs = s.run(inputs={T1: input_pattern.copy(), T2: training_pattern.copy()})
end_time = time.process_time()

print('Time to run LeabraMechanism in PsyNeuLink: ', end_time - start_time, "seconds")
print('LeabraMechanism Outputs Over Time: ', outputs, type(outputs))
print('LeabraMechanism Final Output: ', outputs[-1], type(outputs[-1]))


Example 15

random.seed(random_seed_value)
action_selection = pnl.TransferMechanism(
    size=3,
    function=pnl.SoftMax(output=pnl.ALL, gain=1.0),
    output_states={
        pnl.NAME: 'SELECTED ACTION',
        pnl.VARIABLE: [(pnl.INPUT_STATE_VARIABLES, 0), (pnl.OWNER_VALUE, 0)],
        pnl.FUNCTION: pnl.OneHot(mode=pnl.PROB).function
    },
    # output_states={pnl.NAME: "SOFT_MAX",
    #                pnl.VARIABLE: (pnl.OWNER_VALUE,0),
    #                pnl.FUNCTION: pnl.SoftMax(output=pnl.PROB,gain=1.0)},
    name='Action Selection')

p = pnl.Process(default_variable=[0, 0, 0],
                pathway=[input_layer, action_selection],
                learning=pnl.LearningProjection(
                    learning_function=pnl.Reinforcement(learning_rate=0.05)),
                target=0)

print('reward prediction weights: \n',
      action_selection.input_state.path_afferents[0].matrix)
print('target_mechanism weights: \n',
      action_selection.output_state.efferents[0].matrix)

actions = ['left', 'middle', 'right']
reward_values = [10, 0, 0]
first_reward = 0

# Must initialize reward (won't be used, but needed for declaration of lambda function)
action_selection.output_state.value = [0, 0, 1]
# Get reward value for selected action
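# Hedged continuation (the fragment is cut off here): the comments above suggest a
# reward callable that returns the reward for whichever action was selected, e.g.:
reward = lambda: [reward_values[int(np.nonzero(action_selection.output_state.value)[0])]]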
Example 16
                                   function=psyneulink.core.components.functions.transferfunctions.Logistic,
                                   name="hidden layer"
                                   )

proj=pnl.MappingProjection(matrix=(np.random.rand(2,2)),name="proj 1")
proj_h=pnl.MappingProjection(matrix=(np.random.rand(2,1)),name="proj 2")

#p_b=pnl.MappingProjection(matrix=.1*(np.random.rand(2,2)),name="proj bias 1")
#p_b_o=pnl.MappingProjection(matrix=.1*(np.random.rand(1,1)),name="proj bias 2")

output_layer=pnl.TransferMechanism(size=1, function=psyneulink.core.components.functions.transferfunctions.Logistic, name="output layer")

#bias_h=pnl.Process(pathway=[bias_mech_h,p_b,hidden_layer],learning=pnl.ENABLED)
#bias_out=pnl.Process(pathway=[bias_mech_out,p_b_o,output_layer],learning=pnl.ENABLED)

net3l=pnl.Process(pathway=[input_layer,proj,hidden_layer,proj_h,output_layer],learning=pnl.ENABLED)

sys3l=pnl.System(processes=[
    #bias_h,
    #bias_out,
    net3l],learning_rate=8)
#### AFTER THIS PART IS FINE #####

sys3l.show_graph(output_fmt = 'jupyter')

trials=4000
X=np.array([[1,1],[1,0],[0,1],[0,0]])
#X=[[1,1,1],[1,0,1],[0,1,1],[0,0,1]]
b_h_ins=[[1,1],[1,1],[1,1],[1,1]]
b_o_ins=[[1],[1],[1],[1]]
AND_labels_pnl=[[1],[0],[0],[0]]
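
# Hedged sketch (not shown in this fragment): the network would typically be trained by
# running the System over the input/label sets defined above; how the bias inputs are
# used is not shown here, so they are omitted in this sketch.
sys3l.run(num_trials=trials,
          inputs={input_layer: X.tolist()},
          targets={output_layer: AND_labels_pnl})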
Example 17
print(Decision.execute([1]))

# Decision.set_log_conditions('DECISION_VARIABLE')
# Decision.set_log_conditions('value')
# Decision.set_log_conditions('PROBABILITY_UPPER_THRESHOLD')
Decision.set_log_conditions('InputPort-0')
# Decision.set_log_conditions('RESPONSE_TIME')

# Decision.loggable_items

# Outcome Mechanisms:
Reward = pnl.TransferMechanism(size=1, name='Reward')

# Processes:
TargetControlProcess = pnl.Process(default_variable=[0],
                                   pathway=[Target_Stim, Target_Rep, Decision],
                                   name='Target Control Process')

FlankerControlProcess = pnl.Process(
    default_variable=[0],
    pathway=[Distractor_Stim, Distractor_Rep, Decision],
    name='Flanker Control Process')

TargetAutomaticProcess = pnl.Process(
    default_variable=[0],
    pathway=[Target_Stim, Automatic_Component_Target, Decision],
    name='Target Automatic Process')

FlankerAutomaticProcess = pnl.Process(
    default_variable=[0],
    pathway=[Distractor_Stim, Automatic_Component_Flanker, Decision],  #
Example 18
#but we do need to specify the size, which will be the size of our input array.

input_layer=pnl.TransferMechanism(size=(3), name='INPUT LAYER')

#Next, we specify our output layer. This is where we do our sigmoid transformation, by simply applying the Logistic function.
#The size we specify for this layer is the number of output nodes we want. In this case, we want the network to return a scalar
#for each example (either a 1 or a zero), so our size is 1

output_layer=pnl.TransferMechanism(size=1, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='OUTPUT LAYER')

#Now, we put them together into a process.
#Notice that we did not need to specify a weighting matrix; one will automatically be generated by PsyNeuLink when we create our
#process.
# JDC ADDED:
# Normally, for learning to occur in a process, we would just specify that learning=pnl.ENABLED.
# However, if we want to specify a specific learning function or error_function to be used, then we must
# specify it by constructing a default LearningProjection and giving it the parameters we want.  In this
# case it is the error_function, which we will set to CROSS_ENTROPY (using PsyNeuLink's Distance Function):

net2l=pnl.Process(pathway=[input_layer,output_layer],
                  learning=pnl.LearningProjection(error_function=psyneulink.core.components.functions
                                                  .objectivefunctions.Distance(metric=pnl.CROSS_ENTROPY))
                  )

#The pathway argument specifies in which order to execute the layers. This way, the output of one will be mapped to the input of
#the next.
#To run the process, we will put it into a system.


sys2l=pnl.System(processes=[net2l],learning_rate=4)
sys2l.show_graph(show_learning=pnl.ALL)
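
# Hedged sketch (not shown in this fragment): with learning enabled, the system is
# typically trained by pairing each 3-element input with a scalar target; the patterns
# below are illustrative placeholders only.
input_patterns = [[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]]
target_patterns = [[0], [1], [1], [0]]
sys2l.run(num_trials=len(input_patterns),
          inputs={input_layer: input_patterns},
          targets={output_layer: target_patterns})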
Example 19
#                           function=pnl.Stability(metric=pnl.ENERGY,
#                                                  normalize=True),
#                           name='K')

conflicts = pnl.IntegratorMechanism(
    input_states=[action_selection.output_states[2]],
    function=pnl.AGTUtilityIntegrator(short_term_gain=6.0,
                                      long_term_gain=6.0,
                                      short_term_rate=0.05,
                                      long_term_rate=0.2),
    name='Short- and Long-term conflict')

decision_process = pnl.Process(
    default_variable=[0, 0],
    pathway=[input_layer, action_selection],
    learning=pnl.LearningProjection(
        learning_function=pnl.Reinforcement(learning_rate=0.03)
    ),  # if learning rate is set to .3, output state values anneal to [0., 0.],
    # which leads to an error in the reward function
    target=0)

print('reward prediction weights: \n',
      action_selection.input_state.path_afferents[0].matrix)
print('target_mechanism weights: \n',
      action_selection.output_state.efferents[0].matrix)

conflict_process = pnl.Process(pathway=[action_selection, conflicts])

LC_NE = pnl.LCControlMechanism(objective_mechanism=pnl.ObjectiveMechanism(
    monitored_output_states=[action_selection], name='LC-NE ObjectiveMech'),
                               modulated_mechanisms=[action_selection],
                               integration_method='EULER',
Example 20
    default_variable=[0, 0])

myHiddenLayer = pnl.TransferMechanism(
    name='Hidden Layer 1',
    function=psyneulink.core.components.functions.transferfunctions.Logistic(
        gain=1.0, x_0=0),
    default_variable=np.zeros((5, )))

myDDM = pnl.DDM(name='My_DDM',
                function=psyneulink.core.components.functions.
                distributionfunctions.DriftDiffusionAnalytical(
                    drift_rate=0.5, threshold=1, starting_point=0.0))

myProcess = pnl.Process(
    name='Neural Network DDM Process',
    default_variable=[0, 0],
    pathway=[
        myInputLayer,
        psyneulink.core.components.functions.transferfunctions.get_matrix(
            pnl.RANDOM_CONNECTIVITY_MATRIX, 2, 5), myHiddenLayer,
        pnl.FULL_CONNECTIVITY_MATRIX, myDDM
    ])

myProcess.reportOutputPref = True
myInputLayer.reportOutputPref = True
myHiddenLayer.reportOutputPref = True
myDDM.reportOutputPref = pnl.PreferenceEntry(True,
                                             pnl.PreferenceLevel.INSTANCE)

pnl.run(myProcess, [[-1, 2], [2, 3], [5, 5]])
Example 21
    def test_log_dictionary_with_time(self):

        T1 = pnl.TransferMechanism(name='log_test_T1',
                                   size=2)
        T2 = pnl.TransferMechanism(name='log_test_T2',
                                   function=pnl.Linear(slope=2.0),
                                   size=2)
        PS = pnl.Process(name='log_test_PS', pathway=[T1, T2])
        SYS = pnl.System(name='log_test_SYS', processes=[PS])

        assert T1.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'OFF',
                                     'RESULTS': 'OFF',
                                     'intercept': 'OFF',
                                     'noise': 'OFF',
                                     'smoothing_factor': 'OFF',
                                     'value': 'OFF'}
        assert T2.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'OFF',
                                     'RESULTS': 'OFF',
                                     'intercept': 'OFF',
                                     'noise': 'OFF',
                                     'smoothing_factor': 'OFF',
                                     'value': 'OFF'}

        T1.set_log_conditions(pnl.SLOPE)
        T1.set_log_conditions(pnl.RESULTS)
        T1.set_log_conditions(pnl.VALUE)

        assert T1.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'EXECUTION',
                                     'RESULTS': 'EXECUTION',
                                     'intercept': 'OFF',
                                     'noise': 'OFF',
                                     'smoothing_factor': 'OFF',
                                     'value': 'EXECUTION'}

        T2.set_log_conditions(pnl.SLOPE)
        T2.set_log_conditions(pnl.RESULTS)
        T2.set_log_conditions(pnl.VALUE)

        assert T2.loggable_items == {'InputState-0': 'OFF',
                                     'slope': 'EXECUTION',
                                     'RESULTS': 'EXECUTION',
                                     'intercept': 'OFF',
                                     'noise': 'OFF',
                                     'smoothing_factor': 'OFF',
                                     'value': 'EXECUTION'}

        # RUN ZERO  |  TRIALS ZERO, ONE, TWO ----------------------------------

        SYS.run(inputs={T1: [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]})

        assert T1.logged_items == {'RESULTS': 'EXECUTION',
                                   'slope': 'EXECUTION',
                                   'value': 'EXECUTION'}
        assert T2.logged_items == {'RESULTS': 'EXECUTION',
                                   'slope': 'EXECUTION',
                                   'value': 'EXECUTION'}

        # T1 log after zero-th run -------------------------------------------

        log_dict_T1 = T1.log.nparray_dictionary(entries=['value', 'slope', 'RESULTS'])

        expected_run_T1 = [[0], [0], [0]]
        expected_trial_T1 = [[0], [1], [2]]
        expected_time_step_T1 = [[0], [0], [0]]
        expected_values_T1 = [[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]]
        expected_slopes_T1 = [[1.0], [1.0], [1.0]]
        expected_results_T1 = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]

        assert np.allclose(expected_run_T1, log_dict_T1['Run'])
        assert np.allclose(expected_trial_T1, log_dict_T1['Trial'])
        assert np.allclose(expected_time_step_T1, log_dict_T1['Time_step'])
        assert np.allclose(expected_values_T1, log_dict_T1['value'])
        assert np.allclose(expected_slopes_T1, log_dict_T1['slope'])
        assert np.allclose(expected_results_T1, log_dict_T1['RESULTS'])

        # T2 log after zero-th run --------------------------------------------

        log_dict_T2 = T2.log.nparray_dictionary(entries=['value', 'slope', 'RESULTS'])

        expected_run_T2 = [[0], [0], [0]]
        expected_trial_T2 = [[0], [1], [2]]
        expected_time_step_T2 = [[1], [1], [1]]
        expected_values_T2 = [[[2.0, 4.0]], [[6.0, 8.0]], [[10.0, 12.0]]]
        expected_slopes_T2 = [[2.0], [2.0], [2.0]]
        expected_results_T2 = [[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]]

        assert np.allclose(expected_run_T2, log_dict_T2['Run'])
        assert np.allclose(expected_trial_T2, log_dict_T2['Trial'])
        assert np.allclose(expected_time_step_T2, log_dict_T2['Time_step'])
        assert np.allclose(expected_values_T2, log_dict_T2['value'])
        assert np.allclose(expected_slopes_T2, log_dict_T2['slope'])
        assert np.allclose(expected_results_T2, log_dict_T2['RESULTS'])

        # RUN ONE  |  TRIALS ZERO, ONE, TWO -------------------------------------

        SYS.run(inputs={T1: [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]})

        # T1 log after first run -------------------------------------------

        log_dict_T1 = T1.log.nparray_dictionary(entries=['value', 'slope', 'RESULTS'])

        # expected_run_T1_2 = [[1], [1], [1]]
        expected_run_T1_2 = [[0], [0], [0]] + expected_run_T1
        expected_trial_T1_2 = [[0], [1], [2]] + expected_trial_T1
        expected_time_step_T1_2 = [[0], [0], [0]] + expected_time_step_T1
        expected_values_T1_2 = expected_values_T1 + expected_values_T1
        expected_slopes_T1_2 = expected_slopes_T1 + expected_slopes_T1
        expected_results_T1_2 = expected_results_T1 + expected_results_T1

        # assert np.allclose(expected_run_T1_2, log_dict_T1['Run'])
        # assert np.allclose(expected_trial_T1_2, log_dict_T1['Trial'])
        # assert np.allclose(expected_time_step_T1_2, log_dict_T1['Time_step'])
        assert np.allclose(expected_values_T1_2, log_dict_T1['value'])
        assert np.allclose(expected_slopes_T1_2, log_dict_T1['slope'])
        assert np.allclose(expected_results_T1_2, log_dict_T1['RESULTS'])

        # T2 log after first run -------------------------------------------

        log_dict_T2_2 = T2.log.nparray_dictionary(entries=['value', 'slope', 'RESULTS'])

        expected_run_T2_2 = [[0], [0], [0]] + expected_run_T2
        expected_trial_T2_2 = [[0], [1], [2]] + expected_trial_T2
        expected_time_step_T2_2 = [[1], [1], [1]] + expected_time_step_T2
        expected_values_T2_2 = [[[2.0, 4.0]], [[6.0, 8.0]], [[10.0, 12.0]]] + expected_values_T2
        expected_slopes_T2_2 = [[2.0], [2.0], [2.0]] + expected_slopes_T2
        expected_results_T2_2 = [[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]] + expected_results_T2

        # assert np.allclose(expected_run_T2_2, log_dict_T2_2['Run'])
        # assert np.allclose(expected_trial_T2_2, log_dict_T2_2['Trial'])
        # assert np.allclose(expected_time_step_T2_2, log_dict_T2_2['Time_step'])
        assert np.allclose(expected_values_T2_2, log_dict_T2_2['value'])
        assert np.allclose(expected_slopes_T2_2, log_dict_T2_2['slope'])
        assert np.allclose(expected_results_T2_2, log_dict_T2_2['RESULTS'])
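
        # Hedged addendum (not in the original test): the same log can also be exported
        # as CSV text, as other tests in this file do with Log.csv.
        print(T1.log.csv(entries=['slope', 'RESULTS'], owner_name=False, quotes=None))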
Example 22
# Something weird with the dimensions: we might think that it should be a 1 x 2 matrix
# column 0: response_'red' to respond_green_accumulator
# column 1: response_'green' to respond_green_accumulator
respond_green_differencing_weights = pnl.MappingProjection(matrix=np.matrix([[-1.0], [1.0]]),
                                                           name='RESPOND_GREEN_WEIGHTS')



# In[ ]:


#   CREATE PATHWAYS
#   Words pathway
words_process = pnl.Process(pathway=[words_input_layer,
                                     word_weights,
                                     words_hidden_layer,
                                     word_response_weights,
                                     response_layer], name='WORDS_PROCESS')

#   Colors pathway
colors_process = pnl.Process(pathway=[colors_input_layer,
                                      color_weights,
                                      colors_hidden_layer,
                                      color_response_weights,
                                      response_layer], name='COLORS_PROCESS')

#   Task representation pathway
task_CN_process = pnl.Process(pathway=[task_layer,
                                       task_CN_weights,
                                       colors_hidden_layer],
                              name='TASK_CN_PROCESS')
Example 23
    def test_clear_log(self):

        # Create System
        T_1 = pnl.TransferMechanism(name='log_test_T_1', size=2)
        T_2 = pnl.TransferMechanism(name='log_test_T_2', size=2)
        PS = pnl.Process(name='log_test_PS', pathway=[T_1, T_2])
        PJ = T_2.path_afferents[0]
        SYS = pnl.System(name="log_test_SYS", processes=[PS])

        # Set log conditions on each component
        T_1.set_log_conditions(pnl.NOISE)
        T_1.set_log_conditions(pnl.RESULTS)
        T_2.set_log_conditions(pnl.SLOPE)
        T_2.set_log_conditions(pnl.RESULTS)
        PJ.set_log_conditions(pnl.MATRIX)

        # Run system
        SYS.run(inputs={T_1: [1.0, 1.0]})

        # Create log dict for each component
        log_dict_T_1 = T_1.log.nparray_dictionary()
        log_dict_T_2 = T_2.log.nparray_dictionary()
        log_dict_PJ = PJ.log.nparray_dictionary()

        # Confirm that values were logged correctly
        assert np.allclose(log_dict_T_1['RESULTS'], np.array([[1.0, 1.0]])) and \
               np.allclose(log_dict_T_1['noise'], np.array([[0.0]]))

        assert np.allclose(log_dict_T_2['RESULTS'], np.array([[1.0, 1.0]])) and \
               np.allclose(log_dict_T_2['slope'], np.array([[1.0]]))

        assert np.allclose(log_dict_PJ['matrix'], np.array([[1.0, 0.0], [0.0, 1.0]]))

        # Clear T_1s log and delete entries
        T_1.log.clear_entries(delete_entry=False)

        # Clear T_2s log and DO NOT delete entries
        T_2.log.clear_entries(delete_entry=True)

        # Create new log dict for each component
        log_dict_T_1 = T_1.log.nparray_dictionary()
        log_dict_T_2 = T_2.log.nparray_dictionary()
        log_dict_PJ = PJ.log.nparray_dictionary()

        # Confirm that T_1 log values were removed
        assert np.allclose(log_dict_T_1['RESULTS'], np.array([])) and \
               np.allclose(log_dict_T_1['noise'], np.array([]))

        # Confirm that T_2 log values were removed and dictionary entries were destroyed
        assert log_dict_T_2 == OrderedDict()

        # Confirm that PJ log values were not affected by changes to T_1 and T_2's logs
        assert np.allclose(log_dict_PJ['matrix'], np.array([[1.0, 0.0], [0.0, 1.0]]))

        # Run system again
        SYS.run(inputs={T_1: [2.0, 2.0]})

        # Create new log dict for each component
        log_dict_T_1 = T_1.log.nparray_dictionary()
        log_dict_T_2 = T_2.log.nparray_dictionary()
        log_dict_PJ = PJ.log.nparray_dictionary()

        # Confirm that T_1 log values only include most recent run
        assert np.allclose(log_dict_T_1['RESULTS'], np.array([[2.0, 2.0]])) and \
               np.allclose(log_dict_T_1['noise'], np.array([[0.0]]))
        # NOTE: "Run" value still incremented, but only the most recent one is returned (# runs does not reset to zero)
        assert np.allclose(log_dict_T_1['Run'], np.array([[1]]))

        # Confirm that T_2 log values only include most recent run
        assert np.allclose(log_dict_T_2['RESULTS'], np.array([[2.0, 2.0]])) and \
               np.allclose(log_dict_T_2['slope'], np.array([[1.0]]))
        assert np.allclose(log_dict_T_2['Run'], np.array([[1]]))

        # Confirm that PJ log values include all runs
        assert np.allclose(log_dict_PJ['matrix'], np.array([[[1.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 1.0]]])) and \
               np.allclose(log_dict_PJ['Run'], np.array([[0], [1]]))

    def test_lauras_cohen_1990_model(self):
        #  INPUT UNITS

        #  colors: ('red', 'green'), words: ('RED','GREEN')
        colors_input_layer = pnl.TransferMechanism(size=2,
                                                   function=psyneulink.core.components.functions.transferfunctions.Linear,
                                                   name='COLORS_INPUT')

        words_input_layer = pnl.TransferMechanism(size=2,
                                                  function=psyneulink.core.components.functions.transferfunctions.Linear,
                                                  name='WORDS_INPUT')

        #   Task layer, tasks: ('name the color', 'read the word')
        task_layer = pnl.TransferMechanism(size=2,
                                           function=psyneulink.core.components.functions.transferfunctions.Linear,
                                           name='TASK')

        #   HIDDEN LAYER UNITS

        #   colors_hidden: ('red','green')
        #   Logistic activation function, Gain = 1.0, Bias = -4.0 (in PNL bias is subtracted so enter +4.0 to get negative bias)
        #   randomly distributed noise to the net input
        #   time averaging = integration_rate = 0.1
        unit_noise = 0.005
        colors_hidden_layer = pnl.TransferMechanism(size=2,
                                                    function=psyneulink.core.components.functions.transferfunctions
                                                    .Logistic(gain=1.0, x_0=4.0),
                                                    # should be able to get same result with offset = -4.0
                                                    integrator_mode=True,
                                                    noise=psyneulink.core.components.functions.distributionfunctions
                                                    .NormalDist(mean=0, standard_deviation=unit_noise).function,
                                                    integration_rate=0.1,
                                                    name='COLORS HIDDEN')
        #    words_hidden: ('RED','GREEN')
        words_hidden_layer = pnl.TransferMechanism(size=2,
                                                   function=pnl.Logistic(gain=1.0, x_0=4.0),
                                                   integrator_mode=True,
                                                   noise=pnl.NormalDist(mean=0,
                                                                        standard_deviation=unit_noise).function,
                                                   integration_rate=0.1,
                                                   name='WORDS HIDDEN')

        #    OUTPUT UNITS

        #   Response layer, provide input to accumulator, responses: ('red', 'green')
        #   time averaging = tau = 0.1
        #   randomly distributed noise to the net input
        response_layer = pnl.TransferMechanism(size=2,
                                               function=psyneulink.core.components.functions.transferfunctions.Logistic,
                                               name='RESPONSE',
                                               integrator_mode=True,
                                               noise=psyneulink.core.components.functions.distributionfunctions.NormalDist(mean=0, standard_deviation=unit_noise).function,
                                               integration_rate=0.1)
        #   Respond red accumulator
        #   alpha = rate of evidence accumulation = 0.1
        #   sigma = noise = 0.1
        #   noise added per step is: sqrt(time_step_size * noise) * a random sample from a normal distribution
        accumulator_noise = 0.1
        respond_red_accumulator = pnl.IntegratorMechanism(function=pnl.SimpleIntegrator(noise=pnl.NormalDist(mean=0,
                                                                                                             standard_deviation=accumulator_noise).function,
                                                                                        rate=0.1),
                                                          name='respond_red_accumulator')
        #   Respond green accumulator
        respond_green_accumulator = pnl.IntegratorMechanism(function=pnl.SimpleIntegrator(noise=pnl.NormalDist(mean=0,
                                                                                                               standard_deviation=accumulator_noise).function,
                                                                                          rate=0.1),
                                                            name='respond_green_accumulator')
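        #   For example, per the noise note above, with time_step_size = 1.0 and noise = 0.1 each
        #   step would add roughly sqrt(1.0 * 0.1) * z ~= 0.316 * z, with z drawn from N(0, 1)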

        #   LOGGING
        colors_hidden_layer.set_log_conditions('value')
        words_hidden_layer.set_log_conditions('value')
        response_layer.set_log_conditions('value')
        respond_red_accumulator.set_log_conditions('value')
        respond_green_accumulator.set_log_conditions('value')

        #   SET UP CONNECTIONS

        #   rows correspond to the sender's units
        #   columns correspond to the receiver's units: each entry weights the contribution that a given sender unit makes to the receiver unit

        #   INPUT TO HIDDEN
        # row 0: input_'red' to hidden_'red', hidden_'green'
        # row 1: input_'green' to hidden_'red', hidden_'green'
        color_weights = pnl.MappingProjection(matrix=np.atleast_2d([[2.2, -2.2],
                                                                [-2.2, 2.2]]),
                                              name='COLOR_WEIGHTS')
        # row 0: input_'RED' to hidden_'RED', hidden_'GREEN'
        # row 1: input_'GREEN' to hidden_'RED', hidden_'GREEN'
        word_weights = pnl.MappingProjection(matrix=np.atleast_2d([[2.6, -2.6],
                                                               [-2.6, 2.6]]),
                                             name='WORD_WEIGHTS')

        #   HIDDEN TO RESPONSE
        # row 0: hidden_'red' to response_'red', response_'green'
        # row 1: hidden_'green' to response_'red', response_'green'
        color_response_weights = pnl.MappingProjection(matrix=np.atleast_2d([[1.3, -1.3],
                                                                         [-1.3, 1.3]]),
                                                       name='COLOR_RESPONSE_WEIGHTS')
        # row 0: hidden_'RED' to response_'red', response_'green'
        # row 1: hidden_'GREEN' to response_'red', response_'green'
        word_response_weights = pnl.MappingProjection(matrix=np.atleast_2d([[2.5, -2.5],
                                                                        [-2.5, 2.5]]),
                                                      name='WORD_RESPONSE_WEIGHTS')

        #   TASK TO HIDDEN LAYER
        #   row 0: task_CN to hidden_'red', hidden_'green'
        #   row 1: task_WR to hidden_'red', hidden_'green'
        task_CN_weights = pnl.MappingProjection(matrix=np.atleast_2d([[4.0, 4.0],
                                                                  [0, 0]]),
                                                name='TASK_CN_WEIGHTS')

        #   row 0: task_CN to hidden_'RED', hidden_'GREEN'
        #   row 1: task_WR to hidden_'RED', hidden_'GREEN'
        task_WR_weights = pnl.MappingProjection(matrix=np.atleast_2d([[0, 0],
                                                                  [4.0, 4.0]]),
                                                name='TASK_WR_WEIGHTS')

        #   RESPONSE UNITS TO ACCUMULATORS
        #   row 0: response_'red' to respond_red_accumulator
        #   row 1: response_'green' to respond_red_accumulator
        respond_red_differencing_weights = pnl.MappingProjection(matrix=np.atleast_2d([[1.0], [-1.0]]),
                                                                 name='RESPOND_RED_WEIGHTS')

        #   row 0: response_'red' to respond_green_accumulator
        #   row 1: response_'green' to respond_green_accumulator
        respond_green_differencing_weights = pnl.MappingProjection(matrix=np.atleast_2d([[-1.0], [1.0]]),
                                                                   name='RESPOND_GREEN_WEIGHTS')

        #   CREATE PATHWAYS
        #   Words pathway
        words_process = pnl.Process(pathway=[words_input_layer,
                                             word_weights,
                                             words_hidden_layer,
                                             word_response_weights,
                                             response_layer], name='WORDS_PROCESS')

        #   Colors pathway
        colors_process = pnl.Process(pathway=[colors_input_layer,
                                              color_weights,
                                              colors_hidden_layer,
                                              color_response_weights,
                                              response_layer], name='COLORS_PROCESS')

        #   Task representation pathway
        task_CN_process = pnl.Process(pathway=[task_layer,
                                               task_CN_weights,
                                               colors_hidden_layer],
                                      name='TASK_CN_PROCESS')
        task_WR_process = pnl.Process(pathway=[task_layer,
                                               task_WR_weights,
                                               words_hidden_layer],
                                      name='TASK_WR_PROCESS')

        #   Evidence accumulation pathway
        respond_red_process = pnl.Process(pathway=[response_layer,
                                                   respond_red_differencing_weights,
                                                   respond_red_accumulator],
                                          name='RESPOND_RED_PROCESS')
        respond_green_process = pnl.Process(pathway=[response_layer,
                                                     respond_green_differencing_weights,
                                                     respond_green_accumulator],
                                            name='RESPOND_GREEN_PROCESS')

        #   CREATE SYSTEM
        my_Stroop = pnl.System(processes=[colors_process,
                                          words_process,
                                          task_CN_process,
                                          task_WR_process,
                                          respond_red_process,
                                          respond_green_process],
                               name='FEEDFORWARD_STROOP_SYSTEM')

        # my_Stroop.show()
        # my_Stroop.show_graph(show_dimensions=pnl.ALL)

        # Function to create test trials
        # a RED word input is [1,0] to words_input_layer and GREEN word is [0,1]
        # a red color input is [1,0] to colors_input_layer and green color is [0,1]
        # a color-naming trial is [1,0] to task_layer and a word-reading trial is [0,1]

        def trial_dict(red_color, green_color, red_word, green_word, CN, WR):

            trialdict = {
                colors_input_layer: [red_color, green_color],
                words_input_layer: [red_word, green_word],
                task_layer: [CN, WR]
            }
            return trialdict
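
        # Illustration (not part of the original model): an incongruent color-naming trial
        # presents the color green together with the word RED while cueing color naming
        incongruent_CN_trial = trial_dict(0, 1, 1, 0, 1, 0)
        assert incongruent_CN_trial[colors_input_layer] == [0, 1]
        assert incongruent_CN_trial[words_input_layer] == [1, 0]
        assert incongruent_CN_trial[task_layer] == [1, 0]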

        #   CREATE THRESHOLD FUNCTION
        # the first output state of each accumulator holds its current decision variable
        # execution_context is always passed to Condition functions and is the context
        # in which the function gets called - below, during system execution
        def pass_threshold(mech1, mech2, thresh, execution_context=None):
            results1 = mech1.output_states[0].parameters.value.get(execution_context)
            results2 = mech2.output_states[0].parameters.value.get(execution_context)
            for val in results1:
                if val >= thresh:
                    return True
            for val in results2:
                if val >= thresh:
                    return True
            return False
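
        # Used below: wrapping this function in pnl.While(pass_threshold, mech1, mech2, thresh)
        # makes the system re-evaluate it during each trial and end the trial as soon as
        # either accumulator's first output state reaches the threshold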

        accumulator_threshold = 1.0

        mechanisms_to_update = [colors_hidden_layer, words_hidden_layer, response_layer]

        def switch_integrator_mode(mechanisms, mode):
            for mechanism in mechanisms:
                mechanism.integrator_mode = mode

        def switch_noise(mechanisms, noise):
            for mechanism in mechanisms:
                mechanism.noise = noise

        def switch_to_initialization_trial(mechanisms):
            # Turn off accumulation
            switch_integrator_mode(mechanisms, False)
            # Turn off noise
            switch_noise(mechanisms, 0)
            # Execute once per trial
            my_Stroop.termination_processing = {pnl.TimeScale.TRIAL: pnl.AllHaveRun()}

        def switch_to_processing_trial(mechanisms):
            # Turn on accumulation
            switch_integrator_mode(mechanisms, True)
            # Turn on noise
            switch_noise(mechanisms, psyneulink.core.components.functions.distributionfunctions.NormalDist(mean=0, standard_deviation=unit_noise).function)
            # Execute until one of the accumulators crosses the threshold
            my_Stroop.termination_processing = {
                pnl.TimeScale.TRIAL: pnl.While(
                    pass_threshold,
                    respond_red_accumulator,
                    respond_green_accumulator,
                    accumulator_threshold
                )
            }

        def switch_trial_type():
            # Next trial will be a processing trial
            if isinstance(my_Stroop.termination_processing[pnl.TimeScale.TRIAL], pnl.AllHaveRun):
                switch_to_processing_trial(mechanisms_to_update)
            # Next trial will be an initialization trial
            else:
                switch_to_initialization_trial(mechanisms_to_update)
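
        # Because the run below starts in initialization mode and passes switch_trial_type as
        # call_after_trial, consecutive trials alternate: initialization, processing,
        # initialization, processing, ...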

        CN_trial_initialize_input = trial_dict(0, 0, 0, 0, 1, 0)

        WR_trial_initialize_input = trial_dict(0, 0, 0, 0, 0, 1)

        # Start with an initialization trial
        switch_to_initialization_trial(mechanisms_to_update)

        my_Stroop.run(inputs=trial_dict(0, 1, 1, 0, 1, 0),
                      # termination_processing=change_termination_processing,
                      num_trials=4,
                      call_after_trial=switch_trial_type)
Example 25
def create_weights(in_size=2, out_size=2):
    return pnl.random_matrix(in_size, out_size, 2, -1) * 0.1
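
# Illustration only (not part of the original script): assuming pnl.random_matrix(rows, cols,
# scale, offset) samples uniformly and returns offset + scale * U(0, 1) entrywise,
# create_weights() yields a 2x2 matrix of small random weights in roughly [-0.1, 0.1]
example_weights = create_weights()
assert example_weights.shape == (2, 2)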


# Create the layers
input_layer = pnl.TransferMechanism(size=2, name='input')
output_layer = pnl.TransferMechanism(size=2, name='output')
indirect_layer = pnl.TransferMechanism(size=2, name='indirect')
"""
If process block #1 is before process block #2, the system creation hangs in an infinite warning loop
However, if process block #2 is before process block #1, the system creates just fine.
"""

# Process block #1 -- Create the input-output process
input_output_process = pnl.Process(
    pathway=[input_layer, create_weights(), output_layer],
    name='input-output',
    learning=pnl.LEARNING)
input_output_process.pathway[1].learning_mechanism.learning_rate = 0.1

# Process block #2 -- create the indirect processes
input_indirect_process = pnl.Process(
    pathway=[input_layer, create_weights(), indirect_layer],
    name='input-indirect',
    learning=pnl.LEARNING)
input_indirect_process.pathway[1].learning_mechanism.learning_rate = 0.5

indirect_output_process = pnl.Process(
    pathway=[indirect_layer, create_weights(), output_layer],
    name='indirect-output',
    learning=pnl.LEARNING)
indirect_output_process.pathway[1].learning_mechanism.learning_rate = 0.5
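
# A minimal sketch (not in the original excerpt; names are illustrative) of combining these
# processes into a System; per the note above, creating process block #2 before block #1
# avoids the hang during System creation:
#
#     my_system = pnl.System(
#         processes=[input_output_process, input_indirect_process, indirect_output_process],
#         name='input-indirect-output-system')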
Example 26
myInputLayer = pnl.TransferMechanism(name='Input Layer',
                                     function=pnl.Linear(),
                                     default_variable=[0, 0])

myHiddenLayer = pnl.TransferMechanism(name='Hidden Layer 1',
                                      function=pnl.Logistic(gain=1.0, bias=0),
                                      default_variable=np.zeros((5, )))

myDDM = pnl.DDM(name='My_DDM',
                function=pnl.BogaczEtAl(drift_rate=0.5,
                                        threshold=1,
                                        starting_point=0.0))

myProcess = pnl.Process(name='Neural Network DDM Process',
                        default_variable=[0, 0],
                        pathway=[
                            myInputLayer,
                            pnl.get_matrix(pnl.RANDOM_CONNECTIVITY_MATRIX, 2,
                                           5), myHiddenLayer,
                            pnl.FULL_CONNECTIVITY_MATRIX, myDDM
                        ])

myProcess.reportOutputPref = True
myInputLayer.reportOutputPref = True
myHiddenLayer.reportOutputPref = True
myDDM.reportOutputPref = pnl.PreferenceEntry(True,
                                             pnl.PreferenceLevel.INSTANCE)

pnl.run(myProcess, [[-1, 2], [2, 3], [5, 5]])
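
# The call above runs three trials: each 2-element input is projected through a random 2x5
# matrix into the 5-unit hidden layer and then through a full-connectivity matrix into the DDM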
Example 27
    [crswt, crswt, inpwt]
])

# Weight matrix from Decision Layer --> Response Layer
# Weights come from T1 and T2 but not from the distractor unit (row 3 is all zeros);
# a 3 x 2 matrix is needed to project from the 3-unit decision layer to the 2-unit response layer
output_weights = np.array([
    [decwt, 0.0],
    [0.0, decwt],
    [0.0, 0.0]
])

decision_process = pnl.Process(pathway=[
    input_layer, input_weights, decision_layer, output_weights, response_layer
],
                               name='DECISION PROCESS')

# Abstracted LC to modulate gain --------------------------------------------------------------------

# This LCControlMechanism modulates gain.
LC = pnl.LCControlMechanism(
    integration_method="EULER",   # use the Euler method for integration, as in the paper
    threshold_FHN=a,              # parameters of the FitzHugh–Nagumo system
    uncorrelated_activity_FHN=d,
    time_step_size_FHN=dt,
    mode_FHN=C,
    time_constant_v_FHN=tau_v,
    time_constant_w_FHN=tau_u,
Example 28
CH_Weights_matrix = np.arange(4).reshape((2, 2))
WH_Weights_matrix = np.arange(4).reshape((2, 2))
HO_Weights_matrix = np.arange(4).reshape((2, 2))

CH_Weights = pnl.MappingProjection(name='Color-Hidden Weights',
                                   matrix=CH_Weights_matrix)
WH_Weights = pnl.MappingProjection(name='Word-Hidden Weights',
                                   matrix=WH_Weights_matrix)
HO_Weights = pnl.MappingProjection(name='Hidden-Output Weights',
                                   matrix=HO_Weights_matrix)

color_naming_process = pnl.Process(
    default_variable=[1, 2.5],
    pathway=[colors, CH_Weights, hidden, HO_Weights, response],
    learning=pnl.LEARNING,
    target=[2, 2],
    name='Color Naming',
    prefs=process_prefs)

word_reading_process = pnl.Process(default_variable=[.5, 3],
                                   pathway=[words, WH_Weights, hidden],
                                   name='Word Reading',
                                   learning=pnl.LEARNING,
                                   target=[3, 3],
                                   prefs=process_prefs)

# color_naming_process.execute()
# word_reading_process.execute()

mySystem = pnl.System(processes=[color_naming_process, word_reading_process],
Example 29
        }
    ],
)  # drift_rate=(1.0), threshold=(0.2645), noise=(0.5), starting_point=(0), t0=0.15
Decision.set_log_conditions('DECISION_VARIABLE')
Decision.set_log_conditions('value')
Decision.set_log_conditions('PROBABILITY_UPPER_THRESHOLD')
Decision.set_log_conditions('InputState-0')

Decision.loggable_items

# Outcome Mechanisms:
Reward = pnl.TransferMechanism(name='Reward')

# Processes:
TargetControlProcess = pnl.Process(default_variable=[0],
                                   pathway=[Target_Stim, Target_Rep, Decision],
                                   name='Target Control Process')

FlankerControlProcess = pnl.Process(
    default_variable=[0],
    pathway=[Flanker_Stim, Flanker_Rep, Decision],
    name='Flanker Control Process')

TargetAutomaticProcess = pnl.Process(
    default_variable=[0],
    pathway=[Target_Stim, Automatic_Component, Decision],
    name='Target Automatic Process')

FlankerAutomaticProcess = pnl.Process(
    default_variable=[0],
    pathway=[Flanker_Stim, Automatic_Component, Decision],
Example 30
ECin_to_DG = pnl.MappingProjection(matrix=mat_ECin_DG)
DG_to_CA3 = pnl.MappingProjection(matrix=mat_DG_CA3)
ECin_to_CA3 = pnl.MappingProjection(matrix=mat_ECin_CA3)
CA3_to_CA1 = pnl.MappingProjection(matrix=mat_CA3_CA1)
CA1_to_ECout = pnl.MappingProjection(sender=CA1,
                                     receiver=ECout,
                                     matrix=mat_CA1_ECout)
ECin_to_CA1 = pnl.MappingProjection(sender=ECin,
                                    receiver=CA1,
                                    matrix=mat_ECin_CA1)

# In[7]:

proc_ECin_DG = pnl.Process(pathway=[ECin, ECin_to_DG, DG],
                           learning=pnl.ENABLED,
                           learning_rate=0.2)
proc_ECin_CA3 = pnl.Process(pathway=[ECin, ECin_to_CA3, CA3],
                            learning=pnl.ENABLED,
                            learning_rate=0.2)
proc_DG_CA3 = pnl.Process(pathway=[DG, DG_to_CA3, CA3],
                          learning=pnl.ENABLED,
                          learning_rate=0)
proc_CA3_CA1 = pnl.Process(pathway=[CA3, CA3_to_CA1, CA1],
                           learning=pnl.ENABLED,
                           learning_rate=0.05)
proc_CA1_ECout = pnl.Process(pathway=[CA1, ECout],
                             learning=pnl.ENABLED,
                             learning_rate=0.02)
proc_ECin_CA1 = pnl.Process(pathway=[ECin, CA1], learning_rate=0.02)
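
# Note: proc_ECin_CA1 lists only the mechanisms; presumably the ECin_to_CA1 projection defined
# above (whose sender and receiver are already specified) is used for this pathway. Unlike the
# other processes here, it does not set learning=pnl.ENABLED.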