Example #1
    def test_clear_log(self):

        # Create System
        T_1 = pnl.TransferMechanism(name='log_test_T_1', size=2)
        T_2 = pnl.TransferMechanism(name='log_test_T_2', size=2)
        PS = pnl.Process(name='log_test_PS', pathway=[T_1, T_2])
        PJ = T_2.path_afferents[0]
        SYS = pnl.System(name="log_test_SYS", processes=[PS])

        # Set log conditions on each component
        T_1.set_log_conditions(pnl.NOISE)
        T_1.set_log_conditions(pnl.RESULTS)
        T_2.set_log_conditions(pnl.SLOPE)
        T_2.set_log_conditions(pnl.RESULTS)
        PJ.set_log_conditions(pnl.MATRIX)

        # Run system
        SYS.run(inputs={T_1: [1.0, 1.0]})

        # Create log dict for each component
        log_dict_T_1 = T_1.log.nparray_dictionary()
        log_dict_T_2 = T_2.log.nparray_dictionary()
        log_dict_PJ = PJ.log.nparray_dictionary()

        # Confirm that values were logged correctly
        assert np.allclose(log_dict_T_1['RESULTS'], np.array([[1.0, 1.0]])) and \
               np.allclose(log_dict_T_1['noise'], np.array([[0.0]]))

        assert np.allclose(log_dict_T_2['RESULTS'], np.array([[1.0, 1.0]])) and \
               np.allclose(log_dict_T_2['slope'], np.array([[1.0]]))

        assert np.allclose(log_dict_PJ['matrix'],
                           np.array([[1.0, 0.0], [0.0, 1.0]]))

        # Clear T_1's log but do NOT delete its entries
        T_1.log.clear_entries(delete_entry=False)

        # Clear T_2's log AND delete its entries
        T_2.log.clear_entries(delete_entry=True)

        # Create new log dict for each component
        log_dict_T_1 = T_1.log.nparray_dictionary()
        log_dict_T_2 = T_2.log.nparray_dictionary()
        log_dict_PJ = PJ.log.nparray_dictionary()

        # Confirm that T_1 log values were removed
        assert np.allclose(log_dict_T_1['RESULTS'], np.array([])) and \
               np.allclose(log_dict_T_1['noise'], np.array([]))

        # Confirm that T_2 log values were removed and dictionary entries were destroyed
        assert log_dict_T_2 == OrderedDict()

        # Confirm that PJ log values were not affected by changes to T_1 and T_2's logs
        assert np.allclose(log_dict_PJ['matrix'],
                           np.array([[1.0, 0.0], [0.0, 1.0]]))

        # Run system again
        SYS.run(inputs={T_1: [2.0, 2.0]})

        # Create new log dict for each component
        log_dict_T_1 = T_1.log.nparray_dictionary()
        log_dict_T_2 = T_2.log.nparray_dictionary()
        log_dict_PJ = PJ.log.nparray_dictionary()

        # Confirm that T_1 log values only include most recent run
        assert np.allclose(log_dict_T_1['RESULTS'], np.array([[2.0, 2.0]])) and \
               np.allclose(log_dict_T_1['noise'], np.array([[0.0]]))
        # NOTE: "Run" value still incremented, but only the most recent one is returned (# runs does not reset to zero)
        assert np.allclose(log_dict_T_1['Run'], np.array([[1]]))

        # Confirm that T_2 log values only include most recent run
        assert np.allclose(log_dict_T_2['RESULTS'], np.array([[2.0, 2.0]])) and \
               np.allclose(log_dict_T_2['slope'], np.array([[1.0]]))
        assert np.allclose(log_dict_T_2['Run'], np.array([[1]]))

        # Confirm that PJ log values include all runs
        assert np.allclose(log_dict_PJ['matrix'], np.array([[[1.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 1.0]]])) and \
               np.allclose(log_dict_PJ['Run'], np.array([[0], [1]]))
Example #2
RewardProcess = pnl.Process(default_variable=[0],
                            pathway=[Reward, test_mech],
                            name='RewardProcess')

# System:
mySystem = pnl.System(
    processes=[
        TargetControlProcess, FlankerControlProcess, TargetAutomaticProcess,
        FlankerAutomaticProcess, RewardProcess
    ],
    controller=pnl.EVCControlMechanism(
        prefs={
            pnl.LOG_PREF:
            pnl.PreferenceEntry(pnl.LogCondition.INITIALIZATION,
                                pnl.PreferenceLevel.INSTANCE)
        }),
    enable_controller=True,
    monitor_for_control=[
        # (None, None, np.ones((1,1))),
        Reward,
        Decision.PROBABILITY_UPPER_THRESHOLD,
        ('OFFSET_RT', 1, -1),
    ],
    name='EVC Markus System')

# Show characteristics of system:
mySystem.show()
# mySystem.controller.show()

# Show graph of system
Example #3
                                    name='WORDS_RESPONSE_PROCESS')

task_color_response_process = pnl.Process(pathway=[
    task_input_layer, task_input_weights, task_layer, task_color_weights,
    colors_hidden_layer, color_task_weights, task_layer
])

task_word_response_process = pnl.Process(pathway=[
    task_input_layer, task_layer, task_word_weights, words_hidden_layer,
    word_task_weights, task_layer
])

# Create system -------------------------------------------------------------------------------------------------------
Bidirectional_Stroop = pnl.System(processes=[
    color_response_process, word_response_process, task_color_response_process,
    task_word_response_process
],
                                  reinitialize_mechanisms_when=pnl.Never(),
                                  name='FEEDFORWARD_STROOP_SYSTEM')

# LOGGING:
colors_hidden_layer.set_log_conditions('value')
words_hidden_layer.set_log_conditions('value')

Bidirectional_Stroop.show()

# Bidirectional_Stroop.show_graph(show_dimensions=pnl.ALL)#,show_mechanism_structure=pnl.VALUES) # Uncomment to show graph of the system


# Create threshold function -------------------------------------------------------------------------------------------
# execution_context is automatically passed into Conditions, and references the execution context in which they are being run,
# which in this case is simply the Bidirectional_Stroop system
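
# A sketch of what such a threshold function might look like, patterned on the
# pass_threshold helpers used in other examples in this file; the two response-mechanism
# arguments and the execution_context parameter (per the note above) are assumptions:
def pass_threshold(mech1, mech2, thresh, execution_context=None):
    # Return True as soon as any unit of either response mechanism reaches the threshold
    for val in list(mech1.output_states[0].value) + list(mech2.output_states[0].value):
        if val >= thresh:
            return True
    return False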
Example #4
p21 = pnl.Process(pathway=[h2, map_h2_I, out_sig_I], learning=pnl.LEARNING)

p22 = pnl.Process(pathway=[h2, map_h2_is, out_sig_is], learning=pnl.LEARNING)

p23 = pnl.Process(pathway=[h2, map_h2_has, out_sig_has], learning=pnl.LEARNING)

p24 = pnl.Process(pathway=[h2, map_h2_can, out_sig_can], learning=pnl.LEARNING)

############################# THIS IS WHERE WE BUILD OUT THE SYSTEM ###################################

rumel_sys = pnl.System(processes=[
    p11,
    p12,
    p21,
    p22,
    p23,
    p24,
],
                       learning_rate=.5)

rumel_sys.show_graph(output_fmt='jupyter')

############################## THIS IS WHERE WE SETUP THE LOOP VARIABLES #########################################

# THESE ARRAYS STORE THE ERROR VALUES FROM THE SIG OUTPUT AND BIN OUTPUT
delta_bin_array = []
delta_sig_array = []

# SET NUMBER OF EPOCHS:
epochs = 1000
Example #5
                           hidden_layers=n_hidden,
                           hidden_sizes=None,
                           training_flag=True,
                           quarter_size=20)

### building the PsyNeuLink network
T_input = pnl.TransferMechanism(size=n_input)
T_target = pnl.TransferMechanism(size=n_output)
# target_projection connects T_target to the TARGET InputPort of Leab
target_projection = pnl.MappingProjection(sender=T_target,
                                          receiver=Leab.input_ports[1])

p_input = pnl.Process(pathway=[T_input, Leab])
p_target = pnl.Process(pathway=[T_target, target_projection, Leab])

sys = pnl.System(processes=[p_input, p_target])

### building the learning data
n = 1000
inputs = [None] * n
targets = [None] * n
print("here's what the inputs/targets will look like:")
for i in range(n):
    nums = np.random.randint(0, 7, size=2) * 0.4
    a = nums[0]
    b = nums[1]
    inputs[i] = [a, a, b, b]
    if a > b:
        targets[i] = [1, 0]
    elif b > a:
        targets[i] = [0, 1]
Example #6
# In[7]:


proc_ECin_DG = pnl.Process(pathway=[ECin, ECin_to_DG, DG], learning=pnl.ENABLED, learning_rate=0.2)
proc_ECin_CA3 = pnl.Process(pathway=[ECin, ECin_to_CA3, CA3], learning=pnl.ENABLED, learning_rate=0.2)
proc_DG_CA3 = pnl.Process(pathway=[DG, DG_to_CA3, CA3], learning=pnl.ENABLED, learning_rate=0)
proc_CA3_CA1 = pnl.Process(pathway=[CA3, CA3_to_CA1, CA1], learning=pnl.ENABLED, learning_rate=0.05)
proc_CA1_ECout = pnl.Process(pathway=[CA1, ECout], learning=pnl.ENABLED, learning_rate=0.02)
proc_ECin_CA1 = pnl.Process(pathway=[ECin, CA1], learning_rate=0.02)


# In[8]:


TSP = pnl.System(processes=[proc_ECin_DG, proc_ECin_CA3, proc_DG_CA3, proc_CA3_CA1, proc_CA1_ECout])
# MSP = pnl.System(processes=[proc_ECin_CA1, proc_CA1_ECout])


# In[9]:


TSP.show_graph()
assert True

# In[10]:


## Method for making input
def statistical():
    chars = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
Example #7
def print_header(system):
    print(
        "\n\n**** TRIAL: ", system.scheduler_processing.times[
            pnl.TimeScale.RUN][pnl.TimeScale.TRIAL])


def show_weights():
    print('Reward prediction weights: \n',
          action_selection.input_state.path_afferents[0].matrix)
    print('\nAction selected:  {}; predicted reward: {}'.format(
        np.nonzero(action_selection.output_state.value)[0][0],
        action_selection.output_state.value[np.nonzero(
            action_selection.output_state.value)][0]))


p.run(num_trials=10,
      inputs=[[[1, 1, 1]]],
      targets=reward,
      call_after_trial=show_weights)

input_list = {input_layer: [[1, 1, 1]]}

s = pnl.System(processes=[p], targets=[0])

s.run(num_trials=10,
      inputs=input_list,
      targets=reward,
      call_before_trial=functools.partial(print_header, s),
      call_after_trial=show_weights)
Example #8
    b_w_FHN=-1.0,
    c_w_FHN=0.0,
    t_0_FHN=0.0,
    initial_v_FHN=initial_v,
    initial_w_FHN=initial_u,
    objective_mechanism=pnl.ObjectiveMechanism(function=pnl.Linear,
                                               monitored_output_states=[
                                                   (decision_layer, None, None,
                                                    np.array([[w_vX1], [0.0]]))
                                               ],
                                               name='LC ObjectiveMechanism'),
    modulated_mechanisms=[decision_layer, response_layer
                          ],  # Modulate gain of decision & response layers
    name='LC')

task = pnl.System(processes=[decision_process])

# This displays a diagram of the System
task.show_graph(show_dimensions=True)

# Create Stimulus -----------------------------------------------------------------------------------------------------

# number of trials
trials = 1000

# assign inputs to input_layer (Origin Mechanism) for each trial
stimulus_dictionary = {
    input_layer: np.repeat(np.array([[0.0, 0.0], [1.0, 0.0]]), 500, axis=0)
}
# First 500 trials: target receives input of 0.0, distractor receives input of 0.0
# Second 500 trials: target receives input of 1.0, distractor receives input of 0.0
Example #9
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions

ci = pnl.TransferMechanism(size=2, name='COLORS INPUT')
wi = pnl.TransferMechanism(size=2, name='WORDS INPUT')
ch = pnl.TransferMechanism(
    size=2,
    function=psyneulink.core.components.functions.transferfunctions.Logistic,
    name='COLORS HIDDEN')
wh = pnl.TransferMechanism(
    size=2,
    function=psyneulink.core.components.functions.transferfunctions.Logistic,
    name='WORDS HIDDEN')
tl = pnl.TransferMechanism(
    size=2,
    function=psyneulink.core.components.functions.transferfunctions.Logistic(
        gain=pnl.CONTROL),
    name='TASK CONTROL')
rl = pnl.LCAMechanism(
    size=2,
    function=psyneulink.core.components.functions.transferfunctions.Logistic,
    name='RESPONSE')
cp = pnl.Process(pathway=[ci, ch, rl])
wp = pnl.Process(pathway=[wi, wh, rl])
tc = pnl.Process(pathway=[tl, ch])
tw = pnl.Process(pathway=[tl, wh])
s = pnl.System(processes=[tc, tw, cp, wp],
               controller=pnl.EVCControlMechanism(name='EVC Mechanism'),
               monitor_for_control=[rl])
s.show_graph()
Example #10
respond_red_process = pnl.Process(pathway=[
    response_layer, respond_red_differencing_weights, respond_red_accumulator
],
                                  name='RESPOND_RED_PROCESS')
respond_green_process = pnl.Process(pathway=[
    response_layer, respond_green_differencing_weights,
    respond_green_accumulator
],
                                    name='RESPOND_GREEN_PROCESS')

# In[ ]:

#   CREATE SYSTEM
my_Stroop = pnl.System(processes=[
    colors_process, words_process, task_CN_process, task_WR_process,
    respond_red_process, respond_green_process
],
                       name='FEEDFORWARD_STROOP_SYSTEM')

# In[ ]:

# test inputs

# no color, no word, no task
notask_noinput_trial_input = {
    colors_input_layer: [0, 0],
    words_input_layer: [0, 0],
    task_layer: [0, 0]
}

# color 'red', word 'RED', task none
Example #11
def test_lauras_cohen_1990_model():
    #  INPUT UNITS

    #  colors: ('red', 'green'), words: ('RED','GREEN')
    colors_input_layer = pnl.TransferMechanism(size=2,
                                               function=pnl.Linear,
                                               name='COLORS_INPUT')

    words_input_layer = pnl.TransferMechanism(size=2,
                                              function=pnl.Linear,
                                              name='WORDS_INPUT')

    #   Task layer, tasks: ('name the color', 'read the word')
    task_layer = pnl.TransferMechanism(size=2,
                                       function=pnl.Linear,
                                       name='TASK')

    #   HIDDEN LAYER UNITS

    #   colors_hidden: ('red','green')
    #   Logistic activation function, Gain = 1.0, Bias = -4.0 (in PNL bias is subtracted so enter +4.0 to get negative bias)
    #   randomly distributed noise to the net input
    #   time averaging = integration_rate = 0.1
    unit_noise = 0.005
    colors_hidden_layer = pnl.TransferMechanism(
        size=2,
        function=pnl.Logistic(gain=1.0, bias=4.0),
        # should be able to get same result with offset = -4.0
        integrator_mode=True,
        noise=pnl.NormalDist(mean=0, standard_dev=unit_noise).function,
        integration_rate=0.1,
        name='COLORS HIDDEN')
    #    words_hidden: ('RED','GREEN')
    words_hidden_layer = pnl.TransferMechanism(
        size=2,
        function=pnl.Logistic(gain=1.0, bias=4.0),
        integrator_mode=True,
        noise=pnl.NormalDist(mean=0, standard_dev=unit_noise).function,
        integration_rate=0.1,
        name='WORDS HIDDEN')

    #    OUTPUT UNITS

    #   Response layer, provide input to accumulator, responses: ('red', 'green')
    #   time averaging = tau = 0.1
    #   randomly distributed noise to the net input
    response_layer = pnl.TransferMechanism(
        size=2,
        function=pnl.Logistic,
        name='RESPONSE',
        integrator_mode=True,
        noise=pnl.NormalDist(mean=0, standard_dev=unit_noise).function,
        integration_rate=0.1)
    #   Respond red accumulator
    #   alpha = rate of evidence accumulation = 0.1
    #   sigma = noise = 0.1
    #   noise will be: square root(time_step_size * noise) * a random sample from a normal distribution
    accumulator_noise = 0.1
    respond_red_accumulator = pnl.IntegratorMechanism(
        function=pnl.SimpleIntegrator(noise=pnl.NormalDist(
            mean=0, standard_dev=accumulator_noise).function,
                                      rate=0.1),
        name='respond_red_accumulator')
    #   Respond green accumulator
    respond_green_accumulator = pnl.IntegratorMechanism(
        function=pnl.SimpleIntegrator(noise=pnl.NormalDist(
            mean=0, standard_dev=accumulator_noise).function,
                                      rate=0.1),
        name='respond_green_accumulator')
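
    # For intuition, an illustrative accumulator update matching the comment above
    # (a hand-written sketch, not the PsyNeuLink implementation; names are assumptions):
    def accumulator_step(x, evidence, rate=0.1, sigma=accumulator_noise, time_step_size=1.0):
        return x + rate * evidence + np.sqrt(time_step_size * sigma) * np.random.normal()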

    #   LOGGING
    colors_hidden_layer.set_log_conditions('value')
    words_hidden_layer.set_log_conditions('value')
    response_layer.set_log_conditions('value')
    respond_red_accumulator.set_log_conditions('value')
    respond_green_accumulator.set_log_conditions('value')

    #   SET UP CONNECTIONS

    #   rows correspond to sender
    #   columns correspond to: weighting of the contribution that a given sender makes to the receiver

    #   INPUT TO HIDDEN
    # row 0: input_'red' to hidden_'red', hidden_'green'
    # row 1: input_'green' to hidden_'red', hidden_'green'
    color_weights = pnl.MappingProjection(matrix=np.matrix([[2.2, -2.2],
                                                            [-2.2, 2.2]]),
                                          name='COLOR_WEIGHTS')
    # row 0: input_'RED' to hidden_'RED', hidden_'GREEN'
    # row 1: input_'GREEN' to hidden_'RED', hidden_'GREEN'
    word_weights = pnl.MappingProjection(matrix=np.matrix([[2.6, -2.6],
                                                           [-2.6, 2.6]]),
                                         name='WORD_WEIGHTS')

    #   HIDDEN TO RESPONSE
    # row 0: hidden_'red' to response_'red', response_'green'
    # row 1: hidden_'green' to response_'red', response_'green'
    color_response_weights = pnl.MappingProjection(
        matrix=np.matrix([[1.3, -1.3], [-1.3, 1.3]]),
        name='COLOR_RESPONSE_WEIGHTS')
    # row 0: hidden_'RED' to response_'red', response_'green'
    # row 1: hidden_'GREEN' to response_'red', response_'green'
    word_response_weights = pnl.MappingProjection(matrix=np.matrix(
        [[2.5, -2.5], [-2.5, 2.5]]),
                                                  name='WORD_RESPONSE_WEIGHTS')

    #   TASK TO HIDDEN LAYER
    #   row 0: task_CN to hidden_'red', hidden_'green'
    #   row 1: task_WR to hidden_'red', hidden_'green'
    task_CN_weights = pnl.MappingProjection(matrix=np.matrix([[4.0, 4.0],
                                                              [0, 0]]),
                                            name='TASK_CN_WEIGHTS')

    #   row 0: task_CN to hidden_'RED', hidden_'GREEN'
    #   row 1: task_WR to hidden_'RED', hidden_'GREEN'
    task_WR_weights = pnl.MappingProjection(matrix=np.matrix([[0, 0],
                                                              [4.0, 4.0]]),
                                            name='TASK_WR_WEIGHTS')

    #   RESPONSE UNITS TO ACCUMULATORS
    #   row 0: response_'red' to respond_red_accumulator
    #   row 1: response_'green' to respond_red_accumulator
    respond_red_differencing_weights = pnl.MappingProjection(
        matrix=np.matrix([[1.0], [-1.0]]), name='RESPOND_RED_WEIGHTS')

    #   row 0: response_'red' to respond_green_accumulator
    #   row 1: response_'green' to respond_green_accumulator
    respond_green_differencing_weights = pnl.MappingProjection(
        matrix=np.matrix([[-1.0], [1.0]]), name='RESPOND_GREEN_WEIGHTS')

    #   CREATE PATHWAYS
    #   Words pathway
    words_process = pnl.Process(pathway=[
        words_input_layer, word_weights, words_hidden_layer,
        word_response_weights, response_layer
    ],
                                name='WORDS_PROCESS')

    #   Colors pathway
    colors_process = pnl.Process(pathway=[
        colors_input_layer, color_weights, colors_hidden_layer,
        color_response_weights, response_layer
    ],
                                 name='COLORS_PROCESS')

    #   Task representation pathway
    task_CN_process = pnl.Process(
        pathway=[task_layer, task_CN_weights, colors_hidden_layer],
        name='TASK_CN_PROCESS')
    task_WR_process = pnl.Process(
        pathway=[task_layer, task_WR_weights, words_hidden_layer],
        name='TASK_WR_PROCESS')

    #   Evidence accumulation pathway
    respond_red_process = pnl.Process(pathway=[
        response_layer, respond_red_differencing_weights,
        respond_red_accumulator
    ],
                                      name='RESPOND_RED_PROCESS')
    respond_green_process = pnl.Process(pathway=[
        response_layer, respond_green_differencing_weights,
        respond_green_accumulator
    ],
                                        name='RESPOND_GREEN_PROCESS')

    #   CREATE SYSTEM
    my_Stroop = pnl.System(processes=[
        colors_process, words_process, task_CN_process, task_WR_process,
        respond_red_process, respond_green_process
    ],
                           name='FEEDFORWARD_STROOP_SYSTEM')

    # my_Stroop.show()
    # my_Stroop.show_graph(show_dimensions=pnl.ALL)

    # Function to create test trials
    # a RED word input is [1,0] to words_input_layer and GREEN word is [0,1]
    # a red color input is [1,0] to colors_input_layer and green color is [0,1]
    # a color-naming trial is [1,0] to task_layer and a word-reading trial is [0,1]

    def trial_dict(red_color, green_color, red_word, green_word, CN, WR):

        trialdict = {
            colors_input_layer: [red_color, green_color],
            words_input_layer: [red_word, green_word],
            task_layer: [CN, WR]
        }
        return trialdict

    #   CREATE THRESHOLD FUNCTION
    # first value of DDM's value is DECISION_VARIABLE
    def pass_threshold(mech1, mech2, thresh):
        results1 = mech1.output_states[0].value
        results2 = mech2.output_states[0].value
        for val in results1:
            if val >= thresh:
                return True
        for val in results2:
            if val >= thresh:
                return True
        return False

    accumulator_threshold = 1.0

    mechanisms_to_update = [
        colors_hidden_layer, words_hidden_layer, response_layer
    ]

    def switch_integrator_mode(mechanisms, mode):
        for mechanism in mechanisms:
            mechanism.integrator_mode = mode

    def switch_noise(mechanisms, noise):
        for mechanism in mechanisms:
            mechanism.noise = noise

    def switch_to_initialization_trial(mechanisms):
        # Turn off accumulation
        switch_integrator_mode(mechanisms, False)
        # Turn off noise
        switch_noise(mechanisms, 0)
        # Execute once per trial
        my_Stroop.termination_processing = {
            pnl.TimeScale.TRIAL: pnl.AllHaveRun()
        }

    def switch_to_processing_trial(mechanisms):
        # Turn on accumulation
        switch_integrator_mode(mechanisms, True)
        # Turn on noise
        switch_noise(mechanisms,
                     pnl.NormalDist(mean=0, standard_dev=unit_noise).function)
        # Execute until one of the accumulators crosses the threshold
        my_Stroop.termination_processing = {
            pnl.TimeScale.TRIAL:
            pnl.While(pass_threshold, respond_red_accumulator,
                      respond_green_accumulator, accumulator_threshold)
        }

    def switch_trial_type():
        # Next trial will be a processing trial
        if isinstance(my_Stroop.termination_processing[pnl.TimeScale.TRIAL],
                      pnl.AllHaveRun):
            switch_to_processing_trial(mechanisms_to_update)
        # Next trial will be an initialization trial
        else:
            switch_to_initialization_trial(mechanisms_to_update)

    CN_trial_initialize_input = trial_dict(0, 0, 0, 0, 1, 0)

    WR_trial_initialize_input = trial_dict(0, 0, 0, 0, 0, 1)

    # Start with an initialization trial
    switch_to_initialization_trial(mechanisms_to_update)

    my_Stroop.run(
        inputs=trial_dict(0, 1, 1, 0, 1, 0),
        # termination_processing=change_termination_processing,
        num_trials=6,
        call_after_trial=switch_trial_type)

    # respond_red_accumulator.log.print_entries()
    respond_green_accumulator.log.print_entries()
Example #12
output_layer = pnl.TransferMechanism(
    size=1,
    function=psyneulink.core.components.functions.transferfunctions.Logistic,
    name="output layer")

#bias_h=pnl.Process(pathway=[bias_mech_h,p_b,hidden_layer],learning=pnl.ENABLED)
#bias_out=pnl.Process(pathway=[bias_mech_out,p_b_o,output_layer],learning=pnl.ENABLED)

net3l = pnl.Process(
    pathway=[input_layer, proj, hidden_layer, proj_h, output_layer],
    learning=pnl.ENABLED)

sys3l = pnl.System(
    processes=[
        #bias_h,
        #bias_out,
        net3l
    ],
    learning_rate=8)
#### AFTER THIS PART IS FINE #####

sys3l.show_graph(output_fmt='jupyter')

trials = 4000
X = np.array([[1, 1], [1, 0], [0, 1], [0, 0]])
#X=[[1,1,1],[1,0,1],[0,1,1],[0,0,1]]
b_h_ins = [[1, 1], [1, 1], [1, 1], [1, 1]]
b_o_ins = [[1], [1], [1], [1]]
AND_labels_pnl = [[1], [0], [0], [0]]
OR_labels_pnl = [[1], [1], [1], [0]]
XOR_labels_pnl = [[0], [1], [1], [0]]
Example #13
    def test_default_lc_control_mechanism(self, benchmark, mode):
        G = 1.0
        k = 0.5
        starting_value_LC = 2.0
        user_specified_gain = 1.0

        A = pnl.TransferMechanism(function=pnl.Logistic(gain=user_specified_gain), name='A')
        B = pnl.TransferMechanism(function=pnl.Logistic(gain=user_specified_gain), name='B')
        # B.output_states[0].value *= 0.0  # Reset after init | Doesn't matter here b/c default var = zero, no intercept

        LC = pnl.LCControlMechanism(
            modulated_mechanisms=[A, B],
            base_level_gain=G,
            scaling_factor_gain=k,
            objective_mechanism=pnl.ObjectiveMechanism(
                function=pnl.Linear,
                monitored_output_states=[B],
                name='LC ObjectiveMechanism'
            )
        )
        for output_state in LC.output_states:
            output_state.value *= starting_value_LC

        P = pnl.Process(pathway=[A, B, LC])
        S = pnl.System(processes=[P])
        LC.reinitialize_when = pnl.Never()
        # THIS CURRENTLY DOES NOT WORK:
        # P = pnl.Process(pathway=[A, B])
        # P2 = pnl.Process(pathway=[LC])
        # S = pnl.System(processes=[P, P2])
        # S.show_graph()

        gain_created_by_LC_output_state_1 = []
        mod_gain_assigned_to_A = []
        base_gain_assigned_to_A = []
        mod_gain_assigned_to_B = []
        base_gain_assigned_to_B = []

        def report_trial():
            gain_created_by_LC_output_state_1.append(LC.output_states[0].value[0])
            mod_gain_assigned_to_A.append(A.mod_gain)
            mod_gain_assigned_to_B.append(B.mod_gain)
            base_gain_assigned_to_A.append(A.function_object.gain)
            base_gain_assigned_to_B.append(B.function_object.gain)

        benchmark(S.run, inputs={A: [[1.0], [1.0], [1.0], [1.0], [1.0]]},
              call_after_trial=report_trial)

        # (1) First value of gain in mechanisms A and B must be whatever we hardcoded for LC starting value
        assert mod_gain_assigned_to_A[0] == starting_value_LC

        # (2) Base gain should always be set to the user-specified value
        for i in range(5):
            assert base_gain_assigned_to_A[i] == user_specified_gain
            assert base_gain_assigned_to_B[i] == user_specified_gain

        # (3) LC output on trial n becomes gain of A and B on trial n + 1
        assert np.allclose(mod_gain_assigned_to_A[1:], gain_created_by_LC_output_state_1[0:-1])

        # (4) mechanisms A and B should always have the same gain values (b/c they are identical)
        assert np.allclose(mod_gain_assigned_to_A, mod_gain_assigned_to_B)
Example #14
# projection that takes the signal from the hidden layer and transforms it to get an input for
# the output layer (the xor_out mechanism)
out_map = pnl.MappingProjection(name='hidden_to_output',
                                matrix=np.random.randn(10, 1) * 0.1,
                                sender=xor_hid,
                                receiver=xor_out)

# Put together the mechanisms and projections to get the System representing the XOR model:

# the order of mechanisms and projections is specified at the process level
xor_process = pnl.Process(pathway=[xor_in, hid_map, xor_hid, out_map, xor_out],
                          learning=pnl.LEARNING)

# the learning_rate parameter determines the size of learning updates during training for the System.
xor_sys = pnl.System(processes=[xor_process], learning_rate=learning_rate)

# The comparator mechanism for computing loss and the learning mechanisms/projections for doing
# backpropagation/the learning update during training are set up for the System automatically.

# Train the System representing the XOR model by calling run.

# The 4 learning steps are performed by the run method behind the scenes - as stated in the design doc,
# the loss measurement computed by the system's comparator mechanism defaults to MSE loss, and the learning
# update carried out by learning mechanisms/projections defaults to basic stochastic gradient descent (sgd).

results_sys = xor_sys.run(inputs={xor_in: xor_inputs},
                          targets={xor_out: xor_targets},
                          num_trials=(len(xor_inputs) * num_epochs + 1))

system_total_time = time.time() - system_start_time
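
# For reference, a minimal numpy sketch of the default behavior described above (MSE loss
# from the comparator plus a basic SGD weight update); this is illustrative, not the
# PsyNeuLink API, and the names W, x, y, lr are assumptions:
def sgd_step(W, x, y, lr=0.1):
    y_hat = x @ W                    # forward pass through one linear layer
    error = y_hat - y                # comparator: prediction minus target
    loss = np.mean(error ** 2)       # MSE loss for the trial
    W = W - lr * np.outer(x, error)  # gradient-descent update of the layer's weights
    return W, loss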
Example #15
def test_gating_with_UDF():
    def my_linear_fct(x,
                      m=2.0,
                      b=0.0,
                      params={
                          pnl.ADDITIVE_PARAM: 'b',
                          pnl.MULTIPLICATIVE_PARAM: 'm'
                      }):
        return m * x + b

    def my_simple_linear_fct(x, m=1.0, b=0.0):
        return m * x + b

    def my_exp_fct(
            x,
            r=1.0,
            # b=pnl.CONTROL,
            b=0.0,
            params={
                pnl.ADDITIVE_PARAM: 'b',
                pnl.MULTIPLICATIVE_PARAM: 'r'
            }):
        return x**r + b

    def my_sinusoidal_fct(input,
                          phase=0,
                          amplitude=1,
                          params={
                              pnl.ADDITIVE_PARAM: 'phase',
                              pnl.MULTIPLICATIVE_PARAM: 'amplitude'
                          }):
        frequency = input[0]
        t = input[1]
        return amplitude * np.sin(2 * np.pi * frequency * t + phase)

    Input_Layer = pnl.TransferMechanism(name='Input_Layer',
                                        default_variable=np.zeros((2, )),
                                        function=psyneulink.core.components.
                                        functions.transferfunctions.Logistic)

    Output_Layer = pnl.TransferMechanism(
        name='Output_Layer',
        default_variable=[0, 0, 0],
        function=psyneulink.core.components.functions.transferfunctions.Linear,
        # function=pnl.Logistic,
        # output_states={pnl.NAME: 'RESULTS USING UDF',
        #                pnl.VARIABLE: [(pnl.OWNER_VALUE,0), pnl.TIME_STEP],
        #                pnl.FUNCTION: my_sinusoidal_fct}
        output_states={
            pnl.NAME:
            'RESULTS USING UDF',
            # pnl.VARIABLE: (pnl.OWNER_VALUE, 0),
            pnl.FUNCTION:
            psyneulink.core.components.functions.transferfunctions.Linear(
                slope=pnl.GATING)
            # pnl.FUNCTION: pnl.Logistic(gain=pnl.GATING)
            # pnl.FUNCTION: my_linear_fct
            # pnl.FUNCTION: my_exp_fct
            # pnl.FUNCTION:pnl.UserDefinedFunction(custom_function=my_simple_linear_fct,
            #                                      params={pnl.ADDITIVE_PARAM:'b',
            #                                              pnl.MULTIPLICATIVE_PARAM:'m',
            #                                              },
            # m=pnl.GATING,
            # b=2.0
            # )
        })

    Gating_Mechanism = pnl.GatingMechanism(
        # default_gating_allocation=0.0,
        size=[1],
        gating_signals=[
            # Output_Layer
            Output_Layer.output_state,
        ])

    p = pnl.Process(size=2,
                    pathway=[Input_Layer, Output_Layer],
                    prefs={
                        pnl.VERBOSE_PREF: False,
                        pnl.REPORT_OUTPUT_PREF: False
                    })

    g = pnl.Process(default_variable=[1.0], pathway=[Gating_Mechanism])

    stim_list = {
        Input_Layer: [[-1, 30], [-1, 30], [-1, 30], [-1, 30]],
        Gating_Mechanism: [[0.0], [0.5], [1.0], [2.0]]
    }

    mySystem = pnl.System(processes=[p, g])

    mySystem.reportOutputPref = False

    results = mySystem.run(
        num_trials=4,
        inputs=stim_list,
    )

    expected_results = [[np.array([0., 0., 0.])],
                        [np.array([0.63447071, 0.63447071, 0.63447071])],
                        [np.array([1.26894142, 1.26894142, 1.26894142])],
                        [np.array([2.53788284, 2.53788284, 2.53788284])]]

    np.testing.assert_allclose(results, expected_results)
Example #16

decision_process.run(num_trials=10,
                     inputs=[[1, 1]],
                     targets=reward
                     )


# inputs = np.tile(np.repeat(np.array([[1., 0.], [0., 0.], [0., 1.], [0., 0.]]), 20, axis=0), (4, 1))
# input_dict = {input_layer: inputs}
input_dict = {input_layer: [1, 0]}

DA_sys = pnl.System(
    processes=[decision_process, conflict_process,
               update_process],
    controller=updateC,
    targets=[0],
    name='NE-DA System'
)

DA_sys.show_graph(show_learning=pnl.ALL,
                  show_control=pnl.ALL,
                  show_dimensions=True,
                  show_mechanism_structure=pnl.ALL)

DA_sys.run(
    num_trials=10,
    inputs=input_dict,
    targets=reward,
    call_after_trial=functools.partial(show_weights, DA_sys)
)
Example #17
def show_target(system):
    i = system.input
    t = system.target_input_states[0].value
    print('\nOLD WEIGHTS: \n')
    print('- Input Weights: \n', Input_Weights.matrix)
    print('- Middle Weights: \n', Middle_Weights.matrix)
    print('- Output Weights: \n', Output_Weights.matrix)

    print('\nSTIMULI:\n\n- Input: {}\n- Target: {}\n'.format(i, t))
    print('ACTIVITY FROM OLD WEIGHTS: \n')
    print('- Middle 1: \n', Hidden_Layer_1.value)
    print('- Middle 2: \n', Hidden_Layer_2.value)
    print('- Output:\n', Output_Layer.value)


mySystem = pnl.System(processes=[z], targets=[0, 0, 1], learning_rate=2.0)

mySystem.reportOutputPref = True
# Shows graph will full information:
# mySystem.show_graph(show_learning=pnl.ALL, show_dimensions=pnl.ALL)
# Shows minimal graph:
mySystem.show_graph()

stim_list = {Input_Layer: [[-1, 30]]}
target_list = {Output_Layer: [[0, 0, 1]]}

mySystem.run(num_trials=10,
             inputs=stim_list,
             targets=target_list,
             call_before_trial=functools.partial(print_header, mySystem),
             call_after_trial=functools.partial(show_target, mySystem),
Example #18
    pathway=[Reward],
    name='RewardProcess'
)

# System:
mySystem = pnl.System(processes=[TargetControlProcess,
                                 FlankerControlProcess,
                                 TargetAutomaticProcess,
                                 FlankerAutomaticProcess,
                                 RewardProcess],
                      controller=pnl.EVCControlMechanism(
                              control_signals=pnl.ControlSignal(projections=[(pnl.SLOPE, Target_Rep),
                                                                              (pnl.SLOPE, Distractor_Rep)
                                                                              ],
                                                                function=psyneulink.core.components.functions.transferfunctions.Logistic,
                                                                cost_options=[pnl.ControlSignalCosts.INTENSITY,
                                                                               pnl.ControlSignalCosts.ADJUSTMENT],
                                                                allocation_samples=signalSearchRange
                                                                )),
                      enable_controller=True,
                      monitor_for_control=[
                          # (None, None, np.ones((2,1))), # what the **** is this for? Markus October 25 2018
                          Reward,
                          Decision.PROBABILITY_UPPER_THRESHOLD,
                          ('OFFSET RT', 1, -1),
                      ],
                      name='EVC Markus System')

# log controller

mySystem.loggable_items
Example #19
    def test_rumelhart_semantic_network_sequential(self):

        rep_in = pnl.TransferMechanism(size=10, name='REP_IN')
        rel_in = pnl.TransferMechanism(size=11, name='REL_IN')
        rep_hidden = pnl.TransferMechanism(size=4,
                                           function=pnl.Logistic,
                                           name='REP_HIDDEN')
        rel_hidden = pnl.TransferMechanism(size=5,
                                           function=pnl.Logistic,
                                           name='REL_HIDDEN')
        rep_out = pnl.TransferMechanism(size=10,
                                        function=pnl.Logistic,
                                        name='REP_OUT')
        prop_out = pnl.TransferMechanism(size=12,
                                         function=pnl.Logistic,
                                         name='PROP_OUT')
        qual_out = pnl.TransferMechanism(size=13,
                                         function=pnl.Logistic,
                                         name='QUAL_OUT')
        act_out = pnl.TransferMechanism(size=14,
                                        function=pnl.Logistic,
                                        name='ACT_OUT')

        rep_hidden_proc = pnl.Process(pathway=[rep_in, rep_hidden, rel_hidden],
                                      learning=pnl.LEARNING,
                                      name='REP_HIDDEN_PROC')
        rel_hidden_proc = pnl.Process(pathway=[rel_in, rel_hidden],
                                      learning=pnl.LEARNING,
                                      name='REL_HIDDEN_PROC')
        rel_rep_proc = pnl.Process(pathway=[rel_hidden, rep_out],
                                   learning=pnl.LEARNING,
                                   name='REL_REP_PROC')
        rel_prop_proc = pnl.Process(pathway=[rel_hidden, prop_out],
                                    learning=pnl.LEARNING,
                                    name='REL_PROP_PROC')
        rel_qual_proc = pnl.Process(pathway=[rel_hidden, qual_out],
                                    learning=pnl.LEARNING,
                                    name='REL_QUAL_PROC')
        rel_act_proc = pnl.Process(pathway=[rel_hidden, act_out],
                                   learning=pnl.LEARNING,
                                   name='REL_ACT_PROC')

        S = pnl.System(processes=[
            rep_hidden_proc, rel_hidden_proc, rel_rep_proc, rel_prop_proc,
            rel_qual_proc, rel_act_proc
        ])
        # S.show_graph(show_learning=pnl.ALL, show_dimensions=True)
        validate_learning_mechs(S)

        print(S.origin_mechanisms)
        print(S.terminal_mechanisms)
        S.run(
            inputs={
                rel_in:
                [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                rep_in: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
            },
            # targets={rep_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
            #          prop_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
            #          qual_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]],
            #          act_out: [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]}
        )
Example #20
    target=[2, 2],
    name='Color Naming',
    prefs=process_prefs)

word_reading_process = pnl.Process(default_variable=[.5, 3],
                                   pathway=[words, WH_Weights, hidden],
                                   name='Word Reading',
                                   learning=pnl.LEARNING,
                                   target=[3, 3],
                                   prefs=process_prefs)

# color_naming_process.execute()
# word_reading_process.execute()

mySystem = pnl.System(processes=[color_naming_process, word_reading_process],
                      targets=[20, 20],
                      name='Stroop Model',
                      prefs=system_prefs)

mySystem.show_graph(
    # show_learning=True
)


def print_header(system):
    print(
        "\n\n**** TRIAL: ", system.scheduler_processing.times[
            pnl.TimeScale.RUN][pnl.TimeScale.TRIAL])


def show_target():
    print('\nColor Naming\n\tInput: {}\n\tTarget: {}'.format(
Example #21
    modulated_mechanisms=[decision_layer, response_layer],  # Modulate gain of decision & response layers
    name='LC'
)

# Log value of LC
LC.set_log_conditions('value')

# Set initial gain to G + k*initial_w, when the System runs the very first time,
# since the decision layer executes before the LC and hence needs one initial gain value to start with.
for output_state in LC.output_states:
    output_state.value *= G + k * initial_w

LC_process = pnl.Process(pathway=[LC])

# Now, we specify the processes of the System, which in this case is just the decision_process
task = pnl.System(processes=[decision_process, LC_process],
                  reinitialize_mechanisms_when=pnl.Never(),)

# Create Stimulus -----------------------------------------------------------------------------------------------------

# In the paper, each period has 100 time steps, so we will create 11 time periods.
# As described in the paper in figure 3, during the first 3 time periods input to distractor units is fixed to 1.
# Then T1 gets turned on during time period 4 with an input of 1.
# T2 gets turned on with some lag after T1 onset; in this example we turn T2 on with Lag 2 and an input of 1
# Between T1 and T2 and after T2 the distractor unit is on.
# We create one array with 3 numbers, one for each input unit and repeat this array 100 times for one time period
# We do this 11 times. T1 is on for time4, T2 is on for time7 to model Lag3
num_time_steps = 100  # Each stimulus is presented for two units of time which is equivalent to 100 time steps
stimulus_T1 = np.repeat(np.array([[0, 0, 1]]), num_time_steps, axis=0)
stimulus_T2 = np.repeat(np.array([[0, 0, 1]]), num_time_steps, axis=0)
stimulus_T3 = np.repeat(np.array([[0, 0, 1]]), num_time_steps, axis=0)
stimulus_T4 = np.repeat(np.array([[1, 0, 0]]), num_time_steps, axis=0)    # Turn T1 on
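
# A compact way to assemble the full 11-period input described above (an illustrative
# sketch; the [T1, T2, distractor] unit ordering and the exact T2 onset period are
# assumptions based on the comments):
period_rows = [[0, 0, 1]] * 3 + [[1, 0, 0]] + [[0, 0, 1]] * 2 + [[0, 1, 0]] + [[0, 0, 1]] * 4
stimulus_sequence = np.vstack([np.repeat(np.array([row]), num_time_steps, axis=0)
                               for row in period_rows])  # shape: (11 * num_time_steps, 3)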
Example #22
def model_training():
    """
    This creates the plot for figure 5A in the Montague paper. Figure 5A is
    a 'plot of ∂(t) over time for three trials during training (1, 30, and 50).'
    """
    sample = pnl.TransferMechanism(default_variable=np.zeros(60),
                                   name=pnl.SAMPLE)

    action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             function=pnl.Linear(
                                                 slope=1.0, intercept=0.01),
                                             name='Action Selection')

    stimulus_onset = 41
    reward_delivery = 54

    samples = np.zeros(60)
    samples[stimulus_onset:] = 1
    samples = np.tile(samples, (120, 1))

    targets = np.zeros(60)
    targets[reward_delivery] = 1
    targets = np.tile(targets, (120, 1))

    # no reward given every 15 trials to simulate a wrong response
    targets[14][reward_delivery] = 0
    targets[29][reward_delivery] = 0
    targets[44][reward_delivery] = 0
    targets[59][reward_delivery] = 0
    targets[74][reward_delivery] = 0
    targets[89][reward_delivery] = 0

    pnl.MappingProjection(sender=sample,
                          receiver=action_selection,
                          matrix=np.full((60, 60), 0.0))
    learning_projection = pnl.LearningProjection(
        learning_function=pnl.TDLearning(learning_rate=0.3))

    p = pnl.Process(default_variable=np.zeros(60),
                    pathway=[sample, action_selection],
                    learning=learning_projection,
                    size=60,
                    target=np.zeros(60))
    trial = 0

    def print_header():
        nonlocal trial
        print("\n\n*** EPISODE: {}".format(trial))

    def store_delta_vals():
        nonlocal trial
        delta_vals[trial] = s.mechanisms[2].value
        trial += 1

        print('Delta values: \n{0}'.format(s.mechanisms[2].value))

    input_list = {sample: samples}

    target_list = {action_selection: targets}

    s = pnl.System(processes=[p])

    delta_vals = np.zeros((120, 60))

    s.run(num_trials=120,
          inputs=input_list,
          targets=target_list,
          learning=True,
          call_before_trial=print_header,
          call_after_trial=store_delta_vals)
    with plt.style.context('seaborn'):
        plt.plot(delta_vals[0], "-o", label="Trial 1")
        plt.plot(delta_vals[29], "-s", label="Trial 30")
        plt.plot(delta_vals[49], "-o", label="Trial 50")
        plt.title("Montague et. al. (1996) -- Figure 5A")
        plt.xlabel("Timestep")
        plt.ylabel("∂")
        plt.legend()
        plt.xlim(xmin=35)
        plt.xticks()
        plt.show()
Example #23
    pathway=[task_layer, task_CN_weights, colors_hidden_layer],
    name='TASK_CN_PROCESS')

task_WR_process = pnl.Process(
    pathway=[task_layer, task_WR_weights, words_hidden_layer],
    name='TASK_WR_PROCESS')

#   CREATE SYSTEM
my_Stroop = pnl.System(
    processes=[
        colors_process, words_process, task_CN_process, task_WR_process
    ],
    controller=pnl.ControlMechanism,
    monitor_for_control=[response_layer],
    enable_controller=True,
    # objective_mechanism =pnl.ObjectiveMechanism(default_variable=[0.0, 0.0],
    #                                             monitored_output_ports=[response_layer.output_ports[0]],
    #                                             function=pnl.Linear(default_variable= [0.0, 0.0]),
    #                                            name="Objective Mechanism"),
    # monitor_for_control=
    # function=pnl.LinearCombination(operation=pnl.ENERGY))
    # respond_red_process,
    # respond_green_process],
    name='FEEDFORWARD_STROOP_SYSTEM')

# my_Stroop.controller.set_log_conditions('TASK[gain] ControlSignal')
# my_Stroop.controller.loggable_items


#   CREATE THRESHOLD FUNCTION
# first value of DDM's value is DECISION_VARIABLE
def pass_threshold(mech1, thresh):
Example #24
def model_training_response_extinction():
    """
    This creates the plot for Figure 5C in the Montague paper. Figure 5C shows
    'extinction of response to the sensory cue.' The setup is the same as
    Figure 5A, except that reward delivery stops at trial 70
    """
    sample = pnl.TransferMechanism(default_variable=np.zeros(60),
                                   name=pnl.SAMPLE)

    action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             function=pnl.Linear(
                                                 slope=1.0, intercept=1.0),
                                             name='Action Selection')

    stimulus_onset = 42
    reward_delivery = 54

    samples = np.zeros(60)
    samples[stimulus_onset:] = 1
    samples = np.tile(samples, (150, 1))

    targets = np.zeros(60)
    targets[reward_delivery] = 1
    targets = np.tile(targets, (150, 1))

    # stop delivering reward after trial 70
    for i in range(71, 150):
        targets[i][reward_delivery] = 0

    pnl.MappingProjection(sender=sample,
                          receiver=action_selection,
                          matrix=np.zeros((60, 60)))

    learning_projection = pnl.LearningProjection(
        learning_function=pnl.TDLearning(learning_rate=0.3))

    p = pnl.Process(default_variable=np.zeros(60),
                    pathway=[sample, action_selection],
                    learning=learning_projection,
                    size=60,
                    target=np.zeros(60))

    trial = 0

    def print_header():
        nonlocal trial
        print("\n\n*** EPISODE: {}".format(trial))

    input_list = {sample: samples}

    target_list = {action_selection: targets}

    s = pnl.System(processes=[p])

    delta_vals = np.zeros((150, 60))
    trial = 0

    def store_delta_vals():
        nonlocal trial
        delta_vals[trial] = s.mechanisms[2].value
        trial += 1

    s.run(num_trials=150,
          inputs=input_list,
          targets=target_list,
          learning=True,
          call_before_trial=print_header,
          call_after_trial=store_delta_vals)
    with plt.style.context('seaborn'):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x_vals, y_vals = np.meshgrid(np.arange(150), np.arange(40, 60, step=1))
        ax.plot_surface(x_vals, y_vals, delta_vals[:, 40:60].transpose())
        ax.invert_yaxis()
        ax.set_xlabel("Trial")
        ax.set_ylabel("Timestep")
        ax.set_zlabel("∂")
        ax.set_title("Montague et. al. (1996) -- Figure 5C")
        plt.show()
Example #25
rep_hidden_proc = pnl.Process(pathway=[rep_in, rep_hidden, rel_hidden],
                              learning=pnl.LEARNING,
                              name='REP_HIDDEN_PROC')
rel_hidden_proc = pnl.Process(pathway=[rel_in, rel_hidden],
                              learning=pnl.LEARNING,
                              name='REL_HIDDEN_PROC')
rel_rep_proc = pnl.Process(pathway=[rel_hidden, rep_out],
                           learning=pnl.LEARNING,
                           name='REL_REP_PROC')
rel_prop_proc = pnl.Process(pathway=[rel_hidden, prop_out],
                            learning=pnl.LEARNING,
                            name='REL_PROP_PROC')
rel_qual_proc = pnl.Process(pathway=[rel_hidden, qual_out],
                            learning=pnl.LEARNING,
                            name='REL_QUAL_PROC')
rel_act_proc = pnl.Process(pathway=[rel_hidden, act_out],
                           learning=pnl.LEARNING,
                           name='REL_ACT_PROC')

# The System:
S = pnl.System(processes=[
    rep_hidden_proc, rel_hidden_proc, rel_rep_proc, rel_prop_proc,
    rel_qual_proc, rel_act_proc
])

# Shows just the processing network:
# S.show_graph(show_dimensions=True)

# Shows all of the learning components:
S.show_graph(show_learning=pnl.ALL)
Example #26
def model_training_full_experiment():
    """
    This creates the plot for figure 5B in the Montague paper. Figure 5B shows
    the 'entire time course of model responses (trials 1-150).' The setup is
    the same as in Figure 5A, except that training begins at trial 10.
    """
    sample = pnl.TransferMechanism(default_variable=np.zeros(60),
                                   name=pnl.SAMPLE)

    action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             function=pnl.Linear(
                                                 slope=1.0, intercept=1.0),
                                             name='Action Selection')

    stimulus_onset = 41
    reward_delivery = 54

    samples = np.zeros(60)
    samples[stimulus_onset:] = 1
    samples = np.tile(samples, (120, 1))

    targets = np.zeros(60)
    targets[reward_delivery] = 1
    targets = np.tile(targets, (120, 1))

    # training begins at trial 11
    # no reward given every 15 trials to simulate a wrong response
    no_reward_trials = [
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 14, 29, 44, 59, 74, 89, 104, 119
    ]
    for t in no_reward_trials:
        targets[t][reward_delivery] = 0

    pnl.MappingProjection(sender=sample,
                          receiver=action_selection,
                          matrix=np.zeros((60, 60)))

    learning_projection = pnl.LearningProjection(
        learning_function=pnl.TDLearning(learning_rate=0.3))

    p = pnl.Process(default_variable=np.zeros(60),
                    pathway=[sample, action_selection],
                    learning=learning_projection,
                    size=60,
                    target=np.zeros(60))
    trial = 0

    def print_header():
        nonlocal trial
        print("\n\n*** EPISODE: {}".format(trial))

    def store_delta_vals():
        nonlocal trial
        delta_vals[trial] = s.mechanisms[2].value
        trial += 1

    input_list = {sample: samples}

    target_list = {action_selection: targets}

    s = pnl.System(processes=[p])

    delta_vals = np.zeros((120, 60))

    s.run(num_trials=120,
          inputs=input_list,
          targets=target_list,
          learning=True,
          call_before_trial=print_header,
          call_after_trial=store_delta_vals)
    with plt.style.context('seaborn'):
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x_vals, y_vals = np.meshgrid(np.arange(120), np.arange(40, 60, step=1))
        ax.plot_surface(x_vals, y_vals, delta_vals[:, 40:60].transpose())
        ax.invert_yaxis()
        ax.set_xlabel("Trial")
        ax.set_ylabel("Timestep")
        ax.set_zlabel("∂")
        ax.set_title("Montague et. al. (1996) -- Figure 5B")
        plt.show()
Example #27
    name='Flanker1 Automatic Process')

# RewardProcess = pnl.Process(
#     default_variable=[0],
#     pathway=[Reward, test_mech],
#     name='RewardProcess'
# )

# System:
mySystem = pnl.System(
    processes=[
        TargetControlProcess, FlankerControlProcess, TargetAutomaticProcess,
        FlankerAutomaticProcess
    ],
    controller=pnl.EVCControlMechanism,
    enable_controller=True,
    monitor_for_control=[
        # (None, None, np.ones((2,1))),
        # Reward,
        Decision.PROBABILITY_UPPER_THRESHOLD,
        ('OFFSET RT', 1, -1),
    ],
    name='EVC Markus System')

# Show characteristics of system:
mySystem.show()
# mySystem.controller.show()

# Show graph of system
# mySystem.show_graph(show_control=True)# show_control=True,show_dimensions=True)

#Markus: incongruent trial weights:
Example #28
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions

stimulus_layer = pnl.TransferMechanism(size=4)
task_layer = pnl.TransferMechanism(size=4)
hidden_layer = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions.Logistic)
output_layer = pnl.TransferMechanism(size=4, function=psyneulink.core.components.functions.transferfunctions.Logistic)

network_process = pnl.Process(pathway=[stimulus_layer, hidden_layer, output_layer])
hidden_control_process = pnl.Process(pathway=[task_layer, hidden_layer])
output_control_process = pnl.Process(pathway=[task_layer, output_layer])

multitasking_system = pnl.System(processes=[network_process, hidden_control_process, output_control_process])

# WEIGHTS TO COME FROM SEBASTIAN

example_stimulus_inputs = [[1,0,0,1],[1,0,1,0]]
example_task_inputs = [[0,0,0,1],[1,0,0,0]]
example_training_pattern = [[0,0,0,1],[1,0,0,0]]

# RUN THIS TO GET SPACE OF INPUTS ON WHICH TO OPTIMIZE LCAMechanism PARAMS:
inputs_to_LCA = multitasking_system.run(inputs={stimulus_layer:example_stimulus_inputs,
                                                task_layer:example_task_inputs})

# SOME PYTHON ALGORITHM HERE THAT SELECTS THE 2-UNIT SUBVECTOR FROM inputs_to_LCA CORRESPONDING TO THE RELEVANT TASK
# AS INPUT TO optimization_system BELOW, AND THEN RUN THE SYSTEM FOR EACH INPUT, USING EVC TO OPTIMIZE LCAMechanism PARAMETERS
#  FOR EACH, BASED ON CONTROL PARAMETERS AND OBJECTIVE FUNCTION
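
# An illustrative sketch of that selection step (not part of the original script); the
# mapping from task to output units and all names below are assumptions:
import numpy as np

TASK_TO_UNITS = {0: (0, 1), 1: (2, 3)}   # hypothetical: which two output units encode each task

def select_task_subvector(output_vector, task_index):
    i, j = TASK_TO_UNITS[task_index]     # units belonging to the relevant task
    return np.array([output_vector[i], output_vector[j]])   # 2-unit input for the LCA layer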

input_layer = pnl.TransferMechanism(size=2)
decision_layer = pnl.LCAMechanism(size=2,
                                  # INCLUDE TERMINATION CONDITION USING THRESHOLD = ControlSignal)
Example #29
    prefs=process_prefs,
    name='Flanker1 Automatic Process')

RewardProcess = pnl.Process(default_variable=[0],
                            pathway=[Reward],
                            prefs=process_prefs,
                            name='RewardProcess')

# System:
mySystem = pnl.System(
    processes=[
        TargetControlProcess, FlankerControlProcess, TargetAutomaticProcess,
        FlankerAutomaticProcess, RewardProcess
    ],
    controller=pnl.EVCControlMechanism(name='Task Controller', ),
    enable_controller=True,
    monitor_for_control=[
        Reward,
        Decision.PROBABILITY_UPPER_THRESHOLD,
        ('OFFSET RT', 1, -1),
    ],
    # monitor_for_control=[Reward, DDM_PROBABILITY_UPPER_THRESHOLD, (DDM_RESPONSE_TIME, -1, 1)],
    name='EVC Gratton System')

# Show characteristics of system:
mySystem.show()
mySystem.controller.show()

# Show graph of system (with control components)
# mySystem.show_graph()
# mySystem.show_graph(show_dimensions=pnl.ALL, show_projection_labels=True)
# mySystem.show_graph(show_control=True, show_projection_labels=False)
Example #30
    def test_log_array_with_scheduler(self):
        T1 = pnl.TransferMechanism(name='log_test_T1',
                                   integrator_mode=True,
                                   integration_rate=0.5)
        T2 = pnl.TransferMechanism(name='log_test_T2',
                                   function=pnl.Linear(slope=6.0))
        PS = pnl.Process(name='log_test_PS', pathway=[T1, T2])
        SYS = pnl.System(name='log_test_SYS', processes=[PS])

        def pass_threshold(mech, thresh):
            results = mech.output_states[0].value
            for val in results:
                if abs(val) >= thresh:
                    return True
            return False

        terminate_trial = {
            pnl.TimeScale.TRIAL: pnl.While(pass_threshold, T2, 5.0)
        }

        T1.set_log_conditions(pnl.VALUE)
        T1.set_log_conditions(pnl.SLOPE)
        T1.set_log_conditions(pnl.RESULTS)
        T2.set_log_conditions(pnl.VALUE)
        T2.set_log_conditions(pnl.SLOPE)

        SYS.run(inputs={T1: [[1.0]]}, termination_processing=terminate_trial)

        log_array_T1 = T1.log.nparray(entries=['RESULTS', 'slope', 'value'])
        log_array_T2 = T2.log.nparray(entries=['value', 'slope'])

        # Check values
        run_results = [["Run"], [0], [0], [0]]
        trial_results = [["Trial"], [0], [0], [0]]
        pass_results = [["Pass"], [0], [1], [2]]
        time_step_results = [["Time_step"], [0], [0], [0]]
        results_results = ["RESULTS", [0.5], [0.75], [0.875]]
        slope_results = ["slope", [1], [1], [1]]
        value_results = ["value", [[0.5]], [[0.75]], [[0.875]]]
        for i in range(4):
            assert log_array_T1[0][i] == run_results[i]
            assert log_array_T1[1][i] == trial_results[i]
            assert log_array_T1[2][i] == pass_results[i]
            assert log_array_T1[3][i] == time_step_results[i]
            assert log_array_T1[4][i] == results_results[i]
            assert log_array_T1[5][i] == slope_results[i]
            assert log_array_T1[6][i] == value_results[i]

        # Check values
        run_results = [["Run"], [0], [0], [0]]
        trial_results = [["Trial"], [0], [0], [0]]
        pass_results = [["Pass"], [0], [1], [2]]
        time_step_results = [["Time_step"], [1], [1], [1]]
        value_results = ["value", [[3]], [[4.5]], [[5.25]]]
        slope_results = ["slope", [6], [6], [6]]
        for i in range(4):
            assert log_array_T2[0][i] == run_results[i]
            assert log_array_T2[1][i] == trial_results[i]
            assert log_array_T2[2][i] == pass_results[i]
            assert log_array_T2[3][i] == time_step_results[i]
            assert log_array_T2[4][i] == value_results[i]
            assert log_array_T2[5][i] == slope_results[i]