response_color_weights = pnl.MappingProjection(
    matrix=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]))

response_word_weights = pnl.MappingProjection(
    matrix=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]))

color_response_weights = pnl.MappingProjection(
    matrix=np.array([[1.5, 0.0], [0.0, 1.5], [0.0, 0.0]]))
word_response_weights = pnl.MappingProjection(
    matrix=np.array([[2.5, 0.0], [0.0, 2.5], [0.0, 0.0]]))
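
# Illustrative sketch (not part of the example above; names are hypothetical):
# PsyNeuLink projection matrices are oriented with rows indexing the sender's
# units and columns indexing the receiver's units, so a 3-unit layer projecting
# to a 2-unit layer takes a 3x2 matrix, as in the weights above.
import numpy as np
import psyneulink as pnl

demo_sender = pnl.TransferMechanism(name='DEMO_SENDER', default_variable=[0, 0, 0])
demo_receiver = pnl.TransferMechanism(name='DEMO_RECEIVER', default_variable=[0, 0])

demo_weights = pnl.MappingProjection(
    sender=demo_sender,
    receiver=demo_receiver,
    matrix=np.array([[1.5, 0.0],
                     [0.0, 1.5],
                     [0.0, 0.0]]))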
#
# Create pathways -----------------------------------------------------------------------------------------------------
color_response_process_1 = pnl.Pathway(pathway=[
    colors_input_layer,
    color_input_weights,
    colors_hidden_layer,
    color_response_weights,
    response_layer,
],
                                       name='COLORS_RESPONSE_PROCESS_1')

color_response_process_2 = pnl.Pathway(
    pathway=[response_layer, response_color_weights, colors_hidden_layer],
    name='COLORS_RESPONSE_PROCESS_2')

word_response_process_1 = pnl.Pathway(pathway=[
    words_input_layer, word_input_weights, words_hidden_layer,
    word_response_weights, response_layer
],
                                      name='WORDS_RESPONSE_PROCESS_1')

word_response_process_2 = pnl.Pathway(
    pathway=[response_layer, response_word_weights, words_hidden_layer],
    name='WORDS_RESPONSE_PROCESS_2')
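
# Illustrative sketch (not part of the example above; names are hypothetical):
# pathways like the ones defined here are typically passed to a Composition,
# which merges pathways that share mechanisms (both sketched pathways below
# converge on the same response layer) and can then be run on inputs.
import numpy as np
import psyneulink as pnl

demo_colors = pnl.TransferMechanism(name='DEMO_COLORS', default_variable=[0, 0])
demo_words = pnl.TransferMechanism(name='DEMO_WORDS', default_variable=[0, 0])
demo_response = pnl.TransferMechanism(name='DEMO_RESPONSE', default_variable=[0, 0])

demo_color_weights = pnl.MappingProjection(matrix=np.array([[1.5, 0.0], [0.0, 1.5]]))
demo_word_weights = pnl.MappingProjection(matrix=np.array([[2.5, 0.0], [0.0, 2.5]]))

demo_color_pathway = pnl.Pathway(
    pathway=[demo_colors, demo_color_weights, demo_response],
    name='DEMO_COLOR_PATHWAY')
demo_word_pathway = pnl.Pathway(
    pathway=[demo_words, demo_word_weights, demo_response],
    name='DEMO_WORD_PATHWAY')

demo_comp = pnl.Composition(
    pathways=[demo_color_pathway, demo_word_pathway],
    name='DEMO_STROOP')
print(demo_comp.run(inputs={demo_colors: [[1.0, 0.0]], demo_words: [[0.0, 1.0]]}))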
Example #2
Output_Weights = pnl.MappingProjection(
    name='Output Weights',
    sender=Hidden_Layer_2,
    receiver=Output_Layer,
    matrix=Output_Weights_matrix
)

z = pnl.Pathway(
    # default_variable=[0, 0],
    pathway=[
        Input_Layer,
        # The following reference to Input_Weights is needed to use it in the pathway
        # since its sender and receiver args are not specified in its
        # declaration above
        Input_Weights,
        Hidden_Layer_1,
        # No projection specification is needed here since the sender arg for Middle_Weights
        #    is Hidden_Layer_1 and its receiver arg is Hidden_Layer_2
        # Middle_Weights,
        Hidden_Layer_2,
        # Output_Weights does not need to be listed for the same reason as Middle_Weights
        # If Middle_Weights and/or Output_Weights is not declared above, then the process
        #    will assign a default for the missing projection
        # Output_Weights,
        Output_Layer
    ]
)
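
# Illustrative sketch (not part of the example above; names are hypothetical):
# as the comments in the pathway above note, adjacent mechanisms listed without
# an intervening projection are connected by a default MappingProjection
# (typically an identity matrix when the layer sizes match).
import psyneulink as pnl

demo_in = pnl.TransferMechanism(name='DEMO_IN', default_variable=[0, 0])
demo_hidden = pnl.TransferMechanism(name='DEMO_HIDDEN', default_variable=[0, 0])
demo_out = pnl.TransferMechanism(name='DEMO_OUT', default_variable=[0, 0])

# No projections are listed, so defaults are created for each adjacent pair.
demo_defaults_pathway = pnl.Pathway(
    pathway=[demo_in, demo_hidden, demo_out],
    name='DEMO_DEFAULT_PROJECTIONS')
demo_comp = pnl.Composition(pathways=[demo_defaults_pathway])
print(demo_comp.run(inputs={demo_in: [[1.0, 0.5]]}))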

g = pnl.Pathway(
    pathway=[Gating_Mechanism]
)

Example #3

# To send a control signal from the task demand layer to the response layer,
# set the matrix to -1 so that the projection reduces response layer activation.
# The sender of the projection is the second OutputPort of the task demand layer;
# the receiver is the response layer.
task_conflict_to_response_weights = pnl.MappingProjection(
    matrix=np.array([[-1.0, -1.0]]),
    sender=task_demand_layer.output_ports[1],
    receiver=response_layer)

# Create pathways -----------------------------------------------------------------------------------------------------
color_response_process = pnl.Pathway(pathway=[
    colors_input_layer, color_input_weights, color_feature_layer,
    color_response_weights, response_layer
],
                                     name='COLORS_RESPONSE_PROCESS')

word_response_process = pnl.Pathway(pathway=[
    words_input_layer, word_input_weights, word_feature_layer,
    word_response_weights, response_layer
],
                                    name='WORDS_RESPONSE_PROCESS')

task_color_process = pnl.Pathway(pathway=[
    task_input_layer, task_input_weights, task_demand_layer,
    task_color_weights, color_feature_layer, color_task_weights,
    task_demand_layer
],
                                 name='TASK_COLOR_PROCESS')
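
# Illustrative sketch (not part of the example above; names are hypothetical):
# in TASK_COLOR_PROCESS, task_demand_layer appears at both ends of the pathway,
# which creates a feedback loop between the task demand and color feature
# layers; a Composition built from such a pathway registers the two mechanisms
# as a cycle.
import numpy as np
import psyneulink as pnl

demo_demand = pnl.TransferMechanism(name='DEMO_DEMAND', default_variable=[0, 0])
demo_feature = pnl.TransferMechanism(name='DEMO_FEATURE', default_variable=[0, 0])

# Starting and ending on the same mechanism yields the loop
# DEMO_DEMAND -> DEMO_FEATURE -> DEMO_DEMAND.
demo_loop_pathway = pnl.Pathway(
    pathway=[demo_demand, np.eye(2), demo_feature, np.eye(2), demo_demand],
    name='DEMO_FEEDBACK_LOOP')
demo_comp = pnl.Composition(pathways=[demo_loop_pathway], name='DEMO_LOOP_COMP')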
Example #4
response_layer = pnl.LCAMechanism(
    #  Competition param does not apply because there is only one unit
    function=pnl.Logistic(x_0=b_response),
    noise=pnl.NormalDist(standard_deviation=SD),
    integrator_mode=True,
    name='RESPONSE')

# Connect mechanisms --------------------------------------------------------------------------------------------------

# Weight matrix from Input Layer --> Decision Layer
input_weights = np.array([[w_XiIi, w_XiIj], [w_XiIj, w_XiIi]])

# Weight matrix from Decision Layer --> Response Layer
output_weights = np.array([[w_X3X1], [0.00]])

decision_pathway = pnl.Pathway(pathway=[
    input_layer, input_weights, decision_layer, output_weights, response_layer
],
                               name='DECISION PROCESS')
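
# Illustrative sketch (not part of the example above; names are hypothetical):
# a bare numpy array placed between two mechanisms in a pathway list, as with
# input_weights and output_weights above, is used as the matrix of the
# MappingProjection that connects them.
import numpy as np
import psyneulink as pnl

demo_input = pnl.TransferMechanism(name='DEMO_INPUT', default_variable=[0, 0])
demo_decision = pnl.TransferMechanism(name='DEMO_DECISION', default_variable=[0, 0])
demo_resp = pnl.TransferMechanism(name='DEMO_RESP', default_variable=[0])

demo_decision_pathway = pnl.Pathway(
    pathway=[demo_input,
             np.array([[1.0, -0.5], [-0.5, 1.0]]),  # input -> decision weights
             demo_decision,
             np.array([[1.0], [0.0]]),              # decision -> response weights
             demo_resp],
    name='DEMO_DECISION_PROCESS')
demo_comp = pnl.Composition(pathways=[demo_decision_pathway])
print(demo_comp.run(inputs={demo_input: [[0.6, 0.4]]}))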

# Monitor decision layer in order to modulate gain --------------------------------------------------------------------

LC = pnl.LCControlMechanism(
    integration_method="EULER",
    threshold_FitzHughNagumo=a,
    uncorrelated_activity_FitzHughNagumo=d,
    base_level_gain=G,
    scaling_factor_gain=k,
    time_step_size_FitzHughNagumo=dt,
    mode_FitzHughNagumo=C,
    time_constant_v_FitzHughNagumo=tau_v,
    time_constant_w_FitzHughNagumo=tau_u,
    a_v_FitzHughNagumo=-1.0,
Example #5
                                name="map_h2_is"
                                )

map_h2_has = pnl.MappingProjection(matrix=np.random.rand(16,len(has_list)),
                                name="map_h2_has"
                                )

map_h2_can = pnl.MappingProjection(matrix=np.random.rand(16,len(can_list)),
                                name="map_h2_can"
                                )

#################### THIS IS THE PART WHERE WE START BUILDING OUT ALL THE PROCESSES ########################

p11 = pnl.Pathway(pathway=[nouns_in,
                           map_nouns_h1,
                           h1,
                           map_h1_h2,
                           h2])

p12 = pnl.Pathway(pathway=[rels_in,
                           map_rel_h2,
                           h2])

p21 = pnl.Pathway(pathway=[h2,
                           map_h2_I,
                           out_sig_I])

p22 = pnl.Pathway(pathway=[h2,
                           map_h2_is,
                           out_sig_is])
Example #6
action_selection = pnl.TransferMechanism(
    function=psyneulink.core.components.functions.transferfunctions.SoftMax(
        output=pnl.ALL, gain=1.0),
    output_ports={
        pnl.NAME:
        'SELECTED ACTION',
        pnl.VARIABLE: [(pnl.INPUT_PORT_VARIABLES, 0), (pnl.OWNER_VALUE, 0)],
        pnl.FUNCTION:
        psyneulink.core.components.functions.selectionfunctions.OneHot(
            mode=pnl.PROB).function
    },
    # output_ports={pnl.NAME: "SOFT_MAX",
    #                pnl.VARIABLE: (pnl.OWNER_VALUE,0),
    #                pnl.FUNCTION: pnl.SoftMax(output=pnl.PROB,gain=1.0)},
    name='Action Selection')

p = pnl.Pathway(pathway=([input_layer, action_selection], pnl.Reinforcement))

actions = ['left', 'middle', 'right']
reward_values = [10, 0, 0]
first_reward = 0

# Must initialize reward (won't be used, but needed for declaration of lambda function)
action_selection.output_port.value = [0, 0, 1]
# Get reward value for selected action


def reward(context=None):
    """Return the reward associated with the selected action"""
    return [
        reward_values[int(
            np.nonzero(action_selection.output_port.value)[0])]
    ]
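
# Illustrative sketch (not part of the example above; names are hypothetical):
# the (pathway, LearningFunction) tuple used for p above can also be passed
# directly to a Composition's pathways argument to create a
# reinforcement-learning pathway.
import psyneulink as pnl

demo_stimulus = pnl.TransferMechanism(name='DEMO_STIMULUS', default_variable=[0, 0, 0])
demo_action = pnl.TransferMechanism(name='DEMO_ACTION', default_variable=[0, 0, 0])

demo_rl_comp = pnl.Composition(
    pathways=[([demo_stimulus, demo_action], pnl.Reinforcement)],
    name='DEMO_RL_COMP')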
Example #7
CH_Weights = pnl.MappingProjection(
    name='Color-Hidden Weights',
    matrix=CH_Weights_matrix
)
WH_Weights = pnl.MappingProjection(
    name='Word-Hidden Weights',
    matrix=WH_Weights_matrix
)
HO_Weights = pnl.MappingProjection(
    name='Hidden-Output Weights',
    matrix=HO_Weights_matrix
)

color_naming_pathway = pnl.Pathway(
    pathway=[colors, CH_Weights, hidden, HO_Weights, response],
    name='Color Naming',
    # prefs=pathway_prefs
)

word_reading_pathway = pnl.Pathway(
    pathway=[words, WH_Weights, hidden],
    name='Word Reading',
    # prefs=pathway_prefs
)

# color_naming_pathway.execute()
# word_reading_pathway.execute()

comp = pnl.Composition(pathways=[(color_naming_pathway, pnl.BackPropagation),
                                 (word_reading_pathway, pnl.BackPropagation)],
                       name='Stroop Model',
Example #8
import psyneulink as pnl

a = pnl.TransferMechanism()
b = pnl.TransferMechanism()
c = pnl.TransferMechanism()

p = pnl.Pathway(pathway=[a, b, c])

comp = pnl.Composition()
comp.add_backpropagation_learning_pathway(pathway=p)
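
# Illustrative sketch of training such a Composition (assumptions: the learning
# pathway returned by add_backpropagation_learning_pathway exposes a `target`
# mechanism that receives the training signal, and learn() accepts the usual
# inputs dict; consult the PsyNeuLink documentation for the exact API).
import psyneulink as pnl

demo_a = pnl.TransferMechanism(name='DEMO_A')
demo_b = pnl.TransferMechanism(name='DEMO_B')
demo_c = pnl.TransferMechanism(name='DEMO_C')

demo_comp = pnl.Composition()
demo_backprop_pathway = demo_comp.add_backpropagation_learning_pathway(
    pathway=[demo_a, demo_b, demo_c])

# Inputs go to the origin mechanism; targets go to the learning pathway's
# TARGET mechanism.
demo_comp.learn(
    inputs={demo_a: [[1.0]], demo_backprop_pathway.target: [[0.5]]},
    num_trials=5)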