Example #1
import psyneulink as pnl

comp = pnl.Composition(name="comp")

A = pnl.TransferMechanism(
    name="A",
    function=pnl.Linear(intercept=2.0, slope=5.0, default_variable=[[0]]),
    termination_measure=pnl.Distance(
        metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
    ),
)
B = pnl.TransferMechanism(
    name="B",
    function=pnl.Logistic(default_variable=[[0]]),
    termination_measure=pnl.Distance(
        metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
    ),
)
C = pnl.RecurrentTransferMechanism(
    name="C",
    function=pnl.Linear(default_variable=[[0]]),
    initial_value=[[0]],
    output_ports=["RESULTS"],
    termination_measure=pnl.Distance(
        metric=pnl.MAX_ABS_DIFF, default_variable=[[[0]], [[0]]]
    ),
)
D = pnl.IntegratorMechanism(
    name="D",
    function=pnl.SimpleIntegrator(initializer=[[0]], default_variable=[[0]]),
)
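
# The excerpt above ends after the mechanisms are defined. A minimal sketch of
# how they could be wired into the Composition and executed; the linear
# A -> B -> C -> D pathway and the input value are assumptions, not taken from
# the original script:
comp.add_linear_processing_pathway([A, B, C, D])  # assumed ordering
comp.run(inputs={A: [[1.0]]})                      # assumed input to A
print(comp.results)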
Example #2
    optimizer=pnl.OptimizationControlMechanism(
        agent_rep=Umemoto_comp,
        state_features=[
            Target_Stim.input_port, Distractor_Stim.input_port,
            Reward.input_port
        ],
        state_feature_function=pnl.AdaptiveIntegrator(rate=1.0),
        objective_mechanism=pnl.ObjectiveMechanism(monitor_for_control=[
            Reward,
            (Decision.output_ports[pnl.PROBABILITY_UPPER_THRESHOLD], 1, -1)
        ], ),
        function=pnl.GridSearch(save_values=True),
        control_signals=[
            Target_Rep_Control_Signal, Distractor_Rep_Control_Signal
        ],
        compute_reconfiguration_cost=pnl.Distance(
            metric=pnl.EUCLIDEAN)  # 0.8046),
    ))
Umemoto_comp.enable_model_based_optimizer = True
# Umemoto_comp.model_based_optimizer.set_log_conditions('value')

print(Target_Rep.loggable_items)
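# loggable_items only lists what can be logged; to actually record a parameter
# during execution, PsyNeuLink's logging API can be used (a sketch; logging
# 'value' here is an illustrative choice, not part of the original script):
Target_Rep.set_log_conditions('value')
# After running, the recorded entries can be retrieved with
# Target_Rep.log.print_entries() or Target_Rep.log.nparray().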
nTrials = 3
targetFeatures = [w_t]
flankerFeatures_inc = [w_d]
reward = [0]  #[100]

targetInputList = targetFeatures
flankerInputList = flankerFeatures_inc
rewardList = reward

stim_list_dict = {
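# The stimulus dictionary is cut off in this excerpt. A hypothetical completion
# (the mechanism-to-list mapping below is an assumption, not taken from the
# original script), followed by a run call, might look like:
#
#     stim_list_dict = {
#         Target_Stim: targetInputList,
#         Distractor_Stim: flankerInputList,
#         Reward: rewardList,
#     }
#     Umemoto_comp.run(inputs=stim_list_dict, num_trials=nTrials)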
Example #3
#but we do need to specify the size, which will be the size of our input array.

input_layer = pnl.TransferMechanism(size=3, name='INPUT LAYER')

#Next, we specify our output layer. This is where we do our sigmoid transformation, by simply applying the Logistic function.
#The size we specify for this layer is the number of output nodes we want. In this case, we want the network to return a scalar
#for each example (either a 1 or a 0), so our size is 1.

output_layer = pnl.TransferMechanism(size=1,
                                     function=pnl.Logistic,
                                     name='OUTPUT LAYER')

#Now, we put them together into a process.
#Notice that we did not need to specify a weighting matrix. One will automatically be generated by PsyNeuLink when we create our
#process.
# JDC ADDED:
# Normally, for learning to occur in a process, we would just specify that learning=pnl.ENABLED.
# However, if we want to specify a particular learning function or error_function to be used, then we must
# do so by constructing a default LearningProjection and giving it the parameters we want.  In this
# case it is the error_function, which we will set to CROSS_ENTROPY (using PsyNeuLink's Distance function):

net2l = pnl.Process(pathway=[input_layer, output_layer],
                    learning=pnl.LearningProjection(
                        error_function=pnl.Distance(metric=pnl.CROSS_ENTROPY)))

#The pathway argument specifies in which order to execute the layers. This way, the output of one will be mapped to the input of
#the next.
#To run the process, we will put it into a system.

sys2l = pnl.System(processes=[net2l], learning_rate=4)
sys2l.show_graph(show_learning=pnl.ALL)
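
# To actually train the network, the System can be run with inputs and targets.
# A minimal sketch using the older System.run API this example relies on; the
# two training patterns below are assumptions, not taken from the original
# tutorial:
input_set = {input_layer: [[0, 0, 1], [1, 1, 0]]}   # assumed example inputs
target_set = {output_layer: [[1], [0]]}             # assumed example targets
sys2l.run(inputs=input_set, targets=target_set, num_trials=10)
print(output_layer.value)  # most recent output after training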