Example 1
    def test_2_item_tuple_from_parameter_state_to_control_signals(self):
        # A 2-item (value, ControlMechanism or ControlSignal) tuple assigns a ControlProjection
        # from the specified signal to the parameter's ParameterState.
        C = pnl.ControlMechanism(control_signals=['a', 'b'])
        D = pnl.DDM(name='D3',
                    function=pnl.BogaczEtAl(drift_rate=(3, C),
                                            threshold=(2, C.control_signals['b'])))
        assert D.parameter_states[pnl.DRIFT_RATE].mod_afferents[0].sender == C.control_signals[0]
        assert D.parameter_states[pnl.THRESHOLD].mod_afferents[0].sender == C.control_signals[1]
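Outside of the test suite, the same 2-item tuple specification can be checked by inspecting the resulting ControlProjections directly. A minimal sketch, assuming the same PsyNeuLink API used in the test above:

import psyneulink as pnl

C = pnl.ControlMechanism(control_signals=['a', 'b'])
D = pnl.DDM(name='D3',
            function=pnl.BogaczEtAl(drift_rate=(3, C),
                                    threshold=(2, C.control_signals['b'])))

# Each modulated parameter now receives a ControlProjection from the matching signal:
# 'a' (C's first signal) for drift_rate, 'b' for threshold.
for param in (pnl.DRIFT_RATE, pnl.THRESHOLD):
    projection = D.parameter_states[param].mod_afferents[0]
    print(param, '<-', projection.sender.name)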
Example 2
# (Snippet begins mid-way through a Mechanism constructor; the ControlProjection below is
#  assigned to one of the Mechanism's function parameters, with ALLOCATION_SAMPLES drawn
#  from signalSearchRange.)
        pnl.ControlProjection(
            function=pnl.Linear,
            control_signal_params={pnl.ALLOCATION_SAMPLES: signalSearchRange})
    )),
    prefs=mechanism_prefs)

# Processing Mechanism (Automatic)
Automatic_Component = pnl.TransferMechanism(name='Automatic Component',
                                            function=pnl.Linear(slope=1.0),
                                            prefs=mechanism_prefs)

# Decision Mechanisms
Decision = pnl.DDM(
    function=pnl.BogaczEtAl(drift_rate=1.0,
                            threshold=0.2645,
                            noise=0.5,
                            starting_point=0,
                            t0=0.15),
    prefs=mechanism_prefs,
    name='Decision',
    output_states=[
        pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
        pnl.PROBABILITY_UPPER_THRESHOLD, {
            pnl.NAME: 'OFFSET RT',
            pnl.VARIABLE: (pnl.OWNER_VALUE, 2),
            pnl.FUNCTION: pnl.Linear(0, slope=0.3, intercept=1)
        }
    ],
)

# Outcome Mechanisms:
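The 'OFFSET RT' entry in the Decision constructor above defines a custom OutputState: element 2 of the Decision Mechanism's value (per (pnl.OWNER_VALUE, 2)) is passed through Linear(slope=0.3, intercept=1). A purely illustrative arithmetic sketch of what that state reports (the input value is an assumption, not an output of this script):

x = 0.45                 # hypothetical value of Decision.value[2]
offset_rt = 0.3 * x + 1  # slope * x + intercept, as computed by the 'OFFSET RT' OutputState
print(offset_rt)         # 1.135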
Example 3
Flanker_Rep.set_log_conditions('value')   # log Flanker_Rep's value
Flanker_Rep.set_log_conditions('slope')   # log Flanker_Rep's slope
print(Flanker_Rep.loggable_items)         # items available for logging

Target_Rep.log.LogCondition = 2

# Processing Mechanism (Automatic)
Automatic_Component = pnl.TransferMechanism(name='Automatic Component', function=pnl.Linear)
print(Automatic_Component.loggable_items)  # items available for logging
Automatic_Component.set_log_conditions('value')

# Decision Mechanisms
Decision = pnl.DDM(
    function=pnl.BogaczEtAl(
        drift_rate=1.0,
        threshold=0.2645,
        # noise=(0.5),
        starting_point=0,
        t0=0.15
    ),
    name='Decision',
    output_states=[
        pnl.DECISION_VARIABLE,
        pnl.RESPONSE_TIME,
        pnl.PROBABILITY_UPPER_THRESHOLD,
        {
            pnl.NAME: 'OFFSET_RT',
            pnl.VARIABLE: (pnl.OWNER_VALUE, 1),
            pnl.FUNCTION: pnl.Linear(0, slope=0.0, intercept=1).function
        }
    ],
)  # drift_rate=(1.0), threshold=(0.2645), noise=(0.5), starting_point=(0), t0=0.15
Decision.set_log_conditions('DECISION_VARIABLE')
Decision.set_log_conditions('value')
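Once the model has been run, the entries configured above can be retrieved from each Mechanism's Log. A minimal sketch, assuming PsyNeuLink's standard Log interface (print_entries() and nparray()):

# Assumes the Mechanisms above have been executed as part of a Process/System run.
Decision.log.print_entries()             # human-readable dump of the logged 'value' and 'DECISION_VARIABLE'
flanker_log = Flanker_Rep.log.nparray()  # logged 'value' and 'slope' entries as an array
print(flanker_log)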
Example 4
# CONSTRUCTION:

input_layer = pnl.TransferMechanism(size=2, name='Input Layer')

# Takes sum of input layer elements as external component of drift rate
# Notes:
#    - drift_rate parameter in constructor for DDM is the "internally modulated" component of the drift_rate;
#    - arguments to DDM's function (BogaczEtAl) are specified as CONTROL, so that their values will be determined
#        by the EVCControlMechanism of the System to which the action_selection Mechanism is assigned (see below)
#    - the input_format argument specifies that the input to the DDM should be a one-hot encoded two-element array
#    - the output_states argument specifies use of the SELECTED_INPUT_ARRAY OutputState, which encodes the
#        response in the same format as the ARRAY input_format.
action_selection = pnl.DDM(input_format=pnl.ARRAY,
                           function=pnl.BogaczEtAl(
                               drift_rate=pnl.CONTROL,
                               threshold=pnl.CONTROL,
                               starting_point=pnl.CONTROL,
                               noise=pnl.CONTROL,
                           ),
                           output_states=[pnl.SELECTED_INPUT_ARRAY],
                           name='DDM')
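As noted above, with input_format=pnl.ARRAY the DDM expects a one-hot two-element input and reports the response in the same format. A purely illustrative sketch of that encoding (not part of the original script):

left_stimulus = [1, 0]   # one-hot input selecting the first (left) element
right_stimulus = [0, 1]  # one-hot input selecting the second (right) element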

# Construct Process
# Notes:
#    The (commented-out) np.array specifies an explicit matrix for the MappingProjection from input_layer to
#        action_selection, which ensures that the left element of the input favors the left action (positive value
#        of the DDM decision variable) and the right element favors the right action (negative value); the pathway
#        as written uses pnl.IDENTITY_MATRIX instead.
#    The learning argument specifies Reinforcement as the learning function for the Projection
p = pnl.Process(
    default_variable=[0, 0],
    # pathway=[input_layer, np.array([[1],[-1]]), action_selection],
    pathway=[input_layer, pnl.IDENTITY_MATRIX, action_selection],
    # Close the constructor: Reinforcement as the learning function (per the note above);
    # the learning rate is left at its default and the target value here is illustrative.
    learning=pnl.LearningProjection(learning_function=pnl.Reinforcement()),
    target=0
)
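The notes above say the CONTROL-specified DDM parameters are resolved by the EVCControlMechanism of the System to which action_selection is assigned. A minimal sketch of that assignment, built from the Process p above; the System arguments here are assumptions, not taken from the original script:

s = pnl.System(
    processes=[p],
    controller=pnl.EVCControlMechanism,  # supplies ControlSignals for the pnl.CONTROL-specified parameters
    enable_controller=True,
)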
Example 5
import numpy as np
import psyneulink as pnl

myInputLayer = pnl.TransferMechanism(name='Input Layer',
                                     function=pnl.Linear(),
                                     default_variable=[0, 0])

myHiddenLayer = pnl.TransferMechanism(name='Hidden Layer 1',
                                      function=pnl.Logistic(gain=1.0, bias=0),
                                      default_variable=np.zeros((5, )))

myDDM = pnl.DDM(name='My_DDM',
                function=pnl.BogaczEtAl(drift_rate=0.5,
                                        threshold=1,
                                        starting_point=0.0))

myProcess = pnl.Process(name='Neural Network DDM Process',
                        default_variable=[0, 0],
                        pathway=[
                            myInputLayer,
                            pnl.get_matrix(pnl.RANDOM_CONNECTIVITY_MATRIX, 2, 5),
                            myHiddenLayer,
                            pnl.FULL_CONNECTIVITY_MATRIX,
                            myDDM
                        ])

myProcess.reportOutputPref = True
myInputLayer.reportOutputPref = True
myHiddenLayer.reportOutputPref = True
myDDM.reportOutputPref = pnl.PreferenceEntry(True,
                                             pnl.PreferenceLevel.INSTANCE)
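With the reportOutputPref settings above, executing the Process prints each Mechanism's output as it runs. A minimal sketch of a single execution; the input values are illustrative only:

result = myProcess.execute([0.5, 0.25])  # 2-element input matching myInputLayer's default_variable
print(result)                            # the DDM's output values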