Code example #1
0
    def test_input_state_and_assigned_projection_names(self):
        """InputStates and their afferent MappingProjections get default names."""
        src = pnl.TransferMechanism(name='T1')
        dst = pnl.TransferMechanism(name='T2', input_states=[src])
        owned_state = pnl.InputState(owner=dst)
        orphan_state = pnl.InputState(projections=[src])
        # A state constructed without an owner sits in deferred-init mode
        # and carries a placeholder name until it is attached.
        assert orphan_state.name == 'Deferred Init InputState'
        dst.add_states([orphan_state])
        # Once attached, states are numbered in order of addition.
        assert owned_state.name == 'InputState-1'
        assert orphan_state.name == 'InputState-2'
        # Projection names encode sender[OutputState] -> receiver[InputState].
        proj_first = dst.input_states[0].path_afferents[0]
        assert proj_first.name == \
               'MappingProjection from T1[RESULTS] to T2[InputState-0]'
        proj_last = dst.input_states[2].path_afferents[0]
        assert proj_last.name == \
               'MappingProjection from T1[RESULTS] to T2[InputState-2]'

        # ------------------------------------------------------------------------------------------------
        # TEST 10
        # Test that OutputStates are properly named

        out_mech = pnl.TransferMechanism(output_states=['MY OUTPUT_STATE', [0]])
        assert out_mech.output_states[0].name == 'MY OUTPUT_STATE'
        assert out_mech.output_states[1].name == 'OutputState-0'
        owned_out = pnl.OutputState(owner=out_mech)
        assert out_mech.output_states[2].name == 'OutputState-1'
        detached_out = pnl.OutputState()
        out_mech.add_states([detached_out])
        assert out_mech.output_states[3].name == 'OutputState-2'
Code example #2
0
 def test_internal_only(self):
     """An internal-only InputState is excluded from external input values."""
     state_specs = [
         'EXTERNAL',
         pnl.InputState(name='INTERNAL_ONLY', internal_only=True),
     ]
     mech = pnl.TransferMechanism(input_states=state_specs)
     # Both states contribute to the mechanism's input...
     assert mech.input_values == [[0.], [0.]]
     # ...but only the non-internal one counts as external input.
     assert mech.external_input_values == [[0.]]
Code example #3
0
 def test_combine_param_conflicting_fct_operation_spec(self):
     """combine=PRODUCT must be rejected when operation=SUM is explicit."""
     with pytest.raises(pnl.InputStateError) as err:
         combine_fn = pnl.LinearCombination(operation=pnl.SUM)
         pnl.TransferMechanism(
             input_states=pnl.InputState(function=combine_fn,
                                         combine=pnl.PRODUCT))
     assert "Specification of 'combine' argument (PRODUCT) conflicts with specification of 'operation' (SUM) " \
            "for LinearCombination in 'function' argument for InputState" in str(err.value)
Code example #4
0
 def test_combine_param_conflicting_fct_class_spec(self):
     """combine=PRODUCT must be rejected with a non-LinearCombination class."""
     linear_cls = (psyneulink.core.components.functions.
                   transferfunctions.Linear)
     with pytest.raises(pnl.InputStateError) as err:
         pnl.TransferMechanism(
             input_states=pnl.InputState(function=linear_cls,
                                         combine=pnl.PRODUCT))
     assert "Specification of 'combine' argument (PRODUCT) conflicts with Function specified " \
            "in 'function' argument (Linear) for InputState" in str(err.value)
Code example #5
0
 def test_combine_param_alone(self):
     """combine=PRODUCT alone makes the state multiply its afferent inputs."""
     source_a = pnl.TransferMechanism(size=2)
     source_b = pnl.TransferMechanism(size=2)
     sink = pnl.TransferMechanism(
         size=2, input_states=pnl.InputState(combine=pnl.PRODUCT))
     proc_a = pnl.Process(pathway=[source_a, sink])
     proc_b = pnl.Process(pathway=[source_b, sink])
     system = pnl.System(processes=[proc_a, proc_b])
     result = system.run(inputs={source_a: [1, 2], source_b: [3, 4]})
     # Element-wise product of the two input vectors: [1*3, 2*4].
     assert np.allclose(result, [[3, 8]])
Code example #6
0
 def test_combine_param_redundant_fct_class_spec(self):
     """combine=PRODUCT is accepted alongside a LinearCombination class spec."""
     source_a = pnl.TransferMechanism(size=2)
     source_b = pnl.TransferMechanism(size=2)
     sink = pnl.TransferMechanism(
         size=2,
         input_states=pnl.InputState(function=pnl.LinearCombination,
                                     combine=pnl.PRODUCT))
     proc_a = pnl.Process(pathway=[source_a, sink])
     proc_b = pnl.Process(pathway=[source_b, sink])
     system = pnl.System(processes=[proc_a, proc_b])
     result = system.run(inputs={source_a: [1, 2], source_b: [3, 4]})
     # Element-wise product of the two input vectors: [1*3, 2*4].
     assert np.allclose(result, [[3, 8]])
Code example #7
0
 def test_combine_param_conflicting_function_spec(self):
     """combine=PRODUCT must be rejected with a Linear function instance."""
     with pytest.raises(pnl.InputStateError) as err:
         pnl.TransferMechanism(
             input_states=pnl.InputState(function=pnl.Linear(),
                                         combine=pnl.PRODUCT))
     assert "Specification of 'combine' argument (PRODUCT) conflicts with Function specified " \
            "in 'function' argument (Linear Function" in str(err.value)
Code example #8
0
# Log the activation layer's RESULT and its modulated gain parameter.
activation.set_log_conditions([pnl.RESULT, "mod_gain"])

# Stimulus layer: two-element pass-through (identity Linear) mechanism.
stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                     size=2,
                                     function=pnl.Linear(slope=1, intercept=0),
                                     output_states=[pnl.RESULT],
                                     name="Stimulus Info")

stimulusInfo.set_log_conditions([pnl.RESULT])

# Element-wise product of stimulus info and activation:
# combine=PRODUCT multiplies the values arriving at the InputState.
controlledElement = pnl.TransferMechanism(
    default_variable=[[0.0, 0.0]],
    size=2,
    function=pnl.Linear(slope=1, intercept=0),
    input_states=pnl.InputState(combine=pnl.PRODUCT),
    output_states=[pnl.RESULT],
    name='Stimulus Info * Activity')

controlledElement.set_log_conditions([pnl.RESULT])

# Single-element stage that feeds the DDM decision mechanism below.
ddmCombination = pnl.TransferMechanism(size=1,
                                       function=pnl.Linear(slope=1,
                                                           intercept=0),
                                       output_states=[pnl.RESULT],
                                       name="DDM Integrator")
ddmCombination.set_log_conditions([pnl.RESULT])
decisionMaker = pnl.DDM(
    function=pnl.DriftDiffusionAnalytical(drift_rate=DRIFT,
                                          starting_point=STARTING_POINT,
Code example #9
0
    def test_stability_flexibility_susan_and_sebastian(self):
        """Regression test: build and run the stability-flexibility model.

        Constructs task, stimulus, activation, and combination layers feeding
        a DDM, wires them into a Composition, attaches an
        OptimizationControlMechanism that grid-searches the activation
        layer's gain against a custom accuracy objective, and runs four
        trials.
        """

        # computeAccuracy(trialInformation)
        # Inputs: trialInformation[0, 1, 2, 3]
        # trialInformation[0] - Task Dimension : [0, 1] or [1, 0]
        # trialInformation[1] - Stimulus Dimension: Congruent {[1, 1] or [-1, -1]} // Incongruent {[-1, 1] or [1, -1]}
        # trialInformation[2] - Upper Threshold: Probability of DDM choosing upper bound
        # trialInformation[3] - Lower Threshold: Probability of DDM choosing lower bound

        def computeAccuracy(trialInformation):
            # Objective function for the controller: probability of the
            # "correct" DDM response given the attended task dimension.

            # Unload contents of trialInformation
            # Origin Node Inputs
            taskInputs = trialInformation[0]
            stimulusInputs = trialInformation[1]

            # DDM Outputs
            upperThreshold = trialInformation[2]
            lowerThreshold = trialInformation[3]

            # Keep Track of Accuracy
            accuracy = []

            # Beginning of Accuracy Calculation
            colorTrial = (taskInputs[0] == 1)
            motionTrial = (taskInputs[1] == 1)

            # Based on the task dimension information, decide which response is "correct"
            # Obtain accuracy probability from DDM thresholds in "correct" direction
            if colorTrial:
                if stimulusInputs[0] == 1:
                    accuracy.append(upperThreshold)
                elif stimulusInputs[0] == -1:
                    accuracy.append(lowerThreshold)

            if motionTrial:
                if stimulusInputs[1] == 1:
                    accuracy.append(upperThreshold)
                elif stimulusInputs[1] == -1:
                    accuracy.append(lowerThreshold)

            # Accounts for initialization runs that have no variable input
            if len(accuracy) == 0:
                accuracy = [0]

            # print("Accuracy: ", accuracy[0])
            # print()

            return [accuracy]

        # BEGIN: Composition Construction

        # Constants as defined in Musslick et al. 2018
        tau = 0.9  # Time Constant
        DRIFT = 1  # Drift Rate
        STARTING_POINT = 0.0  # Starting Point
        THRESHOLD = 0.0475  # Threshold
        NOISE = 0.04  # Noise
        T0 = 0.2  # T0

        # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive
        # Origin Node
        taskLayer = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                          size=2,
                                          function=pnl.Linear(slope=1,
                                                              intercept=0),
                                          output_states=[pnl.RESULT],
                                          name='Task Input [I1, I2]')

        # Stimulus Layer: [Color Stimulus, Motion Stimulus]
        # Origin Node
        stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                             size=2,
                                             function=pnl.Linear(slope=1,
                                                                 intercept=0),
                                             output_states=[pnl.RESULT],
                                             name="Stimulus Input [S1, S2]")

        # Activation Layer: [Color Activation, Motion Activation]
        # Recurrent: Self Excitation, Mutual Inhibition
        # Controlled: Gain Parameter
        activation = pnl.RecurrentTransferMechanism(
            default_variable=[[0.0, 0.0]],
            function=pnl.Logistic(gain=1.0),
            matrix=[[1.0, -1.0], [-1.0, 1.0]],
            integrator_mode=True,
            integrator_function=pnl.AdaptiveIntegrator(rate=(tau)),
            initial_value=np.array([[0.0, 0.0]]),
            output_states=[pnl.RESULT],
            name='Task Activations [Act 1, Act 2]')

        # Hadamard product of Activation and Stimulus Information
        nonAutomaticComponent = pnl.TransferMechanism(
            default_variable=[[0.0, 0.0]],
            size=2,
            function=pnl.Linear(slope=1, intercept=0),
            input_states=pnl.InputState(combine=pnl.PRODUCT),
            output_states=[pnl.RESULT],
            name='Non-Automatic Component [S1*Activity1, S2*Activity2]')

        # Summation of nonAutomatic and Automatic Components
        ddmCombination = pnl.TransferMechanism(
            size=1,
            function=pnl.Linear(slope=1, intercept=0),
            input_states=pnl.InputState(combine=pnl.SUM),
            output_states=[pnl.RESULT],
            name="Drift = (S1 + S2) + (S1*Activity1 + S2*Activity2)")

        # DDM reports both the decision variable/RT and the probabilities
        # of hitting each threshold, which the objective function consumes.
        decisionMaker = pnl.DDM(function=pnl.DriftDiffusionAnalytical(
            drift_rate=DRIFT,
            starting_point=STARTING_POINT,
            threshold=THRESHOLD,
            noise=NOISE,
            t0=T0),
                                output_states=[
                                    pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
                                    pnl.PROBABILITY_UPPER_THRESHOLD,
                                    pnl.PROBABILITY_LOWER_THRESHOLD
                                ],
                                name='DDM')

        # Enable logging on every mechanism for post-run inspection.
        taskLayer.set_log_conditions([pnl.RESULT])
        stimulusInfo.set_log_conditions([pnl.RESULT])
        activation.set_log_conditions([pnl.RESULT, "mod_gain"])
        nonAutomaticComponent.set_log_conditions([pnl.RESULT])
        ddmCombination.set_log_conditions([pnl.RESULT])
        decisionMaker.set_log_conditions([
            pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD,
            pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME
        ])

        # Composition Creation

        stabilityFlexibility = pnl.Composition(controller_mode=pnl.BEFORE)

        # Node Creation
        stabilityFlexibility.add_node(taskLayer)
        stabilityFlexibility.add_node(activation)
        stabilityFlexibility.add_node(nonAutomaticComponent)
        stabilityFlexibility.add_node(stimulusInfo)
        stabilityFlexibility.add_node(ddmCombination)
        stabilityFlexibility.add_node(decisionMaker)

        # Projection Creation
        stabilityFlexibility.add_projection(sender=taskLayer,
                                            receiver=activation)
        stabilityFlexibility.add_projection(sender=activation,
                                            receiver=nonAutomaticComponent)
        stabilityFlexibility.add_projection(sender=stimulusInfo,
                                            receiver=nonAutomaticComponent)
        stabilityFlexibility.add_projection(sender=stimulusInfo,
                                            receiver=ddmCombination)
        stabilityFlexibility.add_projection(sender=nonAutomaticComponent,
                                            receiver=ddmCombination)
        stabilityFlexibility.add_projection(sender=ddmCombination,
                                            receiver=decisionMaker)

        # Beginning of Controller

        # Grid Search Range
        searchRange = pnl.SampleSpec(start=1.0, stop=1.9, num=10)

        # Modulate the GAIN parameter from activation layer
        # Initalize cost function as 0
        signal = pnl.ControlSignal(
            projections=[(pnl.GAIN, activation)],
            function=pnl.Linear,
            variable=1.0,
            intensity_cost_function=pnl.Linear(slope=0.0),
            allocation_samples=searchRange)

        # Use the computeAccuracy function to obtain selection values
        # Pass in 4 arguments whenever computeRewardRate is called
        objectiveMechanism = pnl.ObjectiveMechanism(
            monitor=[
                taskLayer, stimulusInfo,
                (pnl.PROBABILITY_UPPER_THRESHOLD, decisionMaker),
                (pnl.PROBABILITY_LOWER_THRESHOLD, decisionMaker)
            ],
            function=computeAccuracy,
            name="Controller Objective Mechanism")

        #  Sets trial history for simulations over specified signal search parameters
        metaController = pnl.OptimizationControlMechanism(
            agent_rep=stabilityFlexibility,
            features=[taskLayer.input_state, stimulusInfo.input_state],
            feature_function=pnl.Buffer(history=10),
            name="Controller",
            objective_mechanism=objectiveMechanism,
            function=pnl.GridSearch(),
            control_signals=[signal])

        stabilityFlexibility.add_controller(metaController)
        stabilityFlexibility.enable_controller = True
        # stabilityFlexibility.model_based_optimizer_mode = pnl.BEFORE

        # NOTE(review): index 0 is deliberately skipped here — presumably the
        # controller's outcome input_state; confirm before changing.
        for i in range(1, len(stabilityFlexibility.controller.input_states)):
            stabilityFlexibility.controller.input_states[
                i].function.reinitialize()
        # Origin Node Inputs
        taskTrain = [[1, 0], [0, 1], [1, 0], [0, 1]]
        stimulusTrain = [[1, -1], [-1, 1], [1, -1], [-1, 1]]

        inputs = {taskLayer: taskTrain, stimulusInfo: stimulusTrain}
        stabilityFlexibility.run(inputs)
Code example #10
0
def runStabilityFlexibility(tasks, stimuli, gain):
    """Build and run the stability-flexibility composition for one gain value.

    Args:
        tasks: per-trial task-attendance inputs (first element = color task,
            second = motion task), one 2-vector per trial.
        stimuli: per-trial stimulus inputs, one 2-vector per trial.
        gain: gain of the Logistic function on the recurrent activation layer.

    Returns:
        Tuple ``(accuracies, activity1, activity2)``: per-trial accuracies
        (via the module-level ``computeAccuracy`` helper) and the two
        activation traces extracted from the activation mechanism's log.

    Note:
        Depends on module-level helpers ``extractValues`` and
        ``computeAccuracy`` defined elsewhere in this file.
    """

    integrationConstant = 0.8  # time constant
    DRIFT = 0.25  # Drift Rate
    STARTING_POINT = 0.0  # Starting Point
    THRESHOLD = 0.05  # Threshold
    NOISE = 0.1  # Noise
    T0 = 0.2  # T0
    wa = 0.2  # weight (slope) of the automatic/congruence pathway
    g = gain

    # first element is color task attendance, second element is motion task attendance
    inputLayer = pnl.TransferMechanism(  #default_variable=[[0.0, 0.0]],
        size=2,
        function=pnl.Linear(slope=1, intercept=0),
        output_states=[pnl.RESULT],
        name='Input')
    inputLayer.set_log_conditions([pnl.RESULT])

    # Recurrent Transfer Mechanism that models the recurrence in the activation between the two stimulus and action
    # dimensions. Positive self excitation and negative opposite inhibition with an integrator rate = tau
    # Modulated variable in simulations is the GAIN variable of this mechanism
    activation = pnl.RecurrentTransferMechanism(
        default_variable=[[0.0, 0.0]],
        function=pnl.Logistic(gain=g),
        matrix=[[1.0, -1.0], [-1.0, 1.0]],
        integrator_mode=True,
        integrator_function=pnl.AdaptiveIntegrator(rate=integrationConstant),
        initial_value=np.array([[0.0, 0.0]]),
        output_states=[pnl.RESULT],
        name='Activity')

    activation.set_log_conditions([pnl.RESULT, "mod_gain"])

    # Stimulus layer: two-element identity pass-through.
    stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                         size=2,
                                         function=pnl.Linear(slope=1,
                                                             intercept=0),
                                         output_states=[pnl.RESULT],
                                         name="Stimulus Info")

    stimulusInfo.set_log_conditions([pnl.RESULT])

    # Automatic pathway: scales the stimulus by the fixed weight wa.
    congruenceWeighting = pnl.TransferMechanism(
        default_variable=[[0.0, 0.0]],
        size=2,
        function=pnl.Linear(slope=wa, intercept=0),
        name='Congruence * Automatic Component')

    # Controlled pathway: element-wise product of stimulus and activation
    # (combine=PRODUCT multiplies the values arriving at the InputState).
    controlledElement = pnl.TransferMechanism(
        default_variable=[[0.0, 0.0]],
        size=2,
        function=pnl.Linear(slope=1, intercept=0),
        input_states=pnl.InputState(combine=pnl.PRODUCT),
        output_states=[pnl.RESULT],
        name='Stimulus Info * Activity')

    controlledElement.set_log_conditions([pnl.RESULT])

    # Single-element stage that sums both pathways into the DDM drift input.
    ddmCombination = pnl.TransferMechanism(size=1,
                                           function=pnl.Linear(slope=1,
                                                               intercept=0),
                                           output_states=[pnl.RESULT],
                                           name="DDM Integrator")

    ddmCombination.set_log_conditions([pnl.RESULT])

    # DDM reports the decision variable/RT plus both threshold probabilities.
    decisionMaker = pnl.DDM(
        function=pnl.DriftDiffusionAnalytical(drift_rate=DRIFT,
                                              starting_point=STARTING_POINT,
                                              threshold=THRESHOLD,
                                              noise=NOISE,
                                              t0=T0),
        output_states=[
            pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
            pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD
        ],
        name='DDM')

    decisionMaker.set_log_conditions([
        pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD,
        pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME
    ])

    ########### Composition

    stabilityFlexibility = pnl.Composition()

    ### NODE CREATION

    stabilityFlexibility.add_node(inputLayer)
    stabilityFlexibility.add_node(activation)
    stabilityFlexibility.add_node(congruenceWeighting)
    stabilityFlexibility.add_node(controlledElement)
    stabilityFlexibility.add_node(stimulusInfo)
    stabilityFlexibility.add_node(ddmCombination)
    stabilityFlexibility.add_node(decisionMaker)

    # Wiring: input -> activation -> controlled pathway; stimulus feeds both
    # pathways; both pathways converge on ddmCombination -> DDM.
    stabilityFlexibility.add_projection(sender=inputLayer, receiver=activation)
    stabilityFlexibility.add_projection(sender=activation,
                                        receiver=controlledElement)
    stabilityFlexibility.add_projection(sender=stimulusInfo,
                                        receiver=congruenceWeighting)
    stabilityFlexibility.add_projection(sender=stimulusInfo,
                                        receiver=controlledElement)
    stabilityFlexibility.add_projection(sender=congruenceWeighting,
                                        receiver=ddmCombination)
    stabilityFlexibility.add_projection(sender=controlledElement,
                                        receiver=ddmCombination)
    stabilityFlexibility.add_projection(sender=ddmCombination,
                                        receiver=decisionMaker)

    runs = len(tasks)
    inputs = {inputLayer: tasks, stimulusInfo: stimuli}

    stabilityFlexibility.run(inputs)

    decisions = decisionMaker.log.nparray()
    upper, lower = extractValues(decisions)
    modelResults = [tasks, stimuli, upper, lower]
    accuracies = computeAccuracy(modelResults)

    activations = activation.log.nparray()
    activity1 = []
    activity2 = []
    # NOTE(review): the [1][1][4] indices below depend on the internal layout
    # of Log.nparray() — confirm they still select the RESULT entries if the
    # logging configuration above changes.
    for i in range(0, runs):
        activity1.append(activations[1][1][4][i + 1][0])
        activity2.append(activations[1][1][4][i + 1][1])

    return accuracies, activity1, activity2