Code example #1
    # Assumes module-level `import re`, `import numpy as np`, and
    # `import psyneulink as pnl`; this is a method of a pytest test class.
    def test_model_based_ocm_with_buffer(self):

        A = pnl.ProcessingMechanism(name='A')
        B = pnl.ProcessingMechanism(name='B')

        comp = pnl.Composition(name='comp', controller_mode=pnl.BEFORE)
        comp.add_linear_processing_pathway([A, B])

        search_range = pnl.SampleSpec(start=0.25, stop=0.75, step=0.25)  # allocation samples: 0.25, 0.5, 0.75
        control_signal = pnl.ControlSignal(
            projections=[(pnl.SLOPE, A)],
            function=pnl.Linear,
            variable=1.0,
            allocation_samples=search_range,
            intensity_cost_function=pnl.Linear(slope=0.))

        objective_mech = pnl.ObjectiveMechanism(monitor=[B])
        ocm = pnl.OptimizationControlMechanism(
            agent_rep=comp,
            features=[A.input_state],                # renamed `state_features` in later PsyNeuLink versions
            feature_function=pnl.Buffer(history=2),  # renamed `state_feature_function` in later versions
            objective_mechanism=objective_mech,
            function=pnl.GridSearch(),
            control_signals=[control_signal])
        objective_mech.log.set_log_conditions(pnl.OUTCOME)

        comp.add_controller(ocm)

        inputs = {A: [[[1.0]], [[2.0]], [[3.0]]]}

        for i in range(1, len(ocm.input_states)):
            ocm.input_states[i].function.reinitialize()
        comp.run(inputs=inputs, retain_old_simulation_data=True)

        log = objective_mech.log.nparray_dictionary()

        # "outer" composition
        assert np.allclose(log["comp"][pnl.OUTCOME], [[0.75], [1.5], [2.25]])

        # strip the control-allocation suffix from each simulation key,
        # keeping only the simulation index
        log_parsed = {}
        for key, value in log.items():
            cleaned_key = re.sub(r'comp-sim-(\d).*', r'\1', key)
            log_parsed[cleaned_key] = value

        # Each simulation in the first round is only one trial long
        # (even though the feature function is a Buffer, there is no history yet).
        for i in range(0, 3):
            assert len(log_parsed[str(i)]["Trial"]) == 1

        # Each simulation in the second and third rounds is two trials long
        # (the Buffer has history=2).
        for i in range(3, 9):
            assert len(log_parsed[str(i)]["Trial"]) == 2
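
# Conceptual sketch (standalone, not part of the test above) of why the first
# round of simulations is one trial long and later rounds are two:
# Buffer(history=2) maintains a deque with maxlen=2, appending each new
# feature value and dropping the oldest once full.
from collections import deque

history = deque(maxlen=2)
for x in [[1.0], [2.0], [3.0]]:
    history.append(x)
    print(list(history))  # 1 entry after the first trial, 2 thereafter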
Code example #2
# Fragment: assumes `search_range`, `activation`, `inputLayer`, `stimulusInfo`,
# `decisionMaker`, `computeAccuracy`, `INPUT`, `stimulusInput`, and the
# `stabilityFlexibility` Composition are defined as in Code example #4.
signal = pnl.ControlSignal(modulates=[(pnl.GAIN, activation)],
                           function=pnl.Linear,
                           variable=1.0,
                           intensity_cost_function=pnl.Linear(slope=0.),
                           allocation_samples=search_range)

objective_mech = pnl.ObjectiveMechanism(
    monitor=[inputLayer, stimulusInfo,
             (pnl.PROBABILITY_UPPER_THRESHOLD, decisionMaker),
             (pnl.PROBABILITY_LOWER_THRESHOLD, decisionMaker)],
    function=computeAccuracy)

meta_controller = pnl.OptimizationControlMechanism(
    agent_rep=stabilityFlexibility,
    features=[inputLayer.input_port, stimulusInfo.input_port],
    feature_function=pnl.Buffer(history=3),
    objective_mechanism=objective_mech,
    function=pnl.GridSearch(),
    control_signals=[signal])

inputs = {inputLayer: INPUT, stimulusInfo: stimulusInput}
stabilityFlexibility.add_controller(meta_controller)
# `enable_model_based_optimizer` is the older attribute name; later versions use `enable_controller`
stabilityFlexibility.enable_model_based_optimizer = True

print("Beginning of Run")

for i in range(1, len(stabilityFlexibility.controller.input_ports)):
    stabilityFlexibility.controller.input_ports[i].function.reinitialize()

# stabilityFlexibility.show_graph(show_controller=True)
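
# Minimal continuation (a sketch: the fragment above prepares `inputs` but
# never runs the Composition). Assuming INPUT and stimulusInput hold
# per-trial input lists as in Code example #4:
stabilityFlexibility.run(inputs=inputs)
print(stabilityFlexibility.results)  # one result entry per trial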
Code example #3
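# Fragment: `activation`, `inputLayer`, `stimulusInfo`, `decisionMaker`,
# `computeAccuracy`, `INPUT`, and `stimulusInput` are assumed to be defined as
# in Code example #4. `search_range` is also undefined here; a plausible
# definition, mirroring Code example #4 (an assumption, not in the original):
search_range = pnl.SampleSpec(start=1.0, stop=1.9, num=10)  # 10 gain samples from 1.0 to 1.9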
signal = pnl.ControlSignal(modulates=[(pnl.GAIN, activation)],
                           function=pnl.Linear,
                           variable=1.0,
                           allocation_samples=search_range)

objective_mech = pnl.ObjectiveMechanism(
    monitor=[inputLayer, stimulusInfo,
             (pnl.PROBABILITY_UPPER_THRESHOLD, decisionMaker),
             (pnl.PROBABILITY_LOWER_THRESHOLD, decisionMaker)],
    function=computeAccuracy)

meta_controller = pnl.OptimizationControlMechanism(
    agent_rep=stabilityFlexibility,
    state_features=[inputLayer.input_port, stimulusInfo.input_port],
    state_feature_function=pnl.Buffer(history=100),
    objective_mechanism=objective_mech,
    function=pnl.GridSearch(),
    control_signals=[signal])

inputs = {inputLayer: INPUT, stimulusInfo: stimulusInput}
# `add_model_based_optimizer` / `enable_model_based_optimizer` are older API
# names; later versions use `add_controller` / `enable_controller`
stabilityFlexibility.add_model_based_optimizer(meta_controller)
stabilityFlexibility.enable_model_based_optimizer = True

# stabilityFlexibility.show_graph(show_node_structure=[pnl.FUNCTIONS,
#                                                      pnl.PORT_FUNCTION_PARAMS,
#                                                      pnl.MECH_FUNCTION_PARAMS],
#                                 show_controller=True)
stabilityFlexibility.show_graph(show_node_structure=pnl.ALL,
                                show_controller=True,
                                show_cim=True)
Code example #4
    # Assumes module-level `import numpy as np` and `import psyneulink as pnl`;
    # this is a method of a pytest test class.
    def test_stability_flexibility_susan_and_sebastian(self):

        # computeAccuracy(trialInformation)
        # Inputs: trialInformation[0, 1, 2, 3]
        # trialInformation[0] - Task Dimension : [0, 1] or [1, 0]
        # trialInformation[1] - Stimulus Dimension: Congruent {[1, 1] or [-1, -1]} // Incongruent {[-1, 1] or [1, -1]}
        # trialInformation[2] - Upper Threshold: Probability of DDM choosing upper bound
        # trialInformation[3] - Lower Threshold: Probability of DDM choosing lower bound

        def computeAccuracy(trialInformation):

            # Unload contents of trialInformation
            # Origin Node Inputs
            taskInputs = trialInformation[0]
            stimulusInputs = trialInformation[1]

            # DDM Outputs
            upperThreshold = trialInformation[2]
            lowerThreshold = trialInformation[3]

            # Keep Track of Accuracy
            accuracy = []

            # Beginning of Accuracy Calculation
            colorTrial = (taskInputs[0] == 1)
            motionTrial = (taskInputs[1] == 1)

            # Based on the task dimension information, decide which response is "correct"
            # Obtain accuracy probability from DDM thresholds in "correct" direction
            if colorTrial:
                if stimulusInputs[0] == 1:
                    accuracy.append(upperThreshold)
                elif stimulusInputs[0] == -1:
                    accuracy.append(lowerThreshold)

            if motionTrial:
                if stimulusInputs[1] == 1:
                    accuracy.append(upperThreshold)
                elif stimulusInputs[1] == -1:
                    accuracy.append(lowerThreshold)

            # Accounts for initialization runs that have no variable input
            if len(accuracy) == 0:
                accuracy = [0]

            # print("Accuracy: ", accuracy[0])
            # print()

            return [accuracy]
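
        # Worked example of the logic above (illustrative values): on a color
        # trial ([1, 0]) with a congruent color stimulus (+1), accuracy is the
        # DDM's upper-threshold probability:
        #   computeAccuracy([[1, 0], [1, 1], 0.7, 0.3]) -> [[0.7]]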

        # BEGIN: Composition Construction

        # Constants as defined in Musslick et al. 2018
        tau = 0.9  # Time Constant
        DRIFT = 1  # Drift Rate
        STARTING_POINT = 0.0  # Starting Point
        THRESHOLD = 0.0475  # Threshold
        NOISE = 0.04  # Noise
        T0 = 0.2  # T0

        # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive
        # Origin Node
        taskLayer = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                          size=2,
                                          function=pnl.Linear(slope=1,
                                                              intercept=0),
                                          output_states=[pnl.RESULT],
                                          name='Task Input [I1, I2]')

        # Stimulus Layer: [Color Stimulus, Motion Stimulus]
        # Origin Node
        stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                             size=2,
                                             function=pnl.Linear(slope=1,
                                                                 intercept=0),
                                             output_states=[pnl.RESULT],
                                             name="Stimulus Input [S1, S2]")

        # Activation Layer: [Color Activation, Motion Activation]
        # Recurrent: Self Excitation, Mutual Inhibition
        # Controlled: Gain Parameter
        activation = pnl.RecurrentTransferMechanism(
            default_variable=[[0.0, 0.0]],
            function=pnl.Logistic(gain=1.0),
            matrix=[[1.0, -1.0], [-1.0, 1.0]],
            integrator_mode=True,
            integrator_function=pnl.AdaptiveIntegrator(rate=tau),
            initial_value=np.array([[0.0, 0.0]]),
            output_states=[pnl.RESULT],
            name='Task Activations [Act 1, Act 2]')

        # Hadamard product of Activation and Stimulus Information
        nonAutomaticComponent = pnl.TransferMechanism(
            default_variable=[[0.0, 0.0]],
            size=2,
            function=pnl.Linear(slope=1, intercept=0),
            input_states=pnl.InputState(combine=pnl.PRODUCT),
            output_states=[pnl.RESULT],
            name='Non-Automatic Component [S1*Activity1, S2*Activity2]')

        # Summation of nonAutomatic and Automatic Components
        ddmCombination = pnl.TransferMechanism(
            size=1,
            function=pnl.Linear(slope=1, intercept=0),
            input_states=pnl.InputState(combine=pnl.SUM),
            output_states=[pnl.RESULT],
            name="Drift = (S1 + S2) + (S1*Activity1 + S2*Activity2)")

        decisionMaker = pnl.DDM(function=pnl.DriftDiffusionAnalytical(
            drift_rate=DRIFT,
            starting_point=STARTING_POINT,
            threshold=THRESHOLD,
            noise=NOISE,
            t0=T0),
                                output_states=[
                                    pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
                                    pnl.PROBABILITY_UPPER_THRESHOLD,
                                    pnl.PROBABILITY_LOWER_THRESHOLD
                                ],
                                name='DDM')

        taskLayer.set_log_conditions([pnl.RESULT])
        stimulusInfo.set_log_conditions([pnl.RESULT])
        activation.set_log_conditions([pnl.RESULT, "mod_gain"])
        nonAutomaticComponent.set_log_conditions([pnl.RESULT])
        ddmCombination.set_log_conditions([pnl.RESULT])
        decisionMaker.set_log_conditions([
            pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD,
            pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME
        ])

        # Composition Creation

        stabilityFlexibility = pnl.Composition(controller_mode=pnl.BEFORE)

        # Node Creation
        stabilityFlexibility.add_node(taskLayer)
        stabilityFlexibility.add_node(activation)
        stabilityFlexibility.add_node(nonAutomaticComponent)
        stabilityFlexibility.add_node(stimulusInfo)
        stabilityFlexibility.add_node(ddmCombination)
        stabilityFlexibility.add_node(decisionMaker)

        # Projection Creation
        stabilityFlexibility.add_projection(sender=taskLayer,
                                            receiver=activation)
        stabilityFlexibility.add_projection(sender=activation,
                                            receiver=nonAutomaticComponent)
        stabilityFlexibility.add_projection(sender=stimulusInfo,
                                            receiver=nonAutomaticComponent)
        stabilityFlexibility.add_projection(sender=stimulusInfo,
                                            receiver=ddmCombination)
        stabilityFlexibility.add_projection(sender=nonAutomaticComponent,
                                            receiver=ddmCombination)
        stabilityFlexibility.add_projection(sender=ddmCombination,
                                            receiver=decisionMaker)

        # Beginning of Controller

        # Grid Search Range
        searchRange = pnl.SampleSpec(start=1.0, stop=1.9, num=10)

        # Modulate the GAIN parameter of the activation layer
        # Initialize the intensity cost function to 0
        signal = pnl.ControlSignal(
            projections=[(pnl.GAIN, activation)],
            function=pnl.Linear,
            variable=1.0,
            intensity_cost_function=pnl.Linear(slope=0.0),
            allocation_samples=searchRange)

        # Use the computeAccuracy function to obtain selection values
        # Four arguments are passed in whenever computeAccuracy is called
        objectiveMechanism = pnl.ObjectiveMechanism(
            monitor=[
                taskLayer, stimulusInfo,
                (pnl.PROBABILITY_UPPER_THRESHOLD, decisionMaker),
                (pnl.PROBABILITY_LOWER_THRESHOLD, decisionMaker)
            ],
            function=computeAccuracy,
            name="Controller Objective Mechanism")

        # Sets the trial history used in simulations over the specified control-signal search range
        metaController = pnl.OptimizationControlMechanism(
            agent_rep=stabilityFlexibility,
            features=[taskLayer.input_state, stimulusInfo.input_state],
            feature_function=pnl.Buffer(history=10),
            name="Controller",
            objective_mechanism=objectiveMechanism,
            function=pnl.GridSearch(),
            control_signals=[signal])

        stabilityFlexibility.add_controller(metaController)
        stabilityFlexibility.enable_controller = True
        # stabilityFlexibility.model_based_optimizer_mode = pnl.BEFORE

        for i in range(1, len(stabilityFlexibility.controller.input_states)):
            stabilityFlexibility.controller.input_states[i].function.reinitialize()

        # Origin Node Inputs
        taskTrain = [[1, 0], [0, 1], [1, 0], [0, 1]]
        stimulusTrain = [[1, -1], [-1, 1], [1, -1], [-1, 1]]

        inputs = {taskLayer: taskTrain, stimulusInfo: stimulusTrain}
        stabilityFlexibility.run(inputs)
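
        # Inspecting the run (a sketch, not part of the original test):
        # `nparray_dictionary` is keyed by execution context, as in Code
        # example #1; this Composition is unnamed, so iterate over the keys.
        log = decisionMaker.log.nparray_dictionary()
        for context, entries in log.items():
            print(context, entries[pnl.PROBABILITY_UPPER_THRESHOLD])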