Example #1
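A unit test verifying that reinitialize_when is accepted as a constructor keyword argument: a plain ProcessingMechanism defaults to the Never condition, while one constructed with reinitialize_when=AtTrialStart() stores that condition.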
    def test_detection_of_legal_arg_in_kwargs(self):
        assert isinstance(pnl.ProcessingMechanism().reinitialize_when,
                          pnl.Never)
        assert isinstance(
            pnl.ProcessingMechanism(
                reinitialize_when=pnl.AtTrialStart()).reinitialize_when,
            pnl.AtTrialStart)
Example #2
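A parametrized test that delivers an LCAMechanism's VALUE through an RPC pipeline while a Composition runs, then checks the logged execution times and values both with and without an extra scheduler condition, and across a second run when multi_run is set.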
    def test_log_multi_calls_single_timestep(self, scheduler_conditions, multi_run):
        con_with_rpc_pipeline = pnl.Context(rpc_pipeline=Queue())
        pipeline = con_with_rpc_pipeline.rpc_pipeline
        lca = pnl.LCAMechanism(
            size=2,
            leak=0.5,
            threshold=0.515,
            reset_stateful_function_when=pnl.AtTrialStart()
        )
        lca.set_delivery_conditions(pnl.VALUE)
        m0 = pnl.ProcessingMechanism(
            size=2
        )
        comp = pnl.Composition()
        comp.add_linear_processing_pathway([m0, lca])
        if scheduler_conditions:
            comp.scheduler.add_condition(lca, pnl.AfterNCalls(m0, 2))
        comp.run(inputs={m0: [[1, 0], [1, 0], [1, 0]]}, context=con_with_rpc_pipeline)

        actual = []
        while not pipeline.empty():
            actual.append(pipeline.get())
        integration_end_dict = {i.time: i for i in actual}
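        # Time stamps are formatted run:trial:pass:time_step; the extra
        # scheduler condition delays the LCA's execution by one pass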
        if scheduler_conditions:
            expected_times = ['0:0:1:1', '0:1:1:1', '0:2:1:1']
        else:
            expected_times = ['0:0:0:1', '0:1:0:1', '0:2:0:1']
        assert list(integration_end_dict.keys()) == expected_times
        vals = [i.value.data for i in integration_end_dict.values()]
        # floats in value, so use np.allclose
        assert np.allclose(vals, [[[0.52466739, 0.47533261]] * 3])
        if multi_run:
            comp.run(inputs={m0: [[1, 0], [1, 0], [1, 0]]}, context=con_with_rpc_pipeline)
            actual = []
            while not pipeline.empty():
                actual.append(pipeline.get())
            integration_end_dict.update({i.time: i for i in actual})
            if scheduler_conditions:
                expected_times = ['0:0:1:1', '0:1:1:1', '0:2:1:1', '1:0:1:1', '1:1:1:1', '1:2:1:1']
            else:
                expected_times = ['0:0:0:1', '0:1:0:1', '0:2:0:1', '1:0:0:1', '1:1:0:1', '1:2:0:1']
            assert list(integration_end_dict.keys()) == expected_times
            vals = [i.value.data for i in integration_end_dict.values()]
            # floats in value, so use np.allclose
            assert np.allclose(vals, [[[0.52466739, 0.47533261]] * 6])
Example #3
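A helper that trains a feed-forward task network (input, control, hidden, and output layers) as a PsyNeuLink AutodiffComposition and, if attach_LCA is set, wraps it in a Composition with an LCAMechanism for response selection.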
def get_trained_network(bipartite_graph, num_features=3, num_hidden=200,
                        epochs=10, learning_rate=20, attach_LCA=True,
                        competition=0.2, self_excitation=0.2, leak=0.4,
                        threshold=1e-4):
    # Get all tasks from bipartite graph (edges) and strip 'i/o' suffix
    all_tasks = get_all_tasks(bipartite_graph)

    # Analyze bipartite graph for network properties
    onodes = [n for n, d in bipartite_graph.nodes(data=True) if d['bipartite'] == 0]
    inodes = [n for n, d in bipartite_graph.nodes(data=True) if d['bipartite'] == 1]
    input_dims = len(inodes)
    output_dims = len(onodes)
    num_tasks = len(all_tasks)

    # Start building network as PsyNeuLink object
    # Layer parameters
    D_i = num_features * input_dims
    D_c = num_tasks
    D_h = num_hidden
    D_o = num_features * output_dims

    # Weight matrices (defaults provided by Dillon)
    wih = np.random.rand(D_i, D_h) * 0.02 - 0.01
    wch = np.random.rand(D_c, D_h) * 0.02 - 0.01
    wco = np.random.rand(D_c, D_o) * 0.02 - 0.01
    who = np.random.rand(D_h, D_o) * 0.02 - 0.01

    # Training params (defaults provided by Dillon)
    patience = 10
    min_delt = 0.00001

    # Instantiate layers and projections
    il = pnl.TransferMechanism(size=D_i, name='input')
    cl = pnl.TransferMechanism(size=D_c, name='control')

    hl = pnl.TransferMechanism(size=D_h, name='hidden',
                               function=pnl.Logistic(bias=-2))

    ol = pnl.TransferMechanism(size=D_o, name='output',
                               function=pnl.Logistic(bias=-2))

    pih = pnl.MappingProjection(matrix=wih)
    pch = pnl.MappingProjection(matrix=wch)
    pco = pnl.MappingProjection(matrix=wco)
    pho = pnl.MappingProjection(matrix=who)

    # Create training data for network
    # We train across all possible inputs, one task at a time
    input_examples, output_examples, control_examples = generate_training_data(all_tasks, num_features, input_dims, output_dims)

    # Training parameter set
    input_set = {
        'inputs': {
            il: input_examples.tolist(),
            cl: control_examples.tolist()
        },
        'targets': {
            ol: output_examples.tolist()
        },
        'epochs': epochs
    }

    mnet = pnl.AutodiffComposition(learning_rate=learning_rate,
                                   name='mnet')

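    # Enlarge the value history kept on the output CIM (presumably so
    # results from many executions are retained)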
    mnet.output_CIM.parameters.value._set_history_max_length(100000)
    mnet.add_node(il)
    mnet.add_node(cl)
    mnet.add_node(hl)
    mnet.add_node(ol)
    mnet.add_projection(projection=pih, sender=il, receiver=hl)
    mnet.add_projection(projection=pch, sender=cl, receiver=hl)
    mnet.add_projection(projection=pco, sender=cl, receiver=ol)
    mnet.add_projection(projection=pho, sender=hl, receiver=ol)

    # Train network
    print("training 1")
    t1 = time.time()
    mnet.learn(
        inputs=input_set,
        minibatch_size=1,
        bin_execute=MNET_BIN_EXECUTE,
        patience=patience,
        min_delta=min_delt,
    )
    t2 = time.time()
    print("training 1:", MNET_BIN_EXECUTE, t2-t1)

    # Apply LCA transform (values from Sebastian's code -- reportedly taken from the original LCA paper by Marius Usher and Jay McClelland)
    if attach_LCA:
        lca = pnl.LCAMechanism(size=D_o,
                               leak=leak,
                               competition=competition,
                               self_excitation=self_excitation,
                               time_step_size=0.01,
                               threshold=threshold,
                               threshold_criterion=pnl.CONVERGENCE,
                               reset_stateful_function_when=pnl.AtTrialStart(),
                               name='lca')

        # Wrapper composition used to pass values between mnet (AutodiffComposition) and lca (LCAMechanism)
        wrapper_composition = pnl.Composition()

        # Add mnet and lca to the wrapper composition as a linear pathway
        wrapper_composition.add_linear_processing_pathway([mnet, lca])

        return wrapper_composition

    return mnet
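
A minimal usage sketch for the helper above, assuming networkx is installed and that get_all_tasks and generate_training_data are defined in the same module; the graph below and its node names are illustrative only:

import networkx as nx

# Bipartite task graph: output nodes carry bipartite=0, input nodes carry
# bipartite=1, and each edge defines one input-to-output task
g = nx.Graph()
g.add_nodes_from(['o0', 'o1'], bipartite=0)
g.add_nodes_from(['i0', 'i1'], bipartite=1)
g.add_edges_from([('i0', 'o0'), ('i1', 'o1')])

# Returns the wrapper Composition (mnet -> lca), since attach_LCA defaults to True
net = get_trained_network(g, num_features=3, num_hidden=200, epochs=10)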
Example #4
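A variant of Example #3 that implements the LCA stage as a RecurrentTransferMechanism driven by a LeakyCompetingIntegrator, copies the trained weights back to the projections' default context, and caps the number of LCA executions per trial via exec_limit.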
def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200,
                                epochs=10, learning_rate=20, attach_LCA=True,
                                competition=0.2, self_excitation=0.2, leak=0.4,
                                threshold=1e-4, exec_limit=EXEC_LIMIT):
    # Get all tasks from bipartite graph (edges) and strip 'i/o' suffix
    all_tasks = get_all_tasks(bipartite_graph)

    # Analyze bipartite graph for network properties
    onodes = [n for n, d in bipartite_graph.nodes(data=True) if d['bipartite'] == 0]
    inodes = [n for n, d in bipartite_graph.nodes(data=True) if d['bipartite'] == 1]
    input_dims = len(inodes)
    output_dims = len(onodes)
    num_tasks = len(all_tasks)

    # Start building network as PsyNeuLink object
    # Layer parameters
    D_i = num_features * input_dims
    D_c = num_tasks
    D_h = num_hidden
    D_o = num_features * output_dims

    # Weight matrices (defaults provided by Dillon)
    wih = np.random.rand(D_i, D_h) * 0.02 - 0.01
    wch = np.random.rand(D_c, D_h) * 0.02 - 0.01
    wco = np.random.rand(D_c, D_o) * 0.02 - 0.01
    who = np.random.rand(D_h, D_o) * 0.02 - 0.01

    # Training params (defaults provided by Dillon)
    patience = 10
    min_delt = 0.00001

    # Instantiate layers and projections
    il = pnl.TransferMechanism(size=D_i, name='input')
    cl = pnl.TransferMechanism(size=D_c, name='control')

    hl = pnl.TransferMechanism(size=D_h,
                               name='hidden',
                               function=pnl.Logistic(bias=-2))

    ol = pnl.TransferMechanism(size=D_o,
                               name='output',
                               function=pnl.Logistic(bias=-2))

    pih = pnl.MappingProjection(matrix=wih)
    pch = pnl.MappingProjection(matrix=wch)
    pco = pnl.MappingProjection(matrix=wco)
    pho = pnl.MappingProjection(matrix=who)

    # Create training data for network
    # We train across all possible inputs, one task at a time
    input_examples, output_examples, control_examples = generate_training_data(all_tasks, num_features, input_dims, output_dims)

    # Training parameter set
    input_set = {
        'inputs': {
            il: input_examples.tolist(),
            cl: control_examples.tolist()
        },
        'targets': {
            ol: output_examples.tolist()
        },
        'epochs': 10  # hard-coded instead of the epochs argument; the LCA doesn't settle for 1000 epochs
    }

    # Build network
    mnet = pnl.AutodiffComposition(learning_rate=learning_rate,
                                   name='mnet')

    mnet.output_CIM.parameters.value._set_history_max_length(1000)
    mnet.add_node(il)
    mnet.add_node(cl)
    mnet.add_node(hl)
    mnet.add_node(ol)
    mnet.add_projection(projection=pih, sender=il, receiver=hl)
    mnet.add_projection(projection=pch, sender=cl, receiver=hl)
    mnet.add_projection(projection=pco, sender=cl, receiver=ol)
    mnet.add_projection(projection=pho, sender=hl, receiver=ol)

    # Train network
    print("training 2:", MNET_BIN_EXECUTE)
    t1 = time.time()
    mnet.learn(
        inputs=input_set,
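        # note: minibatch size is tied to the hard-coded epoch count in input_set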
        minibatch_size=input_set['epochs'],
        bin_execute=MNET_BIN_EXECUTE,
        patience=patience,
        min_delta=min_delt,
    )
    t2 = time.time()
    print("training 2:", MNET_BIN_EXECUTE, t2-t1)

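    # Copy the trained (context-specific) weights back to each projection's
    # default (None) context so they persist outside mnet's execution context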
    for projection in mnet.projections:
        if hasattr(projection.parameters, 'matrix'):
            weights = projection.parameters.matrix.get(mnet)
            projection.parameters.matrix.set(weights, None)

    # Apply LCA transform (values from Sebastian's code -- reportedly taken from the original LCA paper by Marius Usher and Jay McClelland)
    if attach_LCA:
        lci = pnl.LeakyCompetingIntegrator(rate=leak,
                                           time_step_size=0.01)

        lca_matrix = get_LCA_matrix(output_dims, num_features, self_excitation, competition)

        lca = pnl.RecurrentTransferMechanism(size=D_o,
                                             matrix=lca_matrix,
                                             integrator_mode=True,
                                             integrator_function=lci,
                                             name='lca',
                                             termination_threshold=threshold,
                                             reset_stateful_function_when=pnl.AtTrialStart())

        # Wrapper composition used to pass values between mnet (AutodiffComposition) and lca (LCAMechanism)
        wrapper_composition = pnl.Composition()

        # Add mnet and lca to the wrapper composition as a linear pathway
        wrapper_composition.add_linear_processing_pathway([mnet, lca])

        # Dummy mechanism to capture mnet's output when running under compiled (LLVM) execution
        if str(LCA_BIN_EXECUTE).startswith("LLVM"):
            dummy = pnl.TransferMechanism(size=D_o,
                                          name="MNET_OUT")
            wrapper_composition.add_linear_processing_pathway([mnet, dummy])

        # Set execution limit
        lca.parameters.max_executions_before_finished.set(exec_limit, wrapper_composition)

        # # Logging/Debugging
        # lca.set_log_conditions('value', pnl.LogCondition.EXECUTION)

        return wrapper_composition

    return mnet
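
As in Example #3, a hedged usage sketch; EXEC_LIMIT, MNET_BIN_EXECUTE, and LCA_BIN_EXECUTE are assumed to be module-level constants, as the code above implies. Reusing the illustrative graph g from the Example #3 sketch:

net = get_trained_network_multLCA(g, num_features=3, num_hidden=200,
                                  epochs=10, exec_limit=10000)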