def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200, epochs=10,
                                learning_rate=20, attach_LCA=True, competition=0.2,
                                self_excitation=0.2, leak=0.4, threshold=1e-4,
                                exec_limit=EXEC_LIMIT):
    # Get all tasks from bipartite graph (edges) and strip 'i/o' suffix
    all_tasks = get_all_tasks(bipartite_graph)

    # Analyze bipartite graph for network properties
    onodes = [n for n, d in bipartite_graph.nodes(data=True) if d['bipartite'] == 0]
    inodes = [n for n, d in bipartite_graph.nodes(data=True) if d['bipartite'] == 1]
    input_dims = len(inodes)
    output_dims = len(onodes)
    num_tasks = len(all_tasks)

    # Start building network as PsyNeuLink object
    # Layer parameters
    nh = num_hidden
    D_i = num_features * input_dims
    D_c = num_tasks
    D_h = nh
    D_o = num_features * output_dims

    # Weight matrices (defaults provided by Dillon)
    wih = np.random.rand(D_i, D_h) * 0.02 - 0.01
    wch = np.random.rand(D_c, D_h) * 0.02 - 0.01
    wco = np.random.rand(D_c, D_o) * 0.02 - 0.01
    who = np.random.rand(D_h, D_o) * 0.02 - 0.01

    # Training params (defaults provided by Dillon)
    patience = 10
    min_delt = 0.00001
    lr = learning_rate

    # Instantiate layers and projections
    il = pnl.TransferMechanism(size=D_i, name='input')
    cl = pnl.TransferMechanism(size=D_c, name='control')
    hl = pnl.TransferMechanism(size=D_h, name='hidden',
                               function=pnl.Logistic(bias=-2))
    ol = pnl.TransferMechanism(size=D_o, name='output',
                               function=pnl.Logistic(bias=-2))

    pih = pnl.MappingProjection(matrix=wih)
    pch = pnl.MappingProjection(matrix=wch)
    pco = pnl.MappingProjection(matrix=wco)
    pho = pnl.MappingProjection(matrix=who)

    # Create training data for network
    # We train across all possible inputs, one task at a time
    input_examples, output_examples, control_examples = generate_training_data(
        all_tasks, num_features, input_dims, output_dims
    )

    # Training parameter set
    input_set = {
        'inputs': {
            il: input_examples.tolist(),
            cl: control_examples.tolist()
        },
        'targets': {
            ol: output_examples.tolist()
        },
        'epochs': 10  # epochs  # LCA doesn't settle for 1000 epochs
    }

    # Build network
    mnet = pnl.AutodiffComposition(learning_rate=learning_rate, name='mnet')
    mnet.output_CIM.parameters.value._set_history_max_length(1000)
    mnet.add_node(il)
    mnet.add_node(cl)
    mnet.add_node(hl)
    mnet.add_node(ol)
    mnet.add_projection(projection=pih, sender=il, receiver=hl)
    mnet.add_projection(projection=pch, sender=cl, receiver=hl)
    mnet.add_projection(projection=pco, sender=cl, receiver=ol)
    mnet.add_projection(projection=pho, sender=hl, receiver=ol)

    # Train network
    print("training 2:", MNET_BIN_EXECUTE)
    t1 = time.time()
    mnet.learn(
        inputs=input_set,
        minibatch_size=input_set['epochs'],
        bin_execute=MNET_BIN_EXECUTE,
        patience=patience,
        min_delta=min_delt,
    )
    t2 = time.time()
    print("training 2:", MNET_BIN_EXECUTE, t2 - t1)

    for projection in mnet.projections:
        if hasattr(projection.parameters, 'matrix'):
            weights = projection.parameters.matrix.get(mnet)
            projection.parameters.matrix.set(weights, None)

    # Apply LCA transform (values from Sebastian's code -- supposedly taken
    # from the original LCA paper from Marius & Jay)
    if attach_LCA:
        lci = pnl.LeakyCompetingIntegrator(rate=leak, time_step_size=0.01)
        lca_matrix = get_LCA_matrix(output_dims, num_features, self_excitation, competition)

        lca = pnl.RecurrentTransferMechanism(size=D_o,
                                             matrix=lca_matrix,
                                             integrator_mode=True,
                                             integrator_function=lci,
                                             name='lca',
                                             termination_threshold=threshold,
                                             reset_stateful_function_when=pnl.AtTrialStart())

        # Wrapper composition used to pass values between mnet (AutodiffComposition)
        # and lca (LCAMechanism)
        wrapper_composition = pnl.Composition()

        # Add mnet and lca to outer_composition
        wrapper_composition.add_linear_processing_pathway([mnet, lca])

        # Dummy to save mnet results
        if str(LCA_BIN_EXECUTE).startswith("LLVM"):
            dummy = pnl.TransferMechanism(size=D_o, name="MNET_OUT")
            wrapper_composition.add_linear_processing_pathway([mnet, dummy])

        # Set execution limit
        lca.parameters.max_executions_before_finished.set(exec_limit, wrapper_composition)

        # # Logging/Debugging
        # lca.set_log_conditions('value', pnl.LogCondition.EXECUTION)

        return wrapper_composition

    return mnet
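

# --- Usage sketch (illustrative, not part of the original training code) ---
# A minimal example of driving get_trained_network_multLCA() above. It assumes
# the module-level helpers (get_all_tasks, generate_training_data,
# get_LCA_matrix) and constants (EXEC_LIMIT, MNET_BIN_EXECUTE, LCA_BIN_EXECUTE)
# referenced inside the function are defined elsewhere in this module, along
# with the psyneulink (pnl) and numpy (np) imports. The node labels and edges
# of the toy task graph are hypothetical; the only structural assumptions taken
# from the function itself are that output nodes carry bipartite=0, input nodes
# carry bipartite=1, and each edge represents one task.
if __name__ == "__main__":
    import networkx as nx

    env = nx.Graph()
    env.add_nodes_from(['0o', '1o', '2o'], bipartite=0)  # output dimensions
    env.add_nodes_from(['0i', '1i', '2i'], bipartite=1)  # input dimensions
    env.add_edges_from([('0i', '0o'), ('1i', '1o'), ('2i', '2o')])  # one task per edge

    # Returns the wrapper Composition (mnet -> lca) when attach_LCA=True,
    # otherwise the trained AutodiffComposition itself.
    trained = get_trained_network_multLCA(env, num_features=3, num_hidden=200,
                                          attach_LCA=True)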
name="word_hidden", function=pnl.Logistic(bias=-4.0, default_variable=[[0.0, 0.0]]), default_variable=[[0.0, 0.0]], ) task_input = pnl.ProcessingMechanism( name="task_input", function=pnl.Linear(default_variable=[[0.0, 0.0]]), default_variable=[[0.0, 0.0]], ) TASK = pnl.LCAMechanism( name="TASK", combination_function=pnl.LinearCombination(default_variable=[[0.0, 0.0]]), function=pnl.Logistic(default_variable=[[0.0, 0.0]]), integrator_function=pnl.LeakyCompetingIntegrator( name="LeakyCompetingIntegrator_Function_0", initializer=[[0.5, 0.5]], rate=0.5, default_variable=[[0.0, 0.0]], ), output_ports=["RESULTS"], termination_comparison_op=">=", default_variable=[[0.0, 0.0]], ) DECISION = pnl.DDM( name="DECISION", function=pnl.DriftDiffusionAnalytical(default_variable=[[0.0]]), input_ports=[{ pnl.NAME: pnl.ARRAY, pnl.VARIABLE: [[0.0, 0.0]], pnl.FUNCTION: pnl.Reduce(default_variable=[[0.0, 0.0]], weights=[1, -1]),