def solve_xor_rec():
    """Hand-built two-neuron network that computes XOR of its two inputs.

    Hidden neuron 1 is an AND gate (bias -1.5, fires only when both inputs
    are on); output neuron 0 is an OR gate (bias -0.5) that is strongly
    inhibited (weight -10) by the AND neuron, which yields XOR overall.

    Returns:
        SwitchNeuronNetwork: network with input keys [-1, -2] and output [0].
    """
    in_keys = [-1, -2]
    out_keys = [0]
    # NOTE: removed unused local `hidden_keys` — it was never referenced.
    nodes = []
    # Hidden AND neuron: heaviside(sum(inputs) - 1.5) fires only for (1, 1).
    nodes.append(Neuron(1, {
        'activation_function': heaviside,
        'integration_function': sum,
        'bias': -1.5,
        'activity': 0,
        'output': 0,
        'weights': [(-1, 1), (-2, 1)]
    }))
    # Output neuron: OR of the two inputs, vetoed by the AND neuron (-10),
    # so it fires iff exactly one input is on.
    nodes.append(Neuron(0, {
        'activation_function': heaviside,
        'integration_function': sum,
        'bias': -0.5,
        'activity': 0,
        'output': 0,
        'weights': [(-1, 1), (-2, 1), (1, -10)]
    }))
    net = SwitchNeuronNetwork(in_keys, out_keys, nodes)
    return net
def solve_one_to_many():
    """Hand-built network for the one-to-many association task.

    Three gating neurons (keys 3-5) multiply one signal input each with the
    shared input -4. Six switch neurons (7-12), chained in pairs through
    their modulatory weights, route each signal — with either sign — to one
    of the two output neurons.

    Returns:
        SwitchNeuronNetwork: inputs [-1..-4], outputs [0, 1].
    """
    input_keys = [-1, -2, -3, -4]
    output_keys = [0, 1]
    node_keys = [3, 4, 5]
    switch_keys = [7, 8, 9, 10, 11, 12]
    nodes = []

    node_weights = {3: [(-1, 1), (-4, 1)],
                    4: [(-2, 1), (-4, 1)],
                    5: [(-3, 1), (-4, 1)]}
    # Template for the multiplicative (gating) neurons; each instance gets
    # its own deep copy so the shared fields are not aliased.
    gating_template = {
        'activation_function': lambda x: clamp(x, -1, 1),
        'integration_function': mult,
        'activity': 0,
        'output': 0,
        'bias': 1
    }
    for key in node_keys:
        params = copy.deepcopy(gating_template)
        params['weights'] = node_weights[key]
        nodes.append(Neuron(key, params))

    # Every consecutive pair of switch neurons shares the same standard
    # input: signal input `slow` with weights +1 and -1.
    switch_std_w = {}
    for slow, in_key in enumerate(input_keys[:3]):
        for sw_key in switch_keys[2 * slow: 2 * slow + 2]:
            switch_std_w[sw_key] = [(in_key, 1), (in_key, -1)]

    # First switch of each pair is driven by its gating neuron (weight w2);
    # the second is chained off the first (weight w1).
    w1, w2 = 0.5, 1
    switch_mod_w = {7: [(3, w2)], 8: [(7, w1)],
                    9: [(4, w2)], 10: [(9, w1)],
                    11: [(5, w2)], 12: [(11, w1)]}
    for key in switch_keys:
        nodes.append(SwitchNeuron(key, switch_std_w[key], switch_mod_w[key]))

    # Output 0 listens to the second switch of every pair, output 1 to the first.
    out_w = {0: [(8, 1), (10, 1), (12, 1)],
             1: [(7, 1), (9, 1), (11, 1)]}
    out_template = {
        'activation_function': heaviside,
        'integration_function': sum,
        'activity': 0,
        'output': 0,
        'bias': 0
    }
    for key in output_keys:
        params = copy.deepcopy(out_template)
        params['weights'] = out_w[key]
        nodes.append(Neuron(key, params))

    return SwitchNeuronNetwork(input_keys, output_keys, nodes)
def solve_one_to_one_3x3():
    """Hand-built agent for the 3x3 one-to-one association task.

    Gating neurons (4-6) multiply each signal input with the shared input -4;
    switch neurons (1-3) step each signal through weights {10, 0, -10}; the
    output neuron sums the three switch outputs, clamped to [-10, 10].

    Returns:
        Agent: wraps the network with an identity input map and
        `convert_to_action` on the output.
    """
    input_keys = [-1, -2, -3, -4]
    output_keys = [0]
    switch_keys = [1, 2, 3]
    node_keys = [4, 5, 6]
    nodes = []

    gating_template = {
        'activation_function': lambda x: clamp(x, -10, 10),
        'integration_function': mult,
        'activity': 0,
        'output': 0,
        'bias': 1
    }
    gating_weights = {4: [(-1, 1), (-4, 1)],
                      5: [(-2, 1), (-4, 1)],
                      6: [(-3, 1), (-4, 1)]}
    for key in node_keys:
        params = copy.deepcopy(gating_template)
        params['weights'] = gating_weights[key]
        nodes.append(Neuron(key, params))

    # Switch neuron k reads input -k three times with stepped weights,
    # and is modulated by its gating neuron (k + 3) with weight 1/3.
    switch_std_weights = {k: [(-k, 10), (-k, 0), (-k, -10)] for k in switch_keys}
    switch_mod_weights = {k: [(k + 3, 1 / 3)] for k in switch_keys}
    for key in switch_keys:
        nodes.append(SwitchNeuron(key, switch_std_weights[key], switch_mod_weights[key]))

    # Output neuron: clamped sum of the three switch neurons.
    nodes.append(Neuron(0, {
        'activation_function': lambda x: clamp(x, -10, 10),
        'integration_function': sum,
        'activity': 0,
        'output': 0,
        'weights': [(1, 1), (2, 1), (3, 1)],
        'bias': 0
    }))

    net = SwitchNeuronNetwork(input_keys, output_keys, nodes)
    agent = Agent(net, lambda x: x, lambda x: convert_to_action(x))
    return agent
def solve_tmaze():
    """Hand-built agent for the T-maze task.

    Returns:
        Agent: wraps a SwitchNeuronNetwork; `append_bias` preprocesses the
        observation, `convert_to_direction` maps the output to a direction.
    """
    input_keys = [-1, -2, -3, -4, -5]
    output_keys = [0]
    node_keys = [1, 2, 3]  # hidden node keys (aggregator, modulator, switch)
    nodes = []

    # Aggregating neuron: linear difference of inputs -5 and -1.
    agg_params = {
        'activation_function': lambda x: x,
        'integration_function': sum,
        'activity': 0,
        'output': 0,
        'weights': [(-1, -1), (-5, 1)],
        'bias': 0
    }
    nodes.append(Neuron(1, agg_params))

    # Modulatory neuron: multiplies the aggregate with input -4, clamped
    # to [-0.8, 0] so it can only push the switch in one direction.
    mod_params = {
        'activation_function': lambda x: clamp(x, -0.8, 0),
        'integration_function': mult,
        'activity': 0,
        'output': 0,
        'weights': [(1, 1), (-4, 1)],
        'bias': 1
    }
    nodes.append(Neuron(2, mod_params))

    # Switch neuron: toggles input -3 between weights +5 and -5 under the
    # control of the modulatory neuron.
    nodes.append(SwitchNeuron(3, [(-3, 5), (-3, -5)], [(2, -1.25 * 0.5)]))

    # Output neuron: tanh squashing of the switch output.
    out_params = {
        'activation_function': tanh,
        'integration_function': sum,
        'activity': 0,
        'output': 0,
        'weights': [(3, 1)],
        'bias': 0
    }
    nodes.append(Neuron(0, out_params))

    net = SwitchNeuronNetwork(input_keys, output_keys, nodes)
    # For input, append the bias to the -1 input.
    return Agent(net, append_bias, convert_to_direction)
def create(genome, config, map_size):
    """
    Receives a genome and returns its phenotype (a SwitchNeuronNetwork).

    Guided-map variant: every genome node except the outputs (and the second
    input, presumably the reward — TODO confirm against caller) is expanded
    into a "map" of `map_size` neurons. Connections between maps are expanded
    according to the connection gene's scheme (one-to-one, extended
    one-to-one via an auxiliary map, or one-to-all with stepped/uniform
    weights).

    Args:
        genome: genome with `nodes` and `connections` (NEAT-style).
        config: config object exposing `genome_config`.
        map_size: number of neurons per map.

    Returns:
        SwitchNeuronNetwork built from the expanded maps.
    """
    genome_config = config.genome_config
    #required = required_for_output(genome_config.input_keys, genome_config.output_keys, genome.connections)
    # Deep-copied because input_keys is extended below with the new map keys.
    input_keys = copy.deepcopy(genome_config.input_keys)
    output_keys = copy.deepcopy(genome_config.output_keys)
    # Gather inputs and expressed connections.
    std_inputs = {}   # node key -> list of (source key, weight), standard part
    mod_inputs = {}   # node key -> list of (source key, weight), modulatory part
    children = {}     # map parent key -> list of the extra keys in its map
    node_keys = set(genome.nodes.keys())  # + list(genome_config.input_keys[:])
    aux_keys = set()  # keys of intermediary maps added for the extended scheme
    # Here we populate the children dictionary for each unique not isolated node.
    for n in genome.nodes.keys():
        children[n] = []
        if n in output_keys:
            continue
        #For this implementation everything besides the reward and the output is a map
        #if not genome.nodes[n].is_isolated:
        for _ in range(1, map_size):
            new_idx = max(node_keys) + 1
            children[n].append(new_idx)
            node_keys.add(new_idx)
    #assume 2 input nodes: the first one will be scaled to a map and the second one will represent the reward
    n = input_keys[0]
    children[n] = []
    for _ in range(1, map_size):
        new_idx = min(input_keys) - 1
        children[n].append(new_idx)
        input_keys.append(new_idx)
    # The reward input stays a singleton map.
    n = input_keys[1]
    children[n] = []
    # We don't scale the output with the map size to keep passing the parameters of the network easy.
    # This part can be revised in the future
    for n in output_keys:
        children[n] = []
    #Iterate over every connection
    for cg in itervalues(genome.connections):
        #If it's not enabled don't include it
        # if not cg.enabled:
        #     continue
        i, o = cg.key
        #If neither node is required for output then skip the connection
        # if o not in required and i not in required:
        #     continue
        #Find the map corresponding to each node of the connection
        in_map = [i] + children[i]
        out_map = [o] + children[o]
        #If the connection is modulatory and the output neuron a switch neuron then the new weights are stored
        #in the mod dictionary. We assume that only switch neurons have a modulatory part.
        # NOTE: node_inputs aliases one of the two dicts above; appends below
        # mutate std_inputs or mod_inputs directly.
        if cg.is_mod and genome.nodes[o].is_switch:
            node_inputs = mod_inputs
        else:
            node_inputs = std_inputs
        for n in out_map:
            if n not in node_inputs.keys():
                node_inputs[n] = []
        if len(in_map) == map_size and len(out_map) == map_size:
            # Map to map connectivity
            if cg.one2one:
                if cg.extended:
                    #extended one-to-one:
                    #create a new intermediary map per pair (in_map[j], out_map[j])
                    for j in range(0, map_size):
                        idx = max(node_keys.union(aux_keys)) + 1
                        children[idx] = []
                        aux_keys.add(idx)
                        for _ in range(1, map_size):
                            new_idx = max(node_keys.union(aux_keys)) + 1
                            children[idx].append(new_idx)
                            aux_keys.add(new_idx)
                        aux_map = [idx] + children[idx]
                        for node in aux_map:
                            node_inputs[node] = []
                        #add one to one connections between in_map and aux map with weight 1
                        # (in_map[j] fans out to every neuron of this aux map)
                        for i in range(map_size):
                            node_inputs[aux_map[i]].append((in_map[j], 1))
                        #add connections between aux map and out map with stepped weights
                        weights = calculate_weights(False, cg.weight, map_size)
                        for i in range(map_size):
                            node_inputs[out_map[j]].append((aux_map[i], weights[i]))
                else:
                    # Plain one-to-one: same weight between corresponding neurons.
                    weight = cg.weight
                    for i in range(map_size):
                        node_inputs[out_map[i]].append((in_map[i], weight))
            else:
                # 1-to-all
                if not cg.uniform:
                    # Step: weights spread linearly from -cg.weight to +cg.weight
                    start = -cg.weight
                    end = cg.weight
                    step = (end - start) / (map_size - 1)
                    for o_n in out_map:
                        s = start
                        for i_n in in_map:
                            node_inputs[o_n].append((i_n, s))
                            s += step
                else:
                    # Uniform
                    for o_n in out_map:
                        for i_n in in_map:
                            node_inputs[o_n].append((i_n, cg.weight))
        else:
            # Map-to-isolated or isolated-to-isolated
            if not cg.uniform:
                # Step
                start = -cg.weight
                end = cg.weight
                step = (end - start) / (map_size - 1)
                for o_n in out_map:
                    s = start
                    for i_n in in_map:
                        node_inputs[o_n].append((i_n, s))
                        s += step
            else:
                # Uniform
                for o_n in out_map:
                    for i_n in in_map:
                        node_inputs[o_n].append((i_n, cg.weight))
    nodes = []
    #Sometimes the output neurons end up not having any connections during the evolutionary process. While we do not
    #desire such networks, we should still allow them to make predictions to avoid fatal errors.
    for okey in output_keys:
        if okey not in node_keys:
            node_keys.add(okey)
            std_inputs[okey] = []
    # While we cannot deduce the order of activations of the neurons due to the fact that we allow for arbitrary connection
    # schemes, we certainly want the output neurons to activate last.
    conns = {}
    for k in node_keys.union(aux_keys):
        conns[k] = []
    parents = children.keys()
    for k in conns.keys():
        if k in input_keys:
            continue
        # NOTE(review): this guard is unreachable — we are iterating conns.keys().
        if k not in conns.keys():
            conns[k] = []
        if k in std_inputs.keys():
            conns[k].extend([i for i, _ in std_inputs[k]])
        if k in mod_inputs.keys():
            conns[k].extend([i for i, _ in mod_inputs[k]])
    sorted_keys = order_of_activation(conns, input_keys, output_keys)
    #Edge case: when a genome has no connections, sorted keys ends up empty and crashes the program
    #If this happens, just activate the output nodes with the default activation: 0
    if not sorted_keys:
        sorted_keys = output_keys
    for node_key in sorted_keys:
        #all the children are handled with the parent
        if node_key not in parents:
            continue
        #if the node we are examining is not in our keys set then skip it. It means that it is not required for output.
        # if node_key not in node_keys:
        #     continue
        #if the node is one of the originals present in the genotype, i.e. it's not one of the nodes we added for the
        #extended one to one scheme
        if node_key in genome.nodes:
            node = genome.nodes[node_key]
            node_map = [node_key] + children[node_key]
            if node.is_switch:
                # If the switch neuron does not have any incoming connections
                if node_key not in std_inputs.keys(
                ) and node_key not in mod_inputs.keys():
                    for n in node_map:
                        std_inputs[n] = []
                        mod_inputs[n] = []
                # if the switch neuron only has modulatory weights then we copy those weights for the standard part as well.
                # this is not the desired behaviour but it is done to avoid errors during forward pass.
                # NOTE: this aliases the same weight lists in both dicts.
                if node_key not in std_inputs.keys(
                ) and node_key in mod_inputs.keys():
                    for n in node_map:
                        std_inputs[n] = mod_inputs[n]
                if node_key not in mod_inputs.keys():
                    for n in node_map:
                        mod_inputs[n] = []
                #For the guided maps, all modulatory weights to switch neurons now weight 1/m
                # NOTE(review): raises ZeroDivisionError if std_inputs[n] is
                # empty while mod_inputs[node_key] is non-empty — verify this
                # cannot happen for valid genomes.
                if mod_inputs[node_key]:
                    for n in node_map:
                        new_w = 1 / len(std_inputs[n])
                        new_mod_w = [(inp, new_w) for inp, w in mod_inputs[n]]
                        mod_inputs[n] = new_mod_w
                for n in node_map:
                    nodes.append(SwitchNeuron(n, std_inputs[n], mod_inputs[n]))
                continue
            ###################### Switch neuron part ends here
            for n in node_map:
                if n not in std_inputs:
                    std_inputs[n] = []
                #For these guided maps, every hidden neuron that is not a switch neuron is a gating neuron
                if node_key in output_keys:
                    # Create the standard part dictionary for the neuron
                    #We also pre-determine the output neuron to help NEAT even more
                    params = {
                        'activation_function': identity,
                        'integration_function': sum,
                        'bias': node.bias,
                        'activity': 0,
                        'output': 0,
                        'weights': std_inputs[n]
                    }
                #Everything else is a gating neuron
                else:
                    params = {
                        'activation_function': identity,
                        'integration_function': prod,
                        'bias': node.bias,
                        'activity': 0,
                        'output': 0,
                        'weights': std_inputs[n]
                    }
                nodes.append(Neuron(n, params))
        #if the node is one of those we added with the extended one to one scheme
        else:
            node_map = [node_key] + children[node_key]
            for n in node_map:
                if n not in std_inputs:
                    std_inputs[n] = []
            # Create the standard part dictionary for the neuron
            # (aux-map neurons are plain identity/sum relays with zero bias)
            for n in node_map:
                params = {
                    'activation_function': identity,
                    'integration_function': sum,
                    'bias': 0,
                    'activity': 0,
                    'output': 0,
                    'weights': std_inputs[n]
                }
                nodes.append(Neuron(n, params))
    return SwitchNeuronNetwork(input_keys, output_keys, nodes)
def create(genome, config, map_size):
    """
    Receives a genome and returns its phenotype (a SwitchNeuronNetwork).

    Map variant: every non-isolated genome node is expanded into a map of
    `map_size` neurons; connection genes describe map-to-map wiring
    (one-to-one, or one-to-all with stepped/uniform weights).

    Args:
        genome: genome with `nodes` and `connections` (NEAT-style).
        config: config object exposing `genome_config`.
        map_size: number of neurons per map.

    Returns:
        SwitchNeuronNetwork built from the expanded maps.
    """
    genome_config = config.genome_config
    required = required_for_output(genome_config.input_keys, genome_config.output_keys, genome.connections)
    input_keys = genome_config.input_keys
    output_keys = genome_config.output_keys
    # Gather inputs and expressed connections.
    std_inputs = {}   # node key -> list of (source key, weight), standard part
    mod_inputs = {}   # node key -> list of (source key, weight), modulatory part
    children = {}     # map parent key -> list of the extra keys in its map
    node_keys = set(genome.nodes.keys())  # + list(genome_config.input_keys[:])
    # Here we populate the children dictionary for each unique not isolated node.
    for n in genome.nodes.keys():
        children[n] = []
        if not genome.nodes[n].is_isolated:
            for _ in range(1, map_size):
                new_idx = max(node_keys) + 1
                children[n].append(new_idx)
                node_keys.add(new_idx)
    # We don't scale the output with the map size to keep passing the parameters of the network easy.
    # This part can be revised in the future
    for n in chain(input_keys, output_keys):
        children[n] = []
    #Iterate over every connection
    for cg in itervalues(genome.connections):
        #If it's not enabled don't include it
        if not cg.enabled:
            continue
        i, o = cg.key
        #If neither node is required for output then skip the connection
        if o not in required and i not in required:
            continue
        #Find the map corresponding to each node of the connection
        in_map = [i] + children[i]
        out_map = [o] + children[o]
        #If the connection is modulatory and the output neuron a switch neuron then the new weights are stored
        #in the mod dictionary. We assume that only switch neurons have a modulatory part.
        # NOTE: node_inputs aliases one of the two dicts above.
        if cg.is_mod and genome.nodes[o].is_switch:
            node_inputs = mod_inputs
        else:
            node_inputs = std_inputs
        for n in out_map:
            if n not in node_inputs.keys():
                node_inputs[n] = []
        if len(in_map) == map_size and len(out_map) == map_size:
            # Map to map connectivity
            if cg.one2one:
                # 1-to-1 mapping
                weight = cg.weight
                for i in range(map_size):
                    node_inputs[out_map[i]].append((in_map[i], weight))
            else:
                # 1-to-all
                if not cg.uniform:
                    # Step: weights spread linearly from -cg.weight to +cg.weight
                    start = -cg.weight
                    end = cg.weight
                    step = (end - start) / (map_size - 1)
                    for o_n in out_map:
                        s = start
                        for i_n in in_map:
                            node_inputs[o_n].append((i_n, s))
                            s += step
                else:
                    # Uniform
                    for o_n in out_map:
                        for i_n in in_map:
                            node_inputs[o_n].append((i_n, cg.weight))
        else:
            # Map-to-isolated or isolated-to-isolated
            if not cg.uniform:
                # Step
                start = -cg.weight
                end = cg.weight
                step = (end - start) / (map_size - 1)
                for o_n in out_map:
                    s = start
                    for i_n in in_map:
                        node_inputs[o_n].append((i_n, s))
                        s += step
            else:
                # Uniform
                for o_n in out_map:
                    for i_n in in_map:
                        node_inputs[o_n].append((i_n, cg.weight))
    nodes = []
    #Sometimes the output neurons end up not having any connections during the evolutionary process. While we do not
    #desire such networks, we should still allow them to make predictions to avoid fatal errors.
    for okey in output_keys:
        if okey not in node_keys:
            node_keys.add(okey)
            std_inputs[okey] = []
    # While we cannot deduce the order of activations of the neurons due to the fact that we allow for arbitrary connection
    # schemes, we certainly want the output neurons to activate last.
    input_keys = genome_config.input_keys
    output_keys = genome_config.output_keys
    conns = {}
    for k in genome.nodes.keys():
        if k not in std_inputs:
            std_inputs[k] = []
        if k in children:
            # NOTE(review): this resets std_inputs for every child node,
            # discarding any weights accumulated for them above — looks like
            # it may be meant to run only when the child has no entry; verify.
            for c in children[k]:
                std_inputs[c] = []
        conns[k] = [i for i, _ in std_inputs[k]]
    # NOTE(review): ordering only considers standard weights; modulatory
    # inputs are not part of the dependency graph here — confirm intended.
    sorted_keys = order_of_activation(conns, input_keys, output_keys)
    for node_key in sorted_keys:
        #if the node we are examining is not in our keys set then skip it. It means that it is not required for output.
        if node_key not in node_keys:
            continue
        node = genome.nodes[node_key]
        node_map = [node_key] + children[node_key]
        if node.is_switch:
            #If the node doesn't have any inputs then it is not needed
            if node_key not in std_inputs.keys(
            ) and node_key not in mod_inputs.keys():
                continue
            # if the switch neuron only has modulatory weights then we copy those weights for the standard part as well.
            # this is not the desired behaviour but it is done to avoid errors during forward pass.
            if node_key not in std_inputs.keys(
            ) and node_key in mod_inputs.keys():
                for n in node_map:
                    std_inputs[n] = mod_inputs[n]
            if node_key not in mod_inputs:
                for n in node_map:
                    mod_inputs[n] = []
            for n in node_map:
                nodes.append(SwitchNeuron(n, std_inputs[n], mod_inputs[n]))
            continue
        for n in node_map:
            if n not in std_inputs:
                std_inputs[n] = []
        # Create the standard part dictionary for the neuron
        for n in node_map:
            params = {
                'activation_function': genome_config.activation_defs.get(node.activation),
                'integration_function': genome_config.aggregation_function_defs.get(node.aggregation),
                'bias': node.bias,
                'activity': 0,
                'output': 0,
                'weights': std_inputs[n]
            }
            nodes.append(Neuron(n, params))
    return SwitchNeuronNetwork(input_keys, output_keys, nodes)
def create(genome, config):
    """
    Receives a genome and returns its phenotype (a SwitchNeuronNetwork).

    Non-map variant: one neuron per genome node. Standard and modulatory
    weights are collected per destination node; only switch neurons get a
    modulatory part.

    Args:
        genome: genome with `nodes` and `connections` (NEAT-style).
        config: config object exposing `genome_config`.

    Returns:
        SwitchNeuronNetwork with neurons instantiated in activation order.
    """
    genome_config = config.genome_config
    required = required_for_output(genome_config.input_keys, genome_config.output_keys, genome.connections)
    input_keys = genome_config.input_keys
    output_keys = genome_config.output_keys
    #A dictionary where we keep the modulatory weights for every node
    mod_weights = {}
    #A dictionary where we keep the standard weights for every node
    std_weights = {}
    #Create a set with the keys of the nodes in the network
    keys = set()
    #Iterate over the connections
    for cg in itervalues(genome.connections):
        #if not cg.enabled:
        #    continue
        i, o = cg.key
        #If neither of the nodes in the connection are required for output then skip this connection
        if o not in required and i not in required:
            continue
        if i not in input_keys:
            keys.add(i)
        keys.add(o)
        #In this implementation, only switch neurons have a modulatory part
        if genome.nodes[o].is_switch:
            #Add the weight to the modulatory part of the o node and continue with the next connection
            if cg.is_mod:
                if o not in mod_weights.keys():
                    mod_weights[o] = [(i, cg.weight)]
                else:
                    mod_weights[o].append((i, cg.weight))
                continue
        #If the connection is not modulatory
        #Add the weight to the standard weight of the o node.
        if o not in std_weights.keys():
            std_weights[o] = [(i, cg.weight)]
        else:
            std_weights[o].append((i, cg.weight))
    #Create the array with the network's nodes
    nodes = []
    #Sometimes the output neurons end up not having any connections during the evolutionary process. While we do not
    #desire such networks, we should still allow them to make predictions to avoid fatal errors.
    for okey in output_keys:
        keys.add(okey)
    for k in keys:
        if k not in std_weights:
            std_weights[k] = []
    #While we cannot deduce the order of activations of the neurons due to the fact that we allow for arbitrary connection
    #schemes, we certainly want the output neurons to activate last.
    conns = {}
    for k in keys:
        conns[k] = [i for i, w in std_weights[k]]
    sorted_keys = order_of_activation(conns, input_keys, output_keys)
    #Create the nodes of the network based on the weights dictionaries created above and the genome.
    for node_key in sorted_keys:
        if node_key not in keys:
            continue
        node = genome.nodes[node_key]
        if node.is_switch:
            #If the node doesn't have any connections then it is not needed
            # NOTE(review): std_weights was pre-filled with [] for every key
            # above, so the two `node_key not in std_weights` branches below
            # appear unreachable — dead code kept as-is; verify before removal.
            if node_key not in std_weights.keys(
            ) and node_key not in mod_weights.keys():
                continue
            #if the switch neuron only has modulatory weights then we copy those weights for the standard part as well.
            #this is not the desired behaviour but it is done to avoid errors during forward pass.
            if node_key not in std_weights.keys() and node_key in mod_weights:
                std_weights[node_key] = mod_weights[node_key]
            if node_key not in mod_weights.keys():
                mod_weights[node_key] = []
            nodes.append(
                SwitchNeuron(node_key, std_weights[node_key], mod_weights[node_key]))
            continue
        if node_key not in std_weights:
            std_weights[node_key] = []
        #Create the standard part dictionary for the neuron
        params = {
            'activation_function': genome_config.activation_defs.get(node.activation),
            'integration_function': genome_config.aggregation_function_defs.get(node.aggregation),
            'bias': node.bias,
            'activity': 0,
            'output': 0,
            'weights': std_weights[node_key]
        }
        nodes.append(Neuron(node_key, params))
    return SwitchNeuronNetwork(input_keys, output_keys, nodes)
def create(genome, config):
    """Build a RecurrentNetwork phenotype from a genome.

    Connections whose endpoints are both irrelevant to the output are
    discarded; neurons are instantiated in a valid activation order with
    the output neurons last.

    Args:
        genome: genome with `nodes` and `connections` (NEAT-style).
        config: config object exposing `genome_config`.

    Returns:
        RecurrentNetwork over the genome's input and output keys.
    """
    genome_config = config.genome_config
    required = required_for_output(genome_config.input_keys,
                                   genome_config.output_keys,
                                   genome.connections)
    input_keys = genome_config.input_keys
    output_keys = genome_config.output_keys

    # Incoming (source, weight) pairs per destination node.
    std_weights = {}
    # All non-input nodes that participate in the network.
    keys = set()
    for cg in itervalues(genome.connections):
        #if not cg.enabled:
        #    continue
        src, dst = cg.key
        # Skip connections where neither endpoint is required for the output.
        if dst not in required and src not in required:
            continue
        if src not in input_keys:
            keys.add(src)
        keys.add(dst)
        std_weights.setdefault(dst, []).append((src, cg.weight))

    nodes = []
    # Output neurons may end up without any connections during evolution;
    # keep them anyway so the network can still make predictions.
    for okey in output_keys:
        if okey not in keys:
            keys.add(okey)
            std_weights[okey] = []
    # A neuron may only have outgoing connections to the outputs; give it
    # an empty incoming-weight list.
    for key in keys:
        std_weights.setdefault(key, [])

    # Deduce a valid activation order from the incoming-connection graph
    # (outputs are forced to activate last).
    conns = {dst: [src for src, _ in incoming]
             for dst, incoming in std_weights.items()}
    sorted_keys = order_of_activation(conns, input_keys, output_keys)

    # Instantiate the neurons in activation order.
    for node_key in sorted_keys:
        if node_key not in keys:
            continue
        gene = genome.nodes[node_key]
        nodes.append(Neuron(node_key, {
            'activation_function': genome_config.activation_defs.get(gene.activation),
            'integration_function': genome_config.aggregation_function_defs.get(gene.aggregation),
            'bias': gene.bias,
            'activity': 0,
            'output': 0,
            'weights': std_weights[node_key]
        }))

    return RecurrentNetwork(input_keys, output_keys, nodes)
def create(genome, config, map_size):
    """
    Receives a genome and returns its phenotype (a MapNetwork).

    Non-isolated hidden nodes are expanded into maps of `map_size` neurons;
    connection genes describe the wiring (one-to-one with weight 5*w, or
    all-to-all with Gaussian-sampled or uniform 5*w weights).

    Args:
        genome: genome with `nodes` and `connections` (NEAT-style).
        config: config object exposing `genome_config`.
        map_size: number of neurons per map.

    Returns:
        MapNetwork over the genome's input and output keys.
    """
    genome_config = config.genome_config
    required = required_for_output(genome_config.input_keys, genome_config.output_keys, genome.connections)
    # Gather inputs and expressed connections.
    node_inputs = {}  # node key -> list of (source key, weight)
    children = {}     # map parent key -> list of the extra keys in its map
    node_keys = list(genome.nodes.keys())[:]  #+ list(genome_config.input_keys[:])
    # for key in genome_config.input_keys + genome_config.output_keys:
    #     children[key] = []
    #     for i in range(1,map_size):
    #         if key < 0:
    #             new_idx = min(node_keys) - 1
    #         else:
    #             new_idx = max(node_keys) + 1
    #         children[key].append(new_idx)
    #         node_keys.append(new_idx)
    for cg in itervalues(genome.connections):
        if not cg.enabled:
            continue
        i, o = cg.key
        if o not in required and i not in required:
            continue
        # Lazily build the map (children) of each endpoint the first time
        # it appears in a connection. Input/output nodes stay singletons.
        for n in [i, o]:
            if n in children.keys():
                continue
            children[n] = []
            if n in genome_config.input_keys or n in genome_config.output_keys:
                continue
            if not genome.nodes[n].is_isolated:
                for _ in range(1, map_size):
                    new_idx = max(node_keys) + 1
                    children[n].append(new_idx)
                    node_keys.append(new_idx)
        in_map = [i] + children[i]
        out_map = [o] + children[o]
        for n in out_map:
            if n not in node_inputs.keys():
                node_inputs[n] = []
        if len(in_map) == map_size and len(out_map) == map_size:
            #Map to map connectivity
            if cg.one_to_one:
                #1-to-1 mapping
                weight = 5 * cg.weight
                for i_n in range(map_size):
                    node_inputs[out_map[i_n]].append((in_map[i_n], weight))
            else:
                #1-to-all
                if cg.is_gaussian:
                    #Gaussian: each edge samples its own weight
                    for o_n in out_map:
                        for i_n in in_map:
                            node_inputs[o_n].append(
                                (i_n, np.random.normal(cg.weight, cg.sigma)))
                else:
                    #Uniform
                    for o_n in out_map:
                        for i_n in in_map:
                            node_inputs[o_n].append((i_n, 5 * cg.weight))
        else:
            #Map-to-isolated or isolated-to-isolated
            if cg.is_gaussian:
                # Gaussian
                for o_n in out_map:
                    for i_n in in_map:
                        node_inputs[o_n].append(
                            (i_n, np.random.normal(cg.weight, cg.sigma)))
            else:
                # Uniform
                for o_n in out_map:
                    for i_n in in_map:
                        node_inputs[o_n].append((i_n, 5 * cg.weight))
    input_keys = genome_config.input_keys
    output_keys = genome_config.output_keys
    conns = {}
    for k in genome.nodes.keys():
        if k not in node_inputs:
            node_inputs[k] = []
        if k in children:
            # NOTE(review): this resets node_inputs for every child node,
            # discarding any weights accumulated for them above — the child
            # neurons created below then get empty weight lists; looks like
            # it may be meant to run only when the child has no entry; verify.
            for c in children[k]:
                node_inputs[c] = []
        conns[k] = [i for i, _ in node_inputs[k]]
    sorted_keys = order_of_activation(conns, input_keys, output_keys)
    nodes = []
    for node_key in sorted_keys:
        if node_key not in genome.nodes.keys():
            continue
        node = genome.nodes[node_key]
        activation_function = genome_config.activation_defs.get(
            node.activation)
        aggregation_function = genome_config.aggregation_function_defs.get(
            node.aggregation)
        nodes.append(
            Neuron(
                node_key, {
                    'activation_function': activation_function,
                    'integration_function': aggregation_function,
                    'bias': node.bias,
                    'activity': 0,
                    'output': 0,
                    'weights': node_inputs[node_key]
                }))
        if node_key not in children:
            continue
        # Children share the parent's activation, aggregation and bias.
        for n in children[node_key]:
            nodes.append(
                Neuron(
                    n, {
                        'activation_function': activation_function,
                        'integration_function': aggregation_function,
                        'bias': node.bias,
                        'activity': 0,
                        'output': 0,
                        'weights': node_inputs[n]
                    }))
    # for key in genome_config.input_keys:
    #     input_keys.append(key)
    #     if key in children.keys():
    #         for child in children[key]:
    #             input_keys.append(child)
    #
    # for key in genome_config.output_keys:
    #     output_keys.append(key)
    #     if key in children.keys():
    #         for child in children[key]:
    #             output_keys.append(child)
    return MapNetwork(input_keys, output_keys, nodes)