def __init__(self, dimensions, n_neurons_per_ensemble=50, mutual_inhib=1.0,
             threshold=0.0, **kwargs):
    if "net" in kwargs:
        raise ObsoleteError("The 'net' argument is no longer supported.")
    kwargs.setdefault("label", "Thalamus")
    super().__init__(**kwargs)

    with self:
        self.actions = EnsembleArray(
            n_neurons_per_ensemble,
            dimensions,
            intercepts=Uniform(threshold, 1),
            encoders=Choice([[1.0]]),
            label="actions",
        )
        Connection(
            self.actions.output,
            self.actions.input,
            transform=(np.eye(dimensions) - 1) * mutual_inhib,
        )
        self.bias = Node([1], label="thalamus bias")
        Connection(self.bias, self.actions.input, transform=np.ones((dimensions, 1)))

    self.input = self.actions.input
    self.output = self.actions.output
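# Illustrative usage sketch (assumes this class is exposed as
# nengo.networks.Thalamus). The thalamus expects inhibitory input such as the
# output of a basal ganglia network; here a node with mostly negative values
# stands in for that signal. Names like `model`, `gpi_like`, `p` are hypothetical.
import nengo

with nengo.Network() as model:
    gpi_like = nengo.Node([-0.8, 0.0, -0.8])  # only the middle action is uninhibited
    thal = nengo.networks.Thalamus(dimensions=3)
    nengo.Connection(gpi_like, thal.input)
    p = nengo.Probe(thal.output, synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(0.5)
# sim.data[p][-1] should be close to [0, 1, 0]: the internal bias pushes all
# actions toward 1 and mutual inhibition suppresses the inhibited ones.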
def add_default_output_vector(
    self,
    output_vector,
    output_name="output",
    n_neurons=50,
    min_activation_value=0.5,
):
    """Adds a default output vector to the associative memory network.

    The default output vector is chosen if the input matches none of
    the given input vectors.

    Parameters
    ----------
    output_vector: array_like
        The vector to be produced if the input value matches none of
        the vectors in the input vector list.
    output_name: str, optional
        The name of the output to which the default output vector
        should be applied.
    n_neurons: int, optional
        Number of neurons to use for the default output vector ensemble.
    min_activation_value: float, optional
        Minimum activation value (i.e. threshold) to use to disable
        the default output vector.
    """
    with self.default_ens_config:
        default_vector_ens = Ensemble(
            n_neurons, 1, label=f"Default {output_name} vector"
        )

        Connection(self.bias_node, default_vector_ens, synapse=None)

        tr = -(1.0 / min_activation_value) * np.ones((1, self.n_items))
        if self.thresh_ens is not None:
            c = Connection(self.thresh_ens.output, default_vector_ens, transform=tr)
        else:
            c = Connection(self.elem_output, default_vector_ens, transform=tr)

        # Add the output connection to the output connection list
        self.default_vector_inhibit_conns.append(c)

        # Make new output class attribute and connect to it
        output = getattr(self, output_name)
        Connection(
            default_vector_ens,
            output,
            transform=np.array(output_vector, ndmin=2).T,
            synapse=None,
        )

        if self.inhibit is not None:
            Connection(
                self.inhibit,
                default_vector_ens,
                transform=-self._inhib_scale,
                synapse=None,
            )
def __init__(self, recurrent_tau, n_neurons, dimensions, **kwargs):
    if "net" in kwargs:
        raise ObsoleteError("The 'net' argument is no longer supported.")
    kwargs.setdefault("label", "Integrator")
    super().__init__(**kwargs)

    with self:
        self.input = Node(size_in=dimensions)
        self.ensemble = Ensemble(n_neurons, dimensions=dimensions)
        Connection(self.ensemble, self.ensemble, synapse=recurrent_tau)
        Connection(self.input, self.ensemble, transform=recurrent_tau, synapse=None)

    self.output = self.ensemble
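# Illustrative usage sketch (assumes this class is exposed as
# nengo.networks.Integrator); `stim` and `p` are hypothetical names.
import nengo

with nengo.Network() as model:
    stim = nengo.Node(lambda t: 1.0 if t < 0.2 else 0.0)  # brief input pulse
    integrator = nengo.networks.Integrator(
        recurrent_tau=0.1, n_neurons=100, dimensions=1
    )
    nengo.Connection(stim, integrator.input)
    p = nengo.Probe(integrator.output, synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(1.0)
# After the pulse ends, the decoded value should hold roughly constant,
# since the recurrent connection feeds the represented state back to itself.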
def add_neuron_output(self):
    """Adds a node that collects the neural output of all ensembles.

    Direct neuron output is useful for plotting the spike raster of
    all neurons in the ensemble array.

    This node is accessible through the 'neuron_output' attribute
    of this ensemble array.
    """
    if self.neuron_output is not None:
        warnings.warn("neuron_output already exists. Returning.")
        return self.neuron_output

    if isinstance(self.ea_ensembles[0].neuron_type, Direct):
        raise ValidationError(
            "Ensembles use Direct neuron type. "
            "Cannot get neuron output from Direct neurons.",
            attr="ea_ensembles[0].neuron_type",
            obj=self,
        )

    self.neuron_output = Node(
        size_in=self.n_neurons_per_ensemble * self.n_ensembles,
        label="neuron_output",
    )

    for i, ens in enumerate(self.ea_ensembles):
        Connection(
            ens.neurons,
            self.neuron_output[
                i * self.n_neurons_per_ensemble : (i + 1) * self.n_neurons_per_ensemble
            ],
            synapse=None,
        )

    return self.neuron_output
def conn_probe(model, probe): """Build a "connection" probe type. Connection probes create a connection from the target, and probe the resulting signal (used when you want to probe the default output of an object, which may not have a predefined signal). """ conn = Connection( probe.target, probe, synapse=probe.synapse, solver=probe.solver, add_to_container=False, ) # Set connection's seed to probe's (which isn't used elsewhere) model.seeded[conn] = model.seeded[probe] model.seeds[conn] = model.seeds[probe] # Make a sink signal for the connection model.sig[probe]["in"] = Signal(shape=conn.size_out, name=str(probe)) model.add_op(Reset(model.sig[probe]["in"])) # Build the connection model.build(conn)
def add_neuron_input(self):
    """Adds a node that provides input to the neurons of all ensembles.

    Direct neuron input is useful for inhibiting the activity of all
    neurons in the ensemble array.

    This node is accessible through the 'neuron_input' attribute
    of this ensemble array.
    """
    if self.neuron_input is not None:
        warnings.warn("neuron_input already exists. Returning.")
        return self.neuron_input

    if isinstance(self.ea_ensembles[0].neuron_type, Direct):
        raise ValidationError(
            "Ensembles use Direct neuron type. "
            "Cannot give neuron input to Direct neurons.",
            attr="ea_ensembles[0].neuron_type",
            obj=self,
        )

    self.neuron_input = Node(
        size_in=self.n_neurons_per_ensemble * self.n_ensembles,
        label="neuron_input",
    )

    for i, ens in enumerate(self.ea_ensembles):
        Connection(
            self.neuron_input[
                i * self.n_neurons_per_ensemble : (i + 1) * self.n_neurons_per_ensemble
            ],
            ens.neurons,
            synapse=None,
        )

    return self.neuron_input
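# Illustrative sketch of using the neuron-level input added above to inhibit an
# entire EnsembleArray (assumes nengo.networks.EnsembleArray; `gate` and the
# gating schedule are hypothetical).
import nengo
import numpy as np

with nengo.Network() as model:
    ea = nengo.networks.EnsembleArray(n_neurons=40, n_ensembles=4)
    ea.add_neuron_input()  # creates ea.neuron_input with 4 * 40 inputs
    gate = nengo.Node(lambda t: 1.0 if t > 0.5 else 0.0)
    # A strong negative current into every neuron silences the whole array
    # once the gate turns on.
    nengo.Connection(
        gate, ea.neuron_input, transform=-10 * np.ones((4 * 40, 1)), synapse=None
    )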
def __init__(self, recurrent_tau, frequency, n_neurons, **kwargs):
    if "net" in kwargs:
        raise ObsoleteError("The 'net' argument is no longer supported.")
    kwargs.setdefault("label", "Oscillator")
    super().__init__(**kwargs)

    with self:
        self.input = Node(label="In", size_in=2)
        self.ensemble = Ensemble(n_neurons, dimensions=2, label="Oscillator")
        tA = [[1, -frequency * recurrent_tau], [frequency * recurrent_tau, 1]]
        Connection(self.ensemble, self.ensemble, synapse=recurrent_tau, transform=tA)
        Connection(self.input, self.ensemble, synapse=None)

    self.output = self.ensemble
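# Illustrative usage sketch (assumes this class is exposed as
# nengo.networks.Oscillator); a short kick starts the oscillation.
import nengo

with nengo.Network() as model:
    kick = nengo.Node(lambda t: [1, 0] if t < 0.1 else [0, 0])
    osc = nengo.networks.Oscillator(recurrent_tau=0.1, frequency=10, n_neurons=200)
    nengo.Connection(kick, osc.input)
    p = nengo.Probe(osc.output, synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(1.0)
# sim.data[p] should trace a circle in the 2-D state space at roughly
# 10 radians per second, as set by the `frequency` argument.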
def reroute(conn):
    c = Connection(
        self.thresh_ens.output,
        conn.post,
        transform=conn.transform,
        synapse=conn.synapse,
    )
    self.connections.remove(conn)
    return c
def __init__(
    self,
    n_neurons,
    n_ensembles,
    ens_dimensions=1,
    label=None,
    seed=None,
    add_to_container=None,
    **ens_kwargs,
):
    if "dimensions" in ens_kwargs:
        raise ValidationError(
            "'dimensions' is not a valid argument to EnsembleArray. "
            "To set the number of ensembles, use 'n_ensembles'. To set "
            "the number of dimensions per ensemble, use 'ens_dimensions'.",
            attr="dimensions",
            obj=self,
        )

    super().__init__(label, seed, add_to_container)

    for param, value in ens_kwargs.items():
        if is_iterable(value):
            ens_kwargs[param] = Samples(value)

    self.config[Ensemble].update(ens_kwargs)

    label_prefix = "" if label is None else label + "_"

    self.n_neurons_per_ensemble = n_neurons
    self.n_ensembles = n_ensembles
    self.dimensions_per_ensemble = ens_dimensions

    # These may be set in add_neuron_input and add_neuron_output
    self.neuron_input, self.neuron_output = None, None

    self.ea_ensembles = []
    with self:
        self.input = Node(size_in=self.dimensions, label="input")

        for i in range(n_ensembles):
            e = Ensemble(
                n_neurons,
                self.dimensions_per_ensemble,
                label=f"{label_prefix}{i}",
            )
            Connection(
                self.input[i * ens_dimensions : (i + 1) * ens_dimensions],
                e,
                synapse=None,
            )
            self.ea_ensembles.append(e)

    self.add_output("output", function=None)
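# Illustrative usage sketch: an EnsembleArray representing a 6-D signal as
# three 2-D sub-ensembles (assumes the public nengo.networks.EnsembleArray
# constructor shown above; names are hypothetical).
import nengo
import numpy as np

with nengo.Network() as model:
    stim = nengo.Node(np.linspace(-0.5, 0.5, 6))
    ea = nengo.networks.EnsembleArray(n_neurons=60, n_ensembles=3, ens_dimensions=2)
    nengo.Connection(stim, ea.input)
    p = nengo.Probe(ea.output, synapse=0.01)  # decoded output added in __init__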
def add_output_mapping(self, name, output_vectors):
    """Adds another output to the associative memory network.

    Creates a transform with the given output vectors between the
    associative memory element output and a named output node to enable the
    selection of output vectors by the associative memory.

    Parameters
    ----------
    name: str
        Name to use for the output node. This name will be used as
        the name of the attribute for the associative memory network.
    output_vectors: array_like
        The list of vectors to be produced for each match.
    """
    # --- Put arguments in canonical form
    if is_iterable(output_vectors):
        output_vectors = np.array(output_vectors, ndmin=2)

    # --- Check preconditions
    if hasattr(self, name):
        raise ValidationError(
            "Name '%s' already exists as a node in the "
            "associative memory." % name,
            attr="name",
        )

    # --- Make the output node and connect it
    output = Node(size_in=output_vectors.shape[1], label=name)
    setattr(self, name, output)

    if self.thresh_ens is not None:
        c = Connection(
            self.thresh_ens.output, output, synapse=None, transform=output_vectors.T
        )
    else:
        c = Connection(
            self.elem_output, output, synapse=None, transform=output_vectors.T
        )

    self.out_conns.append(c)
def add_input_mapping(self, name, input_vectors, input_scales=1.0):
    """Adds a set of input vectors to the associative memory network.

    Creates a transform with the given input vectors between a named
    input node and the associative memory element input to enable the
    inputs to be mapped onto ensembles of the Associative Memory.

    Parameters
    ----------
    name: str
        Name to use for the input node. This name will be used as the name
        of the attribute for the associative memory network.
    input_vectors: array_like
        The list of vectors to be compared against.
    input_scales: float or array_like, optional
        Scaling factor to apply on each of the input vectors. Note that it
        is possible to scale each vector independently.
    """
    # --- Put arguments in canonical form
    # Convert to an array first so that `.shape` is always available.
    if is_iterable(input_vectors):
        input_vectors = np.array(input_vectors, ndmin=2)
    n_vectors, d_vectors = input_vectors.shape
    if not is_iterable(input_scales):
        input_scales = input_scales * np.ones((1, n_vectors))
    else:
        input_scales = np.array(input_scales, ndmin=2)

    # --- Check some preconditions
    if input_scales.shape[1] != n_vectors:
        raise ValidationError(
            "Number of input_scale values (%d) does not "
            "match number of input vectors (%d)."
            % (input_scales.shape[1], n_vectors),
            attr="input_scales",
        )
    if hasattr(self, name):
        raise ValidationError(
            "Name '%s' already exists as a node in the "
            "associative memory." % name,
            attr="name",
        )

    # --- Finally, make the input node and connect it
    in_node = Node(size_in=d_vectors, label=name)
    setattr(self, name, in_node)
    Connection(
        in_node,
        self.elem_input,
        synapse=None,
        transform=input_vectors * input_scales.T,
    )
def __init__(
    self,
    n_neurons,
    dimensions,
    invert_a=False,
    invert_b=False,
    input_magnitude=1.0,
    **kwargs,
):
    if "net" in kwargs:
        raise ObsoleteError("The 'net' argument is no longer supported.")
    kwargs.setdefault("label", "Circular convolution")
    super().__init__(**kwargs)

    tr_a = transform_in(dimensions, "A", invert_a)
    tr_b = transform_in(dimensions, "B", invert_b)
    tr_out = transform_out(dimensions)

    with self:
        self.input_a = Node(size_in=dimensions, label="input_a")
        self.input_b = Node(size_in=dimensions, label="input_b")
        self.product = Product(
            n_neurons, tr_out.shape[1], input_magnitude=input_magnitude * 2
        )
        self.output = Node(size_in=dimensions, label="output")

        Connection(self.input_a, self.product.input_a, transform=tr_a, synapse=None)
        Connection(self.input_b, self.product.input_b, transform=tr_b, synapse=None)
        Connection(self.product.output, self.output, transform=tr_out, synapse=None)
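# Illustrative usage sketch: binding two unit vectors with circular convolution
# (assumes this class is exposed as nengo.networks.CircularConvolution; names
# and vector choices are hypothetical).
import nengo
import numpy as np

rng = np.random.RandomState(0)
d = 16
a = rng.randn(d)
a /= np.linalg.norm(a)
b = rng.randn(d)
b /= np.linalg.norm(b)

with nengo.Network() as model:
    in_a = nengo.Node(a)
    in_b = nengo.Node(b)
    cconv = nengo.networks.CircularConvolution(n_neurons=100, dimensions=d)
    nengo.Connection(in_a, cconv.input_a)
    nengo.Connection(in_b, cconv.input_b)
    p = nengo.Probe(cconv.output, synapse=0.02)

with nengo.Simulator(model) as sim:
    sim.run(0.5)
# sim.data[p][-1] should approximate the circular convolution of a and b,
# i.e. np.fft.irfft(np.fft.rfft(a) * np.fft.rfft(b), n=d).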
def conn_probe(model, probe):
    # Connection probes create a connection from the target, and probe
    # the resulting signal (used when you want to probe the default
    # output of an object, which may not have a predefined signal)
    conn = Connection(
        probe.target,
        probe,
        synapse=probe.synapse,
        solver=probe.solver,
        add_to_container=False,
    )

    # Set connection's seed to probe's (which isn't used elsewhere)
    model.seeds[conn] = model.seeds[probe]

    # Make a sink signal for the connection
    model.sig[probe]["in"] = Signal(np.zeros(conn.size_out), name=str(probe))
    model.add_op(Reset(model.sig[probe]["in"]))

    # Build the connection
    model.build(conn)
def _create_replacement_connection(c_in, c_out):
    """Generate a new Connection to replace two through a passthrough Node."""
    # imported here to avoid circular imports
    # pylint: disable=import-outside-toplevel
    from nengo.connection import Connection

    assert c_in.post_obj is c_out.pre_obj
    assert c_in.post_obj.output is None

    # determine the filter for the new Connection
    if c_in.synapse is None:
        synapse = c_out.synapse
    elif c_out.synapse is None:
        synapse = c_in.synapse
    else:
        raise Unconvertible("Cannot merge two filters")
        # Note: the algorithm below is in the right ballpark,
        # but isn't exactly the same as two low-pass filters
        # filter = c_out.filter + c_in.filter

    function = c_in.function
    if c_out.function is not None:
        raise Unconvertible("Cannot remove a connection with a function")

    # compute the combined transform
    transform = np.dot(full_transform(c_out), full_transform(c_in))

    # check if the transform is 0 (this happens a lot
    # with things like identity transforms)
    if np.all(transform == 0):
        return None

    c = Connection(
        c_in.pre_obj,
        c_out.post_obj,
        synapse=synapse,
        transform=transform,
        function=function,
        add_to_container=False,
    )
    return c
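# A minimal sketch of the transform-merging step above: when a passthrough
# node is removed, the transform into the node and the transform out of it
# compose by matrix multiplication (plain NumPy, illustrative values only).
import numpy as np

t_in = np.array([[1.0, 0.0], [0.0, 2.0]])  # pre -> passthrough
t_out = np.array([[0.0, 1.0]])             # passthrough -> post
combined = np.dot(t_out, t_in)             # pre -> post, as computed above
assert np.allclose(combined, [[0.0, 2.0]])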
def add_wta_network(self, inhibit_scale=1.5, inhibit_synapse=0.005):
    """Add a winner-take-all (WTA) network to associative memory output.

    Parameters
    ----------
    inhibit_scale: float, optional
        Mutual inhibition scaling factor.
    inhibit_synapse: float, optional
        Mutual inhibition synapse time constant.
    """
    if not self.is_wta:
        Connection(
            self.elem_output,
            self.elem_input,
            synapse=inhibit_synapse,
            transform=((np.eye(self.n_items) - 1) * inhibit_scale),
        )
        self.is_wta = True
    else:
        warnings.warn(
            "AssociativeMemory network is already configured "
            "with a WTA network. Additional `add_wta_network` "
            "calls are ignored."
        )
def __init__(self, n_neurons, dimensions, input_magnitude=1.0, **kwargs):
    if "net" in kwargs:
        raise ObsoleteError("The 'net' argument is no longer supported.")
    kwargs.setdefault("label", "Product")
    super().__init__(**kwargs)

    with self:
        self.input_a = Node(size_in=dimensions, label="input_a")
        self.input_b = Node(size_in=dimensions, label="input_b")
        self.output = Node(size_in=dimensions, label="output")

        self.sq1 = EnsembleArray(
            max(1, n_neurons // 2),
            n_ensembles=dimensions,
            ens_dimensions=1,
            radius=input_magnitude * np.sqrt(2),
        )
        self.sq2 = EnsembleArray(
            max(1, n_neurons // 2),
            n_ensembles=dimensions,
            ens_dimensions=1,
            radius=input_magnitude * np.sqrt(2),
        )

        tr = 1.0 / np.sqrt(2.0)
        Connection(self.input_a, self.sq1.input, transform=tr, synapse=None)
        Connection(self.input_b, self.sq1.input, transform=tr, synapse=None)
        Connection(self.input_a, self.sq2.input, transform=tr, synapse=None)
        Connection(self.input_b, self.sq2.input, transform=-tr, synapse=None)

        sq1_out = self.sq1.add_output("square", np.square)
        Connection(sq1_out, self.output, transform=0.5, synapse=None)
        sq2_out = self.sq2.add_output("square", np.square)
        Connection(sq2_out, self.output, transform=-0.5, synapse=None)
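# Illustrative usage sketch of the element-wise product network above (assumes
# nengo.networks.Product; names are hypothetical). The network relies on the
# identity ((a + b)**2 - (a - b)**2) / 4 == a * b, computed by the two
# squaring arrays sq1 and sq2 with the +/-0.5 output transforms.
import nengo

with nengo.Network() as model:
    in_a = nengo.Node([0.5, -0.5])
    in_b = nengo.Node([0.5, 0.8])
    prod = nengo.networks.Product(n_neurons=100, dimensions=2)
    nengo.Connection(in_a, prod.input_a)
    nengo.Connection(in_b, prod.input_b)
    p = nengo.Probe(prod.output, synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(0.5)
# sim.data[p][-1] should settle near [0.25, -0.4], the element-wise product.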
def add_output(self, name, function, synapse=None, **conn_kwargs):
    """Adds a node that collects the decoded output of all ensembles.

    By default, this is called once in ``__init__`` with ``function=None``.
    However, this can be called multiple times with different functions,
    similar to the way in which an ensemble can be connected to many
    downstream ensembles with different functions.

    Note that in addition to the parameters below, parameters affecting
    all of the connections from the sub-ensembles to the new node
    can be passed to this function. For example:

    .. testcode::

       ea.add_output("lstsq_output", None, solver=nengo.solvers.Lstsq())

    creates a new output at ``ea.lstsq_output`` with the decoders of each
    connection solved for with the `.Lstsq` solver.

    Parameters
    ----------
    name : str
        The name of the output. This will also be the name of the attribute
        set on the ensemble array.
    function : callable or iterable of callables
        The function to compute across the connection from sub-ensembles
        to the new output node. If function is an iterable, it must be
        an iterable consisting of one function for each sub-ensemble.
    synapse : Synapse, optional
        The synapse model with which to filter the connections from
        sub-ensembles to the new output node. This is kept separate from
        the other ``conn_kwargs`` because this defaults to None rather
        than the default synapse model. In almost all cases the synapse
        should stay as None, and synaptic filtering should be performed in
        the connection from the output node.
    """
    if hasattr(self, name):
        raise ValidationError(
            f"Cannot add output '{name}'; there is already an attribute "
            "with this name",
            attr="name",
            obj=self,
        )

    dims_per_ens = self.dimensions_per_ensemble

    # get output size for each ensemble
    sizes = np.zeros(self.n_ensembles, dtype=int)

    if is_iterable(function) and all(callable(f) for f in function):
        if len(list(function)) != self.n_ensembles:
            raise ValidationError(
                "Must have one function per ensemble", attr="function", obj=self
            )

        for i, func in enumerate(function):
            sizes[i] = np.asarray(func(np.zeros(dims_per_ens))).size
    elif callable(function):
        sizes[:] = np.asarray(function(np.zeros(dims_per_ens))).size
        function = [function] * self.n_ensembles
    elif function is None:
        sizes[:] = dims_per_ens
        function = [None] * self.n_ensembles
    else:
        raise ValidationError(
            "'function' must be a callable, list of callables, or None",
            attr="function",
            obj=self,
        )

    output = Node(output=None, size_in=sizes.sum(), label=name)
    setattr(self, name, output)

    indices = np.zeros(len(sizes) + 1, dtype=int)
    indices[1:] = np.cumsum(sizes)
    for i, e in enumerate(self.ea_ensembles):
        Connection(
            e,
            output[indices[i] : indices[i + 1]],
            function=function[i],
            synapse=synapse,
            **conn_kwargs,
        )

    return output
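# Illustrative sketch of add_output with a custom function (assumes
# nengo.networks.EnsembleArray; names are hypothetical). Each sub-ensemble
# computes its own square, and the results are concatenated into a new node
# that becomes available as ea.square.
import nengo
import numpy as np

with nengo.Network() as model:
    ea = nengo.networks.EnsembleArray(n_neurons=50, n_ensembles=4)
    sq = ea.add_output("square", np.square)  # 4-D node, one value per ensemble
    p = nengo.Probe(sq, synapse=0.01)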
def __init__(  # noqa: C901
    self,
    input_vectors,
    output_vectors=None,
    n_neurons=50,
    threshold=0.3,
    input_scales=1.0,
    inhibitable=False,
    label=None,
    seed=None,
    add_to_container=None,
):
    super().__init__(label, seed, add_to_container)

    # --- Put arguments in canonical form
    if output_vectors is None:
        # If output vocabulary is not specified, use input vector list
        # (i.e. autoassociative memory)
        output_vectors = input_vectors
    if is_iterable(input_vectors):
        input_vectors = np.array(input_vectors, ndmin=2)
    if is_iterable(output_vectors):
        output_vectors = np.array(output_vectors, ndmin=2)

    if input_vectors.shape[0] == 0:
        raise ValidationError(
            "Number of input vectors cannot be 0.",
            attr="input_vectors",
            obj=self,
        )
    elif input_vectors.shape[0] != output_vectors.shape[0]:
        # Fail if number of input items and number of output items don't match
        raise ValidationError(
            "Number of input vectors does not match number of output "
            "vectors. %d != %d"
            % (input_vectors.shape[0], output_vectors.shape[0]),
            attr="input_vectors",
            obj=type(self),
        )

    # Handle possible different threshold / input_scale values for each
    # element in the associative memory
    if not is_iterable(threshold):
        threshold = threshold * np.ones(input_vectors.shape[0])
    else:
        threshold = np.array(threshold)

    # --- Check preconditions
    self.n_items = input_vectors.shape[0]
    if threshold.shape[0] != self.n_items:
        raise ValidationError(
            "Number of threshold values (%d) does not match number of "
            "input vectors (%d)." % (threshold.shape[0], self.n_items),
            attr="threshold",
            obj=self,
        )

    # --- Set parameters
    self.out_conns = []  # Used in `add_threshold_to_output`
    # Used in `add_threshold_to_output`
    self.default_vector_inhibit_conns = []
    self.thresh_ens = None  # Will hold thresholded outputs
    self.is_wta = False
    self._inhib_scale = 1.5

    # -- Create the core network
    with self, self.am_ens_config:
        self.bias_node = Node(output=1)
        self.elem_input = Node(size_in=self.n_items, label="element input")
        self.elem_output = Node(size_in=self.n_items, label="element output")
        self.utilities = self.elem_output

        self.am_ensembles = []
        label_prefix = "" if label is None else label + "_"
        filt_scale = 15
        filt_step_func = lambda x: filtered_step(x, 0.0, scale=filt_scale)
        for i in range(self.n_items):
            e = Ensemble(n_neurons, 1, label=label_prefix + str(i))
            self.am_ensembles.append(e)

            # Connect input and output nodes
            Connection(self.bias_node, e, transform=-threshold[i])
            Connection(self.elem_input[i], e)
            Connection(e, self.elem_output[i], function=filt_step_func)

        if inhibitable:
            # Input node for inhibitory gating signal (if enabled)
            self.inhibit = Node(size_in=1, label="inhibit")
            Connection(
                self.inhibit,
                self.elem_input,
                transform=-np.ones((self.n_items, 1)) * self._inhib_scale,
            )
            # Note: We can use a decoded connection here because all the
            # am_ensembles have [1] encoders
        else:
            self.inhibit = None

        self.thresh_bias = None
        self.thresholded_utilities = None

        self.add_input_mapping("input", input_vectors, input_scales)
        self.add_output_mapping("output", output_vectors)
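# Illustrative usage sketch: a small auto-associative memory that cleans up a
# noisy version of one of three stored vectors (assumes this class is exposed
# as nengo.networks.AssociativeMemory; names and vectors are hypothetical).
import nengo
import numpy as np

vocab = np.eye(3)  # three stored vectors; here simply the standard basis

with nengo.Network() as model:
    am = nengo.networks.AssociativeMemory(
        input_vectors=vocab, n_neurons=50, threshold=0.3
    )
    noisy = nengo.Node([0.9, 0.1, -0.05])  # close to the first stored vector
    nengo.Connection(noisy, am.input)
    p = nengo.Probe(am.output, synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(0.3)
# sim.data[p][-1] should be close to [1, 0, 0]; calling am.add_wta_network()
# before simulating would additionally force a single winner for ambiguous inputs.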
def add_threshold_to_outputs(self, n_neurons=50, inhibit_scale=10):
    """Adds a thresholded output to the associative memory.

    Parameters
    ----------
    n_neurons: int, optional
        Number of neurons to use for the default output vector ensemble.
    inhibit_scale: float, optional
        Mutual inhibition scaling factor.
    """
    if self.thresh_ens is not None:
        warnings.warn(
            "AssociativeMemory network is already configured "
            "with thresholded outputs. Additional "
            "`add_threshold_to_output` calls are ignored."
        )
        return

    with self.thresh_ens_config:
        self.thresh_bias = EnsembleArray(n_neurons, self.n_items, label="thresh_bias")
        self.thresh_ens = EnsembleArray(n_neurons, self.n_items, label="thresh_ens")
        self.thresholded_utilities = self.thresh_ens.output

        Connection(
            self.bias_node,
            self.thresh_bias.input,
            transform=np.ones((self.n_items, 1)),
            synapse=None,
        )
        Connection(
            self.bias_node,
            self.thresh_ens.input,
            transform=np.ones((self.n_items, 1)),
            synapse=None,
        )
        Connection(self.elem_output, self.thresh_bias.input, transform=-inhibit_scale)
        Connection(
            self.thresh_bias.output, self.thresh_ens.input, transform=-inhibit_scale
        )

        # Reroute connections from elem_output to be connections
        # from thresh_ens.output
        def reroute(conn):
            c = Connection(
                self.thresh_ens.output,
                conn.post,
                transform=conn.transform,
                synapse=conn.synapse,
            )
            self.connections.remove(conn)
            return c

        self.default_vector_inhibit_conns = [
            reroute(c) for c in self.default_vector_inhibit_conns
        ]
        self.out_conns = [reroute(c) for c in self.out_conns]

        # Make inhibitory connection if inhibit option is set
        if self.inhibit is not None:
            for e in self.thresh_ens.ensembles:
                Connection(
                    self.inhibit, e, transform=-self._inhib_scale, synapse=None
                )
def __init__(
    self,
    dimensions,
    n_neurons_per_ensemble=100,
    output_weight=-3.0,
    input_bias=0.0,
    ampa_config=None,
    gaba_config=None,
    **kwargs,
):
    if "net" in kwargs:
        raise ObsoleteError("The 'net' argument is no longer supported.")
    kwargs.setdefault("label", "Basal Ganglia")
    super().__init__(**kwargs)

    ampa_config, override_ampa = config_with_default_synapse(
        ampa_config, Lowpass(0.002)
    )
    gaba_config, override_gaba = config_with_default_synapse(
        gaba_config, Lowpass(0.008)
    )

    # Affects all ensembles / connections in the BG
    # unless they've been overridden on `self.config`
    config = Config(Ensemble, Connection)
    config[Ensemble].radius = 1.5
    config[Ensemble].encoders = Choice([[1]])
    try:
        # Best, if we have SciPy
        config[Connection].solver = NnlsL2nz()
    except ImportError:
        # Warn if we can't use the better decoder solver.
        warnings.warn(
            "SciPy is not installed, so BasalGanglia will "
            "use the default decoder solver. Installing SciPy "
            "may improve BasalGanglia performance."
        )

    ea_params = {"n_neurons": n_neurons_per_ensemble, "n_ensembles": dimensions}

    with self, config:
        self.strD1 = EnsembleArray(
            label="Striatal D1 neurons",
            intercepts=Uniform(Weights.e, 1),
            **ea_params,
        )
        self.strD2 = EnsembleArray(
            label="Striatal D2 neurons",
            intercepts=Uniform(Weights.e, 1),
            **ea_params,
        )
        self.stn = EnsembleArray(
            label="Subthalamic nucleus",
            intercepts=Uniform(Weights.ep, 1),
            **ea_params,
        )
        self.gpi = EnsembleArray(
            label="Globus pallidus internus",
            intercepts=Uniform(Weights.eg, 1),
            **ea_params,
        )
        self.gpe = EnsembleArray(
            label="Globus pallidus externus",
            intercepts=Uniform(Weights.ee, 1),
            **ea_params,
        )

        self.input = Node(label="input", size_in=dimensions)
        self.output = Node(label="output", size_in=dimensions)

        # add bias input (BG performs best in the range 0.5--1.5)
        if abs(input_bias) > 0.0:
            self.bias_input = Node(
                np.ones(dimensions) * input_bias, label="basal ganglia bias"
            )
            Connection(self.bias_input, self.input)

        # spread the input to StrD1, StrD2, and STN
        Connection(
            self.input,
            self.strD1.input,
            synapse=None,
            transform=Weights.ws * (1 + Weights.lg),
        )
        Connection(
            self.input,
            self.strD2.input,
            synapse=None,
            transform=Weights.ws * (1 - Weights.le),
        )
        Connection(self.input, self.stn.input, synapse=None, transform=Weights.wt)

        # connect the striatum to the GPi and GPe (inhibitory)
        strD1_output = self.strD1.add_output("func_str", Weights.str_func)
        strD2_output = self.strD2.add_output("func_str", Weights.str_func)
        with gaba_config:
            Connection(strD1_output, self.gpi.input, transform=-Weights.wm)
            Connection(strD2_output, self.gpe.input, transform=-Weights.wm)

        # connect the STN to GPi and GPe (broad and excitatory)
        tr = Weights.wp * np.ones((dimensions, dimensions))
        stn_output = self.stn.add_output("func_stn", Weights.stn_func)
        with ampa_config:
            Connection(stn_output, self.gpi.input, transform=tr)
            Connection(stn_output, self.gpe.input, transform=tr)

        # connect the GPe to GPi and STN (inhibitory)
        gpe_output = self.gpe.add_output("func_gpe", Weights.gpe_func)
        with gaba_config:
            Connection(gpe_output, self.gpi.input, transform=-Weights.we)
            Connection(gpe_output, self.stn.input, transform=-Weights.wg)

        # connect GPi to output (inhibitory)
        gpi_output = self.gpi.add_output("func_gpi", Weights.gpi_func)
        Connection(gpi_output, self.output, synapse=None, transform=output_weight)

    # Return ampa_config and gaba_config to previous states, if changed
    if override_ampa:
        del ampa_config[Connection].synapse
    if override_gaba:
        del gaba_config[Connection].synapse
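# Illustrative usage sketch: basal ganglia action selection paired with the
# Thalamus network shown earlier (assumes nengo.networks.BasalGanglia and
# nengo.networks.Thalamus; names and utility values are hypothetical).
import nengo

with nengo.Network() as model:
    utilities = nengo.Node([0.8, 0.5, 0.6])  # BG works best with inputs in 0.5--1.5
    bg = nengo.networks.BasalGanglia(dimensions=3)
    thal = nengo.networks.Thalamus(dimensions=3)
    nengo.Connection(utilities, bg.input)
    nengo.Connection(bg.output, thal.input)  # GPi output inhibits non-selected actions
    p = nengo.Probe(thal.output, synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(0.5)
# The first action (highest utility) should end up near 1, the others near 0.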