def __init__(self, stim_provider, items, contexts, n_pos, rng=None,
             extra_pos=3):
    """Build the item, context, and position vocabularies for the task.

    Parameters
    ----------
    stim_provider : object
        Supplies the item and distractor pointer names
        (``get_all_items``, ``get_all_distractors``).
    items : spa.Vocabulary
        Vocabulary holding the list items.
    contexts : spa.Vocabulary or int
        Context vocabulary, or its dimensionality (a fresh vocabulary is
        created in that case).
    n_pos : int
        Number of list positions to create pointers for.
    rng : np.random.RandomState, optional
        Random number generator for vocabulary creation.
    extra_pos : int, optional
        Additional position pointers created beyond ``n_pos``.
    """
    super(Vocabularies, self).__init__()

    vocab_map = Config.default(spa.Network, 'vocabs')
    if vocab_map is None:
        vocab_map = VocabularyMap(rng=rng)
    self.vocabs = vocab_map

    self.items = items
    if is_integer(contexts):
        contexts = spa.Vocabulary(contexts, rng=rng)
    self.contexts = contexts
    self.positions = spa.Vocabulary(self.items.dimensions, rng=rng)

    # Fill the item vocabulary with every stimulus (and distractor) name.
    self.items.populate(';'.join(stim_provider.get_all_items()))
    if stim_provider.n_distractors_per_epoch > 0:
        self.items.populate(';'.join(stim_provider.get_all_distractors()))

    # NOTE(review): context pointers are counted by the *item*
    # dimensionality, not the context dimensionality — confirm intent.
    for idx in range(self.items.dimensions):
        self.contexts.populate('CTX' + str(idx))
    for idx in range(n_pos + extra_pos):
        self.positions.populate('P' + str(idx))
def config_with_default_synapse(config, synapse):
    """Return ``(config, overridden)`` with a synapse default installed.

    If ``config`` is None a fresh ``Config(Connection)`` is created with
    ``synapse`` as its default (``overridden`` is False, since there is
    nothing to restore). If a config is given but has no synapse default,
    ``synapse`` is installed and ``overridden`` is True so the caller can
    remove it again afterwards.
    """
    if config is None:
        config = Config(Connection)
        config[Connection].synapse = synapse
    overridden = "synapse" not in config[Connection]
    if overridden:
        config[Connection].synapse = synapse
    return config, overridden
def coerce(self, conn, solver):
    """Resolve ``ConnectionDefault`` and reject weight solvers on probes."""
    if solver is ConnectionDefault:
        solver = Config.default(Connection, 'solver')
    coerced = super().coerce(conn, solver)
    if coerced is not None and coerced.weights:
        # Probes have no postsynaptic ensemble to apply weights to.
        raise ValidationError("weight solvers only work for ensemble to "
                              "ensemble connections, not probes",
                              attr=self.name, obj=conn)
    return coerced
def thresh_ens_config(self):
    """(Config) Defaults for threshold ensemble creation."""
    defaults = {
        "radius": 1,
        "intercepts": Uniform(0.5, 1.0),
        "encoders": Choice([[1]]),
        "eval_points": Uniform(0.75, 1.1),
        "n_eval_points": self.n_eval_points,
    }
    cfg = Config(Ensemble)
    cfg[Ensemble].update(defaults)
    return cfg
def coerce(self, conn, solver):
    """Resolve ``ConnectionDefault``; disallow weight solvers for probes."""
    chosen = solver
    if chosen is ConnectionDefault:
        chosen = Config.default(Connection, 'solver')
    chosen = super().coerce(conn, chosen)
    if chosen is None or not chosen.weights:
        return chosen
    # A weight solver needs a target ensemble, which a probe is not.
    raise ValidationError(
        "weight solvers only work for ensemble to "
        "ensemble connections, not probes",
        attr=self.name, obj=conn)
def default_ens_config(self):
    """(Config) Defaults for other ensemble creation."""
    defaults = {
        "radius": 1,
        "intercepts": Exponential(self.exp_scale, 0.0, 1.0),
        "encoders": Choice([[1]]),
        "eval_points": Uniform(0.0, 1.0),
        "n_eval_points": self.n_eval_points,
    }
    cfg = Config(Ensemble)
    cfg[Ensemble].update(defaults)
    return cfg
def __setattr__(self, name, val):
    """Set an attribute, warning on new names and annotating failures.

    Resolves ``Default`` through the config system, warns when a brand-new
    attribute is created on an initialized object (usually a typo), and
    prefixes any exception raised by the parameter system with the
    offending ``Class.attribute`` for easier debugging.
    """
    if hasattr(self, '_initialized') and not hasattr(self, name):
        warnings.warn(
            "Creating new attribute '%s' on '%s'. "
            "Did you mean to change an existing attribute?" % (name, self),
            SyntaxWarning)
    if val is Default:
        val = Config.default(type(self), name)
    try:
        super(NengoObject, self).__setattr__(name, val)
    except Exception as e:
        msg = e.args[0] if e.args else ''
        e.args = ("Validation error when setting '%s.%s': %s"
                  % (self.__class__.__name__, name, msg),) + e.args[1:]
        raise
def __setattr__(self, name, val):
    """Set an attribute, resolving defaults and annotating validation errors."""
    # A new attribute on an already-initialized object is usually a typo.
    is_new_attr = hasattr(self, '_initialized') and not hasattr(self, name)
    if is_new_attr:
        warnings.warn(
            "Creating new attribute '%s' on '%s'. "
            "Did you mean to change an existing attribute?" % (name, self),
            SyntaxWarning)
    if val is Default:
        val = Config.default(type(self), name)
    try:
        super(NengoObject, self).__setattr__(name, val)
    except Exception as err:
        first = '' if len(err.args) == 0 else err.args[0]
        first = ("Validation error when setting '%s.%s': %s"
                 % (self.__class__.__name__, name, first))
        err.args = (first,) + err.args[1:]
        raise
def am_ens_config(self):
    """(Config) Defaults for associative memory ensemble creation."""
    cfg = Config(Ensemble, Connection)
    ens_defaults = {
        "radius": 1,
        "intercepts": Exponential(self.exp_scale, 0.0, 1.0),
        "encoders": Choice([[1]]),
        "eval_points": Uniform(0.0, 1.0),
        "n_eval_points": self.n_eval_points,
    }
    cfg[Ensemble].update(ens_defaults)
    # Connections inside the memory default to no synaptic filtering.
    cfg[Connection].synapse = None
    return cfg
def __setattr__(self, name, val):
    """Set an attribute, optionally hiding tracebacks for validation errors."""
    if hasattr(self, '_initialized') and not hasattr(self, name):
        warnings.warn(
            "Creating new attribute '%s' on '%s'. "
            "Did you mean to change an existing attribute?" % (name, self),
            SyntaxWarning)
    if val is Default:
        val = Config.default(type(self), name)
    if not rc.getboolean('exceptions', 'simplified'):
        super(NengoObject, self).__setattr__(name, val)
        return
    # Simplified mode: re-raise validation errors without a traceback.
    try:
        super(NengoObject, self).__setattr__(name, val)
    except ValidationError:
        exc_info = sys.exc_info()
        reraise(exc_info[0], exc_info[1], None)
def ThresholdingEnsembles(threshold, intercept_width=0.15, radius=1.0):
    """Configuration preset for a thresholding ensemble.

    This preset adjust ensemble parameters for thresholding. The ensemble's
    neurons will only fire for values above threshold. One can either decode
    the represented value (if it is above the threshold) or decode
    a step function if binary classification is desired.

    This preset:

    - Sets intercepts to be between ``threshold`` and ``radius`` with an
      exponential distribution (shape parameter of ``intercept_width``).
      This clusters intercepts near the threshold for better approximation.
    - Sets encoders to 1.
    - Sets evaluation points to be uniformly distributed between
      ``threshold`` and ``radius``.
    - Sets the radius.

    Parameters
    ----------
    threshold : float
        Point at which ensembles should start firing.
    intercept_width : float, optional
        Controls how widely distributed the intercepts are. Smaller values
        give more clustering at the threshold, larger values give a more
        uniform distribution.
    radius : float, optional
        Ensemble radius.

    Returns
    -------
    `nengo.Config`
        Configuration with presets.
    """
    cfg = Config(Ensemble)
    ens_defaults = cfg[Ensemble]
    ens_defaults.radius = radius
    ens_defaults.intercepts = Exponential(intercept_width, threshold, radius)
    ens_defaults.encoders = Choice([[1]])
    # Evaluation points cover only the firing region, normalized by radius.
    ens_defaults.eval_points = Uniform(threshold / radius, 1)
    return cfg
def __init__(self, label=None, seed=None, add_to_container=None,
             vocabs=None):
    """Initialize the network, resolving a shared vocabulary map.

    The vocabulary map is taken, in order of preference, from the ``vocabs``
    argument, the configured default, or the master map cached for the
    enclosing network context; otherwise a fresh ``VocabularyMap`` is
    created (seeded from ``seed`` when given) and cached on the class for
    the current context.
    """
    super(Network, self).__init__(label, seed, add_to_container)
    self.config.configures(Network)

    if vocabs is None:
        vocabs = Config.default(Network, 'vocabs')
    in_context = len(Network.context) > 0
    if vocabs is None and in_context:
        vocabs = self._master_vocabs.get(Network.context[0], None)
    if vocabs is None:
        rng = np.random.RandomState(seed) if seed is not None else None
        vocabs = VocabularyMap(rng=rng)
        if in_context:
            # Cache so sibling networks in this context share vocabularies.
            self.__class__._master_vocabs[Network.context[0]] = vocabs

    self.vocabs = vocabs
    self.config[Network].vocabs = vocabs
    self._stimuli = None
def __set__(self, instance, value):
    """Store the solver, replacing ``ConnectionDefault`` with the configured one."""
    resolved = (Config.default(Connection, 'solver')
                if value is ConnectionDefault else value)
    super(ProbeSolverParam, self).__set__(instance, resolved)
def default_config():
    """Constructs a Config object for setting Nengo object defaults."""
    configurable = (Connection, Ensemble, Node, Probe)
    return Config(*configurable)
def default_config():
    """Build a Config covering the core Nengo object types."""
    config = Config()
    for cls in (Connection, Ensemble, Network, Node, Probe):
        config.configures(cls)
    return config
def coerce(self, conn, solver):  # pylint: disable=arguments-renamed
    """Replace ``ConnectionDefault`` with the configured default solver."""
    chosen = (Config.default(Connection, "solver")
              if solver is ConnectionDefault else solver)
    return super().coerce(conn, chosen)
def default_config():
    """Constructs a `~.Config` object for setting defaults."""
    object_types = (Connection, Ensemble, Node, Probe)
    return Config(*object_types)
def __set__(self, instance, value):
    """Store the solver; ``ConnectionDefault`` resolves to the configured one."""
    if value is ConnectionDefault:
        value = Config.default(Connection, "solver")
    super(ProbeSolverParam, self).__set__(instance, value)
def coerce(self, conn, solver):
    """Coerce the solver, resolving ``ConnectionDefault`` first."""
    resolved = solver
    if resolved is ConnectionDefault:
        resolved = Config.default(Connection, "solver")
    return super().coerce(conn, resolved)
def __init__(self, dimensions, n_neurons_per_ensemble=100, output_weight=-3.0, input_bias=0.0, ampa_config=None, gaba_config=None, **kwargs): if "net" in kwargs: raise ObsoleteError("The 'net' argument is no longer supported.") kwargs.setdefault("label", "Basal Ganglia") super().__init__(**kwargs) ampa_config, override_ampa = config_with_default_synapse( ampa_config, Lowpass(0.002)) gaba_config, override_gaba = config_with_default_synapse( gaba_config, Lowpass(0.008)) # Affects all ensembles / connections in the BG # unless they've been overridden on `self.config` config = Config(Ensemble, Connection) config[Ensemble].radius = 1.5 config[Ensemble].encoders = Choice([[1]]) try: # Best, if we have SciPy config[Connection].solver = NnlsL2nz() except ImportError: # Warn if we can't use the better decoder solver. warnings.warn("SciPy is not installed, so BasalGanglia will " "use the default decoder solver. Installing SciPy " "may improve BasalGanglia performance.") ea_params = { "n_neurons": n_neurons_per_ensemble, "n_ensembles": dimensions } with self, config: self.strD1 = EnsembleArray( label="Striatal D1 neurons", intercepts=Uniform(Weights.e, 1), **ea_params, ) self.strD2 = EnsembleArray( label="Striatal D2 neurons", intercepts=Uniform(Weights.e, 1), **ea_params, ) self.stn = EnsembleArray( label="Subthalamic nucleus", intercepts=Uniform(Weights.ep, 1), **ea_params, ) self.gpi = EnsembleArray( label="Globus pallidus internus", intercepts=Uniform(Weights.eg, 1), **ea_params, ) self.gpe = EnsembleArray( label="Globus pallidus externus", intercepts=Uniform(Weights.ee, 1), **ea_params, ) self.input = Node(label="input", size_in=dimensions) self.output = Node(label="output", size_in=dimensions) # add bias input (BG performs best in the range 0.5--1.5) if abs(input_bias) > 0.0: self.bias_input = Node(np.ones(dimensions) * input_bias, label="basal ganglia bias") Connection(self.bias_input, self.input) # spread the input to StrD1, StrD2, and STN Connection( self.input, 
self.strD1.input, synapse=None, transform=Weights.ws * (1 + Weights.lg), ) Connection( self.input, self.strD2.input, synapse=None, transform=Weights.ws * (1 - Weights.le), ) Connection(self.input, self.stn.input, synapse=None, transform=Weights.wt) # connect the striatum to the GPi and GPe (inhibitory) strD1_output = self.strD1.add_output("func_str", Weights.str_func) strD2_output = self.strD2.add_output("func_str", Weights.str_func) with gaba_config: Connection(strD1_output, self.gpi.input, transform=-Weights.wm) Connection(strD2_output, self.gpe.input, transform=-Weights.wm) # connect the STN to GPi and GPe (broad and excitatory) tr = Weights.wp * np.ones((dimensions, dimensions)) stn_output = self.stn.add_output("func_stn", Weights.stn_func) with ampa_config: Connection(stn_output, self.gpi.input, transform=tr) Connection(stn_output, self.gpe.input, transform=tr) # connect the GPe to GPi and STN (inhibitory) gpe_output = self.gpe.add_output("func_gpe", Weights.gpe_func) with gaba_config: Connection(gpe_output, self.gpi.input, transform=-Weights.we) Connection(gpe_output, self.stn.input, transform=-Weights.wg) # connect GPi to output (inhibitory) gpi_output = self.gpi.add_output("func_gpi", Weights.gpi_func) Connection(gpi_output, self.output, synapse=None, transform=output_weight) # Return ampa_config and gaba_config to previous states, if changed if override_ampa: del ampa_config[Connection].synapse if override_gaba: del gaba_config[Connection].synapse