Example #1
def get_eval_points(model, conn, rng):
    if conn.eval_points is None:
        return npext.array(
            model.params[conn.pre_obj].eval_points, min_dims=2)
    else:
        return gen_eval_points(
            conn.pre_obj, conn.eval_points, rng, conn.scale_eval_points)
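
Here npext.array is nengo's numpy helper; its min_dims argument ensures the cached eval points come back as a 2-D (n_points x d) array even when a lower-dimensional array was stored. A rough plain-numpy stand-in (numpy's ndmin pads with leading size-1 axes; nengo's helper may place the padding axes differently, so treat this as illustrative only):

import numpy as np

def as_2d(points):
    # Coerce to an ndarray with at least two dimensions; ndmin pads
    # the shape with size-1 axes when needed.
    return np.array(points, ndmin=2)

print(as_2d([0.1, -0.3, 0.5]).shape)  # (1, 3) rather than (3,)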
Example #2
def build_linear_system(model, conn, rng):
    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    if conn.eval_points is None:
        eval_points = npext.array(model.params[conn.pre_obj].eval_points,
                                  min_dims=2)
    else:
        eval_points = gen_eval_points(conn.pre_obj, conn.eval_points, rng,
                                      conn.scale_eval_points)
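
    # This Decimal variant assumes encoders holds decimal.Decimal objects
    # (cf. npext.castDecimal below), so dividing by dc.Decimal(radius)
    # broadcasts elementwise in exact decimal arithmetic.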
    z = encoders.T / dc.Decimal(conn.pre_obj.radius)
    x = np.dot(eval_points, z)
    activities = conn.pre_obj.neuron_type.rates(x, gain, bias)
    if np.count_nonzero(activities) == 0:
        raise RuntimeError(
            "Building %s: 'activites' matrix is all zero for %s. "
            "This is because no evaluation points fall in the firing "
            "ranges of any neurons." % (conn, conn.pre_obj))

    if conn.function is None:
        targets = eval_points[:, conn.pre_slice]
    else:
        targets = npext.castDecimal(np.zeros(
            (len(eval_points), conn.size_mid)))
        for i, ep in enumerate(eval_points[:, conn.pre_slice]):
            targets[i] = conn.function(ep)

    return eval_points, activities, targets
Example #3
def build_linear_system(model, conn, rng):
    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    if conn.eval_points is None:
        eval_points = npext.array(
            model.params[conn.pre_obj].eval_points, min_dims=2)
    else:
        eval_points = gen_eval_points(
            conn.pre_obj, conn.eval_points, rng, conn.scale_eval_points)

    x = np.dot(eval_points, encoders.T / conn.pre_obj.radius)
    activities = conn.pre_obj.neuron_type.rates(x, gain, bias)
    if np.count_nonzero(activities) == 0:
        raise RuntimeError(
            "Building %s: 'activites' matrix is all zero for %s. "
            "This is because no evaluation points fall in the firing "
            "ranges of any neurons." % (conn, conn.pre_obj))

    if conn.function is None:
        targets = eval_points[:, conn.pre_slice]
    else:
        targets = np.zeros((len(eval_points), conn.size_mid))
        for i, ep in enumerate(eval_points[:, conn.pre_slice]):
            targets[i] = conn.function(ep)

    return eval_points, activities, targets
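
The triple returned by build_linear_system is exactly what a least-squares decoder solver consumes: activities is the A matrix and targets the right-hand side of A @ decoders ~= targets. A self-contained toy version, using a rectified-linear rate model as a stand-in for conn.pre_obj.neuron_type.rates (all names and sizes here are illustrative):

import numpy as np

rng = np.random.RandomState(0)
n_neurons, d, n_points = 40, 1, 200

encoders = rng.choice([-1.0, 1.0], size=(n_neurons, d))  # unit encoders in 1-D
gain = rng.uniform(0.5, 2.0, size=n_neurons)
bias = rng.uniform(-1.0, 1.0, size=n_neurons)
eval_points = rng.uniform(-1.0, 1.0, size=(n_points, d))
radius = 1.0

x = np.dot(eval_points, encoders.T / radius)   # (n_points, n_neurons)
activities = np.maximum(gain * x + bias, 0.0)  # toy ReLU rate model
targets = eval_points ** 2                     # decode f(x) = x**2

# Solve activities @ decoders ~= targets in the least-squares sense
decoders, *_ = np.linalg.lstsq(activities, targets, rcond=None)
print(decoders.shape)  # (n_neurons, d)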
Example #4
def get_eval_points(model, conn, rng):
    if conn.eval_points is None:
        view = model.params[conn.pre_obj].eval_points.view()
        view.setflags(write=False)
        return view
    else:
        return gen_eval_points(conn.pre_obj, conn.eval_points, rng, conn.scale_eval_points)
Example #5
def get_eval_points(model, conn, rng):
    if conn.eval_points is None:
        view = model.params[conn.pre_obj].eval_points.view()
        view.setflags(write=False)
        return view
    else:
        return gen_eval_points(conn.pre_obj, conn.eval_points, rng,
                               conn.scale_eval_points)
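
The view/setflags pattern in Examples #4 and #5 hands callers the cached eval points without copying, while preventing accidental mutation of the built parameters. The idiom in isolation:

import numpy as np

cached = np.arange(6.0).reshape(3, 2)  # stands in for the built eval_points

view = cached.view()
view.setflags(write=False)

try:
    view[0, 0] = 99.0
except ValueError as e:
    print(e)  # assignment destination is read-only

# The original array is still writeable through its own reference
cached[0, 0] = 99.0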
Example #6
def build_ensemble(model, ens):
    if isinstance(ens.neuron_type, nengo.Direct):
        raise NotImplementedError("Direct neurons not implemented")

    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up encoders
    if isinstance(ens.encoders, Distribution):
        encoders = get_samples(ens.encoders,
                               ens.n_neurons,
                               ens.dimensions,
                               rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)

    if ens.normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(ens, rng,
                                                      model.intercept_limit)

    block = LoihiBlock(ens.n_neurons, label='%s' % ens)
    block.compartment.bias[:] = bias
    model.build(ens.neuron_type, ens.neurons, block)

    # set default filter just in case no other filter gets set
    block.compartment.configure_default_filter(model.decode_tau, dt=model.dt)

    if ens.noise is not None:
        raise NotImplementedError("Ensemble noise not implemented")

    # Scale the encoders
    # we exclude the radius to keep scaling reasonable for decode neurons
    scaled_encoders = encoders * gain[:, np.newaxis]

    model.add_block(block)

    model.objs[ens]['in'] = block
    model.objs[ens]['out'] = block
    model.objs[ens.neurons]['in'] = block
    model.objs[ens.neurons]['out'] = block
    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
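
The row-wise encoder normalization above is a one-line broadcast, but a zero-length encoder row yields NaNs after the division; that is the failure mode the explicit NaN check in Example #11 below reports. In plain numpy:

import numpy as np

encoders = np.array([[3.0, 4.0],
                     [0.0, 0.0]])  # second row has zero length

norms = np.linalg.norm(encoders, axis=1, keepdims=True)
normalized = encoders / norms      # row 0 -> [0.6, 0.8]; row 1 -> [nan, nan]

if np.any(np.isnan(normalized)):
    raise ValueError("zero-length encoder rows produce NaNs when normalized")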
Example #7
def build_lif(model, ens):
    # Create a random number generator
    rng = np.random.RandomState(model.seeds[ens])

    # Get the eval points
    eval_points = ensemble.gen_eval_points(ens, ens.eval_points, rng=rng)

    # Get the encoders
    if isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=np.float64)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Get maximum rates and intercepts
    max_rates = ensemble.sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = ensemble.sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is None and ens.bias is None:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
    elif ens.gain is not None and ens.bias is not None:
        gain = ensemble.sample(ens.gain, ens.n_neurons, rng=rng)
        bias = ensemble.sample(ens.bias, ens.n_neurons, rng=rng)
    else:
        raise NotImplementedError(
            "gain or bias set for {!s}, but not both. Solving for one given "
            "the other is not yet implemented.".format(ens)
        )

    # Scale the encoders
    scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    # Store all the parameters
    model.params[ens] = BuiltEnsemble(
        eval_points=eval_points,
        encoders=encoders,
        scaled_encoders=scaled_encoders,
        max_rates=max_rates,
        intercepts=intercepts,
        gain=gain,
        bias=bias
    )

    # Create the object which will handle simulation of the LIF ensemble.  This
    # object will be responsible for adding items to the netlist and providing
    # functions to prepare the ensemble for simulation.  The object may be
    # modified by later methods.
    model.object_operators[ens] = operators.EnsembleLIF(ens)
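
The gain/bias solve and encoder scaling in build_lif can be exercised directly through nengo's public neuron API (assuming a standard nengo install):

import numpy as np
import nengo

lif = nengo.LIF(tau_rc=0.02, tau_ref=0.002)

max_rates = np.array([200.0, 250.0, 300.0])
intercepts = np.array([-0.5, 0.0, 0.5])

# Solve for the gain and bias that realize these tuning properties
gain, bias = lif.gain_bias(max_rates, intercepts)

# Fold gain and radius into the encoders, as build_lif does above
encoders = np.array([[1.0], [-1.0], [1.0]])
radius = 2.0
scaled_encoders = encoders * (gain / radius)[:, np.newaxis]
print(scaled_encoders.shape)  # (3, 1)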
Example #8
def get_eval_points(model, conn, rng):
    if conn.eval_points is None:
        view = model.params[conn.pre_obj].eval_points.view()
        view.setflags(write=False)
        assert view.dtype == rc.float_dtype
        return view
    else:
        return gen_eval_points(
            conn.pre_obj,
            conn.eval_points,
            rng,
            conn.scale_eval_points,
            dtype=rc.float_dtype,
        )
Example #9
def build_lif(model, ens):
    # Create a random number generator
    rng = np.random.RandomState(model.seeds[ens])

    # Get the eval points
    eval_points = ensemble.gen_eval_points(ens, ens.eval_points, rng=rng)

    # Get the encoders
    if isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=np.float64)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Get maximum rates and intercepts
    max_rates = ensemble.sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = ensemble.sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is None and ens.bias is None:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
    elif ens.gain is not None and ens.bias is not None:
        gain = ensemble.sample(ens.gain, ens.n_neurons, rng=rng)
        bias = ensemble.sample(ens.bias, ens.n_neurons, rng=rng)
    else:
        raise NotImplementedError(
            "gain or bias set for {!s}, but not both. Solving for one given "
            "the other is not yet implemented.".format(ens))

    # Scale the encoders
    scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    # Store all the parameters
    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      scaled_encoders=scaled_encoders,
                                      max_rates=max_rates,
                                      intercepts=intercepts,
                                      gain=gain,
                                      bias=bias)

    # Create the object which will handle simulation of the LIF ensemble.  This
    # object will be responsible for adding items to the netlist and providing
    # functions to prepare the ensemble for simulation.  The object may be
    # modified by later methods.
    model.object_operators[ens] = operators.EnsembleLIF(ens)
Example #10
def get_eval_points(model, conn, rng):
    if conn.eval_points is None:
        return npext.array(model.params[conn.pre_obj].eval_points, min_dims=2)
    else:
        return gen_eval_points(conn.pre_obj, conn.eval_points, rng,
                               conn.scale_eval_points)
Example #11
def build_ensemble(model, ens):
    if isinstance(ens.neuron_type, nengo.Direct):
        raise NotImplementedError("Direct neurons not implemented")

    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(
        ens, ens.eval_points, rng=rng, dtype=nengo.rc.float_dtype
    )

    # Set up encoders
    if isinstance(ens.encoders, Distribution):
        encoders = get_samples(ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=nengo.rc.float_dtype)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=nengo.rc.float_dtype)

    if ens.normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    if np.any(np.isnan(encoders)):
        raise BuildError(
            f"{ens}: NaNs detected in encoders. This usually means that you have "
            "zero-length encoders; when normalized, these result in NaNs. Ensure all "
            "encoders have non-zero length, or set `normalize_encoders=False`."
        )

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(
        ens, rng, intercept_limit=model.intercept_limit, dtype=nengo.rc.float_dtype
    )

    block = LoihiBlock(ens.n_neurons, label="%s" % ens)
    block.compartment.bias[:] = bias

    # build the neuron_type (see builders below)
    model.build(ens.neuron_type, ens.neurons, block)

    # set default filter just in case no other filter gets set
    block.compartment.configure_default_filter(model.decode_tau, dt=model.dt)

    if ens.noise is not None:
        raise NotImplementedError("Ensemble noise not implemented")

    # Scale the encoders
    # we exclude the radius to keep scaling reasonable for decode neurons
    scaled_encoders = encoders * gain[:, np.newaxis]

    # add instructions for splitting
    model.block_shapes[block] = model.config[ens].block_shape

    model.add_block(block)

    model.objs[ens]["in"] = block
    model.objs[ens]["out"] = block
    model.objs[ens.neurons]["in"] = block
    model.objs[ens.neurons]["out"] = block
    model.params[ens] = BuiltEnsemble(
        eval_points=eval_points,
        encoders=encoders,
        intercepts=intercepts,
        max_rates=max_rates,
        scaled_encoders=scaled_encoders,
        gain=gain,
        bias=bias,
    )
Example #12
def tune_ens_parameters(ens, function=None, solver=None, rng=None, n=1000):
    """Find good ensemble parameters for decoding a particular function.

    Randomly generate many sets of parameters and determine the decoding error
    for each. Then set the ensemble parameters to those with the lowest
    decoding error. The "ensemble parameters" are the encoders, gains, biases,
    and evaluation points.

    Parameters
    ----------
    ens : Ensemble
        The ensemble to optimize.
    function : callable, optional
        The target function to optimize for. Defaults to the identity function.
    solver : nengo.solvers.Solver, optional
        The solver to use for finding the decoders. Default: ``LstsqL2()``
    rng : numpy.random.RandomState, optional
        The random number generator to use. Default: ``np.random``
    n : int, optional
        The number of random combinations to test. Default: 1000
    """
    from nengo.dists import Distribution
    from nengo.neurons import Direct
    from nengo.solvers import LstsqL2
    from nengo.builder.connection import solve_for_decoders
    from nengo.builder.ensemble import gen_eval_points

    if solver is None:
        solver = LstsqL2()
    if rng is None:
        rng = np.random
    if isinstance(ens.neuron_type, Direct):
        raise ValueError("Parameters do not apply to Direct mode ensembles")

    sample = lambda dist, n, d=None: (
        dist.sample(n, d=d, rng=rng) if isinstance(dist, Distribution)
        else np.asarray(dist))

    # use the same evaluation points for all trials
    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)
    targets = (np.array([function(ep) for ep in eval_points])
               if function is not None else eval_points)

    # --- try random parameters and record error
    errors = []
    for i in range(n):
        # --- generate random parameters
        if ens.gain is None and ens.bias is None:
            max_rates = sample(ens.max_rates, ens.n_neurons)
            intercepts = sample(ens.intercepts, ens.n_neurons)
            gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
        elif ens.gain is not None and ens.bias is not None:
            gain = sample(ens.gain, ens.n_neurons)
            bias = sample(ens.bias, ens.n_neurons)
        else:
            raise NotImplementedError("Mixed gain/bias and rates/ints")

        encoders = sample(ens.encoders, ens.n_neurons, ens.dimensions)

        # --- determine residual
        x = np.dot(eval_points, encoders.T / ens.radius)
        decoders, info = solve_for_decoders(solver, ens.neuron_type, gain,
                                            bias, x, targets, rng)
        error = info['rmses'].mean()

        errors.append((error, encoders, gain, bias, eval_points))

    # --- set parameters to those with the lowest error
    errors.sort(key=lambda x: x[0])
    ens.encoders, ens.gain, ens.bias, ens.eval_points = errors[0][1:]
Example #13
def tune_ens_parameters(ens, function=None, solver=None, rng=None, n=1000):
    """Find good ensemble parameters for decoding a particular function.

    Randomly generate many sets of parameters and determine the decoding error
    for each. Then set the ensemble parameters to those with the lowest
    decoding error. The "ensemble parameters" are the encoders, gains, biases,
    and evaluation points.

    Parameters
    ----------
    ens : Ensemble
        The ensemble to optimize.
    function : callable, optional
        The target function to optimize for. Defaults to the identity function.
    solver : nengo.solvers.Solver, optional
        The solver to use for finding the decoders. Default: ``LstsqL2()``
    rng : numpy.random.RandomState, optional
        The random number generator to use. Default: ``np.random``
    n : int, optional
        The number of random combinations to test. Default: 1000
    """
    from nengo.dists import Distribution
    from nengo.neurons import Direct
    from nengo.solvers import LstsqL2
    from nengo.builder.connection import solve_for_decoders
    from nengo.builder.ensemble import gen_eval_points

    if solver is None:
        solver = LstsqL2()
    if rng is None:
        rng = np.random
    if isinstance(ens.neuron_type, Direct):
        raise ValueError("Parameters do not apply to Direct mode ensembles")

    sample = lambda dist, n, d=None: (
        dist.sample(n, d=d, rng=rng) if isinstance(dist, Distribution)
        else np.asarray(dist))

    # use the same evaluation points for all trials
    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)
    targets = (np.array([function(ep) for ep in eval_points])
               if function is not None else eval_points)

    # --- try random parameters and record error
    errors = []
    for i in range(n):
        # --- generate random parameters
        if ens.gain is None and ens.bias is None:
            max_rates = sample(ens.max_rates, ens.n_neurons)
            intercepts = sample(ens.intercepts, ens.n_neurons)
            gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
        elif ens.gain is not None and ens.bias is not None:
            gain = sample(ens.gain, ens.n_neurons)
            bias = sample(ens.bias, ens.n_neurons)
        else:
            raise NotImplementedError("Mixed gain/bias and rates/ints")

        encoders = sample(ens.encoders, ens.n_neurons, ens.dimensions)

        # --- determine residual
        x = np.dot(eval_points, encoders.T / ens.radius)
        decoders, info = solve_for_decoders(
            solver, ens.neuron_type, gain, bias, x, targets, rng)
        error = info['rmses'].mean()

        errors.append((error, encoders, gain, bias, eval_points))

    # --- set parameters to those with the lowest error
    errors.sort(key=lambda x: x[0])
    ens.encoders, ens.gain, ens.bias, ens.eval_points = errors[0][1:]
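
A hypothetical invocation of tune_ens_parameters as defined above, on a small one-dimensional ensemble (the network setup is illustrative, not part of the original):

import numpy as np
import nengo

with nengo.Network(seed=0):
    ens = nengo.Ensemble(n_neurons=50, dimensions=1)

# Search 100 random parameter sets for the one that best decodes x**2
tune_ens_parameters(ens, function=np.square,
                    rng=np.random.RandomState(0), n=100)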