Esempio n. 1
0
def build_aml(model, aml, rule):
    """Builder for the AML learning rule.

    Sets up the error input signal, solves for the pre-ensemble's base
    decoders (auto-associative: targets are the evaluation points), and
    registers a ``SimAML`` operator on the model.
    """
    connection = rule.connection
    rand = np.random.RandomState(model.seeds[connection])

    # Error signal that upstream connections project into.
    err_sig = Signal(np.zeros(rule.size_in), name="aml:error")
    model.add_op(Reset(err_sig))
    model.sig[rule]['in'] = err_sig

    pre_sig = model.sig[connection.pre_obj]['in']
    dec_sig = model.sig[connection]['weights']
    delta_sig = model.sig[rule]['delta']

    ens_params = model.params[connection.pre_obj]

    eval_points = get_eval_points(model, connection, rand)

    # Neuron-space input to the solver.
    x = np.dot(eval_points, ens_params.encoders.T)

    # Only use the decoder cache for seeded (reproducible) builds.
    if model.seeded[connection]:
        solver = model.decoder_cache.wrap_solver(solve_for_decoders)
    else:
        solver = solve_for_decoders
    base_decoders, _ = solver(
        connection, ens_params.gain, ens_params.bias, x, eval_points,
        rng=rand)

    model.add_op(SimAML(
        aml.learning_rate, base_decoders, pre_sig, err_sig, dec_sig,
        delta_sig))
Esempio n. 2
0
def build_weight_symmetry_learning(model, weight_symmetry_learning, rule):
    """Builder for the weight-symmetry learning rule.

    Solves for the pre-ensemble's base decoders (auto-associative:
    targets are the evaluation points, inputs normalized by the
    pre-ensemble radius) and registers a ``SimWeightSymmetryLearning``
    operator driven by a scale signal.
    """
    seed = weight_symmetry_learning.seed
    rng = np.random if seed is None else np.random.RandomState(seed)

    conn = rule.connection

    pre_activities = model.sig[conn.pre_obj.neurons]['out']
    decoders = model.sig[conn]['weights']

    # Input signal that modulates the learning update.
    scale = Signal(np.zeros(rule.size_in), name="WeightSymmetryLearn:scale")
    model.add_op(Reset(scale))
    model.sig[rule]['in'] = scale

    params = model.params[conn.pre_obj]

    eval_points = get_eval_points(model, conn, rng)

    # Neuron-space input to the solver, normalized by the radius.
    x = np.dot(eval_points, params.encoders.T / conn.pre_obj.radius)

    base_decoders, _ = solve_for_decoders(
        conn, params.gain, params.bias, x, eval_points, rng=rng)

    model.add_op(SimWeightSymmetryLearning(
        weight_symmetry_learning.learning_rate, base_decoders,
        pre_activities, decoders, scale))
Esempio n. 3
0
def build_decoders(model, conn, rng):
    """Solve for the decoders of ``conn``.

    Copied from an older version of Nengo: uses the pre-nengo-2.4 solver
    signature ``(solver, neuron_type, gain, bias, x, targets)``.

    Parameters
    ----------
    model : built model holding ensemble parameters and the decoder cache
    conn : the connection to solve decoders for
    rng : RandomState passed through to the solver

    Returns
    -------
    tuple
        ``(eval_points, decoders, solver_info)``.

    Raises
    ------
    BuildError
        If the solver fails because no evaluation points fall in any
        neuron's firing range (the original error is chained as the
        cause).
    """
    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = connection_b.get_eval_points(model, conn, rng)
    targets = connection_b.get_targets(model, conn, eval_points)

    # Neuron-space input to the solver, normalized by the radius.
    x = np.dot(eval_points, encoders.T / conn.pre_obj.radius)
    E = None
    if conn.solver.weights:
        # Solving for full weights: project through post encoders.
        E = model.params[conn.post_obj].scaled_encoders.T[conn.post_slice]
        # include transform in solved weights
        targets = connection_b.multiply(targets, conn.transform.T)

    try:
        wrapped_solver = model.decoder_cache.wrap_solver(
            connection_b.solve_for_decoders)
        decoders, solver_info = wrapped_solver(conn.solver,
                                               conn.pre_obj.neuron_type,
                                               gain,
                                               bias,
                                               x,
                                               targets,
                                               rng=rng,
                                               E=E)
    except BuildError as err:
        # Re-raise with connection context; chain the original error so
        # the underlying solver failure is preserved in the traceback.
        raise BuildError(
            "Building %s: 'activities' matrix is all zero for %s. "
            "This is because no evaluation points fall in the firing "
            "ranges of any neurons." % (conn, conn.pre_obj)) from err

    return eval_points, decoders, solver_info
Esempio n. 4
0
def build_aml(model, aml, rule):
    """Builder for the AML learning rule (variant without a delta signal).

    Solves for the pre-ensemble's base decoders (auto-associative:
    targets are the evaluation points) and registers a ``SimAML``
    operator on the model.
    """
    rng = (np.random if aml.seed is None
           else np.random.RandomState(aml.seed))

    conn = rule.connection

    # Error signal that upstream connections project into.
    error = Signal(np.zeros(rule.size_in), name="aml:error")
    model.add_op(Reset(error))
    model.sig[rule]['in'] = error

    pre = model.sig[conn.pre_obj]['in']
    decoders = model.sig[conn]['weights']

    # TODO caching
    params = model.params[conn.pre_obj]

    eval_points = get_eval_points(model, conn, rng)

    # Neuron-space input to the solver.
    x = np.dot(eval_points, params.encoders.T)

    base_decoders, _ = solve_for_decoders(
        conn, params.gain, params.bias, x, eval_points, rng=rng)

    model.add_op(
        SimAML(aml.learning_rate, base_decoders, pre, error, decoders))
Esempio n. 5
0
def build_decoders(model, conn, rng, sampled_transform):
    """Solve for the decoders of ``conn``.

    Copied from Nengo, except where noted by the CHANGE comments below:
    the transform is supplied pre-sampled (``sampled_transform``), ``dt``
    is passed to the solver, and the decoder cache is not used.

    Returns ``(eval_points, decoders.T, solver_info)`` — note the
    transpose on the decoders, unlike upstream Nengo.
    """
    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = get_eval_points(model, conn, rng)
    targets = get_targets(conn, eval_points)

    if conn.solver.weights and not conn.solver.compositional:
        # solver is solving for the whole weight matrix, so apply
        # transform/encoders to targets

        # CHANGE: backwards compatibility with nengo<=2.8.0
        # if not isinstance(conn.transform, Dense):
        #     raise BuildError(
        #         "Non-compositional solvers only work with Dense transforms")
        # transform = conn.transform.sample(rng=rng)
        # targets = np.dot(targets, transform.T)
        if nengo_transforms is not None and not isinstance(
                conn.transform, nengo_transforms.Dense):  # pragma: no cover
            raise BuildError(
                "Non-compositional solvers only work with Dense transforms")
        targets = np.dot(targets, sampled_transform.T)

        # weight solvers only allowed on ensemble->ensemble connections
        assert isinstance(conn.post_obj, Ensemble)
        post_enc = model.params[conn.post_obj].scaled_encoders
        targets = np.dot(targets, post_enc.T[conn.post_slice])

    # Neuron-space input to the solver, normalized by the pre radius.
    x = np.dot(eval_points, encoders.T / conn.pre_obj.radius)

    # CHANGE: we pass `dt` to `solve_for_decoders`,
    # and do not support the decoder cache.
    # wrapped_solver = (model.decoder_cache.wrap_solver(solve_for_decoders)
    #                   if model.seeded[conn] else solve_for_decoders)
    # decoders, solver_info = wrapped_solver(
    #     conn, gain, bias, x, targets, rng=rng)
    decoders, solver_info = solve_for_decoders(conn,
                                               gain,
                                               bias,
                                               x,
                                               targets,
                                               rng=rng,
                                               dt=model.dt)

    return eval_points, decoders.T, solver_info
Esempio n. 6
0
def build_decoders(model, conn, rng):
    """Solve for the decoders of ``conn``.

    Copied from an older version of Nengo, with fallbacks supporting
    several historical ``connection_b`` call signatures.

    Parameters
    ----------
    model : built model holding ensemble parameters and the decoder cache
    conn : the connection to solve decoders for
    rng : RandomState passed through to the solver

    Returns
    -------
    tuple
        ``(eval_points, decoders, solver_info)``.

    Raises
    ------
    BuildError
        If the solver fails because no evaluation points fall in any
        neuron's firing range (the original error is chained as the
        cause).
    """
    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = connection_b.get_eval_points(model, conn, rng)

    try:
        targets = connection_b.get_targets(conn, eval_points)
    except TypeError:
        # nengo <= 2.3.0 took the model as the first argument; the
        # signature mismatch raises TypeError, so catch only that
        # instead of a bare except that would hide real errors.
        targets = connection_b.get_targets(model, conn, eval_points)

    # Neuron-space input to the solver, normalized by the pre radius.
    x = np.dot(eval_points, encoders.T / conn.pre_obj.radius)
    E = None
    if conn.solver.weights:
        # Solving for full weights: project through post encoders.
        E = model.params[conn.post_obj].scaled_encoders.T[conn.post_slice]
        # include transform in solved weights
        targets = connection_b.multiply(targets, conn.transform.T)

    try:
        wrapped_solver = model.decoder_cache.wrap_solver(
            connection_b.solve_for_decoders
        )
        try:
            decoders, solver_info = wrapped_solver(
                conn, gain, bias, x, targets,
                rng=rng, E=E)
        except TypeError:
            # fallback for older nengo versions, which passed the solver
            # and neuron type explicitly instead of the connection
            decoders, solver_info = wrapped_solver(
                conn.solver, conn.pre_obj.neuron_type, gain, bias, x, targets,
                rng=rng, E=E)
    except BuildError as err:
        # Re-raise with connection context; chain the original error so
        # the underlying solver failure is preserved in the traceback.
        raise BuildError(
            "Building %s: 'activities' matrix is all zero for %s. "
            "This is because no evaluation points fall in the firing "
            "ranges of any neurons." % (conn, conn.pre_obj)) from err

    return eval_points, decoders, solver_info