Example #1
def build_moja(model, moja, rule):
    conn = rule.connection
    pre_activities = model.sig[get_pre_ens(conn).neurons]["out"]
    post_activities = model.sig[get_post_ens(conn).neurons]["out"]
    pre_filtered = build_or_passthrough(model, moja.pre_synapse,
                                        pre_activities)
    post_filtered = build_or_passthrough(model, moja.post_synapse,
                                         post_activities)

    (pos_memristors, neg_memristors, r_min_noisy, r_max_noisy,
     exponent_noisy) = initialise_memristors(moja, pre_filtered.shape[0],
                                             post_filtered.shape[0])

    model.sig[conn]["pos_memristors"] = pos_memristors
    model.sig[conn]["neg_memristors"] = neg_memristors

    model.add_op(
        SimmOja(pre_filtered, post_filtered, moja.beta,
                model.sig[conn]["pos_memristors"],
                model.sig[conn]["neg_memristors"], model.sig[conn]["weights"],
                moja.noise_percentage, moja.gain, r_min_noisy, r_max_noisy,
                exponent_noisy, moja.voltage, moja.initial_state))

    # expose these for probes
    model.sig[rule]["pre_filtered"] = pre_filtered
    model.sig[rule]["post_filtered"] = post_filtered
    model.sig[rule]["pos_memristors"] = pos_memristors
    model.sig[rule]["neg_memristors"] = neg_memristors
Example #2
def build_stdp(model, stdp, rule):
    conn = rule.connection
    pre_activities = model.sig[get_pre_ens(conn).neurons]['out']
    post_activities = model.sig[get_post_ens(conn).neurons]['out']
    pre_trace = Signal(np.zeros(pre_activities.size), name="pre_trace")
    post_trace = Signal(np.zeros(post_activities.size), name="post_trace")

    model.add_op(SimSTDP(
        pre_activities,
        post_activities,
        pre_trace,
        post_trace,
        model.sig[conn]['weights'],
        model.sig[rule]['delta'],
        pre_tau=stdp.pre_tau,
        post_tau=stdp.post_tau,
        alf_p=stdp.alf_p,
        alf_n=stdp.alf_n,
        beta_p=stdp.beta_p,
        beta_n=stdp.beta_n,
        max_weight=stdp.max_weight,
        min_weight=stdp.min_weight,
        learning_rate=stdp.learning_rate,
    ))

    # expose these for probes
    model.sig[rule]['pre_trace'] = pre_trace
    model.sig[rule]['post_trace'] = post_trace
    
    model.params[rule] = None  # no build-time info to return
Example #3
def build_mpes(model, mpes, rule):
    conn = rule.connection

    # Create input error signal
    error = Signal(shape=(rule.size_in, ), name="PES:error")
    model.add_op(Reset(error))
    model.sig[rule]["in"] = error  # error connection will attach here

    acts = build_or_passthrough(model, mpes.pre_synapse,
                                model.sig[conn.pre_obj]["out"])

    post = get_post_ens(conn)
    encoders = model.sig[post]["encoders"]

    pos_memristors, neg_memristors, r_min_noisy, r_max_noisy, exponent_noisy = initialise_memristors(
        mpes, acts.shape[0], encoders.shape[0])

    model.sig[conn]["pos_memristors"] = pos_memristors
    model.sig[conn]["neg_memristors"] = neg_memristors

    if conn.post_obj is not conn.post:
        # in order to avoid slicing encoders along an axis > 0, we pad
        # `error` out to the full base dimensionality and then do the
        # dotinc with the full encoder matrix
        # comes into effect when slicing post connection
        padded_error = Signal(shape=(encoders.shape[1], ))
        model.add_op(Copy(error, padded_error, dst_slice=conn.post_slice))
    else:
        padded_error = error

    # error = dot(encoders, error)
    local_error = Signal(shape=(post.n_neurons, ))
    model.add_op(Reset(local_error))
    model.add_op(DotInc(encoders, padded_error, local_error, tag="PES:encode"))

    model.operators.append(
        SimmPES(acts, local_error, model.sig[conn]["pos_memristors"],
                model.sig[conn]["neg_memristors"], model.sig[conn]["weights"],
                mpes.noise_percentage, mpes.gain, r_min_noisy, r_max_noisy,
                exponent_noisy, mpes.initial_state))

    # expose these for probes
    model.sig[rule]["error"] = error
    model.sig[rule]["activities"] = acts
    model.sig[rule]["pos_memristors"] = pos_memristors
    model.sig[rule]["neg_memristors"] = neg_memristors
Example #4
def build_stdp(model, stdp, rule):
    conn = rule.connection
    pre_activities = model.sig[get_pre_ens(conn).neurons]["out"]
    post_activities = model.sig[get_post_ens(conn).neurons]["out"]
    pre_trace = Signal(np.zeros(pre_activities.size), name="pre_trace")
    post_trace = Signal(np.zeros(post_activities.size), name="post_trace")
    pre_scale = Signal(np.zeros(model.sig[conn]["weights"].shape),
                       name="pre_scale")
    post_scale = Signal(np.zeros(model.sig[conn]["weights"].shape),
                        name="post_scale")

    model.add_op(
        SimSTDP(
            pre_activities,
            post_activities,
            pre_trace,
            post_trace,
            pre_scale,
            post_scale,
            model.sig[conn]["weights"],
            model.sig[rule]["delta"],
            learning_rate=stdp.learning_rate,
            pre_tau=stdp.pre_tau,
            post_tau=stdp.post_tau,
            pre_amp=stdp.pre_amp,
            post_amp=stdp.post_amp,
            bounds=stdp.bounds,
            max_weight=stdp.max_weight,
            min_weight=stdp.min_weight,
        ))

    # expose these for probes
    model.sig[rule]["pre_trace"] = pre_trace
    model.sig[rule]["post_trace"] = post_trace
    model.sig[rule]["pre_scale"] = pre_scale
    model.sig[rule]["post_scale"] = post_scale

    model.params[rule] = None  # no build-time info to return
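
The entries written into model.sig[rule] at the end of the builder are what nengo.Probe reads back during simulation. A minimal sketch, assuming a custom STDP learning-rule class that lists these names in its probeable attribute (both the class and its constructor arguments are assumptions):

import numpy as np
import nengo

with nengo.Network() as net:
    pre = nengo.Ensemble(10, dimensions=1)
    post = nengo.Ensemble(10, dimensions=1)
    conn = nengo.Connection(pre.neurons, post.neurons,
                            transform=np.zeros((10, 10)),
                            learning_rule_type=STDP())  # assumed custom rule
    # Resolves to the model.sig[rule]["pre_trace"] signal built above
    trace_probe = nengo.Probe(conn.learning_rule, "pre_trace")
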
Example #5
def build_tripletstdp(model, stdp, rule):
    conn = rule.connection
    pre_activities = model.sig[get_pre_ens(conn).neurons]["out"]
    post_activities = model.sig[get_post_ens(conn).neurons]["out"]
    pre_trace1 = Signal(np.zeros(pre_activities.size), name="pre_trace1")
    post_trace1 = Signal(np.zeros(post_activities.size), name="post_trace1")
    pre_trace2 = Signal(np.zeros(pre_activities.size), name="pre_trace2")
    post_trace2 = Signal(np.zeros(post_activities.size), name="post_trace2")

    model.add_op(
        SimTripletSTDP(
            pre_activities,
            post_activities,
            pre_trace1,
            post_trace1,
            pre_trace2,
            post_trace2,
            model.sig[rule]["delta"],
            learning_rate=stdp.learning_rate,
            pre_tau=stdp.pre_tau,
            pre_taux=stdp.pre_taux,
            post_tau=stdp.post_tau,
            post_tauy=stdp.post_tauy,
            pre_amp2=stdp.pre_amp2,
            pre_amp3=stdp.pre_amp3,
            post_amp2=stdp.post_amp2,
            post_amp3=stdp.post_amp3,
            nearest_spike=stdp.nearest_spike,
        ))

    # expose these for probes
    model.sig[rule]["pre_trace1"] = pre_trace1
    model.sig[rule]["post_trace1"] = post_trace1
    model.sig[rule]["pre_trace2"] = pre_trace2
    model.sig[rule]["post_trace2"] = post_trace2

    model.params[rule] = None  # no build-time info to return
Example #6
def build_pes(model, pes, rule):
    """
    Builds a `nengo.PES` object into a model.

    Parameters
    ----------
    model : Model
        The model to build into.
    pes : PES
        Learning rule type to build.
    rule : LearningRule
        The learning rule instance to which this rule type is attached.

    Notes
    -----
    Does not modify ``model.params[]`` and can therefore be called
    more than once with the same `nengo.PES` instance.
    """

    conn = rule.connection

    # Create input error signal
    error = Signal(np.zeros(rule.size_in), name="PES:error")
    model.add_op(Reset(error))
    model.sig[rule]['in'] = error  # error connection will attach here

    if LooseVersion(nengo_version) < "2.7.1":
        acts = model.build(
            Lowpass(pes.pre_tau), model.sig[conn.pre_obj]["out"])
    else:
        acts = model.build(pes.pre_synapse, model.sig[conn.pre_obj]["out"])

    if not conn.is_decoded:
        # multiply error by post encoders to get a per-neuron error

        post = get_post_ens(conn)
        encoders = model.sig[post]["encoders"]

        if conn.post_obj is not conn.post:
            # in order to avoid slicing encoders along an axis > 0, we pad
            # `error` out to the full base dimensionality and then do the
            # dotinc with the full encoder matrix
            padded_error = Signal(np.zeros(encoders.shape[1]))
            model.add_op(Copy(error, padded_error,
                              dst_slice=conn.post_slice))
        else:
            padded_error = error

        # error = dot(encoders, error)
        local_error = Signal(np.zeros(post.n_neurons), name="PES:encoded")
        model.add_op(Reset(local_error))
        model.add_op(DotInc(encoders, padded_error, local_error,
                            tag="PES:encode"))
    else:
        local_error = error

    model.operators.append(SimPES(acts, local_error, model.sig[rule]["delta"],
                                  pes.learning_rate))

    # expose these for probes
    model.sig[rule]["error"] = error
    model.sig[rule]["activities"] = acts


def build_mpes(model, mpes, rule):
    conn = rule.connection

    # Create input error signal
    error = Signal(shape=(rule.size_in, ), name="PES:error")
    model.add_op(Reset(error))
    model.sig[rule]["in"] = error  # error connection will attach here

    acts = build_or_passthrough(model, mpes.pre_synapse,
                                model.sig[conn.pre_obj]["out"])

    post = get_post_ens(conn)
    encoders = model.sig[post]["encoders"]

    out_size = encoders.shape[0]
    in_size = acts.shape[0]

    from scipy.stats import truncnorm

    def get_truncated_normal(mean, sd, low, upp):
        try:
            return (truncnorm((low - mean) / sd, (upp - mean) / sd,
                              loc=mean, scale=sd)
                    .rvs(out_size * in_size)
                    .reshape((out_size, in_size)))
        except ZeroDivisionError:
            return np.full((out_size, in_size), mean)

    np.random.seed(mpes.seed)
    r_min_noisy = get_truncated_normal(mpes.r_min,
                                       mpes.r_min * mpes.noise_percentage[0],
                                       0, np.inf)
    np.random.seed(mpes.seed)
    r_max_noisy = get_truncated_normal(mpes.r_max,
                                       mpes.r_max * mpes.noise_percentage[1],
                                       np.max(r_min_noisy), np.inf)
    np.random.seed(mpes.seed)
    exponent_noisy = np.random.normal(
        mpes.exponent,
        np.abs(mpes.exponent) * mpes.noise_percentage[2], (out_size, in_size))
    np.random.seed(mpes.seed)
    pos_mem_initial = np.random.normal(1e8, 1e8 * mpes.noise_percentage[3],
                                       (out_size, in_size))
    np.random.seed(mpes.seed + 1)
    neg_mem_initial = np.random.normal(1e8, 1e8 * mpes.noise_percentage[3],
                                       (out_size, in_size))

    pos_memristors = Signal(shape=(out_size, in_size),
                            name="mPES:pos_memristors",
                            initial_value=pos_mem_initial)
    neg_memristors = Signal(shape=(out_size, in_size),
                            name="mPES:neg_memristors",
                            initial_value=neg_mem_initial)

    model.sig[conn]["pos_memristors"] = pos_memristors
    model.sig[conn]["neg_memristors"] = neg_memristors

    if conn.post_obj is not conn.post:
        # in order to avoid slicing encoders along an axis > 0, we pad
        # `error` out to the full base dimensionality and then do the
        # dotinc with the full encoder matrix
        # comes into effect when slicing post connection
        padded_error = Signal(shape=(encoders.shape[1], ))
        model.add_op(Copy(error, padded_error, dst_slice=conn.post_slice))
    else:
        padded_error = error

    # error = dot(encoders, error)
    local_error = Signal(shape=(post.n_neurons, ))
    model.add_op(Reset(local_error))
    model.add_op(DotInc(encoders, padded_error, local_error, tag="PES:encode"))

    model.operators.append(
        SimmPES(acts, local_error, mpes.learning_rate,
                model.sig[conn]["pos_memristors"],
                model.sig[conn]["neg_memristors"], model.sig[conn]["weights"],
                mpes.noise_percentage, mpes.gain, r_min_noisy, r_max_noisy,
                exponent_noisy))

    # expose these for probes
    model.sig[rule]["error"] = error
    model.sig[rule]["activities"] = acts
    model.sig[rule]["pos_memristors"] = pos_memristors
    model.sig[rule]["neg_memristors"] = neg_memristors