# Example 1 (scraped fragment boundary)
    // Calculate how much dopamine has decayed since last update
    const scalar dopamineDT = $(t) - $(seT_pre);
    const scalar dopamineDecay = exp(-dopamineDT / $(tauD));
    // Calculate offset to integrate over correct area
    const scalar offset = (tc <= $(seT_pre)) ? exp(-($(seT_pre) - tc) / $(tauC)) : exp(-(tc - $(seT_pre)) / $(tauD));
    // Update weight and clamp
    $(g) += ($(c) * $(D_pre) * $(scale)) * ((tagDecay * dopamineDecay) - offset);
    $(g) = fmax($(wMin), fmin($(wMax), $(g)));
    """
izhikevich_stdp_model = genn_model.create_custom_weight_update_class(
    "izhikevich_stdp",
    
    param_names=["tauPlus",  "tauMinus", "tauC", "tauD", "aPlus", "aMinus",
                 "wMin", "wMax"],
    derived_params=[
        ("scale", genn_model.create_dpf_class(lambda pars, dt: 1.0 / -((1.0 / pars[2]) + (1.0 / pars[3])))())],
    var_name_types=[("g", "scalar"), ("c", "scalar")],

    sim_code=
        """
        $(addToInSyn, $(g));
        // Calculate time of last tag update
        const scalar tc = fmax($(prev_sT_pre), fmax($(prev_sT_post), $(prev_seT_pre)));
        """
        + izhikevich_stdp_tag_update_code +
        """
        // Decay tag and apply STDP
        scalar newTag = $(c) * tagDecay;
        const scalar dt = $(t) - $(sT_post);
        if (dt > 0) {
            scalar timing = exp(-dt / $(tauMinus));
               np.column_stack(data),
               fmt=["%f", "%d"],
               delimiter=",",
               header="Time [ms], Neuron ID")


#----------------------------------------------------------------------------
# Neuron models
#----------------------------------------------------------------------------
# Recurrent leaky integrate-and-fire (LIF) neuron with a "soft" reset
# (threshold subtracted from V on spike) and an explicit refractory timer.
recurrent_lif_model = genn_model.create_custom_neuron_class(
    "recurrent_lif",
    # TauM: membrane time constant; Vthresh: firing threshold;
    # TauRefrac: refractory period (units presumably ms -- TODO confirm).
    param_names=["TauM", "Vthresh", "TauRefrac"],
    var_name_types=[("V", "scalar"), ("RefracTime", "scalar")],
    derived_params=[
        # Per-timestep membrane decay factor exp(-DT / TauM).
        ("Alpha",
         genn_model.create_dpf_class(lambda pars, dt: np.exp(-dt / pars[0]))())
    ],
    # Leaky integration of synaptic input plus refractory countdown.
    sim_code="""
    $(V) = ($(Alpha) * $(V)) + $(Isyn);
    if ($(RefracTime) > 0.0) {
      $(RefracTime) -= DT;
    }
    """,
    # On spike: start refractory period; subtract threshold rather than
    # resetting to zero, preserving any supra-threshold residual.
    reset_code="""
    $(RefracTime) = $(TauRefrac);
    $(V) -= $(Vthresh);
    """,
    # Fire only when out of the refractory period and above threshold.
    threshold_condition_code="""
    $(RefracTime) <= 0.0 && $(V) >= $(Vthresh)
    """,
    is_auto_refractory_required=False)
# Example 3 (scraped fragment boundary)
# Synaptic weight in pA; the 0.14 factor is presumably model-specific
# tuning -- TODO confirm its origin.
SYNAPTIC_WEIGHT_PA = convert_synapse_weight(TAU_M, TAU_SYN, C_M_PF) * 0.14

# NU_THRESH: external rate at which mean synaptic drive alone reaches
# V_THRESH (Brunel-network-style estimate -- NOTE(review): verify formula
# against the reference model).
NU_THRESH = V_THRESH / (NUM_INCOMING_EXCITATORY * TAU_M / C_M_PF *
                        SYNAPTIC_WEIGHT_PA * np.exp(1.) * TAU_SYN)
# External Poisson drive set supra-threshold by a fixed factor.
NU_EXT = NU_THRESH * 1.685

# ----------------------------------------------------------------------------
# Custom models
# ----------------------------------------------------------------------------
# Alpha-function current synapse: inSyn feeds a second state variable x,
# producing the classic (t/tau) * exp(1 - t/tau)-shaped current waveform.
alpha_curr_model = genn_model.create_custom_postsynaptic_class(
    "alpha_curr",
    param_names=["tau"],
    var_name_types=[("x", "scalar")],
    derived_params=[
        # Per-timestep decay factor exp(-DT / tau).
        ("ExpDecay",
         genn_model.create_dpf_class(lambda pars, dt: np.exp(-dt / pars[0]))()
         ),
        # e / tau scaling -- presumably normalises the alpha-kernel peak to
        # the incoming weight; verify against the derivation.
        ("Init",
         genn_model.create_dpf_class(lambda pars, dt: np.exp(1) / pars[0])())
    ],
    # Two coupled first-order decays implement the alpha kernel.
    decay_code="""
        $(x) = $(ExpDecay) * ((DT * $(inSyn) * $(Init)) + $(x));
        $(inSyn)*=$(ExpDecay);
        """,
    # The shaped variable x (not raw inSyn) is what reaches the neuron.
    apply_input_code="""
        $(Isyn) += $(x);
        """)

poisson_alpha_model = genn_model.create_custom_current_source_class(
    "poisson_alpha",
    param_names=["weight", "tauSyn", "rate"],
import numpy as np
import matplotlib.pyplot as plt
from time import time

from pygenn import genn_wrapper
from pygenn import genn_model

# STDP synapse with additive weight dependence
stdp_additive = genn_model.create_custom_weight_update_class(
    "STDPAdditive",
    param_names=["tauPlus", "tauMinus", "aPlus", "aMinus", "wMin", "wMax"],
    var_name_types=[("g", "scalar")],
    pre_var_name_types=[("preTrace", "scalar")],
    post_var_name_types=[("postTrace", "scalar")],
    derived_params=[("aPlusScaled",
                     genn_model.create_dpf_class(lambda pars, dt: pars[2] *
                                                 (pars[5] - pars[4]))()),
                    ("aMinusScaled",
                     genn_model.create_dpf_class(lambda pars, dt: pars[3] *
                                                 (pars[5] - pars[4]))())],
    sim_code="""
        $(addToInSyn, $(g));
        const scalar dt = $(t) - $(sT_post);
        if(dt > 0) {
            const scalar timing = exp(-dt / $(tauMinus));
            const scalar newWeight = $(g) - ($(aMinusScaled) * $(postTrace) * timing);
            $(g) = min($(wMax), max($(wMin), newWeight));
        }
        """,
    learn_post_code="""
        const scalar dt = $(t) - $(sT_pre);
        if(dt > 0) {
    def _generate_init_snippet(self):
        """Build a GeNN sparse-connectivity initialisation snippet for
        distance-limited, fixed-probability connectivity between two 3D
        neuron grids, then wrap it with this object's connection parameters.

        Returns:
            The result of ``genn_model.init_connectivity`` for the
            generated snippet and ``self._conn_init_params``.
        """
        # Snippet parameter layout (must match `names` below):
        #   pars[0]     prob       connection probability
        #   pars[1]     max_dist   maximum connection distance
        #   pars[2:5]   pre grid dimensions  (nx, ny, nz)
        #   pars[5:8]   pre grid origin      (x0, y0, z0)
        #   pars[8:11]  pre grid spacing     (dx, dy, dz)
        #   pars[11:14] post grid dimensions
        #   pars[14:17] post grid origin
        #   pars[17:20] post grid spacing
        def _max_col_len(num_pre, num_post, pars):
            # Geometric bound on candidate PRE partners per POST neuron:
            # product over the pre-grid axes of how many cells fit inside
            # the distance window (2 * max_dist + 1 -- units assumed to
            # match the grid spacing, TODO confirm).
            max_d = (2.0 * pars[1] + 1.0)
            max_conns = 1.0

            if pars[2] > 1:
                max_conns *= min(max_d / pars[8], pars[2])

            if pars[3] > 1:
                max_conns *= min(max_d / pars[9], pars[3])

            if pars[4] > 1:
                max_conns *= min(max_d / pars[10], pars[4])


            if pars[0] > 0.95:
                # Near-certain connection: the geometric bound is tight.
                return int(max_conns)
            else:
                # High quantile of Binomial(max_conns, prob) so the bound
                # holds simultaneously over all num_post columns with
                # probability ~0.9999. NOTE(review): `binom` is assumed to
                # be scipy.stats.binom imported elsewhere -- confirm.
                return int(binom.ppf(0.9999 ** (1.0 / num_post),
                           n=max_conns, p=pars[0]))

        def _max_row_len(num_pre, num_post, pars):
            # Same bound as _max_col_len, but over the POST-grid axes
            # (sizes pars[11:14], spacings pars[17:20]).
            max_d = (2.0 * pars[1] + 1.0)
            max_conns = 1.0
            if pars[11] > 1:
                max_conns *= min(max_d / pars[17], pars[11])

            if pars[12] > 1:
                max_conns *= min(max_d / pars[18], pars[12])

            if pars[13] > 1:
                max_conns *= min(max_d / pars[19], pars[13])

            if pars[0] > 0.95:
                return int(max_conns)
            else:
                return int(binom.ppf(0.9999 ** (1.0 / num_pre),
                           n=max_conns, p=pars[0]))

        _param_space = self._conn_init_params
        shp = self.shapes
        # Neurons in one y-slice ("row") of each grid.
        pre_per_row = int(shp['pre_nx'] * shp['pre_nz'])
        post_per_row = int(shp['post_nx'] * shp['post_nz'])
        # Post-index span a row scan may cover either side of the aligned
        # row: max_dist in units of post y-spacing, plus two rows of slack.
        delta_row = int(
            ((self.max_dist / shp['post_dy']) + 2) * post_per_row
        )
        n_post = shp['post_nx'] * shp['post_ny'] * shp['post_nz']
        names = [
            "prob", "max_dist",
            "pre_nx", "pre_ny", "pre_nz",
            "pre_x0", "pre_y0", "pre_z0",
            "pre_dx", "pre_dy", "pre_dz",
            "post_nx", "post_ny", "post_nz",
            "post_x0", "post_y0", "post_z0",
            "post_dx", "post_dy", "post_dz"
        ]
        # Row-build state: preRow caches the presynaptic y coordinate;
        # prevJ/endJ bracket the range of postsynaptic indices that can
        # possibly lie within max_dist of this presynaptic neuron.
        state_vars = [
            # ("perRow", "int", per_row),
            # ("deltaRow", "int", delta_row),
            ("preRow", "int",
             "($(id_pre) / {}) * $(pre_dy) + $(pre_y0)".format(pre_per_row)),
            ("prevJ", "int",
             "fmax(-1.0f,\n"
             "    (float)( ((preRow - $(post_y0)) / $(post_dy)) * {} - {} - 1 )\n"
             ")".format(post_per_row, delta_row)),
            ("endJ", "int",
             "fmin({}f, \n"
             "    (float)( ((preRow - $(post_y0)) / $(post_dy)) * {} + {} + 1 )\n"
             ")".format(float(n_post), post_per_row, delta_row)),
        ]
        # 1 / log(1 - prob): step scale for the geometric skip-ahead
        # sampling used in the row-build code below.
        derived = [
            ("probLogRecip",
                genn_model.create_dpf_class(
                    lambda pars, dt: (1.0 / np.log(1.0 - pars[0])))()
             )
        ]

        # GeNN row-build code: jump ahead a geometrically-distributed number
        # of post indices (equivalent to independent Bernoulli(prob) draws),
        # then create the synapse only if the candidate lies within max_dist
        # of the presynaptic neuron in 3D space.
        _code = """
            #ifndef toCoords
            #define toCoords(idx, nx, ny, nz, x, y, z) { \\
                int a = (int)(nx * nz);                  \\
                int inz = (int)nz;                       \\
                y = (float)(idx / a);                    \\
                x = (float)((idx - ((int)y * a)) / inz); \\
                z = (float)((idx - ((int)y * a)) % inz); \\
            }
            #endif
            
            #ifndef inDist
            #define inDist(pre, pre_nx, pre_ny, pre_nz, \\
                pre_dx, pre_dy, pre_dz, pre_x0, pre_y0, pre_z0, \\
                post, post_nx, post_ny, post_nz, \\
                post_dx, post_dy, post_dz, post_x0, post_y0, post_z0, \\
                max_dist, output) { \\
                float pre_x, pre_y, pre_z, post_x, post_y, post_z; \\
                toCoords(pre, pre_nx, pre_ny, pre_nz, pre_x, pre_y, pre_z); \\
                toCoords(post, post_nx, post_ny, post_nz, post_x, post_y, post_z); \\
                pre_x = pre_x * pre_dx + pre_x0; \\
                pre_y = pre_y * pre_dy + pre_y0; \\
                pre_z = pre_z * pre_dz + pre_z0; \\
                post_x = post_x * post_dx + post_x0; \\
                post_y = post_y * post_dy + post_y0; \\
                post_z = post_z * post_dz + post_z0; \\
                float dx = post_x - pre_x, \\
                      dy = post_y - pre_y, \\
                      dz = post_z - pre_z; \\
                output = ( sqrt( (dx * dx) + (dy * dy) + (dz * dz) ) <= max_dist ); \\
            }
            #endif
                        
            const scalar u = $(gennrand_uniform);
            prevJ += (1 + (int)(log(u) * $(probLogRecip)));
            
            if(prevJ < endJ) {
                int out = 0;
                
                inDist($(id_pre), $(pre_nx), $(pre_ny), $(pre_nz), 
                       $(pre_dx), $(pre_dy), $(pre_dz), 
                       $(pre_x0), $(pre_y0), $(pre_z0), 
                       prevJ, $(post_nx), $(post_ny), $(post_nz),
                       $(post_dx), $(post_dy), $(post_dz), 
                       $(post_x0), $(post_y0), $(post_z0), 
                       $(max_dist), out);
                if(out){
                    $(addSynapse, prevJ + $(id_post_begin));
                }
            }
            else {
                $(endRow);
            }

        """
        # Assemble the snippet class and bind the row/col length estimators.
        _snip = genn_model.create_custom_sparse_connect_init_snippet_class(
            "max_distance_fixed_probability",
            param_names=names,
            row_build_state_vars=state_vars,
            derived_params=derived,
            calc_max_row_len_func=genn_model.create_cmlf_class(_max_row_len)(),
            calc_max_col_len_func=genn_model.create_cmlf_class(_max_col_len)(),
            row_build_code=_code)

        return genn_model.init_connectivity(_snip, _param_space)
# Example 6 (scraped fragment boundary)
from pygenn.genn_model import create_dpf_class, create_custom_neuron_class
from pygenn.genn_wrapper.Models import VarAccess_READ_ONLY_DUPLICATE
from ml_genn.layers.input_neurons import InputNeurons

fs_relu_input_model = create_custom_neuron_class(
    'fs_relu_input',
    param_names=['K', 'alpha'],
    derived_params=[
        ("scale", create_dpf_class(lambda pars, dt: pars[1] * 2**(-pars[0]))())
    ],
    var_name_types=[('input', 'scalar', VarAccess_READ_ONLY_DUPLICATE),
                    ('Vmem', 'scalar')],
    sim_code='''
    // Convert K to integer
    const int kInt = (int)$(K);
    
    // Get timestep within presentation
    const int pipeTimestep = (int)($(t) / DT);

    // If this is the first timestep, apply input
    if(pipeTimestep == 0) {
        $(Vmem) = $(input);
    }
    
    const scalar hT = $(scale) * (1 << (kInt - (1 + pipeTimestep)));
    ''',
    threshold_condition_code='''
    $(Vmem) >= hT
    ''',
    reset_code='''
    $(Vmem) -= hT;
# Example 7 (scraped fragment boundary)
    $(reducedGradient) = $(gradient);
    $(gradient) = 0;
    """)

#----------------------------------------------------------------------------
# Neuron models
#----------------------------------------------------------------------------
recurrent_alif_model = genn_model.create_custom_neuron_class(
    "recurrent_alif",
    param_names=["TauM", "TauAdap", "Vthresh", "TauRefrac", "Beta"],
    var_name_types=[("V", "scalar"), ("A", "scalar"), ("RefracTime", "scalar"),
                    ("E", "scalar")],
    additional_input_vars=[("ISynFeedback", "scalar", 0.0)],
    derived_params=[
        ("Alpha",
         genn_model.create_dpf_class(lambda pars, dt: np.exp(-dt / pars[0]))()
         ),
        ("Rho",
         genn_model.create_dpf_class(lambda pars, dt: np.exp(-dt / pars[1]))())
    ],
    sim_code="""
    $(E) = $(ISynFeedback);
    $(V) = ($(Alpha) * $(V)) + $(Isyn);
    $(A) *= $(Rho);
    if ($(RefracTime) > 0.0) {
      $(RefracTime) -= DT;
    }
    """,
    reset_code="""
    $(RefracTime) = $(TauRefrac);
    $(V) -= $(Vthresh);
# Example 8 (scraped fragment boundary)
    // at each time step, calculate m
    $(m) += g;
    if ((int)round($(t)) % (int)$(update_t) == 0 && (int)round($(t)) != 0) {
        const scalar grad = $(m)/$(update_t);
        // calculate learning rate r
        $(upsilon) = fmax($(upsilon) * $(ExpRMS) , grad*grad);
        const scalar r = $(r0) / (sqrt($(upsilon))+$(epsilon));
        // update synaptic weight
        $(w) += r * grad;
        $(w) = fmin($(wmax), fmax($(wmin), $(w)));
        $(m) = 0.0;
    }
    """,
    derived_params=[
        ("ExpRMS",
         create_dpf_class(lambda pars, dt: exp(-pars[-1] / pars[2]))())
    ])

# Parameter dictionary for the SuperSpike weight-update model; all values
# are module-level constants defined elsewhere in this file. Keys must match
# the model's param_names exactly.
SUPERSPIKE_PARAMS = {
    "t_rise": t_rise,
    "t_decay": t_decay,
    "tau_rms": tau_rms,
    "wmax": wmax,
    "wmin": wmin,
    "epsilon": epsilon,
    "update_t": update_time_ms
}

superspike_init = {
    "w": init_var("Uniform", {
        "min": -0.001,
def calc_t_peak(tau_rise, tau_decay):
    """Return the peak time of a difference-of-exponentials kernel.

    For a kernel of the form exp(-t / tau_decay) - exp(-t / tau_rise), the
    maximum occurs at
        t_peak = (tau_decay * tau_rise) / (tau_decay - tau_rise)
                 * ln(tau_decay / tau_rise).

    Args:
        tau_rise: Rise time constant (same units as tau_decay, e.g. ms).
        tau_decay: Decay time constant.

    Returns:
        Peak time in the same units as the inputs. When the two constants
        are equal, the analytic limit (t_peak == tau) is returned instead
        of dividing by zero.
    """
    # Degenerate case: as tau_decay -> tau_rise the kernel becomes an alpha
    # function (t / tau) * exp(-t / tau), which peaks at t == tau. The
    # general formula would raise ZeroDivisionError here.
    if tau_decay == tau_rise:
        return tau_decay
    return ((tau_decay * tau_rise) / (tau_decay - tau_rise)) * np.log(tau_decay / tau_rise)

def write_spike_file(filename, data):
    """Save spike data as CSV: a float time column and an int neuron-id
    column, preceded by a '# Time [ms], Neuron ID' header line."""
    columns = np.column_stack(data)
    np.savetxt(filename,
               columns,
               fmt=["%f", "%d"],
               delimiter=",",
               header="Time [ms], Neuron ID")

# ----------------------------------------------------------------------------
# Custom models
# ----------------------------------------------------------------------------
# Custom update implementing an RMSProp-like per-synapse learning-rate rule:
# upsilon tracks a leaky running maximum of the squared gradient, and the
# referenced variable is updated with rate r0 / (sqrt(upsilon) + epsilon).
r_max_prop_model = genn_model.create_custom_custom_update_class(
    "r_max_prop",
    param_names=["updateTime", "tauRMS", "epsilon", "wMin", "wMax"],
    var_name_types=[("upsilon", "scalar")],
    # r0: global base learning rate, set from host code.
    extra_global_params=[("r0", "scalar")],
    # updateTimesteps = updateTime / dt; expRMS = exp(-updateTime / tauRMS).
    derived_params=[("updateTimesteps", genn_model.create_dpf_class(lambda pars, dt: pars[0] / dt)()),
                    ("expRMS", genn_model.create_dpf_class(lambda pars, dt: np.exp(-pars[0] / pars[1]))())],
    # m: accumulated gradient; variable: the synaptic parameter to update.
    var_refs=[("m", "scalar"), ("variable", "scalar")],
    update_code="""
    // Get gradients
    const scalar gradient = $(m) / $(updateTimesteps);
    // Calculate learning rate r
    $(upsilon) = fmax($(upsilon) * $(expRMS), gradient * gradient);
    const scalar r = $(r0) / (sqrt($(upsilon)) + $(epsilon));
    // Update synaptic parameter
    $(variable) += r * gradient;
    $(variable) = fmin($(wMax), fmax($(wMin), $(variable)));
    $(m) = 0.0;
    """)

superspike_model = genn_model.create_custom_weight_update_class(
# Example 10 (scraped fragment boundary)
    const scalar S_real = $(RefracTime) <= 0.0 && $(V) >= $(Vthresh) ? 1.0 : 0.0;
    const scalar mismatch = S_pred - S_real;
    $(err_rise) = ($(err_rise) * $(t_rise_mult)) + mismatch;
    $(err_decay) = ($(err_decay) * $(t_decay_mult)) + mismatch;
    $(err_tilda) = ($(err_decay) - $(err_rise)) * $(norm_factor);
    // calculate average error trace
    const scalar temp = $(err_tilda) * $(err_tilda) * DT * 0.001;
    $(avg_sq_err) *= $(mul_avgerr);
    $(avg_sq_err) += temp;
    """,
    reset_code="""
    $(RefracTime) = $(TauRefrac);
    """,
    threshold_condition_code="$(RefracTime) <= 0.0 && $(V) >= $(Vthresh)",
    derived_params=[
        ("ExpTC", create_dpf_class(lambda pars, dt: exp(-dt / pars[1]))()),
        ("Rmembrane", create_dpf_class(lambda pars, dt: pars[1] / pars[0])()),
        ("norm_factor",
         create_dpf_class(lambda pars, dt: 1.0 / (-exp(-pars[9] / pars[6]) +
                                                  exp(-pars[9] / pars[7])))()),
        ("t_rise_mult",
         create_dpf_class(lambda pars, dt: exp(-dt / pars[6]))()),
        ("t_decay_mult",
         create_dpf_class(lambda pars, dt: exp(-dt / pars[7]))()),
        ("mul_avgerr",
         create_dpf_class(lambda pars, dt: exp(-dt / pars[10]))())
    ],
    extra_global_params=[("spikeTimes", "scalar*")])

OUTPUT_PARAMS = {
    "C": 10.0,
# Example 11 (scraped fragment boundary)
# Neurons for input layer
# Stateless Bernoulli-spiking input neuron: each timestep it fires with
# probability ImageData[ImageOffset[0] + id] (values presumably normalised
# pixel intensities in [0, 1] -- TODO confirm against the loading code).
input_model = genn_model.create_custom_neuron_class(
    "input",
    # ImageData: flattened image batch; ImageOffset: start index of the
    # currently-presented image within ImageData.
    extra_global_params=[("ImageData", "float*"), ("ImageOffset", "unsigned int*")],
    threshold_condition_code="$(gennrand_uniform) < $(ImageData)[$(ImageOffset)[0] + $(id)]",
    is_auto_refractory_required=False
)

# Neurons for hidden layers
hidden_layer_model = genn_model.create_custom_neuron_class(
    "hidden_layer",
    param_names=["Tau"],
    var_name_types=[("V", "scalar"), ("U", "scalar"), ("PhiF", "scalar"), ("PsiH", "scalar")],
    additional_input_vars=[("IsynBack", "scalar", 0.0)],
    derived_params=[
        ("OneMinusTau", genn_model.create_dpf_class(lambda pars, dt: 1.0 - pars[0])())
    ], 
    sim_code="""
        // Update voltage
        $(V) = ($(OneMinusTau) * $(V)) + ($(Tau) * $(Isyn));
        
        // Calculate sigmoid of voltage and hence positive weighted function
        const scalar sigmoidV = 1.0 / (1.0 + exp(-$(V)));
        $(PsiH) = sigmoidV * (1.0 - sigmoidV);
        
        // Update U
        $(U) = ($(OneMinusTau) * $(U)) + ($(Tau) * $(IsynBack));
        
        // Calculate sigmoid of U
        $(PhiF) = 1.0 / (1.0 + exp(-$(U)));
        """,