コード例 #1
0
# Additive STDP weight-update model with pre/post exponential traces.
# The weight g is clamped to [wMin, wMax] after every update; aPlus/aMinus
# are rescaled by the weight range via derived parameters.
stdp_additive = genn_model.create_custom_weight_update_class(
    "STDPAdditive",
    param_names=["tauPlus", "tauMinus", "aPlus", "aMinus", "wMin", "wMax"],
    var_name_types=[("g", "scalar")],
    pre_var_name_types=[("preTrace", "scalar")],
    post_var_name_types=[("postTrace", "scalar")],
    # pars indices: 2=aPlus, 3=aMinus, 4=wMin, 5=wMax — scale the learning
    # rates by (wMax - wMin) so they are relative to the weight range
    derived_params=[("aPlusScaled",
                     genn_model.create_dpf_class(lambda pars, dt: pars[2] *
                                                 (pars[5] - pars[4]))()),
                    ("aMinusScaled",
                     genn_model.create_dpf_class(lambda pars, dt: pars[3] *
                                                 (pars[5] - pars[4]))())],
    # Presynaptic spike: deliver g, then depress using the postsynaptic
    # trace evaluated at the time since the last postsynaptic spike
    sim_code="""
        $(addToInSyn, $(g));
        const scalar dt = $(t) - $(sT_post);
        if(dt > 0) {
            const scalar timing = exp(-dt / $(tauMinus));
            const scalar newWeight = $(g) - ($(aMinusScaled) * $(postTrace) * timing);
            $(g) = min($(wMax), max($(wMin), newWeight));
        }
        """,
    # Back-propagated postsynaptic spike: potentiate using the presynaptic trace
    learn_post_code="""
        const scalar dt = $(t) - $(sT_pre);
        if(dt > 0) {
            const scalar timing = exp(-dt / $(tauPlus));
            const scalar newWeight = $(g) + ($(aPlusScaled) * $(preTrace) * timing);
            $(g) = min($(wMax), max($(wMin), newWeight));
        }
        """,
    # Decay the presynaptic trace since its last update and add 1 per spike
    pre_spike_code="""
        const scalar dt = $(t) - $(sT_pre);
        $(preTrace) = $(preTrace) * exp(-dt / $(tauPlus)) + 1.0;
        """,
    # Decay the postsynaptic trace since its last update and add 1 per spike
    post_spike_code="""
        const scalar dt = $(t) - $(sT_post);
        $(postTrace) = $(postTrace) * exp(-dt / $(tauMinus)) + 1.0;
        """,
    # $(sT_pre)/$(sT_post) are referenced above, so both must be requested
    is_pre_spike_time_required=True,
    is_post_spike_time_required=True)
コード例 #2
0
ファイル: common.py プロジェクト: BrainsOnBoard/pygenn_paper
# Izhikevich-style dopamine-modulated STDP: STDP interactions accumulate in
# an eligibility tag c, which is brought up to date by shared code in
# izhikevich_stdp_tag_update_code (defined elsewhere in this file).
izhikevich_stdp_model = genn_model.create_custom_weight_update_class(
    "izhikevich_stdp",

    param_names=["tauPlus",  "tauMinus", "tauC", "tauD", "aPlus", "aMinus",
                 "wMin", "wMax"],
    # pars indices: 2=tauC, 3=tauD — combined rate constant, presumably
    # consumed by the shared tag-update code (TODO confirm)
    derived_params=[
        ("scale", genn_model.create_dpf_class(lambda pars, dt: 1.0 / -((1.0 / pars[2]) + (1.0 / pars[3])))())],
    var_name_types=[("g", "scalar"), ("c", "scalar")],

    # Presynaptic spike: deliver g, bring the tag up to date, then depress it
    sim_code=
        """
        $(addToInSyn, $(g));
        // Calculate time of last tag update
        const scalar tc = fmax($(prev_sT_pre), fmax($(prev_sT_post), $(prev_seT_pre)));
        """
        + izhikevich_stdp_tag_update_code +
        """
        // Decay tag and apply STDP
        scalar newTag = $(c) * tagDecay;
        const scalar dt = $(t) - $(sT_post);
        if (dt > 0) {
            scalar timing = exp(-dt / $(tauMinus));
            newTag -= ($(aMinus) * timing);
        }
        // Write back updated tag and update time
        $(c) = newTag;
        """,
    # Spike-like event (dopamine injection, see threshold condition below):
    # bring the tag up to date and decay it, without an STDP contribution
    event_code=
        """
        // Calculate time of last tag update
        const scalar tc = fmax($(sT_pre), fmax($(prev_sT_post), $(prev_seT_pre)));
        """
        + izhikevich_stdp_tag_update_code +
        """
        // Decay tag
        $(c) *= tagDecay;
        """,
    # Back-propagated postsynaptic spike: bring the tag up to date, then
    # potentiate it
    learn_post_code=
        """
        // Calculate time of last tag update
        const scalar tc = fmax($(sT_pre), fmax($(prev_sT_post), $(seT_pre)));
        """
        + izhikevich_stdp_tag_update_code + 
        """
        // Decay tag and apply STDP
        scalar newTag = $(c) * tagDecay;
        const scalar dt = $(t) - $(sT_pre);
        if (dt > 0) {
            scalar timing = exp(-dt / $(tauPlus));
            newTag += ($(aPlus) * timing);
        }
        // Write back updated tag and update time
        $(c) = newTag;
        """,
    # NOTE(review): `injectDopamine` must be provided to the generated code
    # (e.g. as an extra global parameter) — confirm where it is defined
    event_threshold_condition_code="injectDopamine",

    # Current AND previous spike/spike-event times are all referenced above
    is_pre_spike_time_required=True, 
    is_post_spike_time_required=True,
    is_pre_spike_event_time_required=True,

    is_prev_pre_spike_time_required=True, 
    is_prev_post_spike_time_required=True,
    is_prev_pre_spike_event_time_required=True)
コード例 #3
0
    $(V) = 0.0;
    $(SpikeCount)++;
    """,
    threshold_condition_code="$(V) >= $(Vthr)")

# Current source model, allowing current to be injected into neuron from variable:
# each neuron's per-instance "magnitude" value is injected every timestep
cs_model = create_custom_current_source_class(
    "cs_model",
    var_name_types=[("magnitude", "scalar")],
    injection_code="$(injectCurrent, $(magnitude));")

# Model for graded synapses with exponential activation: while the presynaptic
# membrane potential V_pre exceeds Vthresh, inject a logistic function of
# V_pre (midpoint Vmid, slope Vslope) scaled by g and the timestep DT
graded_synapse_model = create_custom_weight_update_class(
    "graded_synapse_model",
    param_names=["Vmid", "Vslope", "Vthresh"],
    var_name_types=[("g", "scalar")],
    # fmax(0.0, ...) clamps the sigmoid so the injected current is never negative
    event_code=
    "$(addToInSyn, DT * $(g) * fmax(0.0, 1.0 / (1.0 + exp(($(Vmid) - $(V_pre)) / $(Vslope)))));",
    event_threshold_condition_code="$(V_pre) > $(Vthresh)")

# Variable-initialisation snippet: set the value to g everywhere except the
# diagonal (id_pre == id_post), which is zeroed — i.e. no self-connection weight
lateral_inhibition = create_custom_init_var_snippet_class(
    "lateral_inhibition",
    param_names=["g"],
    var_init_code="$(value)=($(id_pre)==$(id_post)) ? 0.0 : $(g);")

# STDP synapse with additive weight dependence
symmetric_stdp = create_custom_weight_update_class(
    "symmetric_stdp",
    param_names=["tau", "rho", "eta", "wMin", "wMax"],
    var_name_types=[("g", "scalar")],
    sim_code="""
コード例 #4
0
from pygenn import genn_model

# Additive STDP model with nearest neighbour spike pairing
# (no traces: each update uses only the single most recent pre/post spike time,
# and the weight is clamped to [wMin, wMax] after every change)
hebbian_stdp_model = genn_model.create_custom_weight_update_class(
    "hebbian_stdp",
    param_names=["tauPlus", "tauMinus", "aPlus", "aMinus", "wMin", "wMax"],
    var_name_types=[("g", "scalar")],

    # Code that gets called whenever a presynaptic spike arrives at the synapse:
    # deliver g, then depress by aMinus weighted by exp(-dt / tauMinus)
    sim_code="""
        $(addToInSyn, $(g));
        const scalar dt = $(t) - $(sT_post);
        if(dt > 0) {
            const scalar newWeight = $(g) - ($(aMinus) * exp(-dt / $(tauMinus)));
            $(g) = fmin($(wMax), fmax($(wMin), newWeight));
        }
        """,

    # Code that gets called whenever a back-propagated postsynaptic spike arrives at the synapse:
    # potentiate by aPlus weighted by exp(-dt / tauPlus)
    learn_post_code="""
        const scalar dt = $(t) - $(sT_pre);
        if(dt > 0) {
            const scalar newWeight = $(g) + ($(aPlus) * exp(-dt / $(tauPlus)));
            $(g) = fmin($(wMax), fmax($(wMin), newWeight));
        }
        """,
    is_pre_spike_time_required=True,
    is_post_spike_time_required=True)

# Model parameters
# NOTE(review): the code that consumes NUM_NEURONS is outside this excerpt —
# confirm it matches the population sizes created elsewhere in the script
NUM_NEURONS = 14
コード例 #5
0
# Current source injecting each neuron's per-instance "magnitude" value
# into the neuron every timestep
cs_model = genn_model.create_custom_current_source_class(
    "cs_model",
    var_name_types=[("magnitude", "scalar")],
    injection_code="$(injectCurrent, $(magnitude));")

# STDP synapse with additive weight dependence: the same symmetric kernel
# (exp(-dt / tau) - rho) is applied on both pre and post spikes, so close
# pairings potentiate and distant ones depress by rho; g is clamped to
# [wMin, wMax].
# NOTE(review): unlike the other variants in this file, sim_code neither
# calls $(addToInSyn, ...) nor guards against dt <= 0 — confirm intentional
symmetric_stdp = genn_model.create_custom_weight_update_class(
    "symmetric_stdp",
    param_names=["tau", "rho", "eta", "wMin", "wMax"],
    var_name_types=[("g", "scalar")],
    # Presynaptic spike: update relative to the last postsynaptic spike
    sim_code=
        """
        const scalar dt = $(t) - $(sT_post);
        const scalar timing = exp(-dt / $(tau)) - $(rho);
        const scalar newWeight = $(g) + ($(eta) * timing);
        $(g) = fmin($(wMax), fmax($(wMin), newWeight));
        """,
    # Postsynaptic spike: same update relative to the last presynaptic spike
    learn_post_code=
        """
        const scalar dt = $(t) - $(sT_pre);
        const scalar timing = exp(-dt / $(tau)) - $(rho);
        const scalar newWeight = $(g) + ($(eta) * timing);
        $(g) = fmin($(wMax), fmax($(wMin), newWeight));
        """,
    is_pre_spike_time_required=True,
    is_post_spike_time_required=True)

# Create model: single-precision ("float") GeNN model named "mnist_mb"
model = genn_model.GeNNModel("float", "mnist_mb")
# Simulation timestep — DT is defined elsewhere in this script
model.dT = DT
# Fix the RNG seed for reproducible runs
# NOTE(review): `_model` is a private attribute — confirm no public setter exists
model._model.set_seed(1337)
コード例 #6
0
# Trace-based STDP with weight-dependent updates and dendritically-delayed
# output: depression scales linearly with g (times alpha), potentiation with
# pow(g, mu); spikes are delivered after denDelayStep timesteps
stdp_model = genn_model.create_custom_weight_update_class(
    "stdp",
    param_names=["tauPlus", "tauMinus", "lambda", "alpha", "mu", "denDelay"],
    # Convert the dendritic delay (pars[5] = denDelay, in the same units as
    # dt) into a whole number of timesteps
    derived_params=[("denDelayStep",
                     genn_model.create_dpf_class(
                         lambda pars, dt: np.floor(pars[5] / dt) - 1.0)())],
    var_name_types=[("g", "scalar")],
    pre_var_name_types=[("preTrace", "scalar")],
    post_var_name_types=[("postTrace", "scalar")],
    # Presynaptic spike: depress using the postsynaptic trace, then deliver
    # the (updated) weight with the dendritic delay
    sim_code="""
        const scalar dt = $(t) - $(sT_post);
        if (dt > 0) {
            const scalar timing = $(postTrace) * exp(-dt / $(tauMinus));
            const scalar deltaG = -$(lambda) * $(alpha) * $(g) * timing;
            $(g) += deltaG;
        }
        $(addToInSynDelay, $(g), (unsigned int)$(denDelayStep));
        """,
    # Back-propagated postsynaptic spike: potentiate using the presynaptic trace
    learn_post_code="""
        const scalar dt = $(t) - $(sT_pre);
        if (dt > 0) {
            const scalar timing = $(preTrace) * exp(-dt / $(tauPlus));
            const scalar deltaG = $(lambda) * pow($(g), $(mu)) * timing;
            $(g) += deltaG;
        }
        """,
    # Decay each trace since its last update and add 1 per spike
    pre_spike_code="""
        scalar dt = $(t) - $(sT_pre);
        $(preTrace) = ($(preTrace) * exp(-dt / $(tauPlus))) + 1.0;
        """,
    post_spike_code="""
        scalar dt = $(t) - $(sT_post);
        $(postTrace) = ($(postTrace) * exp(-dt / $(tauMinus))) + 1.0;
        """,
    is_pre_spike_time_required=True,
    is_post_spike_time_required=True)
コード例 #7
0
ファイル: mnist.py プロジェクト: ajaysub110/mnist-pygenn
    ])

# STDP
# Unsupervised STDP driven by a presynaptic trace Xpre relative to a target
# Xtar, with weight-dependent potentiation bound gMax (exponent mu) and a
# per-synapse learning rate eta
stdp_model = genn_model.create_custom_weight_update_class(
    "stdp_model",
    param_names=["tauMinus", "gMax", "Xtar", "mu"],
    var_name_types=[("g", "scalar"), ("eta", "scalar")],
    pre_var_name_types=[("Xpre", "scalar")],
    # Presynaptic spike just delivers the current weight
    sim_code="""
        $(addToInSyn, $(g));
        """,
    # Postsynaptic spike: move g by eta * (decayed Xpre - Xtar), scaled by
    # (gMax - g)^mu, and cap it at gMax
    learn_post_code="""
        const scalar dt = $(t) - $(sT_pre);
        if(dt > 0) {
            const scalar expXpre = $(Xpre) * exp(-dt / $(tauMinus));
            const scalar newG = $(g) - (($(eta) * (expXpre - $(Xtar)) * pow(($(gMax) - $(g)),$(mu))));
            $(g) = $(gMax) <= newG ? $(gMax) : newG;
        }
        """,
    # NOTE(review): this sets Xpre to exp(-dt/tauMinus) + 1, discarding the
    # previous trace value, whereas other examples use
    # $(Xpre) * exp(-dt/tau) + 1 — confirm this is intended
    pre_spike_code="""
        const scalar dt = $(t) - $(sT_pre);
        if(dt > 0) {
            const scalar expXpre = exp(-dt / $(tauMinus));
            $(Xpre) = expXpre + 1.0;
        }
        """,
    is_pre_spike_time_required=True,
    is_post_spike_time_required=True)

poisson_model = genn_model.create_custom_neuron_class(
    'poisson_model',
コード例 #8
0
ファイル: model.py プロジェクト: chanokin/pynn_genn
    def build_genn_wum(self, conn_params, init_vals):
        """Build a GeNN weight-update model (WUM) from this synapse type's
        ``wum_defs``.

        Connection parameters that are homogeneous across all synapses (and
        not forcibly mutable) are promoted from per-synapse GeNN variables
        to model parameters; the rest remain variables initialised from the
        expanded parameter arrays.

        :param conn_params: dict mapping parameter name to a per-connection
            value array (assumed numpy-like; it must support indexing and
            ``astype``)
        :param init_vals: NOTE(review): unused in this method — confirm
            whether callers rely on it
        :return: tuple (instantiated GeNN WUM, parameter dict, variable-init
            dict, pre-var init dict or None, post-var init dict or None)
        """
        # Take a deep copy of the definitions
        genn_defs = deepcopy(self.wum_defs)

        # Check that it doesn't already have its
        # variables and parameters separated out
        assert "param_names" not in genn_defs
        assert "var_name_types" not in genn_defs

        # Extract variables from copy of defs and remove
        # **NOTE** all vars are by default GeNN variables
        vars = genn_defs["vars"]
        del genn_defs["vars"]

        # Start with an empty list of parameters
        param_names = []

        # Get set of forcibly mutable vars if synapse type has one
        mutable_vars = (self.mutable_vars
                        if hasattr(self, "mutable_vars") else set())

        # Loop through connection parameters
        for n, p in iteritems(conn_params):
            # If this parameter is in the variable dictionary,
            # but it is homogeneous (all entries equal the first)
            if n in vars and np.allclose(p, p[0]) and n not in mutable_vars:
                # Remove from vars
                del vars[n]

                # Add it to params
                param_names.append(n)

        # Copy updated vars and parameters back into defs
        genn_defs["var_name_types"] = vars.items()
        genn_defs["param_names"] = param_names

        # Create custom model (note the trailing () — this returns an
        # instance of the generated class, not the class itself)
        genn_model = create_custom_weight_update_class(self.__class__.__name__,
                                                       **genn_defs)()

        # Use first entry in conn param for parameters
        wum_params = {n: conn_params[n][0] for n in genn_defs["param_names"]}

        # Convert variables to arrays with correct data type
        # (fall back to float32 for GeNN types with no numpy mapping)
        wum_init = {
            n: conn_params[n].astype(genn_to_numpy_types[t] if t
                                     in genn_to_numpy_types else np.float32,
                                     copy=False)
            for n, t in iteritems(vars)
        }

        # Zero all presynaptic variables
        # **TODO** other means of initialisation
        wum_pre_init = (None if "pre_var_name_types" not in genn_defs else
                        {n[0]: 0.0
                         for n in genn_defs["pre_var_name_types"]})

        # Zero all postsynaptic variables
        # **TODO** other means of initialisation
        wum_post_init = (None if "post_var_name_types" not in genn_defs else
                         {n[0]: 0.0
                          for n in genn_defs["post_var_name_types"]})

        return genn_model, wum_params, wum_init, wum_pre_init, wum_post_init
コード例 #9
0
# SuperSpike weight update: presynaptic spikes are double-filtered into
# zTilda, multiplied by the postsynaptic surrogate term sigmaPrime into an
# eligibility trace (e, then lambda), and integrated against the postsynaptic
# error signal errTilda into the accumulator m (consumed elsewhere)
superspike_model = genn_model.create_custom_weight_update_class(
    "superspike",
    param_names=["tauRise", "tauDecay", "beta"],
    var_name_types=[("w", "scalar"), ("e", "scalar"), 
                    ("lambda", "scalar"), ("m", "scalar")],
    pre_var_name_types=[("z", "scalar"), ("zTilda", "scalar")],
    post_var_name_types=[("sigmaPrime", "scalar")],

    # Presynaptic spikes simply deliver the weight
    sim_code="""
    $(addToInSyn, $(w));
    """,

    # Each presynaptic spike bumps the raw trace z ...
    pre_spike_code="""
    $(z) += 1.0;
    """,
    # ... which decays with tauRise and is low-pass filtered into zTilda
    pre_dynamics_code="""
    // filtered presynaptic trace
    $(z) += (-$(z) / $(tauRise)) * DT;
    $(zTilda) += ((-$(zTilda) + $(z)) / $(tauDecay)) * DT;
    """,

    # Surrogate derivative of the postsynaptic activation, zeroed below -80 mV
    post_dynamics_code="""
    // filtered partial derivative
    if($(V_post) < -80.0) {
       $(sigmaPrime) = 0.0;
    }
    else {
       const scalar onePlusHi = 1.0 + fabs($(beta) * 0.001 * ($(V_post) - $(Vthresh_post)));
       $(sigmaPrime) = $(beta) / (onePlusHi * onePlusHi);
    }
    """,

    synapse_dynamics_code="""
    // Filtered eligibility trace
    $(e) += ($(zTilda) * $(sigmaPrime) - $(e) / $(tauRise))*DT;
    $(lambda) += ((-$(lambda) + $(e)) / $(tauDecay)) * DT;
    // Get error from neuron model and compute full
    // expression under integral and calculate m
    $(m) += $(lambda) * $(errTilda_post);
    """)
コード例 #10
0
# SuperSpike weight update with a per-synapse adaptive learning rate:
# the gradient accumulator m is integrated every timestep and, every
# update_t ms, the weight is stepped by r0 / (sqrt(upsilon) + epsilon)
# where upsilon is an RMS-style running maximum of the squared gradient
superspike_model = create_custom_weight_update_class(
    "superspike_model",
    param_names=[
        "t_rise", "t_decay", "tau_rms", "wmax", "wmin", "epsilon", "update_t"
    ],
    var_name_types=[("w", "scalar"), ("e", "scalar"), ("lambda", "scalar"),
                    ("upsilon", "scalar"), ("m", "scalar"), ("r0", "scalar")],
    # Presynaptic spikes simply deliver the weight
    sim_code="""
    $(addToInSyn, $(w));
    """,
    synapse_dynamics_code="""
    // Filtered eligibility trace
    $(e) += ($(z_tilda_pre) * $(sigma_prime_post) - $(e)/$(t_rise))*DT;
    $(lambda) += ((- $(lambda) + $(e)) / $(t_decay)) * DT;
    // get error from neuron model and compute full expression under integral
    const scalar g = $(lambda) * $(err_tilda_post);
    // at each time step, calculate m
    $(m) += g;
    if ((int)round($(t)) % (int)$(update_t) == 0 && (int)round($(t)) != 0) {
        const scalar grad = $(m)/$(update_t);
        // calculate learning rate r
        $(upsilon) = fmax($(upsilon) * $(ExpRMS) , grad*grad);
        const scalar r = $(r0) / (sqrt($(upsilon))+$(epsilon));
        // update synaptic weight
        $(w) += r * grad;
        $(w) = fmin($(wmax), fmax($(wmin), $(w)));
        $(m) = 0.0;
    }
    """,
    # ExpRMS = exp(-update_t / tau_rms): decay of the RMS statistic between
    # weight updates (pars[-1] = update_t, pars[2] = tau_rms)
    derived_params=[
        ("ExpRMS",
         create_dpf_class(lambda pars, dt: exp(-pars[-1] / pars[2]))())
    ])
コード例 #11
0
#     is_pre_spike_time_required=True,
#     is_post_spike_time_required=True
# )

# Symmetric additive STDP: the same form of update (eta * timing) is applied
# on both pre and post spikes, with g clamped to [gmin, gmax].
# NOTE(review): learn_post_code omits the `- $(rho)` offset that sim_code
# applies to `timing` — confirm this asymmetry is intended
stdp_model = create_custom_weight_update_class(
    "stdp_model",
    param_names=["tau", "rho", "eta", "gmin", "gmax"],
    var_name_types=[("g", "scalar")],
    # Presynaptic spike: deliver g, then update from the last post spike
    sim_code="""
        $(addToInSyn, $(g));
        scalar deltat = $(t) - $(sT_post);
        if (deltat > 0) {
            scalar timing = exp(-deltat / $(tau)) - $(rho);
            scalar newg = $(g) + ($(eta) * timing);
            $(g) = fmin($(gmax), fmax($(gmin), newg));
        }
        """,
    # Postsynaptic spike: update from the last pre spike
    learn_post_code="""
        const scalar deltat = $(t) - $(sT_pre);
        if (deltat > 0) {
            scalar timing = exp(-deltat / $(tau));
            scalar newg = $(g) + ($(eta) * timing);
            $(g) = fmin($(gmax), fmax($(gmin), newg));
        }
        """,
    is_pre_spike_time_required=True,
    is_post_spike_time_required=True)

# Current source model which injects current with a magnitude specified by a state variable
cs_model = create_custom_current_source_class(
    "cs_model",
コード例 #12
0
from pygenn.genn_model import create_custom_weight_update_class
from pygenn.genn_wrapper.Models import VarAccess_READ_ONLY

# Static pulse synapse that delivers +g on true presynaptic spikes and -g on
# spike-like events, which fire (per the threshold condition below) when the
# presynaptic input is negative
signed_static_pulse = create_custom_weight_update_class(
    'signed_static_pulse',
    # g is read-only: none of the code strings ever write it
    var_name_types=[("g", "scalar", VarAccess_READ_ONLY)],
    sim_code='''
    $(addToInSyn, $(g));
    ''',
    event_code='''
    $(addToInSyn, -$(g));
    ''',
    # NOTE(review): `spike` must be defined by the presynaptic neuron model's
    # generated code — confirm against that model
    event_threshold_condition_code='''
    $(input_pre) < 0.0 && spike
    ''')
コード例 #13
0
        
        // Thus calculate U 
        const scalar phiH = spike ? 1.0 : 0.0;
        $(U) = ($(OneMinusTau) * $(U)) + ($(Tau) * (phiH - phiStar));
        
        // Calculate sigmoid of U
        $(PhiF) = 1.0 / (1.0 + exp(-$(U)));
        """,
    threshold_condition_code="spike",
    is_auto_refractory_required=False
)

# Weight update model for continuous, backwards updates: every timestep,
# deliver g scaled by the presynaptic neuron's PhiF variable
backward_continuous = genn_model.create_custom_weight_update_class(
    "backward_continuous",
    var_name_types=[("g", "scalar")],
    synapse_dynamics_code="$(addToInSyn, $(g) * $(PhiF_pre));",
)

# Postsynaptic update model to deliver input to IsynBack: a delta synapse
# that adds the accumulated inSyn to the neuron's IsynBack and clears it
backwards_delta = genn_model.create_custom_postsynaptic_class(
    "backwards_delta",
    apply_input_code="$(IsynBack) += $(inSyn); $(inSyn) = 0;"
)

# Learning rule running 
forward_learning = genn_model.create_custom_weight_update_class(
    "forward_learning",
    param_names=["nu"],
    var_name_types=[("g", "scalar")],
 
コード例 #14
0
ファイル: fusi.py プロジェクト: agarwalmanvi/gennzoo
# Bistable synapse: on each presynaptic spike the internal variable X is
# pushed up/down depending on the postsynaptic depolarisation V_post and a
# postsynaptic calcium-like trace C; otherwise X drifts towards whichever
# side of thetaX it already sits on. The efficacy g is binary: Jplus if
# X > thetaX, else Jminus.
fusi_model = create_custom_weight_update_class(
    "fusi_model",
    param_names=[
        "tauC", "a", "b", "thetaV", "thetaLUp", "thetaLDown", "thetaHUp",
        "thetaHDown", "thetaX", "alpha", "beta", "Xmax", "Xmin", "JC", "Jplus",
        "Jminus"
    ],
    # last_tpre records the previous presynaptic spike time for the drift term
    var_name_types=[("X", "scalar"), ("last_tpre", "scalar"), ("g", "scalar")],
    post_var_name_types=[("C", "scalar")],
    sim_code="""
    $(addToInSyn, $(g));
    const scalar dt = $(t) - $(sT_post);
    const scalar decayC = $(C) * exp(-dt / $(tauC));
    if ($(V_post) > $(thetaV) && $(thetaLUp) < decayC && decayC < $(thetaHUp)) {
        $(X) += $(a);
    }
    else if ($(V_post) <= $(thetaV) && $(thetaLDown) < decayC && decayC < $(thetaHDown)) {
        $(X) -= $(b);
    }
    else {
        const scalar X_dt = $(t) - $(last_tpre);
        if ($(X) > $(thetaX)) {
            $(X) += $(alpha) * X_dt;
        }
        else {
            $(X) -= $(beta) * X_dt;
        }
    }
    $(X) = fmin($(Xmax), fmax($(Xmin), $(X)));
    $(g) = ($(X) > $(thetaX)) ? $(Jplus) : $(Jminus);
    $(last_tpre) = $(t);
    """,
    # Postsynaptic spike: decay C since the last postsynaptic spike, then
    # add the fixed increment JC
    post_spike_code="""
    const scalar dt = $(t) - $(sT_post);
    $(C) = ($(C) * exp(-dt / $(tauC))) + $(JC);
    """,
    is_pre_spike_time_required=True,
    is_post_spike_time_required=True)
コード例 #15
0
ファイル: model.py プロジェクト: genn-team/pynn_genn
    def build_genn_wum(self, conn_params, init_vals):
        """Build a GeNN weight-update model (WUM) from this synapse type's
        ``wum_defs``.

        Homogeneous connection parameters (and homogeneous on-device lazy
        parameters) are promoted from per-synapse GeNN variables to model
        parameters; remaining variables are initialised from the expanded
        arrays, model defaults, or the connector's on-device initialisers.

        :param conn_params: dict of connection parameters; must contain a
            'connector' entry, which is popped here. Values are either
            expanded arrays or LazyArray objects to be generated on device.
        :param init_vals: NOTE(review): unused in this method — confirm
            whether callers rely on it
        :return: tuple (GeNN WUM class, parameter dict, variable-init dict,
            pre-var init dict or None, post-var init dict or None)
        """
        # Take a deep copy of the definitions
        genn_defs = deepcopy(self.wum_defs)

        # Check that it doesn't already have its
        # variables and parameters separated out
        assert "param_names" not in genn_defs
        assert "var_name_types" not in genn_defs

        # Extract variables from copy of defs and remove
        # **NOTE** all vars are by default GeNN variables
        vars = genn_defs["vars"]
        del genn_defs["vars"]

        # extract the connector from the connectivity parameters
        conn = conn_params.pop('connector')

        # Start with an empty list of parameters
        param_names = []

        # Get set of forcibly mutable vars if synapse type has one
        mutable_vars = (self.mutable_vars
                        if hasattr(self, "mutable_vars") else set())

        wum_params = {}
        heterogeneous_on_device = []
        # Loop through connection parameters
        for n, p in iteritems(conn_params):
            # if parameter is of type LazyArray it means that we prevented
            # expansion via PyNN and will be generated on device
            if isinstance(p, LazyArray):
                # if mutable or will be randomly generated on device
                # we add it to host_heterogeneous_var_names so that later code
                # properly initializes them
                if n in mutable_vars or isinstance(p.base_value,
                                                   RandomDistribution):
                    heterogeneous_on_device.append(n)
                    continue

                # if the parameter is in the variable dictionary but it is homogeneous
                if (n in vars and n in conn.on_device_init_params
                        and conn.on_device_init_params[n].is_homogeneous):
                    # remove it from vars
                    del vars[n]
                    # add to param_names
                    param_names.append(n)
                    # and add to homogeneous wum_params
                    wum_params[n] = conn.on_device_init_params[n].base_value

            # if the parameter was already expanded because the user chose to or
            # it is not convenient to expand on device (e.g. a FromListConnector)
            else:
                # If this parameter is in the variable dictionary,
                # but it is homogeneous
                if (n in vars and np.allclose(p, p[0])
                        and n not in mutable_vars):
                    # remove it from vars
                    del vars[n]
                    # add to param_names
                    param_names.append(n)
                    # and add to homogeneous wum_params
                    wum_params[n] = p[0]

        # Copy updated vars and parameters back into defs
        genn_defs["var_name_types"] = vars.items()
        genn_defs["param_names"] = param_names

        # Create custom model (the class itself — not instantiated here)
        genn_model = create_custom_weight_update_class(self.__class__.__name__,
                                                       **genn_defs)

        # Loop through GeNN variables
        wum_init = {}
        for n, t in iteritems(vars):
            # Get type to use for variable
            # (fall back to float32 for GeNN types with no numpy mapping)
            var_type = (genn_to_numpy_types[t]
                        if t in genn_to_numpy_types else np.float32)

            # If this variable is set by connection parameters,
            # use these as initial values
            if n in conn_params and n not in heterogeneous_on_device:
                wum_init[n] = conn_params[n].astype(var_type, copy=False)
            # If there is a default in the model, use that
            elif n in self.default_initial_values:
                wum_init[n] = self.default_initial_values[n]
            # Otherwise, if the parameter is to be initialized on device
            elif n in conn.on_device_init_params:
                wum_init[n] = self._init_variable(
                    True, conn.on_device_init_params[n])
            else:
                raise Exception("Variable '{}' not "
                                "correctly initialised".format(n))
        # Zero all pre-synaptic variables
        # **TODO** other means of initialisation
        wum_pre_init = (None if "pre_var_name_types" not in genn_defs else
                        {n[0]: 0.0
                         for n in genn_defs["pre_var_name_types"]})

        # Zero all post-synaptic variables
        # **TODO** other means of initialisation
        wum_post_init = (None if "post_var_name_types" not in genn_defs else
                         {n[0]: 0.0
                          for n in genn_defs["post_var_name_types"]})

        return genn_model, wum_params, wum_init, wum_pre_init, wum_post_init
コード例 #16
0
# Piecewise STDP rule with hard-coded timing windows and coefficients.
# BUG FIX: the original code strings declared `const scalar newG;` and then
# assigned it inside the branches — invalid C/C++ (a const object must be
# initialised at its declaration), so the generated GeNN code could not
# compile. `newG` is now declared non-const; everything else is unchanged.
# NOTE(review): both updates read $(inSyn) — confirm this is intended rather
# than $(g), since inSyn is the accumulated postsynaptic input.
stdp = create_custom_weight_update_class(
    "stdp",
    param_names=["gMax", "gMin"],
    var_name_types=[("g", "scalar")],
    # Presynaptic spike: deliver g, then adjust it based on the last
    # post-pre interval; clamp to [gMin, gMax]
    sim_code="""
        $(addToInSyn, $(g));
        const scalar dt = $(sT_post) - $(sT_pre);
        scalar newG;
        if ((dt>20) && (dt<=200)) {
                newG = $(g) - 0.0125*$(inSyn);}
        else { 
            if ((dt > 2) && (dt <= 20)) {
                    newG = $(g) - 0.0117 * $(inSyn)*dt + 0.223 * $(inSyn);}
            else {if ((dt > -200) && (dt <= 2)) {
                    newG = $(g) - 0.0025 * $(inSyn);}
                else {newG = 0;}}
            }
        $(g) = fmin($(gMax), fmax($(gMin), newG));
        """,
    # Back-propagated postsynaptic spike: same piecewise update
    learn_post_code="""
        const scalar dt = $(sT_post) - $(sT_pre);
        scalar newG;
        if ((dt>20) && (dt<=200)) {
                newG = $(g) - 0.0125*$(inSyn);}
        else { 
            if ((dt > 2) && (dt <= 20)) {
                    newG = $(g) - 0.0117 * $(inSyn)*dt + 0.223 * $(inSyn);}
            else {if ((dt > -200) && (dt <= 2)) {
                    newG = $(g) - 0.0025 * $(inSyn);}
                else {newG = 0;}}
            }
        $(g) = fmin($(gMax), fmax($(gMin), newG));
        """,
    is_pre_spike_time_required=True,
    is_post_spike_time_required=True)
コード例 #17
0
    sumExpPi +=  __shfl_xor_sync(0xFFFFFFFF, sumExpPi, 0x10);
    $(Pi) = expPi / sumExpPi;

    const scalar piStar = ($(id) == $(labels)[$(batch)]) ? 1.0 : 0.0;
    $(E) = $(Pi) - piStar;

    $(DeltaB) += $(E);
    """,
    is_auto_refractory_required=False)

#----------------------------------------------------------------------------
# Weight update models
#----------------------------------------------------------------------------
# Continuously (every timestep) feed the presynaptic error signal E back
# through the read-only weight g
feedback_model = genn_model.create_custom_weight_update_class(
    "feedback",
    var_name_types=[("g", "scalar", VarAccess_READ_ONLY)],
    synapse_dynamics_code="""
    $(addToInSyn, $(g) * $(E_pre));
    """)

eprop_alif_model = genn_model.create_custom_weight_update_class(
    "eprop_alif",
    param_names=["TauE", "TauA", "CReg", "FTarget", "TauFAvg", "Beta"],
    derived_params=[
        ("Alpha",
         genn_model.create_dpf_class(lambda pars, dt: np.exp(-dt / pars[0]))()
         ),
        ("Rho",
         genn_model.create_dpf_class(lambda pars, dt: np.exp(-dt / pars[1]))()
         ),
        ("FTargetTimestep",
         genn_model.create_dpf_class(lambda pars, dt: (pars[3] * dt) / 1000.0)
コード例 #18
0
# Supervised STDP: the sign of each update depends on whether this synapse's
# fixed `index` parameter matches the per-synapse `label` variable. For the
# matching label, pre-after-post depresses (B, tauMinus) and post-after-pre
# potentiates (A, tauPlus); for non-matching labels the two cases swap.
# Each branch clamps only the bound it moves towards (gMin or gMax).
supervised_stdp = create_custom_weight_update_class(
    "supervised_stdp",
    param_names=["tauMinus", "tauPlus", "A", "B", "gMax", "gMin", "index"],
    var_name_types=[("g", "scalar"), ("label", "scalar")],
    # Presynaptic spike: deliver g, then update from the last post spike
    sim_code="""
        $(addToInSyn, $(g));
        scalar dt = $(t) - $(sT_post);
        if(dt > 0) {
            if($(index) == $(label)){
                scalar timing = exp(-dt / $(tauMinus));
                scalar newWeight = $(g) - ($(B) * timing);
                $(g) = fmax($(gMin), newWeight);
                            }

            else{
                scalar timing = exp(-dt / $(tauPlus));
                scalar newWeight = $(g) + ($(A) * timing);
                $(g) = fmin($(gMax), newWeight);  
            }
        }

        """,
    # Postsynaptic spike: mirrored update from the last pre spike
    learn_post_code="""
        scalar dt = $(t) - $(sT_pre);
        if (dt > 0) {
            if($(index) == $(label)){
                scalar timing = exp(-dt / $(tauPlus));
                scalar newWeight = $(g) + ($(A) * timing);
                $(g) = fmin($(gMax), newWeight);
                            }

            else{
                scalar timing = exp(-dt / $(tauMinus));
                scalar newWeight = $(g) - ($(B) * timing);
                $(g) = fmax($(gMin), newWeight);
            }
        }
        """,
    is_pre_spike_time_required=True,
    is_post_spike_time_required=True,
)
コード例 #19
0
# Current source injecting each neuron's per-instance "magnitude" value
# into the neuron every timestep
cs_model = create_custom_current_source_class(
    "cs_model",
    var_name_types=[("magnitude", "scalar")],
    injection_code="$(injectCurrent, $(magnitude));")

# STDP synapse with additive weight dependence: close spike pairings
# potentiate (exp(-dt/tau) dominates) and distant ones depress (rho offset),
# with g clamped to [wMin, wMax] after every update.
# NOTE(review): learn_post_code omits the `- $(rho)` offset used in sim_code,
# and neither update guards against dt <= 0 — confirm both are intentional
symmetric_stdp = create_custom_weight_update_class(
    "symmetric_stdp",
    param_names=["tau", "rho", "eta", "wMin", "wMax"],
    var_name_types=[("g", "scalar")],
    # Presynaptic spike: deliver g, then update from the last post spike
    sim_code="""
        $(addToInSyn, $(g));
        const scalar dt = $(t) - $(sT_post);
        const scalar timing = exp(-dt / $(tau)) - $(rho);
        const scalar newWeight = $(g) + ($(eta) * timing);
        $(g) = fmin($(wMax), fmax($(wMin), newWeight));
        """,
    # Postsynaptic spike: update from the last pre spike
    learn_post_code="""
        const scalar dt = $(t) - $(sT_pre);
        const scalar timing = exp(-dt / $(tau));
        const scalar newWeight = $(g) + ($(eta) * timing);
        $(g) = fmin($(wMax), fmax($(wMin), newWeight));
        """,
    is_pre_spike_time_required=True,
    is_post_spike_time_required=True)

# STDP synapse with multiplicative weight dependence
stdp_multiplicative = create_custom_weight_update_class(
    "STDPMultiplicative",
    param_names=["tauPlus", "tauMinus", "aPlus", "aMinus", "wMin", "wMax"],
    var_name_types=[("g", "scalar")],