Example #1
def __init__(self, **parameters):
    super(StaticSynapse, self).__init__(**parameters)
    # we have to define the translations on a per-instance basis because
    # they depend on whether the synapses are current-, conductance- or voltage-based.
    self.translations = build_translations(
        ('weight', 'weight', "weight*weight_units", "weight/weight_units"),
        ('delay', 'delay', ms))
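The comment in this example notes that the weight transform depends on weight_units, which is only known once the synapse's target type is fixed. A minimal sketch of that idea, using plain floats as stand-ins for the unit objects (the pick_weight_units helper and its scale values are illustrative assumptions, not the backend's API):

# Illustrative sketch: a unit scale chosen per synapse instance, then used by the
# "weight*weight_units" / "weight/weight_units" transforms shown above.
US, NA = 1e-6, 1e-9          # stand-ins for microsiemens / nanoamps

def pick_weight_units(conductance_based):
    # assumption: conductance-based targets use uS, current-based targets use nA
    return US if conductance_based else NA

weight_units = pick_weight_units(conductance_based=True)
w_native = 0.05 * weight_units            # forward transform
w_pynn = w_native / weight_units          # reverse transform
assert abs(w_pynn - 0.05) < 1e-15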
Example #2
def test_describe():
    M = StandardModelType
    M.default_parameters = {'a': 22.2, 'b': 33.3, 'c': 44.4}
    M.translations = build_translations(
            ('a', 'A'),
            ('b', 'B', 1000.0),
            ('c', 'C', 'c + a', 'C - A'),
        )
    assert isinstance(M().describe(), basestring)
Example #3
def test_computed_parameters():
    M = StandardModelType
    M.default_parameters = {'a': 22.2, 'b': 33.3, 'c': 44.4}
    M.translations = build_translations(
            ('a', 'A'),
            ('b', 'B', 1000.0),
            ('c', 'C', 'c + a', 'C - A'),
        )
    assert_equal(M().computed_parameters(), ['c'])
Example #4
def test_translate_with_divide_by_zero_error():
    M = StandardModelType
    M.default_parameters = {'a': 22.2, 'b': 33.3}
    M.translations = build_translations(
            ('a', 'A'),
            ('b', 'B', 'b/0', 'B*0'),
    )
    assert_raises(ZeroDivisionError,
                  M.translate, {'a': 23.4, 'b': 34.5})
Example #5
def test_reverse_translate():
    M = StandardModelType
    M.default_parameters = {'a': 22.2, 'b': 33.3, 'c': 44.4}
    M.translations = build_translations(
            ('a', 'A'),
            ('b', 'B', 1000.0),
            ('c', 'C', 'c + a', 'C - A'),
        )
    assert_equal(_parameter_space_to_dict(M().reverse_translate(ParameterSpace({'A': 23.4, 'B': 34500.0, 'C': 69.0})), 88),
                 {'a': 23.4, 'b': 34.5, 'c': 45.6})
Example #6
def test_translate():
    M = StandardModelType
    M.default_parameters = {'a': 22.2, 'b': 33.3, 'c': 44.4}
    M.translations = build_translations(
            ('a', 'A'),
            ('b', 'B', 1000.0),
            ('c', 'C', 'c + a', 'C - A'),
        )
    assert_equal(M.translate({'a': 23.4, 'b': 34.5, 'c': 45.6}),
                 {'A': 23.4, 'B': 34500.0, 'C': 69.0})
Example #7
def test_reverse_translate_with_invalid_transformation():
    M = StandardModelType
    M.translations = build_translations(
            ('a', 'A'),
            ('b', 'B', 'b + z', 'B-Z'),
    )
    M.default_parameters = {'a': 22.2, 'b': 33.3}
    #really we should trap such errors in build_translations(), not in reverse_translate()
    assert_raises(NameError,
                  M().reverse_translate,
                  {'A': 23.4, 'B': 34.5})
Example #8
def test_translate_with_invalid_transformation():
    M = StandardModelType
    M.translations = build_translations(
            ('a', 'A'),
            ('b', 'B', 'b + z', 'B-Z'),
    )
    M.default_parameters = {'a': 22.2, 'b': 33.3}
    #really we should trap such errors in build_translations(), not in translate()
    m = M()
    assert_raises(NameError,
                  m.translate,
                  ParameterSpace({'a': 23.4, 'b': 34.5}, m.get_schema(), None))
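The comment above suggests that such errors would be better trapped in build_translations() itself. A hedged sketch of what an eager check could look like (validate_translations is a hypothetical helper, not part of PyNN; it only handles string transforms):

def validate_translations(translations, default_parameters):
    # Hypothetical helper: evaluate each string forward transform against the
    # default parameters so unknown names fail at build time rather than in
    # translate()/reverse_translate().
    for pynn_name, rule in translations.items():
        expr = rule["forward_transform"]
        if isinstance(expr, str):
            try:
                eval(expr, {}, dict(default_parameters))
            except NameError as err:
                raise ValueError("bad transform for %r: %s" % (pynn_name, err))

# With the translations above, 'b + z' references an unknown name 'z', so this
# check would raise immediately instead of failing later inside translate().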
Example #9
def test_translate():
    M = StandardModelType
    M.default_parameters = {'a': 22.2, 'b': 33.3, 'c': 44.4}
    M.translations = build_translations(
            ('a', 'A'),
            ('b', 'B', 1000.0),
            ('c', 'C', 'c + a', 'C - A'),
        )
    m = M()
    native_parameters = m.translate(ParameterSpace({'a': 23.4, 'b': 34.5, 'c': 45.6}, m.get_schema(), None))
    assert_equal(_parameter_space_to_dict(native_parameters, 77),
                 {'A': 23.4, 'B': 34500.0, 'C': 69.0})
Example #10
def test_translate_with_divide_by_zero_error():
    M = StandardModelType
    M.default_parameters = {'a': 22.2, 'b': 33.3}
    M.translations = build_translations(
            ('a', 'A'),
            ('b', 'B', 'b/0', 'B*0'),
    )
    m = M()
    native_parameters = m.translate(ParameterSpace({'a': 23.4, 'b': 34.5}, m.get_schema(), 77))
    assert_raises(ZeroDivisionError,
                  native_parameters.evaluate,
                  simplify=True)
Example #11
class IF_cond_exp_gsfa_grr(cells.IF_cond_exp_gsfa_grr):
    eqs = adapt_iaf + conductance_based_alpha_synapses
    translations = deepcopy(adapt_iaf_translations)
    translations.update(conductance_based_synapse_translations)
    state_variable_translations = build_translations(
        ('v', 'v', lambda p: p * mV, lambda p: p / mV),
        ('g_s', 'g_s', lambda p: p * uS,
         lambda p: p / uS),  # should be uS - needs changed for all back-ends
        ('g_r', 'g_r', lambda p: p * uS, lambda p: p / uS),
        ('gsyn_exc', 'ge', lambda p: p * uS, lambda p: p / uS),
        ('gsyn_inh', 'gi', lambda p: p * uS, lambda p: p / uS))
    post_synaptic_variables = {'excitatory': 'ge', 'inhibitory': 'gi'}
    brian2_model = AdaptiveNeuronGroup2
Example #12
def test_translate():
    M = StandardModelType
    M.default_parameters = {'a': 22.2, 'b': 33.3, 'c': 44.4}
    M.translations = build_translations(
        ('a', 'A'),
        ('b', 'B', 1000.0),
        ('c', 'C', 'c + a', 'C - A'),
    )
    m = M()
    native_parameters = m.translate(ParameterSpace(
        {'a': 23.4, 'b': 34.5, 'c': 45.6}, m.get_schema(), None))
    assert_equal(_parameter_space_to_dict(native_parameters, 77),
                 {'A': 23.4, 'B': 34500.0, 'C': 69.0})
Example #13
class SpikeSourcePoisson(cells.SpikeSourcePoisson):
    
    __doc__ = cells.SpikeSourcePoisson.__doc__ 

    translations = build_translations(
        ('rate', 'rate', 0.001),
        ('start', 'start'),
        ('duration', 'duration')
    )

    nemo_name = "PoissonSource"

    indices = {'rate' : 0}
Example #14
class SpikeSourcePoisson(cells.SpikeSourcePoisson):
    __doc__ = cells.SpikeSourcePoisson.__doc__

    translations = build_translations(
        ('rate', 'firing_rate', lambda **p: p["rate"] * Hz,
         lambda **p: p["firing_rate"] / Hz),
        ('start', 'start_time', lambda **p: p["start"] * ms,
         lambda **p: p["start_time"] / ms),
        ('duration', 'duration', lambda **p: p["duration"] * ms,
         lambda **p: p["duration"] / ms),
    )
    eqs = None
    brian2_model = PoissonGroup
Example #15
class HH_cond_exp(cells.HH_cond_exp, IF_base):
    """ Single-compartment Hodgkin-Huxley model."""

    n = 0
    translations = standardmodels.build_translations(
        *[(name, name) for name in cells.HH_cond_exp.default_parameters])

    def __init__(self, parameters):
        cells.HH_cond_exp.__init__(self, parameters)
        self.label = '%s%d' % (self.__class__.__name__, self.__class__.n)
        self.synapse_type = "exp_syn"
        self.__class__.n += 1
        logger.debug("HH_cond_exp created")
Example #16
class IF_cond_exp(cells.IF_cond_exp, IF_base):
    """Leaky integrate and fire model with fixed threshold and 
    decaying-exponential post-synaptic conductance."""

    n = 0
    translations = standardmodels.build_translations(
        *[(name, name) for name in cells.IF_cond_exp.default_parameters])

    def __init__(self, parameters):
        cells.IF_cond_exp.__init__(self, parameters)
        self.label = '%s%d' % (self.__class__.__name__, self.__class__.n)
        self.synapse_type = "doub_exp_syn"
        self.__class__.n += 1
Example #17
class SpikeSourceArray(cells.SpikeSourceArray, NotImplementedModel):
    """Spike source generating spikes at the times given in the spike_times array."""

    n = 0
    translations = standardmodels.build_translations(
        *[(name, name)
          for name in cells.SpikeSourceArray.default_parameters])

    def __init__(self, parameters):
        NotImplementedModel.__init__(self)
        cells.SpikeSourceArray.__init__(self, parameters)
        self.label = '%s%d' % (self.__class__.__name__, self.__class__.n)
        self.__class__.n += 1
Example #18
class AdditivePotentiationMultiplicativeDepression(synapses.AdditivePotentiationMultiplicativeDepression):
    __doc__ = synapses.AdditivePotentiationMultiplicativeDepression.__doc__

    translations = build_translations(
        ('w_max', 'w_max'),
        ('w_min', 'w_min'),
    )

    def _set_target_type(self, weight_units):
        self.translations["w_max"]["forward_transform"] = lambda **P: P["w_max"] * weight_units
        self.translations["w_max"]["reverse_transform"] = lambda **P: P["w_max"] / weight_units
        self.translations["w_min"]["forward_transform"] = lambda **P: P["w_min"] * weight_units
        self.translations["w_min"]["reverse_transform"] = lambda **P: P["w_min"] / weight_units
Example #19
class MultiplicativeWeightDependence(synapses.MultiplicativeWeightDependence):
    __doc__ = synapses.MultiplicativeWeightDependence.__doc__

    translations = build_translations(
        ('w_max', 'w_max'),
        ('w_min', 'w_min'),
    )

    def _set_target_type(self, weight_units):
        self.translations["w_max"]["forward_transform"] = lambda **P: P["w_max"] * weight_units
        self.translations["w_max"]["reverse_transform"] = lambda **P: P["w_max"] / weight_units
        self.translations["w_min"]["forward_transform"] = lambda **P: P["w_min"] * weight_units
        self.translations["w_min"]["reverse_transform"] = lambda **P: P["w_min"] / weight_units
Example #20
def test_build_translations():
    t = build_translations(("a", "A"), ("b", "B", 1000.0), ("c", "C", "c + a", "C - A"))
    assert_equal(set(t.keys()), set(["a", "b", "c"]))
    assert_equal(set(t["a"].keys()), set(["translated_name", "forward_transform", "reverse_transform"]))
    assert_equal(t["a"]["translated_name"], "A")
    assert_equal(t["a"]["forward_transform"], "a")
    assert_equal(t["a"]["reverse_transform"], "A")
    assert_equal(t["b"]["translated_name"], "B")
    assert_equal(t["b"]["forward_transform"], "float(1000)*b")
    assert_equal(t["b"]["reverse_transform"], "B/float(1000)")
    assert_equal(t["c"]["translated_name"], "C")
    assert_equal(t["c"]["forward_transform"], "c + a")
    assert_equal(t["c"]["reverse_transform"], "C - A")
Example #21
class GutigWeightDependence(synapses.GutigWeightDependence, WeightDependence):
    __doc__ = synapses.GutigWeightDependence.__doc__

    vars = deepcopy(WeightDependence.vars)
    vars.update({"muPlus": "scalar", "muMinus": "scalar"})

    depression_update_code = "$(g) -=  pow(($(g) - $(Wmin)), $(muMinus)) * update;\n"

    potentiation_update_code = "$(g) += pow(($(Wmax) - $(g)), $(muPlus)) * update;\n"

    translations = build_translations(("mu_plus", "muPlus"),
                                      ("mu_minus", "muMinus"),
                                      *WeightDependence.wd_translations)
Example #22
class TsodyksMarkramSynapse(synapses.TsodyksMarkramSynapse):
    __doc__ = synapses.TsodyksMarkramSynapse.__doc__

    translations = build_translations(('weight', 'WEIGHT'), ('delay', 'DELAY'),
                                      ('U', 'UU'), ('tau_rec', 'TAU_REC'),
                                      ('tau_facil', 'TAU_FACIL'), ('u0', 'U0'),
                                      ('x0', 'X'), ('y0', 'Y'))

    def _get_minimum_delay(self):
        d = state.min_delay
        if d == 'auto':
            d = state.dt
        return d
Example #23
class SpikeSourcePoisson(cells.SpikeSourcePoisson, NotImplementedModel):
    """Spike source, generating spikes according to a Poisson process."""

    n = 0
    translations = standardmodels.build_translations(
        *[(name, name)
          for name in cells.SpikeSourcePoisson.default_parameters])

    def __init__(self, parameters):
        NotImplementedModel.__init__(self)
        cells.SpikeSourcePoisson.__init__(self, parameters)
        self.label = '%s%d' % (self.__class__.__name__, self.__class__.n)
        self.__class__.n += 1
Example #24
class IF_cond_exp_gsfa_grr(base_cells.IF_cond_exp_gsfa_grr):

    __doc__ = base_cells.IF_cond_exp_gsfa_grr.__doc__

    translations = build_translations(
        ('v_rest', 'v_rest'), ('v_reset', 'v_reset'), ('cm', 'c_m'),
        ('tau_m', 'tau_m'), ('tau_refrac', 't_refrac'), ('tau_syn_E', 'tau_e'),
        ('tau_syn_I', 'tau_i'), ('v_thresh', 'v_thresh'),
        ('i_offset', 'i_offset'), ('e_rev_E', 'e_e'), ('e_rev_I', 'e_i'),
        ('tau_sfa', 'tau_sfa'), ('e_rev_sfa', 'e_sfa'), ('q_sfa', 'q_sfa'),
        ('tau_rr', 'tau_rr'), ('e_rev_rr', 'e_rr'), ('q_rr', 'q_rr'))
    model = GsfaGrrIF
    extra_parameters = {'syn_type': 'conductance', 'syn_shape': 'exp'}
Example #25
class SpikePairRule(synapses.SpikePairRule):

    translations = build_translations(
        ('tau_plus', 'tau_plus'),
        ('tau_minus', 'tau_minus'),  # defined in post-synaptic neuron
    )
    possible_models = set(['stdp_synapse'])  #,'stdp_synapse_hom'])

    def __init__(self, tau_plus=20.0, tau_minus=20.0):
        #synapses.SpikePairRule.__init__(self, tau_plus, tau_minus)
        parameters = dict(locals())
        parameters.pop('self')
        self.parameters = self.translate(parameters)
Example #26
class IF_facets_hardware1(cells.IF_facets_hardware1):

    __doc__ = cells.IF_facets_hardware1.__doc__

    # in 'iaf_cond_exp', the dimension of C_m is pF,
    # while in the pyNN context, cm is given in nF
    translations = build_translations(
        ('v_reset', 'V_reset'), ('v_rest', 'E_L'), ('v_thresh', 'V_th'),
        ('e_rev_I', 'E_in'), ('tau_syn_E', 'tau_syn_ex'),
        ('tau_syn_I', 'tau_syn_in'), ('g_leak', 'g_L'))
    nest_name = {"on_grid": "iaf_cond_exp", "off_grid": "iaf_cond_exp"}
    standard_receptor_type = True
    extra_parameters = {'C_m': 200.0, 't_ref': 1.0, 'E_ex': 0.0}
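The comment in this example points out the pF-versus-nF mismatch for the membrane capacitance; here it is sidestepped by fixing C_m through extra_parameters. If cm did have to be translated, a scale factor would do the conversion; a hedged sketch (the entry below is illustrative, not part of this class):

from pyNN.standardmodels import build_translations

# Illustrative only: 0.2 nF on the PyNN side would become 200.0 pF natively.
translations_with_cm = build_translations(
    ('cm',      'C_m', 1000.0),
    ('v_reset', 'V_reset'),
)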
Example #27
class Izhikevich(cells.Izhikevich):
    __doc__ = cells.Izhikevich.__doc__

    translations = build_translations(
        ('a', 'a'),
        ('b', 'b'),
        ('c', 'c'),
        ('d', 'd'),
        ('i_offset', 'I_e', 1000.0),
    )
    nest_name = {"on_grid": "izhikevich", "off_grid": "izhikevich"}
    standard_receptor_type = True
    receptor_scale = 1e-3  # synaptic weight is in mV, so need to undo usual weight scaling
Example #28
class EIF_cond_exp_isfa_ista(cells.EIF_cond_exp_isfa_ista):
    __doc__ = cells.EIF_cond_exp_isfa_ista.__doc__
    eqs = adexp_iaf + conductance_based_exponential_synapses
    translations = deepcopy(adexp_iaf_translations)
    translations.update(conductance_based_synapse_translations)
    state_variable_translations = build_translations(
        ('v', 'v', lambda p: p * mV, lambda p: p / mV),
        ('w', 'w', lambda p: p * nA, lambda p: p / nA),
        ('gsyn_exc', 'ge', lambda p: p * uS, lambda p: p / uS),
        ('gsyn_inh', 'gi', lambda p: p * uS, lambda p: p / uS))

    post_synaptic_variables = {'excitatory': 'ge', 'inhibitory': 'gi'}
    brian2_model = AdaptiveNeuronGroup
Example #29
class IF_cond_alpha(cells.IF_cond_alpha, IF_base):
    """Leaky integrate and fire model with fixed threshold and alpha-function-
    shaped post-synaptic conductance."""

    n = 0
    translations = standardmodels.build_translations(
        *[(name, name) for name in cells.IF_cond_alpha.default_parameters])

    def __init__(self, parameters):
        cells.IF_cond_alpha.__init__(self, parameters)
        self.label = '%s%d' % (self.__class__.__name__, self.__class__.n)
        self.synapse_type = "alpha_syn"
        self.__class__.n += 1
Example #30
class MockStandardCellType(StandardCellType):
    default_parameters = {
        "foo": 99.9,
        "hoo": 100.0,
        "woo": 5.0,
    }
    default_initial_values = {
        "v": 0.0,
    }
    translations = build_translations(
        ('foo', 'FOO'),
        ('hoo', 'HOO', 3.0),
        ('woo', 'WOO', '2*woo + hoo', '(WOO - HOO)/2'),
    )
Example #31
def test_translate_with_invalid_transformation():
    M = StandardModelType
    M.translations = build_translations(
        ('a', 'A'),
        ('b', 'B', 'b + z', 'B-Z'),
    )
    M.default_parameters = {'a': 22.2, 'b': 33.3}
    #really we should trap such errors in build_translations(), not in translate()
    m = M()
    assert_raises(NameError, m.translate,
                  ParameterSpace({
                      'a': 23.4,
                      'b': 34.5
                  }, m.get_schema(), None))
Example #32
class STDPMechanism(synapses.STDPMechanism):
    __doc__ = synapses.STDPMechanism.__doc__

    base_translations = build_translations(
        ('weight', 'WEIGHT'),
        ('delay', 'DELAY'),
        ('dendritic_delay_fraction', 'dendritic_delay_fraction')
    )

    def _get_minimum_delay(self):
        d = state.min_delay
        if d == 'auto':
            d = state.dt
        return d
Example #33
class StaticSynapse(synapses.StaticSynapse, GeNNStandardSynapseType):
    __doc__ = synapses.StaticSynapse.__doc__

    wum_defs = {"sim_code": "$(addToInSyn, $(g));\n", "vars": {"g": "scalar"}}

    translations = build_translations(
        ("weight", "g"),
        ("delay", "delaySteps", delayMsToSteps, delayStepsToMs))

    def _get_minimum_delay(self):
        if state._min_delay == "auto":
            return state.dt
        else:
            return state._min_delay
Example #34
class IF_curr_exp(cells.IF_curr_exp):
    __doc__ = cells.IF_curr_exp.__doc__

    translations = build_translations(  # should add some computed/scaled parameters
        ('tau_m',      'TAU_M'),
        ('cm',         'CM'),
        ('v_rest',     'V_REST'),
        ('v_thresh',   'V_THRESH'),
        ('v_reset',    'V_RESET'),
        ('tau_refrac', 'T_REFRAC'),
        ('i_offset',   'I_OFFSET'),
        ('tau_syn_E',  'TAU_SYN_E'),
        ('tau_syn_I',  'TAU_SYN_I'),
    )
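The comment on the translations above notes that some computed or scaled parameters are still missing. A hedged sketch of one such entry, following the tau_m/cm pattern used by the SpiNNaker translations later in this listing (the R_MEMBRANE name and the one-way mapping are assumptions):

from pyNN.standardmodels import build_translations

# Assumption: the native model wants a membrane resistance, so cm becomes a
# computed parameter (tau_m / cm) with no reverse transform.
translations_with_computed = build_translations(
    ('tau_m', 'TAU_M'),
    ('cm',    'R_MEMBRANE', 'tau_m / cm', ''),
)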
Example #35
class SpikeSourcePoisson(cells.SpikeSourcePoisson):

    __doc__ = cells.SpikeSourcePoisson.__doc__

    translations = build_translations(('start', 'Tstart', 1e-3),
                                      ('rate', 'rate'),
                                      ('duration', 'duration', 1e-3))
    pcsim_name = 'PoissonInputNeuron'
    simObjFactory = None
    setterMethods = {}

    def __init__(self, parameters):
        cells.SpikeSourcePoisson.__init__(self, parameters)
        self.simObjFactory = pypcsim.PoissonInputNeuron(**self.parameters)
Example #36
def test_translate_with_divide_by_zero_error():
    M = StandardModelType
    M.default_parameters = {'a': 22.2, 'b': 33.3}
    M.translations = build_translations(
        ('a', 'A'),
        ('b', 'B', 'b/0', 'B*0'),
    )
    m = M()
    native_parameters = m.translate(
        ParameterSpace({
            'a': 23.4,
            'b': 34.5
        }, m.get_schema(), 77))
    assert_raises(ZeroDivisionError, native_parameters.evaluate, simplify=True)
Example #37
class SpikePairRule(synapses.SpikePairRule):

    __doc__ = synapses.SpikePairRule.__doc__    

    translations = build_translations(
        ('tau_plus',  'tauLTP'),
        ('tau_minus', 'tauLTD'),
    )
    possible_models = set(['StdwaSA', 'StdwaSoft', 'StdwaGuetig'])
    
    def __init__(self, tau_plus=20.0, tau_minus=20.0):
        #synapses.SpikePairRule.__init__(self, tau_plus, tau_minus)
        self.parameters = self.translate({'tau_plus': tau_plus,
                                          'tau_minus': tau_minus})
Example #38
class SpikeSourceArray(cells.SpikeSourceArray):
    """Spike source generating spikes at the times given in the spike_times array."""
    translations = build_translations(('spike_times', 'spiketimes', ms), )

    @classmethod
    def translate(cls, parameters):
        if 'spike_times' in parameters:
            try:
                parameters['spike_times'] = numpy.array(
                    parameters['spike_times'], float)
            except ValueError:
                raise errors.InvalidParameterValueError(
                    "spike times must be floats")
        return super(SpikeSourceArray, cls).translate(parameters)
Example #39
class TsodyksMarkramSynapse(synapses.TsodyksMarkramSynapse, NESTSynapseMixin):
    __doc__ = synapses.TsodyksMarkramSynapse.__doc__

    translations = build_translations(
        ('weight', 'weight', 1000.0),
        ('delay', 'delay'),
        ('U', 'U'),
        ('tau_rec', 'tau_rec'),
        ('tau_facil', 'tau_fac'),
        ('u0', 'u'),  # this could cause problems for reverse translation
        ('x0', 'x'),  # (as for V_m) in cell models, since the initial value
        ('y0', 'y')  # is not stored, only set.
    )
    nest_name = 'tsodyks_synapse'
Example #40
class IF_curr_alpha(cells.IF_curr_alpha, NotImplementedModel):
    """Leaky integrate and fire model with fixed threshold and alpha-function-
    shaped post-synaptic current."""
    
    n = 0
    translations = standardmodels.build_translations(
        *[(name, name) for name in cells.IF_curr_alpha.default_parameters])
    
    def __init__(self, parameters):
        NotImplementedModel.__init__(self)
        cells.IF_curr_alpha.__init__(self, parameters)
        self.label = '%s%d' % (self.__class__.__name__, self.__class__.n)
        self.synapse_type = "doub_exp_syn"
        self.__class__.n += 1
Example #41
class StepCurrentSource(NeuronCurrentSource, electrodes.StepCurrentSource):

    __doc__ = electrodes.StepCurrentSource.__doc__

    translations = build_translations(
        ('amplitudes',  'amplitudes'),
        ('times',       'times')
    )

    _is_playable = True
    _is_computed = False

    def _generate(self):
        pass
Example #42
def test_build_translations():
    t = build_translations(
            ('a', 'A'),
            ('b', 'B', 1000.0),
            ('c', 'C', 'c + a', 'C - A')
        )
    assert_equal(set(t.keys()), set(['a', 'b', 'c']))
    assert_equal(set(t['a'].keys()),
                 set(['translated_name', 'forward_transform', 'reverse_transform']))
    assert_equal(t['a']['translated_name'], 'A')
    assert_equal(t['a']['forward_transform'], 'a')
    assert_equal(t['a']['reverse_transform'], 'A')
    assert_equal(t['b']['translated_name'], 'B')
    assert_equal(t['b']['forward_transform'], 'float(1000)*b')
    assert_equal(t['b']['reverse_transform'], 'B/float(1000)')
    assert_equal(t['c']['translated_name'], 'C')
    assert_equal(t['c']['forward_transform'], 'c + a')
    assert_equal(t['c']['reverse_transform'], 'C - A')
Example #43
from functools import partial
from pyNN.standardmodels import build_translations
from pynn_spinnaker.standardmodels.cells import calc_max_neurons_per_core

# Import globals
from pynn_spinnaker.standardmodels.cells import (if_curr_neuron_translations,
                                                 if_curr_neuron_immutable_param_map,
                                                 if_curr_neuron_mutable_param_map)

# ----------------------------------------------------------------------------
# Synapse type translations
# ----------------------------------------------------------------------------
# Build translations from PyNN to SpiNNaker synapse model parameters
dual_exp_synapse_translations = build_translations(
    ("tau_syn_E",   "tau_syn_e"),
    ("tau_syn_E2",   "tau_syn_e2"),
    ("tau_syn_I",   "tau_syn_i"),
)

# ----------------------------------------------------------------------------
# Synapse shaping region maps
# ----------------------------------------------------------------------------
dual_exp_synapse_immutable_param_map = [
    ("tau_syn_e", "u4", lazy_param_map.u032_exp_decay),
    ("tau_syn_e", "i4", lazy_param_map.s1615_exp_init),
    ("tau_syn_e2", "u4", lazy_param_map.u032_exp_decay),
    ("tau_syn_e2", "i4", lazy_param_map.s1615_exp_init),
    ("tau_syn_i", "u4", lazy_param_map.u032_exp_decay),
    ("tau_syn_i", "i4", lazy_param_map.s1615_exp_init),
]
Example #44
            ''')

current_based_alpha_synapses = brian.Equations('''
                die/dt = (2.7182818284590451*ye-ie)/tau_syn_e : nA
                dye/dt = -ye/tau_syn_e                        : nA
                dii/dt = (2.7182818284590451*yi-ii)/tau_syn_i : nA
                dyi/dt = -yi/tau_syn_i                        : nA
                i_syn = ie + ii                               : nA
                tau_syn_e                                     : ms
                tau_syn_i                                     : ms
            ''')

leaky_iaf_translations = build_translations(
                ('v_rest',     'v_rest',     mV),
                ('v_reset',    'v_reset',    mV),
                ('cm',         'c_m',        nF),
                ('tau_m',      'tau_m',      ms),
                ('tau_refrac', 'tau_refrac', ms),
                ('v_thresh',   'v_thresh',   mV),
                ('i_offset',   'i_offset',   nA))

adexp_iaf_translations = build_translations(
                ('v_rest',     'v_rest',     mV),
                ('v_reset',    'v_reset',    mV),
                ('cm',         'c_m',        nF),
                ('tau_m',      'tau_m',      ms),
                ('tau_refrac', 'tau_refrac', ms),
                ('v_thresh',   'v_thresh',   mV),
                ('i_offset',   'i_offset',   nA),
                ('a',          'a',          nA),
                ('b',          'b',          nA),
                ('delta_T',    'delta_T',    mV),
Example #45
# Import globals
from pynn_spinnaker.standardmodels.cells import (exp_synapse_translations,
                                                 exp_synapse_immutable_param_map,
                                                 exp_synapse_curr_mutable_param_map)

# ----------------------------------------------------------------------------
# Neuron type translations
# ----------------------------------------------------------------------------
# Build translations from PyNN to SpiNNaker neuron model parameters
if_curr_ca2_adaptive_neuron_translations = build_translations(
    ("tau_m",       "tau_m"),
    ("cm",          "r_membrane", "tau_m / cm", ""),
    ("v_rest",      "v_rest"),
    ("v_thresh",    "v_thresh"),
    ("v_reset",     "v_reset"),
    ("tau_refrac",  "tau_refrac"),
    ("tau_ca2",     "tau_ca2"),
    ("i_offset",    "i_offset"),
    ("i_alpha",     "i_alpha"),
)

# ----------------------------------------------------------------------------
# Neuron region maps
# ----------------------------------------------------------------------------
# Build maps of where and how parameters need to be written into neuron regions
if_curr_ca2_adaptive_neuron_immutable_param_map = [
    ("v_thresh",    "i4", lazy_param_map.s1615),
    ("v_reset",     "i4", lazy_param_map.s1615),
    ("v_rest",      "i4", lazy_param_map.s1615),
    ("i_offset",    "i4", lazy_param_map.s1615),
Example #46
    "tau_z": 5.0,               # Time constant of primary trace (ms)
    "tau_p": 1000.0,            # Time constant of probability trace (ms)
    "f_max": 20.0,              # Firing frequency representing certainty (Hz)
    "phi": 0.05,                # Scaling of intrinsic bias current from probability to current domain (nA)
    "bias_enabled": True,       # Are the learnt biases passed to the neuron
    "plasticity_enabled": True  # Is plasticity enabled
}

# ----------------------------------------------------------------------------
# Intrinsic plasticity translations
# ----------------------------------------------------------------------------
intrinsic_plasticity_translations = build_translations(
    ("tau_z",               "tau_z"),
    ("tau_p",               "tau_p"),

    ("f_max",               "minus_a",  "1000.0 / (f_max * (tau_p - tau_z))", ""),
    ("phi",                 "phi"),
    ("bias_enabled",        "epsilon",  "1000.0 / (f_max * tau_p)", ""),

    ("plasticity_enabled",  "mode",     "bias_enabled + (plasticity_enabled * 2)", "")
)
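With the default values listed above (tau_z = 5 ms, tau_p = 1000 ms, f_max = 20 Hz, both flags enabled), the computed entries evaluate roughly as follows (hand-worked illustration, not library output):

f_max, tau_p, tau_z = 20.0, 1000.0, 5.0
bias_enabled, plasticity_enabled = True, True

minus_a = 1000.0 / (f_max * (tau_p - tau_z))    # ~0.0503
epsilon = 1000.0 / (f_max * tau_p)              # 0.05
mode = bias_enabled + (plasticity_enabled * 2)  # 3: bias and plasticity both on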

# ----------------------------------------------------------------------------
# Intrinsic plasticity region map
# ----------------------------------------------------------------------------
intrinsic_plasticity_param_map = [
    ("minus_a", "i4", s1813),
    ("phi", "i4", lazy_param_map.s1615),
    ("epsilon", "i4", s1813),
    ("tau_z", "i4", s1813_exp_decay),
    ("tau_p", "i4", s1813_exp_decay),
    ("mode", "u4", lazy_param_map.integer),
Example #47
            ''')

current_based_alpha_synapses = brian.Equations('''
                die/dt = (2.7182818284590451*ye-ie)/tau_syn_e : nA
                dye/dt = -ye/tau_syn_e                        : nA
                dii/dt = (2.7182818284590451*yi-ii)/tau_syn_i : nA
                dyi/dt = -yi/tau_syn_i                        : nA
                i_syn = ie + ii                               : nA
                tau_syn_e                                     : ms
                tau_syn_i                                     : ms
            ''')

leaky_iaf_translations = build_translations(
                ('v_rest',     'v_rest',     mV),
                ('v_reset',    'v_reset',    mV),
                ('cm',         'c_m',        nF),
                ('tau_m',      'tau_m',      ms),
                ('tau_refrac', 'tau_refrac', ms),
                ('v_thresh',   'v_thresh',   mV),
                ('i_offset',   'i_offset',   nA))

adexp_iaf_translations = build_translations(
                ('v_rest',     'v_rest',     mV),
                ('v_reset',    'v_reset',    mV),
                ('cm',         'c_m',        nF),
                ('tau_m',      'tau_m',      ms),
                ('tau_refrac', 'tau_refrac', ms),
                ('v_thresh',   'v_thresh',   mV),
                ('i_offset',   'i_offset',   nA),
                ('a',          'a',          nA),
                ('b',          'b',          nA),
                ('delta_T',    'delta_T',    mV),
Example #48
def test_translate_with_divide_by_zero_error():
    M = StandardModelType
    M.default_parameters = {"a": 22.2, "b": 33.3}
    M.translations = build_translations(("a", "A"), ("b", "B", "b/0", "B*0"))
    assert_raises(ZeroDivisionError, M.translate, {"a": 23.4, "b": 34.5})
Example #49
def test_reverse_translate_with_invalid_transformation():
    M = StandardModelType
    M.translations = build_translations(("a", "A"), ("b", "B", "b + z", "B-Z"))
    M.default_parameters = {"a": 22.2, "b": 33.3}
    # really we should trap such errors in build_translations(), not in reverse_translate()
    assert_raises(NameError, M.reverse_translate, {"A": 23.4, "B": 34.5})
Example #50
def test_translate():
    M = StandardModelType
    M.default_parameters = {"a": 22.2, "b": 33.3, "c": 44.4}
    M.translations = build_translations(("a", "A"), ("b", "B", 1000.0), ("c", "C", "c + a", "C - A"))
    assert_equal(M.translate({"a": 23.4, "b": 34.5, "c": 45.6}), {"A": 23.4, "B": 34500.0, "C": 69.0})
Example #51
def test_describe():
    M = StandardModelType
    M.default_parameters = {"a": 22.2, "b": 33.3, "c": 44.4}
    M.translations = build_translations(("a", "A"), ("b", "B", 1000.0), ("c", "C", "c + a", "C - A"))
    m = M({})
    assert isinstance(m.describe(), basestring)
Example #52
def test_computed_parameters():
    M = StandardModelType
    M.default_parameters = {"a": 22.2, "b": 33.3, "c": 44.4}
    M.translations = build_translations(("a", "A"), ("b", "B", 1000.0), ("c", "C", "c + a", "C - A"))
    assert_equal(M.computed_parameters(), ["c"])