def __init__(self, n, p, approximate=True, name='_binomial*'):
    """
    Sampling function for a binomial distribution with ``n`` trials and
    success probability ``p``.

    Parameters
    ----------
    n : int
        Number of trials.
    p : float
        Success probability of a single trial.
    approximate : bool, optional
        Whether to use a normal approximation when it is statistically
        justified (``n*p > 5`` and ``n*(1-p) > 5``). Defaults to ``True``.
    name : str, optional
        Name template for this function object.
    """
    Nameable.__init__(self, name)

    # Python implementation: normal approximation is only used when
    # requested *and* the usual rule of thumb holds
    use_normal = approximate and (n*p > 5) and n*(1-p) > 5
    if use_normal:
        loc = n*p
        scale = np.sqrt(n*p*(1-p))

        def sample_function(vectorisation_idx):
            # vectorisation_idx is either an index array (its length gives
            # the number of samples) or a scalar sample count
            try:
                N = len(vectorisation_idx)
            except TypeError:
                N = int(vectorisation_idx)
            return np.random.normal(loc, scale, size=N)
    else:
        def sample_function(vectorisation_idx):
            try:
                N = len(vectorisation_idx)
            except TypeError:
                N = int(vectorisation_idx)
            return np.random.binomial(n, p, size=N)

    Function.__init__(self, pyfunc=lambda: sample_function(1),
                      arg_units=[], return_unit=1, stateless=False)
    self.implementations.add_implementation('numpy', sample_function)

    # Register code for all other targets from the class-level registry.
    # FIX: dict.iteritems() does not exist on Python 3 -- items() works on
    # both Python 2 and 3.
    for target, func in BinomialFunction.implementations.items():
        code, dependencies = func(n=n, p=p, use_normal=use_normal,
                                  name=self.name)
        self.implementations.add_implementation(target, code,
                                                dependencies=dependencies,
                                                name=self.name)
def _init_2d(self):
    """Create the implementations for a two-dimensional array of values."""
    dimensions = self.dim
    unit = get_unit(dimensions)
    values = self.values
    dt = self.dt

    # Unit-aware Python implementation; used when the TimedArray is called
    # directly, i.e. outside of a simulation run.
    @check_units(i=1, t=second, result=unit)
    def timed_array_func(t, i):
        # Rounding is done with respect to the current defaultclock.dt
        K = _find_K(float(defaultclock.dt), dt)
        epsilon = dt / K
        raw_step = np.int_(np.round(np.asarray(t/epsilon)) / K)
        time_step = np.clip(raw_step, 0, len(values)-1)
        return Quantity(values[time_step, i], dim=dimensions)

    Function.__init__(self, pyfunc=timed_array_func)

    # Dynamic implementations are used so that the upsampling factor can be
    # chosen per group, avoiding rounding problems with each group's dt.
    def create_numpy_implementation(owner):
        group_dt = owner.clock.dt_
        K = _find_K(group_dt, dt)
        n_values = len(values)
        epsilon = dt / K

        def unitless_timed_array_func(t, i):
            timestep = np.clip(np.int_(np.round(t/epsilon) / K),
                               0, n_values-1)
            return values[timestep, i]
        unitless_timed_array_func._arg_units = [second]
        unitless_timed_array_func._return_unit = unit
        return unitless_timed_array_func

    self.implementations.add_dynamic_implementation(
        'numpy', create_numpy_implementation)

    # All code-generation targets share the same flattened C-ordered buffer
    values_flat = self.values.astype(np.double, order='C', copy=False).ravel()

    def namespace(owner):
        return {'%s_values' % self.name: values_flat}

    for target, (_, func_2d) in TimedArray.implementations.items():
        self.implementations.add_dynamic_implementation(
            target, func_2d(self.values, self.dt, self.name),
            namespace=namespace, name=self.name)
def __init__(self, values, dt, name=None):
    """Store values/dt and register Python, numpy and C++ implementations."""
    if name is None:
        name = '_timedarray*'
    Nameable.__init__(self, name)
    unit = get_unit(values)
    values = np.asarray(values)
    self.values = values
    dt = float(dt)
    self.dt = dt

    # Unit-aware Python implementation, used when calling the TimedArray
    # directly, outside of a simulation
    @check_units(t=second, result=unit)
    def timed_array_func(t):
        i = np.clip(np.int_(np.float_(t) / dt + 0.5), 0, len(values)-1)
        return values[i] * unit

    Function.__init__(self, pyfunc=timed_array_func)

    # numpy implementation (without units)
    def unitless_timed_array_func(t):
        i = np.clip(np.int_(np.float_(t) / dt + 0.5), 0, len(values)-1)
        return values[i]
    unitless_timed_array_func._arg_units = [second]
    unitless_timed_array_func._return_unit = unit

    # C++ implementation: nearest-neighbour lookup, clamped to the valid range
    support_code = '''
    inline double _timedarray_%NAME%(const double t, const double _dt, const int _num_values, const double* _values)
    {
        int i = (int)(t/_dt + 0.5); // rounds to nearest int for positive values
        if(i<0) i = 0;
        if(i>=_num_values) i = _num_values-1;
        return _values[i];
    }
    '''.replace('%NAME%', self.name)
    hashdefine_code = '''
    #define %NAME%(t) _timedarray_%NAME%(t, _%NAME%_dt, _%NAME%_num_values, _%NAME%_values)
    '''.replace('%NAME%', self.name)
    cpp_code = {'support_code': support_code,
                'hashdefine_code': hashdefine_code}
    namespace = {'_%s_dt' % self.name: self.dt,
                 '_%s_num_values' % self.name: len(self.values),
                 '_%s_values' % self.name: self.values}

    add_implementations(self, codes={'cpp': cpp_code,
                                     'numpy': unitless_timed_array_func},
                        namespaces={'cpp': namespace},
                        names={'cpp': self.name})
def test_apply_loop_invariant_optimisation_boolean():
    """Boolean sub-expressions should be lifted out, except through stateful calls."""
    variables = {
        'v1': Variable('v1', scalar=False),
        'v2': Variable('v2', scalar=False),
        'N': Constant('N', 10),
        'b': Variable('b', scalar=True, dtype=bool),
        'c': Variable('c', scalar=True, dtype=bool),
        'int': DEFAULT_FUNCTIONS['int'],
        'foo': Function(lambda x: None, arg_units=[Unit(1)],
                        return_unit=Unit(1), arg_types=['boolean'],
                        return_type='float', stateless=False),
    }
    # The calls for "foo" cannot be pulled out, since foo is marked as stateful
    targets_and_exprs = [('v1', '1.0*int(b and c)'),
                         ('v1', '1.0*foo(b and c)'),
                         ('v2', 'int(not b and True)'),
                         ('v2', 'foo(not b and True)')]
    statements = [Statement(target, '=', expr, '', np.float32)
                  for target, expr in targets_and_exprs]
    scalar, vector = optimise_statements([], statements, variables)

    expected_scalar = ['1.0 * int(b and c)', 'b and c',
                       'int((not b) and True)', '(not b) and True']
    expected_vector = ['_lio_1', 'foo(_lio_2)', '_lio_3', 'foo(_lio_4)']
    assert len(scalar) == 4
    for statement, expected in zip(scalar, expected_scalar):
        assert statement.expr == expected
    assert len(vector) == 4
    for statement, expected in zip(vector, expected_vector):
        assert statement.expr == expected
def _resolve_external(self, identifier, run_namespace, user_identifier=True,
                      internal_variable=None):
    '''
    Resolve an external identifier in the context of a `Group`. If the `Group`
    declares an explicit namespace, this namespace is used in addition to the
    standard namespace for units and functions. Additionally, the namespace in
    the `run_namespace` argument (i.e. the namespace provided to
    `Network.run`) is used.

    Parameters
    ----------
    identifier : str
        The name to resolve.
    run_namespace : dict
        A namespace (mapping from strings to objects), as provided as an
        argument to the `Network.run` function or returned by
        `get_local_namespace`.
    user_identifier : bool, optional
        Whether this is an identifier that was used by the user (and not
        something automatically generated that the user might not even know
        about). Will be used to determine whether to display a warning in the
        case of namespace clashes. Defaults to ``True``.
    internal_variable : `Variable`, optional
        The internal variable object that corresponds to this name (if any).
        This is used to give warnings if it also corresponds to a variable
        from an external namespace.
    '''
    # We save tuples of (namespace description, referred object) to
    # give meaningful warnings in case of duplicate definitions
    matches = []
    namespaces = OrderedDict()
    # Default namespaces (units and functions)
    namespaces['constants'] = DEFAULT_CONSTANTS
    namespaces['units'] = DEFAULT_UNITS
    namespaces['functions'] = DEFAULT_FUNCTIONS
    if getattr(self, 'namespace', None) is not None:
        namespaces['group-specific'] = self.namespace
    # explicit or implicit run namespace
    namespaces['run'] = run_namespace

    # FIX: dict.iteritems() is Python-2-only; items() works on 2 and 3
    for description, namespace in namespaces.items():
        if identifier in namespace:
            match = namespace[identifier]
            # Only accept values/functions that can meaningfully be used in
            # code generation (numbers, arrays, Functions/Variables, or
            # annotated plain Python functions)
            if ((isinstance(match, (numbers.Number, np.ndarray, np.number,
                                    Function, Variable))) or
                    (inspect.isfunction(match) and
                     hasattr(match, '_arg_units') and
                     hasattr(match, '_return_unit'))):
                matches.append((description, match))

    if len(matches) == 0:
        # No match at all
        if internal_variable is not None:
            return None
        else:
            raise KeyError(('The identifier "%s" could not be resolved.') %
                           (identifier))
    elif len(matches) > 1:
        # Possibly, all matches refer to the same object
        first_obj = matches[0][1]
        found_mismatch = False
        for m in matches:
            if _same_value(m[1], first_obj):
                continue
            if _same_function(m[1], first_obj):
                continue
            try:
                proxy = weakref.proxy(first_obj)
                if m[1] is proxy:
                    continue
            except TypeError:
                pass
            # Found a mismatch
            found_mismatch = True
            break
        if found_mismatch and user_identifier and internal_variable is None:
            _conflict_warning(('The name "%s" refers to different objects '
                               'in different namespaces used for resolving '
                               'names in the context of group "%s". '
                               'Will use the object from the %s namespace '
                               'with the value %s,') %
                              (identifier, getattr(self, 'name', '<unknown>'),
                               matches[0][0], _display_value(first_obj)),
                              matches[1:])

    if internal_variable is not None and user_identifier:
        # Filter out matches that are identical (a typical case being an
        # externally defined "N" with the number of neurons and a later
        # use of "N" in an expression, which refers to the internal variable
        # storing the number of neurons in the group)
        if isinstance(internal_variable, Constant):
            filtered_matches = [match for match in matches
                                if not _same_value(match[1],
                                                   internal_variable)]
        else:
            filtered_matches = matches
        if len(filtered_matches) == 0:
            pass  # Nothing to warn about
        else:
            warning_message = ('"{name}" is an internal variable of group '
                               '"{group}", but also exists in the ')
            # FIX: this previously tested len(matches) == 1 -- after
            # filtering, the relevant count is len(filtered_matches)
            if len(filtered_matches) == 1:
                warning_message += ('{namespace} namespace with the value '
                                    '{value}. ').format(
                    namespace=filtered_matches[0][0],
                    value=_display_value(filtered_matches[0][1]))
            else:
                # FIX: separator was ' ,' (space before comma)
                warning_message += ('following namespaces: '
                                    '{namespaces}. ').format(
                    namespaces=', '.join(match[0]
                                         for match in filtered_matches))
            warning_message += 'The internal variable will be used.'
            logger.warn(warning_message.format(name=identifier,
                                               group=self.name),
                        'Group.resolve.resolution_conflict', once=True)

    if internal_variable is not None:
        return None  # We were only interested in the warnings above

    # use the first match (according to resolution order)
    resolved = matches[0][1]

    # Replace pure Python functions by a Functions object
    if callable(resolved) and not isinstance(resolved, Function):
        resolved = Function(resolved, stateless=False)

    if not isinstance(resolved, (Function, Variable)):
        # Wrap the value in a Constant object
        unit = get_unit(resolved)
        value = np.asarray(resolved)
        if value.shape != ():
            raise KeyError('Variable %s was found in the namespace, but is'
                           ' not a scalar value' % identifier)
        resolved = Constant(identifier, unit=unit, value=value)

    return resolved
def _init_2d(self):
    """Create the implementations for a two-dimensional array of values."""
    unit = self.unit
    values = self.values
    dt = self.dt

    # Unit-aware Python implementation; used for direct calls outside of a
    # simulation run.
    @check_units(i=1, t=second, result=unit)
    def timed_array_func(t, i):
        # Rounding is done with respect to the current defaultclock.dt
        K = _find_K(float(defaultclock.dt), dt)
        epsilon = dt / K
        raw_step = np.int_(np.round(np.asarray(t/epsilon)) / K)
        time_step = np.clip(raw_step, 0, len(values)-1)
        return values[time_step, i] * unit

    Function.__init__(self, pyfunc=timed_array_func)

    # Implementations are created dynamically so that the upsampling factor
    # K can be matched to each group's dt (avoids rounding problems).
    def create_numpy_implementation(owner):
        K = _find_K(owner.clock.dt_, dt)
        n_values = len(values)
        epsilon = dt / K

        def unitless_timed_array_func(t, i):
            timestep = np.clip(np.int_(np.round(t/epsilon) / K),
                               0, n_values-1)
            return values[timestep, i]
        unitless_timed_array_func._arg_units = [second]
        unitless_timed_array_func._return_unit = unit
        return unitless_timed_array_func

    self.implementations.add_dynamic_implementation(
        'numpy', create_numpy_implementation)

    def create_cpp_implementation(owner):
        K = _find_K(owner.clock.dt_, dt)
        template = '''
        inline double %NAME%(const double t, const int i)
        {
            const double epsilon = %DT% / %K%;
            if (i < 0 || i >= %COLS%)
                return NAN;
            int timestep = (int)((t/epsilon + 0.5)/%K%);
            if(timestep < 0)
                timestep = 0;
            else if(timestep >= %ROWS%)
                timestep = %ROWS%-1;
            return _namespace%NAME%_values[timestep*%COLS% + i];
        }
        '''
        substitutions = {'%NAME%': self.name,
                         '%DT%': '%.18f' % dt,
                         '%K%': str(K),
                         '%COLS%': str(self.values.shape[1]),
                         '%ROWS%': str(self.values.shape[0])}
        return {'support_code': replace(template, substitutions)}

    def create_cpp_namespace(owner):
        flat = self.values.astype(np.double, order='C', copy=False).ravel()
        return {'%s_values' % self.name: flat}

    self.implementations.add_dynamic_implementation(
        'cpp', code=create_cpp_implementation,
        namespace=create_cpp_namespace, name=self.name)

    def create_cython_implementation(owner):
        K = _find_K(owner.clock.dt_, dt)
        template = '''
        cdef double %NAME%(const double t, const int i):
            global _namespace%NAME%_values
            cdef double epsilon = %DT% / %K%
            if i < 0 or i >= %COLS%:
                return _numpy.nan
            cdef int timestep = (int)((t/epsilon + 0.5)/%K%)
            if timestep < 0:
                timestep = 0
            elif timestep >= %ROWS%:
                timestep = %ROWS%-1
            return _namespace%NAME%_values[timestep*%COLS% + i]
        '''
        substitutions = {'%NAME%': self.name,
                         '%DT%': '%.18f' % dt,
                         '%K%': str(K),
                         '%COLS%': str(self.values.shape[1]),
                         '%ROWS%': str(self.values.shape[0])}
        return replace(template, substitutions)

    def create_cython_namespace(owner):
        flat = self.values.astype(np.double, order='C', copy=False).ravel()
        return {'%s_values' % self.name: flat}

    self.implementations.add_dynamic_implementation(
        'cython', code=create_cython_implementation,
        namespace=create_cython_namespace, name=self.name)
def _init_1d(self):
    """Create the implementations for a one-dimensional array of values."""
    unit = self.unit
    values = self.values
    dt = self.dt

    # Unit-aware Python implementation; used for direct calls outside of a
    # simulation run.
    @check_units(t=second, result=unit)
    def timed_array_func(t):
        # Rounding is done with respect to the current defaultclock.dt
        K = _find_K(float(defaultclock.dt), dt)
        epsilon = dt / K
        raw_index = np.int_(np.round(np.asarray(t/epsilon)) / K)
        i = np.clip(raw_index, 0, len(values)-1)
        return values[i] * unit

    Function.__init__(self, pyfunc=timed_array_func)

    # Implementations are created dynamically so that the upsampling factor
    # K can be matched to each group's dt (avoids rounding problems).
    def create_numpy_implementation(owner):
        K = _find_K(owner.clock.dt_, dt)
        n_values = len(values)
        epsilon = dt / K

        def unitless_timed_array_func(t):
            timestep = np.clip(np.int_(np.round(t/epsilon) / K),
                               0, n_values-1)
            return values[timestep]
        unitless_timed_array_func._arg_units = [second]
        unitless_timed_array_func._return_unit = unit
        return unitless_timed_array_func

    self.implementations.add_dynamic_implementation(
        'numpy', create_numpy_implementation)

    def create_cpp_implementation(owner):
        K = _find_K(owner.clock.dt_, dt)
        template = '''
        inline double %NAME%(const double t)
        {
            const double epsilon = %DT% / %K%;
            int i = (int)((t/epsilon + 0.5)/%K%);
            if(i < 0)
                i = 0;
            if(i >= %NUM_VALUES%)
                i = %NUM_VALUES%-1;
            return _namespace%NAME%_values[i];
        }
        '''
        substitutions = {'%NAME%': self.name,
                         '%DT%': '%.18f' % dt,
                         '%K%': str(K),
                         '%NUM_VALUES%': str(len(self.values))}
        for placeholder, replacement in substitutions.items():
            template = template.replace(placeholder, replacement)
        return {'support_code': template}

    def create_cpp_namespace(owner):
        return {'%s_values' % self.name: self.values}

    self.implementations.add_dynamic_implementation(
        'cpp', code=create_cpp_implementation,
        namespace=create_cpp_namespace, name=self.name)

    def create_cython_implementation(owner):
        K = _find_K(owner.clock.dt_, dt)
        template = '''
        cdef double %NAME%(const double t):
            global _namespace%NAME%_values
            cdef double epsilon = %DT% / %K%
            cdef int i = (int)((t/epsilon + 0.5)/%K%)
            if i < 0:
                i = 0
            if i >= %NUM_VALUES%:
                i = %NUM_VALUES% - 1
            return _namespace%NAME%_values[i]
        '''
        substitutions = {'%NAME%': self.name,
                         '%DT%': '%.18f' % dt,
                         '%K%': str(K),
                         '%NUM_VALUES%': str(len(self.values))}
        for placeholder, replacement in substitutions.items():
            template = template.replace(placeholder, replacement)
        return template

    def create_cython_namespace(owner):
        return {'%s_values' % self.name: self.values}

    self.implementations.add_dynamic_implementation(
        'cython', code=create_cython_implementation,
        namespace=create_cython_namespace, name=self.name)
def __init__(self, n, p, approximate=True, name='_binomial*'):
    """
    Sampling function for a binomial distribution with ``n`` trials and
    success probability ``p``, with implementations for numpy, C++ and
    Cython.

    Parameters
    ----------
    n : int
        Number of trials.
    p : float
        Success probability of a single trial.
    approximate : bool, optional
        Whether to use a normal approximation when ``n*p > 5`` and
        ``n*(1-p) > 5``. Defaults to ``True``.
    name : str, optional
        Name template for this function object.
    """
    Nameable.__init__(self, name)

    # Python implementation
    use_normal = approximate and (n*p > 5) and n*(1-p) > 5
    if use_normal:
        loc = n*p
        scale = np.sqrt(n*p*(1-p))

        def sample_function(vectorisation_idx):
            try:
                N = len(vectorisation_idx)
            except TypeError:
                N = int(vectorisation_idx)
            return np.random.normal(loc, scale, size=N)
    else:
        def sample_function(vectorisation_idx):
            try:
                N = len(vectorisation_idx)
            except TypeError:
                N = int(vectorisation_idx)
            return np.random.binomial(n, p, size=N)

    Function.__init__(self, pyfunc=lambda: sample_function(1),
                      arg_units=[], return_unit=1, stateless=False)
    self.implementations.add_implementation('numpy', sample_function)

    # Common pre-calculations for the C++ and Cython inversion samplers.
    # For p > 0.5 we sample the *failures* with probability P = 1-p and
    # return n-X, which keeps the expected number of inversion steps small.
    if not use_normal:
        reverse = p > 0.5
        if reverse:
            P = 1.0 - p
        else:
            P = p
        q = 1.0 - P
        qn = np.exp(n * np.log(q))
        bound = min(n, n*P + 10.0*np.sqrt(n*P*q + 1))

    # C++ implementation
    # Inversion transform sampling (the non-normal branch is an almost
    # exact copy of numpy's rk_binomial_inversion,
    # numpy/random/mtrand/distributions.c)
    if use_normal:
        cpp_code = '''
        float %NAME%(const int vectorisation_idx)
        {
            return _randn(vectorisation_idx) * %SCALE% + %LOC%;
        }
        '''
        cpp_code = replace(cpp_code, {'%SCALE%': '%.15f' % scale,
                                      '%LOC%': '%.15f' % loc,
                                      '%NAME%': self.name})
        cpp_dependencies = {'_randn': DEFAULT_FUNCTIONS['randn']}
    else:
        cpp_code = '''
        long %NAME%(const int vectorisation_idx)
        {
            long X = 0;
            double px = %QN%;
            double U = _rand(vectorisation_idx);
            while (U > px)
            {
                X++;
                if (X > %BOUND%)
                {
                    X = 0;
                    px = %QN%;
                    U = _rand(vectorisation_idx);
                } else
                {
                    U -= px;
                    px = ((%N%-X+1) * %P% * px)/(X*%Q%);
                }
            }
            return %RETURN_VALUE%;
        }
        '''
        cpp_code = replace(cpp_code,
                           {'%N%': '%d' % n,
                            '%P%': '%.15f' % P,
                            '%Q%': '%.15f' % q,
                            '%QN%': '%.15f' % qn,
                            '%BOUND%': '%.15f' % bound,
                            '%RETURN_VALUE%': '%d-X' % n if reverse else 'X',
                            '%NAME%': self.name})
        cpp_dependencies = {'_rand': DEFAULT_FUNCTIONS['rand']}
    self.implementations.add_implementation('cpp',
                                            {'support_code': cpp_code},
                                            dependencies=cpp_dependencies,
                                            name=self.name)

    # Cython implementation (same algorithms as the C++ version)
    if use_normal:
        cython_code = '''
        cdef float %NAME%(const int vectorisation_idx):
            return _randn(vectorisation_idx) * %SCALE% + %LOC%
        '''
        cython_code = replace(cython_code, {'%SCALE%': '%.15f' % scale,
                                            '%LOC%': '%.15f' % loc,
                                            '%NAME%': self.name})
        cython_dependencies = {'_randn': DEFAULT_FUNCTIONS['randn']}
    else:
        cython_code = '''
        cdef long %NAME%(const int vectorisation_idx):
            cdef long X = 0
            cdef double px = %QN%
            cdef double U = _rand(vectorisation_idx)
            while U > px:
                X += 1
                if X > %BOUND%:
                    X = 0
                    px = %QN%
                    U = _rand(vectorisation_idx)
                else:
                    U -= px
                    px = ((%N%-X+1) * %P% * px)/(X*%Q%)
            return %RETURN_VALUE%
        '''
        # FIX: this previously substituted the raw probability p for %P% --
        # for p > 0.5 the algorithm works with the reversed probability
        # P = 1-p (as the C++ version correctly does), so using p here
        # sampled from the wrong distribution.
        cython_code = replace(cython_code,
                              {'%N%': '%d' % n,
                               '%P%': '%.15f' % P,
                               '%Q%': '%.15f' % q,
                               '%QN%': '%.15f' % qn,
                               '%BOUND%': '%.15f' % bound,
                               '%RETURN_VALUE%': '%d-X' % n if reverse else 'X',
                               '%NAME%': self.name})
        cython_dependencies = {'_rand': DEFAULT_FUNCTIONS['rand']}
    self.implementations.add_implementation('cython', cython_code,
                                            dependencies=cython_dependencies,
                                            name=self.name)
def _init_1d(self):
    """Create the implementations for a one-dimensional array of values."""
    dimensions = self.dim
    unit = get_unit(dimensions)
    values = self.values
    dt = self.dt

    # Unit-aware Python implementation; used for direct calls outside of a
    # simulation run.
    @check_units(t=second, result=unit)
    def timed_array_func(t):
        # Rounding is done with respect to the current defaultclock.dt
        K = _find_K(float(defaultclock.dt), dt)
        epsilon = dt / K
        raw_index = np.int_(np.round(np.asarray(t / epsilon)) / K)
        i = np.clip(raw_index, 0, len(values) - 1)
        return Quantity(values[i], dim=dimensions)

    Function.__init__(self, pyfunc=timed_array_func)

    # Implementations are created dynamically so that the upsampling factor
    # K can be matched to each group's dt (avoids rounding problems).
    def create_numpy_implementation(owner):
        K = _find_K(owner.clock.dt_, dt)
        n_values = len(values)
        epsilon = dt / K

        def unitless_timed_array_func(t):
            timestep = np.clip(np.int_(np.round(t / epsilon) / K),
                               0, n_values - 1)
            return values[timestep]
        unitless_timed_array_func._arg_units = [second]
        unitless_timed_array_func._return_unit = unit
        return unitless_timed_array_func

    self.implementations.add_dynamic_implementation(
        'numpy', create_numpy_implementation)

    def create_cpp_implementation(owner):
        K = _find_K(owner.clock.dt_, dt)
        template = '''
        static inline double %NAME%(const double t)
        {
            const double epsilon = %DT% / %K%;
            int i = (int)((t/epsilon + 0.5)/%K%);
            if(i < 0)
                i = 0;
            if(i >= %NUM_VALUES%)
                i = %NUM_VALUES%-1;
            return _namespace%NAME%_values[i];
        }
        '''
        substitutions = {'%NAME%': self.name,
                         '%DT%': '%.18f' % dt,
                         '%K%': str(K),
                         '%NUM_VALUES%': str(len(self.values))}
        for placeholder, replacement in substitutions.items():
            template = template.replace(placeholder, replacement)
        return {'support_code': template}

    def create_cpp_namespace(owner):
        return {'%s_values' % self.name: self.values}

    self.implementations.add_dynamic_implementation(
        'cpp', code=create_cpp_implementation,
        namespace=create_cpp_namespace, name=self.name)

    def create_cython_implementation(owner):
        K = _find_K(owner.clock.dt_, dt)
        template = '''
        cdef double %NAME%(const double t):
            global _namespace%NAME%_values
            cdef double epsilon = %DT% / %K%
            cdef int i = (int)((t/epsilon + 0.5)/%K%)
            if i < 0:
                i = 0
            if i >= %NUM_VALUES%:
                i = %NUM_VALUES% - 1
            return _namespace%NAME%_values[i]
        '''
        substitutions = {'%NAME%': self.name,
                         '%DT%': '%.18f' % dt,
                         '%K%': str(K),
                         '%NUM_VALUES%': str(len(self.values))}
        for placeholder, replacement in substitutions.items():
            template = template.replace(placeholder, replacement)
        return template

    def create_cython_namespace(owner):
        return {'%s_values' % self.name: self.values}

    self.implementations.add_dynamic_implementation(
        'cython', code=create_cython_implementation,
        namespace=create_cython_namespace, name=self.name)
def __init__(self, values, dt, name=None):
    """
    Store the array of values sampled with period ``dt`` and register
    Python, numpy and C++ implementations.

    Parameters
    ----------
    values : array-like
        The values to return at the corresponding times.
    dt : float (in seconds)
        The sampling period of ``values``.
    name : str, optional
        Name template; defaults to ``'_timedarray*'``.
    """
    if name is None:
        name = '_timedarray*'
    Nameable.__init__(self, name)
    unit = get_unit(values)
    values = np.asarray(values)
    self.values = values
    dt = float(dt)
    self.dt = dt

    # Python implementation (with units), used when calling the TimedArray
    # directly, outside of a simulation
    @check_units(t=second, result=unit)
    def timed_array_func(t):
        i = np.clip(np.int_(np.float_(t) / dt + 0.5), 0, len(values) - 1)
        return values[i] * unit

    Function.__init__(self, pyfunc=timed_array_func)

    # we use dynamic implementations because we want to do upsampling
    # in a way that avoids rounding problems with the group's dt
    def create_numpy_implementation(owner):
        group_dt = owner.clock.dt_
        K = _find_K(group_dt, dt)
        epsilon = dt / K
        n_values = len(values)

        def unitless_timed_array_func(t):
            # FIX: previously np.int_(np.round(t/epsilon)) / K -- under
            # true division this yields a *float* index, which cannot be
            # used for array indexing. Convert to int after dividing by K,
            # consistent with the 2D implementation.
            timestep = np.clip(np.int_(np.round(t / epsilon) / K),
                               0, n_values - 1)
            return values[timestep]
        unitless_timed_array_func._arg_units = [second]
        unitless_timed_array_func._return_unit = unit
        return unitless_timed_array_func

    self.implementations.add_dynamic_implementation(
        'numpy', create_numpy_implementation)

    def create_cpp_implementation(owner):
        group_dt = owner.clock.dt_
        K = _find_K(group_dt, dt)
        cpp_code = {
            'support_code': '''
        inline double _timedarray_%NAME%(const double t, const int _num_values, const double* _values)
        {
            const double epsilon = %DT% / %K%;
            int i = (int)((t/epsilon + 0.5)/%K%); // rounds to nearest int for positive values
            if(i<0) i = 0;
            if(i>=_num_values) i = _num_values-1;
            return _values[i];
        }
        '''.replace('%NAME%', self.name).replace('%DT%',
                                                 '%.18f' % dt).replace(
                '%K%', str(K)),
            'hashdefine_code': '''
        #define %NAME%(t) _timedarray_%NAME%(t, _%NAME%_num_values, _%NAME%_values)
        '''.replace('%NAME%', self.name)
        }
        return cpp_code

    def create_cpp_namespace(owner):
        return {'_%s_num_values' % self.name: len(self.values),
                '_%s_values' % self.name: self.values}

    self.implementations.add_dynamic_implementation(
        'cpp', create_cpp_implementation, create_cpp_namespace,
        name=self.name)
# NOTE(review): this chunk is a temporary 'sign' function workaround and is
# truncated mid-way through the Cython implementation string (the closing
# quotes and the remainder of the fused-type helper are not visible here),
# so it is left byte-identical rather than reformatted.
import numpy as np import sympy from brian2.core.functions import DEFAULT_FUNCTIONS, Function if 'sign' not in DEFAULT_FUNCTIONS: # Only a temporary workaround until this code is merged into the main # Brian repository DEFAULT_FUNCTIONS['sign'] = Function(pyfunc=np.sign, sympy_func=sympy.sign, arg_units=[None], return_unit=1) # Implementation for C++ sign_code = ''' template <typename T> int sign_(T val) { return (T(0) < val) - (val < T(0)); } ''' DEFAULT_FUNCTIONS['sign'].implementations.add_implementation( 'cpp', code=sign_code, name='sign_') # Implementation for Cython sign_code = ''' ctypedef fused _to_sign: char short int float double cdef int _int(_to_sign x):
# NOTE(review): pytest conftest chunk -- defines doctest collection rules and
# a deterministic fake 'randn' (always 0.5) for reproducible tests. The final
# fixture is truncated (its restore/teardown part is not visible here), so the
# chunk is left byte-identical rather than reformatted.
from brian2.devices.device import reinit_and_delete, set_device def pytest_ignore_collect(path, config): if config.option.doctestmodules: if 'tests' in str(path): return True # Ignore tests package for doctests # Do not test brian2.hears bridge (needs Brian1) if str(path).endswith('hears.py'): return True # The "random" values are always 0.5 def fake_randn(vectorisation_idx): return 0.5*np.ones_like(vectorisation_idx) fake_randn = Function(fake_randn, arg_units=[], return_unit=1, auto_vectorise=True, stateless=False) fake_randn.implementations.add_implementation('cpp', ''' double randn(int vectorisation_idx) { return 0.5; } ''') fake_randn.implementations.add_implementation('cython',''' cdef double randn(int vectorisation_idx): return 0.5 ''') @pytest.fixture def fake_randn_randn_fixture(): orig_randn = DEFAULT_FUNCTIONS['randn'] DEFAULT_FUNCTIONS['randn'] = fake_randn
def _resolve_external(self, identifier, run_namespace=None, level=0,
                      do_warn=True):
    '''
    Resolve an external identifier in the context of a `Group`. If the `Group`
    declares an explicit namespace, this namespace is used in addition to the
    standard namespace for units and functions. Additionally, the namespace in
    the `run_namespace` argument (i.e. the namespace provided to
    `Network.run`) or, if this argument is unspecified, the implicit namespace
    of surrounding variables in the stack frame where the original call was
    made is used (to determine this stack frame, the `level` argument has to
    be set correctly).

    Parameters
    ----------
    identifier : str
        The name to resolve.
    run_namespace : dict, optional
        A namespace (mapping from strings to objects), as provided as an
        argument to the `Network.run` function.
    level : int, optional
        How far to go up in the stack to find the calling frame.
    do_warn : bool, optional
        Whether to display a warning if an identifier resolves to different
        objects in different namespaces. Defaults to ``True``.
    '''
    # We save tuples of (namespace description, referred object) to
    # give meaningful warnings in case of duplicate definitions
    matches = []
    namespaces = OrderedDict()
    # Default namespaces (units and functions)
    namespaces['constants'] = DEFAULT_CONSTANTS
    namespaces['units'] = DEFAULT_UNITS
    namespaces['functions'] = DEFAULT_FUNCTIONS
    if getattr(self, 'namespace', None) is not None:
        namespaces['group-specific'] = self.namespace
    # explicit or implicit run namespace
    if run_namespace is not None:
        namespaces['run'] = run_namespace
    else:
        namespaces['implicit'] = get_local_namespace(level + 1)

    # FIX: dict.iteritems() is Python-2-only; items() works on 2 and 3
    for description, namespace in namespaces.items():
        if identifier in namespace:
            matches.append((description, namespace[identifier]))

    if len(matches) == 0:
        # No match at all
        raise KeyError(('The identifier "%s" could not be resolved.') %
                       (identifier))
    elif len(matches) > 1:
        # Possibly, all matches refer to the same object
        first_obj = matches[0][1]
        found_mismatch = False
        for m in matches:
            if _same_value(m[1], first_obj):
                continue
            if _same_function(m[1], first_obj):
                continue
            try:
                proxy = weakref.proxy(first_obj)
                if m[1] is proxy:
                    continue
            except TypeError:
                pass
            # Found a mismatch
            found_mismatch = True
            break
        if found_mismatch and do_warn:
            _conflict_warning(('The name "%s" refers to different objects '
                               'in different namespaces used for resolving '
                               'names in the context of group "%s". '
                               'Will use the object from the %s namespace '
                               'with the value %r') %
                              (identifier, getattr(self, 'name', '<unknown>'),
                               matches[0][0], first_obj),
                              matches[1:])

    # use the first match (according to resolution order)
    resolved = matches[0][1]

    # Replace pure Python functions by a Functions object
    if callable(resolved) and not isinstance(resolved, Function):
        resolved = Function(resolved)

    if not isinstance(resolved, (Function, Variable)):
        # Wrap the value in a Constant object
        unit = get_unit(resolved)
        value = np.asarray(resolved)
        if value.shape != ():
            raise KeyError('Variable %s was found in the namespace, but is'
                           ' not a scalar value' % identifier)
        resolved = Constant(identifier, unit=unit, value=value)

    return resolved
def __init__(self, values, dt, name=None):
    """
    Store the array of values sampled with period ``dt`` and register
    Python, numpy and C++ implementations.

    Parameters
    ----------
    values : array-like
        The values to return at the corresponding times.
    dt : float (in seconds)
        The sampling period of ``values``.
    name : str, optional
        Name template; defaults to ``'_timedarray*'``.
    """
    if name is None:
        name = '_timedarray*'
    Nameable.__init__(self, name)
    unit = get_unit(values)
    values = np.asarray(values)
    self.values = values
    dt = float(dt)
    self.dt = dt

    # Python implementation (with units), used when calling the TimedArray
    # directly, outside of a simulation
    @check_units(t=second, result=unit)
    def timed_array_func(t):
        i = np.clip(np.int_(np.float_(t) / dt + 0.5), 0, len(values)-1)
        return values[i] * unit

    Function.__init__(self, pyfunc=timed_array_func)

    # we use dynamic implementations because we want to do upsampling
    # in a way that avoids rounding problems with the group's dt
    def create_numpy_implementation(owner):
        group_dt = owner.clock.dt_
        K = _find_K(group_dt, dt)
        epsilon = dt / K
        n_values = len(values)

        def unitless_timed_array_func(t):
            # FIX: previously np.int_(np.round(t/epsilon)) / K -- under
            # true division this yields a *float* index, which cannot be
            # used for array indexing. Convert to int after dividing by K,
            # consistent with the 2D implementation.
            timestep = np.clip(np.int_(np.round(t/epsilon) / K),
                               0, n_values-1)
            return values[timestep]
        unitless_timed_array_func._arg_units = [second]
        unitless_timed_array_func._return_unit = unit
        return unitless_timed_array_func

    self.implementations.add_dynamic_implementation('numpy',
                                                    create_numpy_implementation)

    def create_cpp_implementation(owner):
        group_dt = owner.clock.dt_
        K = _find_K(group_dt, dt)
        cpp_code = {'support_code': '''
        inline double _timedarray_%NAME%(const double t, const int _num_values, const double* _values)
        {
            const double epsilon = %DT% / %K%;
            int i = (int)((t/epsilon + 0.5)/%K%); // rounds to nearest int for positive values
            if(i<0) i = 0;
            if(i>=_num_values) i = _num_values-1;
            return _values[i];
        }
        '''.replace('%NAME%', self.name).replace('%DT%', '%.18f' % dt).replace('%K%', str(K)),
                    'hashdefine_code': '''
        #define %NAME%(t) _timedarray_%NAME%(t, _%NAME%_num_values, _%NAME%_values)
        '''.replace('%NAME%', self.name)}
        return cpp_code

    def create_cpp_namespace(owner):
        return {'_%s_num_values' % self.name: len(self.values),
                '_%s_values' % self.name: self.values}

    self.implementations.add_dynamic_implementation('cpp',
                                                    create_cpp_implementation,
                                                    create_cpp_namespace,
                                                    name=self.name)
def _init_2d(self):
    """Set up a two-dimensional timed array.

    Registers a Python fallback (with units) plus dynamically generated
    numpy, C++ and Cython implementations. The implementations are
    "dynamic" (created per owner) so that the upsampling factor K can be
    computed from the owning group's actual dt, avoiding rounding problems
    when the group's dt does not exactly divide the array's dt.
    """
    dimensions = self.dim
    unit = get_unit(dimensions)
    # Bind to locals so the closures below do not go through self
    values = self.values
    dt = self.dt

    # Python implementation (with units), used when calling the TimedArray
    # directly, outside of a simulation
    @check_units(i=1, t=second, result=unit)
    def timed_array_func(t, i):
        # We round according to the current defaultclock.dt
        K = _find_K(float(defaultclock.dt), dt)
        # epsilon is the common (finer) time resolution between dt and
        # the clock's dt
        epsilon = dt / K
        # Round t to the nearest epsilon step, map it back to an index
        # into `values` and clip to the valid row range
        time_step = np.clip(np.int_(np.round(np.asarray(t / epsilon)) / K),
                            0, len(values) - 1)
        return Quantity(values[time_step, i], dim=dimensions)

    Function.__init__(self, pyfunc=timed_array_func)

    # we use dynamic implementations because we want to do upsampling
    # in a way that avoids rounding problems with the group's dt
    def create_numpy_implementation(owner):
        group_dt = owner.clock.dt_
        K = _find_K(group_dt, dt)
        n_values = len(values)
        epsilon = dt / K

        def unitless_timed_array_func(t, i):
            # Same rounding scheme as the Python fallback above, but with
            # K fixed from the owner's dt at implementation-creation time
            timestep = np.clip(np.int_(np.round(t / epsilon) / K),
                               0, n_values - 1)
            return values[timestep, i]
        # Attributes read by the code-generation machinery for unit checks
        unitless_timed_array_func._arg_units = [second]
        unitless_timed_array_func._return_unit = unit

        return unitless_timed_array_func

    self.implementations.add_dynamic_implementation(
        'numpy', create_numpy_implementation)

    def create_cpp_implementation(owner):
        group_dt = owner.clock.dt_
        K = _find_K(group_dt, dt)
        # The flattened values array is provided via the namespace (see
        # create_cpp_namespace below); out-of-range column indices yield NAN
        support_code = '''
        static inline double %NAME%(const double t, const int i)
        {
            const double epsilon = %DT% / %K%;
            if (i < 0 || i >= %COLS%)
                return NAN;
            int timestep = (int)((t/epsilon + 0.5)/%K%);
            if(timestep < 0)
                timestep = 0;
            else if(timestep >= %ROWS%)
                timestep = %ROWS%-1;
            return _namespace%NAME%_values[timestep*%COLS% + i];
        }
        '''
        support_code = replace(support_code,
                               {'%NAME%': self.name,
                                '%DT%': '%.18f' % dt,
                                '%K%': str(K),
                                '%COLS%': str(self.values.shape[1]),
                                '%ROWS%': str(self.values.shape[0])})
        cpp_code = {'support_code': support_code}

        return cpp_code

    def create_cpp_namespace(owner):
        # Row-major (C-order) flattened copy so that
        # values[timestep, i] == flat[timestep*COLS + i]
        return {'%s_values' % self.name:
                self.values.astype(np.double, order='C', copy=False).ravel()}

    self.implementations.add_dynamic_implementation(
        'cpp', code=create_cpp_implementation,
        namespace=create_cpp_namespace, name=self.name)

    def create_cython_implementation(owner):
        group_dt = owner.clock.dt_
        K = _find_K(group_dt, dt)
        # Mirrors the C++ implementation above; out-of-range column
        # indices yield NaN
        code = '''
        cdef double %NAME%(const double t, const int i):
            global _namespace%NAME%_values
            cdef double epsilon = %DT% / %K%
            if i < 0 or i >= %COLS%:
                return _numpy.nan
            cdef int timestep = (int)((t/epsilon + 0.5)/%K%)
            if timestep < 0:
                timestep = 0
            elif timestep >= %ROWS%:
                timestep = %ROWS%-1
            return _namespace%NAME%_values[timestep*%COLS% + i]
        '''
        code = replace(code,
                       {'%NAME%': self.name,
                        '%DT%': '%.18f' % dt,
                        '%K%': str(K),
                        '%COLS%': str(self.values.shape[1]),
                        '%ROWS%': str(self.values.shape[0])})

        return code

    def create_cython_namespace(owner):
        # Same flattened view of the values as for C++
        return {'%s_values' % self.name:
                self.values.astype(np.double, order='C', copy=False).ravel()}

    self.implementations.add_dynamic_implementation(
        'cython', code=create_cython_implementation,
        namespace=create_cython_namespace, name=self.name)
def __init__(self, n, p, approximate=True, name='_binomial*'):
    """Initialize a binomial random sampling function.

    Registers a numpy implementation plus generated C++ and Cython
    implementations. When ``approximate`` is True and both n*p and
    n*(1-p) exceed 5, a normal approximation is used; otherwise the
    exact binomial distribution is sampled (via inversion in the
    generated code).

    Parameters
    ----------
    n : int
        Number of trials.
    p : float
        Success probability per trial.
    approximate : bool, optional
        Allow the normal approximation when accurate enough (default True).
    name : str, optional
        The name for this object (default: auto-generated '_binomial*').
    """
    Nameable.__init__(self, name)
    # Python implementation
    use_normal = approximate and (n * p > 5) and n * (1 - p) > 5

    # Common pre-calculations, shared by all implementations
    if use_normal:
        loc = n * p
        scale = np.sqrt(n * p * (1 - p))
    else:
        # For p > 0.5, sample n - Binomial(n, 1-p) instead: the inversion
        # loop converges faster for small probabilities
        reverse = p > 0.5
        if reverse:
            P = 1.0 - p
        else:
            P = p
        q = 1.0 - P
        qn = np.exp(n * np.log(q))  # probability of zero successes
        bound = min(n, n * P + 10.0 * np.sqrt(n * P * q + 1))

    if use_normal:
        def sample_function(vectorisation_idx):
            # vectorisation_idx is either an index array (vectorized call)
            # or a plain count
            try:
                N = len(vectorisation_idx)
            except TypeError:
                N = int(vectorisation_idx)
            return np.random.normal(loc, scale, size=N)
    else:
        def sample_function(vectorisation_idx):
            try:
                N = len(vectorisation_idx)
            except TypeError:
                N = int(vectorisation_idx)
            return np.random.binomial(n, p, size=N)

    Function.__init__(self, pyfunc=lambda: sample_function(1),
                      arg_units=[], return_unit=1, stateless=False)
    self.implementations.add_implementation('numpy', sample_function)

    # C++ implementation
    # Inversion transform sampling
    if use_normal:
        cpp_code = '''
        float %NAME%(const int vectorisation_idx)
        {
            return _randn(vectorisation_idx) * %SCALE% + %LOC%;
        }
        '''
        cpp_code = replace(cpp_code, {'%SCALE%': '%.15f' % scale,
                                      '%LOC%': '%.15f' % loc,
                                      '%NAME%': self.name})
        dependencies = {'_randn': DEFAULT_FUNCTIONS['randn']}
    else:
        # The following code is an almost exact copy of numpy's
        # rk_binomial_inversion function
        # (numpy/random/mtrand/distributions.c)
        cpp_code = '''
        long %NAME%(const int vectorisation_idx)
        {
            long X = 0;
            double px = %QN%;
            double U = _rand(vectorisation_idx);
            while (U > px)
            {
                X++;
                if (X > %BOUND%)
                {
                    X = 0;
                    px = %QN%;
                    U = _rand(vectorisation_idx);
                } else
                {
                    U -= px;
                    px = ((%N%-X+1) * %P% * px)/(X*%Q%);
                }
            }
            return %RETURN_VALUE%;
        }
        '''
        cpp_code = replace(cpp_code, {'%N%': '%d' % n,
                                      '%P%': '%.15f' % P,
                                      '%Q%': '%.15f' % q,
                                      '%QN%': '%.15f' % qn,
                                      '%BOUND%': '%.15f' % bound,
                                      '%RETURN_VALUE%': '%d-X' % n if reverse else 'X',
                                      '%NAME%': self.name})
        dependencies = {'_rand': DEFAULT_FUNCTIONS['rand']}

    self.implementations.add_implementation('cpp',
                                            {'support_code': cpp_code},
                                            dependencies=dependencies,
                                            name=self.name)

    # Cython implementation
    # Inversion transform sampling
    if use_normal:
        cython_code = '''
        cdef float %NAME%(const int vectorisation_idx):
            return _randn(vectorisation_idx) * %SCALE% + %LOC%
        '''
        cython_code = replace(cython_code, {'%SCALE%': '%.15f' % scale,
                                            '%LOC%': '%.15f' % loc,
                                            '%NAME%': self.name})
        dependencies = {'_randn': DEFAULT_FUNCTIONS['randn']}
    else:
        # The following code is an almost exact copy of numpy's
        # rk_binomial_inversion function
        # (numpy/random/mtrand/distributions.c)
        cython_code = '''
        cdef long %NAME%(const int vectorisation_idx):
            cdef long X = 0
            cdef double px = %QN%
            cdef double U = _rand(vectorisation_idx)
            while U > px:
                X += 1
                if X > %BOUND%:
                    X = 0
                    px = %QN%
                    U = _rand(vectorisation_idx)
                else:
                    U -= px
                    px = ((%N%-X+1) * %P% * px)/(X*%Q%)
            return %RETURN_VALUE%
        '''
        # FIX: use the (possibly reversed) probability P here, not the raw
        # p -- the C++ branch above already does this. With p > 0.5 the
        # old substitution of p made the Cython sampler draw from the
        # wrong distribution.
        cython_code = replace(cython_code, {'%N%': '%d' % n,
                                            '%P%': '%.15f' % P,
                                            '%Q%': '%.15f' % q,
                                            '%QN%': '%.15f' % qn,
                                            '%BOUND%': '%.15f' % bound,
                                            '%RETURN_VALUE%': '%d-X' % n if reverse else 'X',
                                            '%NAME%': self.name})
        dependencies = {'_rand': DEFAULT_FUNCTIONS['rand']}

    self.implementations.add_implementation('cython', cython_code,
                                            dependencies=dependencies,
                                            name=self.name)
# C++ implementation of bit_and; operates on 32-bit unsigned integers
bit_and_cpp = {
    'support_code': '''
    uint32_t _bit_and(uint32_t a, uint32_t b)
    {
        return a & b;
    }
    '''
}

# Cython implementation of bit_and
# NOTE(review): the Cython version uses plain (signed) int while the C++
# version uses uint32_t -- confirm the intended operand width/signedness
bit_and_cython = '''
cdef int _bit_and(int a, int b):
    return a & b
'''

# Wrap the Python-level `bit_and` callable (defined elsewhere in this file)
# as a code-generation-aware Function taking two dimensionless integers
bit_and_obj = Function(bit_and, arg_units=[1, 1], return_unit=1,
                       arg_types=['integer', 'integer'],
                       return_type='integer')
# Register per-target implementations: numpy uses the Python callable
# directly, C++/Cython use the generated code above under the name
# '_bit_and'
bit_and_obj.implementations.add_implementation(NumpyCodeGenerator,
                                               code=bit_and)
bit_and_obj.implementations.add_implementation(CPPCodeGenerator,
                                               name='_bit_and',
                                               code=bit_and_cpp)
bit_and_obj.implementations.add_implementation(CythonCodeGenerator,
                                               name='_bit_and',
                                               code=bit_and_cython)
# Make bit_and available by default in model code
DEFAULT_FUNCTIONS['bit_and'] = bit_and_obj

def lfsr_rand(lfsr_reg):
    # XNOR taps from 32, 22, 2, 1