def test_set_reset_device_implicit():
    """Check that `set_device`/`reset_device` maintain an implicit device stack."""
    test_device1 = ATestDevice()
    all_devices['test1'] = test_device1
    test_device2 = ATestDevice()
    all_devices['test2'] = test_device2

    set_device('test1', build_on_run=False, my_opt=1)
    set_device('test2', build_on_run=True, my_opt=2)
    # The most recently set device is active, together with its own options
    assert get_device() is test_device2
    assert get_device()._options['my_opt'] == 2
    assert get_device().build_on_run

    # reset_device() pops back to the previously set device
    reset_device()
    assert get_device() is test_device1
    assert get_device()._options['my_opt'] == 1
    assert not get_device().build_on_run

    reset_device()
    assert get_device() is runtime_device

    reset_device()  # If there is no previous device, will reset to runtime device
    assert get_device() is runtime_device

    # Clean up the global device registry
    del all_devices['test1']
    del all_devices['test2']
def test_set_reset_device_implicit():
    """Check that `set_device`/`reset_device` maintain an implicit device stack.

    Saves and restores the module-level ``previous_devices`` stack and the
    ``all_devices`` registry so that other tests are unaffected.
    """
    import brian2.devices.device as device_module
    old_prev_devices = list(device_module.previous_devices)
    device_module.previous_devices = []
    test_device1 = ATestDevice()
    all_devices['test1'] = test_device1
    test_device2 = ATestDevice()
    all_devices['test2'] = test_device2
    try:
        set_device('test1', build_on_run=False, my_opt=1)
        set_device('test2', build_on_run=True, my_opt=2)
        # The most recently set device is active, with its own options
        assert get_device() is test_device2
        assert get_device()._options['my_opt'] == 2
        assert get_device().build_on_run

        # reset_device() pops back to the previously set device
        reset_device()
        assert get_device() is test_device1
        assert get_device()._options['my_opt'] == 1
        assert not get_device().build_on_run

        reset_device()
        assert get_device() is runtime_device

        reset_device()  # If there is no previous device, will reset to runtime device
        assert get_device() is runtime_device
    finally:
        # Restore global state even when an assertion above fails, so that
        # a failure here cannot poison unrelated tests
        del all_devices['test1']
        del all_devices['test2']
        device_module.previous_devices = old_prev_devices
def __init__(self, variables, variable_indices, owner, iterate_all,
             codeobj_class, name, template_name,
             override_conditional_write=None,
             allows_scalar_write=False):
    """Set up a code generator for the given variables.

    Records the active device, the variable/index mappings and collects
    implementation-specific replacement names for `Function` variables.
    """
    # We have to do the import here to avoid circular import dependencies.
    from brian2.devices.device import get_device
    self.device = get_device()
    self.variables = variables
    self.variable_indices = variable_indices
    # Maps a function's variable name to the name used by its
    # codeobj_class-specific implementation (if it declares one)
    self.func_name_replacements = {}
    # items() instead of iteritems(): iteritems() does not exist on
    # Python 3; this also matches the newer variant of this class
    for varname, var in variables.items():
        if isinstance(var, Function):
            if codeobj_class in var.implementations:
                impl_name = var.implementations[codeobj_class].name
                if impl_name is not None:
                    self.func_name_replacements[varname] = impl_name
    self.iterate_all = iterate_all
    self.codeobj_class = codeobj_class
    self.owner = owner
    if override_conditional_write is None:
        self.override_conditional_write = set()
    else:
        self.override_conditional_write = set(override_conditional_write)
    self.allows_scalar_write = allows_scalar_write
    self.name = name
    self.template_name = template_name
def device_override_decorated_function(*args, **kwds):
    # Dispatch to the active device's implementation when it provides one,
    # otherwise fall back to the wrapped default implementation.
    from brian2.devices.device import get_device
    active_device = get_device()
    if not hasattr(active_device, name):
        return func(*args, **kwds)
    return getattr(active_device, name)(*args, **kwds)
def __init__(self, dt=None, clock=None, when='start', order=0,
             name='brianobject*'):
    """Initialise a `BrianObject` with its clock and scheduling information.

    Parameters
    ----------
    dt : Quantity, optional
        Timestep for a new private clock (mutually exclusive with ``clock``).
    clock : Clock, optional
        An existing clock to use (mutually exclusive with ``dt``).
    when : str
        The scheduling slot (e.g. ``'start'``).
    order : int
        Tie-break order among objects sharing clock and slot.
    name : str
        Name for this object (``*`` suffix requests a unique name).
    """
    if dt is not None and clock is not None:
        raise ValueError('Can only specify either a dt or a clock, not both.')

    # str instead of basestring: basestring only exists on Python 2 and
    # raises a NameError on Python 3
    if not isinstance(when, str):
        from brian2.core.clocks import Clock
        # Give some helpful error messages for users coming from the alpha
        # version
        if isinstance(when, Clock):
            raise TypeError(("Do not use the 'when' argument for "
                             "specifying a clock, either provide a "
                             "timestep for the 'dt' argument or a Clock "
                             "object for 'clock'."))
        if isinstance(when, tuple):
            raise TypeError("Use the separate keyword arguments, 'dt' (or "
                            "'clock'), 'when', and 'order' instead of "
                            "providing a tuple for 'when'. Only use the "
                            "'when' argument for the scheduling slot.")
        # General error
        raise TypeError("The 'when' argument has to be a string "
                        "specifying the scheduling slot (e.g. 'start').")

    Nameable.__init__(self, name)

    #: The clock used for simulating this object
    self._clock = clock
    if clock is None:
        from brian2.core.clocks import Clock, defaultclock
        if dt is not None:
            self._clock = Clock(dt=dt, name=self.name+'_clock*')
        else:
            self._clock = defaultclock

    # A proxy defaultclock is replaced by the device's own default clock
    if getattr(self._clock, '_is_proxy', False):
        from brian2.devices.device import get_device
        self._clock = get_device().defaultclock

    #: Used to remember the `Network` in which this object has been included
    #: before, to raise an error if it is included in a new `Network`
    self._network = None

    #: The ID string determining when the object should be updated in `Network.run`.
    self.when = when

    #: The order in which objects with the same clock and ``when`` should be updated
    self.order = order

    self._dependencies = set()
    self._contained_objects = []
    self._code_objects = []
    self._active = True

    #: The scope key is used to determine which objects are collected by magic
    self._scope_key = self._scope_current_key

    logger.debug("Created BrianObject with name {self.name}, "
                 "clock={self._clock}, "
                 "when={self.when}, order={self.order}".format(self=self))
def determine_keywords(self):
    """Build the template keyword dict (pointer, support and hashdefine lines)."""
    # set up the restricted pointers, these are used so that the compiler
    # knows there is no aliasing in the pointers, for optimisation
    pointers = []
    # It is possible that several different variable names refer to the
    # same array. E.g. in gapjunction code, v_pre and v_post refer to the
    # same array if a group is connected to itself
    handled_pointers = set()
    template_kwds = {}
    # Again, do the import here to avoid a circular dependency.
    from brian2.devices.device import get_device
    device = get_device()
    # items() instead of iteritems(): iteritems() does not exist on
    # Python 3, and the second loop below already used items()
    for varname, var in self.variables.items():
        if isinstance(var, ArrayVariable):
            # This is the "true" array name, not the restricted pointer.
            array_name = device.get_array_name(var)
            pointer_name = self.get_array_name(var)
            if pointer_name in handled_pointers:
                continue
            # NOTE(review): the newer variant of this method checks
            # getattr(var, 'ndim', 1) here -- confirm 'dimensions' really
            # holds the array rank and not the unit dimensions
            if getattr(var, 'dimensions', 1) > 1:
                continue  # multidimensional (dynamic) arrays have to be treated differently
            line = '{0}* {1} {2} = {3};'.format(self.c_data_type(var.dtype),
                                                self.restrict,
                                                pointer_name,
                                                array_name)
            pointers.append(line)
            handled_pointers.add(pointer_name)

    # set up the functions
    user_functions = []
    support_code = []
    hash_defines = []
    for varname, variable in self.variables.items():
        if isinstance(variable, Function):
            hd, ps, sc, uf = self._add_user_function(varname, variable)
            user_functions.extend(uf)
            support_code.extend(sc)
            pointers.extend(ps)
            hash_defines.extend(hd)

    # delete the user-defined functions from the namespace and add the
    # function namespaces (if any)
    for funcname, func in user_functions:
        del self.variables[funcname]
        func_namespace = func.implementations[self.codeobj_class].get_namespace(self.owner)
        if func_namespace is not None:
            self.variables.update(func_namespace)

    support_code.append(self.universal_support_code)

    keywords = {'pointers_lines': stripped_deindented_lines('\n'.join(pointers)),
                'support_code_lines': stripped_deindented_lines('\n'.join(support_code)),
                'hashdefine_lines': stripped_deindented_lines('\n'.join(hash_defines)),
                'denormals_code_lines': stripped_deindented_lines('\n'.join(self.denormals_to_zero_code())),
                }
    keywords.update(template_kwds)
    return keywords
def get_array_name(var, access_data=True):
    # Return the device-level array name; data access goes through a
    # "_ptr"-prefixed alias of that name.
    # We have to do the import here to avoid circular import dependencies.
    from brian2.devices.device import get_device
    device = get_device()
    if not access_data:
        return device.get_array_name(var, access_data=False)
    return '_ptr' + device.get_array_name(var)
def __init__(self, owner, code, variables, name='numpy_code_object*'):
    # Imported here to avoid a circular import dependency.
    from brian2.devices.device import get_device
    self.device = get_device()
    # Namespace in which the generated numpy code will be executed
    self.namespace = {'_owner': owner,
                      # TODO: This should maybe go somewhere else
                      'logical_not': np.logical_not}
    CodeObject.__init__(self, owner, code, variables, name=name)
    self.variables_to_namespace()
def initialise_queue(self):
    # Create the spike queue lazily on first use, via the active device
    if self.queue is None:
        self.queue = get_device().spike_queue(self.source.start,
                                              self.source.stop)

    # Update the dt (might have changed between runs)
    self.dt = self.synapses.clock.dt_

    # Re-prepare the queue with the current delays and synapse sources
    self.queue.prepare(self._delays.get_value(), self.dt,
                       self.synapse_sources.get_value())
def __init__(self, owner, code, variables, variable_indices,
             template_name, template_source,
             name='weave_code_object*'):
    # Imported here to avoid a circular import dependency.
    from brian2.devices.device import get_device
    self.device = get_device()
    # Weave compiles lazily; remember whether the first run happened yet
    self._done_first_run = False
    self.namespace = {'_owner': owner}
    super(WeaveCodeObject, self).__init__(owner, code, variables,
                                          variable_indices,
                                          template_name, template_source,
                                          name=name)
    # Compiler binary and base flags come from the preferences system
    self.compiler, self.extra_compile_args = get_compiler_and_args()
    self.define_macros = list(prefs['codegen.cpp.define_macros'])
    if self.compiler == 'msvc':
        # MSVC lacks these C99/POSIX math constants; define them explicitly
        self.define_macros.extend([
            ('INFINITY', '(std::numeric_limits<double>::infinity())'),
            ('NAN', '(std::numeric_limits<double>::quiet_NaN())'),
            ('M_PI', '3.14159265358979323846')
        ])
    self.extra_link_args = list(prefs['codegen.cpp.extra_link_args'])
    self.include_dirs = list(prefs['codegen.cpp.include_dirs'])
    self.include_dirs += [os.path.join(sys.prefix, 'include')]
    # TODO: We should probably have a special folder just for header
    # files that are shared between different codegen targets
    import brian2.synapses as synapses
    synapses_dir = os.path.dirname(synapses.__file__)
    self.include_dirs.append(synapses_dir)
    self.library_dirs = list(prefs['codegen.cpp.library_dirs'])
    self.library_dirs += [os.path.join(sys.prefix, 'lib')]
    update_for_cross_compilation(self.library_dirs,
                                 self.extra_compile_args,
                                 self.extra_link_args, logger=logger)
    self.runtime_library_dirs = list(prefs['codegen.cpp.runtime_library_dirs'])
    self.libraries = list(prefs['codegen.cpp.libraries'])
    self.headers = ['<algorithm>', '<limits>',
                    '"stdint_compat.h"'] + prefs['codegen.cpp.headers']
    self.annotated_code = self.code.main+'''
/*
The following code is just compiler options for the call to weave.inline.
By including them here, we force a recompile if the compiler options change,
which is a good thing (e.g. switching -ffast-math on and off).

support_code:
{self.code.support_code}

compiler: {self.compiler}
define_macros: {self.define_macros}
extra_compile_args: {self.extra_compile_args}
extra_link_args: {self.extra_link_args}
include_dirs: {self.include_dirs}
library_dirs: {self.library_dirs}
runtime_library_dirs: {self.runtime_library_dirs}
libraries: {self.libraries}
*/
'''.format(self=self)
    self.python_code_namespace = {'_owner': owner}
    self.variables_to_namespace()
def test_schedule_warning():
    """Check that a device-enforced schedule warns on conflicting `net.schedule`."""
    previous_device = get_device()
    from uuid import uuid4

    # TestDevice1 supports arbitrary schedules, TestDevice2 does not
    class TestDevice1(Device):
        # These functions are needed during the setup of the defaultclock
        def get_value(self, var):
            return np.array([0.0001])
        def add_array(self, var):
            pass
        def init_with_zeros(self, var, dtype):
            pass
        def fill_with_array(self, var, arr):
            pass

    class TestDevice2(TestDevice1):
        def __init__(self):
            super(TestDevice2, self).__init__()
            # A fixed schedule: the device refuses anything else
            self.network_schedule = ['start', 'groups', 'synapses',
                                     'thresholds', 'resets', 'end']

    # Unique names are important for getting the warnings again for multiple
    # runs of the test suite
    name1 = 'testdevice_' + str(uuid4())
    name2 = 'testdevice_' + str(uuid4())
    all_devices[name1] = TestDevice1()
    all_devices[name2] = TestDevice2()

    set_device(name1)
    assert schedule_propagation_offset() == 0*ms
    net = Network()
    assert schedule_propagation_offset(net) == 0*ms

    # Any schedule should work
    net.schedule = list(reversed(net.schedule))
    with catch_logs() as l:
        net.run(0*ms)
    assert len(l) == 0, 'did not expect a warning'
    # Reversed schedule puts synapses before thresholds -> one dt offset
    assert schedule_propagation_offset(net) == defaultclock.dt

    set_device(name2)
    assert schedule_propagation_offset() == defaultclock.dt

    # Using the correct schedule should work
    net.schedule = ['start', 'groups', 'synapses', 'thresholds', 'resets', 'end']
    with catch_logs() as l:
        net.run(0*ms)
    assert len(l) == 0, 'did not expect a warning'
    assert schedule_propagation_offset(net) == defaultclock.dt

    # Using another (e.g. the default) schedule should raise a warning
    net.schedule = None
    with catch_logs() as l:
        net.run(0*ms)
    assert len(l) == 1 and l[0][1].endswith('schedule_conflict')

    reset_device(previous_device)
def __init__(self, owner, code, variables, name='weave_code_object*'):
    # Imported here to avoid a circular import dependency.
    from brian2.devices.device import get_device
    self.device = get_device()
    self.namespace = {'_owner': owner}
    super(WeaveCodeObject, self).__init__(owner, code, variables, name=name)
    # Compiler settings come from the weave-specific preferences
    self.compiler = brian_prefs['codegen.runtime.weave.compiler']
    self.extra_compile_args = brian_prefs['codegen.runtime.weave.extra_compile_args']
    self.include_dirs = brian_prefs['codegen.runtime.weave.include_dirs']
    # Separate namespace for the Python part of the generated code
    self.python_code_namespace = {'_owner': owner}
    self.variables_to_namespace()
def __init__(self, owner, code, variables, variable_indices,
             template_name, template_source,
             name='numba_code_object*'):
    # Imported here to avoid a circular import dependency.
    from brian2.devices.device import get_device
    self.device = get_device()
    # Namespace in which the generated code will be executed
    self.namespace = {'_owner': owner,
                      # TODO: This should maybe go somewhere else
                      'logical_not': np.logical_not,
                      'log10': math.log10}
    CodeObject.__init__(self, owner, code, variables, variable_indices,
                        template_name, template_source, name=name)
    self.variables_to_namespace()
def constant_or_scalar(varname, variable):
    '''
    Convenience function to generate code to access the value of a variable.
    Will return ``'varname'`` if the ``variable`` is a constant, and
    ``array_name[0]`` if it is a scalar array.
    '''
    if variable.array:
        # Import only when actually needed (avoids a circular import and
        # makes the constant branch independent of the device machinery)
        from brian2.devices.device import get_device
        return '%s[0]' % get_device().get_array_name(variable)
    # '%s' % varname is a no-op for strings -- return the name directly
    return varname
def test_schedule_warning():
    """Check that a device-enforced schedule warns on conflicting `net.schedule`."""
    previous_device = get_device()
    from uuid import uuid4

    # TestDevice1 supports arbitrary schedules, TestDevice2 does not
    class TestDevice1(Device):
        # These functions are needed during the setup of the defaultclock
        def add_array(self, var):
            pass
        def init_with_zeros(self, var, dtype):
            pass
        def fill_with_array(self, var, arr):
            pass

    class TestDevice2(TestDevice1):
        def __init__(self):
            super(TestDevice2, self).__init__()
            # A fixed schedule: the device refuses anything else
            self.network_schedule = [
                'start', 'groups', 'synapses', 'thresholds', 'resets', 'end'
            ]

    # Unique names are important for getting the warnings again for multiple
    # runs of the test suite
    name1 = 'testdevice_' + str(uuid4())
    name2 = 'testdevice_' + str(uuid4())
    all_devices[name1] = TestDevice1()
    all_devices[name2] = TestDevice2()

    set_device(name1)
    net = Network()
    # Any schedule should work
    net.schedule = list(reversed(net.schedule))
    with catch_logs() as l:
        net.run(0 * ms)
    assert len(l) == 0, 'did not expect a warning'

    set_device(name2)
    # Using the correct schedule should work
    net.schedule = [
        'start', 'groups', 'synapses', 'thresholds', 'resets', 'end'
    ]
    with catch_logs() as l:
        net.run(0 * ms)
    assert len(l) == 0, 'did not expect a warning'

    # Using another (e.g. the default) schedule should raise a warning
    net.schedule = None
    with catch_logs() as l:
        net.run(0 * ms)
    assert len(l) == 1 and l[0][1].endswith('schedule_conflict')

    reset_device(previous_device)
def determine_keywords(self):
    """Build the template keyword dict (pointer, support and hashdefine lines)."""
    # set up the restricted pointers, these are used so that the compiler
    # knows there is no aliasing in the pointers, for optimisation
    pointers = []
    # It is possible that several different variable names refer to the
    # same array. E.g. in gapjunction code, v_pre and v_post refer to the
    # same array if a group is connected to itself
    handled_pointers = set()
    template_kwds = {}
    # Again, do the import here to avoid a circular dependency.
    from brian2.devices.device import get_device
    device = get_device()
    # items() instead of iteritems(): iteritems() does not exist on
    # Python 3, and the second loop below already used items()
    for varname, var in self.variables.items():
        if isinstance(var, ArrayVariable):
            # This is the "true" array name, not the restricted pointer.
            array_name = device.get_array_name(var)
            pointer_name = self.get_array_name(var)
            if pointer_name in handled_pointers:
                continue
            if getattr(var, 'ndim', 1) > 1:
                continue  # multidimensional (dynamic) arrays have to be treated differently
            restrict = self.restrict
            # turn off restricted pointers for scalars for safety
            if var.scalar:
                restrict = ' '
            line = '{0}* {1} {2} = {3};'.format(self.c_data_type(var.dtype),
                                                restrict,
                                                pointer_name,
                                                array_name)
            pointers.append(line)
            handled_pointers.add(pointer_name)

    # set up the functions
    user_functions = []
    support_code = []
    hash_defines = []
    for varname, variable in self.variables.items():
        if isinstance(variable, Function):
            hd, ps, sc, uf = self._add_user_function(varname, variable)
            user_functions.extend(uf)
            support_code.extend(sc)
            pointers.extend(ps)
            hash_defines.extend(hd)

    support_code.append(self.universal_support_code)

    keywords = {'pointers_lines': stripped_deindented_lines('\n'.join(pointers)),
                'support_code_lines': stripped_deindented_lines('\n'.join(support_code)),
                'hashdefine_lines': stripped_deindented_lines('\n'.join(hash_defines)),
                'denormals_code_lines': stripped_deindented_lines('\n'.join(self.denormals_to_zero_code())),
                }
    keywords.update(template_kwds)
    return keywords
def __init__(self, owner, default_index='_idx'):
    #: A reference to the `Group` owning these variables
    self.owner = owner
    # The index that is used for arrays if no index is given explicitly
    self.default_index = default_index

    # We do the import here to avoid a circular dependency.
    from brian2.devices.device import get_device
    self.device = get_device()

    # Mapping from variable name to Variable object
    self._variables = {}
    #: A dictionary given the index name for every array name
    self.indices = collections.defaultdict(lambda: default_index)
def test_set_reset_device_explicit():
    """Check that `reset_device` can jump directly to a named earlier device."""
    original_device = get_device()
    test_device1 = ATestDevice()
    all_devices['test1'] = test_device1
    test_device2 = ATestDevice()
    all_devices['test2'] = test_device2
    test_device3 = ATestDevice()
    all_devices['test3'] = test_device3
    try:
        set_device('test1', build_on_run=False, my_opt=1)
        set_device('test2', build_on_run=True, my_opt=2)
        set_device('test3', build_on_run=False, my_opt=3)

        reset_device('test1')  # Directly jump back to the first device
        assert get_device() is test_device1
        assert get_device()._options['my_opt'] == 1
        assert not get_device().build_on_run
    finally:
        # Clean up the registry and restore the previously active device
        # even if an assertion above fails
        del all_devices['test1']
        del all_devices['test2']
        del all_devices['test3']
        reset_device(original_device)
def __init__(self, owner, default_index='_idx'):
    #: A reference to the `Group` owning these variables
    # (a weak proxy, so Variables does not keep the group alive)
    self.owner = weakproxy_with_fallback(owner)
    # The index that is used for arrays if no index is given explicitly
    self.default_index = default_index

    # We do the import here to avoid a circular dependency.
    from brian2.devices.device import get_device
    self.device = get_device()

    # Mapping from variable name to Variable object
    self._variables = {}
    #: A dictionary given the index name for every array name
    # (partial(str, default_index) called with no arguments yields
    # str(default_index), i.e. the default index name)
    self.indices = collections.defaultdict(
        functools.partial(str, default_index))
def __init__(self, owner, code, variables, name='weave_code_object*'):
    # Imported here to avoid a circular import dependency.
    from brian2.devices.device import get_device
    self.device = get_device()
    self.namespace = {'_owner': owner}
    super(WeaveCodeObject, self).__init__(owner, code, variables, name=name)
    # Compiler settings come from the weave-specific preferences
    self.compiler = brian_prefs['codegen.runtime.weave.compiler']
    self.extra_compile_args = brian_prefs[
        'codegen.runtime.weave.extra_compile_args']
    self.include_dirs = brian_prefs['codegen.runtime.weave.include_dirs']
    # Separate namespace for the Python part of the generated code
    self.python_code_namespace = {'_owner': owner}
    self.variables_to_namespace()
def __init__(self, owner, code, variables, variable_indices,
             template_name, template_source, compiler_kwds,
             name='numpy_code_object*'):
    # numpy does not compile anything, so no compiler keywords are accepted
    check_compiler_kwds(compiler_kwds, [], 'numpy')
    # Imported here to avoid a circular import dependency.
    from brian2.devices.device import get_device
    self.device = get_device()
    # Namespace in which the generated numpy code will be executed
    self.namespace = {'_owner': owner,
                      # TODO: This should maybe go somewhere else
                      'logical_not': np.logical_not}
    CodeObject.__init__(self, owner, code, variables, variable_indices,
                        template_name, template_source,
                        compiler_kwds=compiler_kwds,
                        name=name)
    self.variables_to_namespace()
def variableview_get_subexpression_with_index_array(
        self, variableview, item, level=0, run_namespace=None):
    """Evaluate a subexpression on stored results by temporarily switching
    to the runtime device.

    Raises
    ------
    NotImplementedError
        If the standalone simulation has not been run yet.
    """
    if not self.has_been_run:
        raise NotImplementedError(
            'Cannot retrieve the values of state '
            'variables in standalone code before the '
            'simulation has been run.')

    # Temporarily switch to the runtime device to evaluate the subexpression
    # (based on the values stored on disk)
    backup_device = get_device()
    set_device('runtime')
    try:
        result = VariableView.get_subexpression_with_index_array(
            variableview, item, level=level + 2, run_namespace=run_namespace)
    finally:
        # Restore the original device even if the evaluation raised
        set_device(backup_device)
    return result
def __init__(self, variables, variable_indices, owner, iterate_all,
             codeobj_class, override_conditional_write=None,
             allows_scalar_write=False):
    """Store the code-generation context for the given variables."""
    # Deferred import avoids a circular dependency with the device module.
    from brian2.devices.device import get_device
    self.device = get_device()

    self.owner = owner
    self.variables = variables
    self.variable_indices = variable_indices
    self.iterate_all = iterate_all
    self.codeobj_class = codeobj_class

    # Normalise to a set; None means "no overrides"
    if override_conditional_write is None:
        override_conditional_write = ()
    self.override_conditional_write = set(override_conditional_write)
    self.allows_scalar_write = allows_scalar_write
def variableview_get_subexpression_with_index_array(self, variableview,
                                                    item, level=0,
                                                    run_namespace=None):
    """Evaluate a subexpression on stored results by temporarily switching
    to the runtime device.

    Raises
    ------
    NotImplementedError
        If the standalone simulation has not been run yet.
    """
    if not self.has_been_run:
        raise NotImplementedError('Cannot retrieve the values of state '
                                  'variables in standalone code before the '
                                  'simulation has been run.')

    # Temporarily switch to the runtime device to evaluate the subexpression
    # (based on the values stored on disk)
    backup_device = get_device()
    set_device('runtime')
    try:
        result = VariableView.get_subexpression_with_index_array(
            variableview, item, level=level+2, run_namespace=run_namespace)
    finally:
        # Restore the original device even if the evaluation raised
        set_device(backup_device)
    return result
def __init__(self, owner, code, variables, variable_indices,
             template_name, template_source,
             name='weave_code_object*'):
    # Imported here to avoid a circular import dependency.
    from brian2.devices.device import get_device
    self.device = get_device()
    # Weave compiles lazily; remember whether the first run happened yet
    self._done_first_run = False
    self.namespace = {'_owner': owner}
    super(WeaveCodeObject, self).__init__(owner, code, variables,
                                          variable_indices,
                                          template_name, template_source,
                                          name=name)
    # Compiler binary and base flags come from the preferences system
    self.compiler, self.extra_compile_args = get_compiler_and_args()
    self.define_macros = list(prefs['codegen.cpp.define_macros'])
    if self.compiler == 'msvc':
        # MSVC lacks these C99/POSIX math constants; define them explicitly
        self.define_macros.extend([
            ('INFINITY', '(std::numeric_limits<double>::infinity())'),
            ('NAN', '(std::numeric_limits<double>::quiet_NaN())'),
            ('M_PI', '3.14159265358979323846')
        ])
    self.extra_link_args = list(prefs['codegen.cpp.extra_link_args'])
    self.include_dirs = list(prefs['codegen.cpp.include_dirs'])
    self.include_dirs += [os.path.join(sys.prefix, 'include')]
    self.library_dirs = list(prefs['codegen.cpp.library_dirs'])
    self.runtime_library_dirs = list(prefs['codegen.cpp.runtime_library_dirs'])
    self.libraries = list(prefs['codegen.cpp.libraries'])
    self.headers = ['<algorithm>', '<limits>'] + prefs['codegen.cpp.headers']
    self.annotated_code = self.code.main+'''
/*
The following code is just compiler options for the call to weave.inline.
By including them here, we force a recompile if the compiler options change,
which is a good thing (e.g. switching -ffast-math on and off).

support_code:
{self.code.support_code}

compiler: {self.compiler}
define_macros: {self.define_macros}
extra_compile_args: {self.extra_compile_args}
extra_link_args: {self.extra_link_args}
include_dirs: {self.include_dirs}
library_dirs: {self.library_dirs}
runtime_library_dirs: {self.runtime_library_dirs}
libraries: {self.libraries}
*/
'''.format(self=self)
    self.python_code_namespace = {'_owner': owner}
    self.variables_to_namespace()
def is_cpp_standalone(self):
    '''
    Check whether we're running with cpp_standalone.

    Test if `get_device()` is instance `CPPStandaloneDevice`.

    Returns
    -------
    is_cpp_standalone : bool
        whether currently using cpp_standalone device

    See Also
    --------
    is_constant_and_cpp_standalone : uses the returned value
    '''
    # imports here to avoid circular imports
    from brian2.devices.cpp_standalone.device import CPPStandaloneDevice
    from brian2.devices.device import get_device
    return isinstance(get_device(), CPPStandaloneDevice)
def __getitem__(self, item):
    # A string item is interpreted as a boolean condition on the group's
    # state variables; anything else is forwarded to self.indices
    if isinstance(item, str):
        variables = Variables(None)
        variables.add_auxiliary_variable('_indices', dtype=np.int32)
        variables.add_auxiliary_variable('_cond', dtype=bool)

        abstract_code = '_cond = ' + item
        # Capture the caller's namespace for resolving external names
        namespace = get_local_namespace(level=1)
        from brian2.devices.device import get_device
        device = get_device()
        codeobj = create_runner_codeobj(self.group,
                                        abstract_code,
                                        'group_get_indices',
                                        run_namespace=namespace,
                                        additional_variables=variables,
                                        codeobj_class=device.code_object_class(fallback_pref='codegen.string_expression_target')
                                        )
        return codeobj()
    else:
        return self.indices(item)
def __getitem__(self, item):
    # A string item is interpreted as a boolean condition on the group's
    # state variables; anything else is forwarded to self.indices
    # str instead of basestring (Python-2-only) and the builtin bool
    # instead of np.bool (removed in NumPy >= 1.24); this matches the
    # newer variant of this method
    if isinstance(item, str):
        variables = Variables(None)
        variables.add_auxiliary_variable('_indices', dtype=np.int32)
        variables.add_auxiliary_variable('_cond', dtype=bool)

        abstract_code = '_cond = ' + item
        # Capture the caller's namespace for resolving external names
        namespace = get_local_namespace(level=1)
        from brian2.devices.device import get_device
        device = get_device()
        codeobj = create_runner_codeobj(self.group,
                                        abstract_code,
                                        'group_get_indices',
                                        run_namespace=namespace,
                                        additional_variables=variables,
                                        codeobj_class=device.code_object_class(fallback_pref='codegen.string_expression_target')
                                        )
        return codeobj()
    else:
        return self.indices(item)
def __init__(self, variables, variable_indices, owner, iterate_all,
             codeobj_class, name, template_name,
             override_conditional_write=None,
             allows_scalar_write=False):
    # We have to do the import here to avoid circular import dependencies.
    from brian2.devices.device import get_device
    self.device = get_device()
    self.variables = variables
    self.variable_indices = variable_indices
    # Maps a function's variable name to the name used by its
    # codeobj_class-specific implementation (if it declares one)
    self.func_name_replacements = {}
    for varname, var in variables.items():
        if isinstance(var, Function):
            if codeobj_class in var.implementations:
                impl_name = var.implementations[codeobj_class].name
                if impl_name is not None:
                    self.func_name_replacements[varname] = impl_name
    self.iterate_all = iterate_all
    self.codeobj_class = codeobj_class
    self.owner = owner
    if override_conditional_write is None:
        self.override_conditional_write = set()
    else:
        self.override_conditional_write = set(override_conditional_write)
    self.allows_scalar_write = allows_scalar_write
    self.name = name
    self.template_name = template_name
    # Gather the names of functions that should get an additional
    # "_vectorisation_idx" argument in the generated code. Take care
    # of storing their translated name (e.g. "_rand" instead of "rand")
    # if necessary
    self.auto_vectorise = {self.func_name_replacements.get(name, name)
                           for name in self.variables
                           if getattr(self.variables[name],
                                      'auto_vectorise', False)}
def get_array_name(var, access_data=True): ''' Get a globally unique name for a `ArrayVariable`. Parameters ---------- var : `ArrayVariable` The variable for which a name should be found. access_data : bool, optional For `DynamicArrayVariable` objects, specifying `True` here means the name for the underlying data is returned. If specifying `False`, the name of object itself is returned (e.g. to allow resizing). Returns ------- name : str A uniqe name for `var`. ''' # We have to do the import here to avoid circular import dependencies. from brian2.devices.device import get_device device = get_device() return device.get_array_name(var, access_data=access_data)
def test_schedule_warning():
    """Check that a device-enforced schedule warns on conflicting `net.schedule`."""
    previous_device = get_device()
    from uuid import uuid4

    # TestDevice1 supports arbitrary schedules, TestDevice2 does not
    class TestDevice1(Device):
        pass

    class TestDevice2(Device):
        def __init__(self):
            super(TestDevice2, self).__init__()
            # A fixed schedule: the device refuses anything else
            self.network_schedule = ['start', 'groups', 'synapses',
                                     'thresholds', 'resets', 'end']

    # Unique names are important for getting the warnings again for multiple
    # runs of the test suite
    name1 = 'testdevice_' + str(uuid4())
    name2 = 'testdevice_' + str(uuid4())
    all_devices[name1] = TestDevice1()
    all_devices[name2] = TestDevice2()

    set_device(name1)
    net = Network()
    # Any schedule should work
    net.schedule = list(reversed(net.schedule))
    with catch_logs() as l:
        net.run(0*ms)
    assert len(l) == 0, 'did not expect a warning'

    set_device(name2)
    # Using the correct schedule should work
    net.schedule = ['start', 'groups', 'synapses', 'thresholds', 'resets', 'end']
    with catch_logs() as l:
        net.run(0*ms)
    assert len(l) == 0, 'did not expect a warning'

    # Using another (e.g. the default) schedule should raise a warning
    net.schedule = None
    with catch_logs() as l:
        net.run(0*ms)
    assert len(l) == 1 and l[0][1].endswith('schedule_conflict')

    # NOTE(review): newer variants of this test call
    # reset_device(previous_device) here -- confirm set_device accepts a
    # Device instance rather than a registered name
    set_device(previous_device)
def test_transmission_scalar_delay_different_clocks():
    """Check spike transmission with a scalar delay when source and target
    groups use different clock dts (warns on the runtime device)."""
    inp = SpikeGeneratorGroup(2, [0, 1], [0, 1]*ms, dt=0.5*ms,
                              # give the group a unique name to always
                              # get a 'fresh' warning
                              name='sg_%d' % uuid.uuid4())
    target = NeuronGroup(2, 'v:1', dt=0.1*ms)
    S = Synapses(inp, target, pre='v+=1', delay=0.5*ms, connect='i==j')
    mon = StateMonitor(target, 'v', record=True, when='end')

    if get_device() == all_devices['runtime']:
        # We should get a warning when using inconsistent dts
        with catch_logs() as l:
            run(2*ms)
        assert len(l) == 1, 'expected a warning, got %d' % len(l)
        assert l[0][1].endswith('synapses_dt_mismatch')

    run(0*ms)
    # Spike from neuron 0 at 0 ms arrives after the 0.5 ms delay
    assert_equal(mon[0].v[mon.t<0.5*ms], 0)
    assert_equal(mon[0].v[mon.t>=0.5*ms], 1)
    # Spike from neuron 1 at 1 ms arrives at 1.5 ms
    assert_equal(mon[1].v[mon.t<1.5*ms], 0)
    assert_equal(mon[1].v[mon.t>=1.5*ms], 1)
def schedule_propagation_offset(net=None):
    """
    Returns the minimal time difference for a post-synaptic effect after a
    spike. With the default schedule, this time difference is 0, since the
    ``thresholds`` slot precedes the ``synapses`` slot. For the GeNN device,
    however, a post-synaptic effect will occur in the following time step,
    this function therefore returns one ``dt``.

    Parameters
    ----------
    net : `Network`
        The network to check (uses the magic network if not specified).

    Returns
    -------
    offset : `Quantity`
        The minimum spike propagation delay: ``0*ms`` for the standard
        schedule but ``dt`` for schedules where ``synapses`` precedes
        ``thresholds``.

    Notes
    -----
    This function always returns ``0*ms`` or ``defaultclock.dt`` -- no
    attempt is made to deal with other clocks.
    """
    from brian2.core.magic import magic_network
    from brian2.devices.device import get_device

    # A device-enforced schedule wins over the network's own schedule
    schedule = get_device().network_schedule
    if schedule is None:
        if net is None:
            net = magic_network
        schedule = net.schedule

    # The effect is immediate only when thresholds run before synapses
    if schedule.index("thresholds") < schedule.index("synapses"):
        return 0 * second
    return defaultclock.dt
def schedule_propagation_offset(net=None):
    '''
    Returns the minimal time difference for a post-synaptic effect after a
    spike. With the default schedule, this time difference is 0, since the
    ``thresholds`` slot precedes the ``synapses`` slot. For the GeNN device,
    however, a post-synaptic effect will occur in the following time step,
    this function therefore returns one ``dt``.

    Parameters
    ----------
    net : `Network`
        The network to check (uses the magic network if not specified).

    Returns
    -------
    offset : `Quantity`
        The minimum spike propagation delay: ``0*ms`` for the standard
        schedule but ``dt`` for schedules where ``synapses`` precedes
        ``thresholds``.

    Notes
    -----
    This function always returns ``0*ms`` or ``defaultclock.dt`` -- no
    attempt is made to deal with other clocks.
    '''
    # Imported here to avoid circular import dependencies.
    from brian2.devices.device import get_device
    from brian2.core.magic import magic_network
    device = get_device()
    # A device-enforced schedule wins over the network's own schedule
    if device.network_schedule is not None:
        schedule = device.network_schedule
    else:
        if net is None:
            net = magic_network
        schedule = net.schedule

    # The effect is immediate only when thresholds run before synapses
    if schedule.index('thresholds') < schedule.index('synapses'):
        return 0 * second
    else:
        return defaultclock.dt
def __init__(self, owner, code, variables, variable_indices,
             template_name, template_source,
             name='weave_code_object*'):
    # Imported here to avoid a circular import dependency.
    from brian2.devices.device import get_device
    self.device = get_device()
    # Weave compiles lazily; remember whether the first run happened yet
    self._done_first_run = False
    self.namespace = {'_owner': owner}
    super(WeaveCodeObject, self).__init__(owner, code, variables,
                                          variable_indices,
                                          template_name, template_source,
                                          name=name)
    # Compiler binary and base flags come from the preferences system
    self.compiler, self.extra_compile_args = get_compiler_and_args()
    self.include_dirs = list(prefs['codegen.cpp.include_dirs'])
    self.include_dirs += [os.path.join(sys.prefix, 'include')]
    # Prepend compiler-specific defines to both code sections
    self.code.support_code = compiler_defines(self.compiler)+self.code.support_code
    self.annotated_code = compiler_defines(self.compiler)+self.code.main+'''
/*
The following code is just compiler options for the call to weave.inline.
By including them here, we force a recompile if the compiler options change,
which is a good thing (e.g. switching -ffast-math on and off).

support_code:
{support_code}

compiler: {compiler}
extra_compile_args: {extra_compile_args}
include_dirs: {include_dirs}
*/
'''.format(support_code=self.code.support_code,
           compiler=self.compiler,
           extra_compile_args=self.extra_compile_args,
           include_dirs=self.include_dirs)
    self.python_code_namespace = {'_owner': owner}
    self.variables_to_namespace()
def test_connection_array_standalone():
    '''
    Test synapses connected via explicit index arrays in C++ standalone
    mode: each generator spike should increment the targeted post-synaptic
    ``v`` from its spike time onwards.
    '''
    import shutil  # local import: only needed for build-dir cleanup

    previous_device = get_device()
    set_device('cpp_standalone')
    try:
        # use a clock with 1s timesteps to avoid rounding issues
        G1 = SpikeGeneratorGroup(4, np.array([0, 1, 2, 3]),
                                 [0, 1, 2, 3]*second, dt=1*second)
        G2 = NeuronGroup(8, 'v:1')
        S = Synapses(G1, G2, '', pre='v+=1', dt=1*second)
        S.connect([0, 1, 2, 3], [0, 2, 4, 6])
        mon = StateMonitor(G2, 'v', record=True, name='mon', dt=1*second,
                           when='end')
        net = Network(G1, G2, S, mon)
        net.run(5*second)
        tempdir = tempfile.mkdtemp()
        try:
            device.build(directory=tempdir, compile=True, run=True,
                         with_output=False)
            # Neuron 2*i receives a spike at t = i*second; odd neurons are
            # never targeted and stay at 0.
            expected = np.array([[1, 1, 1, 1, 1],
                                 [0, 0, 0, 0, 0],
                                 [0, 1, 1, 1, 1],
                                 [0, 0, 0, 0, 0],
                                 [0, 0, 1, 1, 1],
                                 [0, 0, 0, 0, 0],
                                 [0, 0, 0, 1, 1],
                                 [0, 0, 0, 0, 0]], dtype=np.float64)
            assert_equal(mon.v, expected)
        finally:
            # Do not leak the standalone build directory (the original code
            # left it behind on disk).
            shutil.rmtree(tempdir, ignore_errors=True)
    finally:
        # Restore the previously active device even if the test fails.
        set_device(previous_device)
def __init__(self, owner, code, variables, variable_indices,
             template_name, template_source, name='numpy_code_object*'):
    # Import at call time to avoid a circular import dependency.
    from brian2.devices.device import get_device
    self.device = get_device()
    # Build the execution namespace step by step.
    namespace = {'_owner': owner}
    # TODO: This should maybe go somewhere else
    namespace['logical_not'] = np.logical_not
    self.namespace = namespace
    CodeObject.__init__(self, owner, code, variables, variable_indices,
                        template_name, template_source, name=name)
    self.variables_to_namespace()
def variables_to_array_names(variables, access_data=True):
    '''
    Return the device array name for every variable in ``variables``,
    preserving order. ``access_data`` is forwarded to
    ``device.get_array_name``.
    '''
    from brian2.devices.device import get_device
    device = get_device()
    names = []
    for variable in variables:
        names.append(device.get_array_name(variable,
                                           access_data=access_data))
    return names
def run(codegen_targets=None, long_tests=False, test_codegen_independent=True,
        test_standalone=None):
    '''
    Run brian's test suite. Needs an installation of the nose testing tool.

    For testing, the preferences will be reset to the default preferences.
    After testing, the user preferences will be restored.

    Parameters
    ----------
    codegen_targets : list of str or str
        A list of codegeneration targets or a single target, e.g.
        ``['numpy', 'weave']`` to test. The whole test suite will be
        repeatedly run with `codegen.target` set to the respective value. If
        not specified, all available code generation targets will be tested.
    long_tests : bool, optional
        Whether to run tests that take a long time. Defaults to ``False``.
    test_codegen_independent : bool, optional
        Whether to run tests that are independent of code generation.
        Defaults to ``True``.
    test_standalone : str, optional
        Whether to run tests for a standalone mode. Should be the name of a
        standalone mode (e.g. ``'cpp_standalone'``) and expects that a device
        of that name and an accordingly named "simple" device (e.g.
        ``'cpp_standalone_simple'`` exists that can be used for testing (see
        `CPPStandaloneSimpleDevice` for details. Defaults to ``None``,
        meaning that no standalone device is tested.

    Returns
    -------
    success : bool
        Whether all executed test suites completed successfully.
    '''
    try:
        import nose
    except ImportError:
        raise ImportError('Running the test suite requires the "nose" package.')

    if codegen_targets is None:
        # Auto-detect the available targets: numpy always works, weave and
        # Cython only when the respective package can be imported.
        codegen_targets = ['numpy']
        try:
            import scipy.weave  # older scipy bundles weave
            codegen_targets.append('weave')
        except ImportError:
            try:
                import weave  # stand-alone weave package
                codegen_targets.append('weave')
            except ImportError:
                pass
        try:
            import Cython
            codegen_targets.append('cython')
        except ImportError:
            pass
    elif isinstance(codegen_targets, basestring):
        # allow to give a single target
        codegen_targets = [codegen_targets]

    # Run nose over the whole brian2 package directory.
    dirname = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    # We write to stderr since nose does all of its output on stderr as well
    sys.stderr.write('Running tests in "%s" ' % dirname)
    if codegen_targets:
        sys.stderr.write('for targets %s' % (', '.join(codegen_targets)))
    ex_in = 'including' if long_tests else 'excluding'
    sys.stderr.write(' (%s long tests)\n' % ex_in)
    if test_standalone:
        if not isinstance(test_standalone, basestring):
            raise ValueError('test_standalone argument has to be the name of a '
                             'standalone device (e.g. "cpp_standalone")')
        if test_standalone not in all_devices:
            raise ValueError('test_standalone argument "%s" is not a known '
                             'device. Known devices are: '
                             '%s' % (test_standalone,
                                     ', '.join(repr(d) for d in all_devices)))
        sys.stderr.write('Testing standalone \n')
    if test_codegen_independent:
        sys.stderr.write('Testing codegen-independent code \n')
    sys.stderr.write('\n')

    # Store the currently set preferences and reset to default preferences
    stored_prefs = prefs.as_file
    prefs.read_preference_file(StringIO(prefs.defaults_as_file))
    # Switch off code optimization to get faster compilation times
    prefs['codegen.cpp.extra_compile_args_gcc'] = ['-w', '-O0']
    try:
        # Each nose.run result (True/False) is collected here.
        success = []
        if test_codegen_independent:
            sys.stderr.write('Running tests that do not use code generation\n')
            # Some doctests do actually use code generation, use numpy for that
            prefs.codegen.target = 'numpy'
            prefs._backup()
            success.append(nose.run(argv=['', dirname,
                                          '-c=',  # no config file loading
                                          '-I', '^hears\.py$',
                                          '-I', '^\.',
                                          '-I', '^_',
                                          '--with-doctest',
                                          "-a", "codegen-independent",
                                          '--nologcapture',
                                          '--exe']))
        for target in codegen_targets:
            sys.stderr.write('Running tests for target %s:\n' % target)
            prefs.codegen.target = target
            prefs._backup()
            exclude_str = "!standalone-only,!codegen-independent"
            if not long_tests:
                exclude_str += ',!long'
            # explicitly ignore the brian2.hears file for testing, otherwise the
            # doctest search will import it, failing on Python 3
            success.append(nose.run(argv=['', dirname,
                                          '-c=',  # no config file loading
                                          '-I', '^hears\.py$',
                                          '-I', '^\.',
                                          '-I', '^_',
                                          # Do not run standalone or
                                          # codegen-independent tests
                                          "-a", exclude_str,
                                          '--nologcapture',
                                          '--exe']))
        if test_standalone:
            from brian2.devices.device import get_device, set_device
            previous_device = get_device()
            # The "simple" device variant builds/runs on every `run()` call,
            # so standalone-compatible tests can execute unmodified.
            set_device(test_standalone + '_simple')
            sys.stderr.write('Testing standalone device "%s"\n' % test_standalone)
            sys.stderr.write('Running standalone-compatible standard tests\n')
            exclude_str = ',!long' if not long_tests else ''
            success.append(nose.run(argv=['', dirname,
                                          '-c=',  # no config file loading
                                          '-I', '^hears\.py$',
                                          '-I', '^\.',
                                          '-I', '^_',
                                          # Only run standalone tests
                                          '-a', 'standalone-compatible' + exclude_str,
                                          '--nologcapture',
                                          '--exe']))
            set_device(previous_device)
            sys.stderr.write('Running standalone-specific tests\n')
            success.append(nose.run(argv=['', dirname,
                                          '-c=',  # no config file loading
                                          '-I', '^hears\.py$',
                                          '-I', '^\.',
                                          '-I', '^_',
                                          # Only run standalone tests
                                          '-a', test_standalone + exclude_str,
                                          '--nologcapture',
                                          '--exe']))
        all_success = all(success)
        if not all_success:
            sys.stderr.write(('ERROR: %d/%d test suite(s) did not complete '
                              'successfully (see above).\n') %
                             (len(success) - sum(success), len(success)))
        else:
            sys.stderr.write(('OK: %d/%d test suite(s) did complete '
                              'successfully.\n') % (len(success), len(success)))
        return all_success
    finally:
        # Restore the user preferences
        prefs.read_preference_file(StringIO(stored_prefs))
        prefs._backup()
def get_codeobj_class(self):
    '''
    Return codeobject class based on target language and device.

    Choose which version of the GSL `CodeObject` to use. If
    ```isinstance(device, CPPStandaloneDevice)```, then we want the
    `GSLCPPStandaloneCodeObject`. Otherwise the return value is based on
    prefs.codegen.target.

    Returns
    -------
    code_object : class
        The respective `CodeObject` class (i.e. either
        `GSLCythonCodeObject` or `GSLCPPStandaloneCodeObject`).

    Raises
    ------
    NotImplementedError
        If the active device/target combination has no GSL support.
    '''
    # imports in this function to avoid circular imports
    from brian2.devices.cpp_standalone.device import CPPStandaloneDevice
    from brian2.devices.device import get_device
    from ..codegen.runtime.GSLcython_rt import GSLCythonCodeObject

    device = get_device()
    if device.__class__ is CPPStandaloneDevice:
        # We do not want to accept subclasses here
        from ..devices.cpp_standalone.GSLcodeobject import GSLCPPStandaloneCodeObject

        # In runtime mode (i.e. Cython), the compiler settings are
        # added for each `CodeObject` (only the files that use the GSL are
        # linked to the GSL). However, in C++ standalone mode, there are global
        # compiler settings that are used for all files (stored in the
        # `CPPStandaloneDevice`). Furthermore, header file includes are directly
        # inserted into the template instead of added during the compilation
        # phase. Therefore, we have to add the options here
        # instead of in `GSLCPPStandaloneCodeObject`

        # Add the GSL library if it has not yet been added
        # (mutates the global device state; the 'gsl' check makes this
        # idempotent across repeated calls)
        if 'gsl' not in device.libraries:
            device.libraries += ['gsl', 'gslcblas']
            device.headers += ['<stdio.h>',
                               '<stdlib.h>',
                               '<gsl/gsl_odeiv2.h>',
                               '<gsl/gsl_errno.h>',
                               '<gsl/gsl_matrix.h>']
            if sys.platform == 'win32':
                device.define_macros += [('WIN32', '1'),
                                         ('GSL_DLL', '1')]
            if prefs.GSL.directory is not None:
                device.include_dirs += [prefs.GSL.directory]
        return GSLCPPStandaloneCodeObject
    elif isinstance(device, RuntimeDevice):
        if prefs.codegen.target == 'auto':
            target_name = auto_target().class_name
        else:
            target_name = prefs.codegen.target
        if target_name == 'cython':
            return GSLCythonCodeObject
        # NOTE(review): the message below repeats "for for" -- kept verbatim
        # here; fix would be a behavior (string) change.
        raise NotImplementedError(("GSL integration has not been implemented for "
                                   "for the '{target_name}' code generation target."
                                   "\nUse the 'cython' code generation target, "
                                   "or switch to the 'cpp_standalone' device.").format(
                                       target_name=target_name))
    else:
        # Reverse-lookup of the device name for the error message.
        device_name = [name for name, dev in all_devices.items()
                       if dev is device]
        assert len(device_name) == 1
        raise NotImplementedError(("GSL integration has not been implemented for "
                                   "for the '{device}' device."
                                   "\nUse either the 'cpp_standalone' device, "
                                   "or the runtime device with target language "
                                   "'cython'.").format(device=device_name[0]))
def before_run(self, run_namespace=None, level=0):
    '''
    before_run(namespace)

    Prepares the `Network` for a run.

    Objects in the `Network` are sorted into the correct running order, and
    their `BrianObject.before_run` methods are called.

    Parameters
    ----------
    namespace : dict-like, optional
        A namespace in which objects which do not define their own
        namespace will be run.

    Raises
    ------
    ValueError
        If two objects in the network share a name.
    RuntimeError
        If an object has already been run as part of another network.
    '''
    from brian2.devices.device import get_device, all_devices
    prefs.check_all_validated()

    # Check names in the network for uniqueness
    names = [obj.name for obj in self.objects]
    non_unique_names = [name for name, count in Counter(names).iteritems()
                        if count > 1]
    if len(non_unique_names):
        formatted_names = ', '.join("'%s'" % name
                                    for name in non_unique_names)
        raise ValueError('All objects in a network need to have unique '
                         'names, the following name(s) were used more than '
                         'once: %s' % formatted_names)

    self._stopped = False
    Network._globally_stopped = False

    device = get_device()
    if device.network_schedule is not None:
        # The device defines a fixed network schedule
        if device.network_schedule != self.schedule:
            # TODO: The human-readable name of a device should be easier to get
            device_name = all_devices.keys()[all_devices.values().index(device)]
            logger.warn(("The selected device '{device_name}' only "
                         "supports a fixed schedule, but this schedule is "
                         "not consistent with the network's schedule. The "
                         "simulation will use the device's schedule.\n"
                         "Device schedule: {device.network_schedule}\n"
                         "Network schedule: {net.schedule}\n"
                         "Set the network schedule explicitly or set the "
                         "core.network.default_schedule preference to "
                         "avoid this warning.").format(device_name=device_name,
                                                       device=device,
                                                       net=self),
                        name_suffix='schedule_conflict', once=True)

    # Sort objects into scheduling order before calling their hooks.
    self._sort_objects()

    logger.debug("Preparing network {self.name} with {numobj} "
                 "objects: {objnames}".format(self=self,
                     numobj=len(self.objects),
                     objnames=', '.join(obj.name for obj in self.objects)),
                 "before_run")

    self.check_dependencies()

    # Give every active object a chance to prepare itself; dimension errors
    # are re-raised with the offending object's name attached.
    for obj in self.objects:
        if obj.active:
            try:
                obj.before_run(run_namespace, level=level+2)
            except DimensionMismatchError as ex:
                raise DimensionMismatchError(('An error occured preparing '
                                              'object "%s":\n%s') % (obj.name,
                                                                     ex.desc),
                                             *ex.dims)

    # Check that no object has been run as part of another network before
    for obj in self.objects:
        if obj._network is None:
            obj._network = self.id
        elif obj._network != self.id:
            raise RuntimeError(('%s has already been run in the '
                                'context of another network. Use '
                                'add/remove to change the objects '
                                'in a simulated network instead of '
                                'creating a new one.') % obj.name)

    logger.debug("Network {self.name} has {num} "
                 "clocks: {clocknames}".format(self=self,
                     num=len(self._clocks),
                     clocknames=', '.join(obj.name for obj in self._clocks)),
                 "before_run")
def __init__(self, owner, code, variables, variable_indices,
             template_name, template_source, name='weave_code_object*'):
    # Weave code object constructor: gathers compiler/linker settings from
    # the preferences and prepares the annotated source used for caching.
    # Imported here to avoid a circular import dependency.
    from brian2.devices.device import get_device
    self.device = get_device()
    # Becomes True after the first execution (which triggers compilation).
    self._done_first_run = False
    self.namespace = {'_owner': owner}
    super(WeaveCodeObject, self).__init__(owner, code, variables,
                                          variable_indices,
                                          template_name, template_source,
                                          name=name)
    self.compiler, self.extra_compile_args = get_compiler_and_args()
    self.define_macros = list(prefs['codegen.cpp.define_macros'])
    if self.compiler == 'msvc':
        # MSVC lacks these C99/POSIX constants; provide them explicitly.
        self.define_macros.extend([
            ('INFINITY', '(std::numeric_limits<double>::infinity())'),
            ('NAN', '(std::numeric_limits<double>::quiet_NaN())'),
            ('M_PI', '3.14159265358979323846')
        ])
    self.extra_link_args = list(prefs['codegen.cpp.extra_link_args'])
    self.include_dirs = list(prefs['codegen.cpp.include_dirs'])
    self.include_dirs += [os.path.join(sys.prefix, 'include')]
    # TODO: We should probably have a special folder just for header
    # files that are shared between different codegen targets
    import brian2.synapses as synapses
    synapses_dir = os.path.dirname(synapses.__file__)
    self.include_dirs.append(synapses_dir)
    self.library_dirs = list(prefs['codegen.cpp.library_dirs'])
    if (platform.system() == 'Linux' and
            platform.architecture()[0] == '32bit' and
            platform.machine() == 'x86_64'):
        # We are cross-compiling to 32bit on a 64bit platform
        logger.info('Cross-compiling to 32bit on a 64bit platform, a set '
                    'of standard compiler options will be appended for '
                    'this purpose (note that you need to have a 32bit '
                    'version of the standard library for this to work).',
                    '64bit_to_32bit', once=True)
        self.library_dirs += ['/lib32', '/usr/lib32']
        self.extra_compile_args += ['-m32']
        self.extra_link_args += ['-m32']
    self.runtime_library_dirs = list(prefs['codegen.cpp.runtime_library_dirs'])
    self.libraries = list(prefs['codegen.cpp.libraries'])
    self.headers = ['<algorithm>', '<limits>', '"stdint_compat.h"'
                    ] + prefs['codegen.cpp.headers']
    # Compiler options are embedded in the source text: weave caches
    # compiled code by source string, so this forces a recompile whenever
    # any option changes.
    self.annotated_code = self.code.main + '''
/*
The following code is just compiler options for the call to weave.inline.
By including them here, we force a recompile if the compiler options change,
which is a good thing (e.g. switching -ffast-math on and off).

support_code:
{self.code.support_code}

compiler: {self.compiler}
define_macros: {self.define_macros}
extra_compile_args: {self.extra_compile_args}
extra_link_args: {self.extra_link_args}
include_dirs: {self.include_dirs}
library_dirs: {self.library_dirs}
runtime_library_dirs: {self.runtime_library_dirs}
libraries: {self.libraries}
*/
    '''.format(self=self)
    self.python_code_namespace = {'_owner': owner}
    self.variables_to_namespace()
def determine_keywords(self):
    '''
    Build the template keywords for the numba target.

    Returns a dict with two entries: ``load_namespace`` (code lines that
    pull the required variables out of ``_namespace``) and ``support_code``
    (helper code contributed by user-defined functions).
    '''
    from brian2.devices.device import get_device
    device = get_device()
    # load variables from namespace
    load_namespace = []
    support_code = []
    handled_pointers = set()
    user_functions = []
    for varname, var in self.variables.items():
        if isinstance(var, Variable) and not isinstance(
                var, (Subexpression, AuxiliaryVariable)):
            load_namespace.append(
                '_var_{0} = _namespace["_var_{1}"]'.format(varname, varname))
        if isinstance(var, AuxiliaryVariable):
            line = "{varname}".format(varname=varname)
            load_namespace.append(line)
        elif isinstance(var, Subexpression):
            dtype = get_numba_dtype(var.dtype)
            line = "{varname}".format(varname=varname)
            load_namespace.append(line)
        elif isinstance(var, Constant):
            dtype_name = get_numba_dtype(var.value)
            line = '{varname} = _namespace["{varname}"]'.format(
                varname=varname)
            load_namespace.append(line)
        elif isinstance(var, Variable):
            if var.dynamic:
                load_namespace.append('{0} = _namespace["{1}"]'.format(
                    self.get_array_name(var, False),
                    self.get_array_name(var, False)))
            # This is the "true" array name, not the restricted pointer.
            array_name = device.get_array_name(var)
            pointer_name = self.get_array_name(var)
            if pointer_name in handled_pointers:
                continue
            if getattr(var, 'dimensions', 1) > 1:
                continue  # multidimensional (dynamic) arrays have to be treated differently
            # The original code branched on a bool dtype here, but both
            # branches built the identical list -- collapsed into one.
            newlines = [
                "_buf_{array_name} = _namespace['{array_name}']",
                "{array_name} = _buf_{array_name}.data"
            ]
            if not var.scalar:
                newlines += [
                    "_num{array_name} = len(_namespace['{array_name}'])"
                ]
            if var.scalar and var.constant:
                newlines += ['{varname} = _namespace["{varname}"]']
            for line in newlines:
                line = line.format(
                    numba_dtype=get_numba_dtype(var.dtype),
                    numpy_dtype=get_numpy_dtype(var.dtype),
                    pointer_name=pointer_name,
                    array_name=array_name,
                    varname=varname,
                )
                load_namespace.append(line)
            handled_pointers.add(pointer_name)
        elif isinstance(var, Function):
            sc, ln, uf = self._add_user_function(varname, var)
            support_code.extend(sc)
            load_namespace.extend(ln)
            user_functions.extend(uf)
        else:
            # fallback to Python object
            load_namespace.append('{0} = _namespace["{1}"]'.format(
                varname, varname))
    # delete the user-defined functions from the namespace and add the
    # function namespaces (if any)
    for funcname, func in user_functions:
        del self.variables[funcname]
        func_namespace = func.implementations[
            self.codeobj_class].get_namespace(self.owner)
        if func_namespace is not None:
            self.variables.update(func_namespace)
    # NOTE: leftover debug "print" statements and a commented-out
    # "raise Exception" were removed here -- they wrote the whole namespace
    # to stdout on every call.
    return {
        'load_namespace': '\n'.join(load_namespace),
        'support_code': '\n'.join(support_code)
    }
def determine_keywords(self):
    '''
    Build the template keywords for the weave (C++) target: restricted
    pointer declarations, function support code, and hash-define lines.
    '''
    # set up the restricted pointers, these are used so that the compiler
    # knows there is no aliasing in the pointers, for optimisation
    lines = []
    # It is possible that several different variable names refer to the
    # same array. E.g. in gapjunction code, v_pre and v_post refer to the
    # same array if a group is connected to itself
    handled_pointers = set()
    template_kwds = {}
    # Again, do the import here to avoid a circular dependency.
    from brian2.devices.device import get_device
    device = get_device()
    for varname, var in self.variables.iteritems():
        if isinstance(var, ArrayVariable):
            # This is the "true" array name, not the restricted pointer.
            array_name = device.get_array_name(var)
            pointer_name = self.get_array_name(var)
            if pointer_name in handled_pointers:
                continue
            if getattr(var, 'dimensions', 1) > 1:
                continue  # multidimensional (dynamic) arrays have to be treated differently
            line = self.c_data_type(var.dtype) + ' * ' + self.restrict + pointer_name + ' = ' + array_name + ';'
            lines.append(line)
            handled_pointers.add(pointer_name)

    pointers = '\n'.join(lines)

    # set up the functions
    user_functions = []
    support_code = ''
    hash_defines = ''
    for varname, variable in self.variables.items():
        if isinstance(variable, Function):
            user_functions.append((varname, variable))
            funccode = variable.implementations[self.codeobj_class].get_code(self.owner)
            # A plain string is treated as support code only.
            if isinstance(funccode, basestring):
                funccode = {'support_code': funccode}
            if funccode is not None:
                support_code += '\n' + deindent(funccode.get('support_code', ''))
                hash_defines += '\n' + deindent(funccode.get('hashdefine_code', ''))
            # add the Python function with a leading '_python', if it
            # exists. This allows the function to make use of the Python
            # function via weave if necessary (e.g. in the case of randn)
            if not variable.pyfunc is None:
                pyfunc_name = '_python_' + varname
                if pyfunc_name in self.variables:
                    logger.warn(('Namespace already contains function %s, '
                                 'not replacing it') % pyfunc_name)
                else:
                    self.variables[pyfunc_name] = variable.pyfunc

    # delete the user-defined functions from the namespace and add the
    # function namespaces (if any)
    for funcname, func in user_functions:
        del self.variables[funcname]
        func_namespace = func.implementations[self.codeobj_class].get_namespace(self.owner)
        if func_namespace is not None:
            self.variables.update(func_namespace)

    keywords = {'pointers_lines': stripped_deindented_lines(pointers),
                'support_code_lines': stripped_deindented_lines(support_code),
                'hashdefine_lines': stripped_deindented_lines(hash_defines),
                'denormals_code_lines': stripped_deindented_lines(self.denormals_to_zero_code()),
                }
    keywords.update(template_kwds)
    return keywords
def run(self):
    """Queue this code object for execution on the device's main queue."""
    main_queue = get_device().main_queue
    main_queue.append(('run_code_object', (self,)))
def before_run(self, run_namespace=None, level=0):
    '''
    before_run(namespace)

    Prepares the `Network` for a run.

    Objects in the `Network` are sorted into the correct running order, and
    their `BrianObject.before_run` methods are called.

    Parameters
    ----------
    namespace : dict-like, optional
        A namespace in which objects which do not define their own
        namespace will be run.

    Raises
    ------
    ValueError
        If two objects in the network share a name.
    RuntimeError
        If an object has already been run as part of another network.
    '''
    from brian2.devices.device import get_device, all_devices
    prefs.check_all_validated()

    # Check names in the network for uniqueness
    names = [obj.name for obj in self.objects]
    non_unique_names = [
        name for name, count in Counter(names).iteritems() if count > 1
    ]
    if len(non_unique_names):
        formatted_names = ', '.join("'%s'" % name
                                    for name in non_unique_names)
        raise ValueError(
            'All objects in a network need to have unique '
            'names, the following name(s) were used more than '
            'once: %s' % formatted_names)

    self._stopped = False
    Network._globally_stopped = False

    device = get_device()
    if device.network_schedule is not None:
        # The device defines a fixed network schedule
        if device.network_schedule != self.schedule:
            # TODO: The human-readable name of a device should be easier to get
            device_name = all_devices.keys()[all_devices.values().index(
                device)]
            logger.warn(
                ("The selected device '{device_name}' only "
                 "supports a fixed schedule, but this schedule is "
                 "not consistent with the network's schedule. The "
                 "simulation will use the device's schedule.\n"
                 "Device schedule: {device.network_schedule}\n"
                 "Network schedule: {net.schedule}\n"
                 "Set the network schedule explicitly or set the "
                 "core.network.default_schedule preference to "
                 "avoid this warning.").format(device_name=device_name,
                                               device=device,
                                               net=self),
                name_suffix='schedule_conflict', once=True)

    # Sort objects into scheduling order before calling their hooks.
    self._sort_objects()

    logger.debug(
        "Preparing network {self.name} with {numobj} "
        "objects: {objnames}".format(
            self=self, numobj=len(self.objects),
            objnames=', '.join(obj.name for obj in self.objects)),
        "before_run")

    self.check_dependencies()

    # Give every active object a chance to prepare itself; dimension errors
    # are re-raised with the offending object's name attached.
    for obj in self.objects:
        if obj.active:
            try:
                obj.before_run(run_namespace, level=level + 2)
            except DimensionMismatchError as ex:
                raise DimensionMismatchError(
                    ('An error occured preparing '
                     'object "%s":\n%s') % (obj.name, ex.desc), *ex.dims)

    # Check that no object has been run as part of another network before
    for obj in self.objects:
        if obj._network is None:
            obj._network = self.id
        elif obj._network != self.id:
            raise RuntimeError(('%s has already been run in the '
                                'context of another network. Use '
                                'add/remove to change the objects '
                                'in a simulated network instead of '
                                'creating a new one.') % obj.name)

    logger.debug(
        "Network {self.name} has {num} "
        "clocks: {clocknames}".format(
            self=self, num=len(self._clocks),
            clocknames=', '.join(obj.name for obj in self._clocks)),
        "before_run")
set_device('test2', build_on_run=True, my_opt=2) set_device('test3', build_on_run=False, my_opt=3) reset_device('test1') # Directly jump back to the first device assert get_device() is test_device1 assert get_device()._options['my_opt'] == 1 assert not get_device().build_on_run del all_devices['test1'] del all_devices['test2'] del all_devices['test3'] reset_device(original_device) @pytest.mark.skipif( not isinstance(get_device(), RuntimeDevice), reason='Getting/setting random number state only supported ' 'for runtime device.') def test_get_set_random_generator_state(): group = NeuronGroup(10, 'dv/dt = -v/(10*ms) + (10*ms)**-0.5*xi : 1', method='euler') group.v = 'rand()' run(10 * ms) assert np.var(group.v) > 0 # very basic test for randomness ;) old_v = np.array(group.v) random_state = get_device().get_random_state() group.v = 'rand()' run(10 * ms) assert np.var(group.v - old_v) > 0 # just checking for *some* difference old_v = np.array(group.v)
def __init__(self, owner, code, variables, variable_indices,
             template_name, template_source, name='weave_code_object*'):
    # Weave code object constructor (Windows-aware variant): gathers
    # compiler/linker settings and prepares the annotated source for caching.
    # Imported here to avoid a circular import dependency.
    from brian2.devices.device import get_device
    self.device = get_device()
    # Becomes True after the first execution (which triggers compilation).
    self._done_first_run = False
    self.namespace = {'_owner': owner}
    super(WeaveCodeObject, self).__init__(owner, code, variables,
                                          variable_indices,
                                          template_name, template_source,
                                          name=name)
    self.compiler, self.extra_compile_args = get_compiler_and_args()
    self.define_macros = list(prefs['codegen.cpp.define_macros'])
    if self.compiler == 'msvc':
        # MSVC lacks these C99/POSIX constants; provide them explicitly.
        self.define_macros.extend([
            ('INFINITY', '(std::numeric_limits<double>::infinity())'),
            ('NAN', '(std::numeric_limits<double>::quiet_NaN())'),
            ('M_PI', '3.14159265358979323846')
        ])
    self.extra_link_args = list(prefs['codegen.cpp.extra_link_args'])
    self.include_dirs = list(prefs['codegen.cpp.include_dirs'])
    # Conda-style layouts differ between Windows and POSIX.
    if sys.platform == 'win32':
        self.include_dirs += [os.path.join(sys.prefix, 'Library', 'include')]
    else:
        self.include_dirs += [os.path.join(sys.prefix, 'include')]
    # TODO: We should probably have a special folder just for header
    # files that are shared between different codegen targets
    import brian2.synapses as synapses
    synapses_dir = os.path.dirname(synapses.__file__)
    self.include_dirs.append(synapses_dir)
    self.library_dirs = list(prefs['codegen.cpp.library_dirs'])
    if sys.platform == 'win32':
        self.library_dirs += [os.path.join(sys.prefix, 'Library', 'lib')]
    else:
        self.library_dirs += [os.path.join(sys.prefix, 'lib')]
    update_for_cross_compilation(self.library_dirs,
                                 self.extra_compile_args,
                                 self.extra_link_args, logger=logger)
    self.runtime_library_dirs = list(prefs['codegen.cpp.runtime_library_dirs'])
    self.libraries = list(prefs['codegen.cpp.libraries'])
    self.headers = ['<algorithm>', '<limits>',
                    '"stdint_compat.h"'] + prefs['codegen.cpp.headers']
    # Compiler options are embedded in the source text: weave caches
    # compiled code by source string, so this forces a recompile whenever
    # any option changes.
    self.annotated_code = self.code.main+'''
/*
The following code is just compiler options for the call to weave.inline.
By including them here, we force a recompile if the compiler options change,
which is a good thing (e.g. switching -ffast-math on and off).

support_code:
{self.code.support_code}

compiler: {self.compiler}
define_macros: {self.define_macros}
extra_compile_args: {self.extra_compile_args}
extra_link_args: {self.extra_link_args}
include_dirs: {self.include_dirs}
library_dirs: {self.library_dirs}
runtime_library_dirs: {self.runtime_library_dirs}
libraries: {self.libraries}
*/
    '''.format(self=self)
    self.python_code_namespace = {'_owner': owner}
    self.variables_to_namespace()
def determine_keywords(self):
    '''
    Build the template keywords for the CUDA target: restricted pointer
    declarations, function support code, hash-define lines, and
    atomics/denormals settings.
    '''
    # set up the restricted pointers, these are used so that the compiler
    # knows there is no aliasing in the pointers, for optimisation
    lines = []
    # it is possible that several different variable names refer to the
    # same array. E.g. in gapjunction code, v_pre and v_post refer to the
    # same array if a group is connected to itself
    handled_pointers = set()
    template_kwds = {}
    # again, do the import here to avoid a circular dependency.
    from brian2.devices.device import get_device
    device = get_device()
    for varname, var in self.variables.iteritems():
        if isinstance(var, ArrayVariable):
            # This is the "true" array name, not the restricted pointer.
            array_name = device.get_array_name(var)
            pointer_name = self.get_array_name(var)
            if pointer_name in handled_pointers:
                continue
            if getattr(var, 'ndim', 1) > 1:
                continue  # multidimensional (dynamic) arrays have to be treated differently
            line = self.c_data_type(var.dtype) + ' * ' + self.restrict + pointer_name + ' = ' + array_name + ';'
            lines.append(line)
            handled_pointers.add(pointer_name)

    pointers = '\n'.join(lines)

    # set up the functions
    user_functions = []
    support_code = ''
    hash_defines = ''
    # set convertion types for standard C99 functions in device code
    if prefs.codegen.generators.cuda.default_functions_integral_convertion == np.float64:
        default_func_type = 'double'
        other_func_type = 'float'
    else:  # np.float32
        default_func_type = 'float'
        other_func_type = 'double'
    # set clip function to either use all float or all double arguments
    # see #51 for details
    # NOTE(review): this mapping looks inverted (float64 -> 'float',
    # float32 -> 'double') -- kept verbatim; confirm against the clip
    # implementation template before changing.
    if prefs['core.default_float_dtype'] == np.float64:
        float_dtype = 'float'
    else:  # np.float32
        float_dtype = 'double'
    for varname, variable in self.variables.items():
        if isinstance(variable, Function):
            user_functions.append((varname, variable))
            funccode = variable.implementations[self.codeobj_class].get_code(self.owner)
            # Substitute the conversion types into C99 function templates.
            if varname in functions_C99:
                funccode = funccode.format(default_type=default_func_type,
                                           other_type=other_func_type)
            if varname == 'clip':
                funccode = funccode.format(float_dtype=float_dtype)
            if isinstance(funccode, basestring):
                funccode = {'support_code': funccode}
            if funccode is not None:
                support_code += '\n' + deindent(funccode.get('support_code', ''))
                hash_defines += '\n' + deindent(funccode.get('hashdefine_code', ''))
            # add the Python function with a leading '_python', if it
            # exists. This allows the function to make use of the Python
            # function via weave if necessary (e.g. in the case of randn)
            if not variable.pyfunc is None:
                pyfunc_name = '_python_' + varname
                if pyfunc_name in self.variables:
                    logger.warn(('Namespace already contains function %s, '
                                 'not replacing it') % pyfunc_name)
                else:
                    self.variables[pyfunc_name] = variable.pyfunc

    # delete the user-defined functions from the namespace and add the
    # function namespaces (if any)
    for funcname, func in user_functions:
        del self.variables[funcname]
        func_namespace = func.implementations[self.codeobj_class].get_namespace(self.owner)
        if func_namespace is not None:
            self.variables.update(func_namespace)

    support_code += '\n' + deindent(self.universal_support_code)

    keywords = {'pointers_lines': stripped_deindented_lines(pointers),
                'support_code_lines': stripped_deindented_lines(support_code),
                'hashdefine_lines': stripped_deindented_lines(hash_defines),
                'denormals_code_lines': stripped_deindented_lines(self.denormals_to_zero_code()),
                'uses_atomics': self.uses_atomics
                }
    keywords.update(template_kwds)
    return keywords
def run(self):
    # Standalone execution is deferred: record the action so the device
    # performs it when the generated project runs.
    device = get_device()
    device.main_queue.append(('run_code_object', (self,)))
def determine_keywords(self):
    '''
    Build the template keywords for the Cython target.

    Returns a dict with two entries: ``load_namespace`` (cdef lines that
    pull the required variables out of ``_namespace``) and ``support_code``
    (helper code contributed by user-defined functions).
    '''
    from brian2.devices.device import get_device
    device = get_device()
    # load variables from namespace
    load_namespace = []
    support_code = []
    handled_pointers = set()
    user_functions = []
    for varname, var in self.variables.items():
        if isinstance(var, AuxiliaryVariable):
            line = "cdef {dtype} {varname}".format(
                dtype=get_cpp_dtype(var.dtype), varname=varname)
            load_namespace.append(line)
        elif isinstance(var, AttributeVariable):
            val = getattr(var.obj, var.attribute)
            if isinstance(val, np.ndarray) and val.ndim:
                line = "cdef _numpy.ndarray[{cpp_dtype}, ndim=1, mode='c'] {varname} = _namespace['{varname}']".format(
                    numpy_dtype=get_numpy_dtype(val), varname=varname,
                    cpp_dtype=get_cpp_dtype(val))
            else:
                line = "cdef {cpp_dtype} {varname} = _namespace['{varname}']".format(
                    cpp_dtype=get_cpp_dtype(val), varname=varname)
            load_namespace.append(line)
            if isinstance(val, np.ndarray) and val.ndim:
                line = "cdef int _num{varname} = len(_namespace['{varname}'])".format(varname=varname)
                load_namespace.append(line)
        elif isinstance(var, Subexpression):
            dtype = get_cpp_dtype(var.dtype)
            line = "cdef {dtype} {varname}".format(dtype=dtype,
                                                   varname=varname)
            load_namespace.append(line)
        elif isinstance(var, Constant):
            dtype_name = get_cpp_dtype(var.value)
            line = 'cdef {dtype} {varname} = _namespace["{varname}"]'.format(
                dtype=dtype_name, varname=varname)
            load_namespace.append(line)
        elif isinstance(var, Variable):
            if var.dynamic:
                load_namespace.append('{0} = _namespace["{1}"]'.format(
                    self.get_array_name(var, False),
                    self.get_array_name(var, False)))
            # This is the "true" array name, not the restricted pointer.
            array_name = device.get_array_name(var)
            pointer_name = self.get_array_name(var)
            if pointer_name in handled_pointers:
                continue
            if getattr(var, 'dimensions', 1) > 1:
                continue  # multidimensional (dynamic) arrays have to be treated differently
            newlines = [
                "cdef _numpy.ndarray[{cpp_dtype}, ndim=1, mode='c'] _buf_{array_name} = _namespace['{array_name}'].view(dtype=_numpy.{numpy_dtype})",
                "cdef {cpp_dtype} * {array_name} = <{cpp_dtype} *> _buf_{array_name}.data",]
            if not var.scalar:
                newlines += ["cdef int _num{array_name} = len(_namespace['{array_name}'])"]
            newlines += ["cdef {cpp_dtype} {varname}"]
            for line in newlines:
                line = line.format(cpp_dtype=get_cpp_dtype(var.dtype),
                                   numpy_dtype=get_numpy_dtype(var.dtype),
                                   pointer_name=pointer_name,
                                   array_name=array_name,
                                   varname=varname,
                                   )
                load_namespace.append(line)
            handled_pointers.add(pointer_name)
        elif isinstance(var, Function):
            sc, ln, uf = self._add_user_function(varname, var)
            support_code.extend(sc)
            load_namespace.extend(ln)
            user_functions.extend(uf)
        else:
            # fallback to Python object
            # (debug "print var"/"print k, v" statements that dumped every
            # fallback variable's __dict__ to stdout were removed here)
            load_namespace.append('{0} = _namespace["{1}"]'.format(varname,
                                                                   varname))

    # delete the user-defined functions from the namespace and add the
    # function namespaces (if any)
    for funcname, func in user_functions:
        del self.variables[funcname]
        func_namespace = func.implementations[self.codeobj_class].get_namespace(self.owner)
        if func_namespace is not None:
            self.variables.update(func_namespace)

    return {'load_namespace': '\n'.join(load_namespace),
            'support_code': '\n'.join(support_code)}
def __init__(self, dt=None, clock=None, when='start', order=0,
             name='brianobject*'):
    """
    Set up scheduling (clock/when/order), naming and creation-traceback
    bookkeeping for a Brian object.

    Parameters
    ----------
    dt : optional
        Time step for a new, object-specific `Clock`. Mutually exclusive
        with ``clock``.
    clock : `Clock`, optional
        An existing clock to simulate this object with. If neither ``dt``
        nor ``clock`` is given, the ``defaultclock`` is used.
    when : str, optional
        The scheduling slot within a time step (e.g. ``'start'``).
    order : int, optional
        Order among objects sharing the same clock and ``when`` slot.
    name : str, optional
        Name of the object, passed on to `Nameable.__init__`.

    Raises
    ------
    ValueError
        If both ``dt`` and ``clock`` are specified.
    TypeError
        If ``when`` is not a string (with dedicated messages for the
        legacy alpha-version usages of passing a `Clock` or a tuple).
    """
    # Setup traceback information for this object: record only the stack
    # frames that are *outside* the brian2 package, so the user sees where
    # in their own code the object was created.
    creation_stack = []
    bases = []
    for modulename in ['brian2']:
        if modulename in sys.modules:
            base, _ = os.path.split(sys.modules[modulename].__file__)
            bases.append(base)
    for fname, linenum, funcname, line in traceback.extract_stack():
        if all(base not in fname for base in bases):
            s = ' File "{fname}", line {linenum}, in {funcname}\n {line}'.format(
                fname=fname, linenum=linenum, funcname=funcname, line=line)
            creation_stack.append(s)
    # Prepend an empty entry so creation_stack[-1] is well-defined even if
    # no user frame was found.
    creation_stack = [''] + creation_stack
    #: A string indicating where this object was created (traceback with any parts of Brian code removed)
    self._creation_stack = (
        'Object was created here (most recent call only, full details in '
        'debug log):\n' + creation_stack[-1])
    self._full_creation_stack = 'Object was created here:\n' + '\n'.join(
        creation_stack)
    if dt is not None and clock is not None:
        raise ValueError(
            'Can only specify either a dt or a clock, not both.')

    if not isinstance(when, basestring):
        from brian2.core.clocks import Clock
        # Give some helpful error messages for users coming from the alpha
        # version
        if isinstance(when, Clock):
            raise TypeError(("Do not use the 'when' argument for "
                             "specifying a clock, either provide a "
                             "timestep for the 'dt' argument or a Clock "
                             "object for 'clock'."))
        if isinstance(when, tuple):
            raise TypeError("Use the separate keyword arguments, 'dt' (or "
                            "'clock'), 'when', and 'order' instead of "
                            "providing a tuple for 'when'. Only use the "
                            "'when' argument for the scheduling slot.")
        # General error
        raise TypeError("The 'when' argument has to be a string "
                        "specifying the scheduling slot (e.g. 'start').")

    Nameable.__init__(self, name)

    #: The clock used for simulating this object
    self._clock = clock
    if clock is None:
        from brian2.core.clocks import Clock, defaultclock
        if dt is not None:
            # Object-specific clock, named after the object itself.
            self._clock = Clock(dt=dt, name=self.name + '_clock*')
        else:
            self._clock = defaultclock

    # A proxy clock (e.g. the magic defaultclock) is replaced by the
    # active device's own defaultclock.
    if getattr(self._clock, '_is_proxy', False):
        from brian2.devices.device import get_device
        self._clock = get_device().defaultclock

    #: Used to remember the `Network` in which this object has been included
    #: before, to raise an error if it is included in a new `Network`
    self._network = None

    #: The ID string determining when the object should be updated in `Network.run`.
    self.when = when

    #: The order in which objects with the same clock and ``when`` should be updated
    self.order = order

    self._dependencies = set()
    self._contained_objects = []
    self._code_objects = []

    self._active = True

    #: The scope key is used to determine which objects are collected by magic
    self._scope_key = self._scope_current_key

    logger.diagnostic("Created BrianObject with name {self.name}, "
                      "clock={self._clock}, "
                      "when={self.when}, order={self.order}".format(self=self))
def determine_keywords(self):
    """Build the template keywords for Cython code generation.

    Returns a dict with two entries:

    - ``'load_namespace'``: newline-joined ``cdef`` declarations/assignments
      pulling each variable out of the ``_namespace`` dict into typed
      locals (typed buffers + raw pointers for arrays, plain values for
      constants, bare declarations for subexpressions/auxiliaries).
    - ``'support_code_lines'``: the *list* of support code lines
      contributed by user-defined `Function` implementations.
    """
    from brian2.devices.device import get_device
    device = get_device()
    # load variables from namespace
    load_namespace = []
    support_code = []
    # Several variable names can refer to the same underlying array --
    # emit the buffer/pointer declarations only once per array.
    handled_pointers = set()
    user_functions = []
    # sorted() makes the generated code deterministic across runs.
    for varname, var in sorted(self.variables.items()):
        if isinstance(var, Variable) and not isinstance(
                var, (Subexpression, AuxiliaryVariable)):
            # Keep a reference to the Variable object itself under a
            # "_var_" prefix, alongside the value-level bindings below.
            load_namespace.append(
                '_var_{0} = _namespace["_var_{1}"]'.format(
                    varname, varname))
        if isinstance(var, AuxiliaryVariable):
            # Only declare; assigned by the generated code.
            line = "cdef {dtype} {varname}".format(dtype=get_cpp_dtype(
                var.dtype), varname=varname)
            load_namespace.append(line)
        elif isinstance(var, Subexpression):
            # Subexpressions are computed in the generated code; declare only.
            dtype = get_cpp_dtype(var.dtype)
            line = "cdef {dtype} {varname}".format(dtype=dtype,
                                                   varname=varname)
            load_namespace.append(line)
        elif isinstance(var, Constant):
            dtype_name = get_cpp_dtype(var.value)
            line = 'cdef {dtype} {varname} = _namespace["{varname}"]'.format(
                dtype=dtype_name, varname=varname)
            load_namespace.append(line)
        elif isinstance(var, Variable):
            if var.dynamic:
                # Keep a reference to the dynamic array object itself (it
                # can be resized), in addition to the pointer set up below.
                load_namespace.append('{0} = _namespace["{1}"]'.format(
                    self.get_array_name(var, False),
                    self.get_array_name(var, False)))

            # This is the "true" array name, not the restricted pointer.
            array_name = device.get_array_name(var)
            pointer_name = self.get_array_name(var)
            if pointer_name in handled_pointers:
                continue
            if getattr(var, 'ndim', 1) > 1:
                continue  # multidimensional (dynamic) arrays have to be treated differently
            if get_dtype_str(var.dtype) == 'bool':
                # Boolean arrays need a char buffer with cast=True.
                newlines = [
                    "cdef _numpy.ndarray[char, ndim=1, mode='c', cast=True] _buf_{array_name} = _namespace['{array_name}']",
                    "cdef {cpp_dtype} * {array_name} = <{cpp_dtype} *> _buf_{array_name}.data"
                ]
            else:
                newlines = [
                    "cdef _numpy.ndarray[{cpp_dtype}, ndim=1, mode='c'] _buf_{array_name} = _namespace['{array_name}']",
                    "cdef {cpp_dtype} * {array_name} = <{cpp_dtype} *> _buf_{array_name}.data"
                ]

            if not var.scalar:
                # Expose the array length as _num<array_name>.
                newlines += [
                    "cdef int _num{array_name} = len(_namespace['{array_name}'])"
                ]

            if var.scalar and var.constant:
                # Scalar constants can be assigned directly from the namespace.
                newlines += [
                    'cdef {cpp_dtype} {varname} = _namespace["{varname}"]'
                ]
            else:
                newlines += ["cdef {cpp_dtype} {varname}"]

            for line in newlines:
                line = line.format(
                    cpp_dtype=get_cpp_dtype(var.dtype),
                    numpy_dtype=get_numpy_dtype(var.dtype),
                    pointer_name=pointer_name,
                    array_name=array_name,
                    varname=varname,
                )
                load_namespace.append(line)
            handled_pointers.add(pointer_name)

        elif isinstance(var, Function):
            sc, ln, uf = self._add_user_function(varname, var)
            support_code.extend(sc)
            load_namespace.extend(ln)
            user_functions.extend(uf)
        else:
            # fallback to Python object
            load_namespace.append('{0} = _namespace["{1}"]'.format(
                varname, varname))

    # Declare any temporary variables requested by the generator.
    for varname, dtype in sorted(self.temporary_vars):
        cpp_dtype = get_cpp_dtype(dtype)
        line = "cdef {cpp_dtype} {varname}".format(cpp_dtype=cpp_dtype,
                                                   varname=varname)
        load_namespace.append(line)

    return {
        'load_namespace': '\n'.join(load_namespace),
        'support_code_lines': support_code
    }
def determine_keywords(self):
    """Build the template keywords for CUDA/C++ code generation.

    Returns a dict with the line lists the templates expect
    (``pointers_lines``, ``support_code_lines``, ``hashdefine_lines``,
    ``denormals_code_lines``, ``kernel_lines``) plus ``uses_atomics``,
    updated with any extra template keywords collected along the way.
    """
    # set up the restricted pointers, these are used so that the compiler
    # knows there is no aliasing in the pointers, for optimisation
    pointers = []
    # Add additional lines inside the kernel functions
    kernel_lines = []
    # It is possible that several different variable names refer to the
    # same array. E.g. in gapjunction code, v_pre and v_post refer to the
    # same array if a group is connected to itself
    handled_pointers = set()
    template_kwds = {}
    # Again, do the import here to avoid a circular dependency.
    from brian2.devices.device import get_device
    device = get_device()
    for varname, var in self.variables.iteritems():
        if isinstance(var, ArrayVariable):
            # This is the "true" array name, not the restricted pointer.
            array_name = device.get_array_name(var)
            pointer_name = self.get_array_name(var)
            if pointer_name in handled_pointers:
                continue
            if getattr(var, 'ndim', 1) > 1:
                continue  # multidimensional (dynamic) arrays have to be treated differently
            restrict = self.restrict
            # turn off restricted pointers for scalars for safety
            if var.scalar:
                restrict = ' '
            line = '{0}* {1} {2} = {3};'.format(self.c_data_type(var.dtype),
                                                restrict,
                                                pointer_name,
                                                array_name)
            pointers.append(line)
            handled_pointers.add(pointer_name)

    # set up the functions: collect hash defines, pointers, support code
    # and kernel lines contributed by each user-defined Function.
    user_functions = []
    support_code = []
    hash_defines = []
    for varname, variable in self.variables.items():
        if isinstance(variable, Function):
            hd, ps, sc, uf, kl = self._add_user_function(varname, variable)
            user_functions.extend(uf)
            support_code.extend(sc)
            pointers.extend(ps)
            hash_defines.extend(hd)
            kernel_lines.extend(kl)
    support_code.append(self.universal_support_code)

    # Clock variables (t, dt, timestep) are passed by value to kernels and
    # need to be translated back into pointers for scalar/vector code.
    for varname, variable in self.variables.iteritems():
        if hasattr(variable, 'owner') and isinstance(variable.owner, Clock):
            # get arrayname without _ptr suffix (e.g. _array_defaultclock_dt)
            arrayname = self.get_array_name(variable, pointer=False)
            line = "const {dtype}* _ptr{arrayname} = &_value{arrayname};"
            line = line.format(dtype=c_data_type(variable.dtype),
                               arrayname=arrayname)
            # Guard against duplicate declarations.
            if line not in kernel_lines:
                kernel_lines.append(line)

    keywords = {
        'pointers_lines': stripped_deindented_lines('\n'.join(pointers)),
        'support_code_lines': stripped_deindented_lines('\n'.join(support_code)),
        'hashdefine_lines': stripped_deindented_lines('\n'.join(hash_defines)),
        'denormals_code_lines': stripped_deindented_lines('\n'.join(
            self.denormals_to_zero_code())),
        'kernel_lines': stripped_deindented_lines('\n'.join(kernel_lines)),
        'uses_atomics': self.uses_atomics
    }
    keywords.update(template_kwds)
    return keywords
def _add_user_function(self, varname, variable):
    """Collect the code pieces needed to call user `Function` *variable*.

    Recursively processes the implementation's dependencies first, so the
    returned lists have dependency code before the function's own code.

    Returns
    -------
    tuple
        ``(hash_defines, pointers, support_code, user_functions,
        kernel_lines)`` -- line lists for the code object templates plus
        the ``(name, Function)`` pairs that were handled.
    """
    impl = variable.implementations[self.codeobj_class]
    support_code = []
    hash_defines = []
    pointers = []
    kernel_lines = []
    user_functions = [(varname, variable)]
    funccode = impl.get_code(self.owner)

    ### Different from CPPCodeGenerator: We format the funccode dtypes here
    from brian2.devices.device import get_device
    device = get_device()
    if varname in functions_C99:
        funccode = funccode.format(default_type=self.default_func_type,
                                   other_type=self.other_func_type)
    if varname == 'clip':
        funccode = funccode.format(float_dtype=self.float_dtype)
    ###

    # A plain string is treated as support code only.
    if isinstance(funccode, basestring):
        funccode = {'support_code': funccode}
    if funccode is not None:
        # To make namespace variables available to functions, we
        # create global variables and assign to them in the main
        # code
        func_namespace = impl.get_namespace(self.owner) or {}
        for ns_key, ns_value in func_namespace.iteritems():
            # This section is adapted from CPPCodeGenerator such that file
            # global namespace pointers can be used in both host and device
            # code.
            assert hasattr(ns_value, 'dtype'), \
                'This should not have happened. Please report at ' \
                'https://github.com/brian-team/brian2cuda/issues/new'
            if ns_value.shape == ():
                raise NotImplementedError((
                    'Directly replace scalar values in the function '
                    'instead of providing them via the namespace'))
            type_str = self.c_data_type(ns_value.dtype) + '*'
            namespace_ptr = '''
                #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ > 0))
                __device__ {dtype} _namespace{name};
                #else
                {dtype} _namespace{name};
                #endif
                '''.format(dtype=type_str, name=ns_key)
            support_code.append(namespace_ptr)
            # pointer lines will be used in codeobjects running on the host
            pointers.append('_namespace{name} = {name};'.format(name=ns_key))
            # kernel lines will be used in codeobjects running on the device
            kernel_lines.append('''
                #if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ > 0))
                _namespace{name} = d{name};
                #else
                _namespace{name} = {name};
                #endif
                '''.format(name=ns_key))
        support_code.append(deindent(funccode.get('support_code', '')))
        hash_defines.append(deindent(funccode.get('hashdefine_code', '')))

    # Recurse into dependencies so their code precedes this function's.
    dep_hash_defines = []
    dep_pointers = []
    dep_support_code = []
    dep_kernel_lines = []
    if impl.dependencies is not None:
        for dep_name, dep in impl.dependencies.iteritems():
            if dep_name not in self.variables:
                self.variables[dep_name] = dep
                hd, ps, sc, uf, kl = self._add_user_function(dep_name, dep)
                dep_hash_defines.extend(hd)
                dep_pointers.extend(ps)
                dep_support_code.extend(sc)
                user_functions.extend(uf)
                dep_kernel_lines.extend(kl)

    return (dep_hash_defines + hash_defines,
            dep_pointers + pointers,
            dep_support_code + support_code,
            user_functions,
            dep_kernel_lines + kernel_lines)
def create_runner_codeobj(group, code, template_name,
                          run_namespace,
                          user_code=None,
                          variable_indices=None,
                          name=None, check_units=True,
                          needed_variables=None,
                          additional_variables=None,
                          template_kwds=None,
                          override_conditional_write=None,
                          codeobj_class=None):
    '''
    Create a `CodeObject` for the execution of code in the context of a
    `Group`.

    Parameters
    ----------
    group : `Group`
        The group where the code is to be run
    code : str or dict of str
        The code to be executed.
    template_name : str
        The name of the template to use for the code.
    run_namespace : dict-like
        An additional namespace that is used for variable lookup (either
        an explicitly defined namespace or one taken from the local
        context).
    user_code : str, optional
        The code that had been specified by the user before other code was
        added automatically. If not specified, will be assumed to be
        identical to ``code``.
    variable_indices : dict-like, optional
        A mapping from `Variable` objects to index names (strings). If none
        is given, uses the corresponding attribute of `group`.
    name : str, optional
        A name for this code object, will use ``group + '_codeobject*'`` if
        none is given.
    check_units : bool, optional
        Whether to check units in the statement. Defaults to ``True``.
    needed_variables: list of str, optional
        A list of variables that are neither present in the abstract code,
        nor in the ``USES_VARIABLES`` statement in the template. This is
        only rarely necessary, an example being a `StateMonitor` where the
        names of the variables are neither known to the template nor
        included in the abstract code statements.
    additional_variables : dict-like, optional
        A mapping of names to `Variable` objects, used in addition to the
        variables saved in `group`.
    template_kwds : dict, optional
        A dictionary of additional information that is passed to the
        template.
    override_conditional_write: list of str, optional
        A list of variable names which are used as conditions (e.g. for
        refractoriness) which should be ignored.
    codeobj_class : class, optional
        The `CodeObject` class to run code with. If not specified, defaults
        to the `group`'s ``codeobj_class`` attribute.
    '''
    if name is None:
        if group is not None:
            name = '%s_%s_codeobject*' % (group.name, template_name)
        else:
            name = '%s_codeobject*' % template_name

    if user_code is None:
        user_code = code

    # Wrap a plain code string into a single-entry dict. Use `basestring`
    # (not `str`) so that unicode code strings are wrapped as well --
    # otherwise `code.values()` below would fail; this also matches the
    # Python 2 conventions used throughout this module.
    if isinstance(code, basestring):
        code = {None: code}
        user_code = {None: user_code}

    msg = 'Creating code object (group=%s, template name=%s) for abstract code:\n' % (
        group.name, template_name)
    msg += indent(code_representation(code))
    logger.diagnostic(msg)
    from brian2.devices import get_device
    device = get_device()

    if override_conditional_write is None:
        override_conditional_write = set()
    else:
        override_conditional_write = set(override_conditional_write)

    if codeobj_class is None:
        codeobj_class = device.code_object_class(group.codeobj_class)
    else:
        codeobj_class = device.code_object_class(codeobj_class)

    template = getattr(codeobj_class.templater, template_name)
    template_variables = getattr(template, 'variables', None)

    all_variables = dict(group.variables)
    if additional_variables is not None:
        all_variables.update(additional_variables)

    # Determine the identifiers that were used in the full code and in the
    # user-specified part of it.
    identifiers = set()
    user_identifiers = set()
    for v, u_v in zip(code.values(), user_code.values()):
        _, uk, u = analyse_identifiers(v, all_variables, recursive=True)
        identifiers |= uk | u
        _, uk, u = analyse_identifiers(u_v, all_variables, recursive=True)
        user_identifiers |= uk | u

    # Add variables that are not in the abstract code, nor specified in the
    # template but nevertheless necessary
    if needed_variables is None:
        needed_variables = []
    # Resolve all variables (variables used in the code and variables needed by
    # the template)
    variables = group.resolve_all(
        identifiers | set(needed_variables) | set(template_variables),
        # template variables are not known to the user:
        user_identifiers=user_identifiers,
        additional_variables=additional_variables,
        run_namespace=run_namespace)

    # We raise this error only now, because there is some non-obvious code path
    # where Jinja tries to get a Synapse's "name" attribute via syn['name'],
    # which then triggers the use of the `group_get_indices` template which does
    # not exist for standalone. Putting the check for template == None here
    # means we will first raise an error about the unknown identifier which will
    # then make Jinja try syn.name
    if template is None:
        codeobj_class_name = codeobj_class.class_name or codeobj_class.__name__
        raise AttributeError(('"%s" does not provide a code generation '
                              'template "%s"') % (codeobj_class_name,
                                                  template_name))

    conditional_write_variables = {}
    # Add all the "conditional write" variables
    for var in variables.itervalues():
        cond_write_var = getattr(var, 'conditional_write', None)
        if cond_write_var in override_conditional_write:
            continue
        if cond_write_var is not None:
            if (cond_write_var.name in variables and
                    not variables[cond_write_var.name] is cond_write_var):
                logger.diagnostic(('Variable "%s" is needed for the '
                                   'conditional write mechanism of variable '
                                   '"%s". Its name is already used for %r.') %
                                  (cond_write_var.name,
                                   var.name,
                                   variables[cond_write_var.name]))
            else:
                conditional_write_variables[cond_write_var.name] = cond_write_var

    variables.update(conditional_write_variables)

    if check_units:
        for c in code.values():
            # This is the first time that the code is parsed, catch errors
            try:
                check_units_statements(c, variables)
            except (SyntaxError, ValueError) as ex:
                error_msg = _error_msg(c, name)
                raise ValueError(error_msg + str(ex))

    all_variable_indices = copy.copy(group.variables.indices)
    if additional_variables is not None:
        all_variable_indices.update(additional_variables.indices)
    if variable_indices is not None:
        all_variable_indices.update(variable_indices)

    # Make "conditional write" variables use the same index as the variable
    # that depends on them
    for varname, var in variables.iteritems():
        cond_write_var = getattr(var, 'conditional_write', None)
        if cond_write_var is not None:
            all_variable_indices[cond_write_var.name] = all_variable_indices[varname]

    # Add the indices needed by the variables. Iterate over an explicit
    # list of names, since `variables` is mutated inside the loop.
    varnames = list(variables.keys())
    for varname in varnames:
        var_index = all_variable_indices[varname]
        if var_index not in ('_idx', '0'):
            variables[var_index] = all_variables[var_index]

    return device.code_object(owner=group,
                              name=name,
                              abstract_code=code,
                              variables=variables,
                              template_name=template_name,
                              variable_indices=all_variable_indices,
                              template_kwds=template_kwds,
                              codeobj_class=codeobj_class,
                              override_conditional_write=override_conditional_write,
                              )