def test_stochastic_variable():
    '''
    Test that a NeuronGroup with a stochastic variable can be simulated. Only
    makes sure no error occurs.
    '''
    tau = 10 * ms
    G = NeuronGroup(1, 'dv/dt = -v/tau + xi*tau**-0.5: 1')
    net = Network(G)
    net.run(defaultclock.dt)
def test_stochastic_variable_multiplicative():
    '''
    Test that a NeuronGroup with multiplicative noise can be simulated. Only
    makes sure no error occurs.
    '''
    mu = 0.5/second  # drift
    sigma = 0.1/second  # diffusion
    G = NeuronGroup(1, 'dX/dt = (mu - 0.5*second*sigma**2)*X + X*sigma*xi*second**.5: 1')
    net = Network(G)
    net.run(defaultclock.dt)
def test_custom_events():
    G = NeuronGroup(2, '''event_time1 : second
                          event_time2 : second''',
                    events={'event1': 't>=i*ms and t<i*ms+dt',
                            'event2': 't>=(i+1)*ms and t<(i+1)*ms+dt'})
    G.run_on_event('event1', 'event_time1 = t')
    G.run_on_event('event2', 'event_time2 = t')
    net = Network(G)
    net.run(2.1*ms)
    assert_allclose(G.event_time1[:], [0, 1]*ms)
    assert_allclose(G.event_time2[:], [1, 2]*ms)
def test_threshold_reset():
    '''
    Test that threshold and reset work in the expected way.
    '''
    # Membrane potential does not change by itself
    G = NeuronGroup(3, 'dv/dt = 0 / second : 1',
                    threshold='v > 1', reset='v=0.5')
    G.v = np.array([0, 1, 2])
    net = Network(G)
    net.run(defaultclock.dt)
    assert_equal(G.v[:], np.array([0, 1, 0.5]))
def test_stochastic_variable():
    '''
    Test that a NeuronGroup with a stochastic variable can be simulated. Only
    makes sure no error occurs.
    '''
    tau = 10 * ms
    for codeobj_class in codeobj_classes:
        G = NeuronGroup(1, 'dv/dt = -v/tau + xi*tau**-0.5: 1',
                        codeobj_class=codeobj_class)
        net = Network(G)
        net.run(defaultclock.dt)
def test_linked_var_in_reset_size_1():
    G1 = NeuronGroup(1, 'x:1')
    G2 = NeuronGroup(1, '''x_linked : 1 (linked)
                           y:1''',
                     threshold='y>1', reset='y=0; x_linked += 1')
    G2.x_linked = linked_var(G1, 'x')
    G2.y = 1.1
    net = Network(G1, G2)
    # In this context, x_linked should not be considered as a scalar variable
    # and therefore the reset statement should be allowed
    net.run(3*defaultclock.dt)
    assert_equal(G1.x[:], 1)
def test_threshold_reset():
    '''
    Test that threshold and reset work in the expected way.
    '''
    for codeobj_class in codeobj_classes:
        # Membrane potential does not change by itself
        G = NeuronGroup(3, 'dv/dt = 0 / second : 1',
                        threshold='v > 1', reset='v=0.5',
                        codeobj_class=codeobj_class)
        G.v = np.array([0, 1, 2])
        net = Network(G)
        net.run(defaultclock.dt)
        assert_equal(G.v[:], np.array([0, 1, 0.5]))
def test_random_vector_values():
    # Make sure that the new "loop-invariant optimisation" does not pull out
    # the random number generation and therefore make all neurons receive
    # the same values
    tau = 10*ms
    G = NeuronGroup(100, 'dv/dt = -v / tau + xi*tau**-0.5: 1')
    G.v[:] = 'rand()'
    assert np.var(G.v[:]) > 0
    G.v[:] = 0
    net = Network(G)
    net.run(defaultclock.dt)
    assert np.var(G.v[:]) > 0
def test_stochastic_variable_multiplicative():
    '''
    Test that a NeuronGroup with multiplicative noise can be simulated. Only
    makes sure no error occurs.
    '''
    for codeobj_class in codeobj_classes:
        mu = 0.5/second  # drift
        sigma = 0.1/second  # diffusion
        G = NeuronGroup(1, 'dX/dt = (mu - 0.5*second*sigma**2)*X + X*sigma*xi*second**.5: 1',
                        codeobj_class=codeobj_class)
        net = Network(G)
        net.run(defaultclock.dt)
def test_aliasing_in_statements():
    '''
    Test an issue around variables aliasing other variables (#259)
    '''
    runner_code = '''x_1 = x_0
                     x_0 = -1'''
    g = NeuronGroup(1, model='''x_0 : 1
                                x_1 : 1''',
                    codeobj_class=NumpyCodeObject)
    custom_code_obj = g.custom_operation(runner_code)
    net = Network(g, custom_code_obj)
    net.run(defaultclock.dt)
    assert_equal(g.x_0_[:], np.array([-1]))
    assert_equal(g.x_1_[:], np.array([0]))
def test_linked_var_in_reset_incorrect():
    # Raise an error if a scalar variable (i.e. a linked variable from a
    # group of size 1) is set in the reset statement of a group with size > 1
    G1 = NeuronGroup(1, 'x:1')
    G2 = NeuronGroup(2, '''x_linked : 1 (linked)
                           y:1''',
                     threshold='y>1', reset='y=0; x_linked += 1')
    G2.x_linked = linked_var(G1, 'x')
    G2.y = 1.1
    net = Network(G1, G2)
    # It is not well-defined what "x_linked += 1" means in this context
    # (as for any other shared variable)
    assert_raises(SyntaxError, lambda: net.run(0*ms))
def test_unit_errors_threshold_reset():
    '''
    Test that unit errors in thresholds and resets are detected.
    '''
    # Unit error in threshold
    group = NeuronGroup(1, 'dv/dt = -v/(10*ms) : 1',
                        threshold='v > -20*mV')
    assert_raises(DimensionMismatchError, lambda: Network(group).run(0*ms))

    # Unit error in reset
    group = NeuronGroup(1, 'dv/dt = -v/(10*ms) : 1',
                        threshold='True',
                        reset='v = -65*mV')
    assert_raises(DimensionMismatchError, lambda: Network(group).run(0*ms))

    # More complicated reset with an intermediate variable
    # This should pass
    group = NeuronGroup(1, 'dv/dt = -v/(10*ms) : 1',
                        threshold='False',
                        reset='''temp_var = -65
                                 v = temp_var''')
    run(0*ms)

    # Throw in an empty line (should still pass)
    group = NeuronGroup(1, 'dv/dt = -v/(10*ms) : 1',
                        threshold='False',
                        reset='''temp_var = -65

                                 v = temp_var''')
    run(0*ms)

    # This should fail
    group = NeuronGroup(1, 'dv/dt = -v/(10*ms) : 1',
                        threshold='False',
                        reset='''temp_var = -65*mV
                                 v = temp_var''')
    assert_raises(DimensionMismatchError, lambda: Network(group).run(0*ms))

    # Resets with an in-place modification
    # This should work
    group = NeuronGroup(1, 'dv/dt = -v/(10*ms) : 1',
                        threshold='False',
                        reset='''v /= 2''')
    run(0*ms)

    # This should fail
    group = NeuronGroup(1, 'dv/dt = -v/(10*ms) : 1',
                        threshold='False',
                        reset='''v -= 60*mV''')
    assert_raises(DimensionMismatchError, lambda: Network(group).run(0*ms))
def test_referred_scalar_variable():
    '''
    Test the correct handling of referred scalar variables in subexpressions
    '''
    G = NeuronGroup(10, '''out = sin(2*pi*t*freq) + x : 1
                           x : 1
                           freq : Hz (shared)''')
    G.freq = 1*Hz
    G.x = np.arange(10)
    G2 = NeuronGroup(10, '')
    G2.variables.add_reference('out', G)
    net = Network(G, G2)
    net.run(.25*second)  # sin(2*pi*0.25) == 1
    assert_allclose(G2.out[:], np.arange(10) + 1)
def test_linked_variable_correct():
    '''
    Test correct uses of linked variables.
    '''
    tau = 10*ms
    G1 = NeuronGroup(10, 'dv/dt = -v / tau : volt')
    G1.v = np.linspace(0*mV, 20*mV, 10)
    G2 = NeuronGroup(10, 'v : volt (linked)')
    G2.v = linked_var(G1.v)
    mon1 = StateMonitor(G1, 'v', record=True)
    mon2 = StateMonitor(G2, 'v', record=True)
    net = Network(G1, G2, mon1, mon2)
    net.run(10*ms)
    assert_equal(mon1.v[:, :], mon2.v[:, :])
def test_custom_events_schedule():
    # In the same time step: event2 will be checked and its code executed
    # before event1 is checked and its code executed
    G = NeuronGroup(2, '''x : 1
                          event_time : second''',
                    events={'event1': 'x>0',
                            'event2': 't>=(i+1)*ms and t<(i+1)*ms+dt'})
    G.set_event_schedule('event1', when='after_resets')
    G.run_on_event('event2', 'x = 1', when='resets')
    G.run_on_event('event1', '''event_time = t
                                x = 0''',
                   when='after_resets', order=1)
    net = Network(G)
    net.run(2.1*ms)
    assert_allclose(G.event_time[:], [1, 2]*ms)
def find_synapses(self):
    # Write the global objects
    networks = [net() for net in Network.__instances__()
                if net().name != '_fake_network']
    synapses = []
    for net in networks:
        net_synapses = [s for s in net.objects if isinstance(s, Synapses)]
        synapses.extend(net_synapses)
        # We don't currently support pathways with scalar delays
        for synapse_obj in net_synapses:
            for pathway in synapse_obj._pathways:
                if not isinstance(pathway.variables['delay'],
                                  DynamicArrayVariable):
                    error_msg = ('The "%s" pathway uses a scalar '
                                 'delay (instead of a delay per synapse). '
                                 'This is not yet supported. Do not '
                                 'specify a delay in the Synapses(...) '
                                 'call but instead set its delay attribute '
                                 'afterwards.') % (pathway.name)
                    raise NotImplementedError(error_msg)
    self.networks = networks
    self.net_synapses = synapses
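# A hedged usage sketch (not part of find_synapses above): the workaround the
# error message asks for. Create Synapses without a 'delay' argument and set
# the attribute afterwards, which makes the delay a per-synapse dynamic array
# variable. The groups and values below are illustrative assumptions.
#
#     syn = Synapses(source_group, target_group, on_pre='v += 0.1')
#     syn.connect()
#     syn.delay = 'rand() * 2*ms'   # per-synapse delays, set after construction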
def test_aliasing_in_statements():
    '''
    Test an issue around variables aliasing other variables (#259)
    '''
    if prefs.codegen.target != 'numpy':
        raise SkipTest('numpy-only test')
    runner_code = '''x_1 = x_0
                     x_0 = -1'''
    g = NeuronGroup(1, model='''x_0 : 1
                                x_1 : 1''')
    g.run_regularly(runner_code)
    net = Network(g)
    net.run(defaultclock.dt)
    assert_equal(g.x_0_[:], np.array([-1]))
    assert_equal(g.x_1_[:], np.array([0]))
def test_referred_scalar_variable():
    '''
    Test the correct handling of referred scalar variables in subexpressions
    '''
    for codeobj_class in codeobj_classes:
        G = NeuronGroup(10, '''out = sin(2*pi*t*freq) + x : 1
                               x : 1
                               freq : Hz (shared)''',
                        codeobj_class=codeobj_class)
        G.freq = 1*Hz
        G.x = np.arange(10)
        G2 = NeuronGroup(10, '', codeobj_class=codeobj_class)
        G2.variables.add_reference('out', G)
        net = Network(G, G2)
        net.run(.25*second)
        assert_allclose(G2.out[:], np.arange(10) + 1)
def find_synapses(self):
    # Write the global objects
    networks = [net() for net in Network.__instances__()
                if net().name != '_fake_network']
    synapses = []
    for net in networks:
        net_synapses = [s for s in net.objects if isinstance(s, Synapses)]
        synapses.extend(net_synapses)
    self.networks = networks
    self.net_synapses = synapses
def test_syntax_errors():
    '''
    Test that syntax errors are already caught at initialization time.
    For equations this is already tested in test_equations
    '''
    # We do not specify the exact type of exception here: Python throws a
    # SyntaxError while C++ results in a ValueError

    # Syntax error in threshold
    group = NeuronGroup(1, 'dv/dt = 5*Hz : 1', threshold='>1')
    assert_raises(Exception, lambda: Network(group).run(0*ms))

    # Syntax error in reset
    group = NeuronGroup(1, 'dv/dt = 5*Hz : 1', threshold='True', reset='0')
    assert_raises(Exception, lambda: Network(group).run(0*ms))
def test_linked_subexpression_2():
    '''
    Test a linked variable referring to a subexpression without indices
    '''
    G = NeuronGroup(2, '''dv/dt = 100*Hz : 1
                          I = clip(v, 0, inf) : 1''',
                    threshold='v>1', reset='v=0')
    G.v = [0, .5]
    G2 = NeuronGroup(2, '''I_l : 1 (linked)''')
    G2.I_l = linked_var(G.I)
    mon1 = StateMonitor(G, 'I', record=True)
    mon = StateMonitor(G2, 'I_l', record=True)
    net = Network(G, G2, mon, mon1)
    net.run(5*ms)
    assert all(mon[0].I_l == mon1[0].I)
    assert all(mon[1].I_l == mon1[1].I)
def test_scalar_variable():
    '''
    Test the correct handling of scalar variables
    '''
    tau = 10*ms
    G = NeuronGroup(10, '''E_L : volt (shared)
                           s2 : 1 (shared)
                           dv/dt = (E_L - v) / tau : volt''')
    # Setting should work in these ways
    G.E_L = -70*mV
    assert_allclose(G.E_L[:], -70*mV)
    G.E_L[:] = -60*mV
    assert_allclose(G.E_L[:], -60*mV)
    G.E_L = 'E_L + s2*mV - 10*mV'
    assert_allclose(G.E_L[:], -70*mV)
    G.E_L[:] = '-75*mV'
    assert_allclose(G.E_L[:], -75*mV)
    net = Network(G)
    net.run(defaultclock.dt)
def test_namespace_errors():
    # model equations use unknown identifier
    G = NeuronGroup(1, 'dv/dt = -v/tau : 1')
    net = Network(G)
    assert_raises(KeyError, lambda: net.run(1*ms))

    # reset uses unknown identifier
    G = NeuronGroup(1, 'dv/dt = -v/tau : 1', threshold='False', reset='v = v_r')
    net = Network(G)
    assert_raises(KeyError, lambda: net.run(1*ms))

    # threshold uses unknown identifier
    G = NeuronGroup(1, 'dv/dt = -v/tau : 1', threshold='v > v_th')
    net = Network(G)
    assert_raises(KeyError, lambda: net.run(1*ms))
def test_linked_subexpression():
    '''
    Test a subexpression referring to a linked variable.
    '''
    G = NeuronGroup(2, 'dv/dt = 100*Hz : 1', threshold='v>1', reset='v=0')
    G.v = [0, .5]
    G2 = NeuronGroup(10, '''I = clip(x, 0, inf) : 1
                            x : 1 (linked)''')
    G2.x = linked_var(G.v, index=np.array([0, 1]).repeat(5))
    mon = StateMonitor(G2, 'I', record=True)
    net = Network(G, G2, mon)
    net.run(5*ms)
    # Due to the linking, the first 5 and the second 5 recorded I vectors
    # should be identical
    assert all((all(mon[i].I == mon[0].I) for i in xrange(5)))
    assert all((all(mon[i+5].I == mon[5].I) for i in xrange(5)))
def test_linked_variable_scalar():
    '''
    Test linked variable from a size 1 group.
    '''
    G1 = NeuronGroup(1, 'dx/dt = -x / (10*ms) : 1')
    G2 = NeuronGroup(10, '''dy/dt = (-y + x) / (20*ms) : 1
                            x : 1 (linked)''')
    G1.x = 1
    G2.y = np.linspace(0, 1, 10)
    G2.x = linked_var(G1.x)
    mon = StateMonitor(G2, 'y', record=True)
    net = Network(G1, G2, mon)
    # We don't test anything for now, except that it runs without raising an
    # error
    net.run(10*ms)
    # Make sure that printing the variable values works
    assert len(str(G2.x)) > 0
    assert len(repr(G2.x)) > 0
    assert len(str(G2.x[:])) > 0
    assert len(repr(G2.x[:])) > 0
def test_linked_subexpression_3():
    '''
    Test a linked variable referring to a subexpression with indices
    '''
    G = NeuronGroup(2, '''dv/dt = 100*Hz : 1
                          I = clip(v, 0, inf) : 1''',
                    threshold='v>1', reset='v=0')
    G.v = [0, .5]
    G2 = NeuronGroup(10, '''I_l : 1 (linked)''')
    G2.I_l = linked_var(G.I, index=np.array([0, 1]).repeat(5))
    mon1 = StateMonitor(G, 'I', record=True)
    mon = StateMonitor(G2, 'I_l', record=True)
    net = Network(G, G2, mon, mon1)
    net.run(5*ms)
    # Due to the linking, the first 5 recorded I_l vectors should refer to the
    # I value of the first neuron in G, and the second 5 to that of the second
    assert all((all(mon[i].I_l == mon1[0].I) for i in xrange(5)))
    assert all((all(mon[i+5].I_l == mon1[1].I) for i in xrange(5)))
def test_incomplete_namespace():
    '''
    Test that the namespace does not have to be complete at creation time.
    '''
    # This uses tau which is not defined yet (explicit namespace)
    G = NeuronGroup(1, 'dv/dt = -v/tau : 1', namespace={})
    G.namespace['tau'] = 10*ms
    net = Network(G)
    net.run(0*ms)

    # This uses tau which is not defined yet (implicit namespace)
    G = NeuronGroup(1, 'dv/dt = -v/tau : 1')
    tau = 10*ms
    net = Network(G)
    net.run(0*ms)
def test_incorrect_custom_event_definition():
    # Incorrect event name
    assert_raises(TypeError, lambda: NeuronGroup(1, '',
                                                 events={'1event': 'True'}))
    # duplicate definition of 'spike' event
    assert_raises(ValueError, lambda: NeuronGroup(1, '', threshold='True',
                                                  events={'spike': 'False'}))
    # not a threshold
    G = NeuronGroup(1, '', events={'my_event': 10*mV})
    assert_raises(TypeError, lambda: Network(G).run(0*ms))
    # schedule for a non-existing event
    G = NeuronGroup(1, '', threshold='False', events={'my_event': 'True'})
    assert_raises(ValueError, lambda: G.set_event_schedule('another_event'))
    # code for a non-existing event
    assert_raises(ValueError, lambda: G.run_on_event('another_event', ''))
def create_lems_model(self, network=None, namespace={}, initializers={},
                      constants_file=None, includes=[],
                      recordingsname='recording'):
    """
    From a given *network* returns a LEMS model object.

    Parameters
    ----------
    network : str, optional
        all brian objects collected into a network, or None. In the latter
        case brian2 objects are collected automatically from the above scope.
    namespace : dict
        namespace variables defining extra model parameters
    initializers : dict
        all values which need to be initialized before the simulation runs
    constants_file : str, optional
        file with units as constants definitions; if None, LEMS_CONSTANTS_XML
        is added automatically
    includes : list of str
        all additional XML files added in the preamble
    recordingsname : str, optional
        output of LEMS simulation recordings, values with extension .dat and
        spikes with .spikes, default 'recording'
    """
    if network is None:
        net = Network(collect(level=1))
    else:
        net = network

    if not constants_file:
        self._model.add(lems.Include(LEMS_CONSTANTS_XML))
    else:
        self._model.add(lems.Include(constants_file))

    includes = set(includes)
    for incl in INCLUDES:
        includes.add(incl)

    neuron_groups = [o for o in net.objects if type(o) is NeuronGroup]
    state_monitors = [o for o in net.objects if type(o) is StateMonitor]
    spike_monitors = [o for o in net.objects if type(o) is SpikeMonitor]

    for o in net.objects:
        if type(o) not in [NeuronGroup, StateMonitor, SpikeMonitor,
                           Thresholder, Resetter, StateUpdater]:
            logger.warn("{} export functionality is not implemented "
                        "yet.".format(type(o).__name__))

    # --- not fully implemented
    synapses = [o for o in net.objects if type(o) is Synapses]
    netinputs = [o for o in net.objects if type(o) is PoissonInput]
    # ---
    #if len(synapses) > 0:
    #    logger.warn("Synapses export functionality is not implemented yet.")
    #if len(netinputs) > 0:
    #    logger.warn("Network Input export functionality is not implemented yet.")

    if len(netinputs) > 0:
        includes.add(LEMS_INPUTS)
    for incl in includes:
        self.add_include(incl)

    # The first step is to add individual neuron definitions and initialize
    # them by MultiInstantiate
    for e, obj in enumerate(neuron_groups):
        self.add_neurongroup(obj, e, namespace, initializers)

    # The DOM structure of the whole model is constructed below
    self._dommodel = self._model.export_to_dom()

    # input support - currently only Poisson inputs
    for e, obj in enumerate(netinputs):
        self.add_input(obj, counter=e)

    # A population should be created in *make_multiinstantiate*
    # so we can add it to our DOM structure.
    if self._population:
        self._extend_dommodel(self._population)

    # If some State or Spike Monitors occur, we support them by a
    # Simulation tag
    self._model_namespace['simulname'] = "sim1"
    self._simulation = NeuroMLSimulation(self._model_namespace['simulname'],
                                         self._model_namespace['networkname'])
    for e, obj in enumerate(state_monitors):
        self.add_statemonitor(obj, filename=recordingsname, outputfile=True)
    for e, obj in enumerate(spike_monitors):
        self.add_spikemonitor(obj, filename=recordingsname)

    simulation = self._simulation.build()
    self._extend_dommodel(simulation)
    target = NeuroMLTarget(self._model_namespace['simulname'])
    target = target.build()
    self._extend_dommodel(target)
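# A hedged usage sketch for create_lems_model(): in brian2tools the export is
# usually driven through set_device('neuroml2', filename=...), which calls
# into this method internally. The direct call below, including the exporter
# instance name, is an assumption for illustration only.
#
#     exporter.create_lems_model(network=net,
#                                namespace={'tau': 10*ms},
#                                initializers={'v': -65*mV})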
def build(self, directory='output', compile=True, run=True, debug=False,
          clean=True, with_output=True, native=True,
          additional_source_files=None, additional_header_files=None,
          main_includes=None, run_includes=None, run_args=None, **kwds):
    '''
    Build the project

    TODO: more details

    Parameters
    ----------
    directory : str
        The output directory to write the project to, any existing files
        will be overwritten.
    compile : bool
        Whether or not to attempt to compile the project
    run : bool
        Whether or not to attempt to run the built project if it
        successfully builds.
    debug : bool
        Whether to compile in debug mode.
    with_output : bool
        Whether or not to show the ``stdout`` of the built program when run.
    native : bool
        Whether or not to compile for the current machine's architecture
        (best for speed, but not portable)
    clean : bool
        Whether or not to clean the project before building
    additional_source_files : list of str
        A list of additional ``.cpp`` files to include in the build.
    additional_header_files : list of str
        A list of additional ``.h`` files to include in the build.
    main_includes : list of str
        A list of additional header files to include in ``main.cpp``.
    run_includes : list of str
        A list of additional header files to include in ``run.cpp``.
    '''
    renames = {'project_dir': 'directory',
               'compile_project': 'compile',
               'run_project': 'run'}
    if len(kwds):
        msg = ''
        for kwd in kwds:
            if kwd in renames:
                msg += ("Keyword argument '%s' has been renamed to "
                        "'%s'. ") % (kwd, renames[kwd])
            else:
                msg += "Unknown keyword argument '%s'. " % kwd
        raise TypeError(msg)

    if additional_source_files is None:
        additional_source_files = []
    if additional_header_files is None:
        additional_header_files = []
    if main_includes is None:
        main_includes = []
    if run_includes is None:
        run_includes = []
    if run_args is None:
        run_args = []
    self.project_dir = directory
    ensure_directory(directory)

    compiler, extra_compile_args = get_compiler_and_args()
    compiler_flags = ' '.join(extra_compile_args)

    for d in ['code_objects', 'results', 'static_arrays']:
        ensure_directory(os.path.join(directory, d))

    writer = CPPWriter(directory)

    # Get the number of threads if specified in an openmp context
    nb_threads = prefs.devices.cpp_standalone.openmp_threads
    # If the number is negative, we need to throw an error
    if nb_threads < 0:
        raise ValueError('The number of OpenMP threads cannot be negative!')

    logger.debug("Writing C++ standalone project to directory " +
                 os.path.normpath(directory))
    if nb_threads > 0:
        logger.warn("OpenMP code is not yet well tested, and may be inaccurate.",
                    "openmp", once=True)
        logger.debug("Using OpenMP with %d threads" % nb_threads)
        for codeobj in self.code_objects.itervalues():
            if not 'IS_OPENMP_COMPATIBLE' in codeobj.template_source:
                raise RuntimeError(("Code object '%s' uses the template %s "
                                    "which is not compatible with "
                                    "OpenMP.") % (codeobj.name,
                                                  codeobj.template_name))

    arange_arrays = sorted([(var, start)
                            for var, start in self.arange_arrays.iteritems()],
                           key=lambda (var, start): var.name)

    # # Find np arrays in the namespaces and convert them into static
    # # arrays. Hopefully they are correctly used in the code: For example,
    # # this works for the namespaces for functions with C++ (e.g. TimedArray
    # # treats it as a C array) but does not work in places that are
    # # implicitly vectorized (state updaters, resets, etc.). But arrays
    # # shouldn't be used there anyway.
    for code_object in self.code_objects.itervalues():
        for name, value in code_object.variables.iteritems():
            if isinstance(value, np.ndarray):
                self.static_arrays[name] = value

    # write the static arrays
    logger.debug("static arrays: " + str(sorted(self.static_arrays.keys())))
    static_array_specs = []
    for name, arr in sorted(self.static_arrays.items()):
        arr.tofile(os.path.join(directory, 'static_arrays', name))
        static_array_specs.append((name, c_data_type(arr.dtype),
                                   arr.size, name))

    # Write the global objects
    networks = [net() for net in Network.__instances__()
                if net().name != '_fake_network']
    synapses = []
    for net in networks:
        net_synapses = [s for s in net.objects if isinstance(s, Synapses)]
        synapses.extend(net_synapses)
        # We don't currently support pathways with scalar delays
        for synapse_obj in net_synapses:
            for pathway in synapse_obj._pathways:
                if not isinstance(pathway.variables['delay'],
                                  DynamicArrayVariable):
                    error_msg = ('The "%s" pathway uses a scalar '
                                 'delay (instead of a delay per synapse). '
                                 'This is not yet supported. Do not '
                                 'specify a delay in the Synapses(...) '
                                 'call but instead set its delay attribute '
                                 'afterwards.') % (pathway.name)
                    raise NotImplementedError(error_msg)

    # Not sure what the best place is to call Network.after_run -- at the
    # moment the only important thing it does is to clear the objects stored
    # in magic_network. If this is not done, this might lead to problems
    # for repeated runs of standalone (e.g. in the test suite).
    for net in networks:
        net.after_run()

    arr_tmp = CPPStandaloneCodeObject.templater.objects(
        None, None,
        array_specs=self.arrays,
        dynamic_array_specs=self.dynamic_arrays,
        dynamic_array_2d_specs=self.dynamic_arrays_2d,
        zero_arrays=self.zero_arrays,
        arange_arrays=arange_arrays,
        synapses=synapses,
        clocks=self.clocks,
        static_array_specs=static_array_specs,
        networks=networks)
    writer.write('objects.*', arr_tmp)

    main_lines = []
    procedures = [('', main_lines)]
    runfuncs = {}
    for func, args in self.main_queue:
        if func == 'run_code_object':
            codeobj, = args
            main_lines.append('_run_%s();' % codeobj.name)
        elif func == 'run_network':
            net, netcode = args
            main_lines.extend(netcode)
        elif func == 'set_by_array':
            arrayname, staticarrayname = args
            code = '''
            {pragma}
            for(int i=0; i<_num_{staticarrayname}; i++)
            {{
                {arrayname}[i] = {staticarrayname}[i];
            }}
            '''.format(arrayname=arrayname, staticarrayname=staticarrayname,
                       pragma=openmp_pragma('static'))
            main_lines.extend(code.split('\n'))
        elif func == 'set_by_single_value':
            arrayname, item, value = args
            code = '{arrayname}[{item}] = {value};'.format(arrayname=arrayname,
                                                           item=item,
                                                           value=value)
            main_lines.extend([code])
        elif func == 'set_array_by_array':
            arrayname, staticarrayname_index, staticarrayname_value = args
            code = '''
            {pragma}
            for(int i=0; i<_num_{staticarrayname_index}; i++)
            {{
                {arrayname}[{staticarrayname_index}[i]] = {staticarrayname_value}[i];
            }}
            '''.format(arrayname=arrayname,
                       staticarrayname_index=staticarrayname_index,
                       staticarrayname_value=staticarrayname_value,
                       pragma=openmp_pragma('static'))
            main_lines.extend(code.split('\n'))
        elif func == 'insert_code':
            main_lines.append(args)
        elif func == 'start_run_func':
            name, include_in_parent = args
            if include_in_parent:
                main_lines.append('%s();' % name)
            main_lines = []
            procedures.append((name, main_lines))
        elif func == 'end_run_func':
            name, include_in_parent = args
            name, main_lines = procedures.pop(-1)
            runfuncs[name] = main_lines
            name, main_lines = procedures[-1]
        else:
            raise NotImplementedError("Unknown main queue function type " +
                                      func)

    # generate the finalisations
    for codeobj in self.code_objects.itervalues():
        if hasattr(codeobj.code, 'main_finalise'):
            main_lines.append(codeobj.code.main_finalise)

    # Generate data for non-constant values
    code_object_defs = defaultdict(list)
    for codeobj in self.code_objects.itervalues():
        lines = []
        for k, v in codeobj.variables.iteritems():
            if isinstance(v, AttributeVariable):
                # We assume all attributes are implemented as property-like methods
                line = 'const {c_type} {varname} = {objname}.{attrname}();'
                lines.append(line.format(c_type=c_data_type(v.dtype),
                                         varname=k, objname=v.obj.name,
                                         attrname=v.attribute))
            elif isinstance(v, ArrayVariable):
                try:
                    if isinstance(v, DynamicArrayVariable):
                        if v.dimensions == 1:
                            dyn_array_name = self.dynamic_arrays[v]
                            array_name = self.arrays[v]
                            line = '{c_type}* const {array_name} = &{dyn_array_name}[0];'
                            line = line.format(c_type=c_data_type(v.dtype),
                                               array_name=array_name,
                                               dyn_array_name=dyn_array_name)
                            lines.append(line)
                            line = 'const int _num{k} = {dyn_array_name}.size();'
                            line = line.format(k=k,
                                               dyn_array_name=dyn_array_name)
                            lines.append(line)
                    else:
                        lines.append('const int _num%s = %s;' % (k, v.size))
                except TypeError:
                    pass
        for line in lines:
            # Sometimes an array is referred to by two different keys in our
            # dictionary -- make sure to never add a line twice
            if not line in code_object_defs[codeobj.name]:
                code_object_defs[codeobj.name].append(line)

    # Generate the code objects
    for codeobj in self.code_objects.itervalues():
        ns = codeobj.variables
        # TODO: fix these freeze/CONSTANTS hacks somehow - they work but not elegant.
        code = freeze(codeobj.code.cpp_file, ns)
        code = code.replace('%CONSTANTS%',
                            '\n'.join(code_object_defs[codeobj.name]))
        code = '#include "objects.h"\n' + code
        writer.write('code_objects/' + codeobj.name + '.cpp', code)
        writer.write('code_objects/' + codeobj.name + '.h',
                     codeobj.code.h_file)

    # The code_objects are passed in the right order to run them because they were
    # sorted by the Network object. To support multiple clocks we'll need to be
    # smarter about that.
    main_tmp = CPPStandaloneCodeObject.templater.main(
        None, None,
        main_lines=main_lines,
        code_objects=self.code_objects.values(),
        report_func=self.report_func,
        dt=float(defaultclock.dt),
        additional_headers=main_includes,
        )
    writer.write('main.cpp', main_tmp)

    if compiler == 'msvc':
        std_move = 'std::move'
    else:
        std_move = ''
    network_tmp = CPPStandaloneCodeObject.templater.network(None, None,
                                                            std_move=std_move)
    writer.write('network.*', network_tmp)

    synapses_classes_tmp = CPPStandaloneCodeObject.templater.synapses_classes(None, None)
    writer.write('synapses_classes.*', synapses_classes_tmp)

    # Generate the run functions
    run_tmp = CPPStandaloneCodeObject.templater.run(
        None, None,
        run_funcs=runfuncs,
        code_objects=self.code_objects.values(),
        additional_headers=run_includes,
        )
    writer.write('run.*', run_tmp)

    # Copy the brianlib directory
    brianlib_dir = os.path.join(
        os.path.split(inspect.getsourcefile(CPPStandaloneCodeObject))[0],
        'brianlib')
    brianlib_files = copy_directory(brianlib_dir,
                                    os.path.join(directory, 'brianlib'))
    for file in brianlib_files:
        if file.lower().endswith('.cpp'):
            writer.source_files.append('brianlib/' + file)
        elif file.lower().endswith('.h'):
            writer.header_files.append('brianlib/' + file)

    # Copy the CSpikeQueue implementation
    shutil.copy2(os.path.join(os.path.split(inspect.getsourcefile(Synapses))[0],
                              'cspikequeue.cpp'),
                 os.path.join(directory, 'brianlib', 'spikequeue.h'))
    shutil.copy2(os.path.join(os.path.split(inspect.getsourcefile(Synapses))[0],
                              'stdint_compat.h'),
                 os.path.join(directory, 'brianlib', 'stdint_compat.h'))

    writer.source_files.extend(additional_source_files)
    writer.header_files.extend(additional_header_files)

    if compiler == 'msvc':
        if native:
            arch_flag = ''
            try:
                from cpuinfo import cpuinfo
                res = cpuinfo.get_cpu_info()
                if 'sse' in res['flags']:
                    arch_flag = '/arch:SSE'
                if 'sse2' in res['flags']:
                    arch_flag = '/arch:SSE2'
            except ImportError:
                logger.warn('Native flag for MSVC compiler requires '
                            'installation of the py-cpuinfo module')
            compiler_flags += ' ' + arch_flag
        if nb_threads > 1:
            openmp_flag = '/openmp'
        else:
            openmp_flag = ''
        # Generate the visual studio makefile
        source_bases = [fname.replace('.cpp', '').replace('/', '\\')
                        for fname in writer.source_files]
        win_makefile_tmp = CPPStandaloneCodeObject.templater.win_makefile(
            None, None,
            source_bases=source_bases,
            compiler_flags=compiler_flags,
            openmp_flag=openmp_flag,
            )
        writer.write('win_makefile', win_makefile_tmp)
    else:
        # Generate the makefile
        if os.name == 'nt':
            rm_cmd = 'del *.o /s\n\tdel main.exe $(DEPS)'
        else:
            rm_cmd = 'rm $(OBJS) $(PROGRAM) $(DEPS)'
        makefile_tmp = CPPStandaloneCodeObject.templater.makefile(
            None, None,
            source_files=' '.join(writer.source_files),
            header_files=' '.join(writer.header_files),
            compiler_flags=compiler_flags,
            rm_cmd=rm_cmd)
        writer.write('makefile', makefile_tmp)

    # build the project
    if compile:
        with in_directory(directory):
            if compiler == 'msvc':
                # TODO: handle debug
                if debug:
                    logger.warn('Debug flag currently ignored for MSVC')
                vcvars_search_paths = [
                    # futureproofing!
                    r'c:\Program Files\Microsoft Visual Studio 15.0\VC\vcvarsall.bat',
                    r'c:\Program Files (x86)\Microsoft Visual Studio 15.0\VC\vcvarsall.bat',
                    r'c:\Program Files\Microsoft Visual Studio 14.0\VC\vcvarsall.bat',
                    r'c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat',
                    r'c:\Program Files\Microsoft Visual Studio 13.0\VC\vcvarsall.bat',
                    r'c:\Program Files (x86)\Microsoft Visual Studio 13.0\VC\vcvarsall.bat',
                    r'c:\Program Files\Microsoft Visual Studio 12.0\VC\vcvarsall.bat',
                    r'c:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat',
                    r'c:\Program Files\Microsoft Visual Studio 11.0\VC\vcvarsall.bat',
                    r'c:\Program Files (x86)\Microsoft Visual Studio 11.0\VC\vcvarsall.bat',
                    r'c:\Program Files\Microsoft Visual Studio 10.0\VC\vcvarsall.bat',
                    r'c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\vcvarsall.bat',
                    ]
                vcvars_loc = prefs['codegen.cpp.msvc_vars_location']
                if vcvars_loc == '':
                    for fname in vcvars_search_paths:
                        if os.path.exists(fname):
                            vcvars_loc = fname
                            break
                if vcvars_loc == '':
                    raise IOError("Cannot find vcvarsall.bat on standard "
                                  "search path.")
                # TODO: copy vcvars and make replacements for 64 bit automatically
                arch_name = prefs['codegen.cpp.msvc_architecture']
                if arch_name == '':
                    mach = platform.machine()
                    if mach == 'AMD64':
                        arch_name = 'x86_amd64'
                    else:
                        arch_name = 'x86'
                vcvars_cmd = '"{vcvars_loc}" {arch_name}'.format(
                    vcvars_loc=vcvars_loc, arch_name=arch_name)
                make_cmd = 'nmake /f win_makefile'
                if os.path.exists('winmake.log'):
                    os.remove('winmake.log')
                with std_silent(debug):
                    if clean:
                        os.system('%s >>winmake.log 2>&1 && %s clean >>winmake.log 2>&1' % (vcvars_cmd, make_cmd))
                    x = os.system('%s >>winmake.log 2>&1 && %s >>winmake.log 2>&1' % (vcvars_cmd, make_cmd))
                    if x != 0:
                        raise RuntimeError("Project compilation failed")
            else:
                with std_silent(debug):
                    if clean:
                        os.system('make clean')
                    if debug:
                        x = os.system('make debug')
                    elif native:
                        x = os.system('make native')
                    else:
                        x = os.system('make')
                    if x != 0:
                        raise RuntimeError("Project compilation failed")
            if run:
                if not with_output:
                    stdout = open(os.devnull, 'w')
                else:
                    stdout = None
                if os.name == 'nt':
                    x = subprocess.call(['main'] + run_args, stdout=stdout)
                else:
                    x = subprocess.call(['./main'] + run_args, stdout=stdout)
                if x:
                    raise RuntimeError("Project run failed")
                self.has_been_run = True
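# A hedged usage sketch of how this build() is typically reached from user
# code in C++ standalone mode; the keyword names are the ones documented
# above, while the example model itself is an illustrative assumption.
#
#     from brian2 import *
#     set_device('cpp_standalone')
#     G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1')
#     run(100*ms)
#     device.build(directory='output', compile=True, run=True, debug=False)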
def test_namespace_warnings():
    G = NeuronGroup(1, '''x : 1
                          y : 1''',
                    # unique names to get warnings every time:
                    name='neurongroup_'+str(uuid.uuid4()).replace('-', '_'))
    # conflicting variable in namespace
    y = 5
    with catch_logs() as l:
        G.x = 'y'
        assert len(l) == 1, 'got %s as warnings' % str(l)
        assert l[0][1].endswith('.resolution_conflict')
    del y

    # conflicting variables with special meaning
    i = 5
    N = 3
    with catch_logs() as l:
        G.x = 'i / N'
        assert len(l) == 2, 'got %s as warnings' % str(l)
        assert l[0][1].endswith('.resolution_conflict')
        assert l[1][1].endswith('.resolution_conflict')
    del i
    del N

    # conflicting variables in equations
    y = 5*Hz
    G = NeuronGroup(1, '''y : Hz
                          dx/dt = y : 1''',
                    # unique names to get warnings every time:
                    name='neurongroup_'+str(uuid.uuid4()).replace('-', '_'))
    net = Network(G)
    with catch_logs() as l:
        net.run(0*ms)
        assert len(l) == 1, 'got %s as warnings' % str(l)
        assert l[0][1].endswith('.resolution_conflict')
    del y

    i = 5
    # i is referring to the neuron number:
    G = NeuronGroup(1, 'dx/dt = i*Hz : 1',
                    # unique names to get warnings every time:
                    name='neurongroup_'+str(uuid.uuid4()).replace('-', '_'))
    net = Network(G)
    with catch_logs() as l:
        net.run(0*ms)
        assert len(l) == 1, 'got %s as warnings' % str(l)
        assert l[0][1].endswith('.resolution_conflict')
    del i

    # Variables that are used internally but not in equations should not raise
    # a warning
    N = 3
    i = 5
    dt = 1*ms
    G = NeuronGroup(1, 'dx/dt = x/(10*ms) : 1',
                    # unique names to get warnings every time:
                    name='neurongroup_'+str(uuid.uuid4()).replace('-', '_'))
    net = Network(G)
    with catch_logs() as l:
        net.run(0*ms)
        assert len(l) == 0, 'got %s as warnings' % str(l)
def build(self, project_dir='output', compile_project=True, run_project=False,
          debug=True, with_output=True):
    ensure_directory(project_dir)
    for d in ['code_objects', 'results', 'static_arrays']:
        ensure_directory(os.path.join(project_dir, d))

    logger.debug("Writing C++ standalone project to directory " +
                 os.path.normpath(project_dir))

    # # Find numpy arrays in the namespaces and convert them into static
    # # arrays. Hopefully they are correctly used in the code: For example,
    # # this works for the namespaces for functions with C++ (e.g. TimedArray
    # # treats it as a C array) but does not work in places that are
    # # implicitly vectorized (state updaters, resets, etc.). But arrays
    # # shouldn't be used there anyway.
    for code_object in self.code_objects.itervalues():
        for name, value in code_object.variables.iteritems():
            if isinstance(value, numpy.ndarray):
                self.static_arrays[name] = value

    # write the static arrays
    logger.debug("static arrays: " + str(sorted(self.static_arrays.keys())))
    static_array_specs = []
    for name, arr in self.static_arrays.iteritems():
        arr.tofile(os.path.join(project_dir, 'static_arrays', name))
        static_array_specs.append((name, c_data_type(arr.dtype),
                                   arr.size, name))

    # Write the global objects
    networks = [net() for net in Network.__instances__()
                if net().name != '_fake_network']
    synapses = [S() for S in Synapses.__instances__()]
    arr_tmp = CPPStandaloneCodeObject.templater.objects(
        None,
        array_specs=self.arrays,
        dynamic_array_specs=self.dynamic_arrays,
        dynamic_array_2d_specs=self.dynamic_arrays_2d,
        zero_arrays=self.zero_arrays,
        arange_arrays=self.arange_arrays,
        synapses=synapses,
        clocks=self.clocks,
        static_array_specs=static_array_specs,
        networks=networks,
        )
    logger.debug("objects: " + str(arr_tmp))
    open(os.path.join(project_dir, 'objects.cpp'), 'w').write(arr_tmp.cpp_file)
    open(os.path.join(project_dir, 'objects.h'), 'w').write(arr_tmp.h_file)

    main_lines = []
    for func, args in self.main_queue:
        if func == 'run_code_object':
            codeobj, = args
            main_lines.append('_run_%s(t);' % codeobj.name)
        elif func == 'run_network':
            net, netcode = args
            main_lines.extend(netcode)
        elif func == 'set_by_array':
            arrayname, staticarrayname = args
            code = '''
            for(int i=0; i<_num_{staticarrayname}; i++)
            {{
                {arrayname}[i] = {staticarrayname}[i];
            }}
            '''.format(arrayname=arrayname, staticarrayname=staticarrayname)
            main_lines.extend(code.split('\n'))
        elif func == 'set_array_by_array':
            arrayname, staticarrayname_index, staticarrayname_value = args
            code = '''
            for(int i=0; i<_num_{staticarrayname_index}; i++)
            {{
                {arrayname}[{staticarrayname_index}[i]] = {staticarrayname_value}[i];
            }}
            '''.format(arrayname=arrayname,
                       staticarrayname_index=staticarrayname_index,
                       staticarrayname_value=staticarrayname_value)
            main_lines.extend(code.split('\n'))
        elif func == 'insert_code':
            main_lines.append(args)
        else:
            raise NotImplementedError("Unknown main queue function type " +
                                      func)

    # generate the finalisations
    for codeobj in self.code_objects.itervalues():
        if hasattr(codeobj.code, 'main_finalise'):
            main_lines.append(codeobj.code.main_finalise)

    # Generate data for non-constant values
    handled_arrays = defaultdict(set)
    code_object_defs = defaultdict(list)
    for codeobj in self.code_objects.itervalues():
        for k, v in codeobj.variables.iteritems():
            if k == 't':
                pass
            elif isinstance(v, Subexpression):
                pass
            elif isinstance(v, AttributeVariable):
                c_type = c_data_type(v.dtype)
                # TODO: Handle dt in the correct way
                if v.attribute == 'dt_':
                    code = ('const {c_type} {k} = '
                            '{value};').format(c_type=c_type, k=k,
                                               value=v.get_value())
                else:
                    code = ('const {c_type} {k} = '
                            '{name}.{attribute};').format(c_type=c_type, k=k,
                                                          name=v.obj.name,
                                                          attribute=v.attribute)
                code_object_defs[codeobj.name].append(code)
            elif isinstance(v, ArrayVariable):
                try:
                    if isinstance(v, DynamicArrayVariable):
                        if v.dimensions == 1:
                            dyn_array_name = self.dynamic_arrays[v]
                            array_name = self.arrays[v]
                            code_object_defs[codeobj.name].append(
                                '{c_type}* const {array_name} = &{dyn_array_name}[0];'.format(
                                    c_type=c_data_type(v.dtype),
                                    array_name=array_name,
                                    dyn_array_name=dyn_array_name))
                            code_object_defs[codeobj.name].append(
                                'const int _num{k} = {dyn_array_name}.size();'.format(
                                    k=k, dyn_array_name=dyn_array_name))
                    else:
                        code_object_defs[codeobj.name].append(
                            'const int _num%s = %s;' % (k, v.size))
                except TypeError:
                    pass

    # Generate the code objects
    for codeobj in self.code_objects.itervalues():
        ns = codeobj.variables
        # TODO: fix these freeze/CONSTANTS hacks somehow - they work but not elegant.
        code = freeze(codeobj.code.cpp_file, ns)
        code = code.replace('%CONSTANTS%',
                            '\n'.join(code_object_defs[codeobj.name]))
        code = '#include "objects.h"\n' + code
        open(os.path.join(project_dir, 'code_objects',
                          codeobj.name + '.cpp'), 'w').write(code)
        open(os.path.join(project_dir, 'code_objects',
                          codeobj.name + '.h'), 'w').write(codeobj.code.h_file)

    # The code_objects are passed in the right order to run them because they were
    # sorted by the Network object. To support multiple clocks we'll need to be
    # smarter about that.
    main_tmp = CPPStandaloneCodeObject.templater.main(
        None,
        main_lines=main_lines,
        code_objects=self.code_objects.values(),
        dt=float(defaultclock.dt),
        )
    logger.debug("main: " + str(main_tmp))
    open(os.path.join(project_dir, 'main.cpp'), 'w').write(main_tmp)

    # Copy the brianlib directory
    brianlib_dir = os.path.join(
        os.path.split(inspect.getsourcefile(CPPStandaloneCodeObject))[0],
        'brianlib')
    copy_directory(brianlib_dir, os.path.join(project_dir, 'brianlib'))

    # Copy the CSpikeQueue implementation
    shutil.copy(os.path.join(os.path.split(inspect.getsourcefile(Synapses))[0],
                             'cspikequeue.cpp'),
                os.path.join(project_dir, 'brianlib', 'spikequeue.h'))

    # build the project
    if compile_project:
        with in_directory(project_dir):
            if debug:
                x = os.system('g++ -I. -g *.cpp code_objects/*.cpp brianlib/*.cpp -o main')
            else:
                x = os.system('g++ -I. -O3 -ffast-math -march=native *.cpp code_objects/*.cpp brianlib/*.cpp -o main')
            if x == 0:
                if run_project:
                    if not with_output:
                        stdout = open(os.devnull, 'w')
                    else:
                        stdout = None
                    if os.name == 'nt':
                        x = subprocess.call('main', stdout=stdout)
                    else:
                        x = subprocess.call('./main', stdout=stdout)
                    if x:
                        raise RuntimeError("Project run failed")
            else:
                raise RuntimeError("Project compilation failed")
def build(self, project_dir='output', compile_project=True, run_project=False,
          debug=True, with_output=True, native=True,
          additional_source_files=None, additional_header_files=None,
          main_includes=None, run_includes=None, run_args=None,
          ):
    '''
    Build the project

    TODO: more details

    Parameters
    ----------
    project_dir : str
        The output directory to write the project to, any existing files
        will be overwritten.
    compile_project : bool
        Whether or not to attempt to compile the project using GNU make.
    run_project : bool
        Whether or not to attempt to run the built project if it
        successfully builds.
    debug : bool
        Whether to compile in debug mode.
    with_output : bool
        Whether or not to show the ``stdout`` of the built program when run.
    native : bool
        Whether or not to compile natively using the ``--march=native``
        gcc option.
    additional_source_files : list of str
        A list of additional ``.cpp`` files to include in the build.
    additional_header_files : list of str
        A list of additional ``.h`` files to include in the build.
    main_includes : list of str
        A list of additional header files to include in ``main.cpp``.
    run_includes : list of str
        A list of additional header files to include in ``run.cpp``.
    '''
    if additional_source_files is None:
        additional_source_files = []
    if additional_header_files is None:
        additional_header_files = []
    if main_includes is None:
        main_includes = []
    if run_includes is None:
        run_includes = []
    if run_args is None:
        run_args = []
    self.project_dir = project_dir
    ensure_directory(project_dir)
    for d in ['code_objects', 'results', 'static_arrays']:
        ensure_directory(os.path.join(project_dir, d))

    writer = CPPWriter(project_dir)

    logger.debug("Writing C++ standalone project to directory " +
                 os.path.normpath(project_dir))

    arange_arrays = sorted([(var, start)
                            for var, start in self.arange_arrays.iteritems()],
                           key=lambda (var, start): var.name)

    # # Find np arrays in the namespaces and convert them into static
    # # arrays. Hopefully they are correctly used in the code: For example,
    # # this works for the namespaces for functions with C++ (e.g. TimedArray
    # # treats it as a C array) but does not work in places that are
    # # implicitly vectorized (state updaters, resets, etc.). But arrays
    # # shouldn't be used there anyway.
    for code_object in self.code_objects.itervalues():
        for name, value in code_object.variables.iteritems():
            if isinstance(value, np.ndarray):
                self.static_arrays[name] = value

    # write the static arrays
    logger.debug("static arrays: " + str(sorted(self.static_arrays.keys())))
    static_array_specs = []
    for name, arr in sorted(self.static_arrays.items()):
        arr.tofile(os.path.join(project_dir, 'static_arrays', name))
        static_array_specs.append((name, c_data_type(arr.dtype),
                                   arr.size, name))

    # Write the global objects
    networks = [net() for net in Network.__instances__()
                if net().name != '_fake_network']
    synapses = [S() for S in Synapses.__instances__()]
    arr_tmp = CPPStandaloneCodeObject.templater.objects(
        None, None,
        array_specs=self.arrays,
        dynamic_array_specs=self.dynamic_arrays,
        dynamic_array_2d_specs=self.dynamic_arrays_2d,
        zero_arrays=self.zero_arrays,
        arange_arrays=arange_arrays,
        synapses=synapses,
        clocks=self.clocks,
        static_array_specs=static_array_specs,
        networks=networks,
        )
    writer.write('objects.*', arr_tmp)

    main_lines = []
    procedures = [('', main_lines)]
    runfuncs = {}
    for func, args in self.main_queue:
        if func == 'run_code_object':
            codeobj, = args
            main_lines.append('_run_%s();' % codeobj.name)
        elif func == 'run_network':
            net, netcode = args
            main_lines.extend(netcode)
        elif func == 'set_by_array':
            arrayname, staticarrayname = args
            code = '''
            for(int i=0; i<_num_{staticarrayname}; i++)
            {{
                {arrayname}[i] = {staticarrayname}[i];
            }}
            '''.format(arrayname=arrayname, staticarrayname=staticarrayname)
            main_lines.extend(code.split('\n'))
        elif func == 'set_array_by_array':
            arrayname, staticarrayname_index, staticarrayname_value = args
            code = '''
            for(int i=0; i<_num_{staticarrayname_index}; i++)
            {{
                {arrayname}[{staticarrayname_index}[i]] = {staticarrayname_value}[i];
            }}
            '''.format(arrayname=arrayname,
                       staticarrayname_index=staticarrayname_index,
                       staticarrayname_value=staticarrayname_value)
            main_lines.extend(code.split('\n'))
        elif func == 'insert_code':
            main_lines.append(args)
        elif func == 'start_run_func':
            name, include_in_parent = args
            if include_in_parent:
                main_lines.append('%s();' % name)
            main_lines = []
            procedures.append((name, main_lines))
        elif func == 'end_run_func':
            name, include_in_parent = args
            name, main_lines = procedures.pop(-1)
            runfuncs[name] = main_lines
            name, main_lines = procedures[-1]
        else:
            raise NotImplementedError("Unknown main queue function type " +
                                      func)

    # generate the finalisations
    for codeobj in self.code_objects.itervalues():
        if hasattr(codeobj.code, 'main_finalise'):
            main_lines.append(codeobj.code.main_finalise)

    # Generate data for non-constant values
    code_object_defs = defaultdict(list)
    for codeobj in self.code_objects.itervalues():
        lines = []
        for k, v in codeobj.variables.iteritems():
            if isinstance(v, AttributeVariable):
                # We assume all attributes are implemented as property-like methods
                line = 'const {c_type} {varname} = {objname}.{attrname}();'
                lines.append(line.format(c_type=c_data_type(v.dtype),
                                         varname=k, objname=v.obj.name,
                                         attrname=v.attribute))
            elif isinstance(v, ArrayVariable):
                try:
                    if isinstance(v, DynamicArrayVariable):
                        if v.dimensions == 1:
                            dyn_array_name = self.dynamic_arrays[v]
                            array_name = self.arrays[v]
                            line = '{c_type}* const {array_name} = &{dyn_array_name}[0];'
                            line = line.format(c_type=c_data_type(v.dtype),
                                               array_name=array_name,
                                               dyn_array_name=dyn_array_name)
                            lines.append(line)
                            line = 'const int _num{k} = {dyn_array_name}.size();'
                            line = line.format(k=k,
                                               dyn_array_name=dyn_array_name)
                            lines.append(line)
                    else:
                        lines.append('const int _num%s = %s;' % (k, v.size))
                except TypeError:
                    pass
        for line in lines:
            # Sometimes an array is referred to by two different keys in our
            # dictionary -- make sure to never add a line twice
            if not line in code_object_defs[codeobj.name]:
                code_object_defs[codeobj.name].append(line)

    # Generate the code objects
    for codeobj in self.code_objects.itervalues():
        ns = codeobj.variables
        # TODO: fix these freeze/CONSTANTS hacks somehow - they work but not elegant.
        code = freeze(codeobj.code.cpp_file, ns)
        code = code.replace('%CONSTANTS%',
                            '\n'.join(code_object_defs[codeobj.name]))
        code = '#include "objects.h"\n' + code
        writer.write('code_objects/' + codeobj.name + '.cpp', code)
        writer.write('code_objects/' + codeobj.name + '.h',
                     codeobj.code.h_file)

    # The code_objects are passed in the right order to run them because they were
    # sorted by the Network object. To support multiple clocks we'll need to be
    # smarter about that.
    main_tmp = CPPStandaloneCodeObject.templater.main(
        None, None,
        main_lines=main_lines,
        code_objects=self.code_objects.values(),
        report_func=self.report_func,
        dt=float(defaultclock.dt),
        additional_headers=main_includes,
        )
    writer.write('main.cpp', main_tmp)

    # Generate the run functions
    run_tmp = CPPStandaloneCodeObject.templater.run(
        None, None,
        run_funcs=runfuncs,
        code_objects=self.code_objects.values(),
        additional_headers=run_includes,
        )
    writer.write('run.*', run_tmp)

    # Copy the brianlib directory
    brianlib_dir = os.path.join(
        os.path.split(inspect.getsourcefile(CPPStandaloneCodeObject))[0],
        'brianlib')
    brianlib_files = copy_directory(brianlib_dir,
                                    os.path.join(project_dir, 'brianlib'))
    for file in brianlib_files:
        if file.lower().endswith('.cpp'):
            writer.source_files.append('brianlib/' + file)
        elif file.lower().endswith('.h'):
            writer.header_files.append('brianlib/' + file)

    # Copy the CSpikeQueue implementation
    spikequeue_h = os.path.join(project_dir, 'brianlib', 'spikequeue.h')
    shutil.copy2(os.path.join(os.path.split(inspect.getsourcefile(Synapses))[0],
                              'cspikequeue.cpp'),
                 spikequeue_h)
    #writer.header_files.append(spikequeue_h)

    writer.source_files.extend(additional_source_files)
    writer.header_files.extend(additional_header_files)

    # Generate the makefile
    if os.name == 'nt':
        rm_cmd = 'del'
    else:
        rm_cmd = 'rm'
    makefile_tmp = CPPStandaloneCodeObject.templater.makefile(
        None, None,
        source_files=' '.join(writer.source_files),
        header_files=' '.join(writer.header_files),
        rm_cmd=rm_cmd)
    writer.write('makefile', makefile_tmp)

    # build the project
    if compile_project:
        with in_directory(project_dir):
            if debug:
                x = os.system('make debug')
            elif native:
                x = os.system('make native')
            else:
                x = os.system('make')
            if x == 0:
                if run_project:
                    if not with_output:
                        stdout = open(os.devnull, 'w')
                    else:
                        stdout = None
                    if os.name == 'nt':
                        x = subprocess.call(['main'] + run_args, stdout=stdout)
                    else:
                        x = subprocess.call(['./main'] + run_args, stdout=stdout)
                    if x:
                        raise RuntimeError("Project run failed")
                    self.has_been_run = True
            else:
                raise RuntimeError("Project compilation failed")
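# A hedged usage sketch for this older build() signature (keyword names taken
# from the docstring above; the device setup around the call is assumed):
#
#     device.build(project_dir='output', compile_project=True,
#                  run_project=True, debug=False)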
def before_run(self, run_namespace=None, level=0):
    self._update_magic_objects(level=level+1)
    Network.before_run(self, run_namespace, level=level+1)
                     inputParsName)
if os.path.isdir(opDir):
    ch = input('Results already exist at {}. Delete?(y/n):'.format(opDir))
    if ch == 'y':
        shutil.rmtree(opDir)
os.makedirs(opDir)

period265 = (1 / 265)
inputPars = getattr(inputParsList, inputParsName)

JO = JOSpikes265(nOutputs=1, simSettleTime=simSettleTime, **inputPars)
dlint2.addExp2Synapses(name='JO', nSyn=1, sourceNG=JO.JOSGG, sourceInd=0,
                       **getattr(synapsePropsList, NeuronSynapseProps))

net = Network()
net.add(JO.JOSGG)
dlint2.addToNetwork(net)
defaultclock.dt = simStepSize
totalSimDur = simDuration + simSettleTime
net.run(totalSimDur, report='text')

simT, memV = dlint2.getMemVTrace()
spikeTimes = dlint2.getSpikes()
fig, axs = plt.subplots(nrows=2, figsize=(10, 6.25), sharex='col')
axs[0].plot(simT / units.ms, memV / units.mV)
spikesY = memV.min() + 1.05 * (memV.max() - memV.min())
axs[0].plot(spikeTimes / units.ms,
            [spikesY / units.mV] * spikeTimes.shape[0], 'k^')
axs[0].set_ylabel('DLInt1 \nmemV (mV)')
axs[0].set_xlim([simSettleTime / units.ms - 50,
                 totalSimDur / units.ms + 50])
def before_run(self, namespace):
    self._update_magic_objects()
    Network.before_run(self, namespace)
    ch = input('Results already exist at {}. Delete?(y/n):'.format(OPNixFile))
    if ch == 'y':
        os.remove(OPNixFile)
        if os.path.isfile(opFileDLInt1):
            os.remove(opFileDLInt1)
        if os.path.isfile(opFileDLInt2):
            os.remove(opFileDLInt2)
    else:
        sys.exit('User Abort!')
elif not os.path.isdir(opDir):
    os.makedirs(opDir)

inputPars = getattr(inputParsList, inputParsName)

net = Network()
JO = JOSpikes265(nOutputs=1, simSettleTime=simSettleTime, **inputPars)
net.add(JO.JOSGG)

DLInt1PropsDict = getattr(AdExpPars, DLInt1ModelProps)
dlint1 = VSNeuron(**AdExp, inits=DLInt1PropsDict, name='dlint1')
dlint1.recordSpikes()
dlint1.recordMembraneV()

if DLInt1SynapsePropsE:
    synPropsEDLInt1 = getattr(synapsePropsList, DLInt1SynapsePropsE)
    dlint1.addSynapse(synName="ExiJO", sourceNG=JO.JOSGG, **exp2Syn,
                      synParsInits=synPropsEDLInt1,
                      synStateInits=exp2SynStateInits,