def test_time_after_run():
    """Clock and network times must advance with each run, even though the
    standalone code has not been built/executed yet (build happens later)."""
    set_device('cpp_standalone', build_on_run=False)
    group = NeuronGroup(10, 'dv/dt = -v/(10*ms) : 1')
    net = Network(group)

    def check_times(expected):
        # The default clock, the group and the network must all agree
        assert_allclose(defaultclock.t, expected)
        assert_allclose(group.t, expected)
        assert_allclose(net.t, expected)

    assert_allclose(defaultclock.dt, 0.1 * ms)
    check_times(0. * ms)
    net.run(10 * ms)
    check_times(10. * ms)
    net.run(10 * ms)
    check_times(20. * ms)
    device.build(directory=None, with_output=False)
    # Everything should of course still be accessible after the build
    check_times(20. * ms)
    reset_device()
def test_cpp_standalone():
    """Run a complete standalone simulation and sanity-check spike counts."""
    set_device('cpp_standalone', build_on_run=False)
    # ---- model definition ----
    tau = 1 * ms
    eqs = '''
    dV/dt = (-40*mV-V)/tau : volt (unless refractory)
    '''
    N = 1000
    group = NeuronGroup(N, eqs,
                        reset='V=-60*mV',
                        threshold='V>-50*mV',
                        refractory=5 * ms,
                        name='gp')
    group.V = '-i*mV'
    spikes = SpikeMonitor(group)
    syn = Synapses(group, group, 'w : volt', on_pre='V += w')
    syn.connect('abs(i-j)<5 and i!=j')
    syn.w = 0.5 * mV
    syn.delay = '0*ms'
    net = Network(group, spikes, syn)
    net.run(100 * ms)
    device.build(directory=None, with_output=False)
    # Approximate bounds: depending on minor compilation details (e.g. whether
    # -ffast-math is on), the exact spike count may differ slightly.
    assert 17000 <= len(spikes.i) <= 18000
    assert len(spikes.t) == len(spikes.i)
    assert spikes.t[0] == 0.
    reset_device()
def test_multiple_connects():
    """Two separate connect() calls must accumulate synapses, not overwrite."""
    set_device('cpp_standalone', build_on_run=False)
    group = NeuronGroup(10, 'v:1')
    syn = Synapses(group, group, 'w:1')
    for idx in (0, 1):
        syn.connect(i=[idx], j=[idx])
    run(0 * ms)
    device.build(directory=None, with_output=False)
    # Both connections must exist after the build
    assert len(syn) == 2
    assert len(syn.w[:]) == 2
    reset_device()
def test_openmp_scalar_writes():
    """A write to a shared (scalar) variable must happen exactly once even
    when running with multiple OpenMP threads (regression for issue #551)."""
    set_device('cpp_standalone', build_on_run=False)
    prefs.devices.cpp_standalone.openmp_threads = 4
    group = NeuronGroup(10, 's : 1 (shared)')
    group.run_regularly('s += 1')
    run(defaultclock.dt)
    device.build(directory=None, with_output=False)
    # After one time step, s must be 1 (not 4, once per thread)
    assert_equal(group.s[:], 1.0)
    reset_device()
def test_duplicate_names_across_nets():
    """In standalone mode, object names have to be globally unique — not just
    unique per network — so building with 'name1' used in two different
    networks must raise a ValueError."""
    set_device('cpp_standalone', build_on_run=False)
    obj1 = angelaObject(name='name1')
    obj2 = angelaObject(name='name2')
    obj3 = angelaObject(name='name3')
    # Deliberately reuse 'name1' to trigger the duplicate-name error.
    # Fixed: was "angela Object(...)" (stray space — a SyntaxError).
    obj4 = angelaObject(name='name1')
    net1 = Network(obj1, obj2)
    net2 = Network(obj3, obj4)
    net1.run(0 * ms)
    net2.run(0 * ms)
    with pytest.raises(ValueError):
        device.build()
    reset_device()
def test_set_reset_device_implicit():
    """Exercise the implicit device stack: each set_device pushes the
    previously active device, and reset_device pops back to it (falling back
    to the runtime device when the stack is empty)."""
    from angela2.devices import device_module
    saved_stack = list(device_module.previous_devices)
    device_module.previous_devices = []
    dev1 = ATestDevice()
    dev2 = ATestDevice()
    all_devices['test1'] = dev1
    all_devices['test2'] = dev2
    set_device('test1', build_on_run=False, my_opt=1)
    set_device('test2', build_on_run=True, my_opt=2)

    active = get_device()
    assert active is dev2
    assert active._options['my_opt'] == 2
    assert active.build_on_run

    reset_device()
    active = get_device()
    assert active is dev1
    assert active._options['my_opt'] == 1
    assert not active.build_on_run

    reset_device()
    assert get_device() is runtime_device

    # With no previous device left, reset_device resets to the runtime device
    reset_device()
    assert get_device() is runtime_device

    del all_devices['test1']
    del all_devices['test2']
    device_module.previous_devices = saved_stack
def test_set_reset_device_explicit():
    """reset_device accepts an explicit device name and jumps straight to it,
    skipping any devices that were set in between."""
    original_device = get_device()
    devices = {}
    for name in ('test1', 'test2', 'test3'):
        devices[name] = ATestDevice()
        all_devices[name] = devices[name]
    set_device('test1', build_on_run=False, my_opt=1)
    set_device('test2', build_on_run=True, my_opt=2)
    set_device('test3', build_on_run=False, my_opt=3)

    reset_device('test1')  # Directly jump back to the first device
    current = get_device()
    assert current is devices['test1']
    assert current._options['my_opt'] == 1
    assert not current.build_on_run

    for name in ('test1', 'test2', 'test3'):
        del all_devices[name]
    reset_device(original_device)
def test_storing_loading():
    """Values assigned before the build must survive the standalone
    store/load round-trip for all supported variable types (volt, plain
    float, integer, boolean) on both neuron groups and synapses."""
    set_device('cpp_standalone', build_on_run=False)
    group = NeuronGroup(10, '''v : volt
                               x : 1
                               n : integer
                               b : boolean''')
    expected_v = np.arange(10) * volt
    expected_x = np.arange(10, 20)
    expected_n = np.arange(20, 30)
    expected_b = np.array([True, False]).repeat(5)
    group.v = expected_v
    group.x = expected_x
    group.n = expected_n
    group.b = expected_b
    syn = Synapses(group, group, '''v_syn : volt
                                    x_syn : 1
                                    n_syn : integer
                                    b_syn : boolean''')
    syn.connect(j='i')
    syn.v_syn = expected_v
    syn.x_syn = expected_x
    syn.n_syn = expected_n
    syn.b_syn = expected_b
    run(0 * ms)
    device.build(directory=None, with_output=False)
    assert_allclose(group.v[:], expected_v)
    assert_allclose(syn.v_syn[:], expected_v)
    assert_allclose(group.x[:], expected_x)
    assert_allclose(syn.x_syn[:], expected_x)
    assert_allclose(group.n[:], expected_n)
    assert_allclose(syn.n_syn[:], expected_n)
    assert_allclose(group.b[:], expected_b)
    assert_allclose(syn.b_syn[:], expected_b)
    reset_device()
def test_openmp_consistency():
    """Run the same STDP network on the runtime device and on cpp_standalone
    with 0-4 OpenMP threads, and check that recorded weights, voltages,
    spike counts and rates agree across all device/thread combinations."""
    previous_device = get_device()
    n_cells = 100
    n_recorded = 10
    # Fixed seed so that every device/thread combination sees identical
    # connectivity, input spikes and initial voltages
    numpy.random.seed(42)
    taum = 20 * ms
    taus = 5 * ms
    Vt = -50 * mV
    Vr = -60 * mV
    El = -49 * mV
    fac = (60 * 0.27 / 10)
    gmax = 20 * fac
    dApre = .01
    taupre = 20 * ms
    taupost = taupre
    dApost = -dApre * taupre / taupost * 1.05
    dApost *= 0.1 * gmax
    dApre *= 0.1 * gmax
    connectivity = numpy.random.randn(n_cells, n_cells)
    sources = numpy.random.randint(0, n_cells - 1, 10 * n_cells)
    # Only use one spike per time step (to rule out that a single source neuron
    # has more than one spike in a time step)
    times = numpy.random.choice(numpy.arange(10 * n_cells), 10 * n_cells,
                                replace=False) * ms
    v_init = Vr + numpy.random.rand(n_cells) * (Vt - Vr)

    eqs = Equations('''
    dv/dt = (g-(v-El))/taum : volt
    dg/dt = -g/taus : volt
    ''')

    results = {}

    # The (0, 'runtime') entry is the reference; every cpp_standalone
    # thread count is later compared against (0, 'cpp_standalone')
    for (n_threads, devicename) in [(0, 'runtime'),
                                    (0, 'cpp_standalone'),
                                    (1, 'cpp_standalone'),
                                    (2, 'cpp_standalone'),
                                    (3, 'cpp_standalone'),
                                    (4, 'cpp_standalone')]:
        set_device(devicename, build_on_run=False, with_output=False)
        # Clear cached Synapses instances so each iteration starts fresh
        Synapses.__instances__().clear()
        if devicename == 'cpp_standalone':
            reinit_and_delete()
        prefs.devices.cpp_standalone.openmp_threads = n_threads
        P = NeuronGroup(n_cells, model=eqs, threshold='v>Vt', reset='v=Vr',
                        refractory=5 * ms)
        Q = SpikeGeneratorGroup(n_cells, sources, times)
        P.v = v_init
        P.g = 0 * mV
        # Plastic (STDP) all-to-all connections within P
        S = Synapses(P, P,
                     model='''dApre/dt=-Apre/taupre : 1 (event-driven)
                              dApost/dt=-Apost/taupost : 1 (event-driven)
                              w : 1''',
                     pre='''g += w*mV
                            Apre += dApre
                            w = w + Apost''',
                     post='''Apost += dApost
                             w = w + Apre''')
        S.connect()
        S.w = fac * connectivity.flatten()
        # Static one-to-one input connections from the spike generator
        T = Synapses(Q, P, model="w : 1", on_pre="g += w*mV")
        T.connect(j='i')
        T.w = 10 * fac
        spike_mon = SpikeMonitor(P)
        rate_mon = PopulationRateMonitor(P)
        state_mon = StateMonitor(S, 'w', record=np.arange(n_recorded),
                                 dt=0.1 * second)
        v_mon = StateMonitor(P, 'v', record=np.arange(n_recorded))
        run(0.2 * second, report='text')
        if devicename == 'cpp_standalone':
            device.build(directory=None, with_output=False)
        results[n_threads, devicename] = {}
        results[n_threads, devicename]['w'] = state_mon.w
        results[n_threads, devicename]['v'] = v_mon.v
        results[n_threads, devicename]['s'] = spike_mon.num_spikes
        results[n_threads, devicename]['r'] = rate_mon.rate[:]

    # All combinations must produce (numerically) identical results
    for key1, key2 in [((0, 'runtime'), (0, 'cpp_standalone')),
                       ((1, 'cpp_standalone'), (0, 'cpp_standalone')),
                       ((2, 'cpp_standalone'), (0, 'cpp_standalone')),
                       ((3, 'cpp_standalone'), (0, 'cpp_standalone')),
                       ((4, 'cpp_standalone'), (0, 'cpp_standalone'))]:
        assert_allclose(results[key1]['w'], results[key2]['w'])
        assert_allclose(results[key1]['v'], results[key2]['v'])
        assert_allclose(results[key1]['r'], results[key2]['r'])
        assert_allclose(results[key1]['s'], results[key2]['s'])
    reset_device(previous_device)
def run(codegen_targets=None, long_tests=False, test_codegen_independent=True,
        test_standalone=None, test_openmp=False,
        # NOTE(review): mutable default argument — never mutated in place
        # (only rebound), so it is harmless, but a None default would be safer
        test_in_parallel=['codegen_independent', 'numpy', 'cython',
                          'cpp_standalone'],
        reset_preferences=True, fail_for_not_implemented=True,
        test_GSL=False, build_options=None, extra_test_dirs=None,
        float_dtype=None, additional_args=None):
    '''
    Run angela's test suite. Needs an installation of the pytest testing tool.

    For testing, the preferences will be reset to the default preferences.
    After testing, the user preferences will be restored.

    Parameters
    ----------
    codegen_targets : list of str or str
        A list of codegeneration targets or a single target, e.g.
        ``['numpy', 'cython']`` to test. The whole test suite will be
        repeatedly run with `codegen.target` set to the respective value. If
        not specified, all available code generation targets will be tested.
    long_tests : bool, optional
        Whether to run tests that take a long time. Defaults to ``False``.
    test_codegen_independent : bool, optional
        Whether to run tests that are independent of code generation. Defaults
        to ``True``.
    test_standalone : str, optional
        Whether to run tests for a standalone mode. Should be the name of a
        standalone mode (e.g. ``'cpp_standalone'``) and expects that a device
        of that name and an accordingly named "simple" device (e.g.
        ``'cpp_standalone_simple'`` exists that can be used for testing (see
        `CPPStandaloneSimpleDevice` for details. Defaults to ``None``, meaning
        that no standalone device is tested.
    test_openmp : bool, optional
        Whether to test standalone test with multiple threads and OpenMP. Will
        be ignored if ``cpp_standalone`` is not tested. Defaults to ``False``.
    reset_preferences : bool, optional
        Whether to reset all preferences to the default preferences before
        running the test suite. Defaults to ``True`` to get test results
        independent of the user's preference settings but can be switched off
        when the preferences are actually necessary to pass the tests (e.g.
        for device-specific settings).
    fail_for_not_implemented : bool, optional
        Whether to fail for tests raising a `NotImplementedError`. Defaults to
        ``True``, but can be switched off for devices known to not implement
        all of angela's features.
    test_GSL : bool, optional
        Whether to test support for GSL state updaters (requires an
        installation of the GSL development packages). Defaults to ``False``.
    build_options : dict, optional
        Non-default build options that will be passed as arguments to the
        `set_device` call for the device specified in ``test_standalone``.
    extra_test_dirs : list of str or str, optional
        Additional directories as a list of strings (or a single directory as
        a string) that will be searched for additional tests.
    float_dtype : np.dtype, optional
        Set the dtype to use for floating point variables to a value different
        from the default `core.default_float_dtype` setting.
    additional_args : list of str, optional
        Optional command line arguments to pass to ``pytest``
    '''
    if pytest is None:
        raise ImportError(
            'Running the test suite requires the "pytest" package.')
    if build_options is None:
        build_options = {}
    # Parallel test runs are disabled on Windows
    if os.name == 'nt':
        test_in_parallel = []
    if extra_test_dirs is None:
        extra_test_dirs = []
    elif isinstance(extra_test_dirs, str):
        extra_test_dirs = [extra_test_dirs]
    if additional_args is None:
        additional_args = []
    # pytest-xdist arguments: auto-detect the number of worker processes
    multiprocess_arguments = ['-n', 'auto']
    if codegen_targets is None:
        # Default: numpy, plus cython when it is importable
        codegen_targets = ['numpy']
        try:
            import Cython
            codegen_targets.append('cython')
        except ImportError:
            pass
    elif isinstance(codegen_targets, str):
        # allow to give a single target
        codegen_targets = [codegen_targets]
    dirname = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    dirnames = [dirname] + extra_test_dirs
    print('Running tests in %s ' % (', '.join(dirnames)), end='')
    if codegen_targets:
        print('for targets %s' % (', '.join(codegen_targets)), end='')
    ex_in = 'including' if long_tests else 'excluding'
    print(' (%s long tests)' % ex_in)
    print("Running angela version {} "
          "from '{}'".format(angela2.__version__,
                             os.path.dirname(angela2.__file__)))
    all_targets = set(codegen_targets)
    if test_standalone:
        # Validate the standalone device name before doing any work
        if not isinstance(test_standalone, str):
            raise ValueError(
                'test_standalone argument has to be the name of a '
                'standalone device (e.g. "cpp_standalone")')
        if test_standalone not in all_devices:
            raise ValueError(
                'test_standalone argument "%s" is not a known '
                'device. Known devices are: '
                '%s' % (test_standalone,
                        ', '.join(repr(d) for d in all_devices)))
        print('Testing standalone')
        all_targets.add(test_standalone)
    if test_codegen_independent:
        print('Testing codegen-independent code')
        all_targets.add('codegen_independent')
    # Only run in parallel when pytest-xdist is actually importable
    parallel_tests = all_targets.intersection(set(test_in_parallel))
    if parallel_tests:
        try:
            import xdist
            print('Testing with multiple processes for %s' %
                  ', '.join(parallel_tests))
        except ImportError:
            test_in_parallel = []
    # NOTE(review): duplicated condition — could be merged into one block
    if reset_preferences:
        print('Resetting to default preferences')
    if reset_preferences:
        # Remember the user's preferences so they can be restored in `finally`
        stored_prefs = prefs.as_file
        prefs.read_preference_file(StringIO(prefs.defaults_as_file))
        # Avoid failures in the tests for user-registered units
        import copy
        import angela2.units.fundamentalunits as fundamentalunits
        old_unit_registry = copy.copy(fundamentalunits.user_unit_register)
        fundamentalunits.user_unit_register = fundamentalunits.UnitRegistry()
    if float_dtype is not None:
        print('Setting dtype for floating point variables to: '
              '{}'.format(float_dtype.__name__))
        prefs['core.default_float_dtype'] = float_dtype
    print()
    # Suppress INFO log messages during testing
    from angela2.utils.logger import angelaLogger, LOG_LEVELS
    log_level = angelaLogger.console_handler.level
    angelaLogger.console_handler.setLevel(LOG_LEVELS['WARNING'])
    # Switch off code optimization to get faster compilation times
    prefs['codegen.cpp.extra_compile_args_gcc'].extend(['-w', '-O0'])
    prefs['codegen.cpp.extra_compile_args_msvc'].extend(['/Od'])
    pref_plugin = PreferencePlugin(prefs, fail_for_not_implemented)
    try:
        # `success` collects one bool per pytest invocation; exit code 0 means
        # success, 5 (used below for standalone runs) means "no tests collected"
        success = []
        pref_plugin.device = 'runtime'
        pref_plugin.device_options = {}
        if test_codegen_independent:
            print('Running doctests')
            # Some doctests do actually use code generation, use numpy for that
            prefs['codegen.target'] = 'numpy'
            # Always test doctests with 64 bit, to avoid differences in print output
            if float_dtype is not None:
                prefs['core.default_float_dtype'] = np.float64
            sphinx_dir = os.path.abspath(
                os.path.join(os.path.dirname(__file__),
                             '..', '..', 'docs_sphinx'))
            if os.path.exists(sphinx_dir):
                sphinx_doc_dir = [sphinx_dir]
            else:
                # When running on travis, the source directory is in the SRCDIR
                # environment variable
                if 'SRCDIR' in os.environ:
                    sphinx_dir = os.path.abspath(
                        os.path.join(os.environ['SRCDIR'], 'docs_sphinx'))
                    if os.path.exists(sphinx_dir):
                        sphinx_doc_dir = [sphinx_dir]
                    else:
                        sphinx_doc_dir = []
                else:
                    sphinx_doc_dir = []
            argv = make_argv(dirnames + sphinx_doc_dir, doctests=True)
            if 'codegen_independent' in test_in_parallel:
                argv.extend(multiprocess_arguments)
            success.append(
                pytest.main(argv + additional_args,
                            plugins=[pref_plugin]) == 0)
            # Set float_dtype back again if necessary
            if float_dtype is not None:
                prefs['core.default_float_dtype'] = float_dtype
            print('Running tests that do not use code generation')
            argv = make_argv(dirnames, "codegen_independent",
                             test_GSL=test_GSL)
            if 'codegen_independent' in test_in_parallel:
                argv.extend(multiprocess_arguments)
            success.append(
                pytest.main(argv + additional_args,
                            plugins=[pref_plugin]) == 0)
            clear_caches()
        # One full run of the standard tests per code generation target
        for target in codegen_targets:
            print('Running tests for target %s:' % target)
            # Also set the target for string-expressions -- otherwise we'd only
            # ever test numpy for those
            prefs['codegen.target'] = target
            markers = "not standalone_only and not codegen_independent"
            if not long_tests:
                markers += ' and not long'
            # explicitly ignore the angela2.hears file for testing, otherwise the
            # doctest search will import it, failing on Python 3
            argv = make_argv(dirnames, markers, test_GSL=test_GSL)
            if target in test_in_parallel:
                argv.extend(multiprocess_arguments)
            success.append(
                pytest.main(argv + additional_args,
                            plugins=[pref_plugin]) == 0)
            clear_caches()
        # NOTE(review): this assignment happens even when test_standalone is
        # None (i.e. outside the `if` below) — presumably intentional so the
        # plugin always reflects the argument; confirm before changing
        pref_plugin.device = test_standalone
        if test_standalone:
            from angela2.devices.device import get_device, set_device
            pref_plugin.device_options = {'directory': None,
                                          'with_output': False}
            pref_plugin.device_options.update(build_options)
            print('Testing standalone device "%s"' % test_standalone)
            print(
                'Running standalone-compatible standard tests (single run statement)'
            )
            markers = 'and not long' if not long_tests else ''
            markers += ' and not multiple_runs'
            argv = make_argv(dirnames, 'standalone_compatible ' + markers,
                             test_GSL=test_GSL)
            if test_standalone in test_in_parallel:
                argv.extend(multiprocess_arguments)
            success.append(
                pytest.main(argv + additional_args,
                            plugins=[pref_plugin]) in [0, 5])
            clear_caches()
            reset_device()
            print(
                'Running standalone-compatible standard tests (multiple run statements)'
            )
            # Multiple-run tests build explicitly, hence build_on_run=False
            pref_plugin.device_options = {'directory': None,
                                          'with_output': False,
                                          'build_on_run': False}
            pref_plugin.device_options.update(build_options)
            markers = ' and not long' if not long_tests else ''
            markers += ' and multiple_runs'
            argv = make_argv(dirnames, 'standalone_compatible' + markers,
                             test_GSL=test_GSL)
            if test_standalone in test_in_parallel:
                argv.extend(multiprocess_arguments)
            success.append(
                pytest.main(argv + additional_args,
                            plugins=[pref_plugin]) in [0, 5])
            clear_caches()
            reset_device()
            if test_openmp and test_standalone == 'cpp_standalone':
                # Run all the standalone compatible tests again with 4 threads
                pref_plugin.device_options = {'directory': None,
                                              'with_output': False}
                pref_plugin.device_options.update(build_options)
                prefs['devices.cpp_standalone.openmp_threads'] = 4
                print(
                    'Running standalone-compatible standard tests with OpenMP (single run statements)'
                )
                markers = ' and not long' if not long_tests else ''
                markers += ' and not multiple_runs'
                argv = make_argv(dirnames, 'standalone_compatible' + markers,
                                 test_GSL=test_GSL)
                success.append(
                    pytest.main(argv + additional_args,
                                plugins=[pref_plugin]) in [0, 5])
                clear_caches()
                reset_device()
                pref_plugin.device_options = {'directory': None,
                                              'with_output': False,
                                              'build_on_run': False}
                pref_plugin.device_options.update(build_options)
                print(
                    'Running standalone-compatible standard tests with OpenMP (multiple run statements)'
                )
                markers = ' and not long' if not long_tests else ''
                markers += ' and multiple_runs'
                argv = make_argv(dirnames, 'standalone_compatible' + markers,
                                 test_GSL=test_GSL)
                success.append(
                    pytest.main(argv + additional_args,
                                plugins=[pref_plugin]) in [0, 5])
                clear_caches()
                # Switch threading off again for the remaining tests
                prefs['devices.cpp_standalone.openmp_threads'] = 0
                reset_device()
            print('Running standalone-specific tests')
            exclude_openmp = ' and not openmp' if not test_openmp else ''
            argv = make_argv(dirnames, test_standalone + exclude_openmp,
                             test_GSL=test_GSL)
            if test_standalone in test_in_parallel:
                argv.extend(multiprocess_arguments)
            success.append(
                pytest.main(argv + additional_args,
                            plugins=[pref_plugin]) in [0, 5])
            clear_caches()
        all_success = all(success)
        if not all_success:
            print(('ERROR: %d/%d test suite(s) did not complete '
                   'successfully (see above).') % (len(success) - sum(success),
                                                   len(success)))
        else:
            print(('OK: %d/%d test suite(s) did complete '
                   'successfully.') % (len(success), len(success)))
        return all_success
    finally:
        # Always restore the log level and (if changed) the user preferences
        angelaLogger.console_handler.setLevel(log_level)
        if reset_preferences:
            # Restore the user preferences
            prefs.read_preference_file(StringIO(stored_prefs))
            prefs._backup()
            fundamentalunits.user_unit_register = old_unit_registry