def autogenerate(cls):
    """Generate and attach every dtype/vlen/flags/mode use-case test to *cls*.

    Also tags selected generated tests as "important".
    """
    # Two compilation variants: default and fastmath.
    variants = [
        {'fastmath': False, 'error_model': 'numpy', 'name': 'usecase'},
        {'fastmath': True, 'error_model': 'numpy',
         'name': 'fastmath_usecase'},
    ]
    modes = ("scalar", "range", "prange", "numpy")
    # Main loop covering all the modes and use-cases.
    for dtype in ('complex64', 'float64', 'float32', 'int32'):
        for vlen in vlen2cpu:
            for variant in variants:
                for mode in modes:
                    # Pass a copy so injected tests cannot share state.
                    cls._inject_test(dtype, mode, vlen, dict(variant))
    # Mark important tests.
    for testname in ("test_int32_range4_usecase",):  # issue #3016
        setattr(cls, testname, tag("important")(getattr(cls, testname)))
def autogenerate(cls): test_flags = [ 'fastmath', ] # TODO: add 'auto_parallel' ? # generate all the combinations of the flags test_flags = sum([list(combinations(test_flags, x)) for x in range( \ len(test_flags)+1)], []) flag_list = [] # create Flag class instances for ft in test_flags: flags = Flags() flags.set('nrt') flags.set('error_model', 'numpy') flags.__name__ = '_'.join(ft + ('usecase', )) for f in ft: flags.set(f, {'fastmath': cpu.FastMathOptions(True)}.get(f, True)) flag_list.append(flags) # main loop covering all the modes and use-cases for dtype in ( 'complex64', 'float64', 'float32', 'int32', ): for vlen in vlen2cpu: for flags in flag_list: for mode in "scalar", "range", "prange", "numpy": cls._inject_test(dtype, mode, vlen, flags) # mark important for n in ( "test_int32_range4_usecase", # issue #3016 ): setattr(cls, n, tag("important")(getattr(cls, n)))
def generate_binop_tests(ns, usecases, tp_runners, npm_array=False):
    """Insert generated binary-op test methods into the namespace *ns*.

    For each (usecase, type-runner, nopython) combination a test method is
    created.  In nopython mode, array use cases are expected to fail typing
    unless *npm_array* is set.
    """
    for usecase in usecases:
        for tp_name, runner_name in tp_runners.items():
            for nopython in (False, True):
                test_name = "test_%s_%s" % (usecase, tp_name)
                if nopython:
                    test_name += "_npm"
                flags = Noflags if nopython else force_pyobj_flags
                usecase_name = "%s_usecase" % usecase

                # Bind the loop variables as argument defaults so each
                # generated test keeps this iteration's values (avoids the
                # late-binding closure pitfall).
                def inner(self, runner_name=runner_name,
                          usecase_name=usecase_name, flags=flags):
                    runner = getattr(self, runner_name)
                    op_usecase = getattr(self.op, usecase_name)
                    runner(op_usecase, flags)

                if nopython and 'array' in tp_name and not npm_array:
                    # Arrays are unsupported here in nopython mode: the test
                    # asserts a typing error is raised.  *inner* must be
                    # bound as a default (late-binding fix: the free variable
                    # would otherwise resolve to the last inner created by
                    # the loops) and must receive *self* explicitly, since it
                    # is called as a plain function, not a bound method.
                    def test_meth(self, inner=inner):
                        with self.assertTypingError():
                            inner(self)
                else:
                    test_meth = inner

                test_meth.__name__ = test_name
                if nopython:
                    test_meth = tag('important')(test_meth)
                ns[test_name] = test_meth
def _inject(cls, p, name, backend, backend_guard):
    """Attach a test to *cls* that re-runs the named parallel test in a
    separate process under the given threading *backend*."""
    # Fully qualified dotted path of the test executed in the child process.
    target = '{}.{}.{}'.format(cls.__module__,
                               TestParallelBackend.__name__,
                               "test_" + p + '_' + name)

    def test_template(self):
        o, e = self.run_test_in_separate_process(target, backend)
        if self._DEBUG:
            print('stdout:\n "%s"\n stderr:\n "%s"' % (o, e))
        # unittest writes its result report to stderr.
        self.assertIn('OK', e)
        self.assertTrue('FAIL' not in e)
        self.assertTrue('ERROR' not in e)

    # Mark as long_running
    setattr(cls, f"test_{p}_{name}_{backend}",
            tag('long_running')(backend_guard(test_template)))
def _inject(cls, backend, backend_guard):
    """Attach a test to *cls* that launches a child interpreter with
    NUMBA_THREADING_LAYER set to *backend* (the child asserts the selected
    threading layer matches)."""
    def test_template(self):
        # Code executed in the child: do some parallel work, then check the
        # active threading layer is the requested backend.
        body = """if 1:
            X = np.arange(1000000.)
            Y = np.arange(1000000.)
            Z = busy_func(X, Y)
            assert numba.threading_layer() == '%s'
        """
        runme = self.template % (body % backend)
        cmdline = [sys.executable, "-c", runme]
        # Select the threading layer through the environment variable.
        env = os.environ.copy()
        env["NUMBA_THREADING_LAYER"] = str(backend)
        out, err = self.run_cmd(cmdline, env=env)
        if self._DEBUG:
            print(out, err)
    injected_test = "test_threading_layer_selector_%s" % backend
    setattr(cls, injected_test,
            tag("important")(backend_guard(test_template)))
def _inject(cls, name, backend, backend_guard, num_threads):
    """Attach a test to *cls* that re-runs *name* in a separate process with
    the given threading *backend* and *num_threads*, forwarding any skip."""
    # Fully qualified dotted path of the test executed in the child process.
    target = '{}.{}.{}'.format(cls.__module__, cls._class.__name__, name)

    def test_template(self):
        o, e = self.run_test_in_separate_process(target, backend,
                                                 num_threads)
        if self._DEBUG:
            print('stdout:\n "%s"\n stderr:\n "%s"' % (o, e))
        # unittest writes its result report to stderr.
        self.assertIn('OK', e)
        self.assertTrue('FAIL' not in e)
        self.assertTrue('ERROR' not in e)
        # If the child skipped, propagate the skip (and its reason) here.
        skipped = re.search(r"\.\.\. skipped '(.*?)'", e)
        if skipped:
            self.skipTest(skipped.group(1))

    test_name = "%s_%s_%s_threads" % (name[1:], backend, num_threads)
    setattr(cls, test_name,
            tag('long_running')(backend_guard(test_template)))
# Skip guards: gdb-driven tests are disabled on ARM platforms.
_is_arm = _arch_name in {'aarch64', 'armv7l'}
not_arm = unittest.skipIf(_is_arm, "testing disabled on ARM")

# Tests needing the gdb harness only run when GDB_TEST=1 is exported.
_gdb_cond = os.environ.get('GDB_TEST', None) == '1'
needs_gdb_harness = unittest.skipUnless(_gdb_cond, "needs gdb harness")

# check if gdb is present and working
try:
    _confirm_gdb()
    _HAVE_GDB = True
except Exception:
    # Any failure (missing binary, ptrace restrictions, ...) disables
    # the gdb-dependent tests rather than erroring at import time.
    _HAVE_GDB = False

_msg = "functioning gdb with correct ptrace permissions is required"
needs_gdb = unittest.skipUnless(_HAVE_GDB, _msg)

long_running = tag('long_running')

# jit wrappers with debug info enabled, applied to the impls below
_dbg_njit = njit(debug=True)
_dbg_jit = jit(forceobj=True, debug=True)


def impl_gdb_call(a):
    # Attach gdb non-interactively: disable confirmations, continue, quit.
    gdb('-ex', 'set confirm off', '-ex', 'c', '-ex', 'q')
    b = a + 1
    c = a * 2.34
    d = (a, b, c)
    print(a, b, c, d)


def impl_gdb_call_w_bp(a):
    # Initialise gdb (same non-interactive arguments) without attaching yet.
    # NOTE(review): remainder of this function may lie outside this chunk.
    gdb_init('-ex', 'set confirm off', '-ex', 'c', '-ex', 'q')
# Skip guards: gdb-driven tests are disabled on ARM platforms.
_is_arm = _arch_name in {"aarch64", "armv7l"}
not_arm = unittest.skipIf(_is_arm, "testing disabled on ARM")

# Tests needing the gdb harness only run when GDB_TEST=1 is exported.
_gdb_cond = os.environ.get("GDB_TEST", None) == "1"
needs_gdb_harness = unittest.skipUnless(_gdb_cond, "needs gdb harness")

# check if gdb is present and working
try:
    _confirm_gdb()
    _HAVE_GDB = True
except Exception:
    # Any failure (missing binary, ptrace restrictions, ...) disables
    # the gdb-dependent tests rather than erroring at import time.
    _HAVE_GDB = False

_msg = "functioning gdb with correct ptrace permissions is required"
needs_gdb = unittest.skipUnless(_HAVE_GDB, _msg)

long_running = tag("long_running")

# jit wrappers with debug info enabled, applied to the impls below
_dbg_njit = njit(debug=True)
_dbg_jit = jit(forceobj=True, debug=True)


def impl_gdb_call(a):
    # Attach gdb non-interactively: disable confirmations, continue, quit.
    gdb("-ex", "set confirm off", "-ex", "c", "-ex", "q")
    b = a + 1
    c = a * 2.34
    d = (a, b, c)
    print(a, b, c, d)


def impl_gdb_call_w_bp(a):
    # Initialise gdb (same non-interactive arguments) without attaching yet.
    # NOTE(review): remainder of this function may lie outside this chunk.
    gdb_init("-ex", "set confirm off", "-ex", "c", "-ex", "q")