def test_error_model(self):
    """
    Caching must not mix up different error models.
    """
    # Simple kernel whose compiled code differs depending on the error
    # model in effect (division behavior at x == 0 — presumably raising
    # vs. non-raising; confirm against the error-model implementations).
    def inv(x):
        return 1.0 / x

    inv_sig = typing.signature(types.float64, types.float64)

    def compile_inv(context):
        # `builder` is a free variable resolved at call time from the
        # enclosing scope, so this helper must only be called inside the
        # `with` block below, where `builder` is bound.
        return context.compile_subroutine(builder, inv, inv_sig)

    with self._context_builder_sig_args() as (
        context,
        builder,
        sig,
        args,
    ):
        py_error_model = callconv.create_error_model('python', context)
        np_error_model = callconv.create_error_model('numpy', context)
        # Two subtargets sharing one error model, plus one with another.
        py_context1 = context.subtarget(error_model=py_error_model)
        py_context2 = context.subtarget(error_model=py_error_model)
        np_context = context.subtarget(error_model=np_error_model)
        # Note the parent context's cache is shared by subtargets
        self.assertEqual(0, len(context.cached_internal_func))
        # Compiling with the same error model reuses the same cache slot
        compile_inv(py_context1)
        self.assertEqual(1, len(context.cached_internal_func))
        compile_inv(py_context2)
        self.assertEqual(1, len(context.cached_internal_func))
        # Compiling with another error model creates a new cache slot
        compile_inv(np_context)
        self.assertEqual(2, len(context.cached_internal_func))
def force_error_model(context, model_name='numpy'):
    """
    Temporarily replace *context*'s error model with the one named
    *model_name*, restoring the previous model when the block exits.
    """
    # NOTE(review): imported locally rather than at module top —
    # presumably to avoid a circular import; confirm before hoisting.
    from numba.core import callconv

    saved_model = context.error_model
    context.error_model = callconv.create_error_model(model_name, context)
    try:
        yield
    finally:
        # Restore unconditionally, even if the managed block raised.
        context.error_model = saved_model
def _make_subtarget(targetctx, flags):
    """
    Make a new target context from the given target context and flags.
    """
    options = {}
    # Boolean flags that simply switch a feature on for the subtarget.
    for flag_attr, option_key in [
        ('debuginfo', 'enable_debuginfo'),
        ('boundscheck', 'enable_boundscheck'),
        ('nrt', 'enable_nrt'),
    ]:
        if getattr(flags, flag_attr):
            options[option_key] = True
    # Flags whose value itself is forwarded, when truthy.
    for flag_attr in ('auto_parallel', 'fastmath'):
        value = getattr(flags, flag_attr)
        if value:
            options[flag_attr] = value
    # The error model is always set, built from the flags' model name.
    options['error_model'] = callconv.create_error_model(
        flags.error_model, targetctx)
    return targetctx.subtarget(**options)