def test_map(self, flags=enable_pyobj_flags):
    """Check that a compiled ``map`` call produces the same sequence as
    pure-Python ``map`` over a small list of ints.

    *flags* defaults to object-mode compilation flags.
    """
    pyfunc = map_usecase
    cr = compile_isolated(
        pyfunc, (types.Dummy('list'), types.Dummy('function_ptr')),
        flags=flags)
    cfunc = cr.entry_point

    # PEP 8: use a def statement rather than binding a lambda to a name.
    def map_func(x):
        return x * 2

    x = [0, 1, 2, 3, 4]
    self.assertSequenceEqual(list(cfunc(x, map_func)),
                             list(pyfunc(x, map_func)))
def test_filter(self, flags=enable_pyobj_flags):
    """Check that a compiled ``filter`` call produces the same sequence
    as pure-Python ``filter`` over a small list of ints.

    *flags* defaults to object-mode compilation flags.
    """
    pyfunc = filter_usecase
    cr = compile_isolated(
        pyfunc, (types.Dummy("list"), types.Dummy("function_ptr")),
        flags=flags)
    cfunc = cr.entry_point

    # PEP 8: use a def statement rather than binding a lambda to a name.
    def filter_func(x):
        return x % 2

    x = [0, 1, 2, 3, 4]
    self.assertSequenceEqual(list(cfunc(x, filter_func)),
                             list(pyfunc(x, filter_func)))
def test_cache_trimming(self):
    """Verify the type cache does not grow permanently: creating two
    dynamic types adds two entries, and collecting them removes both."""
    cache = _typecache
    gc.collect()
    # Pin every type currently in the cache so an unrelated collection
    # cannot shrink it mid-test and cause spurious failures.
    pinned = [ref() for ref in cache]  # noqa: F841
    baseline = len(cache)
    first = types.Dummy('xyzzyx')
    second = types.Dummy('foox')
    self.assertEqual(len(cache), baseline + 2)
    del first, second
    gc.collect()
    self.assertEqual(len(cache), baseline)
def test_type_casting_rules(self):
    """Exercise TypeCastingRules: conversions declared directly, the
    conversions the rules graph derives by propagation, and the
    isolation of a new type until explicit edges are added for it."""
    tm = TypeManager()
    tcr = TypeCastingRules(tm)
    i16 = types.int16
    i32 = types.int32
    i64 = types.int64
    f64 = types.float64
    f32 = types.float32
    f16 = types.float16
    # A type with no declared casts; should be unreachable at first.
    made_up = types.Dummy("made_up")
    # Declared edges: forward conversion kind / backward falls to unsafe.
    tcr.promote_unsafe(i32, i64)
    tcr.safe_unsafe(i32, f64)
    tcr.promote_unsafe(f32, f64)
    tcr.promote_unsafe(f16, f32)
    tcr.unsafe_unsafe(i16, f16)

    def base_test():
        # As declared
        self.assertEqual(tm.check_compatible(i32, i64), Conversion.promote)
        self.assertEqual(tm.check_compatible(i32, f64), Conversion.safe)
        self.assertEqual(tm.check_compatible(f16, f32), Conversion.promote)
        self.assertEqual(tm.check_compatible(f32, f64), Conversion.promote)
        self.assertEqual(tm.check_compatible(i64, i32), Conversion.unsafe)
        self.assertEqual(tm.check_compatible(f64, i32), Conversion.unsafe)
        self.assertEqual(tm.check_compatible(f64, f32), Conversion.unsafe)
        # Propagated
        self.assertEqual(tm.check_compatible(i64, f64), Conversion.unsafe)
        self.assertEqual(tm.check_compatible(f64, i64), Conversion.unsafe)
        self.assertEqual(tm.check_compatible(i64, f32), Conversion.unsafe)
        self.assertEqual(tm.check_compatible(i32, f32), Conversion.unsafe)
        self.assertEqual(tm.check_compatible(f32, i32), Conversion.unsafe)
        self.assertEqual(tm.check_compatible(i16, f16), Conversion.unsafe)
        self.assertEqual(tm.check_compatible(f16, i16), Conversion.unsafe)

    # Test base graph
    base_test()

    # With no edges declared yet, "made_up" is incompatible both ways.
    self.assertIsNone(tm.check_compatible(i64, made_up))
    self.assertIsNone(tm.check_compatible(i32, made_up))
    self.assertIsNone(tm.check_compatible(f32, made_up))
    self.assertIsNone(tm.check_compatible(made_up, f64))
    self.assertIsNone(tm.check_compatible(made_up, i64))

    # Add new test
    tcr.promote(f64, made_up)
    tcr.unsafe(made_up, i32)

    # Ensure the graph did not change by adding the new type
    base_test()

    # To "made up" type
    self.assertEqual(tm.check_compatible(i64, made_up),
                     Conversion.unsafe)
    self.assertEqual(tm.check_compatible(i32, made_up),
                     Conversion.safe)
    self.assertEqual(tm.check_compatible(f32, made_up),
                     Conversion.promote)
    self.assertEqual(tm.check_compatible(made_up, f64),
                     Conversion.unsafe)
    self.assertEqual(tm.check_compatible(made_up, i64),
                     Conversion.unsafe)
def test_reduce(self, flags=enable_pyobj_flags):
    """Check that a compiled ``reduce`` call matches pure-Python
    ``reduce`` for int, float, and complex element sequences.

    *flags* defaults to object-mode compilation flags.
    """
    pyfunc = reduce_usecase
    cr = compile_isolated(
        pyfunc, (types.Dummy('function_ptr'), types.Dummy('list')),
        flags=flags)
    cfunc = cr.entry_point

    # PEP 8: use a def statement rather than binding a lambda to a name.
    def reduce_func(x, y):
        return x + y

    x = range(10)
    self.assertPreciseEqual(cfunc(reduce_func, x),
                            pyfunc(reduce_func, x))
    x = [x + x / 10.0 for x in range(10)]
    self.assertPreciseEqual(cfunc(reduce_func, x),
                            pyfunc(reduce_func, x))
    x = [complex(x, x) for x in range(10)]
    self.assertPreciseEqual(cfunc(reduce_func, x),
                            pyfunc(reduce_func, x))
def test_interning(self):
    # Test interning and lifetime of dynamic types.
    a = types.Dummy('xyzzyx')
    code = a._code
    b = types.Dummy('xyzzyx')
    # Same name => interned to the very same object.
    self.assertIs(b, a)
    wr = weakref.ref(a)
    del a
    gc.collect()
    c = types.Dummy('xyzzyx')
    # Still alive through `b`, so the same interned instance comes back.
    self.assertIs(c, b)
    # The code is always the same
    self.assertEqual(c._code, code)
    del b, c
    gc.collect()
    # All strong references are gone: the interned type was collected.
    self.assertIs(wr(), None)
    d = types.Dummy('xyzzyx')
    # The original code wasn't reused.
    self.assertNotEqual(d._code, code)
def test_unbox_runtime_error(self):
    """Calling a compiled function whose argument is a Dummy type must
    raise TypeError, since Dummy has no unbox support."""
    def foo(x):
        pass

    cres = compile_isolated(foo, (types.Dummy("dummy_type"),))
    with self.assertRaises(TypeError) as raises:
        # The unbox logic raises unconditionally, whatever is passed in,
        # without ever inspecting the argument value.
        cres.entry_point(None)
    expected_msg = "can't unbox dummy_type type"
    self.assertEqual(str(raises.exception), expected_msg)
def test_bool_nonnumber(self, flags=enable_pyobj_flags):
    """Check truthiness of non-numeric values (strings and lists)
    matches the interpreter for both truthy and falsy samples."""
    pyfunc = bool_usecase
    # (argument type, [truthy sample, falsy sample]) pairs.
    cases = (
        (types.string, ['x', '']),
        (types.Dummy('list'), [[1], []]),
    )
    for argtype, samples in cases:
        cr = compile_isolated(pyfunc, (argtype,), flags=flags)
        cfunc = cr.entry_point
        for sample in samples:
            self.assertPreciseEqual(cfunc(sample), pyfunc(sample))
def test_sum(self, flags=enable_pyobj_flags):
    """Check a compiled ``sum`` matches pure-Python ``sum`` for int,
    float, and complex element sequences."""
    pyfunc = sum_usecase
    cr = compile_isolated(pyfunc, (types.Dummy('list'),), flags=flags)
    cfunc = cr.entry_point
    inputs = [
        range(10),
        [i + i / 10.0 for i in range(10)],
        [complex(i, i) for i in range(10)],
    ]
    for arg in inputs:
        self.assertPreciseEqual(cfunc(arg), pyfunc(arg))
def check_min_max_invalid_types(self, pyfunc, flags=enable_pyobj_flags):
    """Compile *pyfunc* with an (int32, list) signature and invoke it;
    callers use this to provoke/inspect invalid-type behavior."""
    compiled = compile_isolated(pyfunc,
                                (types.int32, types.Dummy('list')),
                                flags=flags)
    compiled.entry_point(1, [1])