def make_vector_type(name: str, base_type: types.Type, attr_names: Tuple[str, ...], user_facing_object) -> types.Type:
    """Create and register a new vector type.

    Parameters
    ----------
    name: str
        The name of the type.
    base_type: numba.types.Type
        The primitive type for each element in the vector.
    attr_names: tuple of str
        Name for each attribute.
    user_facing_object: object
        The handle to be used in cuda kernel.
    """

    class _VectorType(VectorType):
        """Internal instantiation of VectorType."""
        pass

    class VectorTypeModel(models.StructModel):
        # Every attribute of the vector is a field of the same primitive type.
        def __init__(self, dmm, fe_type):
            fields = [(attr, base_type) for attr in attr_names]
            super().__init__(dmm, fe_type, fields)

    vector_type = _VectorType(name, base_type, attr_names, user_facing_object)
    register_model(_VectorType)(VectorTypeModel)

    # Expose each struct field as an attribute of the same name.
    for attr in attr_names:
        make_attribute_wrapper(_VectorType, attr, attr)

    return vector_type
def setUp(self):
    # Embed the test id in the type name so no two tests can collide.
    test_id = self.id()
    DummyType = type('DummyTypeFor{}'.format(test_id), (types.Opaque,), {})
    dummy_type = DummyType("my_dummy")
    register_model(DummyType)(OpaqueModel)

    class Dummy(object):
        pass

    # Map Python-level Dummy instances onto the freshly created Numba type.
    @typeof_impl.register(Dummy)
    def typeof_Dummy(val, c):
        return dummy_type

    # Unboxing needs no real data; a dummy LLVM value suffices.
    @unbox(DummyType)
    def unbox_index(typ, obj, c):
        return NativeValue(c.context.get_dummy_value())

    self.Dummy = Dummy
    self.DummyType = DummyType
def test_mixin_against_real_example(self):
    # See issue #4970, this checks that unicode eq/ne now ignores extension
    # types.
    with self.create_temp_module(self.source_lines) as test_module:
        FooType = test_module.FooType
        self.assertFalse(FooType().is_internal)

        # Register Foo as an extension type backed by an opaque model.
        class Foo(object):
            pass

        register_model(FooType)(models.OpaqueModel)

        @typeof_impl.register(Foo)
        def _typ_foo(val, c):
            return FooType()

        @unbox(FooType)
        def unbox_foo(typ, obj, c):
            return NativeValue(c.context.get_dummy_value())

        # Custom eq/ne overloads that only fire for the extension type.
        @overload(operator.eq)
        def foo_eq(a, b):
            if a == FooType():
                return lambda a, b: "RAN CUSTOM EQ OVERLOAD"

        @overload(operator.ne)
        def foo_ne(a, b):
            if a == FooType():
                return lambda a, b: "RAN CUSTOM NE OVERLOAD"

        @njit
        def f(a):
            return a == "A", a != "A"

        expected = ("RAN CUSTOM EQ OVERLOAD", "RAN CUSTOM NE OVERLOAD")
        self.assertEqual(expected, f(Foo()))
def test_externally_defined_type_is_external(self):
    with self.create_temp_module(self.source_lines) as test_module:
        FooType = test_module.FooType
        self.assertFalse(FooType().is_internal)

        # Register Foo as an extension type backed by an opaque model.
        class Foo(object):
            pass

        register_model(FooType)(models.OpaqueModel)

        @typeof_impl.register(Foo)
        def _typ_foo(val, c):
            return FooType()

        @unbox(FooType)
        def unbox_foo(typ, obj, c):
            return NativeValue(c.context.get_dummy_value())

        # Function to overload.
        def false_if_not_array(a):
            pass

        # An overload that accepts every type, whether or not it comes
        # from Numba's closed type system.
        @overload(false_if_not_array)
        def ol_false_if_not_array(a):
            if isinstance(a, types.Array):
                return lambda a: True
            else:
                return lambda a: False

        @njit
        def call_false_if_not_array(a):
            return false_if_not_array(a)

        self.assertTrue(call_false_if_not_array(np.zeros(10)))
        self.assertFalse(call_false_if_not_array(10))
        # The extension type was accepted.
        self.assertFalse(call_false_if_not_array(Foo()))

        # Same overload again, but guarded so only internal types match.
        def false_if_not_array_closed_system(a):
            pass

        @overload(false_if_not_array_closed_system)
        def ol_false_if_not_array_closed_system(a):
            if a.is_internal:  # guard
                if isinstance(a, types.Array):
                    return lambda a: True
                else:
                    return lambda a: False

        @njit
        def call_false_if_not_array_closed_system(a):
            return false_if_not_array_closed_system(a)

        self.assertTrue(call_false_if_not_array_closed_system(np.zeros(10)))
        self.assertFalse(call_false_if_not_array_closed_system(10))

        # The guarded overload must reject the external extension type.
        with self.assertRaises(errors.TypingError) as raises:
            call_false_if_not_array_closed_system(Foo())
        estr = str(raises.exception)
        self.assertIn(_header_lead, estr)
        self.assertIn("false_if_not_array_closed_system", estr)
        self.assertIn("(Foo)", estr)
na_type = NAType() @typeof_impl.register(_NAType) def typeof_na(val, c): """ Tie instances of _NAType (cudf.NA) to our NAType. Effectively make it so numba sees `cudf.NA` as an instance of this NAType -> handle it accordingly. """ return na_type register_model(NAType)(models.OpaqueModel) # Ultimately, we want numba to produce PTX code that specifies how to implement # an operation on two singular `Masked` structs together, which is defined # as producing a new `Masked` with the right validity and if valid, # the correct value. This happens in two phases: # 1. Specify that `Masked` <op> `Masked` exists and what it should return # 2. Implement how to actually do (1) at the LLVM level # The following code accomplishes (1) - it is really just a way of specifying # that the <op> has a CUDA overload that accepts two `Masked` that # are parameterized with `value_type` and what flavor of `Masked` to return. class MaskedScalarArithOp(AbstractTemplate): def generic(self, args, kws): """ Typing for `Masked` <op> `Masked`
@register_model(Dim3)
class Dim3Model(models.StructModel):
    """Model a dim3 as a struct of three int32 fields (x, y, z)."""

    def __init__(self, dmm, fe_type):
        members = [
            ('x', types.int32),
            ('y', types.int32),
            ('z', types.int32),
        ]
        super().__init__(dmm, fe_type, members)


@register_model(GridGroup)
class GridGroupModel(models.PrimitiveModel):
    """Model a grid group as a 64-bit integer handle."""

    def __init__(self, dmm, fe_type):
        be_type = ir.IntType(64)
        super().__init__(dmm, fe_type, be_type)


@register_default(types.Float)
class FloatModel(models.PrimitiveModel):
    """Map Numba float types onto LLVM scalar types.

    float16 is modelled as a raw 16-bit integer; float32/float64 use the
    native LLVM float/double types. Any other float type is rejected.
    """

    def __init__(self, dmm, fe_type):
        if fe_type == types.float16:
            be_type = ir.IntType(16)
        elif fe_type == types.float32:
            be_type = ir.FloatType()
        elif fe_type == types.float64:
            be_type = ir.DoubleType()
        else:
            raise NotImplementedError(fe_type)
        # Zero-argument super() for consistency with the sibling models above
        # (the original used the legacy two-argument Python 2 form).
        super().__init__(dmm, fe_type, be_type)


# Dispatcher objects carry no runtime data, so an opaque model suffices.
register_model(CUDADispatcher)(models.OpaqueModel)