Example #1
    def generic(self, args, kws):
        assert not kws

        if len(args) == 1:
            # 0-dim arrays return one result array
            ary = args[0]
            ndim = max(ary.ndim, 1)
            retty = types.UniTuple(types.Array(types.intp, 1, 'C'), ndim)
            return signature(retty, ary)

        elif len(args) == 3:
            cond, x, y = args
            retdty = from_dtype(
                np.promote_types(as_dtype(getattr(args[1], 'dtype', args[1])),
                                 as_dtype(getattr(args[2], 'dtype', args[2]))))
            if isinstance(cond, types.Array):
                # array where()
                if isinstance(x, types.Array) and isinstance(y, types.Array):
                    if (cond.ndim == x.ndim == y.ndim):
                        if x.layout == y.layout == cond.layout:
                            retty = types.Array(retdty, x.ndim, x.layout)
                        else:
                            retty = types.Array(retdty, x.ndim, 'C')
                        return signature(retty, *args)
                else:
                    # x and y both scalar
                    retty = types.Array(retdty, cond.ndim, cond.layout)
                    return signature(retty, *args)
            else:
                # scalar where()
                if not isinstance(x, types.Array):
                    retty = types.Array(retdty, 0, 'C')
                    return signature(retty, *args)
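The three-argument branch above computes the result dtype by mapping both operands' Numba types to NumPy dtypes, promoting them, and mapping back. A minimal standalone sketch of that round trip, using only public as_dtype/from_dtype calls (the numba.np.numpy_support import path may vary slightly across Numba versions):

import numpy as np
from numba import types
from numba.np.numpy_support import as_dtype, from_dtype

# Numba dtype -> NumPy dtypes -> promoted NumPy dtype -> back to a Numba dtype
ret_dty = from_dtype(np.promote_types(as_dtype(types.int32),
                                      as_dtype(types.float32)))
print(ret_dty)  # float64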
Example #2
def _parse_nested_sequence(context, typ):
    """
    Parse a (possibly 0d) nested sequence type.
    A (ndim, dtype) tuple is returned.  Note the sequence may still be
    heterogeneous, as long as it converts to the given dtype.
    """
    if isinstance(typ, (types.Buffer, )):
        raise TypingError("%r not allowed in a homogeneous sequence" % typ)
    elif isinstance(typ, (types.Sequence, )):
        n, dtype = _parse_nested_sequence(context, typ.dtype)
        return n + 1, dtype
    elif isinstance(typ, (types.BaseTuple, )):
        if typ.count == 0:
            # Mimic Numpy's behaviour
            return 1, types.float64
        n, dtype = _parse_nested_sequence(context, typ[0])
        dtypes = [dtype]
        for i in range(1, typ.count):
            _n, dtype = _parse_nested_sequence(context, typ[i])
            if _n != n:
                raise TypingError("type %r does not have a regular shape" %
                                  (typ, ))
            dtypes.append(dtype)
        dtype = context.unify_types(*dtypes)
        if dtype is None:
            raise TypingError("cannot convert %r to a homogeneous type" % typ)
        return n + 1, dtype
    else:
        # Scalar type => check it's valid as a Numpy array dtype
        as_dtype(typ)
        return 0, typ
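The final branch uses as_dtype purely as a validity check on scalar element types. A small sketch of that behaviour; the exact exception class raised for unsupported types depends on the Numba version, so it is caught broadly here:

from numba import types
from numba.np.numpy_support import as_dtype

print(as_dtype(types.complex128))   # complex128

try:
    as_dtype(types.unicode_type)    # has no NumPy dtype equivalent
except Exception as exc:
    print(type(exc).__name__)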
Example #3
File: nb.py  Project: vroomzel/vectorbt
def expand_mapped_nb(mapped_arr: tp.Array1d, col_arr: tp.Array1d,
                     idx_arr: tp.Array1d, target_shape: tp.Shape,
                     fill_value: float) -> tp.Array2d:
    """Set each element to a value by boolean mask."""
    nb_enabled = not isinstance(mapped_arr, np.ndarray)
    if nb_enabled:
        mapped_arr_dtype = as_dtype(mapped_arr.dtype)
        fill_value_dtype = as_dtype(fill_value)
    else:
        mapped_arr_dtype = mapped_arr.dtype
        fill_value_dtype = np.array(fill_value).dtype
    dtype = np.promote_types(mapped_arr_dtype, fill_value_dtype)

    def _expand_mapped_nb(mapped_arr, col_arr, idx_arr, target_shape,
                          fill_value):
        out = np.full(target_shape, fill_value, dtype=dtype)

        for r in range(mapped_arr.shape[0]):
            out[idx_arr[r], col_arr[r]] = mapped_arr[r]
        return out

    if not nb_enabled:
        return _expand_mapped_nb(mapped_arr, col_arr, idx_arr, target_shape,
                                 fill_value)

    return _expand_mapped_nb
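Functions like the one above double as plain-Python helpers and as compile-time builders: when the arguments are Numba types rather than np.ndarray instances, the element dtype is recovered with as_dtype and an implementation function is returned instead of a result. A hedged sketch of one way to wire such a builder in, using numba.extending.overload; vectorbt's actual registration mechanism may differ, and promoted_full is an illustrative name only:

import numpy as np
from numba import njit
from numba.extending import overload
from numba.np.numpy_support import as_dtype

def promoted_full(arr, fill_value):
    # pure-Python path: real arrays and scalars
    dtype = np.promote_types(arr.dtype, np.array(fill_value).dtype)
    return np.full(arr.shape, fill_value, dtype=dtype)

@overload(promoted_full)
def _ov_promoted_full(arr, fill_value):
    # compile-time path: arr and fill_value are Numba *types* here
    dtype = np.promote_types(as_dtype(arr.dtype), as_dtype(fill_value))

    def impl(arr, fill_value):
        return np.full(arr.shape, fill_value, dtype=dtype)

    return impl

@njit
def demo(a, v):
    return promoted_full(a, v)

print(demo(np.arange(3), 0.5).dtype)  # float64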
Example #4
File: nb.py  Project: vroomzel/vectorbt
def stack_expand_mapped_nb(mapped_arr: tp.Array1d, col_map: tp.ColMap,
                           fill_value: float) -> tp.Array2d:
    """Expand mapped array by stacking without using index data."""
    nb_enabled = not isinstance(mapped_arr, np.ndarray)
    if nb_enabled:
        mapped_arr_dtype = as_dtype(mapped_arr.dtype)
        fill_value_dtype = as_dtype(fill_value)
    else:
        mapped_arr_dtype = mapped_arr.dtype
        fill_value_dtype = np.array(fill_value).dtype
    dtype = np.promote_types(mapped_arr_dtype, fill_value_dtype)

    def _stack_expand_mapped_nb(mapped_arr, col_map, fill_value):
        col_idxs, col_lens = col_map
        col_start_idxs = np.cumsum(col_lens) - col_lens
        out = np.full((np.max(col_lens), col_lens.shape[0]),
                      fill_value,
                      dtype=dtype)

        for col in range(col_lens.shape[0]):
            col_len = col_lens[col]
            if col_len == 0:
                continue
            col_start_idx = col_start_idxs[col]
            ridxs = col_idxs[col_start_idx:col_start_idx + col_len]
            out[:col_len, col] = mapped_arr[ridxs]

        return out

    if not nb_enabled:
        return _stack_expand_mapped_nb(mapped_arr, col_map, fill_value)

    return _stack_expand_mapped_nb
Example #5
def ga_or(a, b):
    if isinstance(a, MultiVectorType) and isinstance(b, MultiVectorType):
        if a.layout_type != b.layout_type:
            raise numba.TypingError(
                "MultiVector objects belong to different layouts")
        imt_func = a.layout_type.obj.imt_func

        def impl(a, b):
            return a.layout.MultiVector(imt_func(a.value, b.value))

        return impl
    elif isinstance(a, types.abstract.Number) and isinstance(
            b, MultiVectorType):
        ret_type = np.result_type(_numpy_support.as_dtype(a),
                                  _numpy_support.as_dtype(b.value_type.dtype))

        def impl(a, b):
            return b.layout.MultiVector(np.zeros_like(b.value, dtype=ret_type))

        return impl
    elif isinstance(a, MultiVectorType) and isinstance(b,
                                                       types.abstract.Number):
        ret_type = np.result_type(_numpy_support.as_dtype(a.value_type.dtype),
                                  _numpy_support.as_dtype(b))

        def impl(a, b):
            return a.layout.MultiVector(np.zeros_like(a.value, dtype=ret_type))

        return impl
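In the mixed scalar/MultiVector branches above, the result dtype comes from mapping the scalar's Numba type and the multivector's storage dtype to NumPy and letting np.result_type decide. A standalone sketch of just that computation:

import numpy as np
from numba import types
from numba.np import numpy_support as _numpy_support

ret_type = np.result_type(_numpy_support.as_dtype(types.int64),
                          _numpy_support.as_dtype(types.float32))
print(ret_type)  # float64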
Example #6
def ga_sub(a, b):
    if isinstance(a, MultiVectorType) and isinstance(b, MultiVectorType):
        if a.layout_type != b.layout_type:
            raise numba.TypingError(
                "MultiVector objects belong to different layouts")

        def impl(a, b):
            return a.layout.MultiVector(a.value - b.value)

        return impl
    elif isinstance(a, types.abstract.Number) and isinstance(
            b, MultiVectorType):
        scalar_index = b.layout_type.obj._basis_blade_order.bitmap_to_index[0]
        ret_type = np.result_type(_numpy_support.as_dtype(a),
                                  _numpy_support.as_dtype(b.value_type.dtype))

        def impl(a, b):
            op = -b.value.astype(ret_type)
            op[scalar_index] += a
            return b.layout.MultiVector(op)

        return impl
    elif isinstance(a, MultiVectorType) and isinstance(b,
                                                       types.abstract.Number):
        scalar_index = a.layout_type.obj._basis_blade_order.bitmap_to_index[0]
        ret_type = np.result_type(_numpy_support.as_dtype(a.value_type.dtype),
                                  _numpy_support.as_dtype(b))

        def impl(a, b):
            op = a.value.astype(ret_type)
            op[scalar_index] -= b
            return a.layout.MultiVector(op)

        return impl
Example #7
def find_common_dtype_from_numpy_dtypes(array_types, scalar_types):
    """Used to find common numba dtype for a sequences of numba dtypes each representing some numpy dtype"""
    np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype in array_types]
    np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in scalar_types]
    np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes)
    numba_common_dtype = numpy_support.from_dtype(np_common_dtype)

    return numba_common_dtype
Example #8
 def kernel_wrapper(values):
     n = len(values)
     inputs = [np.empty(n, dtype=numpy_support.as_dtype(tp)) for tp in argtypes]
     output = np.empty(n, dtype=numpy_support.as_dtype(restype))
     for i, vs in enumerate(values):
         for v, inp in zip(vs, inputs):
             inp[i] = v
     args = [output] + inputs
     kernel[int(math.ceil(n / 256)), 256](*args)
     return list(output)
Example #9
 def unify(self, typingctx, other):
     """
     Unify the two number types using Numpy's rules.
     """
     from numba.np import numpy_support
     if isinstance(other, Number):
         # XXX: this can produce unsafe conversions,
         # e.g. would unify {int64, uint64} to float64
         a = numpy_support.as_dtype(self)
         b = numpy_support.as_dtype(other)
         sel = np.promote_types(a, b)
         return numpy_support.from_dtype(sel)
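The "unsafe conversion" caveat in the comment is easy to reproduce with NumPy's promotion rules directly:

import numpy as np

# mixing signed and unsigned 64-bit integers promotes to a float
print(np.promote_types(np.int64, np.uint64))  # float64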
Example #10
 def check_round(cfunc, values, inty, outty, decimals):
     # Create input and output arrays of the right type
     arr = values.astype(as_dtype(inty))
     out = np.zeros_like(arr).astype(as_dtype(outty))
     pyout = out.copy()
     _fixed_np_round(arr, decimals, pyout)
     self.memory_leak_setup()
     cfunc(arr, decimals, out)
     self.memory_leak_teardown()
     np.testing.assert_allclose(out, pyout)
     # Output shape mismatch
     with self.assertRaises(ValueError) as raises:
         cfunc(arr, decimals, out[1:])
     self.assertEqual(str(raises.exception), "invalid output shape")
Example #11
    def test_hypot(self, flags=enable_pyobj_flags):
        pyfunc = hypot
        x_types = [types.int64, types.uint64, types.float32, types.float64]
        x_values = [1, 2, 3, 4, 5, 6, .21, .34]
        y_values = [x + 2 for x in x_values]
        # Issue #563: precision issues with math.hypot() under Windows.
        prec = 'single'
        self.run_binary(pyfunc, x_types, x_values, y_values, flags, prec)

        # Check that values that overflow in naive implementations do not
        # in the numba impl

        def naive_hypot(x, y):
            return math.sqrt(x * x + y * y)

        for fltty in (types.float32, types.float64):
            cr = self.ccache.compile(pyfunc, (fltty, fltty), flags=flags)
            cfunc = cr.entry_point
            dt = numpy_support.as_dtype(fltty).type
            val = dt(np.finfo(dt).max / 30.)
            nb_ans = cfunc(val, val)
            self.assertPreciseEqual(nb_ans, pyfunc(val, val), prec='single')
            self.assertTrue(np.isfinite(nb_ans))

            with warnings.catch_warnings():
                warnings.simplefilter("error", RuntimeWarning)
                self.assertRaisesRegexp(RuntimeWarning,
                                        'overflow encountered in .*_scalars',
                                        naive_hypot, val, val)
Example #12
def test_generic_ptx(dtype):

    size = 500

    lhs_arr = np.random.random(size).astype(dtype)
    lhs_col = Series(lhs_arr)._column

    rhs_arr = np.random.random(size).astype(dtype)
    rhs_col = Series(rhs_arr)._column

    def generic_function(a, b):
        return a ** 3 + b

    nb_type = numpy_support.from_dtype(cudf.dtype(dtype))
    type_signature = (nb_type, nb_type)

    ptx_code, output_type = compile_ptx(
        generic_function, type_signature, device=True
    )

    dtype = numpy_support.as_dtype(output_type).type

    out_col = libcudf.binaryop.binaryop_udf(lhs_col, rhs_col, ptx_code, dtype)

    result = lhs_arr ** 3 + rhs_arr

    np.testing.assert_almost_equal(result, out_col.to_array())
Example #13
    def build(self, cres):
        """
        Returns (dtype numbers, function ptr, EnvironmentObject)
        """
        # Build wrapper for gufunc entry point
        signature = cres.signature
        info = build_gufunc_wrapper(
            self.py_func,
            cres,
            self.sin,
            self.sout,
            cache=self.cache,
            is_parfors=False,
        )

        env = info.env
        ptr = info.library.get_pointer_to_function(info.name)
        # Get dtypes
        dtypenums = []
        for a in signature.args:
            if isinstance(a, types.Array):
                ty = a.dtype
            else:
                ty = a
            dtypenums.append(as_dtype(ty).num)
        return dtypenums, ptr, env
Example #14
def map_struct_to_record_dtype(cffi_type):
    """Convert a cffi type into a NumPy Record dtype
    """
    fields = {
        'names': [],
        'formats': [],
        'offsets': [],
        'itemsize': ffi.sizeof(cffi_type),
    }
    is_aligned = True
    for k, v in cffi_type.fields:
        # guard unsupported values
        if v.bitshift != -1:
            msg = "field {!r} has bitshift, this is not supported"
            raise ValueError(msg.format(k))
        if v.flags != 0:
            msg = "field {!r} has flags, this is not supported"
            raise ValueError(msg.format(k))
        if v.bitsize != -1:
            msg = "field {!r} has bitsize, this is not supported"
            raise ValueError(msg.format(k))
        dtype = numpy_support.as_dtype(map_type(v.type,
                                                use_record_dtype=True), )
        fields['names'].append(k)
        fields['formats'].append(dtype)
        fields['offsets'].append(v.offset)
        # Check alignment
        is_aligned &= (v.offset % dtype.alignment == 0)

    return numpy_support.from_dtype(np.dtype(fields, align=is_aligned))
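The field dictionary assembled above is the standard NumPy structured-dtype specification, so the same shape of data can be exercised without cffi. A minimal sketch of the construction and the from_dtype/as_dtype round trip; field names, offsets and itemsize here are made up for illustration:

import numpy as np
from numba.np import numpy_support

fields = {
    'names': ['x', 'y'],
    'formats': [np.int32, np.float64],
    'offsets': [0, 8],
    'itemsize': 16,
}
rec = np.dtype(fields, align=True)
nb_rec = numpy_support.from_dtype(rec)   # Numba Record type
print(rec)
print(numpy_support.as_dtype(nb_rec))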
Example #15
def compile_udf(udf, type_signature):
    """Compile ``udf`` with `numba`

    Compile a python callable function ``udf`` with
    `numba.cuda.compile_ptx_for_current_device(device=True)` using
    ``type_signature`` into CUDA PTX together with the generated output type.

    The output is expected to be passed to the PTX parser in `libcudf`
    to generate a CUDA device function to be inlined into CUDA kernels,
    compiled at runtime and launched.

    Parameters
    --------
    udf:
      a python callable function

    type_signature:
      a tuple specifying the type of each input parameter of ``udf``.
      The types should come from `numba.types`; NumPy dtypes can be
      converted with `numba.numpy_support.from_dtype(...)`.

    Returns
    --------
    ptx_code:
      The compiled CUDA PTX

    output_type:
      A NumPy type

    """

    # Check if we've already compiled a similar (but possibly distinct)
    # function before
    codebytes = udf.__code__.co_code
    if udf.__closure__ is not None:
        cvars = tuple([x.cell_contents for x in udf.__closure__])
        cvarbytes = dumps(cvars)
    else:
        cvarbytes = b""

    key = (type_signature, codebytes, cvarbytes)
    res = _udf_code_cache.get(key)
    if res:
        return res

    # We haven't compiled a function like this before, so need to fall back to
    # compilation with Numba
    ptx_code, return_type = cuda.compile_ptx_for_current_device(udf,
                                                                type_signature,
                                                                device=True)
    if not isinstance(return_type, cudf.core.udf.typing.MaskedType):
        output_type = numpy_support.as_dtype(return_type).type
    else:
        output_type = return_type

    # Populate the cache for this function
    res = (ptx_code, output_type)
    _udf_code_cache[key] = res

    return res
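The cache key above distinguishes UDFs by bytecode plus pickled closure contents, so two lambdas with identical bytecode but different captured values do not collide. A standalone sketch of that idea; make_key is an illustrative helper, not the cudf implementation:

from pickle import dumps

def make_key(udf, type_signature):
    cvars = tuple(c.cell_contents for c in (udf.__closure__ or ()))
    return (type_signature, udf.__code__.co_code, dumps(cvars))

def scaled(k):
    return lambda x: x * k

f, g = scaled(2), scaled(3)
print(f.__code__.co_code == g.__code__.co_code)                # True: same bytecode
print(make_key(f, ('float64',)) == make_key(g, ('float64',)))  # False: closures differ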
Example #16
 def get_default_scalar(self, nbtype):
     meta_type = type(nbtype)
     if isinstance(nbtype, types.Number):
         res = as_dtype(nbtype).type(self._default_data[meta_type])
     elif isinstance(nbtype, types.UnicodeType):
         res = self._default_data[meta_type]
     return res
Example #17
def test_generic_ptx(dtype):

    size = 500

    lhs_arr = np.random.random(size).astype(dtype)
    lhs_col = Series(lhs_arr)._column

    rhs_arr = np.random.random(size).astype(dtype)
    rhs_col = Series(rhs_arr)._column

    @numba.cuda.jit(device=True)
    def generic_function(a, b):
        return a ** 3 + b

    nb_type = numpy_support.from_dtype(np.dtype(dtype))
    type_signature = (nb_type, nb_type)

    result = generic_function.compile(type_signature)
    ptx = generic_function.inspect_ptx(type_signature)
    ptx_code = ptx.decode("utf-8")

    output_type = numpy_support.as_dtype(result.signature.return_type)

    out_col = libcudf.binaryop.binaryop_udf(
        lhs_col, rhs_col, ptx_code, output_type.type
    )

    result = lhs_arr ** 3 + rhs_arr

    np.testing.assert_almost_equal(result, out_col.to_array())
Example #18
def _get_proper_func(func_32, func_64, dtype, dist_name="the given"):
    """
        Most of the standard NumPy distributions that accept dtype argument
        only support either np.float32 or np.float64 as dtypes.

        This is a helper function that helps Numba select the proper underlying
        implementation according to provided dtype.
    """
    if isinstance(dtype, types.Omitted):
        dtype = dtype.value

    np_dt = dtype
    if isinstance(dtype, type):
        nb_dt = from_dtype(np.dtype(dtype))
    elif isinstance(dtype, types.NumberClass):
        nb_dt = dtype
        np_dt = as_dtype(nb_dt)

    if np_dt not in [np.float32, np.float64]:
        raise TypingError("Argument dtype is not one of the" +
                          " expected type(s): " + " np.float32 or np.float64")

    if np_dt == np.float32:
        next_func = func_32
    else:
        next_func = func_64

    return next_func, nb_dt
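The two accepted dtype forms above correspond to how callers reach the function: a concrete NumPy type when called from Python, or a NumberClass such as types.float32 when dtype=np.float32 is written inside jitted code. A small sketch of the two conversions (this assumes as_dtype accepts NumberClass instances, which the snippet itself relies on):

import numpy as np
from numba import types
from numba.np.numpy_support import as_dtype, from_dtype

print(from_dtype(np.dtype(np.float32)))            # float32  (NumPy type -> Numba type)
print(as_dtype(types.NumberClass(types.float64)))  # float64  (NumberClass -> NumPy dtype)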
Example #19
File: cudautils.py  Project: wphicks/cudf
def compile_udf(udf, type_signature):
    """Compile ``udf`` with `numba`

    Compile a python callable function ``udf`` with
    `numba.cuda.compile_ptx_for_current_device(device=True)` using
    ``type_signature`` into CUDA PTX together with the generated output type.

    The output is expected to be passed to the PTX parser in `libcudf`
    to generate a CUDA device function to be inlined into CUDA kernels,
    compiled at runtime and launched.

    Parameters
    --------
    udf:
      a python callable function

    type_signature:
      a tuple specifying the type of each input parameter of ``udf``.
      The types should come from `numba.types`; NumPy dtypes can be
      converted with `numba.numpy_support.from_dtype(...)`.

    Returns
    --------
    ptx_code:
      The compiled CUDA PTX

    output_type:
      A NumPy type

    """
    ptx_code, return_type = cuda.compile_ptx_for_current_device(udf,
                                                                type_signature,
                                                                device=True)
    output_type = numpy_support.as_dtype(return_type)
    return (ptx_code, output_type.type)
Example #20
def _build_element_wise_ufunc_wrapper(cres, signature):
    '''Build a wrapper for the ufunc loop entry point given by the
    compilation result object, using the element-wise signature.
    '''
    ctx = cres.target_context
    library = cres.library
    fname = cres.fndesc.llvm_func_name

    with global_compiler_lock:
        info = build_ufunc_wrapper(library, ctx, fname, signature,
                                   cres.objectmode, cres)
        ptr = info.library.get_pointer_to_function(info.name)
    # Get dtypes
    dtypenums = [as_dtype(a).num for a in signature.args]
    dtypenums.append(as_dtype(signature.return_type).num)
    return dtypenums, ptr, cres.environment
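The "dtype numbers" collected here are NumPy's internal type numbers, which the ufunc machinery uses when registering loops. A quick sketch:

import numpy as np
from numba import types
from numba.np.numpy_support import as_dtype

sig_args = (types.float64, types.float64)
dtypenums = [as_dtype(a).num for a in sig_args]
print(dtypenums == [np.dtype(np.float64).num] * 2)  # True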
Example #21
def box_array(typ, val, c):
    assert c.context.enable_nrt
    np_dtype = numpy_support.as_dtype(typ.dtype)
    dtypeptr = c.env_manager.read_const(c.env_manager.add_const(np_dtype))
    newary = c.pyapi.nrt_adapt_ndarray_to_python(typ, val, dtypeptr)
    # Steals NRT ref
    c.context.nrt.decref(c.builder, typ, val)
    return newary
Example #22
 def test_record_dtype_with_titles_roundtrip(self):
     recdtype = np.dtype([(("title a", 'a'), np.float64), ('b', np.float64)])
     nbtype = numpy_support.from_dtype(recdtype)
     self.assertTrue(nbtype.is_title('title a'))
     self.assertFalse(nbtype.is_title('a'))
     self.assertFalse(nbtype.is_title('b'))
     got = numpy_support.as_dtype(nbtype)
     self.assertEqual(got, recdtype)
Example #23
 def get_random_sequence(self, nbtype, n=10):
     if isinstance(nbtype, types.Number):
         values = np.arange(n // 2, dtype=as_dtype(nbtype))
         res = np.random.choice(values, n)
     elif isinstance(nbtype, types.UnicodeType):
         values = gen_strlist(n // 2)
         res = list(np.random.choice(values, n))
     return res
Example #24
 def _get_py_col_dtype(ctype):
     """ Re-creates column dtype as python type to be used in read_csv call """
     dtype = ctype.dtype
     if ctype == string_array_type:
         return str
     if isinstance(ctype, Categorical):
         return _reconstruct_CategoricalDtype(ctype.pd_dtype)
     return numpy_support.as_dtype(dtype)
Example #25
 def gen_array_factory(numba_dtype):
     """
     Create a function that creates an array.
     Bind the numpy dtype because I don't know how to create
     the numba version
     """
     np_dtype = numpy_support.as_dtype(numba_dtype)
     return lambda shape: np.zeros(shape, np_dtype)
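A brief usage sketch of the factory above, assuming the numpy_support import used by the snippet is in scope; the shape is arbitrary:

from numba import types

make_f32 = gen_array_factory(types.float32)
arr = make_f32((2, 3))
print(arr.shape, arr.dtype)  # (2, 3) float32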
Example #26
File: numpy_funcs.py  Project: pearu/rbc
def get_type_limits(eltype):
    np_dtype = numpy_support.as_dtype(eltype)
    if isinstance(eltype, types.Integer):
        return np.iinfo(np_dtype)
    elif isinstance(eltype, types.Float):
        return np.finfo(np_dtype)
    else:
        msg = 'Type {} not supported'.format(eltype)
        raise errors.TypingError(msg)
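Illustrative calls to get_type_limits, assuming the imports used by the snippet are in scope:

from numba import types

print(get_type_limits(types.int16).max)    # 32767
print(get_type_limits(types.float32).eps)  # ~1.1920929e-07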
Example #27
def compile_udf(udf, type_signature):
    """Compile ``udf`` with `numba`

    Compile a python callable function ``udf`` with
    `numba.cuda.compile_ptx_for_current_device(device=True)` using
    ``type_signature`` into CUDA PTX together with the generated output type.

    The output is expected to be passed to the PTX parser in `libcudf`
    to generate a CUDA device function to be inlined into CUDA kernels,
    compiled at runtime and launched.

    Parameters
    --------
    udf:
      a python callable function

    type_signature:
      a tuple specifying the type of each input parameter of ``udf``.
      The types should come from `numba.types`; NumPy dtypes can be
      converted with `numba.numpy_support.from_dtype(...)`.

    Returns
    --------
    ptx_code:
      The compiled CUDA PTX

    output_type:
      A NumPy type

    """
    import cudf.core.udf

    key = make_cache_key(udf, type_signature)
    res = _udf_code_cache.get(key)
    if res:
        return res

    # We haven't compiled a function like this before, so need to fall back to
    # compilation with Numba
    ptx_code, return_type = cuda.compile_ptx_for_current_device(udf,
                                                                type_signature,
                                                                device=True)
    if not isinstance(return_type, cudf.core.udf.typing.MaskedType):
        output_type = numpy_support.as_dtype(return_type).type
    else:
        output_type = return_type

    # Populate the cache for this function
    res = (ptx_code, output_type)
    _udf_code_cache[key] = res

    return res
Example #28
    def generic(self, args, kws):
        ufunc = self.ufunc
        base_types, explicit_outputs, ndims, layout = self._handle_inputs(
            ufunc, args, kws)
        ufunc_loop = ufunc_find_matching_loop(ufunc, base_types)
        if ufunc_loop is None:
            raise TypingError("can't resolve ufunc {0} for types {1}".format(
                ufunc.__name__, args))

        # check if all the types involved in the ufunc loop are supported in this mode
        if not supported_ufunc_loop(ufunc, ufunc_loop):
            msg = "ufunc '{0}' using the loop '{1}' not supported in this mode"
            raise TypingError(
                msg=msg.format(ufunc.__name__, ufunc_loop.ufunc_sig))

        # if there is any explicit output type, check that it is valid
        explicit_outputs_np = [as_dtype(tp.dtype) for tp in explicit_outputs]

        # Numpy will happily use unsafe conversions (although it will actually warn)
        if not all(
                np.can_cast(fromty, toty, 'unsafe')
                for (fromty, toty
                     ) in zip(ufunc_loop.numpy_outputs, explicit_outputs_np)):
            msg = "ufunc '{0}' can't cast result to explicit result type"
            raise TypingError(msg=msg.format(ufunc.__name__))

        # A valid loop was found that is compatible. The result of type inference should
        # be based on the explicit output types and, when those are not available,
        # on the type given by the selected NumPy loop
        out = list(explicit_outputs)
        implicit_output_count = ufunc.nout - len(explicit_outputs)
        if implicit_output_count > 0:
            # XXX this is sometimes wrong for datetime64 and timedelta64,
            # as ufunc_find_matching_loop() doesn't do any type inference
            ret_tys = ufunc_loop.outputs[-implicit_output_count:]
            if ndims > 0:
                assert layout is not None
                ret_tys = [
                    types.Array(dtype=ret_ty, ndim=ndims, layout=layout)
                    for ret_ty in ret_tys
                ]
                ret_tys = [
                    resolve_output_type(self.context, args, ret_ty)
                    for ret_ty in ret_tys
                ]
            out.extend(ret_tys)

        # note: although the previous code should support multiple return values, only one
        #       is supported as of now (signature may not support more than one).
        #       there is a check enforcing only one output
        out.extend(args)
        return signature(*out)
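The "unsafe conversions" remark above is literal: with casting='unsafe', NumPy reports nearly every numeric cast as allowed, which is why the explicit-output check uses that mode. A tiny illustration:

import numpy as np

print(np.can_cast(np.float64, np.int32, 'unsafe'))  # True
print(np.can_cast(np.float64, np.int32, 'safe'))    # False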
Example #29
def dtype_generated_usecase(a, b, dtype=None):
    if isinstance(dtype, (types.misc.NoneType, types.misc.Omitted)):
        out_dtype = np.result_type(*(np.dtype(ary.dtype.name)
                                   for ary in (a, b)))
    elif isinstance(dtype, (types.DType, types.NumberClass)):
        out_dtype = as_dtype(dtype)
    else:
        raise TypeError("Unhandled Type %s" % type(dtype))

    def _fn(a, b, dtype=None):
        return np.ones(a.shape, dtype=out_dtype)

    return _fn
Example #30
def box_array(typ, val, c):
    nativearycls = c.context.make_array(typ)
    nativeary = nativearycls(c.context, c.builder, value=val)
    if c.context.enable_nrt:
        np_dtype = numpy_support.as_dtype(typ.dtype)
        dtypeptr = c.env_manager.read_const(c.env_manager.add_const(np_dtype))
        # Steals NRT ref
        newary = c.pyapi.nrt_adapt_ndarray_to_python(typ, val, dtypeptr)
        return newary
    else:
        parent = nativeary.parent
        c.pyapi.incref(parent)
        return parent