Example #1
def numba_funcify_Mul(op, node, **kwargs):

    signature = create_numba_signature(node, force_scalar=True)

    nary_mul_fn = binary_to_nary_func(node.inputs, "mul", "*")

    return numba.njit(signature, inline="always")(nary_mul_fn)
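Here `binary_to_nary_func` presumably chains the binary `*` operator across all of the node's inputs; a minimal sketch of the kind of function it could emit for a three-input `Mul` node (the argument names are hypothetical, the real ones come from the node's input variables):

# Hypothetical output of binary_to_nary_func(node.inputs, "mul", "*")
# for a node with three inputs.
def mul(x, y, z):
    return x * y * z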
Example #2
def jit_compile_reducer(node, fn, **kwds):
    """Compile Python source for reduction loops using additional optimizations.

    Parameters
    ----------
    node
        A node from which the signature can be derived.
    fn
        The Python function object to compile.
    kwds
        Extra keywords to be added to the :func:`numba.njit` function.

    Returns
    -------
    A :func:`numba.njit`-compiled function.

    """
    signature = create_numba_signature(node, reduce_to_scalar=True)

    # Eagerly compile the function using increased optimizations.  This should
    # help improve nested loop reductions.
    with use_optimized_cheap_pass():
        res = numba_basic.numba_njit(
            signature,
            boundscheck=False,
            fastmath=config.numba__fastmath,
            **kwds,
        )(fn)

    return res
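A hedged usage sketch, assuming a hand-written reduction kernel (the `careduce_sum` loop below is hypothetical; the backend normally generates these loops from the reduction Op) and an Apply node `node` whose types drive the signature:

# Hypothetical reduction kernel: sums a 1-d array down to a scalar.
def careduce_sum(x):
    acc = 0.0
    for i in range(x.shape[0]):
        acc += x[i]
    return acc

# The node's input/output types determine the Numba signature through
# create_numba_signature(node, reduce_to_scalar=True).
compiled_sum = jit_compile_reducer(node, careduce_sum)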
Example #3
def create_vectorize_func(
    scalar_op_fn: Callable,
    node: Apply,
    use_signature: bool = False,
    identity: Optional[Any] = None,
    **kwargs,
) -> Callable:
    r"""Create a vectorized Numba function from a `Apply`\s Python function."""

    if len(node.outputs) > 1:
        raise NotImplementedError(
            "Multi-output Elemwise Ops are not supported by the Numba backend")

    if use_signature:
        signature = [create_numba_signature(node, force_scalar=True)]
    else:
        signature = []

    target = (getattr(node.tag, "numba__vectorize_target", None)
              or config.numba__vectorize_target)

    numba_vectorized_fn = numba_basic.numba_vectorize(
        signature,
        identity=identity,
        target=target,
        fastmath=config.numba__fastmath)

    py_scalar_func = getattr(scalar_op_fn, "py_func", scalar_op_fn)

    elemwise_fn = numba_vectorized_fn(scalar_op_fn)
    elemwise_fn.py_scalar_func = py_scalar_func

    return elemwise_fn
Example #4
def create_vectorize_func(op, node, use_signature=False, identity=None, **kwargs):
    scalar_op_fn = numba_funcify(op.scalar_op, node=node, inline="always", **kwargs)

    if len(node.outputs) > 1:
        raise NotImplementedError(
            "Multi-output Elemwise Ops are not supported by the Numba backend"
        )

    if use_signature:
        signature = [create_numba_signature(node, force_scalar=True)]
    else:
        signature = []

    target = (
        getattr(node.tag, "numba__vectorize_target", None)
        or config.numba__vectorize_target
    )

    numba_vectorized_fn = numba_basic.numba_vectorize(
        signature, identity=identity, target=target, fastmath=config.numba__fastmath
    )

    py_scalar_func = getattr(scalar_op_fn, "py_func", scalar_op_fn)

    elemwise_fn = numba_vectorized_fn(scalar_op_fn)
    elemwise_fn.py_scalar_func = py_scalar_func

    return elemwise_fn
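The `py_scalar_func` attribute keeps the plain Python scalar function around (Numba dispatchers expose it as `.py_func`), so the uncompiled implementation stays reachable; a small hedged illustration, assuming an elementwise node `exp_node` for an `exp` Op (both names are illustrative):

import numpy as np

elemwise_exp = create_vectorize_func(exp_op, exp_node)
vec_out = elemwise_exp(np.array([0.0, 1.0, 2.0]))  # compiled, vectorized path
ref_out = elemwise_exp.py_scalar_func(1.0)         # pure-Python scalar path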
Example #5
def numba_funcify_Composite(op, node, **kwargs):
    signature = create_numba_signature(node, force_scalar=True)
    composite_fn = numba_basic.numba_njit(
        signature,
        fastmath=config.numba__fastmath)(numba_funcify(op.fgraph,
                                                       squeeze_output=True,
                                                       **kwargs))
    return composite_fn
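Since `op.fgraph` is a scalar FunctionGraph, `numba_funcify(op.fgraph, squeeze_output=True)` should return an ordinary Python function over scalars that evaluates the fused graph; a rough sketch of the kind of function that gets njit-compiled here, assuming a composite that fuses `exp(x) + y`:

import math

# Illustrative only: the real function is generated from op.fgraph and
# already operates on scalar inputs.
def composite(x, y):
    return math.exp(x) + y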
Example #6
def numba_funcify_Mul(op, node, **kwargs):

    signature = create_numba_signature(node, force_scalar=True)

    nary_mul_fn = binary_to_nary_func(node.inputs, "mul", "*")

    return numba_basic.numba_njit(signature,
                                  inline="always",
                                  fastmath=config.numba__fastmath)(nary_mul_fn)
Example #7
def numba_funcify_ScalarOp(op, node, **kwargs):
    # TODO: Do we need to cache these functions so that we don't end up
    # compiling the same Numba function over and over again?

    scalar_func_name = op.nfunc_spec[0]

    if scalar_func_name.startswith("scipy."):
        func_package = scipy
        scalar_func_name = scalar_func_name.split(".", 1)[-1]
    else:
        func_package = np

    if "." in scalar_func_name:
        scalar_func = reduce(getattr, [scipy] + scalar_func_name.split("."))
    else:
        scalar_func = getattr(func_package, scalar_func_name)

    scalar_op_fn_name = get_name_for_object(scalar_func)
    unique_names = unique_name_generator([scalar_op_fn_name, "scalar_func"],
                                         suffix_sep="_")

    input_names = ", ".join(
        [unique_names(v, force_unique=True) for v in node.inputs])

    global_env = {"scalar_func": scalar_func}

    scalar_op_src = f"""
def {scalar_op_fn_name}({input_names}):
    return scalar_func({input_names})
    """
    scalar_op_fn = compile_function_src(scalar_op_src, scalar_op_fn_name, {
        **globals(),
        **global_env
    })

    signature = create_numba_signature(node, force_scalar=True)

    return numba_basic.numba_njit(
        signature, inline="always",
        fastmath=config.numba__fastmath)(scalar_op_fn)
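To make the template concrete: assuming a two-input node whose `nfunc_spec` points at `np.add`, the `scalar_op_src` string above would expand to roughly the following (the argument names are whatever `unique_name_generator` assigned to the node's inputs):

# Hypothetical expansion of scalar_op_src with scalar_func = np.add.
def add(x, y):
    return scalar_func(x, y)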
Example #8
def create_vectorize_func(op,
                          node,
                          use_signature=False,
                          identity=None,
                          **kwargs):
    scalar_op_fn = numba_funcify(op.scalar_op,
                                 node=node,
                                 inline="always",
                                 **kwargs)

    if len(node.outputs) > 1:
        raise NotImplementedError(
            "Multi-output Elemwise Ops are not supported by the Numba backend")

    if use_signature:
        signature = [create_numba_signature(node, force_scalar=True)]
    else:
        signature = []

    numba_vectorize = numba.vectorize(signature, identity=identity)
    elemwise_fn = numba_vectorize(scalar_op_fn)
    elemwise_fn.py_scalar_func = scalar_op_fn

    return elemwise_fn
Example #9
def numba_funcify_Composite(op, node, **kwargs):
    signature = create_numba_signature(node, force_scalar=True)
    composite_fn = numba.njit(signature)(numba_funcify(op.fgraph,
                                                       squeeze_output=True,
                                                       **kwargs))
    return composite_fn
Example #10
def numba_funcify_ScalarOp(op, node, **kwargs):
    # TODO: Do we need to cache these functions so that we don't end up
    # compiling the same Numba function over and over again?

    scalar_func_name = op.nfunc_spec[0]

    if scalar_func_name.startswith("scipy."):
        func_package = scipy
        scalar_func_name = scalar_func_name.split(".", 1)[-1]
    else:
        func_package = np

    if "." in scalar_func_name:
        scalar_func = reduce(getattr, [scipy] + scalar_func_name.split("."))
    else:
        scalar_func = getattr(func_package, scalar_func_name)

    scalar_op_fn_name = get_name_for_object(scalar_func)
    unique_names = unique_name_generator([scalar_op_fn_name, "scalar_func"],
                                         suffix_sep="_")

    global_env = {"scalar_func": scalar_func}

    input_tmp_dtypes = None
    if func_package == scipy and hasattr(scalar_func, "types"):
        # The `numba-scipy` bindings don't provide implementations for all
        # input types, so we need to convert the inputs to floats and back.
        inp_dtype_kinds = tuple(
            np.dtype(inp.type.dtype).kind for inp in node.inputs)
        accepted_inp_kinds = tuple(
            sig_type.split("->")[0] for sig_type in scalar_func.types)
        if not any(
                all(dk == ik for dk, ik in zip(inp_dtype_kinds, ok_kinds))
                for ok_kinds in accepted_inp_kinds):
            # They're usually ordered from lower-to-higher precision, so
            # we pick the last acceptable input types
            #
            # XXX: We should pick the first acceptable float/int types in
            # reverse, excluding all the incompatible ones (e.g. `"0"`).
            # The assumption is that this is only used by `numba-scipy`-exposed
            # functions, although it's possible for this to be triggered by
            # something else from the `scipy` package
            input_tmp_dtypes = tuple(
                np.dtype(k) for k in accepted_inp_kinds[-1])

    if input_tmp_dtypes is None:
        unique_names = unique_name_generator(
            [scalar_op_fn_name, "scalar_func"], suffix_sep="_")
        input_names = ", ".join(
            [unique_names(v, force_unique=True) for v in node.inputs])
        scalar_op_src = f"""
def {scalar_op_fn_name}({input_names}):
    return scalar_func({input_names})
        """
    else:
        global_env["direct_cast"] = numba_basic.direct_cast
        global_env["output_dtype"] = np.dtype(node.outputs[0].type.dtype)
        input_tmp_dtype_names = {
            f"inp_tmp_dtype_{i}": i_dtype
            for i, i_dtype in enumerate(input_tmp_dtypes)
        }
        global_env.update(input_tmp_dtype_names)

        unique_names = unique_name_generator(
            [scalar_op_fn_name, "scalar_func"] + list(global_env.keys()),
            suffix_sep="_")

        input_names = [unique_names(v, force_unique=True) for v in node.inputs]
        converted_call_args = ", ".join([
            f"direct_cast({i_name}, {i_tmp_dtype_name})"
            for i_name, i_tmp_dtype_name in zip(input_names,
                                                input_tmp_dtype_names.keys())
        ])
        scalar_op_src = f"""
def {scalar_op_fn_name}({', '.join(input_names)}):
    return direct_cast(scalar_func({converted_call_args}), output_dtype)
        """

    scalar_op_fn = compile_function_src(scalar_op_src, scalar_op_fn_name, {
        **globals(),
        **global_env
    })

    signature = create_numba_signature(node, force_scalar=True)

    return numba_basic.numba_njit(
        signature, inline="always",
        fastmath=config.numba__fastmath)(scalar_op_fn)
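For the casting branch, assuming a single-input SciPy function (say, something like `scipy.special.gammaln`) called on an integer input that the `numba-scipy` signatures don't cover, the generated source would look roughly like this (`inp_tmp_dtype_0` and `output_dtype` are injected into the function's globals by the code above):

# Hypothetical expansion of the casting branch of scalar_op_src.
def gammaln(x):
    return direct_cast(scalar_func(direct_cast(x, inp_tmp_dtype_0)), output_dtype)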