Example #1
File: elemwise.py, Project: mgorny/aesara
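
These snippets are excerpts from Aesara's Numba backend, so they rely on the surrounding module's imports and helpers. As a rough sketch of the names assumed below (the module paths are a best guess and may differ across Aesara versions):

import numpy as np
import scipy
import scipy.special
import numba
from functools import reduce

# Helper utilities used by the snippets; in recent Aesara versions these
# live under aesara.link.utils and aesara.link.numba.dispatch:
from aesara.link.utils import (
    compile_function_src,
    get_name_for_object,
    unique_name_generator,
)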
def numba_funcify_CAReduce(op, node, **kwargs):
    axes = op.axis
    if axes is None:
        axes = list(range(node.inputs[0].ndim))

    if hasattr(op, "acc_dtype") and op.acc_dtype is not None:
        acc_dtype = op.acc_dtype
    else:
        acc_dtype = node.outputs[0].type.dtype

    np_acc_dtype = np.dtype(acc_dtype)

    scalar_op_identity = op.scalar_op.identity
    # Integer dtypes cannot represent the +/-inf identities used by, e.g.,
    # min/max reductions, so clamp an infinite identity to the dtype's extrema.
    if np_acc_dtype.kind == "i" and not np.isfinite(scalar_op_identity):
        if np.isposinf(scalar_op_identity):
            scalar_op_identity = np.iinfo(np_acc_dtype).max
        else:
            scalar_op_identity = np.iinfo(np_acc_dtype).min

    # Make sure it has the correct dtype
    scalar_op_identity = np.array(scalar_op_identity, dtype=np_acc_dtype)

    input_name = get_name_for_object(node.inputs[0])
    ndim = node.inputs[0].ndim
    careduce_py_fn = create_multiaxis_reducer(
        op.scalar_op,
        scalar_op_identity,
        axes,
        ndim,
        np.dtype(node.outputs[0].type.dtype),
        input_name=input_name,
    )

    careduce_fn = jit_compile_reducer(node, careduce_py_fn)
    return careduce_fn
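
The clamping above matters for reductions such as Max over integer inputs: the scalar identity is -inf, which has no integer representation. A minimal standalone sketch of the same logic (the dtype and identity here are illustrative, not taken from the snippet):

import numpy as np

acc_dtype = np.dtype("int64")
identity = -np.inf  # e.g. the identity of a Max reduction
if acc_dtype.kind == "i" and not np.isfinite(identity):
    # +/-inf cannot be represented as an integer; clamp to the extrema.
    identity = np.iinfo(acc_dtype).max if np.isposinf(identity) else np.iinfo(acc_dtype).min
identity = np.array(identity, dtype=acc_dtype)
print(identity)  # -9223372036854775808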
Example #2
def numba_funcify_ScalarOp(op, node, **kwargs):
    # TODO: Do we need to cache these functions so that we don't end up
    # compiling the same Numba function over and over again?

    scalar_func_name = op.nfunc_spec[0]

    if scalar_func_name.startswith("scipy."):
        func_package = scipy
        scalar_func_name = scalar_func_name.split(".", 1)[-1]
    else:
        func_package = np

    if "." in scalar_func_name:
        scalar_func = reduce(getattr, [scipy] + scalar_func_name.split("."))
    else:
        scalar_func = getattr(func_package, scalar_func_name)

    input_names = ", ".join([v.auto_name for v in node.inputs])

    global_env = {"scalar_func": scalar_func}

    scalar_op_fn_name = get_name_for_object(scalar_func)
    scalar_op_src = f"""
def {scalar_op_fn_name}({input_names}):
    return scalar_func({input_names})
    """
    scalar_op_fn = compile_function_src(scalar_op_src, scalar_op_fn_name,
                                        global_env)

    return numba.njit(scalar_op_fn)
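
To see how the name resolution works, trace a hypothetical nfunc_spec entry of ("scipy.special.gammaln", 1, 1): the "scipy." prefix is stripped once, and the remaining dotted path is resolved attribute by attribute:

import scipy
import scipy.special  # importing the submodule registers the attribute on scipy
from functools import reduce

name = "scipy.special.gammaln".split(".", 1)[-1]  # -> "special.gammaln"
func = reduce(getattr, [scipy] + name.split("."))
assert func is scipy.special.gammaln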
Example #3
def numba_funcify_CAReduce(op, node, **kwargs):
    axes = op.axis
    if axes is None:
        axes = list(range(node.inputs[0].ndim))

    if hasattr(op, "acc_dtype") and op.acc_dtype is not None:
        acc_dtype = op.acc_dtype
    else:
        acc_dtype = node.outputs[0].type.dtype

    np_acc_dtype = np.dtype(acc_dtype)

    scalar_op_identity = np.asarray(op.scalar_op.identity, dtype=np_acc_dtype)

    input_name = get_name_for_object(node.inputs[0])
    ndim = node.inputs[0].ndim
    careduce_py_fn = create_multiaxis_reducer(
        op.scalar_op,
        scalar_op_identity,
        axes,
        ndim,
        np.dtype(node.outputs[0].type.dtype),
        input_name=input_name,
    )

    careduce_fn = jit_compile_reducer(node, careduce_py_fn)
    return careduce_fn
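
Compared with Example #1, this variant passes op.scalar_op.identity straight through, so it implicitly assumes the identity is already representable in the accumulator dtype (there is no clamping of infinite identities for integer dtypes).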
Example #4
def test_fgraph_to_python_names():
    import inspect

    x = scalar("1x")   # not a valid Python identifier (leading digit)
    y = scalar("_")    # valid identifier; usable as a parameter name as-is
    z = scalar()       # unnamed, so an auto-generated name is used
    q = scalar("def")  # a reserved keyword, so it must be renamed
    r = NoneConst

    out_fg = FunctionGraph([x, y, z, q, r], [x, y, z, q, r], clone=False)
    out_jx = fgraph_to_python(out_fg, to_python)

    sig = inspect.signature(out_jx)
    assert (x.auto_name, "_", z.auto_name, q.auto_name,
            r.name) == tuple(sig.parameters.keys())
    assert (1, 2, 3, 4, 5) == out_jx(1, 2, 3, 4, 5)

    obj = object()
    assert get_name_for_object(obj) == type(obj).__name__
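
The names in this test are chosen to exercise the renaming logic; a quick check of why they cannot all be used as parameter names directly:

import keyword

print("1x".isidentifier())       # False: leading digit, must be renamed
print("_".isidentifier())        # True: usable as-is
print("def".isidentifier())      # True, but...
print(keyword.iskeyword("def"))  # ...it is a reserved keyword, so it must be renamed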
Example #5
def numba_funcify_CAReduce(op, node, **kwargs):
    axes = op.axis
    if axes is None:
        axes = list(range(node.inputs[0].ndim))

    if hasattr(op, "acc_dtype") and op.acc_dtype is not None:
        acc_dtype = op.acc_dtype
    else:
        acc_dtype = node.outputs[0].type.dtype

    np_acc_dtype = np.dtype(acc_dtype)

    scalar_op_identity = np.asarray(op.scalar_op.identity, dtype=np_acc_dtype)

    # Rebind `acc_dtype` to the corresponding Numba type.
    acc_dtype = numba.np.numpy_support.from_dtype(np_acc_dtype)

    scalar_nfunc_spec = op.scalar_op.nfunc_spec

    # We construct a dummy `Apply` that has the minimum required number of
    # inputs for the scalar `Op`.  Without this, we would get a scalar function
    # with too few arguments.
    dummy_node = Apply(
        op,
        [tensor(acc_dtype, [False]) for i in range(scalar_nfunc_spec[1])],
        [tensor(acc_dtype, [False]) for o in range(scalar_nfunc_spec[2])],
    )
    elemwise_fn = numba_funcify_Elemwise(op,
                                         dummy_node,
                                         use_signature=True,
                                         **kwargs)

    input_name = get_name_for_object(node.inputs[0])
    ndim = node.inputs[0].ndim
    careduce_fn = create_multiaxis_reducer(elemwise_fn,
                                           scalar_op_identity,
                                           axes,
                                           ndim,
                                           acc_dtype,
                                           input_name=input_name)

    return numba.njit(careduce_fn)
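
The from_dtype call above maps a NumPy dtype to the corresponding Numba type, which is what the reducer receives as its accumulator type; a quick illustration:

import numpy as np
from numba.np import numpy_support

nb_type = numpy_support.from_dtype(np.dtype("float64"))
print(nb_type)  # float64, a numba.types.Float instance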
Example #6
def numba_funcify_ScalarOp(op, node, **kwargs):
    # TODO: Do we need to cache these functions so that we don't end up
    # compiling the same Numba function over and over again?

    scalar_func_name = op.nfunc_spec[0]

    if scalar_func_name.startswith("scipy."):
        func_package = scipy
        scalar_func_name = scalar_func_name.split(".", 1)[-1]
    else:
        func_package = np

    if "." in scalar_func_name:
        scalar_func = reduce(getattr, [scipy] + scalar_func_name.split("."))
    else:
        scalar_func = getattr(func_package, scalar_func_name)

    scalar_op_fn_name = get_name_for_object(scalar_func)
    unique_names = unique_name_generator([scalar_op_fn_name, "scalar_func"],
                                         suffix_sep="_")

    input_names = ", ".join(
        [unique_names(v, force_unique=True) for v in node.inputs])

    global_env = {"scalar_func": scalar_func}

    scalar_op_src = f"""
def {scalar_op_fn_name}({input_names}):
    return scalar_func({input_names})
    """
    scalar_op_fn = compile_function_src(scalar_op_src, scalar_op_fn_name, {
        **globals(),
        **global_env
    })

    signature = create_numba_signature(node, force_scalar=True)

    return numba_basic.numba_njit(
        signature, inline="always",
        fastmath=config.numba__fastmath)(scalar_op_fn)
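
compile_function_src itself is not shown in these excerpts. A minimal sketch of what such a helper plausibly does, assuming it simply execs the generated source against the provided environment and returns the new function object:

def compile_function_src(src, function_name, global_env):
    # Names like `scalar_func` are resolved through `global_env` when the
    # compiled function runs.
    mod_code = compile(src, "<generated>", mode="exec")
    exec(mod_code, global_env)
    return global_env[function_name]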
Example #7
def numba_funcify_ScalarOp(op, node, **kwargs):
    # TODO: Do we need to cache these functions so that we don't end up
    # compiling the same Numba function over and over again?

    scalar_func_name = op.nfunc_spec[0]

    if scalar_func_name.startswith("scipy."):
        func_package = scipy
        scalar_func_name = scalar_func_name.split(".", 1)[-1]
    else:
        func_package = np

    if "." in scalar_func_name:
        scalar_func = reduce(getattr, [scipy] + scalar_func_name.split("."))
    else:
        scalar_func = getattr(func_package, scalar_func_name)

    scalar_op_fn_name = get_name_for_object(scalar_func)

    global_env = {"scalar_func": scalar_func}

    input_tmp_dtypes = None
    if func_package == scipy and hasattr(scalar_func, "types"):
        # The `numba-scipy` bindings don't provide implementations for all
        # inputs types, so we need to convert the inputs to floats and back.
        inp_dtype_kinds = tuple(
            np.dtype(inp.type.dtype).kind for inp in node.inputs)
        accepted_inp_kinds = tuple(
            sig_type.split("->")[0] for sig_type in scalar_func.types)
        if not any(
                all(dk == ik for dk, ik in zip(inp_dtype_kinds, ok_kinds))
                for ok_kinds in accepted_inp_kinds):
            # They're usually ordered from lower-to-higher precision, so
            # we pick the last acceptable input types
            #
            # XXX: We should pick the first acceptable float/int types in
            # reverse, excluding all the incompatible ones (e.g. `"O"`).
            # The assumption is that this is only used by `numba-scipy`-exposed
            # functions, although it's possible for this to be triggered by
            # something else from the `scipy` package
            input_tmp_dtypes = tuple(
                np.dtype(k) for k in accepted_inp_kinds[-1])

    if input_tmp_dtypes is None:
        unique_names = unique_name_generator(
            [scalar_op_fn_name, "scalar_func"], suffix_sep="_")
        input_names = ", ".join(
            [unique_names(v, force_unique=True) for v in node.inputs])
        scalar_op_src = f"""
def {scalar_op_fn_name}({input_names}):
    return scalar_func({input_names})
        """
    else:
        global_env["direct_cast"] = numba_basic.direct_cast
        global_env["output_dtype"] = np.dtype(node.outputs[0].type.dtype)
        input_tmp_dtype_names = {
            f"inp_tmp_dtype_{i}": i_dtype
            for i, i_dtype in enumerate(input_tmp_dtypes)
        }
        global_env.update(input_tmp_dtype_names)

        unique_names = unique_name_generator(
            [scalar_op_fn_name, "scalar_func"] + list(global_env.keys()),
            suffix_sep="_")

        input_names = [unique_names(v, force_unique=True) for v in node.inputs]
        converted_call_args = ", ".join([
            f"direct_cast({i_name}, {i_tmp_dtype_name})"
            for i_name, i_tmp_dtype_name in zip(input_names,
                                                input_tmp_dtype_names.keys())
        ])
        scalar_op_src = f"""
def {scalar_op_fn_name}({', '.join(input_names)}):
    return direct_cast(scalar_func({converted_call_args}), output_dtype)
        """

    scalar_op_fn = compile_function_src(scalar_op_src, scalar_op_fn_name, {
        **globals(),
        **global_env
    })

    signature = create_numba_signature(node, force_scalar=True)

    return numba_basic.numba_njit(
        signature, inline="always",
        fastmath=config.numba__fastmath)(scalar_op_fn)
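
The kind-matching logic above can be traced with a hypothetical numba-scipy function that only advertises float signatures; an int64 input then matches none of the accepted kinds and triggers the temporary float cast:

import numpy as np

types = ("f->f", "d->d")                     # stand-in for scalar_func.types
inp_dtype_kinds = (np.dtype("int64").kind,)  # ("i",)
accepted_inp_kinds = tuple(sig.split("->")[0] for sig in types)  # ("f", "d")

needs_cast = not any(
    all(dk == ik for dk, ik in zip(inp_dtype_kinds, ok_kinds))
    for ok_kinds in accepted_inp_kinds
)
assert needs_cast
# The last (highest-precision) accepted kinds become the temporary dtypes:
input_tmp_dtypes = tuple(np.dtype(k) for k in accepted_inp_kinds[-1])
print(input_tmp_dtypes)  # (dtype('float64'),)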