Example 1
def external_call_func(context, llvm_module, extfn, args=(), temp_name=None):
    '''Build a call node to the specified external function.

    context --- A numba context
    llvm_module --- An LLVM module
    extfn --- The external function to call
    args --- [optional] arguments for the call
    temp_name --- [optional] Name of the temporary value in LLVM IR.
    '''
    from numba import nodes
    temp_name = temp_name or extfn.name
    assert temp_name is not None

    sig = extfn.signature
    lfunc = extfn.declare_lfunc(context, llvm_module)

    exc_check = dict(badval   = extfn.badval,
                     goodval  = extfn.goodval,
                     exc_msg  = extfn.exc_msg,
                     exc_type = extfn.exc_type,
                     exc_args = extfn.exc_args)

    result = nodes.NativeCallNode(sig, args, lfunc, name=temp_name, **exc_check)

    if extfn.check_pyerr_occurred:
        result = nodes.PyErr_OccurredNode(result)

    return result
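
The helper above gathers an external function's error-reporting metadata (badval, goodval, exception info) into keyword arguments for NativeCallNode and, when extfn.check_pyerr_occurred is set, wraps the call in a PyErr_OccurredNode. The stand-alone sketch below illustrates that pattern with hypothetical stand-in classes (CallNode, ErrCheckNode) and a plain dict describing the external function; it is not the numba.nodes API.

class CallNode:
    """Stand-in for NativeCallNode: records the call plus error-check metadata."""
    def __init__(self, signature, args, name, **exc_check):
        self.signature = signature
        self.args = list(args)
        self.name = name
        self.exc_check = exc_check   # badval / goodval / exc_msg / exc_type / exc_args

class ErrCheckNode:
    """Stand-in for PyErr_OccurredNode: the call result must be followed by a PyErr_Occurred() test."""
    def __init__(self, call):
        self.call = call

def build_external_call(extfn, args=(), temp_name=None):
    exc_check = {key: extfn.get(key)
                 for key in ('badval', 'goodval', 'exc_msg', 'exc_type', 'exc_args')}
    result = CallNode(extfn['signature'], args,
                      name=temp_name or extfn['name'], **exc_check)
    if extfn.get('check_pyerr_occurred'):
        result = ErrCheckNode(result)
    return result

# Example: a C function that signals failure by returning -1.
node = build_external_call({'name': 'PyObject_Length',
                            'signature': 'Py_ssize_t (PyObject *)',
                            'badval': -1},
                           args=['obj'])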
Example 2
    def visit_Tuple(self, node):
        self.check_context(node)

        sig, lfunc = self.context.external_library.declare(
            self.llvm_module, 'PyTuple_Pack')
        objs = self.visitlist(nodes.CoercionNode.coerce(node.elts, object_))
        n = nodes.ConstNode(len(node.elts), minitypes.Py_ssize_t)
        args = [n] + objs
        new_node = nodes.NativeCallNode(sig, args, lfunc, name='tuple')
        # TODO: determine element type of node.elts
        new_node.type = typesystem.TupleType(object_, size=len(node.elts))
        return nodes.ObjectTempNode(new_node)
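
At run time the NativeCallNode built above amounts to a single call to the CPython C-API function PyTuple_Pack(n, item0, ..., itemN-1). A rough, CPython-only ctypes illustration of that call follows; the literal values are arbitrary and this is only meant to show what the generated call does, not how it is emitted.

import ctypes

PyTuple_Pack = ctypes.pythonapi.PyTuple_Pack
PyTuple_Pack.restype = ctypes.py_object      # returns a new reference

a, b = 1, 'two'
packed = PyTuple_Pack(ctypes.c_ssize_t(2),   # number of items, as Py_ssize_t
                      ctypes.py_object(a),
                      ctypes.py_object(b))
assert packed == (1, 'two')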
Example 3
    def _print(self, value, dest=None):
        signature, lfunc = self.context.external_library.declare(
            self.llvm_module, 'PyObject_CallMethod')

        if dest is None:
            dest = nodes.ObjectInjectNode(sys.stdout)

        value = function_util.external_call(self.context,
                                            self.llvm_module,
                                            "PyObject_Str",
                                            args=[value])
        args = [dest, nodes.ConstNode("write"), nodes.ConstNode("O"), value]
        return nodes.NativeCallNode(signature, args, lfunc)
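
In pure Python terms, the chain built here (PyObject_Str on the value, then PyObject_CallMethod(dest, "write", "O", ...)) is roughly equivalent to the following sketch:

import sys

def print_equivalent(value, dest=None):
    dest = sys.stdout if dest is None else dest
    dest.write(str(value))   # str() ~ PyObject_Str, .write() ~ PyObject_CallMethod(dest, "write", "O", ...)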
Example 4
    def call(self, name, *args, **kw):
        temp_name = kw.pop('temp_name', name)
        sig, lfunc = self.function_by_name(name, **kw)

        if name in globals():
            external_func = globals()[name]
            exc_check = dict(badval=external_func.badval,
                             goodval=external_func.goodval,
                             exc_msg=external_func.exc_msg,
                             exc_type=external_func.exc_type,
                             exc_args=external_func.exc_args)
        else:
            exc_check = {}

        result = nodes.NativeCallNode(sig,
                                      args,
                                      lfunc,
                                      name=temp_name,
                                      **exc_check)
        return result
Example 5
def call_jit(jit_func, args):
    return nodes.NativeCallNode(jit_func.signature, args, jit_func.lfunc)
Example 6
def build_wrapper_function_ast(env, wrapper_lfunc, llvm_module):
    """
    Build AST for LLVM function wrapper.

        wrapper_lfunc: the LLVM wrapper function
        llvm_module: module the wrapper is being defined in

    The resulting AST has a NativeCallNode to the wrapped function. The
    arguments are LLVMValueRefNode nodes which still need their llvm_value
    set to the object from the tuple. This happens in visit_FunctionWrapperNode
    during codegen.
    """
    func = env.crnt.func
    func_signature = env.crnt.func_signature
    func_name = env.crnt.func_name

    # Insert external declaration
    lfunc = llvm_module.get_or_insert_function(
        func_signature.to_llvm(env.context),
        env.crnt.lfunc.name)

    # Build AST
    wrapper = nodes.FunctionWrapperNode(lfunc,
                                        func_signature,
                                        func,
                                        fake_pyfunc,
                                        func_name)

    error_return = ast.Return(nodes.CoercionNode(nodes.NULL_obj,
                                                 object_))

    is_closure = bool(closures.is_closure_signature(func_signature))
    nargs = len(func_signature.args) - is_closure

    # Call wrapped function with unpacked object arguments
    # (delay actual arguments)
    args = [nodes.LLVMValueRefNode(object_, None)
                for i in range(nargs)]

    if is_closure:
        # Insert m_self as scope argument type
        closure_scope = get_closure_scope(func_signature, wrapper_lfunc.args[0])
        args.insert(0, closure_scope)

    func_call = nodes.NativeCallNode(func_signature, args, lfunc)

    if not is_obj(func_signature.return_type):
        # Check for error using PyErr_Occurred()
        func_call = nodes.PyErr_OccurredNode(func_call)

    # Coerce and return result
    if func_signature.return_type.is_void:
        wrapper.body = func_call
        result_node = nodes.ObjectInjectNode(None)
    else:
        wrapper.body = None
        result_node = func_call

    wrapper.return_result = ast.Return(value=nodes.CoercionNode(result_node,
                                                                object_))

    # Update wrapper
    wrapper.error_return = error_return
    wrapper.cellvars = []

    wrapper.wrapped_nargs = nargs
    wrapper.wrapped_args = args[is_closure:]

    return wrapper
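
The docstring's point about delayed arguments can be shown with a small, purely hypothetical sketch: placeholder nodes are created when the wrapper AST is built, and their values are filled in later (here by a fill_args() helper standing in for what visit_FunctionWrapperNode does during codegen).

class ValueRefNode:
    """Stand-in for LLVMValueRefNode: a typed placeholder whose value arrives later."""
    def __init__(self, type_):
        self.type = type_
        self.llvm_value = None          # set during codegen

def build_wrapper_args(nargs):
    # Build the wrapper AST with placeholders for the unpacked object arguments.
    return [ValueRefNode('object') for _ in range(nargs)]

def fill_args(arg_nodes, unpacked_values):
    # Later pass: plug the values unpacked from the argument tuple into the nodes.
    for node, value in zip(arg_nodes, unpacked_values):
        node.llvm_value = value

args = build_wrapper_args(nargs=2)
fill_args(args, ['%arg0', '%arg1'])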
Example 7
    def register_array_expression(self, node, lhs=None):
        super(ArrayExpressionRewriteNative, self).register_array_expression(
            node, lhs)

        lhs_type = lhs.type if lhs else node.type
        is_expr = lhs is None

        if node.type.is_array and lhs_type.ndim < node.type.ndim:
            # TODO: this is valid in NumPy if the leading dimensions of the
            # TODO: RHS have extent 1
            raise error.NumbaError(
                node, "Right hand side must have a "
                      "dimensionality <= %d" % lhs_type.ndim)

        # Create ufunc scalar kernel
        ufunc_ast, signature, ufunc_builder = self.get_py_ufunc_ast(lhs, node)
        signature.struct_by_reference = True

        # Compile ufunc scalar kernel with numba
        ast.fix_missing_locations(ufunc_ast)
        func_env, (_, _, _) = pipeline.run_pipeline2(
            self.env, None, ufunc_ast, signature,
            function_globals={},
        )

        # Manual linking
        lfunc = func_env.lfunc

        # print lfunc
        operands = ufunc_builder.operands
        functions.keep_alive(self.func, lfunc)

        operands = [nodes.CloneableNode(operand) for operand in operands]

        if lhs is not None:
            lhs = nodes.CloneableNode(lhs)
            broadcast_operands = [lhs] + operands
            lhs = lhs.clone
        else:
            broadcast_operands = operands[:]

        shape = slicenodes.BroadcastNode(lhs_type, broadcast_operands)
        operands = [op.clone for op in operands]

        if lhs is None and self.nopython:
            raise error.NumbaError(
                node, "Cannot allocate new memory in nopython context")
        elif lhs is None:
            # TODO: determine best output order at runtime
            shape = shape.cloneable
            lhs = nodes.ArrayNewEmptyNode(lhs_type, shape.clone,
                                          lhs_type.is_f_contig).cloneable

        # Build minivect wrapper kernel
        context = NumbaproStaticArgsContext()
        context.llvm_module = self.env.llvm_context.module
        # context.debug = True
        context.optimize_broadcasting = False
        b = context.astbuilder

        variables = [b.variable(name_node.type, "op%d" % i)
                     for i, name_node in enumerate([lhs] + operands)]
        miniargs = [b.funcarg(variable) for variable in variables]
        body = miniutils.build_kernel_call(lfunc.name, signature, miniargs, b)

        minikernel = b.function_from_numpy(
            templating.temp_name("array_expression"), body, miniargs)
        lminikernel, ctypes_kernel = context.run_simple(
            minikernel, specializers.StridedSpecializer)

        # Build call to minivect kernel
        operands.insert(0, lhs)
        args = [shape]
        scalar_args = []
        for operand in operands:
            if operand.type.is_array:
                data_p = self.array_attr(operand, 'data')
                data_p = nodes.CoercionNode(data_p,
                                            operand.type.dtype.pointer())
                if not isinstance(operand, nodes.CloneNode):
                    operand = nodes.CloneNode(operand)
                strides_p = self.array_attr(operand, 'strides')
                args.extend((data_p, strides_p))
            else:
                scalar_args.append(operand)

        args.extend(scalar_args)
        result = nodes.NativeCallNode(minikernel.type, args, lminikernel)

        # Use native slicing in array expressions
        slicenodes.mark_nopython(ast.Suite(body=result.args))

        if not is_expr:
            # a[:] = b[:] * c[:]
            return result

        # b[:] * c[:], return new array as expression
        return nodes.ExpressionNode(stmts=[result], expr=lhs.clone)
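
The net effect of the rewrite is an element-wise scalar kernel applied over the broadcast operands, writing into the left-hand side or into a freshly allocated array. Below is a NumPy-only sketch of that semantic model; apply_scalar_kernel is a hypothetical stand-in for the compiled minivect kernel and is not part of this code.

import numpy as np

def apply_scalar_kernel(kernel, *operands, out=None):
    ops = np.broadcast_arrays(*operands)          # cf. BroadcastNode
    if out is None:                               # cf. ArrayNewEmptyNode
        out = np.empty(ops[0].shape, dtype=np.result_type(*operands))
    for idx in np.ndindex(out.shape):             # the compiled kernel loops natively
        out[idx] = kernel(*(op[idx] for op in ops))
    return out

b = np.arange(3.0)
c = np.arange(3.0) * 10
result = apply_scalar_kernel(lambda x, y: x * y, b, c)   # b[:] * c[:]
assert np.allclose(result, b * c)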