Example #1
    def test1(self):
        typingctx = typing.Context()
        targetctx = cpu.CPUContext(typingctx)
        test_ir = compiler.run_frontend(test_will_propagate)
        with cpu_target.nested_context(typingctx, targetctx):
            typingctx.refresh()
            targetctx.refresh()
            args = (types.int64, types.int64, types.int64)
            typemap, return_type, calltypes = type_inference_stage(
                typingctx, test_ir, args, None)
            type_annotation = type_annotations.TypeAnnotation(
                func_ir=test_ir,
                typemap=typemap,
                calltypes=calltypes,
                lifted=(),
                lifted_from=None,
                args=args,
                return_type=return_type,
                html_output=config.HTML)
            remove_dels(test_ir.blocks)
            in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
            apply_copy_propagate(test_ir.blocks, in_cps,
                                 get_name_var_table(test_ir.blocks), typemap,
                                 calltypes)

            remove_dead(test_ir.blocks, test_ir.arg_names, test_ir)
            self.assertFalse(findLhsAssign(test_ir, "x"))
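The helpers above are not shown. A minimal sketch of plausible definitions (hypothetical; the real numba test suite has its own versions): test_will_propagate needs a copy that becomes dead once propagated, and findLhsAssign scans the IR for an assignment to the given name.

from numba.core import ir

# Hypothetical test subject: `x` is a pure copy of `a`, so after copy
# propagation every use of `x` reads `a` directly and the now-unused
# assignment `x = a` is deleted by remove_dead().
def test_will_propagate(a, b, c):
    x = a
    y = x + b
    return y + c

# Hypothetical helper: True if any block still assigns to `name`.
def findLhsAssign(func_ir, name):
    for block in func_ir.blocks.values():
        for stmt in block.body:
            if isinstance(stmt, ir.Assign) and stmt.target.name == name:
                return True
    return False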
Example #2
    def test2(self):
        typingctx = typing.Context()
        targetctx = cpu.CPUContext(typingctx)
        test_ir = compiler.run_frontend(test_wont_propagate)
        #print("Num blocks = ", len(test_ir.blocks))
        #print(test_ir.dump())
        with cpu_target.nested_context(typingctx, targetctx):
            typingctx.refresh()
            targetctx.refresh()
            args = (types.int64, types.int64, types.int64)
            typemap, return_type, calltypes = type_inference_stage(
                typingctx, test_ir, args, None)
            type_annotation = type_annotations.TypeAnnotation(
                func_ir=test_ir,
                typemap=typemap,
                calltypes=calltypes,
                lifted=(),
                lifted_from=None,
                args=args,
                return_type=return_type,
                html_output=config.HTML)
            in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
            apply_copy_propagate(test_ir.blocks, in_cps,
                                 get_name_var_table(test_ir.blocks), typemap,
                                 calltypes)

            self.assertTrue(findAssign(test_ir, "x"))
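For the negative case, a hypothetical subject where propagation is blocked (findAssign can reuse the same IR scan as findLhsAssign in the sketch after Example #1):

# Hypothetical test subject: `a` is conditionally reassigned between the
# copy `x = a` and the use of `x`, so the copy is killed on one path and
# copy propagation must leave the assignment to `x` in place.
def test_wont_propagate(a, b, c):
    x = a
    if b > 0:
        a = c
    return x + a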
Example #3
    def test_mk_func_literal(self):
        """make sure make_function is passed to the typer class as a literal"""
        test_ir = compiler.run_frontend(mk_func_test_impl)
        typingctx = cpu_target.typing_context
        typingctx.refresh()
        typemap, _, _ = type_inference_stage(typingctx, test_ir, (), None)
        self.assertTrue(
            any(isinstance(a, types.MakeFunctionLiteral)
                for a in typemap.values())
        )
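The function being compiled is not shown; any body that builds an inner function works, since constructing a closure emits a make_function expression that type inference should type as types.MakeFunctionLiteral. A hypothetical sketch:

# Hypothetical test subject: defining an inner function forces a
# `make_function` into the IR for the typer to see.
def mk_func_test_impl():
    def mk_func():
        pass
    mk_func()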
Example #4
    def generic(self, args, kws):
        """
        Type the overloaded function by compiling the appropriate
        implementation for the given args.
        """
        disp, new_args = self._get_impl(args, kws)
        if disp is None:
            return
        # Compile and type it for the given types
        disp_type = types.Dispatcher(disp)
        # Store the compiled overload for use in the lowering phase if there's
        # no inlining required (otherwise functions would be compiled that are
        # never used, since they are inlined)
        if not self._inline.is_never_inline:
            # need to run the compiler front end up to type inference to compute
            # a signature
            from numba.core import typed_passes, compiler

            ir = compiler.run_frontend(disp_type.dispatcher.py_func)
            resolve = disp_type.dispatcher.get_call_template
            template, pysig, folded_args, kws = resolve(new_args, kws)

            typemap, return_type, calltypes = typed_passes.type_inference_stage(
                self.context, ir, folded_args, None)
            sig = Signature(return_type, folded_args, None)
            # this stores a load of info for use by the cost model function,
            # if one is supplied (by default it is None)
            self._inline_overloads[sig.args] = {"folded_args": folded_args}
            # this stores the compiled overloads, if there's no compiled
            # overload available i.e. function is always inlined, the key still
            # needs to exist for type resolution

            # NOTE: If lowering is failing on a `_EmptyImplementationEntry`,
            #       the inliner has failed to inline this entry correctly.
            impl_init = _EmptyImplementationEntry("always inlined")
            self._compiled_overloads[sig.args] = impl_init
            if not self._inline.is_always_inline:
                # this branch is here because a user has supplied a function to
                # determine whether to inline or not. As a result both the
                # compiled function and the inliner info are needed; delaying
                # their computation leads to an internal state mess at present.
                # TODO: Fix!
                sig = disp_type.get_call_type(self.context, new_args, kws)
                self._compiled_overloads[sig.args] = disp_type.get_overload(
                    sig)
                # store the inliner information, it's used later in the cost
                # model function call
                iinfo = _inline_info(ir, typemap, calltypes, sig)
                self._inline_overloads[sig.args] = {
                    "folded_args": folded_args,
                    "iinfo": iinfo,
                }
        else:
            sig = disp_type.get_call_type(self.context, new_args, kws)
            self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
        return sig
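This generic() method sits on numba's overload template, so it is driven indirectly by the public @overload API. A hedged usage sketch (the function names are illustrative):

from numba import njit
from numba.extending import overload

def describe(x):
    # pure-Python stub being overloaded; never executed under njit
    pass

@overload(describe, inline="always")
def ol_describe(x):
    # inline="always" makes is_always_inline True, so generic() above only
    # records the _EmptyImplementationEntry placeholder; no real overload
    # is compiled because every call site is inlined.
    def impl(x):
        return x + 1
    return impl

@njit
def use(x):
    return describe(x)

use(41)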
Example #5
    def _run_parfor(cls, test_func, args, swap_map=None):
        # TODO: refactor this with get_optimized_numba_ir(), from which it
        #       is copied
        typingctx = typing.Context()
        targetctx = cpu.CPUContext(typingctx)
        test_ir = compiler.run_frontend(test_func)
        options = cpu.ParallelOptions(True)

        tp = MyPipeline(typingctx, targetctx, args, test_ir)

        with cpu_target.nested_context(typingctx, targetctx):
            typingctx.refresh()
            targetctx.refresh()

            inline_pass = inline_closurecall.InlineClosureCallPass(
                tp.state.func_ir, options, typed=True
            )
            inline_pass.run()

            rewrites.rewrite_registry.apply("before-inference", tp.state)

            untyped_passes.ReconstructSSA().run_pass(tp.state)

            (
                tp.state.typemap,
                tp.state.return_type,
                tp.state.calltypes,
                _
            ) = typed_passes.type_inference_stage(
                tp.state.typingctx, tp.state.func_ir, tp.state.args, None
            )

            typed_passes.PreLowerStripPhis().run_pass(tp.state)

            diagnostics = numba.parfors.parfor.ParforDiagnostics()

            preparfor_pass = numba.parfors.parfor.PreParforPass(
                tp.state.func_ir,
                tp.state.typemap,
                tp.state.calltypes,
                tp.state.typingctx,
                options,
                swapped=diagnostics.replaced_fns,
                replace_functions_map=swap_map,
            )
            preparfor_pass.run()

            rewrites.rewrite_registry.apply("after-inference", tp.state)
            return tp, options, diagnostics, preparfor_pass
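MyPipeline is assumed above. A minimal sketch, modeled on the dummy pipelines in numba's own tests: a bare holder for the compiler state that the passes read and write is enough.

from numba.core import compiler

class MyPipeline(object):
    def __init__(self, typingctx, targetctx, args, test_ir):
        # StateDict allows attribute-style access to pipeline state
        self.state = compiler.StateDict()
        self.state.typingctx = typingctx
        self.state.targetctx = targetctx
        self.state.args = args
        self.state.func_ir = test_ir
        self.state.typemap = None
        self.state.return_type = None
        self.state.calltypes = None
        self.state.locals = {}
        self.state.metadata = {}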
Example #6
    def test_obj_func_match(self):
        """Test matching of an object method (other than on an Array;
        see issue #3449)."""
        def test_func():
            d = Dummy([1])
            d.val.append(2)

        test_ir = compiler.run_frontend(test_func)
        typingctx = cpu_target.typing_context
        typemap, _, _ = type_inference_stage(typingctx, test_ir, (), None)
        matched_call = ir_utils.find_callname(test_ir,
                                              test_ir.blocks[0].body[8].value,
                                              typemap)
        self.assertTrue(
            isinstance(matched_call, tuple) and len(matched_call) == 2
            and matched_call[0] == "append")
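Dummy is assumed; for type inference over the constructor call and the append to succeed, it plausibly has to be a jitclass with a list-typed field. A hypothetical minimal version:

from numba import types
from numba.experimental import jitclass

# Hypothetical Dummy: a jitclass whose `val` field is a reflected list,
# so `d.val.append(2)` types as a call to list.append.
@jitclass([("val", types.List(types.intp))])
class Dummy(object):
    def __init__(self, val):
        self.val = val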
Example #7
    def test1(self):
        typingctx = typing.Context()
        targetctx = cpu.CPUContext(typingctx)
        test_ir = compiler.run_frontend(test_will_propagate)
        with cpu_target.nested_context(typingctx, targetctx):
            typingctx.refresh()
            targetctx.refresh()
            args = (types.int64, types.int64, types.int64)
            typemap, _, calltypes, _ = type_inference_stage(
                typingctx, targetctx, test_ir, args, None)
            remove_dels(test_ir.blocks)
            in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
            apply_copy_propagate(test_ir.blocks, in_cps,
                                 get_name_var_table(test_ir.blocks), typemap,
                                 calltypes)

            remove_dead(test_ir.blocks, test_ir.arg_names, test_ir)
            self.assertFalse(findLhsAssign(test_ir, "x"))
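This repeats Example #1 against what is evidently a newer numba API: type_inference_stage here also takes the target context and returns a fourth element, so the result is unpacked four ways (with return_type and the extra element discarded). The body of the test is otherwise identical.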
Example #8
    def get_return_type(self, argtys):
        if config.DEBUG_ARRAY_OPT >= 1:
            print("get_return_type", argtys)
            ir_utils.dump_blocks(self.kernel_ir.blocks)

        if not isinstance(argtys[0], types.npytypes.Array):
            raise ValueError("The first argument to a stencil kernel must "
                             "be the primary input array.")

        from numba.core import typed_passes
        typemap, return_type, calltypes = typed_passes.type_inference_stage(
            self._typingctx, self.kernel_ir, argtys, None, {})
        if isinstance(return_type, types.npytypes.Array):
            raise ValueError(
                "Stencil kernel must return a scalar and not a numpy array.")

        real_ret = types.npytypes.Array(return_type, argtys[0].ndim,
                                        argtys[0].layout)
        return (real_ret, typemap, calltypes)
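get_return_type backs numba's @stencil machinery. A hedged usage sketch of a kernel that satisfies both checks above, i.e. the first argument is an array and the return type is a scalar:

import numpy as np
from numba import njit, stencil

@stencil
def kernel(a):
    # scalar return, so the Array check on return_type passes
    return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0])

@njit
def smooth(a):
    return kernel(a)

smooth(np.ones((5, 5)))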
Example #9
    def generic(self, args, kws):
        """
        Type the overloaded function by compiling the appropriate
        implementation for the given args.
        """
        disp, new_args = self._get_impl(args, kws)
        if disp is None:
            return
        # Compile and type it for the given types
        disp_type = types.Dispatcher(disp)
        # Store the compiled overload for use in the lowering phase if there's
        # no inlining required (otherwise functions would be compiled that are
        # never used, since they are inlined)
        if not self._inline.is_never_inline:
            # need to run the compiler front end up to type inference to compute
            # a signature
            from numba.core import typed_passes, compiler
            from numba.core.inline_closurecall import InlineWorker
            fcomp = disp._compiler
            flags = compiler.Flags()

            # Updating these causes problems?!
            #fcomp.targetdescr.options.parse_as_flags(flags,
            #                                         fcomp.targetoptions)
            #flags = fcomp._customize_flags(flags)

            # spoof a compiler pipeline like the one that will be in use
            tyctx = fcomp.targetdescr.typing_context
            tgctx = fcomp.targetdescr.target_context
            compiler_inst = fcomp.pipeline_class(
                tyctx,
                tgctx,
                None,
                None,
                None,
                flags,
                None,
            )
            inline_worker = InlineWorker(
                tyctx,
                tgctx,
                fcomp.locals,
                compiler_inst,
                flags,
                None,
            )

            # If the inlinee contains something to trigger literal arg dispatch
            # then the pipeline call will unconditionally fail due to a raised
            # ForceLiteralArg exception. Therefore `resolve` is run first: type
            # resolution must occur at some point anyway, and running it here,
            # via the dispatcher, hits any `literally` calls and handles them
            # correctly, i.e. ForceLiteralArg propagates. This has the desired
            # effect of ensuring the pipeline call is only made in situations
            # that will succeed. For context see #5887.
            resolve = disp_type.dispatcher.get_call_template
            template, pysig, folded_args, kws = resolve(new_args, kws)
            ir = inline_worker.run_untyped_passes(disp_type.dispatcher.py_func)

            (typemap, return_type, calltypes,
             _) = typed_passes.type_inference_stage(self.context, ir,
                                                    folded_args, None)
            sig = Signature(return_type, folded_args, None)
            # this stores a load of info for use by the cost model function,
            # if one is supplied (by default it is None)
            self._inline_overloads[sig.args] = {'folded_args': folded_args}
            # this stores the compiled overloads, if there's no compiled
            # overload available i.e. function is always inlined, the key still
            # needs to exist for type resolution

            # NOTE: If lowering is failing on a `_EmptyImplementationEntry`,
            #       the inliner has failed to inline this entry correctly.
            impl_init = _EmptyImplementationEntry('always inlined')
            self._compiled_overloads[sig.args] = impl_init
            if not self._inline.is_always_inline:
                # this branch is here because a user has supplied a function to
                # determine whether to inline or not. As a result both the
                # compiled function and the inliner info are needed; delaying
                # their computation leads to an internal state mess at present.
                # TODO: Fix!
                sig = disp_type.get_call_type(self.context, new_args, kws)
                self._compiled_overloads[sig.args] = disp_type.get_overload(
                    sig)
            # store the inliner information, it's used later in the cost
            # model function call
            iinfo = _inline_info(ir, typemap, calltypes, sig)
            self._inline_overloads[sig.args] = {
                'folded_args': folded_args,
                'iinfo': iinfo
            }
        else:
            sig = disp_type.get_call_type(self.context, new_args, kws)
            self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
        return sig
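This is a later revision of Example #4. Instead of running compiler.run_frontend directly, it builds an InlineWorker and resolves the call template before running the untyped passes, so that a ForceLiteralArg raised by a `literally` call propagates through the dispatcher instead of unconditionally failing the pipeline (see the comment block above and numba issue #5887).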
Example #10
def get_stencil_ir(sf, typingctx, args, scope, loc, input_dict, typemap,
                   calltypes):
    """get typed IR from stencil bytecode
    """
    from numba.core.cpu import CPUContext
    from numba.core.registry import cpu_target
    from numba.core.annotations import type_annotations
    from numba.core.typed_passes import type_inference_stage

    # get untyped IR
    stencil_func_ir = sf.kernel_ir.copy()
    # copy the IR nodes to avoid changing IR in the StencilFunc object
    stencil_blocks = copy.deepcopy(stencil_func_ir.blocks)
    stencil_func_ir.blocks = stencil_blocks

    name_var_table = ir_utils.get_name_var_table(stencil_func_ir.blocks)
    if "out" in name_var_table:
        raise ValueError(
            "Cannot use the reserved word 'out' in stencil kernels.")

    # get typed IR with a dummy pipeline (similar to test_parfors.py)
    targetctx = CPUContext(typingctx)
    with cpu_target.nested_context(typingctx, targetctx):
        tp = DummyPipeline(typingctx, targetctx, args, stencil_func_ir)

        rewrites.rewrite_registry.apply('before-inference', tp.state)

        (tp.state.typemap, tp.state.return_type,
         tp.state.calltypes) = type_inference_stage(
            tp.state.typingctx, tp.state.func_ir, tp.state.args, None)

        type_annotations.TypeAnnotation(func_ir=tp.state.func_ir,
                                        typemap=tp.state.typemap,
                                        calltypes=tp.state.calltypes,
                                        lifted=(),
                                        lifted_from=None,
                                        args=tp.state.args,
                                        return_type=tp.state.return_type,
                                        html_output=config.HTML)

    # make block labels unique
    stencil_blocks = ir_utils.add_offset_to_labels(stencil_blocks,
                                                   ir_utils.next_label())
    min_label = min(stencil_blocks.keys())
    max_label = max(stencil_blocks.keys())
    ir_utils._max_label = max_label

    if config.DEBUG_ARRAY_OPT >= 1:
        print("Initial stencil_blocks")
        ir_utils.dump_blocks(stencil_blocks)

    # rename variables
    var_dict = {}
    for v, typ in tp.state.typemap.items():
        new_var = ir.Var(scope, mk_unique_var(v), loc)
        var_dict[v] = new_var
        typemap[new_var.name] = typ  # add new var type for overall function
    ir_utils.replace_vars(stencil_blocks, var_dict)

    if config.DEBUG_ARRAY_OPT >= 1:
        print("After replace_vars")
        ir_utils.dump_blocks(stencil_blocks)

    # add call types to overall function
    for call, call_typ in tp.state.calltypes.items():
        calltypes[call] = call_typ

    arg_to_arr_dict = {}
    # replace arg with arr
    for block in stencil_blocks.values():
        for stmt in block.body:
            if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Arg):
                if config.DEBUG_ARRAY_OPT >= 1:
                    print("input_dict", input_dict, stmt.value.index,
                          stmt.value.name, stmt.value.index in input_dict)
                arg_to_arr_dict[stmt.value.name] = input_dict[
                    stmt.value.index].name
                stmt.value = input_dict[stmt.value.index]

    if config.DEBUG_ARRAY_OPT >= 1:
        print("arg_to_arr_dict", arg_to_arr_dict)
        print("After replace arg with arr")
        ir_utils.dump_blocks(stencil_blocks)

    ir_utils.remove_dels(stencil_blocks)
    stencil_func_ir.blocks = stencil_blocks
    return stencil_func_ir, sf.get_return_type(args)[0], arg_to_arr_dict
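The DummyPipeline assumed here plays the same role as the MyPipeline sketch after Example #5: a bare holder around a compiler.StateDict carrying typingctx, targetctx, args, func_ir and empty typemap/return_type/calltypes slots, which is all that the rewrite registry and type_inference_stage need.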