Example #1
0
def _run_ssa(blocks):
    """Run SSA reconstruction on IR blocks of a function.
    """
    if not blocks:
        # Nothing to reconstruct for an empty block map.
        return {}
    # Build the CFG and iterated dominance frontiers once up front;
    # both are reused for every violating variable below.
    cfg = compute_cfg_from_blocks(blocks)
    df_plus = _iterated_domfronts(cfg)
    # Collect every variable that breaks the single-assignment property.
    violators = _find_defs_violators(blocks)
    # Repair one SSA-violating variable at a time.
    for varname in violators:
        _logger.debug("Fix SSA violator on var %s", varname)
        # LHS: give each assignment to the variable a fresh name.
        blocks, defmap = _fresh_vars(blocks, varname)
        _logger.debug("Replaced assignments: %s", pformat(defmap))
        # RHS: rebind every use to its reaching definition.
        blocks = _fix_ssa_vars(blocks, varname, defmap, cfg, df_plus)

    # Post-condition: the rewrite must not have altered the CFG shape.
    cfg_post = compute_cfg_from_blocks(blocks)
    if cfg_post != cfg:
        raise errors.CompilerError("CFG mutated in SSA pass")
    return blocks
Example #2
0
    def _rebalance_arrs(self, array_dists, parfor_dists):
        """Generate rebalance calls for OneD_Var-distributed arrays that may
        become imbalanced.

        An array is selected for rebalancing when it is accessed in a
        OneD_Var parfor that either sits inside a loop or writes to one of
        the OneD_Var arrays it accesses.

        Returns True if any rebalance code was generated, False otherwise.
        """
        # find sequential loop bodies
        cfg = analysis.compute_cfg_from_blocks(self.func_ir.blocks)
        loop_bodies = set()
        for loop in cfg.loops().values():
            loop_bodies |= loop.body

        rebalance_arrs = set()

        for label, block in self.func_ir.blocks.items():
            for inst in block.body:
                # TODO: handle hiframes filter etc.
                if (isinstance(inst, Parfor)
                        and parfor_dists[inst.id] == Distribution.OneD_Var):
                    array_accesses = _get_array_accesses(
                        inst.loop_body, self.func_ir, self.typemap)
                    # set comprehension instead of set(genexp) (idiom)
                    onedv_arrs = {
                        arr for (arr, ind) in array_accesses
                        if arr in array_dists
                        and array_dists[arr] == Distribution.OneD_Var}
                    if (label in loop_bodies
                            or _arrays_written(onedv_arrs, inst.loop_body)):
                        rebalance_arrs |= onedv_arrs

        # idiomatic truthiness check instead of `len(...) != 0`
        if rebalance_arrs:
            self._gen_rebalances(rebalance_arrs, self.func_ir.blocks)
            return True

        return False
Example #3
0
def loop_lifting(func_ir, typingctx, targetctx, flags, locals):
    """
    Loop lifting transformation.

    Given an interpreter ``func_ir``, returns a 2-tuple of
    ``(toplevel_interp, [loop0_interp, loop1_interp, ....])``.
    """
    func_ir = _pre_looplift_transform(func_ir)
    blocks = func_ir.blocks.copy()
    cfg = compute_cfg_from_blocks(blocks)
    loopinfos = _loop_lift_get_candidate_infos(
        cfg, blocks, func_ir.variable_lifetime.livemap)
    if loopinfos:
        _logger.debug('loop lifting this IR with %d candidates:\n%s',
                      len(loopinfos), func_ir.dump_to_string())
    # Outline each candidate loop; `blocks` is modified in place as we go.
    loops = [
        _loop_lift_modify_blocks(func_ir, info, blocks, typingctx,
                                 targetctx, flags, locals)
        for info in loopinfos
    ]

    # Make the main IR from the (now modified) blocks
    main = func_ir.derive(blocks=blocks)

    return main, loops
Example #4
0
    def find_ranges(blocks):
        """Map each SETUP_WITH block label to the set of labels of the
        POP_BLOCK blocks that close it.

        Walks the CFG from every suspected SETUP_WITH block (in reverse
        topological order) until matching POP_BLOCK blocks are found.

        Raises CompilerError on unsupported control flow: a ``raise``
        inside the with block, or (pre-3.8 only) a ``return`` before the
        POP_BLOCK.
        """
        cfg = compute_cfg_from_blocks(blocks)
        sus_setups, sus_pops = set(), set()
        # traverse the cfg and collect all suspected SETUP_WITH and POP_BLOCK
        # statements so that we can iterate over them
        for label, block in blocks.items():
            for stmt in block.body:
                if ir_utils.is_setup_with(stmt):
                    sus_setups.add(label)
                if ir_utils.is_pop_block(stmt):
                    sus_pops.add(label)

        # now that we do have the statements, iterate through them in reverse
        # topo order and from each start looking for pop_blocks
        setup_with_to_pop_blocks_map = defaultdict(set)
        for setup_block in cfg.topo_sort(sus_setups, reverse=True):
            # begin pop_block search; ``seen`` is a set so the membership
            # test on terminator targets is O(1) instead of O(n) on a list
            # (visit order is not otherwise used)
            to_visit, seen = [setup_block], set()
            while to_visit:
                # get whatever is next and record that we have seen it
                block = to_visit.pop()
                seen.add(block)
                # go through the body of the block, looking for statements
                for stmt in blocks[block].body:
                    # raise detected before pop_block
                    if ir_utils.is_raise(stmt):
                        raise errors.CompilerError(
                            'unsupported control flow due to raise '
                            'statements inside with block')
                    # special case 3.7, return before POP_BLOCK
                    if PYVERSION < (3, 8) and ir_utils.is_return(stmt):
                        raise errors.CompilerError(
                            'unsupported control flow: due to return '
                            'statements inside with block')
                    # if a pop_block, process it
                    if ir_utils.is_pop_block(stmt) and block in sus_pops:
                        # record the jump target of this block belonging to this setup
                        setup_with_to_pop_blocks_map[setup_block].add(block)
                        # remove the block from blocks to be matched
                        sus_pops.remove(block)
                        # stop looking, we have reached the frontier
                        break
                    # if we are still here, by the block terminator,
                    # add all its targets to the to_visit stack, unless we
                    # have seen them already
                    if ir_utils.is_terminator(stmt):
                        for t in stmt.get_targets():
                            if t not in seen:
                                to_visit.append(t)

        return setup_with_to_pop_blocks_map
Example #5
0
def _pre_looplift_transform(func_ir):
    """Canonicalize loops for looplifting.
    """
    from numba.core.postproc import PostProcessor

    cfg = compute_cfg_from_blocks(func_ir.blocks)
    # Merge the exits of every loop that has more than one exit block.
    multi_exit = [info for info in cfg.loops().values()
                  if _has_multiple_loop_exits(cfg, info)]
    for loop_info in multi_exit:
        func_ir, _common_key = _fix_multi_exit_blocks(func_ir,
                                                      loop_info.exits)
    # Invalidate cached analyses and rebuild them on the rewritten IR.
    func_ir._reset_analysis_variables()
    PostProcessor(func_ir).run()
    return func_ir
Example #6
0
def _fix_ssa_vars(blocks, varname, defmap, cfg, df_plus):
    """Rewrite all uses to ``varname`` given the definition map.

    ``cfg`` is the control-flow graph of ``blocks`` as supplied by the
    caller; it is reused directly instead of being recomputed here — the
    original code recomputed it with ``compute_cfg_from_blocks`` and
    thereby shadowed this parameter, making the caller's CFG dead and
    paying for a redundant CFG construction.

    NOTE(review): ``df_plus`` (iterated dominance frontiers) is accepted
    but not used in this version — confirm whether the rewrite visitor
    needs it via the states mapping.
    """
    states = _make_states(blocks)
    states['varname'] = varname
    states['defmap'] = defmap
    states['phimap'] = phimap = defaultdict(list)
    # Reuse the caller-supplied CFG (block structure is unchanged by the
    # preceding renaming pass, so it is still valid).
    states['cfg'] = cfg
    states['phi_locations'] = _compute_phi_locations(cfg, defmap)
    newblocks = _run_block_rewrite(blocks, states, _FixSSAVars())
    # insert phi nodes
    for label, philist in phimap.items():
        curblk = newblocks[label]
        # Prepend PHI nodes to the block
        curblk.body = philist + curblk.body
    return newblocks
Example #7
0
def _fix_ssa_vars(blocks, varname, defmap):
    """Rewrite all uses to ``varname`` given the definition map"""
    states = _make_states(blocks)
    cfg = compute_cfg_from_blocks(blocks)
    phimap = defaultdict(list)
    states["varname"] = varname
    states["defmap"] = defmap
    states["phimap"] = phimap
    states["cfg"] = cfg
    states["df+"] = _iterated_domfronts(cfg)
    newblocks = _run_block_rewrite(blocks, states, _FixSSAVars())
    # drop phi nodes that turned out to be redundant
    _remove_unneeded_phis(phimap)
    # splice the surviving phi nodes in at the top of their blocks
    for label, philist in phimap.items():
        target = newblocks[label]
        target.body = philist + target.body
    return newblocks
Example #8
0
 def cfg(self):
     """Return the control-flow graph built from ``self._blocks``.

     Note: the CFG is recomputed on every call; no caching is done here.
     """
     return analysis.compute_cfg_from_blocks(self._blocks)
Example #9
0
def canonicalize_cfg_single_backedge(blocks):
    """
    Rewrite loops that have multiple backedges.

    Returns a new block map in which every such loop has exactly one
    backedge: the original backedges are redirected to a freshly created
    tail block that alone jumps back to the loop header.  The input
    ``blocks`` mapping itself is not mutated (a shallow copy is edited).
    """
    cfg = compute_cfg_from_blocks(blocks)
    newblocks = blocks.copy()

    def new_block_id():
        # Fresh label: one past the largest label currently in use.
        return max(newblocks.keys()) + 1

    def has_multiple_backedges(loop):
        # True if two or more blocks in ``loop.body`` jump to the header.
        count = 0
        for k in loop.body:
            blk = blocks[k]
            edges = blk.terminator.get_targets()
            # is a backedge?
            if loop.header in edges:
                count += 1
                if count > 1:
                    # early exit
                    return True
        return False

    def yield_loops_with_multiple_backedges():
        # Only loops that actually need rewriting.
        for lp in cfg.loops().values():
            if has_multiple_backedges(lp):
                yield lp

    def replace_target(term, src, dst):
        # Return a copy of terminator ``term`` with jump target ``src``
        # replaced by ``dst``; non-jump terminators pass through unchanged.
        def replace(target):
            return (dst if target == src else target)

        if isinstance(term, ir.Branch):
            return ir.Branch(cond=term.cond,
                             truebr=replace(term.truebr),
                             falsebr=replace(term.falsebr),
                             loc=term.loc)
        elif isinstance(term, ir.Jump):
            return ir.Jump(target=replace(term.target), loc=term.loc)
        else:
            # Terminators without targets (e.g. return) need no rewrite.
            assert not term.get_targets()
            return term

    def rewrite_single_backedge(loop):
        """
        Add new tail block that gathers all the backedges
        """
        header = loop.header
        tailkey = new_block_id()
        for blkkey in loop.body:
            blk = newblocks[blkkey]
            if header in blk.terminator.get_targets():
                # Work on a copy so the original block object is untouched.
                newblk = blk.copy()
                # rewrite backedge into jumps to new tail block
                newblk.body[-1] = replace_target(blk.terminator, header,
                                                 tailkey)
                newblocks[blkkey] = newblk
        # create new tail block (scope/loc borrowed from the header block)
        entryblk = newblocks[header]
        tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc)
        # add backedge: the tail block is now the loop's only backedge
        tailblk.append(ir.Jump(target=header, loc=tailblk.loc))
        newblocks[tailkey] = tailblk

    for loop in yield_loops_with_multiple_backedges():
        rewrite_single_backedge(loop)

    return newblocks