Code example #1
def guard(clusters):
    """
    Return a new :class:`ClusterGroup` with a new :class:`PartialCluster`
    for each conditional expression encountered in ``clusters``.
    """
    processed = ClusterGroup()
    for c in clusters:
        free = []
        for e in c.exprs:
            if e.conditionals:
                # Flush the guard-free expressions accumulated so far into their own Cluster
                if free:
                    processed.append(PartialCluster(free, c.ispace, c.dspace, c.atomics))
                    free = []
                # Create a guarded PartialCluster
                guards = {}
                for d in e.conditionals:
                    condition = guards.setdefault(d.parent, [])
                    condition.append(d.condition or CondEq(d.parent % d.factor, 0))
                guards = {k: sympy.And(*v, evaluate=False) for k, v in guards.items()}
                processed.append(PartialCluster(e, c.ispace, c.dspace, c.atomics, guards))
            else:
                free.append(e)
        # Leftover
        if free:
            processed.append(PartialCluster(free, c.ispace, c.dspace, c.atomics))

    return ClusterGroup(processed)
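
The guard built above fires once every factor iterations of the parent dimension. Below is a minimal, self-contained sketch of that condition in plain SymPy, using sympy.Eq as a stand-in for Devito's CondEq; the time symbol and factor value are illustrative:

import sympy

time = sympy.Symbol('time', integer=True)
factor = 4

# Stands in for CondEq(d.parent % d.factor, 0): true on every 4th iteration
condition = sympy.Eq(time % factor, 0)
print([i for i in range(10) if condition.subs(time, i)])  # [0, 4, 8]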
Code example #2
File: algorithms.py  Project: RajatRasal/devito
def guard(clusters):
    """
    Return a new :class:`ClusterGroup` with a new :class:`PartialCluster`
    for each conditional expression encountered in ``clusters``.
    """
    processed = ClusterGroup()
    for c in clusters:
        # Separate the expressions that should be guarded from the free ones
        mapper = OrderedDict()
        free = []
        for e in c.exprs:
            found = [d for d in e.dimensions if d.is_Conditional]
            if found:
                mapper.setdefault(tuple(filter_sorted(found)), []).append(e)
            else:
                free.append(e)

        # Some expressions may not require guards at all. We put them in their
        # own cluster straight away
        if free:
            processed.append(PartialCluster(free, c.ispace, c.dspace, c.atomics))

        # Then we add in all guarded clusters
        for k, v in mapper.items():
            guards = {d.parent: CondEq(d.parent % d.factor, 0) for d in k}
            processed.append(PartialCluster(v, c.ispace, c.dspace, c.atomics, guards))

    return ClusterGroup(processed)
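
Unlike example #1, which emits one guarded PartialCluster per conditional expression, this version first groups expressions by their tuple of conditional dimensions, so expressions sharing identical guards land in a single cluster. A tiny self-contained illustration of the setdefault grouping idiom, with sorted standing in for filter_sorted and made-up expression names and dimension labels:

from collections import OrderedDict

mapper = OrderedDict()
for name, dims in [('eq0', ('ct',)), ('eq1', ('ct',)), ('eq2', ('cs', 'ct'))]:
    mapper.setdefault(tuple(sorted(dims)), []).append(name)

# eq0 and eq1 share the key ('ct',), so they would share one guarded cluster
print(list(mapper.items()))  # [(('ct',), ['eq0', 'eq1']), (('cs', 'ct'), ['eq2'])]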
Code example #3
File: algorithms.py  Project: fymenq/devito
def clusterize(exprs):
    """Group a sequence of :class:`ir.Eq`s into one or more :class:`Cluster`s."""

    # Build a graph capturing the dependencies among the input tensor expressions
    mapper = OrderedDict()
    for i, e1 in enumerate(exprs):
        trace = [e2 for e2 in exprs[:i] if Scope([e2, e1]).has_dep] + [e1]
        trace.extend([e2 for e2 in exprs[i + 1:] if Scope([e1, e2]).has_dep])
        mapper[e1] = Bunch(trace=trace, ispace=e1.ispace)

    # Derive the iteration spaces
    queue = list(mapper)
    while queue:
        target = queue.pop(0)

        ispaces = [mapper[i].ispace for i in mapper[target].trace]

        coerced_ispace = mapper[target].ispace.intersection(*ispaces)

        if coerced_ispace != mapper[target].ispace:
            # Something has changed, need to propagate the update
            mapper[target].ispace = coerced_ispace
            queue.extend([i for i in mapper[target].trace if i not in queue])

    # Build a PartialCluster for each tensor expression
    clusters = ClusterGroup()
    for k, v in mapper.items():
        if k.is_Tensor:
            scalars = [i for i in v.trace[:v.trace.index(k)] if i.is_Scalar]
            clusters.append(PartialCluster(scalars + [k], v.ispace))

    # Group PartialClusters together where possible
    clusters = groupby(clusters)

    return clusters.finalize()
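
The while loop above is a worklist fixed point: whenever coercion changes an expression's iteration space, everything in its trace is re-enqueued, until no further update occurs. A self-contained sketch of the pattern over plain data, with max-propagation standing in for ispace.intersection:

def fixed_point(values, neighbours, coerce):
    queue = list(values)
    while queue:
        target = queue.pop(0)
        coerced = coerce(values[target], [values[i] for i in neighbours[target]])
        if coerced != values[target]:
            # Something changed: re-enqueue the neighbours
            values[target] = coerced
            queue.extend(i for i in neighbours[target] if i not in queue)
    return values

values = {'a': 1, 'b': 5, 'c': 2}
neighbours = {'a': ['b'], 'b': ['a', 'c'], 'c': ['b']}
print(fixed_point(values, neighbours, lambda v, ns: max([v] + ns)))
# {'a': 5, 'b': 5, 'c': 5}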
Code example #4
def clusterize(exprs):
    """
    Group a sequence of :class:`ir.Eq`s into one or more :class:`Cluster`s.
    """
    clusters = ClusterGroup()
    flowmap = detect_flow_directions(exprs)
    prev = None
    for idx, e in enumerate(exprs):
        if e.is_Tensor:
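            # Note: `prev` is None until the first tensor is seen, and
            # exprs[None:idx] is equivalent to exprs[:idx]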
            scalars = [i for i in exprs[prev:idx] if i.is_Scalar]
            # Iteration space
            ispace = IterationSpace.merge(e.ispace, *[i.ispace for i in scalars])
            # Enforce iteration directions
            fdirs, _ = force_directions(flowmap, lambda d: ispace.directions.get(d))
            ispace = IterationSpace(ispace.intervals, ispace.sub_iterators, fdirs)
            # Data space
            dspace = DataSpace.merge(e.dspace, *[i.dspace for i in scalars])
            # Prepare for next range
            prev = idx

            clusters.append(PartialCluster(scalars + [e], ispace, dspace))

    # Group PartialClusters together where possible
    clusters = groupby(clusters)

    # Introduce conditional PartialClusters
    clusters = guard(clusters)

    return clusters.finalize()
Code example #5
def guard(clusters):
    """
    Return a new :class:`ClusterGroup` including new :class:`PartialCluster`s
    for each conditional expression encountered in ``clusters``.
    """
    processed = ClusterGroup()
    for c in clusters:
        # Find out what expressions in /c/ should be guarded
        mapper = {}
        for e in c.exprs:
            for k, v in e.ispace.sub_iterators.items():
                for i in v:
                    if i.dim.is_Conditional:
                        mapper.setdefault(i.dim, []).append(e)

        # Build conditional expressions to guard clusters
        conditions = {d: CondEq(d.parent % d.factor, 0) for d in mapper}
        negated = {d: CondNe(d.parent % d.factor, 0) for d in mapper}

        # Expand with guarded clusters
        combs = list(powerset(mapper))
        for dims, ndims in zip(combs, reversed(combs)):
            banned = flatten(v for k, v in mapper.items() if k not in dims)
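            # Replace each conditional dimension with parent/factor inside
            # the guarded expressions (IntDiv models integer division)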
            exprs = [
                e.xreplace({i: IntDiv(i.parent, i.factor)
                            for i in mapper}) for e in c.exprs
                if e not in banned
            ]
            guards = [(i.parent, conditions[i]) for i in dims]
            guards.extend([(i.parent, negated[i]) for i in ndims])
            cluster = PartialCluster(exprs, c.ispace, c.dspace, c.atomics,
                                     dict(guards))
            processed.append(cluster)

    return processed
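
Zipping combs with reversed(combs) pairs each subset of conditional dimensions with its complement: for the standard powerset enumeration (sizes 0..n, lexicographic within each size), reversing the list yields exactly the complements. Each pair then generates one cluster guarded by CondEq conditions for dims and CondNe conditions for ndims. A sketch of powerset, assuming the usual itertools recipe:

from itertools import chain, combinations

def powerset(iterable):
    s = list(iterable)
    return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))

combs = list(powerset(['ct', 'cs']))
for dims, ndims in zip(combs, reversed(combs)):
    print(dims, ndims)
# () ('ct', 'cs')
# ('ct',) ('cs',)
# ('cs',) ('ct',)
# ('ct', 'cs') ()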
Code example #6
File: algorithms.py  Project: dalide/devito
def clusterize(exprs, stencils):
    """
    Derive :class:`Cluster` objects from an iterable of expressions; a stencil for
    each expression must be provided.
    """
    assert len(exprs) == len(stencils)

    exprs, stencils = aggregate(exprs, stencils)

    # Create a PartialCluster for each sequence of expressions computing a tensor
    mapper = OrderedDict()
    g = TemporariesGraph(exprs)
    for (k, v), j in zip(g.items(), stencils):
        if v.is_tensor:
            exprs = g.trace(k)
            exprs += tuple(i for i in g.trace(k, readby=True)
                           if i not in exprs)
            mapper[k] = PartialCluster(exprs, j)

    # Update the PartialClusters' Stencils by looking at the Stencil of the
    # surrounding PartialClusters.
    queue = list(mapper)
    while queue:
        target = queue.pop(0)

        pc = mapper[target]
        strict_trace = [i.lhs for i in pc.exprs if i.lhs != target]

        stencil = pc.stencil.copy()
        for i in strict_trace:
            if i in mapper:
                stencil = stencil.add(mapper[i].stencil)

        if stencil != pc.stencil:
            # Something has changed, need to propagate the update
            pc.stencil = stencil
            queue.extend([i for i in strict_trace if i not in queue])

    # Drop all non-output tensors, as they are computed by other clusters
    clusters = ClusterGroup()
    for target, pc in mapper.items():
        exprs = [i for i in pc.exprs if i.lhs.is_Symbol or i.lhs == target]
        clusters.append(PartialCluster(exprs, pc.stencil))

    # Attempt grouping as many PartialClusters as possible together
    return groupby(clusters)
Code example #7
File: algorithms.py  Project: skkamyab/devito
def clusterize(exprs):
    """Group a sequence of :class:`ir.Eq`s into one or more :class:`Cluster`s."""

    # Group expressions based on data dependences
    groups = group_expressions(exprs)

    clusters = ClusterGroup()
    for g in groups:
        # Coerce iteration space of each expression in each group
        mapper = OrderedDict([(e, e.ispace) for e in g])
        flowmap = detect_flow_directions(g)
        queue = list(g)
        while queue:
            v = queue.pop(0)

            intervals, sub_iterators, directions = mapper[v].args
            forced, clashes = force_directions(flowmap,
                                               lambda i: directions.get(i))
            for e in g:
                intervals = intervals.intersection(
                    mapper[e].intervals.drop(clashes))
            directions = {i: forced[i] for i in directions}
            coerced_ispace = IterationSpace(intervals, sub_iterators,
                                            directions)

            # Need update propagation?
            if coerced_ispace != mapper[v]:
                mapper[v] = coerced_ispace
                queue.extend([i for i in g if i not in queue])

        # Wrap each tensor expression in a PartialCluster
        for k, v in mapper.items():
            if k.is_Tensor:
                scalars = [i for i in g[:g.index(k)] if i.is_Scalar]
                clusters.append(PartialCluster(scalars + [k], v))

    # Group PartialClusters together where possible
    clusters = groupby(clusters)

    # Introduce conditional PartialClusters
    clusters = guard(clusters)

    return clusters.finalize()
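
force_directions itself is not shown in these snippets; judging from its call sites, it returns a forced direction per dimension plus the dimensions whose flow constraints clash. A hypothetical mock (not Devito's actual implementation) capturing that contract:

def force_directions_mock(flowmap, hint):
    forced, clashes = {}, []
    for dim, legal in flowmap.items():
        h = hint(dim)
        if h in legal:
            forced[dim] = h  # honour the hinted direction when legal
        elif len(legal) == 1:
            forced[dim] = next(iter(legal))
        else:
            clashes.append(dim)  # no single direction satisfies all flows
    return forced, clashes

flowmap = {'time': {'++'}, 'x': {'++', '--'}}
print(force_directions_mock(flowmap, {'time': '++', 'x': '--'}.get))
# ({'time': '++', 'x': '--'}, [])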
Code example #8
def clusterize(exprs):
    """Group a sequence of :class:`ir.Eq`s into one or more :class:`Cluster`s."""
    # Group expressions based on data dependences
    groups = group_expressions(exprs)

    clusters = ClusterGroup()

    # Coerce iteration direction of each expression in each group
    for g in groups:
        mapper = OrderedDict([(e, e.directions) for e in g])
        flowmap = detect_flow_directions(g)
        queue = list(g)
        while queue:
            k = queue.pop(0)
            directions, _ = force_directions(flowmap,
                                             lambda i: mapper[k].get(i))
            directions = {i: directions[i] for i in mapper[k]}
            # Need update propagation?
            if directions != mapper[k]:
                mapper[k] = directions
                queue.extend([i for i in g if i not in queue])

        # Wrap each tensor expression in a PartialCluster
        for k, v in mapper.items():
            if k.is_Tensor:
                scalars = [i for i in g[:g.index(k)] if i.is_Scalar]
                intervals, sub_iterators, _ = k.ispace.args
                ispace = IterationSpace(intervals, sub_iterators, v)
                clusters.append(PartialCluster(scalars + [k], ispace,
                                               k.dspace))

    # Group PartialClusters together where possible
    clusters = groupby(clusters)

    # Introduce conditional PartialClusters
    clusters = guard(clusters)

    return clusters.finalize()
Code example #9
def clusterize(exprs):
    """Group a sequence of LoweredEqs into one or more Clusters."""
    clusters = ClusterGroup()

    # Wrap each LoweredEq in `exprs` within a PartialCluster. The PartialCluster's
    # iteration direction is enforced based on the iteration direction of the
    # surrounding LoweredEqs
    flowmap = detect_flow_directions(exprs)
    for e in exprs:
        directions, _ = force_directions(flowmap,
                                         lambda d: e.ispace.directions.get(d))
        ispace = IterationSpace(e.ispace.intervals, e.ispace.sub_iterators,
                                directions)

        clusters.append(PartialCluster(e, ispace, e.dspace))

    # Group PartialClusters together where possible
    clusters = groupby(clusters)

    # Introduce conditional PartialClusters
    clusters = guard(clusters)

    return clusters.finalize()
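
Examples #4, #7, #8 and #9 all share the same three-stage shape: wrap each expression in a PartialCluster, fuse compatible clusters with groupby, split out conditionals with guard, then freeze the result with finalize. A condensed, runnable sketch of that pipeline, with hypothetical stand-ins for the Devito types:

def clusterize_sketch(exprs, wrap, groupby, guard, finalize):
    clusters = [wrap(e) for e in exprs]  # one PartialCluster per expression
    clusters = groupby(clusters)         # fuse compatible PartialClusters
    clusters = guard(clusters)           # split out conditional expressions
    return finalize(clusters)            # freeze into immutable Clusters

# With identity stand-ins the pipeline reduces to the wrapping step:
print(clusterize_sketch([1, 2], wrap=lambda e: [e], groupby=lambda c: c,
                        guard=lambda c: c, finalize=lambda c: c))  # [[1], [2]]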