def test_collect_aliases(fa, fb, fc, fd, t0, t1, t2, t3, exprs, expected):
    """
    Check that ``collect`` detects the expected aliasing expressions.

    ``expected`` maps an (un-evaluated) expression onto either its expected
    anti-stencil, or ``None`` when the alias group is expected to be trivial
    (i.e., only one aliased expression).
    """
    scope = [fa, fb, fc, fd, t0, t1, t2, t3]
    # Dict comprehension instead of dict([...]) (flake8-comprehensions C404):
    # builds {evaluated expression -> expected anti-stencil (or None)}
    mapper = {EVAL(k, *scope): v for k, v in expected.items()}
    _, aliases = collect(EVAL(exprs, *scope))
    for k, v in aliases.items():
        assert k in mapper
        # Trivial alias groups carry no anti-stencil expectation
        assert (len(v.aliased) == 1 and mapper[k] is None) or v.anti_stencil == mapper[k]
def test_multiple_eqs(self, exprs, expected, ti0, ti1, ti3, fa):
    """
    Tests data dependences across ordered sequences of equations representing
    a scope.

    ``expected`` is a list of comma-separated words, each word representing a
    dependence in the scope and consisting of three pieces of information:

        * the name of the function inducing a dependence
        * if it's a flow, anti, or output dependence
        * the dimension causing the dependence
    """
    lowered = [LoweredEq(e) for e in EVAL(exprs, ti0.base, ti1.base, ti3.base, fa)]
    remaining = [tuple(w.split(',')) for w in expected]

    # Force innatural flow, only to stress the compiler to see if it was
    # capable of detecting anti-dependences
    for e in lowered:
        e.ispace._directions = {d: Forward for d in e.ispace.directions}

    scope = Scope(lowered)
    assert len(scope.d_all) == len(remaining)

    # Tick off each detected dependence against the expectation list
    for kind in ('flow', 'anti', 'output'):
        for dep in getattr(scope, 'd_%s' % kind):
            found = (dep.function.name, kind, str(dep.cause))
            assert found in remaining
            remaining.remove(found)

    # Sanity check: we did find all of the expected dependences
    assert not remaining
def test_common_subexprs_elimination(tu, tv, tw, ti0, ti1, t0, t1, exprs, expected):
    """
    Check that common sub-expressions elimination rewrites the input
    expressions into the expected right-hand sides.
    """
    def make(i):
        # Temporaries are named r0, r1, ...
        return Scalar(name='r%d' % i).indexify()

    processed = common_subexprs_elimination(
        EVAL(exprs, tu, tv, tw, ti0, ti1, t0, t1), make)

    assert len(processed) == len(expected)
    for rebuilt, rhs in zip(processed, expected):
        assert str(rebuilt.rhs) == rhs
def test_dependences_scope(exprs, expected, ti0, ti1, ti3, fa):
    """
    Tests data dependences across ordered sequences of equations representing
    a scope.

    ``expected`` is a list of comma-separated words, each word representing a
    dependence in the scope and consisting of three pieces of information:

        * the name of the function inducing a dependence
        * if it's a flow, anti, or output dependence
        * the dimension causing the dependence
    """
    evaluated = EVAL(exprs, ti0.base, ti1.base, ti3.base, fa)
    remaining = [tuple(w.split(',')) for w in expected]

    scope = Scope(evaluated)
    assert len(scope.d_all) == len(remaining)

    # Tick off each detected dependence against the expectation list
    for kind in ('flow', 'anti', 'output'):
        for dep in getattr(scope, 'd_%s' % kind):
            found = (dep.function.name, kind, str(dep.cause))
            assert found in remaining
            remaining.remove(found)

    # Sanity check: we did find all of the expected dependences
    assert not remaining
def test_single_eq(self, expr, expected, ti0, ti1, fa):
    """
    Tests data dependences within a single equation consisting of only two
    Indexeds.

    ``expected`` is either None (no dependence expected) or a comma-separated
    word consisting of four pieces of information:

        * if it's a flow, anti, or output dependence ('all' means both flow
          and anti)
        * if it's loop-carried or loop-independent
        * the dimension causing the dependence ('None' if no cause)
        * whether it's regular or irregular (i.e., through A[B[i]])
    """
    expr = LoweredEq(EVAL(expr, ti0.base, ti1.base, fa))
    # Force innatural flow, only to stress the compiler to see if it was
    # capable of detecting anti-dependences
    expr.ispace._directions = {i: Forward for i in expr.ispace.directions}
    scope = Scope(expr)
    deps = scope.d_all
    if expected is None:
        # No dependence expected at all: nothing else to check
        assert len(deps) == 0
        return
    else:
        type, mode, exp_cause, regular = expected.split(',')
    if type == 'all':
        # 'all' means one flow plus one anti dependence
        assert len(deps) == 2
    else:
        assert len(deps) == 1
    dep = deps[0]
    # Check type: exactly the expected kind(s) of dependence, no others
    types = ['flow', 'anti']
    if type != 'all':
        types.remove(type)
        assert len(getattr(scope, 'd_%s' % type)) == 1
        assert all(len(getattr(scope, 'd_%s' % i)) == 0 for i in types)
    else:
        assert all(len(getattr(scope, 'd_%s' % i)) == 1 for i in types)
    # Check mode (is_carried / is_independent — dynamic attribute lookup)
    assert getattr(dep, 'is_%s' % mode)()
    # Check cause
    if exp_cause == 'None':
        # An empty cause set means no dimension induces the dependence
        assert not dep.cause
        return
    else:
        assert len(dep.cause) == 1
        cause = dep.cause.pop()
        assert cause.name == exp_cause
    # Check mode restricted to the cause: the mode must hold for the causing
    # dimension and for no other dimension
    assert getattr(dep, 'is_%s' % mode)(cause)
    # NOTE(review): x, y, z appear to be module-level dimensions — defined
    # outside this chunk; verify against the enclosing test module
    non_causes = [i for i in [x, y, z] if i is not cause]
    assert all(not getattr(dep, 'is_%s' % mode)(i) for i in non_causes)
    # Check if it's regular or irregular: either endpoint qualifying suffices
    assert getattr(dep.source, 'is_%s' % regular) or\
        getattr(dep.sink, 'is_%s' % regular)
def test_yreplace_time_invariants(tu, tv, tw, ti0, ti1, t0, t1, exprs, expected):
    """
    Check that time-invariant sub-expressions are extracted into the
    expected temporaries.
    """
    evaluated = EVAL(exprs, tu, tv, tw, ti0, ti1, t0, t1)
    counter = generator()

    def make():
        # Fresh temporary at every call: r0, r1, ...
        return Scalar(name='r%d' % counter()).indexify()

    processed, found = yreplace(evaluated, make,
                                make_is_time_invariant(evaluated),
                                lambda i: estimate_cost(i) > 0)

    assert len(found) == len(expected)
    for extracted, rhs in zip(found, expected):
        assert str(extracted.rhs) == rhs
def test_xreplace_constrained_time_varying(tu, tv, tw, ti0, ti1, t0, t1,
                                           exprs, expected):
    """
    Check that time-varying sub-expressions are extracted into the
    expected temporaries.
    """
    evaluated = EVAL(exprs, tu, tv, tw, ti0, ti1, t0, t1)

    def make(i):
        # Temporaries are named r0, r1, ...
        return Scalar(name='r%d' % i).indexify()

    processed, found = xreplace_constrained(
        evaluated, make,
        iq_timevarying(TemporariesGraph(evaluated)),
        lambda i: estimate_cost(i) > 0)

    assert len(found) == len(expected)
    for extracted, rhs in zip(found, expected):
        assert str(extracted.rhs) == rhs
def test_directly_indexed_expression(self, fa, ti0, t0, exprs):
    """
    Emulates a potential implementation of boundary condition loops
    """
    eqs = EVAL(exprs, ti0.base, t0)
    op = Operator(eqs, dse='noop', dle='noop')
    trees = retrieve_iteration_tree(op)
    assert len(trees) == 2
    # Each equation must land in its own loop nest, in order
    for tree, eq in zip(trees, eqs):
        assert tree[-1].nodes[0].expr.rhs == eq.rhs
def test_directly_indexed_expression(self, fa, ti0, t0, exprs):
    """
    Test that equations using integer indices are inserted in the right
    loop nest, at the right loop nest depth.
    """
    eqs = EVAL(exprs, ti0.base, t0)
    op = Operator(eqs, dse='noop', dle='noop')
    trees = retrieve_iteration_tree(op)
    assert len(trees) == 2
    # Each equation must land in its own loop nest, in order
    for tree, eq in zip(trees, eqs):
        assert tree[-1].nodes[0].exprs[0].expr.rhs == eq.rhs
def test_dependences_eq(expr, expected, ti0, ti1, fa):
    """
    Tests data dependences within a single equation consisting of only two
    Indexeds.

    ``expected`` is either None (no dependence expected) or a comma-separated
    word consisting of four pieces of information:

        * if it's a flow, anti, or output dependence ('all' means both flow
          and anti)
        * if it's loop-carried or loop-independent
        * the dimension causing the dependence ('None' if no cause)
        * whether it's direct or indirect (i.e., through A[B[i]])
    """
    expr = EVAL(expr, ti0.base, ti1.base, fa)
    scope = Scope(expr)
    deps = scope.d_all
    if expected is None:
        # No dependence expected at all: nothing else to check
        assert len(deps) == 0
        return
    else:
        type, mode, cause, direct = expected.split(',')
    if type == 'all':
        # 'all' means one flow plus one anti dependence
        assert len(deps) == 2
    else:
        assert len(deps) == 1
    dep = deps[0]
    # Check type: exactly the expected kind(s) of dependence, no others
    types = ['flow', 'anti']
    if type != 'all':
        types.remove(type)
        assert len(getattr(scope, 'd_%s' % type)) == 1
        assert all(len(getattr(scope, 'd_%s' % i)) == 0 for i in types)
    else:
        assert all(len(getattr(scope, 'd_%s' % i)) == 1 for i in types)
    # Check mode (is_carried / is_independent — dynamic attribute lookup)
    assert getattr(dep, 'is_%s' % mode)()
    # Check cause
    if cause == 'None':
        assert dep.cause is None
        return
    else:
        assert dep.cause.name == cause
    # Check mode restricted to the cause: the mode must hold for the causing
    # dimension and for no other dimension
    assert getattr(dep, 'is_%s' % mode)(dep.cause)
    # NOTE(review): x, y, z appear to be module-level dimensions — defined
    # outside this chunk; verify against the enclosing test module
    non_causes = [i for i in [x, y, z] if i is not dep.cause]
    assert all(not getattr(dep, 'is_%s' % mode)(i) for i in non_causes)
    # Check if it's direct or indirect
    assert getattr(dep, 'is_%s' % direct)
def test_consistency_anti_dependences(self, exprs, axis, expected, visit,
                                      ti0, ti1, ti3, tu, tv, tw):
    """
    Test that anti dependences end up generating multi loop nests, rather
    than a single loop nest enclosing all of the equations.
    """
    eq1, eq2, eq3 = EVAL(exprs, ti0.base, ti1.base, ti3.base,
                         tu.base, tv.base, tw.base)
    op = Operator([eq1, eq2, eq3], dse='noop', dle='noop', time_axis=axis)

    trees = retrieve_iteration_tree(op)
    assert len(trees) == len(expected)
    # Each tree is summarised as the concatenation of its dimension names
    assert expected == ["".join(i.dim.name for i in j) for j in trees]

    iters = FindNodes(Iteration).visit(op)
    assert visit == "".join(i.dim.name for i in iters)
def test_yreplace_time_invariants(exprs, expected):
    """
    Check that time-invariant sub-expressions are extracted into the
    expected temporaries (standalone variant, building its own functions).
    """
    grid = Grid((3, 3, 3))
    dims = grid.dimensions
    tu = TimeFunction(name="tu", grid=grid, space_order=4).indexify()
    tv = TimeFunction(name="tv", grid=grid, space_order=4).indexify()
    tw = TimeFunction(name="tw", grid=grid, space_order=4).indexify()
    ti0 = Array(name='ti0', shape=(3, 5, 7), dimensions=dims).indexify()
    ti1 = Array(name='ti1', shape=(3, 5, 7), dimensions=dims).indexify()
    t0 = Scalar(name='t0').indexify()
    t1 = Scalar(name='t1').indexify()

    evaluated = EVAL(exprs, tu, tv, tw, ti0, ti1, t0, t1)
    counter = generator()

    def make():
        # Fresh temporary at every call: r0, r1, ...
        return Scalar(name='r%d' % counter()).indexify()

    processed, found = yreplace(evaluated, make,
                                make_is_time_invariant(evaluated),
                                lambda i: estimate_cost(i) > 0)

    assert len(found) == len(expected)
    for extracted, rhs in zip(found, expected):
        assert str(extracted.rhs) == rhs
def test_consistency_coupled_w_ofs(self, exprs, ti0, ti1, ti3):
    """
    Test that no matter what is the order in which the equations are
    provided to an Operator, the resulting loop nest is the same.
    The array accesses in the equations may or may not use offsets;
    these impact the loop bounds, but not the resulting tree structure.
    """
    eq1, eq2, eq3 = EVAL(exprs, ti0.base, ti1.base, ti3.base)
    # Build an Operator per permutation of the equations
    orderings = [[eq1, eq2, eq3], [eq2, eq1, eq3], [eq3, eq2, eq1]]
    ops = [Operator(ordering, dse='noop', dle='noop')
           for ordering in orderings]

    trees = [retrieve_iteration_tree(op) for op in ops]
    # A single loop nest regardless of equation ordering
    assert all(len(t) == 1 for t in trees)
    for tree in [t[0] for t in trees]:
        assert IsPerfectIteration().visit(tree[0])
        # All three equations fused into the innermost loop
        assert len(FindNodes(Expression).visit(tree[-1])) == 3
def test_consistency_anti_dependences(self, exprs, directions, expected, visit,
                                      ti0, ti1, ti3, tu, tv, tw):
    """
    Test that anti dependences end up generating multi loop nests, rather
    than a single loop nest enclosing all of the equations.
    """
    eq1, eq2, eq3 = EVAL(exprs, ti0.base, ti1.base, ti3.base,
                         tu.base, tv.base, tw.base)
    op = Operator([eq1, eq2, eq3], dse='noop', dle='noop')

    trees = retrieve_iteration_tree(op)
    iters = FindNodes(Iteration).visit(op)
    assert len(trees) == len(expected)
    assert len(iters) == len(directions)

    # Abbreviating dimension names makes the test parametrization
    # quicker to write out
    abbrev = {'time': 't'}
    assert expected == ["".join(abbrev.get(i.dim.name, i.dim.name) for i in j)
                        for j in trees]
    assert visit == "".join(abbrev.get(i.dim.name, i.dim.name) for i in iters)

    # Symbolic shorthands for the iteration directions, same rationale
    dirmap = {'+': Forward, '-': Backward, '*': Any}
    for iteration, tag in zip(iters, directions):
        assert iteration.direction == dirmap[tag]
def test_loops_collapsed(fe, t0, t1, t2, t3, exprs, expected, iters):
    """
    Check that the OpenMP transformation emits ``omp for collapse`` pragmas
    exactly on the iterations flagged in ``expected``.
    """
    scope = [fe, t0, t1, t2, t3]
    body = [Expression(DummyEq(EVAL(e, *scope))) for e in exprs]
    ast = iters[6](iters[7](iters[8](body)))
    ast = iet_analyze(ast)

    nodes = transform(ast, mode='openmp').nodes
    iterations = FindNodes(Iteration).visit(nodes)
    assert len(iterations) == len(expected)

    # Check for presence of pragma omp
    for iteration, collapsed in zip(iterations, expected):
        if collapsed is True:
            assert len(iteration.pragmas) == 1
            assert 'omp for collapse' in iteration.pragmas[0].value
        else:
            assert all('omp for collapse' not in p.value
                       for p in iteration.pragmas)
def test_loops_ompized(fa, fb, fc, fd, t0, t1, t2, t3, exprs, expected, iters):
    """
    Check that the OpenMP transformation emits ``omp for`` pragmas exactly
    on the iterations flagged in ``expected``.
    """
    scope = [fa, fb, fc, fd, t0, t1, t2, t3]
    body = [Expression(EVAL(e, *scope)) for e in exprs]
    ast = iters[6](iters[7](body))

    nodes = transform(ast, mode='openmp').nodes
    assert len(nodes) == 1
    iterations = FindNodes(Iteration).visit(nodes[0])
    assert len(iterations) == len(expected)

    # Check for presence of pragma omp
    for iteration, parallel in zip(iterations, expected):
        if parallel is True:
            assert len(iteration.pragmas) == 1
            assert 'omp for' in iteration.pragmas[0].value
        else:
            assert all('omp for' not in p.value for p in iteration.pragmas)
def test_estimate_cost(fa, fb, fc, t0, t1, t2, expr, expected):
    """
    Check the estimated operation count of ``expr``.

    Note: integer arithmetic isn't counted.
    """
    evaluated = EVAL(expr, fa, fb, fc, t0, t1, t2)
    assert estimate_cost(evaluated) == expected
def test_graph_trace(tu, tv, tw, ti0, ti1, t0, t1, exprs, expected):
    """
    Check that tracing each symbol through the flow graph reaches exactly
    the expected set of left-hand sides.
    """
    g = FlowGraph(EVAL(exprs, tu, tv, tw, ti0, ti1, t0, t1))
    # NOTE: `expected` is a string evaluated in this scope — acceptable here
    # as test parametrization is trusted input
    mapper = eval(expected)
    for symbol in [tu, tv, tw, ti0, ti1, t0, t1]:
        reached = {e.lhs for e in g.trace(symbol)}
        assert reached == mapper[symbol]
def test_graph_isindex(fa, fb, fc, t0, t1, t2, exprs, expected):
    """
    Check that ``is_index`` classifies each symbol as expected.
    """
    g = FlowGraph(EVAL(exprs, fa, fb, fc, t0, t1, t2))
    # NOTE: `expected` is a string evaluated in this scope — acceptable here
    # as test parametrization is trusted input
    mapper = eval(expected)
    for symbol, is_index in mapper.items():
        assert g.is_index(symbol) == is_index
def test_common_subexprs_elimination(tu, tv, tw, ti0, ti1, t0, t1, exprs, expected):
    """
    Check that common sub-expressions elimination rewrites the input
    expressions into the expected right-hand sides.
    """
    processed = common_subexprs_elimination(
        EVAL(exprs, tu, tv, tw, ti0, ti1, t0, t1),
        lambda i: Symbol('r%d' % i))

    assert len(processed) == len(expected)
    for rebuilt, rhs in zip(processed, expected):
        assert str(rebuilt.rhs) == rhs