Code example #1
def test_compose():
    a1 = Range.from_string('0, 0:N, 10:20')
    a2 = Range.from_string('0, 0:N, 5:10')

    a_res = Range.from_string('0, 0:N, 15:20')
    assert a_res == a1.compose(a2)

    b1 = Range.from_string('0,0,0:M,0:N')
    b2 = Range.from_string('0,0,0,0:N')

    b_res = Range.from_string('0,0,0,0:N')
    assert b_res == b1.compose(b2)

    c1 = Range.from_string('0, 0:N, 0:M, 50:100')
    c2 = Range.from_string('0, 0, 0, 20:40')
    c3 = Indices.from_string('0 , 0 , 0 , 0')

    c_res1 = Range.from_string('0, 0, 0, 70:90')
    c_res2 = Indices.from_string('0, 0, 0, 50')
    assert c_res1 == c1.compose(c2)
    assert c_res2 == c1.compose(c3)

    d1 = Range.from_string('i,j,0:N')
    d2 = Indices.from_string('0,0,k')

    d_res = Indices.from_string('i,j,k')
    assert d_res == d1.compose(d2)
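
The test snippets in this section omit their module-level setup. A minimal sketch of the imports and symbol definitions they appear to assume (the exact module paths are assumptions based on the DaCe project layout, not taken from the snippets themselves):

from copy import deepcopy

import numpy as np

import dace
from dace.subsets import Indices, Range
# Assumed import path for the transformation used in the dedup-access tests:
from dace.transformation.dataflow import DeduplicateAccess

# Symbolic sizes and indices referenced in the subset strings
N, M = dace.symbol('N'), dace.symbol('M')
i, j = dace.symbol('i'), dace.symbol('j')
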
Code example #2
def test_find_contiguous_subsets():
    subset_list = [
        Range([(i, i, 1), (j, j, 1)]),
        Range([(i, i, 1), (j + 3, j + 3, 1)]),
        Range([(i, i, 1), (j + 1, j + 2, 1)]),
        Range([(i - 2, i - 1, 1), (j, j + 3, 1)]),
    ]

    result = DeduplicateAccess.find_contiguous_subsets(subset_list)
    assert len(result) == 1
    assert list(result)[0] == Range([(i - 2, i, 1), (j, j + 3, 1)])
Code example #3
def test_squeeze_unsqueeze_ranges():

    a1 = Range.from_string('0:10, 0')
    expected_squeezed = [1]
    a2 = deepcopy(a1)
    not_squeezed = a2.squeeze()
    squeezed = [i for i in range(len(a1)) if i not in not_squeezed]
    unsqueezed = a2.unsqueeze(squeezed)
    assert (squeezed == unsqueezed)
    assert (expected_squeezed == squeezed)
    assert (a1 == a2)

    b1 = Range.from_string('0, 0:10')
    expected_squeezed = [0]
    b2 = deepcopy(b1)
    not_squeezed = b2.squeeze()
    squeezed = [i for i in range(len(b1)) if i not in not_squeezed]
    unsqueezed = b2.unsqueeze(squeezed)
    assert (squeezed == unsqueezed)
    assert (expected_squeezed == squeezed)
    assert (b1 == b2)

    c1 = Range.from_string('0:10, 0, 0')
    expected_squeezed = [1, 2]
    c2 = deepcopy(c1)
    not_squeezed = c2.squeeze()
    squeezed = [i for i in range(len(c1)) if i not in not_squeezed]
    unsqueezed = c2.unsqueeze(squeezed)
    assert (squeezed == unsqueezed)
    assert (expected_squeezed == squeezed)
    assert (c1 == c2)

    d1 = Range.from_string('0, 0:10, 0')
    expected_squeezed = [0, 2]
    d2 = deepcopy(d1)
    not_squeezed = d2.squeeze()
    squeezed = [i for i in range(len(d1)) if i not in not_squeezed]
    unsqueezed = d2.unsqueeze(squeezed)
    assert (squeezed == unsqueezed)
    assert (expected_squeezed == squeezed)
    assert (d1 == d2)

    e1 = Range.from_string('0, 0, 0:10')
    expected_squeezed = [0, 1]
    e2 = deepcopy(e1)
    not_squeezed = e2.squeeze()
    squeezed = [i for i in range(len(e1)) if i not in not_squeezed]
    unsqueezed = e2.unsqueeze(squeezed)
    assert (squeezed == unsqueezed)
    assert (expected_squeezed == squeezed)
    assert (e1 == e2)
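
In these snippets, Range.squeeze() drops size-1 dimensions in place and returns the indices of the dimensions that were kept, while Range.unsqueeze(dims) re-inserts size-1 dimensions at the given positions and returns the indices it added, which is why each block can compare the two lists and recover the original range. A minimal round-trip sketch under that assumption:

r = Range.from_string('0:10, 0')  # second dimension has size 1
kept = r.squeeze()                # r is now '0:10'; kept == [0]
added = r.unsqueeze([1])          # r is '0:10, 0' again; added == [1]
assert kept == [0] and added == [1]
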
Code example #4
File: copy_to_map.py Project: am-ivanov/dace
    def delinearize_linearize(
            self, desc: data.Array, copy_shape: Tuple[symbolic.SymbolicType],
            rng: subsets.Range) -> Tuple[symbolic.SymbolicType]:
        """
        Converts one N-dimensional iteration space to another M-dimensional space via linearization
        followed by delinearization.
        """
        indices = [
            symbolic.pystr_to_symbolic(f'__i{i}')
            for i in range(len(copy_shape))
        ]

        # Special case for when both dimensionalities are equal
        if tuple(desc.shape) == tuple(copy_shape):
            return subsets.Range([(ind, ind, 1) for ind in indices])

        if rng is not None:  # Deal with offsets and strides in range
            indices = rng.coord_at(indices)

        linear_index = sum(indices[i] * data._prod(copy_shape[i + 1:])
                           for i in range(len(indices)))

        cur_index = [0] * len(desc.shape)
        divide_by = 1
        for i in reversed(range(len(desc.shape))):
            cur_index[i] = (linear_index / divide_by) % desc.shape[i]
            divide_by = divide_by * desc.shape[i]

        return subsets.Range([(ind, ind, 1) for ind in cur_index])
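
The docstring above describes a linearize-then-delinearize mapping between two iteration spaces. The short standalone sketch below illustrates the same index arithmetic with concrete, made-up shapes (plain Python, independent of DaCe):

from math import prod

copy_shape = (4, 6)    # source iteration space
dst_shape = (2, 2, 6)  # destination shape with the same total volume (24)
index = (3, 5)         # an index in the source space

# Linearize: row-major flattening of `index` over `copy_shape`
linear = sum(index[k] * prod(copy_shape[k + 1:]) for k in range(len(index)))
assert linear == 3 * 6 + 5 == 23

# Delinearize: recover a multi-dimensional index in `dst_shape`
out = [0] * len(dst_shape)
divide_by = 1
for k in reversed(range(len(dst_shape))):
    out[k] = (linear // divide_by) % dst_shape[k]
    divide_by *= dst_shape[k]
assert tuple(out) == (1, 1, 5)
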
Code example #5
def test_add():
    A = np.random.rand(M, N)
    sdfg = add.to_sdfg()
    result = sdfg(A=A)

    # Check validity of result
    assert np.allclose(result, A + A)

    # Check map sequence
    me = next(n for n, _ in sdfg.all_nodes_recursive()
              if isinstance(n, dace.nodes.MapEntry))
    assert me.map.range == Range([(0, 23, 1), (0, 24, 1)])
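
This test references a dace.program named add together with sizes M and N that are not shown in the snippet. Judging from the asserted map range (0..23 and 0..24), a plausible definition would be something like the following (an assumption, not taken from the source):

M, N = 24, 25

@dace.program
def add(A: dace.float64[M, N]):
    return A + A
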
Code example #6
def test_add_11dim():
    A = np.random.rand(*(2 if i < 9 else 3 for i in range(11)))
    sdfg = addunk.to_sdfg(A)
    result = sdfg(A=A)

    # Check validity of result
    assert np.allclose(result, A + A)

    # Check map sequence
    me = next(n for n, _ in sdfg.all_nodes_recursive()
              if isinstance(n, dace.nodes.MapEntry))
    assert me.map.range == Range([(0, 1, 1) if i < 9 else (0, 2, 1)
                                  for i in range(11)])
Code example #7
def test_find_contiguous_subsets_nonsquare():
    subset_list = [
        Range([(i, i, 1), (j, j, 1)]),
        Range([(i, i, 1), (j + 3, j + 3, 1)]),
        Range([(i, i, 1), (j + 1, j + 2, 1)]),
        Range([(i + 2, i + 2, 1), (j, j, 1)]),
        Range([(i + 2, i + 2, 1), (j + 3, j + 3, 1)]),
        Range([(i + 2, i + 2, 1), (j + 1, j + 2, 1)]),
        Range([(i + 1, i + 1, 1), (j - 1, j - 1, 1)]),
        Range([(i + 1, i + 1, 1), (j, j, 1)]),
        Range([(i + 1, i + 1, 1), (j + 1, j + 1, 1)]),
    ]

    # Prioritize on first dimension
    result2 = DeduplicateAccess.find_contiguous_subsets(subset_list, 0)
    result2 = DeduplicateAccess.find_contiguous_subsets(result2, None)
    assert len(result2) == 2

    # Prioritize on second dimension
    result3 = DeduplicateAccess.find_contiguous_subsets(subset_list, 1)
    assert len(result3) == 3
    result3 = DeduplicateAccess.find_contiguous_subsets(result3, None)
    assert len(result3) == 3
Code example #8
File: helpers.py Project: mfkiwl/dace
def nest_state_subgraph(sdfg: SDFG,
                        state: SDFGState,
                        subgraph: SubgraphView,
                        name: Optional[str] = None,
                        full_data: bool = False) -> nodes.NestedSDFG:
    """ Turns a state subgraph into a nested SDFG. Operates in-place.
        :param sdfg: The SDFG containing the state subgraph.
        :param state: The state containing the subgraph.
        :param subgraph: Subgraph to nest.
        :param name: An optional name for the nested SDFG.
        :param full_data: If True, nests entire input/output data.
        :return: The nested SDFG node.
        :raise KeyError: Some or all nodes in the subgraph are not located in
                         this state, or the state does not belong to the given
                         SDFG.
        :raise ValueError: The subgraph is contained in more than one scope.
    """
    if state.parent != sdfg:
        raise KeyError('State does not belong to given SDFG')
    if subgraph is not state and subgraph.graph is not state:
        raise KeyError('Subgraph does not belong to given state')

    # Find the top-level scope
    scope_tree = state.scope_tree()
    scope_dict = state.scope_dict()
    scope_dict_children = state.scope_children()
    top_scopenode = -1  # Initialized to -1 since "None" already means top-level

    for node in subgraph.nodes():
        if node not in scope_dict:
            raise KeyError('Node not found in state')

        # If scope entry/exit, ensure entire scope is in subgraph
        if isinstance(node, nodes.EntryNode):
            scope_nodes = scope_dict_children[node]
            if any(n not in subgraph.nodes() for n in scope_nodes):
                raise ValueError('Subgraph contains partial scopes (entry)')
        elif isinstance(node, nodes.ExitNode):
            entry = state.entry_node(node)
            scope_nodes = scope_dict_children[entry] + [entry]
            if any(n not in subgraph.nodes() for n in scope_nodes):
                raise ValueError('Subgraph contains partial scopes (exit)')

        scope_node = scope_dict[node]
        if scope_node not in subgraph.nodes():
            if top_scopenode != -1 and top_scopenode != scope_node:
                raise ValueError('Subgraph is contained in more than one scope')
            top_scopenode = scope_node

    scope = scope_tree[top_scopenode]
    ###

    # Consolidate edges in top scope
    utils.consolidate_edges(sdfg, scope)
    snodes = subgraph.nodes()

    # Collect inputs and outputs of the nested SDFG
    inputs: List[MultiConnectorEdge] = []
    outputs: List[MultiConnectorEdge] = []
    for node in snodes:
        for edge in state.in_edges(node):
            if edge.src not in snodes:
                inputs.append(edge)
        for edge in state.out_edges(node):
            if edge.dst not in snodes:
                outputs.append(edge)

    # Collect transients not used outside of subgraph (will be removed from
    # the top-level graph)
    data_in_subgraph = set(n.data for n in subgraph.nodes() if isinstance(n, nodes.AccessNode))
    # Find other occurrences in SDFG
    other_nodes = set(n.data for s in sdfg.nodes() for n in s.nodes()
                      if isinstance(n, nodes.AccessNode) and n not in subgraph.nodes())
    subgraph_transients = set()
    for data in data_in_subgraph:
        datadesc = sdfg.arrays[data]
        if datadesc.transient and data not in other_nodes:
            subgraph_transients.add(data)

    # All transients of edges between code nodes are also added to nested graph
    for edge in subgraph.edges():
        if (isinstance(edge.src, nodes.CodeNode) and isinstance(edge.dst, nodes.CodeNode)):
            subgraph_transients.add(edge.data.data)

    # Collect data used in access nodes within subgraph (will be referenced in
    # full upon nesting)
    input_arrays = set()
    output_arrays = {}
    for node in subgraph.nodes():
        if (isinstance(node, nodes.AccessNode) and node.data not in subgraph_transients):
            if node.has_reads(state):
                input_arrays.add(node.data)
            if node.has_writes(state):
                output_arrays[node.data] = state.in_edges(node)[0].data.wcr

    # Create the nested SDFG
    nsdfg = SDFG(name or 'nested_' + state.label)

    # Transients are added to the nested graph as-is
    for name in subgraph_transients:
        nsdfg.add_datadesc(name, sdfg.arrays[name])

    # Input/output data that are not source/sink nodes are added to the graph
    # as non-transients
    for name in (input_arrays | output_arrays.keys()):
        datadesc = copy.deepcopy(sdfg.arrays[name])
        datadesc.transient = False
        nsdfg.add_datadesc(name, datadesc)

    # Connected source/sink nodes outside subgraph become global data
    # descriptors in nested SDFG
    input_names = {}
    output_names = {}
    global_subsets: Dict[str, Tuple[str, Subset]] = {}
    for edge in inputs:
        if edge.data.data is None:  # Skip edges with an empty memlet
            continue
        name = edge.data.data
        if name not in global_subsets:
            datadesc = copy.deepcopy(sdfg.arrays[edge.data.data])
            datadesc.transient = False
            if not full_data:
                datadesc.shape = edge.data.subset.size()
            new_name = nsdfg.add_datadesc(name, datadesc, find_new_name=True)
            global_subsets[name] = (new_name, edge.data.subset)
        else:
            new_name, subset = global_subsets[name]
            if not full_data:
                new_subset = union(subset, edge.data.subset)
                if new_subset is None:
                    new_subset = Range.from_array(sdfg.arrays[name])
                global_subsets[name] = (new_name, new_subset)
                nsdfg.arrays[new_name].shape = new_subset.size()
        input_names[edge] = new_name
    for edge in outputs:
        if edge.data.data is None:  # Skip edges with an empty memlet
            continue
        name = edge.data.data
        if name not in global_subsets:
            datadesc = copy.deepcopy(sdfg.arrays[edge.data.data])
            datadesc.transient = False
            if not full_data:
                datadesc.shape = edge.data.subset.size()
            new_name = nsdfg.add_datadesc(name, datadesc, find_new_name=True)
            global_subsets[name] = (new_name, edge.data.subset)
        else:
            new_name, subset = global_subsets[name]
            if not full_data:
                new_subset = union(subset, edge.data.subset)
                if new_subset is None:
                    new_subset = Range.from_array(sdfg.arrays[name])
                global_subsets[name] = (new_name, new_subset)
                nsdfg.arrays[new_name].shape = new_subset.size()
        output_names[edge] = new_name
    ###################

    # Add scope symbols to the nested SDFG
    defined_vars = set(
        symbolic.pystr_to_symbolic(s) for s in (state.symbols_defined_at(top_scopenode).keys()
                                                | sdfg.symbols))
    for v in defined_vars:
        if v in sdfg.symbols:
            sym = sdfg.symbols[v]
            nsdfg.add_symbol(v, sym.dtype)

    # Add constants to nested SDFG
    for cstname, cstval in sdfg.constants.items():
        nsdfg.add_constant(cstname, cstval)

    # Create nested state
    nstate = nsdfg.add_state()

    # Add subgraph nodes and edges to nested state
    nstate.add_nodes_from(subgraph.nodes())
    for e in subgraph.edges():
        nstate.add_edge(e.src, e.src_conn, e.dst, e.dst_conn, copy.deepcopy(e.data))

    # Modify nested SDFG parents in subgraph
    for node in subgraph.nodes():
        if isinstance(node, nodes.NestedSDFG):
            node.sdfg.parent = nstate
            node.sdfg.parent_sdfg = nsdfg
            node.sdfg.parent_nsdfg_node = node

    # Add access nodes and edges as necessary
    edges_to_offset = []
    for edge, name in input_names.items():
        node = nstate.add_read(name)
        new_edge = copy.deepcopy(edge.data)
        new_edge.data = name
        edges_to_offset.append((edge, nstate.add_edge(node, None, edge.dst, edge.dst_conn, new_edge)))
    for edge, name in output_names.items():
        node = nstate.add_write(name)
        new_edge = copy.deepcopy(edge.data)
        new_edge.data = name
        edges_to_offset.append((edge, nstate.add_edge(edge.src, edge.src_conn, node, None, new_edge)))

    # Offset memlet paths inside nested SDFG according to subsets
    for original_edge, new_edge in edges_to_offset:
        for edge in nstate.memlet_tree(new_edge):
            edge.data.data = new_edge.data.data
            if not full_data:
                edge.data.subset.offset(global_subsets[original_edge.data.data][1], True)

    # Add nested SDFG node to the input state
    nested_sdfg = state.add_nested_sdfg(nsdfg, None,
                                        set(input_names.values()) | input_arrays,
                                        set(output_names.values()) | output_arrays.keys())

    # Reconnect memlets to nested SDFG
    reconnected_in = set()
    reconnected_out = set()
    empty_input = None
    empty_output = None
    for edge in inputs:
        if edge.data.data is None:
            empty_input = edge
            continue

        name = input_names[edge]
        if name in reconnected_in:
            continue
        if full_data:
            data = Memlet.from_array(edge.data.data, sdfg.arrays[edge.data.data])
        else:
            data = copy.deepcopy(edge.data)
            data.subset = global_subsets[edge.data.data][1]
        state.add_edge(edge.src, edge.src_conn, nested_sdfg, name, data)
        reconnected_in.add(name)

    for edge in outputs:
        if edge.data.data is None:
            empty_output = edge
            continue

        name = output_names[edge]
        if name in reconnected_out:
            continue
        if full_data:
            data = Memlet.from_array(edge.data.data, sdfg.arrays[edge.data.data])
        else:
            data = copy.deepcopy(edge.data)
            data.subset = global_subsets[edge.data.data][1]
        data.wcr = edge.data.wcr
        state.add_edge(nested_sdfg, name, edge.dst, edge.dst_conn, data)
        reconnected_out.add(name)

    # Connect access nodes to internal input/output data as necessary
    entry = scope.entry
    exit = scope.exit
    for name in input_arrays:
        node = state.add_read(name)
        if entry is not None:
            state.add_nedge(entry, node, Memlet())
        state.add_edge(node, None, nested_sdfg, name, Memlet.from_array(name, sdfg.arrays[name]))
    for name, wcr in output_arrays.items():
        node = state.add_write(name)
        if exit is not None:
            state.add_nedge(node, exit, Memlet())
        state.add_edge(nested_sdfg, name, node, None, Memlet(data=name, wcr=wcr))

    # Graph was not reconnected, but needs to be
    if state.in_degree(nested_sdfg) == 0 and empty_input is not None:
        state.add_edge(empty_input.src, empty_input.src_conn, nested_sdfg, None, empty_input.data)
    if state.out_degree(nested_sdfg) == 0 and empty_output is not None:
        state.add_edge(nested_sdfg, None, empty_output.dst, empty_output.dst_conn, empty_output.data)

    # Remove subgraph nodes from graph
    state.remove_nodes_from(subgraph.nodes())

    # Remove subgraph transients from top-level graph
    for transient in subgraph_transients:
        del sdfg.arrays[transient]

    # Remove newly isolated nodes due to memlet consolidation
    for edge in inputs:
        if state.in_degree(edge.src) + state.out_degree(edge.src) == 0:
            state.remove_node(edge.src)
    for edge in outputs:
        if state.in_degree(edge.dst) + state.out_degree(edge.dst) == 0:
            state.remove_node(edge.dst)

    return nested_sdfg
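
A hedged usage sketch for this helper, assuming it is importable as dace.transformation.helpers.nest_state_subgraph and that the toy program below lowers to a single non-empty state (the program itself is an arbitrary illustration):

import dace
from dace.sdfg.graph import SubgraphView
from dace.transformation.helpers import nest_state_subgraph

@dace.program
def scale(A: dace.float64[20]):
    return A * 2

sdfg = scale.to_sdfg()
# Pick a state with content and nest all of its nodes into a nested SDFG node
state = next(s for s in sdfg.nodes() if s.nodes())
nsdfg_node = nest_state_subgraph(sdfg, state, SubgraphView(state, state.nodes()))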