Example #1
def numerical_twirl(tensors, output_edges, input_edges, d_value,
                    n_tensor_factors, U):
    """
    Compute the numerical twirl U^{\otimes t}X U^{\dagger\otimes t} of some matrix X.
    
    Parameters
    ----------
    tensors: list
        The list of tensor network nodes from which the matrix X is constructed. For instance, this
        allows X to be expressed as a (matrix) product state. The matrix X must have shape
        (d_value, d_value) * n_tensor_factors.
    output_edges: list
        The list of output edges, i.e. the edges connected to the row indices of the matrix.
    input_edges: list
        The list of input edges, i.e. the edges connected to the column indices of the matrix.
    d_value: int
        The dimension of each tensor product factor.
    n_tensor_factors: int
        The number of tensor product factors (that is, t in the general definition above).
    U: Node
        The node representing the unitary matrix U to twirl with.
    
    Returns
    -------
    array
        The twirled matrix.
    """
    # Make copies of the tensors.
    tensors_copy, edges_copy = tn.copy(tensors)
    input_edges_copy = [edges_copy[input_edge] for input_edge in input_edges]
    output_edges_copy = [
        edges_copy[output_edge] for output_edge in output_edges
    ]
    U_copies = [tn.copy([U])[0][U] for _ in range(n_tensor_factors)]
    U_conj_copies = [
        tn.copy([U], conjugate=True)[0][U] for _ in range(n_tensor_factors)
    ]
    # Connect the U^dagger to the column (or "input") index of the matrix.
    for (input_edge_index, input_edge) in enumerate(input_edges_copy):
        input_edge ^ U_conj_copies[input_edge_index][1]
    # Connect the row (or "output") index of the matrix to the U.
    for (output_edge_index, output_edge) in enumerate(output_edges_copy):
        U_copies[output_edge_index][1] ^ output_edge
    # Contract the resulting tensor network.
    result = tn.contractors.greedy(
        list(tensors_copy.values()) + U_copies + U_conj_copies,
        output_edge_order=([U_copy[0] for U_copy in U_copies] +
                           [U_conj_copy[0] for U_conj_copy in U_conj_copies]))
    return result
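
A minimal usage sketch for numerical_twirl (hedged: it assumes the function above is in scope with a NumPy backend; the matrix X and the unitary U below are illustrative, with a single tensor factor, t = 1):

import numpy as np
import tensornetwork as tn

X = tn.Node(np.diag([1.0, -1.0]))                 # the matrix to twirl
U = tn.Node(np.array([[0.0, 1.0], [1.0, 0.0]]))   # twirl by the Pauli-X unitary
twirled = numerical_twirl([X], [X[0]], [X[1]], d_value=2,
                          n_tensor_factors=1, U=U)
print(twirled.tensor)  # U X U^dagger = diag(-1, 1) for this choice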
Example #2
def test_copy(backend):
    a = tn.Node(np.ones((2, 2, 2, 2)), backend=backend, name='a')
    b = tn.Node(np.ones((2, 2, 2, 2)), backend=backend, name='b')
    c = tn.Node(np.ones((2, 2, 2, 2)), backend=backend, name='c')

    a[0] ^ a[1]
    a[2] ^ b[1]
    b[3] ^ c[2]
    c[3] ^ b[0]
    nodes = [a, b]
    copied_nodes, copied_edges = tn.copy([a, b])
    assert len(copied_nodes) == 2
    assert len(copied_edges) == 6

    for n in nodes:
        assert n in copied_nodes
    for e, ce in copied_edges.items():
        print(e.node1, e.node2)
        if e.node1 in nodes and e.node2 not in nodes:
            assert ce.node1 is copied_nodes[e.node1]
        if e.node2 in nodes and e.node1 not in nodes:
            assert ce.node1 is copied_nodes[e.node2]
        if e.node2 in nodes and e.node1 in nodes:
            assert ce.node1 is copied_nodes[e.node1]
            assert ce.node2 is copied_nodes[e.node2]
        if e.node2 not in nodes and e.node1 not in nodes:
            assert False
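
Since most of these examples hinge on tn.copy's return values, here is a minimal sketch of its semantics (the two nodes are illustrative): tn.copy maps every original node and edge to its copy.

import numpy as np
import tensornetwork as tn

a = tn.Node(np.ones((2, 2)))
b = tn.Node(np.ones((2, 2)))
edge = a[1] ^ b[0]
node_dict, edge_dict = tn.copy([a, b])
assert node_dict[a] is not a                    # copies are fresh nodes
assert edge_dict[edge].node1 is node_dict[a]    # copied edge joins the copied nodes
assert edge_dict[edge].node2 is node_dict[b]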
Example #3
def test_tnwork_copy_conj(backend):
    if backend == "pytorch":
        pytest.skip("Pytorch does not support complex numbers")
    a = tn.Node(np.array([1.0 + 2.0j, 2.0 - 1.0j]))
    nodes, _ = tn.copy({a}, conjugate=True)
    np.testing.assert_allclose(nodes[a].tensor,
                               np.array([1.0 - 2.0j, 2.0 + 1.0j]))
Example #4
def test_network_copy_reordered(dtype, num_charges):
    a = tn.Node(get_random_symmetric((30, 30, 30), [False, False, False],
                                     num_charges,
                                     dtype=dtype),
                backend='symmetric')
    b = tn.Node(get_random_symmetric((30, 30, 30), [False, True, False],
                                     num_charges,
                                     dtype=dtype),
                backend='symmetric')
    c = tn.Node(get_random_symmetric((30, 30, 30), [True, False, True],
                                     num_charges,
                                     dtype=dtype),
                backend='symmetric')

    a[0] ^ b[1]
    a[1] ^ c[2]
    b[2] ^ c[0]

    edge_order = [a[2], c[1], b[0]]
    node_dict, edge_dict = tn.copy({a, b, c})
    tn.check_correct({a, b, c})

    res = a @ b @ c
    res.reorder_edges(edge_order)
    res_copy = node_dict[a] @ node_dict[b] @ node_dict[c]
    res_copy.reorder_edges([edge_dict[e] for e in edge_order])
    np.testing.assert_allclose(res.tensor.data, res_copy.tensor.data)
Example #5
def copyState(psi, conj=False) -> List[tn.Node]:
    result = list(tn.copy(psi, conjugate=conj)[0].values())
    if conj:
        for node in result:
            for edge in node.edges:
                if edge.name.endswith('*'):
                    edge.name = edge.name[:-1]
                else:
                    edge.name = edge.name + '*'
    return result
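
A hypothetical usage sketch for copyState (the two-site state, its names and shapes are illustrative; the dict returned by tn.copy preserves the input list's order here):

import numpy as np
import tensornetwork as tn

psi = [tn.Node(np.random.rand(2, 3) + 1j * np.random.rand(2, 3), name='site0'),
       tn.Node(np.random.rand(3, 2) + 1j * np.random.rand(3, 2), name='site1')]
psi[0][1] ^ psi[1][0]
psi_star = copyState(psi, conj=True)  # conjugated copy; edge names gain a trailing '*'
np.testing.assert_allclose(psi_star[0].tensor, np.conj(psi[0].tensor))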
Example #6
def test_tnwork_copy_identities(backend):
    a = tn.Node(np.random.rand(3, 3, 3), name='a', backend=backend)
    b = tn.Node(np.random.rand(3, 3, 3), name='b', backend=backend)
    c = tn.Node(np.random.rand(3, 3, 3), name='c', backend=backend)
    a[0] ^ b[1]
    b[2] ^ c[0]
    node_dict, edge_dict = tn.copy({a, b, c})
    for node in {a, b, c}:
        assert not node_dict[node] is node
    for edge in tn.get_all_edges({a, b, c}):
        assert not edge_dict[edge] is edge
Example #7
def test_split_edges_standard_contract_between(backend):
    a = tn.Node(np.random.randn(6, 3, 5), name="A", backend=backend)
    b = tn.Node(np.random.randn(2, 4, 6, 3), name="B", backend=backend)
    e1 = tn.connect(a[0], b[2], "Edge_1_1")  # to be split
    tn.connect(a[1], b[3], "Edge_1_2")  # background standard edge
    node_dict, _ = tn.copy({a, b})
    c_prior = node_dict[a] @ node_dict[b]
    shape = (2, 1, 3)
    tn.split_edge(e1, shape)
    tn.check_correct({a, b})
    c_post = tn.contract_between(a, b)
    np.testing.assert_allclose(c_prior.tensor, c_post.tensor)
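
The same invariant outside the test harness, as a sketch: splitting a shared edge of dimension 6 into edges of dimensions 2 and 3 leaves the contraction value unchanged (shapes are illustrative).

import numpy as np
import tensornetwork as tn

x = tn.Node(np.random.randn(6, 2))
y = tn.Node(np.random.randn(2, 6))
e = x[0] ^ y[1]
node_dict, _ = tn.copy([x, y])
before = (node_dict[x] @ node_dict[y]).tensor  # contract a copy first
tn.split_edge(e, (2, 3))                       # 2 * 3 == 6
after = (x @ y).tensor                         # now two shared edges
np.testing.assert_allclose(before, after)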
Example #8
def test_tnwork_copy_subgraph_2(backend):
    a = tn.Node(np.random.rand(3, 3, 3), name='a', backend=backend)
    b = tn.Node(np.random.rand(3, 3, 3), name='b', backend=backend)
    c = tn.Node(np.random.rand(3, 3, 3), name='c', backend=backend)
    a[0] ^ b[1]
    edge2 = c[2] ^ b[0]
    node_dict, edge_dict = tn.copy({a, b})
    cut_edge = edge_dict[edge2]
    assert cut_edge.is_dangling()
    assert cut_edge.axis1 == 0
    assert cut_edge.get_nodes() == [node_dict[b], None]
    assert len(a.get_all_nondangling()) == 1
Example #9
    def compute_dynamics(
            self,
            controls: Callable[[int], Tuple[np.ndarray, np.ndarray]],
            initial_state: Optional[np.ndarray] = None) -> Dynamics:
        """See BaseProcessTensorBackend.compute_dynamics() for docstring. """
        assert (initial_state is None) ^ (self._initial_tensor is None), \
            "Initial state must be either (exclusively) encoded in the " \
            + "process tensor or given as an argument."

        if self._caps is None:
            self._compute_caps()

        if self._initial_tensor is None:
            # Add a singleton bond leg to the one-leg initial density vector.
            initial_tensor = add_singleton(initial_state, 0)
            current = tn.Node(initial_tensor, backend=self._backend)
        else:
            current = self._initial_tensor.copy()

        current_bond_leg = current[0]
        current_state_leg = current[1]

        states = []

        for i, tensor in enumerate(self._tensors):
            # Pre/post control Liouvillians from the symmetrised Trotter splitting.
            pre, post = controls(i)
            # Cap that closes off the remaining bond leg so the current state
            # can be read out at this step.
            cap = self._caps[-1 - i].copy()
            pre_node = tn.Node(pre, backend=self._backend)
            post_node = tn.Node(post, backend=self._backend)

            node_dict, edge_dict = tn.copy([current])
            edge_dict[current_bond_leg] ^ cap[0]
            state_node = node_dict[current] @ cap
            states.append(state_node.get_tensor())

            current_bond_leg ^ tensor[0]
            current_state_leg ^ pre_node[0]
            pre_node[1] ^ tensor[2]
            tensor[3] ^ post_node[0]

            current_bond_leg = tensor[1]
            current_state_leg = post_node[1]
            current = current @ pre_node @ tensor @ post_node

        cap = self._caps[0]
        current_bond_leg ^ cap[0]
        final_state_node = current @ cap
        states.append(final_state_node.get_tensor())

        return states
Example #10
def test_tnwork_copy(backend):
    a = tn.Node(np.random.rand(3, 3, 3), backend=backend)
    b = tn.Node(np.random.rand(3, 3, 3), backend=backend)
    c = tn.Node(np.random.rand(3, 3, 3), backend=backend)
    a[0] ^ b[1]
    a[1] ^ c[2]
    b[2] ^ c[0]
    node_dict, _ = tn.copy({a, b, c})
    tn.check_correct({node_dict[n] for n in {a, b, c}})

    res = a @ b @ c
    res_copy = node_dict[a] @ node_dict[b] @ node_dict[c]
    np.testing.assert_allclose(res.tensor, res_copy.tensor)
Example #11
    def copy(self) -> Any:
        """Return a copy of the NodeArray."""
        ret = NodeArray([], name=self.name, backend=self.backend)
        node_dict, edge_dict = tn.copy(self.nodes)
        ret.nodes = [node_dict[node] for node in self.nodes]
        ret.left_edge = edge_dict[self.left_edge] if self.left else None
        ret.right_edge = edge_dict[self.right_edge] if self.right else None
        ret.bond_edges = [edge_dict[edge] for edge in self.bond_edges]
        array_edges = []
        for edges in self.array_edges:
            array_edges.append([edge_dict[edge] for edge in edges])
        ret.array_edges = array_edges
        return ret
Example #12
    def _copy(self, conj: bool = False) -> Tuple[List[tn.Node], List[tn.Edge]]:
        """
        Copy all nodes and their dangling edges correspondingly.

        :return: the copied nodes and the copied dangling ("front") edges
        """
        ndict, edict = tn.copy(self._nodes, conjugate=conj)
        newnodes = []
        for n in self._nodes:
            newnodes.append(ndict[n])
        newfront = []
        for e in self._front:
            newfront.append(edict[e])
        return newnodes, newfront
Example #13
def numerical_haar_average_twirl(tensor, d_value, n_tensor_factors):
    """
    Compute the numerical Haar average of the twirl of some matrix X by a unitary.
    
    Parameters
    ----------
    tensor: Node
        The tensor network node representing the matrix. It must have shape
        (d_value, d_value) * n_tensor_factors.
    d_value: int
        The dimension of each tensor product factor.
    n_tensor_factors: int
        The number of tensor product factors (that is, t in the general definition above).
    
    Returns
    -------
    array
        The numerical Haar average.
    """
    # Make copies of tensors.
    tensors_copy, edges_copy = tn.copy([tensor])
    tensor_copy = tensors_copy[tensor]
    output_edges_copy = [
        edges_copy[output_edge]
        for output_edge in tensor.edges[:int(len(tensor.edges) / 2)]
    ]
    input_edges_copy = [
        edges_copy[input_edge]
        for input_edge in tensor.edges[int(len(tensor.edges) / 2):]
    ]
    # Generate RTNI tensor network graph.
    graph = []
    graph.extend([[["X", 1, "in", input_edge_index + 1],
                   ["U*", input_edge_index + 1, "out", 1]]
                  for input_edge_index in range(len(input_edges_copy))])
    graph.extend([[["U", output_edge_index + 1, "in", 1],
                   ["X", 1, "out", output_edge_index + 1]]
                  for output_edge_index in range(len(output_edges_copy))])
    # Symbolically Haar-integrate this tensor network.
    d_symbol = sympy.symbols("d")
    symbolic_average = RTNI.integrateHaarUnitary(
        [graph, 1], ["U", [d_symbol], [d_symbol], d_symbol])
    # Convert the symbolic RTNI average to a numerical TensorNetwork one.
    numerical_average = converters.rtni_to_tn(
        symbolic_average, {"X": tensor},
        d_symbol=d_symbol,
        d_value=d_value,
        n_tensor_factors=n_tensor_factors)
    return numerical_average
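
A hedged sanity check for the function above, assuming RTNI, sympy and the converters module imported earlier are available: for a single tensor factor (t = 1) the Haar average of U X U^dagger reduces to tr(X)/d * I, so this call should return approximately 0.5 * identity.

X = tn.Node(np.diag([1.0, 0.0]))
avg = numerical_haar_average_twirl(X, d_value=2, n_tensor_factors=1)
# Expected: avg is close to 0.5 * np.eye(2)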
Example #14
    def compute_dynamics(
            self,
            controls: Callable[[int], Tuple[np.ndarray, np.ndarray]],
            initial_state: Optional[np.ndarray] = None) -> Dynamics:
        """See BaseProcessTensorBackend.compute_dynamics() for docstring. """
        assert (initial_state is None) ^ (self._initial_tensor is None), \
            "Initial state must be either (exclusively) encoded in the " \
            + "process tensor or given as an argument."

        if self._caps is None:
            self._compute_caps()

        if self._initial_tensor is None:
            initial_tensor = add_singleton(initial_state, 0)
            current = tn.Node(initial_tensor, backend=self._backend)
        else:
            current = self._initial_tensor.copy()

        current_bond_leg = current[0]
        current_state_leg = current[1]

        states = []

        for i, tensor in enumerate(self._tensors):
            pre, post = controls(i)
            cap = self._caps[-1 - i].copy()
            pre_node = tn.Node(pre, backend=self._backend)
            post_node = tn.Node(post, backend=self._backend)

            node_dict, edge_dict = tn.copy([current])
            edge_dict[current_bond_leg] ^ cap[0]
            state_node = node_dict[current] @ cap
            states.append(state_node.get_tensor())

            current_bond_leg ^ tensor[0]
            current_state_leg ^ pre_node[0]
            pre_node[1] ^ tensor[2]
            tensor[3] ^ post_node[0]

            current_bond_leg = tensor[1]
            current_state_leg = post_node[1]
            current = current @ pre_node @ tensor @ post_node

        cap = self._caps[0]
        current_bond_leg ^ cap[0]
        final_state_node = current @ cap
        states.append(final_state_node.get_tensor())

        return states
Example #15
    def copy(self, conjugate=False):
        """
        Copies the whole MPS.

        Args:
            conjugate (bool): Whether to conjugate the copied tensors.

        Returns:
            MPS: A deep copy of self.
        """
        node_dict, _ = tn.copy(self.nodes, conjugate=conjugate)
        mps_copy = MPS(0)
        mps_copy.n_qubits = self.n_qubits
        for i in range(mps_copy.n_qubits):
            rel_node = node_dict[self.nodes[i]]
            mps_copy.nodes.append(rel_node)

        return mps_copy
Example #16
def test_network_copy_reordered(backend):
    a = tn.Node(np.random.rand(3, 3, 3), backend=backend)
    b = tn.Node(np.random.rand(3, 3, 3), backend=backend)
    c = tn.Node(np.random.rand(3, 3, 3), backend=backend)
    a[0] ^ b[1]
    a[1] ^ c[2]
    b[2] ^ c[0]

    edge_order = [a[2], c[1], b[0]]
    node_dict, edge_dict = tn.copy({a, b, c})
    tn.check_correct({a, b, c})

    res = a @ b @ c
    res.reorder_edges(edge_order)
    res_copy = node_dict[a] @ node_dict[b] @ node_dict[c]
    res_copy.reorder_edges([edge_dict[e] for e in edge_order])
    np.testing.assert_allclose(res.tensor, res_copy.tensor)
Example #17
def copy_mps_co_mps(N, mps, co_mps):
    # For an MPS and its conjugate that are joined at the 0-th site (through
    # the node N), copy the combined structure.

    n_qubits = mps.n_qubits
    node_dict, _ = tn.copy([N] + mps.nodes[1:] + co_mps.nodes[1:])
    N2 = node_dict[N]

    mpss = []
    for t in [mps, co_mps]:
        m = MPS(0)
        m.nodes = [None] + [node_dict[t.nodes[i]] for i in range(1, n_qubits)]
        m.n_qubits = len(m.nodes)
        mpss.append(m)

    mps2, co_mps2 = mpss

    return N2, mps2, co_mps2
Example #18
    def _copy_state_tensor(
            self,
            conj: bool = False,
            reuse: bool = True) -> Tuple[List[tn.Node], List[tn.Edge]]:
        if reuse:
            t = getattr(self, "state_tensor", None)
        else:
            t = None
        if t is None:
            nodes, d_edges = self._copy()
            t = contractor(nodes, output_edge_order=d_edges)
            setattr(self, "state_tensor", t)
        ndict, edict = tn.copy([t], conjugate=conj)
        newnodes = []
        newnodes.append(ndict[t])
        newfront = []
        for e in t.edges:
            newfront.append(edict[e])
        return newnodes, newfront
Example #19
    def _copy(
        self,
        nodes: Sequence[tn.Node],
        dangling: Optional[Sequence[tn.Edge]] = None,
        conj: Optional[bool] = False,
    ) -> Tuple[List[tn.Node], List[tn.Edge]]:
        """
        copy all nodes and dangling edges correspondingly

        :return:
        """
        ndict, edict = tn.copy(nodes, conjugate=conj)
        newnodes = []
        for n in nodes:
            newnodes.append(ndict[n])
        newfront = []
        if not dangling:
            dangling = []
            for n in nodes:
                dangling.extend([e for e in n])
        for e in dangling:
            newfront.append(edict[e])
        return newnodes, newfront
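
The same copy-with-dangling-edges pattern as a standalone sketch (the small two-node network is illustrative):

import numpy as np
import tensornetwork as tn

nodes = [tn.Node(np.random.rand(2, 3)), tn.Node(np.random.rand(3, 2))]
nodes[0][1] ^ nodes[1][0]
dangling = [nodes[0][0], nodes[1][1]]
ndict, edict = tn.copy(nodes)
new_nodes = [ndict[n] for n in nodes]
new_front = [edict[e] for e in dangling]   # copied dangling edges, same order
assert all(e.is_dangling() for e in new_front)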
Example #20
def contract_children(tree, tree_root, tensors_by_root, edge_choices):
    """Contract the children of a node in a QAOA Tree Tensor Network and return the vector resulting
       from the contraction.
       
    Parameters
    ----------
    tree: NetworkX directed graph
        The underlying tree of the Tree Tensor Network.
    tree_root: node
        The root of the underlying tree of the Tree Tensor Network.
    tensors_by_root: dict
        A dictionary mapping each node of the underlying tree to the list of tensors based on this node
        (see also qaoa_tensor_network()).
    edge_choices: list
        A sequence of integers specifying the node whose children to contract (see edge_choices_to_node()).
    
    Returns
    -------
    TensorNetwork tensor
        A tensor representing the value of the node specified by argument 'edge_choices' once its children
        have been contracted.
    """
    #print("contracting children for {}".format(edge_choices))
    depth = tree_tools.tree_depth(tree, tree_root)
    root = edge_choices_to_node(tree, tree_root, edge_choices)
    degree = tree.degree(tree_root)
    if len(edge_choices) > depth:
        raise ValueError("too high internal node depth: {} > {}".format(len(edge_choices), depth))
    elif len(edge_choices) == 0:
        partial_contractions = [
            contract_children(tree, tree_root, tensors_by_root, edge_choices + [i])
            for i in range(degree)
        ]
        tensors_copy = list(tn.copy(tensors_by_root[tree_root])[0].values())
        for i in range(len(tensors_copy) - 1):
            for partial_contraction_idx, partial_contraction in enumerate(partial_contractions):
                tensors_copy[int((i + 1) / 2)][(partial_contraction_idx + 1 + degree + 1) if i % 2 else partial_contraction_idx + 1] ^ partial_contraction[i]
                tensors_copy[-1 - int((i + 1) / 2)][partial_contraction_idx + 1 if i % 2 or i == 0 else (partial_contraction_idx + 1 + degree + 1)] ^ partial_contraction[-1 - i]
        for i in range(len(tensors_copy) - 1):
            tensors_copy[i][0] ^ tensors_copy[i + 1][(degree + 1) if i < len(tensors_copy) - 2 else 0]
        contraction = tn.contractors.greedy(
            tensors_copy + partial_contractions
        )
        return contraction
    elif tree_tools.tree_depth(nx.dfs_tree(tree, root), root) == 0:
        num_tensors_per_stack = len(tn.copy(tensors_by_root[tree_root])[0].values())
        return tn.Node(np.eye(2 ** (num_tensors_per_stack - 1)).reshape((2,) * 2 * (num_tensors_per_stack - 1)))
    else:
        partial_contractions = [
            contract_children(tree, tree_root, tensors_by_root, edge_choices + [i])
            for i in range(degree - 1)
        ]
        tensors_copy = list(tn.copy(tensors_by_root[root])[0].values())
        if len(edge_choices) % 2:
            for i in range(len(tensors_copy) - 1):
                for partial_contraction_idx, partial_contraction in enumerate(partial_contractions):
                    tensors_copy[int(i / 2)][(partial_contraction_idx + 1) if i % 2 else (partial_contraction_idx + 1 + degree)] ^ partial_contraction[i]
                    tensors_copy[-1 - int(i / 2)][(partial_contraction_idx + 1 + degree) if i % 2 else (partial_contraction_idx + 1)] ^ partial_contraction[-1 - i]
            for qubit_idx in range(degree):
                tensors_copy[int(len(tensors_copy) / 2) - 1][qubit_idx] ^ tensors_copy[int(len(tensors_copy) / 2)][qubit_idx + degree]
            contraction = tn.contractors.greedy(
                tensors_copy + partial_contractions,
                output_edge_order=sum([
                    [tensors_copy[i][degree], tensors_copy[i][0]]
                    for i in range(int(len(tensors_copy) / 2) - 1)
                ], []) + [
                    tensors_copy[int(len(tensors_copy) / 2) - 1][degree],
                    tensors_copy[int(len(tensors_copy) / 2)][0]
                ] + sum([
                    [tensors_copy[i][degree], tensors_copy[i][0]]
                    for i in range(int(len(tensors_copy) / 2) + 1, len(tensors_copy))
                ], [])
            )
        else:
            for i in range(len(tensors_copy) - 1):
                for partial_contraction_idx, partial_contraction in enumerate(partial_contractions):
                    tensors_copy[int((i + 1) / 2)][(partial_contraction_idx + 1 + degree) if i % 2 else (partial_contraction_idx + 1)] ^ partial_contraction[i]
                    tensors_copy[-1 - int((i + 1) / 2)][(partial_contraction_idx + 1) if i % 2 or i == 0 else (partial_contraction_idx + 1 + degree)] ^ partial_contraction[-1 - i]
            contraction = tn.contractors.greedy(
                tensors_copy + partial_contractions,
                output_edge_order=[
                    tensors_copy[0][0]
                ] + sum([
                    [tensors_copy[i][degree], tensors_copy[i][0]]
                    for i in range(1, len(tensors_copy) - 1)
                ], []) + [
                    tensors_copy[-1][0]
                ]
            )
        return contraction
Example #21
def rtni_to_tn(rtni_tensors, rtni_tn_correspondance, d_symbol, d_value,
               n_tensor_factors):
    final_tensor = None
    for (contracted_edges, coeff) in rtni_tensors:
        tn_copies = tn.copy(rtni_tn_correspondance.values())[0]
        num_coeff = float(coeff.subs({d_symbol: d_value}))
        #print(f"num_coeff: {num_coeff}")
        root_tn = None
        input_nodes = [
            tn.Node(np.eye(d_value)) for _ in range(n_tensor_factors)
        ]
        output_nodes = [
            tn.Node(np.eye(d_value)) for _ in range(n_tensor_factors)
        ]
        encountered_dummy_tensor = False
        for contracted_edge in contracted_edges:
            contracted_edge_start, contracted_edge_end = contracted_edge
            if contracted_edge_start[0] == "@U*" and contracted_edge_start[2] == "in" \
                and contracted_edge_end[0] == "@U" and contracted_edge_end[2] == "out":
                #print("@U and @U* where expected")
                output_nodes[contracted_edge_end[1] -
                             1][1] ^ input_nodes[contracted_edge_start[1] -
                                                 1][0]
                encountered_dummy_tensor = True
            elif contracted_edge_start[0] != "@U*" and contracted_edge_end[
                    0] != "@U":
                #print("normal vertex")
                def rtni_to_tn_edge(tn_tensor, rtni_edge):
                    tn_edge = rtni_edge[1] - 1
                    if rtni_edge[0] == "in":
                        tn_edge += int(len(tn_tensor.edges) / 2)
                    return tn_edge

                tn_start = tn_copies[rtni_tn_correspondance[
                    contracted_edge_start[0]]]
                tn_end = tn_copies[rtni_tn_correspondance[
                    contracted_edge_end[0]]]
                edge_start = rtni_to_tn_edge(tn_start,
                                             contracted_edge_start[-2:])
                edge_end = rtni_to_tn_edge(tn_end, contracted_edge_end[-2:])
                tn_start[edge_start] ^ tn_end[edge_end]
                if root_tn is None:
                    root_tn = tn_start
            else:
                raise Exception(
                    "unexpected contracted edge {}".format(contracted_edge))
        if root_tn is None:
            raise Exception(
                "encountered no RTNI tensors apart from @U, @U*, should not happen"
            )
        output_edge_order = []
        if encountered_dummy_tensor:
            output_edge_order.extend([
                output_node[0] for output_node in output_nodes
                if output_node[0].is_dangling()
            ])
            output_edge_order.extend([
                input_node[1] for input_node in input_nodes
                if input_node[1].is_dangling()
            ])
        result = tn.contractors.greedy(
            list(tn_copies.values()) +
            ((output_nodes + input_nodes) if encountered_dummy_tensor else []),
            output_edge_order=output_edge_order)
        if final_tensor is None:
            final_tensor = num_coeff * result.tensor
        else:
            final_tensor += num_coeff * result.tensor
    return final_tensor