def test_split_node_names(backend):
    """Names given to split_node must land on the result nodes and new edge."""
    node = tn.Node(np.zeros((2, 3, 4, 5, 6)), backend=backend)
    left_edges = [node[k] for k in range(3)]
    right_edges = [node[k] for k in range(3, 5)]
    left, right, _ = tn.split_node(
        node, left_edges, right_edges,
        left_name='left', right_name='right', edge_name='edge')
    assert left.name == 'left'
    assert right.name == 'right'
    assert left.edges[-1].name == 'edge'
    assert right.edges[0].name == 'edge'
def getDMRGH(N, onsiteTerms, neighborTerms, d=2):
    """Assemble the DMRG Hamiltonian operator from on-site and neighbor terms.

    Args:
        N: number of sites.
        onsiteTerms: sequence of N single-site operator matrices.
        neighborTerms: sequence of two-site operator matrices; each must have
            d**4 entries so it can be reshaped to (d, d, d, d).
            (NOTE(review): only indices 0..N-2 are read — confirm callers
            pass at least N-1 terms.)
        d: local physical dimension of each site (default 2).

    Returns:
        HOp wrapping the single-site nodes plus the left-to-right and
        right-to-left halves of each SVD-split neighbor term.
    """
    hSingles = [None] * N
    for i in range(N):
        hSingles[i] = tn.Node(onsiteTerms[i], name=('single' + str(i)),
                              axis_names=['s' + str(i) + '*', 's' + str(i)])
    hr2l = [None] * N
    hl2r = [None] * N
    for i in range(N - 1):
        # Fix/generalization: the original only handled d == 2 and d == 3 and
        # hit an unbound `neighborTerm` (NameError) for any other d. Reshaping
        # to (d, d, d, d) covers every local dimension and is identical to the
        # old behavior for d in {2, 3}.
        neighborTerm = np.reshape(neighborTerms[i], (d, d, d, d))
        pairOp = tn.Node(neighborTerm,
                         axis_names=['s' + str(i) + '*', 's' + str(i + 1) + '*',
                                     's' + str(i), 's' + str(i + 1)])
        # Split the two-site operator into a left factor (acting on site i)
        # and a right factor (acting on site i+1) joined by bond edge 'm'.
        splitted = tn.split_node(pairOp,
                                 [pairOp[0], pairOp[2]],
                                 [pairOp[1], pairOp[3]],
                                 left_name=('l2r' + str(i)),
                                 right_name=('r2l' + str(i) + '*'),
                                 edge_name='m')
        # Put the bond leg last on the right factor so it matches the
        # ordering the sweep code expects.
        hr2l[i + 1] = bops.permute(splitted[1], [1, 2, 0])
        hl2r[i] = splitted[0]
    return HOp(hSingles, hr2l, hl2r)
def test_split_node_names(num_charges):
    """Symmetric-backend split must propagate node and edge names."""
    np.random.seed(10)
    node = tn.Node(
        get_random((2, 3, 4, 5, 6), num_charges=num_charges),
        backend='symmetric')
    left_edges = [node[k] for k in range(3)]
    right_edges = [node[k] for k in range(3, 5)]
    left, right, _ = tn.split_node(
        node, left_edges, right_edges,
        left_name='left', right_name='right', edge_name='edge')
    assert left.name == 'left'
    assert right.name == 'right'
    assert left.edges[-1].name == 'edge'
    assert right.edges[0].name == 'edge'
def apply_consecutive_gates(self, i, gate):
    """Apply a two-qubit gate to the i'th and the (i+1)'th qubit.

    The gate must be either a 4x4 matrix or a 2x2x2x2 tensor.

    Comment: In tn.split_node, it may make sense to use max_truncation_err
    option instead of max_singular_values option, in case there was a
    mistake in my note.

    Args:
        i(int): Index of the qubit.
        gate(np.array): 4x4 matrix or 2x2x2x2 tensor.
    """
    gate_node = tn.Node(normalize_gate(gate))
    j = (i + 1) % self.n_qubits
    # Current dimension of the inner bond between qubits i and i+1; caps the
    # number of singular values kept when the pair is re-split.
    bond_dim = self.right_edges(i)[0].dimension
    # Hook the two open qubit edges into the gate's input legs.
    self.out_edge(i) ^ gate_node[0]
    self.out_edge(j) ^ gate_node[1]
    # Non-dangling edges that remain on each side after the split.
    left_edges = self.left_edges(i) + [gate_node[2]]
    right_edges = self.right_edges(j) + [gate_node[3]]
    # Contract both site tensors with the gate, then SVD back into two sites.
    contracted = self.nodes[i] @ self.nodes[j] @ gate_node
    self.nodes[i], self.nodes[j], _ = tn.split_node(
        contracted,
        left_edges=left_edges,
        right_edges=right_edges,
        max_singular_values=4 * bond_dim,
        left_name=str(i),
        right_name=str(j))
def test_split_node(dtype, num_charges):
    """Splitting an all-zero symmetric tensor yields zero factors with
    charges matching the original."""
    np.random.seed(111)
    node = tn.Node(
        get_zeros((2, 3, 4, 5, 6), num_charges, dtype), backend='symmetric')
    left_edges = [node[k] for k in range(3)]
    right_edges = [node[k] for k in range(3, 5)]
    left, right, _ = tn.split_node(node, left_edges, right_edges)
    tn.check_correct({left, right})
    recombined = left @ right
    np.testing.assert_allclose(recombined.tensor.shape, (2, 3, 4, 5, 6))
    np.testing.assert_allclose(node.tensor.shape, (2, 3, 4, 5, 6))
    np.testing.assert_allclose(left.tensor.data, 0)
    np.testing.assert_allclose(right.tensor.data, 0)
    assert np.all([
        charge_equal(node.tensor._charges[k], recombined.tensor._charges[k])
        for k in range(len(node.tensor._charges))
    ])
def test_split_node_orig_shape(backend):
    """split_node must not mutate the shape of the node it splits."""
    node = tn.Node(np.random.rand(3, 4, 5), backend=backend)
    tn.split_node(node, [node[0], node[2]], [node[1]])
    np.testing.assert_allclose(node.shape, (3, 4, 5))
def test_split_node_of_node_without_backend_raises_error():
    """Passing a raw ndarray (not a tn.Node) must raise AttributeError."""
    tensor = np.random.rand(3, 3, 3)
    with pytest.raises(AttributeError):
        tn.split_node(tensor, left_edges=[], right_edges=[])
def _add_initial_state_nodes(self, tensors, tensor_wires, names):
    """Create the nodes representing the initial input state circuit.

    Input states can be factorized or entangled. If a state can be factorized
    into :math:`k` subsystems, then ``tensors``, ``wires``, and ``names``
    should be sequences of length :math:`k`.

    ``self._free_wire_edges`` is updated with the dangling edges from the
    prepared state nodes.

    If ``self._rep == "mps"``, then the ``self.mps`` attribute is replaced
    with a new matrix product state object representing the prepared initial
    states.

    Args:
        tensors (Sequence[np.array, tf.Tensor, torch.Tensor]): the numerical
            tensors for each factorized component of the state (in the
            computational basis)
        tensor_wires (Sequence(Wires)): wires for each factorized component
            of the state
        names (Sequence[str]): name for each factorized component of the
            state

    Raises:
        ValueError: if the three input sequences differ in length, or if a
            tensor's rank does not match its number of wires.
        NotImplementedError: (mps only) for multi-wire tensors placed on
            non-consecutive wires.
    """
    # pylint: disable=too-many-branches
    if not len(tensors) == len(tensor_wires) == len(names):
        raise ValueError("tensors, wires, and names must all be the same length.")
    if self._rep == "exact":
        # Exact representation: one node per factor; all its edges stay free.
        self._free_wire_edges = []
        for tensor, wires, name in zip(tensors, tensor_wires, names):
            # Each factor must carry exactly one index per wire it acts on.
            if len(tensor.shape) != len(wires):
                raise ValueError(
                    "Tensor provided has shape={}, which is incompatible "
                    "with provided wires {}.".format(tensor.shape, wires.tolist())
                )
            node = self._add_node(tensor, wires=wires, name=name)
            self._free_wire_edges.extend(node.edges)
    elif self._rep == "mps":
        nodes = []
        for tensor, wires, name in zip(tensors, tensor_wires, names):
            if len(tensor.shape) != len(wires):
                raise ValueError(
                    "Tensor provided has shape={}, which is incompatible "
                    "with provided wires {}.".format(tensor.shape, wires.tolist())
                )
            # Pad with trivial (size-1) virtual bonds on both sides so a
            # single-qubit factor already has MPS shape (1, 2, 1).
            tensor = self._expand_dims(tensor, 0)
            tensor = self._expand_dims(tensor, -1)
            if tensor.shape == (1, 2, 1):
                # MPS form
                node = self._add_node(tensor, wires=wires, name=name)
                nodes.append(node)
            else:
                # translate to wire labels used by device
                wire_indices = self.map_wires(wires)
                # break down non-factorized tensors into MPS form
                if max(wire_indices.labels) - min(wire_indices.labels) != len(wire_indices) - 1:
                    raise NotImplementedError(
                        "Multi-wire state initializations only supported for tensors on consecutive wires."
                    )
                # Repeated two-index splits peel one site at a time off the
                # remainder DV until only the last wire's tensor is left.
                DV = tensor
                for idx, wire in enumerate(wires):
                    if idx < len(wires) - 1:
                        node = tn.Node(DV, name=name, backend=self.backend)
                        U, DV, _error = tn.split_node(node, node[:2], node[2:])
                        node = self._add_node(U, wires=wire, name=name)
                    else:
                        # final wire; no need to split further
                        node = self._add_node(DV, wires=wire, name=name)
                    nodes.append(node)
        self.mps = tn.matrixproductstates.finite_mps.FiniteMPS(
            [node.tensor for node in nodes],
            canonicalize=False,
            backend=self.backend,
        )
        # Edge index 1 of each MPS site is its physical (dangling) leg.
        self._free_wire_edges = [node[1] for node in self.mps.nodes]
P = Psi #print("\n\n Psi is: \n",Psi.tensor,"\n\n") #%% Repeated SVD for decomposition into MPS Edges = P.edges L = [] for i in range(0, n - 1): # print("\n\n\n",i,"\n\n\n") Edges = list(P.get_all_dangling()) El = list(P.get_all_nondangling()) El.append(Edges[0]) Er = Edges[1:] l, P, _ = tn.split_node(P, El, Er, max_truncation_err=0.0) L.append(l) L.append(P) #%% Verification! t = 1 print("\n\n") for i in range(1, n - 1): print("The shape of node {0} is {1}.".format(i + 1, L[i].tensor.shape)) M = np.dot(np.transpose(L[i].tensor[:, 0, :]), L[i].tensor[:, 0, :]) M = M + np.dot(np.transpose(L[i].tensor[:, 1, :]), L[i].tensor[:, 1, :]) if (np.all(np.equal(M, np.eye(M.shape[0])))):
def func(self, inputs):
    """Contract the PEPS network with per-pixel input features and return a
    normalized length-10 output vector.

    The grid is contracted by a boundary-MPS scheme: rows are absorbed from
    the top and bottom edges towards the center row ``cx``, each boundary
    MPS being re-compressed (RQ sweep, then SVD-with-truncation + QR sweep)
    after every absorbed row; finally the center row is contracted from both
    ends towards the center column ``cy``.

    NOTE(review): assumes ``inputs`` indexes as (feature, x, y) — confirm
    against the caller.
    """
    # C * x_nodes * y_nodes
    # Wrap every PEPS parameter tensor and every input feature vector in a
    # tn.Node; node names encode grid positions and are used below to route
    # edges to the correct side of each decomposition.
    peps_nodes = []
    input_nodes = []
    for i in range(self.xnodes):
        peps_line = []
        input_line = []
        for j in range(self.ynodes):
            peps_line.append(
                tn.Node(self.peps_var[i][j], name=f'p_{i}_{j}'))
            input_line.append(tn.Node(inputs[:, i, j], name=f'i_{i}_{j}'))
        peps_nodes.append(peps_line)
        input_nodes.append(input_line)
    # Connect the edges
    cx, cy = self.xnodes // 2, self.ynodes // 2
    # Input Features
    for i in range(self.xnodes):
        for j in range(self.ynodes):
            input_nodes[i][j][0] ^ peps_nodes[i][j][0]
    # Y Bond
    for i in range(self.xnodes):
        for j in range(self.ynodes - 1):
            index1 = self.index_result[i, j, 1]
            index2 = self.index_result[i, j + 1, 3]
            peps_nodes[i][j][index1] ^ peps_nodes[i][j + 1][index2]
    # X Bond
    for j in range(self.ynodes):
        for i in range(self.xnodes - 1):
            index1 = self.index_result[i, j, 0]
            index2 = self.index_result[i + 1, j, 2]
            peps_nodes[i][j][index1] ^ peps_nodes[i + 1][j][index2]
    # Contract
    # Contract the features
    # contracted_nodes = []
    for i in range(self.xnodes):
        for j in range(self.ynodes):
            input_nodes[i][j] = input_nodes[i][j] @ peps_nodes[i][j]
            input_nodes[i][j].name = f'p_{i}_{j}'
            # Normalize each site tensor to keep magnitudes stable.
            input_nodes[i][j].tensor = input_nodes[i][j].tensor / \
                input_nodes[i][j].tensor.norm()
            # contracted_nodes.append(input_nodes[i][j])
    # Contract each row
    left_nodes: List[tn.Node] = input_nodes[0]
    right_nodes: List[tn.Node] = input_nodes[self.xnodes - 1]
    middle_nodes: List[tn.Node] = input_nodes[cx]
    # Top sweep: absorb rows 1..cx-1 into the boundary MPS `left_nodes`.
    for i in range(1, cx):
        for j in range(self.ynodes):
            left_nodes[j] = left_nodes[j] @ input_nodes[i][j]
            left_nodes[j].name = f'l_{j}'
        # Row Normalization
        row_norm = torch.mean(
            torch.stack([t.tensor.norm() for t in left_nodes]))
        for t in left_nodes:
            t.tensor = t.tensor / row_norm
        # RQ Decomposition
        for j in range(self.ynodes - 1):
            left_edges = []
            right_edges = []
            for edge in left_nodes[j].edges:
                # Resolve which neighbor this edge points at via node names.
                nxt_node_name = edge.node1.name if edge.node1.name != f'l_{j}' and edge.node1.name != '__unnamed_node__' else edge.node2.name
                if nxt_node_name[0] == 'p':
                    right_edges.append(edge)
                elif nxt_node_name == f'l_{j-1}':
                    right_edges.append(edge)
                else:
                    left_edges.append(edge)
            node1, node2 = tn.split_node_rq(left_nodes[j],
                                            left_edges=left_edges,
                                            right_edges=right_edges)
            left_nodes[j] = node2
            left_nodes[j + 1] = left_nodes[j + 1] @ node1
            # left_nodes[j+1].tensor = left_nodes[j+1].tensor / \
            #     left_nodes[j+1].tensor.norm()
            left_nodes[j].name = f'l_{j}'
            left_nodes[j + 1].name = f'l_{j+1}'
        # SVD
        for j in range(self.ynodes - 1, 0, -1):
            tmp_node = left_nodes[j] @ left_nodes[j - 1]
            left_edges = []
            right_edges = []
            for edge in tmp_node.edges:
                nxt_node_name = edge.node1.name if edge.node1.name != f'l_{j}' and edge.node1.name != '__unnamed_node__' else edge.node2.name
                if nxt_node_name == f'p_{i+1}_{j}':
                    left_edges.append(edge)
                elif nxt_node_name == f'p_{i+1}_{j-1}':
                    right_edges.append(edge)
                elif nxt_node_name == f'l_{j+1}':
                    left_edges.append(edge)
                else:
                    right_edges.append(edge)
            # Truncated SVD bounds the boundary-MPS bond dimension.
            node1, node2, _ = tn.split_node(
                tmp_node,
                left_edges=left_edges,
                right_edges=right_edges,
                max_singular_values=self.max_singular_values)
            left_nodes[j] = node1
            left_nodes[j - 1] = node2
            left_nodes[j].name = f'l_{j}'
            left_nodes[j - 1].name = f'l_{j-1}'
            # QR Decomposition
            left_edges = []
            right_edges = []
            for edge in left_nodes[j].edges:
                if not edge.node2 and not edge.node1:
                    continue
                nxt_node_name = edge.node1.name if edge.node1.name != f'l_{j}' and edge.node1.name != '__unnamed_node__' else edge.node2.name
                if nxt_node_name[0] == 'p':
                    left_edges.append(edge)
                elif nxt_node_name == f'l_{j+1}':
                    left_edges.append(edge)
                else:
                    right_edges.append(edge)
            node1, node2 = tn.split_node_qr(left_nodes[j],
                                            left_edges=left_edges,
                                            right_edges=right_edges)
            left_nodes[j] = node1
            left_nodes[j].name = f'l_{j}'
            left_nodes[j - 1] = node2 @ left_nodes[j - 1]
            # left_nodes[j-1].tensor = left_nodes[j-1].tensor / \
            #     left_nodes[j-1].tensor.norm()
            left_nodes[j - 1].name = f'l_{j-1}'
    # Bottom sweep: mirror of the top sweep, absorbing rows xnodes-2..cx+1
    # into the boundary MPS `right_nodes`.
    for i in range(self.xnodes - 2, cx, -1):
        for j in range(self.ynodes):
            right_nodes[j] = right_nodes[j] @ input_nodes[i][j]
            right_nodes[j].name = f'r_{j}'
        # Row Normalization
        row_norm = torch.mean(
            torch.stack([t.tensor.norm() for t in right_nodes]))
        for t in right_nodes:
            t.tensor = t.tensor / row_norm
        # RQ Decomposition
        for j in range(self.ynodes - 1):
            left_edges = []
            right_edges = []
            for edge in right_nodes[j].edges:
                if not edge.node2 and not edge.node1:
                    continue
                nxt_node_name = edge.node1.name if edge.node1.name != f'r_{j}' and edge.node1.name != '__unnamed_node__' else edge.node2.name
                if nxt_node_name[0] == 'p':
                    right_edges.append(edge)
                elif nxt_node_name == f'r_{j-1}':
                    right_edges.append(edge)
                else:
                    left_edges.append(edge)
            node1, node2 = tn.split_node_rq(right_nodes[j],
                                            left_edges=left_edges,
                                            right_edges=right_edges)
            right_nodes[j] = node2
            right_nodes[j + 1] = right_nodes[j + 1] @ node1
            # right_nodes[j+1].tensor = right_nodes[j+1].tensor / \
            #     right_nodes[j+1].tensor.norm()
            right_nodes[j].name = f'r_{j}'
            right_nodes[j + 1].name = f'r_{j+1}'
        # SVD
        for j in range(self.ynodes - 1, 0, -1):
            tmp_node = right_nodes[j] @ right_nodes[j - 1]
            left_edges = []
            right_edges = []
            for edge in tmp_node.edges:
                if not edge.node2 and not edge.node1:
                    continue
                nxt_node_name = edge.node1.name if edge.node1.name != f'r_{j}' and edge.node1.name != '__unnamed_node__' else edge.node2.name
                if nxt_node_name == f'p_{i-1}_{j}':
                    left_edges.append(edge)
                elif nxt_node_name == f'p_{i-1}_{j-1}':
                    right_edges.append(edge)
                elif nxt_node_name == f'r_{j+1}':
                    left_edges.append(edge)
                else:
                    right_edges.append(edge)
            node1, node2, _ = tn.split_node(
                tmp_node,
                left_edges=left_edges,
                right_edges=right_edges,
                max_singular_values=self.max_singular_values)
            right_nodes[j] = node1
            right_nodes[j - 1] = node2
            right_nodes[j].name = f'r_{j}'
            right_nodes[j - 1].name = f'r_{j-1}'
            # QR Decomposition
            left_edges = []
            right_edges = []
            for edge in right_nodes[j].edges:
                if not edge.node2 and not edge.node1:
                    continue
                nxt_node_name = edge.node1.name if edge.node1.name != f'r_{j}' and edge.node1.name != '__unnamed_node__' else edge.node2.name
                if nxt_node_name[0] == 'p':
                    left_edges.append(edge)
                elif nxt_node_name == f'r_{j+1}':
                    left_edges.append(edge)
                else:
                    right_edges.append(edge)
            node1, node2 = tn.split_node_qr(right_nodes[j],
                                            left_edges=left_edges,
                                            right_edges=right_edges)
            right_nodes[j] = node1
            right_nodes[j].name = f'r_{j}'
            right_nodes[j - 1] = node2 @ right_nodes[j - 1]
            # right_nodes[j-1].tensor = right_nodes[j-1].tensor / \
            #     right_nodes[j-1].tensor.norm()
            right_nodes[j - 1].name = f'r_{j-1}'
    # Fold both boundary MPSs into the center row.
    for j in range(self.ynodes):
        middle_nodes[j] = left_nodes[j] @ middle_nodes[j]
        # middle_nodes[j].tensor = middle_nodes[j].tensor / \
        #     middle_nodes[j].tensor.norm()
    for j in range(self.ynodes):
        middle_nodes[j] = right_nodes[j] @ middle_nodes[j]
        # middle_nodes[j].tensor = middle_nodes[j].tensor / \
        #     middle_nodes[j].tensor.norm()
    # Contract the center row from both ends towards the center column cy.
    down_node = middle_nodes[0]
    up_node = middle_nodes[self.ynodes - 1]
    for j in range(1, cy + 1):
        down_node = down_node @ middle_nodes[j]
        down_node.tensor = down_node.tensor / down_node.tensor.norm()
    for j in range(self.ynodes - 2, cy, -1):
        up_node = up_node @ middle_nodes[j]
        up_node.tensor = up_node.tensor / up_node.tensor.norm()
    result = (down_node @ up_node).tensor
    # Contract the remaining peps (With Auto Mode)
    # result = contractors.auto(contracted_nodes).tensor
    # print(result[0].item())
    result = result.view([10]) / result.norm()
    return result