Example #1
def tensor_expectation_value(circuit: cirq.Circuit,
                             pauli_string: cirq.PauliString,
                             max_ram_gb=16,
                             tol=1e-6) -> float:
    """Compute an expectation value for an operator and a circuit via tensor
    contraction.

    This will give up if it looks like the computation will take too much RAM.
    """
    circuit_sand = circuit_for_expectation_value(
        circuit, pauli_string / pauli_string.coefficient)
    qubits = sorted(circuit_sand.all_qubits())

    tensors, qubit_frontier, _ = circuit_to_tensors(circuit=circuit_sand,
                                                    qubits=qubits)
    end_bras = [
        qtn.Tensor(data=quimb.up().squeeze(),
                   inds=(f'i{qubit_frontier[q]}_q{q}', ),
                   tags={'Q0', 'bra0'}) for q in qubits
    ]
    tn = qtn.TensorNetwork(tensors + end_bras)
    tn.rank_simplify(inplace=True)
    path_info = tn.contract(get='path-info')
    ram_gb = path_info.largest_intermediate * 128 / 8 / 1024 / 1024 / 1024
    if ram_gb > max_ram_gb:
        raise MemoryError("We estimate that this contraction "
                          "will take too much RAM! {} GB".format(ram_gb))
    e_val = tn.contract(inplace=True)
    assert e_val.imag < tol
    assert pauli_string.coefficient.imag < tol
    return e_val.real * pauli_string.coefficient
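A minimal usage sketch, assuming this helper is exposed as cirq.contrib.quimb.tensor_expectation_value (as in Cirq's contrib module) and that quimb is installed:

import cirq
import cirq.contrib.quimb as ccq

q0, q1 = cirq.LineQubit.range(2)
bell = cirq.Circuit([cirq.H(q0), cirq.CNOT(q0, q1)])
zz = cirq.Z(q0) * cirq.Z(q1)  # a cirq.PauliString
print(ccq.tensor_expectation_value(bell, zz))  # roughly 1.0 for the Bell state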
Example #2
def tensor_density_matrix(
        circuit: cirq.Circuit,
        qubits: Optional[List[cirq.Qid]] = None) -> np.ndarray:
    """Given a circuit with mixtures or channels, contract a tensor network
    representing the resultant density matrix.

    Note: If the circuit contains 6 qubits or fewer, we use a bespoke
    contraction ordering that corresponds to the "normal" in-time contraction
    ordering. Otherwise, the contraction order determination could take
    longer than doing the contraction. Your mileage may vary and benchmarking
    is encouraged for your particular problem if performance is important.
    """
    if qubits is None:
        qubits = sorted(circuit.all_qubits())

    tensors, qubit_frontier, _ = circuit_to_density_matrix_tensors(
        circuit=circuit, qubits=qubits)
    tn = qtn.TensorNetwork(tensors)
    f_inds = tuple(f'nf{qubit_frontier[q]}_q{q}' for q in qubits)
    b_inds = tuple(f'nb{qubit_frontier[q]}_q{q}' for q in qubits)
    if len(qubits) <= 6:
        # Heuristic: don't try to determine best order for low qubit number
        # Just contract in time.
        tags_seq = [(f'i{i}b', f'i{i}f') for i in range(len(circuit) + 1)]
        tn.contract_cumulative(tags_seq, inplace=True)
    else:
        tn.contract(inplace=True)
    return tn.to_dense(f_inds, b_inds)
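A hedged usage sketch for a small noisy circuit, again assuming the function is available as cirq.contrib.quimb.tensor_density_matrix:

import cirq
import cirq.contrib.quimb as ccq

q0, q1 = cirq.LineQubit.range(2)
noisy = cirq.Circuit([cirq.H(q0), cirq.depolarize(0.01).on(q0), cirq.CNOT(q0, q1)])
rho = ccq.tensor_density_matrix(noisy)  # 4x4 density matrix over (q0, q1)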
Example #3
def to_quimb_tensor(g: BaseGraph) -> 'qtn.TensorNetwork':
    """Converts tensor network representing the given :func:`pyzx.graph.Graph`.
    Pretty printing: to_tensor(g).draw(color = ['V', 'H'])
    
    Args:
        g: graph to be converted."""

    if qu is None:
        raise ImportError("quimb must be installed to use this function.")

    # copying a graph guarantees consecutive indices, which are needed for the tensor net
    g = g.copy()

    # only Z spiders are handled below
    to_gh(g)

    tensors = []

    # Here we have phase tensors corresponding to Z-spiders with only one output and no input.
    for v in g.vertices():
        if g.type(v) == VertexType.Z and g.phase(v) != 0:
            tensors.append(
                qtn.Tensor(data=[1, np.exp(1j * np.pi * g.phase(v))],
                           inds=(f'{v}', ),
                           tags=("V", )))

    # Hadamard or Kronecker tensors, one for each edge of the diagram.
    for i, edge in enumerate(g.edges()):
        x, y = edge
        isHadamard = g.edge_type(edge) == EdgeType.HADAMARD
        t = qtn.Tensor(data=qu.hadamard()
                       if isHadamard else np.array([1, 0, 0, 1]).reshape(2, 2),
                       inds=(f'{x}', f'{y}'),
                       tags=("H", ) if isHadamard else ("N", ))
        tensors.append(t)

    # TODO: This is not taking care of all the stuff that can be in g.scalar
    # In particular, it doesn't check g.scalar.phasenodes
    # TODO: This will give the wrong tensor when g.scalar.is_zero == True.
    # Grab the float factor and exponent from the scalar
    scalar_float = np.exp(1j * np.pi * g.scalar.phase) * g.scalar.floatfactor
    for node in g.scalar.phasenodes:  # Each node is a Fraction
        scalar_float *= 1 + np.exp(1j * np.pi * node)
    scalar_exp = math.log10(math.sqrt(2)) * g.scalar.power2

    # If the TN is empty, create a single 0-tensor with scalar factor, otherwise
    # multiply the scalar into one of the tensors.
    if len(tensors) == 0:
        tensors.append(qtn.Tensor(data=scalar_float, inds=(), tags=("S", )))
    else:
        tensors[0].modify(data=tensors[0].data * scalar_float)

    network = qtn.TensorNetwork(tensors)

    # the exponent can be very large, so distribute it evenly through the TN
    network.exponent = scalar_exp
    network.distribute_exponent()
    return network
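A small usage sketch; the random Clifford+T graph from pyzx's generate.cliffordT is only an assumed input, and the draw call mirrors the pretty-printing hint in the docstring:

import pyzx as zx

g = zx.generate.cliffordT(2, 5)  # random 2-qubit Clifford+T ZX-diagram
tn = to_quimb_tensor(g)
tn.draw(color=['V', 'H'])  # phase tensors tagged 'V', Hadamard edges tagged 'H'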
Example #4
    def __init__(self,
                 d=2,
                 L_y=4,
                 L_x=10,
                 dist_type='uniform',
                 data_type='float64',
                 seed_0=10):
        self.L_x = L_x
        self.d = d
        self.L_y = L_y
        self.type = data_type
        self.dist = dist_type

        peps = qtn.PEPS.rand(Lx=L_x, Ly=L_y, bond_dim=D, phys_dim=d, seed=4)

        rotate_ten_list = []
        for j in range(L_y - 1):
            for i in range(L_x):
                A = qtn.Tensor(qu.rand(2,
                                       seed=seed_0 + i + j,
                                       dist='uniform',
                                       dtype=data_type).reshape(2),
                               inds={f"k{i},{j}"},
                               tags={})
                rotate_ten_list.append(A)

        R_l = qtn.TensorNetwork(rotate_ten_list)
        self.tn = peps & R_l
        for j in range(L_y - 1):
            for i in range(L_x):
                self.tn.contract_ind(f"k{i},{j}", optimize='auto-hq')

        #TN_l.graph(color=peps.site_tags, show_tags=True, figsize=(10, 10))

        for j in range(L_y):
            for i in range(L_x):
                t = list((self.tn[{f'I{i},{j}', f'ROW{i}', f'COL{j}'}].inds))
                for m in range(len(t)):
                    if t[m] == f'k{i},{j}':
                        t[m] = f'k{i}'
                self.tn[{f'I{i},{j}', f'ROW{i}', f'COL{j}'}].modify(inds=t)

        # for j in range(L_y):
        #     for i in range(L_x):
        #         dim_tuple = self.tn[{f'I{i},{j}', f'ROW{i}', f'COL{j}'}].shape
        #         lis_a = list(dim_tuple)
        #         dim = 1
        #         for i_val in range(len(lis_a)):
        #             dim *= lis_a[i_val]
        #         rand_tn = qu.rand(dim, dist='uniform', seed=seed_0,
        #                           dtype=data_type).reshape(*dim_tuple)
        #         rand_tn = rand_tn * LA.norm(rand_tn)**(-1.0)
        #         self.tn[{f'COL{j}', f'I{i},{j}', f'ROW{i}'}].modify(data=rand_tn)

        self.tn.balance_bonds_()
        self.tn.equalize_norms_(2.0)
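In effect, this contracts a random length-2 vector into every physical index of the first L_y - 1 columns of the PEPS, so only the last column keeps open physical legs, which the final loop renames from k{i},{j} to k{i}.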
Example #5
def rand_tn1d_sect(n, bd, dtype=complex):
    mps = qtn.MPS_rand_state(n + 2, bd, dtype=dtype)
    mpo = qtn.MPO_rand_herm(n + 2, 5, dtype=dtype)

    norm = qtn.TensorNetwork(qtn.align_TN_1D(mps.H, mpo, mps))

    lix = qtn.bonds(norm[0], norm[1])
    rix = qtn.bonds(norm[n], norm[n + 1])

    to = norm[1:n + 1]

    return qtn.TNLinearOperator1D(to, lix, rix, 1, n + 1)
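The returned object behaves like a scipy LinearOperator over the boundary bond indices, so it can be probed lazily; a minimal sketch, assuming the function above is in scope:

import numpy as np

lo = rand_tn1d_sect(8, 7)
v = np.random.randn(lo.shape[1]) + 1j * np.random.randn(lo.shape[1])
w = lo.matvec(v)  # matrix-vector product without ever forming the dense operator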
Example #6
def tensor_state_vector(
    circuit: cirq.Circuit, qubits: Optional[Sequence[cirq.Qid]] = None
) -> np.ndarray:
    """Given a circuit contract a tensor network into a final state vector."""
    if qubits is None:
        qubits = sorted(circuit.all_qubits())

    tensors, qubit_frontier, _ = circuit_to_tensors(circuit=circuit, qubits=qubits)
    tn = qtn.TensorNetwork(tensors)
    f_inds = tuple(f'i{qubit_frontier[q]}_q{q}' for q in qubits)
    tn.contract(inplace=True)
    return tn.to_dense(f_inds)
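A quick usage sketch, assuming the function is exposed as cirq.contrib.quimb.tensor_state_vector:

import cirq
import cirq.contrib.quimb as ccq

q0, q1 = cirq.LineQubit.range(2)
bell = cirq.Circuit([cirq.H(q0), cirq.CNOT(q0, q1)])
psi = ccq.tensor_state_vector(bell)  # approximately [1, 0, 0, 1] / sqrt(2)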
Example #7
    def state_vector(self) -> np.ndarray:
        """Returns the full state vector.

        Returns:
            A vector that contains the full state.
        """
        tensor_network = qtn.TensorNetwork(self.M)
        state_vector = tensor_network.contract(inplace=False)

        # Here, we rely on the formatting of the indices, and the fact that we have enough
        # leading zeros so that 003 comes before 100.
        sorted_ind = tuple(sorted(state_vector.inds))
        return state_vector.fuse({'i': sorted_ind}).data
Example #8
def rand_tn1d_sect(n, bd, dtype=complex):
    mps = qtn.MPS_rand_state(n + 2, bd, dtype=dtype)
    mpo = qtn.MPO_rand_herm(n + 2, 5, dtype=dtype)

    norm = qtn.TensorNetwork(qtn.align_TN_1D(mps.H, mpo, mps))

    # the greedy path optimizer is not good at contracting with a large bsz,
    # so use a small block size here
    norm.structure_bsz = 2

    lix = qtn.bonds(norm[0], norm[1])
    rix = qtn.bonds(norm[n], norm[n + 1])

    to = norm[1:n + 1]

    return qtn.TNLinearOperator1D(to, lix, rix, 1, n + 1)
Example #9
    def partial_trace(self, keep_qubits: Set[ops.Qid]) -> np.ndarray:
        """Traces out all qubits except keep_qubits.

        Args:
            keep_qubits: The set of qubits that are left after computing the
                partial trace. For example, if we have a circuit for 3 qubits
                and this parameter only has one qubit, the entire density matrix
                would be 8x8, but this function returns a 2x2 matrix.

        Returns:
            An array that contains the partial trace.
        """

        contracted_inds = set([
            self.i_str(i) for qubit, i in self.qubit_map.items()
            if qubit not in keep_qubits
        ])

        conj_pfx = "conj_"

        tensor_network = qtn.TensorNetwork(self.M)

        # Rename the internal indices to avoid collisions. Also rename the qubit
        # indices that are kept. We do not rename the qubit indices that are
        # traced out.
        conj_tensor_network = tensor_network.conj()
        reindex_mapping = {}
        for M in conj_tensor_network.tensors:
            for ind in M.inds:
                if ind not in contracted_inds:
                    reindex_mapping[ind] = conj_pfx + ind
        conj_tensor_network.reindex(reindex_mapping, inplace=True)
        partial_trace = conj_tensor_network @ tensor_network

        forward_inds = [
            self.i_str(self.qubit_map[keep_qubit])
            for keep_qubit in keep_qubits
        ]
        backward_inds = [
            conj_pfx + forward_ind for forward_ind in forward_inds
        ]
        return partial_trace.to_dense(forward_inds, backward_inds)
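The trick here is which indices stay shared: the traced-out qubit indices keep the same names in the network and its conjugate, so contracting the two sums over them and performs the trace, while the kept indices (original and conj_-prefixed) survive as the row and column indices of the reduced density matrix.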
Example #10
def tensor_expectation_value(circuit: cirq.Circuit,
                             pauli_string: cirq.PauliString,
                             max_ram_gb=16,
                             tol=1e-6) -> float:
    """Compute an expectation value for an operator and a circuit via tensor
    contraction.

    This will give up if it looks like the computation will take too much RAM.
    """
    circuit_sand = circuit_for_expectation_value(
        circuit, pauli_string / pauli_string.coefficient)
    qubits = sorted(circuit_sand.all_qubits())

    tensors, qubit_frontier, _ = circuit_to_tensors(circuit=circuit_sand,
                                                    qubits=qubits)
    end_bras = [
        qtn.Tensor(data=quimb.up().squeeze(),
                   inds=(f'i{qubit_frontier[q]}_q{q}', ),
                   tags={'Q0', 'bra0'}) for q in qubits
    ]
    tn = qtn.TensorNetwork(tensors + end_bras)
    if QUIMB_VERSION[0] < (1, 3):
        # coverage: ignore
        warnings.warn(f'quimb version {QUIMB_VERSION[1]} detected. Please use '
                      f'quimb>=1.3 for optimal performance in '
                      '`tensor_expectation_value`. '
                      'See https://github.com/quantumlib/Cirq/issues/3263')
    else:
        tn.rank_simplify(inplace=True)
    path_info = tn.contract(get='path-info')
    ram_gb = path_info.largest_intermediate * 128 / 8 / 1024 / 1024 / 1024
    if ram_gb > max_ram_gb:
        raise MemoryError(
            f"We estimate that this contraction will take too much RAM! {ram_gb} GB"
        )
    e_val = tn.contract(inplace=True)
    assert e_val.imag < tol
    assert pauli_string.coefficient.imag < tol
    return e_val.real * pauli_string.coefficient
Example #11
    def __str__(self) -> str:
        return str(qtn.TensorNetwork(self.M))


def normalize_state(psi):
    """Normalize a tensor network state.

    psi: TensorNetwork
    """
    return psi / (psi.H @ psi)**0.5


if not break_up:  # Take as one big tensor
    # Give each physical index a name, e.g. 'k1'
    inds_list = [f'k{i}' for i in range(phys_inds)]
    # Get the size of the tensor
    tensor_shape = [phys_dim for i in range(phys_inds)]
    # Build a random starting tensor
    ket = qtn.tensor_gen.rand_tensor(shape=tensor_shape,
                                     inds=inds_list,
                                     tags={'KET'})
    # Take this tensor to be its own tensor network
    ame_trial = qtn.TensorNetwork([normalize_state(ket)])
else:  # Or choose to split it up into a network of smaller tensors
    # Make random ndarrays for each tensor
    data_tensors = []
    for i in range(phys_inds):
        data_tensors.append(np.random.rand(phys_dim, virt_dim, virt_dim))
    kets = []
    # Make tensors, naming the physical and virtual indices so they match up
    kets.append(
        qtn.Tensor(data_tensors[0],
                   inds=(f'k{0}', f'v{phys_inds-1}', f'v{0}'),
                   tags={'KET'}))
    for i in range(1, phys_inds):
        kets.append(
Example #13
def to_tn(circuit: Iterable[BaseGate],
          complex_type: Any = 'complex64',
          return_qubits_map: bool = False,
          leaves_prefix: str = 'q_') -> 'quimb.tensor.TensorNetwork':
    """
    Return `quimb.tensor.TensorNetwork` representing `circuit`. `to_tn` is
    deterministic, so it can be reused elsewhere.

    Parameters
    ----------
    circuit: Iterable[BaseGate]
        Circuit to get `quimb.tensor.TensorNetwork` representation from.
    complex_type: Any, optional
        Complex type to use while getting the `quimb.tensor.TensorNetwork`
        representation.
    return_qubits_map: bool, optional
        Return map associated to the Circuit qubits.
    leaves_prefix: str, optional
        Specify prefix to use for leaves.

    Returns
    -------
    quimb.tensor.TensorNetwork
        Tensor network representing `circuit` (and the qubits map as well if
        `return_qubits_map` is `True`).

    Example
    -------
    >>> # Define circuit
    >>> circuit = Circuit(
    >>>     [Gate('X', qubits=[0])**1.2,
    >>>      Gate('ISWAP', qubits=[0, 1])**2.3,
    >>>      Gate('H', [1])])
    >>>
    >>> # Draw graph
    >>> utils.to_tn(circuit).graph()

    .. image:: ../../images/circuit_tn.png
    """
    import quimb.tensor as tn

    # Convert iterable to Circuit
    circuit = Circuit(circuit)

    # Get all qubits
    all_qubits = circuit.all_qubits()

    # Get qubits map
    qubits_map = {q: i for i, q in enumerate(all_qubits)}

    # Get last_tag
    last_tag = {q: 'i' for q in all_qubits}

    # Node generator
    def _get_node(t, gate):

        # Get matrix
        U = np.reshape(gate.matrix().astype(complex_type),
                       [2] * (2 * len(gate.qubits)))

        # Get indexes
        inds = [f'{leaves_prefix}_{qubits_map[q]}_{t}' for q in gate.qubits] + [
            f'{leaves_prefix}_{qubits_map[q]}_{last_tag[q]}'
            for q in gate.qubits
        ]

        # Update last_tag
        for q in gate.qubits:
            last_tag[q] = t

        # Return node
        return tn.Tensor(
            U.astype(complex_type),
            inds=inds,
            tags=[f'{leaves_prefix}_{qubits_map[q]}' for q in gate.qubits] +
            [f'gate-idx_{t}'])

    # Get list of tensors
    tensor = [_get_node(t, gate) for t, gate in enumerate(circuit)]

    # Generate new output map
    output_map = {
        f'{leaves_prefix}_{qubits_map[q]}_{t}':
        f'{leaves_prefix}_{qubits_map[q]}_f' for q, t in last_tag.items()
    }

    # Rename output legs
    for node in tensor:
        node.reindex(output_map, inplace=True)

    # Return tensor network
    if return_qubits_map:
        return tn.TensorNetwork(tensor), qubits_map
    else:
        return tn.TensorNetwork(tensor)
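Here last_tag acts as a per-qubit frontier: each gate tensor reads the qubit's current index, exposes a fresh output index labelled by the gate position t, and the final indices are then reindexed with the _f suffix so that the open legs of the returned network correspond to the circuit outputs.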
Example #14
        mpo.add_tag(f"G{i}", where=None, which='all')
    list_mpo.append(mpo)


list_ids = []
list_alphabet = list(string.ascii_lowercase)
for i in range(N_lay):
    list_ids.append(f"__ind_{list_alphabet[i]}{{}}__")

list_tn = align_TN_1D(*list_mpo, ind_ids={*list_ids}, inplace=False)

TN_l = qtn.TensorNetwork(list_tn)

rotate_ten_list = []
for i in range(L):
    rotate_ten_list.append(
        qtn.Tensor(qu.rand(D * d).reshape(D, d),
                   inds=(f'b{i}', f'k{i}'),
                   tags={"R"}))

R_l = qtn.TensorNetwork(rotate_ten_list)

TN_l = TN_l & R_l

for i in range(L):
    TN_l.contract_ind(f'b{i}', optimize='auto-hq')

list_tags = []
for i in range(N_lay):