Example #1
    def __measure_parameterless(self, state, which_qubits, result_desired):
        r"""进行 01 测量。

        Args:
            state (Tensor): 输入的量子态
            which_qubits (list): 测量作用的量子比特编号
            result_desired (str): 期望得到的测量结果

        Returns:
            Tensor: 测量坍塌后的量子态
            Tensor:测量坍塌得到的概率
            str: 测量得到的结果
        """
        n = self.get_qubit_number()
        assert len(which_qubits) == len(result_desired), \
            "the number of qubits to be measured must match the length of result_desired"
        op_list = [np.eye(2, dtype=np.complex128)] * n
        for i, ele in zip(which_qubits, result_desired):
            k = int(ele)
            # Single-qubit projector |k><k| for the desired outcome on qubit i
            rho = np.zeros((2, 2), dtype=np.complex128)
            rho[k, k] = 1
            op_list[i] = rho
        if n > 1:
            measure_operator = paddle.to_tensor(NKron(*op_list))
        else:
            measure_operator = paddle.to_tensor(op_list[0])
        state_measured = matmul(matmul(measure_operator, state),
                                dagger(measure_operator))
        prob = real(
            trace(
                matmul(matmul(dagger(measure_operator), measure_operator),
                       state)))
        state_measured = divide(state_measured, prob)
        return state_measured, prob, result_desired
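
A minimal NumPy sketch of the same measurement rule, assuming NKron above is an n-fold Kronecker product (replicated here with functools.reduce): the projector for the desired outcome is placed at the measured qubit, identities elsewhere, and the probability and post-measurement state follow Tr(M†M rho) and M rho M† / prob.

import functools
import numpy as np

# Outcome '1' requested on qubit 0 of a 2-qubit register (qubit 1 untouched).
proj_1 = np.array([[0, 0], [0, 1]], dtype=np.complex128)
identity = np.eye(2, dtype=np.complex128)
measure_op = functools.reduce(np.kron, [proj_1, identity])

# Maximally mixed 2-qubit state as a stand-in input.
rho = np.eye(4, dtype=np.complex128) / 4

prob = np.real(np.trace(measure_op.conj().T @ measure_op @ rho))
state_measured = measure_op @ rho @ measure_op.conj().T / prob
print(prob)                             # 0.5
print(np.real(np.trace(state_measured)))  # 1.0 after renormalization
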
Example #2
    def forward(self, H, N, N_SYS_B, beta, D):
        # Apply quantum neural network onto the initial state
        rho_AB = U_theta(self.initial_state, self.theta, N, D)

        # Calculate the partial trace to get the state rho_B of subsystem B
        rho_B = partial_trace(rho_AB, 2**(N - N_SYS_B), 2**N_SYS_B, 1)

        # Calculate the three components of the loss function
        rho_B_square = matmul(rho_B, rho_B)
        loss1 = paddle.real(trace(matmul(rho_B, H)))
        loss2 = paddle.real(trace(rho_B_square)) * 2 / beta
        loss3 = -(paddle.real(trace(matmul(rho_B_square, rho_B))) + 3) / (2 * beta)

        # Get the final loss function
        loss = loss1 + loss2 + loss3

        return loss, rho_B
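
The partial_trace call above is assumed (per its comment and arguments) to trace out subsystem A of dimension 2**(N - N_SYS_B) and return rho_B. A plain-NumPy sketch of that operation on a small product state:

import numpy as np

def partial_trace_over_A(rho_AB, dim_A, dim_B):
    # Reshape to a 4-index tensor and contract the two A indices.
    rho = rho_AB.reshape(dim_A, dim_B, dim_A, dim_B)
    return np.einsum('ijik->jk', rho)

# For the product state |0><0| (x) |+><+|, tracing out A recovers |+><+|.
rho_A = np.array([[1, 0], [0, 0]], dtype=np.complex128)
rho_plus = np.full((2, 2), 0.5, dtype=np.complex128)
rho_AB = np.kron(rho_A, rho_plus)
print(partial_trace_over_A(rho_AB, 2, 2))   # [[0.5, 0.5], [0.5, 0.5]]
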
Example #3
    def __measure_parameterized(self, state, which_qubits, result_desired,
                                theta):
        r"""进行参数化的测量。

        Args:
            state (Tensor): 输入的量子态
            which_qubits (list): 测量作用的量子比特编号
            result_desired (str): 期望得到的测量结果
            theta (Tensor): 测量运算的参数

        Returns:
            Tensor: 测量坍塌后的量子态
            Tensor:测量坍塌得到的概率
            str: 测量得到的结果
        """
        n = self.get_qubit_number()
        assert len(which_qubits) == len(result_desired), \
            "the number of qubits to be measured must match the length of result_desired"
        op_list = [paddle.to_tensor(np.eye(2, dtype=np.complex128))] * n
        for idx in range(0, len(which_qubits)):
            i = which_qubits[idx]
            ele = result_desired[idx]
            basis0 = paddle.to_tensor(
                np.array([[1, 0], [0, 0]], dtype=np.complex128))
            basis1 = paddle.to_tensor(
                np.array([[0, 0], [0, 1]], dtype=np.complex128))
            if int(ele) == 0:
                # Measurement operator cos(theta)|0><0| + sin(theta)|1><1|
                rho0 = multiply(basis0, cos(theta[idx]))
                rho1 = multiply(basis1, sin(theta[idx]))
            elif int(ele) == 1:
                # Measurement operator sin(theta)|0><0| + cos(theta)|1><1|
                rho0 = multiply(basis0, sin(theta[idx]))
                rho1 = multiply(basis1, cos(theta[idx]))
            else:
                raise ValueError("cannot recognize the result_desired.")
            op_list[i] = add(rho0, rho1)
        measure_operator = op_list[0]
        if n > 1:
            for idx in range(1, len(op_list)):
                measure_operator = kron(measure_operator, op_list[idx])
        state_measured = matmul(matmul(measure_operator, state),
                                dagger(measure_operator))
        prob = real(
            trace(
                matmul(matmul(dagger(measure_operator), measure_operator),
                       state)))
        state_measured = divide(state_measured, prob)
        return state_measured, prob, result_desired
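
A NumPy check of the parameterized operators built above: for result '0' the operator is cos(theta)|0><0| + sin(theta)|1><1| and for result '1' the roles of cos and sin are swapped, so at theta = 0 they reduce to the plain projectors and, for any theta, the pair satisfies M0†M0 + M1†M1 = I. This is an illustration only and is not part of the class above.

import numpy as np

theta = 0.3
basis0 = np.array([[1, 0], [0, 0]], dtype=np.complex128)
basis1 = np.array([[0, 0], [0, 1]], dtype=np.complex128)

m0 = np.cos(theta) * basis0 + np.sin(theta) * basis1   # operator for result '0'
m1 = np.sin(theta) * basis0 + np.cos(theta) * basis1   # operator for result '1'

print(np.allclose(np.cos(0) * basis0 + np.sin(0) * basis1, basis0))   # True: theta = 0 gives |0><0|
print(np.allclose(m0.conj().T @ m0 + m1.conj().T @ m1, np.eye(2)))    # True: the pair is complete
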
Example #4
    def forward(self, N):
        # Apply quantum neural network onto the initial state
        U = U_theta(self.theta, N)

        # rho_tilde = U * rho * U^dagger is the state obtained by acting U on rho
        rho_tilde = matmul(matmul(U, self.rho), dagger(U))
        
        # Calculate loss function
        loss = trace(matmul(self.sigma, rho_tilde))

        return paddle.real(loss), rho_tilde
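
With U fixed to the identity, the loss above reduces to Tr(sigma @ rho); minimizing it over unitaries pairs the eigenvalues of rho and sigma in opposite order. A small NumPy illustration of that effect (the matrices below are made up for the example):

import numpy as np

rho = np.diag([0.3, 0.7]).astype(np.complex128)
sigma = np.diag([0.25, 0.75]).astype(np.complex128)
print(np.real(np.trace(sigma @ rho)))           # 0.6 with U = identity

# A bit-flip permutation pairs the large eigenvalue of rho with the
# small one of sigma and lowers the loss.
X = np.array([[0, 1], [1, 0]], dtype=np.complex128)
rho_tilde = X @ rho @ X.conj().T
print(np.real(np.trace(sigma @ rho_tilde)))     # 0.4
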
Example #5
def test_paddle_backward():
    # The rotation matrix and the loss must be rebuilt on every iteration
    I = paddle.to_tensor(np.eye(2, dtype=np.float32))
    U = paddle.to_tensor(
        np.array([[1, -1], [1, 1]], dtype=np.float32) / np.sqrt(2))
    layer = RLayer_d1()
    layer.set_theta(1.)
    opt = paddle.optimizer.Adam(0.1, parameters=layer.parameters())
    for epoch in range(110):
        U2 = layer(I)
        loss = 1 - paddle.trace(paddle.matmul(U, paddle.transpose(U2, [1, 0]))) / 2
        loss.backward()
        opt.step()
        opt.clear_grad()
        if (epoch + 1) % 10 == 0:
            print('epoch_%d: score=%g, theta=%g' %
                  (epoch, 1 - loss.numpy()[0], layer.theta.numpy()[0]))
            print(layer.matrix.numpy())
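
For reference, the loss used above has a closed form if RLayer_d1 implements the 2x2 rotation shown in Example #7 (an assumption, since the layer itself is not shown here): it vanishes exactly at theta = pi/2, where the rotation coincides with the 45-degree target U. A standalone NumPy check:

import numpy as np

U = np.array([[1, -1], [1, 1]], dtype=np.float32) / np.sqrt(2)

def rotation(theta):
    c, s = np.cos(theta / 2), np.sin(theta / 2)
    return np.array([[c, -s], [s, c]], dtype=np.float32)

for theta in (0.0, np.pi / 4, np.pi / 2):
    loss = 1 - np.trace(U @ rotation(theta).T) / 2
    print(theta, loss)   # the loss falls to ~0 at theta = pi/2
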
Example #6
    def expecval(self, H):
        r"""量子线路输出的量子态关于可观测量 H 的期望值。

        Hint:
            如果想输入的可观测量的矩阵为 :math:`0.7Z\otimes X\otimes I+0.2I\otimes Z\otimes I` 。则 ``H`` 应为 ``[[0.7, 'z0,x1'], [0.2, 'z1']]`` 。
        Args:
            H (list): 可观测量的相关信息
        Returns:
            Tensor: 量子线路输出的量子态关于 H 的期望值

        代码示例:
        
        .. code-block:: python
            
            import numpy as np
            import paddle
            from paddle_quantum.circuit import UAnsatz
            n = 5
            H_info = [[0.1, 'x1'], [0.2, 'y0,z4']]
            theta = paddle.to_tensor(np.ones(3))
            cir = UAnsatz(n)
            cir.rx(theta[0], 0)
            cir.rz(theta[1], 1)
            cir.rx(theta[2], 2)
            cir.run_state_vector()
            expect_value = cir.expecval(H_info).numpy()
            print(f'Calculated expectation value of {H_info} is {expect_value}')

        ::

            Calculated expectation value of [[0.1, 'x1'], [0.2, 'y0,z4']] is [-0.1682942]

        """
        if self.__run_state == 'state_vector':
            return real(vec_expecval(H, self.__state))
        elif self.__run_state == 'density_matrix':
            state = self.__state
            H_mat = paddle.to_tensor(pauli_str_to_matrix(H, self.n))
            return real(trace(matmul(state, H_mat)))
        else:
            # Raise error
            raise ValueError(
                "no state for measurement; please run the circuit first")
Example #7
def test_paddle_param():
    U = paddle.to_tensor(
        np.array([[1, -1], [1, 1]], dtype=np.float32) / np.sqrt(2))
    U.stop_gradient = True
    theta = paddle.static.create_parameter(shape=[1, 1], dtype='float32')
    cs = paddle.cos(theta / 2)
    sn = paddle.sin(theta / 2)
    matrix = paddle.concat([
        paddle.concat([cs, -sn], axis=1),
        paddle.concat([sn, cs], axis=1),
    ], axis=0)
    loss = 1 - paddle.trace(paddle.matmul(U, paddle.transpose(matrix, [1, 0]))) / 2
    dt = paddle.grad(outputs=[loss],
                     inputs=[theta],
                     create_graph=False,
                     retain_graph=True)[0]
    print(dt)
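
As a sanity check on the gradient printed above, the same loss can be differentiated by hand: loss(theta) = 1 - sqrt(2) * (cos(theta/2) + sin(theta/2)) / 2, so d loss / d theta = -(sqrt(2)/4) * (cos(theta/2) - sin(theta/2)). A NumPy finite-difference comparison, independent of paddle:

import numpy as np

U = np.array([[1, -1], [1, 1]], dtype=np.float64) / np.sqrt(2)

def loss_fn(theta):
    c, s = np.cos(theta / 2), np.sin(theta / 2)
    matrix = np.array([[c, -s], [s, c]])
    return 1 - np.trace(U @ matrix.T) / 2

theta = 0.3
eps = 1e-6
fd_grad = (loss_fn(theta + eps) - loss_fn(theta - eps)) / (2 * eps)
analytic = -np.sqrt(2) / 4 * (np.cos(theta / 2) - np.sin(theta / 2))
print(fd_grad, analytic)   # both approximately -0.297
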
Example #8
def trace(x, offset=0, dim1=0, dim2=1, out=None):
    # paddle.trace names its axis arguments axis1/axis2 and has no `out`
    # parameter, so map the NumPy-style names explicitly; `out` is ignored.
    return Tensor(paddle.trace(x, offset=offset, axis1=dim1, axis2=dim2))
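
A minimal usage sketch of the underlying paddle.trace call the wrapper forwards to (Tensor is assumed to be the surrounding backend's own wrapper class and is not exercised here):

import numpy as np
import paddle

x = paddle.to_tensor(np.arange(9, dtype=np.float64).reshape(3, 3))
print(paddle.trace(x))             # 0 + 4 + 8 = 12, the main diagonal
print(paddle.trace(x, offset=1))   # 1 + 5 = 6, the first super-diagonal
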