Example no. 1
0
def test_swap_pieces(nqubits):
    """Compare ``op.swap_pieces`` on state halves against ``op.apply_swap``.

    Repeatedly draws a distinct (global, local) qubit pair, applies a full
    SWAP to a reference copy, and checks that swapping the two state pieces
    reproduces the reference halves.
    """
    state = utils.random_tensorflow_complex((2**nqubits,), dtype=tf.float64)
    expected = tf.cast(np.copy(state.numpy()), dtype=state.dtype)
    piece_shape = (2, int(state.shape[0]) // 2)

    for _ in range(10):
        # Draw two distinct qubit indices (redraw local until it differs).
        global_qubit = np.random.randint(0, nqubits)
        local_qubit = np.random.randint(0, nqubits)
        while local_qubit == global_qubit:
            local_qubit = np.random.randint(0, nqubits)

        # Permutation that moves the global qubit axis to the front.
        transpose_order = [global_qubit]
        transpose_order.extend(range(global_qubit))
        transpose_order.extend(range(global_qubit + 1, nqubits))

        qtensor = qubits_tensor(nqubits, [global_qubit, local_qubit])
        expected = op.apply_swap(expected, qtensor, nqubits,
                                 global_qubit, local_qubit)
        expected = tf.reshape(expected, nqubits * (2,))
        expected = tf.transpose(expected, transpose_order)
        expected = tf.reshape(expected, piece_shape)

        state = tf.reshape(state, nqubits * (2,))
        state = tf.transpose(state, transpose_order)
        state = tf.reshape(state, piece_shape)
        first_half, second_half = state[0], state[1]
        if tf.config.list_physical_devices("GPU"):  # pragma: no cover
            # case not tested by GitHub workflows because it requires GPU
            check_unimplemented_error(op.swap_pieces, first_half, second_half,
                                      local_qubit - 1, nqubits - 1)
        else:
            # The local qubit index shifts down by one when the (removed)
            # global qubit preceded it.
            op.swap_pieces(first_half, second_half,
                           local_qubit - int(global_qubit < local_qubit),
                           nqubits - 1)
            np.testing.assert_allclose(expected[0], first_half.numpy())
            np.testing.assert_allclose(expected[1], second_half.numpy())
Example no. 2
0
 def _swap(self, state: utils.DistributedState, global_qubit: int,
           local_qubit: int):
     """Swap a global qubit with a local qubit on a distributed state.

     Pairs up the device pieces whose indices differ only in the bit that
     corresponds to ``global_qubit`` and swaps each pair in place via
     ``op.swap_pieces`` on the memory device.

     Args:
         state: Distributed state whose ``pieces`` are exchanged in place.
         global_qubit: Qubit stored in the piece index (device dimension).
         local_qubit: Qubit stored within each piece.
     """
     # Position of the global qubit in the reduced global-qubit map.
     m = self.queues.qubits.reduced_global[global_qubit]
     # Re-express as a bit index counted from the least significant side.
     m = self.nglobal - m - 1
     t = 1 << m
     for g in range(self.ndevices // 2):
         # Insert a zero bit at position ``m`` of ``g``: ``i`` is the pair
         # member with bit ``m`` clear, ``i + t`` the one with it set.
         i = ((g >> m) << (m + 1)) + (g & (t - 1))
         # Effective local index of the qubit within each piece.
         local_eff = self.queues.qubits.reduced_local[local_qubit]
         with tf.device(self.memory_device):
             op.swap_pieces(state.pieces[i], state.pieces[i + t], local_eff,
                            self.nlocal)
Example no. 3
0
def test_swap_pieces_zero_global(nqubits):
    """Check ``op.swap_pieces`` when the global qubit is fixed to qubit 0.

    With qubit 0 as the global qubit the state halves are simply the first
    axis of a ``(2, 2**(nqubits-1))`` reshape, so no transposition is needed.
    """
    state = utils.random_tensorflow_complex((2**nqubits,), dtype=tf.float64)
    expected = tf.cast(np.copy(state.numpy()), dtype=state.dtype)
    piece_shape = (2, int(state.shape[0]) // 2)
    state = tf.reshape(state, piece_shape)

    for _ in range(10):
        # Any qubit other than the global one (qubit 0).
        local = np.random.randint(1, nqubits)

        qtensor = qubits_tensor(nqubits, [0, local])
        expected = op.apply_swap(expected, qtensor, nqubits, 0, local)
        expected = tf.reshape(expected, piece_shape)

        first_half, second_half = state[0], state[1]
        if tf.config.list_physical_devices("GPU"):  # pragma: no cover
            # case not tested by GitHub workflows because it requires GPU
            check_unimplemented_error(op.swap_pieces, first_half, second_half,
                                      local - 1, nqubits - 1)
        else:
            # Removing the global qubit shifts every local index down by one.
            op.swap_pieces(first_half, second_half, local - 1, nqubits - 1)
            np.testing.assert_allclose(expected[0], first_half.numpy())
            np.testing.assert_allclose(expected[1], second_half.numpy())