Example #1
def _random_input_states(num_states, num_qubits):
    """Generate a bunch of random input ket states with qutip.

    Returns
    -------
    A list of `num_states` elements, with each element a `qutip.Qobj`
    of shape `(2**num_qubits, 1)`.
    """
    # `rand_ket_haar` seems to be slightly faster than `rand_ket`
    # The efficiency of this function can probably be dramatically improved.
    length_inputs = 2 ** num_qubits
    qutip_dims = [[2 for _ in range(num_qubits)],
                  [1 for _ in range(num_qubits)]]
    return [
        qutip.rand_ket_haar(length_inputs, dims=qutip_dims)
        for _ in range(num_states)
    ]
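A brief usage sketch for the helper above (assuming a qutip version that still provides `rand_ket_haar`, i.e. qutip 4.x): each returned ket is normalised and carries the per-qubit tensor structure in its `dims`.

import qutip

kets = _random_input_states(num_states=3, num_qubits=2)
assert len(kets) == 3
assert kets[0].shape[0] == 4                # 2**num_qubits amplitudes
assert kets[0].dims == [[2, 2], [1, 1]]     # per-qubit tensor structure
assert abs(kets[0].norm() - 1) < 1e-10      # Haar-random kets are normalised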
Example #2
def create_target_states(n_qubits, n_samples, seed=None):
    """ Create multiple target states with given qubit number.

    Args:
        n_qubits: int, number of qubits
        n_samples: int, number of samples
        seed: int, random seed
    Returns:
        jnp.ndarray, state vectors of shape (n_samples, 2^n_qubits)
    """

    dim = 2**n_qubits
    haar_random_states = [
        # `.full()` gives the dense state vector as a (dim, 1) ndarray; note
        # that a fixed `seed` reproduces the same state for every sample
        qutip.rand_ket_haar(N=dim, seed=seed).full().T
        for _ in range(n_samples)
    ]
    return jnp.vstack(haar_random_states)
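A short usage sketch (assuming `jax.numpy` is available as `jnp` and qutip 4.x provides `rand_ket_haar`): the returned array stacks one Haar-random state vector per row.

import jax.numpy as jnp

states = create_target_states(n_qubits=2, n_samples=5)
print(states.shape)                     # (5, 4) == (n_samples, 2**n_qubits)
print(jnp.linalg.norm(states, axis=1))  # each row has unit norm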
Example #3
    def fidelity_test(self, n_samples=10, return_mean=True):
        """Compute fidelity with current interaction values with qutip.

        This can be used to compute the fidelity avoiding the
        compilation of the theano graph done by `self.fidelity`.

        Raises
        ------
        TargetGateNotGivenError if no target gate has been specified.
        """
        # compute fidelity for case of no ancillae
        if self.target_gate is None:
            raise TargetGateNotGivenError('You must give a target gate'
                                          ' first.')
        target_gate = self.target_gate
        gate = qutip.Qobj(self.get_current_gate(),
                          dims=[[2] * self.num_qubits] * 2)
        # each element of `fidelities` will contain the fidelity obtained with
        # a single randomly generated input state
        fidelities = np.zeros(n_samples)
        for idx in range(fidelities.shape[0]):
            # generate random input state (over system qubits only)
            psi_in = qutip.rand_ket_haar(2**self.num_system_qubits)
            psi_in.dims = [[2] * self.num_system_qubits,
                           [1] * self.num_system_qubits]
            # embed it into the bigger system+ancilla space (if necessary)
            if self.num_system_qubits < self.num_qubits:
                Psi_in = qutip.tensor(psi_in, self.ancillae_state)
            else:
                Psi_in = psi_in
            # evolve input state
            Psi_out = gate * Psi_in
            # trace out ancilla (if there is an ancilla to trace)
            if self.num_system_qubits < self.num_qubits:
                dm_out = Psi_out.ptrace(range(self.num_system_qubits))
            else:
                dm_out = qutip.ket2dm(Psi_out)
            # compute fidelity
            fidelity = (psi_in.dag() * target_gate.dag() * dm_out *
                        target_gate * psi_in)
            fidelities[idx] = fidelity[0, 0].real
        if return_mean:
            return fidelities.mean()
        else:
            return fidelities
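For reference, the Monte Carlo estimate performed by `fidelity_test` can be written as a standalone function. This is only a sketch under the simplifying assumption of no ancillae, so that `gate` and `target_gate` act on the same qubits; the name `monte_carlo_gate_fidelity` is not part of the original project, and only standard qutip 4.x calls are used.

import numpy as np
import qutip

def monte_carlo_gate_fidelity(gate, target_gate, num_qubits, n_samples=10):
    """Average <psi| U_t^dag (U |psi><psi| U^dag) U_t |psi> over Haar-random
    kets |psi>, with U = `gate` and U_t = `target_gate` (both qutip.Qobj)."""
    dims = [[2] * num_qubits, [1] * num_qubits]
    fids = np.empty(n_samples)
    for k in range(n_samples):
        psi = qutip.rand_ket_haar(2**num_qubits, dims=dims)
        rho_out = qutip.ket2dm(gate * psi)
        overlap = psi.dag() * target_gate.dag() * rho_out * target_gate * psi
        fids[k] = np.real(overlap[0, 0])
    return fids.mean()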
Example #4
    return reduce((lambda x, y: x + y), list_proj)


def gen_parity_op2(nb_qubits):
    return qt.tensor([Z for _ in range(nb_qubits)])


def gen_proj_onequbit(nb_qubits, which_qubit, proj):
    list_op = [I for _ in range(nb_qubits)]
    list_op[which_qubit] = proj.copy()
    return qt.tensor(list_op)
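# Usage sketch for the two helpers above (not part of the original snippet).
# It assumes `Z` and `I` are the single-qubit Pauli-Z and identity operators,
# which the truncated snippet defines elsewhere.
import qutip as qt
Z = qt.sigmaz()
I = qt.qeye(2)
# parity operator on two qubits is Z (x) Z
assert gen_parity_op2(2) == qt.tensor(Z, Z)
# projector onto |0> acting on qubit 1 of a 3-qubit register: I (x) |0><0| (x) I
proj0 = qt.ket2dm(qt.basis(2, 0))
assert gen_proj_onequbit(3, 1, proj0) == qt.tensor(I, proj0, I)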


ghz_2q = ut.get_ghz(2)
ghz_2q_mixed = 0.90 * qt.ket2dm(ut.get_ghz(2)) + 0.1 * qt.ket2dm(
    qt.rand_ket_haar(4, [[2, 2], [1, 1]]))
ghz_2q_phase = ut.get_ghz(2, np.pi / 2)
ghz_3q = ut.get_ghz(3)
ghz_4q = ut.get_ghz(4)
steps = np.linspace(0, 4, 5000)

### Plotting parity expectations
pop_2q = [Ramsey_exp(ghz_2q, p * np.pi, np.pi / 2) for p in steps]
pop_2q_phase = [Ramsey_exp(ghz_2q_phase, p * np.pi, np.pi / 2) for p in steps]
pop_2q_mixed = [Ramsey_exp(ghz_2q_mixed, p * np.pi, np.pi / 2) for p in steps]
plt.plot(steps, pop_2q)
plt.plot(steps, pop_2q_phase)
plt.plot(steps, pop_2q_mixed)

### Plotting parity expectations
pop_3q = [Ramsey_exp(ghz_3q, p * np.pi, np.pi / 2) for p in steps]
Example #5
    def test_pure_state_equivalent_to_overlap(self, dimension):
        """Check fidelity against pure-state overlap, see gh-361."""
        psi = rand_ket_haar(dimension)
        phi = rand_ket_haar(dimension)
        overlap = np.abs(psi.overlap(phi))
        assert fidelity(psi, phi) == pytest.approx(overlap, abs=1e-7)
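The identity exercised by this test is F(|psi>, |phi>) = |<psi|phi>| for pure states. A standalone check of the same relation (a sketch assuming qutip 4.x, where `fidelity` and `rand_ket_haar` live in the top-level namespace):

import numpy as np
from qutip import fidelity, rand_ket_haar

psi = rand_ket_haar(8)
phi = rand_ket_haar(8)
# for kets, the fidelity reduces to the absolute value of the overlap
assert np.isclose(fidelity(psi, phi), abs(psi.overlap(phi)), atol=1e-7)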
Example #6
def random_pure_state(steps, d):
    # draw `steps` Haar-random kets of dimension `d`; the results are
    # discarded, so this presumably serves as a timing benchmark
    for _ in range(steps):
        q.rand_ket_haar(d)
Example #7
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 20 18:48:58 2019

@author: fred
"""

import qutip as qt
import numpy as np
import matplotlib.pyplot as plt
import utilities as ut

tgt_state = qt.rand_ket_haar()
tgt_dm = tgt_state * tgt_state.dag()
tgt_exp = [
    (op * tgt_dm).tr()
    for op in [qt.sigmax(), qt.sigmay(), qt.sigmaz()]
]
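# Sanity-check sketch (not part of the original script): `tgt_exp` is the
# Bloch vector (<sx>, <sy>, <sz>) of the target state, so the density matrix
# can be reconstructed as rho = (I + x*sx + y*sy + z*sz) / 2.
x, y, z = (np.real(e) for e in tgt_exp)   # .tr() returns complex scalars
rho_rebuilt = 0.5 * (qt.qeye(2) + x * qt.sigmax() + y * qt.sigmay() + z * qt.sigmaz())
print((rho_rebuilt - tgt_dm).norm())      # ~0 up to numerical precision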


def fid_1qstate(tgt, state, nb_m=100, adapt=False, nb_r=1):
    if tgt.isket: tgt = tgt * tgt.dag()
    if state.isket: state = state * state.dag()
    if nb_m == np.inf:
        fid = (tgt * state).tr()
    else:
        op_meas = [qt.sigmax(), qt.sigmay(), qt.sigmaz()]
        obs_tgt = [(op * tgt).tr() for op in op_meas]
        if adapt: coeffs = np.abs(obs_tgt)
        else: coeffs = np.ones(len(op_meas))
Example #8
Ex_1 = 8 * ens(int(N*1/4)) + 2 * ens(int(N*1/4)) - 3 * ens(int(N*1/4)) - 2 * ens(int(N*1/4))
Ex_2 = 8 * ens(int(N*8/15)) + 2 * ens(int(N*2/15)) - 3 * ens(int(N*1/5)) - 2 * ens(int(N*2/15))
print(np.sum(np.square(Ex_1)) / len(Ex_1))
print(np.sum(np.square(Ex_2)) / len(Ex_2))


# --------------------------------- STUDY 2 ----------------------------------#
# Is there a difference between deterministically preparing a state or not
# from a measurement perspective



# --------------------------------- STUDY 3 ----------------------------------#
# expected value with pure vs mixed states
nb_repeat = 1000
rho_pure = [qutip.ket2dm(qutip.rand_ket_haar(2)) for _ in range(nb_repeat)]
rho_mixed_2 = [0.5 * qutip.ket2dm(qutip.rand_ket_haar(2)) + 0.5 * qutip.ket2dm(qutip.rand_ket_haar(2))
               for _ in range(nb_repeat)]
# weights of exactly 1/3 keep the mixture's trace equal to 1
rho_mixed_3 = [(qutip.ket2dm(qutip.rand_ket_haar(2)) + qutip.ket2dm(qutip.rand_ket_haar(2))
                + qutip.ket2dm(qutip.rand_ket_haar(2))) / 3
               for _ in range(nb_repeat)]
rho_id = 1 / 2 * qutip.identity(2)
herm_list = [qutip.rand_herm(2) for _ in range(nb_repeat)]
res_exp = np.array([((rp * h).tr(), (rm2 * h).tr(), (rm3 * h).tr(), (rho_id * h).tr())
                    for h, rp, rm2, rm3 in zip(herm_list, rho_pure, rho_mixed_2, rho_mixed_3)])
np.average(res_exp, 0)
np.average(np.square(res_exp), 0)



def get_var_est(rho, obs, nb_m, nb_r):
    """ get exp observables for an initial state (rho) evolved under a unitary 
    (A) - it may evolve to incorporate more general processes, and where the 
    observable obs is made"""
    ref = (obs * rho).tr()
Example #9
def random_pure_state(steps, d):
    # draw `steps` Haar-random kets of dimension `d`; the results are
    # discarded, so this presumably serves as a timing benchmark
    for _ in range(steps):
        q.rand_ket_haar(d)
Example #10
def plot_fidelity_vs_J_qutip(net,
                             xs,
                             index_to_vary,
                             states=None,
                             target_states=None,
                             n_states=5,
                             ax=None):
    """Plot the variation of the fidelity with an interaction parameter.

    Given an input `QubitNetwork` object, a sample of random input states is
    generated, and on each of them the fidelity is computed as a function of
    one of the interaction parameters.
    The fidelity curves for all states are plotted once every value has been computed.

    Examples
    --------
    Load a pre-trained network from file, and plot the fidelity for a number
    of random input states as a function of the fifth interaction parameter
    `net.J[4]`, testing its values from -20 to 20 at intervals of 0.05:
    >>> import net_analysis_tools as nat
    >>> net = nat.load_network_from_file('path/to/net.pickle')
    >>> nat.plot_fidelity_vs_J_qutip(net, np.arange(-20, 20, 0.05), 4)
    <output graphics object>
    """
    import copy
    import matplotlib.pyplot as plt
    import theano
    # from IPython.core.debugger import set_trace; set_trace()
    if states is None or target_states is None:
        # states, target_states = net.generate_training_states(n_states)
        hs_dims = 2**net.num_system_qubits
        dims = [[2] * net.num_system_qubits, [1] * net.num_system_qubits]
        states = [
            qutip.rand_ket_haar(hs_dims, dims=dims) for _ in range(n_states)
        ]
        # states = np.asarray(states)
        target_gate = net.target_gate
        target_states = [target_gate * ket for ket in states]
        # if there are ancillae, they are added to the inputs
        if net.num_qubits > net.num_system_qubits:
            if net.ancillae_state is not None:
                ancillae = net.ancillae_state
            else:
                num_ancillae = net.num_qubits - net.num_system_qubits
                ancillae = qutip.tensor(*(qutip.basis(2, 0)
                                          for _ in range(num_ancillae)))
            states = [qutip.tensor(state, ancillae) for state in states]
        # target_states = np.einsum('ij,kj->ki', target_gate, states)
    # create another instance of model to avoid changing the original one
    _net = copy.deepcopy(net)
    # extract parameters
    try:
        pars_ref = _net.parameters
        pars_values = _net.parameters.get_value()
    except AttributeError:
        pars_ref = _net.J
        pars_values = _net.J.get_value()
    # initialise figure object, if the user does not want to plot on their own axes
    if ax is None:
        _, ax = plt.subplots(1, 1)
    # initialise array of fidelities (for all states)
    fidelities = np.zeros(shape=(len(states), len(xs)))
    # for state_idx, (state, target_state) in enumerate(zip(states, target_states)):
    import progressbar
    bar = progressbar.ProgressBar()
    for idx, x in enumerate(bar(xs)):
        # we need to copy the array here, otherwise we change the original
        new_pars = np.array(pars_values)
        if isinstance(index_to_vary, str) and index_to_vary == 'all':
            # in this case the range is intended as a percentage change
            new_pars *= x
        else:
            new_pars[index_to_vary] = x
        pars_ref.set_value(new_pars)
        current_gate = _net.get_current_gate()
        fids = []
        for state, target_state in zip(states, target_states):
            out_state = (current_gate * state).ptrace(
                range(_net.num_system_qubits))
            fids.append(qutip.fidelity(out_state, target_state))
        fidelities[:, idx] = fids

    ax.plot(xs, fidelities.T)
    def generate_training_states(self, num_states):
        """Create training states for the training.

        Each time it is called, this function generates a set of input
        states and the corresponding target output states, to be used
        during training. These values are fed into the computation
        through the `givens` parameter of `theano.function`.

        Returns
        -------
        A tuple with two elements: training vectors and labels.
        NOTE: The training and target vectors have different lengths!
              The former span the whole system+ancilla space, while the
              latter span only the system qubits.

        training_states: an array of vectors.
            Each vector represents a state in the full system+ancilla space,
            in big real form. These states span the whole space simply
            out of convenience, but are obtained as tensor product of
            the target states over the system qubits with the initial
            states of the ancillary qubits.
        target_states: an array of vectors.
            Each vector represents a state spanning only the system qubits,
            in big real form. Every such state is generated by evolving
            the corresponding `training_state` through the matrix
            `target_unitary`.

        This generation method is far from optimal. However, it takes
        roughly 250 ms to generate a (standard) training set of 100 states,
        which amounts to about 5 minutes over 1000 epochs with a training
        dataset of 100 states, so this cost is not particularly significant.
        """
        assert self.target_gate is not None, 'target_gate not set'

        # 1) Generate random input states over system qubits
        # `rand_ket_haar` seems to be slightly faster than `rand_ket`
        length_inputs = 2**self.num_system_qubits
        qutip_dims = [[2 for _ in range(self.num_system_qubits)],
                      [1 for _ in range(self.num_system_qubits)]]
        training_inputs = [
            qutip.rand_ket_haar(length_inputs, dims=qutip_dims)
            for _ in range(num_states)
        ]
        # 2) Compute corresponding output states
        target_outputs = self._target_outputs_from_inputs(training_inputs)
        # 3) Tensor product of training input states with ancillae
        for idx, ket in enumerate(training_inputs):
            if self.num_system_qubits < self.num_qubits:
                ket = qutip.tensor(ket, self.ancillae_state)
            training_inputs[idx] = complex2bigreal(ket)
        training_inputs = np.asarray(training_inputs)
        # 4) Convert target outputs to big real form.
        # NOTE: the target states are kets if the target gate is unitary,
        #       and density matrices for target open maps.
        target_outputs = np.asarray(
            [complex2bigreal(st) for st in target_outputs])
        # return results as matrices
        _, len_inputs, _ = training_inputs.shape
        _, len_outputs, _ = target_outputs.shape
        training_inputs = training_inputs.reshape((num_states, len_inputs))
        target_outputs = target_outputs.reshape((num_states, len_outputs))
        return training_inputs, target_outputs
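The docstring above refers to states in "big real form" produced by `complex2bigreal`, which is not shown in this snippet. Below is only a sketch of what such a helper might look like, under the assumption that "big real form" means stacking the real part of the flattened state on top of its imaginary part; the name `complex2bigreal_sketch` and this convention are assumptions, not the project's actual implementation.

import numpy as np
import qutip

def complex2bigreal_sketch(state):
    """Hypothetical stand-in for `complex2bigreal`: map a qutip ket or density
    matrix to a real column vector [Re(x); Im(x)]."""
    x = state.full().ravel() if isinstance(state, qutip.Qobj) else np.ravel(state)
    return np.concatenate([x.real, x.imag]).reshape(-1, 1)

# e.g. a single-qubit ket (a+ib, c+id) maps to the real column (a, c, b, d)
ket = qutip.rand_ket_haar(2)
print(complex2bigreal_sketch(ket).shape)   # (4, 1)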