# Assumed context: these examples appear to be excerpted from the QInfer
# library. Here `qt` is QuTiP; DensityOperatorDistribution, gell_mann_basis,
# tensor_product_basis, due, and Doi are QInfer names assumed to be in scope.
import qutip as qt


class BCSZChoiDistribution(DensityOperatorDistribution):
    """
    Samples Choi states for completely-positive (CP) or CP and
    trace-preserving (CPTP) maps, as generated
    by the BCSZ prior [BCSZ09]_. The sampled states are normalized
    as states (trace 1).
    """
    @due.dcite(
        Doi("10.1016/j.physleta.2008.11.043"),
        description="BCSZ distribution",
        tags=['implementation']
    )
    def __init__(self, basis, rank=None, enforce_tp=True):
        if isinstance(basis, int):
            basis = gell_mann_basis(basis)
        self._hdim = basis.dim

        # TODO: take basis on underlying space, tensor up?
        channel_basis = tensor_product_basis(basis, basis)
        # FIXME: this is a hack to get another level of nesting.
        channel_basis.dims = [basis.dims, basis.dims]
        channel_basis.superrep = 'choi'
        super(BCSZChoiDistribution, self).__init__(channel_basis)
        self._rank = rank
        self._enforce_tp = enforce_tp

    def _sample_dm(self):
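        # Draw a random superoperator from the BCSZ ensemble, convert it to
        # its Choi representation, and renormalize to trace 1 so that the
        # result is a valid density operator.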
        return qt.to_choi(
            qt.rand_super_bcsz(self._hdim, self._enforce_tp, self._rank)
        ).unit()
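
# A minimal usage sketch (assuming QuTiP and QInfer's tomography support are
# available): draw samples of Choi states for random qubit (d = 2) CPTP
# channels. The `sample` method is inherited from DensityOperatorDistribution
# and returns the sampled states expanded in the channel basis.
dist = BCSZChoiDistribution(2)
samples = dist.sample(n=5)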
Example #2
# The opening of this function is truncated above; the signature and docstring
# below are reconstructed from the body (in QInfer this utility is called
# `ellipsoid_volume`). `la` is scipy.linalg; `sqrtm` and `gamma` are SciPy's.
import numpy as np
from scipy import linalg as la
from scipy.linalg import sqrtm
from scipy.special import gamma


def ellipsoid_volume(A=None, invA=None):
    r"""
    Returns the volume of the ellipsoid :math:`\{x : x^T A x \leq 1\}`,
    given either :math:`A` or its inverse :math:`A^{-1}`.
    """
    if invA is None and A is None:
        raise ValueError("Must pass either inverse(A) or A.")

    if invA is None and A is not None:
        invA = la.inv(A)

    # Find the unit sphere volume.
    # http://en.wikipedia.org/wiki/Unit_sphere#General_area_and_volume_formulas
    n = invA.shape[0]
    Vn = (np.pi**(n / 2)) / gamma(1 + (n / 2))

    return Vn * la.det(sqrtm(invA))
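
# A quick sanity check (a sketch): with A equal to the identity in three
# dimensions the ellipsoid is the unit ball, so the volume should be 4*pi/3.
vol = ellipsoid_volume(A=np.eye(3))
assert np.isclose(vol, 4 * np.pi / 3)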


@due.dcite(Doi("10.1016/j.dam.2007.02.013"),
           description="Khachiyan algorithm",
           tags=["implementation"])
def mvee(points, tol=0.001):
    """
    Returns the minimum-volume enclosing ellipsoid (MVEE)
    of a set of points, using the Khachiyan algorithm.
    """

    # This function is a port of the MATLAB function by
    # Nima Moshtagh found here:
    # https://www.mathworks.com/matlabcentral/fileexchange/9542-minimum-volume-enclosing-ellipsoid
    # with an accompanying write-up here:
    # https://www.researchgate.net/profile/Nima_Moshtagh/publication/254980367_MINIMUM_VOLUME_ENCLOSING_ELLIPSOIDS/links/54aab5260cf25c4c472f487a.pdf

    N, d = points.shape
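    # The remainder of this function is truncated in the source above. What
    # follows is a sketch of the rest of the Khachiyan iteration, written to
    # match the MATLAB port referenced in the comments; treat it as an
    # illustration rather than the verified original.
    Q = np.zeros((d + 1, N))
    Q[:d, :] = points.T
    Q[d, :] = np.ones(N)

    # Start from a uniform weight on each point, then repeatedly shift weight
    # toward the point farthest outside the current trial ellipsoid.
    u = np.ones(N) / N
    err = tol + 1
    while err > tol:
        X = Q @ np.diag(u) @ Q.T
        M = np.diag(Q.T @ la.inv(X) @ Q)
        jdx = np.argmax(M)
        step_size = (M[jdx] - d - 1) / ((d + 1) * (M[jdx] - 1))
        new_u = (1 - step_size) * u
        new_u[jdx] += step_size
        err = la.norm(new_u - u)
        u = new_u

    # Recover the ellipsoid matrix A and center c from the optimal weights,
    # so that the MVEE is {x : (x - c)^T A (x - c) <= 1}.
    c = u @ points
    A = la.inv(points.T @ np.diag(u) @ points - np.outer(c, c)) / d
    return A, c


# A quick usage sketch: the MVEE of the unit square's corners.
corners = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
A_sq, c_sq = mvee(corners)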
Example #3
# Assumed context: `np` is NumPy and `count` is itertools.count; the names
# DerivedModel, Simulatable, FiniteOutcomeModel, binom_est_p, binom_est_error,
# due, and Doi come from QInfer.
import numpy as np
from itertools import count


class ALEApproximateModel(DerivedModel):
    r"""
    Given a :class:`~qinfer.abstract_model.Simulatable`, estimates the
    likelihood function of that simulator by using adaptive likelihood
    estimation (ALE).
    
    :param qinfer.abstract_model.Simulatable simulator: Simulator to estimate
        the likelihood function of.
    :param float error_tol: Allowed error in the estimated likelihood. Note that
        the simulation cost scales as :math:`O(\epsilon^{-2})`, where
        :math:`\epsilon` is the error tolerance.
    :param int min_samp: Minimum number of samples to use in estimating the
        likelihood.
    :param int samp_step: Number of samples by which to increment if the error
        tolerance is not met.
    :param float est_hedge: Amount of hedging to use in reporting the final
        estimate.
    :param float adapt_hedge: Amount of hedging to use in deciding if the error
        tolerance has been met. Increasing this parameter will in general
        cause the algorithm to require more samples.
    """
    
    @due.dcite(
        Doi("10.1103/PhysRevLett.112.130402"),
        description="Adaptive likelihood estimation",
        tags=["implementation"]
    )
    def __init__(self, simulator,
        error_tol=1e-2, min_samp=10, samp_step=10,
        est_hedge=0.509, adapt_hedge=0.509
    ):
        
        ## INPUT VALIDATION ##
        if not isinstance(simulator, Simulatable):
            raise TypeError("Simulator must be an instance of Simulatable.")

        if error_tol <= 0:
            raise ValueError("Error tolerance must be strictly positive.")
        if error_tol > 1:
            raise ValueError("Error tolerance must not be greater than 1.")
            
        if min_samp <= 0:
            raise ValueError("Minimum number of samples (min_samp) must be positive.")
        if samp_step <= 0:
            raise ValueError("Sample step (samp_step) must be positive.")
        if est_hedge < 0:
            raise ValueError("Estimator hedging (est_hedge) must be non-negative.")
        if adapt_hedge < 0:
            raise ValueError("Adaptive hedging (adapt_hedge) must be non-negative.")

        # this simulator constraint makes implementation easier
        if not (simulator.is_n_outcomes_constant and simulator.n_outcomes(None) == 2):
            raise ValueError("Decorated model must be a two-outcome model.")

        # We had to have the simulator in place before we could call
        # the superclass.
        super(ALEApproximateModel, self).__init__(simulator)
        
        self._error_tol = float(error_tol)
        self._min_samp = int(min_samp)
        self._samp_step = int(samp_step)
        self._est_hedge = float(est_hedge)
        self._adapt_hedge = float(adapt_hedge)
        
    ## WRAPPED METHODS AND PROPERTIES ##
    # We only need to wrap sim_count and simulate_experiment,
    # since the rest are handled by DerivedModel.
    @property
    def sim_count(self): return self.underlying_model.sim_count

    def simulate_experiment(self, modelparams, expparams, repeat=1):
        return self.underlying_model.simulate_experiment(modelparams, expparams, repeat=repeat)
    
    ## IMPLEMENTATIONS OF MODEL METHODS ##
    
    def likelihood(self, outcomes, modelparams, expparams):
        # FIXME: at present, will proceed until ALL model experiment pairs
        #        are below error tol.
        #        Should disable one-by-one, but that's tricky.
        super(ALEApproximateModel, self).likelihood(outcomes, modelparams, expparams)
        simulator = self.underlying_model

        # We will use the fact we have assumed a two-outcome model to make the
        # problem easier. As such, we will rely on the static method 
        # FiniteOutcomeModel.pr0_to_likelihood_array.
        
        # Start off with min_samp samples, then draw samp_step more per
        # iteration until the error estimate is below tolerance for every
        # (model, experiment) pair.
        n = np.zeros((modelparams.shape[0], expparams.shape[0]))
        for N in count(start=self._min_samp, step=self._samp_step):
            sim_data = simulator.simulate_experiment(
                modelparams, expparams,
                # Draw min_samp samples on the first pass so that the number
                # of simulated samples matches N; samp_step more thereafter.
                repeat=self._min_samp if N == self._min_samp else self._samp_step
            )
            # Sum over the repetitions axis to count the number of 1 outcomes.
            n += np.sum(sim_data, axis=0)
            error_est_p1 = binom_est_error(
                binom_est_p(n, N, self._adapt_hedge), N, self._adapt_hedge
            )
            if np.all(error_est_p1 < self._error_tol):
                break
            
        return FiniteOutcomeModel.pr0_to_likelihood_array(
            outcomes, 1 - binom_est_p(n, N, self._est_hedge)
        )
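
# A minimal usage sketch (hypothetical setup): wrap a two-outcome model so
# that its likelihood is estimated by repeated simulation rather than
# evaluated exactly. SimplePrecessionModel is a two-outcome model that ships
# with QInfer.
from qinfer import SimplePrecessionModel
ale_model = ALEApproximateModel(SimplePrecessionModel(), error_tol=1e-2)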
Example #4
# Assumed context: `np` is NumPy; FiniteOutcomeModel, DifferentiableModel,
# due, and Doi come from QInfer.
import numpy as np


class RandomizedBenchmarkingModel(FiniteOutcomeModel, DifferentiableModel):
    r"""
    Implements the randomized benchmarking or interleaved randomized
    benchmarking protocol, such that the depolarizing strength :math:`p`
    of the twirled channel is a parameter to be estimated, given a sequence
    length :math:`m` as an experimental control. In addition, the zeroth-order
    "fitting"-parameters :math:`A` and :math:`B` are represented as model
    parameters to be estimated.
    
    :param bool interleaved: If `True`, the model implements the interleaved
        protocol, with :math:`\tilde{p}` being the depolarizing parameter for
        the interleaved gate and with :math:`p_{\text{ref}}` being the reference
        parameter.

    :modelparam p: Fidelity of the twirled error channel :math:`\Lambda`, represented as
        a decay rate :math:`p = (d F - 1) / (d - 1)`, where :math:`F`
        is the fidelity and :math:`d` is the dimension of the Hilbert space.
    :modelparam A: Scale of the randomized benchmarking decay, defined as
        :math:`\Tr[Q \Lambda(\rho - \ident / d)]`, where :math:`Q` is the final
        measurement, and where :math:`\rho` is the initial preparation.
    :modelparam B: Offset of the randomized benchmarking decay, defined as
        :math:`\Tr[Q \Lambda(\ident / d)]`.

    :expparam int m: Length of the randomized benchmarking sequence
        that was measured.
    """
    # TODO: add citations to the above docstring.

    @due.dcite(Doi("10.1088/1367-2630/17/1/013042"),
               description="Accelerated randomized benchmarking",
               tags=["implementation"])
    def __init__(self, interleaved=False, order=0):
        self._il = interleaved
        if order != 0:
            raise NotImplementedError(
                "Only zeroth-order is currently implemented.")
        super(RandomizedBenchmarkingModel, self).__init__()

    @property
    def n_modelparams(self):
        return 3 + (1 if self._il else 0)

    @property
    def modelparam_names(self):
        return (
            # We want to know \tilde{p} := p_C / p, and so we make it
            # a model parameter directly. This means that later, we'll
            # need to extract p_C = p \tilde{p}.
            [r'\tilde{p}', 'p', 'A', 'B'] if self._il else ['p', 'A', 'B'])

    @property
    def is_n_outcomes_constant(self):
        return True

    @property
    def expparams_dtype(self):
        return [('m', 'uint')] + ([('reference', bool)] if self._il else [])

    def n_outcomes(self, expparams):
        return 2

    def are_models_valid(self, modelparams):
        if self._il:
            # The first model parameter is \tilde{p} (see modelparam_names).
            p_tilde, p, A, B = modelparams.T
            return np.all([
                0 <= p, p <= 1,
                0 <= p_tilde, p_tilde <= 1,
                0 <= A, A <= 1,
                0 <= B, B <= 1,
                A + B <= 1,
                A * p + B <= 1,
                A * p_tilde + B <= 1
            ], axis=0)
        else:
            p, A, B = modelparams.T
            return np.all([
                0 <= p, p <= 1,
                0 <= A, A <= 1,
                0 <= B, B <= 1,
                A + B <= 1,
                A * p + B <= 1
            ], axis=0)

    def likelihood(self, outcomes, modelparams, expparams):
        super(RandomizedBenchmarkingModel,
              self).likelihood(outcomes, modelparams, expparams)

        if self._il:
            p_tilde, p, A, B = modelparams.T[:, :, np.newaxis]

            p_C = p_tilde * p

            p = np.where(expparams['reference'][np.newaxis, :], p, p_C)
        else:
            p, A, B = modelparams.T[:, :, np.newaxis]

        m = expparams['m'][np.newaxis, :]

        # The zeroth-order RB decay gives survival probability A p^m + B, so
        # the probability of the complementary outcome 0 is 1 - (A p^m + B).
        pr0 = np.zeros((modelparams.shape[0], expparams.shape[0]))
        pr0[:, :] = 1 - (A * (p**m) + B)

        return FiniteOutcomeModel.pr0_to_likelihood_array(outcomes, pr0)

    def score(self, outcomes, modelparams, expparams, return_L=False):
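        # The score is the derivative of the log-likelihood with respect to
        # each model parameter; the returned array has shape
        # (n_modelparams, n_outcomes, n_models, n_experiments).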

        na = np.newaxis
        n_m = modelparams.shape[0]
        n_e = expparams.shape[0]
        n_o = outcomes.shape[0]
        n_p = self.n_modelparams

        m = expparams['m'].reshape((1, 1, 1, n_e))

        L = self.likelihood(outcomes, modelparams, expparams)[na, ...]
        outcomes = outcomes.reshape((1, n_o, 1, 1))

        if not self._il:

            p, A, B = modelparams.T[:, :, np.newaxis]
            p = p.reshape((1, 1, n_m, 1))
            A = A.reshape((1, 1, n_m, 1))
            B = B.reshape((1, 1, n_m, 1))

            q = (-1)**(1 - outcomes) * np.concatenate(
                np.broadcast_arrays(
                    A * m * (p**(m - 1)),
                    p**m,
                    np.ones_like(p),
                ), axis=0
            ) / L

        else:

            p_tilde, p_ref, A, B = modelparams.T[:, :, np.newaxis]
            p_C = p_tilde * p_ref

            mode = expparams['reference'][np.newaxis, :]

            p = np.where(mode, p_ref, p_C)

            p = p.reshape((1, 1, n_m, n_e))
            A = A.reshape((1, 1, n_m, 1))
            B = B.reshape((1, 1, n_m, 1))

            q = (-1)**(1 - outcomes) * np.concatenate(
                np.broadcast_arrays(
                    # Derivatives with respect to \tilde{p} and p_ref,
                    # depending on whether each experiment is a reference
                    # or an interleaved sequence.
                    np.where(mode, 0,
                             A * m * (p_tilde**(m - 1)) * (p_ref**m)),
                    np.where(mode,
                             A * m * (p_ref**(m - 1)),
                             A * m * (p_ref**(m - 1)) * (p_tilde**m)),
                    p**m,
                    np.ones_like(p)
                ), axis=0
            ) / L

        if return_L:
            # Need to strip off the extra axis we added for broadcasting to q.
            return q, L[0, ...]
        else:
            return q
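
# A minimal usage sketch (a sanity check with assumed parameter values): for
# the zeroth-order model, the likelihood of outcome 0 is 1 - (A p^m + B),
# which we can compare against the likelihood method directly.
model = RandomizedBenchmarkingModel()
modelparams = np.array([[0.99, 0.5, 0.5]])                 # p, A, B
expparams = np.array([(10,)], dtype=model.expparams_dtype)
outcomes = np.array([0])
L = model.likelihood(outcomes, modelparams, expparams)
assert np.isclose(L[0, 0, 0], 1 - (0.5 * 0.99**10 + 0.5))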