Example #1
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Calculates the likelihood function at the states specified 
        by modelparams and measurement specified by expparams.
        This is given by the Born rule and is the probability of
        outcomes given the state and measurement operator.
        
        Parameters
        ----------
        outcomes = 
            measurement outcome
        expparams = 
            Bloch vector of measurement axis
        modelparams = 
            quantum state Bloch vector
        """

        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(RebitStatePauliModel, self).likelihood(outcomes, modelparams,
                                                     expparams)

        pr0 = np.zeros((modelparams.shape[0], expparams.shape[0]))

        # Note that expparams['axis'] has shape (n_exp, 3).
        pr0 = 0.5 * (1 + np.sum(modelparams * expparams['axis'], 1))

        # Hack: clip pr0 to [0, 1]; use this if you don't want to ensure positive weights.
        pr0[pr0 < 0] = 0
        pr0[pr0 > 1] = 1

        pr0 = pr0[:, np.newaxis]

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
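As a standalone sanity check (mine, not part of the listing), the Born-rule arithmetic above can be reproduced with plain NumPy. The single-experiment record array and the three-component 'axis' field follow the (n_exp, 3) comment in the snippet; an actual rebit model might use two-component Bloch vectors instead.

    import numpy as np

    # Illustrative states as Bloch vectors (3 components assumed, per the comment).
    modelparams = np.array([[0.0, 0.0, 1.0],    # |0>
                            [0.0, 0.0, -1.0],   # |1>
                            [1.0, 0.0, 0.0]])   # |+>

    expparams = np.empty((1,), dtype=[('axis', float, (3,))])
    expparams['axis'] = [0.0, 0.0, 1.0]         # measure along Z

    pr0 = 0.5 * (1 + np.sum(modelparams * expparams['axis'], 1))
    pr0 = np.clip(pr0, 0, 1)[:, np.newaxis]
    print(pr0.ravel())                          # [1.  0.  0.5]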
Example #2
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Calculates the likelihood function at the states specified 
        by modelparams and measurement specified by expparams.
        This is given by the Born rule and is the probability of
        outcomes given the state and measurement operator.
        """

        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(MultiQubitStatePauliModel,
              self).likelihood(outcomes, modelparams, expparams)

        # Note that expparams['pauli'] gives the index of the measured Pauli
        # expectation value, with shape (n_exp, ).
        pr0 = 0.5 * (1 + modelparams[:, expparams['pauli']])

        # Hack: clip pr0 to [0, 1]; use this if you don't want to ensure positive weights.
        pr0[pr0 < 0] = 0
        pr0[pr0 > 1] = 1

        # Note that expparams['vis'] has shape (n_exp, ).
        pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
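The final step above mixes the ideal Born-rule probability with a fair coin according to the visibility: at vis = 1 the ideal probability is recovered, and at vis = 0 every measurement is pure noise. A quick standalone check with illustrative numbers:

    import numpy as np

    pr0_ideal = np.array([1.0, 0.75, 0.5])
    for vis in (1.0, 0.9, 0.0):
        print(vis, vis * pr0_ideal + (1 - vis) * 0.5)
    # 1.0 [1.    0.75  0.5 ]
    # 0.9 [0.95  0.725 0.5 ]
    # 0.0 [0.5   0.5   0.5 ]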
Example #3
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Calculates the likelihood function at the states specified
        by modelparams and the measurements specified by expparams.
        This is given by the Born rule, and is the probability of
        the outcomes given the state and measurement operator.

        Parameters
        ----------
        outcomes :
            Measurement outcomes.
        modelparams :
            Bloch vectors of the quantum states.
        expparams :
            Bloch vectors of the measurement axes.
        """

        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(RebitStatePauliModel, self).likelihood(outcomes, modelparams, expparams)

        pr0 = np.zeros((modelparams.shape[0], expparams.shape[0]))

        # Note that expparams['axis'] has shape (n_exp, 3).
        pr0 = 0.5 * (1 + np.sum(modelparams * expparams['axis'], 1))

        # Hack: clip pr0 to [0, 1]; use this if you don't want to ensure positive weights.
        pr0[pr0 < 0] = 0
        pr0[pr0 > 1] = 1

        pr0 = pr0[:, np.newaxis]

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
Example #4
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Calculates the likelihood function at the states specified 
        by modelparams and measurement specified by expparams.
        This is given by the Born rule and is the probability of
        outcomes given the state and measurement operator.
        """
        
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(MultiQubitStatePauliModel, self).likelihood(outcomes, modelparams, expparams)
        
        
        # Note that expparams['pauli'] gives the index of the measured Pauli
        # expectation value, with shape (n_exp, ).
        pr0 = 0.5 * (1 + modelparams[:, expparams['pauli']])

        # Hack: clip pr0 to [0, 1]; use this if you don't want to ensure positive weights.
        pr0[pr0 < 0] = 0
        pr0[pr0 > 1] = 1
        
        # Note that expparams['vis'] has shape (n_exp, ).
        pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)        
Example #5
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Calculates the likelihood function at the states specified 
        by modelparams and measurement specified by expparams.
        This is given by the Born rule and is the probability of
        outcomes given the state and measurement operator.
        
        Parameters
        ----------
        outcomes = 
            measurement outcome
        expparams = 
            Bloch vector of measurement axis and visibility
        modelparams = 
            quantum state Bloch vector
        """

        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(QubitStatePauliModel, self).likelihood(outcomes, modelparams,
                                                     expparams)

        # Note that expparams['axis'] has shape (n_exp, 3).
        pr0 = 0.5 * (1 + np.sum(modelparams * expparams['axis'], 1))

        # Note that expparams['vis'] has shape (n_exp, ).
        pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5

        pr0 = pr0[:, np.newaxis]

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
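The indexing in this example implies a structured expparams array with an 'axis' field of shape (n_exp, 3) and a scalar 'vis' field per experiment. The dtype below is an assumption inferred from those comments rather than the model's declared expparams_dtype:

    import numpy as np

    # Assumed layout for two experiments.
    expparams = np.empty((2,), dtype=[('axis', float, (3,)), ('vis', float)])
    expparams['axis'] = [[0.0, 0.0, 1.0],   # measure along Z ...
                         [1.0, 0.0, 0.0]]   # ... then along X
    expparams['vis'] = [1.0, 0.9]

    print(expparams['axis'].shape)  # (2, 3)
    print(expparams['vis'].shape)   # (2,)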
Example #6
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Calculates the likelihood function at the states specified 
        by modelparams and measurement specified by expparams.
        This is given by the Born rule and is the probability of
        outcomes given the state and measurement operator.
        
        Parameters
        ----------
        outcomes = 
            measurement outcome
        expparams = 
            Bloch vector of measurement axis and visibility
        modelparams = 
            quantum state Bloch vector
        """
        
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(QubitStatePauliModel, self).likelihood(outcomes, modelparams, expparams)
        
        # Note that expparams['axis'] has shape (n_exp, 3).
        pr0 = 0.5 * (1 + np.sum(modelparams * expparams['axis'], 1))
        
        # Note that expparams['vis'] has shape (n_exp, ).
        pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5

        pr0 = pr0[:, np.newaxis]

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
Example #7
    def likelihood(self, outcomes, modelparams, expparams):
        # Unpack alpha and beta.
        a = expparams['alpha']
        b = expparams['beta']

        # Find the probability of getting a "0" outcome.
        pr0 = modelparams * a + (1 - modelparams) * b

        # Concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
Example #8
    def likelihood(self, outcomes, modelparams, expparams):
        # Unpack alpha and beta.
        a = expparams['alpha']
        b = expparams['beta']

        # Find the probability of getting a "0" outcome.
        pr0 = modelparams * a + (1 - modelparams) * b

        # Concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
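On my reading of this snippet, modelparams is the probability p of one underlying outcome, while alpha and beta are the probabilities of reporting "0" in each of the two underlying cases, so pr0 is a simple mixture. A standalone check with illustrative numbers:

    import numpy as np

    p = np.array([[0.0], [0.5], [1.0]])   # shape (n_models, 1)
    alpha, beta = 0.95, 0.05              # illustrative response probabilities

    pr0 = p * alpha + (1 - p) * beta
    print(pr0.ravel())                    # [0.05 0.5  0.95]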
Example #9
    def likelihood(self, outcomes, modelparams, expparams):
        m = self.n_had
        
        # The first and last 2**m entries of f.
        F0 = self.f[:2**m]
        F1 = self.f[-2**m:]

        # Count the entries of each half whose last bit is 0.
        count0 = np.sum((F0 + 1) % 2)
        count1 = np.sum((F1 + 1) % 2)

        # Probability of getting a 0 outcome.
        pr0 = modelparams * count0 / (2**m) + (1 - modelparams) * count1 / (2**m)

        # Concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
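The (F + 1) % 2 idiom counts entries whose last bit is 0; for a 0/1-valued array that is just the number of zeros. A standalone illustration (the array is made up):

    import numpy as np

    F = np.array([0, 1, 1, 0, 1, 0, 0, 1])
    print(np.sum((F + 1) % 2))   # 4, the number of zeros
    print(np.sum(F % 2 == 0))    # 4, an equivalent spelling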
Example #10
    def likelihood(self, outcomes, modelparams, expparams):
        m = self.n_had

        # The first and last 2**m entries of f.
        F0 = self.f[:2**m]
        F1 = self.f[-2**m:]

        # Count the entries of each half whose last bit is 0.
        count0 = np.sum((F0 + 1) % 2)
        count1 = np.sum((F1 + 1) % 2)

        # Probability of getting a 0 outcome.
        pr0 = modelparams * count0 / (2**m) + (1 - modelparams) * count1 / (2**m)

        # Concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
Example #11
    def likelihood(self, outcomes, modelparams, expparams):
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(SimplePrecessionModel, self).likelihood(outcomes, modelparams, expparams)

        # Possibly add a second axis to modelparams.
        if len(modelparams.shape) == 1:
            modelparams = modelparams[..., np.newaxis]

        # Allocating first serves to make sure that a shape mismatch later
        # will cause an error.
        pr0 = np.zeros((modelparams.shape[0], expparams.shape[0]))

        arg = np.dot(modelparams, expparams[..., np.newaxis].T) / 2
        pr0 = np.cos(arg) ** 2

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
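Interpreting the model parameters as precession frequencies and expparams as evolution times, the likelihood above is the familiar cos²(ωt/2) fringe, evaluated on the outer product of frequencies and times. The array manipulation alone, with plain NumPy and my own variable names:

    import numpy as np

    omega = np.array([1.0, 2.0])             # model parameters, shape (n_models,)
    t = np.array([0.0, np.pi, 2 * np.pi])    # experiment times, shape (n_exp,)

    modelparams = omega[..., np.newaxis]                   # (n_models, 1)
    arg = np.dot(modelparams, t[..., np.newaxis].T) / 2    # (n_models, n_exp)
    pr0 = np.cos(arg) ** 2
    print(pr0.round(3))
    # [[1. 0. 1.]
    #  [1. 1. 1.]]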
Example #12
    def likelihood(self, outcomes, modelparams, expparams):
        # FIXME: at present, will proceed until ALL model experiment pairs
        #        are below error tol.
        #        Should disable one-by-one, but that's tricky.
        super(ALEApproximateModel, self).likelihood(outcomes, modelparams, expparams)
        # We will use the fact we have assumed a two-outcome model to make the
        # problem easier. As such, we will rely on the static method
        # Model.pr0_to_likelihood_array.

        # Start off with min_samp samples.
        n = np.zeros((modelparams.shape[0], expparams.shape[0]))
        for N in count(start=self._min_samp, step=self._samp_step):
            sim_data = self._simulator.simulate_experiment(
                modelparams, expparams, repeat=self._samp_step
            )
            # Sum over the outcomes axis to find the number of 1s.
            n += np.sum(sim_data, axis=0)
            error_est_p1 = binom_est_error(binom_est_p(n, N, self._adapt_hedge), N, self._adapt_hedge)
            if np.all(error_est_p1 < self._error_tol):
                break

        return Model.pr0_to_likelihood_array(outcomes, 1 - binom_est_p(n, N, self._est_hedge))
Example #13
    def likelihood(self, outcomes, modelparams, expparams):
        # FIXME: at present, will proceed until ALL model experiment pairs
        #        are below error tol.
        #        Should disable one-by-one, but that's tricky.
        super(ALEApproximateModel, self).likelihood(outcomes, modelparams,
                                                    expparams)
        # We will use the fact we have assumed a two-outcome model to make the
        # problem easier. As such, we will rely on the static method
        # Model.pr0_to_likelihood_array.

        # Start off with min_samp samples.
        n = np.zeros((modelparams.shape[0], expparams.shape[0]))
        for N in count(start=self._min_samp, step=self._samp_step):
            sim_data = self._simulator.simulate_experiment(
                modelparams, expparams, repeat=self._samp_step)
            # Sum over the outcomes axis to find the number of 1s.
            n += np.sum(sim_data, axis=0)
            error_est_p1 = binom_est_error(
                binom_est_p(n, N, self._adapt_hedge), N, self._adapt_hedge)
            if np.all(error_est_p1 < self._error_tol): break

        return Model.pr0_to_likelihood_array(
            outcomes, 1 - binom_est_p(n, N, self._est_hedge))
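binom_est_p and binom_est_error are not shown in this listing. A plausible stand-in, assuming they implement the hedged binomial estimate of Ferrie and Blume-Kohout (arXiv:1201.1493), where the hedging parameter keeps the estimate strictly inside (0, 1):

    import numpy as np

    def binom_est_p(n, N, hedge=0.0):
        # Assumed form of the hedged estimate: (n + beta) / (N + 2 * beta).
        return (n + hedge) / (N + 2 * hedge)

    def binom_est_error(p, N, hedge=0.0):
        # Assumed form: rough standard error of the hedged estimate.
        return np.sqrt(p * (1 - p) / (N + 2 * hedge + 1))

    print(binom_est_p(0, 10, hedge=0.5))    # 0.0454..., never exactly 0
    print(binom_est_error(0.5, 100))        # ~0.0498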
Example #14
    def __init__(self, n=6):
        self.n = n
        Model.__init__(self)
Example #15
    def __init__(self, n=6):
        self.n = n
        Model.__init__(self)
Example #16
    def likelihood(self, outcomes, modelparams, expparams):
        return Model.pr0_to_likelihood_array(outcomes, modelparams)
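Every example above funnels pr0 through Model.pr0_to_likelihood_array, whose implementation is not shown on this page. A plausible stand-in consistent with how it is called, for a two-outcome model where pr0 has shape (n_models, n_exp):

    import numpy as np

    def pr0_to_likelihood_array(outcomes, pr0):
        # Hypothetical stand-in: stack Pr(outcome | model, experiment) for each
        # requested outcome, with Pr(1) = 1 - Pr(0).
        pr1 = 1 - pr0
        return np.stack([pr0 if o == 0 else pr1 for o in outcomes])

    pr0 = np.array([[0.9], [0.2]])                     # (n_models, n_exp)
    L = pr0_to_likelihood_array(np.array([0, 1]), pr0)
    print(L.shape)                                     # (2, 2, 1)
    print(L[:, 0, 0])                                  # [0.9 0.1]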