Example #1
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Calculates the likelihood function at the states specified 
        by modelparams and measurement specified by expparams.
        This is given by the Born rule and is the probability of
        outcomes given the state and measurement operator.
        
        Parameters
        ----------
        outcomes = 
            measurement outcome
        expparams = 
            Bloch vector of measurement axis and visibility
        modelparams = 
            quantum state Bloch vector
        """
        
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(QubitStatePauliModel, self).likelihood(outcomes, modelparams, expparams)
        
        # Note that expparams['axis'] has shape (n_exp, 3).
        pr0 = 0.5 * (1 + np.sum(modelparams * expparams['axis'], 1))

        # Note that expparams['vis'] has shape (n_exp, ).
        pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5

        pr0 = pr0[:, np.newaxis]

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
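
To see what this computes, here is a minimal, self-contained NumPy sketch of the same Born-rule calculation, with hypothetical toy states and a single measurement setting (the array values below are illustrative, not from the original code):

    import numpy as np

    # Hypothetical toy inputs: two candidate Bloch vectors and one z-axis
    # measurement at 90% visibility.
    modelparams = np.array([[0.0, 0.0, 1.0],   # the |0> state
                            [1.0, 0.0, 0.0]])  # the |+> state
    axis = np.array([[0.0, 0.0, 1.0]])
    vis = np.array([0.9])

    # Born rule: Pr(0) = (1 + r . n) / 2, then mixed towards 1/2 by the
    # visibility.
    pr0 = 0.5 * (1 + np.sum(modelparams * axis, 1))
    pr0 = vis * pr0 + (1 - vis) * 0.5
    print(pr0)  # Pr(0) for each state: 0.95 and 0.5
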
Example #2
    def likelihood(self, outcomes, modelparams, expparams):
        # We first call the superclass method, which basically
        # just makes sure that call count diagnostics are properly
        # logged.
        super(MultiCosModel, self).likelihood(outcomes, modelparams, expparams)

        # Next, since we have a two-outcome model, everything is defined by
        # Pr(0 | modelparams; expparams), so we find the probability of 0
        # for each model and each experiment.
        #
        # We do so by taking a product along the modelparam index (len 2,
        # indicating omega_1 or omega_2), then squaring the result.
        pr0 = np.prod(
            np.cos(
                # shape (n_models, 1, 2)
                modelparams[:, np.newaxis, :] *
                # shape (n_experiments, 2)
                expparams['ts']
            ),  # <- broadcasts to shape (n_models, n_experiments, 2).
            axis=2  # <- product over the final index (len 2)
        )**2  # square each element

        # Now we use pr0_to_likelihood_array to turn this two index array
        # above into the form expected by SMCUpdater and other consumers
        # of likelihood().
        return Model.pr0_to_likelihood_array(outcomes, pr0)
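
As a shape check, here is a small self-contained sketch of the broadcasting used above, with hypothetical toy frequencies and times (the structured dtype mimics the 'ts' field this model expects):

    import numpy as np

    # Hypothetical toy inputs: 3 models, each with two frequencies, and 4
    # experiments, each with a pair of evolution times in a structured field.
    modelparams = np.array([[0.5, 1.0],
                            [1.0, 2.0],
                            [2.0, 0.5]])
    expparams = np.empty((4,), dtype=[('ts', float, (2,))])
    expparams['ts'] = np.linspace(0.1, 1.0, 8).reshape(4, 2)

    # (n_models, 1, 2) * (n_experiments, 2) broadcasts to
    # (n_models, n_experiments, 2); the product over the last axis and the
    # square then give Pr(0) with shape (n_models, n_experiments).
    pr0 = np.prod(np.cos(modelparams[:, np.newaxis, :] * expparams['ts']), axis=2) ** 2
    print(pr0.shape)  # -> (3, 4)
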
Example #3
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Calculates the likelihood function at the states specified 
        by modelparams and measurement specified by expparams.
        This is given by the Born rule and is the probability of
        outcomes given the state and measurement operator.
        
        Parameters
        ----------
        outcomes = 
            measurement outcome
        expparams = 
            Bloch vector of measurement axis
        modelparams = 
            quantum state Bloch vector
        """

        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(RebitStatePauliModel, self).likelihood(outcomes, modelparams,
                                                     expparams)

        # Note that expparams['axis'] has shape (n_exp, 3).
        pr0 = 0.5 * (1 + np.sum(modelparams * expparams['axis'], 1))

        # Use the following hack if you don't want to ensure positive weights
        pr0[pr0 < 0] = 0
        pr0[pr0 > 1] = 1

        pr0 = pr0[:, np.newaxis]

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
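
Incidentally, the two masked assignments used as a "hack" above are equivalent to a single np.clip call; a minimal sketch:

    import numpy as np

    pr0 = np.array([-0.1, 0.4, 1.2])

    # Clamp into [0, 1], matching pr0[pr0 < 0] = 0 followed by pr0[pr0 > 1] = 1.
    pr0 = np.clip(pr0, 0, 1)
    print(pr0)  # -> [0.  0.4 1. ]
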
Example #4
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Calculates the likelihood function at the states specified 
        by modelparams and measurement specified by expparams.
        This is given by the Born rule and is the probability of
        outcomes given the state and measurement operator.
        """

        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(MultiQubitStatePauliModel,
              self).likelihood(outcomes, modelparams, expparams)

        # Note that expparams['pauli'] indexes which Pauli expectation value
        # each experiment measures.
        pr0 = 0.5 * (1 + modelparams[:, expparams['pauli']])

        # Use the following hack if you don't want to ensure positive weights
        pr0[pr0 < 0] = 0
        pr0[pr0 > 1] = 1

        # Note that expparams['vis'] has shape (n_exp, ).
        pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
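
To illustrate the fancy indexing above, here is a self-contained sketch with hypothetical toy values; the structured fields 'pauli' and 'vis' mimic the expparams dtype this model expects:

    import numpy as np

    # Hypothetical toy inputs: 2 models over 4 Pauli expectation values, and
    # 2 experiments selecting which expectation value to measure.
    modelparams = np.array([[0.9, 0.1, -0.2, 0.5],
                            [0.0, 0.3,  0.7, -0.4]])
    expparams = np.empty((2,), dtype=[('pauli', int), ('vis', float)])
    expparams['pauli'] = [0, 2]
    expparams['vis'] = [1.0, 0.8]

    # Column j of pr0 uses the expectation value chosen by experiment j.
    pr0 = 0.5 * (1 + modelparams[:, expparams['pauli']])
    pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5
    print(pr0)  # shape (2, 2)
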
Example #5
 def likelihood(self, outcomes, modelparams, expparams):
     """
     Calculates the likelihood function at the states specified 
     by modelparams and measurement specified by expparams.
     This is given by the Born rule and is the probability of
     outcomes given the state and measurement operator.
     
     Parameters
     ----------
     outcomes = 
         measurement outcome
     expparams = 
         Bloch vector of measurement axis
     modelparams = 
         quantum state Bloch vector
     """
     
     # By calling the superclass implementation, we can consolidate
     # call counting there.
     super(RebitStatePauliModel, self).likelihood(outcomes, modelparams, expparams)
     
     # Note that expparams['axis'] has shape (n_exp, 3).
     pr0 = 0.5 * (1 + np.sum(modelparams * expparams['axis'], 1))

     # Use the following hack if you don't want to ensure positive weights
     pr0[pr0 < 0] = 0
     pr0[pr0 > 1] = 1

     pr0 = pr0[:, np.newaxis]

     # Now we concatenate over outcomes.
     return Model.pr0_to_likelihood_array(outcomes, pr0)
Example #6
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Calculates the likelihood function at the states specified 
        by modelparams and measurement specified by expparams.
        This is given by the Born rule and is the probability of
        outcomes given the state and measurement operator.
        
        Parameters
        ----------
        outcomes = 
            measurement outcome
        expparams = 
            Bloch vector of measurement axis and visibility
        modelparams = 
            quantum state Bloch vector
        """

        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(QubitStatePauliModel, self).likelihood(outcomes, modelparams,
                                                     expparams)

        # Note that expparams['axis'] has shape (n_exp, 3).
        pr0 = 0.5 * (1 + np.sum(modelparams * expparams['axis'], 1))

        # Note that expparams['vis'] has shape (n_exp, ).
        pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5

        pr0 = pr0[:, np.newaxis]

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
Example #7
    def likelihood(self, outcomes, modelparams, expparams):
        """
        Calculates the likelihood function at the states specified 
        by modelparams and measurement specified by expparams.
        This is given by the Born rule and is the probability of
        outcomes given the state and measurement operator.
        """
        
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(MultiQubitStatePauliModel, self).likelihood(outcomes, modelparams, expparams)

        # Note that expparams['pauli'] indexes which Pauli expectation value
        # each experiment measures.
        pr0 = 0.5 * (1 + modelparams[:, expparams['pauli']])

        # Use the following hack if you don't want to ensure positive weights
        pr0[pr0 < 0] = 0
        pr0[pr0 > 1] = 1
        
        # Note that expparams['vis'] has shape (n_exp, ).
        pr0 = expparams['vis'] * pr0 + (1 - expparams['vis']) * 0.5

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)        
Example #8
    def likelihood(self, outcomes, modelparams, expparams):
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(AcceleratedPrecessionModel, self).likelihood(outcomes, modelparams, expparams)
        
        # Possibly add a second axis to modelparams.
        if len(modelparams.shape) == 1:
            modelparams = modelparams[..., np.newaxis]
        
        # Convert to float32 if needed.
        mps = modelparams.astype(np.float32)
        eps = expparams.astype(np.float32)

        # Allocating a buffer for the pr0 returns.
        pr0 = np.empty((mps.shape[0], eps.shape[0]), dtype=mps.dtype)

        # Move buffers to the GPU.
        mf = cl.mem_flags
        
        mps_buf = cl.Buffer(self._ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=mps)
        eps_buf = cl.Buffer(self._ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=eps)
        dest_buf = cl.Buffer(self._ctx, mf.WRITE_ONLY, pr0.nbytes)

        # Run the kernel with global worksize (n_models, n_experiments).
        self._prg.cos_model(self._queue, pr0.shape, None, np.int32(eps.shape[0]), mps_buf, eps_buf, dest_buf)

        # Copy the buffer back from the GPU and free memory there.
        cl.enqueue_copy(self._queue, pr0, dest_buf)
        mps_buf.release()
        eps_buf.release()
        dest_buf.release()
        
        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
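
For readers unfamiliar with the pyopencl calls above, here is a minimal, self-contained sketch of the same host-to-device round trip; the kernel is a trivial stand-in, since the actual cos_model kernel source is not shown here:

    import numpy as np
    import pyopencl as cl

    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)

    # Trivial stand-in kernel: doubles each element of the input buffer.
    prg = cl.Program(ctx, """
    __kernel void double_it(__global const float *src, __global float *dst) {
        int i = get_global_id(0);
        dst[i] = 2.0f * src[i];
    }
    """).build()

    src = np.arange(8, dtype=np.float32)
    dst = np.empty_like(src)

    # Same buffer pattern as above: copy in, run, copy out, release.
    mf = cl.mem_flags
    src_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=src)
    dst_buf = cl.Buffer(ctx, mf.WRITE_ONLY, dst.nbytes)
    prg.double_it(queue, src.shape, None, src_buf, dst_buf)
    cl.enqueue_copy(queue, dst, dst_buf)
    src_buf.release()
    dst_buf.release()
    print(dst)  # -> [ 0.  2.  4.  6.  8. 10. 12. 14.]
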
Example #9
 def likelihood(self, outcomes, modelparams, expparams):
     # We first call the superclass method, which basically
     # just makes sure that call count diagnostics are properly
     # logged.
     super(MultiCosModel, self).likelihood(outcomes, modelparams, expparams)
     
     # Next, since we have a two-outcome model, everything is defined by
     # Pr(0 | modelparams; expparams), so we find the probability of 0
     # for each model and each experiment.
     #
     # We do so by taking a product along the modelparam index (len 2,
     # indicating omega_1 or omega_2), then squaring the result.
     pr0 = np.prod(
         np.cos(
             # shape (n_models, 1, 2)
             modelparams[:, np.newaxis, :] *
             # shape (n_experiments, 2)
             expparams['ts']
         ), # <- broadcasts to shape (n_models, n_experiments, 2).
         axis=2 # <- product over the final index (len 2)
     ) ** 2 # square each element
     
     # Now we use pr0_to_likelihood_array to turn this two index array
     # above into the form expected by SMCUpdater and other consumers
     # of likelihood().
     return Model.pr0_to_likelihood_array(outcomes, pr0)
Example #10
    def likelihood(self, outcomes, modelparams, expparams):
        m = self.n_had
        
        # The first and last 2**m entries of f.
        F0 = self.f[:2**m]
        F1 = self.f[-2**m:]

        # Count the entries whose last bit is 0 (the even entries).
        count0 = np.sum((F0 + 1) % 2)
        count1 = np.sum((F1 + 1) % 2)

        # Probability of getting outcome 0.
        pr0 = modelparams * count0 / (2**m) + (1 - modelparams) * count1 / (2**m)

        # Concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
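
The (F + 1) % 2 trick above maps 0 to 1 and 1 to 0, so summing counts the zero entries; a tiny sketch with a hypothetical bit array:

    import numpy as np

    F = np.array([0, 1, 1, 0, 0])
    print(np.sum((F + 1) % 2))  # -> 3, the number of zeros in F
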
Example #11
    def likelihood(self, outcomes, modelparams, expparams):
        m = self.n_had

        # The first and last 2**m entries of f.
        F0 = self.f[:2**m]
        F1 = self.f[-2**m:]

        # Count the entries whose last bit is 0 (the even entries).
        count0 = np.sum((F0 + 1) % 2)
        count1 = np.sum((F1 + 1) % 2)

        # Probability of getting outcome 0.
        pr0 = modelparams * count0 / (2**m) + (1 - modelparams) * count1 / (2**m)

        # Concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
Example #12
 def likelihood(self, outcomes, modelparams, expparams):
     super(RandomizedBenchmarkingModel, self).likelihood(outcomes, modelparams, expparams)
     
     if self._il:
         p_tilde, p, A, B = modelparams.T[:, :, np.newaxis]
         
         p_C = p_tilde * p
         
         p = np.where(expparams['reference'][np.newaxis, :], p, p_C)
     else:
         p, A, B = modelparams.T[:, :, np.newaxis]
         
     m = expparams['m'][np.newaxis, :]
     
     pr0 = np.zeros((modelparams.shape[0], expparams.shape[0]))
     pr0[:, :] = 1 - (A * (p ** m) + B)
     
     return Model.pr0_to_likelihood_array(outcomes, pr0)
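
Numerically, this model evaluates the standard randomized benchmarking decay A p^m + B and returns its complement as Pr(0); a self-contained sketch with hypothetical toy parameters:

    import numpy as np

    # Hypothetical toy values: decay parameter p and SPAM constants A, B.
    p, A, B = 0.99, 0.5, 0.5
    m = np.array([1, 10, 100])  # sequence lengths

    pr0 = 1 - (A * p**m + B)
    print(pr0)  # -> approx. [0.005, 0.048, 0.317]
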
Example #13
    def likelihood(self, outcomes, modelparams, expparams):
        super(RandomizedBenchmarkingModel,
              self).likelihood(outcomes, modelparams, expparams)

        if self._il:
            p_tilde, p, A, B = modelparams.T[:, :, np.newaxis]

            p_C = p_tilde * p

            p = np.where(expparams['reference'][np.newaxis, :], p, p_C)
        else:
            p, A, B = modelparams.T[:, :, np.newaxis]

        m = expparams['m'][np.newaxis, :]

        pr0 = np.zeros((modelparams.shape[0], expparams.shape[0]))
        pr0[:, :] = 1 - (A * (p**m) + B)

        return Model.pr0_to_likelihood_array(outcomes, pr0)
Example #14
 def likelihood(self, outcomes, modelparams, expparams):
     # FIXME: at present, will proceed until ALL model experiment pairs
     #        are below error tol.
     #        Should disable one-by-one, but that's tricky.
     super(ALEApproximateModel, self).likelihood(outcomes, modelparams, expparams)
     # We will use the fact we have assumed a two-outcome model to make the
     # problem easier. As such, we will rely on the static method 
     # Model.pr0_to_likelihood_array.
     
     # Start off with min_samp samples.
     n = np.zeros((modelparams.shape[0], expparams.shape[0]))
     for N in count(start=self._min_samp, step=self._samp_step):
         sim_data = self._simulator.simulate_experiment(
             modelparams, expparams, repeat=self._samp_step
         )
         n += np.sum(sim_data, axis=0) # Sum over the outcomes axis to find the
                                       # number of 1s.
         error_est_p1 = binom_est_error(binom_est_p(n, N, self._adapt_hedge), N, self._adapt_hedge)
         if np.all(error_est_p1 < self._error_tol): break
         
     return Model.pr0_to_likelihood_array(outcomes, 1 - binom_est_p(n, N, self._est_hedge))
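
The loop above keeps drawing batches of simulated data until the error estimate falls below tolerance. Here is a minimal self-contained sketch of that adaptive-sampling pattern, using a plain Bernoulli sampler and a standard-error stand-in rather than the actual binom_est_p / binom_est_error utilities:

    import numpy as np
    from itertools import count

    rng = np.random.default_rng(0)
    p_true, samp_step, tol = 0.3, 100, 0.01

    # Draw batches of samp_step Bernoulli samples until the standard error
    # of the estimated probability drops below tol.
    n_ones = 0
    for N in count(start=samp_step, step=samp_step):
        n_ones += rng.binomial(samp_step, p_true)
        p_hat = n_ones / N
        if np.sqrt(p_hat * (1 - p_hat) / N) < tol:
            break
    print(N, p_hat)
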
Example #15
    def likelihood(self, outcomes, modelparams, expparams):
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(AcceleratedPrecessionModel,
              self).likelihood(outcomes, modelparams, expparams)

        # Possibly add a second axis to modelparams.
        if len(modelparams.shape) == 1:
            modelparams = modelparams[..., np.newaxis]

        # Convert to float32 if needed.
        mps = modelparams.astype(np.float32)
        eps = expparams.astype(np.float32)

        # Allocating a buffer for the pr0 returns.
        pr0 = np.empty((mps.shape[0], eps.shape[0]), dtype=mps.dtype)

        # Move buffers to the GPU.
        mf = cl.mem_flags

        mps_buf = cl.Buffer(self._ctx,
                            mf.READ_ONLY | mf.COPY_HOST_PTR,
                            hostbuf=mps)
        eps_buf = cl.Buffer(self._ctx,
                            mf.READ_ONLY | mf.COPY_HOST_PTR,
                            hostbuf=eps)
        dest_buf = cl.Buffer(self._ctx, mf.WRITE_ONLY, pr0.nbytes)

        # Run the kernel with global worksize (n_models, n_experiments).
        self._prg.cos_model(self._queue, pr0.shape, None,
                            np.int32(eps.shape[0]), mps_buf, eps_buf, dest_buf)

        # Copy the buffer back from the GPU and free memory there.
        cl.enqueue_copy(self._queue, pr0, dest_buf)
        mps_buf.release()
        eps_buf.release()
        dest_buf.release()

        # Now we concatenate over outcomes.
        return Model.pr0_to_likelihood_array(outcomes, pr0)
Example #16
    def likelihood(self, outcomes, modelparams, expparams):
        # FIXME: at present, will proceed until ALL model experiment pairs
        #        are below error tol.
        #        Should disable one-by-one, but that's tricky.
        super(ALEApproximateModel, self).likelihood(outcomes, modelparams,
                                                    expparams)
        # We will use the fact we have assumed a two-outcome model to make the
        # problem easier. As such, we will rely on the static method
        # Model.pr0_to_likelihood_array.

        # Start off with min_samp samples.
        n = np.zeros((modelparams.shape[0], expparams.shape[0]))
        for N in count(start=self._min_samp, step=self._samp_step):
            sim_data = self._simulator.simulate_experiment(
                modelparams, expparams, repeat=self._samp_step)
            # Sum over the outcomes axis to find the number of 1s.
            n += np.sum(sim_data, axis=0)
            error_est_p1 = binom_est_error(
                binom_est_p(n, N, self._adapt_hedge), N, self._adapt_hedge)
            if np.all(error_est_p1 < self._error_tol): break

        return Model.pr0_to_likelihood_array(
            outcomes, 1 - binom_est_p(n, N, self._est_hedge))
Example #17
 def likelihood(self, outcomes, modelparams, expparams):
     super(MockModel, self).likelihood(outcomes, modelparams, expparams)
     pr0 = np.ones((modelparams.shape[0], expparams.shape[0])) / 2
     return Model.pr0_to_likelihood_array(outcomes, pr0)
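
Finally, every example above finishes with Model.pr0_to_likelihood_array. Assuming QInfer's documented convention that likelihood arrays have shape (n_outcomes, n_models, n_experiments), a hypothetical stand-in for a two-outcome model looks roughly like this:

    import numpy as np

    def pr0_to_likelihood_array(outcomes, pr0):
        # Stack Pr(0) and Pr(1) = 1 - Pr(0) along a new leading axis, then
        # select the rows matching the requested outcomes.
        return np.stack([pr0, 1 - pr0])[outcomes, ...]

    pr0 = np.full((3, 2), 0.5)  # 3 models, 2 experiments
    L = pr0_to_likelihood_array(np.array([0, 1]), pr0)
    print(L.shape)  # -> (2, 3, 2)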