Example no. 1
0
    def likelihood(self, outcomes, modelparams, expparams):
        """Estimate the likelihood array by adaptive binomial sampling.

        Repeatedly simulates batches of experiments on the underlying model,
        accumulating counts of 1-outcomes, until the estimated error of the
        binomial estimator drops below ``self._error_tol`` for every
        (model, experiment) pair.
        """
        # FIXME: at present, will proceed until ALL model experiment pairs
        #        are below error tol.
        #        Should disable one-by-one, but that's tricky.
        super(ALEApproximateModel, self).likelihood(outcomes, modelparams, expparams)
        sim = self.underlying_model

        # Since the model is assumed to have exactly two outcomes, the full
        # likelihood array can be rebuilt from Pr(0) alone via the static
        # helper FiniteOutcomeModel.pr0_to_likelihood_array.

        # Running count of 1-outcomes, one cell per (model, experiment) pair.
        ones_count = np.zeros((modelparams.shape[0], expparams.shape[0]))
        # Total shot count starts at min_samp and grows by samp_step per pass.
        total_shots = self._min_samp
        while True:
            batch = sim.simulate_experiment(
                modelparams, expparams, repeat=self._samp_step)
            # Collapse the outcomes axis to count how many 1s were drawn.
            ones_count += np.sum(batch, axis=0)
            p1_error = binom_est_error(
                binom_est_p(ones_count, total_shots, self._adapt_hedge),
                total_shots, self._adapt_hedge)
            if np.all(p1_error < self._error_tol):
                break
            total_shots += self._samp_step

        return FiniteOutcomeModel.pr0_to_likelihood_array(
            outcomes, 1 - binom_est_p(ones_count, total_shots, self._est_hedge))
Example no. 2
0
 def likelihood(self, outcomes, modelparams, expparams):
     """Return the underlying model's likelihoods, perturbed by random noise.

     The noise scale depends on ``self._mode``: a fixed tolerance for ALE,
     the estimated binomial-estimator error for MLE.
     """
     # Delegate to the superclass first so that call counting stays
     # consolidated there; its return value is intentionally unused.
     super(PoisonedModel, self).likelihood(outcomes, modelparams, expparams)

     # Undisturbed likelihoods from the wrapped model.
     like = self.underlying_model.likelihood(outcomes, modelparams, expparams)

     # Standard-normal [N(0, 1)] draws, rescaled below per the poisoning mode.
     noise = np.random.normal(size=like.shape)
     if self._mode == PoisonModes.ALE:
         # ALE: a constant tolerance sets the noise scale.
         noise = noise * self._tol
     elif self._mode == PoisonModes.MLE:
         # MLE: scale by the estimated error in the binomial estimator.
         noise = noise * binom_est_error(p=like, N=self._n_samples, hedge=self._hedge)

     # Truncate into [0, 1] in place and hand back the poisoned array.
     np.clip(like + noise, 0, 1, out=like)
     return like
Example no. 3
0
    def likelihood(self, outcomes, modelparams, expparams):
        """Approximate the likelihood array via adaptive binomial sampling.

        Draws simulated outcomes in batches of ``self._samp_step`` until the
        binomial error estimate is below ``self._error_tol`` for every
        (model, experiment) pair, then converts the hedged Pr(1) estimate
        into a full likelihood array.
        """
        # FIXME: at present, will proceed until ALL model experiment pairs
        #        are below error tol.
        #        Should disable one-by-one, but that's tricky.
        super(ALEApproximateModel, self).likelihood(outcomes, modelparams, expparams)
        underlying = self.underlying_model
        hedge_adapt = self._adapt_hedge
        step = self._samp_step
        tol = self._error_tol

        # The two-outcome assumption lets each simulated batch be summarized
        # by its number of 1s; FiniteOutcomeModel.pr0_to_likelihood_array
        # then rebuilds the full likelihood array from Pr(0).

        # Running tally of observed 1s for every (model, experiment) pair.
        tally = np.zeros((modelparams.shape[0], expparams.shape[0]))
        for N in count(start=self._min_samp, step=step):
            draws = underlying.simulate_experiment(modelparams, expparams, repeat=step)
            tally += draws.sum(axis=0)  # outcomes axis -> number of 1s
            p1 = binom_est_p(tally, N, hedge_adapt)
            if np.all(binom_est_error(p1, N, hedge_adapt) < tol):
                break

        pr1 = binom_est_p(tally, N, self._est_hedge)
        return FiniteOutcomeModel.pr0_to_likelihood_array(outcomes, 1 - pr1)
Example no. 4
0
    def likelihood(self, outcomes, modelparams, expparams):
        """Poison the underlying model's likelihoods with scaled Gaussian noise.

        ALE mode scales the noise by a fixed tolerance; MLE mode scales it by
        the estimated binomial-estimator error. The result is clipped to [0, 1].
        """
        # Superclass call consolidates call counting there; its result is
        # discarded on purpose.
        super(PoisonedModel, self).likelihood(outcomes, modelparams, expparams)

        # Clean likelihoods from the wrapped model.
        L = self.underlying_model.likelihood(outcomes, modelparams, expparams)

        # Draw standard-normal [N(0, 1)] variates, then pick the noise scale.
        perturbation = np.random.normal(size=L.shape)
        if self._mode == PoisonModes.ALE:
            scale = self._tol  # constant tolerance
        elif self._mode == PoisonModes.MLE:
            # estimated error in the binomial estimator
            scale = binom_est_error(p=L, N=self._n_samples, hedge=self._hedge)
        else:
            scale = 1  # any other mode: variates stay unscaled, as before

        # Clip the poisoned likelihoods back into [0, 1] and return.
        np.clip(L + perturbation * scale, 0, 1, out=L)
        return L