def test_log_det_ozone_shogun_exact(self):
    """Exact Shogun log-determinant of the ozone precision matrix Q
    matches a precomputed reference value to within delta=1."""
    posterior = OzonePosterior()
    kappa = 2 ** (-13.1)
    precision = posterior.create_Q_matrix(kappa)
    log_det = OzonePosterior.log_det_shogun_exact(precision)
    self.assertAlmostEqual(log_det, 2.317769370813052e+06, delta=1)
def test_log_likelihood_scikits_exact(self):
    """Full log-likelihood via the scikits backend matches a precomputed
    reference value to within delta=0.001."""
    tau = 2 ** (-11.35)
    kappa = 2 ** (-13.1)
    posterior = OzonePosterior(prior=None,
                               logdet_method="scikits",
                               solve_method="scikits")
    log_lik = posterior.log_likelihood(tau, kappa)
    self.assertAlmostEqual(log_lik, -9.336375798558606e+05, delta=0.001)
def main():
    """Run (or resume) a standard Metropolis chain on the ozone posterior
    and persist the final chain to disk.

    Side effects: configures logging, writes chain output under
    ``~/sample_ozone_posterior_average_serial``, and pickles the finished
    chain to ``final_chain`` in that folder.
    """
    Log.set_loglevel(logging.DEBUG)

    prior = Gaussian(Sigma=eye(2) * 100)
    # NOTE(review): other call sites in this file construct OzonePosterior
    # with the keyword "logdet_method"; the previous "logdet_alg" keyword
    # was inconsistent and would be rejected (or silently ignored).
    posterior = OzonePosterior(prior, logdet_method="scikits",
                               solve_method="scikits")

    # proposal covariance tuned per-dimension for (log2 tau, log2 kappa)
    proposal_cov = diag([4.000000000000000e-05, 1.072091680000000e+02])
    mcmc_sampler = StandardMetropolis(posterior, scale=1.0, cov=proposal_cov)

    start = asarray([-11.35, -13.1])
    mcmc_params = MCMCParams(start=start, num_iterations=5000)
    chain = MCMCChain(mcmc_sampler, mcmc_params)
    chain.append_mcmc_output(StatisticsOutput(print_from=1, lag=1))

    home = expanduser("~")
    folder = os.sep.join([home, "sample_ozone_posterior_average_serial"])
    store_chain_output = StoreChainOutput(folder)
    chain.append_mcmc_output(store_chain_output)

    # resume from the last stored chain if one exists
    loaded = store_chain_output.load_last_stored_chain()
    if loaded is None:
        logging.info("Running chain from scratch")
    else:
        logging.info("Running chain from iteration %d" % loaded.iteration)
        chain = loaded

    chain.run()

    # pickle requires a binary-mode handle on Python 3; text mode "w" would
    # fail at dump time. Assumes ``dump`` is pickle.dump — TODO confirm import.
    with open(folder + os.sep + "final_chain", "wb") as f:
        dump(chain, f)
def test_log_det_exact_toy_large_shogun(self):
    """Exact log-determinant of a large diagonal sparse matrix equals the
    sum of the logs of its diagonal entries."""
    # n must be an integer: 1e6 is a float, which randn() and spdiags()
    # reject as a dimension argument.
    n = int(1e6)
    d = abs(randn(n))
    Q = spdiags(d, 0, n, n)
    self.assertAlmostEqual(OzonePosterior.log_det_shogun_exact(Q),
                           sum(log(d)), delta=1e-5)
def test_log_det_exact_toy_small_scikits(self):
    """Scikits log-determinant of a tiny diagonal sparse matrix equals the
    sum of the logs of its diagonal entries."""
    size = 3
    diag_entries = abs(randn(size))
    matrix = spdiags(diag_entries, 0, size, size)
    expected = sum(log(diag_entries))
    self.assertAlmostEqual(OzonePosterior.log_det_scikits(matrix),
                           expected, delta=1e-15)
def __init__(self, aggregator, ozone_posterior, tau, kappa):
    """Store job parameters for later computation.

    A fresh OzonePosterior is built from the given one's prior and method
    settings rather than keeping a reference to the caller's instance.
    """
    IndependentJob.__init__(self, aggregator)
    self.ozone_posterior = OzonePosterior(ozone_posterior.prior,
                                          ozone_posterior.logdet_method,
                                          ozone_posterior.solve_method)
    self.tau = tau
    self.kappa = kappa
def compute(self):
    """Compute the log-determinant of Q or of M = Q + tau * A'A (selected by
    self.matrix_type) and submit it to the aggregator as a ScalarResult."""
    logging.debug("Entering")

    # Q is needed whichever matrix type was requested
    Q = self.ozone_posterior.create_Q_matrix(self.kappa)

    if self.matrix_type == "Q":
        logging.info("Matrix type Q")
        target = Q
    elif self.matrix_type == "M":
        logging.info("Matrix type M")
        _, A = OzonePosterior.load_ozone_data()
        target = Q + self.tau * A.T.dot(A)
    else:
        raise ValueError("Unknown matrix type: %s" % self.matrix_type)

    logdet = self.ozone_posterior.log_det_method(target)
    self.aggregator.submit_result(ScalarResult(logdet))
    logging.debug("Leaving")
def compute(self):
    """Evaluate one log-determinant (matrix Q, or M = Q + tau * A'A,
    chosen via self.matrix_type) and hand it to the aggregator."""
    logging.debug("Entering")

    # both branches start from the precision matrix Q
    Q = self.ozone_posterior.create_Q_matrix(self.kappa)

    if self.matrix_type == "Q":
        logging.info("Matrix type Q")
        logdet = self.ozone_posterior.log_det_method(Q)
    elif self.matrix_type == "M":
        logging.info("Matrix type M")
        _, A = OzonePosterior.load_ozone_data()
        M = Q + self.tau * A.T.dot(A)
        logdet = self.ozone_posterior.log_det_method(M)
    else:
        raise ValueError("Unknown matrix type: %s" % self.matrix_type)

    result = ScalarResult(logdet)
    self.aggregator.submit_result(result)
    logging.debug("Leaving")
def precompute_likelihood_estimates(self, tau, kappa):
    """Produce num_estimates log-likelihood estimates via distributed jobs.

    Submits one log-det job per estimate for Q and for M = Q + tau * A'A,
    plus a single job for the determinant-free part of the likelihood,
    waits for all of them, and combines the results. Estimates whose
    magnitude exceeds a crude overflow limit are logged in detail and
    removed before returning.
    """
    logging.debug("Entering")

    # submit all jobs for log-determinant Q
    aggregators_Q = []
    for _ in range(self.num_estimates):
        job = OzoneLogDetJob(ScalarResultAggregator(), self, tau, kappa, "Q")
        aggregators_Q.append(self.computation_engine.submit_job(job))

    # submit all jobs for log-determinant M
    aggregators_M = []
    for _ in range(self.num_estimates):
        job = OzoneLogDetJob(ScalarResultAggregator(), self, tau, kappa, "M")
        aggregators_M.append(self.computation_engine.submit_job(job))

    # submit job for remainder of likelihood
    job = OzoneLikelihoodWithoutLogDetJob(ScalarResultAggregator(), self,
                                          tau, kappa)
    aggregator_remainder = self.computation_engine.submit_job(job)

    # grab a coffee
    self.computation_engine.wait_for_all()

    # collect results from all aggregators
    log_dets_Q = zeros(self.num_estimates)
    log_dets_M = zeros(self.num_estimates)
    for i in range(self.num_estimates):
        aggregators_Q[i].finalize()
        aggregators_M[i].finalize()
        log_dets_Q[i] = aggregators_Q[i].get_final_result().result
        log_dets_M[i] = aggregators_M[i].get_final_result().result
        aggregators_Q[i].clean_up()
        aggregators_M[i].clean_up()

    aggregator_remainder.finalize()
    result_remainder = aggregator_remainder.get_final_result().result
    aggregator_remainder.clean_up()

    # load n since needed for likelihood
    y, _ = OzonePosterior.load_ozone_data()
    n = len(y)

    # construct all likelihood estimates
    log_det_parts = 0.5 * log_dets_Q + 0.5 * n * log(tau) - 0.5 * log_dets_M
    estimates = log_det_parts + result_remainder

    # crude check for an overflow to print error details
    limit = 1e100
    indices = where(abs(estimates) > limit)[0]
    if len(indices) > 0:
        logging.info("Log-likelihood estimates overflow occured at the following indices:")
        for idx in indices:
            logging.info("At index %d. Details are: " % idx)
            logging.info("log-det Q: " + aggregators_Q[idx].job_name +
                         ". Result is %f" % log_dets_Q[idx])
            logging.info("log-det M: " + aggregators_M[idx].job_name +
                         ". Result is %f" % log_dets_M[idx])
            logging.info("log-lik-without-log-det: " +
                         aggregator_remainder.job_name +
                         ". Result is %f" % result_remainder[idx])

        logging.info("Removing mentioned estimates from list")
        estimates = estimates[abs(estimates) < limit]
        logging.info("New number of estimates is %d, old was %d" %
                     (len(estimates), self.num_estimates))

    logging.debug("Leaving")
    return estimates
def precompute_likelihood_estimates(self, tau, kappa):
    """Produce num_estimates log-likelihood estimates via distributed jobs.

    Submits one log-det job per estimate for Q and for M = Q + tau * A'A,
    plus a single job for the determinant-free part of the likelihood,
    waits for all of them, and combines the results. Estimates whose
    magnitude exceeds a crude overflow limit are logged in detail and
    removed before returning.
    """
    logging.debug("Entering")

    # submit all jobs for log-determinant Q
    aggregators_Q = []
    for _ in range(self.num_estimates):
        job = OzoneLogDetJob(ScalarResultAggregator(), self, tau, kappa, "Q")
        aggregators_Q.append(self.computation_engine.submit_job(job))

    # submit all jobs for log-determinant M
    aggregators_M = []
    for _ in range(self.num_estimates):
        job = OzoneLogDetJob(ScalarResultAggregator(), self, tau, kappa, "M")
        aggregators_M.append(self.computation_engine.submit_job(job))

    # submit job for remainder of likelihood
    job = OzoneLikelihoodWithoutLogDetJob(ScalarResultAggregator(), self,
                                          tau, kappa)
    aggregator_remainder = self.computation_engine.submit_job(job)

    # grab a coffee
    self.computation_engine.wait_for_all()

    # collect results from all aggregators
    log_dets_Q = zeros(self.num_estimates)
    log_dets_M = zeros(self.num_estimates)
    for i in range(self.num_estimates):
        aggregators_Q[i].finalize()
        aggregators_M[i].finalize()
        log_dets_Q[i] = aggregators_Q[i].get_final_result().result
        log_dets_M[i] = aggregators_M[i].get_final_result().result
        aggregators_Q[i].clean_up()
        aggregators_M[i].clean_up()

    aggregator_remainder.finalize()
    result_remainder = aggregator_remainder.get_final_result().result
    aggregator_remainder.clean_up()

    # load n since needed for likelihood
    y, _ = OzonePosterior.load_ozone_data()
    n = len(y)

    # construct all likelihood estimates
    log_det_parts = 0.5 * log_dets_Q + 0.5 * n * log(tau) - 0.5 * log_dets_M
    estimates = log_det_parts + result_remainder

    # crude check for an overflow to print error details
    limit = 1e100
    indices = where(abs(estimates) > limit)[0]
    if len(indices) > 0:
        logging.info("Log-likelihood estimates overflow occured at the following indices:")
        for idx in indices:
            logging.info("At index %d. Details are: " % idx)
            logging.info("log-det Q: " + aggregators_Q[idx].job_name +
                         ". Result is %f" % log_dets_Q[idx])
            logging.info("log-det M: " + aggregators_M[idx].job_name +
                         ". Result is %f" % log_dets_M[idx])
            logging.info("log-lik-without-log-det: " +
                         aggregator_remainder.job_name +
                         ". Result is %f" % result_remainder[idx])

        logging.info("Removing mentioned estimates from list")
        estimates = estimates[abs(estimates) < limit]
        logging.info("New number of estimates is %d, old was %d" %
                     (len(estimates), self.num_estimates))

    logging.debug("Leaving")
    return estimates
def test_load_data(self):
    """Constructing an OzonePosterior with defaults must not raise
    (exercises the data-loading path in the constructor)."""
    OzonePosterior()
def __init__(self, num_estimates, prior):
    """Initialise the base posterior with the given prior and remember how
    many likelihood estimates to average over."""
    OzonePosterior.__init__(self, prior)
    self.num_estimates = num_estimates
def precompute_likelihood_estimates(self, tau, kappa):
    """Serially evaluate the base-class log-likelihood num_estimates times
    and return the values as a numpy array."""
    logging.debug("Entering")
    values = []
    for _ in range(self.num_estimates):
        values.append(OzonePosterior.log_likelihood(self, tau, kappa))
    logging.debug("Leaving")
    return asarray(values)