def test_equal_estimates(self):
    """Smoke test: Russian-Roulette exponential estimator on constant,
    hugely negative log-estimates (naive exp() would underflow to 0)."""
    Log.set_loglevel(logging.DEBUG)
    rr = RussianRoulette(1e-5, block_size=100)
    # BUG FIX: the original drew randn(1000) and immediately overwrote the
    # result on the next statement; the dead draw has been removed.
    log_estimates = ones(1000) * (-942478.011941)
    # Parenthesized single-argument print is valid in both Python 2 and 3.
    print(rr.exponential(log_estimates))
def main():
    """Sample the ozone posterior serially with a standard Metropolis sampler.

    Resumes from the last chain checkpoint stored in the output folder if one
    exists, then pickles the final chain to disk.
    """
    Log.set_loglevel(logging.DEBUG)

    # Wide Gaussian prior over the two (log-)hyper-parameters.
    prior = Gaussian(Sigma=eye(2) * 100)
    posterior = OzonePosterior(prior, logdet_alg="scikits",
                               solve_method="scikits")

    # Per-dimension proposal variances differ by ~6 orders of magnitude.
    proposal_cov = diag([4.000000000000000e-05, 1.072091680000000e+02])
    mcmc_sampler = StandardMetropolis(posterior, scale=1.0, cov=proposal_cov)

    start = asarray([-11.35, -13.1])
    mcmc_params = MCMCParams(start=start, num_iterations=5000)
    chain = MCMCChain(mcmc_sampler, mcmc_params)
    chain.append_mcmc_output(StatisticsOutput(print_from=1, lag=1))

    home = expanduser("~")
    folder = os.sep.join([home, "sample_ozone_posterior_average_serial"])
    store_chain_output = StoreChainOutput(folder)
    chain.append_mcmc_output(store_chain_output)

    # Resume from the last stored checkpoint if one exists.
    loaded = store_chain_output.load_last_stored_chain()
    if loaded is None:
        logging.info("Running chain from scratch")
    else:
        logging.info("Running chain from iteration %d", loaded.iteration)
        chain = loaded

    chain.run()

    # 'with' guarantees the file is closed even if dump() raises
    # (the original leaked the handle on exception).
    with open(folder + os.sep + "final_chain", "w") as f:
        dump(chain, f)
def main():
    """Sample the ozone posterior serially with a standard Metropolis sampler.

    Resumes from the last chain checkpoint stored in the output folder if one
    exists, then pickles the final chain to disk.
    """
    Log.set_loglevel(logging.DEBUG)

    # Wide Gaussian prior over the two (log-)hyper-parameters.
    prior = Gaussian(Sigma=eye(2) * 100)
    posterior = OzonePosterior(prior, logdet_alg="scikits",
                               solve_method="scikits")

    # Per-dimension proposal variances differ by ~6 orders of magnitude.
    proposal_cov = diag([4.000000000000000e-05, 1.072091680000000e+02])
    mcmc_sampler = StandardMetropolis(posterior, scale=1.0, cov=proposal_cov)

    start = asarray([-11.35, -13.1])
    mcmc_params = MCMCParams(start=start, num_iterations=5000)
    chain = MCMCChain(mcmc_sampler, mcmc_params)
    chain.append_mcmc_output(StatisticsOutput(print_from=1, lag=1))

    home = expanduser("~")
    folder = os.sep.join([home, "sample_ozone_posterior_average_serial"])
    store_chain_output = StoreChainOutput(folder)
    chain.append_mcmc_output(store_chain_output)

    # Resume from the last stored checkpoint if one exists.
    loaded = store_chain_output.load_last_stored_chain()
    if loaded is None:
        logging.info("Running chain from scratch")
    else:
        logging.info("Running chain from iteration %d", loaded.iteration)
        chain = loaded

    chain.run()

    # 'with' guarantees the file is closed even if dump() raises
    # (the original leaked the handle on exception).
    with open(folder + os.sep + "final_chain", "w") as f:
        dump(chain, f)
def prepare_engine(submit_type='local', duration_job_min=60 * 4):
    """Create and return a computation engine.

    Parameters
    ----------
    submit_type : str
        'slurm' for a SlurmComputationEngine, 'local' for a
        SerialComputationEngine.
    duration_job_min : int
        Walltime passed to the batch parameters (minutes per the name;
        the unit expected by BatchClusterParameters is not visible here --
        TODO confirm).

    Raises
    ------
    ValueError
        If submit_type is neither 'slurm' nor 'local'.  (BUG FIX: the
        original fell through and raised NameError on the unbound
        `engine` at return.)
    """
    # ---------------------
    Log.set_loglevel(20)  # 20 == logging.INFO
    logger.info("Start")

    foldername = expanduser("~") + '/slurm_jobs'
    if not os.path.exists(foldername):
        os.makedirs(foldername)
    logger.info("Setting engine folder to %s", foldername)

    logger.info("Creating batch parameter instance")
    # Pin jobs to specific partitions via an extra #SBATCH line.
    johns_slurm_hack = "#SBATCH --partition=intel-ivy,wrkstn,compute"
    timestr = time.strftime("%Y%m%d-%H%M%S")
    batch_parameters = BatchClusterParameters(
        max_walltime=duration_job_min,
        foldername=foldername,
        job_name_base="sim_" + timestr + "_",
        parameter_prefix=johns_slurm_hack)

    if submit_type == 'slurm':
        logger.info("Creating slurm engine instance")
        engine = SlurmComputationEngine(batch_parameters)
    elif submit_type == "local":
        logger.info("Creating serial engine instance")
        engine = SerialComputationEngine()
    else:
        raise ValueError("Unknown submit_type: %r" % (submit_type,))
    # ---------------------
    return engine
def test_equal_estimates(self):
    """Smoke test: Russian-Roulette exponential estimator on constant,
    hugely negative log-estimates (naive exp() would underflow to 0)."""
    Log.set_loglevel(logging.DEBUG)
    rr = RussianRoulette(1e-5, block_size=100)
    # BUG FIX: the original drew randn(1000) and immediately overwrote the
    # result on the next statement; the dead draw has been removed.
    log_estimates = ones(1000) * (-942478.011941)
    # Parenthesized single-argument print is valid in both Python 2 and 3.
    print(rr.exponential(log_estimates))
def main():
    """Sample the ozone posterior with Russian-Roulette likelihood estimates
    computed on an SGE cluster.

    Resumes from the last stored chain checkpoint if present and pickles the
    final chain to disk.
    """
    Log.set_loglevel(logging.DEBUG)

    prior = Gaussian(Sigma=eye(2) * 100)
    num_estimates = 1000

    home = expanduser("~")
    folder = os.sep.join([home, "sample_ozone_posterior_rr_sge"])

    # cluster admin set project jump for me to exclusively allocate nodes
    parameter_prefix = ""  # #$ -P jump"

    cluster_parameters = BatchClusterParameters(
        foldername=folder,
        memory=7.8,
        loglevel=logging.DEBUG,
        parameter_prefix=parameter_prefix,
        max_walltime=60 * 60 * 24 - 1)  # one second under a day
    computation_engine = SGEComputationEngine(cluster_parameters,
                                              check_interval=10)

    rr_instance = RussianRoulette(1e-3, block_size=400)

    posterior = OzonePosteriorRREngine(rr_instance=rr_instance,
                                       computation_engine=computation_engine,
                                       num_estimates=num_estimates,
                                       prior=prior)
    posterior.logdet_method = "shogun_estimate"

    proposal_cov = diag([4.000000000000000e-05, 1.072091680000000e+02])
    mcmc_sampler = StandardMetropolis(posterior, scale=1.0, cov=proposal_cov)

    start = asarray([-11.55, -10.1])
    mcmc_params = MCMCParams(start=start, num_iterations=5000)
    chain = MCMCChain(mcmc_sampler, mcmc_params)

    # chain.append_mcmc_output(PlottingOutput(None, plot_from=1, lag=1))
    chain.append_mcmc_output(StatisticsOutput(print_from=1, lag=1))

    store_chain_output = StoreChainOutput(folder, lag=1)
    chain.append_mcmc_output(store_chain_output)

    # Resume from the last stored checkpoint if one exists.
    loaded = store_chain_output.load_last_stored_chain()
    if loaded is None:
        logging.info("Running chain from scratch")
    else:
        logging.info("Running chain from iteration %d", loaded.iteration)
        chain = loaded

    chain.run()

    # 'with' guarantees the file is closed even if dump() raises
    # (the original leaked the handle on exception).
    with open(folder + os.sep + "final_chain", "w") as f:
        dump(chain, f)
def main():
    """Sample the ozone posterior by averaging estimates on a Slurm cluster,
    falling back to a serial engine when `sbatch` is unavailable.

    Resumes from the last stored chain checkpoint if present and pickles the
    final chain to disk.
    """
    Log.set_loglevel(logging.DEBUG)

    modulename = "sample_ozone_posterior_average_slurm"
    if not FileSystem.cmd_exists("sbatch"):
        engine = SerialComputationEngine()
    else:
        # BUG FIX: the original assigned johns_slurm_hack twice in a row;
        # the first value (including the "wrkstn" partition) was dead code
        # and has been removed.
        johns_slurm_hack = "#SBATCH --partition=intel-ivy,compute"

        folder = os.sep + os.sep.join(["nfs", "data3", "ucabhst", modulename])
        batch_parameters = BatchClusterParameters(
            foldername=folder,
            max_walltime=24 * 60 * 60,
            resubmit_on_timeout=False,
            memory=3,
            parameter_prefix=johns_slurm_hack)
        engine = SlurmComputationEngine(batch_parameters, check_interval=1,
                                        do_clean_up=True)

    prior = Gaussian(Sigma=eye(2) * 100)
    num_estimates = 100

    posterior = OzonePosteriorAverageEngine(computation_engine=engine,
                                            num_estimates=num_estimates,
                                            prior=prior)
    posterior.logdet_method = "shogun_estimate"

    proposal_cov = diag([4.000000000000000e-05, 1.072091680000000e+02])
    mcmc_sampler = StandardMetropolis(posterior, scale=1.0, cov=proposal_cov)

    start = asarray([-11.35, -13.1])
    mcmc_params = MCMCParams(start=start, num_iterations=2000)
    chain = MCMCChain(mcmc_sampler, mcmc_params)
    chain.append_mcmc_output(StatisticsOutput(print_from=1, lag=1))

    home = expanduser("~")
    folder = os.sep.join([home, modulename])
    store_chain_output = StoreChainOutput(folder)
    chain.append_mcmc_output(store_chain_output)

    # Resume from the last stored checkpoint if one exists.
    loaded = store_chain_output.load_last_stored_chain()
    if loaded is None:
        logging.info("Running chain from scratch")
    else:
        logging.info("Running chain from iteration %d", loaded.iteration)
        chain = loaded

    chain.run()

    # 'with' guarantees the file is closed even if dump() raises
    # (the original leaked the handle on exception).
    with open(folder + os.sep + "final_chain", "w") as f:
        dump(chain, f)
def main():
    """Sample the ozone posterior with Russian-Roulette likelihood estimates
    computed on an SGE cluster.

    Resumes from the last stored chain checkpoint if present and pickles the
    final chain to disk.
    """
    Log.set_loglevel(logging.DEBUG)

    prior = Gaussian(Sigma=eye(2) * 100)
    num_estimates = 1000

    home = expanduser("~")
    folder = os.sep.join([home, "sample_ozone_posterior_rr_sge"])

    # cluster admin set project jump for me to exclusively allocate nodes
    parameter_prefix = ""  # #$ -P jump"

    cluster_parameters = BatchClusterParameters(
        foldername=folder,
        memory=7.8,
        loglevel=logging.DEBUG,
        parameter_prefix=parameter_prefix,
        max_walltime=60 * 60 * 24 - 1)  # one second under a day
    computation_engine = SGEComputationEngine(cluster_parameters,
                                              check_interval=10)

    rr_instance = RussianRoulette(1e-3, block_size=400)

    posterior = OzonePosteriorRREngine(rr_instance=rr_instance,
                                       computation_engine=computation_engine,
                                       num_estimates=num_estimates,
                                       prior=prior)
    posterior.logdet_method = "shogun_estimate"

    proposal_cov = diag([4.000000000000000e-05, 1.072091680000000e+02])
    mcmc_sampler = StandardMetropolis(posterior, scale=1.0, cov=proposal_cov)

    start = asarray([-11.55, -10.1])
    mcmc_params = MCMCParams(start=start, num_iterations=5000)
    chain = MCMCChain(mcmc_sampler, mcmc_params)

    # chain.append_mcmc_output(PlottingOutput(None, plot_from=1, lag=1))
    chain.append_mcmc_output(StatisticsOutput(print_from=1, lag=1))

    store_chain_output = StoreChainOutput(folder, lag=1)
    chain.append_mcmc_output(store_chain_output)

    # Resume from the last stored checkpoint if one exists.
    loaded = store_chain_output.load_last_stored_chain()
    if loaded is None:
        logging.info("Running chain from scratch")
    else:
        logging.info("Running chain from iteration %d", loaded.iteration)
        chain = loaded

    chain.run()

    # 'with' guarantees the file is closed even if dump() raises
    # (the original leaked the handle on exception).
    with open(folder + os.sep + "final_chain", "w") as f:
        dump(chain, f)
def main():
    """Sample the ozone posterior by averaging estimates on a Slurm cluster,
    falling back to a serial engine when `sbatch` is unavailable.

    Resumes from the last stored chain checkpoint if present and pickles the
    final chain to disk.
    """
    Log.set_loglevel(logging.DEBUG)

    modulename = "sample_ozone_posterior_average_slurm"
    if not FileSystem.cmd_exists("sbatch"):
        engine = SerialComputationEngine()
    else:
        # BUG FIX: the original assigned johns_slurm_hack twice in a row;
        # the first value (including the "wrkstn" partition) was dead code
        # and has been removed.
        johns_slurm_hack = "#SBATCH --partition=intel-ivy,compute"

        folder = os.sep + os.sep.join(["nfs", "data3", "ucabhst", modulename])
        batch_parameters = BatchClusterParameters(
            foldername=folder,
            max_walltime=24 * 60 * 60,
            resubmit_on_timeout=False,
            memory=3,
            parameter_prefix=johns_slurm_hack)
        engine = SlurmComputationEngine(batch_parameters, check_interval=1,
                                        do_clean_up=True)

    prior = Gaussian(Sigma=eye(2) * 100)
    num_estimates = 100

    posterior = OzonePosteriorAverageEngine(computation_engine=engine,
                                            num_estimates=num_estimates,
                                            prior=prior)
    posterior.logdet_method = "shogun_estimate"

    proposal_cov = diag([4.000000000000000e-05, 1.072091680000000e+02])
    mcmc_sampler = StandardMetropolis(posterior, scale=1.0, cov=proposal_cov)

    start = asarray([-11.35, -13.1])
    mcmc_params = MCMCParams(start=start, num_iterations=2000)
    chain = MCMCChain(mcmc_sampler, mcmc_params)
    chain.append_mcmc_output(StatisticsOutput(print_from=1, lag=1))

    home = expanduser("~")
    folder = os.sep.join([home, modulename])
    store_chain_output = StoreChainOutput(folder)
    chain.append_mcmc_output(store_chain_output)

    # Resume from the last stored checkpoint if one exists.
    loaded = store_chain_output.load_last_stored_chain()
    if loaded is None:
        logging.info("Running chain from scratch")
    else:
        logging.info("Running chain from iteration %d", loaded.iteration)
        chain = loaded

    chain.run()

    # 'with' guarantees the file is closed even if dump() raises
    # (the original leaked the handle on exception).
    with open(folder + os.sep + "final_chain", "w") as f:
        dump(chain, f)
def main():
    """Small serial Russian-Roulette run of the ozone posterior (2 estimates,
    200 iterations) -- a local smoke-test variant of the SGE script.

    Resumes from the last stored chain checkpoint if present and pickles the
    final chain to disk.
    """
    Log.set_loglevel(logging.DEBUG)

    prior = Gaussian(Sigma=eye(2) * 100)
    num_estimates = 2

    home = expanduser("~")
    folder = os.sep.join([home, "sample_ozone_posterior_rr_sge"])

    computation_engine = SerialComputationEngine()

    rr_instance = RussianRoulette(1e-3, block_size=10)

    posterior = OzonePosteriorRREngine(rr_instance=rr_instance,
                                       computation_engine=computation_engine,
                                       num_estimates=num_estimates,
                                       prior=prior)
    posterior.logdet_method = "shogun_estimate"

    proposal_cov = diag([4.000000000000000e-05, 1.072091680000000e+02])
    mcmc_sampler = StandardMetropolis(posterior, scale=1.0, cov=proposal_cov)

    start = asarray([-11.35, -13.1])
    mcmc_params = MCMCParams(start=start, num_iterations=200)
    chain = MCMCChain(mcmc_sampler, mcmc_params)
    # chain.append_mcmc_output(PlottingOutput(None, plot_from=1, lag=1))
    chain.append_mcmc_output(StatisticsOutput(print_from=1, lag=1))
    store_chain_output = StoreChainOutput(folder, lag=50)
    chain.append_mcmc_output(store_chain_output)

    # Resume from the last stored checkpoint if one exists.
    loaded = store_chain_output.load_last_stored_chain()
    if loaded is None:
        logging.info("Running chain from scratch")
    else:
        logging.info("Running chain from iteration %d", loaded.iteration)
        chain = loaded

    chain.run()

    # 'with' guarantees the file is closed even if dump() raises
    # (the original leaked the handle on exception).
    with open(folder + os.sep + "final_chain", "w") as f:
        dump(chain, f)
def main():
    """Small serial Russian-Roulette run of the ozone posterior (2 estimates,
    200 iterations) -- a local smoke-test variant of the SGE script.

    Resumes from the last stored chain checkpoint if present and pickles the
    final chain to disk.
    """
    Log.set_loglevel(logging.DEBUG)

    prior = Gaussian(Sigma=eye(2) * 100)
    num_estimates = 2

    home = expanduser("~")
    folder = os.sep.join([home, "sample_ozone_posterior_rr_sge"])

    computation_engine = SerialComputationEngine()

    rr_instance = RussianRoulette(1e-3, block_size=10)

    posterior = OzonePosteriorRREngine(rr_instance=rr_instance,
                                       computation_engine=computation_engine,
                                       num_estimates=num_estimates,
                                       prior=prior)
    posterior.logdet_method = "shogun_estimate"

    # NOTE: literal normalized from "1.072091680000000e02" to the explicit
    # "e+02" form used by the sibling scripts -- identical value (107.209168).
    proposal_cov = diag([4.000000000000000e-05, 1.072091680000000e+02])
    mcmc_sampler = StandardMetropolis(posterior, scale=1.0, cov=proposal_cov)

    start = asarray([-11.35, -13.1])
    mcmc_params = MCMCParams(start=start, num_iterations=200)
    chain = MCMCChain(mcmc_sampler, mcmc_params)
    # chain.append_mcmc_output(PlottingOutput(None, plot_from=1, lag=1))
    chain.append_mcmc_output(StatisticsOutput(print_from=1, lag=1))
    store_chain_output = StoreChainOutput(folder, lag=50)
    chain.append_mcmc_output(store_chain_output)

    # Resume from the last stored checkpoint if one exists.
    loaded = store_chain_output.load_last_stored_chain()
    if loaded is None:
        logging.info("Running chain from scratch")
    else:
        logging.info("Running chain from iteration %d", loaded.iteration)
        chain = loaded

    chain.run()

    # 'with' guarantees the file is closed even if dump() raises
    # (the original leaked the handle on exception).
    with open(folder + os.sep + "final_chain", "w") as f:
        dump(chain, f)
# NOTE(review): `ax` and `samples` belong to an enclosing scope that is not
# visible in this chunk (presumably a plotting helper) -- confirm upstream.
if samples is not None:
    # Overlay the sample points in red on the existing axes.
    ax.scatter(samples[:, 0], samples[:, 1], c='r', s=1)

if __name__ == '__main__':
    """
    Example that just sends out jobs that store their result to a file when
    done; there is no control over the job after it has been submitted. No
    aggregators are stored and results can be picked up from disc when ready.
    This script also illustrates a typical use case in scientific computing:
    Run the same function with different parameters a certain number of times.
    Make sure to read the minimal example first.
    """
    # Level 10 == logging.DEBUG.
    Log.set_loglevel(10)

    # filename of the result database
    home = expanduser("~")
    foldername = os.path.join(home, "test")
    db_fname = os.path.join(foldername, "test.txt")

    batch_parameters = BatchClusterParameters(foldername=foldername)
    # Serial engine by default; swap in the commented line to run on Slurm.
    engine = SerialComputationEngine()
    # engine = SlurmComputationEngine(batch_parameters)

    # here are some example parameters for jobs
    # we here create all combinations and then shuffle them
    # this randomizes the runs over the parameter space
    params_x = np.linspace(-3, 3, num=25)
    params_y = np.linspace(-2, 2, num=12)
# NOTE(review): `ax` and `samples` belong to an enclosing scope that is not
# visible in this chunk (presumably a plotting helper) -- confirm upstream.
if samples is not None:
    # Overlay the sample points in red (trailing ';' is redundant but kept).
    ax.scatter(samples[:, 0], samples[:, 1], c='r', s=1);

if __name__ == '__main__':
    """
    Example that just sends out jobs that store their result to a file when
    done; there is no control over the job after it has been submitted. No
    aggregators are stored and results can be picked up from disc when ready.
    This script also illustrates a typical use case in scientific computing:
    Run the same function with different parameters a certain number of times.
    Make sure to read the minimal example first.
    """
    # Level 10 == logging.DEBUG.
    Log.set_loglevel(10)

    # filename of the result database
    home = expanduser("~")
    foldername = os.path.join(home, "test")
    db_fname = os.path.join(foldername, "test.txt")

    batch_parameters = BatchClusterParameters(foldername=foldername)
    # Serial engine by default; swap in the commented line to run on Slurm.
    engine = SerialComputationEngine()
    # engine = SlurmComputationEngine(batch_parameters)

    # here are some example parameters for jobs
    # we here create all combinations and then shuffle them
    # this randomizes the runs over the parameter space
    params_x = np.linspace(-3, 3, num=25)
    params_y = np.linspace(-2, 2, num=12)
# BUG FIX: `expanduser` and `os` were used below but never imported
# (NameError at runtime); both imports added.
import os
from os.path import expanduser

from independent_jobs.aggregators.ScalarResultAggregator import ScalarResultAggregator
from independent_jobs.engines.BatchClusterParameters import BatchClusterParameters
from independent_jobs.engines.SGEComputationEngine import SGEComputationEngine
from independent_jobs.engines.SerialComputationEngine import SerialComputationEngine
from independent_jobs.examples.MyJob import MyJob
from independent_jobs.tools.Log import Log
from independent_jobs.tools.Log import logger
import numpy as np

# See other file for implementation of MyJob
# Since we are using ScalarResult, we can use the already implemented aggregator
# ScalarResultAggregator
if __name__ == '__main__':
    # BUG FIX: the original passed `logger.info` (a bound method) as the log
    # level; set_loglevel takes a numeric level (20 == logging.INFO), matching
    # the sibling scripts that pass 10/20/logging.DEBUG.
    Log.set_loglevel(20)
    logger.info("Start")

    # create an instance of the SGE engine, with certain parameters

    # create folder name string
    home = expanduser("~")
    foldername = os.sep.join([home, "minimal_example"])
    logger.info("Setting engine folder to %s", foldername)

    # create parameter instance that is needed for any batch computation engine
    logger.info("Creating batch parameter instance")
    batch_parameters = BatchClusterParameters(foldername=foldername)

    # possibly create SGE engine instance, which can be used to submit jobs to
    # there are more engines available.
    # logger.info("creating SGE engine instance")