def main():
    Log.set_loglevel(logging.DEBUG)

    prior = Gaussian(Sigma=eye(2) * 100)
    num_estimates = 1000

    home = expanduser("~")
    folder = os.sep.join([home, "sample_ozone_posterior_rr_sge"])

    # cluster admin set up project "jump" for me to exclusively allocate nodes
    parameter_prefix = ""  # e.g. "#$ -P jump"

    cluster_parameters = BatchClusterParameters(foldername=folder,
                                                memory=7.8,
                                                loglevel=logging.DEBUG,
                                                parameter_prefix=parameter_prefix,
                                                max_walltime=60 * 60 * 24 - 1)

    computation_engine = SGEComputationEngine(cluster_parameters, check_interval=10)

    rr_instance = RussianRoulette(1e-3, block_size=400)

    posterior = OzonePosteriorRREngine(rr_instance=rr_instance,
                                       computation_engine=computation_engine,
                                       num_estimates=num_estimates,
                                       prior=prior)
    posterior.logdet_method = "shogun_estimate"

    proposal_cov = diag([4.000000000000000e-05, 1.072091680000000e+02])
    mcmc_sampler = StandardMetropolis(posterior, scale=1.0, cov=proposal_cov)

    start = asarray([-11.55, -10.1])
    mcmc_params = MCMCParams(start=start, num_iterations=5000)
    chain = MCMCChain(mcmc_sampler, mcmc_params)

    # chain.append_mcmc_output(PlottingOutput(None, plot_from=1, lag=1))
    chain.append_mcmc_output(StatisticsOutput(print_from=1, lag=1))

    store_chain_output = StoreChainOutput(folder, lag=1)
    chain.append_mcmc_output(store_chain_output)

    # resume from the last stored chain if one exists
    loaded = store_chain_output.load_last_stored_chain()
    if loaded is None:
        logging.info("Running chain from scratch")
    else:
        logging.info("Running chain from iteration %d" % loaded.iteration)
        chain = loaded

    chain.run()

    # pickle the full chain; pickling requires binary mode
    with open(folder + os.sep + "final_chain", "wb") as f:
        dump(chain, f)
def main():
    Log.set_loglevel(logging.DEBUG)

    modulename = "sample_ozone_posterior_average_slurm"

    if not FileSystem.cmd_exists("sbatch"):
        engine = SerialComputationEngine()
    else:
        # johns_slurm_hack = "#SBATCH --partition=intel-ivy,wrkstn,compute"
        johns_slurm_hack = "#SBATCH --partition=intel-ivy,compute"

        folder = os.sep + os.sep.join(["nfs", "data3", "ucabhst", modulename])
        batch_parameters = BatchClusterParameters(foldername=folder,
                                                  max_walltime=24 * 60 * 60,
                                                  resubmit_on_timeout=False,
                                                  memory=3,
                                                  parameter_prefix=johns_slurm_hack)
        engine = SlurmComputationEngine(batch_parameters, check_interval=1,
                                        do_clean_up=True)

    prior = Gaussian(Sigma=eye(2) * 100)
    num_estimates = 100

    posterior = OzonePosteriorAverageEngine(computation_engine=engine,
                                            num_estimates=num_estimates,
                                            prior=prior)
    posterior.logdet_method = "shogun_estimate"

    proposal_cov = diag([4.000000000000000e-05, 1.072091680000000e+02])
    mcmc_sampler = StandardMetropolis(posterior, scale=1.0, cov=proposal_cov)

    start = asarray([-11.35, -13.1])
    mcmc_params = MCMCParams(start=start, num_iterations=2000)
    chain = MCMCChain(mcmc_sampler, mcmc_params)
    chain.append_mcmc_output(StatisticsOutput(print_from=1, lag=1))

    home = expanduser("~")
    folder = os.sep.join([home, modulename])
    store_chain_output = StoreChainOutput(folder)
    chain.append_mcmc_output(store_chain_output)

    # resume from the last stored chain if one exists
    loaded = store_chain_output.load_last_stored_chain()
    if loaded is None:
        logging.info("Running chain from scratch")
    else:
        logging.info("Running chain from iteration %d" % loaded.iteration)
        chain = loaded

    chain.run()

    # pickle the full chain; pickling requires binary mode
    with open(folder + os.sep + "final_chain", "wb") as f:
        dump(chain, f)
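# Both main() variants above share a pattern worth isolating: fall back to a
# SerialComputationEngine when no scheduler binary is on the PATH, so the same
# script runs unchanged on a laptop and on the cluster. A minimal sketch using
# only the classes already used above; make_engine itself is a hypothetical
# helper, not part of the library.
def make_engine(folder, walltime=24 * 60 * 60):
    params = BatchClusterParameters(foldername=folder, max_walltime=walltime)
    if FileSystem.cmd_exists("sbatch"):
        # Slurm is available
        return SlurmComputationEngine(params, check_interval=10)
    if FileSystem.cmd_exists("qsub"):
        # Sun Grid Engine is available
        return SGEComputationEngine(params, check_interval=10)
    # no scheduler found: run all jobs in the current process
    return SerialComputationEngine()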
def test_shogun_on_sge_engine(self):
    home = expanduser("~")
    folder = os.sep.join([home, "unit_test_shogun_on_sge_dummy_result"])
    try:
        shutil.rmtree(folder)
    except OSError:
        pass
    pbs_parameters = BatchClusterParameters(foldername=folder)
    engine = SGEComputationEngine(pbs_parameters, check_interval=1)
    num_submissions = 1
    sleep_times = randint(0, 3, num_submissions)
    self.engine_tester(engine, sleep_times)
def test_slurm_engine_max_waiting_time(self):
    if not FileSystem.cmd_exists("sbatch"):
        raise SkipTest
    home = expanduser("~")
    folder = os.sep.join([home, "unit_test_dummy_slurm_result_max_wait"])
    try:
        shutil.rmtree(folder)
    except OSError:
        pass
    batch_parameters = BatchClusterParameters(foldername=folder)
    engine = SlurmComputationEngine(batch_parameters, check_interval=1)
    sleep_times = [2, -1]
    self.engine_helper(engine, sleep_times)
def test_sge_engine_no_clean_up(self):
    if not FileSystem.cmd_exists("qsub"):
        raise SkipTest
    home = expanduser("~")
    folder = os.sep.join([home, "unit_test_sge_dummy_result"])
    try:
        shutil.rmtree(folder)
    except OSError:
        pass
    batch_parameters = BatchClusterParameters(foldername=folder)
    engine = SGEComputationEngine(batch_parameters, check_interval=1,
                                  do_clean_up=False)
    num_submissions = 3
    sleep_times = randint(0, 3, num_submissions)
    self.engine_helper(engine, sleep_times)
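# engine_helper / engine_tester are defined elsewhere in the test class; for
# orientation, such a helper typically submits one trivial job per entry in
# sleep_times and asserts that every result comes back through its aggregator.
# A hedged sketch only: SleepJob and the echoed result value are hypothetical,
# not the library's actual API.
def engine_helper_sketch(self, engine, sleep_times):
    aggregators = []
    for t in sleep_times:
        job = SleepJob(SingleResultAggregator(), sleep_time=t)  # hypothetical
        aggregators.append(engine.submit_job(job))
    engine.wait_for_all()
    for agg, t in zip(aggregators, sleep_times):
        agg.finalize()
        # assume the job simply echoes its sleep time back as its result
        self.assertEqual(agg.get_final_result().result, t)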
def run_dataset(prob_label):
    """Run the experiment"""
    sample_source, n = get_sample_source(prob_label)

    # /////// submit jobs //////////
    # create folder name string
    home = os.path.expanduser("~")
    foldername = os.path.join(home, "freqopttest_slurm", 'e%d' % ex)
    logger.info("Setting engine folder to %s" % foldername)

    # create parameter instance that is needed for any batch computation engine
    logger.info("Creating batch parameter instance")
    batch_parameters = BatchClusterParameters(foldername=foldername,
                                              job_name_base="e%d_" % ex,
                                              parameter_prefix="")

    # Use the following line if the Slurm queue is not used.
    #engine = SerialComputationEngine()
    engine = SlurmComputationEngine(batch_parameters, do_clean_up=True)
    n_methods = len(method_job_funcs)
    # repetitions x #methods
    aggregators = np.empty((reps, n_methods), dtype=object)
    d = sample_source.dim()
    for r in range(reps):
        for mi, f in enumerate(method_job_funcs):
            # name used to save the result
            func_name = f.__name__
            fname = '%s-%s-J%d_r%d_d%d_a%.3f_trp%.2f.p' \
                %(prob_label, func_name, J, r, d, alpha, tr_proportion)
            if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
                logger.info('%s exists. Load and return.' % fname)
                test_result = glo.ex_load_result(ex, prob_label, fname)

                sra = SingleResultAggregator()
                sra.submit_result(SingleResult(test_result))
                aggregators[r, mi] = sra
            else:
                # result does not exist, or a rerun is requested
                job = Ex5Job(SingleResultAggregator(), prob_label, r, n, f)
                agg = engine.submit_job(job)
                aggregators[r, mi] = agg

    # let the engine finish its business
    logger.info("Waiting for all jobs in the engine")
    engine.wait_for_all()

    # ////// collect the results ///////////
    logger.info("Collecting results")
    test_results = np.empty((reps, n_methods), dtype=object)
    for r in range(reps):
        for mi, f in enumerate(method_job_funcs):
            logger.info("Collecting result (%s, r=%d)" % (f.__name__, r))
            # let the aggregator finalize things
            aggregators[r, mi].finalize()

            # aggregators[i].get_final_result() returns a SingleResult
            # instance, from which we need to extract the actual result
            test_result = aggregators[r, mi].get_final_result().result
            test_results[r, mi] = test_result

            func_name = f.__name__
            fname = '%s-%s-J%d_r%d_d%d_a%.3f_trp%.2f.p' \
                %(prob_label, func_name, J, r, d, alpha, tr_proportion)
            glo.ex_save_result(ex, test_result, prob_label, fname)

    func_names = [f.__name__ for f in method_job_funcs]
    func2labels = exglobal.get_func2label_map()
    method_labels = [func2labels[f] for f in func_names if f in func2labels]

    # save results
    results = {'results': test_results,
               'n': n,
               'data_fname': label2fname[prob_label],
               'alpha': alpha,
               'J': J,
               'sample_source': sample_source,
               'tr_proportion': tr_proportion,
               'method_job_funcs': method_job_funcs,
               'prob_label': prob_label,
               'method_labels': method_labels}

    # file name of the aggregated result
    fname = 'ex%d-%s-me%d_J%d_rs%d_nma%d_d%d_a%.3f_trp%.2f.p' \
        %(ex, prob_label, n_methods, J, reps, n, d, alpha, tr_proportion)
    glo.ex_save_result(ex, results, fname)
    logger.info('Saved aggregated results to %s' % fname)
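# run_dataset above follows the standard submit/collect cycle of this job
# framework: engine.submit_job returns an aggregator immediately,
# wait_for_all blocks until the scheduler is done, and each aggregator is
# finalized before its result is read. A minimal sketch of that cycle in
# isolation, assuming the classes already used above; DummyJob is a
# hypothetical stand-in for Ex5Job that takes only an index.
def submit_and_collect(engine, num_jobs):
    aggregators = []
    for i in range(num_jobs):
        job = DummyJob(SingleResultAggregator(), i)  # hypothetical job class
        aggregators.append(engine.submit_job(job))   # returns immediately

    engine.wait_for_all()  # block until all jobs have finished

    results = []
    for agg in aggregators:
        agg.finalize()  # let the aggregator collect its job's output
        results.append(agg.get_final_result().result)  # unwrap SingleResult
    return results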
# plain MCMC parameters, plan is to use every 200th sample
thin_step = 1
num_iterations = 5200
num_warmup = 200

compute_local = False

if not FileSystem.cmd_exists("sbatch") or compute_local:
    engine = SerialComputationEngine()
else:
    johns_slurm_hack = "#SBATCH --partition=intel-ivy,wrkstn,compute"
    # modulename is defined elsewhere in the script
    folder = os.sep + os.sep.join(["nfs", "data3", "ucabhst", modulename])
    batch_parameters = BatchClusterParameters(foldername=folder,
                                              resubmit_on_timeout=False,
                                              parameter_prefix=johns_slurm_hack)
    engine = SlurmComputationEngine(batch_parameters, check_interval=1,
                                    do_clean_up=True)
    engine.max_jobs_in_queue = 1000
    engine.store_fire_and_forget = True

aggs = []
for i in range(num_repetitions):
    job = rw_generator_isotropic(num_warmup, thin_step)
    logger.info("Repetition %d/%d, %s" % (i + 1, num_repetitions,
                                          job.get_parameter_fname_suffix()))
    aggs += [engine.submit_job(job)]
there is no control over the job after it has been submitted. No aggregators
are stored, and results can be picked up from disc when ready. This script
also illustrates a typical use case in scientific computing: run the same
function with different parameters a certain number of times. Make sure to
read the minimal example first.
"""
Log.set_loglevel(10)  # 10 == logging.DEBUG

# filename of the result database
home = expanduser("~")
foldername = os.path.join(home, "test")
db_fname = os.path.join(foldername, "test.txt")

batch_parameters = BatchClusterParameters(foldername=foldername)
engine = SerialComputationEngine()
# engine = SlurmComputationEngine(batch_parameters)

# here are some example parameters for jobs
# we create all combinations and then shuffle them,
# which randomizes the runs over the parameter space
params_x = np.linspace(-3, 3, num=25)
params_y = np.linspace(-2, 2, num=12)
all_parameters = list(itertools.product(params_x, params_y))
shuffle(all_parameters)
print("Number of parameter combinations:", len(all_parameters))

# integer division so the slice index stays an int
for params in all_parameters[:len(all_parameters) // 300]:
    x = params[0]
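# The parameter loop above is truncated; for orientation, a job used in this
# fire-and-forget style writes its result straight to disc instead of
# returning it through a stored aggregator. A hedged sketch only: it assumes
# (not verified against the library) that jobs subclass IndependentJob and
# implement compute(), and MyFunctionJob plus the plain-text "database"
# format are illustrative, not the library's API.
class MyFunctionJob(IndependentJob):
    def __init__(self, aggregator, x, y, db_fname):
        IndependentJob.__init__(self, aggregator)
        self.x = x
        self.y = y
        self.db_fname = db_fname

    def compute(self):
        result = self.x ** 2 + self.y ** 2  # placeholder computation
        # append one line per finished job; results are picked up from disc
        with open(self.db_fname, "a") as f:
            f.write("%f %f %f\n" % (self.x, self.y, result))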
def run_problem(prob_label):
    """Run the experiment"""
    L = get_pqsource_list(prob_label)
    prob_params, ps, data_sources = zip(*L)
    # make them lists
    prob_params = list(prob_params)
    ps = list(ps)
    data_sources = list(data_sources)

    # /////// submit jobs //////////
    # create folder name string
    #result_folder = glo.result_folder()
    from kgof.config import expr_configs
    tmp_dir = expr_configs['scratch_path']
    foldername = os.path.join(tmp_dir, 'kgof_slurm', 'e%d' % ex)
    logger.info("Setting engine folder to %s" % foldername)

    # create parameter instance that is needed for any batch computation engine
    logger.info("Creating batch parameter instance")
    batch_parameters = BatchClusterParameters(foldername=foldername,
                                              job_name_base="e%d_" % ex,
                                              parameter_prefix="")

    # Use the following line if the Slurm queue is not used.
    #engine = SerialComputationEngine()
    engine = SlurmComputationEngine(batch_parameters)
    #engine = SlurmComputationEngine(batch_parameters, partition='wrkstn,compute')
    n_methods = len(method_job_funcs)

    # repetitions x len(prob_params) x #methods
    aggregators = np.empty((reps, len(prob_params), n_methods), dtype=object)

    for r in range(reps):
        for pi, param in enumerate(prob_params):
            for mi, f in enumerate(method_job_funcs):
                # name used to save the result
                func_name = f.__name__
                fname = '%s-%s-n%d_r%d_p%g_a%.3f_trp%.2f.p' \
                    %(prob_label, func_name, sample_size, r, param, alpha,
                      tr_proportion)
                if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
                    logger.info('%s exists. Load and return.' % fname)
                    job_result = glo.ex_load_result(ex, prob_label, fname)

                    sra = SingleResultAggregator()
                    sra.submit_result(SingleResult(job_result))
                    aggregators[r, pi, mi] = sra
                else:
                    # result does not exist, or a rerun is requested
                    # p: an UnnormalizedDensity object
                    p = ps[pi]
                    job = Ex2Job(SingleResultAggregator(), p, data_sources[pi],
                                 prob_label, r, f, param)
                    agg = engine.submit_job(job)
                    aggregators[r, pi, mi] = agg

    # let the engine finish its business
    logger.info("Waiting for all jobs in the engine")
    engine.wait_for_all()

    # ////// collect the results ///////////
    logger.info("Collecting results")
    job_results = np.empty((reps, len(prob_params), n_methods), dtype=object)
    for r in range(reps):
        for pi, param in enumerate(prob_params):
            for mi, f in enumerate(method_job_funcs):
                logger.info("Collecting result (%s, r=%d, param=%.3g)" %
                            (f.__name__, r, param))
                # let the aggregator finalize things
                aggregators[r, pi, mi].finalize()

                # aggregators[i].get_final_result() returns a SingleResult
                # instance, from which we need to extract the actual result
                job_result = aggregators[r, pi, mi].get_final_result().result
                job_results[r, pi, mi] = job_result

    #func_names = [f.__name__ for f in method_job_funcs]
    #func2labels = exglobal.get_func2label_map()
    #method_labels = [func2labels[f] for f in func_names if f in func2labels]

    # save results
    results = {'job_results': job_results,
               'prob_params': prob_params,
               'alpha': alpha,
               'repeats': reps,
               'ps': ps,
               'list_data_source': data_sources,
               'tr_proportion': tr_proportion,
               'method_job_funcs': method_job_funcs,
               'prob_label': prob_label,
               'sample_size': sample_size,
               }

    # file name of the aggregated result
    fname = 'ex%d-%s-me%d_n%d_rs%d_pmi%g_pma%g_a%.3f_trp%.2f.p' \
        %(ex, prob_label, n_methods, sample_size, reps, min(prob_params),
          max(prob_params), alpha, tr_proportion)
    glo.ex_save_result(ex, results, fname)
    logger.info('Saved aggregated results to %s' % fname)
def run_problem(folder_path, prob_label):
    """Run the experiment"""
    pl = exglo.parse_prob_label(prob_label)
    is_h0 = pl['is_h0']
    n = pl['n']

    # /////// submit jobs //////////
    # create folder name string
    #result_folder = glo.result_folder()
    #tmp_dir = tempfile.gettempdir()
    from fsic.config import expr_configs
    tmp_dir = expr_configs['scratch_dir']
    foldername = os.path.join(tmp_dir, 'wj_slurm', 'e%d' % ex)
    logger.info("Setting engine folder to %s" % foldername)

    # create parameter instance that is needed for any batch computation engine
    logger.info("Creating batch parameter instance")
    batch_parameters = BatchClusterParameters(foldername=foldername,
                                              job_name_base="e%d_" % ex,
                                              parameter_prefix="")

    # Use the following line if the Slurm queue is not used.
    #engine = SerialComputationEngine()
    engine = SlurmComputationEngine(batch_parameters)
    n_methods = len(method_job_funcs)

    # repetitions x #methods
    aggregators = np.empty((reps, n_methods), dtype=object)

    for r in range(reps):
        for mi, f in enumerate(method_job_funcs):
            # name used to save the result
            func_name = f.__name__
            fname = '%s-%s-r%d_a%.3f_trp%.2f.p' \
                %(prob_label, func_name, r, alpha, tr_proportion)
            if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
                logger.info('%s exists. Load and return.' % fname)
                job_result = glo.ex_load_result(ex, prob_label, fname)

                sra = SingleResultAggregator()
                sra.submit_result(SingleResult(job_result))
                aggregators[r, mi] = sra
            else:
                # result does not exist, or a rerun is requested
                job = Ex4Job(SingleResultAggregator(), folder_path,
                             prob_label, r, f)
                agg = engine.submit_job(job)
                aggregators[r, mi] = agg

    # let the engine finish its business
    logger.info("Waiting for all jobs in the engine")
    engine.wait_for_all()

    # ////// collect the results ///////////
    logger.info("Collecting results")
    job_results = np.empty((reps, n_methods), dtype=object)
    for r in range(reps):
        for mi, f in enumerate(method_job_funcs):
            logger.info("Collecting result (%s, r=%d, n=%d)" %
                        (f.__name__, r, n))
            # let the aggregator finalize things
            aggregators[r, mi].finalize()

            # aggregators[i].get_final_result() returns a SingleResult
            # instance, from which we need to extract the actual result
            job_result = aggregators[r, mi].get_final_result().result
            job_results[r, mi] = job_result

    #func_names = [f.__name__ for f in method_job_funcs]
    #func2labels = exglobal.get_func2label_map()
    #method_labels = [func2labels[f] for f in func_names if f in func2labels]

    # save results
    # - Do not store PairedSource because it can be very big.
    results = {'job_results': job_results,
               'n': n,
               'is_h0': is_h0,
               'alpha': alpha,
               'repeats': reps,
               'tr_proportion': tr_proportion,
               'method_job_funcs': method_job_funcs,
               'prob_label': prob_label,
               }

    # file name of the aggregated result
    fname = 'ex%d-%s-me%d_rs%d_a%.3f_trp%.2f.p' \
        %(ex, prob_label, n_methods, reps, alpha, tr_proportion)
    glo.ex_save_result(ex, results, fname)
    logger.info('Saved aggregated results to %s' % fname)
def run_problem(prob_label):
    """Run the experiment"""
    # /////// submit jobs //////////
    # create folder name string
    #result_folder = glo.result_folder()
    from kmod.config import expr_configs
    tmp_dir = expr_configs['scratch_path']
    foldername = os.path.join(tmp_dir, 'kmod_slurm', 'e%d' % ex)
    logger.info("Setting engine folder to %s" % foldername)

    # create parameter instance that is needed for any batch computation engine
    logger.info("Creating batch parameter instance")
    batch_parameters = BatchClusterParameters(foldername=foldername,
                                              job_name_base="e%d_" % ex,
                                              parameter_prefix="")

    # Use the following line if the Slurm queue is not used.
    #engine = SerialComputationEngine()
    partitions = expr_configs['slurm_partitions']
    if partitions is None:
        engine = SlurmComputationEngine(batch_parameters)
    else:
        engine = SlurmComputationEngine(batch_parameters, partition=partitions)
    n_methods = len(method_funcs)

    # problem setting
    ns, P, Q, ds = get_ns_pqrsource(prob_label)

    # repetitions x len(ns) x #methods
    aggregators = np.empty((reps, len(ns), n_methods), dtype=object)

    for r in range(reps):
        for ni, n in enumerate(ns):
            for mi, f in enumerate(method_funcs):
                # name used to save the result
                func_name = f.__name__
                fname = '%s-%s-n%d_r%d_a%.3f.p' \
                    %(prob_label, func_name, n, r, alpha)
                if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
                    logger.info('%s exists. Load and return.' % fname)
                    job_result = glo.ex_load_result(ex, prob_label, fname)

                    sra = SingleResultAggregator()
                    sra.submit_result(SingleResult(job_result))
                    aggregators[r, ni, mi] = sra
                else:
                    # result does not exist, or a rerun is requested
                    job = Ex1Job(SingleResultAggregator(), P, Q, ds,
                                 prob_label, r, f, n)
                    agg = engine.submit_job(job)
                    aggregators[r, ni, mi] = agg

    # let the engine finish its business
    logger.info("Waiting for all jobs in the engine")
    engine.wait_for_all()

    # ////// collect the results ///////////
    logger.info("Collecting results")
    job_results = np.empty((reps, len(ns), n_methods), dtype=object)
    for r in range(reps):
        for ni, n in enumerate(ns):
            for mi, f in enumerate(method_funcs):
                logger.info("Collecting result (%s, r=%d, n=%d)" %
                            (f.__name__, r, n))
                # let the aggregator finalize things
                aggregators[r, ni, mi].finalize()

                # aggregators[i].get_final_result() returns a SingleResult
                # instance, from which we need to extract the actual result
                job_result = aggregators[r, ni, mi].get_final_result().result
                job_results[r, ni, mi] = job_result

    #func_names = [f.__name__ for f in method_funcs]
    #func2labels = exglobal.get_func2label_map()
    #method_labels = [func2labels[f] for f in func_names if f in func2labels]

    # save results
    results = {'job_results': job_results,
               'P': P,
               'Q': Q,
               'data_source': ds,
               'alpha': alpha,
               'repeats': reps,
               'ns': ns,
               'method_funcs': method_funcs,
               'prob_label': prob_label,
               }

    # file name of the aggregated result
    fname = 'ex%d-%s-me%d_rs%d_nmi%d_nma%d_a%.3f.p' \
        %(ex, prob_label, n_methods, reps, min(ns), max(ns), alpha)
    glo.ex_save_result(ex, results, fname)
    logger.info('Saved aggregated results to %s' % fname)
def run_problem(prob_label):
    """Run the experiment"""
    ns, p, ds = get_ns_pqsource(prob_label)

    # /////// submit jobs //////////
    # create folder name string
    # result_folder = glo.result_folder()
    from sbibm.third_party.kgof.config import expr_configs

    tmp_dir = expr_configs["scratch_path"]
    foldername = os.path.join(tmp_dir, "kgof_slurm", "e%d" % ex)
    logger.info("Setting engine folder to %s" % foldername)

    # create parameter instance that is needed for any batch computation engine
    logger.info("Creating batch parameter instance")
    batch_parameters = BatchClusterParameters(
        foldername=foldername, job_name_base="e%d_" % ex, parameter_prefix=""
    )

    # Use the following line if the Slurm queue is not used.
    # engine = SerialComputationEngine()
    # engine = SlurmComputationEngine(batch_parameters, partition='wrkstn,compute')
    engine = SlurmComputationEngine(batch_parameters)
    n_methods = len(method_job_funcs)

    # repetitions x len(ns) x #methods
    aggregators = np.empty((reps, len(ns), n_methods), dtype=object)

    for r in range(reps):
        for ni, n in enumerate(ns):
            for mi, f in enumerate(method_job_funcs):
                # name used to save the result
                func_name = f.__name__
                fname = "%s-%s-n%d_r%d_a%.3f_trp%.2f.p" % (
                    prob_label,
                    func_name,
                    n,
                    r,
                    alpha,
                    tr_proportion,
                )
                if not is_rerun and glo.ex_file_exists(ex, prob_label, fname):
                    logger.info("%s exists. Load and return." % fname)
                    job_result = glo.ex_load_result(ex, prob_label, fname)

                    sra = SingleResultAggregator()
                    sra.submit_result(SingleResult(job_result))
                    aggregators[r, ni, mi] = sra
                else:
                    # result does not exist, or a rerun is requested
                    # p: an UnnormalizedDensity object
                    job = Ex1Job(SingleResultAggregator(), p, ds, prob_label,
                                 r, f, n)
                    agg = engine.submit_job(job)
                    aggregators[r, ni, mi] = agg

    # let the engine finish its business
    logger.info("Waiting for all jobs in the engine")
    engine.wait_for_all()

    # ////// collect the results ///////////
    logger.info("Collecting results")
    job_results = np.empty((reps, len(ns), n_methods), dtype=object)
    for r in range(reps):
        for ni, n in enumerate(ns):
            for mi, f in enumerate(method_job_funcs):
                logger.info("Collecting result (%s, r=%d, n=%d)" %
                            (f.__name__, r, n))
                # let the aggregator finalize things
                aggregators[r, ni, mi].finalize()

                # aggregators[i].get_final_result() returns a SingleResult
                # instance, from which we need to extract the actual result
                job_result = aggregators[r, ni, mi].get_final_result().result
                job_results[r, ni, mi] = job_result

    # func_names = [f.__name__ for f in method_job_funcs]
    # func2labels = exglobal.get_func2label_map()
    # method_labels = [func2labels[f] for f in func_names if f in func2labels]

    # save results
    results = {
        "job_results": job_results,
        "data_source": ds,
        "alpha": alpha,
        "repeats": reps,
        "ns": ns,
        "p": p,
        "tr_proportion": tr_proportion,
        "method_job_funcs": method_job_funcs,
        "prob_label": prob_label,
    }

    # file name of the aggregated result
    fname = "ex%d-%s-me%d_rs%d_nmi%d_nma%d_a%.3f_trp%.2f.p" % (
        ex,
        prob_label,
        n_methods,
        reps,
        min(ns),
        max(ns),
        alpha,
        tr_proportion,
    )
    glo.ex_save_result(ex, results, fname)
    logger.info("Saved aggregated results to %s" % fname)
def compute(fname_base, job_generator, Ds, Ns, num_repetitions, num_steps,
            step_size, max_steps=None, compute_local=False):
    if not FileSystem.cmd_exists("sbatch") or compute_local:
        engine = SerialComputationEngine()
    else:
        johns_slurm_hack = "#SBATCH --partition=intel-ivy,wrkstn,compute"
        folder = os.sep + os.sep.join(["nfs", "data3", "ucabhst", fname_base])
        batch_parameters = BatchClusterParameters(foldername=folder,
                                                  resubmit_on_timeout=False,
                                                  parameter_prefix=johns_slurm_hack)
        engine = SlurmComputationEngine(batch_parameters, check_interval=1,
                                        do_clean_up=True)
        engine.max_jobs_in_queue = 1000
        engine.store_fire_and_forget = True

    # fixed order of aggregators
    aggregators = []
    for D in Ds:
        for N in Ns:
            for j in range(num_repetitions):
                logger.info("%s trajectory, D=%d/%d, N=%d/%d, repetition %d/%d" %
                            (str(job_generator), D, np.max(Ds), N, np.max(Ns),
                             j + 1, num_repetitions))
                job = job_generator(D, N, N)
                aggregators += [engine.submit_job(job)]
                time.sleep(0.1)

    # block until all done
    engine.wait_for_all()

    avg_accept = np.zeros((num_repetitions, len(Ds), len(Ns)))
    avg_accept_est = np.zeros((num_repetitions, len(Ds), len(Ns)))
    log_dets = np.zeros((num_repetitions, len(Ds), len(Ns)))
    log_dets_est = np.zeros((num_repetitions, len(Ds), len(Ns)))
    avg_steps_taken = np.zeros((num_repetitions, len(Ds), len(Ns)))

    agg_counter = 0
    for i in range(len(Ds)):
        for k in range(len(Ns)):
            for j in range(num_repetitions):
                agg = aggregators[agg_counter]
                agg_counter += 1
                agg.finalize()
                result = agg.get_final_result()
                agg.clean_up()

                avg_accept[j, i, k] = result.acc_mean
                avg_accept_est[j, i, k] = result.acc_est_mean
                log_dets[j, i, k] = result.vol
                log_dets_est[j, i, k] = result.vol_est
                avg_steps_taken[j, i, k] = result.steps_taken

                with open(fname_base + ".csv", 'a+') as f:
                    line = np.array([Ds[i], Ns[k],
                                     avg_accept[j, i, k],
                                     avg_accept_est[j, i, k],
                                     log_dets[j, i, k],
                                     log_dets_est[j, i, k],
                                     avg_steps_taken[j, i, k],
                                     ])
                    f.write(" ".join(map(str, line)) + os.linesep)
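# compute() appends one space-separated row per repetition to
# fname_base + ".csv", so results can be re-read later without the engine. A
# minimal reader sketch using numpy only; the column order is taken directly
# from the write above and nothing else is assumed.
import numpy as np

def load_results(fname_base):
    # columns: D, N, avg_accept, avg_accept_est, log_det, log_det_est,
    # avg_steps_taken
    data = np.loadtxt(fname_base + ".csv")
    return np.atleast_2d(data)  # keep a 2-d shape even for a single row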