Example #1
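# Assumed imports: the engine classes below follow the layout of the
# independent_jobs package; the exact module paths are an assumption.
import logging
import os

from independent_jobs.engines.BatchClusterParameters import BatchClusterParameters
from independent_jobs.engines.SerialComputationEngine import SerialComputationEngine
from independent_jobs.engines.SlurmComputationEngine import SlurmComputationEngine
from independent_jobs.tools.FileSystem import FileSystem

logger = logging.getLogger(__name__)  # stand-in for the module's logger


def run_repetitions(modulename, num_warmup, thin_step, num_repetitions):
    # Hypothetical wrapper: the original excerpt is a function body whose
    # enclosing def was not shown; its free variables appear as parameters.
    # rw_generator_isotropic is assumed to be a module-level job factory.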
    compute_local = False

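    # Fall back to a local serial engine when Slurm's sbatch is unavailable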
    if not FileSystem.cmd_exists("sbatch") or compute_local:
        engine = SerialComputationEngine()

    else:
        johns_slurm_hack = "#SBATCH --partition=intel-ivy,wrkstn,compute"
        folder = os.sep + os.sep.join(["nfs", "data3", "ucabhst", modulename])
        batch_parameters = BatchClusterParameters(
            foldername=folder,
            resubmit_on_timeout=False,
            parameter_prefix=johns_slurm_hack)
        engine = SlurmComputationEngine(batch_parameters,
                                        check_interval=1,
                                        do_clean_up=True)
        engine.max_jobs_in_queue = 1000
        engine.store_fire_and_forget = True

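    # Submit one job per repetition and collect the returned aggregators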
    aggs = []

    for i in range(num_repetitions):
        job = rw_generator_isotropic(num_warmup, thin_step)
        logger.info("Repetition %d/%d, %s" %
                    (i + 1, num_repetitions, job.get_parameter_fname_suffix()))
        aggs += [engine.submit_job(job)]

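    # Block until all submitted jobs have finished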
    engine.wait_for_all()

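    # Finalize each aggregator to retrieve its job's result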
    for i, agg in enumerate(aggs):
        agg.finalize()
        result = agg.get_final_result()
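
The engines above consume job objects from the independent_jobs package. A minimal sketch of such a job, assuming the IndependentJob base class with a scalar-result aggregator (class names and module paths here are assumptions for illustration, not part of the example):

from independent_jobs.aggregators.ScalarResultAggregator import ScalarResultAggregator
from independent_jobs.jobs.IndependentJob import IndependentJob
from independent_jobs.results.ScalarResult import ScalarResult


class ToyJob(IndependentJob):  # hypothetical job, for illustration only
    def __init__(self):
        IndependentJob.__init__(self, ScalarResultAggregator())

    def compute(self):
        # Do the actual work here, then hand the result to the aggregator;
        # engine.submit_job() returns this aggregator to the caller above.
        self.aggregator.submit_result(ScalarResult(1.0))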
Example #2
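# Assumed imports, mirroring Example #1 (module paths within the
# independent_jobs package are an assumption); numpy and time are used below.
import logging
import os
import time

import numpy as np

from independent_jobs.engines.BatchClusterParameters import BatchClusterParameters
from independent_jobs.engines.SerialComputationEngine import SerialComputationEngine
from independent_jobs.engines.SlurmComputationEngine import SlurmComputationEngine
from independent_jobs.tools.FileSystem import FileSystem

logger = logging.getLogger(__name__)  # stand-in for the module's logger
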
def compute(fname_base,
            job_generator,
            Ds,
            Ns,
            num_repetitions,
            num_steps,
            step_size,
            max_steps=None,
            compute_local=False):
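    """Submit one job per (D, N, repetition) triple via job_generator(D, N, N),
    wait for all jobs to finish, then append one result row per job to
    <fname_base>.csv. Falls back to local serial execution when Slurm's
    sbatch command is unavailable or compute_local is True."""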
    if not FileSystem.cmd_exists("sbatch") or compute_local:
        engine = SerialComputationEngine()

    else:
        johns_slurm_hack = "#SBATCH --partition=intel-ivy,wrkstn,compute"
        folder = os.sep + os.sep.join(["nfs", "data3", "ucabhst", fname_base])
        batch_parameters = BatchClusterParameters(
            foldername=folder,
            resubmit_on_timeout=False,
            parameter_prefix=johns_slurm_hack)
        engine = SlurmComputationEngine(batch_parameters,
                                        check_interval=1,
                                        do_clean_up=True)
        engine.max_jobs_in_queue = 1000
        engine.store_fire_and_forget = True

    # fixed order of aggregators: results are read back positionally below
    aggregators = []
    for D in Ds:
        for N in Ns:
            for j in range(num_repetitions):
                logger.info("%s trajectory, D=%d/%d, N=%d/%d repetition %d/%d" %
                            (str(job_generator), D, np.max(Ds), N, np.max(Ns), j + 1, num_repetitions))
                job = job_generator(D, N, N)
                aggregators += [engine.submit_job(job)]
                time.sleep(0.1)

    # block until all done
    engine.wait_for_all()

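    # Result arrays indexed as [repetition, D index, N index]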
    avg_accept = np.zeros((num_repetitions, len(Ds), len(Ns)))
    avg_accept_est = np.zeros((num_repetitions, len(Ds), len(Ns)))
    log_dets = np.zeros((num_repetitions, len(Ds), len(Ns)))
    log_dets_est = np.zeros((num_repetitions, len(Ds), len(Ns)))
    avg_steps_taken = np.zeros((num_repetitions, len(Ds), len(Ns)))

    agg_counter = 0
    for i in range(len(Ds)):
        for k in range(len(Ns)):
            for j in range(num_repetitions):
                agg = aggregators[agg_counter]
                agg_counter += 1
                agg.finalize()
                result = agg.get_final_result()
                agg.clean_up()

                avg_accept[j, i, k] = result.acc_mean
                avg_accept_est[j, i, k] = result.acc_est_mean
                log_dets[j, i, k] = result.vol
                log_dets_est[j, i, k] = result.vol_est
                avg_steps_taken[j, i, k] = result.steps_taken

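                # Append one whitespace-separated row per job to the CSV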
                with open(fname_base + ".csv", 'a+') as f:
                    line = np.array([
                        Ds[i],
                        Ns[k],
                        avg_accept[j, i, k],
                        avg_accept_est[j, i, k],
                        log_dets[j, i, k],
                        log_dets_est[j, i, k],
                        avg_steps_taken[j, i, k],
                    ])

                    f.write(" ".join(map(str, line)) + os.linesep)
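
Each row holds seven whitespace-separated values, so the file can be read back directly with numpy. A minimal sketch, using a hypothetical fname_base of "results" (column order matches the write above):

import numpy as np

# Columns: D, N, avg_accept, avg_accept_est, log_det, log_det_est, steps_taken
data = np.loadtxt("results.csv", ndmin=2)  # ndmin=2 keeps a single row 2-D
print(data.shape)  # (num_rows, 7)
print(data[:, 0])  # the D column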