# Example #1
                num_warmup, thin_step)

    # marginal sampler
    job.recompute_log_pdf = True

    job.walltime = 60 * 60

    # store results in home dir straight away
    d = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-1]) + os.sep
    job.aggregator = MCMCJobResultAggregatorStoreHome(d)

    return job


if __name__ == "__main__":
    logger.setLevel(10)
    num_repetitions = 10

    # plain MCMC parameters, plan is to use every 200th sample
    thin_step = 1
    num_iterations = 5200
    num_warmup = 200

    compute_local = False

    if not FileSystem.cmd_exists("sbatch") or compute_local:
        engine = SerialComputationEngine()

    else:
        johns_slurm_hack = "#SBATCH --partition=intel-ivy,wrkstn,compute"
        folder = os.sep + os.sep.join(["nfs", "data3", "ucabhst", modulename])
# Example #2
import os

from kmc.tools.Log import logger
import numpy as np
from scripts.experiments.trajectories.independent_jobs_classes.random_feats.StudentTrajectoryJob import StudentTrajectoryJob
from scripts.experiments.trajectories.tools import process

# Name of this script with directory and extension stripped (e.g. "foo" for
# ".../foo.py").  os.path.basename/splitext are used instead of manual
# split(os.sep)/split('.') so the result is correct on any platform's path
# separator and does not raise IndexError for an extensionless filename.
modulename = os.path.splitext(os.path.basename(__file__))[0]

if __name__ == "__main__":
    logger.setLevel(20)
    nu_q = 1.
    sigma_p = 1.
    Ds = np.sort(2**np.arange(8))[::-1]
    Ns = np.sort(
        [50, 100, 200, 500, 1000, 2000, 3000, 4000, 5000, 10000, 20000])[::-1]

    print(Ns)
    print(Ds)
    num_repetitions = 10
    num_steps = 100
    max_steps = 1000
    step_size = .1

    scale0 = 0.5
    lmbda0 = 0.00008

    job_generator = lambda D, N, m: StudentTrajectoryJob(N,
                                                         D,
                                                         m,
                                                         nu_q,