Example no. 1
0
def init():
    """Initialize job-supervisor module state (idempotent).

    Sets the module globals ``_DB_DIR``, ``cfg``, ``_NEXT_REQUEST_SECONDS``
    and ``job_driver``; migrates a pre-existing runner db to the job db when
    user data already exists; and schedules the free-simulation purge on the
    tornado IOLoop. Safe to call more than once: subsequent calls return
    immediately.
    """
    global _DB_DIR, cfg, _NEXT_REQUEST_SECONDS, job_driver
    # _DB_DIR doubles as the "already initialized" flag
    if _DB_DIR:
        return
    job.init()
    # Deferred import, presumably to avoid a circular import at module load
    # time -- confirm. The name is bound at module level through the
    # ``global`` declaration above.
    from sirepo import job_driver

    job_driver.init(pkinspect.this_module())
    _DB_DIR = sirepo.srdb.root().join(_DB_SUBDIR)
    cfg = pkconfig.init(
        job_cache_secs=(300, int, 'when to re-read job state from disk'),
        # run-time limits expressed as (fractional) hours per job kind
        max_hours=dict(
            analysis=(.04, float, 'maximum run-time for analysis job',),
            parallel=(1, float, 'maximum run-time for parallel job (except sbatch)'),
            parallel_premium=(2, float, 'maximum run-time for parallel job for premium user (except sbatch)'),
            sequential=(.1, float, 'maximum run-time for sequential job'),
        ),
        purge_free_after_days=(1000, int, 'how many days to wait before purging a free users simulation'),
        purge_free_start=('02:00:00', str, 'time to first start purging free users simulations (%H:%M:%S)'),
        purge_free_period=('01:00:00', str, 'how often to purge free users simulations after start time (%H:%M:%S)'),
        sbatch_poll_secs=(15, int, 'how often to poll squeue and parallel status'),
    )
    # per-job-kind interval (seconds); sbatch tracks its configured poll rate,
    # so this must be built after cfg above
    _NEXT_REQUEST_SECONDS = PKDict({
        job.PARALLEL: 2,
        job.SBATCH: cfg.sbatch_poll_secs,
        job.SEQUENTIAL: 1,
    })
    sirepo.auth_db.init(sirepo.srdb.root(), migrate_db_file=False)
    if sirepo.simulation_db.user_dir_name().exists():
        if not _DB_DIR.exists():
            # Existing user data but no job db yet: one-time migration from
            # the old runner db, executed as a subprocess under the py2
            # pyenv interpreter.
            pkdlog('calling upgrade_runner_to_job_db path={}', _DB_DIR)
            import subprocess
            subprocess.check_call(
                (
                    'pyenv',
                    'exec',
                    'sirepo',
                    'db',
                    'upgrade_runner_to_job_db',
                    _DB_DIR,
                ),
                env=PKDict(os.environ).pkupdate(
                    PYENV_VERSION='py2',
                    # NOTE(review): value appears redacted ('******') in this
                    # source -- confirm the intended logged-in-user value
                    SIREPO_AUTH_LOGGED_IN_USER='******',
                ),
            )
    else:
        # fresh install: no user data, just create the job db directory
        pykern.pkio.mkdir_parent(_DB_DIR)
    # schedule the purge of free-user simulations on the running IOLoop;
    # init=True presumably flags the first invocation -- confirm in _ComputeJob
    tornado.ioloop.IOLoop.current().add_callback(
        _ComputeJob.purge_free_simulations,
        init=True,
    )
Example no. 2
0
def init():
    """Initialize job-supervisor module state (idempotent).

    Sets the module globals ``_DB_DIR``, ``cfg``, ``_NEXT_REQUEST_SECONDS``
    and ``job_driver``; migrates a pre-existing runner db to the job db when
    user data already exists; and schedules the free-simulation purge on the
    tornado IOLoop. Safe to call more than once: subsequent calls return
    immediately.
    """
    global _DB_DIR, cfg, _NEXT_REQUEST_SECONDS, job_driver
    # _DB_DIR doubles as the "already initialized" flag
    if _DB_DIR:
        return
    job.init()
    # Deferred import, presumably to avoid a circular import at module load
    # time -- confirm. The name is bound at module level through the
    # ``global`` declaration above.
    from sirepo import job_driver

    job_driver.init(pkinspect.this_module())
    _DB_DIR = sirepo.srdb.root().join(_DB_SUBDIR)
    cfg = pkconfig.init(
        job_cache_secs=(300, int, 'when to re-read job state from disk'),
        # run-time limits in seconds (parsed by pkconfig.parse_seconds)
        max_secs=dict(
            analysis=(144, pkconfig.parse_seconds, 'maximum run-time for analysis job',),
            parallel=(3600, pkconfig.parse_seconds, 'maximum run-time for parallel job (except sbatch)'),
            parallel_premium=(3600*2, pkconfig.parse_seconds, 'maximum run-time for parallel job for premium user (except sbatch)'),
            sequential=(360, pkconfig.parse_seconds, 'maximum run-time for sequential job'),
        ),
        purge_non_premium_after_secs=(0, pkconfig.parse_seconds, 'how long to wait before purging non-premium users simulations'),
        purge_non_premium_task_secs=(None, pkconfig.parse_seconds, 'when to clean up simulation runs of non-premium users (%H:%M:%S)'),
        sbatch_poll_secs=(15, int, 'how often to poll squeue and parallel status'),
    )
    # per-job-kind interval (seconds); sbatch tracks its configured poll rate,
    # so this must be built after cfg above
    _NEXT_REQUEST_SECONDS = PKDict({
        job.PARALLEL: 2,
        job.SBATCH: cfg.sbatch_poll_secs,
        job.SEQUENTIAL: 1,
    })
    sirepo.auth_db.init()
    if sirepo.simulation_db.user_path().exists():
        if not _DB_DIR.exists():
            # Existing user data but no job db yet: one-time migration from
            # the old runner db, executed as a subprocess.
            pkdlog('calling upgrade_runner_to_job_db path={}', _DB_DIR)
            import subprocess
            subprocess.check_call(
                (
                    'pyenv',
                    'exec',
                    'sirepo',
                    'db',
                    'upgrade_runner_to_job_db',
                    _DB_DIR,
                ),
                env=PKDict(os.environ).pkupdate(
                    # NOTE(review): value appears redacted ('******') in this
                    # source -- confirm the intended logged-in-user value
                    SIREPO_AUTH_LOGGED_IN_USER='******',
                ),
            )
    else:
        # fresh install: no user data, just create the job db directory
        pykern.pkio.mkdir_parent(_DB_DIR)
    # schedule the purge of free-user simulations on the running IOLoop
    tornado.ioloop.IOLoop.current().add_callback(
        _ComputeJob.purge_free_simulations,
    )
Example no. 3
0
def init():
    """Initialize job-supervisor module state (idempotent).

    Sets the module globals ``cfg``, ``_DB_DIR``, ``_NEXT_REQUEST_SECONDS``
    and ``job_driver``, and schedules the free-simulation purge on the
    tornado IOLoop. Safe to call more than once: subsequent calls return
    immediately.
    """
    global cfg, _DB_DIR, _NEXT_REQUEST_SECONDS, job_driver

    # cfg doubles as the "already initialized" flag in this version
    if cfg:
        return
    job.init()
    # Deferred import, presumably to avoid a circular import at module load
    # time -- confirm. The name is bound at module level through the
    # ``global`` declaration above.
    from sirepo import job_driver

    job_driver.init(pkinspect.this_module())
    cfg = pkconfig.init(
        job_cache_secs=(300, int, 'when to re-read job state from disk'),
        # run-time limits in seconds (parsed by pkconfig.parse_seconds)
        max_secs=dict(
            analysis=(
                144,
                pkconfig.parse_seconds,
                'maximum run-time for analysis job',
            ),
            parallel=(3600, pkconfig.parse_seconds,
                      'maximum run-time for parallel job (except sbatch)'),
            parallel_premium=
            (3600 * 2, pkconfig.parse_seconds,
             'maximum run-time for parallel job for premium user (except sbatch)'
             ),
            sequential=(360, pkconfig.parse_seconds,
                        'maximum run-time for sequential job'),
        ),
        purge_non_premium_after_secs=(
            0, pkconfig.parse_seconds,
            'how long to wait before purging non-premium users simulations'),
        purge_non_premium_task_secs=(
            None, pkconfig.parse_seconds,
            'when to clean up simulation runs of non-premium users (%H:%M:%S)'
        ),
        sbatch_poll_secs=(15, int,
                          'how often to poll squeue and parallel status'),
    )
    # db directory now comes from srdb directly (no local join/migration here)
    _DB_DIR = sirepo.srdb.supervisor_dir()
    # per-job-kind interval (seconds); sbatch tracks its configured poll rate,
    # so this must be built after cfg above
    _NEXT_REQUEST_SECONDS = PKDict({
        job.PARALLEL: 2,
        job.SBATCH: cfg.sbatch_poll_secs,
        job.SEQUENTIAL: 1,
    })
    sirepo.auth_db.init()
    # schedule the purge of free-user simulations on the running IOLoop
    tornado.ioloop.IOLoop.current().add_callback(
        _ComputeJob.purge_free_simulations, )
Example no. 4
0
def init():
    """Initialize job-supervisor module state (idempotent).

    Sets the module globals ``_DB_DIR``, ``cfg`` and
    ``_NEXT_REQUEST_SECONDS``, populates ``_MAX_RUN_SECS`` per job kind, and
    migrates a pre-existing runner db to the job db when user data already
    exists. Safe to call more than once: subsequent calls return immediately.
    """
    global _DB_DIR, cfg, _NEXT_REQUEST_SECONDS
    # _DB_DIR doubles as the "already initialized" flag
    if _DB_DIR:
        return
    job.init()
    # job_driver here is a module-level import (no ``global``/deferred import
    # in this version) -- confirm against the file's import block
    job_driver.init()
    _DB_DIR = sirepo.srdb.root().join(_DB_SUBDIR)
    cfg = pkconfig.init(
        parallel=dict(
            max_hours=(1, float,
                       'maximum run-time for parallel job (except sbatch)'), ),
        sbatch_poll_secs=(60, int,
                          'how often to poll squeue and parallel status'),
        sequential=dict(max_hours=(.1, float,
                                   'maximum run-time for sequential job'), ),
    )
    # convert the configured hour limits to whole seconds per job kind;
    # assumes every k in job.KINDS has a cfg[k].max_hours entry -- verify
    for k in job.KINDS:
        _MAX_RUN_SECS[k] = int(cfg[k].max_hours * 3600)
    # per-job-kind interval (seconds); sbatch tracks its configured poll rate,
    # so this must be built after cfg above
    _NEXT_REQUEST_SECONDS = PKDict({
        job.PARALLEL: 2,
        job.SBATCH: cfg.sbatch_poll_secs,
        job.SEQUENTIAL: 1,
    })
    if sirepo.simulation_db.user_dir_name().exists():
        if not _DB_DIR.exists():
            # Existing user data but no job db yet: one-time migration from
            # the old runner db, executed as a subprocess under the py2
            # pyenv interpreter.
            pkdlog('calling upgrade_runner_to_job_db path={}', _DB_DIR)
            import subprocess
            subprocess.check_call(
                (
                    'pyenv',
                    'exec',
                    'sirepo',
                    'db',
                    'upgrade_runner_to_job_db',
                    _DB_DIR,
                ),
                env=PKDict(os.environ).pkupdate(
                    PYENV_VERSION='py2',
                    # NOTE(review): value appears redacted ('******') in this
                    # source -- confirm the intended logged-in-user value
                    SIREPO_AUTH_LOGGED_IN_USER='******',
                ),
            )
    else:
        # fresh install: no user data, just create the job db directory
        pykern.pkio.mkdir_parent(_DB_DIR)