def init(app, simulation_db):
    """Convert route map to dispatchable callables

    Initializes `_uri_to_route` and adds a single flask route (`_dispatch`)
    to dispatch based on the map.

    Args:
        app (Flask): flask app
    """
    if _uri_to_route:
        return
    from sirepo import feature_config

    for n in _REQUIRED_MODULES + tuple(sorted(feature_config.cfg().api_modules)):
        register_api_module(importlib.import_module('sirepo.' + n))
    _init_uris(app, simulation_db, feature_config.cfg().sim_types)
    sirepo.http_request.init(simulation_db=simulation_db)
    sirepo.http_reply.init(simulation_db=simulation_db)
    sirepo.uri.init(
        http_reply=sirepo.http_reply,
        http_request=sirepo.http_request,
        simulation_db=simulation_db,
        uri_router=pkinspect.this_module(),
    )

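# Hedged usage sketch (assumed caller, not part of this module): a Flask
# server would construct its app and hand it, together with the
# simulation_db module, to init() so the route map gets registered behind
# the single `_dispatch` endpoint described in the docstring above.
#
#   import flask
#   import sirepo.simulation_db
#   import sirepo.uri_router
#
#   app = flask.Flask(__name__)
#   sirepo.uri_router.init(app, sirepo.simulation_db)
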
def _init():
    """Create a decorator for each `APIPerm` value and attach it to this module."""
    def _new(e):
        def _decorator(func):
            setattr(func, ATTR, e)
            return func

        return _decorator

    m = pkinspect.this_module()
    for e in iter(APIPerm):
        setattr(m, e.name.lower(), _new(e))

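# Illustrative sketch (assumption drawn from the email auth module further
# below, not from this function): once _init() has run, each APIPerm member
# is available as a lower-case decorator on api_perm, and API handlers are
# tagged with the permission they require. uri_router can later read the
# stored APIPerm back off the handler via the ATTR attribute set above.
#
#   @api_perm.allow_cookieless_set_user
#   def api_authEmailAuthorized(simulation_type, token):
#       ...
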
def init():
    global _DB_DIR, cfg, _NEXT_REQUEST_SECONDS, job_driver

    if _DB_DIR:
        return
    job.init()
    from sirepo import job_driver

    job_driver.init(pkinspect.this_module())
    _DB_DIR = sirepo.srdb.root().join(_DB_SUBDIR)
    cfg = pkconfig.init(
        job_cache_secs=(300, int, 'when to re-read job state from disk'),
        max_hours=dict(
            analysis=(.04, float, 'maximum run-time for analysis job'),
            parallel=(1, float, 'maximum run-time for parallel job (except sbatch)'),
            parallel_premium=(2, float, 'maximum run-time for parallel job for premium user (except sbatch)'),
            sequential=(.1, float, 'maximum run-time for sequential job'),
        ),
        purge_free_after_days=(1000, int, 'how many days to wait before purging a free users simulation'),
        purge_free_start=('02:00:00', str, 'time to first start purging free users simulations (%H:%M:%S)'),
        purge_free_period=('01:00:00', str, 'how often to purge free users simulations after start time (%H:%M:%S)'),
        sbatch_poll_secs=(15, int, 'how often to poll squeue and parallel status'),
    )
    _NEXT_REQUEST_SECONDS = PKDict({
        job.PARALLEL: 2,
        job.SBATCH: cfg.sbatch_poll_secs,
        job.SEQUENTIAL: 1,
    })
    sirepo.auth_db.init(sirepo.srdb.root(), migrate_db_file=False)
    if sirepo.simulation_db.user_dir_name().exists():
        if not _DB_DIR.exists():
            pkdlog('calling upgrade_runner_to_job_db path={}', _DB_DIR)
            import subprocess

            subprocess.check_call(
                (
                    'pyenv',
                    'exec',
                    'sirepo',
                    'db',
                    'upgrade_runner_to_job_db',
                    _DB_DIR,
                ),
                env=PKDict(os.environ).pkupdate(
                    PYENV_VERSION='py2',
                    SIREPO_AUTH_LOGGED_IN_USER='******',
                ),
            )
    else:
        pykern.pkio.mkdir_parent(_DB_DIR)
    tornado.ioloop.IOLoop.current().add_callback(
        _ComputeJob.purge_free_simulations,
        init=True,
    )

def init(job_supervisor_module):
    """Import and initialize the configured job driver modules."""
    global cfg, _CLASSES, _DEFAULT_CLASS, job_supervisor

    assert not cfg
    job_supervisor = job_supervisor_module
    cfg = pkconfig.init(
        modules=((_DEFAULT_MODULE,), set, 'available job driver modules'),
    )
    _CLASSES = PKDict()
    p = pkinspect.this_module().__name__
    for n in cfg.modules:
        m = importlib.import_module(pkinspect.module_name_join((p, n)))
        _CLASSES[n] = m.CLASS.init_class(job_supervisor)
    _DEFAULT_CLASS = _CLASSES.get('docker') or _CLASSES.get(_DEFAULT_MODULE)
    pkdlog('modules={}', sorted(_CLASSES.keys()))

def init():
    global _DB_DIR, cfg, _NEXT_REQUEST_SECONDS, job_driver

    if _DB_DIR:
        return
    job.init()
    from sirepo import job_driver

    job_driver.init(pkinspect.this_module())
    _DB_DIR = sirepo.srdb.root().join(_DB_SUBDIR)
    cfg = pkconfig.init(
        job_cache_secs=(300, int, 'when to re-read job state from disk'),
        max_secs=dict(
            analysis=(144, pkconfig.parse_seconds, 'maximum run-time for analysis job'),
            parallel=(3600, pkconfig.parse_seconds, 'maximum run-time for parallel job (except sbatch)'),
            parallel_premium=(3600 * 2, pkconfig.parse_seconds, 'maximum run-time for parallel job for premium user (except sbatch)'),
            sequential=(360, pkconfig.parse_seconds, 'maximum run-time for sequential job'),
        ),
        purge_non_premium_after_secs=(0, pkconfig.parse_seconds, 'how long to wait before purging non-premium users simulations'),
        purge_non_premium_task_secs=(None, pkconfig.parse_seconds, 'when to clean up simulation runs of non-premium users (%H:%M:%S)'),
        sbatch_poll_secs=(15, int, 'how often to poll squeue and parallel status'),
    )
    _NEXT_REQUEST_SECONDS = PKDict({
        job.PARALLEL: 2,
        job.SBATCH: cfg.sbatch_poll_secs,
        job.SEQUENTIAL: 1,
    })
    sirepo.auth_db.init()
    if sirepo.simulation_db.user_path().exists():
        if not _DB_DIR.exists():
            pkdlog('calling upgrade_runner_to_job_db path={}', _DB_DIR)
            import subprocess

            subprocess.check_call(
                (
                    'pyenv',
                    'exec',
                    'sirepo',
                    'db',
                    'upgrade_runner_to_job_db',
                    _DB_DIR,
                ),
                env=PKDict(os.environ).pkupdate(
                    SIREPO_AUTH_LOGGED_IN_USER='******',
                ),
            )
    else:
        pykern.pkio.mkdir_parent(_DB_DIR)
    tornado.ioloop.IOLoop.current().add_callback(
        _ComputeJob.purge_free_simulations,
    )

def _init_full():
    global visible_methods, valid_methods, non_guest_methods

    auth_db.init()
    p = pkinspect.this_module().__name__
    visible_methods = []
    valid_methods = cfg.methods.union(cfg.deprecated_methods)
    for n in valid_methods:
        m = importlib.import_module(pkinspect.module_name_join((p, n)))
        _METHOD_MODULES[n] = m
        if m.AUTH_METHOD_VISIBLE and n in cfg.methods:
            visible_methods.append(n)
    visible_methods = tuple(sorted(visible_methods))
    non_guest_methods = tuple(m for m in visible_methods if m != METHOD_GUEST)
    cookie.auth_hook_from_header = _auth_hook_from_header

def init():
    global cfg, _DB_DIR, _NEXT_REQUEST_SECONDS, job_driver

    if cfg:
        return
    job.init()
    from sirepo import job_driver

    job_driver.init(pkinspect.this_module())
    cfg = pkconfig.init(
        job_cache_secs=(300, int, 'when to re-read job state from disk'),
        max_secs=dict(
            analysis=(144, pkconfig.parse_seconds, 'maximum run-time for analysis job'),
            parallel=(3600, pkconfig.parse_seconds, 'maximum run-time for parallel job (except sbatch)'),
            parallel_premium=(3600 * 2, pkconfig.parse_seconds, 'maximum run-time for parallel job for premium user (except sbatch)'),
            sequential=(360, pkconfig.parse_seconds, 'maximum run-time for sequential job'),
        ),
        purge_non_premium_after_secs=(0, pkconfig.parse_seconds, 'how long to wait before purging non-premium users simulations'),
        purge_non_premium_task_secs=(None, pkconfig.parse_seconds, 'when to clean up simulation runs of non-premium users (%H:%M:%S)'),
        sbatch_poll_secs=(15, int, 'how often to poll squeue and parallel status'),
    )
    _DB_DIR = sirepo.srdb.supervisor_dir()
    _NEXT_REQUEST_SECONDS = PKDict({
        job.PARALLEL: 2,
        job.SBATCH: cfg.sbatch_poll_secs,
        job.SEQUENTIAL: 1,
    })
    sirepo.auth_db.init()
    tornado.ioloop.IOLoop.current().add_callback(
        _ComputeJob.purge_free_simulations,
    )

def init_apis(*args, **kwargs):
    global uri_router, simulation_db, visible_methods, valid_methods, non_guest_methods

    assert not _METHOD_MODULES
    assert not cfg.logged_in_user, \
        'Do not set $SIREPO_AUTH_LOGGED_IN_USER in server'
    uri_router = importlib.import_module('sirepo.uri_router')
    simulation_db = importlib.import_module('sirepo.simulation_db')
    auth_db.init()
    p = pkinspect.this_module().__name__
    visible_methods = []
    valid_methods = cfg.methods.union(cfg.deprecated_methods)
    for n in valid_methods:
        m = importlib.import_module(pkinspect.module_name_join((p, n)))
        uri_router.register_api_module(m)
        _METHOD_MODULES[n] = m
        if m.AUTH_METHOD_VISIBLE and n in cfg.methods:
            visible_methods.append(n)
    visible_methods = tuple(sorted(visible_methods))
    non_guest_methods = tuple(m for m in visible_methods if m != METHOD_GUEST)
    cookie.auth_hook_from_header = _auth_hook_from_header

def init_apis(app, *args, **kwargs):
    global uri_router, simulation_db, _app, cfg

    assert not _METHOD_MODULES
    cfg = pkconfig.init(
        methods=((_METHOD_GUEST,), tuple, 'for logging in'),
        deprecated_methods=(tuple(), tuple, 'for migrating to methods'),
    )
    uri_router = importlib.import_module('sirepo.uri_router')
    simulation_db = importlib.import_module('sirepo.simulation_db')
    auth_db.init(app)
    _app = app
    this_module = pkinspect.this_module()
    p = this_module.__name__
    valid_methods.extend(cfg.methods + cfg.deprecated_methods)
    for n in valid_methods:
        m = importlib.import_module(pkinspect.module_name_join((p, n)))
        uri_router.register_api_module(m)
        _METHOD_MODULES[n] = m
        if m.AUTH_METHOD_VISIBLE and n in cfg.methods:
            visible_methods.append(n)
        setattr(this_module, n, m)
    cookie.auth_hook_from_header = _auth_hook_from_header

def init_apis(*args, **kwargs):
    global uri_router, simulation_db, visible_methods, valid_methods, non_guest_methods

    assert not _METHOD_MODULES
    assert not cfg.logged_in_user, \
        'Do not set $SIREPO_AUTH_LOGGED_IN_USER in server'
    uri_router = importlib.import_module('sirepo.uri_router')
    simulation_db = importlib.import_module('sirepo.simulation_db')
    auth_db.init()
    p = pkinspect.this_module().__name__
    visible_methods = []
    valid_methods = cfg.methods.union(cfg.deprecated_methods)
    for n in valid_methods:
        m = importlib.import_module(pkinspect.module_name_join((p, n)))
        uri_router.register_api_module(m)
        _METHOD_MODULES[n] = m
        if m.AUTH_METHOD_VISIBLE and n in cfg.methods:
            visible_methods.append(n)
    visible_methods = tuple(sorted(visible_methods))
    non_guest_methods = tuple(m for m in visible_methods if m != METHOD_GUEST)
    cookie.auth_hook_from_header = _auth_hook_from_header
    s = list(simulation_db.SCHEMA_COMMON.common.constants.paymentPlans.keys())
    assert sorted(s) == sorted(_ALL_PAYMENT_PLANS), \
        f'payment plans from SCHEMA_COMMON={s} not equal to _ALL_PAYMENT_PLANS={_ALL_PAYMENT_PLANS}'

import sirepo.util
import sqlalchemy

AUTH_METHOD = 'email'

#: User can see it
AUTH_METHOD_VISIBLE = True

#: Used by auth_db
AuthEmailUser = None

#: Well known alias for auth
UserModel = None

#: module handle
this_module = pkinspect.this_module()

#: how long before token expires
_EXPIRES_MINUTES = 8 * 60

#: for adding to now
_EXPIRES_DELTA = datetime.timedelta(minutes=_EXPIRES_MINUTES)


@api_perm.allow_cookieless_set_user
def api_authEmailAuthorized(simulation_type, token):
    """Clicked by user in an email

    Token must exist in db and not be expired.
    """
    if http_request.is_spider():

def all_modules(): from pykern import pkconfig from pykern import pkinspect return pkconfig.all_modules_in_load_path(pkinspect.this_module())