Example #1
def default_command():
    global cfg

    cfg = pkconfig.init(
        debug=(pkconfig.channel_in('dev'), bool, 'run supervisor in debug mode'),
        ip=(sirepo.job.DEFAULT_IP, str, 'ip to listen on'),
        port=(sirepo.job.DEFAULT_PORT, int, 'what port to listen on'),
    )
    sirepo.job_supervisor.init()
    pkio.mkdir_parent(sirepo.job.DATA_FILE_ROOT)
    pkio.mkdir_parent(sirepo.job.LIB_FILE_ROOT)
    app = tornado.web.Application(
        [
            (sirepo.job.AGENT_URI, _AgentMsg),
            (sirepo.job.SERVER_URI, _ServerReq),
            (sirepo.job.SERVER_PING_URI, _ServerPing),
            (sirepo.job.DATA_FILE_URI + '/(.*)', _DataFileReq),
        ],
        debug=cfg.debug,
        static_path=sirepo.job.SUPERVISOR_SRV_ROOT.join(sirepo.job.LIB_FILE_URI),
        # tornado expects a trailing slash
        static_url_prefix=sirepo.job.LIB_FILE_URI + '/',
    )
    server = tornado.httpserver.HTTPServer(app, xheaders=True)
    server.listen(cfg.port, cfg.ip)
    signal.signal(signal.SIGTERM, _sigterm)
    signal.signal(signal.SIGINT, _sigterm)
    pkdlog('ip={} port={}', cfg.ip, cfg.port)
    tornado.ioloop.IOLoop.current().start()
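Note: each of these examples follows the same pkconfig declaration convention: every keyword passed to pkconfig.init is a (default, parser, docstring) tuple, a pkconfig.Required(parser, docstring), or a nested dict of such declarations, and the call returns a dict-like object of parsed values. A minimal, self-contained sketch of that convention (the parameter names here are made up for illustration):

from pykern import pkconfig

cfg = pkconfig.init(
    # (default, parser, docstring): the parser converts the default or any
    # externally supplied value into its final type
    listen_port=(8000, int, 'port to listen on'),
    verbose=(False, bool, 'enable verbose logging'),
)
print(cfg.listen_port, cfg.verbose)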
Example #2
File: docker.py Project: JiayangY/sirepo
def init_class():
    global cfg

    cfg = pkconfig.init(
        dev_volumes=(pkconfig.channel_in('dev'), bool,
                     'mount ~/.pyenv, ~/.local and ~/src for development'),
        hosts=pkconfig.RequiredUnlessDev(tuple(), tuple, 'execution hosts'),
        image=('radiasoft/sirepo', str, 'docker image to run all jobs'),
        parallel=dict(
            cores=(2, int, 'cores per parallel job'),
            gigabytes=(1, int, 'gigabytes per parallel job'),
            slots_per_host=(1, int, 'parallel slots per node'),
        ),
        sequential=dict(
            gigabytes=(1, int, 'gigabytes per sequential job'),
            slots_per_host=(1, int, 'sequential slots per node'),
        ),
        supervisor_uri=job.DEFAULT_SUPERVISOR_URI_DECL,
        tls_dir=pkconfig.RequiredUnlessDev(None, _cfg_tls_dir,
                                           'directory containing host certs'),
    )
    if not cfg.tls_dir or not cfg.hosts:
        _init_dev_hosts()
    _init_hosts()
    return DockerDriver.init_class()
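Note: nested dicts such as parallel and sequential above declare grouped sub-parameters. A small hedged sketch with hypothetical names, assuming the resulting groups can be read with the same attribute-style access used for flat values:

from pykern import pkconfig

cfg = pkconfig.init(
    parallel=dict(
        cores=(2, int, 'cores per parallel job'),
        gigabytes=(1, int, 'gigabytes per parallel job'),
    ),
)
# assumption: nested groups support attribute-style access like flat values
print(cfg.parallel.cores, cfg.parallel.gigabytes)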
Example #3
def init():
    global cfg

    if cfg:
        return

    cfg = pkconfig.init(
        max_message_bytes=(int(2e8), pkconfig.parse_bytes,
                           'maximum message size throughout system'),
        ping_interval_secs=(
            2 * 60, pkconfig.parse_seconds,
            'how long to wait between sending keep alive pings'),
        ping_timeout_secs=(4 * 60, pkconfig.parse_seconds,
                           'how long to wait for a ping response'),
        server_secret=(
            'a very secret, secret',
            str,
            'shared secret between supervisor and server',
        ),
        verify_tls=(not pkconfig.channel_in('dev'), bool,
                    'do not validate (self-signed) certs'),
    )
    global SUPERVISOR_SRV_ROOT, LIB_FILE_ROOT, DATA_FILE_ROOT

    SUPERVISOR_SRV_ROOT = sirepo.srdb.root().join(SUPERVISOR_SRV_SUBDIR)
    LIB_FILE_ROOT = SUPERVISOR_SRV_ROOT.join(LIB_FILE_URI[1:])
    DATA_FILE_ROOT = SUPERVISOR_SRV_ROOT.join(DATA_FILE_URI[1:])
Example #4
File: docker.py Project: mkeilman/sirepo
 def init_class(cls, job_supervisor):
     cls.cfg = pkconfig.init(
         agent_starting_secs=(
             cls._AGENT_STARTING_SECS_DEFAULT + 3,
             int,
             'how long to wait for agent start',
         ),
         constrain_resources=(True, bool,
                              'apply --cpus and --memory constraints'),
         dev_volumes=(pkconfig.channel_in('dev'), bool,
                      'mount ~/.pyenv, ~/.local and ~/src for development'),
         hosts=pkconfig.RequiredUnlessDev(tuple(), tuple,
                                          'execution hosts'),
         idle_check_secs=pkconfig.ReplacedBy(
             'sirepo.job_driver.idle_check_secs'),
         image=('radiasoft/sirepo', str, 'docker image to run all jobs'),
         parallel=dict(
             cores=(2, int, 'cores per parallel job'),
             gigabytes=(1, int, 'gigabytes per parallel job'),
             slots_per_host=(1, int, 'parallel slots per node'),
         ),
         sequential=dict(
             gigabytes=(1, int, 'gigabytes per sequential job'),
             slots_per_host=(1, int, 'sequential slots per node'),
         ),
         supervisor_uri=job.DEFAULT_SUPERVISOR_URI_DECL,
         tls_dir=pkconfig.RequiredUnlessDev(
             None, _cfg_tls_dir, 'directory containing host certs'),
     )
     if not cls.cfg.tls_dir or not cls.cfg.hosts:
         cls._init_dev_hosts()
     cls._init_hosts(job_supervisor)
     return cls
Example #5
def _cfg():
    import netrc

    global cfg
    n = None
    p = pkcollections.Dict(
        api_pause_seconds=(
            0 if pkconfig.channel_in('dev') else 10,
            int,
            'pauses between backups',
        ),
        exclude_re=(None, _cfg_exclude_re,
                    'regular expression to exclude a repo'),
        keep_days=(
            _cfg_keep_days(2),
            _cfg_keep_days,
            'how many days of backups to keep',
        ),
        password=[None, str, 'github password'],
        test_mode=(
            pkconfig.channel_in('dev'),
            pkconfig.parse_bool,
            'only backs up test-github-backup repo',
        ),
        user=[None, str, 'github user'],
    )
    try:
        n = netrc.netrc().authenticators('github.com')
        for i, k in (0, 'user'), (2, 'password'):
            p[k][0] = n[i]
    except Exception:
        pass
    cfg = pkconfig.init(**p)
    assert cfg.test_mode or cfg.password is not None and cfg.user is not None, \
        'user and password required unless test_mode'
Example #6
File: github.py Project: cchall/sirepo
def init_apis(*args, **kwargs):
    def _init_model(base):
        """Creates User class bound to dynamic `db` variable"""
        global AuthGithubUser, UserModel

        class AuthGithubUser(base):
            __tablename__ = 'auth_github_user_t'
            oauth_id = sqlalchemy.Column(base.STRING_NAME, primary_key=True)
            user_name = sqlalchemy.Column(base.STRING_NAME,
                                          unique=True,
                                          nullable=False)
            uid = sqlalchemy.Column(base.STRING_ID, unique=True)

        UserModel = AuthGithubUser

    global cfg, AUTH_METHOD_VISIBLE
    cfg = pkconfig.init(
        callback_uri=(
            None, str,
            'Github callback URI (defaults to api_authGithubAuthorized)'),
        key=pkconfig.Required(str, 'Github key'),
        method_visible=(
            True,
            bool,
            'github auth method is visible to users when it is an enabled method',
        ),
        secret=pkconfig.Required(str, 'Github secret'),
    )
    AUTH_METHOD_VISIBLE = cfg.method_visible
    auth_db.init_model(_init_model)
Example #7
def init_apis(*args, **kwargs):
    global cfg

    cfg = pkconfig.init(
        dst_db_root=(
            pkio.py_path(sirepo.srdb.root()).join('jupyterhub'),
            pkio.py_path,
            'new jupyter user db',
        ),
        rs_jupyter_migrate=(
            False, bool,
            'give user option to migrate data from jupyter.radiasoft.org'),
        src_db_root=(
            pkio.py_path('/var/empty'),
            pkio.py_path,
            'existing jupyter user db (ex /srv/jupyterhub)',
        ),
        uri_root=('jupyter', str, 'the root uri of jupyterhub'),
    )
    sirepo.auth_db.init_model(_init_model)
    sirepo.events.register({
        'auth_logout': _event_auth_logout,
        'end_api_call': _event_end_api_call,
        'github_authorized': _event_github_authorized,
    })
Example #8
File: srdb.py Project: cchall/sirepo
def _init_root():
    global _cfg, _root

    def _cfg_root(v):
        """Config value or root package's parent or cwd with `_DEFAULT_ROOT`"""
        if not os.path.isabs(v):
            pkconfig.raise_error(f'{v}: SIREPO_SRDB_ROOT must be absolute')
        if not os.path.isdir(v):
            pkconfig.raise_error(f'{v}: SIREPO_SRDB_ROOT must be a directory and exist')
        return pkio.py_path(v)

    _cfg = pkconfig.init(
        root=(None, _cfg_root, 'where database resides'),
    )
    _root = _cfg.root
    if _root:
        return _root
    assert pkconfig.channel_in('dev'), \
        'SIREPO_SRDB_ROOT must be configured except in dev'
    r = pkio.py_path(
        sys.modules[pkinspect.root_package(_init_root)].__file__,
    ).dirpath().dirpath()
    # Check to see if we are in our dev directory. This is a hack,
    # but should be reliable.
    if not r.join('requirements.txt').check():
        # Don't run from an install directory
        r = pkio.py_path('.')
    _root = pkio.mkdir_parent(r.join(_DEFAULT_ROOT))
    return _root
Example #9
 def init_class(cls, job_supervisor_module):
     global job_supervisor
     job_supervisor = job_supervisor_module
     cls.cfg = pkconfig.init(
         agent_log_read_sleep=(
             5,
             int,
             'how long to wait before reading the agent log on start',
         ),
         agent_starting_secs=(
             cls._AGENT_STARTING_SECS_DEFAULT * 3,
             int,
             'how long to wait for agent start',
         ),
         cores=(None, int, 'dev cores config'),
         host=pkconfig.Required(str, 'host name for slurm controller'),
         host_key=pkconfig.Required(str, 'host key'),
         run_slots=(1, int, 'number of concurrent OP_RUN for each user'),
         shifter_image=(None, str, 'needed if using Shifter'),
         sirepo_cmd=pkconfig.Required(str, 'how to run sirepo'),
         srdb_root=pkconfig.Required(
             _cfg_srdb_root,
             'where to run job_agent, must include {sbatch_user}'),
         supervisor_uri=job.DEFAULT_SUPERVISOR_URI_DECL,
     )
     cls._KNOWN_HOSTS = (
         cls.cfg.host_key if cls.cfg.host in cls.cfg.host_key else
         '{} {}'.format(cls.cfg.host, cls.cfg.host_key)).encode('ascii')
     return cls
Example #10
File: basic.py Project: JiayangY/sirepo
def init_apis(*args, **kwargs):
    global cfg
    cfg = pkconfig.init(
        uid=pkconfig.Required(_cfg_uid,
                              'single user allowed to login with basic auth'),
        password=pkconfig.Required(str, 'password for uid'),
    )
Example #11
def _init():
    import sirepo.mpi

    global SCHEMA_COMMON, cfg, JOB_RUN_MODE_MAP
    cfg = pkconfig.init(
        nfs_tries=(10, int, 'How many times to poll in hack_nfs_write_status'),
        nfs_sleep=(0.5, float, 'Seconds sleep per hack_nfs_write_status poll'),
        sbatch_display=(None, str, 'how to display sbatch cluster to user'),
        tmp_dir=(None, pkio.py_path, 'Used by utilities (not regular config)'),
    )
    fn = STATIC_FOLDER.join('json/schema-common{}'.format(JSON_SUFFIX))
    with open(str(fn)) as f:
        SCHEMA_COMMON = json_load(f)
    # In development, any schema update creates a new version
    if pkconfig.channel_in('dev'):
        SCHEMA_COMMON.version = max([
            _timestamp(pkio.py_path(fn).mtime()) \
            for fn in glob.glob(str(STATIC_FOLDER.join('json/*{}'.format(JSON_SUFFIX))))
        ])
    else:
        SCHEMA_COMMON.version = sirepo.__version__
    JOB_RUN_MODE_MAP = PKDict(
        sequential='Serial',
        parallel='{} cores (SMP)'.format(sirepo.mpi.cfg.cores),
    )
    if cfg.sbatch_display:
        JOB_RUN_MODE_MAP.sbatch = cfg.sbatch_display
Example #12
File: docker.py Project: njsmith/sirepo
def init_class(app, uwsgi):
    global cfg, _hosts, _parallel_cores

    if _hosts:
        return

    cfg = pkconfig.init(
        hosts=(None, _cfg_hosts, 'execution hosts'),
        image=('radiasoft/sirepo', str, 'docker image to run all jobs'),
        tls_dir=(None, _cfg_tls_dir, 'directory containing host certs'),
    )
    if not cfg.tls_dir or not cfg.hosts:
        _init_dev_hosts(app)
    _hosts = pkcollections.Dict()
    _parallel_cores = mpi.cfg.cores
    # Require at least three levels to the domain name
    # just to make the directory parsing easier.
    for h in cfg.hosts:
        d = cfg.tls_dir.join(h)
        _hosts[h] = pkcollections.Dict(name=h, cmd_prefix=_cmd_prefix(h, d))
        _init_host(h)
    assert len(_hosts) > 0, \
        '{}: no docker hosts found in directory'.format(cfg.tls_dir)
    _init_hosts_slots_balance()
    _init_slots()
    _init_parse_jobs()
    _init_slot_managers()
    return DockerJob
Example #13
File: job_agent.py Project: cchall/sirepo
def start():
    #TODO(robnagler) commands need their own init hook like the server has
    job.init()
    global cfg

    cfg = pkconfig.init(
        agent_id=pkconfig.Required(str, 'id of this agent'),
        fastcgi_sock_dir=(
            pkio.py_path('/tmp'), pkio.py_path,
            'directory of fastcgi socket, must be less than 50 chars'),
        start_delay=(0, pkconfig.parse_seconds,
                     'delay startup in internal_test mode'),
        supervisor_uri=pkconfig.Required(
            str,
            'how to connect to the supervisor',
        ),
    )
    pkdlog('{}', cfg)
    if pkconfig.channel_in_internal_test() and cfg.start_delay:
        pkdlog('start_delay={}', cfg.start_delay)
        time.sleep(cfg.start_delay)
    i = tornado.ioloop.IOLoop.current()
    d = _Dispatcher()

    def s(*args):
        return i.add_callback_from_signal(_terminate, d)

    signal.signal(signal.SIGTERM, s)
    signal.signal(signal.SIGINT, s)
    i.spawn_callback(d.loop)
    i.start()
Example #14
File: service.py Project: mkeilman/sirepo
def _cfg():
    global __cfg
    if not __cfg:
        __cfg = pkconfig.init(
            ip=('0.0.0.0', _cfg_ip, 'what IP address to open'),
            jupyterhub_port=(8002, _cfg_port,
                             'port on which jupyterhub listens'),
            jupyterhub_debug=(
                True,
                bool,
                'turn on debugging for jupyterhub (hub, spawner, ConfigurableHTTPProxy)',
            ),
            nginx_proxy_port=(8080, _cfg_port,
                              'port on which nginx_proxy listens'),
            port=(8000, _cfg_port, 'port on which uwsgi or http listens'),
            processes=(1, _cfg_int(1, 16), 'how many uwsgi processes to start'),
            run_dir=(None, str, 'where to run the program (defaults db_dir)'),
            # uwsgi got hung up with 1024 threads on a 4 core VM with 4GB
            # so limit to 128, which is probably more than enough with
            # this application.
            threads=(10, _cfg_int(1, 128),
                     'how many uwsgi threads in each process'),
            use_reloader=(pkconfig.channel_in('dev'), bool,
                          'use the Flask reloader'),
        )
    return __cfg
Example #15
def init_apis(*args, **kwargs):
    global cfg
    cfg = pkconfig.init(
        secret=pkconfig.Required(
            str,
            'Shared secret between Sirepo and BlueSky server',
        ),
    )
Example #16
def _init():
    from pykern import pkconfig
    global _cfg

    def b(msg, dev=False):
        return (
            pkconfig.channel_in('dev')
            if dev else pkconfig.channel_in_internal_test(),
            bool,
            msg,
        )

    _cfg = pkconfig.init(
        # No secrets should be stored here (see sirepo.job.agent_env)
        api_modules=((), set, 'optional api modules, e.g. status'),
        default_proprietary_sim_types=(
            set(), set,
            'codes where all users are authorized by default but that authorization can be revoked',
        ),
        jspec=dict(
            derbenevskrinsky_force_formula=b('Include Derbenev-Skrinsky force formula'),
        ),
        proprietary_sim_types=(set(), set, 'codes that require authorization'),
        #TODO(robnagler) make this a sim_type config like srw and warpvnd
        rs4pi_dose_calc=(False, bool, 'run the real dose calculator'),
        sim_types=(set(), set, 'simulation types (codes) to be imported'),
        srw=dict(
            app_url=('/en/xray-beamlines.html', str, 'URL for SRW link'),
            beamline3d=b('Show 3D beamline plot'),
            hide_guest_warning=b('Hide the guest warning in the UI', dev=True),
            mask_in_toolbar=b('Show the mask element in toolbar'),
            show_open_shadow=(
                pkconfig.channel_in_internal_test(), bool,
                'Show "Open as a New Shadow Simulation" menu item'),
            show_rsopt_ml=(pkconfig.channel_in_internal_test(), bool,
                           'Show "Export ML Script" menu item'),
        ),
        warpvnd=dict(
            allow_3d_mode=(True, bool,
                           'Include 3D features in the Warp VND UI'),
            display_test_boxes=b(
                'Display test boxes to visualize 3D -> 2D projections'),
        ),
    )
    i = _cfg.proprietary_sim_types.intersection(
        _cfg.default_proprietary_sim_types)
    assert not i, \
        f'{i}: cannot be in proprietary_sim_types and default_proprietary_sim_types'
    s = set(_cfg.sim_types or
            (PROD_FOSS_CODES if pkconfig.channel_in('prod') else _FOSS_CODES))
    s.update(_cfg.proprietary_sim_types, _cfg.default_proprietary_sim_types)
    for v in _DEPENDENT_CODES:
        if v[0] in s:
            s.add(v[1])
    x = s.difference(VALID_CODES)
    assert not x, \
        'sim_type(s) invalid={} expected={}'.format(x, VALID_CODES)
    _cfg.sim_types = frozenset(s)
    return _cfg
Example #17
def _init():
    global cfg

    cfg = pkconfig.init(
        lib_file_resource_only=(False, bool, 'used by utility programs'),
        lib_file_list=(None, lambda v: pkio.read_text(v).split('\n'),
                       'directory listing of remote lib'),
        lib_file_uri=(None, str, 'where to get files from when remote'),
    )
Example #18
def _init():
    global SCHEMA_COMMON
    with open(str(STATIC_FOLDER.join('json/schema-common{}'.format(JSON_SUFFIX)))) as f:
        SCHEMA_COMMON = json_load(f)
    global cfg
    cfg = pkconfig.init(
        nfs_tries=(10, int, 'How many times to poll in hack_nfs_write_status'),
        nfs_sleep=(0.5, float, 'Seconds sleep per hack_nfs_write_status poll'),
    )
Example #19
File: github.py Project: JiayangY/sirepo
def init_apis(app, *args, **kwargs):
    global cfg
    cfg = pkconfig.init(
        key=pkconfig.Required(str, 'Github key'),
        secret=pkconfig.Required(str, 'Github secret'),
        callback_uri=(None, str, 'Github callback URI (defaults to api_authGithubAuthorized)'),
    )
    app.session_interface = _FlaskSessionInterface()
    auth_db.init_model(app, _init_model)
Example #20
File: zgoubi.py Project: QJohn2017/sirepo
def _init():
    global cfg
    if cfg:
        return
    p = os.environ.get('PYENV_ROOT')
    if p:
        p += '/versions/py2/bin/python'
    else:
        p = 'python2'
    cfg = pkconfig.init(python_path=(p, str, 'python executable'), )
Example #21
def init_apis(*args, **kwargs):
    global cfg
    cfg = pkconfig.init(
        key=pkconfig.Required(str, 'Github key'),
        secret=pkconfig.Required(str, 'Github secret'),
        callback_uri=(
            None, str,
            'Github callback URI (defaults to api_authGithubAuthorized)'),
    )
    auth_db.init_model(_init_model)
Example #22
def default_command():
    global cfg

    assert pkconfig.channel_in('dev'), \
        'Only to be used in dev. channel={}'.format(pkconfig.cfg.channel)
    cfg = pkconfig.init(proprietary_code_uri=(
        f'file://{pathlib.Path.home()}/src/radiasoft/rsconf/proprietary',
        str,
        'root uri of proprietary codes files location',
    ), )
    _proprietary_codes()
Example #23
def _init():
    global SCHEMA_COMMON, cfg
    fn = STATIC_FOLDER.join('json/schema-common{}'.format(JSON_SUFFIX))
    with open(str(fn)) as f:
        SCHEMA_COMMON = json_load(f)
    # In development, you can touch schema-common to get a new version
    SCHEMA_COMMON.version = _timestamp(fn.mtime()) if pkconfig.channel_in('dev') else sirepo.__version__
    cfg = pkconfig.init(
        nfs_tries=(10, int, 'How many times to poll in hack_nfs_write_status'),
        nfs_sleep=(0.5, float, 'Seconds sleep per hack_nfs_write_status poll'),
    )
Example #24
def _init():
    global SCHEMA_COMMON, cfg
    fn = STATIC_FOLDER.join('json/schema-common{}'.format(JSON_SUFFIX))
    with open(str(fn)) as f:
        SCHEMA_COMMON = json_load(f)
    # In development, you can touch schema-common to get a new version
    SCHEMA_COMMON.version = _timestamp(fn.mtime()) if pkconfig.channel_in('dev') else sirepo.__version__
    cfg = pkconfig.init(
        nfs_tries=(10, int, 'How many times to poll in hack_nfs_write_status'),
        nfs_sleep=(0.5, float, 'Seconds sleep per hack_nfs_write_status poll'),
    )
Example #25
File: email.py Project: QJohn2017/sirepo
def init_apis(*args, **kwargs):
    global cfg
    cfg = pkconfig.init(
        #TODO(robnagler) validate email
        from_email=pkconfig.Required(str, 'From email address'),
        from_name=pkconfig.Required(str, 'From display name'),
        smtp_password=pkconfig.Required(str, 'SMTP auth password'),
        smtp_server=pkconfig.Required(str, 'SMTP TLS server'),
        smtp_user=pkconfig.Required(str, 'SMTP auth user'),
    )
    auth_db.init_model(_init_model)
Example #26
def init_class():
    global cfg

    cfg = pkconfig.init(
        slots=dict(
            parallel=(1, int, 'max parallel slots'),
            sequential=(1, int, 'max sequential slots'),
        ),
        supervisor_uri=job.DEFAULT_SUPERVISOR_URI_DECL,
    )
    return LocalDriver.init_class()
Example #27
def init_apis(*args, **kwargs):
    global cfg
    cfg = pkconfig.init(
        mail_server=(None, str, 'Mail server'),
        mail_username=(None, str, 'Mail user name'),
        mail_password=(None, str, 'Mail password'),
        mail_support_email=(None, str, 'Support email address'),
        mail_recipient_email=(None, str, 'Email to receive registration messages'),
    )
    assert cfg.mail_server and cfg.mail_username and cfg.mail_password \
        and cfg.mail_support_email and cfg.mail_recipient_email, \
        'Missing mail config'
Example #28
def init():
    global _DB_DIR, cfg, _NEXT_REQUEST_SECONDS, job_driver
    if _DB_DIR:
        return
    job.init()
    from sirepo import job_driver

    job_driver.init(pkinspect.this_module())
    _DB_DIR = sirepo.srdb.root().join(_DB_SUBDIR)
    cfg = pkconfig.init(
        job_cache_secs=(300, int, 'when to re-read job state from disk'),
        max_hours=dict(
            analysis=(.04, float, 'maximum run-time for analysis job',),
            parallel=(1, float, 'maximum run-time for parallel job (except sbatch)'),
            parallel_premium=(2, float, 'maximum run-time for parallel job for premium user (except sbatch)'),
            sequential=(.1, float, 'maximum run-time for sequential job'),
        ),
        purge_free_after_days=(1000, int, "how many days to wait before purging a free user's simulations"),
        purge_free_start=('02:00:00', str, "time to first start purging free users' simulations (%H:%M:%S)"),
        purge_free_period=('01:00:00', str, "how often to purge free users' simulations after start time (%H:%M:%S)"),
        sbatch_poll_secs=(15, int, 'how often to poll squeue and parallel status'),
    )
    _NEXT_REQUEST_SECONDS = PKDict({
        job.PARALLEL: 2,
        job.SBATCH: cfg.sbatch_poll_secs,
        job.SEQUENTIAL: 1,
    })
    sirepo.auth_db.init(sirepo.srdb.root(), migrate_db_file=False)
    if sirepo.simulation_db.user_dir_name().exists():
        if not _DB_DIR.exists():
            pkdlog('calling upgrade_runner_to_job_db path={}', _DB_DIR)
            import subprocess
            subprocess.check_call(
                (
                    'pyenv',
                    'exec',
                    'sirepo',
                    'db',
                    'upgrade_runner_to_job_db',
                    _DB_DIR,
                ),
                env=PKDict(os.environ).pkupdate(
                    PYENV_VERSION='py2',
                    SIREPO_AUTH_LOGGED_IN_USER='******',
                ),
            )
    else:
        pykern.pkio.mkdir_parent(_DB_DIR)
    tornado.ioloop.IOLoop.current().add_callback(
        _ComputeJob.purge_free_simulations,
        init=True,
    )
Example #29
def init(job_supervisor_module):
    global cfg, _CLASSES, _DEFAULT_CLASS, job_supervisor
    assert not cfg
    job_supervisor = job_supervisor_module
    cfg = pkconfig.init(modules=((_DEFAULT_MODULE, ), set,
                                 'available job driver modules'), )
    _CLASSES = PKDict()
    p = pkinspect.this_module().__name__
    for n in cfg.modules:
        m = importlib.import_module(pkinspect.module_name_join((p, n)))
        _CLASSES[n] = m.CLASS.init_class(job_supervisor)
    _DEFAULT_CLASS = _CLASSES.get('docker') or _CLASSES.get(_DEFAULT_MODULE)
    pkdlog('modules={}', sorted(_CLASSES.keys()))
Example #30
File: status.py Project: Landau1908/sirepo
def init_apis(app):
    uri_router.register_api_module()
    global cfg
    cfg = pkconfig.init(
        username=(None, str, 'Sirepo status user id'),
        password=(None, str, 'Basic Auth password'),
    )
    if not cfg.username or not cfg.password:
        raise RuntimeError('Missing Sirepo status config')
    app.config['BASIC_AUTH_USERNAME'] = cfg.username
    app.config['BASIC_AUTH_PASSWORD'] = cfg.password
    global _basic_auth
    _basic_auth = BasicAuth(app)
Example #31
def _init():
    global SCHEMA_COMMON, cfg
    cfg = pkconfig.init(
        nfs_tries=(10, int, 'How many times to poll in hack_nfs_write_status'),
        nfs_sleep=(0.5, float, 'Seconds sleep per hack_nfs_write_status poll'),
        sbatch_display=(None, str, 'how to display sbatch cluster to user'),
        tmp_dir=(None, pkio.py_path, 'Used by utilities (not regular config)'),
    )
    fn = STATIC_FOLDER.join('json/schema-common{}'.format(JSON_SUFFIX))
    with open(str(fn)) as f:
        SCHEMA_COMMON = json_load(f)
    # In development, you can touch schema-common to get a new version
    SCHEMA_COMMON.version = _timestamp(fn.mtime()) if pkconfig.channel_in('dev') else sirepo.__version__
    SCHEMA_COMMON.common.enum.JobRunMode = _init_JobRunMode()
Example #32
File: oauth.py Project: e-carlin/sirepo
def _init(app):
    global _db

    app.session_interface = _FlaskSessionInterface()
    app.config.update(
        SQLALCHEMY_DATABASE_URI='sqlite:///{}'.format(_db_filename(app)),
        SQLALCHEMY_COMMIT_ON_TEARDOWN=True,
        SQLALCHEMY_TRACK_MODIFICATIONS=False,
    )
    _db = SQLAlchemy(app, session_options=dict(autoflush=True))
    global cfg
    cfg = pkconfig.init(
        github_key=(None, str, 'GitHub application key'),
        github_secret=(None, str, 'GitHub application secret'),
        github_callback_uri=(None, str, 'GitHub application callback URI'),
    )
    if not cfg.github_key or not cfg.github_secret:
        raise RuntimeError('Missing GitHub oauth config')
Example #33
def init_apis(app):
    global cfg
    cfg = pkconfig.init(
        mail_server=(None, str, 'Mail server'),
        mail_username=(None, str, 'Mail user name'),
        mail_password=(None, str, 'Mail password'),
        mail_support_email=(None, str, 'Support email address'),
        mail_recipient_email=(None, str, 'Email to receive registration messages'),
    )
    assert cfg.mail_server and cfg.mail_username and cfg.mail_password \
        and cfg.mail_support_email and cfg.mail_recipient_email, \
        'Missing mail config'
    app.config.update(
        MAIL_USE_TLS=True,
        MAIL_PORT=587,
        MAIL_SERVER=cfg.mail_server,
        MAIL_USERNAME=cfg.mail_username,
        MAIL_PASSWORD=cfg.mail_password,
    )
    global _mail
    _mail = flask_mail.Mail(app)
    uri_router.register_api_module()
Example #34
File: bluesky.py Project: e-carlin/sirepo
        )
    t = req.authNonce.split(_AUTH_NONCE_SEPARATOR)[0]
    try:
        t = int(t)
    except ValueError as e:
        util.raise_not_found(
            '{}: auth_nonce prefix not an int: nonce={}',
            t,
            req.authNonce,
        )
    delta = now - t
    if abs(delta) > _AUTH_NONCE_REPLAY_SECS:
        util.raise_not_found(
            '{}: auth_nonce time outside replay window={} now={} nonce={}',
            t,
            _AUTH_NONCE_REPLAY_SECS,
            now,
            req.authNonce,
        )


def init_apis(app):
    assert cfg.auth_secret, \
        'sirepo_bluesky_auth_secret is not configured'
    uri_router.register_api_module()


cfg = pkconfig.init(
    auth_secret=(None, str, 'Shared secret between Sirepo and BlueSky server'),
)
Example #35
pkconfig.append_load_path('sirepo')

import importlib
import os
import sys

from pykern.pkdebug import pkdc, pkdp
from pykern import pkcollections

from celery import Celery

celery = Celery('sirepo')

cfg = pkconfig.init(
    broker_url=('amqp://guest@localhost//', str, 'Celery: queue broker url'),
    celeryd_concurrency=(1, int, 'how many tasks to run in parallel'),
    celeryd_task_time_limit=(3600, int, 'max run time for a task in seconds'),
)

celery.conf.update(
    pkcollections.map_items(cfg, op=lambda k, v: (k.upper(), v)),
)

celery.conf.update(
    CELERYD_LOG_COLOR=False,
    CELERYD_MAX_TASKS_PER_CHILD=1,
    CELERYD_PREFETCH_MULTIPLIER=1,
    CELERYD_TASK_SOFT_TIME_LIMIT=celery.conf['CELERYD_TASK_TIME_LIMIT'] - 10,
    CELERY_ACKS_LATE=True,
    CELERY_RESULT_BACKEND='rpc',
    CELERY_RESULT_PERSISTENT=True,
Example #36

@pkconfig.parse_none
def _cfg_session_secret(value):
    """Reads file specified as config value"""
    if not value:
        assert pkconfig.channel_in('dev'), 'missing session secret configuration'
        return 'dev dummy secret'
    with open(value) as f:
        return f.read()


def _init_maps():
    import sirepo.cookie

    res = {
        'key': {
            _ORIG_KEY: sirepo.cookie._COOKIE_USER,
        },
        'value': {}
    }
    if oauth_hook:
        oauth_hook(res)
    return res


cfg = pkconfig.init(
    key=('sirepo_' + pkconfig.cfg.channel, str, 'Beaker: Name of the cookie key used to save the session under'),
    secret=(None, _cfg_session_secret, 'Beaker: Used with the HMAC to ensure session integrity'),
)
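Note: custom parsers such as _cfg_session_secret above are decorated with @pkconfig.parse_none so a missing value reaches the parser as None instead of being rejected, letting the parser supply a channel-dependent fallback. A hedged sketch with hypothetical names:

from pykern import pkconfig

@pkconfig.parse_none
def _cfg_greeting(value):
    # assumption: with @pkconfig.parse_none, an unset value arrives as None
    if value is None:
        return 'hello (dev default)'
    return str(value)

cfg = pkconfig.init(
    greeting=(None, _cfg_greeting, 'message printed at startup'),
)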
Example #37
File: m1.py Project: robnagler/pykern
    if v is None:
        return 999
    return int(v)

cfg = pkconfig.init(
    dict1=({
        'd1': 'default1',
        'd2': 'default2',
    }, dict, 'first param is dict'),
    list2=(['second1'], list, 'second param is list'),
    p3=(1313, int, 'third param is int'),
    p4=(None, int, 'fourth param is 10x p3'),
    p5=(os.environ['HOME'], str, 'value of $HOME'),
    p6=(None, _custom_p6, 'sixth param is a custom parser'),
    list7=(['default7'], list, 'seventh param is a list '),
    req8=pkconfig.Required(int, 'an eighth required parameter'),
    sub_params9=dict(
        sub9_1=(None, int, 'sub param is first of ninth group'),
        sub9_2=dict(
            sub9_2_1=(44, int, 'sub 9.2.1')
        ),
    ),
    dynamic_default10=(None, _some_key, 'sub dynamic default by parsing None'),
    bool1=(False, bool, 'a False boolean'),
    bool2=(True, bool, 'a True boolean'),
    bool3=(True, bool, 'a True boolean will be overriden'),
    bool4=(False, bool, 'a False boolean will be overriden'),
)
if cfg.p4 is None:
    cfg.p4 = cfg.p3 * 10
Example #38
File: server.py Project: mrakitin/sirepo

def _validate_serial(data):
    """Verify serial in data validates

    Args:
        data (dict): request with serial and possibly models

    Returns:
        object: None if all ok, or json response if invalid
    """
    res = simulation_db.validate_serial(data)
    if not res:
        return None
    return _json_response({
        'state': 'error',
        'error': 'invalidSerial',
        'simulationData': res,
    })


cfg = pkconfig.init(
    beaker_session=dict(
        key=('sirepo_{PYKERN_PKCONFIG_CHANNEL}', str, 'Beaker: Name of the cookie key used to save the session under'),
        secret=(None, _cfg_session_secret, 'Beaker: Used with the HMAC to ensure session integrity'),
        secure=(False, bool, 'Beaker: Whether or not the session cookie should be marked as secure'),
    ),
    job_queue=('Background', runner.cfg_job_queue, 'how to run long tasks: Celery or Background'),
    foreground_time_limit=(5 * 60, _cfg_time_limit, 'timeout for short (foreground) tasks'),
)
Example #39
File: __init__.py Project: e-carlin/sirepo
    pass


def _cfg_job_class(value):
    """Return job queue class based on name

    Args:
        value (object): May be class or str.

    Returns:
        object: `Background` or `Celery` class.

    """
    v = value.lower()
    assert v in _JOB_CLASSES, \
        '{}: invalid job_class, not in {}'.format(v, _JOB_CLASSES)
    return v


cfg = pkconfig.init(
    import_secs=(10, int, 'maximum runtime of backgroundImport'),
    # default is set in init(), because of server.cfg.job_queue
    job_class=(
        None,
        _cfg_job_class,
        'how to run jobs: {}'.format(', '.join(_JOB_CLASSES)),
    ),
    parallel_secs=(3600, int, 'maximum runtime of parallel job'),
    sequential_secs=(300, int, 'maximum runtime of serial job'),
)
Example #40
#: Configuration
cfg = None

def for_sim_type(sim_type):
    """Get cfg for simulation type

    Args:
        sim_type (str): srw, warp, etc.

    Returns:
        dict: application specific config
    """
    if sim_type not in cfg:
        return {}
    return pkcollections.map_to_dict(cfg[sim_type])


@pkconfig.parse_none
def _cfg_bool(value):
    """Convert str to integer and then bool"""
    if isinstance(value, str):
        value = int(value)
    return bool(value)


cfg = pkconfig.init(
    srw=dict(
        mask_in_toolbar=(pkconfig.channel_in_internal_test(), _cfg_bool, 'Show the mask element in toolbar'),
    ),
)
Example #41
File: m1.py Project: elventear/pykern
def _custom_p6(v):
    import dateutil.parser
    return dateutil.parser.parse(v)

@pkconfig.parse_none
def _some_key(v):
    if v is None:
        return 999
    return int(v)

cfg = pkconfig.init(
    dict1=({
        'd1': 'default1',
        'd2': 'default2',
    }, dict, 'first param is dict'),
    list2=(['second1'], list, 'second param is list'),
    p3=(1313, int, 'third param is int'),
    p4=('{P1_M1_P3}0', int, 'fourth param is 10x p3'),
    p5=('{HOME}', str, 'value of $HOME'),
    p6=(None, _custom_p6, 'sixth param is a custom parser'),
    list7=(['default7'], list, 'seventh param is a list '),
    req8=pkconfig.Required(int, 'an eighth required parameter'),
    sub_params9=dict(
        sub9_1=(None, int, 'sub param is first of ninth group'),
        sub9_2=dict(
            sub9_2_1=(44, int, 'sub 9.2.1')
        ),
    ),
    dynamic_default10=(None, _some_key, 'sub dynamic default by parsing None'),
)
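Note: the defaults for p4 and p5 above show that string defaults are interpolated before parsing: '{HOME}' expands to the HOME environment variable, and '{P1_M1_P3}0' (package p1, module m1, parameter p3) expands to the configured p3 value with a trailing 0, which the int parser then turns into 10x p3. A hedged sketch of the environment-variable case with hypothetical names:

from pykern import pkconfig

cfg = pkconfig.init(
    # assumption: '{HOME}' in a string default is replaced with the value
    # of the HOME environment variable before the str parser runs
    work_dir=('{HOME}', str, 'where to write scratch files'),
    retries=(3, int, 'how many times to retry'),
)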
Example #42
        assert self.average is not None, \
            'self.average is None and has not been initialized'
        return self.average

class _Privy(object):
    """This is a private class that does nothing"""
    pass


def _cfg_length(anything):
    """Configuration parser for any_length

    Args:
        anything (object): configured value

    Returns:
        int: value between 1 and 999
    """
    anything = int(anything)
    assert 0 < anything <= 999, \
        '{}: any_length must be from 1 to 999'.format(anything)
    return anything


# Finally we assign any length. Note that we include a trailing , at the
# end of every line in a list so that you don't have to remember to
# add the comma when you add another line.
cfg = pkconfig.init(
    any_length=(1, _cfg_length, 'A length used by this module'),
)
Example #43
pkconfig.append_load_path('sirepo')

from celery import Celery
from pykern import pkcollections
from pykern import pkio
from pykern import pksubprocess
from pykern.pkdebug import pkdc, pkdexc, pkdp, pkdlog
from sirepo.template import template_common
import py.path


celery = Celery('sirepo')

cfg = pkconfig.init(
    broker_url=('amqp://guest@localhost//', str, 'Celery: queue broker url'),
    celery_result_backend=('rpc://', str, 'configure db other than default'),
    celeryd_concurrency=(1, int, 'how many worker processes to start'),
    celeryd_task_time_limit=(3600, int, 'max run time for a task in seconds'),
)

celery.conf.update(
    pkcollections.map_items(cfg, op=lambda k, v: (k.upper(), v)),
)

_SERIALIZER = 'json'

celery.conf.update(
    CELERYD_LOG_COLOR=False,
    CELERYD_MAX_TASKS_PER_CHILD=1,
    CELERYD_PREFETCH_MULTIPLIER=1,
    CELERYD_TASK_SOFT_TIME_LIMIT=celery.conf['CELERYD_TASK_TIME_LIMIT'] - 10,
    CELERY_ACCEPT_CONTENT=[_SERIALIZER],
Example #44
File: srw.py Project: mrakitin/sirepo
def _process_output(filename, model_data):
    simulation_db.write_result(extract_report_data(filename, model_data))


def _run_srw():
    #TODO(pjm): need to properly escape data values, untrusted from client
    data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
    if data['report'] == 'mirrorReport':
        #TODO(pjm): mirror report should use its own jinja template
        _process_output(_mirror_plot(data), data)
        return
    # This defines the main() function:
    exec(pkio.read_text(template_common.PARAMETERS_PYTHON_FILE), locals(), locals())
    main()
    _process_output(get_filename_for_model(data['report']), data)


def _cfg_int(lower, upper):
    def wrapper(value):
        v = int(value)
        assert lower <= v <= upper, \
            'value must be from {} to {}'.format(lower, upper)
        return v
    return wrapper


cfg = pkconfig.init(
    particles_per_core=(5, int, 'particles for each core to process'),
)
Example #45

@pkconfig.parse_none
def _cfg_sim_types(value):
    res = pkconfig.parse_tuple(value)
    if not res:
        return _codes()
    for c in res:
        assert c in _codes(), \
            'invalid sim_type={}, expected one of={}'.format(c, _codes())
    return res


def _codes(want_all=pkconfig.channel_in('dev')):
    return _ALL_CODES if want_all else _NON_DEV_CODES


cfg = pkconfig.init(
    api_modules=((), tuple, 'optional api modules, e.g. bluesky'),
    #TODO(robnagler) make sim_type config
    rs4pi_dose_calc=(False, bool, 'run the real dose calculator'),
    sim_types=(None, _cfg_sim_types, 'simulation types (codes) to be imported'),
    srw=dict(
        mask_in_toolbar=(pkconfig.channel_in_internal_test(), bool, 'Show the mask element in toolbar'),
    ),
    warpvnd=dict(
        allow_3d_mode=(pkconfig.channel_in_internal_test(), bool, 'Include 3D features in the Warp VND UI'),
        display_test_boxes=(pkconfig.channel_in_internal_test(), bool, 'Display test boxes to visualize 3D -> 2D projections'),
    ),
)
Example #46
        outfile = v.si_fn
    elif data['report'] == 'mirrorReport':
        _process_output(_mirror_plot(data), data)
        return
    elif re.search('^watchpointReport', data['report']):
        op = get_beamline_optics()
        v.ws = True
        outfile = v.ws_fni
    else:
        raise Exception('unknown report: {}'.format(data['report']))
    if isinstance(mag, srwlib.SRWLGsnBm):
        mag = None
    srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)
    _process_output(outfile, data)


def _cfg_int(lower, upper):
    def wrapper(value):
        v = int(value)
        assert lower <= v <= upper, \
            'value must be from {} to {}'.format(lower, upper)
        return v
    return wrapper


cfg = pkconfig.init(
    slave_processes=(1, int, 'cores to use for run_background slaves'),
    particles_per_slave=(5, int, 'particles for each core to process'),
    total_particles=(50000, int, 'total number of particles to process'),
)
Example #47
File: pkdebug.py Project: robnagler/pykern
        return None
    if isinstance(anything, _RE_TYPE):
        return anything
    return re.compile(anything, flags=re.IGNORECASE)


@pkconfig.parse_none
def _cfg_output(anything):
    if anything is None:
        return None
    if hasattr(anything, 'write'):
        return anything
    return open(anything, 'w')


def _z(msg):
    """Useful for debugging this module"""
    with open('/dev/tty', 'w') as f:
        f.write(str(msg) + '\n')


cfg = pkconfig.init(
    control=(None, _cfg_control, 'Pattern to match against pkdc messages'),
    output=(None, _cfg_output, 'Where to write messages either as a "writable" or file name'),
    redirect_logging=(False, bool, "Redirect Python's logging to output"),
    want_pid_time=(False, bool, 'Display pid and time in messages'),
)

if cfg:
    init()
Example #48
File: mpi.py Project: mrakitin/sirepo
        raise


def run_script(script):
    """Execute python script with mpi.

    Args:
        script (str): python text
    """
    abort = '''

from mpi4py import MPI
if MPI.COMM_WORLD.Get_rank():
    import signal
    signal.signal(signal.SIGTERM, lambda x, y: MPI.COMM_WORLD.Abort(1))

'''
    n = re.sub(r'^from __future.*', abort, script, count=1, flags=re.MULTILINE)
    script = abort + script if n == script else n
    fn = 'mpi_run.py'
    pkio.write_text(fn, script)
    p = None
    return run_program([sys.executable or 'python', fn])


cfg = pkconfig.init(
    cores=(1, int, 'cores to use per run'),
    slaves=(1, int, 'DEPRECATED: set $SIREPO_MPI_CORES'),
)
cfg.cores = max(cfg.cores, cfg.slaves)
Example #49

def _cfg_int(lower, upper):
    def wrapper(value):
        v = int(value)
        assert lower <= v <= upper, \
            'value must be from {} to {}'.format(lower, upper)
        return v
    return wrapper


def _db_dir():
    return pkio.mkdir_parent(cfg.db_dir)


def _run_dir():
    return pkio.mkdir_parent(cfg.run_dir)


cfg = pkconfig.init(
    db_dir=(None, _cfg_db_dir, 'where database resides'),
    run_dir=('{SIREPO_PKCLI_SERVICE_DB_DIR}', str, 'where to run the program'),
    port=(8000, _cfg_int(5001, 32767), 'port to listen on'),
    processes=(1, _cfg_int(1, 16), 'how many uwsgi processes to start'),
    # uwsgi got hung up with 1024 threads on a 4 core VM with 4GB
    # so limit to 128, which is probably more than enough with
    # this application.
    threads=(10, _cfg_int(1, 128), 'how many uwsgi threads in each process'),
    ip=('0.0.0.0', _cfg_ip, 'what IP address to open'),
)
Example #50
File: service.py Project: e-carlin/sirepo
    return wrapper


def _cfg_ip(value):
    try:
        socket.inet_aton(value)
        return value
    except socket.error:
        pkcli.command_error('{}: ip is not a valid IPv4 address', value)


def _run_dir():
    from sirepo import server

    if not isinstance(cfg.run_dir, type(py.path.local())):
        cfg.run_dir = pkio.mkdir_parent(cfg.run_dir) if cfg.run_dir else server.cfg.db_dir.new()
    return cfg.run_dir


cfg = pkconfig.init(
    ip=('0.0.0.0', _cfg_ip, 'what IP address to open'),
    nginx_proxy_port=(8080, _cfg_int(5001, 32767), 'port on which nginx_proxy listens'),
    port=(8000, _cfg_int(5001, 32767), 'port on which uwsgi or http listens'),
    processes=(1, _cfg_int(1, 16), 'how many uwsgi processes to start'),
    run_dir=(None, str, 'where to run the program (defaults db_dir)'),
    # uwsgi got hung up with 1024 threads on a 4 core VM with 4GB
    # so limit to 128, which is probably more than enough with
    # this application.
    threads=(10, _cfg_int(1, 128), 'how many uwsgi threads in each process'),
)
Example #51
File: m1.py Project: elventear/pykern
# -*- coding: utf-8 -*-
u"""?

:copyright: Copyright (c) 2015 RadiaSoft LLC.  All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkdebug import pkdc, pkdp

from pykern import pkconfig

cfg = pkconfig.init(
    anything=(13, int, 'an int param'),
)
Example #52
File: github.py Project: robnagler/pykern

def _cfg_exclude_re(anything):
    if isinstance(anything, _RE_TYPE):
        return anything
    return re.compile(anything, flags=re.IGNORECASE)


def _cfg_keep_days(anything):
    if isinstance(anything, datetime.timedelta):
        return anything
    return datetime.timedelta(days=int(anything))


def _shell(cmd):
    subprocess.check_output(cmd, stderr=subprocess.STDOUT)


cfg = pkconfig.init(
    api_pause_seconds = (30, int, 'pauses between backups'),
    exclude_re=(None, _cfg_exclude_re, 'regular expression to exclude a repo'),
    keep_days=(
        _cfg_keep_days(2),
        _cfg_keep_days,
        'how many days of backups to keep',
    ),
    password=pkconfig.Required(str, 'github password'),
    test_mode=(False, pkconfig.parse_bool, 'only backup this repo'),
    user=pkconfig.Required(str, 'github user'),
)
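Note: pkconfig.Required(parser, docstring) declares a parameter with no usable default, so initialization is expected to fail unless a value is supplied externally. A minimal hedged sketch with hypothetical names:

from pykern import pkconfig

cfg = pkconfig.init(
    # assumption: pkconfig raises a configuration error at init time if no
    # value for api_token is supplied from the environment or config channel
    api_token=pkconfig.Required(str, 'token used to authenticate'),
    retries=(3, int, 'how many times to retry a failed request'),
)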
Example #53
File: pkunit.py Project: robnagler/pykern
    """
    return _base_dir(_WORK_DIR_SUFFIX).ensure(dir=True)


def _base_dir(postfix):
    """Base name with directory.

    Args:
        postfix (str): what to append to base (``_data`` or ``_work``).

    Returns:
        py.path.local: base directory with postfix
    """
    m = module_under_test or pkinspect.caller_module()
    filename = py.path.local(m.__file__)
    b = re.sub(r'_test$|^test_', '', filename.purebasename)
    assert b != filename.purebasename, \
        '{}: module name must end in _test'.format(filename)
    return py.path.local(filename.dirname).join(b + postfix).realpath()

def _cfg_json(value):
    from pykern import pkjson
    if isinstance(value, pkcollections.Dict):
        return value
    return pkjson.load_any(value)


cfg = pkconfig.init(
    aux=(pkcollections.Dict(), _cfg_json, 'extra values for tests for CI (e.g. Travis)'),
)
Example #54
File: cookie.py Project: e-carlin/sirepo
            pkdlog('Cookie decoding failed: {} value={}', err, s)

    def _serialize(self):
        return _SERIALIZER_SEP.join(
            itertools.chain.from_iterable(
                [(k, self[k]) for k in sorted(self.keys())],
            ),
        )


@pkconfig.parse_none
def _cfg_http_name(value):
    assert re.search(r'^\w{1,32}$', value), \
        'must be 1-32 word characters; http_name={}'.format(value)
    return value


def _state():
    return flask.g.sirepo_cookie


cfg = pkconfig.init(
    http_name=('sirepo_' + pkconfig.cfg.channel, _cfg_http_name, 'Set-Cookie name'),
    private_key=(None, str, 'urlsafe base64 encrypted 32-byte key'),
    is_secure=(
        not pkconfig.channel_in('dev'),
        pkconfig.parse_bool,
        'Add secure attribute to Set-Cookie',
    )
)