def _cfg():
    import netrc

    global cfg
    n = None
    p = pkcollections.Dict(
        api_pause_seconds=(
            0 if pkconfig.channel_in('dev') else 10,
            int,
            'pauses between backups',
        ),
        exclude_re=(None, _cfg_exclude_re, 'regular expression to exclude a repo'),
        keep_days=(
            _cfg_keep_days(2),
            _cfg_keep_days,
            'how many days of backups to keep',
        ),
        password=[None, str, 'github password'],
        test_mode=(
            pkconfig.channel_in('dev'),
            pkconfig.parse_bool,
            'only backs up test-github-backup repo',
        ),
        user=[None, str, 'github user'],
    )
    try:
        n = netrc.netrc().authenticators('github.com')
        for i, k in (0, 'user'), (2, 'password'):
            p[k][0] = n[i]
    except Exception:
        pass
    cfg = pkconfig.init(**p)
    assert cfg.test_mode or cfg.password is not None and cfg.user is not None, \
        'user and password required unless test_mode'
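# Hedged example: the netrc lookup above reads a standard ~/.netrc entry.
# netrc.netrc().authenticators('github.com') returns the (login, account,
# password) triple, which is why indexes 0 and 2 fill 'user' and 'password'.
# The credentials below are hypothetical:
#
#   machine github.com
#     login my-github-user
#     password my-github-token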
def api_authState():
    s = cookie.unchecked_get_value(_COOKIE_STATE)
    v = pkcollections.Dict(
        avatarUrl=None,
        displayName=None,
        needCompleteRegistration=s == _STATE_COMPLETE_REGISTRATION,
        isLoggedIn=_is_logged_in(s),
        method=cookie.unchecked_get_value(_COOKIE_METHOD),
        userName=None,
        visibleMethods=visible_methods,
    )
    u = cookie.unchecked_get_value(_COOKIE_USER)
    if v.isLoggedIn:
        r = auth_db.UserRegistration.search_by(uid=u)
        if r:
            v.displayName = r.display_name
        _method_auth_state(v, u)
    if pkconfig.channel_in('dev'):
        # useful for testing/debugging
        v.uid = u
    return http_reply.render_static(
        'auth-state',
        'js',
        pkcollections.Dict(auth_state=v),
    )
def for_new_user(is_guest):
    if is_guest and pkconfig.channel_in('dev'):
        return get_all()
    return [
        for_sim_type(x)
        for x in sirepo.feature_config.cfg().default_proprietary_sim_types
    ]
def _init():
    import sirepo.mpi

    global SCHEMA_COMMON, cfg, JOB_RUN_MODE_MAP
    cfg = pkconfig.init(
        nfs_tries=(10, int, 'How many times to poll in hack_nfs_write_status'),
        nfs_sleep=(0.5, float, 'Seconds sleep per hack_nfs_write_status poll'),
        sbatch_display=(None, str, 'how to display sbatch cluster to user'),
        tmp_dir=(None, pkio.py_path, 'Used by utilities (not regular config)'),
    )
    fn = STATIC_FOLDER.join('json/schema-common{}'.format(JSON_SUFFIX))
    with open(str(fn)) as f:
        SCHEMA_COMMON = json_load(f)
    # In development, any schema update creates a new version
    if pkconfig.channel_in('dev'):
        SCHEMA_COMMON.version = max([
            _timestamp(pkio.py_path(fn).mtime())
            for fn in glob.glob(str(STATIC_FOLDER.join('json/*{}'.format(JSON_SUFFIX))))
        ])
    else:
        SCHEMA_COMMON.version = sirepo.__version__
    JOB_RUN_MODE_MAP = PKDict(
        sequential='Serial',
        parallel='{} cores (SMP)'.format(sirepo.mpi.cfg.cores),
    )
    if cfg.sbatch_display:
        JOB_RUN_MODE_MAP.sbatch = cfg.sbatch_display
def _cfg_hosts(value):
    value = pkconfig.parse_tuple(value)
    if value:
        return value
    assert pkconfig.channel_in('dev'), \
        'required config'
    return None
def _init_root():
    global _cfg, _root

    def _cfg_root(v):
        """Config value or root package's parent or cwd with `_DEFAULT_ROOT`"""
        if not os.path.isabs(v):
            pkconfig.raise_error(f'{v}: SIREPO_SRDB_ROOT must be absolute')
        if not os.path.isdir(v):
            pkconfig.raise_error(f'{v}: SIREPO_SRDB_ROOT must be a directory and exist')
        return pkio.py_path(v)

    _cfg = pkconfig.init(
        root=(None, _cfg_root, 'where database resides'),
    )
    _root = _cfg.root
    if _root:
        return _root
    assert pkconfig.channel_in('dev'), \
        'SIREPO_SRDB_ROOT must be configured except in dev'
    r = pkio.py_path(
        sys.modules[pkinspect.root_package(_init_root)].__file__,
    ).dirpath().dirpath()
    # Check to see if we are in our dev directory. This is a hack,
    # but should be reliable.
    if not r.join('requirements.txt').check():
        # Don't run from an install directory
        r = pkio.py_path('.')
    _root = pkio.mkdir_parent(r.join(_DEFAULT_ROOT))
    return _root
def cfg_job_class(value):
    """Return job queue class based on name

    Args:
        value (object): May be class or str.

    Returns:
        object: `Background`, `Celery`, or `Docker` class.
    """
    if isinstance(value, type) and issubclass(value, (Celery, Background)):
        # Already initialized but may call initializer with original object
        return value
    if value == 'Celery':
        if pkconfig.channel_in('dev'):
            _assert_celery()
        return Celery
    elif value == 'Docker':
        return Docker
    elif value == 'Background':
        signal.signal(signal.SIGCHLD, Background._sigchld_handler)
        return Background
    elif value is None:
        return None
    else:
        raise AssertionError('{}: unknown job_class'.format(value))
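# Hedged usage sketch of cfg_job_class(): pkconfig calls it with the raw
# config string, and re-calling it with the class it returned is a no-op
# because of the isinstance() check at the top.
#
#   assert cfg_job_class('Background') is Background
#   assert cfg_job_class(cfg_job_class('Background')) is Background
#   assert cfg_job_class(None) is None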
def _init_root(*args):
    global _root

    if args:
        assert not cfg.root, \
            'Cannot set both SIREPO_SRDB_ROOT ({}) and SIREPO_SERVER_DB_DIR ({})'.format(
                cfg.root, args[0],
            )
        cfg.root = args[0]
    v = cfg.root
    if v:
        assert os.path.isabs(v), \
            '{}: SIREPO_SRDB_ROOT must be absolute'.format(v)
        assert os.path.isdir(v), \
            '{}: SIREPO_SRDB_ROOT must be a directory and exist'.format(v)
        v = pkio.py_path(v)
    else:
        assert pkconfig.channel_in('dev'), \
            'SIREPO_SRDB_ROOT must be configured except in DEV'
        fn = sys.modules[pkinspect.root_package(_init_root)].__file__
        root = pkio.py_path(fn).dirpath().dirpath()
        # Check to see if we are in our dev directory. This is a hack,
        # but should be reliable.
        if not root.join('requirements.txt').check():
            # Don't run from an install directory
            root = pkio.py_path('.')
        v = pkio.mkdir_parent(root.join(_DEFAULT_ROOT))
    _root = v
def _cfg():
    global __cfg

    if not __cfg:
        __cfg = pkconfig.init(
            ip=('0.0.0.0', _cfg_ip, 'what IP address to open'),
            jupyterhub_port=(8002, _cfg_port, 'port on which jupyterhub listens'),
            jupyterhub_debug=(
                True,
                bool,
                'turn on debugging for jupyterhub (hub, spawner, ConfigurableHTTPProxy)',
            ),
            nginx_proxy_port=(8080, _cfg_port, 'port on which nginx_proxy listens'),
            port=(8000, _cfg_port, 'port on which uwsgi or http listens'),
            processes=(1, _cfg_int(1, 16), 'how many uwsgi processes to start'),
            run_dir=(None, str, 'where to run the program (defaults to db_dir)'),
            # uwsgi got hung up with 1024 threads on a 4 core VM with 4GB
            # so limit to 128, which is probably more than enough with
            # this application.
            threads=(10, _cfg_int(1, 128), 'how many uwsgi threads in each process'),
            use_reloader=(pkconfig.channel_in('dev'), bool, 'use the Flask reloader'),
        )
    return __cfg
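# Hedged example: pkconfig derives environment variable names from the module
# path plus the key, so, assuming this _cfg() lives in sirepo.pkcli.service,
# any value above can be overridden from the shell, e.g.:
#
#   export SIREPO_PKCLI_SERVICE_PORT=9000
#   export SIREPO_PKCLI_SERVICE_PROCESSES=4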
def _cfg_session_secret(value):
    """Reads file specified as config value"""
    if not value:
        assert pkconfig.channel_in('dev'), 'missing session secret configuration'
        return 'dev dummy secret'
    with open(value) as f:
        return f.read()
def init_class():
    global cfg

    cfg = pkconfig.init(
        dev_volumes=(
            pkconfig.channel_in('dev'),
            bool,
            'mount ~/.pyenv, ~/.local and ~/src for development',
        ),
        hosts=pkconfig.RequiredUnlessDev(tuple(), tuple, 'execution hosts'),
        image=('radiasoft/sirepo', str, 'docker image to run all jobs'),
        parallel=dict(
            cores=(2, int, 'cores per parallel job'),
            gigabytes=(1, int, 'gigabytes per parallel job'),
            slots_per_host=(1, int, 'parallel slots per node'),
        ),
        sequential=dict(
            gigabytes=(1, int, 'gigabytes per sequential job'),
            slots_per_host=(1, int, 'sequential slots per node'),
        ),
        supervisor_uri=job.DEFAULT_SUPERVISOR_URI_DECL,
        tls_dir=pkconfig.RequiredUnlessDev(None, _cfg_tls_dir, 'directory containing host certs'),
    )
    if not cfg.tls_dir or not cfg.hosts:
        _init_dev_hosts()
    _init_hosts()
    return DockerDriver.init_class()
def default_command():
    global cfg

    cfg = pkconfig.init(
        debug=(pkconfig.channel_in('dev'), bool, 'run supervisor in debug mode'),
        ip=(sirepo.job.DEFAULT_IP, str, 'ip to listen on'),
        port=(sirepo.job.DEFAULT_PORT, int, 'what port to listen on'),
    )
    sirepo.job_supervisor.init()
    pkio.mkdir_parent(sirepo.job.DATA_FILE_ROOT)
    pkio.mkdir_parent(sirepo.job.LIB_FILE_ROOT)
    app = tornado.web.Application(
        [
            (sirepo.job.AGENT_URI, _AgentMsg),
            (sirepo.job.SERVER_URI, _ServerReq),
            (sirepo.job.SERVER_PING_URI, _ServerPing),
            (sirepo.job.DATA_FILE_URI + '/(.*)', _DataFileReq),
        ],
        debug=cfg.debug,
        static_path=sirepo.job.SUPERVISOR_SRV_ROOT.join(sirepo.job.LIB_FILE_URI),
        # tornado expects a trailing slash
        static_url_prefix=sirepo.job.LIB_FILE_URI + '/',
    )
    server = tornado.httpserver.HTTPServer(app, xheaders=True)
    server.listen(cfg.port, cfg.ip)
    signal.signal(signal.SIGTERM, _sigterm)
    signal.signal(signal.SIGINT, _sigterm)
    pkdlog('ip={} port={}', cfg.ip, cfg.port)
    tornado.ioloop.IOLoop.current().start()
def _send_login_email(user, uri):
    global _smtp

    if not _smtp:
        if not (pkconfig.channel_in('dev') and cfg.smtp_server == _DEV_SMTP_SERVER):
            a = sirepo.util.flask_app()
            a.config.update(
                MAIL_USE_TLS=True,
                MAIL_PORT=587,
                MAIL_SERVER=cfg.smtp_server,
                MAIL_USERNAME=cfg.smtp_user,
                MAIL_PASSWORD=cfg.smtp_password,
            )
            _smtp = flask_mail.Mail(a)
        else:
            pkdlog('{}', uri)
            return http_reply.gen_json_ok({'uri': uri})
    login_text = u'sign in to' if user.user_name else \
        u'confirm your email and finish creating'
    msg = flask_mail.Message(
        subject='Sign in to Sirepo',
        sender=(cfg.from_name, cfg.from_email),
        recipients=[user.unverified_email],
        body=u'''
Click the link below to {} your Sirepo account.

This link will expire in {} hours and can only be used once.

{}
'''.format(login_text, _EXPIRES_MINUTES / 60, uri),
    )
    _smtp.send(msg)
    return http_reply.gen_json_ok()
def init_class(cls, job_supervisor):
    cls.cfg = pkconfig.init(
        agent_starting_secs=(
            cls._AGENT_STARTING_SECS_DEFAULT + 3,
            int,
            'how long to wait for agent start',
        ),
        constrain_resources=(True, bool, 'apply --cpus and --memory constraints'),
        dev_volumes=(
            pkconfig.channel_in('dev'),
            bool,
            'mount ~/.pyenv, ~/.local and ~/src for development',
        ),
        hosts=pkconfig.RequiredUnlessDev(tuple(), tuple, 'execution hosts'),
        idle_check_secs=pkconfig.ReplacedBy('sirepo.job_driver.idle_check_secs'),
        image=('radiasoft/sirepo', str, 'docker image to run all jobs'),
        parallel=dict(
            cores=(2, int, 'cores per parallel job'),
            gigabytes=(1, int, 'gigabytes per parallel job'),
            slots_per_host=(1, int, 'parallel slots per node'),
        ),
        sequential=dict(
            gigabytes=(1, int, 'gigabytes per sequential job'),
            slots_per_host=(1, int, 'sequential slots per node'),
        ),
        supervisor_uri=job.DEFAULT_SUPERVISOR_URI_DECL,
        tls_dir=pkconfig.RequiredUnlessDev(None, _cfg_tls_dir, 'directory containing host certs'),
    )
    if not cls.cfg.tls_dir or not cls.cfg.hosts:
        cls._init_dev_hosts()
    cls._init_hosts(job_supervisor)
    return cls
def init():
    global cfg

    if cfg:
        return
    cfg = pkconfig.init(
        max_message_bytes=(
            int(2e8),
            pkconfig.parse_bytes,
            'maximum message size throughout system',
        ),
        ping_interval_secs=(
            2 * 60,
            pkconfig.parse_seconds,
            'how long to wait between sending keep alive pings',
        ),
        ping_timeout_secs=(
            4 * 60,
            pkconfig.parse_seconds,
            'how long to wait for a ping response',
        ),
        server_secret=(
            'a very secret, secret',
            str,
            'shared secret between supervisor and server',
        ),
        verify_tls=(
            not pkconfig.channel_in('dev'),
            bool,
            'whether to validate TLS certs (off in dev to allow self-signed certs)',
        ),
    )
    global SUPERVISOR_SRV_ROOT, LIB_FILE_ROOT, DATA_FILE_ROOT

    SUPERVISOR_SRV_ROOT = sirepo.srdb.root().join(SUPERVISOR_SRV_SUBDIR)
    LIB_FILE_ROOT = SUPERVISOR_SRV_ROOT.join(LIB_FILE_URI[1:])
    DATA_FILE_ROOT = SUPERVISOR_SRV_ROOT.join(DATA_FILE_URI[1:])
def setup_dev():
    import requests
    import shutil
    import sirepo.pkcli.admin

    def _get_file(dest):
        if cfg.dev_depot_url.startswith(_FILE_PREFIX):
            _local_file(dest)
            return
        _remote_file(dest)

    def _local_file(dest):
        shutil.copy(
            pkio.py_path(
                cfg.dev_depot_url.replace(_FILE_PREFIX, ''),
            ).join(dest.basename),
            dest,
        )

    def _remote_file(dest):
        r = requests.get('{}/{}'.format(cfg.dev_depot_url, dest.basename))
        r.raise_for_status()
        dest.write_binary(r.content)

    assert pkconfig.channel_in('dev'), \
        'Only to be used in dev. channel={}'.format(pkconfig.cfg.channel)
    _FILE_PREFIX = 'file://'
    t = 'flash'
    d = sirepo.pkcli.admin.proprietary_code_dir(t)
    pkio.mkdir_parent(d)
    s = sirepo.sim_data.get_class(t)
    for e in simulation_db.examples(t):
        _get_file(d.join(s.proprietary_lib_file_basename(e)))
def nginx_proxy():
    """Starts nginx in container.

    Used for development only.
    """
    import sirepo.feature_config

    assert pkconfig.channel_in('dev')
    run_dir = _run_dir().join('nginx_proxy').ensure(dir=True)
    with pkio.save_chdir(run_dir) as d:
        f = run_dir.join('default.conf')
        c = PKDict(_cfg()).pkupdate(run_dir=str(d))
        if 'jupyterhublogin' in sirepo.feature_config.cfg().sim_types:
            import sirepo.sim_api.jupyterhublogin
            import sirepo.server

            sirepo.server.init()
            c.pkupdate(
                jupyterhub_root=sirepo.sim_api.jupyterhublogin.cfg.uri_root,
            )
        pkjinja.render_resource('nginx_proxy.conf', c, output=f)
        cmd = [
            'nginx',
            '-c',
            str(f),
        ]
        pksubprocess.check_call_with_signals(cmd)
def b(msg, dev=False):
    return (
        pkconfig.channel_in('dev') if dev else pkconfig.channel_in_internal_test(),
        bool,
        msg,
    )
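# Sketch of the tuples b() produces; each is a pkconfig declaration of
# (default, parser, help) whose default tracks the channel:
#
#   b('Show 3D beamline plot')
#   # -> (pkconfig.channel_in_internal_test(), bool, 'Show 3D beamline plot')
#   b('Hide the guest warning in the UI', dev=True)
#   # -> (pkconfig.channel_in('dev'), bool, 'Hide the guest warning in the UI')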
def _init():
    from pykern import pkconfig

    global _cfg

    def b(msg, dev=False):
        return (
            pkconfig.channel_in('dev') if dev else pkconfig.channel_in_internal_test(),
            bool,
            msg,
        )

    _cfg = pkconfig.init(
        # No secrets should be stored here (see sirepo.job.agent_env)
        api_modules=((), set, 'optional api modules, e.g. status'),
        default_proprietary_sim_types=(
            set(),
            set,
            'codes where all users are authorized by default but that authorization can be revoked',
        ),
        jspec=dict(
            derbenevskrinsky_force_formula=b('Include Derbenev-Skrinsky force formula'),
        ),
        proprietary_sim_types=(set(), set, 'codes that require authorization'),
        #TODO(robnagler) make this a sim_type config like srw and warpvnd
        rs4pi_dose_calc=(False, bool, 'run the real dose calculator'),
        sim_types=(set(), set, 'simulation types (codes) to be imported'),
        srw=dict(
            app_url=('/en/xray-beamlines.html', str, 'URL for SRW link'),
            beamline3d=b('Show 3D beamline plot'),
            hide_guest_warning=b('Hide the guest warning in the UI', dev=True),
            mask_in_toolbar=b('Show the mask element in toolbar'),
            show_open_shadow=(
                pkconfig.channel_in_internal_test(),
                bool,
                'Show "Open as a New Shadow Simulation" menu item',
            ),
            show_rsopt_ml=(
                pkconfig.channel_in_internal_test(),
                bool,
                'Show "Export ML Script" menu item',
            ),
        ),
        warpvnd=dict(
            allow_3d_mode=(True, bool, 'Include 3D features in the Warp VND UI'),
            display_test_boxes=b('Display test boxes to visualize 3D -> 2D projections'),
        ),
    )
    i = _cfg.proprietary_sim_types.intersection(_cfg.default_proprietary_sim_types)
    assert not i, \
        f'{i}: cannot be in proprietary_sim_types and default_proprietary_sim_types'
    s = set(
        _cfg.sim_types or (PROD_FOSS_CODES if pkconfig.channel_in('prod') else _FOSS_CODES),
    )
    s.update(_cfg.proprietary_sim_types, _cfg.default_proprietary_sim_types)
    for v in _DEPENDENT_CODES:
        if v[0] in s:
            s.add(v[1])
    x = s.difference(VALID_CODES)
    assert not x, \
        'sim_type(s) invalid={} expected={}'.format(x, VALID_CODES)
    _cfg.sim_types = frozenset(s)
    return _cfg
def app_version():
    """Force the version to be dynamic if running in dev channel

    Returns:
        str: chronological version
    """
    if pkconfig.channel_in('dev'):
        return datetime.datetime.utcnow().strftime('%Y%m%d.%H%M%S')
    return SCHEMA_COMMON['version']
def app_version():
    """Force the version to be dynamic if running in dev channel

    Returns:
        str: chronological version
    """
    if pkconfig.channel_in('dev'):
        return _timestamp()
    return SCHEMA_COMMON.version
def _cfg_tls_dir(value):
    if not value:
        assert pkconfig.channel_in('dev'), \
            'required config'
        return None
    res = pkio.py_path(value)
    assert res.check(dir=True), \
        'directory does not exist; value={}'.format(value)
    return res
def _crypto(self):
    if not self.crypto:
        if cfg.private_key is None:
            assert pkconfig.channel_in('dev'), \
                'must configure private_key in non-dev channel={}'.format(pkconfig.cfg.channel)
            cfg.private_key = base64.urlsafe_b64encode(b'01234567890123456789012345678912')
        assert len(base64.urlsafe_b64decode(cfg.private_key)) == 32, \
            'private_key must be 32 bytes and encoded with urlsafe_b64encode'
        self.crypto = cryptography.fernet.Fernet(cfg.private_key)
    return self.crypto
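# Minimal sketch of generating a valid private_key for non-dev channels.
# cryptography.fernet.Fernet.generate_key() returns urlsafe base64 of 32
# random bytes, which satisfies the length assertion above (the env var
# name assumes pkconfig's usual SIREPO_<MODULE>_<KEY> mapping):
#
#   from cryptography.fernet import Fernet
#   print(Fernet.generate_key().decode())
#   # export SIREPO_COOKIE_PRIVATE_KEY=<printed value>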
def _cfg_srv_group(value):
    """Set srv_group"""
    import grp
    import os

    if value:
        return grp.getgrnam(value).gr_name
    assert pkconfig.channel_in('dev'), \
        'must be configured except in DEV'
    return grp.getgrgid(os.getgid()).gr_name
def default_command():
    global cfg

    assert pkconfig.channel_in('dev'), \
        'Only to be used in dev. channel={}'.format(pkconfig.cfg.channel)
    cfg = pkconfig.init(
        proprietary_code_uri=(
            f'file://{pathlib.Path.home()}/src/radiasoft/rsconf/proprietary',
            str,
            'root uri of proprietary codes files location',
        ),
    )
    _proprietary_codes()
def srw_predefined(cls):
    import pykern.pkjson
    import sirepo.template.srw_common

    f = cls.resource_path(sirepo.template.srw_common.PREDEFINED_JSON)
    if not f.check():
        assert pkconfig.channel_in('dev'), \
            '{}: not found; call "sirepo srw create-predefined" before pip install'.format(f)
        import sirepo.pkcli.srw

        sirepo.pkcli.srw.create_predefined()
    return cls._memoize(pykern.pkjson.load_any(f))
def _init():
    global SCHEMA_COMMON, cfg

    fn = STATIC_FOLDER.join('json/schema-common{}'.format(JSON_SUFFIX))
    with open(str(fn)) as f:
        SCHEMA_COMMON = json_load(f)
    # In development, you can touch schema-common to get a new version
    SCHEMA_COMMON.version = _timestamp(fn.mtime()) if pkconfig.channel_in('dev') \
        else sirepo.__version__
    cfg = pkconfig.init(
        nfs_tries=(10, int, 'How many times to poll in hack_nfs_write_status'),
        nfs_sleep=(0.5, float, 'Seconds sleep per hack_nfs_write_status poll'),
    )
def _agent_start_dev(self):
    if not pkconfig.channel_in('dev'):
        return ''
    res = '''
scancel -u $USER >& /dev/null || true
'''
    if self.cfg.shifter_image:
        res += '''
(cd ~/src/radiasoft/sirepo && git pull -q) || true
(cd ~/src/radiasoft/pykern && git pull -q) || true
'''
    return res
def flower():
    """Start flower"""
    assert pkconfig.channel_in('dev')
    run_dir = _run_dir().join('flower').ensure(dir=True)
    with pkio.save_chdir(run_dir):
        command.FlowerCommand().execute_from_commandline([
            'flower',
            '--address=' + cfg.ip,
            '--app=sirepo.celery_tasks',
            '--no-color',
            '--persistent',
        ])
def __volumes(self):
    res = []

    def _res(src, tgt):
        res.append('--volume={}:{}'.format(src, tgt))

    if pkconfig.channel_in('dev'):
        for v in '~/src', '~/.pyenv':
            # expand the loop variable, not a hardcoded path
            v = pkio.py_path(v)
            # pyenv and src shouldn't be writable, only rundir
            _res(v, v + ':ro')
    _res(self.run_dir, self.run_dir)
    return tuple(res)
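# Illustrative result of __volumes() in the dev channel (paths are
# hypothetical; ~/src and ~/.pyenv are mounted read-only, run_dir read-write):
#
#   ('--volume=/home/vagrant/src:/home/vagrant/src:ro',
#    '--volume=/home/vagrant/.pyenv:/home/vagrant/.pyenv:ro',
#    '--volume=/srv/sirepo/run:/srv/sirepo/run')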
def uwsgi():
    """Starts UWSGI server"""
    run_dir = _run_dir()
    with pkio.save_chdir(run_dir):
        values = dict(pkcollections.map_items(cfg))
        values['logto'] = None if pkconfig.channel_in('dev') else str(run_dir.join('uwsgi.log'))
        # uwsgi.py must be first, because values['uwsgi_py'] is referenced by uwsgi.yml
        for f in ('uwsgi.py', 'uwsgi.yml'):
            output = run_dir.join(f)
            values[f.replace('.', '_')] = str(output)
            pkjinja.render_resource(f, values, output=output)
        cmd = ['uwsgi', '--yaml=' + values['uwsgi_yml']]
        pksubprocess.check_call_with_signals(cmd)
def flower():
    """Start flower"""
    assert pkconfig.channel_in('dev')
    run_dir = _run_dir().join('flower').ensure(dir=True)
    with pkio.save_chdir(run_dir):
        from flower.command import FlowerCommand

        FlowerCommand().execute_from_commandline([
            'flower',
            '--address=' + cfg.ip,
            '--app=sirepo.celery_tasks',
            '--no-color',
            '--persistent',
        ])
def rabbitmq():
    assert pkconfig.channel_in('dev')
    run_dir = _run_dir().join('rabbitmq').ensure(dir=True)
    with pkio.save_chdir(run_dir):
        cmd = [
            'docker',
            'run',
            '--env=RABBITMQ_NODE_IP_ADDRESS=' + cfg.ip,
            '--net=host',
            '--rm',
            '--volume={}:/var/lib/rabbitmq'.format(run_dir),
            'rabbitmq:management',
        ]
        pksubprocess.check_call_with_signals(cmd)
def celery():
    """Start celery"""
    assert pkconfig.channel_in('dev')
    import celery.bin.celery
    import sirepo.celery_tasks

    run_dir = _run_dir().join('celery').ensure(dir=True)
    with pkio.save_chdir(run_dir):
        celery.bin.celery.main(argv=[
            'celery',
            'worker',
            '--app=sirepo.celery_tasks',
            '--no-color',
            '-Ofair',
            '--queue=' + ','.join(sirepo.celery_tasks.QUEUE_NAMES),
        ])
def http():
    """Starts Flask server in http mode.

    Used for development only.
    """
    from sirepo import server

    with pkio.save_chdir(_run_dir()):
        use_reloader = pkconfig.channel_in('dev')
        app = server.init(use_reloader=use_reloader)
        # avoid WARNING: Do not use the development server in a production environment.
        app.env = 'development'
        app.run(
            host=cfg.ip,
            port=cfg.port,
            threaded=True,
            use_reloader=use_reloader,
        )
def rabbitmq():
    assert pkconfig.channel_in('dev')
    run_dir = _run_dir().join('rabbitmq').ensure(dir=True)
    with pkio.save_chdir(run_dir):
        cmd = [
            'docker',
            'run',
            '--env=RABBITMQ_NODE_IP_ADDRESS=' + cfg.ip,
            '--net=host',
            '--rm',
            '--volume={}:/var/lib/rabbitmq'.format(run_dir),
            'rabbitmq:management',
        ]
        try:
            pksubprocess.check_call_with_signals(cmd)
        except OSError as e:
            if e.errno == errno.ENOENT:
                pkcli.command_error('docker is not installed')
            # don't silently swallow other OSErrors
            raise
def init_class(app, uwsgi):
    """Verify celery & rabbit are running"""
    if pkconfig.channel_in('dev'):
        return CeleryJob
    for x in range(10):
        err = None
        try:
            if not celery_tasks.celery.control.ping():
                err = 'You need to start Celery:\nsirepo service celery'
        except Exception:
            err = 'You need to start Rabbit:\nsirepo service rabbitmq'
            # Rabbit doesn't have a long timeout, but celery ping does
            time.sleep(.5)
        if not err:
            return CeleryJob
    #TODO(robnagler) really should be pkconfig.Error() or something else
    # but this prints a nice message. Don't call sys.exit, not nice
    pkcli.command_error(err)
def nginx_proxy():
    """Starts nginx in container.

    Used for development only.
    """
    assert pkconfig.channel_in('dev')
    run_dir = _run_dir().join('nginx_proxy').ensure(dir=True)
    with pkio.save_chdir(run_dir):
        f = run_dir.join('default.conf')
        values = dict(pkcollections.map_items(cfg))
        pkjinja.render_resource('nginx_proxy.conf', values, output=f)
        cmd = [
            'docker',
            'run',
            '--net=host',
            '--rm',
            '--volume={}:/etc/nginx/conf.d/default.conf'.format(f),
            'nginx',
        ]
        pksubprocess.check_call_with_signals(cmd)
def uwsgi():
    """Starts UWSGI server"""
    in_dev = pkconfig.channel_in('dev')
    if in_dev:
        from sirepo import server, runner

        # uwsgi doesn't pass signals right so can't use _Background
        if not issubclass(server.cfg.job_queue, runner.Celery):
            pkcli.command_error('uwsgi only works if sirepo.server.cfg.job_queue=Celery')
    db_dir = _db_dir()
    run_dir = _run_dir()
    with pkio.save_chdir(run_dir):
        values = dict(pkcollections.map_items(cfg))
        values['logto'] = None if in_dev else str(run_dir.join('uwsgi.log'))
        # uwsgi.py must be first, because values['uwsgi_py'] is referenced by uwsgi.yml
        for f in ('uwsgi.py', 'uwsgi.yml'):
            output = run_dir.join(f)
            values[f.replace('.', '_')] = str(output)
            pkjinja.render_resource(f, values, output=output)
        cmd = ['uwsgi', '--yaml=' + values['uwsgi_yml']]
        pksubprocess.check_call_with_signals(cmd)
def run_background(cfg_dir):
    """Run srw with mpi in ``cfg_dir``

    Args:
        cfg_dir (str): directory to run srw in
    """
    with pkio.save_chdir(cfg_dir):
        script = pkio.read_text(template_common.PARAMETERS_PYTHON_FILE)
        p = dict(pkcollections.map_items(cfg))
        if pkconfig.channel_in('dev'):
            p['particles_per_core'] = 5
        p['cores'] = mpi.cfg.cores
        script += '''
v.wm_na = v.sm_na = {particles_per_core}
# Number of "iterations" per save is best set to num processes
v.wm_ns = v.sm_ns = {cores}
srwl_bl.SRWLBeamline(_name=v.name).calc_all(v, op)
main()
'''.format(**p)
        mpi.run_script(script)
        simulation_db.write_result({})
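# Sketch of the script tail after .format(**p) in the dev channel, assuming
# mpi.cfg.cores == 4 (value hypothetical):
#
#   v.wm_na = v.sm_na = 5
#   # Number of "iterations" per save is best set to num processes
#   v.wm_ns = v.sm_ns = 4
#   srwl_bl.SRWLBeamline(_name=v.name).calc_all(v, op)
#   main()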
)
celery.conf.update(
    pkcollections.map_items(cfg, op=lambda k, v: (k.upper(), v)),
)

_SERIALIZER = 'json'

celery.conf.update(
    CELERYD_LOG_COLOR=False,
    CELERYD_MAX_TASKS_PER_CHILD=1,
    CELERYD_PREFETCH_MULTIPLIER=1,
    CELERYD_TASK_SOFT_TIME_LIMIT=celery.conf['CELERYD_TASK_TIME_LIMIT'] - 10,
    CELERY_ACCEPT_CONTENT=[_SERIALIZER],
    CELERY_ACKS_LATE=True,
    CELERY_REDIRECT_STDOUTS=not pkconfig.channel_in('dev'),
    CELERY_RESULT_PERSISTENT=True,
    CELERY_RESULT_SERIALIZER=_SERIALIZER,
    CELERY_TASK_PUBLISH_RETRY=False,
    CELERY_TASK_RESULT_EXPIRES=None,
    CELERY_TASK_SERIALIZER=_SERIALIZER,
)

# CREATE USER {user} WITH PASSWORD '{pass}';
# CREATE DATABASE {db} OWNER {user};
# export SIREPO_CELERY_TASKS_CELERY_RESULT_BACKEND='db+postgresql+psycopg2://{user}:{pass}@{host}/{db}'
#TODO(robnagler) in case this happens
#if 'postgresql' in cfg.celery_result_backend:
#    # db+postgresql+psycopg2://csruser:csrpass@localhost/celery_sirepo
#    celery.conf.update(
#        CELERY_RESULT_DB_SHORT_LIVED_SESSIONS=True,
def _codes(want_all=pkconfig.channel_in('dev')):
    return _ALL_CODES if want_all else _NON_DEV_CODES
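# Note: the default for want_all is evaluated once, at import time, not per
# call. That is presumably intentional here, since the pkconfig channel does
# not change during a process's lifetime; callers can still pass want_all
# explicitly to override it.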
        pkdlog('Cookie decoding failed: {} value={}', err, s)

    def _serialize(self):
        return _SERIALIZER_SEP.join(
            itertools.chain.from_iterable(
                [(k, self[k]) for k in sorted(self.keys())],
            ),
        )


@pkconfig.parse_none
def _cfg_http_name(value):
    assert re.search(r'^\w{1,32}$', value), \
        'must be 1-32 word characters; http_name={}'.format(value)
    return value


def _state():
    return flask.g.sirepo_cookie


cfg = pkconfig.init(
    http_name=('sirepo_' + pkconfig.cfg.channel, _cfg_http_name, 'Set-Cookie name'),
    private_key=(None, str, 'urlsafe base64 encoded 32-byte key'),
    is_secure=(
        not pkconfig.channel_in('dev'),
        pkconfig.parse_bool,
        'Add secure attribute to Set-Cookie',
    ),
)