def send_awscreds(suffix=None):
    """Upload AWS credential/config files (.aws dir, .boto, .s3cfg) to the remote host.

    :param suffix: optional suffix appended to each credential dir/file name
        (e.g. to stage an alternate credential set); None means the plain names
    """
    ctx = get_context()
    sfx = '' if suffix is None else suffix
    aws_dir = '.aws{}'.format(sfx)
    tmpl_dir = get_user_files_path()
    # recreate the .aws dir from scratch with tight permissions
    if exists(aws_dir):
        run('rm -rf {}'.format(aws_dir))
    mkdir(aws_dir, context['OPS_USER'], context['OPS_USER'])
    run('chmod 700 {}'.format(aws_dir))
    upload_template('aws_config', '{}/config'.format(aws_dir), use_jinja=True,
                    context=ctx, template_dir=tmpl_dir)
    # only ship static credentials when an access key is configured
    if ctx['AWS_ACCESS_KEY'] not in (None, ""):
        upload_template('aws_credentials', '{}/credentials'.format(aws_dir),
                        use_jinja=True, context=ctx, template_dir=tmpl_dir)
    run('chmod 600 {}/*'.format(aws_dir))
    # .boto and .s3cfg get identical recreate/upload/chmod treatment
    for tmpl, dest in (('boto', '.boto{}'.format(sfx)),
                       ('s3cfg', '.s3cfg{}'.format(sfx))):
        if exists(dest):
            run('rm -rf {}'.format(dest))
        upload_template(tmpl, dest, use_jinja=True, context=ctx,
                        template_dir=tmpl_dir)
        run('chmod 600 {}'.format(dest))
def send_awscreds():
    """Upload AWS credential/config files (.aws dir, .boto, .s3cfg) to the remote host."""
    ctx = get_context()
    tmpl_dir = get_user_files_path()
    # recreate the .aws dir from scratch with tight permissions
    if exists('.aws'):
        run('rm -rf .aws')
    mkdir('.aws', context['OPS_USER'], context['OPS_USER'])
    run('chmod 700 .aws')
    upload_template('aws_config', '.aws/config', use_jinja=True, context=ctx,
                    template_dir=tmpl_dir)
    # only ship static credentials when an access key is configured
    if ctx['AWS_ACCESS_KEY'] not in (None, ""):
        upload_template('aws_credentials', '.aws/credentials', use_jinja=True,
                        context=ctx, template_dir=tmpl_dir)
    run('chmod 600 .aws/*')
    # .boto and .s3cfg get identical recreate/upload/chmod treatment
    for tmpl in ('boto', 's3cfg'):
        dotfile = '.{}'.format(tmpl)
        if exists(dotfile):
            run('rm -rf {}'.format(dotfile))
        upload_template(tmpl, dotfile, use_jinja=True, context=ctx,
                        template_dir=tmpl_dir)
        run('chmod 600 {}'.format(dotfile))
def send_project_config(project):
    """Upload the project-specific verdi install script and config templates.

    :param project: project name merged into the render context
    """
    ctx = get_context()
    ctx.update({'project': project})
    targets = (
        ('install.sh', '~/verdi/ops/install.sh'),
        ('datasets.json.tmpl.asg', '~/verdi/etc/datasets.json'),
        ('supervisord.conf.tmpl', '~/verdi/etc/supervisord.conf.tmpl'),
    )
    for tmpl, dest in targets:
        upload_template(tmpl, dest, use_jinja=True, context=ctx,
                        template_dir=get_user_files_path())
def conf_sdsadm(tmpl, dest, shared=False):
    """Render an sdsadm config template for this host's resolved role.

    :param tmpl: template file name
    :param dest: remote destination path
    :param shared: if True, use the role-independent 'orch' template dir
    """
    role, hysds_dir, hostname = resolve_role()
    if shared:
        parts = ()
    elif role in ('factotum', 'ci'):
        # factotum and ci hosts reuse the verdi templates
        parts = ('verdi',)
    else:
        parts = (role,)
    tmpl_dir = os.path.join(get_user_files_path(), 'orch', *parts)
    upload_template(tmpl, dest, use_jinja=True, context=get_context(role),
                    template_dir=tmpl_dir)
def add_ci_job(repo, proto, branch=None, release=False):
    """Create (or refresh) a Jenkins CI job config for a repo.

    :param repo: git repository URL
    :param proto: storage protocol for built containers (s3|s3s|gs|dav|davs)
    :param branch: optional branch; job name/template come from get_ci_job_info
    :param release: if True, restrict builds to release tags
    :raises RuntimeError: on an unrecognized storage protocol
    """
    with settings(sudo_user=context["JENKINS_USER"]):
        job_name, config_tmpl = get_ci_job_info(repo, branch)
        ctx = get_context()
        ctx['PROJECT_URL'] = repo
        ctx['BRANCH'] = branch
        job_dir = '%s/jobs/%s' % (ctx['JENKINS_DIR'], job_name)
        dest_file = '%s/config.xml' % job_dir
        mkdir(job_dir, None, None)
        chmod('777', job_dir)
        ctx['BRANCH_SPEC'] = "origin/tags/release-*" if release else "**"
        if proto in ('s3', 's3s'):
            ctx['STORAGE_URL'] = "%s://%s/%s/" % (
                proto, ctx['S3_ENDPOINT'], ctx['CODE_BUCKET'])
        elif proto == 'gs':
            ctx['STORAGE_URL'] = "%s://%s/%s/" % (
                proto, ctx['GS_ENDPOINT'], ctx['CODE_BUCKET'])
        elif proto in ('dav', 'davs'):
            ctx['STORAGE_URL'] = "%s://%s:%s@%s/repository/products/containers/" % (
                proto, ctx['DAV_USER'], ctx['DAV_PASSWORD'], ctx['DAV_SERVER'])
        else:
            raise RuntimeError(
                "Unrecognized storage type for containers: %s" % proto)
        # render to a temp file, then copy into the job dir with preserved perms
        upload_template(config_tmpl, "tmp-jenkins-upload", use_jinja=True,
                        context=ctx, template_dir=get_user_files_path())
        cp_rp("tmp-jenkins-upload", dest_file)
        run("rm tmp-jenkins-upload")
def ensure_ssl(node_type):
    """Ensure self-signed SSL certs exist under ~/ssl for grq or mozart.

    Generates server.key/server.pem via an interactive openssl run if either
    file is missing, answering the passphrase prompts through Fabric's
    `prompts` mechanism.

    :param node_type: 'grq' or 'mozart'; selects the cert common name
    :raises RuntimeError: on any other node type
    """
    ctx = get_context(node_type)
    if node_type == "grq":
        commonName = ctx['GRQ_FQDN']
    elif node_type == "mozart":
        commonName = ctx['MOZART_FQDN']
    else:
        raise RuntimeError("Unknown node type: %s" % node_type)
    # canned answers for openssl's interactive passphrase prompts
    prompts = {
        'Enter pass phrase for server.key:': 'hysds',
        'Enter pass phrase for server.key.org:': 'hysds',
        'Verifying - Enter pass phrase for server.key:': 'hysds',
    }
    if not exists('ssl/server.key') or not exists('ssl/server.pem'):
        mkdir('ssl', context['OPS_USER'], context['OPS_USER'])
        upload_template('ssl_server.cnf', 'ssl/server.cnf', use_jinja=True,
                        context={'commonName': commonName},
                        template_dir=get_user_files_path())
        with cd('ssl'):
            with settings(prompts=prompts):
                run('openssl genrsa -des3 -out server.key 1024')
                run('OPENSSL_CONF=server.cnf openssl req -new -key server.key -out server.csr')
                # keep a passphrased copy, then strip the passphrase so
                # services can load the key unattended
                run('cp server.key server.key.org')
                run('openssl rsa -in server.key.org -out server.key')
                run('chmod 600 server.key*')
                run('openssl x509 -req -days 99999 -in server.csr -signkey server.key -out server.pem')
def send_template(tmpl, dest, tmpl_dir=None, node_type=None):
    """Render *tmpl* to *dest* using the node type's context.

    :param tmpl: template file name
    :param dest: remote destination path
    :param tmpl_dir: template directory; defaults to the user files path
    :param node_type: node type/role used to build the render context
    """
    src_dir = get_user_files_path() if tmpl_dir is None else os.path.expanduser(tmpl_dir)
    upload_template(tmpl, dest, use_jinja=True,
                    context=get_context(node_type), template_dir=src_dir)
def send_hysds_ui_conf():
    """Upload HySDS UI config (index.js, tosca.js, figaro.js).

    User-supplied copies in ~/.sds/files win over the stock templates; the
    rendered configs are symlinked back into the UI source tree.
    """
    default_cfg_dir = os.path.join(ops_dir, 'mozart/ops/hysds_ui/src/config')
    dest_file = '~/mozart/ops/hysds_ui/src/config/index.js'
    upload_template('index.template.js', dest_file, use_jinja=True,
                    context=get_context('mozart'),
                    template_dir=default_cfg_dir)
    user_path = get_user_files_path()
    # tosca and figaro configs get identical custom-vs-default handling
    for name in ('tosca', 'figaro'):
        cfg = '~/mozart/etc/{}.js'.format(name)
        if os.path.exists(os.path.join(user_path, '{}.js'.format(name))):
            print('using custom {} configuration in .sds/files'.format(name))
            send_template_user_override('{}.js'.format(name), cfg,
                                        node_type='mozart')
        else:
            print('using default {} configuration'.format(name))
            send_template_user_override('{}.template.js'.format(name), cfg,
                                        tmpl_dir=default_cfg_dir,
                                        node_type='mozart')
        # symlink into ~/mozart/ops/hysds_ui/src/config/ so the build sees it
        ln_sf(cfg, os.path.join(default_cfg_dir, '{}.js'.format(name)))
def send_toscaconf(send_file='settings.cfg.tmpl', template_dir=None):
    """Upload tosca's settings.cfg and (re)create its database.

    :param send_file: template file name to render
    :param template_dir: directory containing the template; defaults to the
        user SDS files path. Fix: the old default called
        get_user_files_path() in the def line, freezing the path at import
        time; it is now resolved at call time via a None sentinel.
    """
    if template_dir is None:
        template_dir = get_user_files_path()
    tmpl_dir = os.path.expanduser(template_dir)
    dest_file = '~/sciflo/ops/tosca/settings.cfg'
    upload_template(send_file, dest_file, use_jinja=True,
                    context=get_context('grq'), template_dir=tmpl_dir)
    # initialize/refresh the tosca DB inside the sciflo venv
    with prefix('source ~/sciflo/bin/activate'):
        with cd('~/sciflo/ops/tosca'):
            run('./db_create.py')
def send_azurecreds():
    """Upload Azure credentials into a fresh ~/.azure dir on the remote host."""
    ctx = get_context()
    azure_dir = '.azure'
    # recreate the dir from scratch with tight permissions
    if exists(azure_dir):
        run('rm -rf .azure')
    mkdir(azure_dir, context['OPS_USER'], context['OPS_USER'])
    run('chmod 700 .azure')
    upload_template('azure_credentials', '.azure/azure_credentials.json',
                    use_jinja=True, context=ctx,
                    template_dir=get_user_files_path())
    run('chmod 600 .azure/*')
def send_logstash_jvm_options(node_type):
    """Render logstash's jvm.options with a heap size derived from instance RAM.

    :param node_type: node type/role used to build the render context
    """
    ctx = get_context(node_type)
    ram_size_gb = int(get_ram_size_bytes()) // 1024 ** 3
    echo("instance RAM size: {}GB".format(ram_size_gb))
    half_ram_gb = ram_size_gb // 2
    # heap = half of RAM, capped at 8GB
    ctx['LOGSTASH_HEAP_SIZE'] = min(8, half_ram_gb)
    echo("configuring logstash heap size: {}GB".format(ctx['LOGSTASH_HEAP_SIZE']))
    upload_template('jvm.options', '~/logstash/config/jvm.options',
                    use_jinja=True, context=ctx,
                    template_dir=get_user_files_path())
def send_figaroconf():
    """Upload figaro's settings.cfg from the user files template and initialize its DB."""
    dest_file = '~/mozart/ops/figaro/settings.cfg'
    upload_template('figaro_settings.cfg.tmpl', dest_file, use_jinja=True,
                    context=get_context('mozart'),
                    template_dir=get_user_files_path())
    # create the data dir and (re)build the figaro DB inside the mozart venv
    with prefix('source ~/mozart/bin/activate'), cd('~/mozart/ops/figaro'):
        mkdir('~/mozart/ops/figaro/data', context['OPS_USER'], context['OPS_USER'])
        run('./db_create.py')
def send_peleconf(send_file='settings.cfg.tmpl', template_dir=None):
    """Upload pele's settings.cfg and run its DB create/migrate steps.

    :param send_file: template file name to render
    :param template_dir: directory containing the template; defaults to the
        user SDS files path. Fix: the old default called
        get_user_files_path() in the def line, freezing the path at import
        time; it is now resolved at call time via a None sentinel.
    """
    if template_dir is None:
        template_dir = get_user_files_path()
    tmpl_dir = os.path.expanduser(template_dir)
    dest_file = '~/sciflo/ops/pele/settings.cfg'
    upload_template(send_file, dest_file, use_jinja=True,
                    context=get_context('grq'), template_dir=tmpl_dir)
    with prefix('source ~/sciflo/bin/activate'):
        with cd('~/sciflo/ops/pele'):
            run('flask create-db')
            # init/migrate may legitimately fail on an already-initialized
            # DB, so tolerate non-zero exits
            run('flask db init', warn_only=True)
            run('flask db migrate', warn_only=True)
def send_template_user_override(tmpl, dest, tmpl_dir=None, node_type=None):
    """Render *tmpl* to *dest*, preferring a user-supplied copy of the template.

    Templates found in the user files dir (i.e. ~/.sds/files) override the
    nominal template directory.

    :param tmpl: template file name
    :param dest: output file name
    :param tmpl_dir: nominal directory containing the template
    :param node_type: node type/role
    :return: None
    """
    nominal_dir = (get_user_files_path() if tmpl_dir is None
                   else os.path.expanduser(tmpl_dir))
    upload_template(tmpl, dest, use_jinja=True,
                    context=get_context(node_type),
                    template_dir=resolve_files_dir(tmpl, nominal_dir))
def ship_style(bucket=None, encrypt=False):
    """Render and push the S3 bucket-listing page, list.js and index styles.

    :param bucket: target S3 bucket; defaults to the cluster DATASET_BUCKET
    :param encrypt: if True, upload with server-side encryption (--sse)
    """
    ctx = get_context()
    if bucket is None:
        bucket = ctx['DATASET_BUCKET']
    repo_dir = os.path.join(ops_dir, 'mozart/ops/s3-bucket-listing')
    html_out = os.path.join(repo_dir, 'tmp_index.html')
    js_file = os.path.join(repo_dir, 'list.js')
    style_dir = os.path.join(repo_dir, 'index-style')
    upload_template('s3-bucket-listing.html.tmpl', html_out, use_jinja=True,
                    context=ctx, template_dir=get_user_files_path())
    if encrypt is False:
        run('aws s3 cp %s s3://%s/index.html' % (html_out, bucket))
        run('aws s3 cp %s s3://%s/' % (js_file, bucket))
        run('aws s3 sync %s s3://%s/index-style' % (style_dir, bucket))
    else:
        run('aws s3 cp --sse %s s3://%s/index.html' % (html_out, bucket))
        run('aws s3 cp --sse %s s3://%s/' % (js_file, bucket))
        run('aws s3 sync --sse %s s3://%s/index-style' % (style_dir, bucket))
def add_ci_job(repo, proto, uid=1001, gid=1001, branch=None, release=False):
    """Create a Jenkins container-builder job config for *repo*.

    :param repo: git repository URL; owner/name are parsed via repo_re
    :param proto: storage protocol for built containers (s3|s3s|gs|dav|davs)
    :param uid: UID passed into the job config template
    :param gid: GID passed into the job config template
    :param branch: optional branch; branch jobs use config-branch.xml
    :param release: if True, restrict builds to release tags
    :raises RuntimeError: if the repo URL can't be parsed or proto is unknown
    """
    with settings(sudo_user=context["JENKINS_USER"]):
        match = repo_re.search(repo)
        if not match:
            raise RuntimeError("Failed to parse repo owner and name: %s" % repo)
        owner, name = match.groups()
        # branchless jobs build everything; branch jobs get their own config
        if branch is None:
            job_name = "container-builder_%s_%s" % (owner, name)
            config_tmpl = 'config.xml'
        else:
            job_name = "container-builder_%s_%s_%s" % (owner, name, branch)
            config_tmpl = 'config-branch.xml'
        ctx = get_context()
        ctx['PROJECT_URL'] = repo
        ctx['BRANCH'] = branch
        ctx['UID'] = uid
        ctx['GID'] = gid
        job_dir = '%s/jobs/%s' % (ctx['JENKINS_DIR'], job_name)
        dest_file = '%s/config.xml' % job_dir
        mkdir(job_dir, None, None)
        chmod('777', job_dir)
        if release:
            ctx['BRANCH_SPEC'] = "origin/tags/release-*"
        else:
            ctx['BRANCH_SPEC'] = "**"
        if proto in ('s3', 's3s'):
            ctx['STORAGE_URL'] = "%s://%s/%s/" % (proto, ctx['S3_ENDPOINT'], ctx['CODE_BUCKET'])
        elif proto == 'gs':
            ctx['STORAGE_URL'] = "%s://%s/%s/" % (proto, ctx['GS_ENDPOINT'], ctx['CODE_BUCKET'])
        elif proto in ('dav', 'davs'):
            ctx['STORAGE_URL'] = "%s://%s:%s@%s/repository/products/containers/" % \
                (proto, ctx['DAV_USER'], ctx['DAV_PASSWORD'], ctx['DAV_SERVER'])
        else:
            raise RuntimeError("Unrecognized storage type for containers: %s" % proto)
        # render to a temp file, then copy into the job dir with preserved perms
        upload_template(config_tmpl, "tmp-jenkins-upload", use_jinja=True,
                        context=ctx, template_dir=get_user_files_path())
        cp_rp("tmp-jenkins-upload", dest_file)
        run("rm tmp-jenkins-upload")
def init_ci(conf, comp='ci'): """"Initialize ci component.""" # progress bar with tqdm(total=6) as bar: # ensure venv set_bar_desc(bar, 'Ensuring HySDS venv') execute(fab.ensure_venv, 'verdi', system_site_packages=False, install_supervisor=False, roles=[comp]) bar.update() # rsync sdsadm set_bar_desc(bar, 'Syncing sdsadm') execute(fab.rm_rf, '~/verdi/ops/sdsadm', roles=[comp]) execute(fab.rsync_sdsadm, roles=[comp]) bar.update() # initialize sdsadm set_bar_desc(bar, 'Initializing ci') execute(fab.init_sdsadm, roles=[comp]) bar.update() set_bar_desc(bar, 'Initialized ci') # configure for cluster set_bar_desc(bar, 'Configuring ci') execute(fab.conf_sdsadm, 'celeryconfig.py', '~/verdi/etc/celeryconfig.py', roles=[comp]) bar.update() execute(fab.conf_sdsadm, 'datasets.json', '~/verdi/etc/datasets.json', True, roles=[comp]) bar.update() netrc = os.path.join(get_user_files_path(), 'netrc') if os.path.exists(netrc): set_bar_desc(bar, 'Configuring netrc') execute(fab.send_template, 'netrc', '.netrc', roles=[comp]) execute(fab.chmod, 600, '.netrc', roles=[comp]) execute(fab.send_awscreds, roles=[comp]) bar.update() set_bar_desc(bar, 'Configured ci')
def ensure_ssl(node_type):
    """Ensure self-signed SSL certs exist under ~/ssl for grq or mozart.

    Non-interactive variant: passphrases are supplied through openssl's
    -passout/-passin options rather than interactive prompt answers.

    :param node_type: 'grq' or 'mozart'; selects the cert common name
    :raises RuntimeError: on any other node type
    """
    ctx = get_context(node_type)
    if node_type == "grq":
        commonName = ctx['GRQ_FQDN']
    elif node_type == "mozart":
        commonName = ctx['MOZART_FQDN']
    else:
        raise RuntimeError("Unknown node type: %s" % node_type)
    if not exists('ssl/server.key') or not exists('ssl/server.pem'):
        mkdir('ssl', context['OPS_USER'], context['OPS_USER'])
        upload_template('ssl_server.cnf', 'ssl/server.cnf', use_jinja=True,
                        context={'commonName': commonName},
                        template_dir=get_user_files_path())
        with cd('ssl'):
            # pty=False keeps openssl non-interactive so the pass: args are used
            run('openssl genrsa -des3 -passout pass:hysds -out server.key 1024', pty=False)
            run('OPENSSL_CONF=server.cnf openssl req -passin pass:hysds -new -key server.key -out server.csr', pty=False)
            # keep a passphrased copy, then strip the passphrase so
            # services can load the key unattended
            run('cp server.key server.key.org')
            run('openssl rsa -passin pass:hysds -in server.key.org -out server.key', pty=False)
            run('chmod 600 server.key*')
            run('openssl x509 -passin pass:hysds -req -days 99999 -in server.csr -signkey server.key -out server.pem', pty=False)
def copy_files():
    """Copy templates and files to user config files.

    Seeds the user's SDS files dir with the stock templates shipped in the
    sdscli package. Existing user files are never overwritten; cluster.py is
    placed next to the user config file instead of under the files dir.
    """
    files_path = get_user_files_path()
    logger.debug('files_path: %s' % files_path)
    # FIX: 0700 is a Python 2 octal literal and a SyntaxError on Python 3
    validate_dir(files_path, mode=0o700)
    sds_files_path = resource_filename(
        'sdscli', os.path.join('adapters', 'hysds', 'files'))
    for sds_file in glob(os.path.join(sds_files_path, '*')):
        if os.path.basename(sds_file) == 'cluster.py':
            # cluster.py lives alongside the user config, not under files/
            user_file = os.path.join(os.path.dirname(get_user_config_path()),
                                     os.path.basename(sds_file))
            if not os.path.exists(user_file):
                shutil.copy(sds_file, user_file)
        else:
            user_file = os.path.join(files_path, os.path.basename(sds_file))
            if os.path.isdir(sds_file) and not os.path.exists(user_file):
                shutil.copytree(sds_file, user_file)
                logger.debug("Copying dir %s to %s" % (sds_file, user_file))
            elif os.path.isfile(sds_file) and not os.path.exists(user_file):
                shutil.copy(sds_file, user_file)
                logger.debug("Copying file %s to %s" % (sds_file, user_file))
def send_celeryconf(node_type):
    """Render celeryconfig.py into the node type's HySDS install.

    :param node_type: one of mozart, metrics, verdi, verdi-asg, grq
    :raises RuntimeError: on any other node type
    """
    ctx = get_context(node_type)
    template_dir = os.path.join(ops_dir, 'mozart/ops/hysds/configs/celery')
    # map node type to the venv/install dir hosting hysds
    base_dirs = {'mozart': 'mozart', 'metrics': 'metrics', 'verdi': 'verdi',
                 'verdi-asg': 'verdi', 'grq': 'sciflo'}
    if node_type not in base_dirs:
        raise RuntimeError("Unknown node type: %s" % node_type)
    base_dir = base_dirs[node_type]
    tmpl = 'celeryconfig.py.tmpl'
    user_path = get_user_files_path()
    if node_type == 'verdi-asg':
        # prefer an ASG-specific template when the user supplies one
        tmpl_asg = 'celeryconfig.py.tmpl.asg'
        if os.path.exists(os.path.join(user_path, tmpl_asg)):
            tmpl = tmpl_asg
    dest_file = '~/%s/ops/hysds/celeryconfig.py' % base_dir
    upload_template(tmpl, dest_file, use_jinja=True, context=ctx,
                    template_dir=resolve_files_dir(tmpl, template_dir))
def resolve_files_dir(fname, files_dir):
    """Resolve file or template from user SDS files or default location.

    :param fname: file/template name to look up
    :param files_dir: default directory to fall back on
    :return: the user files path if it contains *fname*, else *files_dir*
    """
    user_path = get_user_files_path()
    if os.path.exists(os.path.join(user_path, fname)):
        return user_path
    return files_dir
def ship_verdi(conf, encrypt=False, comp='ci'):
    """Ship verdi code/config bundle.

    For each configured queue: stage queue-specific config, creds and work-dir
    stylesheets under ~/verdi/ops, then bundle it as ~/<queue>-<venue>.tbz2.

    :param conf: cluster config providing VENUE and QUEUES
    :param encrypt: passed through to fab.ship_code for encrypted bundles
    :param comp: Fabric role to run on (default 'ci')
    """
    venue = conf.get('VENUE')
    queues = [i.strip() for i in conf.get('QUEUES').split()]
    # progress bar: one tick per queue plus the venv and stop steps
    with tqdm(total=len(queues) + 2) as bar:
        # ensure venv
        set_bar_desc(bar, 'Ensuring HySDS venv')
        execute(fab.ensure_venv, comp, roles=[comp])
        bar.update()
        # stop services
        set_bar_desc(bar, 'Stopping verdid')
        execute(fab.verdid_stop, roles=[comp])
        execute(fab.kill_hung, roles=[comp])
        bar.update()
        # iterate over queues
        for queue in queues:
            set_bar_desc(bar, 'Shipping {} queue'.format(queue))
            # per-queue progress bar
            with tqdm(total=5) as queue_bar:
                # send queue-specific install.sh script and configs
                set_bar_desc(queue_bar, 'Sending queue-specific config')
                execute(fab.rm_rf, '~/verdi/ops/install.sh', roles=[comp])
                execute(fab.rm_rf, '~/verdi/etc/datasets.json', roles=[comp])
                execute(fab.rm_rf, '~/verdi/etc/supervisord.conf', roles=[comp])
                execute(fab.rm_rf, '~/verdi/etc/supervisord.conf.tmpl', roles=[comp])
                execute(fab.send_queue_config, queue, roles=[comp])
                execute(fab.chmod, '755', '~/verdi/ops/install.sh', roles=[comp])
                execute(fab.chmod, '644', '~/verdi/etc/datasets.json', roles=[comp])
                queue_bar.update()
                # copy config
                set_bar_desc(queue_bar, 'Copying config')
                execute(fab.rm_rf, '~/verdi/ops/etc', roles=[comp])
                execute(fab.cp_rp, '~/verdi/etc', '~/verdi/ops/', roles=[comp])
                queue_bar.update()
                # copy creds (each copied only if present on the host)
                set_bar_desc(queue_bar, 'Copying creds')
                execute(fab.rm_rf, '~/verdi/ops/creds', roles=[comp])
                execute(fab.mkdir, '~/verdi/ops/creds', 'ops', 'ops', roles=[comp])
                execute(fab.cp_rp_exists, '~/.netrc', '~/verdi/ops/creds/', roles=[comp])
                execute(fab.cp_rp_exists, '~/.boto', '~/verdi/ops/creds/', roles=[comp])
                execute(fab.cp_rp_exists, '~/.s3cfg', '~/verdi/ops/creds/', roles=[comp])
                execute(fab.cp_rp_exists, '~/.aws', '~/verdi/ops/creds/', roles=[comp])
                queue_bar.update()
                # send work directory stylesheets
                style_tar = os.path.join(
                    get_user_files_path(), 'beefed-autoindex-open_in_new_win.tbz2')
                set_bar_desc(queue_bar, 'Sending work dir stylesheets')
                execute(fab.rm_rf,
                        '~/verdi/ops/beefed-autoindex-open_in_new_win.tbz2',
                        roles=[comp])
                execute(fab.copy, style_tar,
                        '~/verdi/ops/beefed-autoindex-open_in_new_win.tbz2',
                        roles=[comp])
                queue_bar.update()
                # create venue bundle
                set_bar_desc(queue_bar, 'Creating/shipping bundle')
                execute(fab.rm_rf, '~/{}-{}.tbz2'.format(queue, venue), roles=[comp])
                execute(fab.ship_code, '~/verdi/ops',
                        '~/{}-{}.tbz2'.format(queue, venue), encrypt,
                        roles=[comp])
                queue_bar.update()
            bar.update()
        set_bar_desc(bar, 'Finished shipping')
    print("")
def update_verdi(conf, ndeps=False, comp='verdi'):
    """Update verdi component.

    Stops verdid, re-syncs the HySDS packages into ~/verdi/ops, reinstalls
    core requirements, refreshes celery/supervisor/datasets configs, exposes
    logs and ships credentials.

    :param conf: cluster config (unused directly here; kept for parity with
        the other update_* entry points)
    :param ndeps: passed through to pip installs (no-deps behavior)
    :param comp: Fabric role to run on (default 'verdi')
    """
    # progress bar
    with tqdm(total=15) as bar:
        # ensure venv
        set_bar_desc(bar, 'Ensuring HySDS venv')
        execute(fab.ensure_venv, comp, roles=[comp])
        bar.update()
        # stop services
        set_bar_desc(bar, 'Stopping verdid')
        execute(fab.verdid_stop, roles=[comp])
        execute(fab.kill_hung, roles=[comp])
        bar.update()
        # remove code bundle stuff
        set_bar_desc(bar, 'Remove code bundle')
        execute(fab.rm_rf, '~/verdi/ops/etc', roles=[comp])
        execute(fab.rm_rf, '~/verdi/ops/install.sh', roles=[comp])
        bar.update()
        # update
        set_bar_desc(bar, 'Syncing packages')
        execute(fab.rm_rf, '~/verdi/ops/*', roles=[comp])
        execute(fab.rsync_code, 'verdi', roles=[comp])
        execute(fab.set_spyddder_settings, roles=[comp])
        bar.update()
        # update reqs
        set_bar_desc(bar, 'Updating HySDS core')
        execute(fab.pip_install_with_req, 'verdi', '~/verdi/ops/osaka',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'verdi', '~/verdi/ops/prov_es',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'verdi', '~/verdi/ops/hysds_commons',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'verdi',
                '~/verdi/ops/hysds/third_party/celery-v3.1.25.pqueue',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'verdi', '~/verdi/ops/hysds',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'verdi', '~/verdi/ops/sciflo',
                ndeps, roles=[comp])
        bar.update()
        # update celery config
        set_bar_desc(bar, 'Updating celery config')
        execute(fab.rm_rf, '~/verdi/ops/hysds/celeryconfig.py', roles=[comp])
        execute(fab.rm_rf, '~/verdi/ops/hysds/celeryconfig.pyc', roles=[comp])
        execute(fab.send_celeryconf, 'verdi', roles=[comp])
        bar.update()
        # update supervisor config
        set_bar_desc(bar, 'Updating supervisor config')
        execute(fab.rm_rf, '~/verdi/etc/supervisord.conf', roles=[comp])
        execute(fab.send_template_user_override, 'supervisord.conf.verdi',
                '~/verdi/etc/supervisord.conf',
                '~/mozart/ops/hysds/configs/supervisor', roles=[comp])
        bar.update()
        # update datasets config; overwrite datasets config with domain-specific config
        set_bar_desc(bar, 'Updating datasets config')
        execute(fab.rm_rf, '~/verdi/etc/datasets.json', roles=[comp])
        execute(fab.send_template, 'datasets.json', '~/verdi/etc/datasets.json',
                roles=[comp])
        bar.update()
        # expose hysds log dir via webdav
        set_bar_desc(bar, 'Expose logs')
        execute(fab.mkdir, '/data/work', None, None, roles=[comp])
        execute(fab.ln_sf, '~/verdi/log', '/data/work/log', roles=[comp])
        bar.update()
        # ship netrc (only if the user provides one)
        netrc = os.path.join(get_user_files_path(), 'netrc')
        if os.path.exists(netrc):
            set_bar_desc(bar, 'Configuring netrc')
            execute(fab.copy, netrc, '.netrc', roles=[comp])
            execute(fab.chmod, 600, '.netrc', roles=[comp])
        # ship AWS creds
        set_bar_desc(bar, 'Configuring AWS creds')
        execute(fab.send_awscreds, roles=[comp])
        bar.update()
        set_bar_desc(bar, 'Updated verdi')
def update_grq(conf, ndeps=False, comp='grq'):
    """Update grq component.

    Stops grqd, re-syncs the HySDS packages into ~/sciflo/ops, reinstalls
    core requirements, refreshes celery/grq2/tosca/supervisor/datasets
    configs, sets up SSL and ES templates, and ships AWS creds.

    :param conf: cluster config (unused directly here; kept for parity with
        the other update_* entry points)
    :param ndeps: passed through to pip installs (no-deps behavior)
    :param comp: Fabric role to run on (default 'grq')
    """
    # progress bar
    with tqdm(total=21) as bar:
        # ensure venv
        set_bar_desc(bar, 'Ensuring HySDS venv')
        execute(fab.ensure_venv, 'sciflo', roles=[comp])
        bar.update()
        # stop services
        set_bar_desc(bar, 'Stopping grqd')
        execute(fab.grqd_stop, roles=[comp])
        bar.update()
        # update
        set_bar_desc(bar, 'Syncing packages')
        execute(fab.rm_rf, '~/sciflo/ops/*', roles=[comp])
        execute(fab.rsync_code, 'grq', 'sciflo', roles=[comp])
        execute(fab.pip_upgrade, 'gunicorn', 'sciflo',
                roles=[comp])  # ensure latest gunicorn
        bar.update()
        # update reqs
        set_bar_desc(bar, 'Updating HySDS core')
        execute(fab.pip_install_with_req, 'sciflo', '~/sciflo/ops/osaka',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'sciflo', '~/sciflo/ops/prov_es',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'sciflo', '~/sciflo/ops/hysds_commons',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'sciflo',
                '~/sciflo/ops/hysds/third_party/celery-v3.1.25.pqueue',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'sciflo', '~/sciflo/ops/hysds',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'sciflo', '~/sciflo/ops/sciflo',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'sciflo', '~/sciflo/ops/grq2',
                ndeps, roles=[comp])
        bar.update()
        execute(fab.pip_install_with_req, 'sciflo', '~/sciflo/ops/tosca',
                ndeps, roles=[comp])
        bar.update()
        # update celery config
        set_bar_desc(bar, 'Updating celery config')
        execute(fab.rm_rf, '~/sciflo/ops/hysds/celeryconfig.py', roles=[comp])
        execute(fab.rm_rf, '~/sciflo/ops/hysds/celeryconfig.pyc', roles=[comp])
        execute(fab.send_celeryconf, 'grq', roles=[comp])
        bar.update()
        # update grq2 config
        set_bar_desc(bar, 'Updating grq2 config')
        execute(fab.rm_rf, '~/sciflo/ops/grq2/settings.cfg', roles=[comp])
        execute(fab.send_grq2conf, roles=[comp])
        bar.update()
        # update tosca config and facetview.html
        set_bar_desc(bar, 'Updating tosca config and facetview.html')
        execute(fab.rm_rf, '~/sciflo/ops/tosca/settings.cfg', roles=[comp])
        execute(fab.send_toscaconf, 'tosca_settings.cfg.tmpl', roles=[comp])
        # optional user-supplied facetview override
        tosca_fv = os.path.join(get_user_files_path(), 'tosca_facetview.html')
        if os.path.exists(tosca_fv):
            execute(fab.copy, tosca_fv,
                    '~/sciflo/ops/tosca/tosca/templates/facetview.html',
                    roles=[comp])
            execute(fab.chmod, 644,
                    '~/sciflo/ops/tosca/tosca/templates/facetview.html',
                    roles=[comp])
        bar.update()
        # update supervisor config
        set_bar_desc(bar, 'Updating supervisor config')
        execute(fab.rm_rf, '~/sciflo/etc/supervisord.conf', roles=[comp])
        execute(fab.send_template_user_override, 'supervisord.conf.grq',
                '~/sciflo/etc/supervisord.conf',
                '~/mozart/ops/hysds/configs/supervisor', roles=[comp])
        bar.update()
        # update datasets config; overwrite datasets config with domain-specific config
        set_bar_desc(bar, 'Updating datasets config')
        execute(fab.rm_rf, '~/sciflo/etc/datasets.json', roles=[comp])
        execute(fab.send_template, 'datasets.json', '~/sciflo/etc/datasets.json',
                roles=[comp])
        bar.update()
        # ensure self-signed SSL certs exist
        set_bar_desc(bar, 'Configuring SSL')
        execute(fab.ensure_ssl, 'grq', roles=[comp])
        bar.update()
        # link ssl certs to apps
        execute(fab.ln_sf, '~/ssl/server.key', '~/sciflo/ops/grq2/server.key',
                roles=[comp])
        execute(fab.ln_sf, '~/ssl/server.pem', '~/sciflo/ops/grq2/server.pem',
                roles=[comp])
        execute(fab.ln_sf, '~/ssl/server.key', '~/sciflo/ops/tosca/server.key',
                roles=[comp])
        execute(fab.ln_sf, '~/ssl/server.pem', '~/sciflo/ops/tosca/server.pem',
                roles=[comp])
        bar.update()
        # expose hysds log dir via webdav
        set_bar_desc(bar, 'Expose logs')
        execute(fab.mkdir, '/data/work', None, None, roles=[comp])
        execute(fab.ln_sf, '~/sciflo/log', '/data/work/log', roles=[comp])
        bar.update()
        # update ES template
        set_bar_desc(bar, 'Update ES template')
        execute(fab.install_es_template, roles=[comp])
        execute(fab.install_pkg_es_templates, roles=[comp])
        bar.update()
        # ship AWS creds
        set_bar_desc(bar, 'Configuring AWS creds')
        execute(fab.send_awscreds, roles=[comp])
        bar.update()
        set_bar_desc(bar, 'Updated grq')