def go(github_username='******', repository_name=None):
    """
    Execute the bootstrap tasks for a new project.
    """
    config_files = ' '.join(['PROJECT_README.md', 'app_config.py'])

    config = {}
    config['$NEW_PROJECT_SLUG'] = os.getcwd().split('/')[-1]
    config['$NEW_REPOSITORY_NAME'] = repository_name or config['$NEW_PROJECT_SLUG']
    config['$NEW_PROJECT_FILENAME'] = config['$NEW_PROJECT_SLUG'].replace('-', '_')
    config['$NEW_DISQUS_UUID'] = str(uuid.uuid1())

    utils.confirm("Have you created a Github repository named \"%s\"?" % config['$NEW_REPOSITORY_NAME'])

    for k, v in config.items():
        local('sed -i "" \'s|%s|%s|g\' %s' % (k, v, config_files))

    local('rm -rf .git')
    local('git init')
    local('mv PROJECT_README.md README.md')
    local('rm *.pyc')
    local('rm LICENSE')
    local('git add .')
    local('git add -f www/assets/.assetsignore')
    local('git commit -am "Initial import from app-template."')
    local('git remote add origin [email protected]:%s/%s.git' % (github_username, config['$NEW_REPOSITORY_NAME']))
    local('git push -u origin master')

    # Update app data
    execute('update')
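# Hedged usage sketch (names are illustrative, not from the original project): as a
# Fabric 1.x task, `go` would typically be invoked from the shell right after cloning
# the app template, e.g.
#
#   fab go:github_username=myorg,repository_name=my-new-project
#
# which rewrites the placeholder tokens in PROJECT_README.md and app_config.py, resets
# the git history, and pushes the initial commit to the new GitHub remote.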
def _add_remote(self, name=None):
    if env.host_string not in env.git_reverse:
        name = functions.get_remote_name(env.host_string, self.config_section, name=name)
        execute('local.git.add_remote', remote_name=name, user_and_host=env.host_string)
    return name
def setMaster():
    if exists('/etc/hosts0'):
        print 'etc/hosts0 exists'
    else:
        sudo('cp /etc/hosts /etc/hosts0')
    sudo('rm /etc/hosts')
    sudo('cp /etc/hosts0 /etc/hosts')
    put('hosts')
    sudo('cat hosts|sudo tee -a /etc/hosts')
    run('rm hosts')
    run('cat /etc/hosts')
    path1 = '/home/{0}'.format(parm['USER'])
    rsync_project(path1, exclude=['result'])
    path2 = join(path1, basename(realpath('.')))
    path3 = join(path2, parm['programdir'])
    for dst in (path2, path3):
        fi = '{0}/{1}'.format(dst, parm['keyfile'])
        if not exists(fi, use_sudo=True):
            put(parm['keyfile'], dst)
            sudo('chmod 400 {0}'.format(fi))
    execute('genkey')
def restart_services():
    if not console.confirm('Are you sure you want to restart the services on '
                           '{env.environment}?'.format(env=env), default=False):
        utils.abort('Task aborted.')
    require('root', provided_by=('staging', 'preview', 'production', 'india'))
    execute(services_restart)


def run(self, name=None):
    master = self._get_master()
    self._update_config()
    self._secure_ssh()
    self._update_firewalls(self.config_section)
    execute('postgres.slave_setup', master=master)
    self._save_config()
def toast(config):
    def do():
        with open(config, 'r') as ip:
            config_data = json.load(ip)
        dag_class = config_data['dag']
        # push the toast config to the remote machine
        toast_config_worker_path = os.path.join(
            eggo_config.get('worker_env', 'work_path'),
            build_dest_filename(config))
        put(local_path=config,
            remote_path=toast_config_worker_path)
        # TODO: run on central scheduler instead
        toast_cmd = ('toaster.py --local-scheduler {clazz} '
                     '--ToastConfig-config {toast_config}'.format(
                         clazz=dag_class,
                         toast_config=toast_config_worker_path))
        hadoop_bin = os.path.join(eggo_config.get('worker_env', 'hadoop_home'), 'bin')
        toast_env = {
            # toaster.py imports eggo_config, which needs EGGO_HOME on the worker
            'EGGO_HOME': eggo_config.get('worker_env', 'eggo_home'),
            # eggo_config must also be initialized on the worker
            'EGGO_CONFIG': eggo_config.get('worker_env', 'eggo_config_path'),
            'LUIGI_CONFIG_PATH': eggo_config.get('worker_env', 'luigi_config_path'),
            # dataset download pushes data to S3; TODO: only add if the dfs is S3
            'AWS_ACCESS_KEY_ID': eggo_config.get('aws', 'aws_access_key_id'),
            # TODO: should only be added if the dfs is S3
            'AWS_SECRET_ACCESS_KEY': eggo_config.get('aws', 'aws_secret_access_key'),
            'SPARK_HOME': eggo_config.get('worker_env', 'spark_home')}
        if exec_ctx == 'local':
            # this should copy vars that maintain venv info
            env_copy = os.environ.copy()
            env_copy.update(toast_env)
            toast_env = env_copy
        with path(hadoop_bin):
            with shell_env(**toast_env):
                wrun(toast_cmd)
    execute(do, hosts=get_master_host())
def messages_make_all():
    """ Create or update translations for the whole project. """
    execute(messages_make)
    for app in get_applications():
        execute(messages_make, app=app)
def setup_minion(*roles):
    """Setup a minion server with a set of roles."""
    require('environment')
    for r in roles:
        if r not in VALID_ROLES:
            abort('%s is not a valid server role for this project.' % r)
    config = {
        'master': 'localhost' if env.master == env.host else env.master,
        'output': 'mixed',
        'grains': {
            'environment': env.environment,
            'roles': list(roles),
        },
        'mine_functions': {
            'network.interfaces': [],
            'network.ip_addrs': []
        },
    }
    _, path = tempfile.mkstemp()
    with open(path, 'w') as f:
        yaml.dump(config, f, default_flow_style=False)
    sudo("mkdir -p /etc/salt")
    put(local_path=path, remote_path="/etc/salt/minion", use_sudo=True)
    # install salt minion if it's not there already
    install_salt(SALT_VERSION, master=False, minion=True, restart=True)
    # queries server for its fully qualified domain name to get minion id
    key_name = run('python -c "import socket; print socket.getfqdn()"')
    execute(accept_key, key_name)
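# For reference (a hedged sketch; values are illustrative): the yaml.dump above produces a
# minion config roughly like the following before it is uploaded to /etc/salt/minion:
#
#   grains:
#     environment: staging
#     roles:
#     - web
#     - worker
#   master: 10.0.0.1
#   mine_functions:
#     network.interfaces: []
#     network.ip_addrs: []
#   output: mixed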
def print_worker_env():
    def do():
        wrun('java -version')
        wrun('javac -version')
        wrun('mvn -version')
    execute(do, hosts=get_master_host())
def setup_load_balancer(redis_host=None, upstreams=""):
    check_valid_os()
    check_docker()
    if not redis_host or not upstreams:
        # setup on this host
        execute(setup_redis)
        ret = execute(setup_app_router, env.host_string)
        h, upstream = ret.popitem()
        upstreams = upstream
    # setup upstreams
    print(":: Setting up Shipyard Load Balancer on {}".format(env.host_string))
    with hide("stdout", "warnings"):
        build = True
        with settings(warn_only=True):
            out = sudo("docker ps | grep shipyard_lb")
            build = out.return_code
        if build:
            sudo("docker pull shipyard/lb")
        sudo(
            "docker run -i -t -d -p 80:80 -name shipyard_lb -e REDIS_HOST={} -e APP_ROUTER_UPSTREAMS={} shipyard/lb".format(
                redis_host, upstreams
            )
        )
    print("- Shipyard Load Balancer started")
    print("- Update DNS to use {} for your Shipyard Domain".format(env.host_string))
def sync():
    """Rsync local states and pillar data to the master, and check out margarita."""
    # Check for missing local secrets so that they don't get deleted
    # project.rsync_project fails if host is not set
    sudo("mkdir -p /srv")
    if not have_secrets():
        get_secrets()
    else:
        # Check for differences in the secrets files
        for environment in [env.environment]:
            remote_file = os.path.join('/srv/pillar/', environment, 'secrets.sls')
            with lcd(os.path.join(CONF_ROOT, 'pillar', environment)):
                if files.exists(remote_file):
                    get(remote_file, 'secrets.sls.remote')
                else:
                    local('touch secrets.sls.remote')
                with settings(warn_only=True):
                    result = local('diff -u secrets.sls.remote secrets.sls')
                if result.failed and files.exists(remote_file) and not confirm(
                        red("Above changes will be made to secrets.sls. Continue?")):
                    abort("Aborted. File has been copied to secrets.sls.remote. " +
                          "Resolve conflicts, then retry.")
                else:
                    local("rm secrets.sls.remote")
    salt_root = CONF_ROOT if CONF_ROOT.endswith('/') else CONF_ROOT + '/'
    project.rsync_project(local_dir=salt_root, remote_dir='/tmp/salt', delete=True)
    sudo('rm -rf /srv/salt /srv/pillar')
    sudo('mv /tmp/salt/* /srv/')
    sudo('rm -rf /tmp/salt/')
    execute(margarita)
def run_command_on_selected_server(command, *args, **kwargs):
    select_instance()
    selected_hosts = [
        'ubuntu@' + env.active_instance.public_dns_name
    ]
    kwargs['hosts'] = selected_hosts
    execute(command, *args, **kwargs)
def safe_reboot():
    """Reboot a mongo machine, stepping down if it is the primary"""
    import vm
    if not vm.reboot_required():
        print("No reboot required")
        return
    while True:
        if cluster_is_ok():
            break
        sleep(5)
        print("Waiting for cluster to be okay")
    primary = _find_primary()
    if primary == 'No primary currently elected':
        return primary
    if i_am_primary(primary):
        execute(step_down_primary)
        for i in range(5):
            if cluster_is_ok() and not i_am_primary():
                break
            sleep(1)
    if not cluster_is_ok() or i_am_primary():
        abort("Cluster has not recovered")
    execute(vm.reboot, hosts=[env['host_string']])
def prod_env_mobile_static_resources(game, region, component, root_dir_prod, root_dir_test, ip):
    @hosts(ip)
    def _prod_env_mobile_static_resources():
        # back up the current production copy
        remote_backup_dir = "/app/opbak/prod_mobile_static_resources_release/{}_{}_{}".format(game, region, TIMESTAMP)
        remote_mkdir(remote_backup_dir)
        static_resources_dir_prod = '{}/static_resources'.format(root_dir_prod)
        remote_mkdir(static_resources_dir_prod)
        static_resources_dir_test = '{}/static_resources'.format(root_dir_test)
        with cd(static_resources_dir_prod):
            if remote_dir_exists(component):
                run('cp -r {} {}/'.format(component, remote_backup_dir))
        # sync the new version's directory to the production environment
        run('rsync -aqP --delete {test_dir}/{component}/ {prod_dir}/{component}/'.format(
            test_dir=static_resources_dir_test,
            prod_dir=static_resources_dir_prod,
            component=component))
        # wait for the backup server to finish syncing before continuing the update
        conf_wait_rsync = ConfigReader(game, region)
        if conf_wait_rsync.has_option("mobile_www_wait_rsync"):
            wait_rsync = conf_wait_rsync.getboolean("mobile_www_wait_rsync")
            if wait_rsync:
                rsync_to_backup(game, region)
    execute(_prod_env_mobile_static_resources)
def create_or_update_virtualenvs(env_name, topology_name, requirements_file,
                                 virtualenv_flags=None):
    """Create or update virtualenvs on remote servers.

    Assumes that virtualenv is on the path of the remote server(s).

    :param env_name: the name of the environment in config.json.
    :param topology_name: the name of the topology (and virtualenv).
    :param requirements_file: path to the requirements.txt file to use
                              to update/install this virtualenv.
    """
    activate_env(env_name)

    # Check to ensure streamparse is in requirements
    with open(requirements_file, "r") as fp:
        found_streamparse = False
        for line in fp:
            if "streamparse" in line:
                found_streamparse = True
                break

        if not found_streamparse:
            die("Could not find streamparse in your requirements file ({}). "
                "streamparse is required for all topologies."
                .format(requirements_file))

    execute(_create_or_update_virtualenv, env.virtualenv_root, topology_name,
            requirements_file, virtualenv_flags=virtualenv_flags,
            hosts=env.storm_workers)
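# Hedged usage sketch (environment/topology names are illustrative): invoked as a Fabric
# task this would look something like
#
#   fab create_or_update_virtualenvs:prod,wordcount,topologies/wordcount/requirements.txt
#
# optionally with virtualenv_flags (e.g. "--system-site-packages") forwarded to the
# virtualenv creation on each host listed in env.storm_workers.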
def hotfix_deploy():
    """
    deploy ONLY the code with no extra cleanup or syncing

    for small python-only hotfixes
    """
    if not console.confirm('Are you sure you want to deploy {env.environment}?'.format(env=env), default=False) or \
       not console.confirm('Did you run "fab {env.environment} preindex_views"? '.format(env=env), default=False) or \
       not console.confirm('HEY!!!! YOU ARE ONLY DEPLOYING CODE. THIS IS NOT A NORMAL DEPLOY. COOL???', default=False):
        utils.abort('Deployment aborted.')

    _require_target()
    run('echo ping!')  # workaround for delayed console response

    try:
        execute(update_code)
    except Exception:
        execute(mail_admins, "Deploy failed", "You had better check the logs.")
        # hopefully bring the server back to life
        execute(services_restart)
        raise
    else:
        execute(services_restart)
        execute(record_successful_deploy)
def restart_services():
    _require_target()
    if not console.confirm('Are you sure you want to restart the services on '
                           '{env.environment}?'.format(env=env), default=False):
        utils.abort('Task aborted.')
    execute(services_restart)


def host_and_date():
    # Uncomment the decorator and execute again
    with cd('/tmp'):
        run('pwd')
    if prompt('Get date and host type? (y/n)').lower() == 'y':
        execute(host_type)
        execute(serial_date)
def test_client_process(inqueue, outqueue):
    cur_proc = mp.current_process()
    for inreq in iter(inqueue.get, SENTINEL):
        ii, target = inreq
        # save all stdout to log file
        sys.stdout = open(LOGDIR + "/" + "%d_%s.log" % (ii, target["name"]), "w")
        print("[%s : client %d %s %s]" % (cur_proc.name, ii, target["ami"], target["name"]))
        instances[ii] = block_until_instance_ready(instances[ii])
        print("server %s at %s" % (instances[ii], instances[ii].public_ip_address))
        env.host_string = "%s@%s" % (target["user"], instances[ii].public_ip_address)
        print(env.host_string)
        try:
            install_and_launch_certbot(instances[ii], boulder_url, target)
            outqueue.put((ii, target, "pass"))
            print("%s - %s SUCCESS" % (target["ami"], target["name"]))
        except:
            outqueue.put((ii, target, "fail"))
            print("%s - %s FAIL" % (target["ami"], target["name"]))
            pass
        # append server certbot.log to each per-machine output log
        print("\n\ncertbot.log\n" + "-" * 80 + "\n")
        try:
            execute(grab_certbot_log)
        except:
            print("log fail\n")
            pass
def buildpackage(os_release=None, name=None, upload=True, ubuntu_release=None):
    """Build a package for the current repository."""
    git.assert_in_repository()

    version = git_version()
    current_branch = git.current_branch()
    if os_release is None:
        os_release = parse_openstack_release(current_branch)

    deb_branch = discover_debian_branch(current_branch, version, os_release)
    with git.temporary_merge(deb_branch) as merge:
        source_package = dpkg_parsechangelog()
        current_version = source_package["Version"]
        version['debian'] = get_debian_commit_number()

        if ubuntu_release:
            dist = ubuntu_release
        else:
            dist = pbuilder.dist_from_release(os_release)
        dist_release = pbuilder.get_build_env(os_release, ubuntu_release)
        version['distribution'] = dist

        release_version = debian_version(current_version, version)
        local("dch -v {0} -D {1} --force-distribution 'Released'"
              .format(release_version, dist_release))
        local("git add debian/changelog")
        local("git commit -m \"{0}\"".format("Updated Changelog"))

        git_buildpackage(current_branch, upstream_tree=merge.old_head,
                         release=os_release, name=name,
                         ubuntu_release=ubuntu_release)

        # Regenerate the source package information since it's changed
        # since we updated the changelog.
        source_package = dpkg_parsechangelog()
        changes = changes_filepath(source_package)

    if upload:
        execute(uploadpackage, changes)
def execute_args(parser, argv=None):
    if argv is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(argv)
    fabric.state.env['hosts'] = args.hosts
    fabric.state.env['roles'] = args.roles
    fabric.state.env['exclude_hosts'] = args.exclude_hosts
    fabric.state.env['instance_uuid'] = args.instance_uuid
    fabric.state.env['tenant_uuid'] = args.tenant_uuid
    if args.verbose > 0:
        output['everything'] = True
        output['user'] = True
    if hasattr(args, 'hivemind_func'):
        kwargs = dict(
            (func_arg, getattr(args, argparser_arg))
            for (argparser_arg, func_arg) in args.hivemind_arg_mapping)
        execute(args.hivemind_func, **kwargs)
        return True
    return False


def remote_install(self, role_list):
    @task
    @roles(*role_list)
    @parallel
    def install():
        self.install_package()
    execute(install)
def initiateSetup(dir, verbose='no'):
    ### Print stdout and error messages only on verbose
    if verbose.upper() == 'NO':
        output.update({'running': False, 'stdout': False, 'stderr': False})
    log("Copying the files to lb node")
    ## Make a tar of the directory and send to remote
    base_dir = os.path.basename(dir)
    os.system('cd %s; tar zcf %s.tar.gz %s' % (os.path.dirname(dir), base_dir, base_dir))
    ## Copy files to lb
    execute(putFiles, hosts=env.host, files={dir + ".tar.gz": '/tmp'})
    ### Untar and copy fab file and ssh private key and puppet code to lb
    run('cd /tmp; tar -zxf %s.tar.gz; cp -r /tmp/%s/jiocloud_puppet_builder/resource_spawner/fabfile.py ~/; cp -f /tmp/%s/id_rsa ~/.ssh' % (base_dir, base_dir, base_dir))
    sudo('cp -r /tmp/%s/jiocloud_puppet_builder /var/puppet' % base_dir)
    log("Setting up the system on %s" % env.host)
    log("Run userdata.sh on lb node")
    sudo("bash /tmp/%s/jiocloud_puppet_builder/resource_spawner/userdata.sh -r lb" % base_dir)
    log("Run fab from lb1 to setup the cloud: %s" % env.project)
    ## Enable output - it is required to print inner fab messages
    output.update({'running': True, 'stdout': True, 'stderr': True})
    run('fab -f ~/fabfile -i ~ubuntu/.ssh/id_rsa --set cpservers=%s,ocservers=%s,stservers=%s,dbservers=%s,lbservers=%s setup:/tmp/%s,verbose:%s,verify=%s' % (env.cpservers, env.ocservers, env.stservers, env.dbservers, env.lbservers, base_dir, verbose, False))
def runPapply(num_exec=1):
    """Run puppet apply"""
    with hide('warnings'), settings(warn_only=True):
        attempt = 1
        while attempt <= num_exec:
            log(env.host + ' Running Puppet (num_exec: %s)' % attempt)
            if env.upgrade == 'base':
                try:
                    papply_out = sudo('/var/puppet/bin/papply -b')
                except Exception:
                    log("Failed runPapply, retrying")
                    papply_out = sudo('/var/puppet/bin/papply -b')
            else:
                try:
                    papply_out = sudo('/var/puppet/bin/papply')
                except Exception:
                    log("Failed runPapply, retrying")
                    papply_out = sudo('/var/puppet/bin/papply')
            if re.search('dpkg was interrupted, you must manually run \'sudo dpkg --configure -a\' to correct the problem', papply_out):
                sudo('dpkg --configure -a')
            execute(rebootIfNeeded, hosts=env.host)
            attempt += 1
            while not verifySshd([env.host], 'ubuntu'):
                sleep(5)
                continue
    log("Done puppet apply on %s" % env.host)
def rebootIfNeeded():
    """Reboot the node if required"""
    if env.host != '10.1.0.5':
        with hide('warnings'), settings(warn_only=True):
            rv_needreboot = sudo('grep System.restart.required /var/run/reboot-required')
            if rv_needreboot.return_code == 0:
                execute(rebootNode, hosts=env.host)
def auto(env_name):
    """Auto migration based on the entry in the versions table.
    """
    settings = get_settings(env_name)
    setup_db(settings)
    version = PersistentVersion(settings, 'service')
    migrated = True
    while migrated:
        migrated = False
        v = tuple([int(i) for i in version.version.split('.')])
        for mv in sorted(MIGRATIONS.keys()):
            if mv > v:
                version_str = '.'.join([str(i) for i in mv])
                migration = MIGRATIONS[mv]
                print migration.__doc__
                print red('You are about to migrate from '
                          'version %s to version %s!' % (
                              version.version, version_str)
                          )
                result = prompt("Run migration (y/N)?")
                if not result.lower().startswith('y'):
                    abort(red('Migration aborted by user request!'))
                execute(partial(migration, env_name), hosts=settings['hosts'])
                # update the version in the database
                version.version = version_str
                migrated = True
                break
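# Hedged sketch (hypothetical migration, not part of the original file): auto() above
# expects MIGRATIONS to map version tuples to callables that take the environment name;
# the callable's docstring is what gets printed before the confirmation prompt.
def _example_migration(env_name):
    """Example: add a (hypothetical) audit index to the service schema."""
    # a real migration would run its schema/data changes here, typically via execute()
    pass

# MIGRATIONS[(1, 2, 0)] = _example_migration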
def index(env_name, *args):
    """Migrate a single index

    Upgrade an index to a new index schema.

    This script will do:
        1) copy current index into a temporary index
        2) drop existing index
        3) recreate index with new structure
        4) copy temporary index back

    Important:
        The index will be created based on the local sql definitions, not the
        one which is deployed.

    If something goes wrong:
        After the migration an index with the name tmp_<index> is left. If
        something goes wrong while recreating the original index, this is the
        copy of the index when the migration was started.

    Note:
        The index will be temporarily unavailable (step 2-3) or not all data
        is available (step 4).
    """
    if not args:
        abort(red('No index names provided!'))
    settings = get_settings(env_name)
    print index.__doc__
    print red('You are about to run the migration for')
    for arg in args:
        print red(' - %s' % arg)
    result = prompt("Run migration (y/N)?")
    if not result.lower().startswith('y'):
        abort(red('Migration aborted by user request!'))
    execute(partial(do_index, env_name, *args), hosts=settings['hosts'])
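# Hedged usage sketch (environment and index names are illustrative):
#
#   fab index:staging,users,documents
#
# prints the docstring above, asks for confirmation, then runs do_index for the `users`
# and `documents` indexes against the hosts configured for the `staging` environment.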
def reload_jormun_safe_all(safe=True):
    """ Reload jormungandr on all servers, in a safe way if load balancers are available """
    safe = get_bool_from_cli(safe)
    for server in env.roledefs['ws']:
        execute(reload_jormun_safe, server, safe)


def prod():
    """Define prod stage"""
    env.roledefs = {
        'web': ['{{ project_name }}.net'],
        'lb': ['lb.{{ project_name }}.net']
    }
    # env.user = '******'  # user for ssh
    env.backends = env.roledefs['web']
    env.server_name = '{{ project_name }}.net'
    env.short_server_name = '{{ project_name }}'
    env.static_folder = '/site_media/'
    env.server_ip = ''
    env.no_shared_sessions = False
    env.server_ssl_on = True
    env.path_to_cert = '/etc/ssl/certs/{{ project_name }}.net.pem'
    env.path_to_cert_key = '/etc/ssl/private/{{ project_name }}.net.key'
    env.goal = 'prod'
    env.socket_port = ''
    env.map_settings = {
        'default_db_host': "DATABASES['default']['HOST']",
        'default_db_user': "******",
        'default_db_password': "******",
        'default_db_name': "DATABASES['default']['NAME']",
        'secret_key': "SECRET_KEY",
    }
    execute(build_env)


def docker_run_ct():
    local('docker network create -d bridge --subnet 172.31.0.0/16 wktime_nw')
    local('docker run --net=wktime_nw --ip=172.31.1.1 -ti --privileged '
          '-v /sys/fs/cgroup:/sys/fs/cgroup:ro --name=wktime '
          '--hostname=wktime.local ubuntu:15.10 /bin/bash')
    local('docker exec wktime service ssh start')
    execute(restart_services)
def rollback_formplayer():
    execute(formplayer.rollback_formplayer)
    execute(supervisor.restart_formplayer)


def stop_celery():
    execute(supervisor.stop_celery_tasks, True)


def force_update_static():
    _require_target()
    execute(staticfiles.collectstatic, use_current_release=True)
    execute(staticfiles.compress, use_current_release=True)
    execute(staticfiles.update_manifest, use_current_release=True)
    silent_services_restart(use_current_release=True)


def clean_offline_releases():
    """
    Cleans all releases in home directory
    """
    execute(release.clean_offline_releases)


def clean_releases(keep=3):
    """
    Cleans old and failed deploys from the ~/www/<environment>/releases/ directory
    """
    execute(release.clean_releases, keep)


def copy_release_files(full_cluster=True):
    execute(release.copy_localsettings(full_cluster))
    if full_cluster:
        execute(release.copy_tf_localsettings)
        execute(release.copy_formplayer_properties)
        execute(release.copy_components)
        execute(release.copy_node_modules)
        execute(release.copy_compressed_js_staticfiles)


def update_current(release=None):
    execute(release.update_current, release)


def kill_stale_celery_workers():
    """
    Kills celery workers that failed to properly go into warm shutdown
    """
    execute(release.kill_stale_celery_workers)


def upload_upload():
    fab.execute(upload_dir, '../public/upload', 'upload', opts=['--ignore-existing'])


def deploy_formplayer():
    execute(announce_formplayer_deploy_start)
    execute(formplayer.build_formplayer, True)
    execute(supervisor.restart_formplayer)


def all_hosts(task, *args, **kargs):
    self = args[0]
    return execute(parallel(task), *args, hosts=self.hosts, **kargs)
def deploy(skip_slack=False):
    env = environment()

    fab.execute(ensure_not_dirty)
    # TODO
    # fab.execute(test)
    # maintenance mode on

    # push configs
    fab.execute(push_configs)
    # push staging robots.txt
    fab.execute(push_robots)

    # TODO refactor cwd
    cwd = '../public/local'
    with lcd(cwd):
        # local composer install
        if os.path.exists(os.path.join(cwd, 'composer.json')):
            fab.local('composer install')
        # local npm install and build assets
        fab.local(asset_build_command)

    if not env['local']:
        # sync directories: build, composer vendor, mockup
        for rel_path in ['templates/main/build', 'vendor']:
            # TODO optimize composer's vendor sync: look for changes in composer.json?
            fab.execute(upload_dir, '../public/local/' + rel_path, 'local/' + rel_path)
        fab.execute(upload_upload)

    # TODO `git-ftp init` for initial deployment?
    # git-ftp push
    fab.execute(git_ftp, 'push')

    # clear bitrix cache
    fab.execute(clear_cache)

    # migrate db

    # notify in slack if remote
    name = ', '.join(fab.env.roles)
    if not skip_slack:
        fab.execute(
            slack,
            'Deployed to `{}` at {}, commit: {}'.format(
                name, env['ftp']['url'], last_commit_sha()))
def add_new_core():
    """
    Add a Solr core for the current OpenMunicipio instance.
    """
    require('domain_root', 'app_domain', 'local_repo_root', 'solr_home', 'om_user',
            provided_by=('staging', 'production'))

    execute(update_core_conf)

    ## symlink configuration dir for the new core
    with hide('commands', 'warnings'):
        ln_dest = '%(solr_home)s/cores/%(app_domain)s' % env
        ln_src = os.path.join(env.domain_root, 'private', 'solr')
        if files.exists(ln_dest, use_sudo=True):
            fastprint("Removing file %s..." % ln_dest, show_prefix=True)
            sudo('rm -f %s' % ln_dest)
            fastprint(" done." % env, end='\n')
        fastprint("Symlinking core configuration...", show_prefix=True)
        sudo('ln -s %s %s' % (ln_src, ln_dest))
        fastprint(" done." % env, end='\n')

        ## create a data dir for this core, if not existing
        with cd(os.path.join(env.solr_home, 'data')):
            if not files.exists(env.app_domain):
                fastprint("Creating a data dir for this core...", show_prefix=True)
                sudo('mkdir -p %(app_domain)s' % env)
                # Tomcat needs write permissions to cores' data dirs
                sudo('chmod 2770 %(app_domain)s' % env)
                fastprint(" done." % env, end='\n')

        ## add to ``solr.xml`` a definition for the new core (as an additional ``<core>`` element)
        with cd(env.solr_home):
            # copy remote version of ``solr.xml`` to the local machine
            fastprint("Adding new core definition to `solr.xml'... ", show_prefix=True)
            tmp_fname = os.path.join(env.local_repo_root, 'solr', 'solr.xml.remote')
            get(remote_path=os.path.join('cores', 'solr.xml'), local_path=tmp_fname)
            # parse ``solr.xml`` into a tree of Python objects
            tree = objectify.parse(tmp_fname)
            # retrieve the ``<cores>`` XML element
            cores_el = tree.getroot().cores
            # build a factory function for ``<core>`` elements
            E = objectify.ElementMaker(annotate=False)
            CORE = E.core
            # if a core definition for this OpenMunicipio instance already exists, drop it
            existing_cores = [el.attrib['name'] for el in cores_el.iterchildren()]
            if env.om_user in existing_cores:
                [cores_el.remove(el) for el in cores_el.iterchildren()
                 if el.attrib['name'] == env.om_user]
            # append a new ``<core>`` element to ``<cores>``
            cores_el.append(CORE(name=env.om_user,
                                 instanceDir=env.app_domain,
                                 dataDir='%(solr_home)s/data/%(app_domain)s' % env))
            # write back to ``solr.xml.remote``
            tree.write(tmp_fname, pretty_print=True, xml_declaration=True, encoding='UTF-8')
            # update ``solr.xml`` on the server machine
            src = tmp_fname
            dest = os.path.join('cores', 'solr.xml')
            put(src, dest, mode=0644)
            # cleanup
            local('rm %s' % tmp_fname)
            fastprint(" done." % env, end='\n')

    restart_tomcat()
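# For reference (a hedged sketch; values are illustrative): after this task runs, the
# remote cores/solr.xml ends up with one extra element of the form
#
#   <core name="openmunicipio_user" instanceDir="om.example.org"
#         dataDir="/home/solr/data/om.example.org"/>
#
# where name, instanceDir and dataDir come from env.om_user, env.app_domain and
# env.solr_home respectively.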
def all_gateloads(task, *args, **kargs):
    self = args[0]
    return execute(parallel(task), *args, hosts=self.gateloads, **kargs)
def integ_test(gateway_host=None, test_host=None, trf_host=None,
               destroy_vm="False", no_build="False"):
    """
    Run the integration tests. This defaults to running on local vagrant
    machines, but can also be pointed to an arbitrary host (e.g. amazon) by
    passing "address:port" as arguments

    gateway_host: The ssh address string of the machine to run the gateway
        services on. Formatted as "host:port". If not specified, defaults to
        the `cwag` vagrant box.

    test_host: The ssh address string of the machine to run the tests on.
        Formatted as "host:port". If not specified, defaults to the
        `cwag_test` vagrant box.

    trf_host: The ssh address string of the machine to run the TrafficServer
        on. Formatted as "host:port". If not specified, defaults to the
        `magma_trfserver` vagrant box.

    no_build: When set to true, this script will NOT rebuild all docker images.
    """
    destroy_vm = bool(strtobool(destroy_vm))
    no_build = bool(strtobool(no_build))

    # Setup the gateway: use the provided gateway if given, else default to the
    # vagrant machine
    if not gateway_host:
        vagrant_setup("cwag", destroy_vm)
    else:
        ansible_setup(gateway_host, "cwag", "cwag_dev.yml")

    execute(_run_unit_tests)
    execute(_set_cwag_configs)
    cwag_host_to_mac = execute(_get_br_mac, CWAG_BR_NAME)
    host = env.hosts[0]
    cwag_br_mac = cwag_host_to_mac[host]

    # Transfer built images from local machine to CWAG host
    if gateway_host:
        execute(_transfer_docker_images)
    else:
        execute(_stop_gateway)
        if not no_build:
            execute(_build_gateway)
    execute(_run_gateway)

    # Setup the trfserver: use the provided trfserver if given, else default to
    # the vagrant machine
    with lcd(LTE_AGW_ROOT):
        if not trf_host:
            vagrant_setup("magma_trfserver", destroy_vm)
        else:
            ansible_setup(trf_host, "trfserver", "magma_trfserver.yml")
    execute(_start_trfserver)

    # Run the tests: use the provided test machine if given, else default to
    # the vagrant machine
    if not test_host:
        vagrant_setup("cwag_test", destroy_vm)
    else:
        ansible_setup(test_host, "cwag_test", "cwag_test.yml")

    cwag_test_host_to_mac = execute(_get_br_mac, CWAG_TEST_BR_NAME)
    host = env.hosts[0]
    cwag_test_br_mac = cwag_test_host_to_mac[host]
    execute(_set_cwag_test_configs)

    # Get back to the gateway vm to setup static arp
    if not gateway_host:
        vagrant_setup("cwag", destroy_vm)
    else:
        ansible_setup(gateway_host, "cwag", "cwag_dev.yml")
    execute(_set_cwag_networking, cwag_test_br_mac)

    # Start tests
    if not test_host:
        vagrant_setup("cwag_test", destroy_vm)
    else:
        ansible_setup(test_host, "cwag_test", "cwag_test.yml")

    execute(_start_ue_simulator)
    execute(_set_cwag_test_networking, cwag_br_mac)
    execute(_run_integ_tests, test_host, trf_host)
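# Hedged usage sketch (addresses are illustrative): run against the local vagrant boxes with
#
#   fab integ_test
#
# or point the gateway at an existing machine by passing an ssh "host:port" string, e.g.
#
#   fab integ_test:gateway_host=ubuntu@10.0.2.15:22,destroy_vm=True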
def integ_test(gateway_host=None, test_host=None, trf_host=None,
               gateway_vm="cwag", gateway_ansible_file="cwag_dev.yml",
               transfer_images=False, destroy_vm=False, no_build=False,
               tests_to_run="all", skip_unit_tests=False, test_re=None,
               test_result_xml=None, run_tests=True):
    """
    Run the integration tests. This defaults to running on local vagrant
    machines, but can also be pointed to an arbitrary host (e.g. amazon) by
    passing "address:port" as arguments

    gateway_host: The ssh address string of the machine to run the gateway
        services on. Formatted as "host:port". If not specified, defaults to
        the `cwag` vagrant box.

    test_host: The ssh address string of the machine to run the tests on.
        Formatted as "host:port". If not specified, defaults to the
        `cwag_test` vagrant box.

    trf_host: The ssh address string of the machine to run the TrafficServer
        on. Formatted as "host:port". If not specified, defaults to the
        `magma_trfserver` vagrant box.

    no_build: When set to true, this script will NOT rebuild all docker images.
    """
    try:
        tests_to_run = SubTests(tests_to_run)
    except ValueError:
        print("{} is not a valid value. We support {}".format(
            tests_to_run, SubTests.list()))
        return

    # Setup the gateway: use the provided gateway if given, else default to the
    # vagrant machine
    _switch_to_vm(gateway_host, gateway_vm, gateway_ansible_file, destroy_vm)

    # We will direct coredumps to be placed in this directory
    # Clean up before every run
    if files.exists("/var/opt/magma/cores/"):
        run("sudo rm /var/opt/magma/cores/*", warn_only=True)
    else:
        run("sudo mkdir -p /var/opt/magma/cores", warn_only=True)

    if not skip_unit_tests:
        execute(_run_unit_tests)

    execute(_set_cwag_configs, "gateway.mconfig")
    execute(_add_networkhost_docker)
    cwag_host_to_mac = execute(_get_br_mac, CWAG_BR_NAME)
    host = env.hosts[0]
    cwag_br_mac = cwag_host_to_mac[host]

    # Transfer built images from local machine to CWAG host
    if gateway_host or transfer_images:
        execute(_transfer_docker_images)
    else:
        execute(_stop_gateway)
        if not no_build:
            execute(_build_gateway)
    execute(_run_gateway)

    # Setup the trfserver: use the provided trfserver if given, else default to
    # the vagrant machine
    with lcd(LTE_AGW_ROOT):
        _switch_to_vm(gateway_host, "magma_trfserver", "magma_trfserver.yml", destroy_vm)
    execute(_start_trfserver)

    # Run the tests: use the provided test machine if given, else default to
    # the vagrant machine
    _switch_to_vm(gateway_host, "cwag_test", "cwag_test.yml", destroy_vm)
    cwag_test_host_to_mac = execute(_get_br_mac, CWAG_TEST_BR_NAME)
    host = env.hosts[0]
    cwag_test_br_mac = cwag_test_host_to_mac[host]
    execute(_set_cwag_test_configs)
    execute(_start_ipfix_controller)

    # Get back to the gateway vm to setup static arp
    _switch_to_vm_no_destroy(gateway_host, gateway_vm, gateway_ansible_file)
    execute(_set_cwag_networking, cwag_test_br_mac)

    # check if docker services are alive except for OCS2 and PCRF2
    ignoreList = ["ocs2", "pcrf2"]
    execute(_check_docker_services, ignoreList)

    _switch_to_vm_no_destroy(gateway_host, "cwag_test", "cwag_test.yml")
    execute(_start_ue_simulator)
    execute(_set_cwag_test_networking, cwag_br_mac)

    if run_tests == "False":
        execute(_add_docker_host_remote_network_envvar)
        print("run_tests was set to false. Tests will not be run\n"
              "You can now run the tests manually from cwag_test")
        sys.exit(0)

    # HSSLESS tests are to be executed from the gateway_host VM
    if tests_to_run.value == SubTests.HSSLESS.value:
        _switch_to_vm_no_destroy(gateway_host, gateway_vm, gateway_ansible_file)
        execute(_run_integ_tests, gateway_host, trf_host, tests_to_run, test_re)
    else:
        execute(_run_integ_tests, test_host, trf_host, tests_to_run, test_re,
                test_result_xml)

    # If we got here, everything worked well!
    if not test_host and not trf_host:
        # Clean up only for now when running locally
        execute(_clean_up)
    print('Integration Test Passed for "{}"!'.format(tests_to_run.value))
    sys.exit(0)
def deploy():
    execute(update)
    execute(restart)


def provision():
    execute(install_required_packages)
    execute(install_virtualenvwrapper)
    execute(install_oracle_jdk)
    execute(install_apache_spark)
    execute(install_marvin_engine_executor)
    execute(create_marvin_engines_prefix)
    execute(configure_marvin_environment)


def setup_ipip():
    execute(setup_ipip_router)
    execute(setup_ipip_client)


def deploy_all():
    execute(deploy_db)
    execute(deploy_web)


def delete_nat():
    execute(delete_nat1)
    execute(delete_route)


def clear_ipip_router():
    execute(clear_nat_8080, inface='ipiptun1', outface='enp0s25')
    execute(setup_nat_8080, inface='enx00e04c534458', outface='enp0s25')
    sudo('ip tun del ipiptun1')


def restore_nat():
    execute(restore_nat1)
    execute(restore_route)
def setup_ipip_router():
    # execute(clear_nat, inface='enx00e04c534458', outface='enp0s25')
    with cd('roc/tunneling/'):
        sudo('./ipip-client.sh')
    execute(clear_nat_8080, inface='enx00e04c534458', outface='enp0s25')
    execute(setup_nat_8080, inface='ipiptun1', outface='enp0s25')
def clear_tcpvegas():
    execute(clear_tcpvegas_router)
    execute(clear_tcpvegas_client)


def delete_nat1():
    execute(clear_nat, inface='enx00e04c534458', outface='enp0s25')
    execute(clear_nat_8080, inface='enx00e04c534458', outface='enp0s25')


def setup_tcpvegas():
    execute(setup_tcpvegas_client)
    execute(setup_tcpvegas_router)


def restore_nat1():
    execute(setup_nat, inface='enx00e04c534458', outface='enp0s25')
    execute(setup_nat_8080, inface='enx00e04c534458', outface='enp0s25')


def clear_tcplp():
    execute(clear_tcplp_router)
    execute(clear_tcplp_client)


def clear_tcpvegas_router():
    sudo('ifconfig tcpvegas down')
    sudo('sudo screen -X -S tcpvegas quit')
    execute(clear_nat_8080, inface='tcpvegas', outface='enp0s25')
    execute(setup_nat_8080, inface='enx00e04c534458', outface='enp0s25')