def collect_reports(worker_root, project_name):
    """Collect the XML result files from the remote results directory, then delete them."""
    results_path = os.path.join(worker_root, project_name, 'results')
    with cd(results_path):
        print "\nStart to collect result files"
        get('*.xml', './')
        run('rm -rf *.xml')
def test_get_with_format_chars_on_server(self):
    """
    get('*') with format symbols (%) on remote paths should not break
    """
    remote = '*'
    with hide('everything'):
        get(remote, self.path())
def get(file, target=None):
    """Download the specified files from the remote buildout folder"""
    if not target:
        target = file
    if not file.startswith('/'):
        file = api.env.path + '/' + file
    api.get(file, target)
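# A minimal usage sketch for the wrapper above (hypothetical file names;
# assumes api.env.path points at the remote buildout root): relative names
# are resolved against the buildout folder, absolute paths are fetched as-is.
def fetch_examples():
    get('buildout.cfg')                          # -> ./buildout.cfg
    get('var/log/instance.log', 'instance.log')  # explicit local target
    get('/etc/hostname', 'remote-hostname')      # absolute path, used verbatim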
def snapshot():
    """Takes a snapshot"""
    ignore = " ".join(['-e %s' % i for i in dump_ignores])
    run("workon %s; cd %s; python manage.py dumpdata --indent=2 %s > %s/dump.json"
        % (env.venv, env.remote_app_dir, ignore, env.remote_root_dir))
    run("cd %s; gzip -9 -f dump.json" % env.remote_root_dir)
    get("%s/dump.json.gz" % env.remote_root_dir, "dump.json.gz")
def fetch_logs():
    """
    Fetch logs from the web instances into ~/logs.
    """
    require("configuration")
    with cd(env.deploy_dir):
        get("codalab/var/*.log", "~/logs/%(host)s/%(path)s")
def pull():
    """ updates development environment """
    x = prompt(blue('Reset local database (r) or flush (f)?'), default="f")
    if x == 'r':
        reset_local_db()
    elif x == 'f':
        print(red(" * flushing database..."))
        local('cd %s '
              '&& . %s '
              '&& python manage.py flush' % (PROJECT_PATH, VIRTUAL_ENVIRONMENT))
    print(red(" * creating database dump..."))
    run('cd %s '
        '&& source venv/bin/activate '
        '&& python manage.py dumpdata --natural-foreign '
        '-e contenttypes -e auth.Permission > data.json' % env.path)
    print(red(" * downloading dump..."))
    get('%s/data.json' % env.path, '/tmp/data.json')
    print(red(" * importing the dump locally..."))
    local('cd %s '
          '&& . %s '
          '&& python manage.py loaddata /tmp/data.json' % (PROJECT_PATH, VIRTUAL_ENVIRONMENT),
          capture=False)
    print(red(" * removing database dump..."))
    run('rm %s/data.json' % env.path)
    print(red(" * syncing media files..."))
    rsync_project('%s/' % env.media_path, settings.MEDIA_ROOT, upload=False, delete=True)
def sync():
    """Rsync local states and pillar data to the master, and checkout margarita."""
    # Check for missing local secrets so that they don't get deleted
    # project.rsync_project fails if host is not set
    sudo("mkdir -p /srv")
    if not have_secrets():
        get_secrets()
    else:
        # Check for differences in the secrets files
        for environment in [env.environment]:
            remote_file = os.path.join('/srv/pillar/', environment, 'secrets.sls')
            with lcd(os.path.join(CONF_ROOT, 'pillar', environment)):
                if files.exists(remote_file):
                    get(remote_file, 'secrets.sls.remote')
                else:
                    local('touch secrets.sls.remote')
                with settings(warn_only=True):
                    result = local('diff -u secrets.sls.remote secrets.sls')
                if result.failed and files.exists(remote_file) and not confirm(
                        red("Above changes will be made to secrets.sls. Continue?")):
                    abort("Aborted. File has been copied to secrets.sls.remote. " +
                          "Resolve conflicts, then retry.")
                else:
                    local("rm secrets.sls.remote")
    salt_root = CONF_ROOT if CONF_ROOT.endswith('/') else CONF_ROOT + '/'
    project.rsync_project(local_dir=salt_root, remote_dir='/tmp/salt', delete=True)
    sudo('rm -rf /srv/salt /srv/pillar')
    sudo('mv /tmp/salt/* /srv/')
    sudo('rm -rf /tmp/salt/')
    execute(margarita)
def copy_resource(container, resource, local_filename, contents_only=True):
    """
    Copies a resource from a container to a compressed tarball and downloads it.

    :param container: Container name or id.
    :type container: unicode
    :param resource: Name of resource to copy.
    :type resource: unicode
    :param local_filename: Path to store the tarball locally.
    :type local_filename: unicode
    :param contents_only: In case ``resource`` is a directory, put all contents at the root of
      the tar file. If this is set to ``False``, the directory itself will be at the root instead.
    :type contents_only: bool
    """
    with temp_dir() as remote_tmp:
        base_name = os.path.basename(resource)
        copy_path = posixpath.join(remote_tmp, 'copy_tmp')
        run(mkdir(copy_path, check_if_exists=True))
        remote_name = posixpath.join(copy_path, base_name)
        archive_name = 'container_{0}.tar.gz'.format(container)
        archive_path = posixpath.join(remote_tmp, archive_name)
        run('docker cp {0}:{1} {2}'.format(container, resource, copy_path), shell=False)
        if contents_only and is_directory(remote_name):
            src_dir = remote_name
            src_files = '*'
        else:
            src_dir = copy_path
            src_files = base_name
        with cd(src_dir):
            run(targz(archive_path, src_files))
        get(archive_path, local_filename)
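# A usage sketch for copy_resource (hypothetical container name and paths):
# pull /var/log out of a container named 'web' as a local tarball whose top
# level holds the log files themselves rather than a 'log/' directory.
def fetch_container_logs():
    copy_resource('web', '/var/log', '/tmp/web_logs.tar.gz', contents_only=True)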
def download_file():
    print "Checking local disk space..."
    local("df -h")
    remote_path = prompt("Enter the remote file path:")
    local_path = prompt("Enter the local file path:")
    get(remote_path=remote_path, local_path=local_path)
    local("ls %s" % local_path)
def clone_repo():
    """Clone the wonderhop repo on the server"""
    # Add known hosts for Github
    append("~/.ssh/known_hosts", [
        "|1|AxYrTZcwBIPIFSdy29CGanv85ZE=|D0Xa0QCz1anXJ9JrH4eJI3EORH8= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==",
        "|1|ErT4pRs4faesbyNw+WB0hWuIycs=|9+4iN3FDijMOl1Z+2PNB9O9wXjw= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==",
    ])
    if not exists("~/.ssh/id_github_deploy"):
        # Generate a public/private key pair
        run("ssh-keygen -q -t rsa -f ~/.ssh/id_github_deploy -N ''")
        ssh_pub_key = StringIO()
        get("~/.ssh/id_github_deploy.pub", ssh_pub_key)
        ssh_pub_key = ssh_pub_key.getvalue().strip()
        # Add it to Github
        gh_user = prompt("Github username?")
        gh_pass = getpass("Github password? ")
        urllib2.urlopen(urllib2.Request(
            "https://api.github.com/repos/wonderhop/wonderhop/keys",
            json.dumps({
                "title": "wonderhop@{0}".format(env.host),
                "key": ssh_pub_key,
            }), {
                "Content-Type": "application/json",
                "Authorization": "Basic {0}".format(
                    base64.b64encode("{0}:{1}".format(gh_user, gh_pass))),
            }))
        # Specify that we should use the given key for Github
        append("~/.ssh/config", "Host github.com\nIdentityFile ~/.ssh/id_github_deploy")
    run("git clone git@github.com:wonderhop/wonderhop.git")
def get_secrets():
    "Get files that aren't in the checkout, such as sitesettings.py"
    with lcd(env.local_dir):
        # We get a copy of the production sitesettings.py, but we don't use it for local
        # development (hence it's not kept in seabirds/).
        get('%(remote_dir)s/seabirds/sitesettings.py' % env,
            local_path='sitesettings_production.py')
        get('%(remote_dir)s/seabirds/secrets.py' % env, local_path='seabirds/')
def setup_capsules(path):
    """Reads the configuration, creates capsules and starts content sync on
    them.
    """
    load_capsule_config(path)
    config = env.capsule_config
    server = config.server.host_string
    # Let Fabric know how to log into the hosts
    env.passwords = config.passwords
    env.key_filename = config.key_filenames
    # The oauth information is needed for every capsule register. Cache this
    # information.
    with settings(host_string=server):
        oauth_info = get_oauth_info()
    # Register each capsule on the server
    for capsule in config.capsules:
        with settings(host_string=server):
            cert_path = generate_capsule_certs(capsule.hostname)
            get(remote_path=cert_path, local_path=cert_path)
        with settings(host_string=capsule.host_string):
            register_capsule()
            put(local_path=cert_path)
            capsule_installer(capsule.hostname, cert_path, *oauth_info)
def _wrapped_env(*args, **kw):
    if args:
        environ = args[0]
    else:
        try:
            exp_repo = kw.get('exp_repo', '${ARCHIVE_OCEAN}/exp_repos')
            name = kw['name']
        except KeyError:
            raise NoEnvironmentSetException
        environ = {'exp_repo': exp_repo,
                   'name': name,
                   'expfiles': '${HOME}/.bosun_exps'}
        environ = _expand_config_vars(environ)
        with hide('running', 'stdout', 'stderr', 'warnings'):
            if exists(fmt('{expfiles}', environ)):
                run(fmt('rm -rf {expfiles}', environ))
            run(fmt('hg clone {exp_repo} {expfiles}', environ))
            # else:
            #     with cd(fmt('{expfiles}', environ)):
            #         run('hg pull -u ')
            temp_exp = StringIO()
            get(fmt('{expfiles}/exp/{name}/namelist.yaml', environ), temp_exp)
        kw['expfiles'] = environ['expfiles']
        environ = load_configuration(temp_exp.getvalue(), kw)
        kw.pop('expfiles', None)
        kw.pop('name', None)
        kw.pop('exp_repo', None)
        temp_exp.close()
    return func(environ, **kw)
def get_htaccess():
    if not env.path:
        env.path = ''
    htaccess = StringIO()
    get(remote_path='%s/.htaccess' % env.path, local_path=htaccess)
    htaccess.seek(0)
    return htaccess
def syncdb():
    """Sync local db with remote db"""
    if not REMOTE_DB_USERNAME or not REMOTE_DB_PASSWORD or not REMOTE_DB_NAME:
        print "Please setup remote db configs"
        return
    if not LOCAL_DB_USERNAME or not LOCAL_DB_PASSWORD or not LOCAL_DB_NAME:
        print "Please setup local db configs"
        return
    with cd("/tmp"):
        run("mysqldump -u%s -p%s %s > latest_db.sql" % (REMOTE_DB_USERNAME,
            REMOTE_DB_PASSWORD, REMOTE_DB_NAME))
        run("tar cfz latest_db.sql.tgz latest_db.sql")
    # Download to local
    get("/tmp/latest_db.sql.tgz", "/tmp")
    with lcd("/tmp"):
        local("tar xfz latest_db.sql.tgz")
        local("mysql -u%s -p%s %s < latest_db.sql" % (LOCAL_DB_USERNAME,
              LOCAL_DB_PASSWORD, LOCAL_DB_NAME))
def fetch_conf(output=None, stagedir=None, local_exe=None):
    remote_output = "%s/bootout.json" % (stagedir)
    if local_exe:
        shutil.copy(remote_output, output)
    else:
        get(remote_output, output)
def build_ssh2(src, dest):
    """Establish an SSH trust relationship from src to dest."""
    env.host_string = src['host']
    env.user = src['user']
    env.password = src['passwd']
    cmd = "[ -d ~/.ssh ] || mkdir ~/.ssh; "
    run(cmd)
    cmd = 'if [ ! -f ~/.ssh/id_rsa.pub ]; then ssh-keygen -N "" -f ~/.ssh/id_rsa ; fi'
    run(cmd)
    id_rsa_pub_file = '~/.ssh/{user}@{host}_id_rsa.pub'.format(
        user=src['user'], host=src['host'])
    get('~/.ssh/id_rsa.pub', id_rsa_pub_file)

    env.host_string = dest['host']
    env.user = dest['user']
    env.password = dest['passwd']
    run('[ -d ~/.ssh ] || mkdir ~/.ssh')
    put(id_rsa_pub_file, id_rsa_pub_file)
    cmd = ('existed=0;content=`cat {pubkey}`; '
           'if [ -f ~/.ssh/authorized_keys ];then '
           'grep "$content" ~/.ssh/authorized_keys >/dev/null 2>&1; '
           'if [ $? -eq 0 ];then existed=1;fi;fi; ')
    cmd = cmd + (' if [ $existed -eq 0 ]; then cat {pubkey} >> ~/.ssh/authorized_keys ; fi '
                 '&& rm {pubkey} && chmod 600 ~/.ssh/authorized_keys '
                 '&& chmod og-w ~; chmod og-w ~/.ssh')
    cmd = cmd.format(pubkey=id_rsa_pub_file)
    run(cmd)

    # Test the connection; this also clears the host-key confirmation that
    # the first connection would otherwise require.
    env.host_string = src['host']
    env.user = src['user']
    env.password = src['passwd']
    cmd = 'ssh {user}@{host} "ls"'.format(user=dest['user'], host=dest['host'])
    run(cmd)
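# A usage sketch for build_ssh2 (hypothetical hosts and credentials): after
# this call, user alice on 192.168.1.10 can ssh to bob@192.168.1.20 without
# a password prompt.
def trust_example():
    src = {'host': '192.168.1.10', 'user': 'alice', 'passwd': 'alice-secret'}
    dest = {'host': '192.168.1.20', 'user': 'bob', 'passwd': 'bob-secret'}
    build_ssh2(src, dest)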
def fetch_compressed(self, src, dest=None):
    """Create a tarball and fetch it locally"""
    self.logger.debug("Creating tarball of %s" % src)
    basename = os.path.basename(src)
    tar_file = basename + ".tgz"
    cmd = "tar czf %s %s" % (tar_file, src)
    _run(cmd, False)
    if not os.path.exists(dest):
        os.makedirs(dest)
    tmp_dir = tempfile.mkdtemp()
    fabric.get(tar_file, tmp_dir)
    dest_file = os.path.join(tmp_dir, tar_file)
    self._check_hash_sum(dest_file, tar_file)
    self.logger.debug("Untar packages file %s" % dest_file)
    cmd = """
        cd %s
        tar xzf %s
        cp -r %s/* %s
        rm -r %s
        """ % (tmp_dir, tar_file, src, dest, tmp_dir)
    os.system(cmd)
    self.logger.info("Downloaded %s to %s" % (src, _green(dest)))
def test_abort_returns_nonzero_exit_code(self):
    from datetime import datetime
    error_msg = 'oops_we_got_an_error'
    timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    output_file_path = \
        '/tmp/{0}-{1}.log'.format(self._testMethodName, timestamp)
    try:
        self._execute(
            'test.run_script',
            script_path='scripts/script.sh',
            process={
                'env': {
                    'test_operation': self._testMethodName,
                    'error_msg': error_msg,
                    'output_file': output_file_path
                }
            })
        self.fail('expected to raise an exception')
    except NonRecoverableError as e:
        self.assertEquals(error_msg, e.message)
    # verify that ctx outputs error message to stderr
    _, output_local_copy_path = tempfile.mkstemp()
    with context_managers.settings(**self.default_fabric_env):
        api.get(remote_path=output_file_path,
                local_path=output_local_copy_path)
    with open(output_local_copy_path, 'r') as output_file:
        self.assertEquals(error_msg, output_file.read().strip())
def get_postgres_dump(dbname, ignore_permissions=False, file_name=None):
    """Get a dump of the database from the server.

    :param dbname: name of the database to dump.
    :type dbname: str

    :param ignore_permissions: whether permissions in the created dump
        should be preserved.
    :type ignore_permissions: bool (default False)

    :param file_name: optional file name for the dump. The file name should
        exclude any path. If file_name is omitted, the dump will be written to
        fabgis_resources/sql/dumps/<dbname>-<date>.dmp
        where date is in the form dd-mm-yyyy. This is the default naming
        convention used by the :func:`restore_postgres_dump` function below.
    :type file_name: str
    """
    setup_env()
    if file_name is None or file_name == '':
        date = run('date +%d-%B-%Y')
        my_file = '%s-%s.dmp' % (dbname, date)
    else:
        my_file = os.path.split(file_name)[1]
        put(file_name, '/tmp/%s' % my_file)
    if not ignore_permissions:
        extra_args = ''
    else:
        extra_args = '-x -O'
    run('pg_dump %s -Fc -f /tmp/%s %s' % (extra_args, my_file, dbname))
    get('/tmp/%s' % my_file, 'fabgis_resources/sql/dumps/%s' % my_file)
def run(self):
    if self.stderr_temp:
        stderr_tuple = tempfile.mkstemp()
        os.close(stderr_tuple[0])
        self.stderr_file = stderr_tuple[1]
    # We will manually check return code
    remote_stdout = run("mktemp /tmp/collectoutput.XXXXXXXXXX").strip()
    remote_stderr = run("mktemp /tmp/collecterror.XXXXXXXXXX").strip()
    remote_rawp = "/tmp/%s" % os.path.basename(self.rawp_file)
    put(self.rawp_file, remote_rawp)
    with settings(warn_only=True):
        collectl_command_line_builder = CollectlCommandLineBuilder(self.collectl_path)
        command_line = collectl_command_line_builder.get(remote_rawp)
        command_output = run("%s > %s 2> %s" % (command_line, remote_stdout, remote_stderr))
        return_code = command_output.return_code
    local("rm %s" % self.collectl_output_file)
    local("rm %s" % self.stderr_file)
    get(remote_stdout, self.collectl_output_file)
    get(remote_stderr, self.stderr_file)
    if return_code != 0:
        stderr_contents = self.__read_stderr()
        raise RuntimeError("collectl did not return a status code of 0, "
                           "process standard error was %s" % stderr_contents)
    if self.stderr_temp:
        os.remove(self.stderr_file)
def write_configfile(remote_path, content=None, filename=None):
    _info('attempting to write {}...'.format(remote_path))
    rm_file = False
    if not filename:
        _, filename = tempfile.mkstemp()
        rm_file = True
        with open(filename, 'w') as f:
            f.write(content)
    _, old = tempfile.mkstemp()
    with hide('running', 'stdout', 'stderr'):
        if exists(remote_path):
            get(remote_path, old)
            with settings(hide('warnings'), warn_only=True):
                res = local('diff {} {}'.format(old, filename), capture=True)
            if res.failed:
                _bad('files differ')
                puts(res, show_prefix=False)
                if prompt('update file? [y/n]') == 'y':
                    _info('writing new {}...'.format(remote_path))
                    put(filename, remote_path, use_sudo=True, mode=0644)
            else:
                _good('files already match')
        else:
            _good('no remote file exists, writing now')
            put(filename, remote_path, use_sudo=True, mode=0644)
    # remove files
    os.remove(old)
    if rm_file:
        os.remove(filename)
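# A usage sketch for write_configfile (hypothetical path and content): the
# helper diffs the generated content against the live remote file and only
# uploads after interactive confirmation.
def push_motd():
    write_configfile('/etc/motd', content='Managed by fabric - do not edit.\n')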
def crud_backup():
    with cd(env.PROJECT.current):
        with prefix('source bin/activate'):
            run('python manage.py dumpdata crud.Activity2 crud.Organization2 ' +
                'crud.Project2 crud.Investment2 --indent=4 --format=json ' +
                '> crud.json')
            get('crud.json', 'crud.json')
def build():
    set_up_user("cpbuild")
    local("tar cpf workspace.tar --exclude workspace.tar ..")
    put("workspace.tar")
    put("build_cellprofiler.sh", "~", mode=0755)
    run("./build_cellprofiler.sh")
    get("cellprofiler.tar.gz", "cellprofiler.tar.gz")
def update_yaml(path, predicate, update, use_sudo=False):
    """
    Load configuration from path and apply update.

    A yaml document at `path` is deserialized, and updated with the given
    `update` function if `predicate` holds.

    :param path: path to yaml document
    :type path: string
    :param predicate: function taking a python representation of the yaml
        document, and returns either `True` or `False`
    :type predicate: function
    :param update: function taking a python representation of the yaml
        document, and does in-place update
    """
    doc_in = io.BytesIO()
    get(path, local_path=doc_in, use_sudo=use_sudo)
    doc_in.seek(0)
    doc = yaml.safe_load(doc_in)
    if predicate(doc):
        update(doc)
        doc_out = io.BytesIO()
        yaml.safe_dump(doc, stream=doc_out)
        doc_out.seek(0)
        put(doc_out, path, use_sudo=use_sudo)
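# A minimal sketch of how update_yaml might be called (hypothetical path and
# keys): bump a worker count, but only in documents still at the old value.
def scale_workers():
    def needs_scaling(doc):
        # predicate: only touch configs still running a single worker
        return doc.get('workers') == 1

    def scale_up(doc):
        # in-place update applied when the predicate holds
        doc['workers'] = 4

    update_yaml('/etc/myservice/config.yml', needs_scaling, scale_up, use_sudo=True)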
def deploy():
    with cd(remote_repo):
        sudo('git pull --ff-only origin master', user=remote_user)
        run('pip install -r requirements.txt')
        get('production/*.cfg', 'production/%(basename)s.cfg.last')
        put('production/*.cfg', 'production/', use_sudo=True)
        sudo('supervisorctl restart %s' % remote_proc)
def openvpn_download_visc():
    """ Download OpenVPN configuration files for Viscosity """
    hostname = prompt("Host name of the client:")
    if not exists('/root/easy-rsa/keys/%s.crt' % (hostname)):
        abort('Create client keys first with: openvpn_create_client')
    # set up a new directory to create our .visc configuration
    tmp_dir = '/tmp/%s' % (hostname + '.visc')
    if exists(tmp_dir):
        run('rm -fR %s' % (tmp_dir))
    # vars for the configuration file
    client_conf = {
        "visc_name": hostname,
        "server": env.hosts[0]
    }
    # make tmp directory, copy required items into it
    run('mkdir %s' % (tmp_dir))
    run('cp /etc/openvpn/ca.crt %s/ca.crt' % (tmp_dir))
    run('cp /root/easy-rsa/keys/%s.crt %s/cert.crt' % (hostname, tmp_dir))
    run('cp /root/easy-rsa/keys/%s.key %s/key.key' % (hostname, tmp_dir))
    run('cp /etc/openvpn/ta.key %s/ta.key' % (tmp_dir))
    upload_template('devbox_openvpn/configs/client.visc/config.conf',
                    '%s/config.conf' % (tmp_dir), client_conf)
    run('chmod -R a+r %s' % (tmp_dir))
    # download the .visc directory and then delete it from the server
    get(tmp_dir, '.')
    run('rm -fR %s' % (tmp_dir))
def backup_php_site(server_name, site_name):
    """
    legalsecretaryjournal_com

    TODO Only really need to backup everything in the 'images' and
    'sites' folder.
    """
    site_info = SiteInfo(server_name, site_name)
    backup_path = site_info.backup().get('path')
    print(green("Backup files on '{}'").format(env.host_string))
    path = Path(site_name, 'files')
    print(yellow(path.remote_folder()))
    run('mkdir -p {0}'.format(path.remote_folder()))
    # remove '.gz' from end of tar file
    # tar_file = os.path.splitext(path.remote_file())[0]
    # with cd('/home/legalsec/legalsecretaryjournal.com/'):
    #     first = True
    #     for folder in path.php_folders():
    #         if first:
    #             first = False
    #             run('tar cvf {} {}'.format(tar_file, folder))
    #         else:
    #             run('tar rvf {} {}'.format(tar_file, folder))
    #     run('gzip {}'.format(tar_file))
    #     # list the contents of the archive
    #     run('tar ztvf {}'.format(path.remote_file()))
    with cd(backup_path):
        run('tar -cvzf {} .'.format(path.remote_file()))
    get(path.remote_file(), path.local_file())
def download():
    """Download the backup file locally (run the backup script on the remote first)."""
    require('environment', provided_by=[stage])
    get('%(application_tmp_path)s/%(application_backup_filename)s' % env['application'],
        '%(project_backup_path)s' % env['project'])
def download_data():
    """Download Zope's Data.fs from the server."""

    if not env.get('confirm'):
        confirm("This will destroy all current Zope data on your local machine. "
                "Are you sure you want to continue?")

    with cd('/home/%(prod_user)s/niteoweb.%(shortname)s/var' % env):

        ### Download Data.fs ###
        # backup current Data.fs
        if os.path.exists('filestorage/Data.fs'):
            local('mv %(path)s/var/filestorage/Data.fs '
                  '%(path)s/var/filestorage/Data.fs.bak' % env)

        # remove temporary Data.fs file from previous downloads
        if exists('/tmp/Data.fs', use_sudo=True):
            sudo('rm -rf /tmp/Data.fs')

        # download Data.fs from server
        sudo('rsync -a filestorage/Data.fs /tmp/Data.fs')
        get('/tmp/Data.fs', '%(path)s/var/filestorage/Data.fs' % env)

        ### Download Blobs ###
        # backup current Blobs
        if os.path.exists('%(path)s/var/blobstorage' % env):
            local('mv %(path)s/var/blobstorage %(path)s/var/blobstorage_bak' % env)

        # remove temporary Blobs from previous downloads
        if exists('/tmp/blobstorage', use_sudo=True):
            sudo('rm -rf /tmp/blobstorage')

        # download Blobs from server -> use maintenance user for transfer
        sudo('rsync -a blobstorage /tmp/')
        sudo('chown -R %(user)s /tmp/blobstorage' % env)
        local('rsync -az %(user)s@%(server)s:/tmp/blobstorage %(path)s/var/' % env)
def configure_test_env(contrail_fab_path='/opt/contrail/utils', test_dir='/contrail-test'):
    """
    Configure test environment by creating sanity_params.ini and
    sanity_testbed.json files
    """
    print "Configuring test environment"
    sys.path.insert(0, contrail_fab_path)
    if hasattr(env, 'mytestbed'):
        tbd = __import__('fabfile.testbeds.%s' % env.mytestbed)
        testbed = eval('tbd.testbeds.' + getattr(env, 'mytestbed'))
    else:
        from fabfile.testbeds import testbed
    from fabfile.utils.host import get_openstack_internal_vip, \
        get_control_host_string, get_authserver_ip, get_admin_tenant_name, \
        get_authserver_port, get_env_passwords, get_authserver_credentials, \
        get_vcenter_ip, get_vcenter_port, get_vcenter_username, \
        get_vcenter_password, get_vcenter_datacenter, get_vcenter_compute, \
        get_authserver_protocol, get_region_name, get_contrail_internal_vip, \
        get_openstack_external_vip, get_contrail_external_vip, \
        get_apiserver_protocol, get_apiserver_certfile, get_apiserver_keyfile, \
        get_apiserver_cafile, get_keystone_insecure_flag, \
        get_apiserver_insecure_flag, get_keystone_certfile, get_keystone_keyfile, \
        get_keystone_cafile, get_keystone_version
    from fabfile.utils.multitenancy import get_mt_enable
    from fabfile.utils.interface import get_data_ip
    from fabfile.tasks.install import update_config_option, update_js_config
    from fabfile.utils.fabos import get_as_sudo
    logger = contrail_logging.getLogger(__name__)

    def validate_and_copy_file(filename, source_host):
        with settings(host_string='%s' % (source_host), warn_only=True,
                      abort_on_prompts=False):
            if exists(filename):
                filedir = os.path.dirname(filename)
                if not os.path.exists(filedir):
                    os.makedirs(filedir)
                get_as_sudo(filename, filename)
                return filename
        return ""

    cfgm_host = env.roledefs['cfgm'][0]

    auth_protocol = get_authserver_protocol()
    try:
        auth_server_ip = get_authserver_ip()
    except Exception:
        auth_server_ip = None
    auth_server_port = get_authserver_port()

    api_auth_protocol = get_apiserver_protocol()
    if api_auth_protocol == 'https':
        api_certfile = validate_and_copy_file(get_apiserver_certfile(), cfgm_host)
        api_keyfile = validate_and_copy_file(get_apiserver_keyfile(), cfgm_host)
        api_cafile = validate_and_copy_file(get_apiserver_cafile(), cfgm_host)
        api_insecure_flag = get_apiserver_insecure_flag()
    else:
        api_certfile = ""
        api_keyfile = ""
        api_cafile = ""
        api_insecure_flag = True

    cert_dir = os.path.dirname(api_certfile)
    if auth_protocol == 'https':
        keystone_cafile = validate_and_copy_file(
            cert_dir + '/' + os.path.basename(get_keystone_cafile()), cfgm_host)
        keystone_certfile = validate_and_copy_file(
            cert_dir + '/' + os.path.basename(get_keystone_certfile()), cfgm_host)
        keystone_keyfile = keystone_certfile
        keystone_insecure_flag = istrue(
            os.getenv('OS_INSECURE', get_keystone_insecure_flag()))
    else:
        keystone_certfile = ""
        keystone_keyfile = ""
        keystone_cafile = ""
        keystone_insecure_flag = True

    with settings(warn_only=True), hide('everything'):
        with lcd(contrail_fab_path):
            if local('git branch').succeeded:
                fab_revision = local('git log --format="%H" -n 1', capture=True)
            else:
                with settings(host_string=cfgm_host), hide('everything'):
                    fab_revision = run('cat /opt/contrail/contrail_packages/VERSION')
        with lcd(test_dir):
            if local('git branch').succeeded:
                revision = local('git log --format="%H" -n 1', capture=True)
            else:
                with settings(host_string=cfgm_host), hide('everything'):
                    revision = run('cat /opt/contrail/contrail_packages/VERSION')

    sanity_testbed_dict = {
        'hosts': [],
        'vgw': [],
        'esxi_vms': [],
        'vcenter_servers': [],
        'hosts_ipmi': [],
        'tor': [],
        'sriov': [],
        'dpdk': [],
    }

    sample_ini_file = test_dir + '/' + 'sanity_params.ini.sample'
    with open(sample_ini_file, 'r') as fd_sample_ini:
        contents_sample_ini = fd_sample_ini.read()
    sanity_ini_templ = string.Template(contents_sample_ini)

    if env.get('orchestrator', 'openstack') == 'openstack':
        with settings(host_string=env.roledefs['openstack'][0]), hide('everything'):
            openstack_host_name = run("hostname")

    with settings(host_string=env.roledefs['cfgm'][0]), hide('everything'):
        cfgm_host_name = run("hostname")

    control_host_names = []
    for control_host in env.roledefs['control']:
        with settings(host_string=control_host), hide('everything'):
            host_name = run("hostname")
            control_host_names.append(host_name)

    cassandra_host_names = []
    if 'database' in env.roledefs.keys():
        for cassandra_host in env.roledefs['database']:
            with settings(host_string=cassandra_host), hide('everything'):
                host_name = run("hostname")
                cassandra_host_names.append(host_name)

    keystone_version = get_keystone_version()
    internal_vip = get_openstack_internal_vip()
    external_vip = get_openstack_external_vip()
    contrail_internal_vip = get_contrail_internal_vip()
    contrail_external_vip = get_contrail_external_vip()

    multi_role_test = False
    for host_string in env.roledefs['all']:
        if host_string in env.roledefs.get('test', []):
            for role in env.roledefs.iterkeys():
                if role in ['test', 'all']:
                    continue
                if host_string in env.roledefs.get(role, []):
                    multi_role_test = True
                    break
            if not multi_role_test:
                continue
        host_ip = host_string.split('@')[1]
        with settings(host_string=host_string), hide('everything'):
            try:
                host_name = run("hostname")
                host_fqname = run("hostname -f")
            except:
                logger.warn('Unable to login to %s' % host_ip)
                continue
        host_dict = {}
        host_dict['ip'] = host_ip
        host_dict['data-ip'] = get_data_ip(host_string)[0]
        if host_dict['data-ip'] == host_string.split('@')[1]:
            host_dict['data-ip'] = get_data_ip(host_string)[0]
        host_dict['control-ip'] = get_control_host_string(host_string).split('@')[1]
        host_dict['name'] = host_name
        host_dict['fqname'] = host_fqname
        host_dict['username'] = host_string.split('@')[0]
        host_dict['password'] = get_env_passwords(host_string)
        host_dict['roles'] = []

        if env.get('qos', {}):
            if host_string in env.qos.keys():
                role_dict = env.qos[host_string]
                host_dict['qos'] = role_dict
        if env.get('qos_niantic', {}):
            if host_string in env.qos_niantic.keys():
                role_dict = env.qos_niantic[host_string]
                host_dict['qos_niantic'] = role_dict

        if host_string in env.roledefs['openstack']:
            role_dict = {'type': 'openstack', 'params': {'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['cfgm']:
            role_dict = {'type': 'cfgm',
                         'params': {'collector': host_name,
                                    'cassandra': ' '.join(cassandra_host_names)}}
            if env.get('orchestrator', 'openstack') == 'openstack':
                role_dict['openstack'] = openstack_host_name
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['control']:
            role_dict = {'type': 'bgp',
                         'params': {'collector': cfgm_host_name,
                                    'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        if 'database' in env.roledefs.keys() and host_string in env.roledefs['database']:
            role_dict = {'type': 'database',
                         'params': {'cassandra': ' '.join(cassandra_host_names)}}
            host_dict['roles'].append(role_dict)

        if host_string in env.roledefs['compute']:
            role_dict = {'type': 'compute',
                         'params': {'collector': cfgm_host_name,
                                    'cfgm': cfgm_host_name}}
            role_dict['params']['bgp'] = []
            if len(env.roledefs['control']) == 1:
                role_dict['params']['bgp'] = control_host_names
            else:
                for control_node in control_host_names:
                    role_dict['params']['bgp'].append(control_node)
                # role_dict['params']['bgp'].extend(control_host_names[randrange(len(env.roledefs['control']))])
            host_dict['roles'].append(role_dict)

        if 'collector' in env.roledefs.keys() and host_string in env.roledefs['collector']:
            role_dict = {'type': 'collector',
                         'params': {'cassandra': ' '.join(cassandra_host_names)}}
            host_dict['roles'].append(role_dict)

        if 'webui' in env.roledefs.keys() and host_string in env.roledefs['webui']:
            role_dict = {'type': 'webui', 'params': {'cfgm': cfgm_host_name}}
            host_dict['roles'].append(role_dict)

        sanity_testbed_dict['hosts'].append(host_dict)

    if env.has_key('vgw'):
        sanity_testbed_dict['vgw'].append(env.vgw)

    # get sriov info
    if env.has_key('sriov'):
        sanity_testbed_dict['sriov'].append(env.sriov)

    # get dpdk info
    if env.has_key('dpdk'):
        sanity_testbed_dict['dpdk'].append(env.dpdk)

    # Read ToR config
    sanity_tor_dict = {}
    if env.has_key('tor_agent'):
        sanity_testbed_dict['tor_agent'] = env.tor_agent

    # Read any tor-host config
    if env.has_key('tor_hosts'):
        sanity_testbed_dict['tor_hosts'] = env.tor_hosts

    if env.has_key('xmpp_auth_enable'):
        sanity_testbed_dict['xmpp_auth_enable'] = env.xmpp_auth_enable
    if env.has_key('xmpp_dns_auth_enable'):
        sanity_testbed_dict['xmpp_dns_auth_enable'] = env.xmpp_dns_auth_enable

    # Read any MX config (as physical_router)
    if env.has_key('physical_routers'):
        sanity_testbed_dict['physical_routers'] = env.physical_routers

    esxi_hosts = getattr(testbed, 'esxi_hosts', None)
    if esxi_hosts:
        for esxi in esxi_hosts:
            host_dict = {}
            host_dict['ip'] = esxi_hosts[esxi]['ip']
            host_dict['data-ip'] = host_dict['ip']
            host_dict['control-ip'] = host_dict['ip']
            host_dict['name'] = esxi
            host_dict['username'] = esxi_hosts[esxi]['username']
            host_dict['password'] = esxi_hosts[esxi]['password']
            # It is used for vcenter-only mode provisioning of contrail-vm.
            # It is not needed for vcenter_gateway mode, hence might not be
            # there in testbed.py.
            if 'contrail_vm' in esxi_hosts[esxi]:
                host_dict['contrail_vm'] = esxi_hosts[esxi]['contrail_vm']['host']
            host_dict['roles'] = []
            sanity_testbed_dict['hosts'].append(host_dict)
            sanity_testbed_dict['esxi_vms'].append(host_dict)

    vcenter_servers = env.get('vcenter_servers')
    if vcenter_servers:
        for vcenter in vcenter_servers:
            host_dict = {}
            host_dict['server'] = vcenter_servers[vcenter]['server']
            host_dict['port'] = vcenter_servers[vcenter]['port']
            host_dict['username'] = vcenter_servers[vcenter]['username']
            host_dict['password'] = vcenter_servers[vcenter]['password']
            host_dict['datacenter'] = vcenter_servers[vcenter]['datacenter']
            host_dict['auth'] = vcenter_servers[vcenter]['auth']
            host_dict['cluster'] = vcenter_servers[vcenter]['cluster']
            host_dict['dv_switch'] = \
                vcenter_servers[vcenter]['dv_switch']['dv_switch_name']
            # Mostly we do not use the below info for vcenter sanity tests.
            # It is used for vcenter-only mode provisioning of contrail-vm.
            # It is not needed for vcenter_gateway mode, hence might not be
            # there in testbed.py.
            if 'dv_port_group' in vcenter_servers[vcenter]:
                host_dict['dv_port_group'] = \
                    vcenter_servers[vcenter]['dv_port_group']['dv_portgroup_name']
            sanity_testbed_dict['vcenter_servers'].append(host_dict)

    orch = getattr(env, 'orchestrator', 'openstack')

    # get other orchestrators (vcenter etc) info if any
    slave_orch = None
    if env.has_key('other_orchestrators'):
        sanity_testbed_dict['other_orchestrators'] = env.other_orchestrators
        for k, v in env.other_orchestrators.items():
            if v['type'] == 'vcenter':
                slave_orch = 'vcenter'

    # get host ipmi list
    if env.has_key('hosts_ipmi'):
        sanity_testbed_dict['hosts_ipmi'].append(env.hosts_ipmi)

    if not getattr(env, 'test', None):
        env.test = {}

    # generate json file and copy to cfgm
    sanity_testbed_json = json.dumps(sanity_testbed_dict)

    stack_user = env.test.get('stack_user',
        os.getenv('STACK_USER') or env.get('stack_user', ''))
    stack_password = env.test.get('stack_password',
        os.getenv('STACK_PASSWORD') or env.get('stack_password', ''))
    stack_tenant = env.test.get('stack_tenant',
        os.getenv('STACK_TENANT') or env.get('stack_tenant', ''))
    stack_domain = env.test.get('stack_domain',
        os.getenv('STACK_DOMAIN') or env.get('stack_domain', ''))
    if not env.has_key('domain_isolation'):
        env.domain_isolation = False
    if not env.has_key('cloud_admin_domain'):
        env.cloud_admin_domain = 'Default'
    if not env.has_key('cloud_admin_user'):
        env.cloud_admin_user = '******'
    if not env.has_key('cloud_admin_password'):
        env.cloud_admin_password = env.get('openstack_admin_password')
    domain_isolation = env.test.get('domain_isolation',
        os.getenv('DOMAIN_ISOLATION') or env.domain_isolation)
    cloud_admin_domain = env.test.get('cloud_admin_domain',
        os.getenv('CLOUD_ADMIN_DOMAIN') or env.cloud_admin_domain)
    cloud_admin_user = env.test.get('cloud_admin_user',
        os.getenv('CLOUD_ADMIN_USER') or env.cloud_admin_user)
    cloud_admin_password = env.test.get('cloud_admin_password',
        os.getenv('CLOUD_ADMIN_PASSWORD') or env.cloud_admin_password)
    tenant_isolation = env.test.get('tenant_isolation',
        os.getenv('TENANT_ISOLATION') or '')
    stop_on_fail = env.get('stop_on_fail', False)
    mail_to = env.test.get('mail_to', os.getenv('MAIL_TO') or '')
    log_scenario = env.get('log_scenario', 'Sanity')
    stack_region_name = get_region_name()
    admin_user, admin_password = get_authserver_credentials()
    if orch == 'kubernetes':
        admin_tenant = 'default'
    else:
        admin_tenant = get_admin_tenant_name()

    # Few hardcoded variables for sanity environment
    # can be removed once we move to python3 and configparser
    webserver_host = env.test.get('webserver_host',
        os.getenv('WEBSERVER_HOST') or '')
    webserver_user = env.test.get('webserver_user',
        os.getenv('WEBSERVER_USER') or '')
    webserver_password = env.test.get('webserver_password',
        os.getenv('WEBSERVER_PASSWORD') or '')
    webserver_log_path = env.test.get('webserver_log_path',
        os.getenv('WEBSERVER_LOG_PATH') or '/var/www/contrail-test-ci/logs/')
    webserver_report_path = env.test.get('webserver_report_path',
        os.getenv('WEBSERVER_REPORT_PATH') or '/var/www/contrail-test-ci/reports/')
    webroot = env.test.get('webroot', os.getenv('WEBROOT') or 'contrail-test-ci')
    mail_server = env.test.get('mail_server', os.getenv('MAIL_SERVER') or '')
    mail_port = env.test.get('mail_port', os.getenv('MAIL_PORT') or '25')
    fip_pool_name = env.test.get('fip_pool_name',
        os.getenv('FIP_POOL_NAME') or 'floating-ip-pool')
    public_virtual_network = env.test.get('public_virtual_network',
        os.getenv('PUBLIC_VIRTUAL_NETWORK') or 'public')
    public_tenant_name = env.test.get('public_tenant_name',
        os.getenv('PUBLIC_TENANT_NAME') or 'admin')
    fixture_cleanup = env.test.get('fixture_cleanup',
        os.getenv('FIXTURE_CLEANUP') or 'yes')
    generate_html_report = env.test.get('generate_html_report',
        os.getenv('GENERATE_HTML_REPORT') or 'True')
    keypair_name = env.test.get('keypair_name',
        os.getenv('KEYPAIR_NAME') or 'contrail_key')
    mail_sender = env.test.get('mail_sender',
        os.getenv('MAIL_SENDER') or '*****@*****.**')
    discovery_ip = env.test.get('discovery_ip', os.getenv('DISCOVERY_IP') or '')
    config_api_ip = env.test.get('config_api_ip', os.getenv('CONFIG_API_IP') or '')
    analytics_api_ip = env.test.get('analytics_api_ip',
        os.getenv('ANALYTICS_API_IP') or '')
    discovery_port = env.test.get('discovery_port',
        os.getenv('DISCOVERY_PORT') or '')
    config_api_port = env.test.get('config_api_port',
        os.getenv('CONFIG_API_PORT') or '')
    analytics_api_port = env.test.get('analytics_api_port',
        os.getenv('ANALYTICS_API_PORT') or '')
    control_port = env.test.get('control_port', os.getenv('CONTROL_PORT') or '')
    dns_port = env.test.get('dns_port', os.getenv('DNS_PORT') or '')
    agent_port = env.test.get('agent_port', os.getenv('AGENT_PORT') or '')
    user_isolation = env.test.get('user_isolation',
        (os.getenv('USER_ISOLATION')) or False if stack_user else True)
    neutron_username = env.test.get('neutron_username',
        os.getenv('NEUTRON_USERNAME') or None)
    availability_zone = env.test.get('availability_zone',
        os.getenv('AVAILABILITY_ZONE') or None)
    ci_flavor = env.test.get('ci_flavor', os.getenv('CI_FLAVOR') or None)
    kube_config_file = env.test.get('kube_config_file',
        '/etc/kubernetes/admin.conf')

    use_devicemanager_for_md5 = getattr(testbed, 'use_devicemanager_for_md5', False)
    router_asn = getattr(testbed, 'router_asn', '')
    public_vn_rtgt = getattr(testbed, 'public_vn_rtgt', '')
    public_vn_subnet = getattr(testbed, 'public_vn_subnet', '')
    ext_routers = getattr(testbed, 'ext_routers', '')
    router_info = str(ext_routers)
    test_verify_on_setup = getattr(env, 'test_verify_on_setup', True)
    webui = getattr(testbed, 'webui', False)
    horizon = getattr(testbed, 'horizon', False)
    ui_config = getattr(testbed, 'ui_config', False)
    ui_browser = getattr(testbed, 'ui_browser', False)

    if not env.has_key('openstack'):
        env.openstack = {}
    if not env.has_key('cfgm'):
        env.cfgm = {}
    config_amqp_ip = env.openstack.get('amqp_host', '')
    if config_amqp_ip:
        config_amqp_ips = [config_amqp_ip]
    else:
        config_amqp_ips = []
    # If amqp details are in env.cfgm as well, use that
    config_amqp_port = env.cfgm.get('amqp_port', '5672')
    config_amqp_ips = env.cfgm.get('amqp_hosts', config_amqp_ips)

    key_filename = env.get('key_filename', '')
    pubkey_filename = env.get('pubkey_filename', '')

    vcenter_dc = ''
    if orch == 'vcenter' or slave_orch == 'vcenter':
        public_tenant_name = 'vCenter'
        if env.has_key('vcenter_servers'):
            if env.vcenter_servers:
                for k in env.vcenter_servers:
                    vcenter_dc = env.vcenter_servers[k]['datacenter']

    # global controller
    gc_host_mgmt = getattr(testbed, 'gc_host_mgmt', '')
    gc_host_control_data = getattr(testbed, 'gc_host_control_data', '')
    gc_user_name = getattr(testbed, 'gc_user_name', '')
    gc_user_pwd = getattr(testbed, 'gc_user_pwd', '')
    keystone_password = getattr(testbed, 'keystone_password', '')

    sanity_params = sanity_ini_templ.safe_substitute({
        '__testbed_json_file__': 'sanity_testbed.json',
        '__keystone_version__': keystone_version,
        '__nova_keypair_name__': keypair_name,
        '__orch__': orch,
        '__admin_user__': admin_user,
        '__admin_password__': admin_password,
        '__admin_tenant__': admin_tenant,
        '__domain_isolation__': domain_isolation,
        '__cloud_admin_domain__': cloud_admin_domain,
        '__cloud_admin_user__': cloud_admin_user,
        '__cloud_admin_password__': cloud_admin_password,
        '__tenant_isolation__': tenant_isolation,
        '__stack_user__': stack_user,
        '__stack_password__': stack_password,
        '__auth_ip__': auth_server_ip,
        '__auth_port__': auth_server_port,
        '__auth_protocol__': auth_protocol,
        '__stack_region_name__': stack_region_name,
        '__stack_tenant__': stack_tenant,
        '__stack_domain__': stack_domain,
        '__multi_tenancy__': get_mt_enable(),
        '__address_family__': get_address_family(),
        '__log_scenario__': log_scenario,
        '__generate_html_report__': generate_html_report,
        '__fixture_cleanup__': fixture_cleanup,
        '__key_filename__': key_filename,
        '__pubkey_filename__': pubkey_filename,
        '__webserver__': webserver_host,
        '__webserver_user__': webserver_user,
        '__webserver_password__': webserver_password,
        '__webserver_log_dir__': webserver_log_path,
        '__webserver_report_dir__': webserver_report_path,
        '__webroot__': webroot,
        '__mail_server__': mail_server,
        '__mail_port__': mail_port,
        '__sender_mail_id__': mail_sender,
        '__receiver_mail_id__': mail_to,
        '__http_proxy__': env.get('http_proxy', ''),
        '__ui_browser__': ui_browser,
        '__ui_config__': ui_config,
        '__horizon__': horizon,
        '__webui__': webui,
        '__devstack__': False,
        '__public_vn_rtgt__': public_vn_rtgt,
        '__router_asn__': router_asn,
        '__router_name_ip_tuples__': router_info,
        '__public_vn_name__': fip_pool_name,
        '__public_virtual_network__': public_virtual_network,
        '__public_tenant_name__': public_tenant_name,
        '__public_vn_subnet__': public_vn_subnet,
        '__test_revision__': revision,
        '__fab_revision__': fab_revision,
        '__test_verify_on_setup__': test_verify_on_setup,
        '__stop_on_fail__': stop_on_fail,
        '__ha_setup__': getattr(testbed, 'ha_setup', ''),
        '__ipmi_username__': getattr(testbed, 'ipmi_username', ''),
        '__ipmi_password__': getattr(testbed, 'ipmi_password', ''),
        '__contrail_internal_vip__': contrail_internal_vip,
        '__contrail_external_vip__': contrail_external_vip,
        '__internal_vip__': internal_vip,
        '__external_vip__': external_vip,
        '__vcenter_dc__': vcenter_dc,
        '__vcenter_server__': get_vcenter_ip(),
        '__vcenter_port__': get_vcenter_port(),
        '__vcenter_username__': get_vcenter_username(),
        '__vcenter_password__': get_vcenter_password(),
        '__vcenter_datacenter__': get_vcenter_datacenter(),
        '__vcenter_compute__': get_vcenter_compute(),
        '__use_devicemanager_for_md5__': use_devicemanager_for_md5,
        '__discovery_port__': discovery_port,
        '__config_api_port__': config_api_port,
        '__analytics_api_port__': analytics_api_port,
        '__control_port__': control_port,
        '__dns_port__': dns_port,
        '__vrouter_agent_port__': agent_port,
        '__discovery_ip__': discovery_ip,
        '__config_api_ip__': config_api_ip,
        '__analytics_api_ip__': analytics_api_ip,
        '__user_isolation__': user_isolation,
        '__neutron_username__': neutron_username,
        '__availability_zone__': availability_zone,
        '__ci_flavor__': ci_flavor,
        '__config_amqp_ips__': ','.join(config_amqp_ips),
        '__config_amqp_port__': config_amqp_port,
        '__api_auth_protocol__': api_auth_protocol,
        '__api_certfile__': api_certfile,
        '__api_keyfile__': api_keyfile,
        '__api_cafile__': api_cafile,
        '__api_insecure_flag__': api_insecure_flag,
        '__keystone_certfile__': keystone_certfile,
        '__keystone_keyfile__': keystone_keyfile,
        '__keystone_cafile__': keystone_cafile,
        '__keystone_insecure_flag__': keystone_insecure_flag,
        '__gc_host_mgmt__': gc_host_mgmt,
        '__gc_host_control_data__': gc_host_control_data,
        '__gc_user_name__': gc_user_name,
        '__gc_user_pwd__': gc_user_pwd,
        '__keystone_password__': keystone_password,
    })

    ini_file = test_dir + '/' + 'sanity_params.ini'
    testbed_json_file = test_dir + '/' + 'sanity_testbed.json'
    with open(ini_file, 'w') as ini:
        ini.write(sanity_params)
    with open(testbed_json_file, 'w') as tb:
        tb.write(sanity_testbed_json)

    # Create /etc/contrail/openstackrc
    if not os.path.exists('/etc/contrail'):
        os.makedirs('/etc/contrail')

    keycertbundle = None
    if keystone_cafile and keystone_keyfile and keystone_certfile:
        bundle = '/tmp/keystonecertbundle.pem'
        certs = [keystone_certfile, keystone_keyfile, keystone_cafile]
        keycertbundle = utils.getCertKeyCaBundle(bundle, certs)

    with open('/etc/contrail/openstackrc', 'w') as rc:
        rc.write("export OS_USERNAME=%s\n" % admin_user)
        rc.write("export OS_PASSWORD=%s\n" % admin_password)
        rc.write("export OS_TENANT_NAME=%s\n" % admin_tenant)
        rc.write("export OS_REGION_NAME=%s\n" % stack_region_name)
        rc.write("export OS_AUTH_URL=%s://%s:%s/v2.0\n" % (
            auth_protocol, auth_server_ip, auth_server_port))
        rc.write("export OS_CACERT=%s\n" % keycertbundle)
        rc.write("export OS_CERT=%s\n" % keystone_certfile)
        rc.write("export OS_KEY=%s\n" % keystone_keyfile)
        rc.write("export OS_INSECURE=%s\n" % keystone_insecure_flag)
        rc.write("export OS_NO_CACHE=1\n")

    # Write vnc_api_lib.ini - this is required for vnc_api to connect to keystone
    config = ConfigParser.ConfigParser()
    config.optionxform = str
    vnc_api_ini = '/etc/contrail/vnc_api_lib.ini'
    if os.path.exists(vnc_api_ini):
        config.read(vnc_api_ini)
    if 'auth' not in config.sections():
        config.add_section('auth')
    config.set('auth', 'AUTHN_TYPE', 'keystone')
    config.set('auth', 'AUTHN_PROTOCOL', auth_protocol)
    config.set('auth', 'AUTHN_SERVER', auth_server_ip)
    config.set('auth', 'AUTHN_PORT', auth_server_port)
    if keystone_version == 'v3':
        config.set('auth', 'AUTHN_URL', '/v3/auth/tokens')
    else:
        config.set('auth', 'AUTHN_URL', '/v2.0/tokens')

    if api_auth_protocol == 'https':
        if 'global' not in config.sections():
            config.add_section('global')
        config.set('global', 'certfile', api_certfile)
        config.set('global', 'cafile', api_cafile)
        config.set('global', 'keyfile', api_keyfile)
        config.set('global', 'insecure', api_insecure_flag)

    if auth_protocol == 'https':
        if 'auth' not in config.sections():
            config.add_section('auth')
        config.set('auth', 'certfile', keystone_certfile)
        config.set('auth', 'cafile', keystone_cafile)
        config.set('auth', 'keyfile', keystone_keyfile)
        config.set('auth', 'insecure', keystone_insecure_flag)

    with open(vnc_api_ini, 'w') as f:
        config.write(f)

    # For now, assume first config node is same as kubernetes master node
    # Get kube config file to the testrunner node
    if orch == 'kubernetes':
        if not os.path.exists(kube_config_file):
            dir_name = os.path.dirname(kube_config_file)
            if not os.path.exists(dir_name):
                os.makedirs(dir_name)
            with settings(host_string=env.roledefs['cfgm'][0]):
                get(kube_config_file, kube_config_file)

    # If webui = True in testbed, setup webui for sanity
    if webui:
        update_config_option('openstack', '/etc/keystone/keystone.conf',
                             'token', 'expiration', '86400', 'keystone')
        container = None
        if 'contrail-controller' in env.roledefs:
            container = 'webui'
        update_js_config('openstack', '/etc/contrail/config.global.js',
                         'contrail-webui', container=container)
def execute(self):
    return get(self._remote_path, self._local_path)
def read_remote_file(path):
    ''' Read remote file contents. '''
    fd = StringIO()
    get(path, fd)
    return fd.getvalue()
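# A usage sketch for read_remote_file (hypothetical path): the whole file
# arrives as a single string, convenient for small config or state files.
def remote_hostname():
    return read_remote_file('/etc/hostname').strip()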
def task_get_logs(backup_path, log_src_dir, file_list):
    # Back up log files
    for file in file_list:
        log_file = log_src_dir + '/' + file
        current_file = os.path.join(backup_path, file)
        get(log_file, current_file)
def run_magma_benchmark(self, cmd: str, stats_file: str):
    logger.info('Running magma benchmark cmd: {}'.format(cmd))
    stdout = run(cmd)
    logger.info(stdout)
    get('{}'.format(stats_file), local_path="./")
def get_manifest(self):
    logger.info('Getting manifest from host node')
    get("{}/manifest.xml".format(self.CB_DIR), local_path="./")
def fetch_sql_dump():
    """
    Gets the latest mysql.sql dump for your site from WPEngine.
    """
    print(colors.cyan("Fetching sql dump. This may take a while..."))
    get('wp-content/mysql.sql', 'mysql.sql')
def backup_db(stage, database, name):
    """
    Backs up a pg db and restores it locally
    (fab settings:stage=staging,database=db_1 backup_db)
    """
    # Generate Filename
    timestamp = current_milli_time()
    backup_file = "{databaseName}-{stage}-snapshot-{timestamp}".format(
        databaseName=name, stage=stage, timestamp=timestamp)

    # Generate local Backup Folder
    local('mkdir -p {backupFolder}'.format(backupFolder=database['backup_dir']))

    # Remote Backup Folder
    _run('mkdir -p /tmp/backups/database')

    # Backup Command
    backup_command = " ".join(
        map(str, [
            "PGPASSWORD={remotePassword}".format(
                remotePassword=database['remote_password']),
            "pg_dump",
            "-p {port}".format(port=database['remote_port']),
            "-h {host}".format(host=database['remote_host']),
            "-U {user}".format(user=database['remote_user']),
            "-F c -b -v",
            "-f /backups/{backup_file}".format(backup_file=backup_file),
            "{databaseName}".format(databaseName=database['remote_database'])
        ]))

    # Docker Backup Command
    command = " ".join(
        map(str, [
            "docker", "run", "-v /tmp/backups/database:/backups", "-it",
            database['image'], "sh", "-c",
            "\"{backup_command}\"".format(backup_command=backup_command)
        ]))

    # Run Command
    _run(command)

    # Get the Backup
    if stage != 'local':
        get('/tmp/backups/database/{backup_file}'.format(backup_file=backup_file),
            database['backup_dir'])

    # Restore the local database
    if console.confirm(
            "Do you want to replace your local '{databaseName}' databases".format(
                databaseName=database['local_database'])):
        local("dropdb -U {user} {databaseName}".format(
            user=database['local_user'],
            databaseName=database['local_database']))
        local("createdb -U {user} {databaseName}".format(
            user=database['local_user'],
            databaseName=database['local_database']))
        restore_command = " ".join(
            map(str, [
                "PGPASSWORD={remotePassword}".format(
                    remotePassword=database['local_password']),
                "pg_restore",
                "-p {port}".format(port=database['local_port']),
                "-U {user}".format(user=database['local_user']),
                "-d {databaseName}".format(
                    databaseName=database['local_database']),
                "-v {backupFolder}/{backup_file}".format(
                    backupFolder=database['backup_dir'],
                    backup_file=backup_file)
            ]))
        local(restore_command)
def get_celery_logs(self, worker_home: str):
    logger.info('Collecting remote Celery logs')
    with cd(worker_home), cd('perfrunner'):
        r = run('stat worker_*.log', quiet=True)
        if not r.return_code:
            get('worker_*.log', local_path='celery/')
def get_ip():
    run("ifconfig | grep \"inet addr\" > myip.txt")
    get('myip.txt')
def get_mongo_dump():
    run('cd %(remote_path)s; mongodump' % {'remote_path': remote_path})
    run('cd %(remote_path)s; tar -cvf dump.tar.gz dump' % {'remote_path': remote_path})
    get('%s/dump.tar.gz' % remote_path, local_path)
def get_export_files(self, worker_home: str):
    logger.info('Collecting YCSB export files')
    with cd(worker_home), cd('perfrunner'):
        r = run('stat YCSB/ycsb_run_*.log', quiet=True)
        if not r.return_code:
            get('YCSB/ycsb_run_*.log', local_path='YCSB/')
def copyMavenJarsToReleaseDir(releaseDir, version):
    # The .jars and upload file must be in a directory called voltdb - it is the projectname
    mavenProjectDir = releaseDir + "/mavenjars/voltdb"
    if not os.path.exists(mavenProjectDir):
        os.makedirs(mavenProjectDir)

    # Get the upload.gradle file
    get("%s/voltdb/tools/kit_tools/upload.gradle" % (builddir),
        "%s/upload.gradle" % (mavenProjectDir))
    # Get the voltdbclient-n.n.jar from the recently built community build
    get("%s/voltdb/obj/release/dist-client-java/voltdb/voltdbclient-%s.jar" % (builddir, version),
        "%s/voltdbclient-%s.jar" % (mavenProjectDir, version))
    # Get the client's src and javadoc .jar files
    get("%s/voltdb/obj/release/voltdbclient-%s-javadoc.jar" % (builddir, version),
        "%s/voltdbclient-%s-javadoc.jar" % (mavenProjectDir, version))
    get("%s/voltdb/obj/release/voltdbclient-%s-sources.jar" % (builddir, version),
        "%s/voltdbclient-%s-sources.jar" % (mavenProjectDir, version))
    # Get the voltdb-n.n.jar from the recently built community build
    get("%s/voltdb/voltdb/voltdb-%s.jar" % (builddir, version),
        "%s/voltdb-%s.jar" % (mavenProjectDir, version))
    # Get the server's src and javadoc .jar files
    get("%s/voltdb/obj/release/voltdb-%s-javadoc.jar" % (builddir, version),
        "%s/voltdb-%s-javadoc.jar" % (mavenProjectDir, version))
    get("%s/voltdb/obj/release/voltdb-%s-sources.jar" % (builddir, version),
        "%s/voltdb-%s-sources.jar" % (mavenProjectDir, version))
def get_remote_task(log_dir, task_file):
    """Retrieves remote task definition from the host"""
    get(os.path.join(DEFAULT_REMOTE_TASK_DIR, task_file), log_dir)
# The nosetests command to run the integration tests
NOSETESTS_COMMAND = 'cd pulp-automation && nosetests -vs --with-xunit --nologcapture'

# Setup the CLI
description = 'Run integration tests using a deployed environment by deploy-environment.py'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--config', required=True,
                    help='path to the configuration file produced by deploy-environment.py')
parser.add_argument('--tests-destination',
                    help='the location to place the nosetests.xml file on completion')
args = parser.parse_args()

config = config_utils.load_config(args.config)
flattened_config = config_utils.flatten_structure(config)
tester_config = filter(
    lambda conf: conf[setup_utils.ROLE] == setup_utils.PULP_TESTER_ROLE,
    flattened_config)[0]

with settings(host_string=tester_config[setup_utils.HOST_STRING],
              key_file=tester_config[setup_utils.PRIVATE_KEY]):
    test_result = run(NOSETESTS_COMMAND, warn_only=True)
    get('pulp-automation/nosetests.xml',
        args.tests_destination or tester_config['tests_destination'])
    sys.exit(test_result.return_code)
def get_trees(langs='all'):
    chefdata_dir = os.path.join(CHEF_DATA_DIR, 'chefdata')
    trees_dir = os.path.join(CHEF_DATA_DIR, 'chefdata', 'trees')
    local_dir = os.path.join('chefdata', 'vader', 'trees')
    if langs == 'all':
        langs = PRADIGI_WEBSITE_LANGUAGES
    # crawling trees
    for lang in langs:
        web_resource_tree_filename = CRAWLING_STAGE_OUTPUT_TMPL.format(lang)
        get(os.path.join(trees_dir, web_resource_tree_filename),
            os.path.join(local_dir, web_resource_tree_filename))
    # website games
    get(os.path.join(trees_dir, WEBSITE_GAMES_JSON_FILENAME),
        os.path.join(local_dir, WEBSITE_GAMES_JSON_FILENAME))
    # structure
    structure_filename = STUCTURE_CACHE_FILENAME
    get(os.path.join(chefdata_dir, structure_filename),
        os.path.join(local_dir, structure_filename))
    english_structure_filename = ENGLISH_STUCTURE_CACHE_FILENAME
    get(os.path.join(chefdata_dir, english_structure_filename),
        os.path.join(local_dir, english_structure_filename))
    # corrections
    corrections_filename = CORRECTIONS_CACHE_FILENAME
    get(os.path.join(chefdata_dir, corrections_filename),
        os.path.join(local_dir, corrections_filename))
    # ricecooker tree
    ricecooker_json_tree_filename = SCRAPING_STAGE_OUTPUT
    get(os.path.join(trees_dir, ricecooker_json_tree_filename),
        os.path.join(local_dir, ricecooker_json_tree_filename))
def test_get_returns_none_for_stringio(self):
    """
    get() should return None if local_path is a StringIO
    """
    with hide('everything'):
        eq_([], get('/file.txt', StringIO()))
def copyTrialLicenseToReleaseDir(releaseDir):
    get("%s/pro/trial_*.xml" % (builddir),
        "%s/license.xml" % (releaseDir))
def init_config():
    """
    Configures the etc file and environment variables. Also sets up tc
    and routing table on the sender.
    """
    run('sudo service ntp start', quiet=True)
    #run('service iptables start', quiet=True)
    run('sudo sed -i -e \'s/*\/10/*\/1/g\' /etc/cron.d/sysstat', quiet=True)
    run('sudo rm /var/log/sa/*', quiet=True)
    run('sudo service cron start', quiet=True)
    run('sudo service sysstat start', quiet=True)
    iface = run('hostname -I | awk \'{print $1}\'')
    if iface == '10.10.1.13':
        put('~/Workspace/std_data/1h_std', '/home/cc', mode=0664)
        config_str = ('MULTICAST ANY 224.0.0.1:38800 1 10.10.1.13\n'
                      'ALLOW ANY ^.*$\nEXEC \"insert.sh\"'
                      '\nEXEC \"cpu_mon.sh\"\nEXEC \"tc_mon.sh\"')
        run('route add 224.0.0.1 dev %s' % IFACE_NAME, quiet=True)
        run('tc qdisc del dev %s root' % IFACE_NAME, quiet=True)
        run('tc qdisc add dev %s root handle 1: htb default 2' % IFACE_NAME,
            quiet=True)
        run('tc class add dev %s parent 1: classid 1:1 htb rate %smbit '
            'ceil %smbit' % (IFACE_NAME, str(TC_RATE), str(TC_RATE)), quiet=True)
        run('tc qdisc add dev %s parent 1:1 handle 10: bfifo limit %sb'
            % (IFACE_NAME, '600m'), quiet=True)
        run('tc class add dev %s parent 1: classid 1:2 htb rate %smbit '
            'ceil %smbit' % (IFACE_NAME, str(TC_RATE), str(TC_RATE)), quiet=True)
        run('tc qdisc add dev %s parent 1:2 handle 11: bfifo limit %sb'
            % (IFACE_NAME, '600m'), quiet=True)
        run('tc filter add dev %s protocol ip parent 1:0 prio 1 u32 match '
            'ip dst 224.0.0.1/32 flowid 1:1' % IFACE_NAME, quiet=True)
        run('tc filter add dev %s protocol ip parent 1:0 prio 1 u32 match '
            'ip dst 0/0 flowid 1:2' % IFACE_NAME, quiet=True)
        #with cd('/home/cc'):
        #    run('git clone \
        #        https://github.com/shawnsschen/LDM6-LDM7-comparison.git',
        #        quiet=True)
        run('regutil -s 5G /queue/size', quiet=True)
    else:
        iface = run('hostname -I | awk \'{print $2}\'')
        config_str = 'RECEIVE ANY 192.168.2.1:38800 ' + iface
        run('regutil -s 2G /queue/size', quiet=True)
    #patch_sysctl()
    fd = StringIO()
    get('/home/cc/.bashrc', fd)
    content = fd.getvalue()
    if 'ulimit -c "unlimited"' in content:
        update_bashrc = True
    else:
        update_bashrc = False
    # Use a fresh buffer; reusing fd would append .bash_profile after .bashrc.
    fd = StringIO()
    get('/home/cc/.bash_profile', fd)
    content = fd.getvalue()
    if 'export PATH=$PATH:$HOME/util' in content:
        update_profile = True
    else:
        update_profile = False
    with settings(sudo_user='******'):
        with cd('/home/cc'):
            run('echo \'%s\' > etc/ldmd.conf' % config_str)
            if not update_bashrc:
                run('echo \'ulimit -c "unlimited"\' >> .bashrc')
            if not update_profile:
                run('echo \'export PATH=$PATH:$HOME/util\' >> .bash_profile')
            run('regutil -s %s /hostname' % iface)
            #sudo('regutil -s 5G /queue/size')
            run('regutil -s 35000 /queue/slots')
def download():
    env.password = input_raw()
    get("/home/*", "/home/logos/hehe")
def prepare_namelist(environ, **kwargs):
    '''
    Read atmos namelist and update variables from environ as needed

    Used vars: name, atmos_namelist, TRC, LV, dt_atmos, start, restart,
               finish, rootexp, workdir

    Depends on: None
    '''
    input_file = StringIO()
    get(fmt('{agcm_namelist[file]}', environ), input_file)
    data = nml_decode(input_file.getvalue())
    input_file.close()
    output = StringIO()

    try:
        tkeys = set(environ['atmos_namelist']['vars'].keys()) & set(data.keys())
    except KeyError:
        pass
    else:
        for k in tkeys:
            keys = set(environ['atmos_namelist']['vars'][k].keys()) & set(data[k].keys())
            data[k].update([(ke, environ['atmos_namelist']['vars'][k][ke])
                            for ke in keys])

    trunc = "%04d" % environ['TRC']
    lev = "%03d" % environ['LV']

    data['MODEL_RES']['trunc'] = "%04d" % environ['TRC']
    data['MODEL_RES']['vert'] = environ['LV']
    data['MODEL_RES']['dt'] = environ['dt_atmos']
    data['MODEL_RES']['IDATEI'] = format_atmos_date(environ['start'])
    data['MODEL_RES']['IDATEW'] = format_atmos_date(environ['restart'])
    data['MODEL_RES']['IDATEF'] = format_atmos_date(environ['finish'])
    data['MODEL_RES']['DHEXT'] = environ.get('DHEXT', 0)
    if environ.get('DHEXT', 0) != 0:
        begin = datetime.strptime(environ['restart'], "%Y%m%d%H")
        end = datetime.strptime(environ['finish'], "%Y%m%d%H")
        nhext = total_seconds(end - begin) / 3600
    else:
        nhext = 0
    data['MODEL_RES']['NHEXT'] = nhext

    # TODO: is this environ['agcm_model_inputs'] ?
    data['MODEL_RES']['path_in'] = fmt('{rootexp}/AGCM-1.0/model/datain', environ)
    data['MODEL_RES']['dirfNameOutput'] = (
        fmt('{workdir}/model/dataout/TQ%sL%s' % (trunc, lev), environ))

    output.write(yaml2nml(data, key_order=[
        'MODEL_RES', 'MODEL_IN', 'PHYSPROC', 'PHYSCS', 'COMCON']))

    # HACK: sigh, this is needed to run atmos post processing, even if we
    # don't use these forecasts.
    output.write("""
 17
  6.0 12.0 18.0 24.0 30.0 36.0 42.0 48.0 54.0 60.0
 66.0 72.0 84.0 96.0 120.0 144.0 168.0
""")

    put(output, fmt('{workdir}/MODELIN', environ))
    output.close()
def _invalid_file_obj_situations(self, remote_path):
    with settings(hide('running'), warn_only=True):
        get(remote_path, StringIO())
    assert_contains('is a glob or directory', sys.stderr.getvalue())
def try_get(self, remote_path, local_path=None):
    try:
        get(remote_path, local_path)
    except:
        logger.warn("Exception calling get({}, {}). Ignoring.".format(
            remote_path, local_path))
def build_test_package(branch='master'):
    """Create a test package and publish it in our repo.

    :param branch: The name of the branch to build from. Defaults to 'master'.
    :type branch: str

    To run e.g.::

        fab -H <host:port> build_test_package

    or to package up a specific branch (in this case minimum_needs)::

        fab -H <host:port> build_test_package:minimum_needs

    .. note:: Using the branch option will not work for branches older than 1.1
    """
    base_path, code_path, git_url, repo_alias, site_name = get_vars()
    show_environment()
    update_git_checkout(env.repo_path, env.git_url, env.repo_alias, branch)
    initialise_qgis_plugin_repo()

    fabtools.require.deb.packages(['zip', 'make', 'gettext'])

    with cd(env.code_path):
        # Get git version and write it to a text file in case we need to cross
        # reference it for a user ticket.
        sha = run('git rev-parse HEAD > git_revision.txt')
        fastprint('Git revision: %s' % sha)

        get('metadata.txt', '/tmp/metadata.txt')
        metadata_file = file('/tmp/metadata.txt')
        metadata_text = metadata_file.readlines()
        metadata_file.close()
        for line in metadata_text:
            line = line.rstrip()
            if 'version=' in line:
                plugin_version = line.replace('version=', '')
            if 'status=' in line:
                line.replace('status=', '')

        # noinspection PyUnboundLocalVariable
        run('scripts/release.sh %s' % plugin_version)
        package_name = '%s.%s.zip' % ('inasafe', plugin_version)
        source = '/tmp/%s' % package_name
        fastprint(blue('Source: %s\n' % source))
        run('cp %s %s' % (source, env.plugin_repo_path))

        source = os.path.join('scripts', 'test-build-repo', 'plugins.xml')
        plugins_xml = os.path.join(env.plugin_repo_path, 'plugins.xml')
        fastprint(blue('Source: %s\n' % source))
        run('cp %s %s' % (source, plugins_xml))

        sed(plugins_xml, '\[VERSION\]', plugin_version)
        sed(plugins_xml, '\[FILE_NAME\]', package_name)
        sed(plugins_xml, '\[URL\]', 'http://%s/%s' % (env.repo_site_name, package_name))
        sed(plugins_xml, '\[DATE\]', str(datetime.now()))

    fastprint('Add http://%s/plugins.xml to QGIS plugin manager to use.' %
              env.repo_site_name)
def backup_db():
    date = datetime.datetime.now().strftime("%Y-%m-%d_%H%M")
    dump_name = "%s-geosurvey.dump" % date
    run("pg_dump geosurvey -n public -c -f /tmp/%s -Fc -O --no-acl -U postgres"
        % dump_name)
    get("/tmp/%s" % dump_name, "backups/%s" % dump_name)
def get(self, *args, **kwargs):
    return fabric_api.get(*args, hosts=self.hostname, **kwargs)
def get_last_release():
    fd = StringIO()
    get(last_release_file, fd)
    return fd.getvalue()
def collect_cbq_logs(self):
    logger.info('Getting cbq-engine logs')
    get('/tmp/cbq.log')
def letsencrypt_configure(reconfigure_nginx=True):
    require('code_dir')

    domains = set()

    # Collect all the domains that need a certificate
    with cd(env.code_dir):
        # construct a configparser object
        config = ConfigParser.ConfigParser()

        for filename in get_config_repo_paths('letsencrypt'):
            buf = StringIO()

            # Add the actual config file data to the buffer
            get(filename, buf)

            # Here we prepend a section header to the in-memory buffer. This
            # allows us to easily read the letsencrypt config file using stdlib configparser
            #
            # see: http://stackoverflow.com/questions/2819696/parsing-properties-file-in-python/25493615#25493615
            buf = StringIO('[DEFAULT]\n' + buf.getvalue())

            # read config from buf
            config.readfp(buf)

            # get domains from the config file
            for domain in config.get('DEFAULT', 'domains').split(','):
                domains.add(domain.strip())

    # Create a temporary nginx config file
    temporary_nginx_conf = """
        server {
            listen 80;
            server_name %(domains)s;

            location /.well-known/acme-challenge/ {
                root /etc/letsencrypt/www;
                break;
            }
        }
    """ % {
        "domains": " ".join(domains),
    }

    # Notify the user that the dns MUST be configured for all the domains as of this point
    print(" ")
    print(colors.blue(
        'Preparing to request certificate using letsencrypt. The DNS for '
        'following domains MUST be configured to point to the remote host: %s'
        % " ".join(domains)))

    if not confirm(colors.yellow("Is the dns configured? (see above)")):
        abort('Deployment aborted.')

    # Upload it to the app nginx config path
    put(local_path=StringIO(temporary_nginx_conf),
        remote_path=get_nginx_app_target_path(), use_sudo=True)

    # Reload nginx
    sudo('docker exec nginx nginx -s reload')

    # use letsencrypt_update to obtain the certificate
    letsencrypt_update(dry_run=True)

    # restore nginx config if requested
    if reconfigure_nginx:
        nginx_update()
def get_current_release():
    fd = StringIO()
    get(current_release_file, fd)
    return fd.getvalue()