def test_add_vehicle(curl, apikey):
    """Upload the vehicle fixture, POST it to /vehicles/ and return the new id.

    :param curl: callable performing the HTTP request; returns (header, data).
    :param apikey: API key passed through to ``curl``.
    :returns: the ``id`` the API assigned to the created vehicle.
    """
    local_file = 'files/vehicle.json'
    remote_file = '/tmp/testing_uwsgi_vehicle'
    put(local_file, remote_file)
    header, data = curl('/vehicles/', apikey, 'POST', '@{}'.format(remote_file))
    # FIX: close the fixture file deterministically instead of leaking the
    # handle (original used a bare open(...).read()).
    with open(local_file) as fixture:
        except_id_added(fixture.read(), data)
    return json.loads(data)['data'][0]['id']
def start_nagios():
    ''' Starts Nagios on VM and sets up Nagios as upstart job. '''
    # Install the local command definitions; target dir is root-owned.
    put('./commands.cfg','/usr/local/nagios/etc/objects/commands.cfg', use_sudo=True)
    # Symlink into rcS.d so Nagios starts at boot, then start it immediately.
    sudo('ln -sf /etc/init.d/nagios /etc/rcS.d/S99nagios')
    sudo('/etc/init.d/nagios start')
def config_haproxy(debug=False):
    """Upload the HAProxy configuration and reload the service.

    :param debug: when True, push the local debug config; otherwise push the
        production config from the secrets repository.
    """
    if debug:
        source = 'config/debug_haproxy.conf'
    else:
        source = os.path.join(env.SECRETS_PATH, 'configs/haproxy.conf')
    put(source, '/etc/haproxy/haproxy.cfg', use_sudo=True)
    sudo('/etc/init.d/haproxy reload')
def test_add_taxi(curl, apikey):
    """Upload the taxi fixture, POST it to /taxis/ and return the new id.

    :param curl: callable performing the HTTP request; returns (header, data).
    :param apikey: API key passed through to ``curl``.
    :returns: the ``id`` the API assigned to the created taxi.
    """
    remote_file = '/tmp/taxi.json'
    put('files/taxi.json', remote_file)
    header, data = curl('/taxis/', apikey, 'POST', '@{}'.format(remote_file))
    # FIX: close the expected-output fixture instead of leaking the handle
    # (original used a bare open(...).read()).
    with open('files/taxi_expected.json') as fixture:
        expected = fixture.read()
    except_id_added(expected, data)
    return json.loads(data)['data'][0]['id']
def copy(app_name):
    """ Copy the application directory up to the servers directly without any VCS. """
    # Remote layout: <remote_path>/{releases/<n>, shared, current -> releases/<n>}
    config['remote_path'] = os.path.join(REMOTE_BASE_DIR, config['name'])
    config['release_path'] = os.path.join(config['remote_path'], 'releases')
    config['shared_path'] = os.path.join(config['remote_path'], 'shared')
    dir_ensure(config['remote_path'])
    dir_ensure(config['release_path'])
    dir_ensure(config['shared_path'])
    # FIX: `map(...).sort()` only works on Python 2 (Py3 map() returns an
    # iterator with no .sort()); sorted() is correct on both.
    releases = sorted(map(int, run('ls ' + config['release_path']).split()))
    current = releases[-1] if releases else 0
    new_release_path = os.path.join(config['release_path'], str(current + 1))
    dir_ensure(new_release_path)
    # Upload the whole working tree into the new release directory.
    put(os.path.join(config['local_path'], '*'), new_release_path)
    dir_ensure(os.path.join(new_release_path, 'tmp'))
    # Atomically switch the 'current' symlink to the new release.
    file_link(new_release_path, os.path.join(config['remote_path'], 'current'))
    releases.append(current + 1)
    # Prune oldest releases beyond env.max_releases.
    if len(releases) > env.max_releases:
        for old_release in releases[:(len(releases) - env.max_releases)]:
            dir_remove(os.path.join(config['release_path'], str(old_release)))
def push():
    """Pushes the current folder to the remote machine's Shiny apps folder"""
    require('hosts', provided_by=[remote])
    # Recreate /www-shiny from scratch: any previously deployed app is wiped.
    sudo('mkdir -p /www-shiny')
    sudo('chown -R ubuntu:ubuntu /www-shiny')
    sudo('rm -rf /www-shiny/*')
    put('./', '/www-shiny/')
def copy_certificates():
    """Upload the newszeit.com certificate and key, then build the combined
    .pem (cert first, key appended) that HAProxy/nginx expect."""
    cert_path = '%s/config/certificates/' % env.NEWSBLUR_PATH
    run('mkdir -p %s' % cert_path)
    # Both secrets land in the same remote certificates directory.
    for secret in ('certificates/newszeit.com.crt',
                   'certificates/newszeit.com.key'):
        put(os.path.join(env.SECRETS_PATH, secret), cert_path)
    run('cat %s/newszeit.com.crt > %s/newszeit.pem' % (cert_path, cert_path))
    run('cat %s/newszeit.com.key >> %s/newszeit.pem' % (cert_path, cert_path))
def deploy():
    # Release id is a timestamp; used for the package name and release dir.
    env.release = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    run('mkdir -p {path}/releases {path}/packages'.format(**env))
    # Build the tarball locally from git master and upload it.
    local('git archive --format=tar master | gzip > {release}.tar.gz'.format(**env))
    put('{release}.tar.gz'.format(**env), '{path}/packages/'.format(**env))
    local('rm -vf {release}.tar.gz'.format(**env))
    with cd(env.path):
        run('mkdir -p releases/{release}'.format(**env))
        with cd('releases/{release}'.format(**env)):
            run('tar xvf ../../packages/{release}.tar.gz'.format(**env))
            # Link the shared sqlite db into the new release.
            run('ln -sf {dbpath} grouphugs.db'.format(**env))
    with cd('{path}/releases'.format(**env)):
        # warn_only: 'previous'/'current' may not exist on first deploy.
        with settings(warn_only=True):
            run('rm previous')
            run('mv current previous')
            # NOTE(review): reconstructed nesting — the 'current' symlink
            # update appears inside warn_only here; confirm against history.
            run('ln -sf {release} current'.format(**env))
    put('settings.py', '{path}/releases/{release}/settings.py'.format(**env))
    restart()
def do():
    # `config` is a module-level path to a toast JSON config (not shown here).
    with open(config, 'r') as ip:
        config_data = json.load(ip)
    dag_class = config_data['dag']
    # push the toast config to the remote machine
    toast_config_worker_path = os.path.join(
        eggo_config.get('worker_env', 'work_path'),
        build_dest_filename(config))
    put(local_path=config,
        remote_path=toast_config_worker_path)
    # TODO: run on central scheduler instead
    toast_cmd = ('toaster.py --local-scheduler {clazz} '
                 '--ToastConfig-config {toast_config}'.format(
                     clazz=dag_class,
                     toast_config=toast_config_worker_path))
    hadoop_bin = os.path.join(eggo_config.get('worker_env', 'hadoop_home'), 'bin')
    # Environment the toaster process needs on the worker.
    toast_env = {'EGGO_HOME': eggo_config.get('worker_env', 'eggo_home'),
                 # toaster.py imports eggo_config, which needs EGGO_HOME on worker
                 'EGGO_CONFIG': eggo_config.get('worker_env', 'eggo_config_path'),
                 # bc toaster.py imports eggo_config which must be init on the worker
                 'LUIGI_CONFIG_PATH': eggo_config.get('worker_env', 'luigi_config_path'),
                 'AWS_ACCESS_KEY_ID': eggo_config.get('aws', 'aws_access_key_id'),
                 # bc dataset dnload pushes data to S3 TODO: should only be added if the dfs is S3
                 'AWS_SECRET_ACCESS_KEY': eggo_config.get('aws', 'aws_secret_access_key'),
                 # TODO: should only be added if the dfs is S3
                 'SPARK_HOME': eggo_config.get('worker_env', 'spark_home')}
    if exec_ctx == 'local':
        # this should copy vars that maintain venv info
        env_copy = os.environ.copy()
        env_copy.update(toast_env)
        toast_env = env_copy
    # Run with hadoop on PATH and the toast env exported.
    with path(hadoop_bin):
        with shell_env(**toast_env):
            wrun(toast_cmd)
def setenv(envfile, restart=True):
    """
    Upload environment variables to the target server(s).

    :param envfile: local path of the env file to upload as `.env`.
    :param restart: when True, restart the webserver so changes take effect.
    """
    with cd('govtrack.us-web'):
        put(envfile, '.env')
        if restart:
            restart_webserver()
def setup():
    """Prepares one or more servers for deployment"""
    # Build the interpolation mapping once instead of per-command.
    paths = {
        'domain_path': env.domain_path,
        'env_file': env.env_file,
        'releases_path': env.releases_path,
        'current': env.current_path,
    }
    run("mkdir -p %(domain_path)s" % paths)
    run("mkdir -p %(domain_path)s/etc" % paths)
    put("%(env_file)s" % paths, "%(domain_path)s/etc" % paths)
    run("mkdir -p %(releases_path)s" % paths)
    run("mkdir -p %(current)s" % paths)
def uploadeggs():
    """Release developer eggs and send to host """
    hostout = api.env['hostout']
    #need to send package. cycledown servers, install it, run buildout, cycle up servers
    dl = hostout.getDownloadCache()
    with api.hide('running', 'stdout', 'stderr'):
        # What is already in the remote download cache.
        contents = api.run('ls %s/dist' % dl).split()
        # Upload only eggs not yet present remotely, then move them into the
        # cache with the buildout user's ownership.
        for pkg in hostout.localEggs():
            name = os.path.basename(pkg)
            if name not in contents:
                tmp = os.path.join('/tmp', name)
                api.put(pkg, tmp)
                api.run("mv -f %(tmp)s %(tgt)s && "
                        "chown %(buildout)s %(tgt)s && "
                        "chmod a+r %(tgt)s" % dict(
                            tmp = tmp,
                            tgt = os.path.join(dl, 'dist', name),
                            buildout=api.env.hostout.options['buildout-user'],
                        ))
    # Ensure there is no local pinned.cfg so we don't clobber it
    # Now upload pinned.cfg.
    # NOTE: py2-style NamedTemporaryFile text write; flush before put() so the
    # full contents are on disk.
    pinned = "[buildout]\ndevelop=\nauto-checkout=\n[versions]\n"+hostout.packages.developVersions()
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(pinned)
    tmp.flush()
    api.put(tmp.name, api.env.path+'/pinned.cfg')
    tmp.close()
def buildout(*args):
    """ Run the buildout on the remote server

    Extra positional args are appended verbatim to the buildout command line.
    """
    hostout = api.env.hostout
    hostout_file=hostout.getHostoutFile()
    #upload generated cfg with hostout versions
    hostout.getHostoutPackage() # we need this work out releaseid
    filename = "%s-%s.cfg" % (hostout.name, hostout.releaseid)
    with cd(api.env.path):
        # Write the generated cfg to a local temp file and upload it.
        tmp = tempfile.NamedTemporaryFile()
        tmp.write(hostout_file)
        tmp.flush()
        api.put(tmp.name, api.env.path+'/'+filename)
        tmp.close()
        #if no pinned.cfg then upload empty one
        if not contrib.files.exists('pinned.cfg'):
            pinned = "[buildout]"
            contrib.files.append(pinned, 'pinned.cfg')
        #run generated buildout
        # api.run('%s bin/buildout -c %s -t 1900 %s' % (proxy_cmd(), filename, ' '.join(args)))
        api.run('%s bin/buildout -c %s %s' % (proxy_cmd(), filename, ' '.join(args)))
        # Update the var dir permissions to add group write
        api.run("find var -exec chmod g+w {} \; || true")
def publish():
    """Upload the newest artefact from dist/, pip-install it on the remote
    host and restart the alfred service."""
    artefact = os.listdir('dist')[-1]
    put('dist/%s' % artefact, '/tmp/%s' % artefact)
    sudo('pip install --no-deps -U /tmp/%s' % artefact)
    # Make sure the log file exists before the service starts writing to it.
    sudo('touch /var/log/alfred.log')
    sudo('restart alfred')
    sudo('uname -a')
def setup_minion(*roles):
    """Setup a minion server with a set of roles."""
    require('environment')
    # Validate roles up front; abort on the first unknown one.
    for r in roles:
        if r not in VALID_ROLES:
            abort('%s is not a valid server role for this project.' % r)
    config = {
        # If this host is also the master, point the minion at localhost.
        'master': 'localhost' if env.master == env.host else env.master,
        'output': 'mixed',
        'grains': {
            'environment': env.environment,
            'roles': list(roles),
        },
        'mine_functions': {
            'network.interfaces': [],
            'network.ip_addrs': []
        },
    }
    # Render the minion config to a local temp file, then upload it.
    _, path = tempfile.mkstemp()
    with open(path, 'w') as f:
        yaml.dump(config, f, default_flow_style=False)
    sudo("mkdir -p /etc/salt")
    put(local_path=path, remote_path="/etc/salt/minion", use_sudo=True)
    # install salt minion if it's not there already
    install_salt(SALT_VERSION, master=False, minion=True, restart=True)
    # queries server for its fully qualified domain name to get minion id
    # (remote command is Python 2 — uses a print statement)
    key_name = run('python -c "import socket; print socket.getfqdn()"')
    execute(accept_key, key_name)
def enable_celerybeat():
    # Enable the celerybeat + celeryd-beat supervisor programs and reload
    # supervisor so they are picked up.
    with cd(env.NEWSBLUR_PATH):
        run('mkdir -p data')
        put('config/supervisor_celerybeat.conf', '/etc/supervisor/conf.d/celerybeat.conf', use_sudo=True)
        put('config/supervisor_celeryd_beat.conf', '/etc/supervisor/conf.d/celeryd_beat.conf', use_sudo=True)
        sudo('supervisorctl reread')
        sudo('supervisorctl update')
def postgres_db_create(dbuser, dbname, password):
    """
    Create a Psql Database: db_create:dbuser,dbname,password
    Example: db_create:username,databasename,password
    """
    env.user=ADMIN_USER
    # Locate the production settings file in the local checkout and bake the
    # db credentials into it (replaces $PROD_USER / $PROD_PASS placeholders).
    prod_settings_file = local('find . | grep settings/production.py', capture=True)
    temp_prod_settings_file = prod_settings_file.replace('/p', '/_p')
    local('sed "s/\$PROD_USER/%s/g;s/\$PROD_PASS/%s/g" %s > %s' % (dbuser, password, prod_settings_file, temp_prod_settings_file))
    local('mv %s %s' % (temp_prod_settings_file, prod_settings_file))
    local('git add %s' % (prod_settings_file))
    local('git commit -m "envkit: set database config"')
    # NOTE(review): the sudo user '******' looks like a redacted value
    # (presumably 'postgres') — confirm before running.
    sudo('psql template1 -c "CREATE USER %s WITH CREATEDB ENCRYPTED PASSWORD \'%s\'"' % (dbuser, password), user='******')
    sudo('createdb "%s" -O "%s"' % (dbname, dbuser), user='******')
    sudo('psql %s -c "CREATE EXTENSION unaccent;"' % dbname, user='******')
    # Replace the server config with the project's and restart postgres.
    sudo('rm -f /etc/postgresql/9.1/main/postgresql.conf')
    put('env/postgresql.conf', '/etc/postgresql/9.1/main/postgresql.conf', use_sudo=True)
    sudo('service postgresql restart')
def update_minion_config():
    """Updates minion's config and restarts ``salt-minion`` service."""
    # mode=0600 is a Python-2 octal literal (rw-------): the config may hold
    # secrets, so keep it readable by root only.
    put('salt/minion.conf', '/etc/salt/minion', use_sudo=True, mode=0600)
    sudo('chown root:root /etc/salt/minion')
    sudo('service salt-minion restart')
def deploySh(filepath=ScriptPath):
    """Upload a shell script to the remote user's home, execute it and log
    its output locally.

    :param filepath: local path of the script to deploy.
    """
    # split file name seperately — only the basename is used remotely
    path, filename = os.path.split(filepath)
    authorize()
    # FIX: the original passed mode=775, a *decimal* int (== 0o1407) which
    # sets setuid/sticky-like garbage bits; 0o775 is the intended rwxrwxr-x.
    put(filepath, '~/'+filename, mode=0o775)
    out = run('~/'+filename)
    writeFile('output', out)
def put_template(template_name, remote_path, dictionary=None, **kwargs):
    """Render a local template (with ``env`` injected into the context) and
    upload the result to ``remote_path`` via put().

    Note: ``dictionary`` is mutated in place to add the 'env' key, matching
    the original behaviour.
    """
    context = dictionary or {}
    context.update({ 'env': env })
    rendered = render_to_string(get_templates(template_name), context)
    put(StringIO(rendered), remote_path, **kwargs)
def install_docker():
    # Install Docker (legacy get.docker.io repo) on an Ubuntu host.
    check_valid_os()
    print(":: Installing Docker on {}".format(env.host_string))
    ver = run("cat /etc/lsb-release | grep DISTRIB_RELEASE | cut -d '=' -f2")
    reboot_needed = False
    sudo("apt-get update")
    sudo('sh -c "echo deb http://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"')
    sudo('sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"')
    # extras: 12.04 needs the raring backport kernel for aufs
    if ver == "12.04":
        sudo("apt-get install -y linux-image-generic-lts-raring linux-headers-generic-lts-raring")
        print("* You will need to reboot in order to use the new kernel and aufs module")
        reboot_needed = True
    else:
        sudo("apt-get install -y linux-image-extra-`uname -r`")
    sudo("apt-get update")
    # docker
    sudo("apt-get install -y lxc-docker git-core")
    sudo('echo "net.ipv4.ip_forward=1" >> /etc/sysctl.conf ; sysctl -p /etc/sysctl.conf')
    # check ufw: allow forwarded traffic so containers have connectivity
    sudo("sed -i 's/^DEFAULT_FORWARD_POLICY.*/DEFAULT_FORWARD_POLICY=\"ACCEPT\"/g' /etc/default/ufw")
    sudo("service ufw restart")
    # set to listen on local addr
    # NOTE(review): local_ip is computed but never used below — verify intent.
    local_ip = get_local_ip()
    with open(".tmpcfg", "w") as f:
        f.write('DOCKER_OPTS="-H unix:///var/run/docker.sock -H tcp://127.0.0.1:4243"')
    put(".tmpcfg", "/etc/default/docker", use_sudo=True)
    os.remove(".tmpcfg")
    sudo("service docker restart")
    if reboot_needed:
        print("Setup complete. Rebooting...")
        reboot(wait=60)
def config_sentry():
    # Upload the sentry config; report success/failure without aborting
    # the whole task run.
    print("Configuring sentry...", end="\t")
    try:
        put("../conf/sentry/sentry.conf.py", "%s/conf/" % env.dir)
        print_succeed()
    except AbortException as e:
        print_fail(e)
def deploy():
    # Build a fresh egg locally, upload it, install and restart the service.
    local("rm -rf dist")
    local("python setup.py bdist_egg")
    sudo("rm -rf /tmp/GetCM.egg")
    # put() expands the glob to the freshly built egg.
    put("dist/GetCM-*-py*.egg", "/tmp/GetCM.egg")
    sudo("easy_install /tmp/GetCM.egg")
    sudo("supervisorctl restart cmbalance")
def install_debconf_seeds():
    """Upload every configured debconf seed file to /tmp and load it with
    debconf-set-selections."""
    _install_packages("debconf-utils")
    for seed_file in conf.DEBCONF_SEEDS:
        # Basename of the seed path (everything after the last '/').
        seed_filename = seed_file.rpartition('/')[2]
        print(green('Installing seed: %s' % seed_filename))
        put(seed_file, '/tmp/%s' % seed_filename)
        sudo('debconf-set-selections /tmp/%s' % seed_filename)
def user(name):
    # Create a user account and install its SSH private key.
    # NOTE(review): nesting reconstructed — this assumes the whole setup only
    # runs when the home directory does not yet exist; confirm against history.
    if not exists('/home/%s' % name):
        sudo('adduser %s' % name)
        run('mkdir -p /home/%s/.ssh' % name)
        put('files/id_rsa', '/home/%s/.ssh' % name)
        # Private keys must not be group/world readable.
        run('chmod 600 /home/%s/.ssh/id_rsa' % name)
        run('chown -R %s:%s /home/%s' % (name, name, name))
def secure_system():
    # Basic host hardening: fail2ban, an admin user with key-only SSH,
    # locked-down sshd, and iptables rules restored at boot.
    sudo("apt-get install -y fail2ban")
    sudo("cp /etc/fail2ban/jail.{conf,local}")
    # !!! create user with password "simplepassword1234". Please, change it at first login!!!
    sudo('useradd -g admin -s /bin/bash -p "pacHXCdIdvdUw" -m %s' % admin_user_name)
    sudo("mkdir /home/%s/.ssh" % admin_user_name)
    sudo("chmod 700 /home/%s/.ssh" % admin_user_name)
    sudo('echo "%s" >> /home/%s/.ssh/authorized_keys' % (ssh_key, admin_user_name))
    sudo("chmod 400 /home/%s/.ssh/authorized_keys" % admin_user_name)
    sudo("chown %s:admin /home/%s -R" % (admin_user_name, admin_user_name))
    # config sshd
    sudo("sed -i'.old' 's/^PermitEmptyPasswords [Yy]es/PermitEmptyPasswords no/' /etc/ssh/sshd_config")
    sudo("sed -i'.old' 's/^PermitRootLogin [Yy]es/PermitRootLogin no/' /etc/ssh/sshd_config")
    # grep returns non-zero when no match, hence warn_only.
    with settings(warn_only=True):
        if run('cat /etc/ssh/sshd_config | grep -e "^PasswordAuthentication [Yy]es"').failed:
            sudo("echo 'PasswordAuthentication no' >> /etc/ssh/sshd_config")
        else:
            sudo("sed -i'.old' 's/^PasswordAuthentication [Yy]es/PasswordAuthentication no/' /etc/ssh/sshd_config")
    sudo("echo 'AllowUsers vagrant admin' >> /etc/ssh/sshd_config")
    # config iptables
    sudo("mkdir /etc/iptables")
    put("configs/rules", "/etc/iptables", use_sudo=True)
    # Restore the rules on every interface bring-up.
    sudo('echo "#!/bin/sh" >> /etc/network/if-pre-up.d/iptables')
    sudo('echo "iptables-restore < /etc/iptables/rules" >> /etc/network/if-pre-up.d/iptables')
    sudo("chmod +x /etc/network/if-pre-up.d/iptables")
def upload_config(upload_location, local_file, values, rename=None,
                  user="******", group=None, permissions="644"):
    """
    Creates a backup of the original file on the server, fills in the
    given template and then uploads it to the desired location.

    :param upload_location: remote directory that receives the file.
    :param local_file: template name (looked up in ``template_env``).
    :param values: mapping rendered into the template.
    :param rename: optional remote filename; defaults to ``local_file``.
    :param user/group/permissions: ownership and mode applied remotely.
    """
    if rename is not None:
        external_file = rename
    else:
        external_file = local_file
    if group is None:
        group = user
    # Create and upload a configuration file
    # (warn_only: the backup fails harmlessly when no original exists)
    sudo("mv %s/%s %s/%s.backup" % (upload_location, external_file, upload_location, external_file), warn_only=True)
    template = template_env.get_template(local_file)
    config_file = template.render(values)
    with open("tmp/%s" % external_file, "wb") as fh:
        fh.write(config_file)
    # Upload to the home dir first, then move into place with sudo.
    put("tmp/%s" % external_file, "~")
    sudo("mv ~/%s %s" % (external_file, upload_location), warn_only=True)
    sudo("chown %s:%s %s/%s" % (user, group, upload_location, external_file))
    sudo("chmod %s %s/%s" % (permissions, upload_location, external_file))
    # remove the temp file if needed
    if ds.remove_temp_files:
        local("rm tmp/%s" % external_file)
def deploy(archive=None, name='radar'):
    # Default to the newest archive in dist/ (highest version number).
    if archive is None:
        archive = os.path.join('dist', sorted(os.listdir('dist'), key=parse_version)[-1])
    with temp():
        put(archive, 'radar.tar.gz')
        run('tar --strip-components=1 -xzf radar.tar.gz')
        version = str(run('cat VERSION'))
        current_version = '/srv/{name}/current'.format(name=name)
        new_version = '/srv/{name}/{version}'.format(name=name, version=version)
        run('rm -rf {0}'.format(new_version))
        run('./install.sh {0}'.format(new_version))
        # Switch the 'current' symlink to the freshly installed version.
        run('ln -sfn {0} {1}'.format(new_version, current_version))
        services = [
            'radar-admin',
            'radar-api',
            'radar-ukrdc-exporter-celery',
            'radar-ukrdc-importer-api',
            'radar-ukrdc-importer-celery',
        ]
        # Restart services
        # TODO replace with try-reload-or-restart when available in our version of systemd
        for service in services:
            # Only touch services that are currently active.
            run('if systemctl is-active {0} >/dev/null; then systemctl reload-or-restart {0}; fi'.format(service))
def _install_site():
    "Add the virtualhost file to apache"
    print(green("\n#####Installing the virtual host file to apache\n"))
    # The virtual_host template takes the domain eight times.
    vhost_config = virtual_host % ((env.domain,) * 8)
    put(StringIO.StringIO(vhost_config), "/tmp/%s" % env.domain)
    sudo('cp /tmp/%s /etc/apache2/sites-available/' % env.domain)
    sudo('a2ensite %s' % env.domain)
def bootstrap_simple_manager_blueprint(self):
    # Clone the manager blueprints repo and bootstrap the simple blueprint.
    self.manager_blueprints_repo_dir = clone(MANAGER_BLUEPRINTS_REPO_URL,
                                             self.workdir)
    self.test_manager_blueprint_path = \
        os.path.join(self.manager_blueprints_repo_dir,
                     'new', 'simple-manager-blueprint.yaml')
    # Inputs the simple blueprint expects (CentOS 7 image user, key pair).
    self.bootstrap_inputs = {
        'public_ip': self.public_ip_address,
        'private_ip': self.private_ip_address,
        'ssh_user': self.env.centos_7_image_user,
        'ssh_key_filename': self.inputs['key_pair_path'],
        'agents_user': self.env.centos_7_image_user,
        'resources_prefix': ''
    }
    # preparing inputs file for bootstrap
    self.test_inputs_path = \
        self.cfy._get_inputs_in_temp_file(self.bootstrap_inputs,
                                          self._testMethodName)
    self._bootstrap()
    self._running_env_setup(self.public_ip_address)
    self.logger.info('Uploading key file to manager...')
    # Push the key pair to the freshly bootstrapped manager over SSH.
    with fabric_api.settings(host_string=self.public_ip_address,
                             user=self.env.centos_7_image_user,
                             key_filename=self.inputs['key_pair_path']):
        fabric_api.put(self.inputs['key_pair_path'],
                       self.remote_manager_key_path)
def fio(self, config):
    """Upload a local fio job file to /tmp on the remote host and run it,
    returning fio's minimal-format output."""
    logger.info('Running fio job: {}'.format(config))
    remote_path = os.path.join('/tmp', os.path.basename(config))
    put(config, remote_path)
    return run('fio --minimal {}'.format(remote_path))
def upload(local_path, remote_path):
    '''
    Upload one or more files to a remote host.

    Thin wrapper around Fabric's ``put``; returns its result (the list of
    remote paths that were uploaded).
    '''
    return put(local_path, remote_path)
def setup_solr():
    """ Perform Solr-related provisioning tasks on the target OS. """
    require('solr_download_link', 'solr_install_dir', 'solr_home',
            'tomcat_user', 'local_repo_root', 'catalina_home',
            provided_by=('staging', 'production'))
    with hide('commands'):
        setup_tomcat()
        # add Tomcat's user to the ``solr`` group
        fastprint("Addding `tomcat' user to group `solr'...", show_prefix=True)
        sudo('adduser %(tomcat_user)s solr' % env)
        fastprint(" done." % env, end='\n')
        # sanity check
        if files.exists(env.solr_home):
            warn("Directory %(solr_home)s already exists" % env)
    ## Sorl installation procedure
    with hide('commands'):
        with cd('/tmp'):
            # download Solr distribution
            fastprint("Downloading Solr distribution...", show_prefix=True)
            run('wget %(solr_download_link)s' % env)
            fastprint(" done." % env, end='\n')
            fastprint("Unpacking Solr distribution...", show_prefix=True)
            tarball = env.solr_download_link.split('/')[-1]
            dist_dir = os.path.splitext(tarball)[0]
            # extract compressed archive containing Solr distribution
            run('tar xzvf %s' % tarball)
            fastprint(" done." % env, end='\n')
            ## copy Solr distribution in place
            fastprint("Installing Solr...", show_prefix=True)
            if files.exists(os.path.join('/home', dist_dir)):
                if dist_dir.startswith('apache-solr'):
                    # safety check: never rm -rf an unexpected directory
                    sudo('rm -rf %s' % os.path.join('/home', dist_dir))
            sudo('mv %(dist_dir)s /home/' % {'dist_dir': dist_dir})
            # adjust permissions
            sudo('chown -R solr:solr %(solr_install_dir)s' % env)
            fastprint(" done." % env, end='\n')
            # cleanup
            fastprint("Cleaning up...", show_prefix=True)
            run('rm -f %s' % tarball)
            fastprint(" done." % env, end='\n')
        ## initial setup for Solr
        with cd(env.solr_home):
            # create general filesystem layout
            fastprint("Creating `solr/cores' dir...", show_prefix=True)
            run('mkdir -p cores')
            fastprint(" done." % env, end='\n')
            fastprint("Creating `solr/data' dir...", show_prefix=True)
            run('mkdir -p data')
            # Solr data dir must be writable by Tomcat
            sudo('chmod 2770 %s' % os.path.join(env.solr_home, 'data'))
            fastprint(" done." % env, end='\n')
            # upload a context file (needed by Tomcat)
            with lcd(os.path.join(env.local_repo_root, 'solr')):
                execute(update_solr_context)
            fastprint("Publishing Solr context to Tomcat...", show_prefix=True)
            # publish application described in ``context.xml`` to Tomcat
            # by dropping a symlink under Tomcat's context configuration dir
            ln_src = os.path.join(env.solr_home, 'context.xml')
            ln_dest = os.path.join(env.catalina_home, 'localhost', 'solr.xml')
            with settings(warn_only=True):
                # remove a stale symlink, if any
                sudo('rm -f %s' % ln_dest)
            sudo('ln -s %s %s' % (ln_src, ln_dest))
            fastprint(" done." % env, end='\n')
            fastprint("Uploading skeleton configuration file for cores...", show_prefix=True)
            # upload a skeleton file for Solr cores' configuration
            # (mode=0644 is a Python-2 octal literal)
            src = 'solr.xml'
            dest = os.path.join('cores', 'solr.xml')
            put(src, dest, mode=0644)
            fastprint(" done." % env, end='\n')
    # Tomcat must be restarted in order for the changes to take effect
    restart_tomcat()
def copy(src, dest):
    """Upload local ``src`` to remote ``dest`` (thin wrapper around put)."""
    put(src, dest)
def run_script(script_path, fabric_env=None, process=None, **kwargs):
    """Run a blueprint script on a remote host over Fabric.

    Uploads the ctx proxy client plus the script, builds an env script that
    exports PATH/PYTHONPATH and the ctx socket URL (through an SSH tunnel
    back to a local HTTP ctx proxy), then sources the env script and runs
    the command remotely.
    """
    if not process:
        process = {}
    process = _create_process_config(process, kwargs)
    base_dir = process.get('base_dir', DEFAULT_BASE_DIR)
    ctx_server_port = process.get('ctx_server_port')
    # Ship the .py source, not a compiled .pyc, as the remote `ctx` client.
    proxy_client_path = proxy_client.__file__
    if proxy_client_path.endswith('.pyc'):
        proxy_client_path = proxy_client_path[:-1]
    local_ctx_sh_path = os.path.join(_get_bin_dir(), 'ctx-sh')
    local_ctx_py_path = os.path.join(
        os.path.dirname(cloudify.ctx_wrappers.__file__), 'ctx-py.py')
    local_script_path = get_script(ctx.download_resource, script_path)
    base_script_path = os.path.basename(local_script_path)
    # Remote filesystem layout under base_dir.
    remote_ctx_dir = base_dir
    remote_ctx_path = '{0}/ctx'.format(remote_ctx_dir)
    remote_ctx_sh_path = '{0}/ctx-sh'.format(remote_ctx_dir)
    remote_ctx_py_path = '{0}/cloudify.py'.format(remote_ctx_dir)
    remote_scripts_dir = '{0}/scripts'.format(remote_ctx_dir)
    remote_work_dir = '{0}/work'.format(remote_ctx_dir)
    # Random suffix keeps concurrent executions of the same script apart.
    remote_path_suffix = '{0}-{1}'.format(base_script_path,
                                          utils.id_generator(size=8))
    remote_env_script_path = '{0}/env-{1}'.format(remote_scripts_dir,
                                                  remote_path_suffix)
    remote_script_path = '{0}/{1}'.format(remote_scripts_dir,
                                          remote_path_suffix)
    env = process.get('env', {})
    cwd = process.get('cwd', remote_work_dir)
    args = process.get('args')
    command_prefix = process.get('command_prefix')
    command = remote_script_path
    if command_prefix:
        command = '{0} {1}'.format(command_prefix, command)
    if args:
        command = ' '.join([command] + args)
    with fabric_api.settings(**_fabric_env(fabric_env, warn_only=False)):
        # the remote host must have ctx and any related files before
        # running any fabric scripts
        if not fabric_files.exists(remote_ctx_path):
            # there may be race conditions with other operations that
            # may be running in parallel, so we pass -p to make sure
            # we get 0 exit code if the directory already exists
            fabric_api.run('mkdir -p {0}'.format(remote_scripts_dir))
            fabric_api.run('mkdir -p {0}'.format(remote_work_dir))
            # this file has to be present before using ctx
            fabric_api.put(local_ctx_sh_path, remote_ctx_sh_path)
            fabric_api.put(proxy_client_path, remote_ctx_path)
            fabric_api.put(local_ctx_py_path, remote_ctx_py_path)
        actual_ctx = ctx._get_current_object()
        actual_ctx.is_script_exception_defined = ScriptException is not None

        def abort_operation(message=None):
            # A second ctx operation in one run is illegal.
            if actual_ctx._return_value is not None:
                actual_ctx._return_value = ILLEGAL_CTX_OPERATION_ERROR
                raise actual_ctx._return_value
            if actual_ctx.is_script_exception_defined:
                actual_ctx._return_value = ScriptException(message)
            else:
                actual_ctx._return_value = UNSUPPORTED_SCRIPT_FEATURE_ERROR
                # NOTE(review): raising the ctx object (not _return_value)
                # looks wrong — confirm against upstream; kept as-is.
                raise actual_ctx
            return actual_ctx._return_value

        def retry_operation(message=None, retry_after=None):
            if actual_ctx._return_value is not None:
                actual_ctx._return_value = ILLEGAL_CTX_OPERATION_ERROR
                raise actual_ctx._return_value
            actual_ctx.operation.retry(message=message,
                                       retry_after=retry_after)
            if actual_ctx.is_script_exception_defined:
                actual_ctx._return_value = ScriptException(message, retry=True)
            else:
                actual_ctx._return_value = UNSUPPORTED_SCRIPT_FEATURE_ERROR
                # NOTE(review): same suspicious raise as abort_operation.
                raise actual_ctx
            return actual_ctx._return_value
        actual_ctx.abort_operation = abort_operation
        actual_ctx.retry_operation = retry_operation

        def returns(_value):
            if actual_ctx._return_value is not None:
                actual_ctx._return_value = ILLEGAL_CTX_OPERATION_ERROR
                raise actual_ctx._return_value
            actual_ctx._return_value = _value
        actual_ctx.returns = returns
        actual_ctx._return_value = None
        # Wrap download_resource(+_and_render) so downloaded files are also
        # pushed to the remote host.
        original_download_resource = actual_ctx.download_resource

        def download_resource(resource_path, target_path=None):
            local_target_path = original_download_resource(resource_path)
            return fabric_put_in_remote_path(local_target_path, target_path)
        actual_ctx.download_resource = download_resource
        original_download_resource_and_render = \
            actual_ctx.download_resource_and_render

        def download_resource_and_render(resource_path,
                                         target_path=None,
                                         template_variables=None):
            local_target_path = original_download_resource_and_render(
                resource_path,
                template_variables=template_variables)
            return fabric_put_in_remote_path(local_target_path, target_path)
        actual_ctx.download_resource_and_render = download_resource_and_render

        def fabric_put_in_remote_path(local_target_path, target_path):
            # Default remote destination: the work dir, same basename.
            if target_path:
                remote_target_path = target_path
            else:
                remote_target_path = '{0}/{1}'.format(
                    remote_work_dir,
                    os.path.basename(local_target_path))
            fabric_api.put(local_target_path, remote_target_path)
            return remote_target_path

        def handle_script_result(script_result):
            if (actual_ctx.is_script_exception_defined and
                    isinstance(script_result, ScriptException)):
                if script_result.retry:
                    return script_result
                else:
                    raise NonRecoverableError(str(script_result))
            # this happens when more than 1 ctx operation is invoked or
            # the plugin runs an unsupported feature on older Cloudify
            elif isinstance(script_result, RuntimeError):
                raise NonRecoverableError(str(script_result))
            # determine if this code runs during exception handling
            current_exception = sys.exc_info()[1]
            if current_exception:
                raise
            else:
                return script_result
        # Build the env script: make ctx executable/locatable, then export
        # the process env plus the tunneled ctx socket URL.
        env_script = StringIO()
        env['PATH'] = '{0}:$PATH'.format(remote_ctx_dir)
        env['PYTHONPATH'] = '{0}:$PYTHONPATH'.format(remote_ctx_dir)
        env_script.write('chmod +x {0}\n'.format(remote_script_path))
        env_script.write('chmod +x {0}\n'.format(remote_ctx_path))
        fabric_api.put(local_script_path, remote_script_path)
        proxy = proxy_server.HTTPCtxProxy(actual_ctx, port=ctx_server_port)
        try:
            with fabric_context.cd(cwd):
                local_port = proxy.port
                with tunnel.remote(local_port=local_port) as remote_port:
                    env[CTX_SOCKET_URL] = proxy.socket_url.replace(
                        str(local_port), str(remote_port))
                    env['LOCAL_{0}'.format(CTX_SOCKET_URL)] = proxy.socket_url
                    for key, value in env.iteritems():
                        env_script.write('export {0}={1}\n'.format(key, value))
                    fabric_api.put(env_script, remote_env_script_path)
                    # invoke sys.exc_clear() because handle_script_result
                    # invokes sys.exc_info()
                    sys.exc_clear()
                    try:
                        fabric_api.run('source {0} && {1}'.format(
                            remote_env_script_path, command))
                    except FabricTaskError:
                        return handle_script_result(actual_ctx._return_value)
                    return handle_script_result(actual_ctx._return_value)
        finally:
            proxy.close()
def assimilate_instance(instance, config, ssh_key, instance_data, deploypass,
                        chroot="", reboot=True):
    """Assimilate hostname into our collective

    What this means is that hostname will be set up with some basic things like
    a script to grab AWS user data, and get it talking to puppet (which is
    specified in said config).
    """
    def run_chroot(cmd, *args, **kwargs):
        # Run a command either directly or inside the given chroot.
        if chroot:
            run("chroot {} {}".format(chroot, cmd), *args, **kwargs)
        else:
            run(cmd, *args, **kwargs)
    distro = config.get('distro', '')
    if distro in ('debian', 'ubuntu'):
        ubuntu_release = config.get("release", "precise")
    # Windows instances take a completely different path.
    if distro.startswith('win'):
        return assimilate_windows(instance, config, instance_data)
    setup_fabric_env(instance=instance, key_filename=ssh_key)
    # Sanity check
    run("date")
    # Set our hostname
    hostname = "{hostname}".format(**instance_data)
    log.info("Bootstrapping %s...", hostname)
    run_chroot("hostname %s" % hostname)
    if distro in ('ubuntu', 'debian'):
        run("echo {hostname} > {chroot}/etc/hostname".format(hostname=hostname, chroot=chroot))
    # Resize the file systems
    # We do this because the AMI image usually has a smaller filesystem than
    # the instance has.
    if 'device_map' in config:
        for device, mapping in config['device_map'].items():
            if not mapping.get("skip_resize"):
                run('resize2fs {dev}'.format(dev=mapping['instance_dev']))
    # Set up /etc/hosts to talk to 'puppet'
    hosts = [
        '127.0.0.1 %s localhost' % hostname,
        '::1 localhost6.localdomain6 localhost6'
    ]
    hosts = StringIO.StringIO("\n".join(hosts) + "\n")
    put(hosts, "{}/etc/hosts".format(chroot))
    if distro in ('ubuntu', 'debian'):
        # apt-based: install our sources.list and the puppet toolchain.
        put('%s/releng-public-%s.list' % (AMI_CONFIGS_DIR, ubuntu_release),
            '{}/etc/apt/sources.list'.format(chroot))
        run_chroot("apt-get update")
        run_chroot("apt-get install -y --allow-unauthenticated "
                   "puppet cloud-init wget")
        run_chroot("apt-get clean")
    else:
        # Set up yum repos
        run('rm -f {}/etc/yum.repos.d/*'.format(chroot))
        put('%s/releng-public.repo' % AMI_CONFIGS_DIR,
            '{}/etc/yum.repos.d/releng-public.repo'.format(chroot))
        run_chroot('yum clean all')
        run_chroot('yum install -q -y puppet cloud-init wget')
    # Fetch and run the puppetize bootstrap script.
    run_chroot("wget -O /root/puppetize.sh "
               "https://hg.mozilla.org/build/puppet/"
               "raw-file/production/modules/puppet/files/puppetize.sh")
    run_chroot("chmod 755 /root/puppetize.sh")
    put(StringIO.StringIO(deploypass), "{}/root/deploypass".format(chroot))
    put(StringIO.StringIO("exit 0\n"),
        "{}/root/post-puppetize-hook.sh".format(chroot))
    puppet_master = pick_puppet_master(instance_data["puppet_masters"])
    log.info("Puppetizing %s against %s; this may take a while...",
             hostname, puppet_master)
    # export PUPPET_EXTRA_OPTIONS to pass extra parameters to puppet agent
    if os.environ.get("PUPPET_EXTRA_OPTIONS"):
        puppet_extra_options = "PUPPET_EXTRA_OPTIONS=%s" % \
            pipes.quote(os.environ["PUPPET_EXTRA_OPTIONS"])
        # in case we pass --environment, make sure we use proper puppet masters
        puppet_master = pick_puppet_master(instance_data["dev_puppet_masters"])
    else:
        puppet_extra_options = ""
    run_chroot("env PUPPET_SERVER=%s %s /root/puppetize.sh" %
               (puppet_master, puppet_extra_options))
    if "buildslave_password" in instance_data:
        # Set up a stub buildbot.tac
        run_chroot(
            "sudo -u cltbld /tools/buildbot/bin/buildslave create-slave "
            "/builds/slave {buildbot_master} {name} "
            "{buildslave_password}".format(**instance_data))
    # Flush disk buffers before (optionally) rebooting.
    run("sync")
    run("sync")
    if reboot:
        log.info("Rebooting %s...", hostname)
        run("reboot")
def setup_x509(self):
    # Install the x.509 cert chain and key into Couchbase's inbox directory.
    logger.info('Setting up x.509 certificates')
    put("certificates/inbox", "/opt/couchbase/var/lib/couchbase/")
    # NOTE(review): a+x on a cert chain and private key is unusual (a+r is
    # typical, and keys are normally kept private) — confirm this is intended.
    run('chmod a+x /opt/couchbase/var/lib/couchbase/inbox/chain.pem')
    run('chmod a+x /opt/couchbase/var/lib/couchbase/inbox/pkey.key')
def put_file():
    """Upload the local foo.txt into the remote /tmp/ directory."""
    put('foo.txt', '/tmp/')
def upload_client():
    """Upload client.py to the remote user's home directory (put() default)."""
    put("client.py")
def put_dir():
    """Upload the local 'bar' path into the remote /tmp/ directory."""
    put('bar', '/tmp/')
def upload_hosts():
    """Upload hosts.txt to the remote user's home directory (put() default)."""
    put("hosts.txt")
def upload_server():
    """Upload server.py to the remote user's home directory (put() default)."""
    put("server.py")
def publish_docs():
    """Replace the published django-assets docs with the freshly built
    Sphinx HTML output."""
    target = '/var/www/elsdoerfer/files/docs/django-assets'
    # Wipe and recreate the target so stale pages never linger.
    run('rm -rf %s' % target)
    run('mkdir %s' % target)
    put('build/sphinx/html/*', target)
def upload_neighbours():
    """ Upload the neighbours.txt file after condense.py has been run """
    # Source lives in a per-host local directory named after host_string;
    # no remote path given, so put() targets the remote home directory.
    put(env.host_string + "/neighbours.txt")
def test():
    # Smoke test of the connection: run a command, then exercise put().
    run("mkdir -p /tmp/nginx-confs")
    # NOTE(review): put("", "") uploads the local cwd to the remote home dir
    # with Fabric's defaults — presumably just a connectivity check; confirm.
    put("", "")
def configure():
    # Expand rpm's %configure macro remotely, capture the resulting shell
    # commands into an executable 'rpm-config' script, and run it.
    res = run(cmd("rpm", "--eval", "%configure --enable-arpae-tests"))
    put(BytesIO(b"\n".join(res.stdout.splitlines())), "rpm-config")
    run(cmd("chmod", "0755", "rpm-config"))
    run(cmd("./rpm-config"))
def update_settings(source_folder="/home/ubuntu/cafi"):
    """Upload local.py as the remote Django local settings and rewrite the
    developer's hard-coded project path to the server path."""
    settings_path = source_folder + '/backend/settings/local.py'
    put('./local.py', settings_path)
    # In-place path fixup via fabric.contrib.files.sed.
    sed(settings_path, "Users/yangm/cafi/project", "home/ubuntu/cafi")
def file(path=None, contents=None, source=None, url=None, md5=None,
         use_sudo=False, owner=None, group='', mode=None, verify_remote=True,
         temp_dir='/tmp'):
    """
    Require a file to exist and have specific contents and properties.

    You can provide either:

    - *contents*: the required contents of the file::

        from fabtools import require

        require.file('/tmp/hello.txt', contents='Hello, world')

    - *source*: the local path of a file to upload::

        from fabtools import require

        require.file('/tmp/hello.txt', source='files/hello.txt')

    - *url*: the URL of a file to download (*path* is then optional)::

        from fabric.api import cd
        from fabtools import require

        with cd('tmp'):
            require.file(url='http://example.com/files/hello.txt')

    If *verify_remote* is ``True`` (the default), then an MD5 comparison
    will be used to check whether the remote file is the same as the
    source. If this is ``False``, the file will be assumed to be the
    same if it is present. This is useful for very large files, where
    generating an MD5 sum may take a while.

    When providing either the *contents* or the *source* parameter, Fabric's
    ``put`` function will be used to upload the file to the remote host.
    When ``use_sudo`` is ``True``, the file will first be uploaded to a
    temporary directory, then moved to its final location. The default
    temporary directory is ``/tmp``, but can be overridden with the
    *temp_dir* parameter. If *temp_dir* is an empty string, then the user's
    home directory will be used.

    If `use_sudo` is `True`, then the remote file will be owned by root,
    and its mode will reflect root's default *umask*.

    The optional *owner*, *group* and *mode* parameters can be used to
    override these properties.

    .. note:: This function can be accessed directly from the
              ``fabtools.require`` module for convenience.

    """
    # Pick the remote executor once: root when requested, plain run otherwise.
    func = use_sudo and run_as_root or run

    # 1) Only a path is given: just ensure the file exists (touch).
    if path and not (contents or source or url):
        assert path  # redundant with the branch condition; kept for safety
        if not is_file(path):
            func('touch "%(path)s"' % locals())

    # 2) A URL is specified (path is optional): download if missing or
    #    if an expected md5 is given and the remote copy does not match.
    elif url:
        if not path:
            path = os.path.basename(urlparse(url).path)

        # Note operator precedence: `md5 and md5sum(path) != md5` only
        # forces a re-download when an expected md5 was supplied.
        if not is_file(path) or md5 and md5sum(path) != md5:
            func('wget --progress=dot:mega %(url)s -O %(path)s' % locals())

    # 3) A local filename, or a content string, is specified.
    else:
        if source:
            assert not contents
            t = None  # no temp file to clean up later
        else:
            # Spill *contents* into a local temp file so `put` can upload it.
            fd, source = mkstemp()
            t = os.fdopen(fd, 'w')
            t.write(contents)
            t.close()

        if verify_remote:
            # Avoid reading the whole file into memory at once
            digest = hashlib.md5()
            f = open(source, 'rb')
            try:
                while True:
                    d = f.read(BLOCKSIZE)
                    if not d:
                        break
                    digest.update(d)
            finally:
                f.close()
        else:
            digest = None

        # Upload only when the remote file is absent, or (when verifying)
        # its md5 differs from the local digest computed above.
        if (not is_file(path, use_sudo=use_sudo) or
                (verify_remote and
                    md5sum(path, use_sudo=use_sudo) != digest.hexdigest())):
            with settings(hide('running')):
                put(source, path, use_sudo=use_sudo, temp_dir=temp_dir)

        # Remove the temp file created for *contents*, if any.
        if t is not None:
            os.unlink(source)

    # Ensure correct owner: sudo-uploaded files default to root ownership.
    if use_sudo and owner is None:
        owner = 'root'
    if (owner and _owner(path, use_sudo) != owner) or \
       (group and _group(path, use_sudo) != group):
        func('chown %(owner)s:%(group)s "%(path)s"' % locals())

    # Ensure correct mode: derive root's default mode from its umask.
    if use_sudo and mode is None:
        mode = oct(0o666 & ~int(umask(use_sudo=True), base=8))
    if mode and _mode(path, use_sudo) != mode:
        func('chmod %(mode)s "%(path)s"' % locals())
def install_webstack():
    """Provision a full LAMP-style stack on a CentOS 5 host.

    Installs Apache (with the worker MPM), mod_ssl, subversion, the EPEL
    repository, mod_wsgi, pip/virtualenv, PIL, MySQL and Django 1.1.1,
    uploads a custom httpd.conf, enables services at boot and restarts them,
    and optionally sets the MySQL root password interactively.
    """
    script = """
# Install Apache
sudo yum -y install httpd
# Modify IP Tables
sudo /sbin/iptables -I INPUT -p tcp --dport 80 -m state --state NEW,ESTABLISHED -j ACCEPT
sudo /sbin/iptables -I OUTPUT -p tcp --sport 80 -m state --state ESTABLISHED -j ACCEPT
sudo /sbin/iptables -I INPUT -p tcp --dport 443 -m state --state NEW,ESTABLISHED -j ACCEPT
sudo /sbin/iptables -I OUTPUT -p tcp --sport 443 -m state --state ESTABLISHED -j ACCEPT
sudo /etc/init.d/iptables save
sudo yum -y install mod_ssl
sudo yum -y install subversion
sudo /etc/init.d/httpd stop
# Install the EPEL repository
# Currently, this just gives us access to a precompiled mod_wsgi.
sudo rpm -V epel-release-5-3 || rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm
"""
    run_multiline_script(script)

    # Move unwanted conf.d snippets aside so only the modules we want load.
    script = """
# set up Apache modules
sudo mkdir -p /etc/httpd/conf.d/disabled
sudo [ ! -f /etc/httpd/conf.d/php.conf ] || mv /etc/httpd/conf.d/php.conf /etc/httpd/conf.d/disabled/
sudo [ ! -f /etc/httpd/conf.d/proxy_ajp.conf ] || mv /etc/httpd/conf.d/proxy_ajp.conf /etc/httpd/conf.d/disabled/
sudo [ ! -f /etc/httpd/conf.d/ssl.conf ] || mv /etc/httpd/conf.d/ssl.conf /etc/httpd/conf.d/disabled/
sudo [ ! -f /etc/httpd/conf.d/webalizer.conf ] || mv /etc/httpd/conf.d/webalizer.conf /etc/httpd/conf.d/disabled/
sudo [ ! -f /etc/httpd/conf.d/welcome.conf ] || mv /etc/httpd/conf.d/welcome.conf /etc/httpd/conf.d/disabled/
# Next 4 lines remove unwanted manual.conf, perl.conf, python.conf, and
# squid.conf if present.
sudo [ ! -f /etc/httpd/conf.d/manual.conf ] || mv /etc/httpd/conf.d/manual.conf /etc/httpd/conf.d/disabled/
sudo [ ! -f /etc/httpd/conf.d/perl.conf ] || mv /etc/httpd/conf.d/perl.conf /etc/httpd/conf.d/disabled/
sudo [ ! -f /etc/httpd/conf.d/python.conf ] || mv /etc/httpd/conf.d/python.conf /etc/httpd/conf.d/disabled/
sudo [ ! -f /etc/httpd/conf.d/squid.conf ] || mv /etc/httpd/conf.d/squid.conf /etc/httpd/conf.d/disabled/
sudo [ -f /etc/httpd/conf/httpd.conf.default ] || cp -p /etc/httpd/conf/httpd.conf /etc/httpd/conf/httpd.conf.default
# enable the Apache worker MPM in /etc/sysconfig/httpd by uncommenting
# an existing config line
sudo sed 's|^#HTTPD=/usr/sbin/httpd\.worker$|HTTPD=/usr/sbin/httpd.worker|' /etc/sysconfig/httpd
"""
    run_multiline_script(script)

    # NOTE(review): this repeats the worker-MPM sed above, and `sed` without
    # -i only prints to stdout rather than editing the file in place —
    # confirm whether an in-place edit (sed -i) was intended.
    script = """
# enable the Apache worker MPM in /etc/sysconfig/httpd by uncommenting
# an existing config line
sudo sed 's|^#HTTPD=/usr/sbin/httpd\.worker$|HTTPD=/usr/sbin/httpd.worker|' /etc/sysconfig/httpd
"""
    run_multiline_script(script)

    # Change server name
    # in /etc/httpd/conf/httpd.conf find replace with "ServerName ieeetags"
    #files.sed('/etc/httpd/conf/httpd.conf', before='#ServerName www.example.com:80', after='ServerName ieeetags', use_sudo=True, pty=True)

    # install global Python packages, mod_wsgi, and virtualenv
    script = """
sudo yum -y install mod_wsgi.x86_64 python-setuptools python-devel.x86_64
# wget http://codepoint.net/attachments/mod_wsgi/mod_wsgi-3.1-1.el5.x86_64.rpm
# sudo rpm -i mod_wsgi-3.1-1.el5.x86_64.rpm
sudo rpm -V epel-release-5-3 || rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm
sudo [ -f /etc/httpd/conf.d/wsgi.conf ] || echo "LoadModule wsgi_module modules/mod_wsgi.so" > /etc/httpd/conf.d/wsgi.conf
sudo easy_install -U pip
sudo pip install virtualenv
"""
    run_multiline_script(script)

    script = """
# install PIL and its requirements (for creating sprites images)
sudo yum -y install zlib-devel
sudo yum -y install gcc
sudo pip install pil
"""
    run_multiline_script(script)

    # Install MySQL
    sudo('yum -y install mysql-server.x86_64 mysql.x86_64 mysql-devel.x86_64',
         pty=True)

    # Install MySQL-Python
    script = "sudo yum -y install MySQL-python.x86_64"
    run_multiline_script(script)

    # Install Django
    # NOTE(review): wget of .../tarball/ may not save the file as
    # Django-1.1.1.tar.gz — confirm the download filename on this host.
    script = """
wget http://www.djangoproject.com/download/1.1.1/tarball/
tar xzvf Django-1.1.1.tar.gz
sudo python Django-1.1.1/setup.py build
sudo python Django-1.1.1/setup.py install
"""
    run_multiline_script(script)

    script = """
# set up an Apache sites dir structure
sudo mkdir -p /etc/httpd/sites-available
sudo mkdir -p /etc/httpd/sites-enabled
sudo mkdir -p /etc/httpd/ssl
sudo chmod 700 /etc/httpd/ssl
"""
    run_multiline_script(script)

    # upload custom Apache conf: staged in $HOME first because put() here
    # cannot write to root-owned /etc directly.
    current_dir = os.path.dirname(__file__)
    put(os.path.join(current_dir, 'custom_httpd.conf'), '~/temp_httpd.conf')
    # will go to '/etc/httpd/conf/httpd.conf'
    script = """
sudo mv ~/temp_httpd.conf /etc/httpd/conf/httpd.conf
sudo chown root:root /etc/httpd/conf/httpd.conf
"""
    run_multiline_script(script)

    # Set services to auto-restart if slice is rebooted. Might be redundant.
    # Restart Services
    script = """
sudo /sbin/chkconfig httpd on
sudo /sbin/chkconfig mysqld on
sudo /etc/init.d/httpd restart
sudo /etc/init.d/mysqld start
"""
    run_multiline_script(script)

    # Optionally set the MySQL root password (prompted, never echoed).
    mysql_root_password = getpass.getpass(
        'Enter new MySQL root password (blank to leave alone): ')
    if mysql_root_password:
        #run('/usr/bin/mysqladmin -u root password "%s"' % mysql_root_password)
        run_script("""
#!/bin/bash
/usr/bin/mysqladmin -u root password "%s"
""" % mysql_root_password)
def start_celery(source_folder="/home/ubuntu/cafi"):
    """Upload supervisord.conf and start supervisord for the backend.

    Args:
        source_folder: Remote project root; the config is placed in (and
            supervisord is started from) its ``backend/`` subdirectory.
    """
    backend_dir = '%s/backend' % (source_folder,)
    put('./supervisord.conf', '%s/supervisord.conf' % (backend_dir,))
    # Fix: previously the cd path was hard-coded to /home/ubuntu/cafi/backend,
    # silently ignoring a caller-supplied source_folder. The default value is
    # unchanged, so existing no-argument callers behave identically.
    run('cd %s && supervisord -c supervisord.conf' % (backend_dir,))
def deploy():
    """Install CellProfiler's system dependencies and unpack its tarball.

    Runs as the configured privileged user: installs GUI/numeric/X11
    packages via yum, uploads cellprofiler.tar.gz to the remote home
    directory, and extracts it relative to the filesystem root.
    """
    # User name is masked in this source; assumed to be a privileged
    # account (likely root) — TODO confirm.
    with settings(user="******"):
        run("yum -y install gtk2-devel mesa-libGL mesa-libGL-devel blas atlas lapack blas-devel atlas-devel lapack-devel xorg-x11-xauth* xorg-x11-xkb-utils* qt-devel openssl openssl-devel xclock *Xvfb* svn libXtst"
            )
        put("cellprofiler.tar.gz")
        # -C / : the tarball contains absolute-style paths from the root.
        run("tar -C / -xzf cellprofiler.tar.gz")
def _test_env_mobile_www_release():
    """
    Inner Fabric task

    Releases a mobile-www frontend version to the test environment:
    verifies the FTP-staged zip's md5, uploads and unpacks it into a
    remote temp dir, re-verifies, backs up the previous version, installs
    the new one under root_dir, generates inter-version diff packages
    (full, preview, and preview-vs-full "30lv"), then cleans up the FTP
    staging dir. Relies on module-level globals: game, region, version,
    scopes, mode, root_dir, start_zip_version, TIMESTAMP.
    """
    ftp_resource_dir = "/app/online/{}/frontend/{}/{}".format(game, region, version)
    remote_script_dir = "/app/opbak/mobile_www_scripts_{}".format(TIMESTAMP)
    remote_temp_dir = "/app/opbak/mobile_www_test_release/{}/{}/{}".format(game, region, version)
    remote_backup_dir = "/app/opbak/mobile_www_test_backup/{}_{}_{}".format(game, region, TIMESTAMP)
    # Verify md5 sums locally (on the FTP staging host) first.
    with lcd(ftp_resource_dir):
        local("dos2unix md5.txt >/dev/null 2>&1")
        local("chown virtual_user.virtual_user md5.txt")
        local("md5sum -c md5.txt >/dev/null")
    # Create the remote temp resource dir; shelve any leftover one aside.
    if remote_dir_exists(remote_temp_dir):
        run('mv {0} {0}.rb{1}'.format(remote_temp_dir, TIMESTAMP))
    remote_mkdir(remote_temp_dir)
    # Upload the zip package together with md5.txt.
    version_zip_file = '{}.zip'.format(version)
    print('正在上传 {}...'.format(version_zip_file))
    sys.stdout.flush()
    with lcd(ftp_resource_dir):
        put(version_zip_file, remote_temp_dir)
        put('md5.txt', remote_temp_dir)
    # Check md5 again on the remote side, then unpack.
    with cd(remote_temp_dir):
        run('dos2unix md5.txt')
        run('md5sum -c md5.txt')
        run("unzip -o -q {}".format(version_zip_file))
    for each_scope in scopes:
        scope_check(remote_temp_dir, each_scope, version)
    # Python script that generates diff packages.
    make_diff_py = '/app/opbin/work/bible/func/make_diff.py'
    remote_mkdir(remote_script_dir)
    put(make_diff_py, remote_script_dir)
    # Verify that the file md5s recorded in res.lua match the actual files.
    res_to_verify = ['res.lua', 'res_preview.lua']
    _inner_scopes = list_inner_scopes(remote_temp_dir, version)
    for each_scope in _inner_scopes:
        for each_res_lua in res_to_verify:
            lua_file = '{}/{}/{}/{}'.format(remote_temp_dir, version, each_scope, each_res_lua)
            if remote_file_exists(lua_file):
                resource_dir = os.path.dirname(lua_file)
                md5_verify(remote_script_dir, lua_file, resource_dir)
    # Back up the currently deployed version and scopes.
    with cd(root_dir):
        if remote_dir_exists(version):
            run('mkdir -p {}'.format(remote_backup_dir))
            run('mv {} {}/'.format(version, remote_backup_dir))
        for each_scope in scopes:
            if remote_dir_exists(each_scope):
                run('cp -r {} {}/'.format(each_scope, remote_backup_dir))
            else:
                print('[Warning] {root_dir}/目录下不存在{scope}, 默认这是{scope}的第一次发布。'.format(root_dir=root_dir, scope=each_scope))
    with cd(remote_temp_dir):
        run('cp -r {} {}/'.format(version, root_dir))
    need_diff_versions = filter_need_diff_versions(root_dir, start_zip_version)
    # Generate diff packages between full versions.
    print('开始处理完整版的差异包...')
    for each_version in need_diff_versions:
        if version_tuple(each_version) >= version_tuple(version):
            print('跳过{}, 不需要版本差异包'.format(each_version))
        else:
            if mode == 2:
                # mode 2: resources are split per scope under each version.
                _inner_scopes = list_inner_scopes(root_dir, each_version)
                for each_scope in _inner_scopes:
                    diff_from_lua = '{}/{}/res.lua'.format(each_version, each_scope)
                    with cd(root_dir):
                        if remote_file_exists(diff_from_lua):
                            diff_to_lua = '{}/{}/res.lua'.format(version, each_scope)
                            resource_dir = '{}/{}'.format(version, each_scope)
                            dest = '{0}/{1}/{2}.zip,{0}/{1}/{2}'.format(each_version, each_scope, version)
                            print('正在生成 {} 完整版的差异包 ==> {} ...'.format(each_version, dest.replace(',', ', ')))
                            sys.stdout.flush()
                            make_diff(remote_script_dir, diff_from_lua, diff_to_lua, resource_dir, dest)
            else:
                diff_from_lua = '{}/res.lua'.format(each_version)
                with cd(root_dir):
                    if remote_file_exists(diff_from_lua):
                        diff_to_lua = '{}/res.lua'.format(version)
                        resource_dir = version
                        dest = '{0}/{1}.zip,{0}/{1}'.format(each_version, version)
                        print('正在生成 {} 完整版的差异包 ==> {} ...'.format(each_version, dest.replace(',', ', ')))
                        sys.stdout.flush()
                        make_diff(remote_script_dir, diff_from_lua, diff_to_lua, resource_dir, dest)
    # Generate diff packages between preview versions.
    print('开始处理预览版的差异包...')
    for each_version in need_diff_versions:
        if version_tuple(each_version) >= version_tuple(version):
            print('跳过{}, 不需要版本差异包'.format(each_version))
        else:
            if mode == 2:
                _inner_scopes = list_inner_scopes(root_dir, each_version)
                for each_scope in _inner_scopes:
                    diff_from_lua = '{}/{}/res_preview.lua'.format(each_version, each_scope)
                    with cd(root_dir):
                        if remote_file_exists(diff_from_lua):
                            diff_to_lua = '{}/{}/res_preview.lua'.format(version, each_scope)
                            resource_dir = '{}/{}'.format(version, each_scope)
                            dest = '{0}/{1}/{2}_preview.zip,{0}/{1}/{2}_preview'.format(each_version, each_scope, version)
                            print('正在生成 {} 预览版的差异包 ==> {} ...'.format(each_version, dest.replace(',', ', ')))
                            sys.stdout.flush()
                            make_diff(remote_script_dir, diff_from_lua, diff_to_lua, resource_dir, dest)
            else:
                diff_from_lua = '{}/res_preview.lua'.format(each_version)
                with cd(root_dir):
                    if remote_file_exists(diff_from_lua):
                        diff_to_lua = '{}/res_preview.lua'.format(version)
                        resource_dir = version
                        dest = '{0}/{1}_preview.zip,{0}/{1}_preview'.format(each_version, version)
                        print('正在生成 {} 预览版的差异包 ==> {} ...'.format(each_version, dest.replace(',', ', ')))
                        sys.stdout.flush()
                        make_diff(remote_script_dir, diff_from_lua, diff_to_lua, resource_dir, dest)
    # Generate the preview-vs-full ("30lv") diff packages for this version.
    print('开始处理预览版跟完整版的差异包...')
    if mode == 2:
        _inner_scopes = list_inner_scopes(root_dir, version)
        for each_scope in _inner_scopes:
            diff_from_lua = '{}/{}/res_preview.lua'.format(version, each_scope)
            with cd(root_dir):
                if remote_file_exists(diff_from_lua):
                    diff_to_lua = '{}/{}/res.lua'.format(version, each_scope)
                    resource_dir = '{}/{}'.format(version, each_scope)
                    dest = '{0}/{1}/{2}_30lv.zip,{0}/{1}/{2}_30lv'.format(version, each_scope, version)
                    print('正在生成 {}_30lv 差异包 ==> {} ...'.format(version, dest.replace(',', ', ')))
                    sys.stdout.flush()
                    make_diff(remote_script_dir, diff_from_lua, diff_to_lua, resource_dir, dest)
    else:
        diff_from_lua = '{}/res_preview.lua'.format(version)
        with cd(root_dir):
            if remote_file_exists(diff_from_lua):
                diff_to_lua = '{}/res.lua'.format(version)
                resource_dir = version
                dest = '{0}/{1}_30lv.zip,{0}/{1}_30lv'.format(version, version)
                print('正在生成 {}_30lv 差异包 ==> {} ...'.format(version, dest.replace(',', ', ')))
                sys.stdout.flush()
                make_diff(remote_script_dir, diff_from_lua, diff_to_lua, resource_dir, dest)
    # Install the new scopes into root_dir.
    with cd(remote_temp_dir):
        for each_scope in scopes:
            run('cp -rf {} {}/'.format(each_scope, root_dir))
    # Clean up the staged directory and files on the FTP host.
    local("rm -rf /app/online/{}/frontend/{}/{}".format(game, region, version))
def do_deploy(archive_path):
    """Deploy a packed web_static archive to the remote server.

    Args:
        archive_path: Local path to a ``.tgz`` archive, expected in the form
            ``versions/<name>.tgz``.

    Returns:
        True on success; False when the archive is missing or any remote
        command fails.
    """
    if not os.path.exists(archive_path):
        return False
    data_path = '/data/web_static/releases/'
    # 'versions/<name>.tgz' -> '<name>'
    tmp = archive_path.split('.')[0]
    name = tmp.split('/')[1]
    dest = data_path + name
    try:
        put(archive_path, '/tmp')
        run('mkdir -p {}'.format(dest))
        run('tar -xzf /tmp/{}.tgz -C {}'.format(name, dest))
        run('rm -f /tmp/{}.tgz'.format(name))
        # Flatten the extracted web_static/ directory into the release dir.
        run('mv {}/web_static/* {}/'.format(dest, dest))
        run('rm -rf {}/web_static'.format(dest))
        # Fix: dropped a pointless `.format(name)` on this constant string.
        run('rm -rf /data/web_static/current')
        run('ln -s {} /data/web_static/current'.format(dest))
        return True
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Deliberately best-effort: report failure only.
        return False


def deploy():
    """Compress and upload files to remote server."""
    path = do_pack()
    print(path)
    if path is None:
        return False
    return do_deploy(path)


# Module-level side effect preserved from the original: deploy on import/run.
deploy()
def keypush():
    """Push root's local SSH files to the remote host and authorize the key.

    Uploads everything under ``./config/root/ssh/`` into ``/root/.ssh``,
    locks down permissions, then runs ssh-copy-id from the local machine.
    """
    put('./config/root/ssh/*', '/root/.ssh')
    # Private keys must not be group/world readable.
    run('chmod 600 /root/.ssh/*')
    local('ssh-copy-id %s@%s' % (env.user, env.host))
def _fab_copy_swift_directory(local_files, remote_dir):
    """Upload *local_files* into *remote_dir*, preserving local file modes."""
    put(local_files, remote_dir, mirror_local_mode=True)
def _copy_keys(self):
    """Upload the EC2 x.509 key and certificate into the packaging dir.

    Both files are placed (via sudo) under ``env.packaging_dir`` as
    ``ec2_key`` and ``ec2_cert``.
    """
    opts = self._driver_options()
    for option, remote_name in (("x509_key", "ec2_key"),
                                ("x509_cert", "ec2_cert")):
        put(opts[option], "%s/%s" % (env.packaging_dir, remote_name),
            use_sudo=True)
def deploy():
    """Upload the configured build directory to the remote 'deploy' path."""
    put(settings.BUILD_DIR, 'deploy')
def check_host():
    "Check that needed tools are installed on hosts"
    # get type of current host
    htype = get_type_cached(env.host_string)
    # run checks
    if env.host_string in config.TPCONF_router:
        # Routers only need their traffic-shaping tools.
        if htype == 'FreeBSD':
            run('which ipfw')
        if htype == "Linux":
            run('which tc')
            run('which iptables')
        # XXX check that kernel tick rate is high (>= 1000)
    else:
        # End hosts: per-OS capture/logging tools first.
        if htype == 'FreeBSD':
            run('which md5')
            run('which tcpdump')
        elif htype == 'Darwin':
            run('which md5')
            run('which tcpdump')
            run('which dsiftr-osx-teacup.d')
        elif htype == 'Linux':
            run('which ethtool')
            run('which md5sum')
            run('which tcpdump')
            #run('which web10g-listconns')
            #run('which web10g-readvars')
            #updated for ttprobe support
            # Config may not define a TCP logger choice; default to web10g.
            try:
                linux_tcp_logger = config.TPCONF_linux_tcp_logger
            except AttributeError:
                linux_tcp_logger = 'web10g'
            if linux_tcp_logger == 'ttprobe' or linux_tcp_logger == 'both':
                #checking the availability of ttprobe.ko kernel module
                run('ls /lib/modules/$(uname -r)/extra/ttprobe.ko')
            if linux_tcp_logger == 'web10g' or linux_tcp_logger == 'both':
                run('which web10g-logger')
        elif htype == 'CYGWIN':
            run('which WinDump', pty=False)
            run('which win-estats-logger', pty=False)
            # if we don't have proper ntp installed then
            # start time service if not started and force resync
            with settings(warn_only=True):
                ret = run('ls "/cygdrive/c/Program Files (x86)/NTP/bin/ntpq"')
                if ret.return_code != 0:
                    run('net start w32time', pty=False)
                    run('w32tm /resync', pty=False)
            # try to enable any test network interfaces that are (accidently)
            # disabled after reboot
            with settings(warn_only=True):
                interfaces = get_netint_cached(env.host_string, int_no=-1)
                for interface in interfaces:
                    run('netsh int set int "Local Area Connection %s" enabled' %
                        interface, pty=False)
        # Generic process/compression tools required on every end host.
        run('which killall', pty=False)
        run('which pkill', pty=False)
        run('which ps', pty=False)
        run('which gzip', pty=False)
        run('which dd', pty=False)
        # check for traffic sender/receiver tools
        run('which iperf', pty=False)
        run('which ping', pty=False)
        run('which httperf', pty=False)
        run('which lighttpd', pty=False)
        run('which nttcp', pty=False)
        # Install helper scripts into /usr/bin and verify they resolve.
        put(config.TPCONF_script_path + '/runbg_wrapper.sh', '/usr/bin')
        run('chmod a+x /usr/bin/runbg_wrapper.sh', pty=False)
        run('which runbg_wrapper.sh', pty=False)
        put(config.TPCONF_script_path + '/kill_iperf.sh', '/usr/bin')
        run('chmod a+x /usr/bin/kill_iperf.sh', pty=False)
        run('which kill_iperf.sh', pty=False)
        put(config.TPCONF_script_path + '/pktgen.sh', '/usr/bin')
        run('chmod a+x /usr/bin/pktgen.sh', pty=False)
        run('which pktgen.sh', pty=False)
def update_config():
    """Push the gspread key and slackbot settings into the remote home dir."""
    with cd(home_dir):
        # Same relative path locally and remotely for each config file.
        for config_file in ('config/gspread-key.json', 'slackbot_settings.py'):
            put(config_file, config_file)
def putconfig(local):
    """Upload *local* as ``config.json`` inside the remote workspace."""
    destination = os.path.join(workspace, "config.json")
    put(local, destination)