def run(self, section=None):
    """Install, configure and start pgbouncer on a SmartOS/illumos host.

    :param section: config section used to look up the DB username;
                    defaults to 'db-server' when not given.
    """
    sudo('pkg_add libevent')
    sudo('mkdir -p /opt/pkg/bin')
    # pgbouncer tooling expects nawk/nbsed; alias them to the pkgsrc awk/sed
    sudo("ln -sf /opt/local/bin/awk /opt/pkg/bin/nawk")
    sudo("ln -sf /opt/local/bin/sed /opt/pkg/bin/nbsed")
    with cd('/tmp'):
        run('wget %s' %self.pgbouncer_src)
        sudo('pkg_add %s' %self.pkg_name)
    # upload the SMF service manifest next to the pgbouncer config
    svc_method = os.path.join(env.configs_dir, 'pgbouncer.xml')
    put(svc_method, self.config_dir, use_sudo=True)
    self._setup_parameter('%s/pgbouncer.ini' %self.config_dir, **self.config)
    if not section:
        section = 'db-server'
    username = self._get_username(section)
    self._get_passwd(username)
    # postgres should be the owner of these config files
    sudo('chown -R postgres:postgres %s' %self.config_dir)
    # pgbouncer won't run smoothly without these directories
    sudo('mkdir -p /var/run/pgbouncer')
    sudo('mkdir -p /var/log/pgbouncer')
    sudo('chown postgres:postgres /var/run/pgbouncer')
    sudo('chown postgres:postgres /var/log/pgbouncer')
    # set up log rotation: keep 3 copies, rotate daily, compress old logs
    sudo('logadm -C 3 -p1d -c -w /var/log/pgbouncer/pgbouncer.log -z 1')
    run('svccfg import %s/pgbouncer.xml' %self.config_dir)
    # start pgbouncer
    sudo('svcadm enable pgbouncer')
def deploy():
    """Deploy the haxclub Django app: pull code, migrate, restart
    supervisor-managed processes, then push nginx/SSL config and bounce nginx.
    """
    with prefix('source $(which virtualenvwrapper.sh) && workon remote'):
        settings_file = '--settings=haxclub.settings.base'
        env_vars = config.get('env_vars')
        if not exists('~/haxclub'):
            with cd('~/'):
                run('git clone https://github.com/jsalva/haxclub')
        with cd('~/haxclub/haxclub'):
            if not exists('logs'):
                run('mkdir logs')
            run('git pull origin master')
            # export app secrets/config into the shell for the manage.py calls
            with shell_env(**env_vars):
                # auto-answer interactive "Type 'yes' to continue" prompts
                prompts = []
                prompts += expect("Type 'yes' to continue","yes")
                with expecting(prompts):
                    erun('python manage.py collectstatic %s' % settings_file)
                    erun('python manage.py migrate %s' % settings_file)
                    erun('python manage.py syncdb %s' % settings_file)
                    # reload if supervisord is already running, otherwise start it
                    if exists('supervisord.pid'):
                        erun('python manage.py supervisor reload %s' % settings_file)
                    else:
                        erun('python manage.py supervisor --daemonize %s' % settings_file)
            if not exists('/tmp/nginx'):
                run('mkdir /tmp/nginx')
            put('nginx.conf','/etc/nginx/nginx.conf',use_sudo=True)
            put('nginx_haxclub.conf','/etc/nginx/conf.d/nginx_haxclub.conf',use_sudo=True)
            put('ssl/haxclub.key.nopass','/etc/ssl/certs/haxclub.key.nopass',use_sudo=True)
            put('ssl/haxclub.crt','/etc/ssl/certs/haxclub.crt',use_sudo=True)
            # NOTE(review): duplicate upload of nginx_haxclub.conf — presumably redundant
            put('nginx_haxclub.conf','/etc/nginx/conf.d/nginx_haxclub.conf',use_sudo=True)
            sudo('service nginx stop; service nginx start;')
def update_xforms(deployment_name, username, path): setup_env(deployment_name) # compress and upload path = path.rstrip("/") dir_name = os.path.basename(path) path_compressed = '%s.tgz' % dir_name check_call(['tar', 'czvf', path_compressed, '-C', os.path.dirname(path), dir_name]) with cd('/tmp'): put(path_compressed, '%s.tgz' % dir_name) # decompress on server run('tar xzvf %s.tgz' % dir_name) try: with cd(env.code_src): with source(env.virtualenv): # run replace command for f in glob.glob(os.path.join(path, '*')): file_path = '/tmp/%s/%s' % (dir_name, os.path.basename(f)) run('python manage.py publish_xls -r %s %s --settings=%s' % (file_path, username, env.django_config_module)) finally: run('rm -r /tmp/%s /tmp/%s.tgz' % (dir_name, dir_name)) check_call(['rm', path_compressed])
def XXXX_deploy ():
    """ Deploy the packages in the deployment machines """
    print(green("Installing packages at %s" % str(env.host_string)))
    # interactive guard: defaults to "no" so an accidental run is harmless
    if confirm(red('Install the packages at the %s?' % (env.host_string)), default = False):
        print(yellow("... stopping XXXX"))
        if _exists('/etc/init.d/XXXX'):
            sudo('service XXXX stop')
            sudo('rm -f /etc/init.d/XXXX')
        with cd(env.admin.prefix):
            print(yellow("... cleaning up old RPMs"))
            if not _exists('tmp'):
                run('mkdir tmp')
            run('rm -rf tmp/*')
        directory = os.path.join(env.admin.prefix, 'tmp')
        with cd(directory):
            print(yellow("... uploading RPMs"))
            for f in env.packages.rpms:
                put(os.path.join(directory, f), '.')
            print(yellow("... installing software"))
            # -R 2: give yum at most 2 minutes to acquire the lock
            sudo('yum install -R 2 -q -y --nogpgcheck *.rpm')
        # service is intentionally left stopped; start it separately
        print(red("... XXXX is STOPPED at %s!" % env.host_string))
def setMaster():
    """Rewrite /etc/hosts from a local 'hosts' file (keeping a pristine
    backup in /etc/hosts0), rsync the project tree to the master, install
    the SSH key file, and trigger key generation.
    """
    if exists('/etc/hosts0'):
        print 'etc/hosts0 exists'
    else:
        # first run: preserve the original hosts file
        sudo('cp /etc/hosts /etc/hosts0')
    # restore pristine hosts, then append our entries
    sudo('rm /etc/hosts')
    sudo('cp /etc/hosts0 /etc/hosts')
    put('hosts')
    sudo('cat hosts|sudo tee -a /etc/hosts')
    run('rm hosts')
    run('cat /etc/hosts')
    path1 = '/home/{0}'.format(parm['USER'])
    rsync_project(path1, exclude=['result'])
    path2 = join(path1, basename(realpath('.')))
    path3 = join(path2, parm['programdir'])
    # make sure the key file exists (0400) in both project directories
    for dst in (path2, path3):
        fi = '{0}/{1}'.format(dst, parm['keyfile'])
        if not exists(fi, use_sudo=True):
            put(parm['keyfile'], dst)
            sudo('chmod 400 {0}'.format(fi))
    execute('genkey')
def set_version_state(name, object=None, delete=False):
    """
    Sets a simple 'state' on the server by creating a file
    with the desired state's name + version and storing ``content``
    as json strings if supplied

    returns the filename used to store state
    """
    # prefix the state file with the project/version when available
    if env.project_fullname:
        state_name = "-".join([env.project_fullname, name])
    else:
        state_name = name
    with settings(warn_only=True):
        # Test for os state
        if not exists("/var/local/woven", use_sudo=True):
            sudo("mkdir /var/local/woven")
        if not delete:
            sudo("touch /var/local/woven/%s" % state_name)
            # FIX: '<>' is Python-2-only (removed in Py3); use 'is not None'
            if object is not None:
                # serialize the payload locally, then upload via /tmp
                fd, file_path = tempfile.mkstemp()
                f = os.fdopen(fd, "w")
                try:
                    f.write(json.dumps(object))
                finally:
                    # FIX: ensure the temp file handle is closed even if
                    # json serialization fails
                    f.close()
                put(file_path, "/tmp/%s" % state_name)
                os.remove(file_path)
                sudo("cp /tmp/%s /var/local/woven/%s" % (state_name, state_name))
        else:
            sudo("rm -f /var/local/woven/%s" % state_name)
    return state_name
def __patch_jenkins_config( self ):
    """
    A context manager that retrieves the Jenkins configuration XML, deserializes it
    into an XML ElementTree, yields the XML tree, then serializes the tree and saves
    it back to Jenkins.
    """
    config_file = StringIO( )
    if run( 'test -f ~/config.xml', quiet=True ).succeeded:
        fresh_instance = False
        get( remote_path='~/config.xml', local_path=config_file )
    else:
        # Get the in-memory config as the on-disk one may be absent on a fresh instance.
        # Luckily, a fresh instance won't have any configured security.
        fresh_instance = True
        config_url = 'http://localhost:8080/computer/(master)/config.xml'
        with hide( 'output' ):
            config_file.write( run( 'curl "%s"' % config_url ) )
    # rewind before parsing the buffered XML
    config_file.seek( 0 )
    config = ElementTree.parse( config_file )
    yield config
    # caller has mutated the tree; re-serialize over the old buffer contents
    config_file.truncate( 0 )
    config.write( config_file, encoding='utf-8', xml_declaration=True )
    # a fresh instance must be stopped so it re-reads config.xml on start;
    # an existing instance needs a manual reload via the web UI instead
    if fresh_instance:
        self.__service_jenkins( 'stop' )
    try:
        put( local_path=config_file, remote_path='~/config.xml' )
    finally:
        if fresh_instance:
            self.__service_jenkins( 'start' )
        else:
            log.warn( 'Visit the Jenkins web UI and click Manage Jenkins - Reload '
                      'Configuration from Disk' )
def site_config(path=None,archive_name='latest_codalab_config.tar',url=None,module=None):
    """Install a site-configuration package on the remote host.

    The config source is either a local directory (``path``) or an importable
    package (``module``); it is archived with ``git archive``, uploaded,
    unpacked under ``src`` and pip-installed into the remote virtualenv.

    :raises Exception: if the module is not a directory package or the
                       working tree has uncommitted changes.
    """
    spath = 'src'
    if path and os.path.exists(path):
        path = os.path.abspath(path)
    elif module:
        mod = __import__(module)
        if os.path.isdir(mod.__path__[0]):
            path = mod.__path__[0]
        else:
            raise Exception("Must be a directory module")
    # FIX: was 'warn_ony=True' — fabric.settings() accepts arbitrary kwargs,
    # so the typo silently set env.warn_ony instead of enabling warn-only mode
    with settings(warn_only=True),lcd(path):
        # refuse to package a dirty working tree
        res = lrun('git diff --exit-code')
        if res.return_code != 0:
            raise Exception("*** Module has local changes. You must commit them.")
        tmp = tempfile.mkdtemp()
        fname = archive_name
        tmpf = os.path.join(tmp,fname)
        path = path.rstrip('/')
        # archive HEAD with the directory name as prefix so it unpacks cleanly
        lrun('git archive --prefix=%s%s -o %s HEAD' % (os.path.basename(path),os.path.sep,tmpf))
        env.run('mkdir -p %s' % spath)
        put(tmpf)
        env.run('tar -C %s -xvf %s' % (spath,fname))
        with virtualenv(env.venvpath):
            env.run('pip install -U --force-reinstall ./%s' % pathjoin(spath,os.path.basename(path)))
        env.EXTERNAL_SITE_CONFIG = True
def load_db(dumpfile=None):
    """Loads data from a SQL script to Pootle DB"""
    require('environment', provided_by=[production, staging])
    if dumpfile is not None:
        if isfile(dumpfile):
            remote_filename = '%(project_path)s/DB_backup_to_load.sql' % env
            # proceed when the remote copy is absent, or present and the
            # user explicitly confirms overwriting it
            if (exists(remote_filename) and confirm('\n%s already exists. Do you want to overwrite it?' % remote_filename, default=False)) or not exists(remote_filename):
                print('\nLoading data into the DB...')
                with settings(hide('stderr')):
                    put(dumpfile, remote_filename, use_sudo=True)
                    # '-p' with no attached value makes mysql prompt for the
                    # password interactively; %s here is the database name
                    sudo('mysql -u %s -p %s < %s' % (env['db_user'], env['db_name'], remote_filename))
            else:
                print('\nAborting.')
        else:
            print('\nERROR: The file "%s" does not exist. Aborting.' % dumpfile)
    else:
        print('\nERROR: A dumpfile must be provided. Aborting.')
def upload_project_sudo(local_dir=None, remote_dir=""):
    """
    Copied from Fabric and updated to use sudo.
    """
    local_dir = local_dir or os.getcwd()
    # Remove final '/' in local_dir so that basename() works
    local_dir = local_dir.rstrip(os.sep)
    local_path, local_name = os.path.split(local_dir)
    tar_file = "%s.tar.gz" % local_name
    target_tar = os.path.join(remote_dir, tar_file)
    tmp_folder = mkdtemp()
    try:
        tar_path = os.path.join(tmp_folder, tar_file)
        # -C keeps archive paths relative to the parent of local_dir
        local("tar -czf %s -C %s %s" % (tar_path, local_path, local_name))
        put(tar_path, target_tar, use_sudo=True)
        with cd(remote_dir):
            try:
                sudo("tar -xzf %s" % tar_file)
            finally:
                # remove the remote archive even if extraction fails
                sudo("rm -f %s" % tar_file)
    finally:
        # always clean up the local temp dir
        local("rm -rf %s" % tmp_folder)
def put(self, local, remote, mode=0644):
    """Upload ``local`` to ``remote`` with the given permission mode.

    In autoconf mode the 'remote' path is actually on this machine, so a
    plain file copy + chmod is used instead of Fabric's SSH transfer.
    """
    self._debug("Uploading %s .." % remote)
    if config.autoconf:
        shutil.copyfile(local, remote)
        os.chmod(remote, mode)
    else:
        # module-level fabric put(), not recursion into this method
        put(local_path=local, remote_path=remote, mode=mode)
def hd1():
    """hadoop install => fab sete:2,5 hd1"""
    # upload the release tarball to the deploy area
    pkg_src = os.path.join(SOFTWARE_HOME, HADOOP_PKG)
    pkg_dst = os.path.join(DEPLOY_HOME, HADOOP_PKG)
    put(pkg_src, pkg_dst)
    # unpack it in place
    run('tar -zxf {0} -C {1}'.format(pkg_dst, DEPLOY_HOME))
    # drop the version suffix from the extracted directory
    versioned_dir = os.path.join(DEPLOY_HOME, 'hadoop-{0}'.format(HADOOP_VER))
    hadoop_home = os.path.join(DEPLOY_HOME, 'hadoop')
    run('mv {0} {1}'.format(versioned_dir, hadoop_home))
    # push each site config file into etc/hadoop
    for cfg in HADOOP_CFGS:
        put(os.path.join(CONF_HOME, 'hadoop', cfg),
            os.path.join(DEPLOY_HOME, 'hadoop/etc/hadoop', cfg))
    # data/work directory for the daemons
    run('mkdir -p {0}'.format(os.path.join(APP_HOME, 'hadoop')))
    # remove the uploaded tarball
    run('rm {0}'.format(pkg_dst))
def mac_setup_virtualenv():
    """Build virtualenv 1.8.4 from source into the shared ../usr prefix."""
    with cd(mac_tmp), prefix(mac_prefix):
        put("virtualenv-1.8.4.tar.gz", ".")
        run("tar xzf virtualenv-1.8.4.tar.gz")
        with cd("virtualenv-1.8.4"):
            run("python setup.py install --prefix=../usr")
def mac_setup_paver():
    """Build Paver 1.2.2 from source into the shared ../usr prefix."""
    with cd(mac_tmp), prefix(mac_prefix):
        put("Paver-1.2.2.tar.gz", ".")
        run("tar xzf Paver-1.2.2.tar.gz")
        with cd("Paver-1.2.2"):
            run("python setup.py install --prefix=../usr")
def confiure_rethinkdb():
    """Confiure of RethinkDB"""
    # NOTE(review): function name has a typo ('confiure') but it is the
    # public fab task name, so it is kept for backward compatibility
    with settings(warn_only=True):
        # copy config file to target system (0600: contains instance settings)
        put("conf/rethinkdb.conf", "/etc/rethinkdb/instances.d/default.conf", mode=0600, use_sudo=True)
        # finally restart instance
        sudo("/etc/init.d/rethinkdb restart")
def copy():
    """Upload the site files to /opt/local/codeflow on the remote host."""
    # make sure the directory is there!
    sudo('mkdir -p /opt/local/codeflow')
    # owned by the deploy user so the non-sudo put() below can write
    sudo('chown mbjerkness /opt/local/codeflow')
    # our local 'testdirectory' - it may contain files or subdirectories ...
    put('index.html', '/opt/local/codeflow/')
    put('media', '/opt/local/codeflow/')
def hb1():
    """hbase install => fab sete:2,5 hb1"""
    # upload the release tarball
    pkg_src = os.path.join(SOFTWARE_HOME, HBASE_PKG)
    pkg_dst = os.path.join(DEPLOY_HOME, HBASE_PKG)
    put(pkg_src, pkg_dst)
    # unpack in the deploy area
    run('tar -zxf {0} -C {1}'.format(pkg_dst, DEPLOY_HOME))
    # strip the version suffix from the extracted directory
    run('mv {0} {1}'.format(os.path.join(DEPLOY_HOME, 'hbase-1.0.1.1'),
                            os.path.join(DEPLOY_HOME, 'hbase')))
    # push the site config files into conf/
    for cfg in HBASE_CFGS:
        put(os.path.join(CONF_HOME, 'hbase', cfg),
            os.path.join(DEPLOY_HOME, 'hbase/conf', cfg))
    # remove the uploaded tarball
    run('rm {0}'.format(pkg_dst))
def zk1():
    """zookeeper install => fab sete:2,4 zk1"""
    # upload the release tarball
    pkg_src = os.path.join(SOFTWARE_HOME, ZOOKEEPER_PKG)
    pkg_dst = os.path.join(DEPLOY_HOME, ZOOKEEPER_PKG)
    put(pkg_src, pkg_dst)
    # unpack in the deploy area
    run('tar -zxf {0} -C {1}'.format(pkg_dst, DEPLOY_HOME))
    # strip the version suffix from the extracted directory
    run('mv {0} {1}'.format(os.path.join(DEPLOY_HOME, 'zookeeper-3.4.6'),
                            os.path.join(DEPLOY_HOME, 'zookeeper')))
    # push the main config file
    put(os.path.join(CONF_HOME, 'zookeeper', ZOOKEEPER_CFG),
        os.path.join(DEPLOY_HOME, 'zookeeper/conf', ZOOKEEPER_CFG))
    # data dir plus a per-node `myid` file on nodes 2..4
    run('mkdir -p {0}'.format(os.path.join(APP_HOME, 'zookeeper')))
    for node_id in range(2, 5):
        with settings(host_string='node{0}'.format(node_id)):
            run('mkdir -p /app/zookeeper/data')
            run('echo {0} > /app/zookeeper/data/myid'.format(node_id))
    # remove the uploaded tarball
    run('rm {0}'.format(pkg_dst))
def _putl(source_file, dest_dir):
    """ To be used instead of put, since it doesn't support symbolic links """
    # upload to / first, then move into place so symlinks survive the transfer
    put(source_file, '/')
    name = os.path.basename(source_file)
    run("mv -f /{0} {1}".format(name, dest_dir))
def upload_client(host="",server_name="" ):
    '''upload file '''
    # probe remote state up front: does the client dir and backup dir exist?
    with hide('stdout', 'stderr'):
        exist_client=sudo("if test -e %s ;then echo True;else echo False;fi" %dest_path )
        exist_bakdir=sudo("if test -e %s/version_backup ;then echo True;else echo False;fi" %client_dest_dir )
    if os.path.exists(local_path):
        # pack the local client tree and upload the tarball
        with hide('stdout', 'stderr'):
            with lcd(client_src_dir):
                print "entering dir %s" %client_src_dir
                local("tar czvf new_client.tar.gz client",capture=False)
                put("new_client.tar.gz" , client_dest_dir)
    else:
        print "\033[1;40;31mdirectory %s is not exist in %s\033[0m" %(local_path,env.host_string)
        sys.exit(1)
    # create the backup directory on first deploy
    if exist_bakdir=="True":
        pass
    else:
        sudo("mkdir %s/version_backup" %client_dest_dir)
    if exist_client=="True":
        # archive the current client under a timestamped name, then unpack
        with hide('stdout', 'stderr'):
            with cd(client_dest_dir):
                print "entering dir %s" %client_dest_dir
                date=time.strftime("%m.%d.%H.%M.%S")
                sudo("mv client version_backup/client.%s" %date)
                sudo("tar xzvf new_client.tar.gz")
    else:
        # nothing to back up; just unpack the new client
        print "\033[1;40;31mnot exists client in directory %s\033[0m" %client_dest_dir
        with hide('stdout', 'stderr'):
            with cd(client_dest_dir):
                sudo("tar xzvf new_client.tar.gz")
def run_fab_cmd_on_node(host_string, password, cmd, as_sudo=False):
    ''' Run fab command on a node.
        Usecase : as part of script running on cfgm node, can run a cmd on VM from compute node
    '''
    cmd = _escape_some_chars(cmd)
    # Fetch fabfile
    put('scripts/tcutils/fabfile.py', '~/')
    (username, host_ip) = host_string.split('@')
    cmd_str = 'fab -u %s -p "%s" -H %s -D -w --hide status,user,running ' % (
        username, password, host_ip)
    if username == 'root':
        # root needs no sudo escalation
        as_sudo = False
    elif username == 'cirros':
        # cirros images only ship /bin/sh, not bash
        cmd_str += ' -s "/bin/sh -l -c" '
    if as_sudo:
        cmd_str += 'sudo_command:\"%s\"' % (cmd)
    else:
        cmd_str += 'command:\"%s\"' % (cmd)
    # Sometimes, during bootup, there could be some intermittent conn. issue
    tries = 5
    output = None
    while tries > 0:
        output = run(cmd_str)
        if 'Fatal error' in output:
            tries -= 1
            time.sleep(5)
        else:
            break
    # end while
    real_output = remove_unwanted_output(output)
    return real_output
def deploy_script(scriptpath, *args):
    "copies to remote and executes local script"
    # mirror_local_mode keeps the local executable bit on the remote copy
    put(local_path=scriptpath, remote_path="", mirror_local_mode=True)
    name = os.path.split(scriptpath)[1]
    # run from the remote home directory with the given arguments
    run("./" + name + " " + " ".join(args))
def upload_project_sudo(local_dir=None, remote_dir=""):
    """
    Copied from Fabric and updated to use sudo.

    This variant packs with zip instead of tar so it also works when the
    local side is Windows (note the backslash normalization below).
    """
    local_dir = local_dir or os.getcwd()
    # Remove final '/' in local_dir so that basename() works
    local_dir = local_dir.rstrip(os.sep)
    local_path, local_name = os.path.split(local_dir)
    #tar_file = "%s.tar.gz" % local_name
    #target_tar = os.path.join(remote_dir, tar_file)
    zip_file = "%s.zip" % local_name
    target_zip = os.path.join(remote_dir, zip_file)
    # normalize Windows path separators for the remote (POSIX) side
    target_zip = target_zip.replace('\\','/')
    tmp_folder = mkdtemp()
    try:
        #tar_path = os.path.join(tmp_folder, tar_file)
        zip_path = os.path.join(tmp_folder, zip_file)
        #local("tar -czf %s -C %s %s" % (tar_path, local_path, local_name))
        #local("tar -czf %s %s" % (tar_path, local_dir))
        zipdir(local_dir, zip_path)
        #put(tar_path, target_tar, use_sudo=True)
        put(zip_path, target_zip, use_sudo=True)
        with cd(remote_dir):
            try:
                #sudo("tar -xzf %s" % tar_file)
                sudo("apt-get install -y unzip")
                sudo("unzip %s" % zip_file)
            finally:
                # remove the remote archive even if extraction fails
                #sudo("rm -f %s" % tar_file)
                sudo("rm -f %s" % zip_file)
    finally:
        # NOTE(review): tmp_folder is never removed locally — presumably
        # intentional on Windows, but verify
        pass
def patch_file(filename, patchfilename, use_sudo=False, backup='.ORIG'):
    ''' Patch a remote file

    :param filename: remote file to patch
    :param patchfilename: local patch file to upload and apply
    :param use_sudo: run remote commands with sudo when truthy
    :param backup: suffix for a pre-patch backup; falsy disables the backup
    :raises Exception: if the remote target file does not exist
    '''
    patchbin = '/usr/bin/patch'
    use_sudo = _boolify(use_sudo)
    if not exists(filename, use_sudo=use_sudo):
        raise Exception('FATAL: Remote file does not exist')
    # install patch(1) on demand
    if not exists(patchbin):
        pkg_install('patch')
    if backup:
        backup_orig(filename, use_sudo=use_sudo)
    # random suffix avoids collisions with concurrent runs
    remote_patchfilename = '/tmp/' + patchfilename.split('/')[-1] + '.%s' % randint(100000,1000000)
    rejectname = filename + '.rej'
    put(patchfilename, remote_patchfilename)
    # TODO: Raise exception if patch is not applyable (but only warn only if patch had
    # already been applied before)
    with settings(warn_only=True):
        # --forward: skip (don't reverse) patches that look already applied
        _run('patch --forward %s < %s' % (filename, remote_patchfilename), use_sudo=use_sudo)
        _run('rm %s' % remote_patchfilename)
        # drop any .rej file a partially-applied patch left behind
        if exists(rejectname, use_sudo=use_sudo):
            _run('rm %s' % rejectname, use_sudo=use_sudo)
def deploy(appname=None, all=False):
    """fab -H username@host deploy:appname,all"""
    # default the app name to the current directory name
    appname = appname or os.path.split(os.getcwd())[-1]
    appfolder = applications+'/'+appname
    zipfile = os.path.join(appfolder, '_update.zip')
    if os.path.exists(zipfile):
        os.unlink(zipfile)
    backup = mkdir_or_backup(appname)
    # full deploy when 'all' requested or this is the first deploy (no backup);
    # otherwise only ship code, views and static assets
    if all=='all' or not backup:
        local('zip -r _update.zip * -x *~ -x .* -x \#* -x *.bak -x *.bak2')
    else:
        local('zip -r _update.zip */*.py */*/*.py views/*.html views/*/*.html static/*')
    put('_update.zip','/tmp/_update.zip')
    try:
        with cd(appfolder):
            sudo('unzip -o /tmp/_update.zip')
            sudo('chown -R www-data:www-data *')
            # stamp the deployment time for later inspection
            sudo('echo "%s" > DATE_DEPLOYMENT' % now)
    finally:
        sudo('rm /tmp/_update.zip')
    if backup:
        print 'TO RESTORE: fab restore:%s' % backup
def copy():
    """Tar the local app (honoring env.scp_ignore_list), upload it into the
    current release directory and unpack it there.
    """
    default_ignore_list = ['build.tar.gz', ]
    ignore_list = []
    if 'scp_ignore_list' in env:
        ignore_list = env.scp_ignore_list
    # always exclude the archive itself so it can't recursively include itself
    ignore_list = ignore_list + default_ignore_list
    path = get_local_app_path()
    release_path = paths.get_deploy_path(env.current_release)
    env.run('mkdir -p {}'.format(release_path))
    with lcd(path), cd(release_path):
        build_filename = 'build.tar.gz'
        build_remote_path = "/".join([env.current_release, build_filename])
        exclude_args = map(lambda x: '--exclude="{}"'.format(x), ignore_list)
        local('tar {} -czf {} *'.format(
            ' '.join(exclude_args),
            build_filename
        ))
        put(build_filename, build_remote_path)
        env.run('tar -xzf {}'.format(build_filename))
        # clean up both the remote and local archives
        env.run('rm {}'.format(build_remote_path))
        local("rm build.tar.gz")
def action_stop(master):
    """Stop a buildbot master via the buildbot-wrangler helper script.

    :param master: dict with 'basedir', 'master_dir' and 'hostname' keys
    """
    with show('running'):
        with cd(master['basedir']):
            # ship the wrangler script next to the master before invoking it
            put(BUILDBOT_WRANGLER, '%s/buildbot-wrangler.py' % master['basedir'])
            run('python buildbot-wrangler.py stop %s' % master['master_dir'])
    print OK, "stopped %(hostname)s:%(basedir)s" % master
def load_db(dumpfile=None):
    """Loads data from a SQL script to Pootle DB"""
    require("environment", provided_by=[production, staging])
    if dumpfile is not None:
        if isfile(dumpfile):
            remote_filename = "%(project_path)s/DB_backup_to_load.sql" % env
            # proceed when the remote copy is absent, or the user confirms
            # overwriting the existing one
            if not exists(remote_filename) or confirm(
                "\n%s already exists. Do you want to overwrite it?" % remote_filename,
                default=False
            ):
                print("\nLoading data into the DB...")
                with settings(hide("stderr")):
                    put(dumpfile, remote_filename)
                    run(
                        "mysql -u %s %s %s < %s"
                        % (env["db_user"], env["db_password_opt"], env["db_name"], remote_filename)
                    )
                    # remove the dump from the server once loaded
                    run("rm %s" % (remote_filename))
            else:
                abort("\nAborting.")
        else:
            abort('\nERROR: The file "%s" does not exist. Aborting.' % dumpfile)
    else:
        abort("\nERROR: A (local) dumpfile must be provided. Aborting.")
def deploy():
    """Deploy a timestamped release of the weddingplanner app: upload the
    build archive, provision a virtualenv, run Django housekeeping, then
    atomically flip the 'current' symlink and restart the supervisor service.
    """
    timestamp = datetime.datetime.utcnow().strftime(env.timestamp_format)
    version = local('git rev-parse HEAD', capture=True).stdout.strip()
    run('mkdir -p %s' % env.dest)
    with cd(env.dest):
        run('mkdir %s' % timestamp)
        with cd(timestamp):
            remote_archive = '/tmp/weddingplanner-%s-%s.tar.gz' % (timestamp, version)
            # TODO: use rsync in a '3-way' mode (--link-dest) to minimize files transfered
            # (do the same for the locally built virtualenv)
            put('build/deploy.tar.gz', remote_archive)
            # TODO: remove --no-same-owner when built with fakeroot
            run('tar xfz %s --no-same-owner' % remote_archive)
            with hide('stdout'):
                run('virtualenv env')
            # NOTE: Temporary solution: install through running pip remotely
            run('env/bin/pip install -r requirements/%s.txt' % env.requirements)
            # NOTE: take it from the settings...
            run('mkdir assets')
            # NOTE: can also be run locally
            run('env/bin/python manage collectstatic -v 0 -l --noinput -c')
            run('env/bin/python manage migrate')
        # stop the service only if it is currently RUNNING
        with settings(warn_only=True):
            result = run('supervisorctl status | grep "%s\s\+RUNNING"' % env.service_name)
        if not result.failed:
            run('supervisorctl stop %s' % env.service_name)
        # flip the 'current' symlink to the new release, then start
        run('ln -sfn %s current' % timestamp)
        run('supervisorctl start %s' % env.service_name)
def upload_singal_file(filename=""):
    """Replace a single client file on the server and bump its version
    number in revision.swf, keeping timestamped backups of both.

    Returns 1 (with a red console warning) whenever the file cannot be
    located unambiguously, so the operator must update it by hand.
    """
    date=time.strftime("%m.%d.%H.%M.%S")
    # look up the file's current version number in revision.swf
    with hide('stdout', 'stderr','running'):
        file_value = sudo("awk -F'|' '/\<%s\>/{print $1}' %s/revision.swf " %(filename,dest_path))
    if file_value.isdigit():
        new_file_value = int(file_value) + 1
    elif len(file_value)==0:
        # no entry found: bail out and ask for a manual update
        print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
        print "\033[1;40;31mcan't find the file %s\033[0m" %filename
        print "\033[1;40;31mupdate file %s manually\033[0m" %filename
        print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
        return 1
    else:
        # multiple entries matched: ambiguous, require a manual update
        print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
        print "\033[1;40;31mfind more than one file %s in revision.swf\033[0m" %filename
        print "\033[1;40;31mupdate file %s manually\033[0m" %filename
        print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
        return 1
    # locate the file on disk; more than one hit is also ambiguous
    with hide('stdout', 'stderr','running'):
        sudo("chmod -R 777 %s" %client_dest_dir)
        filename_path=sudo("find %s -name %s" %(dest_path,filename))
    if filename_path.count('\n'):
        print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
        print "\033[1;40;31mFind more than one file %s in %s\033[0m" %(filename,dest_path)
        print "\033[1;40;31mupdate file %s manually\033[0m" %filename
        print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
        return 1
    # back up the old file and revision.swf, then swap in the new file
    sudo("cp %s %s/version_backup/%s.%s" %(filename_path,client_dest_dir,filename,date))
    sudo("cp %s/revision.swf %s/version_backup/revision.swf.%s" %(dest_path,client_dest_dir,date))
    sudo("rm -f %s" %filename_path)
    put("%s/%s" %(singal_file_dir,filename),os.path.dirname(filename_path))
    local("rm %s/%s" %(singal_file_dir,filename))
    # bump the version number on this file's line in revision.swf
    with hide('stdout', 'stderr','running'):
        output=sudo("sed -i 's/%s\(.*%s.*\)/%s\\1/' %s/revision.swf" %(file_value,filename,new_file_value,dest_path))
    print "\033[1;40;33mUpdate file revision.swf successful\033[0m"
def deploy_default_synonyms(instance): """ add default synonyms to instance this should be done only on the first deployement """ if not instance.first_deploy: return default_synonyms_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), os.path.pardir, os.path.pardir, 'static_files', 'ed', 'default_synonyms.txt') print blue("copy default synonyms for {}".format(instance.name)) put(default_synonyms_file, instance.source_dir, use_sudo=True) sudo("chown {u} {f}".format(u=env.KRAKEN_USER, f=os.path.join(instance.source_dir, 'default_synonyms.txt')))
def _push_packages_to_repo(repo_name="dev"):
    """Push local deploy directory of packages to actual repo, and refresh
    the repo.

    :param repo_name: freight apt sub-repo to publish into (default "dev")
    """
    if env.pkgfmt != "deb":
        # We only support freight, which is only for deb packages. We'd need to
        # add something that understands RPM repos as well if we want to add
        # support for CentOS here.
        print "Only pushing deb packages is supported, not pushing."
        return
    run('mkdir -p /tmp/endaga-packages-deploy')
    # upload every locally staged .deb to the same path on the repo host
    put(local_path='/tmp/endaga-packages-deploy/*.deb',
        remote_path='/tmp/endaga-packages-deploy/')
    sudo('freight add /tmp/endaga-packages-deploy/*.deb apt/%s' % repo_name)
    # regenerate the repo metadata so the new packages become visible
    sudo('freight cache apt/%s' % repo_name)
    run('rm -r /tmp/endaga-packages-deploy')
def script_fix():
    '''Push tsmcli_migration.sh script and allow tsmadm to run it'''
    tsmpath = '/opt/tivoli/tsm/client/ba/bin/'
    tsmscript = 'tsmcli_migration.sh'
    tsmsudoers = '/etc/sudoers.d/tsmadm'
    # ensure the TSM client directory exists
    if not exists(tsmpath, use_sudo=True):
        if sudo('mkdir -p ' + tsmpath, warn_only=True, quiet=True).succeeded:
            print '[ \033[92mDirectory ' + tsmpath + ' created\033[0m ]'
        else:
            print '[ \033[91mUnable to create directory : ' + tsmpath + '\033[0m ]'
            return 1
    # upload the script root-owned with mode 0754
    lscan = os.path.join(source, tsmscript)
    if put(local_path=lscan, remote_path=tsmpath + tsmscript, use_sudo=True, mode=0754).succeeded:
        sudo('chown root:root ' + tsmpath + tsmscript, quiet=True)
        print '[ \033[92mThe script ' + tsmscript + ' has been pushed\033[0m ]'
    else:
        print '[ \033[91mUnable to push ' + tsmscript + '\033[0m ]'
        return 1
    # grant tsmadm passwordless sudo for the script (idempotent)
    if sudo('grep -sqi ' + tsmpath + tsmscript + ' ' + tsmsudoers, quiet=True).succeeded:
        print '[ \033[96mtsmadm already has the rights to run ' + tsmscript + '\033[0m ]'
    else:
        # append via visudo so the sudoers fragment is syntax-checked
        if sudo('echo "tsmadm,%UsrSave ALL=(root) NOPASSWD: "' + tsmpath + tsmscript + ' | (EDITOR="tee -a" visudo -f ' + tsmsudoers + ')', warn_only=True, quiet=True).succeeded:
            print '[ \033[92mtsmadm now has the rights to run ' + tsmscript + '\033[0m ]'
        else:
            print '[ \033[91mUnable to write line in ' + tsmsudoers + '\033[0m ]'
            return 1
def upload_files_to_site(args):
    """Interactively upload the w3af release artifacts to w3af.org.

    :param args: namespace with a ``release_version`` attribute
    :returns: True on success (or skipped upload falls through), False on
              a failed transfer
    """
    bold('Do you want to upload the files to w3af.org? [Y/n]', newline=False)
    upload = raw_input()
    upload = upload.strip()
    # default (empty input) means yes
    if upload.lower() == 'y' or upload.lower() == 'yes' or upload == '':
        files = [
            'w3af-%s.tar.bz2.md5sum' % args.release_version,
            'w3af-%s.tar.bz2' % args.release_version,
            'w3af-sphinx-%s.tar.bz2' % args.release_version,
        ]
        for filename in files:
            fsize = size(os.path.getsize(filename))
            bold('Uploading %s with file size of %s' % (filename, fsize,))
            with settings(host_string='*****@*****.**'):
                success = put(filename, UPLOAD_PATH, use_sudo=True)
            if not success:
                red('File upload failed!')
                return False
        green('Uploaded files to w3af.org!')
        bold('Remember to add links to these files from wordpress.')
        return True
def do_deploy(archive_path):
    """Upload an archive to the server and deploy it.

    :param archive_path: local path to the .tgz archive
    :returns: True when every step succeeds, False otherwise
    """
    # FIX: was 'test(archive_path)' — 'test' is undefined and raised
    # NameError before the try block could catch anything
    if not os.path.exists(archive_path):
        return False
    try:
        put(archive_path, "/tmp")
        name = archive_path.split("/")[-1]
        path = "/data/web_static/releases/{}".format(name.split(".")[0])
        run("mkdir {}".format(path))
        # FIX: was "/tmp{}" — missing '/' produced a bogus path like /tmpfoo.tgz
        run("tar -xzf /tmp/{} -C {}".format(name, path))
        run("rm -rf /tmp/{} ".format(name))
        # FIX: was 'rune(...)' (NameError); also dropped the no-op .format(name)
        run("rm -rf /data/web_static/current")
        run("ln -s {} /data/web_static/current".format(path))
        return True
    except Exception:
        # any failed remote step means the deploy did not complete
        return False
def deploy(commit=None):
    """Blue/green deploy of the example app at the given git commit
    (defaults to the local HEAD): seed the repo, rebuild the virtualenv,
    point nginx at the new port and start gunicorn.
    """
    if not commit:
        commit = local('git rev-parse HEAD', capture=True)
    env.repo_path = os.path.join(env.next_path, 'repo')
    # push the commit objects to the remote repo and check it out there
    git_seed(env.repo_path, commit, submodules=True)
    git_reset(env.repo_path, commit, submodules=True)
    # stop any previous instance; '|| true' tolerates a missing pidfile
    run('kill $(cat %(pidfile)s) || true' % env)
    run('virtualenv %(virtualenv_path)s' % env)
    run('source %(virtualenv_path)s/bin/activate && '
        'pip install -r %(repo_path)s/bluegreen-example/requirements.txt' % env)
    # write the nginx upstream stanza pointing at this color's port
    put(StringIO('proxy_pass http://127.0.0.1:%(bluegreen_port)s/;' % env),
        env.nginx_conf)
    # daemonized gunicorn bound to this color's port
    run('cd %(repo_path)s/bluegreen-example && PYTHONPATH=. '
        'BLUEGREEN=%(color)s %(virtualenv_path)s/bin/gunicorn -D '
        '-b 0.0.0.0:%(bluegreen_port)s -p %(pidfile)s app:app' % env)
def _fetch_image(name, image): """ Fetches the image from s3 and copies the image to /tmp/images in the VM """ # Make local directory local("rm -rf /tmp/%s-images" % name) local("mkdir -p /tmp/%s-images" % name) # Fetch image from s3 local("aws s3 cp 's3://magma-images/%s' /tmp/%s-images" % (image, name)) # create /tmp/images directory on remote host # env has to be set up before calling this function _setup_env("magma", VM_IP_MAP["setup_1"]["gateway"], DEFAULT_KEY_FILENAME) run("rm -rf /tmp/images") run("mkdir -p /tmp/images") # copy images from local /tmp to corresponding remote /tmp/images put("/tmp/%s-images/*" % name, "/tmp/images/")
def setup_backups(self, path=None, **kwargs):
    """Install the pg_backup.sh script on the server and schedule it via
    the postgres user's crontab (daily at midnight).

    :param path: backup destination directory; defaults to self.backup_path
    """
    if not path:
        path = self.backup_path
    script = os.path.join(env.configs_dir, 'pg_backup.sh')
    sudo('mkdir -p %s' % path)
    sudo('chown {0}:{1} {2}'.format(self.user, self.group, path))
    online_path = os.path.join(path, 'pg_backup.sh')
    put(script, online_path, use_sudo=True)
    # point the script's BACKUPDIR variable at the chosen path
    sudo('sed -i s#BACKUPDIR=.*#BACKUPDIR=%s#g %s' % (path, online_path))
    sudo('chmod +x %s' % online_path)
    bash = run('which bash')
    # stage the cron entry, then load it as the postgres user's crontab
    append('/tmp/pg_cron', '0 0 * * * %s %s' % (bash, online_path))
    run('sudo su {0} -c "crontab < /tmp/pg_cron"'.format(self.user))
def deploy_landing_page():
    """Deploy landing page"""
    create_tmp_if_doesnt_exist()
    # zip the landing_page directory that sits next to this fabfile
    here = os.path.dirname(os.path.realpath(__file__))
    create_zip_archive(os.path.join(here, "landing_page"), ".tmp/landing_page.zip")
    put(".tmp/landing_page.zip", "/var/www/gtasksapp_com/www/")
    # unpack in the web root, then remove the archive
    with cd("/var/www/gtasksapp_com/www/"):
        run("unzip -o {0}".format("landing_page.zip"))
        run("rm {0}".format("landing_page.zip"))
    grunt_clean()
def _get_passwd(self, username):
    """Read the password hash for ``username`` from pg_shadow and write a
    pgbouncer userlist file into the config directory.
    """
    # query pg_shadow as the postgres superuser; hide the hash from logs
    with hide('output'):
        string = run('echo "select usename, passwd from pg_shadow where '
                     'usename=\'%s\' order by 1" | sudo su postgres -c '
                     '"psql"' %username)
    # psql output: header, separator, then the data row we want
    user, passwd = string.split('\n')[2].split('|')
    user = user.strip()
    passwd = passwd.strip()
    # build the userlist locally, upload it, then shred the temp file
    __, tmp_name = tempfile.mkstemp()
    fn = open(tmp_name, 'w')
    fn.write('"%s" "%s" ""\n' %(user, passwd))
    fn.close()
    put(tmp_name, '%s/pgbouncer.userlist'%self.config_dir, use_sudo=True)
    local('rm %s' %tmp_name)
def copy_id(file='~/.ssh/id_rsa.pub'):
    """Append the local public key to the remote authorized_keys,
    de-duplicating entries (like ssh-copy-id).
    """
    put(file, "/tmp/id_rsa.pub")
    try:
        run("if [ ! -d ~/.ssh ]; then mkdir -p ~/.ssh; fi")
        # first key ever: just install it with the right permissions
        run("if [ ! -f ~/.ssh/authorized_keys ]; then cp /tmp/id_rsa.pub ~/.ssh/authorized_keys && chmod 0600 ~/.ssh/authorized_keys; fi"
            )
        # merge existing keys with the new one and drop duplicates
        run("cat ~/.ssh/authorized_keys >> /tmp/id_rsa.pub && sort -u /tmp/id_rsa.pub > ~/.ssh/authorized_keys"
            )
    finally:
        run("rm -f /tmp/id_rsa.pub")
def setup_gui():
    """ puts the sarb gui tar on server (including tomcat and starts it up) """
    set_user_keys()
    # remember the chosen install dir for subsequent tasks
    global INSTALL_DIR
    INSTALL_DIR = prompt("Which directory do you want to place tomcat in",
                         default = INSTALL_DIR)
    sudo("mkdir -p %s" % (INSTALL_DIR))
    put("./sarbTomcat.zip", INSTALL_DIR, use_sudo = True)
    with cd(INSTALL_DIR):
        sudo("unzip sarbTomcat.zip")
    # bind tomcat's connector to this host's IP only
    ip = sudo("hostname -i")
    sed('{}/sarbTomcat/conf/server.xml'.format(INSTALL_DIR),
        '<Connector',
        '<Connector address=\"{}\" '.format(ip),
        use_sudo = True)
    sudo("{}/sarbTomcat/bin/catalina.sh start".format(INSTALL_DIR))
def do_deploy(archive_path):
    """ Deploys packed content to servers

    :param archive_path: local path to the .tgz archive
    :returns: True when every step succeeds, False otherwise
    """
    if os.path.exists(archive_path) is not True:
        return False
    if not put(archive_path, "/tmp/").succeeded:
        return False
    # FIX: was 'archive_path[9:]', which hard-coded a 9-character
    # "versions/" prefix and corrupted the name for any other path
    filename = os.path.basename(archive_path)
    foldername = "/data/web_static/releases/" + filename[:-4]
    filename = "/tmp/" + filename
    if run("mkdir -p {}".format(foldername)).failed:
        return False
    if run("tar -zxf {} -C {}".
           format(filename, foldername)).failed:
        return False
    if run("rm {}".format(filename)).failed:
        return False
    # flatten the extracted web_static/ directory into the release folder
    if run('mv {}/web_static/* {}'.format(foldername, foldername)).failed:
        return False
    if run("rm -rf {}/web_static".format(foldername)).failed:
        return False
    # repoint the 'current' symlink at the new release
    if run("rm -rf /data/web_static/current").failed:
        return False
    if run("ln -s {} /data/web_static/current".format(foldername)).failed:
        return False
    return True
def do_deploy(archive_path):
    """ uploads the archive to servers

    :param archive_path: local path to the .tgz archive
    :returns: True when every step succeeds, False otherwise
    """
    destination = "/tmp/" + archive_path.split("/")[-1]
    result = put(archive_path, "/tmp/")
    if result.failed:
        return False
    filename = archive_path.split("/")[-1]
    f = filename.split(".")[0]
    directory = "/data/web_static/releases/" + f
    run_res = run("mkdir -p \"%s\"" % directory)
    if run_res.failed:
        return False
    run_res = run("tar -xzf %s -C %s" % (destination, directory))
    if run_res.failed:
        return False
    run_res = run("rm %s" % destination)
    # FIX: was 'if run_res:' — that truth-tests the command's *output*
    # string, not its exit status, so a successful quiet rm with output
    # (or any stdout) aborted the deploy
    if run_res.failed:
        return False
    # flatten the extracted web_static/ directory into the release folder
    web = directory + "/web_static/*"
    run_res = run("mv %s %s" % (web, directory))
    if run_res.failed:
        return False
    # strip the trailing '/*' to get the now-empty web_static directory
    web = web[0:-2]
    run_res = run("rm -rf %s" % web)
    if run_res.failed:
        return False
    # repoint the 'current' symlink at the new release
    run_res = run("rm -rf /data/web_static/current")
    if run_res.failed:
        return False
    run_res = run("ln -s %s /data/web_static/current" % directory)
    if run_res.failed:
        return False
    return True
def put_netperf_files():
    """Copy local netperf binaries to the remote host and reset netserver."""
    logging.debug("copying netperf files")
    with settings(warn_only=True):
        upload = put("~/net*", "~/")
        if upload.failed:
            logging.error("Unable to copy files to remote host")
        # Stop any running netserver and make the fresh binary executable.
        sudo("killall -e netserver")
        sudo("chmod 777 ./netperf")
def create_ssh_login(destination=None):
    """Install this host's SSH public key on *destination*.

    Generates ~/.ssh/id_rsa if needed, downloads the public key, uploads
    it to the destination, appends it to authorized_keys and registers it
    with dokku via sshcommand.

    Args:
        destination: host string to push the key to; defaults to the
            configured destination host's complete address.
    """
    # BUG FIX: the default was `dest_host().complete_address()` evaluated
    # once at module import time, freezing the destination forever (and
    # running dest_host() even when this task was never invoked).
    # Resolve it lazily at call time instead.
    if destination is None:
        destination = dest_host().complete_address()
    print(destination)
    path = "~/.ssh/id_rsa"
    local_file = "id_rsa_{}.pub".format(get_current_host_name())
    pub_file = path + ".pub"
    if not exists(pub_file):
        run("ssh-keygen -t rsa -f {} -q -N \"\"".format(path))
    get(pub_file, local_file)
    with settings(host_string=destination):
        remote_filepath = "~/.ssh/{}".format(local_file)
        if not exists(remote_filepath):
            put(local_file, remote_filepath)
            run("cat {} >> ~/.ssh/authorized_keys".format(remote_filepath))
            run("cat {} | sudo sshcommand acl-add dokku {}".format(
                remote_filepath, get_current_host_name()))
def _copy_secrets():
    """Upload the environment's secrets file and stamp the deploy time.

    :return: None
    """
    env_file = ".env.{}".format(ENV.name)
    target = os.path.join(ENV.project_dir, ".env")
    message = "Copying {secret} to {remote_path} on {host}".format(
        secret=env_file, remote_path=target, host=ENV.host)
    print(blue(message))
    put(env_file, target)
    # Record when this deployment happened alongside the secrets.
    with cd(ENV.project_dir):
        run("echo 'DEPLOYMENT_DATETIME=%s' >> %s" % (
            ENV.DEPLOYMENT_DATETIME, target))
def config_upload():
    """Package the local Dockerfile/config tree and upload it to the server.

    Tars the local ``containers/`` directory, uploads the archive into the
    remote deploy directory and unpacks it there, cleaning up the temporary
    archives on both ends.

    :return: None
    """
    if not exists(depc.deploy_dir):
        print('创建远程目录...')
        run('mkdir -p %s' % depc.deploy_dir)
    print('打包文件...')
    tar_name = '%s.tar.gz' % depc.name
    with lcd(depc.build):
        local('tar -zcvf %s containers/' % tar_name)
        print('删除本地文件...')
        local('rm -rf containers')
    print('上传压缩文件...')
    with settings(warn_only=True):
        result = put(depc.build + '/%s' % tar_name,
                     depc.deploy_dir + '/%s' % tar_name)
        local('rm %s' % tar_name)
    if result.failed and not confirm("put file failed, Continue[Y/N]?"):
        # BUG FIX: the old code called os.abort("上传文件失败") —
        # os.abort() takes no arguments (so that call raised TypeError)
        # and would dump core rather than exit cleanly. Use Fabric's
        # abort() like the rest of this fabfile.
        abort("上传文件失败")
    with cd(depc.deploy_dir):
        print('解压远程文件...')
        run('tar -xzvf %s --strip-components 1' % tar_name)
        print('删除远程文件...')
        run('rm %s' % tar_name)
def do_deploy(archive_path):
    """Distribute an archive to the web servers.

    Returns:
        True if all operations succeed,
        False if archive_path doesn't exist or any remote step fails.
    """
    import os
    if not os.path.exists(archive_path):
        return False
    if not put(archive_path, "/tmp/").succeeded:
        return False
    # Use the archive's basename instead of a hard-coded prefix length
    # (the old [11:] slice silently corrupted names for any path whose
    # leading directory was not exactly 11 characters).
    filename = os.path.basename(archive_path)
    foldername = "/data/web_static/releases/" + filename[:-4]
    filename = "/tmp/" + filename
    if not run('mkdir -p {}'.format(foldername)).succeeded:
        return False
    if not run('tar -xzf {} -C {}'.format(filename, foldername)).succeeded:
        return False
    if not run('rm {}'.format(filename)).succeeded:
        return False
    # Flatten the extracted web_static/ directory into the release root.
    if not run('mv {}/web_static/* {}'.format(foldername, foldername)).succeeded:
        return False
    if not run('rm -rf {}/web_static'.format(foldername)).succeeded:
        return False
    if not run('rm -rf /data/web_static/current').succeeded:
        return False
    return run('ln -s {} /data/web_static/current'.format(
        foldername)).succeeded
def installGit():
    """Upload and run the git setup script on the remote Ubuntu host.

    Creates ~/.vayu as a working directory, and only runs the installer
    when the probe command fails (non-zero return code).
    """
    # Tolerate a failing probe command below instead of aborting the task.
    env.warn_only = True
    user = env.user
    code_dir = '/home/'+user+'/.vayu'
    run('mkdir -p '+ code_dir )
    # NOTE(review): this probes `java -version` to decide whether to
    # install *git* — looks like it should be `git --version`; confirm
    # intent before changing (perhaps setupgit_ubuntu.sh installs java too).
    codeResult = run('java -version')
    if codeResult.return_code != 0:
        with cd(code_dir):
            put('setup_files/setupgit_ubuntu.sh',code_dir)
            sudo('chmod 777 setupgit_ubuntu.sh')
            sudo('./setupgit_ubuntu.sh')
            # Remove the one-shot installer after it has run.
            sudo('rm setupgit_ubuntu.sh')
    else:
        print("git is already installed")
    # Restore strict failure handling for subsequent tasks.
    env.warn_only = False
def _prepend_remote_shell_script(self, script, remote_path, **put_kwargs):
    """Insert *script* into the remote file at *remote_path* before its
    first script line (see prepend_shell_script() for what counts as a
    script line).

    :param script: the script text to be inserted
    :param remote_path: path of the file on the remote host
    :param put_kwargs: extra keyword arguments forwarded to Fabric's put
    """
    with closing(StringIO()) as patched, closing(StringIO()) as original:
        # Pull the current remote file into an in-memory buffer.
        get(remote_path=remote_path, local_path=original)
        original.seek(0)
        prepend_shell_script('\n' + script, original, patched)
        # Rewind and push the patched content back to the same path.
        patched.seek(0)
        put(remote_path=remote_path, local_path=patched, **put_kwargs)
def deploy(local_path=None):
    """Upload an RPM to the remote packages directory.

    Args:
        local_path: local filesystem path of the RPM to deploy.

    Aborts when no valid RPM file path is given.
    """
    # BUG FIX: with the default None, os.path.isfile(None) raised a
    # TypeError instead of producing the clean abort message — guard the
    # None case explicitly.
    if not local_path or not os.path.isfile(local_path):
        abort('RPM file not found at %s.' % local_path)
    _LOGGER.info("Deploying rpm on %s..." % env.host)
    print("Deploying rpm on %s..." % env.host)
    sudo('mkdir -p ' + constants.REMOTE_PACKAGES_PATH)
    ret_list = put(local_path, constants.REMOTE_PACKAGES_PATH, use_sudo=True)
    if not ret_list.succeeded:
        _LOGGER.warn("Failure during put. Now using /tmp as temp dir...")
        # Retry with a world-writable temp dir for the sudo move.
        ret_list = put(local_path, constants.REMOTE_PACKAGES_PATH,
                       use_sudo=True, temp_dir='/tmp')
    if ret_list.succeeded:
        print("Package deployed successfully on: " + env.host)
def configure_nginx():
    """Install and enable the www.ajibika.org nginx virtual host.

    Copies the site config to sites-available, removes the stock default
    site, symlinks the new site into sites-enabled and reloads nginx.
    Intended for freshly-provisioned servers.
    """
    #Do this for new servers only
    run("sudo /etc/init.d/nginx start")
    print green("Copying nginx.config virtual host file for ajibika.org to the sites-available directory")
    with settings(warn_only=True):
        # Drop any stale enabled link before re-uploading the vhost file.
        if file_exists("/etc/nginx/sites-available/www.ajibika.org"):
            run("sudo rm /etc/nginx/sites-enabled/www.ajibika.org")
        result = put("conf/www.ajibika.org", "/etc/nginx/sites-available/", use_sudo=True)
    if result.failed and not confirm("Unable to copy www.ajibika.org to sites-enabled dir. Continue anyway?"):
        abort("Aborting at user request.")
    print green("conf/www.ajibika.org has been copied")
    print red("Removing old nginx configs")
    # The distro's default site would shadow/conflict with the new vhost.
    if file_exists("/etc/nginx/sites-enabled/default"):
        result = run("sudo rm /etc/nginx/sites-enabled/default")
        if result.failed and not confirm("Unable to Removing old nginx configs. Continue anyway?"):
            abort("Aborting at user request.")
    print magenta("Now Symlinking the ajibika virtual host file to sites sites-enabled")
    if not file_exists("/etc/nginx/sites-enabled/www.ajibika.org"):
        with settings(warn_only=True):
            result = run("sudo ln -s /etc/nginx/sites-available/www.ajibika.org /etc/nginx/sites-enabled/www.ajibika.org")
        if result.failed and not confirm("Unable to symlink the angani \
virtual host file to sites sites-enabled. Continue anyway?"):
            abort("Aborting at user request.")
    print "sudo reload nginx"
    run("sudo /etc/init.d/nginx reload")
def _run_nginx(): cf = env.cf with lcd(cf.module_path): if os.path.exists("%s/nginx" % cf.module_path) and cf.nginx_path: if os.path.exists(cf.source_project_path + "/nginx.conf"): local( 'tar czvf {0}-nginx.tar.gz nginx -C {1} nginx.conf'.format( cf.app_name, cf.source_project_path)) else: local('tar czvf {0}-nginx.tar.gz nginx'.format(cf.app_name)) put_remote_path = '/tmp/fab_nginx/{0}/{1}'.format( cf.git_root_name, cf.app_name) put_remote_file = '{0}/{1}-nginx.tar.gz'.format( put_remote_path, cf.app_name) put_source_path = '{0}/{1}-nginx.tar.gz'.format( cf.module_path, cf.app_name) run("rm -rf {0}".format(put_remote_path)) run('mkdir -p {}'.format(put_remote_path)) result = put(put_source_path, put_remote_file) if result.succeeded: print green(u'put success: {} -> {}'.format( put_source_path, put_remote_path)) run("tar -xvzf {0} -C {1}".format(put_remote_file, put_remote_path)) _n(put_remote_path)
def _scp(cls, args):
    """Copy every built RPM for *args* to the remote packages directory.

    Aborts when an expected RPM file is missing locally; retries each
    upload through /tmp when the default temp dir is not usable.
    """
    remote_packages_path = args.config["remote_packages_path"]
    for rpm in cls.get_rpm_path(args):
        if not os.path.isfile(rpm):
            abort('RPM file not found at %s.' % rpm)
        logging.info("Deploying rpm on %s" % env.host)
        sudo('mkdir -p ' + remote_packages_path)
        outcome = put(rpm, remote_packages_path, use_sudo=True)
        if not outcome.succeeded:
            logging.warn("Failure during put. Now using /tmp as temp dir")
            # Retry with a world-writable temp dir for the sudo move.
            outcome = put(rpm, remote_packages_path, use_sudo=True,
                          temp_dir='/tmp')
        if outcome.succeeded:
            logging.info("Package deployed successfully on: %s " % env.host)
def do_deploy(archive_path):
    '''Deploy an archive onto the web server.

    Copies the archive to /tmp/, unpacks it under
    /data/web_static/releases/ and relinks /data/web_static/current.

    Returns:
        True on success, False when any step fails.
    '''
    filename = archive_path.partition('/')[2]
    data_path = '/data/web_static/releases/' + filename.partition('.')[0]
    try:
        if put(archive_path, '/tmp/{}'.format(filename)).failed:
            return False
        if run('mkdir -p {}'.format(data_path)).failed:
            return False
        if run('tar -xzf /tmp/{} -C {}'.format(filename, data_path)).failed:
            return False
        if run('rm /tmp/{}'.format(filename)).failed:
            return False
        # Flatten the extracted web_static/ dir into the release root.
        if run('mv {}/web_static/* {}'.format(data_path, data_path)).failed:
            return False
        if run('rm -rf {}/web_static'.format(data_path)).failed:
            return False
        if run('rm -rf /data/web_static/current').failed:
            return False
        if run('ln -s {} /data/web_static/current'.format(data_path)).failed:
            return False
    # BUG FIX: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch ordinary errors only.
    except Exception:
        return False
    return True
def file_transfer(self, type, node_file, local_file):
    """Transfer a file to or from this node over SSH.

    :param type: direction of the transfer, "get" or "put"
    :param node_file: path on the node
    :param local_file: path on the local side
    :return: the Fabric transfer result, or None for an unknown type
    """
    connection = settings(host_string='%s@%s' % (self.username, self.ip),
                          password=self.password,
                          warn_only=True,
                          abort_on_prompts=False)
    with connection:
        if type == "get":
            return get(node_file, local_file)
        elif type == "put":
            return put(node_file, local_file)
def task_database_restore_locally(db_name, dba, backup_path, remote_backup_path):
    """Upload a directory-format pg_dump backup and restore it remotely.

    dba: role to create (interactively prompted for a password) before
         restoring; db_name appears unused here — the database name comes
         from the dump's --create section (NOTE(review): confirm intended).
    backup_path: local directory-format backup to upload (deleted after).
    remote_backup_path: staging directory on the remote host.
    """
    run('mkdir -p %s' % remote_backup_path)
    backup_dir_name = os.path.basename(backup_path)
    print backup_path, remote_backup_path
    put(backup_path, remote_backup_path)
    # Point at the uploaded copy inside the staging directory.
    remote_backup_path = os.path.join(remote_backup_path, backup_dir_name)
    # NOTE(review): sudo_user value looks scrubbed ('******') —
    # presumably 'postgres'; verify before running.
    with settings(sudo_user='******'):
        # warn_only: the role may already exist from a previous restore.
        sudo('createuser -S -D -R -P %s' % dba, warn_only=True)
        sudo(
            "pg_restore -d postgres -j 3 --create --exit-on-error --verbose -F d %s"
            % remote_backup_path)
    # Clean up both the local source and the remote staging copy.
    local("rm -Rf %s" % backup_path)
    sudo("rm -Rf %s" % remote_backup_path)
def send_file(self, hostname, local_file, remote_path):
    """Upload *local_file* to *remote_path* on *hostname* over SSH.

    Uses the key in self.ssh_key and the user from get_ssh_user().

    Returns a dict with keys:
        'status': '0' on success, '1' on failure (string)
        'output': the Fabric put result, or the exception message
        'return_code': 0/1 (int) — only set when put() actually ran
    """
    response_dict = dict()
    try:
        ssh_user = self.get_ssh_user()
        # Silently does nothing (empty dict returned) when no ssh user
        # is configured.
        if ssh_user:
            with settings(host_string=hostname, user=ssh_user,
                          key_filename=self.ssh_key, warn_only=False,
                          timeout=180):
                status = put(local_file, remote_path)
                if status.failed:
                    response_dict['status'] = '1'
                    response_dict['output'] = status
                    response_dict['return_code'] = 1
                else:
                    response_dict['status'] = '0'
                    response_dict['output'] = status
                    response_dict['return_code'] = 0
                print 'Response: {0}, Return code: {1}'.format(
                    str(status), response_dict['return_code'])
    except Exception as e:
        # On an exception 'return_code' is deliberately left unset.
        response_dict['status'] = '1'
        response_dict['output'] = e.message
    finally:
        # Always tear down the Fabric connection cache.
        disconnect_all()
    return response_dict
def _run_front(): cf = env.cf with lcd(cf.module_path): local('cnpm install') local('npm run %s' % env.runmode) local('tar czvf dist.tar.gz dist') put_remote_path = '{0}/{1}'.format(cf.remote_path, cf.app_name) put_remote_file = '{0}/dist.tar.gz'.format(put_remote_path) put_source_path = '{0}/dist.tar.gz'.format(cf.module_path) if int(run('[ -e "{}" ] && echo 1 || echo 0'.format( put_remote_path))) == 0: run('mkdir -p {}'.format(put_remote_path)) result = put(put_source_path, put_remote_file) if result.succeeded: print green(u'put success: {} -> {}'.format( put_source_path, put_remote_path)) run("tar -xvzf {0} -C {1}".format(put_remote_file, put_remote_path)) if int( run('[ -e "{0}/html/{1}" ] && echo 1 || echo 0'.format( cf.nginx_path, cf.app_name))) == 0: run('mkdir -p {0}/html/{1}'.format(cf.nginx_path, cf.app_name)) run('cp -rf {0}/dist/* {1}/html/{2}'.format( put_remote_path, cf.nginx_path, cf.app_name))