def go_check(self):
    """Run the upgrade flow for this host and collect check results.

    Collects pre-upgrade info, dispatches the upgrade according to the
    kickstart file name, collects post-upgrade info, then runs the checks.
    The host is always removed from RHVM afterwards.

    Returns:
        dict: case-name -> result mapping from run_cases(), or {} on error.
    """
    disconnect_all()
    cks = {}
    try:
        if not self._collect_infos('old'):
            raise RuntimeError("Failed to collect old infos.")
        # Fix: previously `ret` was never assigned when ksfile matched no
        # branch below, raising NameError instead of the intended error.
        ret = False
        if "yum_update" in self.ksfile:
            # The 4th '_'/'.'-separated token of the ks file name encodes
            # the network topology (vlan / bond / plain).
            topo = re.split(r'_|\.', self.ksfile)[3]
            if topo == 'vlan':
                ret = self._yum_update_process(is_vlan=True, is_bond=False)
            elif topo == 'bond':
                ret = self._yum_update_process(is_vlan=False, is_bond=True)
            else:
                ret = self._yum_update_process(is_vlan=False, is_bond=False)
        elif "yum_install" in self.ksfile:
            ret = self._yum_install_process()
        elif "rhvm_upgrade" in self.ksfile:
            ret = self._rhvm_upgrade_process()
        if not ret:
            raise RuntimeError("Failed to run upgrade.")
        if not self._collect_infos('new'):
            raise RuntimeError("Failed to collect new infos.")
        cks = self.run_cases()
    except Exception as e:
        log.error(e)
    finally:
        self._del_host_on_rhvm()
    return cks
def run_as_handyrep(self, servername, commands):
    """Execute *commands* on *servername* as the "handyrep" ssh user.

    Commands run in order; execution stops at the first failed command or
    connection error.  Returns a dict describing the last command run,
    with keys "result", "details" and "return_code".
    """
    server = self.servers[servername]
    env.key_filename = server["ssh_key"]
    env.user = server["ssh_user"]
    env.disable_known_hosts = True
    env.host_string = server["hostname"]
    rundict = {
        "result": "SUCCESS",
        "details": "no commands provided",
        "return_code": None,
    }
    for cmd in commands:
        try:
            outcome = run(cmd, warn_only=True)
        except Exception as ex:
            rundict = {
                "result": "FAIL",
                "details": "connection failure: %s" % exstr(ex),
                "return_code": None,
            }
            break
        rundict.update({
            "details": outcome,
            "return_code": outcome.return_code,
        })
        if outcome.succeeded:
            rundict.update({"result": "SUCCESS"})
        else:
            rundict.update({"result": "FAIL"})
            break
    disconnect_all()
    return rundict
# NOTE(review): this function appears corrupted -- several string literals
# have been replaced by "******" redaction markers (e.g. inside the print
# calls and the `prompts` dict), so as written this is not valid Python.
# Restore the original literals from version control before use; the code
# below is intentionally left byte-identical.
def enter_password_change(self, username=None, old_password=None): """ Responds to a forced password change via `passwd` prompts due to password expiration. """ from fabric.state import connections from fabric.network import disconnect_all r = self.local_renderer # print('self.genv.user:'******'self.env.passwords:', self.env.passwords) r.genv.user = r.genv.user or username r.pc('Changing password for user {user} via interactive prompts.') r.env.old_password = r.env.default_passwords[self.genv.user] # print('self.genv.user:'******'self.env.passwords:', self.env.passwords) r.env.new_password = self.env.passwords[self.genv.user] if old_password: r.env.old_password = old_password prompts = { '(current) UNIX password: '******'Enter new UNIX password: '******'Retype new UNIX password: '******'%s': " % r.genv.user: r.env.new_password, # "Login password for '%s': " % r.genv.user: r.env.old_password, } print('prompts:', prompts) r.env.password = r.env.old_password with self.settings(warn_only=True): ret = r._local( "sshpass -p '{password}' ssh -o StrictHostKeyChecking=no {user}@{host_string} echo hello", capture=True) #code 1 = good password, but prompts needed #code 5 = bad password #code 6 = good password, but host public key is unknown if ret.return_code in (1, 6) or 'hello' in ret: # Login succeeded, so we haven't yet changed the password, so use the default password. self.genv.password = r.env.old_password elif self.genv.user in self.genv.user_passwords: # Otherwise, use the password or key set in the config. self.genv.password = r.env.new_password else: # Default password fails and there's no current password, so clear. self.genv.password = None print('using password:'******'echo checking for expired password') print('ret:[%s]' % ret) do_disconnect = 'passwd: password updated successfully' in ret print('do_disconnect:', do_disconnect) if do_disconnect: # We need to disconnect to reset the session or else Linux will again prompt # us to change our password. 
disconnect_all() # Further logins should require the new password. self.genv.password = r.env.new_password
def install_viralassembly_cleanall(env):
    """Tear down the viral assembly installation; always drops connections."""
    try:
        _initialize_env("viral")
        target_dir = "%(VIRAL_ROOT_DIR)s" % env
        _remove_dir(target_dir)
        print("Viral Assembly Removed\n")
    finally:
        disconnect_all()
def go_check(self):
    """Build the check-data map and run cases; returns {} if the map can't be set."""
    disconnect_all()
    cks = self.run_cases() if self._set_checkdata_map() else {}
    return cks
def close():
    """Tear down every cached fabric SSH connection."""
    network.disconnect_all()
def install_vicvb(env):
    """Install VICVB, its perl dependencies and a pinned tbl2asn binary."""
    try:
        _initialize_env("vicvb")
        _apt_get_install("libperlio-gzip-perl")
        _apt_get_install("liblocal-lib-perl")
        download_dir = "/usr/local/tbl2asn_download"
        bin_dir = "/usr/local/bin"
        # Preserve any pre-existing tbl2asn before overwriting it.
        if _path_exists(os.path.join(bin_dir, "tbl2asn")):
            sudo("mv %s/tbl2asn %s/tbl2asn_pre_VCR" % (bin_dir, bin_dir))
        _add_package(
            "ftp://ftp.ncbi.nih.gov/toolbox/ncbi_tools/converters/by_program/tbl2asn",
            "linux64.tbl2asn.gz",
            download_dir,
            "gzip")
        sudo("chmod 777 %s/linux64.tbl2asn" % download_dir)
        sudo("mv %s/linux64.tbl2asn %s/tbl2asn" % (download_dir, bin_dir))
        _remove_dir(download_dir)
        with cd("~"):
            sudo("git clone git://github.com/JCVI-Cloud/VICVB.git")
        with cd("~/VICVB"):
            sudo(
                "lib/VICVB/data/install/install_to_dir_full.sh %s /mnt/galaxyTools/galaxy-central /"
                % (env.VICVB_LOCAL_DIR))
    finally:
        disconnect_all()
def execute(commands):
    """Run each fabric task named in *commands*, mirroring fab's CLI loop.

    Returns a list with one result dict per command.  Interrupts exit the
    process; unexpected errors are reported via sys.excepthook and also
    exit, because stale worker threads could otherwise hang the process.
    """
    results = []
    try:
        for name in commands:
            # no extra args / host restrictions are supported here
            args, kwargs = [], {}
            hosts, roles, exclude = [], [], []
            results.append(fab_execute(name,
                                       hosts=hosts,
                                       roles=roles,
                                       exclude_hosts=exclude,
                                       *args, **kwargs))
    except SystemExit:
        # a number of internal functions might raise this one.
        raise
    except KeyboardInterrupt:
        if output.status:
            sys.stderr.write("\nStopped.\n")
        sys.exit(1)
    except:
        sys.excepthook(*sys.exc_info())
        # we might leave stale threads if we don't explicitly exit()
        sys.exit(1)
    finally:
        disconnect_all()
    return results
def main():
    """Set the fabric host list, run uname on every host, then disconnect."""
    # getsids()
    print("Before set hosts")
    set_hosts()
    print("After set hosts")
    uname()
    disconnect_all()
def perform_run_remotely(base_dispatcher, intent):
    """
    Run a series of commands on a remote host.
    """
    effect_handlers = TypeDispatcher({
        Run: perform_run,
        Sudo: perform_sudo,
        Put: perform_put,
        Comment: perform_comment,
    })
    combined = ComposedDispatcher([effect_handlers, base_dispatcher])
    target = "%s@%s" % (intent.username, intent.address)
    with settings(connection_attempts=24,
                  timeout=5,
                  pty=False,
                  host_string=target):
        sync_perform(combined, intent.commands)
    disconnect_all()
def provision_instance(self, inst):
    """Provision *inst* over SSH as self.user against its public DNS name."""
    # this seems to cure random SSH connection issues.
    disconnect_all()
    worker = Provisioner(self)
    with settings(host_string=inst.public_dns_name, user=self.user):
        worker.execute()
def run_cmd(cmd):
    """Execute *cmd* on the backup RBD host and return its fabric result."""
    try:
        outcome = tasks.execute(lambda: run(cmd))
        return outcome[settings.BACKUP_RBD_HOST]
    finally:
        disconnect_all()
def provision_instance(self, inst):
    """Provision *inst* within this object's instance context manager."""
    # this seems to cure random SSH connection issues.
    disconnect_all()
    worker = Provisioner(self)
    with self.and_instance(inst):
        worker.execute()
def deploy_using_rsync():
    """Rsync the local build/ tree to the configured deploy dir, pruning extras."""
    source = "%s/build/" % os.getcwd()
    try:
        rsync_project(local_dir=source,
                      remote_dir=app.config['DEPLOY_DIR'],
                      delete=True)
    finally:
        disconnect_all()
def sync_all(actor=None):
    """Sync every automatically-managed Login, then quietly drop connections."""
    try:
        for entry in Login.objects.exclude(machine__manual=True):
            entry.sync(actor=actor)
    finally:
        with settings(hide(*ALL_FABRIC_WARNINGS)):
            disconnect_all()
def multi_merge_results(num): execute(delete_data) time.sleep(2) execute(init_data) time.sleep(3) disconnect_all() threads = [] messages = [] queue = Queue.Queue() for i in xrange(1,int(num)+1): for j in xrange(0,3): t = Multi_ExecThread(i, 8983+j, queue) threads.append(t) for t in threads: t.start() for t in threads: t.join() while(not queue.empty()): messages.append(queue.get()) #results = execute(sudo_java_run) timeregex = re.compile('.*\s.*\s.*\s.*\s.*\sTime spent: 0:00:(.*)') timestamps = [] for message in messages: for result in message.itervalues(): match = timeregex.match(result).group(1) timestamps.append(float(match)) print timestamps print "avg : %f" % (sum(timestamps,0.0)/len(timestamps))
def main(): env['host_tuples'] = [] env['errors'] = [] tasks.execute(get_disk_space) host_list = env['host_tuples'] error_list = env['errors'] if error_list: print '-' * 80 print 'ERRORS:' print for item in error_list: print '%s %s %s' % item print if not host_list: print 'ERROR: something bad happened, no host data' return low_space_list = [] total_space = 0 for host, freespace in host_list: total_space += freespace if freespace <= THRESHOLD: low_space_list.append((host, freespace)) avg_free_space = total_space / len(host_list) if low_space_list: print '-' * 80 print 'LOW DISK SPACE:' for item in low_space_list: print '%-32s %s' % item print print 'AVERAGE FREE DISK SPACE: %s' % avg_free_space disconnect_all()
# CLI entry point: parse --<adjective> retention counts and a host:path
# target, then run remote.pyrsnapshot against it via fabric's ssh_config.
# NOTE(review): left byte-identical -- the exact line breaks inside the
# triple-quoted description were lost when this source was collapsed, so a
# rewrite could silently change the user-visible help text.
def main(): parser = argparse.ArgumentParser( prog='pyrsnapshot', description='''\ Pyrsnapshot will back up the contents of the current directory to the remote server and save it under the path given. Use the executing user's .ssh/ssh_config to configure the user and private key for the connection. ''', ) help = 'Keep how many {} backups (default: {})' for adjective, default in zip(remote.ADJECTIVES, remote.DEFAULTS): parser.add_argument('--{}'.format(adjective), type=int, default=default, help=help.format(adjective, default)) parser.add_argument('remote', metavar='host:path', help='Backup to the remote host at the given path') try: args = parser.parse_args() host, root = args.remote.split(':') if not root: raise Exception("You must specify the remote path to backup to.") with quiet(): env.use_ssh_config = True execute(remote.pyrsnapshot, root, args, hosts=[host]) finally: disconnect_all()
def sync(self, actor=None):
    """Push this login's authorized_keys file to its machine.

    Returns True on success, False on failure (login stays dirty), or
    None when no update was attempted (machine down/manual, or clean).
    """
    if self.machine.is_down or self.machine.manual or not self.is_dirty:
        # No update required (either impossible or not needed)
        return None
    command = "echo '%s' > ~/.ssh/authorized_keys" % self.formatted_keys()
    success, output = self.run(command,
                               self.get_application_key().private_key)
    message = "%successful %s" % ("S" if success else "Uns",
                                  "key deployment")
    LoginLog.objects.create(stderr=output.stderr,
                            stdout=output.stdout,
                            actor=actor,
                            login=self,
                            message=message)
    with settings(hide(*ALL_FABRIC_WARNINGS)):
        disconnect_all()
    if success:
        self.is_dirty = False
        self.application_key = ApplicationKey.get_latest()
        self.save()
    return success
def install(root_password=None):
    """First-time install: user, clone, venv, deps, db, assets, services."""
    create_user()
    stop()
    # create project's parent directory
    if not files.exists(env.config['root']):
        sudo('mkdir -p %s' % env.config['root'])
    sudo('chown {user}:{user} -R {root}'.format(**env.config))
    sudo('chmod g+w -R {root}'.format(**env.config))
    # git clone
    if not files.exists(env.config['path']):
        sudo('git clone {repository} {path}'.format(**env.config),
             user=env.config['repo_manager'])
    else:
        update()
    setup_fs_permissions()
    network.disconnect_all()
    setup_virtualenv()
    pip_install()
    setup(root_password)
    migratedb()
    npm_install()
    collectstatic()
    start()
    reload_webserver()
    setup_fs_permissions()
    print(green(u'Success!\n\n\n\n'),
          yellow(u'The project should be running now'))
def main():
    """Nova CLI dispatcher: build the target and run the selected sub-task."""
    try:
        target = Nova(user=args.user, hosts=args.hosts.split(','))
    except AttributeError:
        parser.print_help()
        sys.exit(1)
    name = args.subparser_name
    if name == 'create-nova-db':
        execute(target._create_nova_db,
                args.root_db_pass,
                args.nova_db_pass)
    if name == 'create-service-credentials':
        execute(target._create_service_credentials,
                args.os_password,
                args.os_auth_url,
                args.nova_pass,
                args.endpoint,)
    if name == 'install':
        execute(target._install_nova,
                args.connection,
                args.auth_uri,
                args.auth_url,
                args.nova_pass,
                args.my_ip,
                args.memcached_servers,
                args.rabbit_hosts,
                args.rabbit_pass,
                args.glance_host,
                args.neutron_endpoint,
                args.neutron_pass,
                args.metadata_proxy_shared_secret)
    disconnect_all()
def assign_execute(self):
    """
    Assigns nodes to containers set to be run on remote machines. In
    the case of checkpoint restoration, previous execution host is
    overwritten with new ones assigned to the job
    :return:
    """
    logger.info('-- Running remote containers --')
    for idx, container in enumerate(self.containers):
        try:
            host = self.hosts[idx] if isinstance(self.hosts, list) else self.hosts
        except IndexError:
            logger.critical(
                "Not enough hosts assigned to job. One host per container is required"
            )
            self.terminate_clean()
            self.remove_images()
            sys.exit("Terminating")
        container.network = self.network_name if self.create_net else None
        try:
            container.execution_host = host
            logger.debug("Setting user to " + str(self.user))
            container.user = str(self.user)
            execute(self.run_container, container, host=host)
        except RemoteExecutionError as rex:
            logger.critical("Remote execution of containers failed: " + rex.message)
            self.terminate_clean()
            self.remove_images()
            sys.exit("Terminating")
        finally:
            disconnect_all()
def main():
    """
    Main entry point for CLI
    """
    load_maestro_rc()
    parser = ArgumentParser(prog='maestro', description='DevOps management')
    sub = parser.add_subparsers()

    sub_tasks = sub.add_parser('tasks', help='Tasks')
    sub_tasks.add_argument('-l', '--list', action='store_true',
                           dest='tasks_list', default=False,
                           help='Show available tasks')
    sub_tasks.add_argument('-r', '--run', action='store', dest='tasks_task',
                           help='Task to run')
    sub_tasks.set_defaults(func=parse_tasks)

    sub_nodes = sub.add_parser('nodes', help='Nodes')
    sub_nodes.add_argument('-c', '--cloud-provider', action='store',
                           dest='cloud_provider',
                           choices=config.AVAILABLE_CLOUD_PROVIDERS,
                           default=None, required=True,
                           help='Name of cloud provider to use')
    sub_nodes.add_argument('-l', '--list', action='store_true', dest='list',
                           default=False,
                           help='List all nodes')
    sub_nodes.add_argument('-t', '--task', action='store', dest='task',
                           help='Task to run on nodes')
    sub_nodes.add_argument('-u', '--user', action='store', dest='user',
                           help='Username to use when running task')
    sub_nodes.add_argument('-p', '--parallel', action='store_true',
                           default=False,
                           help='Run task in parallel among nodes')
    sub_nodes.set_defaults(func=parse_nodes)

    args = parser.parse_args()
    args.func(args)
    # disconnect
    disconnect_all()
    sys.exit(0)
def deploy(branch_name): obj = DeployTask() if obj.load_legend(branch_name): try: obj.revert_permissions() obj.check_dir() obj.render() obj.files() obj.discard_old_trees() obj.src() obj.venv() obj.link_settings() obj.collectstatic() obj.validate_prod() obj.reload() obj.restore_permissions() obj.nodejs() obj.render_tasks() obj.reload_search() obj.cleanup() except: raise finally: # os.system("rm -rfv out") local('rm -rfv out') disconnect_all() else: print red('legend.txt is required and does not exists', bold=True)
def run_with_fabric(username, address, commands):
    """
    Run a series of commands on a remote host.

    :param bytes username: User to connect as.
    :param bytes address: Address to connect to
    :param list commands: List of commands to run.
    """
    from fabric.api import settings, run, put, sudo
    from fabric.network import disconnect_all
    from StringIO import StringIO
    dispatch = {
        Run: lambda e: run(e.command),
        Sudo: lambda e: sudo(e.command),
        Put: lambda e: put(StringIO(e.content), e.path),
        Comment: lambda e: None,
    }
    target = "%s@%s" % (username, address)
    with settings(connection_attempts=24,
                  timeout=5,
                  pty=False,
                  host_string=target):
        for step in commands:
            dispatch[type(step)](step)
    disconnect_all()
def run(self, suite):
    """Execute *suite* serially on every configured host.

    Per-host results are handled inside runTests; a fresh (empty)
    TestResult is returned to satisfy the runner interface.
    """
    quieted = api.hide("running", "stdout", "stderr", "user")
    with settings(quieted, hosts=self.hosts.keys(), parallel=False):
        api.execute(self.runTests, suite)
    network.disconnect_all()
    return unittest.result.TestResult()
def bootstrap():
    """Prepare a fresh Ubuntu host: base packages, PPAs, system RVM, ruby+chef."""
    # Upgrade system packages
    upgrade_packages()
    # Install basic package requirements
    sudo("apt-get install -y build-essential git-core subversion mercurial "
         "bison openssl libreadline6 libreadline6-dev "
         "curl zlib1g zlib1g-dev libssl-dev libyaml-dev "
         "libsqlite3-0 libsqlite3-dev sqlite3 libxml2-dev "
         "libxslt-dev autoconf libc6-dev ncurses-dev "
         "python-dev python-software-properties")
    # Add Postgresql and Nginx PPAs
    sudo("add-apt-repository ppa:pitti/postgresql")
    sudo("add-apt-repository ppa:nginx/stable")
    sudo("apt-get update")
    # System-Wide RVM install
    sudo("bash < <(curl -sk https://rvm.beginrescueend.com/install/rvm)")
    sudo("usermod -a -G rvm %s" % env.user)
    # Disconnect to have RVM properly load
    network.disconnect_all()
    # Install Ruby and Chef
    rvm_install('ruby-1.9.2-p290', True)
    run("gem install chef ohai --no-ri --no-rdoc")
def _cleanup(self):
    """Drop SSH connections and delete the working dir; a missing dir is fine."""
    try:
        fabric_network.disconnect_all()
        shutil.rmtree(self._working_dir)
    except OSError as ex:
        # tolerate only an already-removed working directory
        if ex.errno != errno.ENOENT:
            raise
def sync(self, actor=None):
    """
    Updates the authorized_keys file on the machine attached to this login
    adding or deleting users public keys

    Returns True if successfully changed the authorized files and False if
    not (status stays dirty). If no change attempted, return None.
    """
    skip = self.machine.is_down or self.machine.manual or not self.is_dirty
    if skip:
        # No update required (either impossible or not needed)
        return None
    private_key = self.get_application_key().private_key
    success, output = self.run(
        "echo '%s' > ~/.ssh/authorized_keys" % self.formatted_keys(),
        private_key)
    prefix = "S" if success else "Uns"
    message = "%successful %s" % (prefix, "key deployment")
    LoginLog.objects.create(stderr=output.stderr, stdout=output.stdout,
                            actor=actor, login=self, message=message)
    with settings(hide(*ALL_FABRIC_WARNINGS)):
        disconnect_all()
    if not success:
        return success
    self.is_dirty = False
    self.application_key = ApplicationKey.get_latest()
    self.save()
    return success
def deploy_sepical_redis(user, ssh_port, target_host, redis_port,
                         aof=0, aof_rewrite=1, max_mem_size=0):
    """Render a redis config, deploy it to *target_host* and start redis.

    Returns 1 on success, 301 when deployment fails, 303 when startup
    fails.  Fix: connections are now always dropped via try/finally --
    previously the 301/303 early returns skipped disconnect_all() and
    leaked SSH connections.
    """
    with settings(parallel=True):
        # parm_parse must see only the call parameters, so keep it first.
        parm = parm_parse(locals())
        try:
            redis_cfg = \
                Template(Cfg.redis_cfg).render(port=parm.redis_port,
                                               aof=parm.aof,
                                               aof_rewrite=parm.aof_rewrite,
                                               max_mem_size=parm.max_mem_size)
            ret = execute(deploy_redis,
                          host=parm.target_host_str,
                          redis_port=parm.redis_port,
                          redis_cfg=redis_cfg)
            for _, each_ret in ret.items():
                if not each_ret:
                    return 301
            ret = execute(startup_redis,
                          host=parm.target_host_str,
                          redis_port=parm.redis_port)
            for _, each_ret in ret.items():
                if not each_ret:
                    return 303
        finally:
            disconnect_all()
    gvar.LOGGER.info("Init redis succeed.")
    return 1
def _exit_cleanup_hook(self):
    """
    Disconnect all Fabric connections in addition to shutting down the
    logging.
    """
    disconnect_all()
    # delegate the remaining shutdown work (logging etc.) to the base class
    Application._exit_cleanup_hook(self)
def status():
    """Show free memory plus mongo/nginx status from the EC2 instance."""
    print(_green("Connecting to EC2 Instance..."))
    run("free -m")
    execute(mongo_status)
    execute(nginx_status)
    print(_yellow("...Disconnecting EC2 instance..."))
    disconnect_all()
def send_file(self, hostname, local_file, remote_path): response_dict = dict() try: ssh_user = self.get_ssh_user() if ssh_user: with settings(host_string=hostname, user=ssh_user, key_filename=self.ssh_key, warn_only=False, timeout=180): status = put(local_file, remote_path) if status.failed: response_dict['status'] = '1' response_dict['output'] = status response_dict['return_code'] = 1 else: response_dict['status'] = '0' response_dict['output'] = status response_dict['return_code'] = 0 print 'Response: {0}, Return code: {1}'.format( str(status), response_dict['return_code']) except Exception as e: response_dict['status'] = '1' response_dict['output'] = e.message finally: disconnect_all() return response_dict
def main():
    """Serve UDP on 127.0.0.1:7777 until Ctrl-C, then tear everything down.

    Fix: the server socket is now closed in a finally block -- previously
    the bound port was never released.
    """
    server = socketserver.ThreadingUDPServer(('127.0.0.1', 7777), UDPHandler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        disconnect_all()
    finally:
        server.server_close()
    return
def check(redis_host_str, redis_port, dashboard_host_str, dashboard_port,
          proxy_host_str, proxy_port_list, product_name, zk_servers):
    """Sanity-check redis hosts, dashboard, (optional) proxies and zookeeper.

    Returns 1 when everything is healthy, otherwise an error code:
    101 redis env, 102 dashboard, 103 proxy env, 104 zookeeper node.
    Fix: connections are now always dropped via try/finally -- previously
    the early error returns skipped disconnect_all() and leaked them.
    """
    with settings(parallel=True):
        try:
            ret = execute(chk_redis_env,
                          hosts=redis_host_str,
                          redis_port=redis_port)
            for _, each_ret in ret.items():
                if not each_ret:
                    return 101
            ret = execute(chk_dashboard,
                          hosts=dashboard_host_str,
                          product_name=product_name,
                          dashboard_port=dashboard_port)
            for _, each_ret in ret.items():
                if not each_ret:
                    return 102
            if proxy_host_str != "":
                ret = execute(chk_proxy_env,
                              hosts=proxy_host_str,
                              product_name=product_name,
                              proxy_port_list=proxy_port_list)
                for _, each_ret in ret.items():
                    if not each_ret:
                        return 103
            if not chk_zk_node(zk_servers, product_name):
                return 104
        finally:
            disconnect_all()
    return 1
def configure_pulp_server(instance_name, global_config):
    """
    Set up a Pulp server using Fabric and a puppet module. Fabric will apply the
    given host name, ensure puppet and any modules declared in PUPPET_MODULES
    are installed, and will then apply the puppet manifest.

    :raise RuntimeError: if the server could not be successfully configured.
    This could be for any number of reasons. Currently fabric is set to be
    quite verbose, so see its output
    """
    cfg = get_instance_config(instance_name, global_config)
    host_string = cfg[HOST_STRING]
    private_key = cfg[PRIVATE_KEY]
    with settings(host_string=host_string, key_filename=private_key):
        # Confirm the server is available
        fabric_confirm_ssh_key(host_string, private_key)
        # Set the hostname
        run('sudo hostname ' + cfg[HOSTNAME])
        # Ensure the puppet modules are installed
        for module in PUPPET_MODULES:
            run(PUPPET_MODULE_INSTALL % module)
        # Add external facts to the server
        add_external_fact(host_string, private_key,
                          {PULP_REPO_FACT: cfg[REPOSITORY_URL]})
        # Apply the manifest to the server
        apply_puppet(host_string, private_key, PULP_SERVER_MANIFEST)
    fabric_network.disconnect_all()
# CLI entry point (duplicate variant of the pyrsnapshot main): parse
# retention counts and a host:path target, then run remote.pyrsnapshot.
# NOTE(review): left byte-identical -- the exact line breaks inside the
# triple-quoted description were lost when this source was collapsed, so a
# rewrite could silently change the user-visible help text.
def main(): parser = argparse.ArgumentParser( prog='pyrsnapshot', description='''\ Pyrsnapshot will back up the contents of the current directory to the remote server and save it under the path given. Use the executing user's .ssh/ssh_config to configure the user and private key for the connection. ''', ) help = 'Keep how many {} backups (default: {})' for adjective, default in zip(remote.ADJECTIVES, remote.DEFAULTS): parser.add_argument( '--{}'.format(adjective), type=int, default=default, help=help.format(adjective, default) ) parser.add_argument( 'remote', metavar='host:path', help='Backup to the remote host at the given path' ) try: args = parser.parse_args() host, root = args.remote.split(':') if not root: raise Exception("You must specify the remote path to backup to.") with quiet(): env.use_ssh_config = True execute(remote.pyrsnapshot, root, args, hosts=[host]) finally: disconnect_all()
def apply_recipes_cookbooks(enviro, settings, args, host_list, run_list):
    '''
    Apply all specified recipes and cookbooks to the requested hosts.

    :type enviro: dictionary
    :param enviro: environment dictionary
    :type settings: dictionary
    :param settings: settings dictionary
    :type args: args object
    :param args: object containing attributes for all possible
                 command-line parameters
    :type host_list: list of strings
    :param host_list: list of hosts to run against
    :type run_list: dictionary
    :param run_list: dictionary of lists
    '''
    for host in host_list:
        env.host_string = host
        if args.user:
            env.user = args.user
        if args.package_update:
            cuisine.package_update()
        try:
            for item in run_list[host]:
                # resolve the factory for this item; unknown types are skipped
                if item["type"] == "recipe":
                    factory = recipes.recipes[item["name"]]
                elif item["type"] == "cookbook":
                    factory = cookbooks.cookbooks[item["name"]]
                else:
                    continue
                runner = factory(settings, enviro, args.ok_to_be_rude,
                                 args.no_prompt)
                runner.run_apply(host)
        finally:
            disconnect_all()
# Copy a PBF file (url in args[1]) onto a fresh EBS artifact: size the
# source, spin up a temporary instance, curl the file onto the mounted
# artifact volume, then print the artifact's output and disconnect.
# NOTE(review): left byte-identical -- the nesting of the trailing print/
# disconnect_all relative to the `with` blocks is ambiguous in this
# collapsed source, so a reformat could change control flow.
def doCopy(conn, args): pbf_url = args[1] print "PBF Source " + pbf_url pbfsource = objects.PbfSource(pbf_url) try: size = pbfsource.size(conn) print "Size is {0} GB".format(size) except Exception as e: print e sys.exit(1) timestamp = int(time.time()) fabric.api.env.key_filename = "planet2ebs-{0}.pem".format(timestamp) fabric.api.env.connection_attempts = 10 print "Copying file to EBS volume..." with objects.Instance(conn, timestamp) as i: fabric.api.env.host_string = "ubuntu@{0}".format(i.public_dns_name) with objects.NewArtifact(conn, i, fabric.api,size,"artifact",{'planet2ebs':'pbf','planet2ebs-source':pbf_url}) as artifact: with pbfsource.use(conn, fabric.api, i.id) as path: # TODO check the md5 fabric.api.run("curl -o {0}/osm.pbf {1}".format(artifact.mountpoint,pbf_url)) print "Output: " + artifact.output() disconnect_all()
def deploy_redis_env(redis_host_str, redis_url, redis_pkg_name, redis_unpack_dir):
    """Ensure the redis package is installed on every host in *redis_host_str*.

    Hosts that already have the redis dir are skipped; others get the
    package installed and then re-checked.  Returns 1 on success, 201 on
    any install/verify failure.  Fixes: the inner loop no longer rebinds
    the dict it is iterating (the original reused the name `ret` for both
    the outer dict and the per-host results), and connections are always
    dropped via try/finally (early returns used to leak them).
    """
    with settings(parallel=True):
        try:
            presence = execute(chk_redis_dir, hosts=redis_host_str, info_only=1)
            for each_host, has_dir in presence.items():
                if has_dir:
                    continue
                ret = execute(install_redis_pkg,
                              hosts=each_host,
                              redis_urls=redis_url,
                              redis_pkg_name=redis_pkg_name,
                              redis_unpack_dir=redis_unpack_dir)
                for _, each_ret in ret.items():
                    if not each_ret:
                        return 201
                ret = execute(chk_redis_dir, hosts=each_host, info_only=0)
                for _, each_ret in ret.items():
                    if not each_ret:
                        return 201
        finally:
            disconnect_all()
    gvar.LOGGER.info("Deploy codis env success.")
    return 1
def install():
    """Run the full install (script init, tools, vigor), always disconnecting."""
    try:
        _initialize_script()
        _install_tools()
        _install_vigor()
    finally:
        disconnect_all()
def main():
    """Riker CLI entry point: dispatch the parsed docopt command.

    The first matching flag wins, mirroring the original elif chain;
    connections are always dropped on the way out.
    """
    arguments = docopt(__doc__, version='riker 0.1')
    actions = (
        ('create-new-ami', create_new_ami),
        ('deploy-ami', deploy_ami),
        ('deploy', deploy),
        ('update-config', update_config),
        ('info', get_info),
        ('ssh', ssh),
        ('dokku', dokku),
        ('open', open_url),
        ('url', get_url),
        ('config', config),
    )
    try:
        for flag, action in actions:
            if arguments.get(flag) == True:
                action(arguments)
                break
    finally:
        disconnect_all()
def main():
    """Configure fabric targets and run the test task on each of them."""
    configure()
    uts_dict = tasks.execute(test)
    # uts_dict = tasks.execute(download)
    # uts_dict = tasks.execute(install)
    # uts_dict = tasks.execute(reboot)
    disconnect_all()  # Call this when you are done, or get an ugly exception!
def sudorun(self, servername, commands, runas, passwd=""):
    """Run *commands* on *servername* via sudo as user *runas*.

    PGPASSWORD is exported for each command.  Stops at the first failing
    command or connection error; returns a dict describing the last
    command run ("result", "details", "return_code").
    """
    server = self.servers[servername]
    env.key_filename = server["ssh_key"]
    env.user = server["ssh_user"]
    env.disable_known_hosts = True
    env.host_string = server["hostname"]
    rundict = return_dict(True, "no commands provided",
                          {"return_code": None})
    pgpasswd = "" if passwd is None else passwd
    for command in commands:
        try:
            with shell_env(PGPASSWORD=pgpasswd):
                runit = sudo(command, user=runas, warn_only=True, pty=False)
            rundict.update({"details": runit,
                            "return_code": runit.return_code})
            if runit.succeeded:
                rundict.update({"result": "SUCCESS"})
            else:
                rundict.update({"result": "FAIL"})
                break
        except Exception as ex:
            rundict = {
                "result": "FAIL",
                "details": "connection failure: %s" % self.exstr(ex),
                "return_code": None,
            }
            break
    disconnect_all()
    return rundict
# Buildbot CLI: --uptime averages uptimes across servers, --arch prints
# each server's word size, otherwise deploys the selected package snippets
# to the selected servers after interactive confirmation.
# NOTE(review): left byte-identical -- whether the final
# network.disconnect_all() sits inside the else-branch loop or after it is
# ambiguous in this collapsed source, so a reformat could change behavior.
def main(): print('Buildbot for Linux {} - built by Foxlet'.format(VERSION)) print('-'*56 +'\n') if arguments.uptime: output = {'uts':[]} execute(botty.util.uptime, hosts=servers, times=output) print('--- Average Server Uptime ' + '-'*30) print(' {} days'.format(sum(output['uts'])/len(output['uts']))) print('-'*56) elif arguments.arch: output = [] execute(botty.util.find_arch, hosts=servers, list=output) for x in range(0, len(output)): print('{} is {} bits'.format(servers[x], output[x])) else: for item in packages_active: if item not in botty.util.getpacks(): print('Could not locate the specified package in the packages store!') sys.exit(1) package_gen = [list(x) for x in zip(servers, packages_active)] print(tabulate.tabulate(package_gen, ['Servers selected', 'Packages selected'], tablefmt="psql")) print('\nAssuming deployment with specified server(s) and package.') if botty.util.getcheck(raw_input('Continue with deployment? [y/N] ').lower()) == False: sys.exit(0) print('Starting deployment.') for item in package_gen: with open('packages/{}'.format(item[1])) as snippet: data = json.load(snippet) execute(botty.util.deploy_builtin, hosts=item[0], snippet=data) network.disconnect_all()
def configure_pulp_server(instance_name, global_config):
    """
    Set up a Pulp server using Fabric and a puppet module. Fabric will apply the
    given host name, ensure puppet and any modules declared in PUPPET_MODULES
    are installed, and will then apply the puppet manifest.

    :raise RuntimeError: if the server could not be successfully configured.
    This could be for any number of reasons. Currently fabric is set to be
    quite verbose, so see its output
    """
    config = get_instance_config(instance_name, global_config)
    host_string = config[HOST_STRING]
    private_key = config[PRIVATE_KEY]
    # Fix: fabric's SSH key setting is `key_filename`; `key_file` is not a
    # recognized setting, so the private key was never supplied to the
    # connection.  (Matches the sibling copy of this function in this file.)
    with settings(host_string=host_string, key_filename=private_key):
        # Confirm the server is available
        fabric_confirm_ssh_key(host_string, private_key)
        # Set the hostname
        run('sudo hostname ' + config[HOSTNAME])
        # Ensure the puppet modules are installed
        for module in PUPPET_MODULES:
            run(PUPPET_MODULE_INSTALL % module)
        # Add external facts to the server
        puppet_external_facts = {PULP_REPO_FACT: config[REPOSITORY_URL]}
        add_external_fact(host_string, private_key, puppet_external_facts)
        # Apply the manifest to the server
        apply_puppet(host_string, private_key, PULP_SERVER_MANIFEST)
    fabric_network.disconnect_all()
def run_with_fabric(username, address, commands):
    """
    Run a series of commands on a remote host.

    :param bytes username: User to connect as.
    :param bytes address: Address to connect to
    :param list commands: List of commands to run.
    """
    from fabric.api import settings, run, put
    from fabric.network import disconnect_all
    from StringIO import StringIO

    def _do_run(e):
        run(e.command)

    def _do_put(e):
        put(StringIO(e.content), e.path)

    handlers = {Run: _do_run, Put: _do_put}
    host_string = "%s@%s" % (username, address)
    with settings(connection_attempts=24, timeout=5, pty=False,
                  host_string=host_string):
        for command in commands:
            handlers[type(command)](command)
    disconnect_all()
def run(self):
    """Create the remote tar directory and run the build command over SSH.

    Connection settings come from self.config; optional 'password'/'key'
    entries are forwarded to fabric when present.
    """
    conf = self.config
    fab_settings = {
        'host_string': join_host_strings(conf['username'], conf['host'],
                                         port=conf['port']),
    }
    for optional in ('password', 'key'):
        if optional in conf:
            fab_settings[optional] = conf[optional]
    cmds = (
        'mkdir -p ' + str(PurePath(conf['tar_name']).parent),
        self.build_command(),
    )
    try:
        with settings(**fab_settings):
            for cmd in cmds:
                run(cmd)
    finally:
        disconnect_all()
def backup(): ### Read the build number from the archive manifest, so that it can be used ### to get rollback path to archive try: disconnect_all() ### Make sure your archive MANIFEST file has the build-number manifest_build_line=local('unzip -q -c ' + env.path_to_archive + ' META-INF/MANIFEST.MF | grep Build-Version', capture=True) if 'Build-Version' in manifest_build_line: ### Get the build number build_number = manifest_build_line.split()[1] rollback_url = path_to_archive.replace('lastSuccessfulBuild', build_number) ### Update the url in the rollback file with open(env.path_to_rollback, "w") as rollback_log_file: rollback_log_file.write(rollback_url) print 'INFO: Updated roll back log file with url: %s' % rollback_url logging.info('Updated roll back log file with url: %s' % rollback_url) except KeyError, e: print 'ERROR: %s' % str(e) logging.error(str(e)) raise