def read_file(fname):
    """Return the full contents of *fname*, or None (after printing an
    error to stderr) when the file does not exist."""
    # Guard clause: bail out early on a missing file.
    if not os.path.exists(fname):
        display("read_file: File %s does not exist" % fname, stderr=True, color='red')
        return None
    with open(fname) as f:
        return f.read()
def _execute(self, result, pass_trough=True, cwd=None):
    """Run the shell command described by *result* and fire its post actions.

    result       -- dict with optional 'command' (shell string) and
                    'post_actions' (list of zero-arg callables) keys;
                    anything falsy or non-dict is ignored.
    pass_trough  -- when True, run the command attached to the terminal;
                    otherwise capture stdout/stderr via pipes.
    cwd          -- working directory for the command (new, defaulted
                    parameter: __call__ already passed it, which previously
                    raised TypeError).

    Returns the command's exit code, or None when no command was run.
    """
    if not result or not isinstance(result, dict):
        return
    # Fix: previously 'exit_code' was only bound inside the 'command'
    # branch, so a result with only 'post_actions' raised UnboundLocalError.
    exit_code = None
    if 'command' in result:
        shell_command = result['command']
        # Echo the command with credentials masked before running it.
        display("%s" % self.shadow_credentials(shell_command), stderr=True, color='yellow')
        if pass_trough:
            exit_code = call(shell_command, shell=True, cwd=cwd)
        else:
            p = Popen(shell_command, shell=True, stdout=PIPE, stderr=PIPE, cwd=cwd)
            output, errors = p.communicate()
            if errors:
                display("%s" % self.shadow_credentials(errors), stderr=True, color='red')
            exit_code = p.returncode
    if 'post_actions' in result:
        # Post actions run even when no command was present.
        for callback in result['post_actions']:
            callback()
    return exit_code
def process(self, cwd=None, path=None, filters=(), exclude_keys=(), enclosing_key=None, output_format=yaml, print_data=False, output_file=None, skip_interpolations=False, skip_interpolation_validation=False, display_command=True):
    """Generate the hierarchical configuration for *path* and return it.

    Optionally echoes the equivalent shell command, applies key filters and
    exclusions, wraps the result under *enclosing_key*, prints the formatted
    output and/or writes it to *output_file*.  Returns the raw generated
    data structure (not the formatted text).
    """
    path = self.get_relative_path(path)
    # Echo the equivalent CLI invocation before doing any work.
    if display_command:
        display(self.get_sh_command(path, filters, enclosing_key, output_format,
                                    print_data, output_file, skip_interpolations,
                                    skip_interpolation_validation), color='yellow')
    # Skipping interpolations implies skipping their validation as well.
    if skip_interpolations:
        skip_interpolation_validation = True
    working_dir = os.getcwd() if cwd is None else cwd
    gen = ConfigGenerator(working_dir, path)
    gen.generate_hierarchy()
    gen.process_hierarchy()
    if not skip_interpolations:
        # Resolve, inject dynamic data, then resolve again so dynamic
        # values may themselves contain interpolations.
        gen.resolve_interpolations()
        gen.add_dynamic_data()
        gen.resolve_interpolations()
    if filters:
        gen.filter_data(filters)
    if exclude_keys:
        gen.exclude_keys(exclude_keys)
    if not skip_interpolation_validation:
        gen.validate_interpolations()
    if enclosing_key:
        data = gen.add_enclosing_key(enclosing_key)
    else:
        data = gen.generated_data
    formatted_data = gen.output_data(data, output_format)
    if print_data:
        print(formatted_data)
    if output_file:
        with open(output_file, 'w') as f:
            f.write(formatted_data)
    return data
def write_token(token=None):
    """Best-effort persist *token* (stripped) to ~/.vault-token.

    A falsy token is a no-op.  Any I/O failure only emits a warning —
    callers must not be interrupted by an unwritable home directory.
    """
    if not token:
        return
    try:
        # Fix: use a context manager so the file handle is always closed
        # (the original relied on refcounting to close it).
        with open(os.path.expanduser('~/.vault-token'), "w") as f:
            f.write(token.strip())
    except Exception:
        display("Warning: could not persist token to ~/.vault-token", stderr=False, color='yellow')
def __getitem__(self, item):
    """Look *item* up in the cluster config first, then in the ops config.

    When the key is in neither, print an error pointing at the cluster
    config file and return None (no KeyError is raised).
    """
    if item in self.conf:
        return self.conf[item]
    if item in self.ops_config:
        return self.ops_config[item]
    display("Configuration value %s not found; update your %s" % (item, self.cluster_config_path),
            color='red', stderr=True)
    return
def run(self, args):
    """Print each inventory host (with EC2 id when known) and its groups;
    optionally dump per-host facts when args.facts is set."""
    for host in self.get_inventory_hosts(args):
        groups_csv = ", ".join(sorted(group.name for group in host.get_groups()))
        ec2_id = host.vars.get('ec2_id', '')
        colored_name = stringc(host.name, 'blue')
        if ec2_id != '':
            label = "%s -- %s" % (colored_name, stringc(ec2_id, 'blue'))
        else:
            label = "%s" % colored_name
        display("%s (%s)" % (label, stringc(groups_csv, 'green')))
        if args.facts:
            display(self.get_host_facts(host))
def __call__(self, result, pass_trough=True, cwd=None):
    """Delegate to self._execute, converting any exception into an error
    message plus a printed traceback instead of propagating it (the
    implicit return is then None)."""
    try:
        return self._execute(result, pass_trough, cwd)
    except Exception as ex:
        import traceback
        display(ex.message, stderr=True, color='red')
        display('------- TRACEBACK ----------', stderr=True, color='dark gray')
        traceback.print_exc()
        display('------ END TRACEBACK -------', stderr=True, color='dark gray')
def __init__(self, vault_user=None, vault_addr=None, vault_token=None, persistent_session=True, auto_prompt=True):
    """Build a (possibly class-cached) hvac client and LDAP-authenticate if needed.

    Resolution order for connection settings:
      - vault_addr:  argument, then $VAULT_ADDR, then http://localhost:8200
      - vault_token: argument, then $VAULT_TOKEN, then ~/.vault-token
      - vault_user:  argument, then $VAULT_USER, then the local username
    While unauthenticated and auto_prompt is True, prompts for an LDAP
    password (up to MAX_LDAP_ATTEMPTS tries) and persists any token obtained.
    """

    def try_reading_token_file():
        # Best-effort read of the cached token file; any failure yields None.
        ret = None
        try:
            ret = open(os.path.expanduser('~/.vault-token'), "r").read().strip()
        except Exception:
            ret = None
            pass
        return ret

    def write_token(token=None):
        # Best-effort persist of the token; failure only warns, never raises.
        try:
            if token:
                open(os.path.expanduser('~/.vault-token'), "w").write(token.strip())
        except Exception:
            display("Warning: could not persist token to ~/.vault-token", stderr=False, color='yellow')
            pass

    self.vault_addr = vault_addr or os.getenv('VAULT_ADDR', None) or "http://localhost:8200"
    # Actually maybe we should reconsider having a default vault addr. How often we will create infrastructures
    # with vault running on the provisioner's machine ?
    self.vault_token = vault_token or os.getenv('VAULT_TOKEN', None) or try_reading_token_file()
    self.vault_user = vault_user or os.getenv('VAULT_USER', None) or getpass.getuser()
    self.ldap_attempts = 0
    if persistent_session:
        if SimpleVault.p_vault_conn:
            # Reuse the class-level cached connection across instances.
            self.vault_conn = SimpleVault.p_vault_conn
        else:
            self.vault_conn = hvac.Client(url=self.vault_addr, token=self.vault_token)
    # NOTE(review): when persistent_session is False, self.vault_conn is never
    # assigned before the loop below, which would raise AttributeError —
    # presumably all callers use the default; confirm whether the
    # non-persistent path needs its own hvac.Client.
    while not self.vault_conn.is_authenticated() and auto_prompt:
        display("VAULT-LIB: Not authenticated to vault '%s'" % self.vault_addr, stderr=True, color='red')
        display(
            "Note: the default LDAP username (%s) can be overwritten with VAULT_USER" % self.vault_user,
            stderr=True, color='yellow')
        display("       or to pass a token directly use VAULT_TOKEN", stderr=True, color='yellow')
        try:
            self.ldap_attempts += 1
            ldap_password = getpass.getpass(
                prompt='LDAP password for %s for server %s: ' % (self.vault_user, self.vault_addr))
            auth_response = self.vault_conn.auth_ldap(self.vault_user, ldap_password)
            self.vault_conn.is_authenticated()
            # LDAP auth minted a fresh client token; remember and persist it.
            self.vault_token = auth_response['auth']['client_token']
            write_token(self.vault_token)
        except Exception as e:
            if self.ldap_attempts >= MAX_LDAP_ATTEMPTS:
                display("FAILED authentication {} times".format(self.ldap_attempts), color='red')
                raise e
            else:
                # Below the attempt limit: swallow the error and re-prompt.
                pass
def __init__(self, path, key='value', policy=None, vault_user=None, vault_addr=None, vault_token=None, auto_prompt=True):
    """Ensure a generated secret exists in Vault at *path* under *key*.

    policy -- an int (desired password length), a dict merged over the
              default policy {'engine': 'passgen', 'length': 24}, or None
              for the defaults (was a mutable default {}; behavior
              unchanged).
    The remaining arguments configure the underlying SimpleVault session.

    If the secret already exists, it is left untouched; otherwise it is
    generated with the configured engine and written back.  Raises on
    connection/read/write failures or on an invalid policy/path/engine.
    """
    if policy is None:
        policy = {}
    self.__DEFAULT_POLICY__ = {'engine': 'passgen', 'length': 24}
    self.current_data = {}
    self.already_initialized = False
    self.actual_policy = self.__DEFAULT_POLICY__.copy()
    self.key = key
    if isinstance(policy, int):
        self.actual_policy.update({'length': policy})
    elif isinstance(policy, dict):
        self.actual_policy.update(policy)
    else:
        raise Exception(
            "Incorrect policy specified. Use a number if unsure.")
    if path:
        self.vault_path = path
    else:
        raise Exception("Invalid path for secret")
    self.policy = policy
    if ManagedVaultSecret.p_sv and ManagedVaultSecret.p_sv.is_authenticated():
        # Reuse the class-level cached, still-authenticated session.
        self.sv = ManagedVaultSecret.p_sv
    else:
        try:
            # Fix: the caller-supplied Vault parameters were previously
            # ignored (hard-coded to None/True); pass them through.
            self.sv = SimpleVault(vault_user=vault_user,
                                  vault_addr=vault_addr,
                                  vault_token=vault_token,
                                  auto_prompt=auto_prompt)
            ManagedVaultSecret.p_sv = self.sv
        except Exception as e:
            display('MANAGED-SECRET: could not obtain a proper Vault connection.\n{}'
                    .format(e.message))
            raise e
    try:
        self.current_data = self.sv.get(path=path, fetch_all=True, raise_exceptions=True)
    except Exception as e:
        display('MANAGED-SECRET: could not confirm if secret at path {} does or not already exist. '
                'Exception was:\n{}'.format(path, e.message))
        raise e
    if self.current_data.get(key):
        # Something exists on that path; assume the secret already exists
        # and do nothing more.
        pass
    else:
        # Secret does not exist: generate it now per the desired policy.
        generator_args = self.actual_policy.copy()
        engine = generator_args.pop('engine', None)
        if engine == 'passgen':
            try:
                import passgen
            except ImportError as e:  # fix: was 'ImportErrori' (NameError)
                display('MANAGED-SECRET: You need passgen python module in order to use the passgen engine.')
                raise e
            try:
                # Generate and store the new secret, preserving siblings.
                self.new_data = self.current_data.copy()
                self.new_data[key] = passgen.passgen(**generator_args)
                self.sv.put(path, self.new_data)
                self.already_initialized = True
                self.current_data = self.new_data
            except Exception as e:
                display('MANAGED-SECRET: could not create new managed secret')
                raise e
        else:
            raise Exception("Unsupported password generation engine.")
def skms(args):
    """Build an Ansible dynamic-inventory dictionary from SKMS device data.

    Example cluster file:
        inventory:
          - plugin: skms
            args:
              skms:
                endpoint: api.skms.mycompany.com
              environment: 'Solution Name - OR1 - Production'
              strip:
                device_service: 'Solution Name - '
                environment: 'Solution Name - OR1 - '
                hostname: .solution.mycompany.net

    Example credentials file (located at ~/.skms/credentials.yaml):
        endpoint: "api.skms.mycompany.com"
        username: "******"
        password: "******"
    """
    credentials_file = "%s/.skms/credentials.yaml" % os.path.expanduser('~')
    if os.path.isfile(credentials_file):
        # Fix: close the credentials file deterministically.
        # NOTE(review): yaml.load_all without an explicit Loader is unsafe on
        # untrusted input; acceptable only because this file is user-local.
        with open(credentials_file, "r") as file_stream:
            for doc in yaml.load_all(file_stream):
                args['skms']['username'] = doc['username']
                args['skms']['password'] = doc['password']
    else:
        display(stringc('Credentials file does not exist: %s' % credentials_file, 'red'))
        sys.exit(1)
    conn = WebApiClient(args['skms']['username'], args['skms']['password'], args['skms']['endpoint'])
    # One search request fetching every device of the requested environment.
    query = {'request_arr': []}
    query['request_arr'].append({'object': 'DeviceDao', 'method': 'search', 'parameters': {'query': "SELECT device_id, name, operating_system.display_name as operating_system, device_service.full_name AS device_service, environment.full_name AS environment, primary_ip_address.ip_address AS primary_ip_address WHERE environment.full_name = \"%s\" PAGE 1, 5000" % args['environment']}})
    conn.send_request('SkmsWebApi', 'performMultipleRequests', query)
    response = conn.get_response_dictionary()
    if response['status'] == 'error':
        display(stringc('SKMS query produced an error: %s' % response, 'red'))
        sys.exit(1)
    # Optional prefixes/suffixes stripped from the SKMS names.
    if 'device_service' in args['strip']:
        device_service_strip = args['strip']['device_service']
    else:
        device_service_strip = ''
    if 'environment' in args['strip']:
        environment_strip = args['strip']['environment']
    else:
        environment_strip = ''
    if 'hostname' in args['strip']:
        hostname_strip = args['strip']['hostname']
    else:
        hostname_strip = ''
    dictionary_of_hosts = {
        '_meta': {
            'hostvars': {}
        }
    }
    for info in response['data']['result_arr'][0]['data']['results']:
        # Raising warning when primary ip is unavailable !!!
        # Fix: 'display' is a function here, not an object — the original
        # 'display.display(...)' raised AttributeError.
        if info['primary_ip_address'] is None:
            display("[WARN] missing primary_ip_address, ignoring following host SKMS data:\n" + str(info),
                    color='yellow')
            continue
        if info['operating_system'] is None:
            info['operating_system'] = ''
        # Derive the normalized host attributes used for grouping.
        info['computer_name'] = info['name']
        info['location_name'] = info['environment'].split('-')[1].strip()
        info['name'] = info['name'].replace(hostname_strip, '')
        info['environment'] = info['environment'].replace(environment_strip, '')
        info['owner'] = environment_strip.split('-')[0].strip()
        info['site'] = info['name'].split('-')[0]
        info['cluster'] = '-'.join(info['name'].split('-')[:2])
        # step through device services to add them as groups
        for device_service in info['device_service']:
            device_service = device_service.replace(device_service_strip, '').encode('utf-8')
            # add device service to top-level dictionary as a group
            if device_service not in dictionary_of_hosts:
                dictionary_of_hosts[device_service] = {"hosts": []}
            # add host to array of that device service group
            if info['name'] not in dictionary_of_hosts[device_service]['hosts']:
                dictionary_of_hosts[device_service]['hosts'].append(info['name'].encode('utf-8'))
        # add environment to top-level dictionary as a group
        if info['environment'] not in dictionary_of_hosts:
            dictionary_of_hosts[info['environment'].encode('utf-8')] = {"hosts": []}
        if info['name'] not in dictionary_of_hosts[info['environment']]['hosts']:
            dictionary_of_hosts[info['environment']]['hosts'].append(info['name'].encode('utf-8'))
        # add primary_ip_address to top-level dictionary as a group
        if info['primary_ip_address'] not in dictionary_of_hosts:
            dictionary_of_hosts[info['primary_ip_address'].encode('utf-8')] = {"hosts": []}
        if info['name'] not in dictionary_of_hosts[info['primary_ip_address']]['hosts']:
            dictionary_of_hosts[info['primary_ip_address']]['hosts'].append(info['name'].encode('utf-8'))
        # add owner to top-level dictionary as a group
        if info['owner'] not in dictionary_of_hosts:
            dictionary_of_hosts[info['owner'].encode('utf-8')] = {"hosts": []}
        if info['name'] not in dictionary_of_hosts[info['owner']]['hosts']:
            dictionary_of_hosts[info['owner']]['hosts'].append(info['name'].encode('utf-8'))
        # add site to top-level dictionary as a group
        if info['site'] not in dictionary_of_hosts:
            dictionary_of_hosts[info['site'].encode('utf-8')] = {"hosts": []}
        if info['name'] not in dictionary_of_hosts[info['site']]['hosts']:
            dictionary_of_hosts[info['site']]['hosts'].append(info['name'].encode('utf-8'))
        # add cluster to top-level dictionary as a group
        if info['cluster'] not in dictionary_of_hosts:
            dictionary_of_hosts[info['cluster'].encode('utf-8')] = {"hosts": []}
        if info['name'] not in dictionary_of_hosts[info['cluster']]['hosts']:
            dictionary_of_hosts[info['cluster']]['hosts'].append(info['name'].encode('utf-8'))
        # tie some extra information to hostname in the meta variables
        # (note: 'device_service' here is the last value from the loop above)
        dictionary_of_hosts['_meta']['hostvars'][info['name'].encode('utf-8')] = {
            'ec2_id': info['device_id'].encode('utf-8'),
            'ansible_ssh_host': info['primary_ip_address'].encode('utf-8'),
            'ansible_host': info['primary_ip_address'].encode('utf-8'),
            'computer_name': info['computer_name'].encode('utf-8'),
            'location': info['location_name'].encode('utf-8'),
            'name': info['name'].split('.')[0].encode('utf-8'),
            'operating_system': info['operating_system'].encode('utf-8'),
            'private_ip': info['primary_ip_address'].encode('utf-8'),
            'tags': {
                'Adobe:Environment': info['environment'].encode('utf-8'),
                'Adobe:Owner': info['owner'].encode('utf-8'),
                'CMDB_device_service': device_service.encode('utf-8'),
                'CMDB_environment': info['environment'].encode('utf-8'),
                'CMDB_hostname': info['name'].split('.')[0].encode('utf-8'),
                'cluster': info['cluster'].encode('utf-8'),
                'environment': info['environment'].encode('utf-8'),
                'role': device_service.encode('utf-8'),
                'site': info['site'].encode('utf-8'),
            }
        }
    return dictionary_of_hosts
def generate(self, args):
    """Build the terraform shell command line for *args.subcommand*.

    Returns a dict with keys 'command' (the full shell string) and
    'post_actions' (callbacks to run after execution), or None for the
    'template' subcommand and for an unrecognized/missing subcommand.
    """
    self.selected_terraform_path = args.path_name
    self.set_current_working_dir()
    current_terraform_version = self.check_terraform_version()
    config = self.cluster_config
    # Second dotted component of the version string, used below to gate
    # 'terraform init' support (e.g. "0.9.x" -> 9).
    current_terraform_version_major = int(
        current_terraform_version.split('.')[1])
    # Decide whether remote state is in use ('true'/'false' as strings).
    if 'enable_consul_remote_state' in config['terraform']:
        terraform_remote_state = config['terraform'][
            'enable_consul_remote_state']
    elif config['terraform'].get('state', {
            'type': None
    }).get('type') == 's3':
        terraform_remote_state = 'true'
    else:
        terraform_remote_state = 'false'
    terraform_config = config.get('terraform', {})
    terraform_path = self.get_terraform_path()
    generate_module_templates = False
    # Variables passed to terraform as repeated -var flags.
    plan_variables = terraform_config.get('vars', {})
    if not config['cluster'].startswith("auto_generated"):
        plan_variables['cluster'] = config['cluster']
    if self.cluster_config.has_ssh_keys:
        plan_variables['has_ssh_keys'] = True
        plan_variables[
            'cluster_ssh_pubkey_file'] = self.cluster_config.cluster_ssh_pubkey_file
        plan_variables[
            'cluster_ssh_prvkey_file'] = self.cluster_config.cluster_ssh_prvkey_file
    if terraform_config.get('boto_profile'):
        self.add_profile_vars(plan_variables,
                              terraform_config.get('boto_profile'))
    vars = ''
    for key, val in plan_variables.items():
        vars += " -var '%s=%s' " % (key, val)
    state_file = 'terraform.{cluster}.tfstate'.format(
        cluster=config['cluster'])
    plan_file = 'terraform.{cluster}.plan'.format(
        cluster=config['cluster'])
    landscape = ''
    if current_terraform_version_major >= 9:
        if args.force_copy:
            terraform_init_command = 'terraform init -force-copy && '
        else:
            terraform_init_command = 'terraform init && '
        # regarding state location we give priority to the cli parameter
        if args.state_location == 'remote':
            state_argument = ''
            state_out_argument = ''
        elif args.state_location == 'local':
            state_argument = "-state={state_file}".format(
                state_file=state_file)
            state_out_argument = "-state-out={state_file}".format(
                state_file=state_file)
        else:
            # no cli parameter, decide based on config file
            if terraform_remote_state == 'true':
                state_argument = ''
                state_out_argument = ''
            else:
                state_argument = "-state={state_file}".format(
                    state_file=state_file)
                state_out_argument = "-state-out={state_file}".format(
                    state_file=state_file)
    else:
        # Pre-'init' terraform versions always use an explicit local state.
        state_argument = "-state={state_file}".format(
            state_file=state_file)
        state_out_argument = "-state-out={state_file}".format(
            state_file=state_file)
        terraform_init_command = ''
    remove_local_cache = 'rm -rf .terraform && ' if \
        self.ops_config['terraform.remove_local_cache'] else ''
    if args.subcommand == 'template':
        # Render templates only; no terraform command line is returned.
        if args.template_location:
            self.copy_static_files(args.template_location, terraform_path)
            self.write_module_templates(args.template_location)
            self.write_var_file(
                os.path.join(args.template_location, terraform_path),
                plan_variables)
        else:
            for original, fname, contents in self.get_templated_files():
                display("# %s -> %s" % (original, fname), color="green")
                display("# --------------", color="green")
                display(contents)
        return
    if "variables_file" in config['terraform']:
        variables_file = ' -var-file="{}" '.format(
            config['terraform']["variables_file"])
    else:
        variables_file = ' '
    auto_approve = '-auto-approve' if args.auto_approve else ''
    if args.subcommand == 'plan':
        generate_module_templates = True
        terraform_refresh_command = ''
        if args.do_refresh:
            # Optional refresh pass chained before the plan itself.
            terraform_refresh_command = "terraform refresh" \
                "{variables_file}" \
                " -input=false {vars} {state_argument} && ".format(vars=vars,
                                                                   state_argument=state_argument,
                                                                   variables_file=variables_file)
        # Pipe the plan through 'landscape' for prettier output, if enabled.
        if self.ops_config[
                'terraform.landscape'] and not args.raw_plan_output:
            landscape = '| landscape'
        cmd = "cd {root_dir}/{terraform_path} && " \
              "{remove_local_cache}" \
              "terraform get -update && " \
              "{terraform_init_command}" \
              "{terraform_refresh_command}" \
              "terraform plan " \
              "{variables_file}" \
              "-out={plan_file} -refresh=false -input=false {vars} {state_argument}".format(
                  root_dir=self.root_dir,
                  terraform_path=terraform_path,
                  terraform_init_command=terraform_init_command,
                  vars=vars,
                  state_argument=state_argument,
                  plan_file=plan_file,
                  terraform_refresh_command=terraform_refresh_command,
                  remove_local_cache=remove_local_cache,
                  variables_file=variables_file
              )
    elif args.subcommand == 'apply':
        # the following is to have auxiliary rendered/templated files like cloudinit.yaml
        # that also needs templating. Without it, plan works but apply does not for this kind of files
        # todo maybe this deserves a better implementation later
        generate_module_templates = True
        self.inventory_generator.clear_cache()
        if args.skip_plan:
            # Run Terraform apply without running a plan first
            cmd = "cd {root_dir}/{terraform_path} && " \
                  "{remove_local_cache}" \
                  "{terraform_init_command}" \
                  "rm -f {plan_file} && terraform apply {vars}" \
                  "-refresh=true {state_argument} {variables_file} {auto_approve}".format(
                      plan_file=plan_file,
                      root_dir=self.root_dir,
                      state_argument=state_argument,
                      remove_local_cache=remove_local_cache,
                      terraform_init_command=terraform_init_command,
                      terraform_path=terraform_path,
                      vars=vars,
                      variables_file=variables_file,
                      auto_approve=auto_approve
                  )
        else:
            # Apply a previously generated plan file; always delete the plan
            # afterwards while preserving terraform's exit code.
            cmd = "cd {root_dir}/{terraform_path} && " \
                  "terraform apply " \
                  "-refresh=true {state_out_argument} {plan_file}; code=$?; rm -f {plan_file}; exit $code".format(
                      plan_file=plan_file,
                      root_dir=self.root_dir,
                      state_out_argument=state_out_argument,
                      terraform_path=terraform_path,
                      vars=vars,
                      variables_file=variables_file
                  )
    elif args.subcommand == 'destroy':
        # Plan the destroy first, then destroy.
        generate_module_templates = True
        cmd = "cd {root_dir}/{terraform_path} && " \
              "{remove_local_cache}" \
              "{terraform_init_command}" \
              "terraform plan -destroy " \
              "-refresh=true {vars} {variables_file} {state_argument} && " \
              "terraform destroy {vars} {variables_file} {state_argument} -refresh=true {auto_approve}".format(
                  root_dir=self.root_dir,
                  terraform_path=terraform_path,
                  variables_file=variables_file,
                  vars=vars,
                  state_argument=state_argument,
                  terraform_init_command=terraform_init_command,
                  remove_local_cache=remove_local_cache,
                  auto_approve=auto_approve
              )
    elif args.subcommand == 'output':
        cmd = "cd {root_dir}/{terraform_path} && " \
              "terraform output {state_argument} {output}".format(
                  root_dir=self.root_dir,
                  terraform_path=terraform_path,
                  output=args.var,
                  state_argument=state_argument
              )
    elif args.subcommand == 'refresh':
        generate_module_templates = True
        cmd = "cd {root_dir}/{terraform_path} && " \
              "terraform get -update && " \
              "terraform refresh {variables_file} {state_argument} {vars}".format(
                  root_dir=self.root_dir,
                  terraform_path=terraform_path,
                  vars=vars,
                  variables_file=variables_file,
                  state_argument=state_argument
              )
    elif args.subcommand == 'taint' or args.subcommand == 'untaint':
        cmd = "cd {root_dir}/{terraform_path} && " \
              "{remove_local_cache}" \
              "{terraform_init_command}" \
              "terraform {command} {state_argument} -module={module} {resource}".format(
                  root_dir=self.root_dir,
                  command=args.subcommand,
                  terraform_path=terraform_path,
                  resource=args.resource,
                  module=args.module,
                  state_argument=state_argument,
                  terraform_init_command=terraform_init_command,
                  remove_local_cache=remove_local_cache
              )
    elif args.subcommand == 'show':
        # Show either the saved plan or the state file.
        if args.plan:
            state = plan_file
        else:
            state = state_file
        cmd = "cd {root_dir}/{terraform_path} && " \
              "terraform show {state}".format(
                  root_dir=self.root_dir,
                  terraform_path=terraform_path,
                  state=state
              )
    elif args.subcommand == 'import':
        generate_module_templates = True
        cmd = "cd {root_dir}/{terraform_path} && " \
              "terraform import {state_argument} {vars} module.{module}.{resource} {name}".format(
                  root_dir=self.root_dir,
                  command=args.subcommand,
                  terraform_path=terraform_path,
                  resource=args.resource,
                  module=args.module,
                  name=args.name,
                  state_argument=state_argument,
                  vars=vars,
              )
    elif args.subcommand == 'console':
        generate_module_templates = True
        cmd = "cd {root_dir}/{terraform_path} && " \
              "terraform {command} {state_argument} {vars}".format(
                  root_dir=self.root_dir,
                  command=args.subcommand,
                  terraform_path=terraform_path,
                  state_argument=state_argument,
                  vars=vars,
              )
    elif args.subcommand == 'validate':
        generate_module_templates = True
        cmd = "cd {root_dir}/{terraform_path} && " \
              "{remove_local_cache}" \
              "{terraform_init_command} " \
              "terraform {command} {vars} {variables_file}".format(
                  command=args.subcommand,
                  root_dir=self.root_dir,
                  remove_local_cache=remove_local_cache,
                  terraform_init_command=terraform_init_command,
                  terraform_path=terraform_path,
                  vars=vars,
                  variables_file=variables_file
              )
    elif args.subcommand is not None:
        # Examples:
        # - command = "state push errored.tfstate"
        # - command = "force-unlock <LOCK_ID>"
        generate_module_templates = True
        cmd = "cd {root_dir}/{terraform_path} && " \
              "{remove_local_cache}" \
              "{terraform_init_command} " \
              "terraform {command}".format(
                  command=args.subcommand,
                  root_dir=self.root_dir,
                  remove_local_cache=remove_local_cache,
                  terraform_init_command=terraform_init_command,
                  terraform_path=terraform_path,
              )
    else:
        display('Terraform subcommand \'%s\' not found' % args.subcommand,
                color='red')
        return
    if generate_module_templates:
        # Templates written now must be cleaned up after the command runs.
        self.write_module_templates()
        post_actions = [self.remove_module_template]
    else:
        post_actions = []
    # pass on the terraform args to the terraform command line
    cmd = ' '.join([cmd] + args.terraform_args + [landscape])
    return dict(command=cmd, post_actions=post_actions)
def run(self, args):
    """Resolve the target host from the inventory and build an ssh command.

    Handles four modes: --keygen (generate cluster ssh keys, then return),
    plain ssh, --tunnel/--nossh (local port forward), and --proxy (SOCKS).
    Returns a dict with the 'command' string for the executor, or None
    after key generation / on an out-of-range host index.
    """
    if args.keygen:
        if self.cluster_config.has_ssh_keys:
            err('Cluster already has ssh keys, refusing to overwrite')
            sys.exit(2)
        else:
            pub_key_file = self.cluster_config.cluster_ssh_pubkey_file
            prv_key_file = self.cluster_config.cluster_ssh_prvkey_file
            display('Trying to generate ssh keys in:\n{} and \n{}'.format(
                pub_key_file, prv_key_file))
            if os.path.isfile(pub_key_file) or os.path.isfile(
                    prv_key_file):
                err('Although we do not have a complete keyset, one of the files exists and we refuse to overwrite\n'
                    )
                sys.exit(2)
            else:
                # generate ssh keypair. The passphrase will be the name of the cluster
                cmd = "ssh-keygen -t rsa -b 4096 -N {} -f {}".format(
                    self.cluster_name, prv_key_file).split(' ')
                print cmd
                call(cmd)
        return
    # Validate mutually-dependent cli flags before any inventory work.
    if args.local and not IP_HOST_REG_EX.match(args.local):
        err('The --local parameter must be in the form of host-ip:port or port'
            )
        sys.exit(2)
    if args.tunnel or args.nossh:
        if args.local is None or args.remote is None:
            err('When using --tunnel or --nossh both the --local and --remote parameters are required'
                )
            sys.exit(2)
    if args.proxy:
        if args.local is None:
            err('When using --proxy the --local parameter is required')
            sys.exit(2)
    # First try hosts in the intersection of the cluster group and the role.
    group = "%s,&%s" % (self.cluster_name, args.role)
    # Convert the 1-based cli index to 0-based, clamping at 0.
    args.index = args.index - 1
    if args.index < 0:
        args.index = 0
    hosts = self.ansible_inventory.get_hosts(group)
    if len(hosts) <= args.index:
        # Not enough matches; retry with the role alone.
        group = args.role
        hosts = self.ansible_inventory.get_hosts(group)
        if not hosts:
            display("No host found in inventory, using provided name %s" %
                    (args.role),
                    color="purple",
                    stderr=True)
    display("Expression %s matched hosts (max 10): " % group, stderr=True)
    host_names = [host.name for host in hosts]
    for name in host_names[:10]:
        display(name, color='blue')
    host = None
    if host_names:
        if args.index < len(host_names):
            host = self.ansible_inventory.get_host(host_names[args.index])
        else:
            display("Index out of bounds for %s" % (group),
                    color="red",
                    stderr=True)
            return
    if host:
        ssh_host = host.vars.get('ansible_ssh_host') or host.name
    else:
        # no host found in inventory, use the role provided
        bastion = self.ansible_inventory.get_hosts('bastion')[0].vars.get(
            'ansible_ssh_host')
        host = Host(name=args.role)
        ssh_host = '{}--{}'.format(bastion, host.name)
    # User precedence: cli flag > cluster config > ops config > local user.
    ssh_user = self.cluster_config.get('ssh_user') or self.ops_config.get(
        'ssh.user') or getpass.getuser()
    if args.user:
        ssh_user = args.user
    if ssh_user and not '-l' in args.ssh_opts:
        args.ssh_opts.extend(['-l', ssh_user])
    if args.nossh:
        # --nossh implies a tunnel through the bastion's address.
        args.tunnel = True
        args.ipaddress = True
        ssh_host = self.ansible_inventory.get_hosts('bastion')[0].vars.get(
            'ansible_ssh_host')
    # if args.tunnel or args.proxy:
    #     ssh_config = args.ssh_config or 'ssh.tunnel.config'
    # else:
    #     ssh_config = args.ssh_config or self.ansible_inventory.get_ssh_config()
    ssh_config = args.ssh_config or self.ops_config.get(
        'ssh.config') or self.ansible_inventory.get_ssh_config()
    if args.tunnel:
        # Local forward: bind args.local and forward to host_ip:args.remote.
        if args.ipaddress:
            host_ip = host.vars.get('private_ip_address')
        else:
            host_ip = 'localhost'
        command = "ssh -F %s %s -4 -N -L %s:%s:%d" % (
            ssh_config, ssh_host, args.local, host_ip, args.remote)
    else:
        command = "ssh -F %s %s" % (ssh_config, ssh_host)
    if args.proxy:
        # SOCKS proxy mode overrides the command built above.
        command = "ssh -F %s %s -4 -N -D %s -f -o 'ExitOnForwardFailure yes'" % (
            ssh_config, ssh_host, args.local)
    if args.ssh_opts:
        command += " " + " ".join(args.ssh_opts)
    # Check if optional sshpass is available and print info message
    sshpass_path = os.path.expanduser("~/bin/sshpass")
    if (os.path.isfile(sshpass_path)
            and os.access(sshpass_path, os.X_OK)):
        display("Using sshpass passwordless wrapper at %s" %
                (sshpass_path),
                color="green",
                stderr=True)
    else:
        display("sshpass passwordless wrapper NOT available in %s" %
                (sshpass_path),
                color="purple",
                stderr=True)
    display("SSH-ing to %s[%d] => %s" % (args.role, args.index, host.name),
            color="green",
            stderr=True)
    return dict(command=command)