def tostring(element, encoding='UTF-8'):
    """Serialise an XML element to text.

    When lxml is available its serialiser is asked for unicode output
    directly; otherwise the stdlib serialiser produces bytes, which are
    decoded with to_text() using *encoding*.
    """
    if not HAS_LXML:
        serialized = xml_to_string(element, encoding)
        return to_text(serialized, encoding=encoding)
    return xml_to_string(element, encoding='unicode')
def set_options(self, task_keys=None, var_options=None, direct=None):
    """Resolve callback options and prepare cgroup-based profiling.

    Verifies that the memory/cpuacct/pids cgroup files for the configured
    control group are readable, resets the kernel-side counters, builds the
    per-feature profiler factories and, when results are written to disk,
    prepares the output directory and file naming.  On any failure the
    callback disables itself instead of aborting the play.
    """
    super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)

    cpu_poll_interval = self.get_option('cpu_poll_interval')
    memory_poll_interval = self.get_option('memory_poll_interval')
    pid_poll_interval = self.get_option('pid_poll_interval')
    self._display_recap = self.get_option('display_recap')

    control_group = to_bytes(self.get_option('control_group'), errors='surrogate_or_strict')
    self.mem_max_file = b'/sys/fs/cgroup/memory/%s/memory.max_usage_in_bytes' % control_group
    mem_current_file = b'/sys/fs/cgroup/memory/%s/memory.usage_in_bytes' % control_group
    cpu_usage_file = b'/sys/fs/cgroup/cpuacct/%s/cpuacct.usage' % control_group
    pid_current_file = b'/sys/fs/cgroup/pids/%s/pids.current' % control_group

    # Probe every cgroup file for readability up front; if any is missing or
    # unreadable, disable the whole callback rather than failing mid-play.
    for path in (self.mem_max_file, mem_current_file, cpu_usage_file, pid_current_file):
        try:
            with open(path) as f:
                pass
        except Exception as e:
            self._display.warning(
                u'Cannot open %s for reading (%s). Disabling %s' % (to_text(path), to_text(e), self.CALLBACK_NAME))
            self.disabled = True
            return

    # Reset the kernel's memory high-water-mark so this run starts clean.
    try:
        with open(self.mem_max_file, 'w+') as f:
            f.write('0')
    except Exception as e:
        self._display.warning(
            u'Unable to reset max memory value in %s: %s' % (to_text(self.mem_max_file), to_text(e)))
        self.disabled = True
        return

    # Reset the accumulated CPU usage counter as well.
    try:
        with open(cpu_usage_file, 'w+') as f:
            f.write('0')
    except Exception as e:
        self._display.warning(
            u'Unable to reset CPU usage value in %s: %s' % (to_text(cpu_usage_file), to_text(e)))
        self.disabled = True
        return

    # Factories for the per-feature profiler threads, pre-bound to the
    # matching cgroup file and poll interval.
    self._profiler_map = {
        'memory': partial(MemoryProf, mem_current_file, poll_interval=memory_poll_interval),
        'cpu': partial(CpuProf, cpu_usage_file, poll_interval=cpu_poll_interval),
        'pids': partial(PidsProf, pid_current_file, poll_interval=pid_poll_interval),
    }

    self.write_files = self.get_option('write_files')
    file_per_task = self.get_option('file_per_task')
    self._output_format = to_bytes(self.get_option('output_format'))
    output_dir = to_bytes(self.get_option('output_dir'), errors='surrogate_or_strict')
    try:
        # The configured directory may contain a %s placeholder which is
        # filled with a run timestamp; TypeError means no placeholder.
        output_dir %= to_bytes(datetime.datetime.now().isoformat())
    except TypeError:
        pass

    self._output_dir = output_dir

    file_name_format = to_bytes(self.get_option('file_name_format'))

    if self.write_files:
        if file_per_task:
            self._file_per_task = True
            if file_name_format == b'%(feature)s.%(ext)s':
                # The default name would collide across tasks; include a
                # counter and the task uuid to keep files distinct.
                file_name_format = b'%(counter)s-%(task_uuid)s-%(feature)s.%(ext)s'
        else:
            file_name_format = to_bytes(self.get_option('file_name_format'))

        self._file_name_format = file_name_format

        if not os.path.exists(output_dir):
            try:
                os.mkdir(output_dir)
            except Exception as e:
                self._display.warning(
                    u'Could not create the output directory at %s: %s' % (to_text(output_dir), to_text(e)))
                self.disabled = True
                return

        # In single-file mode the output files stay open for the whole run.
        if not self._file_per_task:
            self._open_files()
def run(self, iterator, play_context):
    '''
    The "free" strategy is a bit more complex, in that it allows tasks to
    be sent to hosts as quickly as they can be processed. This means that
    some hosts may finish very quickly if run tasks result in little or no
    work being done versus other systems.

    The algorithm used here also tries to be more "fair" when iterating
    through hosts by remembering the last host in the list to be given a task
    and starting the search from there as opposed to the top of the hosts
    list again, which would end up favoring hosts near the beginning of the
    list.
    '''

    # the last host to be given a task
    last_host = 0

    result = self._tqm.RUN_OK

    # start with all workers being counted as being free
    workers_free = len(self._workers)

    self._set_hosts_cache(iterator._play)

    work_to_do = True
    while work_to_do and not self._tqm._terminated:

        hosts_left = self.get_hosts_left(iterator)

        if len(hosts_left) == 0:
            self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
            result = False
            break

        work_to_do = False  # assume we have no more work to do
        starting_host = last_host  # save current position so we know when we've looped back around and need to break

        # try and find an unblocked host with a task to run
        host_results = []
        while True:
            host = hosts_left[last_host]
            display.debug("next free host: %s" % host)
            host_name = host.get_name()

            # peek at the next task for the host, to see if there's
            # anything to do do for this host
            (state, task) = iterator.get_next_task_for_host(host, peek=True)
            display.debug("free host state: %s" % state, host=host_name)
            display.debug("free host task: %s" % task, host=host_name)
            if host_name not in self._tqm._unreachable_hosts and task:

                # set the flag so the outer loop knows we've still found
                # some work which needs to be done
                work_to_do = True

                display.debug("this host has work to do", host=host_name)

                # check to see if this host is blocked (still executing a previous task)
                if (host_name not in self._blocked_hosts or not self._blocked_hosts[host_name]):

                    display.debug("getting variables", host=host_name)
                    task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
                                                                _hosts=self._hosts_cache,
                                                                _hosts_all=self._hosts_cache_all)
                    self.add_tqm_variables(task_vars, play=iterator._play)
                    templar = Templar(loader=self._loader, variables=task_vars)
                    display.debug("done getting variables", host=host_name)

                    try:
                        throttle = int(templar.template(task.throttle))
                    except Exception as e:
                        raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)

                    # honour the throttle: count live workers already running
                    # this exact task and defer if the limit is reached
                    if throttle > 0:
                        same_tasks = 0
                        for worker in self._workers:
                            if worker and worker.is_alive() and worker._task._uuid == task._uuid:
                                same_tasks += 1

                        display.debug("task: %s, same_tasks: %d" % (task.get_name(), same_tasks))
                        if same_tasks >= throttle:
                            break

                    # pop the task, mark the host blocked, and queue it
                    self._blocked_hosts[host_name] = True
                    (state, task) = iterator.get_next_task_for_host(host)

                    try:
                        action = action_loader.get(task.action, class_only=True)
                    except KeyError:
                        # we don't care here, because the action may simply not have a
                        # corresponding action plugin
                        action = None

                    try:
                        task.name = to_text(templar.template(task.name, fail_on_undefined=False), nonstring='empty')
                        display.debug("done templating", host=host_name)
                    except Exception:
                        # just ignore any errors during task name templating,
                        # we don't care if it just shows the raw name
                        display.debug("templating failed for some reason", host=host_name)

                    run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
                    if run_once:
                        if action and getattr(action, 'BYPASS_HOST_LOOP', False):
                            raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy "
                                               "and would instead execute for every host in the inventory list." % task.action, obj=task._ds)
                        else:
                            display.warning("Using run_once with the free strategy is not currently supported. This task will still be "
                                            "executed for every host in the inventory list.")

                    # check to see if this task should be skipped, due to it being a member of a
                    # role which has already run (and whether that role allows duplicate execution)
                    if task._role and task._role.has_run(host):
                        # If there is no metadata, the default behavior is to not allow duplicates,
                        # if there is metadata, check to see if the allow_duplicates flag was set to true
                        if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                            display.debug("'%s' skipped because role has already run" % task, host=host_name)
                            del self._blocked_hosts[host_name]
                            continue

                    if task.action == 'meta':
                        self._execute_meta(task, play_context, iterator, target_host=host)
                        self._blocked_hosts[host_name] = False
                    else:
                        # handle step if needed, skip meta actions as they are used internally
                        if not self._step or self._take_step(task, host_name):
                            if task.any_errors_fatal:
                                display.warning("Using any_errors_fatal with the free strategy is not supported, "
                                                "as tasks are executed independently on each host")
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            self._queue_task(host, task, task_vars, play_context)
                            # each task is counted as a worker being busy
                            workers_free -= 1
                            del task_vars
                else:
                    display.debug("%s is blocked, skipping for now" % host_name)

            # all workers have tasks to do (and the current host isn't done with the play).
            # loop back to starting host and break out
            if self._host_pinned and workers_free == 0 and work_to_do:
                last_host = starting_host
                break

            # move on to the next host and make sure we
            # haven't gone past the end of our hosts list
            last_host += 1
            if last_host > len(hosts_left) - 1:
                last_host = 0

            # if we've looped around back to the start, break out
            if last_host == starting_host:
                break

        results = self._process_pending_results(iterator)
        host_results.extend(results)

        # each result is counted as a worker being free again
        workers_free += len(results)

        self.update_active_connections(results)

        included_files = IncludedFile.process_include_results(
            host_results,
            iterator=iterator,
            loader=self._loader,
            variable_manager=self._variable_manager
        )

        if len(included_files) > 0:
            all_blocks = dict((host, []) for host in hosts_left)
            for included_file in included_files:
                display.debug("collecting new blocks for %s" % included_file)
                try:
                    if included_file._is_role:
                        new_ir = self._copy_included_file(included_file)

                        new_blocks, handler_blocks = new_ir.get_block_list(
                            play=iterator._play,
                            variable_manager=self._variable_manager,
                            loader=self._loader,
                        )
                    else:
                        new_blocks = self._load_included_file(included_file, iterator=iterator)
                except AnsibleError as e:
                    # mark every host that wanted this include as failed and
                    # carry on with the remaining includes
                    for host in included_file._hosts:
                        iterator.mark_host_failed(host)
                    display.warning(to_text(e))
                    continue

                for new_block in new_blocks:
                    task_vars = self._variable_manager.get_vars(play=iterator._play, task=new_block._parent,
                                                                _hosts=self._hosts_cache,
                                                                _hosts_all=self._hosts_cache_all)
                    final_block = new_block.filter_tagged_tasks(task_vars)
                    for host in hosts_left:
                        if host in included_file._hosts:
                            all_blocks[host].append(final_block)
                display.debug("done collecting new blocks for %s" % included_file)

            display.debug("adding all collected blocks from %d included file(s) to iterator" % len(included_files))
            for host in hosts_left:
                iterator.add_tasks(host, all_blocks[host])
            display.debug("done adding collected blocks to iterator")

        # pause briefly so we don't spin lock
        time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)

    # collect all the final results
    results = self._wait_on_pending_results(iterator)

    # run the base class run() method, which executes the cleanup function
    # and runs any outstanding handlers which have been triggered
    return super(StrategyModule, self).run(iterator, play_context, result)
def exists(self):
    '''Return True when the namespace already exists.

    Runs ``ip netns list`` and matches the configured name against the
    first token of each output line.  Fails the module when the command
    itself errors.
    '''
    rc, out, err = self.module.run_command('ip netns list')
    if rc != 0:
        self.module.fail_json(msg=to_text(err))
    # `ip netns list` prints one namespace per line, optionally followed by
    # "(id: N)".  Compare against the exact first token instead of a raw
    # substring so that e.g. "ns1" does not falsely match "ns10".
    return any(line.split()[0] == self.name for line in out.splitlines() if line.strip())
def main():
    """Manage Cobbler systems over the Cobbler XML-RPC API.

    Depending on *state*, creates, updates, removes or queries a system
    record, optionally triggering a Cobbler sync afterwards.  Supports
    check mode (no save/remove/sync is performed).
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='127.0.0.1'),
            port=dict(type='int'),
            username=dict(type='str', default='cobbler'),
            password=dict(type='str', no_log=True),
            use_ssl=dict(type='bool', default=True),
            validate_certs=dict(type='bool', default=True),
            name=dict(type='str'),
            interfaces=dict(type='dict'),
            properties=dict(type='dict'),
            sync=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        ),
        supports_check_mode=True,
    )

    username = module.params['username']
    password = module.params['password']
    port = module.params['port']
    use_ssl = module.params['use_ssl']
    validate_certs = module.params['validate_certs']
    name = module.params['name']
    state = module.params['state']

    module.params['proto'] = 'https' if use_ssl else 'http'
    if not port:
        module.params['port'] = '443' if use_ssl else '80'

    result = dict(changed=False)

    start = datetime.datetime.utcnow()

    ssl_context = None
    if not validate_certs:
        try:
            # Python 2.7.9 and newer.  NOTE: there is no public
            # ssl.create_unverified_context(); the correct helper is
            # ssl._create_unverified_context() — the previous code always
            # raised AttributeError and never disabled verification.
            ssl_context = ssl._create_unverified_context()
        except AttributeError:
            # Legacy Python that doesn't verify HTTPS certificates by
            # default — nothing to disable.
            pass

    url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)

    if ssl_context:
        conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
    else:
        conn = xmlrpc_client.Server(url)

    try:
        token = conn.login(username, password)
    except xmlrpc_client.Fault as e:
        module.fail_json(
            msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(
                url=url, error=to_text(e), **module.params))
    except Exception as e:
        module.fail_json(msg="Connection to '{url}' failed. {error}".format(
            url=url, error=to_text(e), **module.params))

    system = getsystem(conn, name, token)

    if state == 'query':
        if name:
            result['system'] = system
        else:
            # Return a list of dictionaries, one per system
            result['systems'] = conn.get_systems()

    elif state == 'present':
        if system:
            # Update existing entry
            system_id = conn.get_system_handle(name, token)

            for key, value in iteritems(module.params['properties']):
                if key not in system:
                    module.warn("Property '{0}' is not a valid system property.".format(key))
                    # skip unknown keys — indexing them would raise KeyError
                    continue
                if system[key] != value:
                    try:
                        conn.modify_system(system_id, key, value, token)
                        result['changed'] = True
                    except Exception as e:
                        module.fail_json(
                            msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
        else:
            # Create a new entry
            system_id = conn.new_system(token)
            conn.modify_system(system_id, 'name', name, token)
            result['changed'] = True

            if module.params['properties']:
                for key, value in iteritems(module.params['properties']):
                    try:
                        conn.modify_system(system_id, key, value, token)
                    except Exception as e:
                        module.fail_json(
                            msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))

        # Add interface properties
        interface_properties = dict()
        if module.params['interfaces']:
            for device, values in iteritems(module.params['interfaces']):
                for key, value in iteritems(values):
                    if key == 'name':
                        continue
                    if key not in IFPROPS_MAPPING:
                        module.warn("Property '{0}' is not a valid system property.".format(key))
                        # unknown keys cannot be mapped — skip them
                        continue
                    if not system or system['interfaces'][device][IFPROPS_MAPPING[key]] != value:
                        result['changed'] = True
                        interface_properties['{0}-{1}'.format(key, device)] = value

            if result['changed'] is True:
                conn.modify_system(system_id, "modify_interface", interface_properties, token)

        # Only save when the entry was changed
        if not module.check_mode and result['changed']:
            conn.save_system(system_id, token)

    elif state == 'absent':
        if system:
            if not module.check_mode:
                conn.remove_system(name, token)
            result['changed'] = True

    if not module.check_mode and module.params['sync'] and result['changed']:
        try:
            conn.sync(token)
        except Exception as e:
            module.fail_json(msg="Failed to sync Cobbler. {0}".format(to_text(e)))

    if state in ('absent', 'present'):
        result['system'] = getsystem(conn, name, token)

        if module._diff:
            result['diff'] = dict(before=system, after=result['system'])

    elapsed = datetime.datetime.utcnow() - start
    module.exit_json(elapsed=elapsed.seconds, **result)
def main():
    """ main entry point for module execution """
    # nested option specs, innermost first
    net_opts = dict(
        prefix=dict(required=True),
        masklen=dict(type='int'),
        route_map=dict(),
    )

    redist_opts = dict(
        protocol=dict(choices=REDISTRIBUTE_PROTOCOLS, required=True),
        id=dict(),
        metric=dict(type='int'),
        route_map=dict(),
    )

    timers_opts = dict(
        keepalive=dict(type='int', required=True),
        holdtime=dict(type='int', required=True),
        min_neighbor_holdtime=dict(type='int'),
    )

    neighbor_opts = dict(
        neighbor=dict(required=True),
        remote_as=dict(type='int', required=True),
        local_as=dict(type='int'),
        update_source=dict(),
        password=dict(no_log=True),
        enabled=dict(type='bool'),
        description=dict(),
        ebgp_multihop=dict(type='int'),
        timers=dict(type='dict', options=timers_opts),
        peer_group=dict(),
    )

    af_neighbor_opts = dict(
        neighbor=dict(required=True),
        activate=dict(type='bool'),
        advertisement_interval=dict(type='int'),
        remove_private_as=dict(type='bool'),
        next_hop_self=dict(type='bool'),
        route_reflector_client=dict(type='bool'),
        route_server_client=dict(type='bool'),
        maximum_prefix=dict(type='int'),
        prefix_list_in=dict(),
        prefix_list_out=dict(),
    )

    af_opts = dict(
        afi=dict(choices=['ipv4', 'ipv6'], required=True),
        safi=dict(choices=['flowspec', 'labeled-unicast', 'multicast', 'unicast'], default='unicast'),
        auto_summary=dict(type='bool'),
        synchronization=dict(type='bool'),
        networks=dict(type='list', elements='dict', options=net_opts),
        redistribute=dict(type='list', elements='dict', options=redist_opts),
        neighbors=dict(type='list', elements='dict', options=af_neighbor_opts),
    )

    config_opts = dict(
        bgp_as=dict(type='int', required=True),
        router_id=dict(),
        log_neighbor_changes=dict(type='bool'),
        neighbors=dict(type='list', elements='dict', options=neighbor_opts),
        address_family=dict(type='list', elements='dict', options=af_opts),
        networks=dict(type='list', elements='dict', options=net_opts),
    )

    argument_spec = dict(
        config=dict(type='dict', options=config_opts),
        operation=dict(default='merge', choices=['merge', 'replace', 'override', 'delete']),
    )

    module = NetworkModule(argument_spec=argument_spec, supports_check_mode=True)

    # apply the requested operation to the "router bgp" config section
    try:
        result = module.edit_config(config_filter='| section ^router bgp')
    except Exception as exc:
        module.fail_json(msg=to_text(exc))

    module.exit_json(**result)
def start_session(self):
    ''' start ssm session '''

    # Fall back to the inventory host when no explicit instance id is set.
    if self.get_option('instance_id') is None:
        self.instance_id = self.host
    else:
        self.instance_id = self.get_option('instance_id')

    display.vvv(u"ESTABLISH SSM CONNECTION TO: {0}".format(self.instance_id), host=self.host)

    # `plugin` is the path to the session-manager-plugin executable.
    executable = self.get_option('plugin')
    if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
        raise AnsibleError("failed to find the executable specified %s."
                           " Please verify if the executable exists and re-try." % executable)

    profile_name = ''
    region_name = self.get_option('region')
    ssm_parameters = dict()

    client = boto3.client('ssm', region_name=region_name)
    self._client = client
    response = client.start_session(Target=self.instance_id, Parameters=ssm_parameters)
    self._session_id = response['SessionId']

    # The plugin receives the raw StartSession response plus the target
    # description as its command-line arguments.
    cmd = [
        executable,
        json.dumps(response),
        region_name,
        "StartSession",
        profile_name,
        json.dumps({"Target": self.instance_id}),
        client.meta.endpoint_url
    ]

    display.vvvv(u"SSM COMMAND: {0}".format(to_text(cmd)), host=self.host)

    # Attach the plugin's stdout to a pty so it behaves as if interactive.
    stdout_r, stdout_w = pty.openpty()
    session = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=stdout_w,
        stderr=subprocess.PIPE,
        close_fds=True,
        bufsize=0,
    )

    os.close(stdout_w)
    self._stdout = os.fdopen(stdout_r, 'rb', 0)
    self._session = session
    self._poll_stdout = select.poll()
    self._poll_stdout.register(self._stdout, select.POLLIN)

    # Disable command echo and prompt.
    self._prepare_terminal()

    display.vvv(u"SSM CONNECTION ID: {0}".format(self._session_id), host=self.host)

    return session
def main():
    """Generate the certificates needed for an ACME tls-alpn-01 challenge.

    Loads the account/challenge private key, then builds both a regular
    self-signed certificate and the challenge certificate carrying the
    acmeIdentifier extension, returning both PEM-encoded.
    """
    module = AnsibleModule(
        argument_spec=dict(
            challenge=dict(type='str', required=True, choices=['tls-alpn-01']),
            challenge_data=dict(type='dict', required=True),
            private_key_src=dict(type='path'),
            private_key_content=dict(type='str', no_log=True),
        ),
        required_one_of=(['private_key_src', 'private_key_content'], ),
        mutually_exclusive=(['private_key_src', 'private_key_content'], ),
    )
    if not HAS_CRYPTOGRAPHY:
        module.fail_json(msg=missing_required_lib('cryptography >= 1.3'), exception=CRYPTOGRAPHY_IMP_ERR)

    try:
        # Get parameters
        challenge = module.params['challenge']
        challenge_data = module.params['challenge_data']

        # Get hold of private key
        private_key_content = module.params.get('private_key_content')
        if private_key_content is None:
            private_key_content = read_file(module.params['private_key_src'])
        else:
            private_key_content = to_bytes(private_key_content)
        try:
            private_key = cryptography.hazmat.primitives.serialization.load_pem_private_key(
                private_key_content, password=None, backend=_cryptography_backend)
        except Exception as e:
            raise ModuleFailException('Error while loading private key: {0}'.format(e))

        # Some common attributes.  resource_original defaults to a dns:
        # identifier derived from the resource itself.
        domain = to_text(challenge_data['resource'])
        identifier_type, identifier = to_text(
            challenge_data.get('resource_original', 'dns:' + challenge_data['resource'])).split(':', 1)
        subject = issuer = cryptography.x509.Name([])
        not_valid_before = datetime.datetime.utcnow()
        not_valid_after = datetime.datetime.utcnow() + datetime.timedelta(days=10)
        if identifier_type == 'dns':
            san = cryptography.x509.DNSName(identifier)
        elif identifier_type == 'ip':
            san = cryptography.x509.IPAddress(ipaddress.ip_address(identifier))
        else:
            raise ModuleFailException('Unsupported identifier type "{0}"'.format(identifier_type))

        # Generate regular self-signed certificate
        regular_certificate = cryptography.x509.CertificateBuilder(
        ).subject_name(subject).issuer_name(issuer).public_key(
            private_key.public_key()
        ).serial_number(
            cryptography.x509.random_serial_number()
        ).not_valid_before(
            not_valid_before
        ).not_valid_after(
            not_valid_after
        ).add_extension(
            cryptography.x509.SubjectAlternativeName([san]),
            critical=False,
        ).sign(private_key, cryptography.hazmat.primitives.hashes.SHA256(), _cryptography_backend)

        # Process challenge
        if challenge == 'tls-alpn-01':
            value = base64.b64decode(challenge_data['resource_value'])
            # Same certificate as above, plus the critical extension with
            # OID 1.3.6.1.5.5.7.1.31 (the ACME acmeIdentifier extension,
            # cf. RFC 8737) carrying the challenge value.
            challenge_certificate = cryptography.x509.CertificateBuilder(
            ).subject_name(subject).issuer_name(issuer).public_key(
                private_key.public_key()
            ).serial_number(
                cryptography.x509.random_serial_number()
            ).not_valid_before(
                not_valid_before
            ).not_valid_after(
                not_valid_after
            ).add_extension(
                cryptography.x509.SubjectAlternativeName([san]),
                critical=False,
            ).add_extension(
                cryptography.x509.UnrecognizedExtension(
                    cryptography.x509.ObjectIdentifier("1.3.6.1.5.5.7.1.31"),
                    encode_octet_string(value),
                ),
                critical=True,
            ).sign(private_key, cryptography.hazmat.primitives.hashes.SHA256(), _cryptography_backend)

        module.exit_json(
            changed=True,
            domain=domain,
            identifier_type=identifier_type,
            identifier=identifier,
            challenge_certificate=challenge_certificate.public_bytes(
                cryptography.hazmat.primitives.serialization.Encoding.PEM),
            regular_certificate=regular_certificate.public_bytes(
                cryptography.hazmat.primitives.serialization.Encoding.PEM))
    except ModuleFailException as e:
        e.do_fail(module)
def run_commands(module, commands, check_rc=True):
    """Execute *commands* over the module's persistent connection.

    Returns the command responses; a ConnectionError is turned into a
    module failure with the error text as the message.
    """
    connection = get_connection(module)
    try:
        response = connection.run_commands(commands=commands, check_rc=check_rc)
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc))
    else:
        return response
def run(self, tmp=None, task_vars=None):
    """Run win_updates, handling the reboot/re-run cycle client-side.

    Validates the action-level arguments (state, reboot, reboot_timeout),
    runs the win_updates module, and — when reboot=yes — keeps rebooting
    and re-running the module until no further updates or reboots are
    pending, aggregating the per-run counters into the final result.
    """
    self._supports_check_mode = True
    self._supports_async = True

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    state = self._task.args.get('state', 'installed')
    reboot = self._task.args.get('reboot', False)
    reboot_timeout = self._task.args.get('reboot_timeout', self.DEFAULT_REBOOT_TIMEOUT)
    use_task = boolean(self._task.args.get('use_scheduled_task', False), strict=False)

    if state not in ['installed', 'searched', 'downloaded']:
        result['failed'] = True
        result['msg'] = "state must be either installed, searched or downloaded"
        return result

    try:
        reboot = boolean(reboot)
    except TypeError as exc:
        result['failed'] = True
        result['msg'] = "cannot parse reboot as a boolean: %s" % to_text(exc)
        return result

    if not isinstance(reboot_timeout, int):
        result['failed'] = True
        result['msg'] = "reboot_timeout must be an integer"
        return result

    if reboot and self._task.async_val > 0:
        result['failed'] = True
        result['msg'] = "async is not supported for this task when " \
                        "reboot=yes"
        return result

    # Run the module; reboot handling is done here, not by the module.
    new_module_args = self._task.args.copy()
    new_module_args.pop('reboot', None)
    new_module_args.pop('reboot_timeout', None)
    result = self._run_win_updates(new_module_args, task_vars, use_task)

    # if the module failed to run at all then changed won't be populated
    # so we just return the result as is
    # https://github.com/ansible/ansible/issues/38232
    failed = result.get('failed', False)
    if ("updates" not in result.keys() and self._task.async_val == 0) or failed:
        result['failed'] = True
        return result

    changed = result.get('changed', False)
    updates = result.get('updates', dict())
    filtered_updates = result.get('filtered_updates', dict())
    found_update_count = result.get('found_update_count', 0)
    installed_update_count = result.get('installed_update_count', 0)

    # Handle automatic reboots if the reboot flag is set
    if reboot and state == 'installed' and not \
            self._play_context.check_mode:
        previously_errored = False
        while result['installed_update_count'] > 0 or \
                result['found_update_count'] > 0 or \
                result['reboot_required'] is True:
            display.vvv("win_updates: check win_updates results for "
                        "automatic reboot: %s" % json.dumps(result))

            # check if the module failed, break from the loop if it
            # previously failed and return error to the user
            if result.get('failed', False):
                if previously_errored:
                    break
                previously_errored = True
            else:
                previously_errored = False

            reboot_error = None
            # check if a reboot was required before installing the updates
            if result.get('msg', '') == "A reboot is required before " \
                    "more updates can be installed":
                reboot_error = "reboot was required before more updates " \
                               "can be installed"

            if result.get('reboot_required', False):
                if reboot_error is None:
                    reboot_error = "reboot was required to finalise " \
                                   "update install"
                try:
                    changed = True
                    self._reboot_server(task_vars, reboot_timeout, use_task)
                except AnsibleError as exc:
                    result['failed'] = True
                    result['msg'] = "Failed to reboot remote host when " \
                                    "%s: %s" \
                                    % (reboot_error, to_text(exc))
                    break

            result.pop('msg', None)
            # rerun the win_updates module after the reboot is complete
            result = self._run_win_updates(new_module_args, task_vars, use_task)
            if result.get('failed', False):
                return result

            # merge this run's updates/counters into the aggregated totals
            result_updates = result.get('updates', dict())
            result_filtered_updates = result.get('filtered_updates', dict())
            updates = self._merge_dict(updates, result_updates)
            filtered_updates = self._merge_dict(filtered_updates, result_filtered_updates)
            found_update_count += result.get('found_update_count', 0)
            installed_update_count += result.get('installed_update_count', 0)
            if result['changed']:
                changed = True

    # finally create the return dict based on the aggregated execution
    # values if we are not in async
    if self._task.async_val == 0:
        result['changed'] = changed
        result['updates'] = updates
        result['filtered_updates'] = filtered_updates
        result['found_update_count'] = found_update_count
        result['installed_update_count'] = installed_update_count

    return result
def run(self, tmp=None, task_vars=None):
    """Wait until the managed host is reachable end-to-end.

    First (optionally) waits for the connection's transport to come up,
    then polls with the ping/win_ping module until it answers 'pong' or
    the timeout expires.  Skipped entirely in check mode.
    """
    if task_vars is None:
        task_vars = dict()

    connect_timeout = int(self._task.args.get('connect_timeout', self.DEFAULT_CONNECT_TIMEOUT))
    delay = int(self._task.args.get('delay', self.DEFAULT_DELAY))
    sleep = int(self._task.args.get('sleep', self.DEFAULT_SLEEP))
    timeout = int(self._task.args.get('timeout', self.DEFAULT_TIMEOUT))

    if self._play_context.check_mode:
        display.vvv("wait_for_connection: skipping for check_mode")
        return dict(skipped=True)

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    def ping_module_test(connect_timeout):
        ''' Test ping module, if available '''
        display.vvv("wait_for_connection: attempting ping module test")
        # call connection reset between runs if it's there
        try:
            self._connection.reset()
        except AttributeError:
            pass

        # Use win_ping on winrm/powershell, else use ping
        if getattr(self._connection._shell, "_IS_WINDOWS", False):
            ping_result = self._execute_module(module_name='win_ping', module_args=dict(), task_vars=task_vars)
        else:
            ping_result = self._execute_module(module_name='ping', module_args=dict(), task_vars=task_vars)

        # Test module output
        if ping_result['ping'] != 'pong':
            raise Exception('ping test failed')

    start = datetime.now()

    if delay:
        time.sleep(delay)

    try:
        # If the connection has a transport_test method, use it first
        if hasattr(self._connection, 'transport_test'):
            self.do_until_success_or_timeout(self._connection.transport_test, timeout, connect_timeout,
                                             what_desc="connection port up", sleep=sleep)

        # Use the ping module test to determine end-to-end connectivity
        self.do_until_success_or_timeout(ping_module_test, timeout, connect_timeout,
                                         what_desc="ping module test", sleep=sleep)

    except TimedOutException as e:
        result['failed'] = True
        result['msg'] = to_text(e)

    elapsed = datetime.now() - start
    result['elapsed'] = elapsed.seconds

    # remove a temporary path we created
    self._remove_tmp_path(self._connection._shell.tmpdir)

    return result
def parse(self, inventory, loader, path, cache=None):
    """Execute the inventory script with --list and populate the inventory.

    Parses the script's JSON output into groups and host variables; host
    variables come from the "_meta.hostvars" element when present,
    otherwise from per-host --host invocations.  Any failure is re-raised
    as AnsibleParserError.
    """
    super(InventoryModule, self).parse(inventory, loader, path)
    self.set_options()

    if self.get_option('cache') is not None:
        display.deprecated(
            msg="The 'cache' option is deprecated for the script inventory plugin. "
                "External scripts implement their own caching and this option has never been used",
            version="2.12")

    # Support inventory scripts that are not prefixed with some
    # path information but happen to be in the current working
    # directory when '.' is not in PATH.
    cmd = [path, "--list"]

    try:
        try:
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError as e:
            raise AnsibleParserError("problem running %s (%s)" % (' '.join(cmd), to_native(e)))
        (stdout, stderr) = sp.communicate()

        path = to_native(path)
        err = to_native(stderr or "")

        if err and not err.endswith('\n'):
            err += '\n'

        if sp.returncode != 0:
            raise AnsibleError("Inventory script (%s) had an execution error: %s " % (path, err))

        # make sure script output is unicode so that json loader will output unicode strings itself
        try:
            data = to_text(stdout, errors="strict")
        except Exception as e:
            raise AnsibleError(
                "Inventory {0} contained characters that cannot be interpreted as UTF-8: {1}".format(
                    path, to_native(e)))

        try:
            processed = self.loader.load(data)
        except Exception as e:
            raise AnsibleError(
                "failed to parse executable inventory script results from {0}: {1}\n{2}".format(
                    path, to_native(e), err))

        # if no other errors happened and you want to force displaying stderr, do so now
        if stderr and self.get_option('always_show_stderr'):
            self.display.error(msg=to_text(err))

        if not isinstance(processed, Mapping):
            raise AnsibleError(
                "failed to parse executable inventory script results from {0}: needs to be a json dict\n{1}".format(
                    path, err))

        group = None
        data_from_meta = None

        # A "_meta" subelement may contain a variable "hostvars" which contains a hash for each host
        # if this "hostvars" exists at all then do not call --host for each host.
        # This is for efficiency and scripts should still return data
        # if called with --host for backwards compat with 1.2 and earlier.
        for (group, gdata) in processed.items():
            if group == '_meta':
                if 'hostvars' in gdata:
                    data_from_meta = gdata['hostvars']
            else:
                self._parse_group(group, gdata)

        for host in self._hosts:
            got = {}
            if data_from_meta is None:
                # no _meta.hostvars: fall back to calling --host per host
                got = self.get_host_variables(path, host)
            else:
                try:
                    got = data_from_meta.get(host, {})
                except AttributeError as e:
                    raise AnsibleError(
                        "Improperly formatted host information for %s: %s" % (host, to_native(e)),
                        orig_exc=e)

            self._populate_host_vars([host], got)

    except Exception as e:
        raise AnsibleParserError(to_native(e))
def export_to_ovf_files(self, vm_obj):
    """Export a virtual machine's disks (and optionally ISO/NVRAM files) and write an OVF descriptor.

    Acquires an HTTP NFC lease from vSphere, downloads each exportable device
    file into ``self.ovf_dir``, generates the .ovf descriptor via the
    OvfManager, and appends SHA256 checksums to the manifest file.

    :param vm_obj: the pyVmomi VirtualMachine object to export
    :return: dict in Ansible result format; on failure contains
             ``failed=True`` and the exception message
    """
    self.create_export_dir(vm_obj=vm_obj)
    export_with_iso = False
    if 'export_with_images' in self.params and self.params['export_with_images']:
        export_with_iso = True
    # only honour download_timeout when it is strictly between 10 and 60
    if 60 > self.params['download_timeout'] > 10:
        self.download_timeout = self.params['download_timeout']

    ovf_files = []
    # get http nfc lease firstly
    http_nfc_lease = vm_obj.ExportVm()
    # create a thread to track file download progress
    lease_updater = LeaseProgressUpdater(http_nfc_lease, self.lease_interval)
    total_bytes_written = 0
    # total storage space occupied by the virtual machine across all datastores
    total_bytes_to_write = vm_obj.summary.storage.unshared
    # new deployed VM with no OS installed
    if total_bytes_to_write == 0:
        total_bytes_to_write = vm_obj.summary.storage.committed
        if total_bytes_to_write == 0:
            http_nfc_lease.HttpNfcLeaseAbort()
            self.module.fail_json(msg='Total storage space occupied by the VM is 0.')
    headers = {'Accept': 'application/x-vnd.vmware-streamVmdk'}
    cookies = connect.GetStub().cookie
    if cookies:
        headers['Cookie'] = cookies
    lease_updater.start()
    try:
        # poll the lease until it is ready, then download each device file
        while True:
            if http_nfc_lease.state == vim.HttpNfcLease.State.ready:
                for deviceUrl in http_nfc_lease.info.deviceUrl:
                    file_download = False
                    if deviceUrl.targetId and deviceUrl.disk:
                        # virtual disk (vmdk) — always downloaded
                        file_download = True
                    elif deviceUrl.url.split('/')[-1].split('.')[-1] == 'iso':
                        if export_with_iso:
                            file_download = True
                    elif deviceUrl.url.split('/')[-1].split('.')[-1] == 'nvram':
                        # NVRAM export requires ESXi >= 6.7
                        if self.host_version_at_least(version=(6, 7, 0), vm_obj=vm_obj):
                            file_download = True
                    else:
                        continue
                    device_file_name = deviceUrl.url.split('/')[-1]
                    # device file named disk-0.iso, disk-1.vmdk, disk-2.vmdk, replace 'disk' with vm name
                    if device_file_name.split('.')[0][0:5] == "disk-":
                        device_file_name = device_file_name.replace('disk', vm_obj.name)
                    temp_target_disk = os.path.join(self.ovf_dir, device_file_name)
                    device_url = deviceUrl.url
                    # if export from ESXi host, replace * with hostname in url
                    # e.g., https://*/ha-nfc/5289bf27-da99-7c0e-3978-8853555deb8c/disk-1.vmdk
                    if '*' in device_url:
                        device_url = device_url.replace('*', self.params['hostname'])
                    if file_download:
                        current_bytes_written = self.download_device_files(
                            headers=headers,
                            temp_target_disk=temp_target_disk,
                            device_url=device_url,
                            lease_updater=lease_updater,
                            total_bytes_written=total_bytes_written,
                            total_bytes_to_write=total_bytes_to_write)
                        total_bytes_written += current_bytes_written
                        ovf_file = vim.OvfManager.OvfFile()
                        ovf_file.deviceId = deviceUrl.key
                        ovf_file.path = device_file_name
                        ovf_file.size = current_bytes_written
                        ovf_files.append(ovf_file)
                break
            elif http_nfc_lease.state == vim.HttpNfcLease.State.initializing:
                sleep(2)
                continue
            elif http_nfc_lease.state == vim.HttpNfcLease.State.error:
                lease_updater.stop()
                self.module.fail_json(msg='Get HTTP NFC lease error %s.' % http_nfc_lease.state.error[0].fault)

        # generate ovf file
        ovf_manager = self.content.ovfManager
        ovf_descriptor_name = vm_obj.name
        ovf_parameters = vim.OvfManager.CreateDescriptorParams()
        ovf_parameters.name = ovf_descriptor_name
        ovf_parameters.ovfFiles = ovf_files
        vm_descriptor_result = ovf_manager.CreateDescriptor(obj=vm_obj, cdp=ovf_parameters)
        if vm_descriptor_result.error:
            http_nfc_lease.HttpNfcLeaseAbort()
            lease_updater.stop()
            self.module.fail_json(msg='Create VM descriptor file error %s.' % vm_descriptor_result.error)
        else:
            vm_descriptor = vm_descriptor_result.ovfDescriptor
            ovf_descriptor_path = os.path.join(self.ovf_dir, ovf_descriptor_name + '.ovf')
            sha256_hash = hashlib.sha256()
            # write descriptor and record its SHA256 in the manifest
            with open(self.mf_file, 'a') as mf_handle:
                with open(ovf_descriptor_path, 'w') as handle:
                    handle.write(vm_descriptor)
                    sha256_hash.update(to_bytes(vm_descriptor))
                mf_handle.write('SHA256(' + os.path.basename(ovf_descriptor_path) + ')= ' + sha256_hash.hexdigest() + '\n')
            http_nfc_lease.HttpNfcLeaseProgress(100)
            # self.facts = http_nfc_lease.HttpNfcLeaseGetManifest()
            http_nfc_lease.HttpNfcLeaseComplete()
            lease_updater.stop()
            self.facts.update({'manifest': self.mf_file, 'ovf_file': ovf_descriptor_path})
    except Exception as err:
        # abort the lease on any failure so the server releases resources
        kwargs = {
            'changed': False,
            'failed': True,
            'msg': "get exception: %s" % to_text(err),
        }
        http_nfc_lease.HttpNfcLeaseAbort()
        lease_updater.stop()
        return kwargs
    return {'changed': True, 'failed': False, 'instance': self.facts}
def get_text(self, ele, tag):
    """Return the stripped text of the first child ``tag`` under ``ele``.

    Returns None when the tag is absent (``ele.find(tag)`` yields None and
    accessing ``.text`` raises AttributeError, which is swallowed).
    """
    try:
        child = ele.find(tag)
        return to_text(child.text).strip()
    except AttributeError:
        return None
def exec_command(self, cmd, in_data=None, sudoable=True):
    ''' run a command on the ssm host '''
    # Strategy: wrap the command between two random marker strings, feed it to
    # the long-lived session's stdin, then scan stdout for the markers to
    # delimit this command's output from anything else on the shared stream.
    super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

    display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self.host)

    session = self._session

    mark_begin = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])
    if self.is_windows:
        # on Windows the start marker also captures the exit code
        mark_start = mark_begin + " $LASTEXITCODE"
    else:
        mark_start = mark_begin
    mark_end = "".join([random.choice(string.ascii_letters) for i in xrange(self.MARK_LENGTH)])

    # Wrap command in markers accordingly for the shell used
    cmd = self._wrap_command(cmd, sudoable, mark_start, mark_end)

    self._flush_stderr(session)

    # write in 1024-byte chunks to avoid overrunning the session's stdin pipe
    for chunk in chunks(cmd, 1024):
        session.stdin.write(to_bytes(chunk, errors='surrogate_or_strict'))

    # Read stdout between the markers
    stdout = ''
    win_line = ''
    begin = False
    stop_time = int(round(time.time())) + self.get_option('timeout')
    while session.poll() is None:
        remaining = stop_time - int(round(time.time()))
        if remaining < 1:
            self._timeout = True
            display.vvvv(u"EXEC timeout stdout: {0}".format(to_text(stdout)), host=self.host)
            raise AnsibleConnectionFailure("SSM exec_command timeout on host: %s" % self.instance_id)
        # poll with a 1s timeout so the deadline above is re-checked regularly
        if self._poll_stdout.poll(1000):
            line = self._filter_ansi(self._stdout.readline())
            display.vvvv(u"EXEC stdout line: {0}".format(to_text(line)), host=self.host)
        else:
            display.vvvv(u"EXEC remaining: {0}".format(remaining), host=self.host)
            continue

        if not begin and self.is_windows:
            # Windows may echo the markers split across lines; accumulate
            win_line = win_line + line
            line = win_line

        if mark_start in line:
            begin = True
            if not line.startswith(mark_start):
                # discard any echo preceding the marker
                stdout = ''
            continue
        if begin:
            if mark_end in line:
                display.vvvv(u"POST_PROCESS: {0}".format(to_text(stdout)), host=self.host)
                returncode, stdout = self._post_process(stdout, mark_begin)
                break
            else:
                stdout = stdout + line

    # NOTE(review): if the session exits before mark_end is seen, 'returncode'
    # is never assigned and the return below raises UnboundLocalError — looks
    # like this relies on the wrapper always emitting mark_end; confirm.
    stderr = self._flush_stderr(session)

    return (returncode, stdout, stderr)
def main():
    """ main entry point for module execution """
    backup_spec = dict(filename=dict(), dir_path=dict(type='path'))
    argument_spec = dict(
        content=dict(aliases=['xml']),
        target=dict(choices=['auto', 'candidate', 'running'], default='auto', aliases=['datastore']),
        source_datastore=dict(aliases=['source']),
        format=dict(choices=['xml', 'text'], default='xml'),
        lock=dict(choices=['never', 'always', 'if-supported'], default='always'),
        default_operation=dict(choices=['merge', 'replace', 'none']),
        confirm=dict(type='int', default=0),
        confirm_commit=dict(type='bool', default=False),
        error_option=dict(choices=['stop-on-error', 'continue-on-error', 'rollback-on-error'], default='stop-on-error'),
        backup=dict(type='bool', default=False),
        backup_options=dict(type='dict', options=backup_spec),
        save=dict(type='bool', default=False),
        delete=dict(type='bool', default=False),
        commit=dict(type='bool', default=True),
        validate=dict(type='bool', default=False),
        get_filter=dict(),
    )
    # deprecated options
    netconf_top_spec = {
        'src': dict(type='path', removed_in_version=2.11),
        'host': dict(removed_in_version=2.11),
        'port': dict(removed_in_version=2.11, type='int', default=830),
        'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME']), removed_in_version=2.11, no_log=True),
        'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), removed_in_version=2.11, no_log=True),
        'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), removed_in_version=2.11, type='path'),
        'hostkey_verify': dict(removed_in_version=2.11, type='bool', default=True),
        'look_for_keys': dict(removed_in_version=2.11, type='bool', default=True),
        'timeout': dict(removed_in_version=2.11, type='int', default=10),
    }
    argument_spec.update(netconf_top_spec)

    mutually_exclusive = [('content', 'src', 'source', 'delete', 'confirm_commit')]
    required_one_of = [('content', 'src', 'source', 'delete', 'confirm_commit')]

    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)

    if module.params['src']:
        module.deprecate(msg="argument 'src' has been deprecated. Use file lookup plugin instead to read file contents.",
                         version="2.11")

    config = module.params['content'] or module.params['src']
    target = module.params['target']
    lock = module.params['lock']
    source = module.params['source_datastore']
    delete = module.params['delete']
    confirm_commit = module.params['confirm_commit']
    confirm = module.params['confirm']
    validate = module.params['validate']
    save = module.params['save']
    filter = module.params['get_filter']
    filter_type = get_filter_type(filter)

    conn = Connection(module._socket_path)
    capabilities = get_capabilities(module)
    operations = capabilities['device_operations']

    supports_commit = operations.get('supports_commit', False)
    supports_writable_running = operations.get('supports_writable_running', False)
    supports_startup = operations.get('supports_startup', False)

    # identify target datastore
    if target == 'candidate' and not supports_commit:
        module.fail_json(msg=':candidate is not supported by this netconf server')
    elif target == 'running' and not supports_writable_running:
        module.fail_json(msg=':writable-running is not supported by this netconf server')
    elif target == 'auto':
        # prefer candidate datastore when the server supports commit
        if supports_commit:
            target = 'candidate'
        elif supports_writable_running:
            target = 'running'
        else:
            module.fail_json(msg='neither :candidate nor :writable-running are supported by this netconf server')

    # Netconf server capability validation against input options
    if save and not supports_startup:
        module.fail_json(msg='cannot copy <%s/> to <startup/>, while :startup is not supported' % target)

    if confirm_commit and not operations.get('supports_confirm_commit', False):
        module.fail_json(msg='confirm commit is not supported by Netconf server')

    if (confirm > 0) and not operations.get('supports_confirm_commit', False):
        module.fail_json(msg='confirm commit is not supported by this netconf server, given confirm timeout: %d' % confirm)

    if validate and not operations.get('supports_validate', False):
        module.fail_json(msg='validate is not supported by this netconf server')

    if filter_type == 'xpath' and not operations.get('supports_xpath', False):
        module.fail_json(msg="filter value '%s' of type xpath is not supported on this device" % filter)

    filter_spec = (filter_type, filter) if filter_type else None

    if lock == 'never':
        execute_lock = False
    elif target in operations.get('lock_datastore', []):
        # lock is requested (always/if-support) and supported => lets do it
        execute_lock = True
    else:
        # lock is requested (always/if-supported) but not supported => issue warning
        module.warn("lock operation on '%s' source is not supported on this device" % target)
        execute_lock = (lock == 'always')

    result = {'changed': False, 'server_capabilities': capabilities.get('server_capabilities', [])}
    before = None
    after = None
    locked = False
    try:
        if module.params['backup']:
            response = get_config(module, target, filter_spec, lock=execute_lock)
            before = to_text(tostring(response), errors='surrogate_then_replace').strip()
            result['__backup__'] = before.strip()
        if validate:
            conn.validate(target)
        if source:
            if not module.check_mode:
                conn.copy(source, target)
            result['changed'] = True
        elif delete:
            if not module.check_mode:
                conn.delete(target)
            result['changed'] = True
        elif confirm_commit:
            if not module.check_mode:
                conn.commit()
            result['changed'] = True
        elif config:
            if module.check_mode and not supports_commit:
                # cannot simulate an edit without a candidate datastore
                module.warn("check mode not supported as Netconf server doesn't support candidate capability")
                result['changed'] = True
                module.exit_json(**result)

            if execute_lock:
                conn.lock(target=target)
                locked = True
            if before is None:
                before = to_text(conn.get_config(source=target, filter=filter_spec),
                                 errors='surrogate_then_replace').strip()

            kwargs = {
                'config': config,
                'target': target,
                'default_operation': module.params['default_operation'],
                'error_option': module.params['error_option'],
                'format': module.params['format'],
            }

            conn.edit_config(**kwargs)

            if supports_commit and module.params['commit']:
                after = to_text(conn.get_config(source='candidate', filter=filter_spec),
                                errors='surrogate_then_replace').strip()
                if not module.check_mode:
                    confirm_timeout = confirm if confirm > 0 else None
                    confirmed_commit = True if confirm_timeout else False
                    conn.commit(confirmed=confirmed_commit, timeout=confirm_timeout)
                else:
                    # check mode: throw the candidate edit away
                    conn.discard_changes()

            if after is None:
                after = to_text(conn.get_config(source='running', filter=filter_spec),
                                errors='surrogate_then_replace').strip()

            # compare sanitized before/after to decide whether anything changed
            sanitized_before = sanitize_xml(before)
            sanitized_after = sanitize_xml(after)
            if sanitized_before != sanitized_after:
                result['changed'] = True

            if result['changed']:
                if save and not module.check_mode:
                    conn.copy_config(target, 'startup')
                if module._diff:
                    result['diff'] = {'before': sanitized_before, 'after': sanitized_after}
    except ConnectionError as e:
        module.fail_json(msg=to_text(e, errors='surrogate_then_replace').strip())
    finally:
        # always release the lock we took, even on failure
        if locked:
            conn.unlock(target=target)

    module.exit_json(**result)
def check_for_update(connection, module_params, vpn_connection_id):
    """ Determines if there are any tags or routes that need to be updated. Ensures non-modifiable attributes aren't expected to change.

    :param connection: boto3 EC2 client used to look up the VPN connection
    :param module_params: the Ansible module parameters dict
    :param vpn_connection_id: id of the existing VPN connection to diff against
    :return: dict with keys 'tags_to_add', 'tags_to_remove',
             'routes_to_add', 'routes_to_remove'
    :raises VPNConnectionException: when a non-modifiable attribute
             (anything other than tags/routes) would change
    """
    tags = module_params.get('tags')
    routes = module_params.get('routes')
    purge_tags = module_params.get('purge_tags')
    purge_routes = module_params.get('purge_routes')
    vpn_connection = find_connection(connection, module_params, vpn_connection_id=vpn_connection_id)
    current_attrs = camel_dict_to_snake_dict(vpn_connection)

    # Initialize changes dict
    changes = {'tags_to_add': [],
               'tags_to_remove': [],
               'routes_to_add': [],
               'routes_to_remove': []}

    # Get changes to tags
    current_tags = boto3_tag_list_to_ansible_dict(current_attrs.get('tags', []), u'key', u'value')
    tags_to_add, changes['tags_to_remove'] = compare_aws_tags(current_tags, tags, purge_tags)
    changes['tags_to_add'] = ansible_dict_to_boto3_tag_list(tags_to_add)
    # Get changes to routes
    if 'Routes' in vpn_connection:
        current_routes = [route['DestinationCidrBlock'] for route in vpn_connection['Routes']]
        if purge_routes:
            changes['routes_to_remove'] = [old_route for old_route in current_routes if old_route not in routes]
        changes['routes_to_add'] = [new_route for new_route in routes if new_route not in current_routes]

    # Check if nonmodifiable attributes are attempted to be modified
    for attribute in current_attrs:
        if attribute in ("tags", "routes", "state"):
            # handled above / allowed to change
            continue
        elif attribute == 'options':
            # AWS nests static_routes_only under 'options'; map it back to
            # the module's 'static_only' parameter for comparison
            will_be = module_params.get('static_only', None)
            is_now = bool(current_attrs[attribute]['static_routes_only'])
            attribute = 'static_only'
        elif attribute == 'type':
            will_be = module_params.get("connection_type", None)
            is_now = current_attrs[attribute]
        else:
            is_now = current_attrs[attribute]
            will_be = module_params.get(attribute, None)

        # compare as text so int/str representations of the same value match
        if will_be is not None and to_text(will_be) != to_text(is_now):
            raise VPNConnectionException(msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN "
                                             "connection attributes are tags and routes. The value you tried to change it to "
                                             "is {2}.".format(attribute, is_now, will_be))

    return changes
def run(self, tmp=None, task_vars=None):
    ''' handler for unarchive operations '''
    # Validates src/dest, optionally transfers the local archive to the remote
    # tmp dir (when remote_src is false), then delegates to the unarchive module.
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    source = self._task.args.get('src', None)
    dest = self._task.args.get('dest', None)
    remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
    creates = self._task.args.get('creates', None)
    decrypt = self._task.args.get('decrypt', True)

    try:
        # "copy" is deprecated in favor of "remote_src".
        if 'copy' in self._task.args:
            # They are mutually exclusive.
            if 'remote_src' in self._task.args:
                raise AnsibleActionFail("parameters are mutually exclusive: ('copy', 'remote_src')")
            # We will take the information from copy and store it in
            # the remote_src var to use later in this file.
            self._task.args['remote_src'] = remote_src = not boolean(self._task.args.pop('copy'), strict=False)

        if source is None or dest is None:
            raise AnsibleActionFail("src (or content) and dest are required")

        if creates:
            # do not run the command if the line contains creates=filename
            # and the filename already exists. This allows idempotence
            # of command executions.
            creates = self._remote_expand_user(creates)
            if self._remote_file_exists(creates):
                raise AnsibleActionSkip("skipped, since %s exists" % creates)

        dest = self._remote_expand_user(dest)  # CCTODO: Fix path for Windows hosts.
        source = os.path.expanduser(source)

        if not remote_src:
            # resolve (and possibly vault-decrypt) the local source file
            try:
                source = self._loader.get_real_file(self._find_needle('files', source), decrypt=decrypt)
            except AnsibleError as e:
                raise AnsibleActionFail(to_text(e))

        try:
            remote_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=True)
        except AnsibleError as e:
            raise AnsibleActionFail(to_text(e))

        if not remote_stat['exists'] or not remote_stat['isdir']:
            raise AnsibleActionFail("dest '%s' must be an existing dir" % dest)

        if not remote_src:
            # transfer the file to a remote tmp location
            tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir, 'source')
            self._transfer_file(source, tmp_src)

        # handle diff mode client side
        # handle check mode client side

        # remove action plugin only keys
        new_module_args = self._task.args.copy()
        for key in ('decrypt',):
            if key in new_module_args:
                del new_module_args[key]

        if not remote_src:
            # fix file permissions when the copy is done as a different user
            self._fixup_perms2((self._connection._shell.tmpdir, tmp_src))
            new_module_args['src'] = tmp_src

        # execute the unarchive module now, with the updated args
        result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
    except AnsibleAction as e:
        result.update(e.result)
    finally:
        self._remove_tmp_path(self._connection._shell.tmpdir)
    return result
def run(self, terms, variables=None, url=None, queue=None, count=None):
    """Consume messages from a RabbitMQ queue and return them as a list of dicts.

    :param terms: lookup terms (unused beyond debug logging)
    :param variables: ansible variables (unused beyond debug logging)
    :param url: AMQP connection URL (required)
    :param queue: queue name to read from (required)
    :param count: optional maximum number of messages to fetch; reads until
                  the queue is empty when not reached
    :return: a single-element list wrapping the list of message dicts
    :raises AnsibleError: on missing/invalid parameters or AMQP failures
    """
    if not HAS_PIKA:
        raise AnsibleError('pika python package is required for rabbitmq lookup.')
    if not url:
        raise AnsibleError('URL is required for rabbitmq lookup.')
    if not queue:
        raise AnsibleError('Queue is required for rabbitmq lookup.')

    display.vvv(u"terms:%s : variables:%s url:%s queue:%s count:%s" % (terms, variables, url, queue, count))

    try:
        parameters = pika.URLParameters(url)
    except Exception as e:
        raise AnsibleError("URL malformed: %s" % to_native(e))

    try:
        connection = pika.BlockingConnection(parameters)
    except Exception as e:
        raise AnsibleError("Connection issue: %s" % to_native(e))

    try:
        conn_channel = connection.channel()
    except pika.exceptions.AMQPChannelError as e:
        try:
            connection.close()
        except pika.exceptions.AMQPConnectionError as ie:
            # BUGFIX: the format arguments must be a tuple; previously the
            # second value was passed as a stray positional to AnsibleError,
            # making the '%' formatting itself raise TypeError.
            raise AnsibleError("Channel and connection closing issues: %s / %s" % (to_native(e), to_native(ie)))
        raise AnsibleError("Channel issue: %s" % to_native(e))

    ret = []
    idx = 0

    while True:
        method_frame, properties, body = conn_channel.basic_get(queue=queue)
        if method_frame:
            display.vvv(u"%s, %s, %s " % (method_frame, properties, to_text(body)))

            # TODO: In the future consider checking content_type and handle text/binary data differently.
            msg_details = dict({
                'msg': to_text(body),
                'message_count': method_frame.message_count,
                'routing_key': method_frame.routing_key,
                'delivery_tag': method_frame.delivery_tag,
                'redelivered': method_frame.redelivered,
                'exchange': method_frame.exchange,
                'delivery_mode': properties.delivery_mode,
                'content_type': properties.content_type,
                'headers': properties.headers
            })
            if properties.content_type == 'application/json':
                # surface JSON payloads pre-parsed under a 'json' key
                try:
                    msg_details['json'] = json.loads(msg_details['msg'])
                except ValueError as e:
                    raise AnsibleError("Unable to decode JSON for message %s: %s" % (method_frame.delivery_tag, to_native(e)))

            ret.append(msg_details)
            conn_channel.basic_ack(method_frame.delivery_tag)
            idx += 1
            if method_frame.message_count == 0 or idx == count:
                break
        # If we didn't get a method_frame, exit.
        else:
            break

    if connection.is_closed:
        return [ret]
    else:
        try:
            connection.close()
        except pika.exceptions.AMQPConnectionError:
            pass
        return [ret]
def __init__(self, play_context, new_stdin, *args, **kwargs):
    """Initialize the connection and record the owning task's UUID as text."""
    super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
    task_uuid = kwargs.get('task_uuid', '')
    self._task_uuid = to_text(task_uuid)
def run_module():
    """Get or set WTI CPM power-plug configuration over its REST API.

    Builds the request from module parameters, performs an authenticated
    GET (getplugconfig) or POST (setplugconfig) against the device, and
    exits with the parsed JSON response in ``result['data']``.
    """
    # define the available arguments/parameters that a user can pass to
    # the module
    module_args = dict(
        cpm_action=dict(choices=['getplugconfig', 'setplugconfig'], required=True),
        cpm_url=dict(type='str', required=True),
        cpm_username=dict(type='str', required=True),
        cpm_password=dict(type='str', required=True, no_log=True),
        plug_id=dict(type='str', required=True),
        plug_name=dict(type='str', required=False),
        plug_bootdelay=dict(type='int', required=False, default=None, choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
        plug_default=dict(type='int', required=False, default=None, choices=[0, 1]),
        plug_bootpriority=dict(type='int', required=False, default=None),
        use_https=dict(type='bool', default=True),
        validate_certs=dict(type='bool', default=True),
        use_proxy=dict(type='bool', default=False)
    )

    result = dict(
        changed=False,
        data='',
        debug=''
    )

    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    if module.check_mode:
        # NOTE(review): returns the bare result instead of module.exit_json —
        # presumably the caller handles this; confirm before changing.
        return result

    # HTTP Basic auth header value from username:password
    auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(to_native(module.params['cpm_username']),
                                                              to_native(module.params['cpm_password'])),
                                             errors='surrogate_or_strict')))

    if module.params['use_https'] is True:
        protocol = "https://"
    else:
        protocol = "http://"

    Payload = None
    if (module.params['cpm_action'] == 'getplugconfig'):
        fullurl = ("%s%s/api/v2/config/powerplugconfig" % (protocol, to_native(module.params['cpm_url'])))
        # restrict the query to a single plug unless 'all' was requested
        if (module.params['plug_id'].lower() != 'all'):
            fullurl = '%s?plug=%s' % (fullurl, to_native(module.params['plug_id']))
        method = 'GET'
    elif (module.params['cpm_action'] == 'setplugconfig'):
        Payload = assemble_json(module, result)
        result['debug'] = Payload
        fullurl = ("%s%s/api/v2/config/powerplugconfig" % (protocol, to_native(module.params['cpm_url'])))
        method = 'POST'

    try:
        response = open_url(fullurl, data=Payload, method=method,
                            validate_certs=module.params['validate_certs'],
                            use_proxy=module.params['use_proxy'],
                            headers={'Content-Type': 'application/json', 'Authorization': "Basic %s" % auth})
        if (method != 'GET'):
            result['changed'] = True

    except HTTPError as e:
        fail_json = dict(msg='Received HTTP error for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
        module.fail_json(**fail_json)
    except URLError as e:
        fail_json = dict(msg='Failed lookup url for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
        module.fail_json(**fail_json)
    except SSLValidationError as e:
        # BUGFIX: the message previously read "the servers certificate" because
        # 'server''s' is implicit string concatenation, not an escaped quote.
        fail_json = dict(msg="Error validating the server's certificate for {0} : {1}".format(fullurl, to_native(e)), changed=False)
        module.fail_json(**fail_json)
    except ConnectionError as e:
        fail_json = dict(msg='Error connecting to for {0} : {1}'.format(fullurl, to_native(e)), changed=False)
        module.fail_json(**fail_json)

    result['data'] = json.loads(response.read())

    module.exit_json(**result)
def get_user_data(self):
    """Return the 'user_data' module parameter base64-encoded as text, or None when unset."""
    raw = self.module.params.get('user_data')
    if raw is None:
        return None
    encoded = base64.b64encode(to_bytes(raw))
    return to_text(encoded)
def map_obj_to_ele(module, want, top, value_map=None, param=None):
    """Build an lxml element tree for a Junos NETCONF edit from a desired-state spec.

    :param module: AnsibleModule (used to fail when lxml is missing and as
        the default source of ``param``)
    :param want: dict mapping xpath fragments to lists of attribute dicts
        (keys seen here: tag_only, leaf_only, value_req, is_key,
        parent_attrib, value, top)
    :param top: '/'-separated path of the subtree root under <root>
    :param value_map: optional per-xpath dict translating module values to
        device-specific values
    :param param: parameter dict; defaults to ``module.params``
    :return: the first child of the synthetic <root> element
    """
    if not HAS_LXML:
        module.fail_json(msg='lxml is not installed.')

    if not param:
        param = module.params

    root = Element('root')
    top_ele = top.split('/')
    ele = SubElement(root, top_ele[0])

    # create intermediate containers for all but the last path component
    if len(top_ele) > 1:
        for item in top_ele[1:-1]:
            ele = SubElement(ele, item)
    container = ele
    state = param.get('state')
    active = param.get('active')
    if active:
        oper = 'active'
    else:
        oper = 'inactive'

    # build xml subtree
    if container.tag != top_ele[-1]:
        node = SubElement(container, top_ele[-1])
    else:
        node = container

    for fxpath, attributes in want.items():
        for attr in attributes:
            tag_only = attr.get('tag_only', False)
            leaf_only = attr.get('leaf_only', False)
            value_req = attr.get('value_req', False)
            is_key = attr.get('is_key', False)
            parent_attrib = attr.get('parent_attrib', True)
            value = attr.get('value')
            field_top = attr.get('top')

            # operation 'delete' is added as element attribute
            # only if it is key or leaf only node
            if state == 'absent' and not (is_key or leaf_only):
                continue

            # convert param value to device specific value
            if value_map and fxpath in value_map:
                value = value_map[fxpath].get(value)

            if (value is not None) or tag_only or leaf_only:
                ele = node
                if field_top:
                    # eg: top = 'system/syslog/file'
                    #     field_top = 'system/syslog/file/contents'
                    # <file>
                    #   <name>test</name>
                    #   <contents>
                    #   </contents>
                    # </file>
                    ele_list = root.xpath(top + '/' + field_top)

                    if not len(ele_list):
                        # field_top subtree not built yet: create missing
                        # elements one path component at a time
                        fields = field_top.split('/')
                        ele = node
                        for item in fields:
                            inner_ele = root.xpath(top + '/' + item)
                            if len(inner_ele):
                                ele = inner_ele[0]
                            else:
                                ele = SubElement(ele, item)
                    else:
                        ele = ele_list[0]

                if value is not None and not isinstance(value, bool):
                    value = to_text(value, errors='surrogate_then_replace')

                if fxpath:
                    tags = fxpath.split('/')
                    for item in tags:
                        ele = SubElement(ele, item)

                if tag_only:
                    if state == 'present':
                        if not value:
                            # if value of tag_only node is false, delete the node
                            ele.set('delete', 'delete')

                elif leaf_only:
                    if state == 'present':
                        ele.set(oper, oper)
                        ele.text = value
                    else:
                        ele.set('delete', 'delete')
                        # Add value of leaf node if required while deleting.
                        # in some cases if value is present while deleting, it
                        # can result in error, hence the check
                        if value_req:
                            ele.text = value
                        if is_key:
                            par = ele.getparent()
                            par.set('delete', 'delete')
                else:
                    ele.text = value
                    par = ele.getparent()
                    if parent_attrib:
                        if state == 'present':
                            # set replace attribute at parent node
                            if not par.attrib.get('replace'):
                                par.set('replace', 'replace')

                            # set active/inactive at parent node
                            if not par.attrib.get(oper):
                                par.set(oper, oper)
                        else:
                            par.set('delete', 'delete')

    return root.getchildren()[0]
def run(self, tmp=None, task_vars=None):
    ''' handler for file transfer operations '''
    # Executes a local script on the remote node: resolves the script from
    # _raw_params, transfers it to the remote tmp dir, and runs it (with a
    # special PowerShell wrapper on Windows).
    if task_vars is None:
        task_vars = dict()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    try:
        creates = self._task.args.get('creates')
        if creates:
            # do not run the command if the line contains creates=filename
            # and the filename already exists. This allows idempotence
            # of command executions.
            if self._remote_file_exists(creates):
                raise AnsibleActionSkip("%s exists, matching creates option" % creates)

        removes = self._task.args.get('removes')
        if removes:
            # do not run the command if the line contains removes=filename
            # and the filename does not exist. This allows idempotence
            # of command executions.
            if not self._remote_file_exists(removes):
                raise AnsibleActionSkip("%s does not exist, matching removes option" % removes)

        # The chdir must be absolute, because a relative path would rely on
        # remote node behaviour & user config.
        chdir = self._task.args.get('chdir')
        if chdir:
            # Powershell is the only Windows-path aware shell
            if getattr(self._connection._shell, "_IS_WINDOWS", False) and \
                    not self.windows_absolute_path_detection.match(chdir):
                raise AnsibleActionFail('chdir %s must be an absolute path for a Windows remote node' % chdir)
            # Every other shell is unix-path-aware.
            if not getattr(self._connection._shell, "_IS_WINDOWS", False) and not chdir.startswith('/'):
                raise AnsibleActionFail('chdir %s must be an absolute path for a Unix-aware remote node' % chdir)

        # Split out the script as the first item in raw_params using
        # shlex.split() in order to support paths and files with spaces in the name.
        # Any arguments passed to the script will be added back later.
        raw_params = to_native(self._task.args.get('_raw_params', ''), errors='surrogate_or_strict')
        parts = [to_text(s, errors='surrogate_or_strict') for s in shlex.split(raw_params.strip())]
        source = parts[0]

        # Support executable paths and files with spaces in the name.
        executable = to_native(self._task.args.get('executable', ''), errors='surrogate_or_strict')

        try:
            # resolve (and possibly vault-decrypt) the local script file
            source = self._loader.get_real_file(self._find_needle('files', source),
                                                decrypt=self._task.args.get('decrypt', True))
        except AnsibleError as e:
            raise AnsibleActionFail(to_native(e))

        # now we execute script, always assume changed.
        result['changed'] = True

        if not self._play_context.check_mode:
            # transfer the file to a remote tmp location
            tmp_src = self._connection._shell.join_path(self._connection._shell.tmpdir,
                                                        os.path.basename(source))

            # Convert raw_params to text for the purpose of replacing the script since
            # parts and tmp_src are both unicode strings and raw_params will be different
            # depending on Python version.
            #
            # Once everything is encoded consistently, replace the script path on the remote
            # system with the remainder of the raw_params. This preserves quoting in parameters
            # that would have been removed by shlex.split().
            target_command = to_text(raw_params).strip().replace(parts[0], tmp_src)

            self._transfer_file(source, tmp_src)

            # set file permissions, more permissive when the copy is done as a different user
            self._fixup_perms2((self._connection._shell.tmpdir, tmp_src), execute=True)

            # add preparation steps to one ssh roundtrip executing the script
            env_dict = dict()
            env_string = self._compute_environment_string(env_dict)

            if executable:
                script_cmd = ' '.join([env_string, executable, target_command])
            else:
                script_cmd = ' '.join([env_string, target_command])

        if self._play_context.check_mode:
            raise _AnsibleActionDone()

        script_cmd = self._connection._shell.wrap_for_exec(script_cmd)

        exec_data = None
        # PowerShell runs the script in a special wrapper to enable things
        # like become and environment args
        if getattr(self._connection._shell, "_IS_WINDOWS", False):
            # FUTURE: use a more public method to get the exec payload
            pc = self._play_context
            exec_data = ps_manifest._create_powershell_wrapper(
                to_bytes(script_cmd), source, {}, env_dict, self._task.async_val,
                pc.become, pc.become_method, pc.become_user,
                pc.become_pass, pc.become_flags, "script", task_vars)
            # build the necessary exec wrapper command
            # FUTURE: this still doesn't let script work on Windows with non-pipelined connections or
            # full manual exec of KEEP_REMOTE_FILES script_cmd
            script_cmd = self._connection._shell.build_module_command(env_string='', shebang='#!powershell', cmd='')

        result.update(self._low_level_execute_command(cmd=script_cmd, in_data=exec_data,
                                                      sudoable=True, chdir=chdir))

        if 'rc' in result and result['rc'] != 0:
            raise AnsibleActionFail('non-zero return code')

    except AnsibleAction as e:
        result.update(e.result)
    finally:
        self._remove_tmp_path(self._connection._shell.tmpdir)

    return result
def _populate_from_source(self, source_data, using_current_cache=False):
    """Parse `VBoxManage list -l vms`-style output and populate the inventory.

    :param source_data: iterable of raw output lines ("Key:  value" format)
    :param using_current_cache: when True, populate from the cached structure
        and return it unchanged
    :return: a cacheable dict of groups and '_meta' hostvars
    """
    if using_current_cache:
        self._populate_from_cache(source_data)
        return source_data

    cacheable_results = {'_meta': {'hostvars': {}}}
    hostvars = {}
    prevkey = pref_k = ''
    current_host = None

    # needed to possibly set ansible_host
    netinfo = self.get_option('network_info_path')

    for line in source_data:
        line = to_text(line)
        if ':' not in line:
            continue
        try:
            k, v = line.split(':', 1)
        except Exception:
            # skip non splitable
            continue

        if k.strip() == '':
            # skip empty
            continue

        v = v.strip()
        # found host
        if k.startswith('Name') and ',' not in v:  # some setting strings appear in Name
            current_host = v
            if current_host not in hostvars:
                hostvars[current_host] = {}
                self.inventory.add_host(current_host)

                # try to get network info
                netdata = self._query_vbox_data(current_host, netinfo)
                if netdata:
                    self.inventory.set_variable(current_host, 'ansible_host', netdata)

        # found groups
        elif k == 'Groups':
            for group in v.split('/'):
                if group:
                    group = self.inventory.add_group(group)
                    self.inventory.add_child(group, current_host)
                    if group not in cacheable_results:
                        cacheable_results[group] = {'hosts': []}
                    cacheable_results[group]['hosts'].append(current_host)
            continue

        else:
            # found vars, accumulate in hostvars for clean inventory set
            pref_k = 'vbox_' + k.strip().replace(' ', '_')
            if k.startswith(' '):
                # indented key: sub-entry of the previous top-level key
                if prevkey not in hostvars[current_host]:
                    hostvars[current_host][prevkey] = {}
                hostvars[current_host][prevkey][pref_k] = v
            else:
                if v != '':
                    hostvars[current_host][pref_k] = v
            if self._ungrouped_host(current_host, cacheable_results):
                if 'ungrouped' not in cacheable_results:
                    cacheable_results['ungrouped'] = {'hosts': []}
                cacheable_results['ungrouped']['hosts'].append(current_host)
            prevkey = pref_k

    self._set_variables(hostvars)
    for host in hostvars:
        h = self.inventory.get_host(host)
        cacheable_results['_meta']['hostvars'][h.name] = h.vars

    return cacheable_results