def run(self, terms, variables=None, **kwargs):
    if HVAC_IMPORT_ERROR:
        raise_from(
            AnsibleError("This plugin requires the 'hvac' Python library"),
            HVAC_IMPORT_ERROR
        )

    ret = []

    self.set_options(direct=kwargs, var_options=variables)
    # TODO: remove process_deprecations() if backported fix is available (see method definition)
    self.process_deprecations()
    self.connection_options.process_connection_options()
    client_args = self.connection_options.get_hvac_connection_options()
    client = self.helper.get_vault_client(**client_args)

    data = self._options_adapter.get_option('data')
    wrap_ttl = self._options_adapter.get_option_default('wrap_ttl')

    try:
        self.authenticator.validate()
        self.authenticator.authenticate(client)
    except (NotImplementedError, HashiVaultValueError) as e:
        raise_from(AnsibleError(e), e)

    for term in terms:
        try:
            response = client.write(path=term, wrap_ttl=wrap_ttl, **data)
        except hvac.exceptions.Forbidden as e:
            raise_from(AnsibleError("Forbidden: Permission Denied to path '%s'." % term), e)
        except hvac.exceptions.InvalidPath as e:
            raise_from(AnsibleError("The path '%s' doesn't seem to exist." % term), e)
        except hvac.exceptions.InternalServerError as e:
            raise_from(AnsibleError("Internal Server Error: %s" % str(e)), e)

        # https://github.com/hvac/hvac/issues/797
        # HVAC returns a raw response object when the body is not JSON.
        # That includes 204 responses, which are successful with no body.
        # So we will try to detect that and act accordingly.
        # A better way may be to implement our own adapter for this
        # collection, but it's a little premature to do that.
        if hasattr(response, 'json') and callable(response.json):
            if response.status_code == 204:
                output = {}
            else:
                display.warning('Vault returned status code %i and an unparsable body.' % response.status_code)
                output = response.content
        else:
            output = response

        ret.append(output)

    return ret

def run(self, terms, variables=None, **kwargs):
    if HVAC_IMPORT_ERROR:
        raise_from(
            AnsibleError("This plugin requires the 'hvac' Python library"),
            HVAC_IMPORT_ERROR
        )

    self.set_options(direct=kwargs, var_options=variables)
    # TODO: remove process_deprecations() if backported fix is available (see method definition)
    self.process_deprecations()
    self.connection_options.process_connection_options()
    client_args = self.connection_options.get_hvac_connection_options()
    client = self.helper.get_vault_client(**client_args)

    if len(terms) != 0:
        display.warning("Supplied term strings will be ignored. This lookup does not use term strings.")

    try:
        self.authenticator.validate()
        self.authenticator.authenticate(client)
    except (NotImplementedError, HashiVaultValueError) as e:
        raise AnsibleError(e)

    pass_thru_options = self._options_adapter.get_filled_options(*self.PASS_THRU_OPTION_NAMES)

    if self.get_option('orphan'):
        pass_thru_options['no_parent'] = True

    legacy_options = pass_thru_options.copy()

    for key in pass_thru_options.keys():
        if key in self.LEGACY_OPTION_TRANSLATION:
            legacy_options[self.LEGACY_OPTION_TRANSLATION[key]] = legacy_options.pop(key)

    response = None

    if self.get_option('orphan'):
        # this method is deprecated, but it's the only way through hvac to get
        # at the /create-orphan endpoint at this time.
        # See: https://github.com/hvac/hvac/issues/758
        try:
            response = client.create_token(orphan=True, **legacy_options)
        except AttributeError:
            display.warning("'create_token' method was not found. Attempting method that requires root privileges.")
        except Exception as e:
            raise AnsibleError(e)

    if response is None:
        try:
            response = client.auth.token.create(**pass_thru_options)
        except Exception as e:
            raise AnsibleError(e)

    return [response]

def wait_for_task(task, max_backoff=64, timeout=3600):
    """Wait for given task using exponential back-off algorithm.

    Args:
        task: VMware task object
        max_backoff: Maximum amount of sleep time in seconds
        timeout: Timeout for the given task in seconds

    Returns:
        Tuple with True and result for successful task

    Raises:
        TaskError on failure
    """
    failure_counter = 0
    start_time = time.time()

    while True:
        if time.time() - start_time >= timeout:
            raise TaskError("Timeout")
        if task.info.state == vim.TaskInfo.State.success:
            return True, task.info.result
        if task.info.state == vim.TaskInfo.State.error:
            error_msg = task.info.error
            try:
                error_msg = error_msg.msg
            except AttributeError:
                pass
            finally:
                raise_from(TaskError(error_msg), task.info.error)
        if task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:
            # Exponential back-off with millisecond jitter; randint() yields the
            # jitter in milliseconds, hence the division by 1000. Without it the
            # jitter term (up to 1000 s) would swamp the exponential term.
            sleep_time = min(2 ** failure_counter + randint(1, 1000) / 1000.0, max_backoff)
            time.sleep(sleep_time)
            failure_counter += 1

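# A minimal usage sketch for wait_for_task() above, assuming an existing
# pyVmomi session. `vm` and `module` are hypothetical stand-ins for a
# vim.VirtualMachine object and an AnsibleModule instance; any vim task
# object works with the polling loop.
def _example_power_on(vm, module):
    task = vm.PowerOnVM_Task()
    try:
        changed, result = wait_for_task(task, max_backoff=32, timeout=600)
    except TaskError as task_err:
        module.fail_json(msg='Failed to power on VM: %s' % to_native(task_err))
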
def run(self, tmp=None, task_vars=None):
    """Run of action plugin for interacting with Nautobot GraphQL endpoint.

    Args:
        tmp (str, optional): Temporary directory path. Defaults to None.
        task_vars (dict, optional): Variables available to the task. Defaults to None.
    """
    if PYNAUTOBOT_IMPORT_ERROR:
        raise_from(
            AnsibleError("pynautobot must be installed to use this plugin"),
            PYNAUTOBOT_IMPORT_ERROR,
        )

    self._supports_check_mode = True
    self._supports_async = False

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp

    if result.get("skipped"):
        return None

    if result.get("invocation", {}).get("module_args"):
        # avoid passing to modules in case of no_log
        # should not be set anymore but here for backwards compatibility
        del result["invocation"]["module_args"]

    # do work!
    # Get the arguments from the module definition
    args = self._task.args
    results = nautobot_action_graphql(args=args)

    # Results should be the data response of the query to be returned as a lookup
    return results

def run(self, terms, variables=None, **kwargs):
    if HVAC_IMPORT_ERROR:
        raise_from(
            AnsibleError("This plugin requires the 'hvac' Python library"),
            HVAC_IMPORT_ERROR
        )

    ret = []

    self.set_options(direct=kwargs, var_options=variables)
    # TODO: remove process_deprecations() if backported fix is available (see method definition)
    self.process_deprecations()
    self.connection_options.process_connection_options()
    client_args = self.connection_options.get_hvac_connection_options()
    client = self.helper.get_vault_client(**client_args)

    try:
        self.authenticator.validate()
        self.authenticator.authenticate(client)
    except (NotImplementedError, HashiVaultValueError) as e:
        raise AnsibleError(e)

    for term in terms:
        try:
            data = client.read(term)
        except hvac.exceptions.Forbidden:
            raise AnsibleError("Forbidden: Permission Denied to path '%s'." % term)

        if data is None:
            raise AnsibleError("The path '%s' doesn't seem to exist." % term)

        ret.append(data)

    return ret

def requests_wrapper(endpoint, method='GET', params=None, data=None, module=None, reauth_attempts=3, retries=3):
    try:
        response = REQUEST.request(method, endpoint, data=data, params=params)

        if response.status_code == 401:
            set_token_headers(module)
            if reauth_attempts == 0:
                raise Exception("Too many reauthentication attempts")
            return requests_wrapper(endpoint, method, params, data, module, reauth_attempts - 1)
        elif response.status_code not in VALID_RESPONSE_CODES:
            error_message = response.json().get('message')
            validation_errors = response.json().get('validationErrors')
            raise Exception('status code %s | %s | Validation errors: %s' % (
                response.status_code, error_message, validation_errors))
    except requests.exceptions.RequestException as e:
        if retries == 0:
            raise_from(Exception("Communications error: %s" % str(e)), e)
        return requests_wrapper(endpoint, method, params, data, module, retries=retries - 1)

    return response

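# Hedged usage sketch for requests_wrapper() above. The endpoint URL is
# illustrative and `module` is a hypothetical AnsibleModule instance; the
# point is that 401 re-authentication and transport-level retries are
# handled inside the wrapper, so callers only see the final response.
def _example_list_servers(module):
    response = requests_wrapper('https://api.example.com/v1/servers', module=module)
    return response.json()
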
def _get_meta_from_src_dir(
        b_path,  # type: bytes
):  # type: (...) -> Dict[str, Optional[Union[str, List[str], Dict[str, str]]]]
    galaxy_yml = os.path.join(b_path, _GALAXY_YAML)
    if not os.path.isfile(galaxy_yml):
        raise LookupError(
            "The collection galaxy.yml path '{path!s}' does not exist.".
            format(path=to_native(galaxy_yml))
        )

    with open(galaxy_yml, 'rb') as manifest_file_obj:
        try:
            manifest = yaml_load(manifest_file_obj)
        except yaml.error.YAMLError as yaml_err:
            raise_from(
                AnsibleError(
                    "Failed to parse the galaxy.yml at '{path!s}' with "
                    'the following error:\n{err_txt!s}'.format(
                        path=to_native(galaxy_yml),
                        err_txt=to_native(yaml_err),
                    ),
                ),
                yaml_err,
            )

    return _normalize_galaxy_yml_manifest(manifest, galaxy_yml)

def __init__(self, *args, **kwargs):
    if LIBRARY_IMPORT_ERROR:
        raise_from(
            AnsibleError('{0}'.format(LIBRARY_IMPORT_ERROR)),
            LIBRARY_IMPORT_ERROR
        )
    super().__init__(*args, **kwargs)

def _extract_collection_from_git(repo_url, coll_ver, b_path):
    name, version, git_url, fragment = parse_scm(repo_url, coll_ver)
    b_checkout_path = mkdtemp(
        dir=b_path,
        prefix=to_bytes(name, errors='surrogate_or_strict'),
    )  # type: bytes
    git_clone_cmd = 'git', 'clone', git_url, to_text(b_checkout_path)
    # FIXME: '--depth', '1', '--branch', version
    try:
        subprocess.check_call(git_clone_cmd)
    except subprocess.CalledProcessError as proc_err:
        raise_from(
            AnsibleError(  # should probably be LookupError
                'Failed to clone a Git repository from `{repo_url!s}`.'.format(
                    repo_url=to_native(git_url)),
            ),
            proc_err,
        )

    git_switch_cmd = 'git', 'checkout', to_text(version)
    try:
        subprocess.check_call(git_switch_cmd, cwd=b_checkout_path)
    except subprocess.CalledProcessError as proc_err:
        raise_from(
            AnsibleError(  # should probably be LookupError
                'Failed to switch a cloned Git repo `{repo_url!s}` '
                'to the requested revision `{commitish!s}`.'.format(
                    commitish=to_native(version),
                    repo_url=to_native(git_url),
                ),
            ),
            proc_err,
        )

    return (
        os.path.join(b_checkout_path, to_bytes(fragment))
        if fragment else b_checkout_path
    )

def update_record(self, zone_id, record):
    """
    Update a record.

    @param zone_id: The zone ID
    @param record: The DNS record (DNSRecord)
    @return The DNS record (DNSRecord)
    """
    if record.id is None:
        raise DNSAPIError('Need record ID to update record!')
    self._announce('update record')
    command = self._prepare()
    command.add_simple_command(
        'updateRecord',
        recordId=record.id,
        recorddata=_encode_record(record, include_id=False))
    try:
        return _create_record_from_encoding(
            self._execute(command, 'updateRecordResponse', dict))
    except WSDLError as exc:
        raise_from(
            DNSAPIError('Error while updating record: {0}'.format(to_native(exc))),
            exc)
    except WSDLNetworkError as exc:
        raise_from(
            DNSAPIError('Network error while updating record: {0}'.format(to_native(exc))),
            exc)

def add_record(self, zone_id, record):
    """
    Adds a new record to an existing zone.

    @param zone_id: The zone ID
    @param record: The DNS record (DNSRecord)
    @return The created DNS record (DNSRecord)
    """
    self._announce('add record')
    command = self._prepare()
    command.add_simple_command(
        'addRecord',
        search=str(zone_id),
        recorddata=_encode_record(record, include_id=False))
    try:
        return _create_record_from_encoding(
            self._execute(command, 'addRecordResponse', dict))
    except WSDLError as exc:
        raise_from(
            DNSAPIError('Error while adding record: {0}'.format(to_native(exc))),
            exc)
    except WSDLNetworkError as exc:
        raise_from(
            DNSAPIError('Network error while adding record: {0}'.format(to_native(exc))),
            exc)

def get_zone_with_records_by_name(self, name, prefix=NOT_PROVIDED, record_type=NOT_PROVIDED):
    """
    Given a zone name, return the zone contents with records if found.

    @param name: The zone name (string)
    @param prefix: The prefix to filter for, if provided. Since None is a valid value,
                   the special constant NOT_PROVIDED indicates that we are not filtering.
    @param record_type: The record type to filter for, if provided
    @return The zone information with records (DNSZoneWithRecords), or None if not found
    """
    self._announce('get zone')
    command = self._prepare()
    command.add_simple_command('getZone', sZoneName=name)
    try:
        return _create_zone_from_encoding(
            self._execute(command, 'getZoneResponse', dict),
            prefix=prefix,
            record_type=record_type)
    except WSDLError as exc:
        if exc.error_origin == 'server' and exc.error_message == 'zone not found':
            return None
        raise_from(
            DNSAPIError('Error while getting zone: {0}'.format(to_native(exc))),
            exc)
    except WSDLNetworkError as exc:
        raise_from(
            DNSAPIError('Network error while getting zone: {0}'.format(to_native(exc))),
            exc)

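# A small sketch of how the three API methods above compose, assuming `api`
# is an instance of the class they belong to. The attribute layout of the
# returned DNSZoneWithRecords object (`.zone` and `.records`) and the record
# fields used here are assumptions made for illustration only.
def _example_lower_a_record_ttl(api, zone_name):
    zone_with_records = api.get_zone_with_records_by_name(zone_name)
    if zone_with_records is None:
        raise DNSAPIError('Zone {0} not found'.format(zone_name))
    for record in zone_with_records.records:
        if record.type == 'A':
            record.ttl = 300
            api.update_record(zone_with_records.zone.id, record)
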
def __init__(self, display=None):
    super(CallbackModule, self).__init__(display=display)
    self.hide_task_arguments = None
    self.otel_service_name = None
    self.ansible_playbook = None
    self.play_name = None
    self.tasks_data = None
    self.errors = 0
    self.disabled = False
    self.traceparent = False

    if OTEL_LIBRARY_IMPORT_ERROR:
        raise_from(
            AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'),
            OTEL_LIBRARY_IMPORT_ERROR)

    if ORDER_LIBRARY_IMPORT_ERROR:
        raise_from(
            AnsibleError('The `ordereddict` must be installed to use this plugin'),
            ORDER_LIBRARY_IMPORT_ERROR)
    else:
        self.tasks_data = OrderedDict()

    self.opentelemetry = OpenTelemetrySource(display=self._display)

def run(self, terms, variables=None, **kwargs):
    if HVAC_IMPORT_ERROR:
        raise_from(
            AnsibleError("This plugin requires the 'hvac' Python library"),
            HVAC_IMPORT_ERROR
        )

    self.set_options(direct=kwargs, var_options=variables)
    # TODO: remove process_deprecations() if backported fix is available (see method definition)
    self.process_deprecations()

    if self.get_option('auth_method') == 'none':
        raise AnsibleError("The 'none' auth method is not valid for this lookup.")

    self.connection_options.process_connection_options()
    client_args = self.connection_options.get_hvac_connection_options()
    client = self.helper.get_vault_client(**client_args)

    if len(terms) != 0:
        display.warning("Supplied term strings will be ignored. This lookup does not use term strings.")

    try:
        self.authenticator.validate()
        response = self.authenticator.authenticate(client)
    except (NotImplementedError, HashiVaultValueError) as e:
        raise AnsibleError(e)

    return [response]

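# Hedged playbook-usage sketch for the login lookup above. The fully
# qualified lookup name is an assumption inferred from the hashi_vault-style
# option handling in this code:
#
#     - name: Log in to Vault and keep the auth response
#       ansible.builtin.set_fact:
#         login_response: "{{ lookup('community.hashi_vault.vault_login', url='https://vault:8200') }}"
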
def get_galaxy_artifact_path(self, collection):
    # type: (Union[Candidate, Requirement]) -> bytes
    """Given a Galaxy-stored collection, return a cached path.

    If it's not yet on disk, this method downloads the artifact first.
    """
    try:
        return self._galaxy_artifact_cache[collection]
    except KeyError:
        pass

    try:
        url, sha256_hash, token = self._galaxy_collection_cache[collection]
    except KeyError as key_err:
        raise_from(
            RuntimeError(
                'There is no known source for {coll!s}'.
                format(coll=collection),
            ),
            key_err,
        )

    display.vvvv(
        "Fetching a collection tarball for '{collection!s}' from "
        'Ansible Galaxy'.format(collection=collection),
    )

    try:
        b_artifact_path = _download_file(
            url,
            self._b_working_directory,
            expected_hash=sha256_hash,
            validate_certs=self._validate_certs,
            token=token,
        )  # type: bytes
    except URLError as err:
        raise_from(
            AnsibleError(
                'Failed to download collection tar '
                "from '{coll_src!s}': {download_err!s}".
                format(
                    coll_src=to_native(collection.src),
                    download_err=to_native(err),
                ),
            ),
            err,
        )
    else:
        display.vvv(
            "Collection '{coll!s}' obtained from "
            'server {server!s} {url!s}'.format(
                coll=collection, server=collection.src or 'Galaxy',
                url=collection.src.api_server if collection.src is not None else '',
            )
        )

    self._galaxy_artifact_cache[collection] = b_artifact_path
    return b_artifact_path

def get_artifact_path(self, collection):
    # type: (Union[Candidate, Requirement]) -> bytes
    """Given a concrete collection pointer, return a cached path.

    If it's not yet on disk, this method downloads the artifact first.
    """
    try:
        return self._artifact_cache[collection.src]
    except KeyError:
        pass

    # NOTE: SCM needs to be special-cased as it may contain either
    # NOTE: one collection in its root, or a number of top-level
    # NOTE: collection directories instead.
    # NOTE: The idea is to store the SCM collection as unpacked
    # NOTE: directory structure under the temporary location and use
    # NOTE: a "virtual" collection that has pinned requirements on
    # NOTE: the directories under that SCM checkout that correspond
    # NOTE: to collections.
    # NOTE: This brings us to the idea that we need two separate
    # NOTE: virtual Requirement/Candidate types --
    # NOTE: (single) dir + (multidir) subdirs
    if collection.is_url:
        display.vvvv(
            "Collection requirement '{collection!s}' is a URL "
            'to a tar artifact'.format(collection=collection.fqcn),
        )
        try:
            b_artifact_path = _download_file(
                collection.src,
                self._b_working_directory,
                expected_hash=None,  # NOTE: URLs don't support checksums
                validate_certs=self._validate_certs,
            )
        except URLError as err:
            raise_from(
                AnsibleError(
                    'Failed to download collection tar '
                    "from '{coll_src!s}': {download_err!s}".format(
                        coll_src=to_native(collection.src),
                        download_err=to_native(err),
                    ),
                ),
                err,
            )
    elif collection.is_scm:
        b_artifact_path = _extract_collection_from_git(
            collection.src,
            collection.ver,
            self._b_working_directory,
        )
    elif collection.is_file or collection.is_dir or collection.is_subdirs:
        b_artifact_path = to_bytes(collection.src)
    else:
        # NOTE: This may happen `if collection.is_online_index_pointer`
        raise RuntimeError(
            'The artifact is of an unexpected type {art_type!s}'.format(
                art_type=collection.type))

    self._artifact_cache[collection.src] = b_artifact_path
    return b_artifact_path

def api_data(client, params):
    """
    Transform our Ansible params into JSON data for POST'ing or PUT'ing.

    :param client: Errata Client
    :param dict params: ansible module params
    """
    # XXX The docs at /developer-guide/api-http-api.html#api-apis
    # mention a few settings I have not seen before:
    # - "allow_beta"
    # - "is_deferred"
    # - "url_name" - this one is actually listed twice!
    # Are those really valid settings? grep errata-rails.git for more
    # references to find out. That whole POST /api/v1/releases section of the
    # docs could probably use a review.
    # CLOUDWF-298 is an RFE for specifying all values by name instead of ID.
    release = params.copy()
    # Update the values for ones that the REST API will accept:
    if 'product' in release:
        product_name = release.pop('product')
        if product_name is not None:
            release['product_id'] = get_product_id(client, product_name)
    if 'program_manager' in release:
        pm_login_name = release.pop('program_manager')
        try:
            pm_id = common_errata_tool.user_id(client, pm_login_name)
        except UserNotFoundError as e:
            raise_from(ProgramManagerNotFoundError(str(e)), e)
        release['program_manager_id'] = pm_id
    # "active" -> "isactive"
    if 'active' in release:
        active = release.pop('active')
        release['isactive'] = active
    # "supports_component_acl" -> "disable_acl"
    if 'supports_component_acl' in release:
        supports_component_acl = release.pop('supports_component_acl')
        release['disable_acl'] = not supports_component_acl
    # "product_versions" -> "product_version_ids"
    if 'product_versions' in release:
        product_versions = release.pop('product_versions')
        product_version_ids = get_product_version_ids(client, product_versions)
        release['product_version_ids'] = product_version_ids
    # "state_machine_rule_set" -> "state_machine_rule_set_id"
    if 'state_machine_rule_set' in release:
        state_machine_rule_set = release.pop('state_machine_rule_set')
        if state_machine_rule_set:
            rules_scraper = common_errata_tool.WorkflowRulesScraper(client)
            rule_set_id = int(rules_scraper.enum[state_machine_rule_set])
            release['state_machine_rule_set_id'] = rule_set_id
        else:
            release['state_machine_rule_set_id'] = None
    # "blocker_flags" list -> str
    if 'blocker_flags' in release:
        release['blocker_flags'] = ",".join(release['blocker_flags'])
    data = {'release': release}
    if 'type' in params:
        data['type'] = params['type']
    return data

def run(self, terms, variables, **kwargs):
    if PYANG_IMPORT_ERROR:
        raise_from(
            AnsibleLookupError("pyang must be installed to use this plugin"),
            PYANG_IMPORT_ERROR,
        )

    res = []
    try:
        json_config = terms[0]
    except IndexError:
        raise AnsibleLookupError("path to json file must be specified")

    try:
        yang_file = kwargs["yang_file"]
    except KeyError:
        raise AnsibleLookupError("value of 'yang_file' must be specified")

    search_path = kwargs.pop("search_path", "")
    keep_tmp_files = kwargs.pop("keep_tmp_files", False)

    json_config = os.path.realpath(os.path.expanduser(json_config))
    try:
        # validate json
        with open(json_config) as fp:
            json.load(fp)
    except Exception as exc:
        raise AnsibleLookupError(
            "Failed to load json configuration: %s"
            % (to_text(exc, errors="surrogate_or_strict")))

    try:
        tmp_dir_path = create_tmp_dir(JSON2XML_DIR_PATH)
        doctype = kwargs.get("doctype", "config")
        tl = Translator(
            yang_file,
            search_path,
            doctype,
            keep_tmp_files,
            debug=self._debug,
        )
        xml_data = tl.json_to_xml(json_config, tmp_dir_path)
    except ValueError as exc:
        raise AnsibleLookupError(to_text(exc, errors="surrogate_then_replace"))
    except Exception as exc:
        raise AnsibleLookupError(
            "Unhandled exception from [lookup][json2xml]. Error: {err}".
            format(err=to_text(exc, errors="surrogate_then_replace")))

    res.append(xml_data)
    return res

def process_json(data, template):
    if JINA2_IMPORT_ERROR:
        raise_from(
            F5ModuleError('jinja2 package must be installed to use this collection'),
            JINA2_IMPORT_ERROR)
    jinja_env = Environment()
    template = jinja_env.from_string(template)
    content = template.render(params=data)
    my_json = json.loads(content)
    return my_json

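# Hedged example of process_json() above: render a Jinja2 template against a
# params dict and parse the result as JSON. The template string and values
# are made up for illustration.
def _example_render():
    template = '{"name": "{{ params.name }}", "port": {{ params.port }}}'
    return process_json({'name': 'web01', 'port': 8080}, template)
    # -> {'name': 'web01', 'port': 8080}
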
def process_to_api(self, record):
    """
    Process a record object (DNSRecord) for sending to API.
    Modifies the record in-place.
    """
    try:
        if record.type == 'TXT':
            self._handle_txt_api(True, record)
        return record
    except DNSConversionError as e:
        raise_from(DNSConversionError(u'While processing record for the API: {0}'.format(e.error_message)), e)

def parse(self, inventory, loader, path, cache):
    """Return dynamic inventory from source

    Returns the processed inventory from the lxd import

    Args:
        str(inventory): inventory object with existing data and
            the methods to add hosts/groups/variables to inventory
        str(loader): Ansible's DataLoader
        str(path): path to the config
        bool(cache): use or avoid caches
    Kwargs:
        None
    Raises:
        AnsibleParserError
    Returns:
        None"""
    if IPADDRESS_IMPORT_ERROR:
        raise_from(
            AnsibleError("'ipaddress' must be installed to use this plugin"),
            IPADDRESS_IMPORT_ERROR)
    super(InventoryModule, self).parse(inventory, loader, path, cache=False)
    # Read the inventory YAML file
    self._read_config_data(path)
    try:
        self.client_key = self.get_option('client_key')
        self.client_cert = self.get_option('client_cert')
        self.debug = self.DEBUG
        self.data = {}  # store for inventory-data
        self.groupby = self.get_option('groupby')
        self.plugin = self.get_option('plugin')
        self.prefered_instance_network_family = self.get_option('prefered_instance_network_family')
        self.prefered_instance_network_interface = self.get_option('prefered_instance_network_interface')
        self.type_filter = self.get_option('type_filter')
        if self.get_option('state').lower() == 'none':  # none in config is str()
            self.filter = None
        else:
            self.filter = self.get_option('state').lower()
        self.trust_password = self.get_option('trust_password')
        self.url = self.get_option('url')
    except Exception as err:
        raise AnsibleParserError('All correct options required: {0}'.format(to_native(err)))
    # Call our internal helper to populate the dynamic inventory
    self._populate()

def process_from_user(self, record):
    """
    Process a record object (DNSRecord) after receiving from the user.
    Modifies the record in-place.
    """
    try:
        record.target = to_text(record.target)
        if record.type == 'TXT':
            self._handle_txt_user(False, record)
        return record
    except DNSConversionError as e:
        raise_from(DNSConversionError(u'While processing record from the user: {0}'.format(e.error_message)), e)

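# Sketch of the round-trip the two converters above implement, assuming
# `conv` is an instance of their class and `record` is a DNSRecord whose TXT
# value is as the user supplied it. Both calls mutate and return the record.
def _example_roundtrip(conv, record):
    conv.process_from_user(record)  # normalize user input (e.g. TXT quoting)
    conv.process_to_api(record)     # encode for sending to the API
    return record
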
def get_direct_collection_meta(self, collection):
    # type: (t.Union[Candidate, Requirement]) -> dict[str, t.Union[str, dict[str, str], list[str], None, t.Type[Sentinel]]]
    """Extract meta from the given on-disk collection artifact."""
    try:  # FIXME: use unique collection identifier as a cache key?
        return self._artifact_meta_cache[collection.src]
    except KeyError:
        b_artifact_path = self.get_artifact_path(collection)

    if collection.is_url or collection.is_file:
        collection_meta = _get_meta_from_tar(b_artifact_path)
    elif collection.is_dir:  # should we just build a coll instead?
        # FIXME: what if there's subdirs?
        try:
            collection_meta = _get_meta_from_dir(b_artifact_path, self.require_build_metadata)
        except LookupError as lookup_err:
            raise_from(
                AnsibleError(
                    'Failed to find the collection dir deps: {err!s}'.
                    format(err=to_native(lookup_err)),
                ),
                lookup_err,
            )
    elif collection.is_scm:
        collection_meta = {
            'name': None,
            'namespace': None,
            'dependencies': {to_native(b_artifact_path): '*'},
            'version': '*',
        }
    elif collection.is_subdirs:
        collection_meta = {
            'name': None,
            'namespace': None,
            # NOTE: Dropping b_artifact_path since it's based on src anyway
            'dependencies': dict.fromkeys(
                map(to_native, collection.namespace_collection_paths),
                '*',
            ),
            'version': '*',
        }
    else:
        raise RuntimeError

    self._artifact_meta_cache[collection.src] = collection_meta
    return collection_meta

def _extract_collection_from_git(repo_url, coll_ver, b_path):
    name, version, git_url, fragment = parse_scm(repo_url, coll_ver)
    b_checkout_path = mkdtemp(
        dir=b_path,
        prefix=to_bytes(name, errors='surrogate_or_strict'),
    )  # type: bytes

    try:
        git_executable = get_bin_path('git')
    except ValueError as err:
        raise AnsibleError(
            "Could not find git executable to extract the collection from the Git repository `{repo_url!s}`.".format(
                repo_url=to_native(git_url))
        ) from err

    # Perform a shallow clone if simply cloning HEAD
    if version == 'HEAD':
        git_clone_cmd = git_executable, 'clone', '--depth=1', git_url, to_text(b_checkout_path)
    else:
        git_clone_cmd = git_executable, 'clone', git_url, to_text(b_checkout_path)
        # FIXME: '--branch', version

    try:
        subprocess.check_call(git_clone_cmd)
    except subprocess.CalledProcessError as proc_err:
        raise_from(
            AnsibleError(  # should probably be LookupError
                'Failed to clone a Git repository from `{repo_url!s}`.'.format(
                    repo_url=to_native(git_url)),
            ),
            proc_err,
        )

    git_switch_cmd = git_executable, 'checkout', to_text(version)
    try:
        subprocess.check_call(git_switch_cmd, cwd=b_checkout_path)
    except subprocess.CalledProcessError as proc_err:
        raise_from(
            AnsibleError(  # should probably be LookupError
                'Failed to switch a cloned Git repo `{repo_url!s}` '
                'to the requested revision `{commitish!s}`.'.format(
                    commitish=to_native(version),
                    repo_url=to_native(git_url),
                ),
            ),
            proc_err,
        )

    return (
        os.path.join(b_checkout_path, to_bytes(fragment))
        if fragment else b_checkout_path
    )

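# For reference, a requirement like the following ends up in
# _extract_collection_from_git() (the `git+URL,version` syntax is the
# ansible-galaxy SCM form; the repository URL is illustrative):
#
#     ansible-galaxy collection install git+https://github.com/org/repo.git,HEAD
#
# With 'HEAD' as the version the clone above is shallow (--depth=1); any
# other commit-ish forces a full clone followed by 'git checkout'.
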
def parse(self, inventory, loader, path, cache=True):
    if YAML_IMPORT_ERROR:
        raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR)
    super(InventoryModule, self).parse(inventory, loader, path)
    self._read_config_data(path=path)

    config_zones = self.get_option("regions")
    tags = self.get_option("tags")
    token = self.get_oauth_token()
    if not token:
        raise AnsibleError("'oauth_token' value is null, you must configure it either in inventory, envvars or scaleway-cli config.")
    hostname_preference = self.get_option("hostnames")

    for zone in self._get_zones(config_zones):
        self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference)

def filter_scl_name(scl_prefix):
    scl_prefix = str(scl_prefix)
    if len(scl_prefix) == 0:
        raise AnsibleError('empty name in scl_prefix')
    try:
        if scl_prefix.count('-') == 1:
            if len(scl_prefix.split('-')[1]) == 0:
                raise AnsibleError('empty name in scl_prefix')
            return scl_prefix.split('-')[1]
        elif scl_prefix.count('-') == 0:
            return scl_prefix
        else:
            raise AnsibleError('Bad scl_prefix: %s' % scl_prefix)
    except Exception as e:
        raise_from(AnsibleError('Error in filter_scl_name: %s' % to_native(e)), e)

def filter_scl_vendor(scl_prefix):
    scl_prefix = str(scl_prefix)
    if len(scl_prefix) == 0:
        raise AnsibleError('empty provider in scl_prefix')
    try:
        if scl_prefix.count('-') == 1:
            if len(scl_prefix.split('-', 1)[0]) == 0:
                raise AnsibleError('empty provider in scl_prefix')
            return scl_prefix.split('-', 1)[0]
        elif scl_prefix.count('-') == 0:
            return 'rh'
        else:
            raise AnsibleError('Bad scl_prefix: %s' % scl_prefix)
    except Exception as e:
        raise_from(AnsibleError('Error in filter_scl_vendor: %s' % to_native(e)), e)

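# Quick sanity examples for the two filters above: an SCL prefix is either a
# bare name (implicit 'rh' vendor) or 'vendor-name'. Runnable as-is once the
# two filter functions are defined.
def _example_scl_filters():
    assert filter_scl_name('rh-python38') == 'python38'
    assert filter_scl_name('python38') == 'python38'
    assert filter_scl_vendor('rh-python38') == 'rh'
    assert filter_scl_vendor('python38') == 'rh'  # default vendor
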
def __init__(self, play_context, new_stdin, *args, **kwargs):
    if LIBVIRT_IMPORT_ERROR:
        raise_from(
            AnsibleError('libvirt-python must be installed to use this plugin'),
            LIBVIRT_IMPORT_ERROR)

    super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

    self._host = self._play_context.remote_addr

    # Windows operates differently from a POSIX connection/shell plugin,
    # we need to set various properties to ensure SSH on Windows continues
    # to work
    if getattr(self._shell, "_IS_WINDOWS", False):
        self.has_native_async = True
        self.always_pipeline_modules = True
        self.module_implementation_preferences = ('.ps1', '.exe', '')
        self.allow_executable = False

def __init__(self, display=None):
    super(CallbackModule, self).__init__(display=display)
    self.hide_task_arguments = None
    self.apm_service_name = None
    self.ansible_playbook = None
    self.traceparent = False
    self.play_name = None
    self.tasks_data = None
    self.errors = 0
    self.disabled = False

    if ELASTIC_LIBRARY_IMPORT_ERROR:
        raise_from(
            AnsibleError('The `elastic-apm` must be installed to use this plugin'),
            ELASTIC_LIBRARY_IMPORT_ERROR)

    self.tasks_data = OrderedDict()

    self.elastic = ElasticSource(display=self._display)

def html_form_data(client, params):
    """
    Transform our Ansible params into an HTML form "data" for POST'ing.
    """
    data = {}
    data['product[short_name]'] = params['short_name']
    data['product[name]'] = params['name']
    data['product[description]'] = params['description']
    data['product[bugzilla_product_name]'] = params['bugzilla_product_name']
    data['product[valid_bug_states][]'] = params['valid_bug_states']
    data['product[isactive]'] = int(params['active'])
    data['product[ftp_path]'] = params['ftp_path']
    data['product[ftp_subdir]'] = params.get('ftp_subdir', '')
    data['product[is_internal]'] = int(params['internal'])
    docs_reviewer = params.get('default_docs_reviewer')
    if docs_reviewer is not None:
        try:
            docs_user_id = common_errata_tool.user_id(client, docs_reviewer)
        except UserNotFoundError as e:
            raise_from(DocsReviewerNotFoundError(str(e)), e)
        data['product[default_docs_reviewer_id]'] = docs_user_id
    # push targets need scraper
    push_targets = params['push_targets']
    push_target_scraper = common_errata_tool.PushTargetScraper(client)
    push_target_ints = push_target_scraper.convert_to_ints(push_targets)
    data['product[push_targets][]'] = push_target_ints
    # This is an internal-only product thing that we can probably skip:
    # data['product[cdw_flag_prefix]'] = params['cdw_flag_prefix']
    solution = params['default_solution'].upper()
    solution_id = int(common_errata_tool.DefaultSolutions[solution])
    data['product[default_solution_id]'] = solution_id
    state_machine_rule_set = params['state_machine_rule_set']
    rules_scraper = common_errata_tool.WorkflowRulesScraper(client)
    state_machine_rule_set_id = int(rules_scraper.enum[state_machine_rule_set])
    data['product[state_machine_rule_set_id]'] = state_machine_rule_set_id
    data['product[move_bugs_on_qe]'] = int(params['move_bugs_on_qe'])
    exd_org_group = params.get('exd_org_group')
    if exd_org_group is not None:
        exd_org_group_id = int(EXD_ORG_GROUPS[exd_org_group])
        data['product[exd_org_group_id]'] = exd_org_group_id
    return data