def main():
    """Entry point: build the self-signed certificates needed to answer an
    ACME tls-alpn-01 challenge from a private key and the challenge data."""
    module = AnsibleModule(
        argument_spec=dict(
            challenge=dict(type='str', required=True, choices=['tls-alpn-01']),
            challenge_data=dict(type='dict', required=True),
            private_key_src=dict(type='path'),
            private_key_content=dict(type='str', no_log=True),
        ),
        required_one_of=(
            ['private_key_src', 'private_key_content'],
        ),
        mutually_exclusive=(
            ['private_key_src', 'private_key_content'],
        ),
    )
    if not HAS_CRYPTOGRAPHY:
        module.fail_json(msg=missing_required_lib('cryptography >= 1.3'),
                         exception=CRYPTOGRAPHY_IMP_ERR)

    try:
        challenge = module.params['challenge']
        challenge_data = module.params['challenge_data']

        # Obtain the private key PEM: inline content wins, otherwise read from file.
        key_pem = module.params.get('private_key_content')
        if key_pem is None:
            key_pem = read_file(module.params['private_key_src'])
        else:
            key_pem = to_bytes(key_pem)
        try:
            private_key = cryptography.hazmat.primitives.serialization.load_pem_private_key(
                key_pem, password=None, backend=_cryptography_backend)
        except Exception as e:
            raise ModuleFailException('Error while loading private key: {0}'.format(e))

        # 'resource_original' carries "<type>:<value>"; default to a DNS
        # identifier derived from 'resource'.
        domain = to_text(challenge_data['resource'])
        identifier_type, identifier = to_text(challenge_data.get(
            'resource_original', 'dns:' + challenge_data['resource'])).split(':', 1)

        subject = issuer = cryptography.x509.Name([])
        not_valid_before = datetime.datetime.utcnow()
        not_valid_after = datetime.datetime.utcnow() + datetime.timedelta(days=10)

        if identifier_type == 'dns':
            san = cryptography.x509.DNSName(identifier)
        elif identifier_type == 'ip':
            san = cryptography.x509.IPAddress(ipaddress.ip_address(identifier))
        else:
            raise ModuleFailException('Unsupported identifier type "{0}"'.format(identifier_type))

        # Shared builder state for both certificates; CertificateBuilder is
        # immutable, so each call below returns a fresh builder.
        base_builder = cryptography.x509.CertificateBuilder().subject_name(
            subject
        ).issuer_name(
            issuer
        ).public_key(
            private_key.public_key()
        ).not_valid_before(
            not_valid_before
        ).not_valid_after(
            not_valid_after
        ).add_extension(
            cryptography.x509.SubjectAlternativeName([san]), critical=False,
        )

        # Regular self-signed certificate (no challenge extension).
        regular_certificate = base_builder.serial_number(
            cryptography.x509.random_serial_number()
        ).sign(
            private_key, cryptography.hazmat.primitives.hashes.SHA256(),
            _cryptography_backend
        )

        # Challenge certificate: adds the critical acmeIdentifier extension
        # (OID 1.3.6.1.5.5.7.1.31) carrying the key authorization digest.
        if challenge == 'tls-alpn-01':
            value = base64.b64decode(challenge_data['resource_value'])
            acme_extension = cryptography.x509.UnrecognizedExtension(
                cryptography.x509.ObjectIdentifier("1.3.6.1.5.5.7.1.31"),
                encode_octet_string(value),
            )
            challenge_certificate = base_builder.serial_number(
                cryptography.x509.random_serial_number()
            ).add_extension(
                acme_extension, critical=True,
            ).sign(
                private_key, cryptography.hazmat.primitives.hashes.SHA256(),
                _cryptography_backend
            )

        module.exit_json(
            changed=True,
            domain=domain,
            identifier_type=identifier_type,
            identifier=identifier,
            challenge_certificate=challenge_certificate.public_bytes(
                cryptography.hazmat.primitives.serialization.Encoding.PEM),
            regular_certificate=regular_certificate.public_bytes(
                cryptography.hazmat.primitives.serialization.Encoding.PEM)
        )
    except ModuleFailException as e:
        e.do_fail(module)
def main():
    """Entry point: collect information about a certificate file, picking a
    crypto backend (cryptography preferred, pyOpenSSL fallback)."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True),
            valid_at=dict(type='dict'),
            select_crypto_backend=dict(
                type='str', default='auto',
                choices=['auto', 'cryptography', 'pyopenssl']),
        ),
        supports_check_mode=True,
    )

    try:
        base_dir = os.path.dirname(module.params['path']) or '.'
        if not os.path.isdir(base_dir):
            module.fail_json(
                name=base_dir,
                msg='The directory %s does not exist or the file is not a directory' % base_dir)

        backend = module.params['select_crypto_backend']
        if backend == 'auto':
            # Auto-detect: cryptography is preferred when both libraries
            # satisfy their minimum versions.
            cryptography_usable = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(
                MINIMAL_CRYPTOGRAPHY_VERSION)
            pyopenssl_usable = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(
                MINIMAL_PYOPENSSL_VERSION)
            if cryptography_usable:
                backend = 'cryptography'
            elif pyopenssl_usable:
                backend = 'pyopenssl'
            # Still 'auto' means neither library qualified.
            if backend == 'auto':
                module.fail_json(
                    msg=("Can't detect any of the required Python libraries "
                         "cryptography (>= {0}) or PyOpenSSL (>= {1})"
                         ).format(MINIMAL_CRYPTOGRAPHY_VERSION,
                                  MINIMAL_PYOPENSSL_VERSION))

        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL'),
                                 exception=PYOPENSSL_IMP_ERR)
            # get_extensions only exists from pyOpenSSL 0.15 onwards.
            try:
                getattr(crypto.X509Req, 'get_extensions')
            except AttributeError:
                module.fail_json(msg='You need to have PyOpenSSL>=0.15')
            certificate = CertificateInfoPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography'),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            certificate = CertificateInfoCryptography(module)

        module.exit_json(**certificate.get_info())
    except crypto_utils.OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
def main():
    """Create, update or delete a Zabbix value map.

    Fix: the check-mode branch for *creating* a value map formatted the
    message with ``valuemap_id``, which is only assigned when the value map
    already exists — on the create path it was unbound and raised NameError.
    The message no longer references an ID that cannot exist yet.
    """
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            validate_certs=dict(type='bool', required=False, default=True),
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            mappings=dict(
                type='list',
                elements='dict',
                options=dict(
                    value=dict(type='str', required=True),
                    map_to=dict(type='str', required=True),
                ),
            ),
            timeout=dict(type='int', default=10),
        ),
        supports_check_mode=True,
        required_if=[
            ['state', 'present', ['mappings']],
        ])

    if not HAS_ZABBIX_API:
        module.fail_json(msg=missing_required_lib(
            'zabbix-api', url='https://pypi.org/project/zabbix-api/'),
            exception=ZBX_IMP_ERR)

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    validate_certs = module.params['validate_certs']
    name = module.params['name']
    state = module.params['state']
    mappings = module.params['mappings']
    timeout = module.params['timeout']

    zbx = None
    # Log in to the Zabbix API; the session is closed at interpreter exit.
    try:
        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user,
                        passwd=http_login_password, validate_certs=validate_certs)
        zbx.login(login_user, login_password)
        atexit.register(zbx.logout)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    valuemap_exists, valuemap_object = check_if_valuemap_exists(module, zbx, name)
    parameters = construct_parameters(name=name, mappings=mappings)

    if valuemap_exists:
        valuemap_id = valuemap_object['valuemapid']
        if state == 'absent':
            if module.check_mode:
                module.exit_json(
                    changed=True,
                    msg="Value map would have been deleted. Name: {name}, ID: {_id}".format(
                        name=name, _id=valuemap_id))
            valuemap_id = delete_valuemap(module, zbx, valuemap_id)
            module.exit_json(
                changed=True,
                msg="Value map deleted. Name: {name}, ID: {_id}".format(
                    name=name, _id=valuemap_id))
        else:
            params_to_update, diff = get_update_params(module, zbx, valuemap_object,
                                                       **parameters)
            if params_to_update == {}:
                module.exit_json(
                    changed=False,
                    msg="Value map is up to date: {name}".format(name=name))
            else:
                if module.check_mode:
                    module.exit_json(
                        changed=True,
                        diff=diff,
                        msg="Value map would have been updated. Name: {name}, ID: {_id}".format(
                            name=name, _id=valuemap_id))
                valuemap_id = update_valuemap(module, zbx,
                                              valuemapid=valuemap_id,
                                              **params_to_update)
                module.exit_json(
                    changed=True,
                    diff=diff,
                    msg="Value map updated. Name: {name}, ID: {_id}".format(
                        name=name, _id=valuemap_id))
    else:
        if state == "absent":
            # Already absent — nothing to do.
            module.exit_json(changed=False)
        else:
            if module.check_mode:
                # BUG FIX: the original formatted this message with an unbound
                # `valuemap_id` (never assigned on the create path), raising
                # NameError in check mode. No ID exists before creation.
                module.exit_json(
                    changed=True,
                    msg="Value map would have been created. Name: {name}".format(name=name))
            valuemap_id = create_valuemap(module, zbx, **parameters)
            module.exit_json(
                changed=True,
                msg="Value map created: {name}, ID: {_id}".format(
                    name=name, _id=valuemap_id))
def check_required_library(self):
    """Fail the module run immediately if the `requests` library is not importable.

    HAS_REQUESTS is expected to be a module-level flag set at import time.
    """
    if not HAS_REQUESTS:
        self.module.fail_json(msg=missing_required_lib("requests"))
def main():
    """Create, update or delete a Zabbix host.

    Flow: parse parameters, connect to the Zabbix API, normalize the
    requested interfaces, then either delete, update (comparing against the
    existing host) or create the host, including template links and
    inventory settings.
    """
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            host_name=dict(type='str', required=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            validate_certs=dict(type='bool', required=False, default=True),
            host_groups=dict(type='list', required=False),
            link_templates=dict(type='list', required=False),
            status=dict(default="enabled", choices=['enabled', 'disabled']),
            state=dict(default="present", choices=['present', 'absent']),
            inventory_mode=dict(required=False, choices=['automatic', 'manual', 'disabled']),
            ipmi_authtype=dict(type='int', default=None),
            ipmi_privilege=dict(type='int', default=None),
            ipmi_username=dict(type='str', required=False, default=None),
            ipmi_password=dict(type='str', required=False, default=None, no_log=True),
            tls_connect=dict(type='int', default=1),
            tls_accept=dict(type='int', default=1),
            tls_psk_identity=dict(type='str', required=False),
            tls_psk=dict(type='str', required=False),
            ca_cert=dict(type='str', required=False, aliases=['tls_issuer']),
            tls_subject=dict(type='str', required=False),
            inventory_zabbix=dict(required=False, type='dict'),
            timeout=dict(type='int', default=10),
            interfaces=dict(type='list', required=False),
            force=dict(type='bool', default=True),
            proxy=dict(type='str', required=False),
            visible_name=dict(type='str', required=False),
            description=dict(type='str', required=False)
        ),
        supports_check_mode=True
    )

    if not HAS_ZABBIX_API:
        module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'),
                         exception=ZBX_IMP_ERR)

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    validate_certs = module.params['validate_certs']
    host_name = module.params['host_name']
    visible_name = module.params['visible_name']
    description = module.params['description']
    host_groups = module.params['host_groups']
    link_templates = module.params['link_templates']
    inventory_mode = module.params['inventory_mode']
    ipmi_authtype = module.params['ipmi_authtype']
    ipmi_privilege = module.params['ipmi_privilege']
    ipmi_username = module.params['ipmi_username']
    ipmi_password = module.params['ipmi_password']
    tls_connect = module.params['tls_connect']
    tls_accept = module.params['tls_accept']
    tls_psk_identity = module.params['tls_psk_identity']
    tls_psk = module.params['tls_psk']
    tls_issuer = module.params['ca_cert']
    tls_subject = module.params['tls_subject']
    inventory_zabbix = module.params['inventory_zabbix']
    status = module.params['status']
    state = module.params['state']
    timeout = module.params['timeout']
    interfaces = module.params['interfaces']
    force = module.params['force']
    proxy = module.params['proxy']

    # convert enabled to 0; disabled to 1 (Zabbix API numeric status)
    status = 1 if status == "disabled" else 0

    zbx = None
    # login to zabbix; the session is logged out at interpreter exit
    try:
        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
                        validate_certs=validate_certs)
        zbx.login(login_user, login_password)
        atexit.register(zbx.logout)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    host = Host(module, zbx)

    # Resolve template and group names to IDs.
    template_ids = []
    if link_templates:
        template_ids = host.get_template_ids(link_templates)

    group_ids = []
    if host_groups:
        group_ids = host.get_group_ids_by_group_names(host_groups)

    ip = ""
    if interfaces:
        # ensure interfaces are well-formed: map symbolic types to numbers,
        # then fill defaults for useip/dns/ip/main/port.
        for interface in interfaces:
            if 'type' not in interface:
                module.fail_json(msg="(interface) type needs to be specified for interface '%s'." % interface)

            interfacetypes = {'agent': 1, 'snmp': 2, 'ipmi': 3, 'jmx': 4}
            if interface['type'] in interfacetypes.keys():
                interface['type'] = interfacetypes[interface['type']]

            if interface['type'] < 1 or interface['type'] > 4:
                module.fail_json(msg="Interface type can only be 1-4 for interface '%s'." % interface)

            if 'useip' not in interface:
                interface['useip'] = 0

            # dns is required when useip is 0, ip when useip is 1; the
            # unused field is defaulted to an empty string.
            if 'dns' not in interface:
                if interface['useip'] == 0:
                    module.fail_json(msg="dns needs to be set if useip is 0 on interface '%s'." % interface)
                interface['dns'] = ''

            if 'ip' not in interface:
                if interface['useip'] == 1:
                    module.fail_json(msg="ip needs to be set if useip is 1 on interface '%s'." % interface)
                interface['ip'] = ''

            if 'main' not in interface:
                interface['main'] = 0

            # The API expects the port as a string.
            if 'port' in interface and not isinstance(interface['port'], str):
                try:
                    interface['port'] = str(interface['port'])
                except ValueError:
                    module.fail_json(msg="port should be convertable to string on interface '%s'." % interface)

            # Default ports per interface type: agent/snmp/ipmi/jmx.
            if 'port' not in interface:
                if interface['type'] == 1:
                    interface['port'] = "10050"
                elif interface['type'] == 2:
                    interface['port'] = "161"
                elif interface['type'] == 3:
                    interface['port'] = "623"
                elif interface['type'] == 4:
                    interface['port'] = "12345"

            # Remember the agent interface IP for the result message.
            if interface['type'] == 1:
                ip = interface['ip']

    # Use proxy specified, or set to 0
    if proxy:
        proxy_id = host.get_proxyid_by_proxy_name(proxy)
    else:
        proxy_id = 0

    # check if host exist
    is_host_exist = host.is_host_exist(host_name)

    if is_host_exist:
        # get host id by host name
        zabbix_host_obj = host.get_host_by_host_name(host_name)
        host_id = zabbix_host_obj['hostid']

        # If proxy is not specified as a module parameter, use the existing setting
        if proxy is None:
            proxy_id = int(zabbix_host_obj['proxy_hostid'])

        if state == "absent":
            # remove host
            host.delete_host(host_id, host_name)
            module.exit_json(changed=True, result="Successfully delete host %s" % host_name)
        else:
            if not host_groups:
                # if host_groups have not been specified when updating an existing host, just
                # get the group_ids from the existing host without updating them.
                group_ids = host.get_group_ids_by_host_id(host_id)

            # get existing host's interfaces
            exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})

            # if no interfaces were specified with the module, start with an empty list
            if not interfaces:
                interfaces = []

            # When force=no is specified, append existing interfaces to interfaces to update. When
            # no interfaces have been specified, copy existing interfaces as specified from the API.
            # Do the same with templates and host groups.
            if not force or not interfaces:
                for interface in copy.deepcopy(exist_interfaces):
                    # remove values not used during hostinterface.add/update calls
                    for key in tuple(interface.keys()):
                        if key in ['interfaceid', 'hostid', 'bulk']:
                            interface.pop(key, None)

                    # The API returns these numeric fields as strings;
                    # normalize so the membership test below compares equals.
                    for index in interface.keys():
                        if index in ['useip', 'main', 'type']:
                            interface[index] = int(interface[index])

                    if interface not in interfaces:
                        interfaces.append(interface)

            if not force or link_templates is None:
                template_ids = list(set(template_ids + host.get_host_templates_by_host_id(host_id)))

            if not force:
                for group_id in host.get_group_ids_by_host_id(host_id):
                    if group_id not in group_ids:
                        group_ids.append(group_id)

            # update host only when some property actually differs
            if host.check_all_properties(host_id, group_ids, status, interfaces, template_ids,
                                         exist_interfaces, zabbix_host_obj, proxy_id, visible_name,
                                         description, host_name, inventory_mode, inventory_zabbix,
                                         tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject,
                                         tls_connect, ipmi_authtype, ipmi_privilege, ipmi_username,
                                         ipmi_password):
                host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces,
                                 proxy_id, visible_name, description, tls_connect, tls_accept,
                                 tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype,
                                 ipmi_privilege, ipmi_username, ipmi_password)
                host.link_or_clear_template(host_id, template_ids, tls_connect, tls_accept,
                                            tls_psk_identity, tls_psk, tls_issuer, tls_subject,
                                            ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password)
                host.update_inventory_mode(host_id, inventory_mode)
                host.update_inventory_zabbix(host_id, inventory_zabbix)
                module.exit_json(changed=True,
                                 result="Successfully update host %s (%s) and linked with template '%s'"
                                        % (host_name, ip, link_templates))
            else:
                module.exit_json(changed=False)
    else:
        if state == "absent":
            # the host is already deleted.
            module.exit_json(changed=False)

        if not group_ids:
            module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)

        if not interfaces or (interfaces and len(interfaces) == 0):
            module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)

        # create host
        host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id, visible_name,
                                description, tls_connect, tls_accept, tls_psk_identity, tls_psk,
                                tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username,
                                ipmi_password)
        host.link_or_clear_template(host_id, template_ids, tls_connect, tls_accept, tls_psk_identity,
                                    tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege,
                                    ipmi_username, ipmi_password)
        host.update_inventory_mode(host_id, inventory_mode)
        host.update_inventory_zabbix(host_id, inventory_zabbix)
        module.exit_json(changed=True,
                         result="Successfully added host %s (%s) and linked with template '%s'" % (
                             host_name, ip, link_templates))
def main():
    """Register, update or remove a GitLab CI runner."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        api_token=dict(type='str', no_log=True),
        description=dict(type='str', required=True, aliases=["name"]),
        active=dict(type='bool', default=True),
        tag_list=dict(type='list', default=[]),
        run_untagged=dict(type='bool', default=True),
        locked=dict(type='bool', default=False),
        access_level=dict(type='str', default='ref_protected',
                          choices=["not_protected", "ref_protected"]),
        maximum_timeout=dict(type='int', default=3600),
        registration_token=dict(type='str', required=True),
        state=dict(type='str', default="present", choices=["absent", "present"]),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['api_username', 'api_token'],
            ['api_password', 'api_token'],
        ],
        required_together=[
            ['api_username', 'api_password'],
        ],
        required_one_of=[
            ['api_username', 'api_token'],
        ],
        supports_check_mode=True,
    )

    state = module.params['state']
    runner_description = module.params['description']

    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg=missing_required_lib("python-gitlab"),
                         exception=GITLAB_IMP_ERR)

    gitlab_instance = gitlabAuthentication(module)
    gitlab_runner = GitLabRunner(module, gitlab_instance)
    runner_exists = gitlab_runner.existsRunner(runner_description)

    if state == 'absent':
        if runner_exists:
            gitlab_runner.deleteRunner()
            module.exit_json(changed=True,
                             msg="Successfully deleted runner %s" % runner_description)
        module.exit_json(changed=False, msg="Runner deleted or does not exists")

    if state == 'present':
        # Desired runner attributes as expected by createOrUpdateRunner().
        runner_options = {
            "active": module.params['active'],
            "tag_list": module.params['tag_list'],
            "run_untagged": module.params['run_untagged'],
            "locked": module.params['locked'],
            "access_level": module.params['access_level'],
            "maximum_timeout": module.params['maximum_timeout'],
            "registration_token": module.params['registration_token'],
        }
        if gitlab_runner.createOrUpdateRunner(runner_description, runner_options):
            module.exit_json(changed=True, runner=gitlab_runner.runnerObject._attrs,
                             msg="Successfully created or updated the runner %s" % runner_description)
        module.exit_json(changed=False, runner=gitlab_runner.runnerObject._attrs,
                         msg="No need to update the runner %s" % runner_description)
def main():
    """Fetch the TLS certificate presented by host:port (optionally through an
    HTTP CONNECT proxy) and return its parsed details.

    Fix: removed the unconditional ``if not PYOPENSSL_FOUND: fail_json(...)``
    that ran *after* backend selection — it forced pyOpenSSL to be installed
    even when the cryptography backend had been selected, making that backend
    unusable on hosts without pyOpenSSL. The backend-specific availability
    checks performed during selection are sufficient.
    """
    module = AnsibleModule(
        argument_spec=dict(
            ca_cert=dict(type='path'),
            host=dict(type='str', required=True),
            port=dict(type='int', required=True),
            proxy_host=dict(type='str'),
            proxy_port=dict(type='int', default=8080),
            timeout=dict(type='int', default=10),
            select_crypto_backend=dict(type='str',
                                       choices=['auto', 'pyopenssl', 'cryptography'],
                                       default='auto'),
        ),
    )

    ca_cert = module.params.get('ca_cert')
    host = module.params.get('host')
    port = module.params.get('port')
    proxy_host = module.params.get('proxy_host')
    proxy_port = module.params.get('proxy_port')
    timeout = module.params.get('timeout')

    backend = module.params.get('select_crypto_backend')
    if backend == 'auto':
        # Detection what is possible
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(
            MINIMAL_CRYPTOGRAPHY_VERSION)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(
            MINIMAL_PYOPENSSL_VERSION)

        # First try cryptography, then pyOpenSSL
        if can_use_cryptography:
            backend = 'cryptography'
        elif can_use_pyopenssl:
            backend = 'pyopenssl'

        # Still 'auto' means neither library qualified.
        if backend == 'auto':
            module.fail_json(msg=(
                "Can't detect any of the required Python libraries "
                "cryptography (>= {0}) or PyOpenSSL (>= {1})"
            ).format(MINIMAL_CRYPTOGRAPHY_VERSION, MINIMAL_PYOPENSSL_VERSION))

    # Backend-specific availability checks (the only ones needed).
    if backend == 'pyopenssl':
        if not PYOPENSSL_FOUND:
            module.fail_json(msg=missing_required_lib(
                'pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                exception=PYOPENSSL_IMP_ERR)
    elif backend == 'cryptography':
        if not CRYPTOGRAPHY_FOUND:
            module.fail_json(msg=missing_required_lib(
                'cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                exception=CRYPTOGRAPHY_IMP_ERR)

    result = dict(changed=False)

    if timeout:
        setdefaulttimeout(timeout)

    if ca_cert:
        if not isfile(ca_cert):
            module.fail_json(msg="ca_cert file does not exist")

    if proxy_host:
        # Manual HTTP CONNECT tunnel, then TLS handshake over the raw socket.
        if not HAS_CREATE_DEFAULT_CONTEXT:
            module.fail_json(
                msg='To use proxy_host, you must run the get_certificate module with Python 2.7 or newer.',
                exception=CREATE_DEFAULT_CONTEXT_IMP_ERR)

        try:
            connect = "CONNECT %s:%s HTTP/1.0\r\n\r\n" % (host, port)
            sock = socket()
            atexit.register(sock.close)
            sock.connect((proxy_host, proxy_port))
            sock.send(connect.encode())
            sock.recv(8192)

            ctx = create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = CERT_NONE

            if ca_cert:
                ctx.verify_mode = CERT_OPTIONAL
                ctx.load_verify_locations(cafile=ca_cert)

            # getpeercert(True) returns DER; convert to PEM for parsing below.
            cert = ctx.wrap_socket(sock, server_hostname=host).getpeercert(True)
            cert = DER_cert_to_PEM_cert(cert)
        except Exception as e:
            module.fail_json(msg="Failed to get cert from port with error: {0}".format(e))
    else:
        try:
            cert = get_server_certificate((host, port), ca_certs=ca_cert)
        except Exception as e:
            module.fail_json(msg="Failed to get cert from port with error: {0}".format(e))

    result['cert'] = cert

    if backend == 'pyopenssl':
        x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert)

        result['subject'] = {}
        for component in x509.get_subject().get_components():
            result['subject'][component[0]] = component[1]

        result['expired'] = x509.has_expired()

        result['extensions'] = []
        extension_count = x509.get_extension_count()
        for index in range(0, extension_count):
            extension = x509.get_extension(index)
            result['extensions'].append({
                'critical': extension.get_critical(),
                'asn1_data': extension.get_data(),
                'name': extension.get_short_name(),
            })

        result['issuer'] = {}
        for component in x509.get_issuer().get_components():
            result['issuer'][component[0]] = component[1]

        result['not_after'] = x509.get_notAfter()
        result['not_before'] = x509.get_notBefore()
        result['serial_number'] = x509.get_serial_number()
        result['signature_algorithm'] = x509.get_signature_algorithm()
        result['version'] = x509.get_version()
    elif backend == 'cryptography':
        x509 = cryptography.x509.load_pem_x509_certificate(
            to_bytes(cert), cryptography_backend())

        result['subject'] = {}
        for attribute in x509.subject:
            result['subject'][crypto_utils.cryptography_oid_to_name(
                attribute.oid, short=True)] = attribute.value

        result['expired'] = x509.not_valid_after < datetime.datetime.utcnow()

        result['extensions'] = []
        for dotted_number, entry in crypto_utils.cryptography_get_extensions_from_cert(x509).items():
            oid = cryptography.x509.oid.ObjectIdentifier(dotted_number)
            result['extensions'].append({
                'critical': entry['critical'],
                'asn1_data': base64.b64decode(entry['value']),
                'name': crypto_utils.cryptography_oid_to_name(oid, short=True),
            })

        result['issuer'] = {}
        for attribute in x509.issuer:
            result['issuer'][crypto_utils.cryptography_oid_to_name(
                attribute.oid, short=True)] = attribute.value

        result['not_after'] = x509.not_valid_after.strftime('%Y%m%d%H%M%SZ')
        result['not_before'] = x509.not_valid_before.strftime('%Y%m%d%H%M%SZ')
        result['serial_number'] = x509.serial_number
        result['signature_algorithm'] = crypto_utils.cryptography_oid_to_name(
            x509.signature_algorithm_oid)

        # We need the -1 offset to get the same values as pyOpenSSL
        if x509.version == cryptography.x509.Version.v1:
            result['version'] = 1 - 1
        elif x509.version == cryptography.x509.Version.v3:
            result['version'] = 3 - 1
        else:
            result['version'] = "unknown"

    module.exit_json(**result)
def main():
    """Manage a PostgreSQL table: create/drop/truncate/rename and adjust
    owner, tablespace and storage parameters."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        table=dict(type='str', required=True, aliases=['name']),
        state=dict(type='str', default="present", choices=["absent", "present"]),
        db=dict(type='str', default='', aliases=['login_db']),
        port=dict(type='int', default=5432, aliases=['login_port']),
        ssl_mode=dict(type='str', default='prefer',
                      choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
        ca_cert=dict(type='str', aliases=['ssl_rootcert']),
        tablespace=dict(type='str'),
        owner=dict(type='str'),
        unlogged=dict(type='bool'),
        like=dict(type='str'),
        including=dict(type='str'),
        rename=dict(type='str'),
        truncate=dict(type='bool'),
        columns=dict(type='list'),
        storage_params=dict(type='list'),
        session_role=dict(type='str'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    table = module.params["table"]
    state = module.params["state"]
    tablespace = module.params["tablespace"]
    owner = module.params["owner"]
    unlogged = module.params["unlogged"]
    like = module.params["like"]
    including = module.params["including"]
    newname = module.params["rename"]
    storage_params = module.params["storage_params"]
    truncate = module.params["truncate"]
    columns = module.params["columns"]
    sslrootcert = module.params["ca_cert"]
    session_role = module.params["session_role"]

    # Mutually exclusive parameter combinations:
    creation_opts = (truncate or newname or columns or tablespace or like
                     or storage_params or unlogged or owner or including)
    if state == 'absent' and creation_opts:
        module.fail_json(msg="%s: state=absent is mutually exclusive with: "
                             "truncate, rename, columns, tablespace, "
                             "including, like, storage_params, unlogged, owner" % table)

    if truncate and (newname or columns or like or unlogged
                     or storage_params or owner or tablespace or including):
        module.fail_json(msg="%s: truncate is mutually exclusive with: "
                             "rename, columns, like, unlogged, including, "
                             "storage_params, owner, tablespace" % table)

    if newname and (columns or like or unlogged
                    or storage_params or owner or tablespace or including):
        module.fail_json(msg="%s: rename is mutually exclusive with: "
                             "columns, like, unlogged, including, "
                             "storage_params, owner, tablespace" % table)

    if like and columns:
        module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
    if including and not like:
        module.fail_json(msg="%s: including param needs like param specified" % table)

    # Build psycopg2 connection kwargs, dropping empty values so the library
    # defaults apply.
    # NOTE(review): the "******" values below look like redacted placeholders;
    # upstream maps these to "user"/"password" — verify before relying on them.
    params_map = {
        "login_host": "host",
        "login_user": "******",
        "login_password": "******",
        "port": "port",
        "db": "database",
        "ssl_mode": "sslmode",
        "ca_cert": "sslrootcert"
    }
    kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
              if k in params_map and v != "" and v is not None)

    if not HAS_PSYCOPG2:
        module.fail_json(msg=missing_required_lib("psycopg2"))

    # If a login_unix_socket is specified, incorporate it here.
    is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
    if is_localhost and module.params["login_unix_socket"] != "":
        kw["host"] = module.params["login_unix_socket"]

    # NOTE(review): plain string comparison of version numbers — fragile for
    # multi-digit components (e.g. '2.10'); kept as-is to preserve behavior.
    if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
        module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to user the ca_cert parameter')

    try:
        db_connection = psycopg2.connect(**kw)
        cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
    except TypeError as e:
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
        module.fail_json(msg="unable to connect to database: %s" % to_native(e))
    except Exception as e:
        module.fail_json(msg="unable to connect to database: %s" % to_native(e))

    if session_role:
        try:
            cursor.execute('SET ROLE %s' % session_role)
        except Exception as e:
            module.fail_json(msg="Could not switch role: %s" % to_native(e))

    # The DDL helpers expect comma-joined strings, not lists.
    if storage_params:
        storage_params = ','.join(storage_params)
    if columns:
        columns = ','.join(columns)

    ##############
    # Do main job:
    table_obj = Table(table, module, cursor)

    # Set default returned values:
    changed = False
    kw['table'] = table
    kw['state'] = ''
    if table_obj.exists:
        kw = dict(
            table=table,
            state='present',
            owner=table_obj.info['owner'],
            tablespace=table_obj.info['tblspace'],
            storage_params=table_obj.info['storage_params'],
        )

    if state == 'absent':
        changed = table_obj.drop()
    elif truncate:
        changed = table_obj.truncate()
    elif newname:
        changed = table_obj.rename(newname)
        # Carry the executed queries over to the object tracking the new name.
        q = table_obj.executed_queries
        table_obj = Table(newname, module, cursor)
        table_obj.executed_queries = q
    elif state == 'present' and not like:
        changed = table_obj.create(columns, storage_params, tablespace, unlogged, owner)
    elif state == 'present' and like:
        changed = table_obj.create_like(like, including, tablespace, unlogged, storage_params)

    if changed:
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()

        # Refresh table info for RETURN.
        # Note, if table has been renamed, it gets info by newname:
        table_obj.get_info()
        db_connection.commit()
        if table_obj.exists:
            kw = dict(
                table=table,
                state='present',
                owner=table_obj.info['owner'],
                tablespace=table_obj.info['tblspace'],
                storage_params=table_obj.info['storage_params'],
            )
        else:
            # We just change the table state here
            # to keep other information about the dropped table:
            kw['state'] = 'absent'

    kw['queries'] = table_obj.executed_queries
    kw['changed'] = changed
    db_connection.close()
    module.exit_json(**kw)
def main():
    """Entry point: grant or revoke PostgreSQL privileges.

    Validates the interdependent ``type``/``schema``/``objs``/``privs``
    parameters, connects, optionally switches role, applies the requested
    privilege changes and exits with ``changed``.
    """
    module = AnsibleModule(argument_spec=dict(
        database=dict(required=True, aliases=['db']),
        state=dict(default='present', choices=['present', 'absent']),
        privs=dict(required=False, aliases=['priv']),
        type=dict(default='table', choices=[
            'table', 'sequence', 'function', 'database',
            'schema', 'language', 'tablespace', 'group',
            'default_privs'
        ]),
        objs=dict(required=False, aliases=['obj']),
        schema=dict(required=False),
        roles=dict(required=True, aliases=['role']),
        session_role=dict(required=False),
        grant_option=dict(required=False, type='bool', aliases=['admin_option']),
        host=dict(default='', aliases=['login_host']),
        port=dict(type='int', default=5432),
        unix_socket=dict(default='', aliases=['login_unix_socket']),
        login=dict(default='postgres', aliases=['login_user']),
        password=dict(default='', aliases=['login_password'], no_log=True),
        ssl_mode=dict(default="prefer", choices=[
            'disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full'
        ]),
        ssl_rootcert=dict(default=None)),
        supports_check_mode=True)

    # Create type object as namespace for module params
    p = type('Params', (), module.params)

    # param "schema": default, allowed depends on param "type"
    if p.type in ['table', 'sequence', 'function', 'default_privs']:
        p.schema = p.schema or 'public'
    elif p.schema:
        module.fail_json(msg='Argument "schema" is not allowed '
                             'for type "%s".' % p.type)

    # param "objs": default, required depends on param "type"
    if p.type == 'database':
        p.objs = p.objs or p.database
    elif not p.objs:
        module.fail_json(msg='Argument "objs" is required '
                             'for type "%s".' % p.type)

    # param "privs": allowed, required depends on param "type"
    if p.type == 'group':
        if p.privs:
            module.fail_json(msg='Argument "privs" is not allowed '
                                 'for type "group".')
    elif not p.privs:
        module.fail_json(msg='Argument "privs" is required '
                             'for type "%s".' % p.type)

    # Connect to Database
    if not psycopg2:
        module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
    try:
        conn = Connection(p)
    except psycopg2.Error as e:
        module.fail_json(msg='Could not connect to database: %s' % to_native(e),
                         exception=traceback.format_exc())
    except TypeError as e:
        # Older psycopg2 raises TypeError for the unknown 'sslrootcert' kwarg.
        if 'sslrootcert' in e.args[0]:
            module.fail_json(
                msg='Postgresql server must be at least version 8.4 to support sslrootcert')
        module.fail_json(msg="unable to connect to database: %s" % to_native(e),
                         exception=traceback.format_exc())
    except ValueError as e:
        # We raise this when the psycopg library is too old
        module.fail_json(msg=to_native(e))

    if p.session_role:
        try:
            conn.cursor.execute('SET ROLE %s' % pg_quote_identifier(p.session_role, 'role'))
        except Exception as e:
            module.fail_json(msg="Could not switch to role %s: %s"
                             % (p.session_role, to_native(e)),
                             exception=traceback.format_exc())

    try:
        # privs
        if p.privs:
            privs = frozenset(pr.upper() for pr in p.privs.split(','))
            if not privs.issubset(VALID_PRIVS):
                module.fail_json(msg='Invalid privileges specified: %s'
                                 % privs.difference(VALID_PRIVS))
        else:
            privs = None

        # objs:
        if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_tables_in_schema(p.schema)
        elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_sequences_in_schema(p.schema)
        elif p.type == 'default_privs':
            if p.objs == 'ALL_DEFAULT':
                objs = frozenset(VALID_DEFAULT_OBJS.keys())
            else:
                objs = frozenset(obj.upper() for obj in p.objs.split(','))
                if not objs.issubset(VALID_DEFAULT_OBJS):
                    module.fail_json(
                        msg='Invalid Object set specified: %s'
                        % objs.difference(VALID_DEFAULT_OBJS.keys()))
            # Again, do we have valid privs specified for object type:
            valid_objects_for_priv = frozenset(
                obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
            if not valid_objects_for_priv == objs:
                module.fail_json(
                    msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'
                    .format(valid_objects_for_priv, objs))
        else:
            objs = p.objs.split(',')
            # function signatures are encoded using ':' to separate args
            if p.type == 'function':
                objs = [obj.replace(':', ',') for obj in objs]

        # roles
        if p.roles == 'PUBLIC':
            roles = 'PUBLIC'
        else:
            roles = p.roles.split(',')

        changed = conn.manipulate_privs(
            obj_type=p.type,
            privs=privs,
            objs=objs,
            roles=roles,
            state=p.state,
            grant_option=p.grant_option,
            schema_qualifier=p.schema)
    except Error as e:
        conn.rollback()
        module.fail_json(msg=e.message, exception=traceback.format_exc())
    except psycopg2.Error as e:
        conn.rollback()
        # BUGFIX: was to_native(e.message) — psycopg2.Error has no .message
        # attribute on Python 3, so the handler itself raised AttributeError
        # and masked the real database error.
        module.fail_json(msg=to_native(e))

    if module.check_mode:
        conn.rollback()
    else:
        conn.commit()
    module.exit_json(changed=changed)
def run_module():
    """Ansible entry point for managing Shinobi monitors.

    Credentials come either from the ``user`` dict or the flat
    email/password parameters (never both).  Without ``state`` the module
    is read-only and returns monitor info; with ``state`` it delegates
    the change to ``modify_monitor``.
    """
    module = AnsibleModule(
        argument_spec={
            HOST_PARAMETER: dict(type="str", required=True),
            PORT_PARAMETER: dict(type="int", required=True),
            STATE_PARAMETER: dict(type="str", default=None),
            EMAIL_PARAMETER: dict(type="str"),
            PASSWORD_PARAMETER: dict(type="str", no_log=True),
            USER_PARAMETER: dict(type="dict", no_log=True),
            MONITOR_ID_PARAMETER: dict(type="str", aliases=MONITOR_ID_PARAMETER_ALIASES),
            CONFIGURATION_PARAMETER: dict(type="dict")
        })

    # Credentials supplied as a dict are validated, then flattened into
    # the plain email/password params so the rest of the code has a
    # single source of truth.
    user = module.params[USER_PARAMETER]
    if user:
        if (module.params[EMAIL_PARAMETER] or module.params[PASSWORD_PARAMETER]):
            module.fail_json(
                msg=f"Either {USER_PARAMETER} or {EMAIL_PARAMETER}/{PASSWORD_PARAMETER} must be given, not both")
        if not user[EMAIL_PARAMETER]:
            module.fail_json(
                msg=f"{EMAIL_PARAMETER} field missing from {USER_PARAMETER} argument")
        if not user[PASSWORD_PARAMETER]:
            module.fail_json(
                msg=f"{PASSWORD_PARAMETER} field missing from {USER_PARAMETER} argument")
        module.params[EMAIL_PARAMETER] = user[EMAIL_PARAMETER]
        module.params[PASSWORD_PARAMETER] = user[PASSWORD_PARAMETER]

    host = module.params[HOST_PARAMETER]
    port = module.params[PORT_PARAMETER]
    state = module.params[STATE_PARAMETER]
    email = module.params[EMAIL_PARAMETER]
    password = module.params[PASSWORD_PARAMETER]
    monitor_id = module.params[MONITOR_ID_PARAMETER]
    configuration = module.params[CONFIGURATION_PARAMETER]

    if shinobi_client_import_error is not None:
        module.fail_json(msg=missing_required_lib("shinobi-client"),
                         exception=shinobi_client_import_error)
    if email is None:
        module.fail_json(
            msg="The email of the user to set the monitor for must be given")
    if password is None:
        module.fail_json(
            msg="The password of the user to set the monitor for must be given")

    try:
        shinobi_monitor_orm = ShinobiClient(host, port).monitor(email, password)
    except ShinobiWrongPasswordError:
        module.fail_json(msg=f"Invalid email address and password pair")
        # fail_json exits, but return keeps static analysers happy
        return

    changed = False
    if state is None:
        # Read-only mode: report one monitor or all of them.
        if configuration is not None:
            module.fail_json(
                msg=f"\"{CONFIGURATION_PARAMETER}\" must not be supplied if {STATE_PARAMETER} is not set")
        if monitor_id is not None:
            info = dict(monitor=shinobi_monitor_orm.get(monitor_id))
        else:
            info = dict(monitors=shinobi_monitor_orm.get_all())
    else:
        # Mutating mode: a monitor id is mandatory; configuration is
        # mandatory unless we are deleting.
        if monitor_id is None:
            module.fail_json(
                msg=f"The ID of the monitor to be set must be given as {MONITOR_ID_PARAMETER}")
        if configuration is None and state != ABSENT_STATE:
            module.fail_json(
                msg=f"\"{CONFIGURATION_PARAMETER}\" must be supplied to setup a monitor")
        changed, info = modify_monitor(shinobi_monitor_orm, state, monitor_id, configuration)

    if info is None:
        module.exit_json(changed=changed)
    else:
        module.exit_json(changed=changed, **info)
def main():
    """Ansible entry point: create, update or delete Zabbix screens.

    Iterates over the ``screens`` list, reconciling each entry against
    the server, then reports which screens were created/updated/deleted.
    """
    module = AnsibleModule(argument_spec=dict(
        server_url=dict(type='str', required=True, aliases=['url']),
        login_user=dict(type='str', required=True),
        login_password=dict(type='str', required=True, no_log=True),
        http_login_user=dict(type='str', required=False, default=None),
        http_login_password=dict(type='str', required=False, default=None, no_log=True),
        validate_certs=dict(type='bool', required=False, default=True),
        timeout=dict(type='int', default=10),
        screens=dict(type='list', elements='dict', required=True, options=dict(
            screen_name=dict(type='str', required=True),
            host_group=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            graph_names=dict(type='list', elements='str'),
            graph_width=dict(type='int', default=None),
            graph_height=dict(type='int', default=None),
            graphs_in_row=dict(type='int', default=3),
            sort=dict(default=False, type='bool'),
        ), required_if=[['state', 'present', ['host_group']]])),
        supports_check_mode=True)

    if not HAS_ZABBIX_API:
        module.fail_json(msg=missing_required_lib(
            'zabbix-api', url='https://pypi.org/project/zabbix-api/'),
            exception=ZBX_IMP_ERR)

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    validate_certs = module.params['validate_certs']
    timeout = module.params['timeout']
    screens = module.params['screens']

    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user,
                        passwd=http_login_password, validate_certs=validate_certs)
        zbx.login(login_user, login_password)
        # make sure the session is closed when the module process exits
        atexit.register(zbx.logout)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    screen = Screen(module, zbx)
    created_screens = []
    changed_screens = []
    deleted_screens = []

    for zabbix_screen in screens:
        screen_name = zabbix_screen['screen_name']
        screen_id = screen.get_screen_id(screen_name)
        state = zabbix_screen['state']
        sort = zabbix_screen['sort']

        if state == "absent":
            if screen_id:
                # Remove the items first, then the screen itself.
                screen_item_list = screen.get_screen_items(screen_id)
                screen_item_id_list = []
                for screen_item in screen_item_list:
                    screen_item_id = screen_item['screenitemid']
                    screen_item_id_list.append(screen_item_id)
                screen.delete_screen_items(screen_id, screen_item_id_list)
                screen.delete_screen(screen_id, screen_name)
                deleted_screens.append(screen_name)
        else:
            host_group = zabbix_screen['host_group']
            graph_names = zabbix_screen['graph_names']
            graphs_in_row = zabbix_screen['graphs_in_row']
            graph_width = zabbix_screen['graph_width']
            graph_height = zabbix_screen['graph_height']
            host_group_id = screen.get_host_group_id(host_group)
            hosts = screen.get_host_ids_by_group_id(host_group_id, sort)
            screen_item_id_list = []
            resource_id_list = []

            graph_ids, v_size = screen.get_graph_ids(hosts, graph_names)
            h_size, v_size = screen.get_hsize_vsize(hosts, v_size, graphs_in_row)

            if not screen_id:
                # create screen
                screen_id = screen.create_screen(screen_name, h_size, v_size)
                screen.create_screen_items(screen_id, hosts, graph_names,
                                           graph_width, graph_height, h_size, graphs_in_row)
                created_screens.append(screen_name)
            else:
                screen_item_list = screen.get_screen_items(screen_id)
                for screen_item in screen_item_list:
                    screen_item_id = screen_item['screenitemid']
                    resource_id = screen_item['resourceid']
                    screen_item_id_list.append(screen_item_id)
                    resource_id_list.append(resource_id)

                # when the screen items changed, then update
                if graph_ids != resource_id_list:
                    deleted = screen.delete_screen_items(screen_id, screen_item_id_list)
                    if deleted:
                        screen.update_screen(screen_id, screen_name, h_size, v_size)
                        screen.create_screen_items(screen_id, hosts, graph_names,
                                                   graph_width, graph_height, h_size, graphs_in_row)
                        changed_screens.append(screen_name)

    if created_screens and changed_screens:
        module.exit_json(changed=True,
                         result="Successfully created screen(s): %s, and updated screen(s): %s"
                                % (",".join(created_screens), ",".join(changed_screens)))
    elif created_screens:
        module.exit_json(changed=True,
                         result="Successfully created screen(s): %s" % ",".join(created_screens))
    elif changed_screens:
        module.exit_json(changed=True,
                         result="Successfully updated screen(s): %s" % ",".join(changed_screens))
    elif deleted_screens:
        module.exit_json(changed=True,
                         result="Successfully deleted screen(s): %s" % ",".join(deleted_screens))
    else:
        module.exit_json(changed=False)
def main():
    """Ansible entry point: ensure a Vertica schema is present/absent
    with the requested usage/create roles and owner."""
    module = AnsibleModule(argument_spec=dict(
        schema=dict(required=True, aliases=['name']),
        usage_roles=dict(default=None, aliases=['usage_role']),
        create_roles=dict(default=None, aliases=['create_role']),
        owner=dict(default=None),
        state=dict(default='present', choices=['absent', 'present']),
        db=dict(default=None),
        cluster=dict(default='localhost'),
        port=dict(default='5433'),
        login_user=dict(default='dbadmin'),
        login_password=dict(default=None, no_log=True),
    ), supports_check_mode=True)

    if not pyodbc_found:
        module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)

    schema = module.params['schema']

    # Role params are comma-separated strings; drop empty entries.
    # BUGFIX: wrap filter() in list() — on Python 3 filter() returns a
    # one-shot iterator that is exhausted after a single pass, so any
    # later re-iteration of the role lists silently saw no roles.
    usage_roles = []
    if module.params['usage_roles']:
        usage_roles = module.params['usage_roles'].split(',')
        usage_roles = list(filter(None, usage_roles))
    create_roles = []
    if module.params['create_roles']:
        create_roles = module.params['create_roles'].split(',')
        create_roles = list(filter(None, create_roles))

    owner = module.params['owner']
    state = module.params['state']
    db = ''
    if module.params['db']:
        db = module.params['db']

    changed = False

    try:
        # autocommit: the module issues DDL, which Vertica does not run
        # inside an open transaction here.
        dsn = (
            "Driver=Vertica;"
            "Server={0};"
            "Port={1};"
            "Database={2};"
            "User={3};"
            "Password={4};"
            "ConnectionLoadBalance={5}").format(
                module.params['cluster'], module.params['port'], db,
                module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception as e:
        module.fail_json(
            msg="Unable to connect to database: {0}.".format(to_native(e)))

    try:
        schema_facts = get_schema_facts(cursor)
        if module.check_mode:
            # check() returns True when no change would be required
            changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
        elif state == 'absent':
            try:
                changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
            except pyodbc.Error as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
        elif state == 'present':
            try:
                changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
            except pyodbc.Error as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
    except CannotDropError as e:
        module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
    except SystemExit:
        # avoid catching this on python 2.4
        raise
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    module.exit_json(changed=changed, schema=schema,
                     ansible_facts={'vertica_schemas': schema_facts})
def main():
    """Ansible entry point: report Zabbix triggers in problem state for one host.

    Resolves the host by hostname/visible name/hostid, fetches its triggers
    at or above the requested severity and splits them into OK/problem lists.
    """
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            host_identifier=dict(type='str', required=True),
            host_id_type=dict(
                default='hostname',
                type='str',
                choices=['hostname', 'visible_name', 'hostid']),
            trigger_severity=dict(
                type='str',
                required=False,
                default='average',
                choices=['not_classified', 'information', 'warning',
                         'average', 'high', 'disaster']),
            validate_certs=dict(type='bool', required=False, default=True),
            timeout=dict(type='int', default=10),
        ),
        supports_check_mode=True
    )
    if not HAS_ZABBIX_API:
        module.fail_json(msg=missing_required_lib('zabbix-api',
                                                  url='https://pypi.org/project/zabbix-api/'),
                         exception=ZBX_IMP_ERR)

    # Map the symbolic severity name to the numeric value the API expects.
    trigger_severity_map = {'not_classified': 0, 'information': 1, 'warning': 2,
                            'average': 3, 'high': 4, 'disaster': 5}

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    validate_certs = module.params['validate_certs']
    host_id = module.params['host_identifier']
    host_id_type = module.params['host_id_type']
    trigger_severity = trigger_severity_map[module.params['trigger_severity']]
    timeout = module.params['timeout']

    host_inventory = 'hostid'
    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user,
                        passwd=http_login_password, validate_certs=validate_certs)
        zbx.login(login_user, login_password)
        # make sure the session is closed when the module process exits
        atexit.register(zbx.logout)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    host = Host(module, zbx)

    # Resolve the identifier to a numeric hostid.
    if host_id_type == 'hostname':
        zabbix_host = host.get_host(host_id, host_inventory, 'host')
        host_id = zabbix_host['hostid']
    elif host_id_type == 'visible_name':
        zabbix_host = host.get_host(host_id, host_inventory, 'name')
        host_id = zabbix_host['hostid']
    elif host_id_type == 'hostid':
        ''' check hostid exist'''
        zabbix_host = host.get_host(host_id, host_inventory, 'hostid')

    triggers = host.get_triggers_by_host_id_in_problem_state(host_id, trigger_severity)

    triggers_ok = []
    triggers_problem = []
    for trigger in triggers:
        # Get last event for trigger with problem value = 1
        # https://www.zabbix.com/documentation/3.4/manual/api/reference/trigger/object
        if int(trigger['value']) == 1:
            event = host.get_last_event_by_trigger_id(trigger['triggerid'])
            trigger['last_event'] = event
            triggers_problem.append(trigger)
        else:
            triggers_ok.append(trigger)

    module.exit_json(ok=True, triggers_ok=triggers_ok, triggers_problem=triggers_problem)
def main():
    """Ansible entry point: sync a local directory with an S3 bucket.

    Gathers local and remote file listings, then pushes or pulls depending
    on ``direction``, optionally deleting files that exist only on the
    destination side.
    """
    argument_spec = dict(
        direction=dict(required=True, choices=['push', 'pull']),
        overwrite=dict(
            choices=['always', 'never', 'different', 'newer', 'larger'],
            default='never'),
        diff_attributes=dict(type='list', default=['e_tag']),
        bucket=dict(required=True),
        prefix=dict(default=''),
        path=dict(type='path', required=True),
        directory_mode=dict(),
        permission=dict(required=False, choices=[
            'private', 'public-read', 'public-read-write', 'authenticated-read',
            'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control'
        ]),
        mime_encodings_map=dict(type='dict', default={}),
        mime_types_map=dict(type='dict', default={}),
        mime_override=dict(type='bool', default=False),
        mime_strict=dict(type='bool', default=False),
        patterns=dict(required=False, type='list', aliases=['pattern']),
        excludes=dict(required=False, type='list', aliases=['exclude']),
        hidden=dict(type='bool', default=False),
        use_regex=dict(type='bool', default=False),
        metadata=dict(type='dict', default={}),
        delete=dict(type='bool', default=False),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        supports_check_mode=True,
    )

    if not HAS_DATEUTIL:
        module.fail_json(msg=missing_required_lib('dateutil'), exception=HAS_DATEUTIL_EXC)

    # 'different' comparison via ETag needs MD5, which may be unavailable
    # (e.g. FIPS-enabled Python builds).
    if module.params['overwrite'] == 'different' and 'e_tag' in module.params['diff_attributes'] and not HAS_MD5:
        module.fail_json(
            msg='Invalid diff_attributes: ETag calculation requires MD5 support, which is not available.')

    if not os.path.exists(to_bytes(module.params['path'], errors='surrogate_or_strict')):
        module.fail_json(msg="path not found: %s" % module.params['path'])

    syncer = S3Syncer(module)
    syncer.gather_local_files()
    syncer.gather_s3_files()

    if module.params['direction'] == 'push':
        syncer.upload_files()
        if module.params['delete']:
            syncer.delete_s3_files()
    else:
        syncer.download_files()
        if module.params['delete']:
            syncer.delete_local_files()

    module.exit_json(changed=syncer.changed, objects=syncer.objects)
def main():
    """Ansible entry point: control boot media and power of an HP server via iLO."""
    module = AnsibleModule(argument_spec=dict(
        host=dict(type='str', required=True),
        login=dict(type='str', default='Administrator'),
        password=dict(type='str', default='admin', no_log=True),
        media=dict(type='str', choices=[
            'cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb'
        ]),
        image=dict(type='str'),
        state=dict(type='str', default='boot_once', choices=[
            'boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff'
        ]),
        force=dict(type='bool', default=False),
        ssl_version=dict(
            type='str', default='TLSv1',
            choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
    ))

    if not HAS_HPILO:
        module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)

    host = module.params['host']
    login = module.params['login']
    password = module.params['password']
    media = module.params['media']
    image = module.params['image']
    state = module.params['state']
    force = module.params['force']
    # Map e.g. 'TLSv1_2' to the ssl.PROTOCOL_TLSv1_2 constant.
    ssl_version = getattr(
        hpilo.ssl,
        'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))

    ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)

    changed = False
    status = {}
    power_status = 'UNKNOWN'

    if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):
        # Workaround for: Error communicating with iLO: Problem manipulating EV
        try:
            ilo.set_one_time_boot(media)
        except hpilo.IloError:
            time.sleep(60)
            ilo.set_one_time_boot(media)

        # TODO: Verify if image URL exists/works
        if image:
            ilo.insert_virtual_media(media, image)
            changed = True

        if media == 'cdrom':
            ilo.set_vm_status('cdrom', state, True)
            status = ilo.get_vm_status()
            changed = True
        elif media in ('floppy', 'usb'):
            ilo.set_vf_status(state, True)
            status = ilo.get_vf_status()
            changed = True

    # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
    if state in ('boot_once', 'boot_always') or force:
        power_status = ilo.get_host_power_status()

        if not force and power_status == 'ON':
            module.fail_json(
                msg='HP iLO (%s) reports that the server is already powered on !' % host)

        if power_status == 'ON':
            ilo.warm_boot_server()
#            ilo.cold_boot_server()
            changed = True
        else:
            ilo.press_pwr_btn()
#            ilo.reset_server()
#            ilo.set_host_power(host_power=True)
            changed = True
    elif state == 'poweroff':
        # BUGFIX: was `state in ('poweroff')` — `('poweroff')` is a plain
        # string, not a one-element tuple, so `in` performed substring
        # matching instead of the intended equality test.
        power_status = ilo.get_host_power_status()

        if not power_status == 'OFF':
            ilo.hold_pwr_btn()
#            ilo.set_host_power(host_power=False)
            changed = True

    module.exit_json(changed=changed, power=power_status, **status)
def main():
    """Ansible entry point: download a Maven artifact to a destination path.

    Resolves the requested version (exact, 'latest' or version-by-spec),
    builds the destination filename when ``dest`` is a directory, skips the
    download when a checksum-valid copy already exists, and finally applies
    file attributes.
    """
    module = AnsibleModule(
        argument_spec=dict(
            group_id=dict(required=True),
            artifact_id=dict(required=True),
            version=dict(default=None),
            version_by_spec=dict(default=None),
            classifier=dict(default=''),
            extension=dict(default='jar'),
            repository_url=dict(default='https://repo1.maven.org/maven2'),
            username=dict(default=None, aliases=['aws_secret_key']),
            password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
            headers=dict(type='dict'),
            force_basic_auth=dict(default=False, type='bool'),
            state=dict(default="present", choices=["present", "absent"]),  # TODO - Implement a "latest" state
            timeout=dict(default=10, type='int'),
            dest=dict(type="path", required=True),
            validate_certs=dict(required=False, default=True, type='bool'),
            client_cert=dict(type="path", required=False),
            client_key=dict(type="path", required=False),
            keep_name=dict(required=False, default=False, type='bool'),
            verify_checksum=dict(
                required=False,
                default='download',
                choices=['never', 'download', 'change', 'always']),
            checksum_alg=dict(required=False, default='md5', choices=['md5', 'sha1']),
            directory_mode=dict(type='str'),
        ),
        add_file_common_args=True,
        mutually_exclusive=([('version', 'version_by_spec')]))

    if not HAS_LXML_ETREE:
        module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)

    if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION:
        module.fail_json(msg=missing_required_lib('semantic_version'),
                         exception=SEMANTIC_VERSION_IMP_ERR)

    repository_url = module.params["repository_url"]
    if not repository_url:
        repository_url = "https://repo1.maven.org/maven2"
    try:
        parsed_url = urlparse(repository_url)
    except AttributeError as e:
        module.fail_json(msg='url parsing went wrong %s' % e)

    # file:// repositories are read directly from disk
    local = parsed_url.scheme == "file"

    if parsed_url.scheme == 's3' and not HAS_BOTO:
        module.fail_json(msg=missing_required_lib(
            'boto3', reason='when using s3:// repository URLs'),
            exception=BOTO_IMP_ERR)

    group_id = module.params["group_id"]
    artifact_id = module.params["artifact_id"]
    version = module.params["version"]
    version_by_spec = module.params["version_by_spec"]
    classifier = module.params["classifier"]
    extension = module.params["extension"]
    headers = module.params['headers']
    state = module.params["state"]
    dest = module.params["dest"]
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    keep_name = module.params["keep_name"]
    verify_checksum = module.params["verify_checksum"]
    verify_download = verify_checksum in ['download', 'always']
    verify_change = verify_checksum in ['change', 'always']
    checksum_alg = module.params["checksum_alg"]

    downloader = MavenDownloader(module, repository_url, local, headers)

    # Neither an exact version nor a spec given: track the latest release.
    if not version_by_spec and not version:
        version = "latest"

    try:
        artifact = Artifact(group_id, artifact_id, version, version_by_spec,
                            classifier, extension)
    except ValueError as e:
        module.fail_json(msg=e.args[0])

    changed = False
    prev_state = "absent"

    # A trailing separator means dest is a directory: create it (with any
    # missing parents) and fix up permissions on the newly created parts.
    if dest.endswith(os.sep):
        b_dest = to_bytes(dest, errors='surrogate_or_strict')
        if not os.path.exists(b_dest):
            (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest)
            os.makedirs(b_dest)
            directory_args = module.load_file_common_arguments(module.params)
            directory_mode = module.params["directory_mode"]
            if directory_mode is not None:
                directory_args['mode'] = directory_mode
            else:
                directory_args['mode'] = None
            changed = adjust_recursive_directory_permissions(
                pre_existing_dir, new_directory_list, module, directory_args, changed)

    # dest is a directory: derive the target filename from the artifact
    # coordinates and the resolved version.
    if os.path.isdir(b_dest):
        version_part = version
        if version == 'latest':
            version_part = downloader.find_latest_version_available(artifact)
        elif version_by_spec:
            version_part = downloader.find_version_by_spec(artifact)

        filename = "{artifact_id}{version_part}{classifier}.{extension}".format(
            artifact_id=artifact_id,
            version_part="-{0}".format(version_part) if keep_name else "",
            classifier="-{0}".format(classifier) if classifier else "",
            extension=extension)
        dest = posixpath.join(dest, filename)
        b_dest = to_bytes(dest, errors='surrogate_or_strict')

    # An existing file with a valid checksum (or checksum verification
    # disabled for 'change') counts as already present.
    if os.path.lexists(b_dest) and (
            (not verify_change) or not downloader.is_invalid_checksum(
                dest, downloader.find_uri_for_artifact(artifact), checksum_alg)):
        prev_state = "present"

    if prev_state == "absent":
        try:
            download_error = downloader.download(module.tmpdir, artifact,
                                                 verify_download, b_dest, checksum_alg)
            if download_error is None:
                changed = True
            else:
                module.fail_json(msg="Cannot retrieve the artifact to destination: "
                                     + download_error)
        except ValueError as e:
            module.fail_json(msg=e.args[0])

    try:
        file_args = module.load_file_common_arguments(module.params, path=dest)
    except TypeError:
        # The path argument is only supported in Ansible-base 2.10+. Fall back to
        # pre-2.10 behavior for older Ansible versions.
        module.params['path'] = dest
        file_args = module.load_file_common_arguments(module.params)
    changed = module.set_fs_attributes_if_different(file_args, changed)

    if changed:
        module.exit_json(state=state, dest=dest, group_id=group_id,
                         artifact_id=artifact_id, version=version,
                         classifier=classifier, extension=extension,
                         repository_url=repository_url, changed=changed)
    else:
        module.exit_json(state=state, dest=dest, changed=changed)
def main():
    """Ansible entry point: install, upgrade or remove a Helm release.

    Builds up a helm CLI command string, compares the desired chart/values
    against the deployed release status and only (re)deploys on difference.
    """
    global module
    module = AnsibleModule(
        argument_spec=dict(
            binary_path=dict(type='path'),
            chart_ref=dict(type='path'),
            chart_repo_url=dict(type='str'),
            chart_version=dict(type='str'),
            release_name=dict(type='str', required=True, aliases=['name']),
            release_namespace=dict(type='str', required=True, aliases=['namespace']),
            release_state=dict(default='present', choices=['present', 'absent'], aliases=['state']),
            release_values=dict(type='dict', default={}, aliases=['values']),
            values_files=dict(type='list', default=[], elements='str'),
            update_repo_cache=dict(type='bool', default=False),

            # Helm options
            disable_hook=dict(type='bool', default=False),
            force=dict(type='bool', default=False),
            kube_context=dict(type='str', aliases=['context'],
                              fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
            kubeconfig_path=dict(type='path', aliases=['kubeconfig'],
                                 fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),
            purge=dict(type='bool', default=True),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='str'),
            atomic=dict(type='bool', default=False),
            create_namespace=dict(type='bool', default=False),
            replace=dict(type='bool', default=False),
        ),
        required_if=[
            ('release_state', 'present', ['release_name', 'chart_ref']),
            ('release_state', 'absent', ['release_name'])
        ],
        supports_check_mode=True,
    )

    if not IMP_YAML:
        module.fail_json(msg=missing_required_lib("yaml"), exception=IMP_YAML_ERR)

    changed = False

    bin_path = module.params.get('binary_path')
    chart_ref = module.params.get('chart_ref')
    chart_repo_url = module.params.get('chart_repo_url')
    chart_version = module.params.get('chart_version')
    release_name = module.params.get('release_name')
    release_namespace = module.params.get('release_namespace')
    release_state = module.params.get('release_state')
    release_values = module.params.get('release_values')
    values_files = module.params.get('values_files')
    update_repo_cache = module.params.get('update_repo_cache')

    # Helm options
    disable_hook = module.params.get('disable_hook')
    force = module.params.get('force')
    kube_context = module.params.get('kube_context')
    kubeconfig_path = module.params.get('kubeconfig_path')
    purge = module.params.get('purge')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    atomic = module.params.get('atomic')
    create_namespace = module.params.get('create_namespace')
    replace = module.params.get('replace')

    if bin_path is not None:
        helm_cmd_common = bin_path
    else:
        helm_cmd_common = module.get_bin_path('helm', required=True)

    # Global flags shared by every helm invocation in this run.
    if kube_context is not None:
        helm_cmd_common += " --kube-context " + kube_context

    if kubeconfig_path is not None:
        helm_cmd_common += " --kubeconfig " + kubeconfig_path

    if update_repo_cache:
        run_repo_update(helm_cmd_common)

    helm_cmd_common += " --namespace=" + release_namespace

    # Get real/deployed release status
    release_status = get_release_status(helm_cmd_common, release_name)

    # keep helm_cmd_common for get_release_status in module_exit_json
    helm_cmd = helm_cmd_common
    if release_state == "absent" and release_status is not None:
        if replace:
            module.fail_json(msg="replace is not applicable when state is absent")

        helm_cmd = delete(helm_cmd, release_name, purge, disable_hook)
        changed = True
    elif release_state == "present":

        if chart_version is not None:
            helm_cmd += " --version=" + chart_version

        if chart_repo_url is not None:
            helm_cmd += " --repo=" + chart_repo_url

        # Fetch chart info to have real version and real name for chart_ref from archive, folder or url
        chart_info = fetch_chart_info(helm_cmd, chart_ref)

        if release_status is None:  # Not installed
            helm_cmd = deploy(helm_cmd, release_name, release_values, chart_ref,
                              wait, wait_timeout, disable_hook, False,
                              values_files=values_files, atomic=atomic,
                              create_namespace=create_namespace, replace=replace)
            changed = True
        else:
            # the 'appVersion' specification is optional in a chart
            chart_app_version = chart_info.get('appVersion', None)
            released_app_version = release_status.get('app_version', None)

            # when deployed without an 'appVersion' chart value the 'helm list' command will return the entry `app_version: ""`
            appversion_is_same = (chart_app_version == released_app_version) or \
                (chart_app_version is None and released_app_version == "")

            # Redeploy when forced, or when values, chart identity or
            # appVersion differ from what is currently deployed.
            if force or release_values != release_status['values'] \
                    or (chart_info['name'] + '-' + chart_info['version']) != release_status["chart"] \
                    or not appversion_is_same:
                helm_cmd = deploy(helm_cmd, release_name, release_values, chart_ref,
                                  wait, wait_timeout, disable_hook, force,
                                  values_files=values_files, atomic=atomic,
                                  create_namespace=create_namespace, replace=replace)
                changed = True

    if module.check_mode:
        check_status = {
            'values': {
                "current": {},
                "declared": {},
            }
        }
        if release_status:
            check_status['values']['current'] = release_status['values']
            check_status['values']['declared'] = release_status

        module.exit_json(
            changed=changed,
            command=helm_cmd,
            status=check_status,
            stdout='',
            stderr='',
        )
    elif not changed:
        module.exit_json(
            changed=False,
            status=release_status,
            stdout='',
            stderr='',
            command=helm_cmd,
        )

    rc, out, err = exec_command(helm_cmd)

    module.exit_json(
        changed=changed,
        stdout=out,
        stderr=err,
        status=get_release_status(helm_cmd_common, release_name),
        command=helm_cmd,
    )
def main():
    """Entry point: create (export), parse, or remove a PKCS#12 archive at ``path``."""
    argument_spec = dict(
        action=dict(type='str', default='export', choices=['export', 'parse']),
        ca_certificates=dict(type='list', elements='path'),
        certificate_path=dict(type='path'),
        force=dict(type='bool', default=False),
        friendly_name=dict(type='str', aliases=['name']),
        iter_size=dict(type='int', default=2048),
        maciter_size=dict(type='int', default=1),
        passphrase=dict(type='str', no_log=True),
        path=dict(type='path', required=True),
        privatekey_passphrase=dict(type='str', no_log=True),
        privatekey_path=dict(type='path'),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        src=dict(type='path'),
    )
    # Parsing an existing archive requires a source file.
    required_if = [
        ['action', 'parse', ['src']],
    ]
    required_together = [
        ['privatekey_path', 'friendly_name'],
    ]
    module = AnsibleModule(
        add_file_common_args=True,
        argument_spec=argument_spec,
        required_if=required_if,
        required_together=required_together,
        supports_check_mode=True,
    )

    if not pyopenssl_found:
        module.fail_json(msg=missing_required_lib('pyOpenSSL'),
                         exception=PYOPENSSL_IMP_ERR)

    # The destination directory must already exist; this module does not create it.
    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg="The directory '%s' does not exist or the path is not a directory" % base_dir
        )

    pkcs12 = Pkcs(module)
    changed = False

    if module.params['state'] == 'present':
        if module.check_mode:
            # Check mode: report whether a real run would (re)generate the file.
            result = pkcs12.dump()
            result['changed'] = module.params['force'] or not pkcs12.check(module)
            module.exit_json(**result)

        try:
            # (Re)build when forced or when the on-disk archive does not match;
            # permissions are reconciled separately below.
            if not pkcs12.check(module, perms_required=False) or module.params['force']:
                if module.params['action'] == 'export':
                    if not module.params['friendly_name']:
                        module.fail_json(msg='Friendly_name is required')
                    pkcs12.generate(module)
                    changed = True
                else:
                    pkcs12.parse(module)

            # Apply owner/group/mode/selinux arguments to the resulting file.
            file_args = module.load_file_common_arguments(module.params)
            if module.set_fs_attributes_if_different(file_args, changed):
                changed = True
        except PkcsError as exc:
            module.fail_json(msg=to_native(exc))
    else:
        if module.check_mode:
            # Removal would change state only if the file currently exists.
            result = pkcs12.dump()
            result['changed'] = os.path.exists(module.params['path'])
            module.exit_json(**result)

        if os.path.exists(module.params['path']):
            try:
                pkcs12.remove(module)
                changed = True
            except PkcsError as exc:
                module.fail_json(msg=to_native(exc))

    result = pkcs12.dump()
    result['changed'] = changed
    if os.path.exists(module.params['path']):
        # Report the final file mode as a four-digit octal string.
        file_mode = "%04o" % stat.S_IMODE(os.stat(module.params['path']).st_mode)
        result['mode'] = file_mode

    module.exit_json(**result)
def assert_requirements_present(module): if DNSPYTHON_IMPORTERROR is not None: module.fail_json(msg=missing_required_lib('dnspython'), exception=DNSPYTHON_IMPORTERROR)
def main(): module_args = oci_utils.get_taggable_arg_spec(supports_create=True, supports_wait=True) module_args.update( dict( cidr_block=dict(type="str", required=False), compartment_id=dict(type="str", required=False), display_name=dict(type="str", required=False, aliases=["name"]), dns_label=dict(type="str", required=False), state=dict( type="str", required=False, default="present", choices=["absent", "present"], ), vcn_id=dict(type="str", required=False, aliases=["id"]), )) module = AnsibleModule( argument_spec=module_args, supports_check_mode=False, mutually_exclusive=[["compartment_id", "vcn_id"]], ) if not HAS_OCI_PY_SDK: module.fail_json(msg=missing_required_lib("oci")) virtual_network_client = oci_utils.create_service_client( module, VirtualNetworkClient) exclude_attributes = {"display_name": True, "dns_label": True} state = module.params["state"] vcn_id = module.params["vcn_id"] if state == "absent": if vcn_id is not None: result = delete_vcn(virtual_network_client, module) else: module.fail_json( msg="Specify vcn_id with state as 'absent' to delete a VCN.") else: if vcn_id is not None: result = update_vcn(virtual_network_client, module) else: result = oci_utils.check_and_create_resource( resource_type="vcn", create_fn=create_vcn, kwargs_create={ "virtual_network_client": virtual_network_client, "module": module, }, list_fn=virtual_network_client.list_vcns, kwargs_list={ "compartment_id": module.params["compartment_id"] }, module=module, model=CreateVcnDetails(), exclude_attributes=exclude_attributes, ) module.exit_json(**result)
def main():
    '''Entry point: add or remove a rule in a PostgreSQL pg_hba.conf file.'''
    # argument_spec = postgres_common_argument_spec()
    argument_spec = dict()
    argument_spec.update(address=dict(type='str', default='samehost', aliases=['source', 'src']),
                         backup=dict(type='bool', default=False),
                         backup_file=dict(type='str'),
                         contype=dict(type='str', default=None, choices=PG_HBA_TYPES),
                         create=dict(type='bool', default=False),
                         databases=dict(type='str', default='all'),
                         dest=dict(type='path', required=True),
                         method=dict(type='str', default='md5', choices=PG_HBA_METHODS),
                         netmask=dict(type='str'),
                         options=dict(type='str'),
                         order=dict(
                             type='str', default="sdu", choices=PG_HBA_ORDERS,
                             removed_in_version='3.0.0',
                             removed_from_collection='community.postgresql'),
                         state=dict(type='str', default="present", choices=["absent", "present"]),
                         users=dict(type='str', default='all'))
    module = AnsibleModule(argument_spec=argument_spec,
                           add_file_common_args=True,
                           supports_check_mode=True)
    if IPADDRESS_IMP_ERR is not None:
        module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)

    contype = module.params["contype"]
    # In check mode, allow a missing file to be treated as creatable (so the
    # dry run can proceed) and never write a backup.
    create = bool(module.params["create"] or module.check_mode)
    if module.check_mode:
        backup = False
    else:
        backup = module.params['backup']
    backup_file = module.params['backup_file']
    databases = module.params["databases"]
    dest = module.params["dest"]
    method = module.params["method"]
    netmask = module.params["netmask"]
    options = module.params["options"]
    order = module.params["order"]
    source = module.params["address"]
    state = module.params["state"]
    users = module.params["users"]

    ret = {'msgs': []}
    try:
        pg_hba = PgHba(dest, order, backup=backup, create=create)
    except PgHbaError as error:
        module.fail_json(msg='Error reading file:\n{0}'.format(error))

    # Without a connection type there is nothing to add/remove; the module
    # then only reports the parsed rules.
    if contype:
        try:
            # One rule per (database, user) pair from the comma-separated lists.
            for database in databases.split(','):
                for user in users.split(','):
                    rule = PgHbaRule(contype, database, user, source, netmask, method, options)
                    if state == "present":
                        ret['msgs'].append('Adding')
                        pg_hba.add_rule(rule)
                    else:
                        ret['msgs'].append('Removing')
                        pg_hba.remove_rule(rule)
        except PgHbaError as error:
            module.fail_json(msg='Error modifying rules:\n{0}'.format(error))
        file_args = module.load_file_common_arguments(module.params)
        ret['changed'] = changed = pg_hba.changed()
        if changed:
            ret['msgs'].append('Changed')
            ret['diff'] = pg_hba.diff
            if not module.check_mode:
                ret['msgs'].append('Writing')
                try:
                    # Only reconcile file attributes when the file was written.
                    if pg_hba.write(backup_file):
                        module.set_fs_attributes_if_different(file_args, True, pg_hba.diff,
                                                              expand=False)
                except PgHbaError as error:
                    module.fail_json(
                        msg='Error writing file:\n{0}'.format(error))
                if pg_hba.last_backup:
                    ret['backup_file'] = pg_hba.last_backup

    ret['pg_hba'] = list(pg_hba.get_rules())
    module.exit_json(**ret)
def main():
    """Entry point: ensure an OpenSSL private key is present or absent at ``path``."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            size=dict(type='int', default=4096),
            type=dict(type='str', default='RSA', choices=[
                'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448'
            ]),
            curve=dict(type='str', choices=[
                'secp384r1', 'secp521r1', 'secp224r1', 'secp192r1', 'secp256r1',
                'secp256k1', 'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1',
                'sect571k1', 'sect409k1', 'sect283k1', 'sect233k1', 'sect163k1',
                'sect571r1', 'sect409r1', 'sect283r1', 'sect233r1', 'sect163r2',
            ]),
            force=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            passphrase=dict(type='str', no_log=True),
            cipher=dict(type='str'),
            backup=dict(type='bool', default=False),
            format=dict(
                type='str', default='auto_ignore',
                choices=['pkcs1', 'pkcs8', 'raw', 'auto', 'auto_ignore']),
            format_mismatch=dict(type='str', default='regenerate',
                                 choices=['regenerate', 'convert']),
            select_crypto_backend=dict(
                type='str', choices=['auto', 'pyopenssl', 'cryptography'],
                default='auto'),
            return_content=dict(type='bool', default=False),
            regenerate=dict(type='str', default='full_idempotence', choices=[
                'never', 'fail', 'partial_idempotence', 'full_idempotence', 'always'
            ]),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_together=[['cipher', 'passphrase']],
        required_if=[
            ['type', 'ECC', ['curve']],
        ],
    )

    # The key's directory must already exist.
    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg='The directory %s does not exist or the file is not a directory'
            % base_dir)

    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Detection what is possible
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(
            MINIMAL_CRYPTOGRAPHY_VERSION)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(
            MINIMAL_PYOPENSSL_VERSION)

        # Decision: a non-'auto' cipher together with a passphrase prefers the
        # pyOpenSSL backend; otherwise cryptography is preferred.
        if module.params['cipher'] and module.params[
                'passphrase'] and module.params['cipher'] != 'auto':
            # First try pyOpenSSL, then cryptography
            if can_use_pyopenssl:
                backend = 'pyopenssl'
            elif can_use_cryptography:
                backend = 'cryptography'
        else:
            # First try cryptography, then pyOpenSSL
            if can_use_cryptography:
                backend = 'cryptography'
            elif can_use_pyopenssl:
                backend = 'pyopenssl'

        # Success? Still 'auto' means neither library is usable.
        if backend == 'auto':
            module.fail_json(msg=(
                "Can't detect any of the required Python libraries "
                "cryptography (>= {0}) or PyOpenSSL (>= {1})"
            ).format(MINIMAL_CRYPTOGRAPHY_VERSION, MINIMAL_PYOPENSSL_VERSION))

    try:
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib(
                    'pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                    exception=PYOPENSSL_IMP_ERR)
            module.deprecate(
                'The module is using the PyOpenSSL backend. This backend has been deprecated',
                version='ansible.builtin:2.13')
            private_key = PrivateKeyPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib(
                    'cryptography >= {0}'.format(
                        MINIMAL_CRYPTOGRAPHY_VERSION)),
                    exception=CRYPTOGRAPHY_IMP_ERR)
            private_key = PrivateKeyCryptography(module)

        if private_key.state == 'present':
            if module.check_mode:
                # Changed if forced, or if either the conversion-tolerant or the
                # strict check says the existing key does not match.
                result = private_key.dump()
                result['changed'] = private_key.force \
                    or not private_key.check(module, ignore_conversion=True) \
                    or not private_key.check(module, ignore_conversion=False)
                module.exit_json(**result)

            private_key.generate(module)
        else:
            if module.check_mode:
                result = private_key.dump()
                result['changed'] = os.path.exists(module.params['path'])
                module.exit_json(**result)

            private_key.remove(module)

        result = private_key.dump()
        module.exit_json(**result)
    except crypto_utils.OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
def main(): module = AnsibleModule(argument_spec=dict( state=dict(type='str', default='present', choices=['present']), host=dict(type='str', default='localhost'), port=dict(type='int', default=8125), protocol=dict(type='str', default='udp', choices=['udp', 'tcp']), timeout=dict(type='float', default=1.0), metric=dict(type='str', required=True), metric_type=dict(type='str', required=True, choices=['counter', 'gauge']), metric_prefix=dict(type='str', default=''), value=dict(type='int', required=True), delta=dict(type='bool', default=False), ), supports_check_mode=False) if not HAS_STATSD: module.fail_json(msg=missing_required_lib('statsd')) host = module.params.get('host') port = module.params.get('port') protocol = module.params.get('protocol') timeout = module.params.get('timeout') metric = module.params.get('metric') metric_type = module.params.get('metric_type') metric_prefix = module.params.get('metric_prefix') value = module.params.get('value') delta = module.params.get('delta') if protocol == 'udp': client = udp_statsd_client(host=host, port=port, prefix=metric_prefix, maxudpsize=512, ipv6=False) elif protocol == 'tcp': client = tcp_statsd_client(host=host, port=port, timeout=timeout, prefix=metric_prefix, ipv6=False) metric_name = '%s/%s' % (metric_prefix, metric) if metric_prefix else metric metric_display_value = '%s (delta=%s)' % ( value, delta) if metric_type == 'gauge' else value try: if metric_type == 'counter': client.incr(metric, value) elif metric_type == 'gauge': client.gauge(metric, value, delta=delta) except Exception as exc: module.fail_json(msg='Failed sending to StatsD %s' % str(exc)) finally: if protocol == 'tcp': client.close() module.exit_json(msg="Sent %s %s -> %s to StatsD" % (metric_type, metric_name, str(metric_display_value)), changed=True)
def _get_xml_dict(self, xml_root): if not HAS_XMLTODICT: self._module.fail_json(msg=missing_required_lib("xmltodict")) xml_dict = xmltodict.parse(etree.tostring(xml_root), dict_constructor=dict) return xml_dict
def main():
    """Entry point: manage the SELinux runtime state, persistent config, and policy."""
    module = AnsibleModule(
        argument_spec=dict(
            policy=dict(type='str'),
            state=dict(type='str', required=True, choices=['enforcing', 'permissive', 'disabled']),
            configfile=dict(type='str', default='/etc/selinux/config', aliases=['conf', 'file']),
            update_kernel_param=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    if not HAS_SELINUX:
        module.fail_json(msg=missing_required_lib('libselinux-python'), exception=SELINUX_IMP_ERR)

    # global vars
    changed = False
    msgs = []
    configfile = module.params['configfile']
    policy = module.params['policy']
    state = module.params['state']
    update_kernel_param = module.params['update_kernel_param']
    runtime_enabled = selinux.is_selinux_enabled()
    runtime_policy = selinux.selinux_getpolicytype()[1]
    runtime_state = 'disabled'
    kernel_enabled = None
    reboot_required = False

    if runtime_enabled:
        # enabled means 'enforcing' or 'permissive'
        if selinux.security_getenforce():
            runtime_state = 'enforcing'
        else:
            runtime_state = 'permissive'

    if not os.path.isfile(configfile):
        module.fail_json(msg="Unable to find file {0}".format(configfile),
                         details="Please install SELinux-policy package, "
                                 "if this package is not installed previously.")

    config_policy = get_config_policy(configfile)
    config_state = get_config_state(configfile)

    if update_kernel_param:
        try:
            # grubby may be absent (e.g. non-grub systems); helper must cope with None.
            grubby_bin = get_bin_path('grubby')
        except ValueError:
            grubby_bin = None
        kernel_enabled = get_kernel_enabled(module, grubby_bin)

    # check to see if policy is set if state is not 'disabled'
    if state != 'disabled':
        if not policy:
            module.fail_json(
                msg="Policy is required if state is not 'disabled'")
    else:
        # When disabling, fall back to the policy from the config file.
        if not policy:
            policy = config_policy

    # check changed values and run changes
    if policy != runtime_policy:
        if module.check_mode:
            module.exit_json(changed=True)
        # cannot change runtime policy
        msgs.append("Running SELinux policy changed from '%s' to '%s'" % (runtime_policy, policy))
        changed = True

    if policy != config_policy:
        if module.check_mode:
            module.exit_json(changed=True)
        set_config_policy(module, policy, configfile)
        msgs.append(
            "SELinux policy configuration in '%s' changed from '%s' to '%s'"
            % (configfile, config_policy, policy))
        changed = True

    if state != runtime_state:
        if runtime_enabled:
            if state == 'disabled':
                if runtime_state != 'permissive':
                    # Temporarily set state to permissive
                    if not module.check_mode:
                        set_state(module, 'permissive')
                    module.warn(
                        "SELinux state temporarily changed from '%s' to 'permissive'. State change will take effect next reboot."
                        % (runtime_state))
                    changed = True
                else:
                    module.warn(
                        'SELinux state change will take effect next reboot')
                reboot_required = True
            else:
                if not module.check_mode:
                    set_state(module, state)
                msgs.append("SELinux state changed from '%s' to '%s'" %
                            (runtime_state, state))

                # Only report changes if the file is changed.
                # This prevents the task from reporting changes every time the task is run.
                changed = True
        else:
            # Cannot enable a disabled SELinux at runtime.
            module.warn("Reboot is required to set SELinux state to '%s'" % state)
            reboot_required = True

    if state != config_state:
        if not module.check_mode:
            set_config_state(module, state, configfile)
        msgs.append("Config SELinux state changed from '%s' to '%s'" %
                    (config_state, state))
        changed = True

    requested_kernel_enabled = state in ('enforcing', 'permissive')
    # Update kernel enabled/disabled config only when setting is consistent
    # across all kernels AND the requested state differs from the current state
    if update_kernel_param and kernel_enabled != requested_kernel_enabled:
        if not module.check_mode:
            set_kernel_enabled(module, grubby_bin, requested_kernel_enabled)
        if requested_kernel_enabled:
            states = ('disabled', 'enabled')
        else:
            states = ('enabled', 'disabled')
        if kernel_enabled is None:
            # None means the current kernel settings disagree with each other.
            states = ('<inconsistent>', states[1])
        msgs.append("Kernel SELinux state changed from '%s' to '%s'" % states)
        changed = True

    module.exit_json(changed=changed, msg=', '.join(msgs),
                     configfile=configfile, policy=policy, state=state,
                     reboot_required=reboot_required)
def main():
    """Entry point: query or edit an XML document given by ``path`` or ``xmlstring``."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', aliases=['dest', 'file']),
            xmlstring=dict(type='str'),
            xpath=dict(type='str'),
            namespaces=dict(type='dict', default={}),
            state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']),
            value=dict(type='raw'),
            attribute=dict(type='raw'),
            add_children=dict(type='list'),
            set_children=dict(type='list'),
            count=dict(type='bool', default=False),
            print_match=dict(type='bool', default=False),
            pretty_print=dict(type='bool', default=False),
            content=dict(type='str', choices=['attribute', 'text']),
            input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']),
            backup=dict(type='bool', default=False),
            strip_cdata_tags=dict(type='bool', default=False),
            insertbefore=dict(type='bool', default=False),
            insertafter=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_by=dict(
            add_children=['xpath'],
            attribute=['value'],
            content=['xpath'],
            set_children=['xpath'],
            value=['xpath'],
        ),
        required_if=[
            ['count', True, ['xpath']],
            ['print_match', True, ['xpath']],
            ['insertbefore', True, ['xpath']],
            ['insertafter', True, ['xpath']],
        ],
        required_one_of=[
            ['path', 'xmlstring'],
            ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'],
        ],
        mutually_exclusive=[
            ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'],
            ['path', 'xmlstring'],
            ['insertbefore', 'insertafter'],
        ],
    )

    xml_file = module.params['path']
    xml_string = module.params['xmlstring']
    xpath = module.params['xpath']
    namespaces = module.params['namespaces']
    state = module.params['state']
    value = json_dict_bytes_to_unicode(module.params['value'])
    attribute = module.params['attribute']
    set_children = json_dict_bytes_to_unicode(module.params['set_children'])
    add_children = json_dict_bytes_to_unicode(module.params['add_children'])
    pretty_print = module.params['pretty_print']
    content = module.params['content']
    input_type = module.params['input_type']
    print_match = module.params['print_match']
    count = module.params['count']
    backup = module.params['backup']
    strip_cdata_tags = module.params['strip_cdata_tags']
    insertbefore = module.params['insertbefore']
    insertafter = module.params['insertafter']

    # Check if we have lxml 2.3.0 or newer installed
    if not HAS_LXML:
        module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR)
    elif LooseVersion('.'.join(
            to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'):
        module.fail_json(
            msg=
            'The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine'
        )
    elif LooseVersion('.'.join(
            to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
        module.warn(
            'Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.'
        )

    # Check if the file exists: an inline string takes precedence over a path.
    if xml_string:
        infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
    elif os.path.isfile(xml_file):
        infile = open(xml_file, 'rb')
    else:
        module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)

    # Parse and evaluate xpath expression (syntax check only; evaluation happens later)
    if xpath is not None:
        try:
            etree.XPath(xpath)
        except etree.XPathSyntaxError as e:
            module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
        except etree.XPathEvalError as e:
            module.fail_json(
                msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))

    # Try to parse in the target XML file
    try:
        parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
        doc = etree.parse(infile, parser)
    except etree.XMLSyntaxError as e:
        module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))

    # Ensure we have the original copy to compare (helpers read this global for diffs)
    global orig_doc
    orig_doc = copy.deepcopy(doc)

    # Each of the helpers below exits the module itself when it applies;
    # falling through all of them is an error (see the final fail_json).
    if print_match:
        do_print_match(module, doc, xpath, namespaces)

    if count:
        count_nodes(module, doc, xpath, namespaces)

    if content == 'attribute':
        get_element_attr(module, doc, xpath, namespaces)
    elif content == 'text':
        get_element_text(module, doc, xpath, namespaces)

    # File exists:
    if state == 'absent':
        # - absent: delete xpath target
        delete_xpath_target(module, doc, xpath, namespaces)

    # - present: carry on

    # children && value both set?: should have already aborted by now
    # add_children && set_children both set?: should have already aborted by now

    # set_children set?
    if set_children:
        set_target_children(module, doc, xpath, namespaces, set_children, input_type)

    # add_children set?
    if add_children:
        add_target_children(module, doc, xpath, namespaces, add_children, input_type,
                            insertbefore, insertafter)

    # No?: Carry on

    # Is the xpath target an attribute selector?
    if value is not None:
        set_target(module, doc, xpath, namespaces, attribute, value)

    # If an xpath was provided, we need to do something with the data
    if xpath is not None:
        ensure_xpath_exists(module, doc, xpath, namespaces)

    # Otherwise only reformat the xml data?
    if pretty_print:
        make_pretty(module, doc)

    module.fail_json(msg="Don't know what to do")
def main():
    """Entry point: list members of an Apache httpd balancer, or change one member's state."""
    module = AnsibleModule(
        argument_spec=dict(
            balancer_vhost=dict(required=True, default=None, type='str'),
            balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
            member_host=dict(type='str'),
            state=dict(type='str'),
            tls=dict(default=False, type='bool'),
            validate_certs=dict(default=True, type='bool')),
        supports_check_mode=True)

    if HAS_BEAUTIFULSOUP is False:
        module.fail_json(msg=missing_required_lib('BeautifulSoup'),
                         exception=BEAUTIFUL_SOUP_IMP_ERR)

    # 'state' is a free-form comma-separated list, validated by hand here.
    if module.params['state'] is not None:
        states = module.params['state'].split(',')
        if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
            module.fail_json(
                msg=
                "state present/enabled is mutually exclusive with other states!"
            )
        else:
            for _state in states:
                if _state not in [
                        'present', 'absent', 'enabled', 'disabled', 'drained',
                        'hot_standby', 'ignore_errors'
                ]:
                    module.fail_json(
                        msg=
                        "State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
                    )
    else:
        states = ['None']

    mybalancer = Balancer(module.params['balancer_vhost'],
                          module.params['balancer_url_suffix'],
                          module=module,
                          tls=module.params['tls'])

    if module.params['member_host'] is None:
        # No member given: report all members of the balancer, change nothing.
        json_output_list = []
        for member in mybalancer.members:
            json_output_list.append({
                "host": member.host,
                "status": member.status,
                "protocol": member.protocol,
                "port": member.port,
                "path": member.path,
                "attributes": member.attributes,
                "management_url": member.management_url,
                "balancer_url": member.balancer_url
            })
        module.exit_json(changed=False, members=json_output_list)
    else:
        changed = False
        member_exists = False
        # Desired flag set for the member; each requested state switches one flag
        # on ('absent' maps to 'disabled').
        member_status = {
            'disabled': False,
            'drained': False,
            'hot_standby': False,
            'ignore_errors': False
        }
        for mode in member_status.keys():
            for state in states:
                if mode == state:
                    member_status[mode] = True
                elif mode == 'disabled' and state == 'absent':
                    member_status[mode] = True

        for member in mybalancer.members:
            if str(member.host) == str(module.params['member_host']):
                member_exists = True
                if module.params['state'] is not None:
                    member_status_before = member.status
                    if not module.check_mode:
                        # Assigning member.status pushes the change to the
                        # balancer-manager (presumably via a property setter
                        # on the member object — defined elsewhere).
                        member_status_after = member.status = member_status
                    else:
                        member_status_after = member_status
                    if member_status_before != member_status_after:
                        changed = True
                json_output = {
                    "host": member.host,
                    "status": member.status,
                    "protocol": member.protocol,
                    "port": member.port,
                    "path": member.path,
                    "attributes": member.attributes,
                    "management_url": member.management_url,
                    "balancer_url": member.balancer_url
                }
        if member_exists:
            module.exit_json(changed=changed, member=json_output)
        else:
            module.fail_json(msg=str(module.params['member_host']) +
                             ' is not a member of the balancer ' +
                             str(module.params['balancer_vhost']) + '!')
def main():
    """Entry point: manage DNSimple domains and DNS records.

    Dispatches on which parameters are set: no domain -> list all domains;
    domain only -> create/delete the domain; record -> create/update/delete a
    record; record_ids -> verify presence or delete by id.
    """
    module = AnsibleModule(
        argument_spec=dict(
            account_email=dict(type='str', fallback=(env_fallback, ['DNSIMPLE_EMAIL'])),
            account_api_token=dict(type='str', no_log=True,
                                   fallback=(env_fallback, ['DNSIMPLE_API_TOKEN'])),
            domain=dict(type='str'),
            record=dict(type='str'),
            record_ids=dict(type='list', elements='str'),
            type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL',
                                           'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA',
                                           'SSHFP', 'HINFO', 'POOL', 'CAA']),
            ttl=dict(type='int', default=3600),
            value=dict(type='str'),
            priority=dict(type='int'),
            state=dict(type='str', choices=['present', 'absent'], default='present'),
            solo=dict(type='bool', default=False),
            sandbox=dict(type='bool', default=False),
        ),
        required_together=[
            ['record', 'value']
        ],
        supports_check_mode=True,
    )

    if not HAS_DNSIMPLE:
        module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR[0])

    account_email = module.params.get('account_email')
    account_api_token = module.params.get('account_api_token')
    domain = module.params.get('domain')
    record = module.params.get('record')
    record_ids = module.params.get('record_ids')
    record_type = module.params.get('type')
    ttl = module.params.get('ttl')
    value = module.params.get('value')
    priority = module.params.get('priority')
    state = module.params.get('state')
    is_solo = module.params.get('solo')
    sandbox = module.params.get('sandbox')

    # python-dnsimple 1.x and 2.x have incompatible APIs; pick a wrapper per version.
    DNSIMPLE_MAJOR_VERSION = LooseVersion(dnsimple_version).version[0]

    try:
        if DNSIMPLE_MAJOR_VERSION > 1:
            ds = DNSimpleV2(account_email, account_api_token, sandbox, module)
        else:
            module.deprecate(
                'Support for python-dnsimple < 2 is deprecated. '
                'Update python-dnsimple to version >= 2.0.0',
                version='5.0.0', collection_name='community.general'
            )
            ds = DNSimpleV1(account_email, account_api_token, sandbox, module)
        # Let's figure out what operation we want to do
        # No domain, return a list
        if not domain:
            all_domains = ds.get_all_domains()
            module.exit_json(changed=False, result=all_domains)

        # Domain & No record
        if record is None and not record_ids:
            # Domains may be looked up by numeric id or by name.
            if domain.isdigit():
                typed_domain = int(domain)
            else:
                typed_domain = str(domain)
            dr = ds.get_domain(typed_domain)
            # domain does not exist
            if state == 'present':
                if dr:
                    module.exit_json(changed=False, result=dr)
                else:
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        response = ds.create_domain(domain)
                        module.exit_json(changed=True, result=response)
            # state is absent
            else:
                if dr:
                    if not module.check_mode:
                        ds.delete_domain(domain)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)

        # need the not none check since record could be an empty string
        if record is not None:
            if not record_type:
                module.fail_json(msg="Missing the record type")
            if not value:
                module.fail_json(msg="Missing the record value")

            records_list = ds.get_records(domain, dnsimple_filter={'name': record})
            # Exact match on name, type, and content.
            rr = next((r for r in records_list
                       if r['name'] == record and r['type'] == record_type and r['content'] == value),
                      None)
            if state == 'present':
                changed = False
                if is_solo:
                    # delete any records that have the same name and record type
                    same_type = [r['id'] for r in records_list
                                 if r['name'] == record and r['type'] == record_type]
                    if rr:
                        # Keep the record we intend to preserve.
                        same_type = [rid for rid in same_type if rid != rr['id']]
                    if same_type:
                        if not module.check_mode:
                            for rid in same_type:
                                ds.delete_record(domain, rid)
                        changed = True
                if rr:
                    # check if we need to update
                    if rr['ttl'] != ttl or rr['priority'] != priority:
                        if module.check_mode:
                            module.exit_json(changed=True)
                        else:
                            response = ds.update_record(domain, rr['id'], ttl, priority)
                            module.exit_json(changed=True, result=response)
                    else:
                        module.exit_json(changed=changed, result=rr)
                else:
                    # create it
                    if module.check_mode:
                        module.exit_json(changed=True)
                    else:
                        response = ds.create_record(domain, record, record_type, value, ttl, priority)
                        module.exit_json(changed=True, result=response)
            # state is absent
            else:
                if rr:
                    if not module.check_mode:
                        ds.delete_record(domain, rr['id'])
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)

        # Make sure these record_ids either all exist or none
        if record_ids:
            current_records = ds.get_records(domain, dnsimple_filter=None)
            current_record_ids = [str(d['id']) for d in current_records]
            wanted_record_ids = [str(r) for r in record_ids]
            if state == 'present':
                difference = list(set(wanted_record_ids) - set(current_record_ids))
                if difference:
                    module.fail_json(msg="Missing the following records: %s" % difference)
                else:
                    module.exit_json(changed=False)
            # state is absent
            else:
                difference = list(set(wanted_record_ids) & set(current_record_ids))
                if difference:
                    if not module.check_mode:
                        for rid in difference:
                            ds.delete_record(domain, rid)
                    module.exit_json(changed=True)
                else:
                    module.exit_json(changed=False)

    except DNSimpleException as e:
        # The two library generations expose the error message differently.
        if DNSIMPLE_MAJOR_VERSION > 1:
            module.fail_json(msg="DNSimple exception: %s" % e.message)
        else:
            module.fail_json(msg="DNSimple exception: %s" % str(e.args[0]['message']))
    # Reached only when no parameter combination above matched.
    module.fail_json(msg="Unknown what you wanted me to do")
def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
             required_together=None, required_if=None, min_docker_version=MIN_DOCKER_VERSION,
             min_docker_api_version=None, option_minimal_versions=None,
             option_minimal_versions_ignore_params=None, fail_results=None):
    """Build the AnsibleModule, validate the installed Docker SDK, and connect.

    Merges the caller's argument spec and constraints with the shared Docker
    defaults, enforces minimum docker-py/docker SDK and API versions, and
    establishes the client connection (via the superclass __init__).
    """
    # Modules can put information in here which will always be returned
    # in case client.fail() is called.
    self.fail_results = fail_results or {}

    # Merge module-specific arguments/constraints onto the common Docker ones.
    merged_arg_spec = dict()
    merged_arg_spec.update(DOCKER_COMMON_ARGS)
    if argument_spec:
        merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

    mutually_exclusive_params = []
    mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
    if mutually_exclusive:
        mutually_exclusive_params += mutually_exclusive

    required_together_params = []
    required_together_params += DOCKER_REQUIRED_TOGETHER
    if required_together:
        required_together_params += required_together

    self.module = AnsibleModule(
        argument_spec=merged_arg_spec,
        supports_check_mode=supports_check_mode,
        mutually_exclusive=mutually_exclusive_params,
        required_together=required_together_params,
        required_if=required_if)

    # True when the module needs the 'docker' package (SDK >= 2.0),
    # as opposed to the legacy 'docker-py' package.
    NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))

    self.docker_py_version = LooseVersion(docker_version)

    # Both packages installed at once corrupts the shared 'docker' namespace.
    if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
        self.fail(
            "Cannot have both the docker-py and docker python modules (old and new version of Docker "
            "SDK for Python) installed together as they use the same namespace and cause a corrupt "
            "installation. Please uninstall both packages, and re-install only the docker-py or docker "
            "python module (for %s's Python %s). It is recommended to install the docker module if no "
            "support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
            "can leave the other module in a broken state."
            % (platform.node(), sys.executable))

    if not HAS_DOCKER_PY:
        if NEEDS_DOCKER_PY2:
            msg = missing_required_lib("Docker SDK for Python: docker")
            msg = msg + ", for example via `pip install docker`. The error was: %s"
        else:
            msg = missing_required_lib(
                "Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)"
            )
            msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
        self.fail(msg % HAS_DOCKER_ERROR)

    if self.docker_py_version < LooseVersion(min_docker_version):
        msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
        if not NEEDS_DOCKER_PY2:
            # The minimal required version is < 2.0 (and the current version as well).
            # Advertise docker (instead of docker-py) for non-Python-2.6 users.
            msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
        elif docker_version < LooseVersion('2.0'):
            msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
        else:
            msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
        self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))

    self.debug = self.module.params.get('debug')
    self.check_mode = self.module.check_mode
    self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)

    try:
        # Superclass __init__ opens the actual connection to the daemon.
        super(AnsibleDockerClient, self).__init__(**self._connect_params)
        self.docker_api_version_str = self.version()['ApiVersion']
    except APIError as exc:
        self.fail("Docker API error: %s" % exc)
    except Exception as exc:
        self.fail("Error connecting: %s" % exc)

    self.docker_api_version = LooseVersion(self.docker_api_version_str)
    if min_docker_api_version is not None:
        if self.docker_api_version < LooseVersion(min_docker_api_version):
            self.fail(
                'Docker API version is %s. Minimum version required is %s.'
                % (self.docker_api_version_str, min_docker_api_version))

    # Per-option minimum-version constraints, validated by a helper.
    if option_minimal_versions is not None:
        self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
def main(): module = AnsibleModule(argument_spec=dict( taiga_host=dict(required=False, default="https://api.taiga.io"), project=dict(required=True), subject=dict(required=True), issue_type=dict(required=True), priority=dict(required=False, default="Normal"), status=dict(required=False, default="New"), severity=dict(required=False, default="Normal"), description=dict(required=False, default=""), attachment=dict(required=False, default=None), attachment_description=dict(required=False, default=""), tags=dict(required=False, default=[], type='list'), state=dict(required=False, choices=['present', 'absent'], default='present'), ), supports_check_mode=True) if not TAIGA_MODULE_IMPORTED: module.fail_json(msg=missing_required_lib("python-taiga"), exception=TAIGA_IMP_ERR) taiga_host = module.params['taiga_host'] project_name = module.params['project'] issue_subject = module.params['subject'] issue_priority = module.params['priority'] issue_status = module.params['status'] issue_type = module.params['issue_type'] issue_severity = module.params['severity'] issue_description = module.params['description'] issue_attachment = module.params['attachment'] issue_attachment_description = module.params['attachment_description'] if issue_attachment: if not isfile(issue_attachment): msg = "%s is not a file" % issue_attachment module.fail_json(msg=msg) issue_tags = module.params['tags'] state = module.params['state'] return_status, changed, msg, issue_attr_dict = manage_issue( module, taiga_host, project_name, issue_subject, issue_priority, issue_status, issue_type, issue_severity, issue_description, issue_attachment, issue_attachment_description, issue_tags, state, check_mode=module.check_mode) if return_status: if len(issue_attr_dict) > 0: module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict) else: module.exit_json(changed=changed, msg=msg) else: module.fail_json(msg=msg)