def main():
    """Ansible entry point: create or delete a Rackspace Monitoring alarm."""
    argument_spec = rax_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        label=dict(required=True),
        entity_id=dict(required=True),
        check_id=dict(required=True),
        notification_plan_id=dict(required=True),
        criteria=dict(),
        disabled=dict(type='bool', default=False),
        metadata=dict(type='dict'),
    ))

    module = AnsibleModule(argument_spec=argument_spec,
                           required_together=rax_required_together())

    # pyrax is the Rackspace SDK, imported optionally at module load time.
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    params = module.params
    state = params.get('state')
    label = params.get('label')
    entity_id = params.get('entity_id')
    check_id = params.get('check_id')
    notification_plan_id = params.get('notification_plan_id')
    criteria = params.get('criteria')
    disabled = module.boolean(params.get('disabled'))
    metadata = params.get('metadata')

    setup_rax_module(module, pyrax)

    alarm(module, state, label, entity_id, check_id, notification_plan_id,
          criteria, disabled, metadata)
def main():
    """Ansible entry point: manage SSL termination on a Rackspace CLB."""
    argument_spec = rax_argument_spec()
    argument_spec.update(dict(
        loadbalancer=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        enabled=dict(type='bool', default=True),
        private_key=dict(),
        certificate=dict(),
        intermediate_certificate=dict(),
        secure_port=dict(type='int', default=443),
        secure_traffic_only=dict(type='bool', default=False),
        https_redirect=dict(type='bool'),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module.')

    params = module.params
    setup_rax_module(module, pyrax)

    # Boolean-ish options go through module.boolean() to normalize yes/no
    # style values before being handed to the worker function.
    cloud_load_balancer_ssl(
        module,
        params.get('loadbalancer'),
        params.get('state'),
        module.boolean(params.get('enabled')),
        params.get('private_key'),
        params.get('certificate'),
        params.get('intermediate_certificate'),
        params.get('secure_port'),
        module.boolean(params.get('secure_traffic_only')),
        module.boolean(params.get('https_redirect')),
        module.boolean(params.get('wait')),
        params.get('wait_timeout'),
    )
def main():
    """Enable/disable Icinga2 features by driving the `icinga2 feature` CLI.

    A feature is considered enabled when its ``<name>.conf`` exists in the
    features-enabled directory.
    """
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            features=dict(required=True, type='dict'),
            icinga2_featuredir=dict(
                type='str', default='/etc/icinga2/features-enabled/'),
        ))

    params = module.params
    features = params['features']
    to_enable, to_disable = [], []

    for feature_name, should_enable in features.items():
        try:
            should_enable = module.boolean(should_enable)
        except TypeError:
            module.fail_json(
                msg="Feature {} should be 'on' or 'off', but is set to '{}'".
                format(feature_name, should_enable))
        # os.path.join avoids a double slash with the trailing-slash default.
        config = os.path.join(params['icinga2_featuredir'],
                              '{}.conf'.format(feature_name))
        if os.path.exists(config) and not should_enable:
            to_disable.append(feature_name)
        elif not os.path.exists(config) and should_enable:
            to_enable.append(feature_name)

    if not to_enable and not to_disable:
        module.exit_json(changed=False, msg="No Icinga2 features changed",
                         features=features)

    if module.check_mode:
        # BUGFIX: 'enabled' previously reported the to_disable list.
        module.exit_json(changed=True,
                         msg="Icinga2 features would be changed",
                         enabled=to_enable,
                         disabled=to_disable,
                         features=features)

    # Do the work now. Pass argv as a list so feature names are never
    # subject to shell-style word splitting.
    for feature in to_enable:
        rc, out, err = module.run_command(
            ['icinga2', 'feature', 'enable', feature])
        if rc != 0:
            module.fail_json(msg="Failed to enable feature {}".format(feature))
    for feature in to_disable:
        rc, out, err = module.run_command(
            ['icinga2', 'feature', 'disable', feature])
        if rc != 0:
            # BUGFIX: message previously read "disale".
            module.fail_json(msg="Failed to disable feature {}".format(feature))

    module.exit_json(
        changed=True,
        msg="Updated icinga2 features. Icinga should be restarted",
        enabled=to_enable,
        disabled=to_disable,
        features=features)
def main():
    """Install a Perl module (or a distribution from a path) via cpanm."""
    arg_spec = dict(
        name=dict(default=None, required=False, aliases=['pkg']),
        from_path=dict(default=None, required=False, type='path'),
        notest=dict(default=False, type='bool'),
        locallib=dict(default=None, required=False, type='path'),
        mirror=dict(default=None, required=False),
        mirror_only=dict(default=False, type='bool'),
        installdeps=dict(default=False, type='bool'),
        system_lib=dict(default=False, type='bool', aliases=['use_sudo']),
        version=dict(default=None, required=False),
        executable=dict(required=False, type='path'),
    )
    module = AnsibleModule(
        argument_spec=arg_spec,
        required_one_of=[['name', 'from_path']],
    )

    cpanm = _get_cpanm_path(module)
    params = module.params
    name = params['name']
    from_path = params['from_path']
    notest = module.boolean(params.get('notest', False))
    locallib = params['locallib']
    mirror = params['mirror']
    mirror_only = params['mirror_only']
    installdeps = params['installdeps']
    use_sudo = params['system_lib']
    version = params['version']

    changed = False

    # Nothing to do when the requested package/version is already present.
    if not _is_package_installed(module, name, locallib, cpanm, version):
        cmd = _build_cmd_line(name, from_path, notest, locallib, mirror,
                              mirror_only, installdeps, cpanm, use_sudo)
        rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)

        if rc_cpanm != 0:
            module.fail_json(msg=err_cpanm, cmd=cmd)

        # cpanm reports "is up to date" when no actual install happened.
        if ('is up to date' not in err_cpanm
                and 'is up to date' not in out_cpanm):
            changed = True

    module.exit_json(changed=changed, binary=cpanm, name=name)
def main():
    """Ansible entry point: manage a Rackspace Cloud Monitoring check."""
    argument_spec = rax_argument_spec()
    argument_spec.update(dict(
        entity_id=dict(required=True),
        label=dict(required=True),
        check_type=dict(required=True),
        monitoring_zones_poll=dict(),
        target_hostname=dict(),
        target_alias=dict(),
        details=dict(type='dict', default={}),
        disabled=dict(type='bool', default=False),
        metadata=dict(type='dict', default={}),
        period=dict(type='int'),
        timeout=dict(type='int'),
        state=dict(default='present', choices=['present', 'absent']),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    p = module.params
    setup_rax_module(module, pyrax)

    cloud_check(module,
                p.get('state'),
                p.get('entity_id'),
                p.get('label'),
                p.get('check_type'),
                p.get('monitoring_zones_poll'),
                p.get('target_hostname'),
                p.get('target_alias'),
                p.get('details'),
                module.boolean(p.get('disabled')),
                p.get('metadata'),
                p.get('period'),
                p.get('timeout'))
def main():
    """Validate a value against a YAML schema file; fail with all errors."""
    module = AnsibleModule(argument_spec=dict(
        schema_file=dict(type='str', required=True),
        value=dict(type='raw', required=True),
        full_validation=dict(type='bool', default=True),
    ))

    schema_file = module.params['schema_file']
    value = module.params['value']
    full_validation = module.boolean(module.params['full_validation'])

    error_msgs = []

    if not os.path.exists(schema_file):
        error_msgs.append([str('schema file not found: ' + schema_file)])
    else:
        schema = None
        try:
            schema = load_yaml_file(schema_file)
        except Exception as error:
            # Record the load failure (with traceback) instead of raising.
            error_msgs.append([
                str('file: ' + schema_file),
                'msg: error when trying to load the schema file',
                'error type: ' + str(type(error)),
                'error details: ',
                traceback.format_exc().split('\n'),
            ])

        if schema:
            # Prefix each schema error with the file it came from.
            # (local name chosen so it does not shadow the 'value' param)
            schema_errors = validate_schema(schema, value, full_validation)
            for err_item in (schema_errors or []):
                error_msgs.append(
                    [str('schema file: ' + schema_file)] + err_item)

    if error_msgs:
        context = "schema validation"
        module.fail_json(msg=to_text(error_text(error_msgs, context)))

    module.exit_json(changed=False)
def main():
    """Manage (create/delete) a Rackspace Cloud Monitoring alarm."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present', 'absent']),
            label=dict(required=True),
            entity_id=dict(required=True),
            check_id=dict(required=True),
            notification_plan_id=dict(required=True),
            criteria=dict(),
            disabled=dict(type='bool', default=False),
            metadata=dict(type='dict'),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    # The Rackspace SDK is an optional import; fail early when missing.
    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    setup_rax_module(module, pyrax)

    alarm(module,
          module.params.get('state'),
          module.params.get('label'),
          module.params.get('entity_id'),
          module.params.get('check_id'),
          module.params.get('notification_plan_id'),
          module.params.get('criteria'),
          module.boolean(module.params.get('disabled')),
          module.params.get('metadata'))
def main():
    """Build and run rhos-release command(s) for install/update/uninstall.

    Each Ansible option is translated into the CLI fragment that the
    ``rhos-release`` tool expects; the fragments are concatenated into one
    or more shell commands joined by ';' and executed via ``sh -c``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default="install",
                       choices=['install', 'uninstall', 'update']),
            release=dict(),
            build_date=dict(),
            director=dict(type='bool', default=True),
            director_build_date=dict(),
            pin_puddle=dict(default=True),
            enable_poodle_repos=dict(default=False),
            poodle_type=dict(choices=list(POODLE_TYPES.keys())),
            target_directory=dict(),
            distro_version=dict(),
            source_hostname=dict(),
            enable_flea_repos=dict(default=False),
            one_shot_mode=dict(default=False),
            buildmods=dict(type='list'),
            discover_build=dict(type='bool', default=False),
            enable_testing_repos=dict()
        )
    )
    base_cmd = 'rhos-release'

    state = module.params['state']
    repo_directory = module.params['target_directory']
    release = module.params['release']
    puddle = module.params['build_date']
    director = module.params['director']
    director_puddle = module.params['director_build_date']
    distro_version = module.params['distro_version']
    pin_puddle = module.params['pin_puddle']
    enable_poodle_repos = module.params['enable_poodle_repos']
    source_hostname = module.params['source_hostname']
    enable_flea_repos = module.params['enable_flea_repos']
    one_shot_mode = module.params['one_shot_mode']
    # BUGFIX: buildmods has no default, so it may be None; normalize to a
    # list before the membership tests / iteration below.
    buildmods = module.params['buildmods'] or []
    discover_build = module.params['discover_build']
    enable_testing_repos = module.params['enable_testing_repos']

    repo_args = ['-t', str(repo_directory)] if repo_directory else []

    if discover_build and not puddle:
        puddle = do_build_discover()

    # Translate each option into the CLI fragment rhos-release expects.
    puddle = ['-p', str(puddle)] if puddle else []
    pin_puddle = ['-P'] if module.boolean(pin_puddle) else []
    enable_poodle_repos = ['-d'] if module.boolean(enable_poodle_repos) else []
    director_puddle = ['-p', str(director_puddle)] if director_puddle else []
    distro_version = ['-r', distro_version] if distro_version else []
    poodle_type = POODLE_TYPES.get(module.params['poodle_type'], [])
    source_hostname = ['-H', source_hostname] if source_hostname else []
    enable_flea_repos = ['-f'] if module.boolean(enable_flea_repos) else []
    one_shot_mode = ['-O'] if module.boolean(one_shot_mode) else []
    enable_testing_repos = (['-T', str(enable_testing_repos)]
                            if enable_testing_repos else [])

    mods = {
        'pin': '-P',
        'flea': '-f',
        'unstable': '--unstable',
        'cdn': ''
    }
    cmd = []
    if state == 'uninstall':
        cmd = [base_cmd, '-x']
        cmd.extend(repo_args)
    elif state in ['install', 'update']:
        if not release:
            _fail(module, "'release' option should be specified.", cmd)
        if "cdn" in buildmods:
            release = str(release) + "-cdn"
        releases = [(str(release), puddle)]
        try:
            # OSP 7-9 ship separate director repos; prepend when requested.
            if 10 > int(release) > 6 and director:
                releases = ([(str(release) + '-director', director_puddle)]
                            + releases)
        except ValueError:
            # RDO versions & CDN shouldn't try to get director repos
            pass

        for release, build in releases:
            if state == 'update':
                cmd.extend([base_cmd, '-u'])
            else:
                cmd.extend([base_cmd, release])
            # buildmods == ['none'] means: apply no build modifiers at all.
            if not (len(buildmods) == 1 and 'none' in buildmods):
                for buildmod in buildmods:
                    cmd.append(mods[buildmod.lower()])
            cmd.extend(enable_poodle_repos)
            # cmd.extend(enable_flea_repos)
            cmd.extend(poodle_type)
            # cmd.extend(pin_puddle)
            cmd.extend(build)
            cmd.extend(distro_version)
            cmd.extend(source_hostname)
            cmd.extend(one_shot_mode)
            cmd.extend(enable_testing_repos)
            cmd.extend(repo_args)
            cmd.append(';')
    _run_command(module, ['sh', '-c', ' '.join(cmd)])
def main():
    """Insert, update or remove a marker-delimited block of text in a file.

    The block is bounded by begin/end marker lines built from ``marker``,
    ``marker_begin`` and ``marker_end``; ``insertafter``/``insertbefore``
    control placement when no existing block is found.
    """
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True,
                      aliases=['dest', 'destfile', 'name']),
            state=dict(type='str', default='present',
                       choices=['absent', 'present']),
            marker=dict(type='str', default='# {mark} ANSIBLE MANAGED BLOCK'),
            block=dict(type='str', default='', aliases=['content']),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
            create=dict(type='bool', default=False),
            backup=dict(type='bool', default=False),
            validate=dict(type='str'),
            marker_begin=dict(type='str', default='BEGIN'),
            marker_end=dict(type='str', default='END'),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        add_file_common_args=True,
        supports_check_mode=True)

    params = module.params
    path = params['path']

    if os.path.isdir(path):
        module.fail_json(rc=256, msg='Path %s is a directory !' % path)

    path_exists = os.path.exists(path)
    if not path_exists:
        if not module.boolean(params['create']):
            module.fail_json(rc=257, msg='Path %s does not exist !' % path)
        destpath = os.path.dirname(path)
        if not os.path.exists(destpath) and not module.check_mode:
            try:
                os.makedirs(destpath)
            except Exception as e:
                # BUGFIX: exceptions are not indexable on Python 3; format
                # the exception itself instead of e[0]/e[1].
                module.fail_json(
                    msg='Error creating %s Error description: %s'
                        % (destpath, e))
        original = None
        lines = []
    else:
        with open(path, 'rb') as f:
            original = f.read()
        lines = original.splitlines(True)

    diff = {'before': '',
            'after': '',
            'before_header': '%s (content)' % path,
            'after_header': '%s (content)' % path}

    if module._diff and original:
        diff['before'] = original

    insertbefore = params['insertbefore']
    insertafter = params['insertafter']
    block = to_bytes(params['block'])
    marker = to_bytes(params['marker'])
    present = params['state'] == 'present'

    if not present and not path_exists:
        module.exit_json(changed=False, msg="File %s not present" % path)

    if insertbefore is None and insertafter is None:
        insertafter = 'EOF'

    if insertafter not in (None, 'EOF'):
        insertre = re.compile(
            to_bytes(insertafter, errors='surrogate_or_strict'))
    elif insertbefore not in (None, 'BOF'):
        insertre = re.compile(
            to_bytes(insertbefore, errors='surrogate_or_strict'))
    else:
        insertre = None

    marker0 = re.sub(b(r'{mark}'), b(params['marker_begin']), marker) + b(os.linesep)
    marker1 = re.sub(b(r'{mark}'), b(params['marker_end']), marker) + b(os.linesep)
    if present and block:
        if not block.endswith(b(os.linesep)):
            block += b(os.linesep)
        blocklines = [marker0] + block.splitlines(True) + [marker1]
    else:
        blocklines = []

    # Locate existing begin/end markers (last occurrence wins).
    n0 = n1 = None
    for i, line in enumerate(lines):
        if line == marker0:
            n0 = i
        if line == marker1:
            n1 = i

    if None in (n0, n1):
        # No complete existing block: compute the insertion point.
        n0 = None
        if insertre is not None:
            for i, line in enumerate(lines):
                if insertre.search(line):
                    n0 = i
            if n0 is None:
                n0 = len(lines)
            elif insertafter is not None:
                n0 += 1
        elif insertbefore is not None:
            n0 = 0  # insertbefore=BOF
        else:
            n0 = len(lines)  # insertafter=EOF
    elif n0 < n1:
        # Replace the existing block in place.
        lines[n0:n1 + 1] = []
    else:
        # Markers reversed; remove the span and reinsert at the first one.
        lines[n1:n0 + 1] = []
        n0 = n1

    # Ensure there is a line separator before the block of lines to be inserted
    if n0 > 0:
        if not lines[n0 - 1].endswith(b(os.linesep)):
            lines[n0 - 1] += b(os.linesep)

    lines[n0:n0] = blocklines
    if lines:
        result = b''.join(lines)
    else:
        result = b''

    if module._diff:
        diff['after'] = result

    if original == result:
        msg = ''
        changed = False
    elif original is None:
        msg = 'File created'
        changed = True
    elif not blocklines:
        msg = 'Block removed'
        changed = True
    else:
        msg = 'Block inserted'
        changed = True

    backup_file = None
    if changed and not module.check_mode:
        if module.boolean(params['backup']) and path_exists:
            backup_file = module.backup_local(path)
        # We should always follow symlinks so that we change the real file
        real_path = os.path.realpath(params['path'])
        write_changes(module, result, real_path)

    if module.check_mode and not path_exists:
        module.exit_json(changed=changed, msg=msg, diff=diff)

    attr_diff = {}
    msg, changed = check_file_attrs(module, changed, msg, attr_diff)

    attr_diff['before_header'] = '%s (file attributes)' % path
    attr_diff['after_header'] = '%s (file attributes)' % path

    difflist = [diff, attr_diff]

    if backup_file is None:
        module.exit_json(changed=changed, msg=msg, diff=difflist)
    else:
        module.exit_json(changed=changed, msg=msg, diff=difflist,
                         backup_file=backup_file)
def main():
    """Legacy blockinfile: insert/update/remove a marked block in a file."""
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(required=True, aliases=['name', 'destfile'],
                      type='path'),
            state=dict(default='present', choices=['absent', 'present']),
            marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),
            block=dict(default='', type='str', aliases=['content']),
            insertafter=dict(default=None),
            insertbefore=dict(default=None),
            create=dict(default=False, type='bool'),
            backup=dict(default=False, type='bool'),
            validate=dict(default=None, type='str'),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        add_file_common_args=True,
        supports_check_mode=True)

    params = module.params
    dest = params['dest']
    # 'follow' is injected by add_file_common_args.
    if module.boolean(params.get('follow', None)):
        dest = os.path.realpath(dest)

    if os.path.isdir(dest):
        module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)

    path_exists = os.path.exists(dest)
    if not path_exists:
        if not module.boolean(params['create']):
            module.fail_json(rc=257,
                             msg='Destination %s does not exist !' % dest)
        original = None
        lines = []
    else:
        with open(dest, 'rb') as f:
            original = f.read()
        lines = original.splitlines()

    insertbefore = params['insertbefore']
    insertafter = params['insertafter']
    block = to_bytes(params['block'])
    marker = to_bytes(params['marker'])
    present = params['state'] == 'present'

    if not present and not path_exists:
        module.exit_json(changed=False, msg="File not present")

    if insertbefore is None and insertafter is None:
        insertafter = 'EOF'

    if insertafter not in (None, 'EOF'):
        # BUGFIX: lines are bytes (file opened 'rb'); compile the pattern as
        # bytes so insertre.search(line) does not raise TypeError on Python 3.
        insertre = re.compile(to_bytes(insertafter))
    elif insertbefore not in (None, 'BOF'):
        insertre = re.compile(to_bytes(insertbefore))
    else:
        insertre = None

    marker0 = re.sub(b(r'{mark}'), b('BEGIN'), marker)
    marker1 = re.sub(b(r'{mark}'), b('END'), marker)
    if present and block:
        # Escape sequences like '\n' need to be handled in Ansible 1.x
        if module.ansible_version.startswith('1.'):
            block = re.sub('', block, '')
        blocklines = [marker0] + block.splitlines() + [marker1]
    else:
        blocklines = []

    # Locate existing begin/end markers (last occurrence wins).
    n0 = n1 = None
    for i, line in enumerate(lines):
        if line.startswith(marker0):
            n0 = i
        if line.startswith(marker1):
            n1 = i

    if None in (n0, n1):
        n0 = None
        if insertre is not None:
            for i, line in enumerate(lines):
                if insertre.search(line):
                    n0 = i
            if n0 is None:
                n0 = len(lines)
            elif insertafter is not None:
                n0 += 1
        elif insertbefore is not None:
            n0 = 0  # insertbefore=BOF
        else:
            n0 = len(lines)  # insertafter=EOF
    elif n0 < n1:
        lines[n0:n1 + 1] = []
    else:
        lines[n1:n0 + 1] = []
        n0 = n1

    lines[n0:n0] = blocklines

    if lines:
        result = b('\n').join(lines)
        if original is None or original.endswith(b('\n')):
            result += b('\n')
    else:
        result = ''

    if original == result:
        msg = ''
        changed = False
    elif original is None:
        msg = 'File created'
        changed = True
    elif not blocklines:
        msg = 'Block removed'
        changed = True
    else:
        msg = 'Block inserted'
        changed = True

    if changed and not module.check_mode:
        if module.boolean(params['backup']) and path_exists:
            module.backup_local(dest)
        write_changes(module, result, dest)

    if module.check_mode and not path_exists:
        module.exit_json(changed=changed, msg=msg)

    msg, changed = check_file_attrs(module, changed, msg)
    module.exit_json(changed=changed, msg=msg)
def main():
    """Dispatch an S3 (or RGW/Walrus/fakes3) operation selected by `mode`.

    Supported modes: get, put, delete, create, geturl, getstr, delobj, list.
    Connection setup, ACL validation and bucket existence checks are shared;
    each mode then branches into its own helper calls.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(bucket=dict(required=True),
             dest=dict(default=None),
             encrypt=dict(default=True, type='bool'),
             expiry=dict(default=600, type='int', aliases=['expiration']),
             headers=dict(type='dict'),
             marker=dict(default=""),
             max_keys=dict(default=1000, type='int'),
             metadata=dict(type='dict'),
             mode=dict(choices=[
                 'get', 'put', 'delete', 'create', 'geturl', 'getstr',
                 'delobj', 'list'
             ], required=True),
             object=dict(),
             permission=dict(type='list', default=['private']),
             version=dict(default=None),
             overwrite=dict(aliases=['force'], default='always'),
             prefix=dict(default=""),
             retries=dict(aliases=['retry'], type='int', default=0),
             s3_url=dict(aliases=['S3_URL']),
             rgw=dict(default='no', type='bool'),
             src=dict(),
             ignore_nonexistent_bucket=dict(default=False, type='bool')),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    # This module is also shipped under its legacy 's3' name.
    if module._name == 's3':
        module.deprecate("The 's3' module is being renamed 'aws_s3'",
                         version=2.7)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore required for this module')

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = module.params.get('expiry')
    dest = module.params.get('dest', '')
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    rgw = module.params.get('rgw')
    src = module.params.get('src')
    ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')

    if dest:
        dest = os.path.expanduser(dest)

    # Canned ACL names accepted by S3 for objects vs. buckets.
    object_canned_acl = [
        "private", "public-read", "public-read-write", "aws-exec-read",
        "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"
    ]
    bucket_canned_acl = [
        "private", "public-read", "public-read-write", "authenticated-read"
    ]

    # Legacy boolean-style overwrite values map onto always/never.
    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module,
                                                                  boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = module.params['object']
        # If there is a top level object, do nothing - if the object starts with /
        # remove the leading character to maintain compatibility with Ansible versions < 2.4
        if obj.startswith('/'):
            obj = obj[1:]

    # Bucket deletion does not require obj. Prevents ambiguity with delobj.
    if obj and mode == "delete":
        module.fail_json(msg='Parameter obj cannot be used with mode=delete')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    # rgw requires an explicit url
    if rgw and not s3_url:
        module.fail_json(msg='rgw flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to RGW, Walrus or fakes3
    if s3_url:
        for key in ['validate_certs', 'security_token', 'profile_name']:
            aws_connect_kwargs.pop(key, None)

    try:
        s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw,
                               s3_url)
    except (botocore.exceptions.NoCredentialsError,
            botocore.exceptions.ProfileNotFound) as e:
        module.fail_json(
            msg="Can't authorize connection. Check your credentials and profile.",
            exceptions=traceback.format_exc(),
            **camel_dict_to_snake_dict(e.response))

    # With ignore_nonexistent_bucket, existence checks are skipped.
    validate = not ignore_nonexistent_bucket

    # separate types of ACLs
    bucket_acl = [
        acl for acl in module.params.get('permission')
        if acl in bucket_canned_acl
    ]
    object_acl = [
        acl for acl in module.params.get('permission')
        if acl in object_canned_acl
    ]
    error_acl = [
        acl for acl in module.params.get('permission')
        if acl not in bucket_canned_acl and acl not in object_canned_acl
    ]
    if error_acl:
        module.fail_json(msg='Unknown permission specified: %s' % error_acl)

    # First, we check to see if the bucket exists, we get "bucket" returned.
    bucketrtn = bucket_check(module, s3, bucket, validate=validate)
    if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
        module.fail_json(msg="Source bucket cannot be found.")

    # If our mode is a GET operation (download), go through the procedure as appropriate ...
    if mode == 'get':
        # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
        keyrtn = key_check(module, s3, bucket, obj, version=version,
                           validate=validate)
        if keyrtn is False:
            module.fail_json(msg="Key %s with version id %s does not exist."
                             % (obj, version))

        # If the destination path doesn't exist or overwrite is True, no need to do the md5um etag check, so just download.
        # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
        if path_check(dest):
            # Determine if the remote and local object are identical
            if keysum(module, s3, bucket, obj,
                      version=version) == module.md5(dest):
                sum_matches = True
                if overwrite == 'always':
                    download_s3file(module, s3, bucket, obj, dest, retries,
                                    version=version)
                else:
                    module.exit_json(
                        msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.",
                        changed=False)
            else:
                sum_matches = False
                if overwrite in ('always', 'different'):
                    download_s3file(module, s3, bucket, obj, dest, retries,
                                    version=version)
                else:
                    module.exit_json(
                        msg="WARNING: Checksums do not match. Use overwrite parameter to force download."
                    )
        else:
            download_s3file(module, s3, bucket, obj, dest, retries,
                            version=version)

    # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
    if mode == 'put':
        # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
        # these were separated into the variables bucket_acl and object_acl above

        # Lets check the src path.
        if not path_check(src):
            module.fail_json(msg="Local object for PUT does not exist")

        # Lets check to see if bucket exists to get ground truth.
        if bucketrtn:
            keyrtn = key_check(module, s3, bucket, obj, version=version,
                               validate=validate)

        # Lets check key state. Does it exist and if it does, compute the etag md5sum.
        if bucketrtn and keyrtn:
            # Compare the local and remote object
            if module.md5(src) == keysum(module, s3, bucket, obj):
                sum_matches = True
                if overwrite == 'always':
                    # only use valid object acls for the upload_s3file function
                    module.params['permission'] = object_acl
                    upload_s3file(module, s3, bucket, obj, src, expiry,
                                  metadata, encrypt, headers)
                else:
                    get_download_url(module, s3, bucket, obj, expiry,
                                     changed=False)
            else:
                sum_matches = False
                if overwrite in ('always', 'different'):
                    # only use valid object acls for the upload_s3file function
                    module.params['permission'] = object_acl
                    upload_s3file(module, s3, bucket, obj, src, expiry,
                                  metadata, encrypt, headers)
                else:
                    module.exit_json(
                        msg="WARNING: Checksums do not match. Use overwrite parameter to force upload."
                    )

        # If neither exist (based on bucket existence), we can create both.
        if not bucketrtn:
            # only use valid bucket acls for create_bucket function
            module.params['permission'] = bucket_acl
            create_bucket(module, s3, bucket, location)
            # only use valid object acls for the upload_s3file function
            module.params['permission'] = object_acl
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata,
                          encrypt, headers)

        # If bucket exists but key doesn't, just upload.
        if bucketrtn and not keyrtn:
            # only use valid object acls for the upload_s3file function
            module.params['permission'] = object_acl
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata,
                          encrypt, headers)

    # Delete an object from a bucket, not the entire bucket
    if mode == 'delobj':
        if obj is None:
            module.fail_json(msg="object parameter is required")
        if bucket:
            deletertn = delete_key(module, s3, bucket, obj)
            if deletertn is True:
                module.exit_json(msg="Object deleted from bucket %s." % bucket,
                                 changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Delete an entire bucket, including all objects in the bucket
    if mode == 'delete':
        if bucket:
            deletertn = delete_bucket(module, s3, bucket)
            if deletertn is True:
                module.exit_json(
                    msg="Bucket %s and all keys have been deleted." % bucket,
                    changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Support for listing a set of keys
    if mode == 'list':
        exists = bucket_check(module, s3, bucket)

        # If the bucket does not exist then bail out
        if not exists:
            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)

        list_keys(module, s3, bucket, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':
        # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
        # these were separated above into the variables bucket_acl and object_acl
        if bucket and not obj:
            if bucketrtn:
                module.exit_json(msg="Bucket already exists.", changed=False)
            else:
                # only use valid bucket acls when creating the bucket
                module.params['permission'] = bucket_acl
                module.exit_json(msg="Bucket created successfully",
                                 changed=create_bucket(module, s3, bucket,
                                                       location))
        if bucket and obj:
            # "Directories" are represented by a zero-length key ending in /.
            if obj.endswith('/'):
                dirobj = obj
            else:
                dirobj = obj + "/"
            if bucketrtn:
                if key_check(module, s3, bucket, dirobj):
                    module.exit_json(
                        msg="Bucket %s and key %s already exists."
                        % (bucket, obj), changed=False)
                else:
                    # setting valid object acls for the create_dirkey function
                    module.params['permission'] = object_acl
                    create_dirkey(module, s3, bucket, dirobj)
            else:
                # only use valid bucket acls for the create_bucket function
                module.params['permission'] = bucket_acl
                created = create_bucket(module, s3, bucket, location)
                # only use valid object acls for the create_dirkey function
                module.params['permission'] = object_acl
                create_dirkey(module, s3, bucket, dirobj)

    # Support for grabbing the time-expired URL for an object in S3/Walrus.
    if mode == 'geturl':
        if not bucket and not obj:
            module.fail_json(msg="Bucket and Object parameters must be set")

        keyrtn = key_check(module, s3, bucket, obj, version=version,
                           validate=validate)
        if keyrtn:
            get_download_url(module, s3, bucket, obj, expiry)
        else:
            module.fail_json(msg="Key %s does not exist." % obj)

    if mode == 'getstr':
        if bucket and obj:
            keyrtn = key_check(module, s3, bucket, obj, version=version,
                               validate=validate)
            if keyrtn:
                download_s3str(module, s3, bucket, obj, version=version)
            elif version is not None:
                module.fail_json(msg="Key %s with version id %s does not exist."
                                 % (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

    module.exit_json(failed=False)
def run_module():
    """Ensure a key inside a macOS authorization-db right has a value.

    Reads the right's plist with `security authorizationdb read`, compares
    the requested key, and writes the updated plist back when it differs.
    """
    module_args = dict(
        right=dict(type="str", default="system.preferences"),
        key=dict(type="str", required=True),
        type=dict(
            type="str",
            choices=(
                "string", "str",
                "boolean", "bool",
                "float", "real",
                "integer", "int",
            ),
        ),
        value=dict(type="raw", required=True),
    )
    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    right = module.params["right"]
    key = module.params["key"]
    desired_value = module.params["value"]

    # Coerce the raw value into the requested plist type, when one was given.
    value_type = module.params.get("type")
    if value_type in ("string", "str"):
        if not isinstance(desired_value, str):
            desired_value = str(desired_value)
    elif value_type in ("boolean", "bool"):
        desired_value = module.boolean(desired_value)
    elif value_type in ("float", "real"):
        desired_value = float(desired_value)
    elif value_type in ("integer", "int"):
        desired_value = int(desired_value)

    plist_xml = subprocess.check_output(
        ["security", "authorizationdb", "read", right]
    )
    plist = plistlib.loads(plist_xml)

    try:
        current_value = plist[key]
    except KeyError:
        module.fail_json(
            msg=("Key %r not in plist for %r" % (key, right))
        )

    result = {
        "old_value": current_value,
        "new_value": desired_value,
        "changed": (current_value != desired_value),
    }

    if result["changed"] and not module.check_mode:
        plist[key] = desired_value
        writer = subprocess.Popen(
            ["security", "authorizationdb", "write", right],
            stdin=subprocess.PIPE,
        )
        writer.communicate(plistlib.dumps(plist))
        if writer.returncode != 0:
            module.fail_json(
                msg=(
                    "security authorizationdb write returned %r"
                    % (writer.returncode,)
                )
            )

    module.exit_json(**result)
def main():
    """Insert, update or remove a marked block of text in a file (blockinfile).

    The managed block is delimited by begin/end marker lines derived from the
    ``marker`` template; placement of a new block is controlled by
    ``insertafter``/``insertbefore`` regexes (or EOF/BOF).  All content is
    handled as bytes so arbitrary file encodings survive the round trip.
    """
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            marker=dict(type='str', default='# {mark} ANSIBLE MANAGED BLOCK'),
            block=dict(type='str', default='', aliases=['content']),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
            create=dict(type='bool', default=False),
            backup=dict(type='bool', default=False),
            validate=dict(type='str'),
            marker_begin=dict(type='str', default='BEGIN'),
            marker_end=dict(type='str', default='END'),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        add_file_common_args=True,
        supports_check_mode=True
    )
    params = module.params
    path = params['path']

    if os.path.isdir(path):
        module.fail_json(rc=256, msg='Path %s is a directory !' % path)

    path_exists = os.path.exists(path)
    if not path_exists:
        if not module.boolean(params['create']):
            module.fail_json(rc=257, msg='Path %s does not exist !' % path)
        destpath = os.path.dirname(path)
        if not os.path.exists(destpath) and not module.check_mode:
            try:
                os.makedirs(destpath)
            except Exception as e:
                # NOTE(review): e[0]/e[1] indexing only works on Python 2
                # exception tuples — confirm intended interpreter version.
                module.fail_json(msg='Error creating %s Error code: %s Error description: %s' % (destpath, e[0], e[1]))
        original = None
        lines = []
    else:
        # Read as raw bytes so we never corrupt non-UTF-8 content.
        f = open(path, 'rb')
        original = f.read()
        f.close()
        lines = original.splitlines()

    diff = {'before': '',
            'after': '',
            'before_header': '%s (content)' % path,
            'after_header': '%s (content)' % path}

    if module._diff and original:
        diff['before'] = original

    insertbefore = params['insertbefore']
    insertafter = params['insertafter']
    block = to_bytes(params['block'])
    marker = to_bytes(params['marker'])
    present = params['state'] == 'present'

    if not present and not path_exists:
        module.exit_json(changed=False, msg="File %s not present" % path)

    if insertbefore is None and insertafter is None:
        insertafter = 'EOF'

    # Compile the placement regex, if any; EOF/BOF are handled positionally.
    if insertafter not in (None, 'EOF'):
        insertre = re.compile(to_bytes(insertafter, errors='surrogate_or_strict'))
    elif insertbefore not in (None, 'BOF'):
        insertre = re.compile(to_bytes(insertbefore, errors='surrogate_or_strict'))
    else:
        insertre = None

    # Render the begin/end marker lines from the {mark} template.
    marker0 = re.sub(b(r'{mark}'), b(params['marker_begin']), marker)
    marker1 = re.sub(b(r'{mark}'), b(params['marker_end']), marker)
    if present and block:
        # Escape seqeuences like '\n' need to be handled in Ansible 1.x
        # NOTE(review): this re.sub with empty pattern/replacement looks like a
        # no-op — confirm against the intended 1.x escape handling.
        if module.ansible_version.startswith('1.'):
            block = re.sub('', block, '')
        blocklines = [marker0] + block.splitlines() + [marker1]
    else:
        blocklines = []

    # Locate an existing managed block: n0 = begin marker line, n1 = end.
    n0 = n1 = None
    for i, line in enumerate(lines):
        if line == marker0:
            n0 = i
        if line == marker1:
            n1 = i

    if None in (n0, n1):
        # No complete existing block — decide where the new one goes.
        n0 = None
        if insertre is not None:
            # Last regex match wins; insert after it (or at EOF if no match).
            for i, line in enumerate(lines):
                if insertre.search(line):
                    n0 = i
            if n0 is None:
                n0 = len(lines)
            elif insertafter is not None:
                n0 += 1
        elif insertbefore is not None:
            n0 = 0  # insertbefore=BOF
        else:
            n0 = len(lines)  # insertafter=EOF
    elif n0 < n1:
        # Remove the old block (markers inclusive); reinsert at its position.
        lines[n0:n1 + 1] = []
    else:
        # Markers were found in reverse order; normalize before removing.
        lines[n1:n0 + 1] = []
        n0 = n1

    lines[n0:n0] = blocklines

    if lines:
        result = b('\n').join(lines)
        if original is None or original.endswith(b('\n')):
            result += b('\n')
    else:
        # NOTE(review): '' here is str while the join branch yields bytes —
        # confirm this type mix is harmless on the target Python version.
        result = ''

    if module._diff:
        diff['after'] = result

    if original == result:
        msg = ''
        changed = False
    elif original is None:
        msg = 'File created'
        changed = True
    elif not blocklines:
        msg = 'Block removed'
        changed = True
    else:
        msg = 'Block inserted'
        changed = True

    if changed and not module.check_mode:
        if module.boolean(params['backup']) and path_exists:
            module.backup_local(path)
        # We should always follow symlinks so that we change the real file
        real_path = os.path.realpath(params['path'])
        write_changes(module, result, real_path)

    if module.check_mode and not path_exists:
        module.exit_json(changed=changed, msg=msg, diff=diff)

    attr_diff = {}
    msg, changed = check_file_attrs(module, changed, msg, attr_diff)

    attr_diff['before_header'] = '%s (file attributes)' % path
    attr_diff['after_header'] = '%s (file attributes)' % path

    difflist = [diff, attr_diff]
    module.exit_json(changed=changed, msg=msg, diff=difflist)
def main():
    """Run a ``manage.py`` command for a Django application (django_manage).

    Validates that the supplied parameters are compatible with the chosen
    command, builds the command line, executes it inside the app directory
    (optionally within a virtualenv), and reports change based on a
    per-command output filter when one is defined at module scope.
    """
    # Which optional params each command accepts.
    command_allowed_param_map = dict(
        cleanup=(),
        createcachetable=('cache_table', 'database', ),
        flush=('database', ),
        loaddata=('database', 'fixtures', ),
        syncdb=('database', ),
        test=('failfast', 'testrunner', 'liveserver', 'apps', ),
        validate=(),
        migrate=('apps', 'skip', 'merge', 'database',),
        collectstatic=('clear', 'link', ),
    )

    command_required_param_map = dict(
        loaddata=('fixtures', ),
    )

    # forces --noinput on every command that needs it
    noinput_commands = (
        'flush',
        'syncdb',
        'migrate',
        'test',
        'collectstatic',
    )

    # These params are allowed for certain commands only
    specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')

    # These params are automatically added to the command if present
    general_params = ('settings', 'pythonpath', 'database',)
    specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
    end_of_command_params = ('apps', 'cache_table', 'fixtures')

    module = AnsibleModule(
        argument_spec=dict(
            command=dict(default=None, required=True),
            app_path=dict(default=None, required=True, type='path'),
            settings=dict(default=None, required=False),
            pythonpath=dict(default=None, required=False, aliases=['python_path']),
            virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']),
            apps=dict(default=None, required=False),
            cache_table=dict(default=None, required=False),
            clear=dict(default=None, required=False, type='bool'),
            database=dict(default=None, required=False),
            failfast=dict(default='no', required=False, type='bool', aliases=['fail_fast']),
            fixtures=dict(default=None, required=False),
            liveserver=dict(default=None, required=False, aliases=['live_server']),
            testrunner=dict(default=None, required=False, aliases=['test_runner']),
            skip=dict(default=None, required=False, type='bool'),
            merge=dict(default=None, required=False, type='bool'),
            link=dict(default=None, required=False, type='bool'),
        ),
    )

    command = module.params['command']
    app_path = module.params['app_path']
    virtualenv = module.params['virtualenv']

    # Reject params that the chosen command does not accept.
    for param in specific_params:
        value = module.params[param]
        if param in specific_boolean_params:
            value = module.boolean(value)
        if value and param not in command_allowed_param_map[command]:
            module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))

    # Enforce params the chosen command requires.
    for param in command_required_param_map.get(command, ()):
        if not module.params[param]:
            module.fail_json(msg='%s param is required for command=%s' % (param, command))

    _ensure_virtualenv(module)

    cmd = "./manage.py %s" % (command, )

    if command in noinput_commands:
        cmd = '%s --noinput' % cmd

    for param in general_params:
        if module.params[param]:
            cmd = '%s --%s=%s' % (cmd, param, module.params[param])

    for param in specific_boolean_params:
        if module.boolean(module.params[param]):
            cmd = '%s --%s' % (cmd, param)

    # these params always get tacked on the end of the command
    for param in end_of_command_params:
        if module.params[param]:
            cmd = '%s %s' % (cmd, module.params[param])

    rc, out, err = module.run_command(cmd, cwd=app_path)
    if rc != 0:
        # createcachetable complaining the table exists is not a failure.
        if command == 'createcachetable' and 'table' in err and 'already exists' in err:
            out = 'Already exists.'
        else:
            if "Unknown command:" in err:
                # NOTE(review): this passes err in the out-slot of _fail —
                # confirm _fail's signature tolerates the shifted arguments.
                _fail(module, cmd, err, "Unknown django command: %s" % command)
            _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)

    changed = False

    lines = out.split('\n')
    # A module-level helper named "<command>_filter_output", when present,
    # decides which output lines indicate that something changed.
    filt = globals().get(command + "_filter_output", None)
    if filt:
        filtered_output = list(filter(filt, lines))
        if len(filtered_output):
            changed = filtered_output

    module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv,
                     settings=module.params['settings'], pythonpath=module.params['pythonpath'])
def main():
    """Create, update or delete a Webfaction website via the XML-RPC API.

    Resolves the target host to an IP, logs in with the supplied credentials,
    and reconciles the named site: no-op when an identical site exists,
    create/update when ``state=present``, delete when ``state=absent``.
    """
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(required=False, choices=['present', 'absent'], default='present'),
            # You can specify an IP address or hostname.
            host = dict(required=True),
            https = dict(required=False, type='bool', default=False),
            subdomains = dict(required=False, type='list', default=[]),
            site_apps = dict(required=False, type='list', default=[]),
            login_name = dict(required=True),
            login_password = dict(required=True, no_log=True),
        ),
        supports_check_mode=True
    )
    site_name = module.params['name']
    site_state = module.params['state']
    site_host = module.params['host']
    # The API keys sites by IP, so resolve the host name up front.
    site_ip = socket.gethostbyname(site_host)

    session_id, account = webfaction.login(
        module.params['login_name'],
        module.params['login_password']
    )

    site_list = webfaction.list_websites(session_id)
    site_map = dict([(i['name'], i) for i in site_list])
    existing_site = site_map.get(site_name)

    result = {}

    # Here's where the real stuff happens

    if site_state == 'present':

        # Does a site with this name already exist?
        if existing_site:

            # If yes, but it's on a different IP address, then fail.
            # If we wanted to allow relocation, we could add a 'relocate=true' option
            # which would get the existing IP address, delete the site there, and create it
            # at the new address.  A bit dangerous, perhaps, so for now we'll require manual
            # deletion if it's on another host.

            if existing_site['ip'] != site_ip:
                module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")

            # If it's on this host and the key parameters are the same, nothing needs to be done.

            if (existing_site['https'] == module.boolean(module.params['https'])) and \
               (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
               (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
                module.exit_json(
                    changed = False
                )

        positional_args = [
            session_id, site_name, site_ip,
            module.boolean(module.params['https']),
            module.params['subdomains'],
        ]
        # The API wants each app as an (app_name, mount_point) pair.
        for a in module.params['site_apps']:
            positional_args.append( (a[0], a[1]) )

        if not module.check_mode:
            # If this isn't a dry run, create or modify the site
            result.update(
                webfaction.create_website(
                    *positional_args
                ) if not existing_site else webfaction.update_website (
                    *positional_args
                )
            )

    elif site_state == 'absent':

        # If the site's already not there, nothing changed.
        if not existing_site:
            module.exit_json(
                changed = False,
            )

        if not module.check_mode:
            # If this isn't a dry run, delete the site
            result.update(
                webfaction.delete_website(session_id, site_name, site_ip)
            )

    else:
        module.fail_json(msg="Unknown state specified: {}".format(site_state))

    module.exit_json(
        changed = True,
        result = result
    )
def main():
    """Entry point for the aws_s3 module: dispatch on ``mode``.

    Supported modes: get, put, delete, create, geturl, getstr, delobj, list.
    Builds a boto3 S3 connection (optionally against RGW/Walrus/fakes3 via
    ``s3_url``), splits the requested canned ACLs into bucket-valid and
    object-valid sets, then performs the requested operation.  Most branches
    terminate the module themselves via exit_json/fail_json.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            bucket=dict(required=True),
            dest=dict(default=None),
            encrypt=dict(default=True, type='bool'),
            expiry=dict(default=600, type='int', aliases=['expiration']),
            headers=dict(type='dict'),
            marker=dict(default=""),
            max_keys=dict(default=1000, type='int'),
            metadata=dict(type='dict'),
            mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
            object=dict(),
            permission=dict(type='list', default=['private']),
            version=dict(default=None),
            overwrite=dict(aliases=['force'], default='always'),
            prefix=dict(default=""),
            retries=dict(aliases=['retry'], type='int', default=0),
            s3_url=dict(aliases=['S3_URL']),
            rgw=dict(default='no', type='bool'),
            src=dict(),
            ignore_nonexistent_bucket=dict(default=False, type='bool')
        ),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if module._name == 's3':
        module.deprecate("The 's3' module is being renamed 'aws_s3'", version=2.7)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore required for this module')

    bucket = module.params.get('bucket')
    encrypt = module.params.get('encrypt')
    expiry = module.params.get('expiry')
    dest = module.params.get('dest', '')
    headers = module.params.get('headers')
    marker = module.params.get('marker')
    max_keys = module.params.get('max_keys')
    metadata = module.params.get('metadata')
    mode = module.params.get('mode')
    obj = module.params.get('object')
    version = module.params.get('version')
    overwrite = module.params.get('overwrite')
    prefix = module.params.get('prefix')
    retries = module.params.get('retries')
    s3_url = module.params.get('s3_url')
    rgw = module.params.get('rgw')
    src = module.params.get('src')
    ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')

    if dest:
        dest = os.path.expanduser(dest)

    # Canned ACLs valid for objects vs. buckets differ; requested permissions
    # are partitioned into the two sets (and validated) further below.
    object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
    bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]

    # Legacy boolean overwrite values map onto 'always'/'never'.
    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)

    if region in ('us-east-1', '', None):
        # default to US Standard region
        location = 'us-east-1'
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    if module.params.get('object'):
        obj = module.params['object']
        # If there is a top level object, do nothing - if the object starts with /
        # remove the leading character to maintain compatibility with Ansible versions < 2.4
        if obj.startswith('/'):
            obj = obj[1:]

    # Bucket deletion does not require obj.  Prevents ambiguity with delobj.
    if obj and mode == "delete":
        module.fail_json(msg='Parameter obj cannot be used with mode=delete')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    # rgw requires an explicit url
    if rgw and not s3_url:
        module.fail_json(msg='rgw flavour requires s3_url')

    # Look at s3_url and tweak connection settings
    # if connecting to RGW, Walrus or fakes3
    for key in ['validate_certs', 'security_token', 'profile_name']:
        aws_connect_kwargs.pop(key, None)
    try:
        s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)
    except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
        module.fail_json(msg="Can't authorize connection. Check your credentials and profile.",
                         exceptions=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))

    validate = not ignore_nonexistent_bucket

    # separate types of ACLs
    bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
    object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
    error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
    if error_acl:
        module.fail_json(msg='Unknown permission specified: %s' % error_acl)

    # First, we check to see if the bucket exists, we get "bucket" returned.
    bucketrtn = bucket_check(module, s3, bucket, validate=validate)

    if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
        module.fail_json(msg="Source bucket cannot be found.")

    # If our mode is a GET operation (download), go through the procedure as appropriate ...
    if mode == 'get':
        # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        if keyrtn is False:
            module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))

        # If the destination path doesn't exist or overwrite is True, no need to do the md5um etag check, so just download.
        # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
        if path_check(dest):
            # Determine if the remote and local object are identical
            if keysum(module, s3, bucket, obj, version=version) == module.md5(dest):
                sum_matches = True
                if overwrite == 'always':
                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                else:
                    module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
            else:
                sum_matches = False
                if overwrite in ('always', 'different'):
                    download_s3file(module, s3, bucket, obj, dest, retries, version=version)
                else:
                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
        else:
            download_s3file(module, s3, bucket, obj, dest, retries, version=version)

    # if our mode is a PUT operation (upload), go through the procedure as appropriate ...
    if mode == 'put':
        # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
        # these were separated into the variables bucket_acl and object_acl above

        # Lets check the src path.
        if not path_check(src):
            module.fail_json(msg="Local object for PUT does not exist")

        # Lets check to see if bucket exists to get ground truth.
        if bucketrtn:
            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)

        # Lets check key state. Does it exist and if it does, compute the etag md5sum.
        if bucketrtn and keyrtn:
            # Compare the local and remote object
            if module.md5(src) == keysum(module, s3, bucket, obj):
                sum_matches = True
                if overwrite == 'always':
                    # only use valid object acls for the upload_s3file function
                    module.params['permission'] = object_acl
                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                else:
                    get_download_url(module, s3, bucket, obj, expiry, changed=False)
            else:
                sum_matches = False
                if overwrite in ('always', 'different'):
                    # only use valid object acls for the upload_s3file function
                    module.params['permission'] = object_acl
                    upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
                else:
                    module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")

        # If neither exist (based on bucket existence), we can create both.
        if not bucketrtn:
            # only use valid bucket acls for create_bucket function
            module.params['permission'] = bucket_acl
            create_bucket(module, s3, bucket, location)
            # only use valid object acls for the upload_s3file function
            module.params['permission'] = object_acl
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

        # If bucket exists but key doesn't, just upload.
        if bucketrtn and not keyrtn:
            # only use valid object acls for the upload_s3file function
            module.params['permission'] = object_acl
            upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)

    # Delete an object from a bucket, not the entire bucket
    if mode == 'delobj':
        if obj is None:
            module.fail_json(msg="object parameter is required")
        if bucket:
            deletertn = delete_key(module, s3, bucket, obj)
            if deletertn is True:
                module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Delete an entire bucket, including all objects in the bucket
    if mode == 'delete':
        if bucket:
            deletertn = delete_bucket(module, s3, bucket)
            if deletertn is True:
                module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
        else:
            module.fail_json(msg="Bucket parameter is required.")

    # Support for listing a set of keys
    if mode == 'list':
        exists = bucket_check(module, s3, bucket)

        # If the bucket does not exist then bail out
        if not exists:
            module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)

        list_keys(module, s3, bucket, prefix, marker, max_keys)

    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
    # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
    if mode == 'create':

        # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
        # these were separated above into the variables bucket_acl and object_acl

        if bucket and not obj:
            if bucketrtn:
                module.exit_json(msg="Bucket already exists.", changed=False)
            else:
                # only use valid bucket acls when creating the bucket
                module.params['permission'] = bucket_acl
                module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
        if bucket and obj:
            # Directory-style keys always carry a trailing slash.
            if obj.endswith('/'):
                dirobj = obj
            else:
                dirobj = obj + "/"
            if bucketrtn:
                if key_check(module, s3, bucket, dirobj):
                    module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
                else:
                    # setting valid object acls for the create_dirkey function
                    module.params['permission'] = object_acl
                    create_dirkey(module, s3, bucket, dirobj)
            else:
                # only use valid bucket acls for the create_bucket function
                module.params['permission'] = bucket_acl
                created = create_bucket(module, s3, bucket, location)
                # only use valid object acls for the create_dirkey function
                module.params['permission'] = object_acl
                create_dirkey(module, s3, bucket, dirobj)

    # Support for grabbing the time-expired URL for an object in S3/Walrus.
    if mode == 'geturl':
        if not bucket and not obj:
            module.fail_json(msg="Bucket and Object parameters must be set")

        keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
        if keyrtn:
            get_download_url(module, s3, bucket, obj, expiry)
        else:
            module.fail_json(msg="Key %s does not exist." % obj)

    if mode == 'getstr':
        if bucket and obj:
            keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
            if keyrtn:
                download_s3str(module, s3, bucket, obj, version=version)
            elif version is not None:
                module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
            else:
                module.fail_json(msg="Key %s does not exist." % obj)

    module.exit_json(failed=False)
def main():
    """Manage MySQL user accounts and their privileges (mysql_user).

    Connects to the ``mysql`` database (optionally trying an implicit
    passwordless root login first), then adds, modifies, or deletes the
    requested user according to ``state`` and the parsed ``priv`` string.
    """
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default=None),
            login_password=dict(default=None, no_log=True),
            login_host=dict(default="localhost"),
            login_port=dict(default=3306, type='int'),
            login_unix_socket=dict(default=None),
            user=dict(required=True, aliases=['name']),
            password=dict(default=None, no_log=True, type='str'),
            encrypted=dict(default=False, type='bool'),
            host=dict(default="localhost"),
            host_all=dict(type="bool", default="no"),
            state=dict(default="present", choices=["absent", "present"]),
            priv=dict(default=None),
            append_privs=dict(default=False, type='bool'),
            check_implicit_admin=dict(default=False, type='bool'),
            update_password=dict(default="always", choices=["always", "on_create"]),
            connect_timeout=dict(default=30, type='int'),
            config_file=dict(default="~/.my.cnf", type='path'),
            sql_log_bin=dict(default=True, type='bool'),
            ssl_cert=dict(default=None, type='path'),
            ssl_key=dict(default=None, type='path'),
            ssl_ca=dict(default=None, type='path'),
        ),
        supports_check_mode=True
    )
    login_user = module.params["login_user"]
    login_password = module.params["login_password"]
    user = module.params["user"]
    password = module.params["password"]
    encrypted = module.boolean(module.params["encrypted"])
    # Host part of the account name ('user'@'host'); compared lowercase.
    host = module.params["host"].lower()
    host_all = module.params["host_all"]
    state = module.params["state"]
    priv = module.params["priv"]
    check_implicit_admin = module.params['check_implicit_admin']
    connect_timeout = module.params['connect_timeout']
    config_file = module.params['config_file']
    append_privs = module.boolean(module.params["append_privs"])
    update_password = module.params['update_password']
    ssl_cert = module.params["ssl_cert"]
    ssl_key = module.params["ssl_key"]
    ssl_ca = module.params["ssl_ca"]
    db = 'mysql'
    sql_log_bin = module.params["sql_log_bin"]

    if not mysqldb_found:
        module.fail_json(msg="The MySQL-python module is required.")

    cursor = None
    try:
        if check_implicit_admin:
            # Best-effort passwordless root login; fall through on any error.
            try:
                cursor = mysql_connect(module, 'root', '', config_file, ssl_cert, ssl_key, ssl_ca, db,
                                       connect_timeout=connect_timeout)
            except:
                pass

        if not cursor:
            cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, db,
                                   connect_timeout=connect_timeout)
    except Exception as e:
        module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
                             "Exception message: %s" % (config_file, to_native(e)))

    if not sql_log_bin:
        # Keep these statements out of binary-log replication when requested.
        cursor.execute("SET SQL_LOG_BIN=0;")

    if priv is not None:
        try:
            mode = get_mode(cursor)
        except Exception as e:
            module.fail_json(msg=to_native(e), exception=traceback.format_exc())
        try:
            # Parse "db.table:PRIV1,PRIV2/..." into the internal structure.
            priv = privileges_unpack(priv, mode)
        except Exception as e:
            module.fail_json(msg="invalid privileges string: %s" % to_native(e))

    if state == "present":
        if user_exists(cursor, user, host, host_all):
            try:
                # update_password=on_create leaves the existing password alone.
                if update_password == 'always':
                    changed = user_mod(cursor, user, host, host_all, password, encrypted, priv, append_privs, module)
                else:
                    changed = user_mod(cursor, user, host, host_all, None, encrypted, priv, append_privs, module)
            except (SQLParseError, InvalidPrivsError, MySQLdb.Error) as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
        else:
            if host_all:
                module.fail_json(msg="host_all parameter cannot be used when adding a user")
            try:
                changed = user_add(cursor, user, host, host_all, password, encrypted, priv, module.check_mode)
            except (SQLParseError, InvalidPrivsError, MySQLdb.Error) as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    elif state == "absent":
        if user_exists(cursor, user, host, host_all):
            changed = user_delete(cursor, user, host, host_all, module.check_mode)
        else:
            changed = False
    module.exit_json(changed=changed, user=user)
def main():
    """Create (or optionally grow) a filesystem on a block device.

    Probes the device with ``blkid``; if the requested filesystem already
    exists it either exits unchanged, grows it when ``resizefs`` is set and
    the filesystem is smaller than the device, or fails when a different
    filesystem is present and ``force`` is off.  Otherwise runs the mapped
    mkfs command.
    """
    module = AnsibleModule(
        argument_spec = dict(
            fstype=dict(required=True, aliases=['type']),
            dev=dict(required=True, aliases=['device']),
            opts=dict(),
            force=dict(type='bool', default='no'),
            resizefs=dict(type='bool', default='no'),
        ),
        supports_check_mode=True,
    )

    # There is no "single command" to manipulate filesystems, so we map them all out and their options
    fs_cmd_map = {
        'ext2' : {
            'mkfs' : 'mkfs.ext2',
            'grow' : 'resize2fs',
            'grow_flag' : None,
            'force_flag' : '-F',
            'fsinfo': 'tune2fs',
        },
        'ext3' : {
            'mkfs' : 'mkfs.ext3',
            'grow' : 'resize2fs',
            'grow_flag' : None,
            'force_flag' : '-F',
            'fsinfo': 'tune2fs',
        },
        'ext4' : {
            'mkfs' : 'mkfs.ext4',
            'grow' : 'resize2fs',
            'grow_flag' : None,
            'force_flag' : '-F',
            'fsinfo': 'tune2fs',
        },
        'reiserfs' : {
            'mkfs' : 'mkfs.reiserfs',
            'grow' : 'resize_reiserfs',
            'grow_flag' : None,
            'force_flag' : '-f',
            'fsinfo': 'reiserfstune',
        },
        'ext4dev' : {
            'mkfs' : 'mkfs.ext4',
            'grow' : 'resize2fs',
            'grow_flag' : None,
            'force_flag' : '-F',
            'fsinfo': 'tune2fs',
        },
        'xfs' : {
            'mkfs' : 'mkfs.xfs',
            'grow' : 'xfs_growfs',
            'grow_flag' : None,
            'force_flag' : '-f',
            'fsinfo': 'xfs_growfs',
        },
        'btrfs' : {
            'mkfs' : 'mkfs.btrfs',
            'grow' : 'btrfs',
            'grow_flag' : 'filesystem resize',
            'force_flag' : '-f',
            'fsinfo': 'btrfs',
        }
    }

    dev = module.params['dev']
    fstype = module.params['fstype']
    opts = module.params['opts']
    force = module.boolean(module.params['force'])
    resizefs = module.boolean(module.params['resizefs'])

    changed = False

    # Unsupported filesystem types exit cleanly with a warning (not a failure).
    try:
        _ = fs_cmd_map[fstype]
    except KeyError:
        module.exit_json(changed=False, msg="WARNING: module does not support this filesystem yet. %s" % fstype)

    mkfscmd = fs_cmd_map[fstype]['mkfs']
    force_flag = fs_cmd_map[fstype]['force_flag']
    growcmd = fs_cmd_map[fstype]['grow']
    fssize_cmd = fs_cmd_map[fstype]['fsinfo']

    if not os.path.exists(dev):
        module.fail_json(msg="Device %s not found."%dev)

    # Probe the device for an existing filesystem signature.
    cmd = module.get_bin_path('blkid', required=True)
    rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
    fs = raw_fs.strip()

    if fs == fstype and resizefs is False and not force:
        module.exit_json(changed=False)
    elif fs == fstype and resizefs is True:
        # Get dev and fs size and compare
        devsize_in_bytes = _get_dev_size(dev, module)
        fssize_in_bytes = _get_fs_size(fssize_cmd, dev, module)
        if fssize_in_bytes < devsize_in_bytes:
            fs_smaller = True
        else:
            fs_smaller = False

        if module.check_mode and fs_smaller:
            module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (fstype,dev))
        elif module.check_mode and not fs_smaller:
            module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (fstype, dev))
        elif fs_smaller:
            # NOTE(review): 'grow_flag' from the map is never applied here —
            # confirm whether btrfs grow needs its 'filesystem resize' flag.
            cmd = module.get_bin_path(growcmd, required=True)
            rc,out,err = module.run_command("%s %s" % (cmd, dev))
            # Sadly there is no easy way to determine if this has changed. For now, just say "true" and move on.
            #  in the future, you would have to parse the output to determine this.
            #  thankfully, these are safe operations if no change is made.
            if rc == 0:
                module.exit_json(changed=True, msg=out)
            else:
                module.fail_json(msg="Resizing filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err)
        else:
            module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (fstype, dev))
    elif fs and not force:
        module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite"%(dev,fs), rc=rc, err=err)

    ### create fs
    if module.check_mode:
        changed = True
    else:
        mkfs = module.get_bin_path(mkfscmd, required=True)
        cmd = None
        if opts is None:
            cmd = "%s %s '%s'" % (mkfs, force_flag, dev)
        else:
            cmd = "%s %s %s '%s'" % (mkfs, force_flag, opts, dev)
        rc,_,err = module.run_command(cmd)
        if rc == 0:
            changed = True
        else:
            module.fail_json(msg="Creating filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err)

    module.exit_json(changed=changed)
def main():
    """Manage the UFW firewall: state, default policy, logging, and rules.

    Commands are assembled as lists of ``[condition, text]`` pairs; ``execute``
    keeps only the pairs whose condition is truthy and joins their last
    elements into the final ufw command line.  Change detection compares
    ``ufw status verbose`` output and the user rule files before and after.
    """
    module = AnsibleModule(
        argument_spec = dict(
            state     = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']),
            default   = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']),
            logging   = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']),
            direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
            delete    = dict(default=False, type='bool'),
            route     = dict(default=False, type='bool'),
            insert    = dict(default=None),
            rule      = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']),
            interface = dict(default=None, aliases=['if']),
            log       = dict(default=False, type='bool'),
            from_ip   = dict(default='any', aliases=['src', 'from']),
            from_port = dict(default=None),
            to_ip     = dict(default='any', aliases=['dest', 'to']),
            to_port   = dict(default=None, aliases=['port']),
            proto     = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']),
            app       = dict(default=None, aliases=['name']),
            comment   = dict(default=None, type='str')
        ),
        supports_check_mode = True,
        mutually_exclusive = [['app', 'proto', 'logging']]
    )

    cmds = []

    def execute(cmd):
        # cmd is a list of [condition, text] pairs: keep pairs whose first
        # element is truthy, join their last elements into the command line.
        cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))

        cmds.append(cmd)
        (rc, out, err) = module.run_command(cmd)

        if rc != 0:
            module.fail_json(msg=err or out)

    def ufw_version():
        """
        Returns the major and minor version of ufw installed on the system.
        """
        rc, out, err = module.run_command("%s --version" % ufw_bin)
        if rc != 0:
            module.fail_json(
                msg="Failed to get ufw version.", rc=rc, out=out, err=err
            )

        lines = [x for x in out.split('\n') if x.strip() != '']
        if len(lines) == 0:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
        if matches is None:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        # Convert version to numbers
        major = int(matches.group(1))
        minor = int(matches.group(2))
        rev = 0
        if matches.group(3) is not None:
            rev = int(matches.group(3))

        return major, minor, rev

    params = module.params

    # Ensure at least one of the command arguments are given
    command_keys = ['state', 'default', 'rule', 'logging']
    commands = dict((key, params[key]) for key in command_keys if params[key])

    if len(commands) < 1:
        module.fail_json(msg="Not any of the command arguments %s given" % commands)

    if(params['interface'] is not None and params['direction'] is None):
        module.fail_json(msg="Direction must be specified when creating a rule on an interface")

    # Ensure ufw is available
    ufw_bin = module.get_bin_path('ufw', True)

    # Save the pre state and rules in order to recognize changes
    (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")

    # Execute commands
    for (command, value) in commands.items():
        cmd = [[ufw_bin], [module.check_mode, '--dry-run']]

        if command == 'state':
            states = { 'enabled': 'enable',  'disabled': 'disable',
                       'reloaded': 'reload', 'reset':    'reset' }
            # -f suppresses ufw's interactive confirmation prompt.
            execute(cmd + [['-f'], [states[value]]])

        elif command == 'logging':
            execute(cmd + [[command], [value]])

        elif command == 'default':
            execute(cmd + [[command], [value], [params['direction']]])

        elif command == 'rule':
            # Rules are constructed according to the long format
            #
            # ufw [--dry-run] [delete] [insert NUM] [route] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
            #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
            #     [proto protocol] [app application] [comment COMMENT]
            cmd.append([module.boolean(params['delete']), 'delete'])
            cmd.append([module.boolean(params['route']), 'route'])
            cmd.append([params['insert'], "insert %s" % params['insert']])
            cmd.append([value])
            cmd.append([params['direction'], "%s" % params['direction']])
            cmd.append([params['interface'], "on %s" % params['interface']])
            cmd.append([module.boolean(params['log']), 'log'])

            for (key, template) in [('from_ip',   "from %s" ), ('from_port', "port %s" ),
                                    ('to_ip',     "to %s"   ), ('to_port',   "port %s" ),
                                    ('proto',     "proto %s"), ('app',       "app '%s'")]:

                value = params[key]
                cmd.append([value, template % (value)])

            ufw_major, ufw_minor, _ = ufw_version()
            # comment is supported only in ufw version after 0.35
            if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
                cmd.append([params['comment'], "comment '%s'" % params['comment']])

            execute(cmd)

    # Get the new state
    (_, post_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")
    changed = (pre_state != post_state) or (pre_rules != post_rules)

    return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
def main():
    """Ensure a Google Cloud DNS resource record is present or absent.

    Reads the desired record from module params, connects via
    gcdns_connect(), verifies the target zone exists, builds a
    before/after diff, and delegates the actual change to
    create_record() / remove_record().
    """

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            record=dict(required=True, aliases=['name'], type='str'),
            zone=dict(type='str'),
            zone_id=dict(type='str'),
            type=dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
            record_data=dict(aliases=['value'], type='list'),
            ttl=dict(default=300, type='int'),
            overwrite=dict(default=False, type='bool'),
            service_account_email=dict(type='str'),
            pem_file=dict(type='path'),
            credentials_file=dict(type='path'),
            project_id=dict(type='str')
        ),
        # record_data is mandatory when creating, and also when overwrite is
        # disabled (so an existing record can be compared against it).
        required_if=[
            ('state', 'present', ['record_data']),
            ('overwrite', False, ['record_data'])
        ],
        required_one_of=[['zone', 'zone_id']],
        supports_check_mode=True
    )

    _sanity_check(module)

    record_name = module.params['record']
    record_type = module.params['type']
    state = module.params['state']
    ttl = module.params['ttl']
    zone_name = module.params['zone']
    zone_id = module.params['zone_id']

    # Seed the JSON result with the user-supplied values; zone/zone_id are
    # overwritten below with the authoritative values from the API.
    json_output = dict(
        state=state,
        record=record_name,
        zone=zone_name,
        zone_id=zone_id,
        type=record_type,
        record_data=module.params['record_data'],
        ttl=ttl,
        overwrite=module.boolean(module.params['overwrite'])
    )

    # Google Cloud DNS wants the trailing dot on all DNS names.
    if zone_name is not None and zone_name[-1] != '.':
        zone_name = zone_name + '.'
    if record_name[-1] != '.':
        record_name = record_name + '.'

    # Build a connection object that we can use to connect with Google Cloud
    # DNS.
    gcdns = gcdns_connect(module, provider=PROVIDER)

    # We need to check that the zone we're creating a record for actually
    # exists.
    zone = _get_zone(gcdns, zone_name, zone_id)
    if zone is None and zone_name is not None:
        module.fail_json(
            msg='zone name was not found: %s' % zone_name,
            changed=False
        )
    elif zone is None and zone_id is not None:
        module.fail_json(
            msg='zone id was not found: %s' % zone_id,
            changed=False
        )

    # Populate the returns with the actual zone information.
    json_output['zone'] = zone.domain
    json_output['zone_id'] = zone.id

    # We also need to check if the record we want to create or remove actually
    # exists.
    try:
        record = _get_record(gcdns, zone, record_type, record_name)
    except InvalidRequestError:
        # We gave Google Cloud DNS an invalid DNS record name.
        module.fail_json(
            msg='record name is invalid: %s' % record_name,
            changed=False
        )

    _additional_sanity_checks(module, zone)

    diff = dict()

    # Build the 'before' diff from the record as it currently exists (empty
    # when the record is absent).
    if record is None:
        diff['before'] = ''
        diff['before_header'] = '<absent>'
    else:
        diff['before'] = dict(
            record=record.data['name'],
            type=record.data['type'],
            record_data=record.data['rrdatas'],
            ttl=record.data['ttl']
        )
        diff['before_header'] = "%s:%s" % (record_type, record_name)

    # Create, remove, or modify the record.  `state` is restricted to
    # present/absent by the argument spec, so `changed` is always assigned.
    if state == 'present':
        diff['after'] = dict(
            record=record_name,
            type=record_type,
            record_data=module.params['record_data'],
            ttl=ttl
        )
        diff['after_header'] = "%s:%s" % (record_type, record_name)

        changed = create_record(module, gcdns, zone, record)

    elif state == 'absent':
        diff['after'] = ''
        diff['after_header'] = '<absent>'

        changed = remove_record(module, gcdns, record)

    module.exit_json(changed=changed, diff=diff, **json_output)
def main():
    """Create, resize, activate/deactivate, or remove an LVM logical volume.

    Size may be absolute ("512", "10g") or a percentage of VG/PVS/FREE
    ("80%FREE").  All work is done by shelling out to the LVM user-space
    tools (lvcreate, lvextend, lvreduce, lvremove, lvchange); check mode is
    implemented by passing LVM's own --test flag.
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(required=True),
            lv=dict(required=True),
            size=dict(type='str'),
            opts=dict(type='str'),
            state=dict(choices=["absent", "present"], default='present'),
            force=dict(type='bool', default='no'),
            shrink=dict(type='bool', default='yes'),
            active=dict(type='bool', default='yes'),
            snapshot=dict(type='str', default=None),
            pvs=dict(type='str')
        ),
        supports_check_mode=True,
    )

    # Determine if the "--yes" option should be used
    version_found = get_lvm_version(module)
    if version_found is None:
        module.fail_json(msg="Failed to get LVM version number")
    version_yesopt = mkversion(2, 2, 99)  # First LVM with the "--yes" option
    if version_found >= version_yesopt:
        yesopt = "--yes"
    else:
        yesopt = ""

    vg = module.params['vg']
    lv = module.params['lv']
    size = module.params['size']
    opts = module.params['opts']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    shrink = module.boolean(module.params['shrink'])
    active = module.boolean(module.params['active'])
    # Defaults: -L (absolute size) in megabytes; switched to -l (extents)
    # below when a percentage size is given.
    size_opt = 'L'
    size_unit = 'm'
    snapshot = module.params['snapshot']
    pvs = module.params['pvs']

    # pvs is passed as a comma-separated string; LVM tools want spaces.
    if pvs is None:
        pvs = ""
    else:
        pvs = pvs.replace(",", " ")

    if opts is None:
        opts = ""

    # Add --test option when running in check-mode
    if module.check_mode:
        test_opt = ' --test'
    else:
        test_opt = ''

    if size:
        # LVCREATE(8) -l --extents option with percentage
        if '%' in size:
            size_parts = size.split('%', 1)
            size_percent = int(size_parts[0])
            if size_percent > 100:
                module.fail_json(msg="Size percentage cannot be larger than 100%")
            size_whole = size_parts[1]
            if size_whole == 'ORIGIN':
                module.fail_json(msg="Snapshot Volumes are not supported")
            elif size_whole not in ['VG', 'PVS', 'FREE']:
                module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
            size_opt = 'l'
            size_unit = ''

        if not '%' in size:
            # LVCREATE(8) -L --size option unit
            if size[-1].lower() in 'bskmgtpe':
                size_unit = size[-1].lower()
                size = size[0:-1]

            try:
                # Must be numeric and must not start with a sign character.
                float(size)
                if not size[0].isdigit():
                    raise ValueError()
            except ValueError:
                module.fail_json(msg="Bad size specification of '%s'" % size)

    # when no unit, megabytes by default
    if size_opt == 'l':
        unit = 'm'
    else:
        unit = size_unit

    # Get information on volume group requested
    vgs_cmd = module.get_bin_path("vgs", required=True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    vgs = parse_vgs(current_vgs)
    this_vg = vgs[0]

    # Get information on logical volume requested
    lvs_cmd = module.get_bin_path("lvs", required=True)
    rc, current_lvs, err = module.run_command(
        "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    changed = False

    lvs = parse_lvs(current_lvs)

    # When creating a snapshot, the LV we look for is the snapshot name.
    if snapshot is None:
        check_lv = lv
    else:
        check_lv = snapshot

    for test_lv in lvs:
        if test_lv['name'] == check_lv:
            this_lv = test_lv
            break
    else:
        this_lv = None

    if state == 'present' and not size:
        if this_lv is None:
            module.fail_json(msg="No size given.")

    msg = ''
    if this_lv is None:
        if state == 'present':
            # Create the LV (or snapshot of an existing LV).
            lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
            if snapshot is not None:
                cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
            else:
                cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
            rc, _, err = module.run_command(cmd)
            if rc == 0:
                changed = True
            else:
                module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
    else:
        if state == 'absent':
            # Remove the LV; destructive, so require force=yes.
            if not force:
                module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
            lvremove_cmd = module.get_bin_path("lvremove", required=True)
            rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)

        elif not size:
            # LV exists and no size requested: nothing to resize.
            pass

        elif size_opt == 'l':
            # Resize LV based on a percentage of VG/PVS/FREE.
            tool = None
            size_free = this_vg['free']
            if size_whole == 'VG' or size_whole == 'PVS':
                size_requested = size_percent * this_vg['size'] / 100
            else:
                # size_whole == 'FREE':
                size_requested = size_percent * this_vg['free'] / 100
            # A leading '+' means "grow by", relative to the current size.
            if '+' in size:
                size_requested += this_lv['size']
            if this_lv['size'] < size_requested:
                if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
                    tool = module.get_bin_path("lvextend", required=True)
                else:
                    module.fail_json(
                        msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
                            (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
                    )
            elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']:  # more than an extent too large
                if size_requested == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                elif not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                # "Reached maximum COW size" indicates a full snapshot; treat
                # as failure even though the tool may exit 0.
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                    msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

        else:
            # Resize LV based on absolute values.
            tool = None
            if int(size) > this_lv['size']:
                tool = module.get_bin_path("lvextend", required=True)
            elif shrink and int(size) < this_lv['size']:
                if int(size) == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                if not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

    # Finally reconcile the activation state; these branches exit the module
    # themselves, so the trailing exit_json only runs for the create path.
    if this_lv is not None:
        if active:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
        else:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)

    module.exit_json(changed=changed, msg=msg)
def main():
    """Manage a BGP policy filter (non-exist/advertise/suppress) on a PAN-OS
    virtual router.

    Locates the BGP config under the named virtual router, finds or creates
    the requested policy object, builds the filter from module params, and
    applies the desired state via the connection helper.
    """
    helper = get_connection(
        template=True,
        template_stack=True,
        with_classic_provider_spec=True,
        argument_spec=setup_args(),
    )

    module = AnsibleModule(
        argument_spec=helper.argument_spec,
        supports_check_mode=True,
        required_one_of=helper.required_one_of,
    )

    parent = helper.get_pandevice_parent(module)

    # Refresh the virtual router from the device so we operate on live config.
    vr = VirtualRouter(module.params['vr_name'])
    parent.add(vr)
    try:
        vr.refresh()
    except PanDeviceError as e:
        module.fail_json(msg='Failed refresh: {0}'.format(e))

    bgp = vr.find('', Bgp)
    if bgp is None:
        module.fail_json(msg='BGP is not configured for virtual router {0}'.format(vr.name))

    # Select the policy container class and find-or-create the named policy.
    policy = None
    if module.params['policy_type'] == 'conditional-advertisement':
        policy_cls = BgpPolicyConditionalAdvertisement
    else:
        policy_cls = BgpPolicyAggregationAddress
    policy = bgp.find_or_create(module.params['policy_name'], policy_cls)

    # Map filter_type to the concrete pandevice filter class.
    obj_type = None
    if module.params['filter_type'] == 'non-exist':
        obj_type = BgpPolicyNonExistFilter
    elif module.params['filter_type'] == 'advertise':
        obj_type = BgpPolicyAdvertiseFilter
    elif module.params['filter_type'] == 'suppress':
        obj_type = BgpPolicySuppressFilter
    else:
        module.fail_json(msg='Unknown filter_type: {0}'.format(module.params['filter_type']))
    # Existing filters of this type, used for state reconciliation below.
    listing = policy.findall(obj_type)

    # Build the filter object from the matching module params.
    spec = {
        'name': module.params['name'],
        'enable': module.params['enable'],
        'match_afi': module.params['match_afi'],
        'match_safi': module.params['match_safi'],
        'match_route_table': module.params['match_route_table'],
        'match_nexthop': module.params['match_nexthop'],
        'match_from_peer': module.params['match_from_peer'],
        'match_med': module.params['match_med'],
        'match_as_path_regex': module.params['match_as_path_regex'],
        'match_community_regex': module.params['match_community_regex'],
        'match_extended_community_regex': module.params['match_extended_community_regex'],
    }
    obj = obj_type(**spec)
    policy.add(obj)

    # Handle address prefixes.  Each entry may be a plain name or a dict with
    # "name" and optional "exact".
    for x in module.params['address_prefix']:
        if isinstance(x, dict):
            if 'name' not in x:
                module.fail_json(msg='Address prefix dict requires "name": {0}'.format(x))
            obj.add(BgpPolicyAddressPrefix(
                to_text(x['name'], encoding='utf-8', errors='surrogate_or_strict'),
                None if x.get('exact') is None else module.boolean(x['exact']),
            ))
        else:
            obj.add(BgpPolicyAddressPrefix(to_text(x, encoding='utf-8', errors='surrogate_or_strict')))

    # Deprecated escape hatch: serialize the object and return it instead of
    # applying it.  NOTE(review): pickling is only safe here because the data
    # is produced locally, never from untrusted input.
    if module.params['state'] == 'return-object':
        module.deprecate('state=return-object is deprecated', '2.12')
        import pickle
        from base64 import b64encode
        obj.parent = None
        panos_obj = b64encode(pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL))
        module.exit_json(msg='returning serialized object', panos_obj=panos_obj)

    changed = helper.apply_state(obj, listing, module)

    if changed and module.params['commit']:
        helper.commit(module)

    module.exit_json(changed=changed, msg='done')
def main():
    """Install, update, or uninstall rhos-release repositories.

    Translates module options into rhos-release CLI flags, assembles one
    shell command line (possibly several ';'-joined invocations when
    director repos are also needed), and runs it via _run_command().
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default="install", choices=['install', 'uninstall', 'update']),
            release=dict(),
            build_date=dict(),
            director=dict(type='bool', default=True),
            director_build_date=dict(),
            pin_puddle=dict(default=True),
            enable_poodle_repos=dict(default=False),
            poodle_type=dict(choices=POODLE_TYPES.keys()),
            target_directory=dict(),
            distro_version=dict(),
            source_hostname=dict(),
            enable_flea_repos=dict(default=False),
            one_shot_mode=dict(default=False),
            buildmods=dict(type='list'),
            discover_build=dict(type='bool', default=False),
            enable_testing_repos=dict(),
            without_ceph=dict(type='bool', default=False),
        )
    )
    base_cmd = 'rhos-release'

    state = module.params['state']
    repo_directory = module.params['target_directory']
    release = module.params['release']
    puddle = module.params['build_date']
    director = module.params['director']
    director_puddle = module.params['director_build_date']
    distro_version = module.params['distro_version']
    pin_puddle = module.params['pin_puddle']
    enable_poodle_repos = module.params['enable_poodle_repos']
    source_hostname = module.params['source_hostname']
    enable_flea_repos = module.params['enable_flea_repos']
    one_shot_mode = module.params['one_shot_mode']
    # FIX: 'buildmods' has no default in the argument spec, so an omitted
    # option arrives as None and both `"cdn" in buildmods` and the later
    # iteration would raise TypeError.  Treat None as "no build modifiers".
    buildmods = module.params['buildmods'] or []
    discover_build = module.params['discover_build']
    enable_testing_repos = module.params['enable_testing_repos']
    without_ceph = module.params['without_ceph']

    # Only pass -t when a non-default target directory was requested.
    repo_args = ['-t', str(repo_directory)] if repo_directory and \
        repo_directory != DEFAULT_TARGET_DIR else []

    if discover_build and not puddle:
        puddle = do_build_discover()

    # Convert each option into the argv fragment it contributes (empty list
    # when the option is unset/false).
    puddle = ['-p', str(puddle)] if puddle else []
    pin_puddle = ['-P'] if module.boolean(pin_puddle) else []
    enable_poodle_repos = ['-d'] if module.boolean(enable_poodle_repos) else []
    director_puddle = ['-p', str(director_puddle)] if director_puddle else []
    distro_version = ['-r', distro_version] if distro_version else []
    poodle_type = POODLE_TYPES.get(module.params['poodle_type'], [])
    source_hostname = ['-H', source_hostname] if source_hostname else []
    enable_flea_repos = ['-f'] if module.boolean(enable_flea_repos) else []
    one_shot_mode = ['-O'] if module.boolean(one_shot_mode) else []
    enable_testing_repos = ['-T', str(enable_testing_repos)] if enable_testing_repos else []
    without_ceph = ['--without-ceph'] if module.boolean(without_ceph) else []

    # Build-modifier keywords mapped to their CLI flags ('cdn' adds no flag;
    # it is handled by suffixing the release below).
    mods = {
        'pin': '-P',
        'flea': '-f',
        'unstable': '--unstable',
        'cdn': ''
    }

    cmd = []
    if state == 'uninstall':
        cmd = [base_cmd, '-x']
        cmd.extend(repo_args)
    elif state in ['install', 'update']:
        if not release:
            _fail(module, "'release' option should be specified.", cmd)
        if "cdn" in buildmods:
            release = str(release) + "-cdn"
        releases = [(str(release), puddle)]
        try:
            # OSP 7-9 also need the matching "-director" repos first.
            if 10 > int(release) > 6 and director:
                releases = [(str(release) + '-director', director_puddle)] + releases
        except ValueError:
            # RDO versions & CDN shouldn't try to get director repos
            pass

        for release, build in releases:
            if state == 'update':
                cmd.extend([base_cmd, '-u'])
            else:
                cmd.extend([base_cmd, release])

            # buildmods == ['none'] explicitly disables all modifiers.
            if not(len(buildmods) == 1 and 'none' in buildmods):
                for buildmod in buildmods:
                    cmd.append(mods[buildmod.lower()])

            cmd.extend(enable_poodle_repos)
            # cmd.extend(enable_flea_repos)
            cmd.extend(poodle_type)
            # cmd.extend(pin_puddle)
            cmd.extend(build)
            cmd.extend(distro_version)
            cmd.extend(source_hostname)
            cmd.extend(one_shot_mode)
            cmd.extend(enable_testing_repos)
            cmd.extend(repo_args)
            cmd.extend(without_ceph)
            # ';' joins the per-release invocations into one shell line.
            cmd.append(';')

    _run_command(module, ['sh', '-c', ' '.join(cmd)])
def main():
    """Create, extend/reduce, resize, or remove an LVM volume group.

    Shells out to the LVM user-space tools (pvs/vgs/pvcreate/vgcreate/
    vgextend/vgreduce/pvresize/vgremove).  Symlinked device paths are
    resolved to real paths first, because LVM reports real paths.
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            pvs=dict(type='list'),
            pesize=dict(type='str', default='4'),
            pv_options=dict(type='str', default=''),
            pvresize=dict(type='bool', default=False),
            vg_options=dict(type='str', default=''),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            force=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    vg = module.params['vg']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    pvresize = module.boolean(module.params['pvresize'])
    pesize = module.params['pesize']
    pvoptions = module.params['pv_options'].split()
    vgoptions = module.params['vg_options'].split()

    dev_list = []
    if module.params['pvs']:
        dev_list = list(module.params['pvs'])
    elif state == 'present':
        module.fail_json(msg="No physical volumes given.")

    # LVM always uses real paths not symlinks so replace symlinks with actual path
    for idx, dev in enumerate(dev_list):
        dev_list[idx] = os.path.realpath(dev)

    if state == 'present':
        # check given devices
        for test_dev in dev_list:
            if not os.path.exists(test_dev):
                module.fail_json(msg="Device %s not found." % test_dev)

    # Get the PV list, filtered to the requested devices (both as given and
    # as resolved real paths) plus anything already in the target VG.
    pvs_cmd = module.get_bin_path('pvs', True)
    if dev_list:
        pvs_filter_pv_name = ' || '.join(
            'pv_name = {0}'.format(x)
            for x in itertools.chain(dev_list, module.params['pvs'])
        )
        pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
        pvs_filter = "--select '{0} || {1}' ".format(
            pvs_filter_pv_name, pvs_filter_vg_name)
    else:
        pvs_filter = ''
    rc, current_pvs, err = module.run_command(
        "%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
    if rc != 0:
        module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)

    # Refuse to steal a PV that already belongs to a *different* VG.
    pvs = parse_pvs(module, current_pvs)
    used_pvs = [pv for pv in pvs
                if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
    if used_pvs:
        module.fail_json(msg="Device %s is already in %s volume group."
                             % (used_pvs[0]['name'], used_pvs[0]['vg_name']))

    vgs_cmd = module.get_bin_path('vgs', True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)

    if rc != 0:
        module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)

    changed = False

    vgs = parse_vgs(current_vgs)

    for test_vg in vgs:
        if test_vg['name'] == vg:
            this_vg = test_vg
            break
    else:
        this_vg = None

    if this_vg is None:
        if state == 'present':
            # create VG
            if module.check_mode:
                changed = True
            else:
                # create PV
                pvcreate_cmd = module.get_bin_path('pvcreate', True)
                for current_dev in dev_list:
                    rc, _, err = module.run_command(
                        [pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(
                            msg="Creating physical volume '%s' failed" % current_dev,
                            rc=rc, err=err)
                # FIX: require the vgcreate binary like every other tool
                # lookup here; without required=True a missing binary yields
                # None and run_command fails with an opaque TypeError instead
                # of a clean "vgcreate not found" module failure.
                vgcreate_cmd = module.get_bin_path('vgcreate', True)
                rc, _, err = module.run_command(
                    [vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list)
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Creating volume group '%s' failed" % vg,
                                     rc=rc, err=err)
    else:
        if state == 'absent':
            if module.check_mode:
                module.exit_json(changed=True)
            else:
                if this_vg['lv_count'] == 0 or force:
                    # remove VG
                    vgremove_cmd = module.get_bin_path('vgremove', True)
                    rc, _, err = module.run_command(
                        "%s --force %s" % (vgremove_cmd, vg))
                    if rc == 0:
                        module.exit_json(changed=True)
                    else:
                        module.fail_json(
                            msg="Failed to remove volume group %s" % (vg),
                            rc=rc, err=err)
                else:
                    module.fail_json(
                        msg="Refuse to remove non-empty volume group %s without force=yes" % (vg))

        # resize VG: compute device sets to add/remove relative to the VG.
        current_devs = [
            os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg
        ]
        devs_to_remove = list(set(current_devs) - set(dev_list))
        devs_to_add = list(set(dev_list) - set(current_devs))

        if current_devs:
            if state == 'present' and pvresize:
                for device in current_devs:
                    pvresize_cmd = module.get_bin_path('pvresize', True)
                    pvdisplay_cmd = module.get_bin_path('pvdisplay', True)
                    pvdisplay_ops = ["--units", "b", "--columns",
                                     "--noheadings", "--nosuffix"]
                    pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops
                    # Query sizes in bytes to decide whether a pvresize would
                    # actually gain at least one extent.
                    rc, dev_size, err = module.run_command(
                        pvdisplay_cmd_device_options + ["-o", "dev_size"])
                    dev_size = int(dev_size.replace(" ", ""))
                    rc, pv_size, err = module.run_command(
                        pvdisplay_cmd_device_options + ["-o", "pv_size"])
                    pv_size = int(pv_size.replace(" ", ""))
                    rc, pe_start, err = module.run_command(
                        pvdisplay_cmd_device_options + ["-o", "pe_start"])
                    pe_start = int(pe_start.replace(" ", ""))
                    rc, vg_extent_size, err = module.run_command(
                        pvdisplay_cmd_device_options + ["-o", "vg_extent_size"])
                    vg_extent_size = int(vg_extent_size.replace(" ", ""))
                    if (dev_size - (pe_start + pv_size)) > vg_extent_size:
                        if module.check_mode:
                            changed = True
                        else:
                            rc, _, err = module.run_command(
                                [pvresize_cmd, device])
                            if rc != 0:
                                module.fail_json(
                                    msg="Failed executing pvresize command.",
                                    rc=rc, err=err)
                            else:
                                changed = True

            if devs_to_add or devs_to_remove:
                if module.check_mode:
                    changed = True
                else:
                    if devs_to_add:
                        devs_to_add_string = ' '.join(devs_to_add)
                        # create PV
                        pvcreate_cmd = module.get_bin_path('pvcreate', True)
                        for current_dev in devs_to_add:
                            rc, _, err = module.run_command(
                                [pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
                            if rc == 0:
                                changed = True
                            else:
                                module.fail_json(
                                    msg="Creating physical volume '%s' failed" % current_dev,
                                    rc=rc, err=err)
                        # add PV to our VG
                        vgextend_cmd = module.get_bin_path('vgextend', True)
                        rc, _, err = module.run_command(
                            "%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
                        if rc == 0:
                            changed = True
                        else:
                            module.fail_json(msg="Unable to extend %s by %s."
                                                 % (vg, devs_to_add_string),
                                             rc=rc, err=err)

                    # remove some PV from our VG
                    if devs_to_remove:
                        devs_to_remove_string = ' '.join(devs_to_remove)
                        vgreduce_cmd = module.get_bin_path('vgreduce', True)
                        rc, _, err = module.run_command(
                            "%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
                        if rc == 0:
                            changed = True
                        else:
                            module.fail_json(msg="Unable to reduce %s by %s."
                                                 % (vg, devs_to_remove_string),
                                             rc=rc, err=err)

    module.exit_json(changed=changed)
def main():
    """Create, resize, change policy of, or remove an AIX logical volume.

    Uses the AIX LVM commands (lsvg, lslv, mklv, rmlv, chlv, extendlv).
    Check mode is simulated by prefixing each command with 'echo'.  Sizes
    are converted to MB and rounded up to the VG's physical-partition size.
    """
    module = AnsibleModule(
        argument_spec=dict(vg=dict(required=True, type='str'),
                           lv=dict(required=True, type='str'),
                           lv_type=dict(default='jfs2', type='str'),
                           size=dict(type='str'),
                           opts=dict(default='', type='str'),
                           copies=dict(default='1', type='str'),
                           state=dict(choices=["absent", "present"],
                                      default='present'),
                           shrink=dict(type='bool', default='yes'),
                           policy=dict(choices=["maximum", "minimum"],
                                       default='maximum'),
                           pvs=dict(type='list', default=list())),
        supports_check_mode=True,
    )

    vg = module.params['vg']
    lv = module.params['lv']
    lv_type = module.params['lv_type']
    size = module.params['size']
    opts = module.params['opts']
    copies = module.params['copies']
    policy = module.params['policy']
    state = module.params['state']
    shrink = module.boolean(module.params['shrink'])
    pvs = module.params['pvs']

    pv_list = ' '.join(pvs)

    # mklv/chlv -e flag: 'x' = maximum (spread across PVs), 'm' = minimum.
    if policy == 'maximum':
        lv_policy = 'x'
    else:
        lv_policy = 'm'

    # Add echo command when running in check-mode
    if module.check_mode:
        test_opt = 'echo '
    else:
        test_opt = ''

    # check if system commands are available
    lsvg_cmd = module.get_bin_path("lsvg", required=True)
    lslv_cmd = module.get_bin_path("lslv", required=True)

    # Get information on volume group requested
    rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False,
                             msg="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg,
                             rc=rc, out=vg_info, err=err)

    this_vg = parse_vg(vg_info)

    if size is not None:
        # Calculate pp size and round it up based on pp size.
        lv_size = round_ppsize(convert_size(module, size),
                               base=this_vg['pp_size'])

    # Get information on logical volume requested
    rc, lv_info, err = module.run_command("%s %s" % (lslv_cmd, lv))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False,
                             msg="Logical Volume %s does not exist." % lv)
        # NOTE: on lslv failure with state=present we deliberately fall
        # through; parse_lv on the failed output is presumed to yield None,
        # which routes us into the create path below — TODO confirm.

    changed = False

    this_lv = parse_lv(lv_info)

    if state == 'present' and not size:
        if this_lv is None:
            module.fail_json(msg="No size given.")

    if this_lv is None:
        if state == 'present':
            if lv_size > this_vg['free']:
                module.fail_json(
                    msg="Not enough free space in volume group %s: %s MB free."
                        % (this_vg['name'], this_vg['free']))

            # create LV
            mklv_cmd = module.get_bin_path("mklv", required=True)

            cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (
                test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg,
                lv_size, pv_list)
            rc, out, err = module.run_command(cmd)
            if rc == 0:
                module.exit_json(changed=True,
                                 msg="Logical volume %s created." % lv)
            else:
                module.fail_json(msg="Creating logical volume %s failed." % lv,
                                 rc=rc, out=out, err=err)
    else:
        if state == 'absent':
            # remove LV
            rmlv_cmd = module.get_bin_path("rmlv", required=True)
            rc, out, err = module.run_command(
                "%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True,
                                 msg="Logical volume %s deleted." % lv)
            else:
                module.fail_json(msg="Failed to remove logical volume %s." % lv,
                                 rc=rc, out=out, err=err)
        else:
            if this_lv['policy'] != policy:
                # change lv allocation policy
                chlv_cmd = module.get_bin_path("chlv", required=True)
                rc, out, err = module.run_command(
                    "%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy,
                                        this_lv['name']))
                if rc == 0:
                    module.exit_json(
                        changed=True,
                        msg="Logical volume %s policy changed: %s." % (lv, policy))
                else:
                    module.fail_json(
                        msg="Failed to change logical volume %s policy." % lv,
                        rc=rc, out=out, err=err)

            if vg != this_lv['vg']:
                module.fail_json(
                    msg="Logical volume %s already exist in volume group %s"
                        % (lv, this_lv['vg']))

            # from here the last remaining action is to resize it, if no size parameter is passed we do nothing.
            if not size:
                module.exit_json(changed=False,
                                 msg="Logical volume %s already exist." % (lv))

            # resize LV based on absolute values
            if int(lv_size) > this_lv['size']:
                extendlv_cmd = module.get_bin_path("extendlv", required=True)
                cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv,
                                        lv_size - this_lv['size'])
                rc, out, err = module.run_command(cmd)
                if rc == 0:
                    module.exit_json(
                        changed=True,
                        msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
                else:
                    module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size),
                                     rc=rc, out=out, err=err)
            elif shrink and lv_size < this_lv['size']:
                # AIX LVM cannot shrink a logical volume.
                module.fail_json(
                    msg="No shrinking of Logical Volume %s permitted. Current size: %s MB"
                        % (lv, this_lv['size']))
            else:
                module.exit_json(
                    changed=False,
                    msg="Logical volume %s size is already %sMB or higher." % (lv, lv_size))
def main():
    """Ensure a Webfaction application exists (or does not exist)."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            type=dict(required=True),
            autostart=dict(required=False, type='bool', default=False),
            extra_info=dict(required=False, default=""),
            port_open=dict(required=False, type='bool', default=False),
            login_name=dict(required=True),
            login_password=dict(required=True, no_log=True),
            machine=dict(required=False, default=False),
        ),
        supports_check_mode=True
    )

    params = module.params
    target_name = params['name']
    target_type = params['type']
    desired_state = params['state']

    # The optional machine argument selects which server to log in to.
    login_args = [params['login_name'], params['login_password']]
    if params['machine']:
        login_args.append(params['machine'])
    session_id, account = webfaction.login(*login_args)

    # Index the account's existing apps by name for a quick lookup.
    apps_by_name = {entry['name']: entry for entry in webfaction.list_apps(session_id)}
    existing = apps_by_name.get(target_name)

    result = {}

    # Reconcile desired vs. actual state.
    if desired_state == 'present':
        if existing:
            # Same name but different type is a conflict we won't resolve.
            if existing['type'] != target_type:
                module.fail_json(msg="App already exists with different type. Please fix by hand.")
            # Right name, right type: leave it alone.
            # Should check other parameters.
            module.exit_json(changed=False)

        if not module.check_mode:
            # Real run: actually create the app.
            result.update(
                webfaction.create_app(
                    session_id, target_name, target_type,
                    module.boolean(params['autostart']),
                    params['extra_info'],
                    module.boolean(params['port_open'])
                )
            )

    elif desired_state == 'absent':
        # Already absent: nothing to change.
        if not existing:
            module.exit_json(changed=False)

        if not module.check_mode:
            # Real run: actually delete the app.
            result.update(webfaction.delete_app(session_id, target_name))

    else:
        module.fail_json(msg="Unknown state specified: {}".format(desired_state))

    module.exit_json(changed=True, result=result)
def main():
    """Create (mkfs) or grow a filesystem on a block device.

    Detects the current filesystem with blkid, then either exits unchanged,
    grows the filesystem to fill the device (resizefs=yes), or creates a new
    filesystem (optionally forced). Honors check mode.
    """
    friendly_names = {
        'lvm': 'LVM2_member',
    }

    # There is no "single command" to manipulate filesystems, so we map them all out and their options
    fs_cmd_map = {
        'ext2': {
            'mkfs': 'mkfs.ext2',
            'grow': 'resize2fs',
            'grow_flag': None,
            'force_flag': '-F',
            'fsinfo': 'tune2fs',
        },
        'ext3': {
            'mkfs': 'mkfs.ext3',
            'grow': 'resize2fs',
            'grow_flag': None,
            'force_flag': '-F',
            'fsinfo': 'tune2fs',
        },
        'ext4': {
            'mkfs': 'mkfs.ext4',
            'grow': 'resize2fs',
            'grow_flag': None,
            'force_flag': '-F',
            'fsinfo': 'tune2fs',
        },
        'reiserfs': {
            'mkfs': 'mkfs.reiserfs',
            'grow': 'resize_reiserfs',
            'grow_flag': None,
            'force_flag': '-f',
            'fsinfo': 'reiserfstune',
        },
        'ext4dev': {
            'mkfs': 'mkfs.ext4',
            'grow': 'resize2fs',
            'grow_flag': None,
            'force_flag': '-F',
            'fsinfo': 'tune2fs',
        },
        'xfs': {
            'mkfs': 'mkfs.xfs',
            'grow': 'xfs_growfs',
            'grow_flag': None,
            'force_flag': '-f',
            'fsinfo': 'xfs_growfs',
        },
        'btrfs': {
            'mkfs': 'mkfs.btrfs',
            'grow': 'btrfs',
            'grow_flag': 'filesystem resize',
            'force_flag': '-f',
            'fsinfo': 'btrfs',
        },
        'LVM2_member': {
            'mkfs': 'pvcreate',
            'grow': 'pvresize',
            'grow_flag': None,
            'force_flag': '-f',
            'fsinfo': 'pvs',
        }
    }

    module = AnsibleModule(
        argument_spec=dict(
            # BUGFIX: dict views cannot be concatenated with '+' on Python 3
            # (TypeError); build plain lists instead.
            fstype=dict(type='str', required=True, aliases=['type'],
                        choices=list(fs_cmd_map) + list(friendly_names)),
            dev=dict(type='str', required=True, aliases=['device']),
            opts=dict(type='str'),
            force=dict(type='bool', default=False),
            resizefs=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    dev = module.params['dev']
    fstype = module.params['fstype']
    opts = module.params['opts']
    force = module.boolean(module.params['force'])
    resizefs = module.boolean(module.params['resizefs'])

    # Map user-friendly aliases (e.g. 'lvm') onto the blkid TYPE value.
    if fstype in friendly_names:
        fstype = friendly_names[fstype]

    changed = False

    # Membership test instead of a try/KeyError probe: same behavior, clearer.
    if fstype not in fs_cmd_map:
        module.exit_json(changed=False, msg="WARNING: module does not support this filesystem yet. %s" % fstype)

    mkfscmd = fs_cmd_map[fstype]['mkfs']
    force_flag = fs_cmd_map[fstype]['force_flag']
    growcmd = fs_cmd_map[fstype]['grow']
    fssize_cmd = fs_cmd_map[fstype]['fsinfo']

    if not os.path.exists(dev):
        module.fail_json(msg="Device %s not found." % dev)

    # Identify whatever filesystem is currently on the device.
    cmd = module.get_bin_path('blkid', required=True)
    rc, raw_fs, err = module.run_command(
        "%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
    fs = raw_fs.strip()

    if fs == fstype and resizefs is False and not force:
        # Already the requested filesystem and no resize/force: nothing to do.
        module.exit_json(changed=False)
    elif fs == fstype and resizefs is True:
        # Get dev and fs size and compare
        devsize_in_bytes = _get_dev_size(dev, module)
        fssize_in_bytes = _get_fs_size(fssize_cmd, dev, module)
        fs_smaller = fssize_in_bytes < devsize_in_bytes

        if module.check_mode and fs_smaller:
            module.exit_json(changed=True,
                             msg="Resizing filesystem %s on device %s" %
                             (fstype, dev))
        elif module.check_mode and not fs_smaller:
            module.exit_json(changed=False,
                             msg="%s filesystem is using the whole device %s" %
                             (fstype, dev))
        elif fs_smaller:
            cmd = module.get_bin_path(growcmd, required=True)
            rc, out, err = module.run_command("%s %s" % (cmd, dev))
            # Sadly there is no easy way to determine if this has changed. For now, just say "true" and move on.
            # in the future, you would have to parse the output to determine this.
            # thankfully, these are safe operations if no change is made.
            if rc == 0:
                module.exit_json(changed=True, msg=out)
            else:
                module.fail_json(
                    msg="Resizing filesystem %s on device '%s' failed" %
                    (fstype, dev), rc=rc, err=err)
        else:
            module.exit_json(changed=False,
                             msg="%s filesystem is using the whole device %s" %
                             (fstype, dev))
    elif fs and not force:
        # Some other filesystem is present: refuse to clobber it silently.
        module.fail_json(
            msg="'%s' is already used as %s, use force=yes to overwrite" %
            (dev, fs), rc=rc, err=err)

    # create fs
    if module.check_mode:
        changed = True
    else:
        mkfs = module.get_bin_path(mkfscmd, required=True)
        cmd = None
        if opts is None:
            cmd = "%s %s '%s'" % (mkfs, force_flag, dev)
        else:
            cmd = "%s %s %s '%s'" % (mkfs, force_flag, opts, dev)
        rc, dummy, err = module.run_command(cmd)
        if rc == 0:
            changed = True
        else:
            module.fail_json(
                msg="Creating filesystem %s on device '%s' failed" %
                (fstype, dev), rc=rc, err=err)

    module.exit_json(changed=changed)
def main():
    """Ansible entry point: add, modify or remove a MySQL user account.

    Connects (optionally trying an implicit root login first), normalizes the
    ``priv`` parameter, then creates/updates/deletes the user and applies any
    resource limits. Honors check mode via the helper functions.
    """
    argument_spec = mysql_common_argument_spec()
    argument_spec.update(
        user=dict(type='str', required=True, aliases=['name']),
        password=dict(type='str', no_log=True),
        encrypted=dict(type='bool', default=False),
        host=dict(type='str', default='localhost'),
        host_all=dict(type="bool", default=False),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        priv=dict(type='raw'),
        tls_requires=dict(type='dict'),
        append_privs=dict(type='bool', default=False),
        check_implicit_admin=dict(type='bool', default=False),
        update_password=dict(type='str', default='always', choices=['always', 'on_create'], no_log=False),
        sql_log_bin=dict(type='bool', default=True),
        plugin=dict(default=None, type='str'),
        plugin_hash_string=dict(default=None, type='str'),
        plugin_auth_string=dict(default=None, type='str'),
        resource_limits=dict(type='dict'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    login_user = module.params["login_user"]
    login_password = module.params["login_password"]
    user = module.params["user"]
    password = module.params["password"]
    encrypted = module.boolean(module.params["encrypted"])
    host = module.params["host"].lower()
    host_all = module.params["host_all"]
    state = module.params["state"]
    priv = module.params["priv"]
    tls_requires = sanitize_requires(module.params["tls_requires"])
    check_implicit_admin = module.params["check_implicit_admin"]
    connect_timeout = module.params["connect_timeout"]
    config_file = module.params["config_file"]
    append_privs = module.boolean(module.params["append_privs"])
    update_password = module.params['update_password']
    ssl_cert = module.params["client_cert"]
    ssl_key = module.params["client_key"]
    ssl_ca = module.params["ca_cert"]
    check_hostname = module.params["check_hostname"]
    db = ''
    sql_log_bin = module.params["sql_log_bin"]
    plugin = module.params["plugin"]
    plugin_hash_string = module.params["plugin_hash_string"]
    plugin_auth_string = module.params["plugin_auth_string"]
    resource_limits = module.params["resource_limits"]

    if priv and not isinstance(priv, (str, dict)):
        module.fail_json(
            msg="priv parameter must be str or dict but %s was passed" %
            type(priv))

    if priv and isinstance(priv, dict):
        priv = convert_priv_dict_to_str(priv)

    if mysql_driver is None:
        module.fail_json(msg=mysql_driver_fail_msg)

    cursor = None
    try:
        if check_implicit_admin:
            # Best effort: a passwordless root login may be available;
            # fall back to the supplied credentials if it is not.
            try:
                cursor, db_conn = mysql_connect(
                    module, "root", "", config_file, ssl_cert, ssl_key,
                    ssl_ca, db, connect_timeout=connect_timeout,
                    check_hostname=check_hostname)
            except Exception:
                pass

        if not cursor:
            cursor, db_conn = mysql_connect(module, login_user,
                                            login_password, config_file,
                                            ssl_cert, ssl_key, ssl_ca, db,
                                            connect_timeout=connect_timeout,
                                            check_hostname=check_hostname)
    except Exception as e:
        module.fail_json(
            msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
                "Exception message: %s" % (config_file, to_native(e)))

    if not sql_log_bin:
        # Keep these statements out of binary-log replication when asked.
        cursor.execute("SET SQL_LOG_BIN=0;")

    if priv is not None:
        try:
            mode = get_mode(cursor)
        except Exception as e:
            module.fail_json(msg=to_native(e))
        try:
            priv = privileges_unpack(priv, mode)
        except Exception as e:
            module.fail_json(msg="invalid privileges string: %s" % to_native(e))

    # BUGFIX: 'msg' was only assigned inside "if changed:" after user_add(),
    # so exit_json() could raise UnboundLocalError (NameError) when user_add
    # reported no change. Give both result fields safe defaults up front.
    changed = False
    msg = 'User unchanged'

    if state == "present":
        if user_exists(cursor, user, host, host_all):
            try:
                if update_password == "always":
                    changed, msg = user_mod(cursor, user, host, host_all,
                                            password, encrypted, plugin,
                                            plugin_hash_string,
                                            plugin_auth_string, priv,
                                            append_privs, tls_requires,
                                            module)
                else:
                    # update_password == 'on_create': never touch the password
                    # of an existing user.
                    changed, msg = user_mod(cursor, user, host, host_all,
                                            None, encrypted, plugin,
                                            plugin_hash_string,
                                            plugin_auth_string, priv,
                                            append_privs, tls_requires,
                                            module)
            except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
                module.fail_json(msg=to_native(e))
        else:
            if host_all:
                module.fail_json(
                    msg="host_all parameter cannot be used when adding a user")
            try:
                changed = user_add(cursor, user, host, host_all, password,
                                   encrypted, plugin, plugin_hash_string,
                                   plugin_auth_string, priv, tls_requires,
                                   module.check_mode)
                if changed:
                    msg = "User added"
            except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
                module.fail_json(msg=to_native(e))

        if resource_limits:
            changed = limit_resources(module, cursor, user, host,
                                      resource_limits,
                                      module.check_mode) or changed

    elif state == "absent":
        if user_exists(cursor, user, host, host_all):
            changed = user_delete(cursor, user, host, host_all,
                                  module.check_mode)
            msg = "User deleted"
        else:
            changed = False
            msg = "User doesn't exist"
    module.exit_json(changed=changed, user=user, msg=msg)
def main():
    """Create, resize, activate/deactivate or remove an LVM logical volume.

    Parses the requested size (absolute, relative +/-, or percentage of
    VG/PVS/FREE), inspects the volume group and its LVs with vgs/lvs, then
    drives lvcreate/lvextend/lvreduce/lvremove/lvchange accordingly.
    Check mode is handled via LVM's own ``--test`` flag.
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            lv=dict(type='str'),
            size=dict(type='str'),
            opts=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            force=dict(type='bool', default=False),
            shrink=dict(type='bool', default=True),
            active=dict(type='bool', default=True),
            snapshot=dict(type='str'),
            pvs=dict(type='str'),
            resizefs=dict(type='bool', default=False),
            thinpool=dict(type='str'),
        ),
        supports_check_mode=True,
        required_one_of=(['lv', 'thinpool'], ),
    )

    module.run_command_environ_update = LVOL_ENV_VARS

    # Determine if the "--yes" option should be used
    version_found = get_lvm_version(module)
    if version_found is None:
        module.fail_json(msg="Failed to get LVM version number")
    version_yesopt = mkversion(2, 2, 99)  # First LVM with the "--yes" option
    if version_found >= version_yesopt:
        yesopt = "--yes"
    else:
        yesopt = ""

    vg = module.params['vg']
    lv = module.params['lv']
    size = module.params['size']
    opts = module.params['opts']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    shrink = module.boolean(module.params['shrink'])
    active = module.boolean(module.params['active'])
    resizefs = module.boolean(module.params['resizefs'])
    thinpool = module.params['thinpool']
    size_opt = 'L'
    size_unit = 'm'
    size_operator = None
    snapshot = module.params['snapshot']
    pvs = module.params['pvs']
    # BUGFIX: size_whole/size_percent were previously bound only inside the
    # '%'-parsing branch below, but size_whole is read later for ANY relative
    # size (e.g. size='+5g' creating a new LV raised NameError). Initialize
    # them so the later checks see None instead of an unbound name.
    size_whole = None
    size_percent = None

    if pvs is None:
        pvs = ""
    else:
        pvs = pvs.replace(",", " ")

    if opts is None:
        opts = ""

    # Add --test option when running in check-mode
    if module.check_mode:
        test_opt = ' --test'
    else:
        test_opt = ''

    if size:
        # LVEXTEND(8)/LVREDUCE(8) -l, -L options: Check for relative value for resizing
        if size.startswith('+'):
            size_operator = '+'
            size = size[1:]
        elif size.startswith('-'):
            size_operator = '-'
            size = size[1:]
        # LVCREATE(8) does not support [+-]

        # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -l --extents option with percentage
        if '%' in size:
            size_parts = size.split('%', 1)
            size_percent = int(size_parts[0])
            if size_percent > 100:
                module.fail_json(
                    msg="Size percentage cannot be larger than 100%")
            size_whole = size_parts[1]
            if size_whole == 'ORIGIN':
                module.fail_json(msg="Snapshot Volumes are not supported")
            elif size_whole not in ['VG', 'PVS', 'FREE']:
                module.fail_json(
                    msg="Specify extents as a percentage of VG|PVS|FREE")
            size_opt = 'l'
            size_unit = ''

        # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -L --size option unit
        if '%' not in size:
            if size[-1].lower() in 'bskmgtpe':
                size_unit = size[-1]
                size = size[0:-1]

            try:
                float(size)
                if not size[0].isdigit():
                    raise ValueError()
            except ValueError:
                module.fail_json(msg="Bad size specification of '%s'" % size)

    # when no unit, megabytes by default
    if size_opt == 'l':
        unit = 'm'
    else:
        unit = size_unit

    # Get information on volume group requested
    vgs_cmd = module.get_bin_path("vgs", required=True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit.lower(), vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False,
                             stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg,
                             rc=rc, err=err)

    vgs = parse_vgs(current_vgs)
    this_vg = vgs[0]

    # Get information on logical volume requested
    lvs_cmd = module.get_bin_path("lvs", required=True)
    rc, current_lvs, err = module.run_command(
        "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit.lower(), vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False,
                             stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg,
                             rc=rc, err=err)

    changed = False

    lvs = parse_lvs(current_lvs)

    if snapshot:
        # Check snapshot pre-conditions
        for test_lv in lvs:
            if test_lv['name'] == lv or test_lv['name'] == thinpool:
                if not test_lv['thinpool'] and not thinpool:
                    break
                else:
                    module.fail_json(
                        msg="Snapshots of thin pool LVs are not supported.")
        else:
            module.fail_json(
                msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg))
        check_lv = snapshot
    elif thinpool:
        if lv:
            # Check thin volume pre-conditions
            for test_lv in lvs:
                if test_lv['name'] == thinpool:
                    break
            else:
                module.fail_json(
                    msg="Thin pool LV %s does not exist in volume group %s." % (thinpool, vg))
            check_lv = lv
        else:
            check_lv = thinpool
    else:
        check_lv = lv

    # Find the LV we are asked to manage (accept 'vg/lv' style names too).
    for test_lv in lvs:
        if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
            this_lv = test_lv
            break
    else:
        this_lv = None

    msg = ''
    if this_lv is None:
        if state == 'present':
            if size_operator is not None:
                # lvcreate does not accept relative sizes except the
                # percentage-of-whole forms handled above.
                if size_operator == "-" or (size_whole not in ["VG", "PVS", "FREE", "ORIGIN", None]):
                    module.fail_json(
                        msg="Bad size specification of '%s%s' for creating LV" % (size_operator, size))
            # Require size argument except for snapshot of thin volumes
            if (lv or thinpool) and not size:
                for test_lv in lvs:
                    if test_lv['name'] == lv and test_lv['thinvol'] and snapshot:
                        break
                else:
                    module.fail_json(msg="No size given.")

            # create LV
            lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
            if snapshot is not None:
                if size:
                    cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (
                        lvcreate_cmd, test_opt, yesopt, size_opt, size,
                        size_unit, snapshot, opts, vg, lv)
                else:
                    cmd = "%s %s %s -s -n %s %s %s/%s" % (
                        lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv)
            elif thinpool and lv:
                if size_opt == 'l':
                    module.fail_json(
                        changed=False,
                        msg="Thin volume sizing with percentage not supported.")
                size_opt = 'V'
                cmd = "%s %s %s -n %s -%s %s%s %s -T %s/%s" % (
                    lvcreate_cmd, test_opt, yesopt, lv, size_opt, size,
                    size_unit, opts, vg, thinpool)
            elif thinpool and not lv:
                cmd = "%s %s %s -%s %s%s %s -T %s/%s" % (
                    lvcreate_cmd, test_opt, yesopt, size_opt, size,
                    size_unit, opts, vg, thinpool)
            else:
                cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (
                    lvcreate_cmd, test_opt, yesopt, lv, size_opt, size,
                    size_unit, opts, vg, pvs)
            rc, dummy, err = module.run_command(cmd)
            if rc == 0:
                changed = True
            else:
                module.fail_json(msg="Creating logical volume '%s' failed" % lv,
                                 rc=rc, err=err)
    else:
        if state == 'absent':
            # remove LV
            if not force:
                module.fail_json(
                    msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
            lvremove_cmd = module.get_bin_path("lvremove", required=True)
            rc, dummy, err = module.run_command(
                "%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to remove logical volume %s" % (lv),
                                 rc=rc, err=err)

        elif not size:
            pass

        elif size_opt == 'l':
            # Resize LV based on % value
            tool = None
            size_free = this_vg['free']
            if size_whole == 'VG' or size_whole == 'PVS':
                size_requested = size_percent * this_vg['size'] / 100
            else:  # size_whole == 'FREE':
                size_requested = size_percent * this_vg['free'] / 100

            if size_operator == '+':
                size_requested += this_lv['size']
            elif size_operator == '-':
                size_requested = this_lv['size'] - size_requested

            # According to latest documentation (LVM2-2.03.11) all tools round down
            size_requested -= (size_requested % this_vg['ext_size'])

            if this_lv['size'] < size_requested:
                if (size_free > 0) and (size_free >= (size_requested - this_lv['size'])):
                    tool = module.get_bin_path("lvextend", required=True)
                else:
                    module.fail_json(
                        msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
                        (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit))
            elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']:  # more than an extent too large
                if size_requested < 1:
                    module.fail_json(
                        msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                elif not force:
                    module.fail_json(
                        msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if resizefs:
                    tool = '%s %s' % (tool, '--resizefs')
                if size_operator:
                    cmd = "%s %s -%s %s%s%s %s/%s %s" % (
                        tool, test_opt, size_opt, size_operator, size,
                        size_unit, vg, this_lv['name'], pvs)
                else:
                    cmd = "%s %s -%s %s%s %s/%s %s" % (
                        tool, test_opt, size_opt, size, size_unit, vg,
                        this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" %
                                     (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                    msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg,
                                     lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(
                        changed=False, vg=vg, lv=this_lv['name'],
                        size=this_lv['size'],
                        msg="Original size is larger than requested size",
                        err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" %
                                     (lv, size, size_unit), rc=rc, err=err)

        else:
            # resize LV based on absolute values
            tool = None
            if float(size) > this_lv['size'] or size_operator == '+':
                tool = module.get_bin_path("lvextend", required=True)
            elif shrink and float(size) < this_lv['size'] or size_operator == '-':
                if float(size) == 0:
                    module.fail_json(
                        msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                if not force:
                    module.fail_json(
                        msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if resizefs:
                    tool = '%s %s' % (tool, '--resizefs')
                if size_operator:
                    cmd = "%s %s -%s %s%s%s %s/%s %s" % (
                        tool, test_opt, size_opt, size_operator, size,
                        size_unit, vg, this_lv['name'], pvs)
                else:
                    cmd = "%s %s -%s %s%s %s/%s %s" % (
                        tool, test_opt, size_opt, size, size_unit, vg,
                        this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" %
                                     (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg,
                                     lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(
                        changed=False, vg=vg, lv=this_lv['name'],
                        size=this_lv['size'],
                        msg="Original size is larger than requested size",
                        err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" %
                                     (lv, size, size_unit), rc=rc, err=err)

    if this_lv is not None:
        if active:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, dummy, err = module.run_command(
                "%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=((not this_lv['active']) or changed),
                                 vg=vg, lv=this_lv['name'],
                                 size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to activate logical volume %s" % (lv),
                                 rc=rc, err=err)
        else:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, dummy, err = module.run_command(
                "%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=(this_lv['active'] or changed),
                                 vg=vg, lv=this_lv['name'],
                                 size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to deactivate logical volume %s" % (lv),
                                 rc=rc, err=err)

    module.exit_json(changed=changed, msg=msg)
def main():
    """Run a Django ``manage.py`` command and report whether it changed anything.

    Validates that the supplied parameters are compatible with the chosen
    command, builds the manage.py command line (adding --noinput where
    required), runs it in ``app_path`` (optionally inside a virtualenv), and
    derives ``changed`` from command-specific output filters/checkers.
    """
    # Which optional params each known command accepts.
    command_allowed_param_map = dict(
        cleanup=(),
        createcachetable=('cache_table', 'database', ),
        flush=('database', ),
        loaddata=('database', 'fixtures', ),
        syncdb=('database', ),
        test=('failfast', 'testrunner', 'liveserver', 'apps', ),
        validate=(),
        migrate=('apps', 'skip', 'merge', 'database', ),
        collectstatic=('clear', 'link', ),
    )

    command_required_param_map = dict(loaddata=('fixtures', ), )

    # forces --noinput on every command that needs it
    noinput_commands = (
        'flush',
        'syncdb',
        'migrate',
        'test',
        'collectstatic',
    )

    # These params are allowed for certain commands only
    specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures',
                       'liveserver', 'testrunner')

    # These params are automatically added to the command if present
    general_params = ('settings', 'pythonpath', 'database', )
    specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
    end_of_command_params = ('apps', 'cache_table', 'fixtures')

    module = AnsibleModule(argument_spec=dict(
        command=dict(default=None, required=True),
        app_path=dict(default=None, required=True, type='path'),
        settings=dict(default=None, required=False),
        pythonpath=dict(default=None, required=False, aliases=['python_path']),
        virtualenv=dict(default=None, required=False, type='path', aliases=['virtual_env']),
        apps=dict(default=None, required=False),
        cache_table=dict(default=None, required=False),
        clear=dict(default=None, required=False, type='bool'),
        database=dict(default=None, required=False),
        failfast=dict(default=False, required=False, type='bool', aliases=['fail_fast']),
        fixtures=dict(default=None, required=False),
        liveserver=dict(default=None, required=False, aliases=['live_server']),
        testrunner=dict(default=None, required=False, aliases=['test_runner']),
        skip=dict(default=None, required=False, type='bool'),
        merge=dict(default=None, required=False, type='bool'),
        link=dict(default=None, required=False, type='bool'),
    ),
    )

    command = module.params['command']
    app_path = module.params['app_path']
    virtualenv = module.params['virtualenv']

    for param in specific_params:
        value = module.params[param]
        if param in specific_boolean_params:
            value = module.boolean(value)
        # BUGFIX: indexing command_allowed_param_map[command] raised a raw
        # KeyError traceback for an unrecognized command, bypassing the
        # friendly "Unknown django command" handling below; .get() lets an
        # unknown command reach that path (or fail cleanly here).
        if value and param not in command_allowed_param_map.get(command, ()):
            module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))

    for param in command_required_param_map.get(command, ()):
        if not module.params[param]:
            module.fail_json(msg='%s param is required for command=%s' % (param, command))

    _ensure_virtualenv(module)

    cmd = "./manage.py %s" % (command, )

    if command in noinput_commands:
        cmd = '%s --noinput' % cmd

    for param in general_params:
        if module.params[param]:
            cmd = '%s --%s=%s' % (cmd, param, module.params[param])

    for param in specific_boolean_params:
        if module.boolean(module.params[param]):
            cmd = '%s --%s' % (cmd, param)

    # these params always get tacked on the end of the command
    for param in end_of_command_params:
        if module.params[param]:
            cmd = '%s %s' % (cmd, module.params[param])

    rc, out, err = module.run_command(cmd, cwd=app_path)
    if rc != 0:
        # createcachetable is idempotent by hand: an existing table is fine.
        if command == 'createcachetable' and 'table' in err and 'already exists' in err:
            out = 'already exists.'
        else:
            if "Unknown command:" in err:
                _fail(module, cmd, err, "Unknown django command: %s" % command)
            _fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)

    changed = False

    lines = out.split('\n')
    filt = globals().get(command + "_filter_output", None)
    if filt:
        filtered_output = list(filter(filt, lines))
        if len(filtered_output):
            changed = True
    check_changed = globals().get("{0}_check_changed".format(command), None)
    if check_changed:
        changed = check_changed(out)

    module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path,
                     virtualenv=virtualenv,
                     settings=module.params['settings'],
                     pythonpath=module.params['pythonpath'])
def main():
    """Create or remove a Webfaction application through the Webfaction API."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            type=dict(required=True),
            autostart=dict(required=False, type='bool', default=False),
            extra_info=dict(required=False, default=""),
            port_open=dict(required=False, type='bool', default=False),
            login_name=dict(required=True),
            login_password=dict(required=True, no_log=True),
            machine=dict(required=False, default=None),
        ),
        supports_check_mode=True,
    )

    params = module.params
    app_name = params['name']
    app_type = params['type']
    app_state = params['state']

    # Authenticate; pass the machine through only when one was supplied.
    login_args = [params['login_name'], params['login_password']]
    if params['machine']:
        login_args.append(params['machine'])
    session_id, account = webfaction.login(*login_args)

    # Index the account's apps by name and look up the one we manage.
    app_map = dict([(i['name'], i) for i in webfaction.list_apps(session_id)])
    existing_app = app_map.get(app_name)

    result = {}

    # Apply the requested state.
    if app_state == 'present':
        if existing_app:
            # An app of a different type cannot be converted automatically.
            if existing_app['type'] != app_type:
                module.fail_json(
                    msg=
                    "App already exists with different type. Please fix by hand."
                )
            # Same name and type: treat as already converged.
            # Should check other parameters.
            module.exit_json(changed=False, result=existing_app)

        if not module.check_mode:
            # Not a dry run: actually create the app.
            result.update(
                webfaction.create_app(
                    session_id,
                    app_name,
                    app_type,
                    module.boolean(params['autostart']),
                    params['extra_info'],
                    module.boolean(params['port_open']),
                ))

    elif app_state == 'absent':
        # Absent and already missing: nothing to change.
        if not existing_app:
            module.exit_json(changed=False)

        if not module.check_mode:
            # Not a dry run: actually delete the app.
            result.update(webfaction.delete_app(session_id, app_name))

    else:
        module.fail_json(msg="Unknown state specified: {0}".format(app_state))

    module.exit_json(changed=True, result=result)
def main():
    """Insert, update or remove a marker-delimited text block in a file.

    The block is bracketed by BEGIN/END marker lines derived from the
    ``marker`` template; existing markers are located and replaced in place,
    otherwise the block is inserted relative to insertafter/insertbefore
    (defaulting to EOF). Supports check mode, diff mode and backups.
    """
    module = AnsibleModule(argument_spec=dict(
        path=dict(required=True, aliases=['dest', 'destfile', 'name'], type='path'),
        state=dict(default='present', choices=['absent', 'present']),
        marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),
        block=dict(default='', type='str', aliases=['content']),
        insertafter=dict(default=None),
        insertbefore=dict(default=None),
        create=dict(default=False, type='bool'),
        backup=dict(default=False, type='bool'),
        validate=dict(default=None, type='str'),
    ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        add_file_common_args=True,
        supports_check_mode=True)
    params = module.params
    path = params['path']
    if os.path.isdir(path):
        module.fail_json(rc=256, msg='Path %s is a directory !' % path)
    path_exists = os.path.exists(path)
    if not path_exists:
        if not module.boolean(params['create']):
            module.fail_json(rc=257, msg='Path %s does not exist !' % path)
        # File will be created from scratch: no original content to compare.
        original = None
        lines = []
    else:
        # Read as bytes so the content round-trips regardless of encoding.
        f = open(path, 'rb')
        original = f.read()
        f.close()
        lines = original.splitlines()
    diff = {'before': '', 'after': '', 'before_header': '%s (content)' % path, 'after_header': '%s (content)' % path}
    if module._diff and original:
        diff['before'] = original
    insertbefore = params['insertbefore']
    insertafter = params['insertafter']
    block = to_bytes(params['block'])
    marker = to_bytes(params['marker'])
    present = params['state'] == 'present'
    if not present and not path_exists:
        module.exit_json(changed=False, msg="File %s not present" % path)
    # Default anchor when neither insertafter nor insertbefore was given.
    if insertbefore is None and insertafter is None:
        insertafter = 'EOF'
    # 'EOF'/'BOF' are positional keywords; anything else is a regex anchor.
    if insertafter not in (None, 'EOF'):
        insertre = re.compile(
            to_bytes(insertafter, errors='surrogate_or_strict'))
    elif insertbefore not in (None, 'BOF'):
        insertre = re.compile(
            to_bytes(insertbefore, errors='surrogate_or_strict'))
    else:
        insertre = None
    # Concrete BEGIN/END marker lines rendered from the {mark} template.
    marker0 = re.sub(b(r'{mark}'), b('BEGIN'), marker)
    marker1 = re.sub(b(r'{mark}'), b('END'), marker)
    if present and block:
        # Escape seqeuences like '\n' need to be handled in Ansible 1.x
        # NOTE(review): this re.sub('', block, '') call looks garbled — it
        # just evaluates to `block`; confirm against the module's history.
        if module.ansible_version.startswith('1.'):
            block = re.sub('', block, '')
        blocklines = [marker0] + block.splitlines() + [marker1]
    else:
        # state=absent (or empty block): the existing block is removed.
        blocklines = []
    # Locate existing BEGIN (n0) and END (n1) marker lines, if any.
    n0 = n1 = None
    for i, line in enumerate(lines):
        if line == marker0:
            n0 = i
        if line == marker1:
            n1 = i
    if None in (n0, n1):
        # No complete existing block: pick the insertion point instead.
        n0 = None
        if insertre is not None:
            # Last line matching the anchor regex wins.
            for i, line in enumerate(lines):
                if insertre.search(line):
                    n0 = i
            if n0 is None:
                n0 = len(lines)
            elif insertafter is not None:
                n0 += 1
        elif insertbefore is not None:
            n0 = 0  # insertbefore=BOF
        else:
            n0 = len(lines)  # insertafter=EOF
    elif n0 < n1:
        # Normal order: drop the old block (markers included).
        lines[n0:n1 + 1] = []
    else:
        # Markers in reverse order: drop the span and insert at END's spot.
        lines[n1:n0 + 1] = []
        n0 = n1
    lines[n0:n0] = blocklines
    if lines:
        result = b('\n').join(lines)
        # Preserve (or add, for new files) the trailing newline.
        if original is None or original.endswith(b('\n')):
            result += b('\n')
    else:
        result = ''
    if module._diff:
        diff['after'] = result
    if original == result:
        msg = ''
        changed = False
    elif original is None:
        msg = 'File created'
        changed = True
    elif not blocklines:
        msg = 'Block removed'
        changed = True
    else:
        msg = 'Block inserted'
        changed = True
    if changed and not module.check_mode:
        if module.boolean(params['backup']) and path_exists:
            module.backup_local(path)
        # We should always follow symlinks so that we change the real file
        real_path = os.path.realpath(params['path'])
        write_changes(module, result, real_path)
    if module.check_mode and not path_exists:
        # Can't check attributes of a file that doesn't exist yet.
        module.exit_json(changed=changed, msg=msg, diff=diff)
    attr_diff = {}
    msg, changed = check_file_attrs(module, changed, msg, attr_diff)
    attr_diff['before_header'] = '%s (file attributes)' % path
    attr_diff['after_header'] = '%s (file attributes)' % path
    difflist = [diff, attr_diff]
    module.exit_json(changed=changed, msg=msg, diff=difflist)
def main():
    """Insert, update or remove a marker-delimited text block in ``dest``.

    Older variant of the blockinfile entry point: same marker/anchor logic
    as the path-based version, but without diff output and with ``dest``
    as the primary parameter. Supports check mode and backups.
    """
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(required=True, aliases=['name', 'destfile'], type='path'),
            state=dict(default='present', choices=['absent', 'present']),
            marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),
            block=dict(default='', type='str', aliases=['content']),
            insertafter=dict(default=None),
            insertbefore=dict(default=None),
            create=dict(default=False, type='bool'),
            backup=dict(default=False, type='bool'),
            validate=dict(default=None, type='str'),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        add_file_common_args=True,
        supports_check_mode=True
    )

    params = module.params
    dest = params['dest']
    # NOTE(review): 'follow' is not declared in argument_spec above —
    # presumably injected by add_file_common_args; verify before relying on it.
    if module.boolean(params.get('follow', None)):
        dest = os.path.realpath(dest)

    if os.path.isdir(dest):
        module.fail_json(rc=256,
                         msg='Destination %s is a directory !' % dest)

    path_exists = os.path.exists(dest)
    if not path_exists:
        if not module.boolean(params['create']):
            module.fail_json(rc=257,
                             msg='Destination %s does not exist !' % dest)
        # File will be created from scratch: no original content to compare.
        original = None
        lines = []
    else:
        # Read as bytes so the content round-trips regardless of encoding.
        f = open(dest, 'rb')
        original = f.read()
        f.close()
        lines = original.splitlines()

    insertbefore = params['insertbefore']
    insertafter = params['insertafter']
    block = to_bytes(params['block'])
    marker = to_bytes(params['marker'])
    present = params['state'] == 'present'

    if not present and not path_exists:
        module.exit_json(changed=False, msg="File not present")

    # Default anchor when neither insertafter nor insertbefore was given.
    if insertbefore is None and insertafter is None:
        insertafter = 'EOF'

    # 'EOF'/'BOF' are positional keywords; anything else is a regex anchor.
    # NOTE(review): the pattern is compiled from the raw str parameter while
    # the file lines are bytes — confirm this matches on Python 3.
    if insertafter not in (None, 'EOF'):
        insertre = re.compile(insertafter)
    elif insertbefore not in (None, 'BOF'):
        insertre = re.compile(insertbefore)
    else:
        insertre = None

    # Concrete BEGIN/END marker lines rendered from the {mark} template.
    marker0 = re.sub(b(r'{mark}'), b('BEGIN'), marker)
    marker1 = re.sub(b(r'{mark}'), b('END'), marker)
    if present and block:
        # Escape seqeuences like '\n' need to be handled in Ansible 1.x
        # NOTE(review): this re.sub('', block, '') call looks garbled — it
        # just evaluates to `block`; confirm against the module's history.
        if module.ansible_version.startswith('1.'):
            block = re.sub('', block, '')
        blocklines = [marker0] + block.splitlines() + [marker1]
    else:
        # state=absent (or empty block): the existing block is removed.
        blocklines = []

    # Locate existing BEGIN (n0) and END (n1) marker lines, if any.
    n0 = n1 = None
    for i, line in enumerate(lines):
        if line == marker0:
            n0 = i
        if line == marker1:
            n1 = i

    if None in (n0, n1):
        # No complete existing block: pick the insertion point instead.
        n0 = None
        if insertre is not None:
            # Last line matching the anchor regex wins.
            for i, line in enumerate(lines):
                if insertre.search(line):
                    n0 = i
            if n0 is None:
                n0 = len(lines)
            elif insertafter is not None:
                n0 += 1
        elif insertbefore is not None:
            n0 = 0  # insertbefore=BOF
        else:
            n0 = len(lines)  # insertafter=EOF
    elif n0 < n1:
        # Normal order: drop the old block (markers included).
        lines[n0:n1+1] = []
    else:
        # Markers in reverse order: drop the span and insert at END's spot.
        lines[n1:n0+1] = []
        n0 = n1

    lines[n0:n0] = blocklines

    if lines:
        result = b('\n').join(lines)
        # Preserve (or add, for new files) the trailing newline.
        if original is None or original.endswith(b('\n')):
            result += b('\n')
    else:
        result = ''

    if original == result:
        msg = ''
        changed = False
    elif original is None:
        msg = 'File created'
        changed = True
    elif not blocklines:
        msg = 'Block removed'
        changed = True
    else:
        msg = 'Block inserted'
        changed = True

    if changed and not module.check_mode:
        if module.boolean(params['backup']) and path_exists:
            module.backup_local(dest)
        write_changes(module, result, dest)

    if module.check_mode and not path_exists:
        # Can't check attributes of a file that doesn't exist yet.
        module.exit_json(changed=changed, msg=msg)

    msg, changed = check_file_attrs(module, changed, msg)
    module.exit_json(changed=changed, msg=msg)
def main():
    """Manage the ufw firewall: enable/disable, defaults, logging, and rules.

    Exactly one of the command arguments (state, default, rule, logging) must
    be given; check mode predicts changes by diffing rule files and status
    output against a dry run.
    """
    command_keys = ['state', 'default', 'rule', 'logging']

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
            default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
            logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
            direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
            delete=dict(type='bool', default=False),
            route=dict(type='bool', default=False),
            insert=dict(type='int'),
            insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4',
                                             'first-ipv6', 'last-ipv6'], default='zero'),
            rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
            interface=dict(type='str', aliases=['if']),
            interface_in=dict(type='str', aliases=['if_in']),
            interface_out=dict(type='str', aliases=['if_out']),
            log=dict(type='bool', default=False),
            from_ip=dict(type='str', default='any', aliases=['from', 'src']),
            from_port=dict(type='str'),
            to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
            to_port=dict(type='str', aliases=['port']),
            proto=dict(type='str', aliases=['protocol'],
                       choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']),
            name=dict(type='str', aliases=['app']),
            comment=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['name', 'proto', 'logging'],
            # Mutual exclusivity with `interface` implied by `required_by`.
            ['direction', 'interface_in'],
            ['direction', 'interface_out'],
        ],
        required_one_of=([command_keys]),
        required_by=dict(interface=('direction', ), ),
    )

    cmds = []

    ipv4_regexp = compile_ipv4_regexp()
    ipv6_regexp = compile_ipv6_regexp()

    def filter_line_that_not_start_with(pattern, content):
        # Despite its name this helper KEEPS lines starting with `pattern`
        # (used to extract the '### tuple' lines from dry-run output).
        return ''.join([line for line in content.splitlines(True)
                        if line.startswith(pattern)])

    def filter_line_that_contains(pattern, content):
        return [line for line in content.splitlines(True) if pattern in line]

    def filter_line_that_not_contains(pattern, content):
        # FIX: str objects have no .contains() method; the original
        # `line.contains(pattern)` raised AttributeError when called.
        return ''.join([line for line in content.splitlines(True)
                        if pattern not in line])

    def filter_line_that_match_func(match_func, content):
        return ''.join([line for line in content.splitlines(True)
                        if match_func(line) is not None])

    def filter_line_that_contains_ipv4(content):
        return filter_line_that_match_func(ipv4_regexp.search, content)

    def filter_line_that_contains_ipv6(content):
        return filter_line_that_match_func(ipv6_regexp.search, content)

    def is_starting_by_ipv4(ip):
        return ipv4_regexp.match(ip) is not None

    def is_starting_by_ipv6(ip):
        return ipv6_regexp.match(ip) is not None

    def execute(cmd, ignore_error=False):
        # `cmd` is a list of [condition, text] pairs; keep only the pairs
        # whose first element is truthy, then join their last elements.
        cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))

        cmds.append(cmd)
        (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})

        if rc != 0 and not ignore_error:
            module.fail_json(msg=err or out, commands=cmds)

        return out

    def get_current_rules():
        # Grep the '### tuple' rule markers out of every known rule file
        # location; missing files are tolerated (ignore_error).
        user_rules_files = ["/lib/ufw/user.rules",
                            "/lib/ufw/user6.rules",
                            "/etc/ufw/user.rules",
                            "/etc/ufw/user6.rules",
                            "/var/lib/ufw/user.rules",
                            "/var/lib/ufw/user6.rules"]

        cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
        cmd.extend([[f] for f in user_rules_files])
        return execute(cmd, ignore_error=True)

    def ufw_version():
        """
        Returns the major and minor version of ufw installed on the system.
        """
        out = execute([[ufw_bin], ["--version"]])

        lines = [x for x in out.split('\n') if x.strip() != '']
        if len(lines) == 0:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
        if matches is None:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        # Convert version to numbers
        major = int(matches.group(1))
        minor = int(matches.group(2))
        rev = 0
        if matches.group(3) is not None:
            rev = int(matches.group(3))

        return major, minor, rev

    params = module.params

    commands = dict((key, params[key]) for key in command_keys if params[key])

    # Ensure ufw is available
    ufw_bin = module.get_bin_path('ufw', True)
    grep_bin = module.get_bin_path('grep', True)

    # Save the pre state and rules in order to recognize changes
    pre_state = execute([[ufw_bin], ['status verbose']])
    pre_rules = get_current_rules()

    changed = False

    # Execute filter
    for (command, value) in commands.items():

        cmd = [[ufw_bin], [module.check_mode, '--dry-run']]

        if command == 'state':
            states = {'enabled': 'enable', 'disabled': 'disable',
                      'reloaded': 'reload', 'reset': 'reset'}

            if value in ['reloaded', 'reset']:
                changed = True

            if module.check_mode:
                # "active" would also match "inactive", hence the space
                ufw_enabled = pre_state.find(" active") != -1
                if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
                    changed = True
            else:
                execute(cmd + [['-f'], [states[value]]])

        elif command == 'logging':
            extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state)
            if extract:
                current_level = extract.group(2)
                current_on_off_value = extract.group(1)
                if value != "off":
                    if current_on_off_value == "off":
                        changed = True
                    elif value != "on" and value != current_level:
                        changed = True
                elif current_on_off_value != "off":
                    changed = True
            else:
                changed = True

            if not module.check_mode:
                execute(cmd + [[command], [value]])

        elif command == 'default':
            if params['direction'] not in ['outgoing', 'incoming', 'routed', None]:
                module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.')

            if module.check_mode:
                regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
                extract = re.search(regexp, pre_state)
                if extract is not None:
                    current_default_values = {}
                    current_default_values["incoming"] = extract.group(1)
                    current_default_values["outgoing"] = extract.group(2)
                    current_default_values["routed"] = extract.group(3)
                    v = current_default_values[params['direction'] or 'incoming']
                    # 'disabled' means routing is off, so nothing to change.
                    if v not in (value, 'disabled'):
                        changed = True
                else:
                    changed = True
            else:
                execute(cmd + [[command], [value], [params['direction']]])

        elif command == 'rule':
            if params['direction'] not in ['in', 'out', None]:
                module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.')
            if not params['route'] and params['interface_in'] and params['interface_out']:
                module.fail_json(msg='Only route rules can combine '
                                     'interface_in and interface_out')

            # Rules are constructed according to the long format
            #
            # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
            #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
            #     [proto protocol] [app application] [comment COMMENT]
            cmd.append([module.boolean(params['route']), 'route'])
            cmd.append([module.boolean(params['delete']), 'delete'])

            if params['insert'] is not None:
                relative_to_cmd = params['insert_relative_to']
                if relative_to_cmd == 'zero':
                    insert_to = params['insert']
                else:
                    # Translate the relative insert position into an absolute
                    # rule number from 'ufw status numbered' output.
                    (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered'])
                    numbered_line_re = re.compile(r'^\[ *([0-9]+)\] ')
                    lines = [(numbered_line_re.match(line), '(v6)' in line)
                             for line in numbered_state.splitlines()]
                    lines = [(int(matcher.group(1)), ipv6)
                             for (matcher, ipv6) in lines if matcher]
                    last_number = max([no for (no, ipv6) in lines]) if lines else 0
                    has_ipv4 = any([not ipv6 for (no, ipv6) in lines])
                    has_ipv6 = any([ipv6 for (no, ipv6) in lines])
                    if relative_to_cmd == 'first-ipv4':
                        relative_to = 1
                    elif relative_to_cmd == 'last-ipv4':
                        relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1
                    elif relative_to_cmd == 'first-ipv6':
                        relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1
                    elif relative_to_cmd == 'last-ipv6':
                        relative_to = last_number if has_ipv6 else last_number + 1
                    insert_to = params['insert'] + relative_to
                    if insert_to > last_number:
                        # ufw does not like it when the insert number is larger than the
                        # maximal rule number for IPv4/IPv6.
                        insert_to = None

                cmd.append([insert_to is not None, "insert %s" % insert_to])

            cmd.append([value])
            cmd.append([params['direction'], "%s" % params['direction']])
            cmd.append([params['interface'], "on %s" % params['interface']])
            cmd.append([params['interface_in'], "in on %s" % params['interface_in']])
            cmd.append([params['interface_out'], "out on %s" % params['interface_out']])
            cmd.append([module.boolean(params['log']), 'log'])

            for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
                                    ('to_ip', "to %s"), ('to_port', "port %s"),
                                    ('proto', "proto %s"), ('name', "app '%s'")]:
                value = params[key]
                cmd.append([value, template % (value)])

            ufw_major, ufw_minor, dummy = ufw_version()
            # comment is supported only in ufw version after 0.35
            if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
                cmd.append([params['comment'], "comment '%s'" % params['comment']])

            rules_dry = execute(cmd)

            if module.check_mode:

                nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))

                if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):

                    rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)

                    # ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules
                    if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
                        if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
                            changed = True
                    elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
                        if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
                            changed = True
                    elif pre_rules != rules_dry:
                        changed = True

    # Get the new state
    if module.check_mode:
        return module.exit_json(changed=changed, commands=cmds)
    else:
        post_state = execute([[ufw_bin], ['status'], ['verbose']])
        if not changed:
            post_rules = get_current_rules()
            changed = (pre_state != post_state) or (pre_rules != post_rules)
        return module.exit_json(changed=changed, commands=cmds,
                                msg=post_state.rstrip())
def main():
    """Ensure a BGP policy import/export rule exists on a PAN-OS virtual router."""
    # Build the connection helper, then the Ansible module from its spec.
    helper = get_connection(
        template=True,
        template_stack=True,
        with_state=True,
        with_classic_provider_spec=True,
        argument_spec=setup_args(),
    )
    module = AnsibleModule(
        argument_spec=helper.argument_spec,
        supports_check_mode=True,
        required_one_of=helper.required_one_of,
    )

    parent = helper.get_pandevice_parent(module)

    # Refresh the virtual router so we see the device's current config.
    router = VirtualRouter(module.params['vr_name'])
    parent.add(router)
    try:
        router.refresh()
    except PanDeviceError as e:
        module.fail_json(msg='Failed refresh: {0}'.format(e))

    bgp_config = router.find('', Bgp)
    if bgp_config is None:
        module.fail_json(msg='BGP is not configured for "{0}"'.format(router.name))

    # Every spec key maps 1:1 onto the module parameter of the same name.
    shared_keys = (
        'name', 'enable', 'match_afi', 'match_safi', 'match_route_table',
        'match_nexthop', 'match_from_peer', 'match_med',
        'match_as_path_regex', 'match_community_regex',
        'match_extended_community_regex', 'used_by', 'action',
        'action_local_preference', 'action_med', 'action_nexthop',
        'action_origin', 'action_as_path_limit', 'action_as_path_type',
        'action_as_path_prepend_times', 'action_community_type',
        'action_community_argument', 'action_extended_community_type',
        'action_extended_community_argument',
    )
    rule_spec = dict((key, module.params[key]) for key in shared_keys)

    # Add the correct rule type; import rules carry two extra fields.
    if module.params['type'] == 'import':
        rule_spec['action_dampening'] = module.params['action_dampening']
        rule_spec['action_weight'] = module.params['action_weight']
        rule_obj = BgpPolicyImportRule(**rule_spec)
    else:
        rule_obj = BgpPolicyExportRule(**rule_spec)

    # Handle address prefixes: each entry must name a prefix; 'exact' is optional.
    for prefix in module.params['address_prefix']:
        if 'name' not in prefix:
            module.fail_json(
                msg='Address prefix dict requires "name": {0}'.format(prefix))
        rule_obj.add(BgpPolicyAddressPrefix(
            to_text(prefix['name'], encoding='utf-8', errors='surrogate_or_strict'),
            None if prefix.get('exact') is None else module.boolean(prefix['exact']),
        ))

    existing_rules = bgp_config.findall(rule_obj.__class__)
    bgp_config.add(rule_obj)

    # Apply the state.
    changed, diff = helper.apply_state(rule_obj, existing_rules, module)

    # Optional commit.
    if changed and module.params['commit']:
        helper.commit(module)

    module.exit_json(changed=changed, diff=diff, msg='done')
def main():
    """Manage GlusterFS volumes: create, start, stop, delete, and tune.

    Handles peer probing, brick addition, quota management, volume options,
    and optional rebalancing; returns gathered gluster facts.
    """
    # MAIN

    global module
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['volume']),
            state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']),
            cluster=dict(type='list'),
            host=dict(type='str'),
            stripes=dict(type='int'),
            replicas=dict(type='int'),
            arbiters=dict(type='int'),
            disperses=dict(type='int'),
            redundancies=dict(type='int'),
            transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']),
            bricks=dict(type='str', aliases=['brick']),
            start_on_create=dict(type='bool', default=True),
            rebalance=dict(type='bool', default=False),
            options=dict(type='dict', default={}),
            quota=dict(type='str'),
            directory=dict(type='str'),
            force=dict(type='bool', default=False),
        ),
    )

    global glusterbin
    glusterbin = module.get_bin_path('gluster', True)

    changed = False

    action = module.params['state']
    volume_name = module.params['name']
    cluster = module.params['cluster']
    brick_paths = module.params['bricks']
    stripes = module.params['stripes']
    replicas = module.params['replicas']
    arbiters = module.params['arbiters']
    disperses = module.params['disperses']
    redundancies = module.params['redundancies']
    transport = module.params['transport']
    myhostname = module.params['host']
    start_on_create = module.boolean(module.params['start_on_create'])
    rebalance = module.boolean(module.params['rebalance'])
    force = module.boolean(module.params['force'])

    if not myhostname:
        myhostname = socket.gethostname()

    # Clean up if last element is empty. Consider that yml can look like this:
    #   cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
    if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
        cluster = cluster[0:-1]

    if cluster is None or cluster[0] == '':
        cluster = [myhostname]

    if brick_paths is not None and "," in brick_paths:
        brick_paths = brick_paths.split(",")
    else:
        brick_paths = [brick_paths]

    options = module.params['options']
    quota = module.params['quota']
    directory = module.params['directory']

    # get current state info
    peers = get_peers()
    volumes = get_volumes()
    quotas = {}
    if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
        quotas = get_quotas(volume_name, True)

    # do the work!
    if action == 'absent':
        if volume_name in volumes:
            if volumes[volume_name]['status'].lower() != 'stopped':
                stop_volume(volume_name)
            run_gluster(['volume', 'delete', volume_name])
            changed = True

    if action == 'present':
        probe_all_peers(cluster, peers, myhostname)

        # create if it doesn't exist
        if volume_name not in volumes:
            create_volume(volume_name, stripes, replicas, arbiters, disperses,
                          redundancies, transport, cluster, brick_paths, force)
            volumes = get_volumes()
            changed = True

        if volume_name in volumes:
            if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
                start_volume(volume_name)
                changed = True

            # switch bricks
            new_bricks = []
            removed_bricks = []
            all_bricks = []
            for node in cluster:
                for brick_path in brick_paths:
                    brick = '%s:%s' % (node, brick_path)
                    all_bricks.append(brick)
                    if brick not in volumes[volume_name]['bricks']:
                        new_bricks.append(brick)

            # this module does not yet remove bricks, but we check those anyways
            for brick in volumes[volume_name]['bricks']:
                if brick not in all_bricks:
                    removed_bricks.append(brick)

            if new_bricks:
                add_bricks(volume_name, new_bricks, stripes, replicas, force)
                changed = True

            # handle quotas
            if quota:
                if not volumes[volume_name]['quota']:
                    enable_quota(volume_name)
                quotas = get_quotas(volume_name, False)
                if directory not in quotas or quotas[directory] != quota:
                    set_quota(volume_name, directory, quota)
                    changed = True

            # set options
            for option in options.keys():
                if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
                    set_volume_option(volume_name, option, options[option])
                    changed = True

        else:
            module.fail_json(msg='failed to create volume %s' % volume_name)

    # FIX: was `action != 'delete'`, but 'delete' is not a valid state —
    # the guard always fired, so state=absent on an already-missing volume
    # wrongly failed with "volume not found" instead of being idempotent.
    if action != 'absent' and volume_name not in volumes:
        module.fail_json(msg='volume not found %s' % volume_name)

    if action == 'started':
        if volumes[volume_name]['status'].lower() != 'started':
            start_volume(volume_name)
            changed = True

    if action == 'stopped':
        if volumes[volume_name]['status'].lower() != 'stopped':
            stop_volume(volume_name)
            changed = True

    if changed:
        volumes = get_volumes()
        if rebalance:
            do_rebalance(volume_name)

    facts = {}
    facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas}

    module.exit_json(changed=changed, ansible_facts=facts)
def main():
    """Ensure a Webfaction website is present (created/updated) or absent."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            # You can specify an IP address or hostname.
            host=dict(required=True),
            https=dict(required=False, type='bool', default=False),
            subdomains=dict(required=False, type='list', default=[]),
            site_apps=dict(required=False, type='list', default=[]),
            login_name=dict(required=True),
            login_password=dict(required=True, no_log=True),
        ),
        supports_check_mode=True)

    site_name = module.params['name']
    site_state = module.params['state']
    site_host = module.params['host']
    # Webfaction keys websites on the server IP, so resolve the host first.
    site_ip = socket.gethostbyname(site_host)

    session_id, account = webfaction.login(
        module.params['login_name'],
        module.params['login_password'])

    # Index the account's existing sites by name for quick lookup.
    sites_by_name = dict((site['name'], site)
                         for site in webfaction.list_websites(session_id))
    existing_site = sites_by_name.get(site_name)

    result = {}

    # Here's where the real stuff happens

    if site_state == 'present':

        if existing_site:
            # A same-named site on another IP cannot be adopted safely. If we
            # wanted to allow relocation we could add a 'relocate=true' option
            # that deletes and recreates the site at the new address — a bit
            # dangerous, so for now it requires manual deletion.
            if existing_site['ip'] != site_ip:
                module.fail_json(
                    msg="Website already exists with a different IP address. Please fix by hand."
                )

            # Same host and same key parameters: nothing needs to be done.
            if (existing_site['https'] == module.boolean(module.params['https'])
                    and set(existing_site['subdomains']) == set(module.params['subdomains'])
                    and dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
                module.exit_json(changed=False)

        call_args = [
            session_id,
            site_name,
            site_ip,
            module.boolean(module.params['https']),
            module.params['subdomains'],
        ]
        for app_pair in module.params['site_apps']:
            call_args.append((app_pair[0], app_pair[1]))

        if not module.check_mode:
            # Not a dry run: update the site when it exists, else create it.
            if existing_site:
                result.update(webfaction.update_website(*call_args))
            else:
                result.update(webfaction.create_website(*call_args))

    elif site_state == 'absent':

        # Already gone: nothing changed.
        if not existing_site:
            module.exit_json(changed=False)

        if not module.check_mode:
            # Not a dry run: actually delete the site.
            result.update(webfaction.delete_website(session_id, site_name, site_ip))

    else:
        module.fail_json(msg="Unknown state specified: {}".format(site_state))

    module.exit_json(changed=True, result=result)
def main():
    """Manage the ufw firewall (legacy variant): state, defaults, logging, rules.

    Requires at least one of the command arguments (state, default, rule,
    logging); detects change by diffing 'ufw status verbose' and the rule
    files before and after execution.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
            default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
            logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
            direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
            delete=dict(type='bool', default=False),
            route=dict(type='bool', default=False),
            insert=dict(type='str'),
            rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
            interface=dict(type='str', aliases=['if']),
            log=dict(type='bool', default=False),
            from_ip=dict(type='str', default='any', aliases=['from', 'src']),
            from_port=dict(type='str'),
            to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
            to_port=dict(type='str', aliases=['port']),
            proto=dict(type='str', aliases=['protocol'],
                       choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp']),
            app=dict(type='str', aliases=['name']),
            comment=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['app', 'proto', 'logging']
        ],
    )

    cmds = []

    def execute(cmd):
        # `cmd` is a list of [condition, text] pairs; keep pairs whose first
        # element is truthy and join their last elements into the command.
        cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))

        cmds.append(cmd)
        (rc, out, err) = module.run_command(cmd)

        if rc != 0:
            module.fail_json(msg=err or out)

    def ufw_version():
        """
        Returns the major and minor version of ufw installed on the system.
        """
        rc, out, err = module.run_command("%s --version" % ufw_bin)
        if rc != 0:
            module.fail_json(
                msg="Failed to get ufw version.", rc=rc, out=out, err=err
            )

        lines = [x for x in out.split('\n') if x.strip() != '']
        if len(lines) == 0:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
        if matches is None:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        # Convert version to numbers
        major = int(matches.group(1))
        minor = int(matches.group(2))
        rev = 0
        if matches.group(3) is not None:
            rev = int(matches.group(3))

        return major, minor, rev

    params = module.params

    # Ensure at least one of the command arguments are given
    command_keys = ['state', 'default', 'rule', 'logging']
    commands = dict((key, params[key]) for key in command_keys if params[key])
    if len(commands) < 1:
        # FIX: the message previously formatted `commands`, which is always
        # the empty dict here; list the possible argument names instead.
        module.fail_json(msg="Not any of the command arguments %s given" % command_keys)

    if (params['interface'] is not None and params['direction'] is None):
        module.fail_json(msg="Direction must be specified when creating a rule on an interface")

    # Ensure ufw is available
    ufw_bin = module.get_bin_path('ufw', True)

    # Save the pre state and rules in order to recognize changes
    (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")

    # Execute commands
    for (command, value) in commands.items():
        cmd = [[ufw_bin], [module.check_mode, '--dry-run']]

        if command == 'state':
            # 'reset' has no meaningful dry-run: skip it in check mode.
            if value == 'reset' and module.check_mode:
                continue
            states = {'enabled': 'enable', 'disabled': 'disable',
                      'reloaded': 'reload', 'reset': 'reset'}
            execute(cmd + [['-f'], [states[value]]])

        elif command == 'logging':
            execute(cmd + [[command], [value]])

        elif command == 'default':
            if params['direction'] not in ['outgoing', 'incoming', 'routed']:
                module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed".')
            execute(cmd + [[command], [value], [params['direction']]])

        elif command == 'rule':
            if params['direction'] not in ['in', 'out', None]:
                module.fail_json(msg='For rules, direction must be one of "in" and "out".')

            # Rules are constructed according to the long format
            #
            # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
            #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
            #     [proto protocol] [app application] [comment COMMENT]
            cmd.append([module.boolean(params['route']), 'route'])
            cmd.append([module.boolean(params['delete']), 'delete'])
            cmd.append([params['insert'], "insert %s" % params['insert']])
            cmd.append([value])
            cmd.append([params['direction'], "%s" % params['direction']])
            cmd.append([params['interface'], "on %s" % params['interface']])
            cmd.append([module.boolean(params['log']), 'log'])

            for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
                                    ('to_ip', "to %s"), ('to_port', "port %s"),
                                    ('proto', "proto %s"), ('app', "app '%s'")]:
                value = params[key]
                cmd.append([value, template % (value)])

            ufw_major, ufw_minor, _ = ufw_version()
            # comment is supported only in ufw version after 0.35
            if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
                cmd.append([params['comment'], "comment '%s'" % params['comment']])

            execute(cmd)

    # Get the new state
    (_, post_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")
    changed = (pre_state != post_state) or (pre_rules != post_rules)

    return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
def main():
    """Create, resize, or remove an LVM volume group.

    state=present ensures the VG exists and spans exactly the given PVs
    (creating PVs and extending/reducing the VG as needed); state=absent
    removes the VG (force required when it still holds logical volumes).
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(required=True),
            pvs=dict(type='list'),
            pesize=dict(type='int', default=4),
            pv_options=dict(default=''),
            vg_options=dict(default=''),
            state=dict(choices=["absent", "present"], default='present'),
            force=dict(type='bool', default='no'),
        ),
        supports_check_mode=True,
    )

    vg = module.params['vg']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    pesize = module.params['pesize']
    pvoptions = module.params['pv_options'].split()
    vgoptions = module.params['vg_options'].split()

    dev_list = []
    if module.params['pvs']:
        dev_list = module.params['pvs']
    elif state == 'present':
        module.fail_json(msg="No physical volumes given.")

    # LVM always uses real paths not symlinks so replace symlinks with actual path
    for idx, dev in enumerate(dev_list):
        dev_list[idx] = os.path.realpath(dev)

    if state == 'present':
        # check given devices
        for test_dev in dev_list:
            if not os.path.exists(test_dev):
                module.fail_json(msg="Device %s not found." % test_dev)

        # get pv list
        pvs_cmd = module.get_bin_path('pvs', True)
        rc, current_pvs, err = module.run_command(
            "%s --noheadings -o pv_name,vg_name --separator ';'" % pvs_cmd)
        if rc != 0:
            module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)

        # check pv for devices: refuse to steal a PV from another VG
        pvs = parse_pvs(module, current_pvs)
        used_pvs = [pv for pv in pvs
                    if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
        if used_pvs:
            module.fail_json(msg="Device %s is already in %s volume group."
                                 % (used_pvs[0]['name'], used_pvs[0]['vg_name']))

    vgs_cmd = module.get_bin_path('vgs', True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
    if rc != 0:
        module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)

    changed = False

    # Find the requested VG in the parsed 'vgs' output, if it exists.
    vgs = parse_vgs(current_vgs)
    for test_vg in vgs:
        if test_vg['name'] == vg:
            this_vg = test_vg
            break
    else:
        this_vg = None

    if this_vg is None:
        if state == 'present':
            # create VG
            if module.check_mode:
                changed = True
            else:
                # create PV
                pvcreate_cmd = module.get_bin_path('pvcreate', True)
                for current_dev in dev_list:
                    rc, _, err = module.run_command(
                        [pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Creating physical volume '%s' failed" % current_dev,
                                         rc=rc, err=err)
                # FIX: require the vgcreate binary to exist (was missing the
                # required=True flag, unlike every other get_bin_path call, so
                # a missing tool produced a confusing run_command failure).
                vgcreate_cmd = module.get_bin_path('vgcreate', True)
                rc, _, err = module.run_command(
                    [vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg] + dev_list)
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Creating volume group '%s' failed" % vg,
                                     rc=rc, err=err)
    else:
        if state == 'absent':
            if module.check_mode:
                module.exit_json(changed=True)
            else:
                if this_vg['lv_count'] == 0 or force:
                    # remove VG
                    vgremove_cmd = module.get_bin_path('vgremove', True)
                    rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
                    if rc == 0:
                        module.exit_json(changed=True)
                    else:
                        module.fail_json(msg="Failed to remove volume group %s" % (vg),
                                         rc=rc, err=err)
                else:
                    module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg))

        # resize VG: reconcile the VG's current PVs against the requested list
        current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
        devs_to_remove = list(set(current_devs) - set(dev_list))
        devs_to_add = list(set(dev_list) - set(current_devs))

        if devs_to_add or devs_to_remove:
            if module.check_mode:
                changed = True
            else:
                if devs_to_add:
                    devs_to_add_string = ' '.join(devs_to_add)
                    # create PV
                    pvcreate_cmd = module.get_bin_path('pvcreate', True)
                    for current_dev in devs_to_add:
                        rc, _, err = module.run_command(
                            [pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
                        if rc == 0:
                            changed = True
                        else:
                            module.fail_json(msg="Creating physical volume '%s' failed" % current_dev,
                                             rc=rc, err=err)
                    # add PV to our VG
                    vgextend_cmd = module.get_bin_path('vgextend', True)
                    rc, _, err = module.run_command(
                        "%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string),
                                         rc=rc, err=err)

                # remove some PV from our VG
                if devs_to_remove:
                    devs_to_remove_string = ' '.join(devs_to_remove)
                    vgreduce_cmd = module.get_bin_path('vgreduce', True)
                    rc, _, err = module.run_command(
                        "%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
                    if rc == 0:
                        changed = True
                    else:
                        module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string),
                                         rc=rc, err=err)

    module.exit_json(changed=changed)
def main(): """Main function""" module = AnsibleModule(argument_spec=dict( state=dict(default='present', choices=['present', 'absent'], type='str'), record=dict(required=True, aliases=['name'], type='str'), zone=dict(type='str'), zone_id=dict(type='str'), type=dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'), record_data=dict(aliases=['value'], type='list'), ttl=dict(default=300, type='int'), overwrite=dict(default=False, type='bool'), service_account_email=dict(type='str'), pem_file=dict(type='path'), credentials_file=dict(type='path'), project_id=dict(type='str')), required_if=[('state', 'present', ['record_data']), ('overwrite', False, ['record_data'])], required_one_of=[['zone', 'zone_id']], supports_check_mode=True) _sanity_check(module) record_name = module.params['record'] record_type = module.params['type'] state = module.params['state'] ttl = module.params['ttl'] zone_name = module.params['zone'] zone_id = module.params['zone_id'] json_output = dict(state=state, record=record_name, zone=zone_name, zone_id=zone_id, type=record_type, record_data=module.params['record_data'], ttl=ttl, overwrite=module.boolean(module.params['overwrite'])) # Google Cloud DNS wants the trailing dot on all DNS names. if zone_name is not None and zone_name[-1] != '.': zone_name = zone_name + '.' if record_name[-1] != '.': record_name = record_name + '.' # Build a connection object that we can use to connect with Google Cloud # DNS. gcdns = gcdns_connect(module, provider=PROVIDER) # We need to check that the zone we're creating a record for actually # exists. zone = _get_zone(gcdns, zone_name, zone_id) if zone is None and zone_name is not None: module.fail_json(msg='zone name was not found: %s' % zone_name, changed=False) elif zone is None and zone_id is not None: module.fail_json(msg='zone id was not found: %s' % zone_id, changed=False) # Populate the returns with the actual zone information. 
json_output['zone'] = zone.domain json_output['zone_id'] = zone.id # We also need to check if the record we want to create or remove actually # exists. try: record = _get_record(gcdns, zone, record_type, record_name) except InvalidRequestError: # We gave Google Cloud DNS an invalid DNS record name. module.fail_json(msg='record name is invalid: %s' % record_name, changed=False) _additional_sanity_checks(module, zone) diff = dict() # Build the 'before' diff if record is None: diff['before'] = '' diff['before_header'] = '<absent>' else: diff['before'] = dict(record=record.data['name'], type=record.data['type'], record_data=record.data['rrdatas'], ttl=record.data['ttl']) diff['before_header'] = "%s:%s" % (record_type, record_name) # Create, remove, or modify the record. if state == 'present': diff['after'] = dict(record=record_name, type=record_type, record_data=module.params['record_data'], ttl=ttl) diff['after_header'] = "%s:%s" % (record_type, record_name) changed = create_record(module, gcdns, zone, record) elif state == 'absent': diff['after'] = '' diff['after_header'] = '<absent>' changed = remove_record(module, gcdns, record) module.exit_json(changed=changed, diff=diff, **json_output)
def main():
    """Create, resize, (de)activate, or remove an LVM logical volume.

    Parses the module arguments, inspects the current state of the volume
    group and logical volumes via the LVM CLI tools, and then creates,
    resizes, removes, or changes the activation state of the requested LV.
    Exits via module.exit_json()/fail_json(); never returns normally.
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(required=True),
            lv=dict(required=True),
            size=dict(type='str'),
            opts=dict(type='str'),
            state=dict(choices=["absent", "present"], default='present'),
            force=dict(type='bool', default='no'),
            shrink=dict(type='bool', default='yes'),
            active=dict(type='bool', default='yes'),
            snapshot=dict(type='str', default=None),
            pvs=dict(type='str')
        ),
        supports_check_mode=True,
    )

    # Determine if the "--yes" option should be used
    version_found = get_lvm_version(module)
    if version_found is None:
        module.fail_json(msg="Failed to get LVM version number")
    version_yesopt = mkversion(2, 2, 99)  # First LVM with the "--yes" option
    if version_found >= version_yesopt:
        yesopt = "--yes"
    else:
        yesopt = ""

    vg = module.params['vg']
    lv = module.params['lv']
    size = module.params['size']
    opts = module.params['opts']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    shrink = module.boolean(module.params['shrink'])
    active = module.boolean(module.params['active'])
    size_opt = 'L'   # lvcreate/lvresize flag: -L takes absolute sizes
    size_unit = 'm'  # default unit: megabytes
    snapshot = module.params['snapshot']
    pvs = module.params['pvs']

    # The LVM CLI takes physical volumes space-separated.
    if pvs is None:
        pvs = ""
    else:
        pvs = pvs.replace(",", " ")

    if opts is None:
        opts = ""

    # Add --test option when running in check-mode
    if module.check_mode:
        test_opt = ' --test'
    else:
        test_opt = ''

    if size:
        # LVCREATE(8) -l --extents option with percentage
        if '%' in size:
            size_parts = size.split('%', 1)
            size_percent = int(size_parts[0])
            if size_percent > 100:
                module.fail_json(msg="Size percentage cannot be larger than 100%")
            size_whole = size_parts[1]
            if size_whole == 'ORIGIN':
                module.fail_json(msg="Snapshot Volumes are not supported")
            elif size_whole not in ['VG', 'PVS', 'FREE']:
                module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
            size_opt = 'l'   # -l takes extents / percentages
            size_unit = ''

        # NOTE: was "if not '%' in size" — same parse, PEP 8 (E713) form.
        if '%' not in size:
            # LVCREATE(8) -L --size option unit
            if size[-1].lower() in 'bskmgtpe':
                size_unit = size[-1].lower()
                size = size[0:-1]

            try:
                float(size)
                # float() accepts forms like '.5' or 'inf'; require a
                # leading digit so only plain numbers pass.
                if not size[0].isdigit():
                    raise ValueError()
            except ValueError:
                module.fail_json(msg="Bad size specification of '%s'" % size)

    # when no unit, megabytes by default
    if size_opt == 'l':
        unit = 'm'
    else:
        unit = size_unit

    # Get information on volume group requested
    vgs_cmd = module.get_bin_path("vgs", required=True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            # Nothing to remove if the whole VG is absent.
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    vgs = parse_vgs(current_vgs)
    this_vg = vgs[0]

    # Get information on logical volume requested
    lvs_cmd = module.get_bin_path("lvs", required=True)
    rc, current_lvs, err = module.run_command(
        "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    changed = False

    lvs = parse_lvs(current_lvs)

    # When creating a snapshot, the snapshot name (not the origin LV) is the
    # volume whose existence we must check.
    if snapshot is None:
        check_lv = lv
    else:
        check_lv = snapshot

    # Accept either a bare LV name or a 'vg/lv' style name.
    for test_lv in lvs:
        if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
            this_lv = test_lv
            break
    else:
        this_lv = None

    if state == 'present' and not size:
        if this_lv is None:
            module.fail_json(msg="No size given.")

    msg = ''
    if this_lv is None:
        if state == 'present':
            # create LV
            lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
            if snapshot is not None:
                cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
            else:
                cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
            rc, _, err = module.run_command(cmd)
            if rc == 0:
                changed = True
            else:
                module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
    else:
        if state == 'absent':
            # remove LV
            if not force:
                module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))
            lvremove_cmd = module.get_bin_path("lvremove", required=True)
            rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)

        elif not size:
            # LV exists and no size was requested: nothing to resize.
            pass

        elif size_opt == 'l':
            # Resize LV based on % value
            tool = None
            size_free = this_vg['free']
            if size_whole == 'VG' or size_whole == 'PVS':
                size_requested = size_percent * this_vg['size'] / 100
            else:  # size_whole == 'FREE':
                size_requested = size_percent * this_vg['free'] / 100
            # A '+' prefix means "grow by", i.e. relative to the current size.
            if '+' in size:
                size_requested += this_lv['size']
            if this_lv['size'] < size_requested:
                if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
                    tool = module.get_bin_path("lvextend", required=True)
                else:
                    module.fail_json(
                        msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
                            (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
                    )
            elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']:  # more than an extent too large
                if size_requested == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                elif not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                    msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

        else:
            # resize LV based on absolute values
            tool = None
            if int(size) > this_lv['size']:
                tool = module.get_bin_path("lvextend", required=True)
            elif shrink and int(size) < this_lv['size']:
                if int(size) == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                if not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

    # Finally align the activation state with the 'active' parameter.  The
    # 'changed' flag reflects either a resize above or an activation flip.
    if this_lv is not None:
        if active:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
        else:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)

    module.exit_json(changed=changed, msg=msg)
def main():
    """Add, modify, or remove a MySQL user and its privileges.

    Exits via module.exit_json()/fail_json(); never returns normally.
    """
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(type='str'),
            login_password=dict(type='str', no_log=True),
            login_host=dict(type='str', default='localhost'),
            login_port=dict(type='int', default=3306),
            login_unix_socket=dict(type='str'),
            user=dict(type='str', required=True, aliases=['name']),
            password=dict(type='str', no_log=True),
            encrypted=dict(type='bool', default=False),
            host=dict(type='str', default='localhost'),
            host_all=dict(type="bool", default=False),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            priv=dict(type='str'),
            append_privs=dict(type='bool', default=False),
            check_implicit_admin=dict(type='bool', default=False),
            update_password=dict(type='str', default='always', choices=['always', 'on_create']),
            connect_timeout=dict(type='int', default=30),
            config_file=dict(type='path', default='~/.my.cnf'),
            sql_log_bin=dict(type='bool', default=True),
            ssl_cert=dict(type='path'),
            ssl_key=dict(type='path'),
            ssl_ca=dict(type='path'),
        ),
        supports_check_mode=True,
    )
    login_user = module.params["login_user"]
    login_password = module.params["login_password"]
    user = module.params["user"]
    password = module.params["password"]
    encrypted = module.boolean(module.params["encrypted"])
    # MySQL host patterns are matched case-insensitively; normalize here.
    host = module.params["host"].lower()
    host_all = module.params["host_all"]
    state = module.params["state"]
    priv = module.params["priv"]
    check_implicit_admin = module.params['check_implicit_admin']
    connect_timeout = module.params['connect_timeout']
    config_file = module.params['config_file']
    append_privs = module.boolean(module.params["append_privs"])
    update_password = module.params['update_password']
    ssl_cert = module.params["ssl_cert"]
    ssl_key = module.params["ssl_key"]
    ssl_ca = module.params["ssl_ca"]
    db = 'mysql'  # user management always happens against the 'mysql' database
    sql_log_bin = module.params["sql_log_bin"]

    if mysql_driver is None:
        module.fail_json(msg=mysql_driver_fail_msg)

    cursor = None
    try:
        if check_implicit_admin:
            # Best-effort probe for a passwordless root login; any failure
            # here is deliberately ignored and we fall back to the supplied
            # credentials below.
            try:
                cursor = mysql_connect(module, 'root', '', config_file, ssl_cert, ssl_key, ssl_ca, db,
                                       connect_timeout=connect_timeout)
            except Exception:
                pass

        if not cursor:
            cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, db,
                                   connect_timeout=connect_timeout)
    except Exception as e:
        module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
                             "Exception message: %s" % (config_file, to_native(e)))

    # Optionally keep these statements out of the binary log (replication).
    if not sql_log_bin:
        cursor.execute("SET SQL_LOG_BIN=0;")

    if priv is not None:
        try:
            mode = get_mode(cursor)
        except Exception as e:
            module.fail_json(msg=to_native(e))
        try:
            # Convert the module's "db.table:PRIV" string into the internal
            # privilege representation for the detected SQL mode.
            priv = privileges_unpack(priv, mode)
        except Exception as e:
            module.fail_json(msg="invalid privileges string: %s" % to_native(e))

    if state == "present":
        if user_exists(cursor, user, host, host_all):
            try:
                # update_password=on_create leaves an existing password alone.
                if update_password == 'always':
                    changed = user_mod(cursor, user, host, host_all, password, encrypted, priv, append_privs, module)
                else:
                    changed = user_mod(cursor, user, host, host_all, None, encrypted, priv, append_privs, module)

            except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
                module.fail_json(msg=to_native(e))
        else:
            if host_all:
                # Creating a user requires a concrete host pattern.
                module.fail_json(msg="host_all parameter cannot be used when adding a user")
            try:
                changed = user_add(cursor, user, host, host_all, password, encrypted, priv, module.check_mode)
            except (SQLParseError, InvalidPrivsError, mysql_driver.Error) as e:
                module.fail_json(msg=to_native(e))
    elif state == "absent":
        if user_exists(cursor, user, host, host_all):
            changed = user_delete(cursor, user, host, host_all, module.check_mode)
        else:
            changed = False
    module.exit_json(changed=changed, user=user)
def main():
    """Drive Vagrant VMs, either via state=up/halt or a legacy 'cmd' verb.

    Sets the module-level VAGRANT_* path globals as a side effect, then
    dispatches to a VagrantWrapper instance.  Exits via module.exit_json()
    /fail_json() in every branch except the final fall-through.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(),
            cmd=dict(required=False, aliases=['command']),
            box_name=dict(required=False, aliases=['image']),
            box_path=dict(),
            vm_name=dict(),
            forward_ports=dict(),
            count=dict(default=1, type='int'),
            vagrant_root=dict(default='.'),
            log=dict(default=False, type='bool'),
            config_code=dict(default=""),  # "" for None as i'm not sure of the None behavior python <-> json
            share_folder=dict(default=""),
            share_mount=dict(default='/vagrant'),
            provider=dict(default="virtualbox")
        )
    )

    state = module.params.get('state')
    cmd = module.params.get('cmd')
    box_name = module.params.get('box_name')
    box_path = module.params.get('box_path')
    vm_name = module.params.get('vm_name')
    forward_ports = module.params.get('forward_ports')
    vagrant_root = module.params.get('vagrant_root')
    log = module.boolean(module.params.get('log'))
    config_code = module.params.get('config_code')
    share_folder = module.params.get('share_folder')
    share_mount = module.params.get('share_mount')
    provider = module.params.get('provider')
    count = module.params.get('count')

    # All Vagrant state for this play lives under <vagrant_root>/.vagrant;
    # the path globals are consumed by VagrantWrapper and its helpers.
    global VAGRANT_ROOT
    VAGRANT_ROOT = os.path.abspath(os.path.join(vagrant_root, ".vagrant"))
    if not os.path.exists(VAGRANT_ROOT):
        os.makedirs(VAGRANT_ROOT)

    global VAGRANT_FILE
    VAGRANT_FILE = VAGRANT_ROOT + "/Vagrantfile"

    global VAGRANT_DICT_FILE
    VAGRANT_DICT_FILE = VAGRANT_ROOT + "/Vagrantfile.json"

    global VAGRANT_LOCKFILE
    VAGRANT_LOCKFILE = VAGRANT_ROOT + "/.vagrant-lock"

    global VAGRANT_LOGFILE
    VAGRANT_LOGFILE = VAGRANT_ROOT + '/vagrant.log'

    # forward_ports arrives as a comma-separated string; normalize to a list.
    if forward_ports is not None:
        forward_ports = forward_ports.split(',')
    if forward_ports is None:
        forward_ports = []

    # Initialize vagrant
    vgw = VagrantWrapper(
        module=module,
        log=log,
        config_code=config_code,
        share_folder=share_folder,
        share_mount=share_mount,
        provider=provider
    )

    #
    # Check if we are being invoked under an idempotency idiom of "state=present" or "state=absent"
    #
    try:
        if state is not None:

            if state != 'halt' and state != 'up':
                module.fail_json(msg="State must be \"halt\" or \"up\" in vagrant module.")

            if state == 'up':
                changd, insts = vgw.up(box_name, vm_name, count, box_path, forward_ports)
                module.exit_json(changed=changd, instances=insts)

            if state == 'halt':
                changd = vgw.halt(vm_name)
                module.exit_json(changed=changd, status=vgw.status(vm_name))

        #
        # Main command tree for old style invocation
        #
        else:
            if cmd == 'up':
                # print "I am running cmd up"
                # Defensive: count has a default of 1, but guard anyway in
                # case a null was passed explicitly.
                if count is None:
                    count = 1
                (changd, insts) = vgw.up(box_name, vm_name, count, box_path, forward_ports)
                module.exit_json(changed=changd, instances=insts)

            elif cmd == 'status':
                # if vm_name is None:
                #     module.fail_json(msg = "Error: you must specify a vm_name when calling status." )
                (changd, result) = vgw.status(vm_name)
                module.exit_json(changed=changd, status=result)

            elif cmd == "config" or cmd == "conf":
                if vm_name is None:
                    module.fail_json(msg="Error: you must specify a vm_name when calling config.")
                (changd, cnf) = vgw.config(vm_name)
                module.exit_json(changed=changd, config=cnf)

            elif cmd == 'ssh_command':
                if vm_name is None:
                    module.fail_json(msg="Error: you must specify a vm_name when calling ssh_command.")
                # Build an ssh invocation from the VM's vagrant ssh-config.
                (changd, cnf) = vgw.config(vm_name)
                sshcmd = ("ssh %s@%s -p %s -i %s "
                          "-o StrictHostKeyChecking=no "
                          "-o NoHostAuthenticationForLocalhost=yes "
                          "-o IdentitiesOnly=yes"
                          ) % (
                    cnf[vm_name][0]["User"],
                    cnf[vm_name][0]["HostName"],
                    cnf[vm_name][0]["Port"],
                    cnf[vm_name][0]["IdentityFile"])
                sshmsg = "To connect to %s, execute in your shell the given command" % (vm_name)
                module.exit_json(changed=changd, msg=sshmsg, ssh_command=sshcmd)

            # elif cmd == "load_key":
            #     if vm_name is None:
            #         module.fail_json(msg = "Error: you must specify a vm_name when calling load_key." )
            #
            #     cnf = vg.config(vm_name)
            #     keyfile=cnf["IdentityFile"]
            #
            #     # Get loaded keys ...
            #     loaded_keys = subprocess.check_output(["ssh-add", "-l"])
            #     module.exit_json(changed = True, msg = loaded_keys)
            #
            #     subprocess.call(["ssh-add", keyfile])
            #
            #     module.exit_json(changed = True, msg = sshmsg, SshCommand = sshcmd)

            elif cmd == 'halt':
                (changd, stats) = vgw.halt(vm_name)
                module.exit_json(changed=changd, status=stats)

            elif cmd == 'destroy':
                changd = vgw.destroy(vm_name)
                module.exit_json(changed=changd, status=vgw.status(vm_name))

            elif cmd == 'clear':
                changd = vgw.clear()
                module.exit_json(changed=changd)

            else:
                module.fail_json(msg="Unknown vagrant subcommand: \"%s\"." % (cmd))

    except subprocess.CalledProcessError as e:
        # Point the user at the log file (or at how to enable it) on failure.
        module.fail_json(msg="Vagrant command failed: %s\n%s" % (
            e,
            'Details in: ' + VAGRANT_LOGFILE if log else 'Add "log: true" option to find log in: ' + VAGRANT_LOGFILE
        ))
    # except Exception as e:
    #     module.fail_json(msg = e.__str__())
    module.exit_json(status="success")