def main():
    """Speak a message aloud using the 'say' (macOS) or 'espeak' tool."""
    module = AnsibleModule(
        argument_spec=dict(
            msg=dict(required=True),
            voice=dict(required=False),
        ),
        supports_check_mode=True,
    )

    text = module.params['msg']
    chosen_voice = module.params['voice']

    tool = module.get_bin_path('say')
    if tool:
        if get_platform() != 'Darwin':
            # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter
            chosen_voice = None
            module.warn("'say' executable found but system is '%s': ignoring voice parameter" % get_platform())
    else:
        tool = module.get_bin_path('espeak')
        if not tool:
            module.fail_json(msg="Unable to find either 'say' or 'espeak' executable")

    if module.check_mode:
        module.exit_json(msg=text, changed=False)

    say(module, tool, text, chosen_voice)
    module.exit_json(msg=text, changed=True)
def main():
    """Load or unload a kernel module via modprobe.

    Fix over the original: /proc/modules is now opened with a context
    manager so the file descriptor cannot leak if iteration raises.
    """
    module = AnsibleModule(
        argument_spec={
            'name': {'required': True},
            'state': {'default': 'present', 'choices': ['present', 'absent']},
            'params': {'default': ''},
        },
        supports_check_mode=True,
    )

    args = {
        'changed': False,
        'failed': False,
        'name': module.params['name'],
        'state': module.params['state'],
        'params': module.params['params'],
    }

    # Check if module is present. The kernel reports '_' where userland often
    # writes '-'; the trailing space guards against prefix matches
    # (e.g. 'snd' vs 'snd_hda').
    try:
        present = False
        module_name = args['name'].replace('-', '_') + ' '
        with open('/proc/modules') as modules:
            for line in modules:
                if line.startswith(module_name):
                    present = True
                    break
    except IOError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **args)

    # Check only; don't modify
    if module.check_mode:
        if args['state'] == 'present' and not present:
            changed = True
        elif args['state'] == 'absent' and present:
            changed = True
        else:
            changed = False
        module.exit_json(changed=changed)

    # Add/remove module as needed
    if args['state'] == 'present':
        if not present:
            command = [module.get_bin_path('modprobe', True), args['name']]
            command.extend(shlex.split(args['params']))
            rc, _, err = module.run_command(command)
            if rc != 0:
                module.fail_json(msg=err, **args)
            args['changed'] = True
    elif args['state'] == 'absent':
        if present:
            rc, _, err = module.run_command([module.get_bin_path('modprobe', True), '-r', args['name']])
            if rc != 0:
                module.fail_json(msg=err, **args)
            args['changed'] = True

    module.exit_json(**args)
def main():
    """Returns, calling appropriate command"""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(default=None, aliases=["pkg", "package"], type="list"),
            state=dict(default="present", choices=["present", "installed", "latest", "absent", "removed"]),
            recurse=dict(default=False, type="bool"),
            force=dict(default=False, type="bool"),
            upgrade=dict(default=False, type="bool"),
            update_cache=dict(default=True, aliases=["update-cache"], type="bool"),
        ),
        required_one_of=[["name", "update_cache", "upgrade"]],
        supports_check_mode=True,
    )

    # Resolve all xbps tool paths up front.
    xbps_path = {
        "install": module.get_bin_path("xbps-install", True),
        "query": module.get_bin_path("xbps-query", True),
        "remove": module.get_bin_path("xbps-remove", True),
    }

    if not os.path.exists(xbps_path["install"]):
        module.fail_json(msg="cannot find xbps, in path %s" % (xbps_path["install"]))

    p = module.params

    # normalize the state parameter
    if p["state"] in ("present", "installed"):
        p["state"] = "present"
    elif p["state"] in ("absent", "removed"):
        p["state"] = "absent"

    if p["update_cache"] and not module.check_mode:
        changed = update_package_db(module, xbps_path)
        if p["name"] is None and not p["upgrade"]:
            # Cache refresh was the only requested action.
            if changed:
                module.exit_json(changed=True, msg="Updated the package master lists")
            module.exit_json(changed=False, msg="Package list already up to date")

    if p["update_cache"] and module.check_mode and not (p["name"] or p["upgrade"]):
        module.exit_json(changed=True, msg="Would have updated the package cache")

    if p["upgrade"]:
        upgrade(module, xbps_path)

    if p["name"]:
        pkgs = p["name"]
        if module.check_mode:
            check_packages(module, xbps_path, pkgs, p["state"])
        if p["state"] in ("present", "latest"):
            install_packages(module, xbps_path, p["state"], pkgs)
        elif p["state"] == "absent":
            remove_packages(module, xbps_path, pkgs)
def main():
    """Load or unload a kernel module, optionally with parameters.

    Fix over the original: /proc/modules is read through a context manager
    so the handle is always closed, even if an error interrupts the scan.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            params=dict(type='str', default=''),
        ),
        supports_check_mode=True,
    )

    name = module.params['name']
    params = module.params['params']
    state = module.params['state']

    # FIXME: Adding all parameters as result values is useless
    result = dict(
        changed=False,
        name=name,
        params=params,
        state=state,
    )

    # Check if module is present. The kernel uses '_' where '-' may have been
    # given; the trailing space prevents prefix collisions between modules.
    try:
        present = False
        module_name = name.replace('-', '_') + ' '
        with open('/proc/modules') as modules:
            for line in modules:
                if line.startswith(module_name):
                    present = True
                    break
    except IOError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **result)

    # Add/remove module as needed
    if state == 'present':
        if not present:
            if not module.check_mode:
                command = [module.get_bin_path('modprobe', True), name]
                command.extend(shlex.split(params))
                rc, out, err = module.run_command(command)
                if rc != 0:
                    module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
            result['changed'] = True
    elif state == 'absent':
        if present:
            if not module.check_mode:
                rc, out, err = module.run_command([module.get_bin_path('modprobe', True), '-r', name])
                if rc != 0:
                    module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **result)
            result['changed'] = True

    module.exit_json(**result)
def main():
    """Dispatch slackpkg operations based on the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default="installed", choices=['installed', 'removed', 'absent', 'present', 'latest']),
            name=dict(aliases=["pkg"], required=True, type='list'),
            update_cache=dict(default=False, aliases=["update-cache"], type='bool'),
        ),
        supports_check_mode=True)

    slackpkg_path = module.get_bin_path('slackpkg', True)

    params = module.params
    pkgs = params['name']
    state = params['state']

    if params["update_cache"]:
        update_cache(module, slackpkg_path)

    # Map the requested state onto the matching slackpkg action.
    if state == 'latest':
        upgrade_packages(module, slackpkg_path, pkgs)
    elif state in ('present', 'installed'):
        install_packages(module, slackpkg_path, pkgs)
    elif state in ('removed', 'absent'):
        remove_packages(module, slackpkg_path, pkgs)
def main():
    """Enable/disable awall policies and optionally activate the rule set."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='enabled', choices=['enabled', 'disabled']),
            name=dict(type='list'),
            activate=dict(default=False, type='bool'),
        ),
        required_one_of=[['name', 'activate']],
        supports_check_mode=True
    )

    global AWALL_PATH
    AWALL_PATH = module.get_bin_path('awall', required=True)

    params = module.params
    policies = params['name']
    do_activate = params['activate']

    if policies:
        # The policy helpers exit the module themselves on completion.
        if params['state'] == 'enabled':
            enable_policy(module, policies, do_activate)
        else:
            disable_policy(module, policies, do_activate)

    if do_activate:
        if not module.check_mode:
            activate(module)
        module.exit_json(changed=True, msg="activated awall rules")

    # Neither a policy list nor 'activate' produced an exit above.
    module.fail_json(msg="no action defined")
def main():
    """Minimal git deploy: ensure *repo* is cloned/updated at *dest*.

    Bug fix: the "dest directory is not empty" failure path referenced the
    undefined name ``self.dest`` (this is a plain function, not a method),
    raising NameError instead of failing cleanly. It now uses the local
    ``dest``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(default=None, required=True, type="path"),
            repo=dict(default=None, required=True, type="str"),
        ),
        supports_check_mode=False
    )

    dest = os.path.abspath(module.params['dest'])
    repo = module.params['repo']
    git_path = module.get_bin_path('git', True)

    is_dest = os.path.exists(dest)
    result = {"changed": False}

    # Force predictable (English) git output for downstream parsing.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    if is_dest and not os.path.exists(os.path.join(dest, ".git", 'config')):
        # Directory exists but is not a git checkout: refuse to clobber it.
        module.fail_json(msg="dest directory ({}) is not empty".format(dest), **result)
    elif not is_dest:
        clone(git_path, module, repo, dest)
        result.update(changed=True)
    elif has_local_mods(module, git_path, dest):
        result.update(show_warning="Local modifications exist in repository ({})".format(dest))
    elif get_head_branch(git_path, module, dest) != 'master':
        result.update(show_warning="HEAD branch not a master in repository ({})".format(dest))
    elif pull_master(git_path, module, dest):
        result.update(changed=True)

    module.exit_json(**result)
def main():
    """Install or remove OpenWrt packages with opkg."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=["pkg"], required=True),
            state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
            force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite",
                                            "downgrade", "space", "postinstall", "remove",
                                            "checksum", "removal-of-dependent-packages"]),
            update_cache=dict(default="no", aliases=["update-cache"], type='bool')
        )
    )

    opkg_path = module.get_bin_path('opkg', True, ['/bin'])

    params = module.params

    if params["update_cache"]:
        update_package_db(module, opkg_path)

    # 'name' is a comma-separated list of one or more packages.
    requested = params["name"].split(",")

    state = params["state"]
    if state in ("present", "installed"):
        install_packages(module, opkg_path, requested)
    elif state in ("absent", "removed"):
        remove_packages(module, opkg_path, requested)
def main():
    """ Entry point for ansible module. """
    argument_spec = {
        'state': {'default': 'present', 'choices': ['present', 'absent']},
        'table': {'required': True},
        'record': {'required': True},
        'col': {'required': True},
        'key': {'required': True},
        'value': {'required': True},
        'timeout': {'default': 5, 'type': 'int'},
    }

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    result = {'changed': False}

    # We add ovs-vsctl to module_params to later build up templatized commands
    module.params["ovs-vsctl"] = module.get_bin_path("ovs-vsctl", True)

    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands(want, have, module)
    result['commands'] = commands

    if commands:
        # Commands are only executed outside check mode, but either way
        # their presence means the database would change.
        if not module.check_mode:
            for cmd in commands:
                module.run_command(cmd, check_rc=True)
        result['changed'] = True

    module.exit_json(**result)
def main():
    """Set or report a package's dpkg selection state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            selection=dict(choices=['install', 'hold', 'deinstall', 'purge'])
        ),
        supports_check_mode=True,
    )

    dpkg = module.get_bin_path('dpkg', True)

    name = module.params['name']
    selection = module.params['selection']

    # Get current settings; empty output means dpkg has no record of it.
    rc, out, err = module.run_command([dpkg, '--get-selections', name], check_rc=True)
    current = out.split()[1] if out else 'not present'

    changed = current != selection

    if module.check_mode or not changed:
        module.exit_json(changed=changed, before=current, after=selection)

    module.run_command([dpkg, '--set-selections'], data="%s %s" % (name, selection), check_rc=True)
    module.exit_json(changed=changed, before=current, after=selection)
def main():
    """Manage MacPorts packages: install, remove, activate or deactivate."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=["pkg"], required=True),
            state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
            update_cache=dict(default="no", aliases=["update-cache"], type='bool')
        )
    )

    port_path = module.get_bin_path('port', True, ['/opt/local/bin'])

    params = module.params

    if params["update_cache"]:
        update_package_db(module, port_path)

    # 'name' is a comma-separated list of ports.
    requested = params["name"].split(",")

    state = params["state"]
    if state in ("present", "installed"):
        install_packages(module, port_path, requested)
    elif state in ("absent", "removed"):
        remove_packages(module, port_path, requested)
    elif state == "active":
        activate_packages(module, port_path, requested)
    elif state == "inactive":
        deactivate_packages(module, port_path, requested)
def main():
    """Manage FreeBSD packages through pkg(8)."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default="present", choices=["present", "absent"], required=False),
            name=dict(aliases=["pkg"], required=True, type='list'),
            cached=dict(default=False, type='bool'),
            annotation=dict(default="", required=False),
            pkgsite=dict(default="", required=False),
            rootdir=dict(default="", required=False, type='path'),
            chroot=dict(default="", required=False, type='path'),
            jail=dict(default="", required=False, type='str'),
            autoremove=dict(default=False, type='bool')),
        supports_check_mode=True,
        mutually_exclusive=[["rootdir", "chroot", "jail"]])

    pkgng_path = module.get_bin_path('pkg', True)

    p = module.params

    pkgs = p["name"]

    changed = False
    msgs = []

    # Work out the directory-context flag (--rootdir/--chroot/--jail);
    # the three options are mutually exclusive so at most one applies.
    dir_arg = ""

    if p["rootdir"] != "":
        # --rootdir only exists in pkg >= 1.5.
        old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
        if old_pkgng:
            module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
        else:
            dir_arg = "--rootdir %s" % (p["rootdir"])

    if p["chroot"] != "":
        dir_arg = '--chroot %s' % (p["chroot"])

    if p["jail"] != "":
        dir_arg = '--jail %s' % (p["jail"])

    # Perform the requested action(s); each helper reports (changed, message)
    # and the results are aggregated for the final exit.
    if p["state"] == "present":
        _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg)
        changed = changed or _changed
        msgs.append(_msg)

    elif p["state"] == "absent":
        _changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg)
        changed = changed or _changed
        msgs.append(_msg)

    if p["autoremove"]:
        _changed, _msg = autoremove_packages(module, pkgng_path, dir_arg)
        changed = changed or _changed
        msgs.append(_msg)

    if p["annotation"]:
        _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
        changed = changed or _changed
        msgs.append(_msg)

    module.exit_json(changed=changed, msg=", ".join(msgs))
def main():
    """Manage Arch Linux packages with pacman."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='list', aliases=['package', 'pkg']),
            state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
            recurse=dict(type='bool', default=False),
            force=dict(type='bool', default=False),
            upgrade=dict(type='bool', default=False),
            update_cache=dict(type='bool', default=False, aliases=['update-cache']),
        ),
        required_one_of=[['name', 'update_cache', 'upgrade']],
        supports_check_mode=True,
    )

    pacman_path = module.get_bin_path('pacman', True)

    p = module.params

    # normalize the state parameter
    if p['state'] in ['present', 'installed']:
        p['state'] = 'present'
    elif p['state'] in ['absent', 'removed']:
        p['state'] = 'absent'

    if p["update_cache"] and not module.check_mode:
        update_package_db(module, pacman_path)
        # Cache refresh was the only requested action: report and stop.
        if not (p['name'] or p['upgrade']):
            module.exit_json(changed=True, msg='Updated the package master lists')

    if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
        module.exit_json(changed=True, msg='Would have updated the package cache')

    if p['upgrade']:
        upgrade(module, pacman_path)

    if p['name']:
        pkgs = expand_package_groups(module, pacman_path, p['name'])

        # Parallel list: pkg_files[i] holds the original archive filename when
        # pkgs[i] was given as a package file, otherwise None.
        pkg_files = []
        for i, pkg in enumerate(pkgs):
            if not pkg:
                # avoid empty strings
                continue
            elif re.match(r".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg):
                # The package given is a filename, extract the raw pkg name from
                # it and store the filename
                pkg_files.append(pkg)
                pkgs[i] = re.sub(r'-[0-9].*$', '', pkgs[i].split('/')[-1])
            else:
                pkg_files.append(None)

        if module.check_mode:
            check_packages(module, pacman_path, pkgs, p['state'])

        if p['state'] in ['present', 'latest']:
            install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
        elif p['state'] == 'absent':
            remove_packages(module, pacman_path, pkgs)
def main():
    """Collect stat-style facts about *path* (read-only, never changes state).

    Fixes over the original:
    - The mime-detection block used a bare ``except:``, which would also
      swallow ``SystemExit`` raised by ``module.fail_json`` inside
      ``run_command`` — narrowed to ``except Exception``.
    - The OSError handler used the legacy ``get_exception()`` helper;
      replaced with the standard ``except OSError as e`` form.
    """
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True, type='path'),
            follow=dict(default='no', type='bool'),
            get_md5=dict(default='yes', type='bool'),
            get_checksum=dict(default='yes', type='bool'),
            checksum_algorithm=dict(default='sha1', type='str',
                                    choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
                                    aliases=['checksum_algo', 'checksum']),
            mime=dict(default=False, type='bool', aliases=['mime_type', 'mime-type']),
        ),
        supports_check_mode=True
    )

    path = module.params.get('path')
    b_path = to_bytes(path, errors='surrogate_or_strict')
    follow = module.params.get('follow')
    get_mime = module.params.get('mime')
    get_md5 = module.params.get('get_md5')
    get_checksum = module.params.get('get_checksum')
    checksum_algorithm = module.params.get('checksum_algorithm')

    # stat the target; follow=True dereferences symlinks.
    try:
        if follow:
            st = os.stat(b_path)
        else:
            st = os.lstat(b_path)
    except OSError as e:
        # A missing path is a normal answer, not a failure.
        if e.errno == errno.ENOENT:
            output = {'exists': False}
            module.exit_json(changed=False, stat=output)
        module.fail_json(msg=e.strerror)

    mimetype = None
    charset = None
    if get_mime:
        mimetype = 'unknown'
        charset = 'unknown'

        filecmd = [module.get_bin_path('file', True), '-i', path]
        try:
            rc, out, err = module.run_command(filecmd)
            if rc == 0:
                mimetype, charset = out.split(':')[1].split(';')
                mimetype = mimetype.strip()
                charset = charset.split('=')[1].strip()
        except Exception:
            # Best-effort: leave mimetype/charset as 'unknown'.
            pass

    output = format_output(module, path, st, follow, get_md5, get_checksum,
                           checksum_algorithm, mimetype=mimetype, charset=charset)

    module.exit_json(changed=False, stat=output)
def main():
    """Query a getent database and expose it as ansible facts."""
    module = AnsibleModule(
        argument_spec=dict(
            database=dict(type='str', required=True),
            key=dict(type='str'),
            split=dict(type='str'),
            fail_key=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )

    # Databases whose entries are colon separated by default.
    colon = ['passwd', 'shadow', 'group', 'gshadow']

    database = module.params['database']
    key = module.params.get('key')
    split = module.params.get('split')
    fail_key = module.params.get('fail_key')

    getent_bin = module.get_bin_path('getent', True)

    cmd = [getent_bin, database]
    if key is not None:
        cmd.append(key)

    if split is None and database in colon:
        split = ':'

    try:
        rc, out, err = module.run_command(cmd)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    msg = "Unexpected failure!"
    dbtree = 'getent_%s' % database
    results = {dbtree: {}}

    if rc == 0:
        # First field of each line is the record key; the rest is its value.
        for line in out.splitlines():
            record = line.split(split)
            results[dbtree][record[0]] = record[1:]
        module.exit_json(ansible_facts=results)
    elif rc == 1:
        msg = "Missing arguments, or database unknown."
    elif rc == 2:
        msg = "One or more supplied key could not be found in the database."
        if not fail_key:
            # Caller asked not to fail on a missing key: report it as None.
            results[dbtree][key] = None
            module.exit_json(ansible_facts=results, msg=msg)
    elif rc == 3:
        msg = "Enumeration not supported on this database."

    module.fail_json(msg=msg)
def main():
    """Schedule or remove 'at' jobs."""
    module = AnsibleModule(
        argument_spec=dict(
            command=dict(type='str'),
            script_file=dict(type='str'),
            count=dict(type='int'),
            units=dict(type='str', choices=['minutes', 'hours', 'days', 'weeks']),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            unique=dict(type='bool', default=False),
        ),
        mutually_exclusive=[['command', 'script_file']],
        required_one_of=[['command', 'script_file']],
        supports_check_mode=False,
    )

    at_cmd = module.get_bin_path('at', True)

    command = module.params['command']
    script_file = module.params['script_file']
    count = module.params['count']
    units = module.params['units']
    state = module.params['state']
    unique = module.params['unique']

    if (state == 'present') and (not count or not units):
        module.fail_json(msg="present state requires count and units")

    result = dict(
        changed=False,
        state=state,
    )

    # If command transform it into a script_file
    if command:
        script_file = create_tempfile(command)

    # if absent remove existing and return
    if state == 'absent':
        delete_job(module, result, at_cmd, command, script_file)

    # if unique if existing return unchanged
    if unique:
        if len(get_matching_jobs(module, at_cmd, script_file)) != 0:
            # Clean up the temp script we created from 'command' before exiting.
            if command:
                os.unlink(script_file)
            module.exit_json(**result)

    result['script_file'] = script_file
    result['count'] = count
    result['units'] = units

    add_job(module, result, at_cmd, count, units, command, script_file)

    module.exit_json(**result)
def main():
    """Run facter and return its JSON facts as the module result.

    Fix over the original: ``get_bin_path`` is now called with
    ``required=True`` so a missing facter binary produces a clear module
    failure instead of attempting to execute ``None``.
    """
    module = AnsibleModule(
        argument_spec=dict()
    )

    facter_path = module.get_bin_path(
        'facter',
        required=True,
        opt_dirs=['/opt/puppetlabs/bin'])

    cmd = [facter_path, "--json"]

    # check_rc=True makes run_command fail the module on a non-zero exit.
    rc, out, err = module.run_command(cmd, check_rc=True)
    module.exit_json(**json.loads(out))
def main():
    """Apply a patch file to a target file or directory tree."""
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(type='path', required=True, aliases=['patchfile']),
            dest=dict(type='path', aliases=['originalfile']),
            basedir=dict(type='path'),
            strip=dict(type='int', default=0),
            remote_src=dict(type='bool', default=False),
            # NB: for 'backup' parameter, semantics is slightly different from standard
            # since patch will create numbered copies, not strftime("%Y-%m-%d@%H:%M:%S~")
            backup=dict(type='bool', default=False),
            binary=dict(type='bool', default=False),
        ),
        required_one_of=[['dest', 'basedir']],
        supports_check_mode=True,
    )

    # Create type object as namespace for module params
    p = type('Params', (), module.params)

    # Validate inputs before invoking patch(1).
    p.src = os.path.expanduser(p.src)
    if not os.access(p.src, os.R_OK):
        module.fail_json(msg="src %s doesn't exist or not readable" % (p.src))

    if p.dest and not os.access(p.dest, os.W_OK):
        module.fail_json(msg="dest %s doesn't exist or not writable" % (p.dest))

    if p.basedir and not os.path.exists(p.basedir):
        module.fail_json(msg="basedir %s doesn't exist" % (p.basedir))

    if not p.basedir:
        p.basedir = os.path.dirname(p.dest)

    patch_bin = module.get_bin_path('patch')
    if patch_bin is None:
        module.fail_json(msg="patch command not found")

    # Closure handed to the helpers so they never touch `module` directly.
    def patch_func(opts):
        return module.run_command('%s %s' % (patch_bin, ' '.join(opts)))

    # patch need an absolute file name
    p.src = os.path.abspath(p.src)

    changed = False
    # Idempotence: only apply if the patch is not already in effect.
    if not is_already_applied(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip):
        try:
            apply_patch(patch_func, p.src, p.basedir, dest_file=p.dest, binary=p.binary, strip=p.strip,
                        dry_run=module.check_mode, backup=p.backup)
            changed = True
        except PatchError as e:
            module.fail_json(msg=to_native(e), exception=format_exc())

    module.exit_json(changed=changed)
def main():
    """Deploy and update a Bazaar branch at *dest*."""
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(type='path', required=True),
            name=dict(type='str', required=True, aliases=['parent']),
            version=dict(type='str', default='head'),
            force=dict(type='bool', default='no'),
            executable=dict(type='str'),
        )
    )

    dest = module.params['dest']
    parent = module.params['name']
    version = module.params['version']
    force = module.params['force']
    bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)

    # Presence of this file tells us an existing branch lives at dest.
    bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')

    rc, out, err = (0, None, None)

    bzr = Bzr(module, parent, dest, version, bzr_path)

    # if there is no bzr configuration, do a branch operation
    # else pull and switch the version
    before = None
    local_mods = False
    if not os.path.exists(bzrconfig):
        (rc, out, err) = bzr.clone()
    else:
        # else do a pull
        local_mods = bzr.has_local_mods()
        before = bzr.get_version()
        (rc, out, err) = bzr.reset(force)
        if rc != 0:
            module.fail_json(msg=err)
        (rc, out, err) = bzr.fetch()
        if rc != 0:
            module.fail_json(msg=err)

    # switch to version specified regardless of whether
    # we cloned or pulled
    (rc, out, err) = bzr.switch_version()

    # determine if we changed anything
    after = bzr.get_version()
    changed = False
    if before != after or local_mods:
        changed = True

    module.exit_json(changed=changed, before=before, after=after)
def main():
    """Run make against a target, honouring check mode via 'make --question'."""
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            target=dict(required=False, default=None, type='str'),
            params=dict(required=False, default=None, type='dict'),
            chdir=dict(required=True, default=None, type='path'),
        ),
    )

    # Build up the invocation of `make` we are going to use
    make_path = module.get_bin_path('make', True)
    make_target = module.params['target']
    extra = module.params['params']
    if extra is not None:
        make_parameters = ['%s=%s' % (k, str(v)) for k, v in iteritems(extra)]
    else:
        make_parameters = []

    base_command = [make_path, make_target]
    base_command.extend(make_parameters)

    # `make --question` reports via its exit status whether the target is
    # already up to date (0 means nothing to do).
    rc, out, err = run_command(base_command + ['--question'], module, check_rc=False)

    if module.check_mode:
        # Dry run: only report whether the target is out of date.
        changed = rc != 0
    elif rc == 0:
        # Target already up to date; nothing to run.
        changed = False
    else:
        # Target is stale: actually run make.
        rc, out, err = run_command(base_command, module)
        changed = True

    # We don't report the return code, as if this module failed
    # we would be calling fail_json from run_command, so even if
    # we had a non-zero return code, we did not fail. However, if
    # we report a non-zero return code here, we will be marked as
    # failed regardless of what we signal using the failed= kwarg.
    module.exit_json(
        changed=changed,
        failed=False,
        stdout=out,
        stderr=err,
        target=module.params['target'],
        params=module.params['params'],
        chdir=module.params['chdir']
    )
def main():
    """Create, resize or delete a qemu disk image.

    Fixes over the original:
    - Commands are passed to run_command as argv lists instead of
      shell-style formatted strings, which is robust against whitespace
      and special characters in paths/options.
    - ``current_size`` is tested against ``None`` instead of falsiness, so
      a legitimately 0-byte virtual size is not mistaken for a parse failure.
    """
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(type='str', required=True),
            opt=dict(type='str'),
            format=dict(type='str', default='qcow2'),
            size=dict(type='int'),
            state=dict(type='str', choices=['absent', 'present'], default='present'),
        ),
    )

    changed = False
    qemu_img = module.get_bin_path('qemu-img', True)
    dest = module.params['dest']
    img_format = module.params['format']
    opt = module.params['opt']

    if module.params['state'] == 'present':
        if not module.params['size']:
            module.fail_json(msg="Parameter 'size' required")
        # 'size' is given in MiB; qemu-img takes bytes.
        size = module.params['size'] * 1024 * 1024

        if not os.path.exists(dest):
            command = [qemu_img, 'create', '-f', img_format]
            if opt:
                command.extend(['-o', opt])
            command.extend([dest, str(size)])
            module.run_command(command, check_rc=True)
            changed = True
        else:
            rc, stdout, _ = module.run_command([qemu_img, 'info', dest], check_rc=True)
            current_size = None
            for line in stdout.splitlines():
                if 'virtual size' in line:
                    # e.g. "virtual size: 5.0M (5242880 bytes)"
                    current_size = int(line.split('(')[1].split()[0])
            if current_size is None:
                module.fail_json(msg='Unable to read virtual disk size of %s' % (dest))
            if current_size != size:
                module.run_command([qemu_img, 'resize', dest, str(size)], check_rc=True)
                changed = True

    if module.params['state'] == 'absent':
        if os.path.exists(dest):
            os.remove(dest)
            changed = True

    module.exit_json(changed=changed)
def main():
    """Manage pkgsrc packages with pkgin."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default="present", choices=["present", "absent"]),
            name=dict(aliases=["pkg"], type='list'),
            update_cache=dict(default='no', type='bool'),
            upgrade=dict(default='no', type='bool'),
            full_upgrade=dict(default='no', type='bool'),
            clean=dict(default='no', type='bool'),
            force=dict(default='no', type='bool')),
        required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
        supports_check_mode=True)

    global PKGIN_PATH
    PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])

    # Force the C locale so pkgin output parsing is stable.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    p = module.params

    if p["update_cache"]:
        c, msg = update_package_db(module)
        # When no other action was requested, report the cache result and stop.
        if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
            module.exit_json(changed=c, msg=msg)

    if p["upgrade"]:
        upgrade_packages(module)
        if not p['name']:
            module.exit_json(changed=True, msg='upgraded packages')

    if p["full_upgrade"]:
        full_upgrade_packages(module)
        if not p['name']:
            module.exit_json(changed=True, msg='upgraded all packages')

    if p["clean"]:
        clean_cache(module)
        if not p['name']:
            module.exit_json(changed=True, msg='cleaned caches')

    pkgs = p["name"]

    if p["state"] == "present":
        install_packages(module, pkgs)
    elif p["state"] == "absent":
        remove_packages(module, pkgs)
def main():
    """Add or remove Homebrew taps."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['tap'], type='list', required=True),
            url=dict(default=None, required=False),
            state=dict(default='present', choices=['present', 'absent']),
        ),
        supports_check_mode=True,
    )

    brew_path = module.get_bin_path(
        'brew',
        required=True,
        opt_dirs=['/usr/local/bin']
    )

    taps = module.params['name']
    url = module.params['url']
    state = module.params['state']

    if state == 'present':
        if url is None:
            # No tap URL provided explicitly, continue with bulk addition
            # of all the taps.
            failed, changed, msg = add_taps(module, brew_path, taps)
        elif len(taps) > 1:
            # When an tap URL is provided explicitly, we allow adding
            # *single* tap only. Validate and proceed to add single tap.
            module.fail_json(msg="List of multiple taps may not be provided with 'url' option.")
        else:
            failed, changed, msg = add_tap(module, brew_path, taps[0], url)
    else:
        failed, changed, msg = remove_taps(module, brew_path, taps)

    if failed:
        module.fail_json(msg=msg)
    module.exit_json(changed=changed, msg=msg)
def main(): module = AnsibleModule( argument_spec=dict( state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']), name=dict(type='list'), repository=dict(type='list'), update_cache=dict(default='no', type='bool'), upgrade=dict(default='no', type='bool'), available=dict(default='no', type='bool'), ), required_one_of=[['name', 'update_cache', 'upgrade']], mutually_exclusive=[['name', 'upgrade']], supports_check_mode=True ) # Set LANG env since we parse stdout module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') global APK_PATH APK_PATH = module.get_bin_path('apk', required=True) p = module.params # add repositories to the APK_PATH if p['repository']: for r in p['repository']: APK_PATH = "%s --repository %s" % (APK_PATH, r) # normalize the state parameter if p['state'] in ['present', 'installed']: p['state'] = 'present' if p['state'] in ['absent', 'removed']: p['state'] = 'absent' if p['update_cache']: update_package_db(module, not p['name'] and not p['upgrade']) if p['upgrade']: upgrade_packages(module, p['available']) if p['state'] in ['present', 'latest']: install_packages(module, p['name'], p['state']) elif p['state'] == 'absent': remove_packages(module, p['name'])
def main():
    """Manage Source Mage spells via the sorcery toolchain."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(default=None, aliases=['spell'], type='list'),
            state=dict(default='present', choices=['present', 'latest', 'absent', 'cast', 'dispelled', 'rebuild']),
            depends=dict(default=None),
            update=dict(default=False, type='bool'),
            update_cache=dict(default=False, aliases=['update_codex'], type='bool'),
            cache_valid_time=dict(default=0, type='int')
        ),
        required_one_of=[['name', 'update', 'update_cache']],
        supports_check_mode=True
    )

    # Sorcery modifies the system and refuses to work for unprivileged users.
    if os.geteuid() != 0:
        module.fail_json(msg="root privileges are required for this operation")

    # Resolve the full path of every sorcery command we may invoke.
    for c in SORCERY:
        SORCERY[c] = module.get_bin_path(c, True)

    # prepare environment: run sorcery commands without asking questions
    module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0')

    params = module.params

    # normalize 'state' parameter
    if params['state'] in ('present', 'cast'):
        params['state'] = 'present'
    elif params['state'] in ('absent', 'dispelled'):
        params['state'] = 'absent'

    if params['update']:
        update_sorcery(module)

    if params['update_cache'] or params['state'] == 'latest':
        update_codex(module)

    if params['name']:
        manage_spells(module)
def main():
    """ Entry point.

    Fix over the original: it passed
    ``required_if = [('parent', not None, ('vlan',))]`` to AnsibleModule.
    ``not None`` evaluates to ``True``, so the rule only fired when the
    string parameter 'parent' equalled True — i.e. never; the constraint
    was silently dead. The intended rule ("a bridge with a parent must
    carry a VLAN tag") is now enforced explicitly.
    """
    argument_spec = {
        'bridge': {'required': True},
        'parent': {'default': None},
        'vlan': {'default': None, 'type': 'int'},
        'state': {'default': 'present', 'choices': ['present', 'absent']},
        'timeout': {'default': 5, 'type': 'int'},
        'external_ids': {'default': None, 'type': 'dict'},
        'fail_mode': {'default': None},
        'set': {'required': False, 'default': None}
    }

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    # required_if cannot express "parent is set", so validate by hand.
    if module.params['parent'] is not None and module.params['vlan'] is None:
        module.fail_json(msg="parent requires vlan to be set")

    result = {'changed': False}

    # We add ovs-vsctl to module_params to later build up templatized commands
    module.params["ovs-vsctl"] = module.get_bin_path("ovs-vsctl", True)

    want = map_params_to_obj(module)
    have = map_config_to_obj(module)

    commands = map_obj_to_commands(want, have, module)
    result['commands'] = commands

    if commands:
        if not module.check_mode:
            for c in commands:
                module.run_command(c, check_rc=True)
        result['changed'] = True

    module.exit_json(**result)
def main():
    """Follow or unfollow log files with the Logentries agent."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True),
            state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
            name=dict(required=False, default=None, type='str'),
            logtype=dict(required=False, default=None, type='str', aliases=['type'])
        ),
        supports_check_mode=True
    )

    le_path = module.get_bin_path('le', True, ['/usr/local/bin'])

    params = module.params

    # 'path' may name several log files, comma separated; drop empty entries.
    logs = filter(None, params["path"].split(","))

    state = params["state"]
    if state in ("present", "followed"):
        follow_log(module, le_path, logs, name=params['name'], logtype=params['logtype'])
    elif state in ("absent", "unfollowed"):
        unfollow_log(module, le_path, logs)
def main():
    # Gather stat(2)-style facts about a path and return them under 'stat'.
    # This module is read-only: it never modifies the system, so check mode
    # is always safe and 'changed' is always False.
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True, aliases=['dest', 'name']),
            follow=dict(type='bool', default=False),
            get_md5=dict(type='bool', default=False),
            get_checksum=dict(type='bool', default=True),
            get_mime=dict(type='bool', default=True, aliases=['mime', 'mime_type', 'mime-type']),
            get_attributes=dict(type='bool', default=True, aliases=['attr', 'attributes']),
            checksum_algorithm=dict(type='str', default='sha1',
                                    choices=['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
                                    aliases=['checksum', 'checksum_algo']),
        ),
        supports_check_mode=True,
    )

    path = module.params.get('path')
    b_path = to_bytes(path, errors='surrogate_or_strict')
    follow = module.params.get('follow')
    get_mime = module.params.get('get_mime')
    get_attr = module.params.get('get_attributes')
    get_checksum = module.params.get('get_checksum')
    checksum_algorithm = module.params.get('checksum_algorithm')

    # NOTE: undocumented option since 2.9 to be removed at a later date if possible (3.0+)
    # no real reason for keeping other than fear we may break older content.
    get_md5 = module.params.get('get_md5')

    # main stat data: follow symlinks only when 'follow' was requested
    try:
        if follow:
            st = os.stat(b_path)
        else:
            st = os.lstat(b_path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # A missing path is not a failure for this module: report exists=False.
            output = {'exists': False}
            module.exit_json(changed=False, stat=output)

        module.fail_json(msg=e.strerror)

    # process base results
    output = format_output(module, path, st)

    # resolved permissions for the current (effective) user
    for perm in [('readable', os.R_OK), ('writeable', os.W_OK), ('executable', os.X_OK)]:
        output[perm[0]] = os.access(b_path, perm[1])

    # symlink info
    if output.get('islnk'):
        output['lnk_source'] = os.path.realpath(b_path)
        output['lnk_target'] = os.readlink(b_path)

    try:  # user data
        pw = pwd.getpwuid(st.st_uid)
        output['pw_name'] = pw.pw_name
    except (TypeError, KeyError):
        pass

    try:  # group data
        grp_info = grp.getgrgid(st.st_gid)
        output['gr_name'] = grp_info.gr_name
    except (KeyError, ValueError, OverflowError):
        pass

    # checksums: only computed for regular, readable files
    if output.get('isreg') and output.get('readable'):

        # NOTE: see above about get_md5
        if get_md5:
            # Will fail on FIPS-140 compliant systems
            try:
                output['md5'] = module.md5(b_path)
            except ValueError:
                output['md5'] = None

        if get_checksum:
            output['checksum'] = module.digest_from_file(b_path, checksum_algorithm)

    # try to get mime data if requested
    if get_mime:
        output['mimetype'] = output['charset'] = 'unknown'
        mimecmd = module.get_bin_path('file')
        if mimecmd:
            mimecmd = [mimecmd, '--mime-type', '--mime-encoding', b_path]
            try:
                rc, out, err = module.run_command(mimecmd)
                if rc == 0:
                    # 'file' output looks like "path: type/subtype; charset=enc"
                    mimetype, charset = out.rsplit(':', 1)[1].split(';')
                    output['mimetype'] = mimetype.strip()
                    output['charset'] = charset.split('=')[1].strip()
            except Exception:
                # best effort only: leave 'unknown' on any exec/parse failure
                pass

    # try to get attr data
    if get_attr:
        output['version'] = None
        output['attributes'] = []
        output['attr_flags'] = ''
        out = module.get_file_attributes(b_path)
        for x in ('version', 'attributes', 'attr_flags'):
            if x in out:
                output[x] = out[x]

    module.exit_json(changed=False, stat=output)
def main():
    # Manage disk partitions by driving the 'parted' CLI: accumulate the
    # needed parted sub-commands into a script string, run it, and report
    # the final device layout.
    global module, units_si, units_iec, parted_exec

    changed = False
    output_script = ""
    script = ""
    module = AnsibleModule(
        argument_spec={
            'device': {'required': True, 'type': 'str'},
            'align': {'default': 'optimal', 'choices': ['none', 'cylinder', 'minimal', 'optimal'], 'type': 'str'},
            'number': {'default': None, 'type': 'int'},

            # unit <unit> command
            'unit': {'default': 'KiB', 'choices': parted_units, 'type': 'str'},

            # mklabel <label-type> command
            'label': {'choices': ['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun'], 'type': 'str'},

            # mkpart <part-type> [<fs-type>] <start> <end> command
            'part_type': {'default': 'primary', 'choices': ['primary', 'extended', 'logical'], 'type': 'str'},
            'part_start': {'default': '0%', 'type': 'str'},
            'part_end': {'default': '100%', 'type': 'str'},

            # name <partition> <name> command
            'name': {'type': 'str'},

            # set <partition> <flag> <state> command
            'flags': {'type': 'list'},

            # rm/mkpart command
            'state': {'choices': ['present', 'absent', 'info'], 'default': 'info', 'type': 'str'}
        },
        supports_check_mode=True,
    )
    # Force the C locale so parsing of parted output is not broken by translations.
    module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}

    # Data extraction
    device = module.params['device']
    align = module.params['align']
    number = module.params['number']
    unit = module.params['unit']
    label = module.params['label']
    part_type = module.params['part_type']
    part_start = module.params['part_start']
    part_end = module.params['part_end']
    name = module.params['name']
    state = module.params['state']
    flags = module.params['flags']

    # Parted executable
    parted_exec = module.get_bin_path('parted', True)

    # Conditioning
    if number and number < 0:
        module.fail_json(msg="The partition number must be non negative.")
    if not check_size_format(part_start):
        module.fail_json(
            msg="The argument 'part_start' doesn't respect required format."
                "The size unit is case sensitive.",
            err=parse_unit(part_start))
    if not check_size_format(part_end):
        module.fail_json(
            msg="The argument 'part_end' doesn't respect required format."
                "The size unit is case sensitive.",
            err=parse_unit(part_end))

    # Read the current disk information
    current_device = get_device_info(device, unit)
    current_parts = current_device['partitions']

    if state == 'present':
        # Default value for the label
        if not label:
            label = 'msdos'

        # Assign label if required
        if current_device['generic'].get('table', None) != label:
            script += "mklabel %s " % label

        # Create partition if required
        if part_type and not part_exists(current_parts, 'num', number):
            script += "mkpart %s %s %s " % (part_type, part_start, part_end)

        # Set the unit of the run
        if unit and script:
            script = "unit %s %s" % (unit, script)

        # Execute the script and update the data structure.
        # This will create the partition for the next steps
        if script:
            output_script += script
            parted(script, device, align)
            changed = True
            script = ""

            # re-read so the name/flags phase below sees the new partition
            current_parts = get_device_info(device, unit)['partitions']

        if part_exists(current_parts, 'num', number) or module.check_mode:
            partition = {'flags': []}  # Empty structure for the check-mode
            if not module.check_mode:
                partition = [p for p in current_parts if p['num'] == number][0]

            # Assign name to the partition
            if name is not None and partition.get('name', None) != name:
                script += "name %s %s " % (number, name)

            # Manage flags
            if flags:
                # Compute only the changes in flags status
                flags_off = list(set(partition['flags']) - set(flags))
                flags_on = list(set(flags) - set(partition['flags']))

                for f in flags_on:
                    script += "set %s %s on " % (number, f)

                for f in flags_off:
                    script += "set %s %s off " % (number, f)

            # Set the unit of the run
            if unit and script:
                script = "unit %s %s" % (unit, script)

            # Execute the script
            if script:
                output_script += script
                changed = True
                parted(script, device, align)

    elif state == 'absent':
        # Remove the partition
        if part_exists(current_parts, 'num', number) or module.check_mode:
            script = "rm %s " % number
            output_script += script
            changed = True
            parted(script, device, align)

    elif state == 'info':
        output_script = "unit '%s' print " % unit

    # Final status of the device
    final_device_status = get_device_info(device, unit)
    module.exit_json(
        changed=changed,
        disk=final_device_status['generic'],
        partitions=final_device_status['partitions'],
        script=output_script.strip())
def main():
    # Control systemd units: state changes, enable/disable, mask/unmask and
    # daemon-reload/daemon-reexec, with check-mode support throughout.
    # initialize
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', aliases=['service', 'unit']),
            state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
            enabled=dict(type='bool'),
            force=dict(type='bool'),
            masked=dict(type='bool'),
            daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
            daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
            scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
            no_block=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
        required_by=dict(
            state=('name', ),
            enabled=('name', ),
            masked=('name', ),
        ),
    )

    unit = module.params['name']
    if unit is not None:
        # glob patterns are valid for systemctl but not supported here
        for globpattern in (r"*", r"?", r"["):
            if globpattern in unit:
                module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))

    systemctl = module.get_bin_path('systemctl', True)

    # systemctl --user needs XDG_RUNTIME_DIR to locate the user's bus
    if os.getenv('XDG_RUNTIME_DIR') is None:
        os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()

    ''' Set CLI options depending on params '''
    # if scope is 'system' or None, we can ignore as there is no extra switch.
    # The other choices match the corresponding switch
    if module.params['scope'] != 'system':
        systemctl += " --%s" % module.params['scope']

    if module.params['no_block']:
        systemctl += " --no-block"

    if module.params['force']:
        systemctl += " --force"

    rc = 0
    out = err = ''
    result = dict(
        name=unit,
        changed=False,
        status=dict(),
    )

    # Run daemon-reload first, if requested
    if module.params['daemon_reload'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))

    # Run daemon-reexec
    if module.params['daemon_reexec'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))

    if unit:
        found = False
        is_initd = sysv_exists(unit)
        is_systemd = False

        # check service data, cannot error out on rc as it changes across versions, assume not found
        (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))

        if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
            # load return of systemctl show into dictionary for easy access and return
            if out:
                result['status'] = parse_systemctl_show(to_native(out).split('\n'))

                is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'

                is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'

                # Check for loading error
                if is_systemd and not is_masked and 'LoadError' in result['status']:
                    module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))

        # Workaround for https://github.com/ansible/ansible/issues/71528
        elif err and rc == 1 and 'Failed to parse bus message' in err:
            result['status'] = parse_systemctl_show(to_native(out).split('\n'))

            # in this failure mode 'show' output cannot prove existence;
            # fall back to matching the template base against list-unit-files
            unit_base, sep, suffix = unit.partition('@')
            unit_search = '{unit_base}{sep}'.format(unit_base=unit_base, sep=sep)
            (rc, out, err) = module.run_command("{systemctl} list-unit-files '{unit_search}*'".format(systemctl=systemctl, unit_search=unit_search))
            is_systemd = unit_search in out

            (rc, out, err) = module.run_command("{systemctl} is-active '{unit}'".format(systemctl=systemctl, unit=unit))
            result['status']['ActiveState'] = out.rstrip('\n')

        else:
            # list taken from man systemctl(1) for systemd 244
            valid_enabled_states = [
                "enabled",
                "enabled-runtime",
                "linked",
                "linked-runtime",
                "masked",
                "masked-runtime",
                "static",
                "indirect",
                "disabled",
                "generated",
                "transient"]

            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
            if out.strip() in valid_enabled_states:
                is_systemd = True
            else:
                # fallback list-unit-files as show does not work on some systems (chroot)
                # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
                (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
                if rc == 0:
                    is_systemd = True
                else:
                    # Check for systemctl command
                    module.run_command(systemctl, check_rc=True)

        # Does service exist?
        found = is_systemd or is_initd
        if is_initd and not is_systemd:
            module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)

        # mask/unmask the service, if requested, can operate on services before they are installed
        if module.params['masked'] is not None:
            # state is not masked unless systemd affirms otherwise
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
            masked = out.strip() == "masked"

            if masked != module.params['masked']:
                result['changed'] = True
                if module.params['masked']:
                    action = 'mask'
                else:
                    action = 'unmask'

                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        # some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't
                        fail_if_missing(module, found, unit, msg='host')

        # Enable/disable service startup at boot if requested
        if module.params['enabled'] is not None:

            if module.params['enabled']:
                action = 'enable'
            else:
                action = 'disable'

            fail_if_missing(module, found, unit, msg='host')

            # do we need to enable the service?
            enabled = False
            (rc, out, err) = module.run_command("%s is-enabled '%s' -l" % (systemctl, unit))

            # check systemctl result or if it is a init script
            if rc == 0:
                enabled = True
                # Check if the service is indirect or alias and if out contains exactly 1 line of string 'indirect'/ 'alias' it's disabled
                if out.splitlines() == ["indirect"] or out.splitlines() == ["alias"]:
                    enabled = False

            elif rc == 1:
                # if not a user or global user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
                if module.params['scope'] == 'system' and \
                        is_initd and \
                        not out.strip().endswith('disabled') and \
                        sysv_is_enabled(unit):
                    enabled = True

            # default to current state
            result['enabled'] = enabled

            # Change enable/disable if needed
            if enabled != module.params['enabled']:
                result['changed'] = True
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))

                result['enabled'] = not enabled

        # set service state if requested
        if module.params['state'] is not None:
            fail_if_missing(module, found, unit, msg="host")

            # default to desired state
            result['state'] = module.params['state']

            # What is current service state?
            if 'ActiveState' in result['status']:
                action = None
                if module.params['state'] == 'started':
                    if not is_running_service(result['status']):
                        action = 'start'
                elif module.params['state'] == 'stopped':
                    if is_running_service(result['status']) or is_deactivating_service(result['status']):
                        action = 'stop'
                else:
                    # restarted/reloaded: start if not running, otherwise
                    # run the requested verb
                    if not is_running_service(result['status']):
                        action = 'start'
                    else:
                        action = module.params['state'][:-2]  # remove 'ed' from restarted/reloaded
                    result['state'] = 'started'

                if action:
                    result['changed'] = True
                    if not module.check_mode:
                        (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                        if rc != 0:
                            module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
            # check for chroot
            elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
                module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.")
            else:
                # this should not happen?
                module.fail_json(msg="Service is in unknown state", status=result['status'])

    module.exit_json(**result)
def main():
    # Older systemd module variant: parses 'systemctl show' output by hand
    # (including {}-wrapped multiline values) and applies the requested
    # masked/enabled/state changes.
    # init
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type='str', aliases=['unit', 'service']),
            state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
            enabled=dict(type='bool'),
            masked=dict(type='bool'),
            daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
    )

    # initialize
    systemctl = module.get_bin_path('systemctl')
    unit = module.params['name']
    rc = 0
    out = err = ''
    result = {
        'name': unit,
        'changed': False,
        'status': {},
    }

    # Run daemon-reload first, if requested
    if module.params['daemon_reload']:
        (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))

    # TODO: check if service exists
    (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
    if rc != 0:
        module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, unit, err))

    # load return of systemctl show into dictionary for easy access and return
    k = None
    multival = []
    for line in out.split('\n'):  # systemd can have multiline values delimited with {}
        if line.strip():
            if k is None:
                if '=' in line:
                    k, v = line.split('=', 1)
                    if v.lstrip().startswith('{'):
                        if not v.rstrip().endswith('}'):
                            # opening of a multiline value; accumulate until the closing '}'
                            multival.append(line)
                            continue
                    result['status'][k] = v.strip()
                    k = None
            else:
                if line.rstrip().endswith('}'):
                    result['status'][k] = '\n'.join(multival).strip()
                    multival = []
                    k = None
                else:
                    multival.append(line)

    if 'LoadState' in result['status'] and result['status']['LoadState'] == 'not-found':
        module.fail_json(msg='Could not find the requested service "%r": %s' % (unit, err))
    elif 'LoadError' in result['status']:
        module.fail_json(msg="Failed to get the service status '%s': %s" % (unit, result['status']['LoadError']))

    # mask/unmask the service, if requested
    if module.params['masked'] is not None:
        masked = (result['status']['LoadState'] == 'masked')

        # Change?
        if masked != module.params['masked']:
            result['changed'] = True
            if module.params['masked']:
                action = 'mask'
            else:
                action = 'unmask'

            if not module.check_mode:
                (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                if rc != 0:
                    module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))

    # Enable/disable service startup at boot if requested
    if module.params['enabled'] is not None:
        # do we need to enable the service?
        enabled = False
        (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))

        # check systemctl result or if it is a init script
        if rc == 0:
            enabled = True
        elif rc == 1:
            # Deals with init scripts
            # if both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
            initscript = '/etc/init.d/' + unit
            if os.path.exists(initscript) and os.access(initscript, os.X_OK) and \
                    (not out.startswith('disabled') or bool(glob.glob('/etc/rc?.d/S??' + unit))):
                enabled = True

        # default to current state
        result['enabled'] = enabled

        # Change enable/disable if needed
        if enabled != module.params['enabled']:
            result['changed'] = True
            if module.params['enabled']:
                action = 'enable'
            else:
                action = 'disable'

            if not module.check_mode:
                (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                if rc != 0:
                    module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))

            result['enabled'] = not enabled

    if module.params['state'] is not None:
        # default to desired state
        result['state'] = module.params['state']

        # What is current service state?
        if 'ActiveState' in result['status']:
            action = None
            if module.params['state'] == 'started':
                if result['status']['ActiveState'] != 'active':
                    action = 'start'
                    result['changed'] = True
            elif module.params['state'] == 'stopped':
                if result['status']['ActiveState'] == 'active':
                    action = 'stop'
                    result['changed'] = True
            else:
                action = module.params['state'][:-2]  # remove 'ed' from restarted/reloaded
                result['state'] = 'started'
                result['changed'] = True

            if action:
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
        else:
            # this should not happen?
            module.fail_json(msg="Service is in unknown state", status=result['status'])

    module.exit_json(**result)
def main():
    """Manage GlusterFS volumes: create/delete, start/stop, bricks, quotas and options."""
    # MAIN

    global module
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['volume']),
            state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']),
            cluster=dict(type='list'),
            host=dict(type='str'),
            stripes=dict(type='int'),
            replicas=dict(type='int'),
            arbiters=dict(type='int'),
            disperses=dict(type='int'),
            redundancies=dict(type='int'),
            transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']),
            bricks=dict(type='str', aliases=['brick']),
            start_on_create=dict(type='bool', default=True),
            rebalance=dict(type='bool', default=False),
            options=dict(type='dict', default={}),
            quota=dict(type='str'),
            directory=dict(type='str'),
            force=dict(type='bool', default=False),
        ),
    )

    global glusterbin
    glusterbin = module.get_bin_path('gluster', True)

    changed = False

    action = module.params['state']
    volume_name = module.params['name']
    cluster = module.params['cluster']
    brick_paths = module.params['bricks']
    stripes = module.params['stripes']
    replicas = module.params['replicas']
    arbiters = module.params['arbiters']
    disperses = module.params['disperses']
    redundancies = module.params['redundancies']
    transport = module.params['transport']
    myhostname = module.params['host']
    start_on_create = module.boolean(module.params['start_on_create'])
    rebalance = module.boolean(module.params['rebalance'])
    force = module.boolean(module.params['force'])

    if not myhostname:
        myhostname = socket.gethostname()

    # Clean up if last element is empty. Consider that yml can look like this:
    #   cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
    if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
        cluster = cluster[0:-1]

    if cluster is None:
        cluster = []

    if brick_paths is not None and "," in brick_paths:
        brick_paths = brick_paths.split(",")
    else:
        brick_paths = [brick_paths]

    options = module.params['options']
    quota = module.params['quota']
    directory = module.params['directory']

    # get current state info
    peers = get_peers()
    volumes = get_volumes()
    quotas = {}
    if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
        quotas = get_quotas(volume_name, True)

    # do the work!
    if action == 'absent':
        if volume_name in volumes:
            if volumes[volume_name]['status'].lower() != 'stopped':
                stop_volume(volume_name)
            run_gluster(['volume', 'delete', volume_name])
            changed = True

    if action == 'present':
        probe_all_peers(cluster, peers, myhostname)

        # create if it doesn't exist
        if volume_name not in volumes:
            create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
            volumes = get_volumes()
            changed = True

        if volume_name in volumes:
            if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
                start_volume(volume_name)
                changed = True

            # switch bricks
            new_bricks = []
            removed_bricks = []
            all_bricks = []
            for node in cluster:
                for brick_path in brick_paths:
                    brick = '%s:%s' % (node, brick_path)
                    all_bricks.append(brick)
                    if brick not in volumes[volume_name]['bricks']:
                        new_bricks.append(brick)

            # this module does not yet remove bricks, but we check those anyways
            for brick in volumes[volume_name]['bricks']:
                if brick not in all_bricks:
                    removed_bricks.append(brick)

            if new_bricks:
                add_bricks(volume_name, new_bricks, stripes, replicas, force)
                changed = True

            # handle quotas
            if quota:
                if not volumes[volume_name]['quota']:
                    enable_quota(volume_name)
                quotas = get_quotas(volume_name, False)
                if directory not in quotas or quotas[directory] != quota:
                    set_quota(volume_name, directory, quota)
                    changed = True

            # set options
            for option in options.keys():
                if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
                    set_volume_option(volume_name, option, options[option])
                    changed = True

        else:
            module.fail_json(msg='failed to create volume %s' % volume_name)

    # BUGFIX: this guard previously compared against 'delete', which is not a
    # valid 'state' value (choices are absent/started/stopped/present), so it
    # always fired — making 'state: absent' fail on an already-missing volume
    # instead of being idempotent. Compare against 'absent' as intended.
    if action != 'absent' and volume_name not in volumes:
        module.fail_json(msg='volume not found %s' % volume_name)

    if action == 'started':
        if volumes[volume_name]['status'].lower() != 'started':
            start_volume(volume_name)
            changed = True

    if action == 'stopped':
        if volumes[volume_name]['status'].lower() != 'stopped':
            stop_volume(volume_name)
            changed = True

    if changed:
        volumes = get_volumes()
        if rebalance:
            do_rebalance(volume_name)

    facts = {}
    facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas}

    module.exit_json(changed=changed, ansible_facts=facts)
def main():
    # Systemd module variant with --user support: applies daemon-reload,
    # mask/unmask, enable/disable and state changes to a unit.
    # initialize
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['unit', 'service']),
            state=dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
            enabled=dict(type='bool'),
            masked=dict(type='bool'),
            daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
            user=dict(type='bool', default=False),
            no_block=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled', 'masked', 'daemon_reload']],
    )

    systemctl = module.get_bin_path('systemctl', True)
    if module.params['user']:
        systemctl = systemctl + " --user"
    if module.params['no_block']:
        systemctl = systemctl + " --no-block"
    unit = module.params['name']
    rc = 0
    out = err = ''
    result = {
        'name': unit,
        'changed': False,
        'status': {},
    }

    # 'name' is only optional when nothing but daemon_reload was requested
    for requires in ('state', 'enabled', 'masked'):
        if module.params[requires] is not None and unit is None:
            module.fail_json(msg="name is also required when specifying %s" % requires)

    # Run daemon-reload first, if requested
    if module.params['daemon_reload'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))

    if unit:
        found = False
        is_initd = sysv_exists(unit)
        is_systemd = False

        # check service data, cannot error out on rc as it changes across versions, assume not found
        (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))

        if request_was_ignored(out) or request_was_ignored(err):
            # fallback list-unit-files as show does not work on some systems (chroot)
            # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
            (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
            if rc == 0:
                is_systemd = True

        elif rc == 0:
            # load return of systemctl show into dictionary for easy access and return
            if out:
                result['status'] = parse_systemctl_show(to_native(out).split('\n'))

                is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'

                # Check for loading error
                if is_systemd and 'LoadError' in result['status']:
                    module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
        else:
            # Check for systemctl command
            module.run_command(systemctl, check_rc=True)

        # Does service exist?
        found = is_systemd or is_initd
        if is_initd and not is_systemd:
            module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)

        # mask/unmask the service, if requested, can operate on services before they are installed
        if module.params['masked'] is not None:
            # state is not masked unless systemd affirms otherwise
            masked = ('LoadState' in result['status'] and result['status']['LoadState'] == 'masked')

            if masked != module.params['masked']:
                result['changed'] = True
                if module.params['masked']:
                    action = 'mask'
                else:
                    action = 'unmask'

                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        # some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't
                        fail_if_missing(module, found, unit, msg='host')

        # Enable/disable service startup at boot if requested
        if module.params['enabled'] is not None:

            if module.params['enabled']:
                action = 'enable'
            else:
                action = 'disable'

            fail_if_missing(module, found, unit, msg='host')

            # do we need to enable the service?
            enabled = False
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))

            # check systemctl result or if it is a init script
            if rc == 0:
                enabled = True
            elif rc == 1:
                # if not a user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
                if not module.params['user'] and \
                        is_initd and \
                        (not out.strip().endswith('disabled') or sysv_is_enabled(unit)):
                    enabled = True

            # default to current state
            result['enabled'] = enabled

            # Change enable/disable if needed
            if enabled != module.params['enabled']:
                result['changed'] = True
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))

                result['enabled'] = not enabled

        # set service state if requested
        if module.params['state'] is not None:
            fail_if_missing(module, found, unit, msg="host")

            # default to desired state
            result['state'] = module.params['state']

            # What is current service state?
            if 'ActiveState' in result['status']:
                action = None
                if module.params['state'] == 'started':
                    if not is_running_service(result['status']):
                        action = 'start'
                elif module.params['state'] == 'stopped':
                    if is_running_service(result['status']):
                        action = 'stop'
                else:
                    # restarted/reloaded: start if not running, otherwise run the requested verb
                    if not is_running_service(result['status']):
                        action = 'start'
                    else:
                        action = module.params['state'][:-2]  # remove 'ed' from restarted/reloaded
                    result['state'] = 'started'

                if action:
                    result['changed'] = True
                    if not module.check_mode:
                        (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                        if rc != 0:
                            module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
            else:
                # this should not happen?
                module.fail_json(msg="Service is in unknown state", status=result['status'])

    module.exit_json(**result)
def main():
    # Deploy/remove a Helm chart release: builds a helm CLI command string
    # from the module parameters, diffs the desired release against the
    # deployed one, and only invokes helm when a change is actually needed.
    global module
    module = AnsibleModule(
        argument_spec=dict(
            binary_path=dict(type='path'),
            chart_ref=dict(type='path'),
            chart_repo_url=dict(type='str'),
            chart_version=dict(type='str'),
            release_name=dict(type='str', required=True, aliases=['name']),
            release_namespace=dict(type='str', required=True, aliases=['namespace']),
            release_state=dict(default='present', choices=['present', 'absent'], aliases=['state']),
            release_values=dict(type='dict', default={}, aliases=['values']),
            values_files=dict(type='list', default=[], elements='str'),
            update_repo_cache=dict(type='bool', default=False),

            # Helm options
            disable_hook=dict(type='bool', default=False),
            force=dict(type='bool', default=False),
            kube_context=dict(type='str', aliases=['context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
            kubeconfig_path=dict(type='path', aliases=['kubeconfig'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),
            purge=dict(type='bool', default=True),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='str'),
            atomic=dict(type='bool', default=False),
            create_namespace=dict(type='bool', default=False),
            replace=dict(type='bool', default=False),
        ),
        required_if=[('release_state', 'present', ['release_name', 'chart_ref']),
                     ('release_state', 'absent', ['release_name'])],
        supports_check_mode=True,
    )

    if not IMP_YAML:
        module.fail_json(msg=missing_required_lib("yaml"), exception=IMP_YAML_ERR)

    changed = False

    bin_path = module.params.get('binary_path')
    chart_ref = module.params.get('chart_ref')
    chart_repo_url = module.params.get('chart_repo_url')
    chart_version = module.params.get('chart_version')
    release_name = module.params.get('release_name')
    release_namespace = module.params.get('release_namespace')
    release_state = module.params.get('release_state')
    release_values = module.params.get('release_values')
    values_files = module.params.get('values_files')
    update_repo_cache = module.params.get('update_repo_cache')

    # Helm options
    disable_hook = module.params.get('disable_hook')
    force = module.params.get('force')
    kube_context = module.params.get('kube_context')
    kubeconfig_path = module.params.get('kubeconfig_path')
    purge = module.params.get('purge')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    atomic = module.params.get('atomic')
    create_namespace = module.params.get('create_namespace')
    replace = module.params.get('replace')

    # prefer an explicitly supplied helm binary over PATH lookup
    if bin_path is not None:
        helm_cmd_common = bin_path
    else:
        helm_cmd_common = module.get_bin_path('helm', required=True)

    if kube_context is not None:
        helm_cmd_common += " --kube-context " + kube_context

    if kubeconfig_path is not None:
        helm_cmd_common += " --kubeconfig " + kubeconfig_path

    if update_repo_cache:
        run_repo_update(helm_cmd_common)

    helm_cmd_common += " --namespace=" + release_namespace

    # Get real/deployed release status
    release_status = get_release_status(helm_cmd_common, release_name)

    # keep helm_cmd_common for get_release_status in module_exit_json
    helm_cmd = helm_cmd_common
    if release_state == "absent" and release_status is not None:
        if replace:
            module.fail_json(msg="replace is not applicable when state is absent")
        helm_cmd = delete(helm_cmd, release_name, purge, disable_hook)
        changed = True
    elif release_state == "present":

        if chart_version is not None:
            helm_cmd += " --version=" + chart_version

        if chart_repo_url is not None:
            helm_cmd += " --repo=" + chart_repo_url

        # Fetch chart info to have real version and real name for chart_ref from archive, folder or url
        chart_info = fetch_chart_info(helm_cmd, chart_ref)

        if release_status is None:  # Not installed
            helm_cmd = deploy(helm_cmd, release_name, release_values, chart_ref, wait, wait_timeout,
                              disable_hook, False, values_files=values_files, atomic=atomic,
                              create_namespace=create_namespace, replace=replace)
            changed = True

        else:
            # the 'appVersion' specification is optional in a chart
            chart_app_version = chart_info.get('appVersion', None)
            released_app_version = release_status.get('app_version', None)

            # when deployed without an 'appVersion' chart value the 'helm list' command will return the entry `app_version: ""`
            appversion_is_same = (chart_app_version == released_app_version) or (chart_app_version is None and released_app_version == "")

            # redeploy when forced, or when values, chart identity or
            # appVersion differ from what is currently deployed
            if force or release_values != release_status['values'] \
                    or (chart_info['name'] + '-' + chart_info['version']) != release_status["chart"] \
                    or not appversion_is_same:
                helm_cmd = deploy(helm_cmd, release_name, release_values, chart_ref, wait, wait_timeout,
                                  disable_hook, force, values_files=values_files, atomic=atomic,
                                  create_namespace=create_namespace, replace=replace)
                changed = True

    if module.check_mode:
        # report what would change without executing helm
        check_status = {'values': {
            "current": release_status['values'],
            "declared": release_values
        }}
        module.exit_json(
            changed=changed,
            command=helm_cmd,
            status=check_status,
            stdout='',
            stderr='',
        )
    elif not changed:
        module.exit_json(
            changed=False,
            status=release_status,
            stdout='',
            stderr='',
            command=helm_cmd,
        )

    rc, out, err = exec_command(helm_cmd)

    module.exit_json(
        changed=changed,
        stdout=out,
        stderr=err,
        status=get_release_status(helm_cmd_common, release_name),
        command=helm_cmd,
    )
def main():
    """Manage AIX logical volumes: create, remove, resize or change the
    inter-physical-volume allocation policy of an LV inside a volume group."""
    module = AnsibleModule(
        argument_spec=dict(vg=dict(type='str', required=True),
                           lv=dict(type='str', required=True),
                           lv_type=dict(type='str', default='jfs2'),
                           size=dict(type='str'),
                           opts=dict(type='str', default=''),
                           copies=dict(type='str', default='1'),
                           state=dict(type='str', default='present',
                                      choices=['absent', 'present']),
                           policy=dict(type='str', default='maximum',
                                      choices=['maximum', 'minimum']),
                           pvs=dict(type='list', default=list())),
        supports_check_mode=True,
    )

    vg = module.params['vg']
    lv = module.params['lv']
    lv_type = module.params['lv_type']
    size = module.params['size']
    opts = module.params['opts']
    copies = module.params['copies']
    policy = module.params['policy']
    state = module.params['state']
    pvs = module.params['pvs']

    # Physical volumes are appended to mklv as a space-separated list.
    pv_list = ' '.join(pvs)

    # Map the module policy name to the single-letter value expected by the
    # mklv/chlv '-e' (inter-physical-volume allocation) flag.
    if policy == 'maximum':
        lv_policy = 'x'
    else:
        lv_policy = 'm'

    # Add echo command when running in check-mode: the command line is still
    # built and "run", but only printed instead of executed.
    if module.check_mode:
        test_opt = 'echo '
    else:
        test_opt = ''

    # check if system commands are available
    lsvg_cmd = module.get_bin_path("lsvg", required=True)
    lslv_cmd = module.get_bin_path("lslv", required=True)

    # Get information on volume group requested
    rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))

    if rc != 0:
        if state == 'absent':
            # Nothing to remove if the whole volume group is missing.
            module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)

    this_vg = parse_vg(vg_info)

    if size is not None:
        # Calculate pp size and round it up based on pp size.
        lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])

    # Get information on logical volume requested
    rc, lv_info, err = module.run_command("%s %s" % (lslv_cmd, lv))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)

    changed = False

    # NOTE(review): when lslv failed and state is 'present', lv_info holds
    # the failed command output; parse_lv is presumably expected to return
    # None in that case -- confirm against parse_lv's implementation.
    this_lv = parse_lv(lv_info)

    # A size is mandatory only when the LV must be created.
    if state == 'present' and not size:
        if this_lv is None:
            module.fail_json(msg="No size given.")

    if this_lv is None:
        if state == 'present':
            # Creation path: 'size' (and so lv_size) is guaranteed set here
            # by the guard above.
            if lv_size > this_vg['free']:
                module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))

            # create LV
            mklv_cmd = module.get_bin_path("mklv", required=True)

            cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
            rc, out, err = module.run_command(cmd)
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s created." % lv)
            else:
                module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
    else:
        if state == 'absent':
            # remove LV
            rmlv_cmd = module.get_bin_path("rmlv", required=True)
            rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
            else:
                module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
        else:
            if this_lv['policy'] != policy:
                # change lv allocation policy
                chlv_cmd = module.get_bin_path("chlv", required=True)
                rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
                else:
                    module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)

            # Moving an existing LV into a different volume group is not supported.
            if vg != this_lv['vg']:
                module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg']))

            # from here the last remaining action is to resize it, if no size parameter is passed we do nothing.
            if not size:
                module.exit_json(changed=False, msg="Logical volume %s already exist." % (lv))

            # resize LV based on absolute values
            if int(lv_size) > this_lv['size']:
                extendlv_cmd = module.get_bin_path("extendlv", required=True)
                cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
                rc, out, err = module.run_command(cmd)
                if rc == 0:
                    module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
                else:
                    module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
            elif lv_size < this_lv['size']:
                # Shrinking is refused outright because it would risk data loss.
                module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
            else:
                module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))
def main():
    """Manage AIX filesets through NIM client operations: install, update,
    remove, commit, reset and allocate/deallocate NIM resources."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='list', aliases=['filesets']),
            state=dict(choices=[
                'present',
                'commit',
                'absent',
                'installed',
                'removed',
                'allocate',
                'deallocate',
                'reset',
            ], default='present'),
            lpp_source=dict(type='str'),
            spot=dict(type='str'),
            # BUG FIX: the boolean default was previously the *string* 'True'.
            commit=dict(type='bool', default=True),
            installp_flags=dict(type='str', default='acgwXY'),
        ),
    )

    state = module.params['state']

    # Default result, overwritten below when an action actually runs.
    result = {
        # BUG FIX: 'name' previously reported module.params['state'].
        'name': module.params['name'],
        'changed': False,
        'msg': "module did do nothing"
    }

    # NOTE(review): these lookups were in the original code but their results
    # are unused here -- presumably an availability probe; confirm whether
    # the helper functions resolve these paths themselves.
    module.get_bin_path('nimclient')
    module.get_bin_path('lslpp')
    module.get_bin_path('rpm')

    # remove the installp commit flag if commit is False
    if not module.params['commit']:
        module.params['installp_flags'] = module.params['installp_flags'].replace("c", "")

    if state == 'allocate':
        # At least one NIM resource (spot or lpp_source) must be given.
        if module.params['spot'] is None and module.params['lpp_source'] is None:
            module.fail_json(msg="ERROR: give at least the spot or lpp_source to allocate", rc=1)
        if module.params['lpp_source'] is not None:
            _check(module, module.params['lpp_source'])
        if module.params['spot'] is not None:
            _check(module, module.params['spot'])
        result = allocate(module)
    elif state == 'deallocate':
        result = deallocate(module)
    elif state == 'commit':
        result = commit(module)
    elif state == 'reset':
        result = reset(module)
    elif state in ('absent', 'removed'):
        result = uninstall(module)
    elif state in ('present', 'installed'):
        if module.params['lpp_source'] is None:
            module.fail_json(msg="ERROR: lpp_source may not be empty", rc=1)
        _check(module, module.params['lpp_source'])
        # Robustness: fail cleanly instead of raising TypeError when no
        # fileset list was supplied.
        if not module.params['name']:
            module.fail_json(msg="ERROR: name (filesets) may not be empty", rc=1)
        if "update_all" in module.params['name']:
            result = update(module)
        else:
            result = install(module)

    module.exit_json(**result)
def main():
    """Create an AIX rootvg backup image with mksysb."""
    module = AnsibleModule(
        argument_spec=dict(
            backup_crypt_files=dict(type='bool', default=True),
            backup_dmapi_fs=dict(type='bool', default=True),
            create_map_files=dict(type='bool', default=False),
            exclude_files=dict(type='bool', default=False),
            exclude_wpar_files=dict(type='bool', default=False),
            extended_attrs=dict(type='bool', default=True),
            name=dict(required=True),
            new_image_data=dict(type='bool', default=True),
            software_packing=dict(type='bool', default=False),
            storage_path=dict(required=True),
            use_snapshot=dict(type='bool', default=False)
        ),
        supports_check_mode=True,
    )

    params = module.params

    # Translate each boolean parameter into the mksysb flag it controls.
    # Note that some flags are emitted when the parameter is True, while
    # others are emitted when it is False (they *disable* a default).
    create_map_files = '-m' if params['create_map_files'] else ''
    use_snapshot = '-T' if params['use_snapshot'] else ''
    exclude_files = '-e' if params['exclude_files'] else ''
    exclude_wpar_files = '-G' if params['exclude_wpar_files'] else ''
    new_image_data = '-i' if params['new_image_data'] else ''
    software_packing = '' if params['software_packing'] else '-p'
    extended_attrs = '' if params['extended_attrs'] else '-a'
    backup_crypt_files = '' if params['backup_crypt_files'] else '-Z'
    backup_dmapi_fs = '-a' if params['backup_dmapi_fs'] else ''

    name = params['name']
    storage_path = params['storage_path']

    # Validate if storage_path is a valid directory (guard clause).
    if not os.path.isdir(storage_path):
        module.fail_json(msg="Storage path %s is not valid." % storage_path)

    # In check mode, report that a backup would be taken without running it.
    if module.check_mode:
        module.exit_json(changed=True)

    # Generates the mksysb image backup.
    mksysb_cmd = module.get_bin_path('mksysb', True)
    rc, mksysb_output, err = module.run_command(
        "%s -X %s %s %s %s %s %s %s %s %s %s/%s" % (
            mksysb_cmd, create_map_files, use_snapshot, exclude_files,
            exclude_wpar_files, software_packing, extended_attrs,
            backup_crypt_files, backup_dmapi_fs, new_image_data,
            storage_path, name))
    if rc == 0:
        module.exit_json(changed=True, msg=mksysb_output)
    module.fail_json(msg="mksysb failed.", rc=rc, err=err)
def main():
    """Manage Arch Linux packages with pacman: install, remove, upgrade and
    synchronise the package database."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='list', elements='str', aliases=['pkg', 'package']),
            state=dict(type='str', default='present', choices=[
                'present', 'installed', 'latest', 'absent', 'removed'
            ]),
            force=dict(type='bool', default=False),
            extra_args=dict(type='str', default=''),
            upgrade=dict(type='bool', default=False),
            upgrade_extra_args=dict(type='str', default=''),
            update_cache=dict(type='bool', default=False, aliases=['update-cache']),
            update_cache_extra_args=dict(type='str', default=''),
        ),
        required_one_of=[['name', 'update_cache', 'upgrade']],
        mutually_exclusive=[['name', 'upgrade']],
        supports_check_mode=True,
    )

    pacman_path = module.get_bin_path('pacman', True)
    # Force a stable locale so pacman output can be parsed reliably.
    module.run_command_environ_update = dict(LC_ALL='C')

    p = module.params

    # normalize the state parameter
    if p['state'] in ['present', 'installed']:
        p['state'] = 'present'
    elif p['state'] in ['absent', 'removed']:
        p['state'] = 'absent'

    if p["update_cache"] and not module.check_mode:
        update_package_db(module, pacman_path)
        # If refreshing the cache was the only requested action, we are done.
        if not (p['name'] or p['upgrade']):
            module.exit_json(changed=True, msg='Updated the package master lists')

    # In check mode a cache-only run just reports what would have happened.
    if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
        module.exit_json(changed=True, msg='Would have updated the package cache')

    if p['upgrade']:
        upgrade(module, pacman_path)

    if p['name']:
        # Expand any pacman package *groups* into their member packages.
        pkgs = expand_package_groups(module, pacman_path, p['name'])

        # Parallel list to pkgs: the i-th entry is the package file path when
        # the name was given as a *.pkg.tar* archive, else None.
        # NOTE(review): the 'continue' on empty names skips the append and
        # would shift this alignment -- confirm expand_package_groups cannot
        # return empty entries in practice.
        pkg_files = []
        for i, pkg in enumerate(pkgs):
            if not pkg:
                # avoid empty strings
                continue
            elif re.match(r".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg):
                # The package given is a filename, extract the raw pkg name from
                # it and store the filename
                pkg_files.append(pkg)
                pkgs[i] = re.sub(r'-[0-9].*$', '', pkgs[i].split('/')[-1])
            else:
                pkg_files.append(None)

        if module.check_mode:
            # check_packages() presumably reports and exits the module here --
            # confirm; otherwise the install/remove calls below would also
            # run in check mode.
            check_packages(module, pacman_path, pkgs, p['state'])

        if p['state'] in ['present', 'latest']:
            install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
        elif p['state'] == 'absent':
            remove_packages(module, pacman_path, pkgs)
    else:
        module.exit_json(changed=False, msg="No package specified to work on.")
def main():
    """Generate or remove Diffie-Hellman parameter files, choosing between
    the 'cryptography' Python library and the 'openssl' CLI backend."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['absent', 'present']),
            size=dict(type='int', default=4096),
            force=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            backup=dict(type='bool', default=False),
            select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'openssl']),
            return_content=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    # The parent directory of the target file must already exist.
    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(name=base_dir, msg="The directory '%s' does not exist or the file is not a directory" % base_dir)

    if module.params['state'] == 'present':
        backend = module.params['select_crypto_backend']
        if backend == 'auto':
            # Detection what is possible
            can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
            can_use_openssl = module.get_bin_path('openssl', False) is not None

            # First try cryptography, then OpenSSL
            if can_use_cryptography:
                backend = 'cryptography'
            elif can_use_openssl:
                backend = 'openssl'

            # Success? If 'backend' is still 'auto', neither is available.
            if backend == 'auto':
                module.fail_json(msg=("Can't detect either the required Python library cryptography (>= {0}) "
                                      "or the OpenSSL binary openssl").format(MINIMAL_CRYPTOGRAPHY_VERSION))

        if backend == 'openssl':
            dhparam = DHParameterOpenSSL(module)
        elif backend == 'cryptography':
            # An explicitly requested backend must still be importable.
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            dhparam = DHParameterCryptography(module)

        if module.check_mode:
            result = dhparam.dump()
            # 'force' always regenerates; otherwise changed means the
            # existing parameters do not satisfy the requested properties.
            result['changed'] = module.params['force'] or not dhparam.check(module)
            module.exit_json(**result)

        try:
            dhparam.generate(module)
        except DHParameterError as exc:
            module.fail_json(msg=to_native(exc))
    else:
        dhparam = DHParameterAbsent(module)

        if module.check_mode:
            result = dhparam.dump()
            # changed iff the file currently exists and would be removed.
            result['changed'] = os.path.exists(module.params['path'])
            module.exit_json(**result)

        if os.path.exists(module.params['path']):
            try:
                dhparam.remove(module)
            except Exception as exc:
                module.fail_json(msg=to_native(exc))

    result = dhparam.dump()
    module.exit_json(**result)
def main():
    """Manage the Uncomplicated Firewall (ufw): global state, default
    policies, logging and individual rules."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
            default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
            logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
            direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
            delete=dict(type='bool', default=False),
            route=dict(type='bool', default=False),
            insert=dict(type='str'),
            rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
            interface=dict(type='str', aliases=['if']),
            log=dict(type='bool', default=False),
            from_ip=dict(type='str', default='any', aliases=['from', 'src']),
            from_port=dict(type='str'),
            to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
            to_port=dict(type='str', aliases=['port']),
            proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp']),
            app=dict(type='str', aliases=['name']),
            comment=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['app', 'proto', 'logging']
        ],
    )

    # Every command line actually run, returned to the caller for debugging.
    cmds = []

    def execute(cmd):
        # 'cmd' is a list of [condition..., fragment] lists: keep only the
        # entries whose leading element is truthy, then join the trailing
        # (fragment) element of each into the real command line.
        cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))

        cmds.append(cmd)
        (rc, out, err) = module.run_command(cmd)

        if rc != 0:
            module.fail_json(msg=err or out)

    def ufw_version():
        """
        Returns the major and minor version of ufw installed on the system.
        """
        rc, out, err = module.run_command("%s --version" % ufw_bin)
        if rc != 0:
            module.fail_json(msg="Failed to get ufw version.", rc=rc, out=out, err=err)

        lines = [x for x in out.split('\n') if x.strip() != '']
        if len(lines) == 0:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
        if matches is None:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        # Convert version to numbers
        major = int(matches.group(1))
        minor = int(matches.group(2))
        rev = 0
        if matches.group(3) is not None:
            rev = int(matches.group(3))

        return major, minor, rev

    params = module.params

    # Ensure at least one of the command arguments are given
    command_keys = ['state', 'default', 'rule', 'logging']
    commands = dict((key, params[key]) for key in command_keys if params[key])

    if len(commands) < 1:
        module.fail_json(msg="Not any of the command arguments %s given" % commands)

    if (params['interface'] is not None and params['direction'] is None):
        module.fail_json(msg="Direction must be specified when creating a rule on an interface")

    # Ensure ufw is available
    ufw_bin = module.get_bin_path('ufw', True)

    # Save the pre state and rules in order to recognize changes
    (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")

    # Execute commands
    for (command, value) in commands.items():
        # Base fragments: the binary, plus '--dry-run' when in check mode.
        cmd = [[ufw_bin], [module.check_mode, '--dry-run']]

        if command == 'state':
            # 'ufw reset' cannot be meaningfully dry-run, so skip it here.
            if value == 'reset' and module.check_mode:
                continue

            states = {'enabled': 'enable', 'disabled': 'disable',
                      'reloaded': 'reload', 'reset': 'reset'}
            # '-f' forces non-interactive mode for enable/reset prompts.
            execute(cmd + [['-f'], [states[value]]])

        elif command == 'logging':
            execute(cmd + [[command], [value]])

        elif command == 'default':
            if params['direction'] not in ['outgoing', 'incoming', 'routed', None]:
                module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.')
            execute(cmd + [[command], [value], [params['direction']]])

        elif command == 'rule':
            if params['direction'] not in ['in', 'out', None]:
                module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.')

            # Rules are constructed according to the long format
            #
            # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
            #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
            #     [proto protocol] [app application] [comment COMMENT]
            cmd.append([module.boolean(params['route']), 'route'])
            cmd.append([module.boolean(params['delete']), 'delete'])
            cmd.append([params['insert'], "insert %s" % params['insert']])
            cmd.append([value])
            cmd.append([params['direction'], "%s" % params['direction']])
            cmd.append([params['interface'], "on %s" % params['interface']])
            cmd.append([module.boolean(params['log']), 'log'])

            # NOTE: 'value' is deliberately reused/overwritten below.
            for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
                                    ('to_ip', "to %s"), ('to_port', "port %s"),
                                    ('proto', "proto %s"), ('app', "app '%s'")]:
                value = params[key]
                cmd.append([value, template % (value)])

            ufw_major, ufw_minor, _ = ufw_version()
            # comment is supported only in ufw version after 0.35
            if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
                cmd.append([params['comment'], "comment '%s'" % params['comment']])

            execute(cmd)

    # Get the new state
    (_, post_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")
    # Change is detected by diffing the status output and the rule tuples.
    changed = (pre_state != post_state) or (pre_rules != post_rules)

    return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
def main():
    """Extract a SAP SAPCAR archive, optionally verifying its signature
    manifest, and optionally removing the archive afterwards."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True),
            dest=dict(type='path'),
            binary_path=dict(type='path'),
            signature=dict(type='bool', default=False),
            security_library=dict(type='path'),
            manifest=dict(type='str', default="SIGNATURE.SMF"),
            remove=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
    )
    # Defaults used when the extraction command is never run (check mode,
    # or archive already unpacked).
    rc, out, err = [0, "", ""]
    params = module.params
    check_mode = module.check_mode

    path = params['path']
    dest = params['dest']
    signature = params['signature']
    security_library = params['security_library']
    manifest = params['manifest']
    remove = params['remove']

    # Resolve (and possibly download) the SAPCAR binary itself.
    bin_path = download_SAPCAR(params['binary_path'], module)

    # Default destination: the directory containing the archive.
    if dest is None:
        dest_head_tail = os.path.split(path)
        dest = dest_head_tail[0] + '/'
    else:
        if not os.path.exists(dest):
            os.makedirs(dest, 0o755)

    if bin_path is not None:
        command = [module.get_bin_path(bin_path, required=True)]
    else:
        try:
            # NOTE(review): get_bin_path(required=True) fails the module
            # itself rather than raising, so this except clause is
            # presumably defensive only -- confirm.
            command = [module.get_bin_path('sapcar', required=True)]
        except Exception as e:
            module.fail_json(msg='Failed to find SAPCAR at the expected path or URL "{0}". Please check whether it is available: {1}'.format(bin_path, to_native(e)))

    present = check_if_present(command[0], path, dest, signature, manifest, module)

    if not present:
        # Build: sapcar -xvf <archive> -R <dest> [-L <lib>] [-manifest <m>]
        command.extend(['-xvf', path, '-R', dest])
        if security_library:
            command.extend(['-L', security_library])
        if signature:
            command.extend(['-manifest', manifest])
        if not check_mode:
            (rc, out, err) = module.run_command(command, check_rc=True)
        changed = True
    else:
        changed = False
        out = "allready unpacked"

    if remove:
        os.remove(path)

    # NOTE(review): 'message' carries the return code here, not a text
    # message -- looks accidental but is part of the module's output
    # contract; confirm before changing.
    module.exit_json(changed=changed, message=rc, stdout=out, stderr=err, command=' '.join(command))
def main():
    """Manage entries in the AIX /etc/inittab file via mkitab/chitab/rmitab."""
    # initialize
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['service']),
            runlevel=dict(type='str', required=True),
            action=dict(type='str', choices=[
                'boot',
                'bootwait',
                'hold',
                'initdefault',
                'off',
                'once',
                'ondemand',
                'powerfail',
                'powerwait',
                'respawn',
                'sysinit',
                'wait',
            ]),
            command=dict(type='str', required=True),
            insertafter=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True,
    )

    result = {
        'name': module.params['name'],
        'changed': False,
        'msg': ""
    }

    # Find commandline strings
    mkitab = module.get_bin_path('mkitab')
    rmitab = module.get_bin_path('rmitab')
    chitab = module.get_bin_path('chitab')
    rc = 0

    # check if the new entry exists
    current_entry = check_current_entry(module)

    # if action is install or change,
    if module.params['state'] == 'present':

        # BUG FIX: 'action' is optional in the argument spec but mandatory to
        # build an entry; fail cleanly instead of raising TypeError on the
        # string concatenation below.
        if module.params['action'] is None:
            module.fail_json(msg="action is required when state is present")

        # create new entry string: "name:runlevel:action:command"
        new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
            ":" + module.params['action'] + ":" + module.params['command']

        # If current entry exists or fields are different(if the entry does not
        # exists, then the entry wil be created
        if (not current_entry['exist']) or (
                module.params['runlevel'] != current_entry['runlevel'] or
                module.params['action'] != current_entry['action'] or
                module.params['command'] != current_entry['command']):

            # If the entry does exist then change the entry
            if current_entry['exist']:
                if not module.check_mode:
                    (rc, out, err) = module.run_command([chitab, new_entry])
                if rc != 0:
                    module.fail_json(msg="could not change inittab", rc=rc, err=err)
                result['msg'] = "changed inittab entry" + " " + current_entry['name']
                result['changed'] = True

            # If the entry does not exist create the entry
            elif not current_entry['exist']:
                if module.params['insertafter']:
                    if not module.check_mode:
                        (rc, out, err) = module.run_command(
                            [mkitab, '-i', module.params['insertafter'], new_entry])
                else:
                    if not module.check_mode:
                        (rc, out, err) = module.run_command(
                            [mkitab, new_entry])

                if rc != 0:
                    module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
                result['msg'] = "add inittab entry" + " " + module.params['name']
                result['changed'] = True

    elif module.params['state'] == 'absent':
        # If the action is remove and the entry exists then remove the entry
        if current_entry['exist']:
            if not module.check_mode:
                (rc, out, err) = module.run_command(
                    [rmitab, module.params['name']])
                if rc != 0:
                    # BUG FIX: message previously read
                    # "could not remove entry grom inittab)".
                    module.fail_json(msg="could not remove entry from inittab", rc=rc, err=err)
            result['msg'] = "removed inittab entry" + " " + current_entry['name']
            result['changed'] = True

    module.exit_json(**result)
def main():
    """Gather facts about Docker images, volumes and containers by shelling
    out to the docker CLI, with optional per-kind filters."""
    module = AnsibleModule(argument_spec=dict(
        image_filter=dict(type='list', default=[]),
        volume_filter=dict(type='list', default=[]),
        container_filter=dict(type='list', default=[]),
    ),
        supports_check_mode=True)

    docker_bin = [module.get_bin_path('docker')]
    docker_facts = {}
    # Each DOCKER_SUBCOMMAND_LOOKUP item is presumably a
    # (fact_key, subcommand, options) tuple -- confirm against its definition.
    for item in DOCKER_SUBCOMMAND_LOOKUP:
        docker_facts[item[0]] = []
        docker_facts[item[0] + '_filtered'] = []

    # Only query docker when the binary was actually found; otherwise the
    # pre-initialized empty fact lists are returned as-is.
    if docker_bin[0]:
        docker_facts[item[0]] = []

        # Run each Docker command
        for item in DOCKER_SUBCOMMAND_LOOKUP:
            rc, out, err = run_docker_command(
                module,
                docker_bin,
                sub_command=item[1],
                opts=item[2])
            # For everything but containers, return just the UIDs
            if item[0] != 'containers':
                docker_facts[item[0]] = out
            elif item[0] == 'containers':
                # For containers, use a custom format to get name, id,
                # and status
                for line in out:
                    container_name, container_id, container_status = \
                        line.split('##')
                    container_status = container_status.split()[0]
                    docker_facts[item[0]].append({
                        'name': container_name,
                        'id': container_id,
                        'status': container_status
                    })
            # Get filtered facts: same command, restricted by the
            # corresponding *_filter module parameter (singularized key).
            rc, out, err = run_docker_command(
                module,
                docker_bin,
                sub_command=item[1],
                opts=item[2],
                filters=module.params[item[0].rstrip('s') + '_filter'])
            if item[0] != 'containers':
                docker_facts[item[0] + '_filtered'] = out
            elif item[0] == 'containers':
                for line in out:
                    container_name, container_id, container_status = \
                        line.split('##')
                    container_status = container_status.split()[0]
                    docker_facts[item[0] + '_filtered'].append({
                        'name': container_name,
                        'id': container_id,
                        'status': container_status
                    })

    results = dict(ansible_facts=dict(docker=docker_facts))
    module.exit_json(**results)
def main():
    """Manage dpkg file diversions, wrapping dpkg-divert(1) and handling the
    re-diversion ('force') case that dpkg-divert itself refuses."""
    # Mimic the behaviour of the dpkg-divert(1) command: '--add' is implicit
    # when not using '--remove'; '--rename' takes care to never overwrite
    # existing files; and options are intended to not conflict between them.

    # 'force' is an option of the module, not of the command, and implies to
    # run the command twice. Its purpose is to allow one to re-divert a file
    # with another target path or to 'give' it to another package, in one task.
    # This is very easy because one of the values is unique in the diversion
    # database, and dpkg-divert itself is idempotent (does nothing when nothing
    # needs doing).
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True, type='path', aliases=['name']),
            state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
            package=dict(required=False, type='str', default='LOCAL'),
            divert=dict(required=False, type='path'),
            rename=dict(required=False, type='bool', default=True),
            delete=dict(required=False, type='bool', default=False),
            force=dict(required=False, type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    path = module.params['path']
    state = module.params['state']
    package = module.params['package']
    divert = module.params['divert']
    rename = module.params['rename']
    delete = module.params['delete']
    force = module.params['force']

    DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True)
    # We need to parse the command's output, which is localized.
    # So we have to reset environment variable (LC_ALL).
    ENVIRONMENT = module.get_bin_path('env', required=True)

    # Start to build the commandline we'll have to run
    COMMANDLINE = [ENVIRONMENT, 'LC_ALL=C', DPKG_DIVERT, path]

    # Then insert options as requested in the task parameters:
    if state == 'absent':
        COMMANDLINE.insert(3, '--remove')
    elif state == 'present':
        COMMANDLINE.insert(3, '--add')
    if rename:
        COMMANDLINE.insert(3, '--rename')
    if divert:
        COMMANDLINE.insert(3, '--divert')
        COMMANDLINE.insert(4, divert)
    else:
        # Default divert targets follow dpkg-divert's conventions:
        # '<path>.dpkg-divert' for local diversions, '<path>.distrib' otherwise.
        if package == 'LOCAL':
            COMMANDLINE.insert(3, '--divert')
            COMMANDLINE.insert(4, '.'.join([path, 'dpkg-divert']))
        elif package:
            COMMANDLINE.insert(3, '--divert')
            COMMANDLINE.insert(4, '.'.join([path, 'distrib']))
    if package == 'LOCAL':
        COMMANDLINE.insert(3, '--local')
    elif package:
        COMMANDLINE.insert(3, '--package')
        COMMANDLINE.insert(4, package)

    # dpkg-divert has a useful --test option that we will use in check mode or
    # when needing to parse output before actually doing anything.
    TESTCOMMAND = list(COMMANDLINE)
    TESTCOMMAND.insert(3, '--test')
    if module.check_mode:
        COMMANDLINE = list(TESTCOMMAND)
    cmd = ' '.join(COMMANDLINE)

    # `dpkg-divert --listpackage FILE` always returns 0, but not diverted files
    # provide no output.
    rc, listpackage, _ = module.run_command(
        [DPKG_DIVERT, '--listpackage', path])
    rc, placeholder, _ = module.run_command(TESTCOMMAND)

    # There is probably no need to do more than that. Please read the first
    # sentence of the next comment for a better understanding of the following
    # `if` statement:
    if rc == 0 or not force or not listpackage:
        # If requested, delete the file to make way for the reverted one, but
        # only of the diversion currently exists.
        if not module.check_mode:
            if state == 'absent' and listpackage and delete:
                try:
                    os.unlink(path)
                except OSError as e:
                    # It may already have been removed
                    if e.errno != errno.ENOENT:
                        raise AnsibleModuleError(
                            results={
                                'msg': "unlinking failed: %s " % to_native(e),
                                'path': path
                            })

        # In the check mode, the 'dpkg-divert' command still tests the
        # diversion removal for real and returns with an error when a changed
        # file is in place. In that specific case, we instead simulate a file
        # deletion and diversion removal ourselves to have the check mode
        # succeed.
        if (module.check_mode and state == 'absent' and delete and
                listpackage and os.path.exists(path)):
            # Fabricate output matching what dpkg-divert would have printed.
            fake_stdout = ['Deleting', path, 'and', 'removing']
            if package == 'LOCAL':
                fake_stdout.append('local')
            fake_stdout.extend(['diversion', 'of', path, 'to'])
            if divert:
                fake_stdout.append(divert)
            else:
                if package == 'LOCAL':
                    fake_stdout.append('.'.join([path, 'dpkg-divert']))
                elif package:
                    fake_stdout.append('.'.join([path, 'distrib']))
            rc, stdout, stderr = [0, ' '.join(fake_stdout), '']
        else:
            rc, stdout, stderr = module.run_command(COMMANDLINE, check_rc=True)

        # dpkg-divert prints "Leaving ..." / "No diversion ..." when nothing
        # had to be done -- that is the unchanged case.
        if re.match('^(Leaving|No diversion)', stdout):
            module.exit_json(changed=False, stdout=stdout, stderr=stderr, cmd=cmd)
        else:
            module.exit_json(changed=True, stdout=stdout, stderr=stderr, cmd=cmd)

    # So, here we are: the test failed AND force is true AND a diversion exists
    # for the file. Anyway, we have to remove it first (then stop here, or add
    # a new diversion for the same file), and without failure. Cases of failure
    # with dpkg-divert are:
    # - The diversion does not belong to the same package (or LOCAL)
    # - The divert filename is not the same (e.g. path.distrib != path.divert)
    # So: force removal by stripping '--package' and '--divert' options... and
    # their arguments. Fortunately, this module accepts only a few parameters,
    # so we can rebuild a whole command line from scratch at no cost:
    FORCEREMOVE = [ENVIRONMENT, 'LC_ALL=C', DPKG_DIVERT, '--remove', path]
    module.check_mode and FORCEREMOVE.insert(3, '--test')
    rename and FORCEREMOVE.insert(3, '--rename')
    forcerm = ' '.join(FORCEREMOVE)

    if state == 'absent':
        rc, stdout, stderr = module.run_command(FORCEREMOVE, check_rc=True)
        module.exit_json(changed=True, stdout=stdout, stderr=stderr, cmd=forcerm)

    # The situation is that we want to modify the settings (package or divert)
    # of an existing diversion. dpkg-divert does not handle this, and we have
    # to remove the diversion and set a new one. First, get state info:
    rc, truename, _ = module.run_command([DPKG_DIVERT, '--truename', path])
    rc, rmout, rmerr = module.run_command(FORCEREMOVE, check_rc=True)
    if module.check_mode:
        module.exit_json(changed=True, cmd=[forcerm, cmd],
                         msg=[rmout, "*** RUNNING IN CHECK MODE ***",
                              "The next step can't be actually performed - "
                              "even dry-run - without error (since the "
                              "previous removal didn't happen) but is "
                              "supposed to achieve the task."])

    old = truename.rstrip()
    if divert:
        new = divert
    else:
        if package == 'LOCAL':
            new = '.'.join([path, 'dpkg-divert'])
        elif package:
            new = '.'.join([path, 'distrib'])

    # Store state of files as they may change
    old_exists = os.path.isfile(old)
    new_exists = os.path.isfile(new)

    # RENAMING NOT REMAINING
    # The behaviour of this module is to NEVER overwrite a file, i.e. never
    # change file contents but only file paths and only if not conflicting,
    # as does dpkg-divert. It means that if there is already a diversion for
    # a given file and the divert file exists too, the divert file must be
    # moved from old to new divert paths between the two dpkg-divert commands,
    # because:
    #
    # src = /etc/screenrc (tweaked ; exists)
    # old = /etc/screentc.distrib (default ; exists)
    # new = /etc/screenrc.ansible (not existing yet)
    #
    # Without extra move:
    # 1. dpkg-divert --rename --remove src
    #    => dont move old to src because src exists
    # 2. dpkg-divert --rename --divert new --add src
    #    => move src to new because new doesn't exist
    # Results:
    # - old still exists with default contents
    # - new holds the tweaked contents
    # - src is missing
    # => confusing, kind of breakage
    #
    # With extra move:
    # 1. dpkg-divert --rename --remove src
    #    => dont move old to src because src exists
    # 2. os.path.rename(old, new) [conditional]
    #    => move old to new because new doesn't exist
    # 3. dpkg-divert --rename --divert new --add src
    #    => dont move src to new because new exists
    # Results:
    # - old does not exist anymore
    # - src is still the same tweaked file
    # - new exists with default contents
    # => idempotency for next times, and no breakage
    #
    if rename and old_exists and not new_exists:
        os.rename(old, new)

    rc, stdout, stderr = module.run_command(COMMANDLINE)
    rc == 0 and module.exit_json(changed=True, stdout=stdout, stderr=stderr,
                                 cmd=[forcerm, cmd], msg=[rmout, stdout])

    # Damn! FORCEREMOVE succeeded and COMMANDLINE failed. Try to restore old
    # state and end up with a 'failed' status anyway.
    if (rename and (old_exists and not os.path.isfile(old)) and
            (os.path.isfile(new) and not new_exists)):
        os.rename(new, old)
    RESTORE = [ENVIRONMENT, 'LC_ALL=C', DPKG_DIVERT, '--divert', old, path]
    old_pkg = listpackage.rstrip()
    if old_pkg == "LOCAL":
        RESTORE.insert(3, '--local')
    else:
        RESTORE.insert(3, '--package')
        RESTORE.insert(4, old_pkg)
    rename and RESTORE.insert(3, '--rename')
    module.run_command(RESTORE, check_rc=True)
    module.exit_json(failed=True, changed=True, stdout=stdout, stderr=stderr,
                     cmd=[forcerm, cmd])
def main():
    """Install, remove, annotate and autoremove FreeBSD packages via pkg(8).

    Delegates the actual work to install_packages / remove_packages /
    autoremove_packages / annotate_packages and reports an aggregated
    changed flag and message list through module.exit_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default="present", choices=["present", "absent"], required=False),
            name=dict(aliases=["pkg"], required=True, type='list'),
            cached=dict(default=False, type='bool'),
            annotation=dict(default="", required=False),
            pkgsite=dict(default="", required=False),
            rootdir=dict(default="", required=False, type='path'),
            chroot=dict(default="", required=False, type='path'),
            jail=dict(default="", required=False, type='str'),
            autoremove=dict(default=False, type='bool')),
        supports_check_mode=True,
        mutually_exclusive=[["rootdir", "chroot", "jail"]])

    pkg_bin = module.get_bin_path('pkg', True)
    params = module.params
    requested_pkgs = params["name"]

    overall_changed = False
    messages = []

    # Build the directory-selection argument. At most one of rootdir/chroot/
    # jail can be set (enforced by mutually_exclusive above).
    dir_arg = ""
    if params["rootdir"] != "":
        # --rootdir needs pkg >= 1.5; fail_json exits, so no else is needed.
        old_pkgng = pkgng_older_than(module, pkg_bin, [1, 5, 0])
        if old_pkgng:
            module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
        dir_arg = "--rootdir %s" % (params["rootdir"])

    if params["chroot"] != "":
        dir_arg = '--chroot %s' % (params["chroot"])

    if params["jail"] != "":
        dir_arg = '--jail %s' % (params["jail"])

    # Each helper returns a (changed, message) pair; fold them into the
    # aggregate result.
    if params["state"] == "present":
        step_changed, step_msg = install_packages(module, pkg_bin, requested_pkgs, params["cached"], params["pkgsite"], dir_arg)
        overall_changed = overall_changed or step_changed
        messages.append(step_msg)
    elif params["state"] == "absent":
        step_changed, step_msg = remove_packages(module, pkg_bin, requested_pkgs, dir_arg)
        overall_changed = overall_changed or step_changed
        messages.append(step_msg)

    if params["autoremove"]:
        step_changed, step_msg = autoremove_packages(module, pkg_bin, dir_arg)
        overall_changed = overall_changed or step_changed
        messages.append(step_msg)

    if params["annotation"]:
        step_changed, step_msg = annotate_packages(module, pkg_bin, requested_pkgs, params["annotation"], dir_arg)
        overall_changed = overall_changed or step_changed
        messages.append(step_msg)

    module.exit_json(changed=overall_changed, msg=", ".join(messages))
def main():
    """Create, resize, remove and (de)activate LVM2 logical volumes.

    Drives the lvcreate/lvextend/lvreduce/lvremove/lvchange CLI tools via
    AnsibleModule.run_command. All terminal paths go through
    module.exit_json/module.fail_json (which do not return).
    """
    module = AnsibleModule(
        argument_spec=dict(
            vg=dict(type='str', required=True),
            lv=dict(type='str'),
            size=dict(type='str'),
            opts=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            force=dict(type='bool', default=False),
            shrink=dict(type='bool', default=True),
            active=dict(type='bool', default=True),
            snapshot=dict(type='str'),
            pvs=dict(type='str'),
            resizefs=dict(type='bool', default=False),
            thinpool=dict(type='str'),
        ),
        supports_check_mode=True,
        required_one_of=(['lv', 'thinpool'], ),
    )

    module.run_command_environ_update = LVOL_ENV_VARS

    # Determine if the "--yes" option should be used
    version_found = get_lvm_version(module)
    if version_found is None:
        module.fail_json(msg="Failed to get LVM version number")
    version_yesopt = mkversion(2, 2, 99)  # First LVM with the "--yes" option
    if version_found >= version_yesopt:
        yesopt = "--yes"
    else:
        yesopt = ""

    vg = module.params['vg']
    lv = module.params['lv']
    size = module.params['size']
    opts = module.params['opts']
    state = module.params['state']
    force = module.boolean(module.params['force'])
    shrink = module.boolean(module.params['shrink'])
    active = module.boolean(module.params['active'])
    resizefs = module.boolean(module.params['resizefs'])
    thinpool = module.params['thinpool']
    # size_opt is the lv* size flag: 'L' (absolute size) by default, switched
    # to 'l' (extents/percentage) or 'V' (thin virtual size) below.
    size_opt = 'L'
    size_unit = 'm'
    snapshot = module.params['snapshot']
    pvs = module.params['pvs']

    if pvs is None:
        pvs = ""
    else:
        # pvs is passed through to the command line as space-separated paths
        pvs = pvs.replace(",", " ")

    if opts is None:
        opts = ""

    # Add --test option when running in check-mode
    if module.check_mode:
        test_opt = ' --test'
    else:
        test_opt = ''

    if size:
        # LVCREATE(8) -l --extents option with percentage
        if '%' in size:
            size_parts = size.split('%', 1)
            size_percent = int(size_parts[0])
            if size_percent > 100:
                module.fail_json(msg="Size percentage cannot be larger than 100%")

            size_whole = size_parts[1]
            if size_whole == 'ORIGIN':
                module.fail_json(msg="Snapshot Volumes are not supported")
            elif size_whole not in ['VG', 'PVS', 'FREE']:
                module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
            size_opt = 'l'
            size_unit = ''

        if '%' not in size:
            # LVCREATE(8) -L --size option unit
            if size[-1].lower() in 'bskmgtpe':
                size_unit = size[-1].lower()
                size = size[0:-1]

            try:
                # must be a number, and not e.g. 'inf'/'nan' (leading digit check)
                float(size)
                if not size[0].isdigit():
                    raise ValueError()
            except ValueError:
                module.fail_json(msg="Bad size specification of '%s'" % size)

    # when no unit, megabytes by default
    if size_opt == 'l':
        unit = 'm'
    else:
        unit = size_unit

    # Get information on volume group requested
    vgs_cmd = module.get_bin_path("vgs", required=True)
    rc, current_vgs, err = module.run_command(
        "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit, vg))

    if rc != 0:
        # Missing VG is fine when removing; fatal otherwise.
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    vgs = parse_vgs(current_vgs)
    this_vg = vgs[0]

    # Get information on logical volume requested
    lvs_cmd = module.get_bin_path("lvs", required=True)
    rc, current_lvs, err = module.run_command(
        "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit, vg))

    if rc != 0:
        if state == 'absent':
            module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
        else:
            module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)

    changed = False

    lvs = parse_lvs(current_lvs)

    if snapshot:
        # Check snapshot pre-conditions
        for test_lv in lvs:
            if test_lv['name'] == lv or test_lv['name'] == thinpool:
                if not test_lv['thinpool'] and not thinpool:
                    break
                else:
                    module.fail_json(msg="Snapshots of thin pool LVs are not supported.")
        else:
            # for/else: no matching origin LV found
            module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg))
        check_lv = snapshot
    elif thinpool:
        if lv:
            # Check thin volume pre-conditions
            for test_lv in lvs:
                if test_lv['name'] == thinpool:
                    break
            else:
                module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." % (thinpool, vg))
            check_lv = lv
        else:
            check_lv = thinpool
    else:
        check_lv = lv

    # Locate the LV we will operate on (accept 'vg/lv' style names too).
    for test_lv in lvs:
        if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
            this_lv = test_lv
            break
    else:
        this_lv = None

    msg = ''
    if this_lv is None:
        if state == 'present':
            # Require size argument except for snapshot of thin volumes
            if (lv or thinpool) and not size:
                for test_lv in lvs:
                    if test_lv['name'] == lv and test_lv['thinvol'] and snapshot:
                        break
                else:
                    module.fail_json(msg="No size given.")

            # create LV
            lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
            if snapshot is not None:
                if size:
                    cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
                else:
                    # sizeless snapshot (thin snapshot)
                    cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv)
            elif thinpool and lv:
                if size_opt == 'l':
                    module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.")
                # -V: virtual size of the thin volume
                size_opt = 'V'
                cmd = "%s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool)
            elif thinpool and not lv:
                cmd = "%s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, yesopt, size_opt, size, size_unit, opts, vg, thinpool)
            else:
                cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
            rc, _, err = module.run_command(cmd)
            if rc == 0:
                changed = True
            else:
                module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
    else:
        if state == 'absent':
            # remove LV
            if not force:
                module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name']))

            lvremove_cmd = module.get_bin_path("lvremove", required=True)
            rc, _, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)

        elif not size:
            # LV exists and no size requested: nothing to resize
            pass

        elif size_opt == 'l':
            # Resize LV based on % value
            tool = None
            size_free = this_vg['free']
            if size_whole == 'VG' or size_whole == 'PVS':
                size_requested = size_percent * this_vg['size'] / 100
            else:  # size_whole == 'FREE':
                size_requested = size_percent * this_vg['free'] / 100

            # '+' means relative to the current LV size
            if '+' in size:
                size_requested += this_lv['size']

            if this_lv['size'] < size_requested:
                if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
                    tool = module.get_bin_path("lvextend", required=True)
                else:
                    module.fail_json(
                        msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
                        (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit))
            elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']:  # more than an extent too large
                if size_requested == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                elif not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes" % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if resizefs:
                    tool = '%s %s' % (tool, '--resizefs')
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                # lvextend on a snapshot that hit its max COW size can still
                # return rc 0, so check stdout first.
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                    msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

        else:
            # resize LV based on absolute values
            tool = None
            if float(size) > this_lv['size']:
                tool = module.get_bin_path("lvextend", required=True)
            elif shrink and float(size) < this_lv['size']:
                if float(size) == 0:
                    module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
                if not force:
                    module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name']))
                else:
                    tool = module.get_bin_path("lvreduce", required=True)
                    tool = '%s %s' % (tool, '--force')

            if tool:
                if resizefs:
                    tool = '%s %s' % (tool, '--resizefs')
                cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
                rc, out, err = module.run_command(cmd)
                if "Reached maximum COW size" in out:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
                elif rc == 0:
                    changed = True
                elif "matches existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
                elif "not larger than existing size" in err:
                    module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
                else:
                    module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)

    # Finally, (de)activate an existing LV as requested; these branches exit.
    if this_lv is not None:
        if active:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
        else:
            lvchange_cmd = module.get_bin_path("lvchange", required=True)
            rc, _, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
            if rc == 0:
                module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
            else:
                module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)

    module.exit_json(changed=changed, msg=msg)
def main():
    """Manage dpkg diversions: add, remove or update a diversion for a path.

    Wraps the dpkg-divert(1) tool. Since dpkg-divert cannot update the
    'holder' or 'divert' settings of an existing diversion in one call, the
    update case removes the old diversion and re-adds the new one. All
    terminal paths go through module.exit_json/module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True, type='path'),
            state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
            holder=dict(required=False, type='str'),
            divert=dict(required=False, type='path'),
            rename=dict(required=False, type='bool', default=False),
            force=dict(required=False, type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    path = module.params['path']
    state = module.params['state']
    holder = module.params['holder']
    divert = module.params['divert']
    rename = module.params['rename']
    force = module.params['force']

    diversion_wanted = dict(path=path, state=state)
    changed = False

    DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True)
    MAINCOMMAND = [DPKG_DIVERT]

    # Option --listpackage is needed and comes with 1.15.0
    rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True)
    [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)]

    if LooseVersion(current_version) < LooseVersion("1.15.0"):
        module.fail_json(msg="Unsupported dpkg version (<1.15.0).")
    no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1"))

    b_path = to_bytes(path, errors='surrogate_or_strict')
    path_exists = os.path.exists(b_path)
    # Used for things not doable with a single dpkg-divert command (as forced
    # renaming of files, and diversion's 'holder' or 'divert' updates).
    target_exists = False
    truename_exists = False

    diversion_before = diversion_state(module, DPKG_DIVERT, path)
    if diversion_before['state'] == 'present':
        b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict')
        truename_exists = os.path.exists(b_divert)

    # Append options as requested in the task parameters, but ignore some of
    # them when removing the diversion.
    if rename:
        MAINCOMMAND.append('--rename')
    elif no_rename_is_supported:
        MAINCOMMAND.append('--no-rename')

    if state == 'present':
        if holder and holder != 'LOCAL':
            MAINCOMMAND.extend(['--package', holder])
            diversion_wanted['holder'] = holder
        else:
            MAINCOMMAND.append('--local')
            diversion_wanted['holder'] = 'LOCAL'

        if divert:
            MAINCOMMAND.extend(['--divert', divert])
            target = divert
        else:
            # dpkg-divert's default divert-to location
            target = '%s.distrib' % path

        MAINCOMMAND.extend(['--add', path])
        diversion_wanted['divert'] = target
        b_target = to_bytes(target, errors='surrogate_or_strict')
        target_exists = os.path.exists(b_target)
    else:
        MAINCOMMAND.extend(['--remove', path])
        diversion_wanted['divert'] = None
        diversion_wanted['holder'] = None

    # Start to populate the returned objects.
    diversion = diversion_before.copy()
    maincommand = ' '.join(MAINCOMMAND)
    commands = [maincommand]

    if module.check_mode or diversion_wanted == diversion_before:
        MAINCOMMAND.insert(1, '--test')
        diversion_after = diversion_wanted

    # Just try and see
    rc, stdout, stderr = module.run_command(MAINCOMMAND)

    if rc == 0:
        messages = [stdout.rstrip()]

    # else... cases of failure with dpkg-divert are:
    # - The diversion does not belong to the same package (or LOCAL)
    # - The divert filename is not the same (e.g. path.distrib != path.divert)
    # - The renaming is forbidden by dpkg-divert (i.e. both the file and the
    #   diverted file exist)

    elif state != diversion_before['state']:
        # There should be no case with 'divert' and 'holder' when creating the
        # diversion from none, and they're ignored when removing the diversion.
        # So this is all about renaming...
        if rename and path_exists and (
                (state == 'absent' and truename_exists) or
                (state == 'present' and target_exists)):
            if not force:
                msg = "Set 'force' param to True to force renaming of files."
                module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
                                 stderr=stderr, stdout=stdout, diversion=diversion)
        else:
            msg = "Unexpected error while changing state of the diversion."
            module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
                             stderr=stderr, stdout=stdout, diversion=diversion)

        # 'force' was set: remove the conflicting file ourselves so the
        # rename requested from dpkg-divert can succeed.
        to_remove = path
        if state == 'present':
            to_remove = target

        if not module.check_mode:
            try:
                b_remove = to_bytes(to_remove, errors='surrogate_or_strict')
                os.unlink(b_remove)
            except OSError as e:
                msg = 'Failed to remove %s: %s' % (to_remove, to_native(e))
                module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
                                 stderr=stderr, stdout=stdout, diversion=diversion)

        rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
        messages = [stdout.rstrip()]

    # The situation is that we want to modify the settings (holder or divert)
    # of an existing diversion. dpkg-divert does not handle this, and we have
    # to remove the existing diversion first, and then set a new one.
    else:
        RMDIVERSION = [DPKG_DIVERT, '--remove', path]
        if no_rename_is_supported:
            RMDIVERSION.insert(1, '--no-rename')
        rmdiversion = ' '.join(RMDIVERSION)
        if module.check_mode:
            RMDIVERSION.insert(1, '--test')
        if rename:
            # Never rename during the remove+add pair: files are moved
            # explicitly below instead.
            MAINCOMMAND.remove('--rename')
            if no_rename_is_supported:
                MAINCOMMAND.insert(1, '--no-rename')
            maincommand = ' '.join(MAINCOMMAND)

        commands = [rmdiversion, maincommand]
        rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True)

        if module.check_mode:
            messages = [rmdout.rstrip(), 'Running in check mode']
        else:
            rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
            messages = [rmdout.rstrip(), stdout.rstrip()]

            # Avoid if possible to orphan files (i.e. to dereference them in
            # diversion database but let them in place), but do not make
            # renaming issues fatal. BTW, this module is not about state of
            # files involved in the diversion.
            old = diversion_before['divert']
            new = diversion_wanted['divert']
            if new != old:
                b_old = to_bytes(old, errors='surrogate_or_strict')
                b_new = to_bytes(new, errors='surrogate_or_strict')
                if os.path.exists(b_old) and not os.path.exists(b_new):
                    try:
                        os.rename(b_old, b_new)
                    except OSError:
                        # FIX: exception variable was bound but never used
                        # (F841); best-effort rename stays non-fatal.
                        pass

    if not module.check_mode:
        diversion_after = diversion_state(module, DPKG_DIVERT, path)

    diversion = diversion_after.copy()
    diff = dict()
    if module._diff:
        diff['before'] = diversion_before
        diff['after'] = diversion_after

    if diversion_after != diversion_before:
        changed = True

    if diversion_after == diversion_wanted:
        module.exit_json(changed=changed, diversion=diversion,
                         commands=commands, messages=messages, diff=diff)
    else:
        msg = "Unexpected error: see stdout and stderr for details."
        module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
                         stderr=stderr, stdout=stdout, diversion=diversion)
def main():
    """Clone or update a Mercurial repository at a given destination.

    Uses the Hg helper class for all hg invocations and reports the
    before/after revisions plus changed/cleaned flags via module.exit_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            repo=dict(required=True, aliases=['name']),
            dest=dict(required=True, type='path'),
            revision=dict(default=None, aliases=['version']),
            force=dict(default='no', type='bool'),
            purge=dict(default='no', type='bool'),
            update=dict(default='yes', type='bool'),
            executable=dict(default=None),
        ),
    )
    params = module.params
    repo = params['repo']
    dest = params['dest']
    revision = params['revision']
    force = params['force']
    purge = params['purge']
    update = params['update']
    hg_path = params['executable'] or module.get_bin_path('hg', True)
    hgrc = os.path.join(dest, '.hg/hgrc')

    # initial state
    before = ''
    changed = False
    cleaned = False

    hg = Hg(module, dest, repo, revision, hg_path)

    # An .hg/hgrc file marks an existing checkout; without it the repo is
    # considered absent and is cloned fresh.
    if not os.path.exists(hgrc):
        rc, out, err = hg.clone()
        if rc != 0:
            module.fail_json(msg=err)
    elif not update:
        # A repository is already at dest and no update was requested.
        before = hg.get_revision()
    elif hg.at_revision:
        # Already at the wanted revision: no pull needed, but still honour
        # force and purge.
        before = hg.get_revision()
        cleaned = hg.cleanup(force, purge)
    else:
        # Remember where we started, clean as requested, then pull + update.
        before = hg.get_revision()
        cleaned = hg.cleanup(force, purge)
        for step in (hg.pull, hg.update):
            rc, out, err = step()
            if rc != 0:
                module.fail_json(msg=err)

    after = hg.get_revision()
    if before != after or cleaned:
        changed = True

    module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned)
def main():
    """Register or unregister the host with Red Hat Subscription Management.

    Builds the desired configuration from task parameters, then drives the
    subscription-manager CLI through the Rhsm helper. Exits through
    module.exit_json/module.fail_json.
    """
    # Load RHSM configuration from file
    rhsm = Rhsm(None)

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent']),
            username=dict(default=None, required=False),
            password=dict(default=None, required=False, no_log=True),
            server_hostname=dict(default=None, required=False),
            server_insecure=dict(default=None, required=False),
            rhsm_baseurl=dict(default=None, required=False),
            rhsm_repo_ca_cert=dict(default=None, required=False),
            auto_attach=dict(aliases=['autosubscribe'], default=False, type='bool'),
            activationkey=dict(default=None, required=False, no_log=True),
            org_id=dict(default=None, required=False),
            environment=dict(default=None, required=False, type='str'),
            pool=dict(default='^$', required=False, type='str'),
            pool_ids=dict(default=[], required=False, type='list'),
            consumer_type=dict(default=None, required=False),
            consumer_name=dict(default=None, required=False),
            consumer_id=dict(default=None, required=False),
            force_register=dict(default=False, type='bool'),
            server_proxy_hostname=dict(default=None, required=False),
            server_proxy_port=dict(default=None, required=False),
            server_proxy_user=dict(default=None, required=False),
            server_proxy_password=dict(default=None, required=False, no_log=True),
            release=dict(default=None, required=False)
        ),
        required_together=[['username', 'password'],
                           ['server_proxy_hostname', 'server_proxy_port'],
                           ['server_proxy_user', 'server_proxy_password']],
        mutually_exclusive=[['activationkey', 'username'],
                            ['activationkey', 'consumer_id'],
                            ['activationkey', 'environment'],
                            ['activationkey', 'autosubscribe'],
                            ['force', 'consumer_id'],
                            ['pool', 'pool_ids']],
        required_if=[['state', 'present', ['username', 'activationkey'], True]],
    )

    rhsm.module = module
    state = module.params['state']
    username = module.params['username']
    password = module.params['password']
    server_hostname = module.params['server_hostname']
    server_insecure = module.params['server_insecure']
    rhsm_baseurl = module.params['rhsm_baseurl']
    rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert']
    auto_attach = module.params['auto_attach']
    activationkey = module.params['activationkey']
    org_id = module.params['org_id']
    if activationkey and not org_id:
        module.fail_json(msg='org_id is required when using activationkey')
    environment = module.params['environment']
    pool = module.params['pool']

    # Normalize pool_ids: each entry is either a plain pool id (quantity 1)
    # or a one-entry {pool_id: quantity} mapping.
    pool_ids = {}
    for value in module.params['pool_ids']:
        if isinstance(value, dict):
            if len(value) != 1:
                module.fail_json(msg='Unable to parse pool_ids option.')
            # BUGFIX: dict.items() returns a non-subscriptable view object on
            # Python 3, so value.items()[0] raised TypeError. Wrapping in
            # list() works on both Python 2 and Python 3.
            pool_id, quantity = list(value.items())[0]
        else:
            pool_id, quantity = value, 1
        pool_ids[pool_id] = str(quantity)

    consumer_type = module.params["consumer_type"]
    consumer_name = module.params["consumer_name"]
    consumer_id = module.params["consumer_id"]
    force_register = module.params["force_register"]
    server_proxy_hostname = module.params['server_proxy_hostname']
    server_proxy_port = module.params['server_proxy_port']
    server_proxy_user = module.params['server_proxy_user']
    server_proxy_password = module.params['server_proxy_password']
    release = module.params['release']

    global SUBMAN_CMD
    SUBMAN_CMD = module.get_bin_path('subscription-manager', True)

    # Ensure system is registered
    if state == 'present':

        # Register system
        if rhsm.is_registered and not force_register:
            # Already registered: only (re)apply subscriptions if asked for.
            if pool != '^$' or pool_ids:
                try:
                    if pool_ids:
                        result = rhsm.update_subscriptions_by_pool_ids(pool_ids)
                    else:
                        result = rhsm.update_subscriptions(pool)
                except Exception as e:
                    module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e)))
                else:
                    module.exit_json(**result)
            else:
                module.exit_json(changed=False, msg="System already registered.")
        else:
            try:
                rhsm.enable()
                rhsm.configure(**module.params)
                rhsm.register(username, password, auto_attach, activationkey, org_id,
                              consumer_type, consumer_name, consumer_id, force_register,
                              environment, rhsm_baseurl, server_insecure, server_hostname,
                              server_proxy_hostname, server_proxy_port, server_proxy_user,
                              server_proxy_password, release)
                if pool_ids:
                    subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)
                else:
                    subscribed_pool_ids = rhsm.subscribe(pool)
            except Exception as e:
                module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e)))
            else:
                module.exit_json(changed=True,
                                 msg="System successfully registered to '%s'." % server_hostname,
                                 subscribed_pool_ids=subscribed_pool_ids)

    # Ensure system is *not* registered
    if state == 'absent':
        if not rhsm.is_registered:
            module.exit_json(changed=False, msg="System already unregistered.")
        else:
            try:
                rhsm.unsubscribe()
                rhsm.unregister()
            except Exception as e:
                module.fail_json(msg="Failed to unregister: %s" % to_native(e))
            else:
                module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
def main():
    """Create, delete, start, stop and tune GlusterFS volumes.

    Works through the ``gluster`` CLI via the helper functions defined in
    this module (get_peers, get_volumes, create_volume, ...). Returns
    glusterfs facts (peers, volumes, quotas) on exit.
    """
    # MAIN

    global module
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['volume']),
            state=dict(type='str', required=True, choices=['absent', 'started', 'stopped', 'present']),
            cluster=dict(type='list'),
            host=dict(type='str'),
            stripes=dict(type='int'),
            replicas=dict(type='int'),
            arbiters=dict(type='int'),
            disperses=dict(type='int'),
            redundancies=dict(type='int'),
            transport=dict(type='str', default='tcp', choices=['tcp', 'rdma', 'tcp,rdma']),
            bricks=dict(type='str', aliases=['brick']),
            start_on_create=dict(type='bool', default=True),
            rebalance=dict(type='bool', default=False),
            options=dict(type='dict', default={}),
            quota=dict(type='str'),
            directory=dict(type='str'),
            force=dict(type='bool', default=False),
        ),
    )

    global glusterbin
    glusterbin = module.get_bin_path('gluster', True)

    changed = False

    action = module.params['state']
    volume_name = module.params['name']
    cluster = module.params['cluster']
    brick_paths = module.params['bricks']
    stripes = module.params['stripes']
    replicas = module.params['replicas']
    arbiters = module.params['arbiters']
    disperses = module.params['disperses']
    redundancies = module.params['redundancies']
    transport = module.params['transport']
    myhostname = module.params['host']
    start_on_create = module.boolean(module.params['start_on_create'])
    rebalance = module.boolean(module.params['rebalance'])
    force = module.boolean(module.params['force'])

    if not myhostname:
        myhostname = socket.gethostname()

    # Clean up if last element is empty. Consider that yml can look like this:
    #   cluster="{% for host in groups['glusterfs'] %}{{ hostvars[host]['private_ip'] }},{% endfor %}"
    if cluster is not None and len(cluster) > 1 and cluster[-1] == '':
        cluster = cluster[0:-1]

    if cluster is None or cluster[0] == '':
        cluster = [myhostname]

    if brick_paths is not None and "," in brick_paths:
        brick_paths = brick_paths.split(",")
    else:
        brick_paths = [brick_paths]

    options = module.params['options']
    quota = module.params['quota']
    directory = module.params['directory']

    # get current state info
    peers = get_peers()
    volumes = get_volumes()
    quotas = {}
    if volume_name in volumes and volumes[volume_name]['quota'] and volumes[volume_name]['status'].lower() == 'started':
        quotas = get_quotas(volume_name, True)

    # do the work!
    if action == 'absent':
        if volume_name in volumes:
            if volumes[volume_name]['status'].lower() != 'stopped':
                stop_volume(volume_name)
            run_gluster(['volume', 'delete', volume_name])
            changed = True

    if action == 'present':
        probe_all_peers(cluster, peers, myhostname)

        # create if it doesn't exist
        if volume_name not in volumes:
            create_volume(volume_name, stripes, replicas, arbiters, disperses, redundancies, transport, cluster, brick_paths, force)
            volumes = get_volumes()
            changed = True

        if volume_name in volumes:
            if volumes[volume_name]['status'].lower() != 'started' and start_on_create:
                start_volume(volume_name)
                changed = True

            # switch bricks
            new_bricks = []
            removed_bricks = []
            all_bricks = []
            for node in cluster:
                for brick_path in brick_paths:
                    brick = '%s:%s' % (node, brick_path)
                    all_bricks.append(brick)
                    if brick not in volumes[volume_name]['bricks']:
                        new_bricks.append(brick)

            # this module does not yet remove bricks, but we check those anyways
            for brick in volumes[volume_name]['bricks']:
                if brick not in all_bricks:
                    removed_bricks.append(brick)

            if new_bricks:
                add_bricks(volume_name, new_bricks, stripes, replicas, force)
                changed = True

            # handle quotas
            if quota:
                if not volumes[volume_name]['quota']:
                    enable_quota(volume_name)
                quotas = get_quotas(volume_name, False)
                if directory not in quotas or quotas[directory] != quota:
                    set_quota(volume_name, directory, quota)
                    changed = True

            # set options
            for option in options.keys():
                if option not in volumes[volume_name]['options'] or volumes[volume_name]['options'][option] != options[option]:
                    set_volume_option(volume_name, option, options[option])
                    changed = True

        else:
            module.fail_json(msg='failed to create volume %s' % volume_name)

    # BUGFIX: the guard used to read "action != 'delete'", but 'delete' is
    # not among the 'state' choices (absent/started/stopped/present), so the
    # condition was always true and state=absent on a volume that never
    # existed wrongly failed with "volume not found". Comparing against
    # 'absent' keeps removal idempotent.
    if action != 'absent' and volume_name not in volumes:
        module.fail_json(msg='volume not found %s' % volume_name)

    if action == 'started':
        if volumes[volume_name]['status'].lower() != 'started':
            start_volume(volume_name)
            changed = True

    if action == 'stopped':
        if volumes[volume_name]['status'].lower() != 'stopped':
            stop_volume(volume_name)
            changed = True

    if changed:
        volumes = get_volumes()
        if rebalance:
            do_rebalance(volume_name)

    facts = {}
    facts['glusterfs'] = {'peers': peers, 'volumes': volumes, 'quotas': quotas}

    module.exit_json(changed=changed, ansible_facts=facts)
def main():
    """Manage disk partitions with parted(8).

    Builds up a parted script string ("mklabel ... mkpart ... name ... set
    ...") from the difference between the requested and the current device
    state, then executes it via the parted() helper. Returns the final
    device/partition layout through module.exit_json.
    """
    global module, units_si, units_iec, parted_exec

    changed = False
    # output_script accumulates everything actually sent to parted (returned
    # to the user); script holds the commands for the current run only.
    output_script = ""
    script = ""
    module = AnsibleModule(
        argument_spec={
            'device': {'required': True, 'type': 'str'},
            'align': {
                'default': 'optimal',
                'choices': ['none', 'cylinder', 'minimal', 'optimal'],
                'type': 'str'
            },
            'number': {'default': None, 'type': 'int'},

            # unit <unit> command
            'unit': {
                'default': 'KiB',
                'choices': parted_units,
                'type': 'str'
            },

            # mklabel <label-type> command
            'label': {
                'default': 'msdos',
                'choices': ['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun'],
                'type': 'str'
            },

            # mkpart <part-type> [<fs-type>] <start> <end> command
            'part_type': {
                'default': 'primary',
                'choices': ['primary', 'extended', 'logical'],
                'type': 'str'
            },
            'part_start': {'default': '0%', 'type': 'str'},
            'part_end': {'default': '100%', 'type': 'str'},

            # name <partition> <name> command
            'name': {'type': 'str'},

            # set <partition> <flag> <state> command
            'flags': {'type': 'list'},

            # rm/mkpart command
            'state': {
                'choices': ['present', 'absent', 'info'],
                'default': 'info',
                'type': 'str'
            }
        },
        required_if=[
            ['state', 'present', ['number']],
            ['state', 'absent', ['number']],
        ],
        supports_check_mode=True,
    )
    # Force a stable C locale so parted output can be parsed reliably.
    module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'}

    # Data extraction
    device = module.params['device']
    align = module.params['align']
    number = module.params['number']
    unit = module.params['unit']
    label = module.params['label']
    part_type = module.params['part_type']
    part_start = module.params['part_start']
    part_end = module.params['part_end']
    name = module.params['name']
    state = module.params['state']
    flags = module.params['flags']

    # Parted executable
    parted_exec = module.get_bin_path('parted', True)

    # Conditioning
    if number is not None and number < 1:
        module.fail_json(msg="The partition number must be greater then 0.")
    if not check_size_format(part_start):
        module.fail_json(
            msg="The argument 'part_start' doesn't respect required format."
                "The size unit is case sensitive.",
            err=parse_unit(part_start)
        )
    if not check_size_format(part_end):
        module.fail_json(
            msg="The argument 'part_end' doesn't respect required format."
                "The size unit is case sensitive.",
            err=parse_unit(part_end)
        )

    # Read the current disk information
    current_device = get_device_info(device, unit)
    current_parts = current_device['partitions']

    if state == 'present':

        # Assign label if required
        if current_device['generic'].get('table', None) != label:
            script += "mklabel %s " % label

        # Create partition if required
        if part_type and not part_exists(current_parts, 'num', number):
            script += "mkpart %s %s %s " % (
                part_type,
                part_start,
                part_end
            )

        # Set the unit of the run
        if unit and script:
            script = "unit %s %s" % (unit, script)

        # Execute the script and update the data structure.
        # This will create the partition for the next steps
        if script:
            output_script += script
            parted(script, device, align)
            changed = True
            script = ""

            # Re-read the layout so the name/flags steps below see the
            # partition that was just created.
            current_parts = get_device_info(device, unit)['partitions']

        if part_exists(current_parts, 'num', number) or module.check_mode:
            partition = {'flags': []}   # Empty structure for the check-mode
            if not module.check_mode:
                partition = [p for p in current_parts if p['num'] == number][0]

            # Assign name to the partition
            if name is not None and partition.get('name', None) != name:
                script += "name %s %s " % (number, name)

            # Manage flags
            if flags:
                # Compute only the changes in flags status
                flags_off = list(set(partition['flags']) - set(flags))
                flags_on = list(set(flags) - set(partition['flags']))

                for f in flags_on:
                    script += "set %s %s on " % (number, f)

                for f in flags_off:
                    script += "set %s %s off " % (number, f)

            # Set the unit of the run
            if unit and script:
                script = "unit %s %s" % (unit, script)

            # Execute the script
            if script:
                output_script += script
                changed = True
                parted(script, device, align)

    elif state == 'absent':
        # Remove the partition
        # NOTE(review): parted() is also invoked in check mode here;
        # presumably the helper honours check_mode itself — confirm.
        if part_exists(current_parts, 'num', number) or module.check_mode:
            script = "rm %s " % number
            output_script += script
            changed = True
            parted(script, device, align)

    elif state == 'info':
        output_script = "unit '%s' print " % unit

    # Final status of the device
    final_device_status = get_device_info(device, unit)
    module.exit_json(
        changed=changed,
        disk=final_device_status['generic'],
        partitions=final_device_status['partitions'],
        script=output_script.strip()
    )
def main():
    """Create, resize or remove a filesystem on a block device.

    Reads module params, probes the device's current filesystem with
    blkid, then creates/grows/wipes as needed. Exits via
    module.exit_json / module.fail_json in every path.
    """
    # Map user-friendly aliases onto the blkid TYPE names used as keys
    # in the FILESYSTEMS registry.
    friendly_names = {
        'lvm': 'LVM2_member',
    }

    # Offer the friendly aliases as choices instead of their raw blkid names.
    fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(
        friendly_names.keys())

    # There is no "single command" to manipulate filesystems, so we map them all out and their options
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            fstype=dict(type='str', aliases=['type'], choices=list(fstypes)),
            dev=dict(type='path', required=True, aliases=['device']),
            opts=dict(type='str'),
            force=dict(type='bool', default=False),
            resizefs=dict(type='bool', default=False),
        ),
        required_if=[('state', 'present', ['fstype'])],
        supports_check_mode=True,
    )

    state = module.params['state']
    dev = module.params['dev']
    fstype = module.params['fstype']
    opts = module.params['opts']
    force = module.params['force']
    resizefs = module.params['resizefs']

    changed = False

    if not os.path.exists(dev):
        msg = "Device %s not found." % dev
        if state == "present":
            module.fail_json(msg=msg)
        else:
            # state=absent on a missing device is already satisfied.
            module.exit_json(msg=msg)

    dev = Device(module, dev)

    # Probe the existing filesystem type; blkid prints the TYPE value only.
    cmd = module.get_bin_path('blkid', required=True)
    rc, raw_fs, err = module.run_command(
        "%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
    # In case blkid isn't able to identify an existing filesystem, device is considered as empty,
    # then this existing filesystem would be overwritten even if force isn't enabled.
    fs = raw_fs.strip()

    if state == "present":
        # Translate friendly alias (e.g. 'lvm') to the registry key.
        if fstype in friendly_names:
            fstype = friendly_names[fstype]

        try:
            klass = FILESYSTEMS[fstype]
        except KeyError:
            module.fail_json(
                changed=False,
                msg="module does not support this filesystem (%s) yet." % fstype)

        filesystem = klass(module)

        # Same class handling two blkid names counts as "same filesystem".
        same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
        if same_fs and not resizefs and not force:
            # Desired filesystem already present; nothing to do.
            module.exit_json(changed=False)
        elif same_fs and resizefs:
            if not filesystem.GROW:
                module.fail_json(
                    changed=False,
                    msg="module does not support resizing %s filesystem yet." % fstype)

            out = filesystem.grow(dev)

            module.exit_json(changed=True, msg=out)
        elif fs and not force:
            # A different filesystem exists; refuse to clobber it silently.
            module.fail_json(
                msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs),
                rc=rc, err=err)

        # create fs
        # NOTE(review): runs even in check mode here — presumably
        # klass.create() honours module.check_mode internally; confirm.
        filesystem.create(opts, dev)
        changed = True

    elif fs:
        # wipe fs signatures
        # NOTE(review): same check-mode assumption applies to wipefs().
        filesystem = Filesystem(module)
        filesystem.wipefs(dev)
        changed = True

    module.exit_json(changed=changed)
def main():
    """Read, list, set or unset git configuration values.

    Builds a ``git config --includes ...`` argv, runs it in the proper
    working directory for the requested scope, and either reports the
    current value(s) or applies the requested change. All paths exit via
    module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(list_all=dict(required=False, type='bool', default=False),
                           name=dict(type='str'),
                           repo=dict(type='path'),
                           scope=dict(required=False, type='str', choices=['local', 'global', 'system']),
                           state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
                           value=dict(required=False)),
        mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']],
        required_if=[('scope', 'local', ['repo'])],
        required_one_of=[['list_all', 'name']],
        supports_check_mode=True,
    )
    git_path = module.get_bin_path('git', True)

    params = module.params
    # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
    # Set the locale to C to ensure consistent messages.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    name = params['name'] if params['name'] else None

    if params['scope']:
        scope = params['scope']
    elif params['list_all']:
        # Listing without an explicit scope aggregates all scopes.
        scope = None
    else:
        scope = 'system'

    if params['state'] == 'absent':
        unset = 'unset'
        params['value'] = None
    else:
        unset = None

    new_value = params['value'] if params['value'] else None

    args = [git_path, "config", "--includes"]
    if params['list_all']:
        args.append('-l')
    if scope:
        args.append("--" + scope)
    if name:
        args.append(name)

    # 'cwd' (not 'dir', which shadows a builtin) controls which repo git sees.
    if scope == 'local':
        cwd = params['repo']
    elif params['list_all'] and params['repo']:
        # Include local settings from a specific repo when listing all available settings
        cwd = params['repo']
    else:
        # Run from root directory to avoid accidentally picking up any local config settings
        cwd = "/"

    # FIX: pass the argv list directly instead of ' '.join(args). The joined
    # string was re-split by run_command, mangling any name or value that
    # contains whitespace or shell-quoting characters.
    (rc, out, err) = module.run_command(args, cwd=cwd)
    if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
        # This just means nothing has been set at the given scope
        module.exit_json(changed=False, msg='', config_values={})
    elif rc >= 2:
        # If the return code is 1, it just means the option hasn't been set yet, which is fine.
        module.fail_json(rc=rc, msg=err, cmd=' '.join(args))

    if params['list_all']:
        values = out.rstrip().splitlines()
        config_values = {}
        for value in values:
            k, v = value.split('=', 1)
            config_values[k] = v
        module.exit_json(changed=False, msg='', config_values=config_values)
    elif not new_value and not unset:
        # Pure read of a single option.
        module.exit_json(changed=False, msg='', config_value=out.rstrip())
    elif unset and not out:
        module.exit_json(changed=False, msg='no setting to unset')
    else:
        old_value = out.rstrip()
        if old_value == new_value:
            # Already at the desired value.
            module.exit_json(changed=False, msg="")

    if not module.check_mode:
        if unset:
            # Insert --unset just before the option name.
            args.insert(len(args) - 1, "--" + unset)
            cmd = args
        else:
            # List form needs no shell quoting of the value.
            cmd = args + [new_value]
        (rc, out, err) = module.run_command(cmd, cwd=cwd)
        if err:
            module.fail_json(rc=rc, msg=err, cmd=' '.join(cmd))

    module.exit_json(
        msg='setting changed',
        diff=dict(before_header=' '.join(args),
                  before=old_value + "\n",
                  after_header=' '.join(args),
                  after=(new_value or '') + "\n"),
        changed=True)
def main():
    """Manage SmartOS virtual machines via vmadm.

    Builds the argument spec (mostly from the `properties` table below),
    resolves the target VM's UUID, and then creates, deletes, or
    transitions the VM to the requested state.
    """
    # In order to reduce the clutter and boilerplate for trivial options,
    # abstract the vmadm properties and build the dict of arguments later.
    # Dict of all options that are simple to define based on their type.
    # They're not required and have a default of None.
    properties = {
        'str': [
            'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname',
            'image_uuid', 'internal_metadata_namespace', 'kernel_version',
            'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts',
            'spice_opts', 'uuid', 'vga', 'zfs_data_compression',
            'zfs_root_compression', 'zpool'
        ],
        'bool': [
            'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset',
            'docker', 'firewall_enabled', 'force', 'indestructible_delegated',
            'indestructible_zoneroot', 'maintain_resolvers', 'nowait'
        ],
        'int': [
            'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps',
            'max_physical_memory', 'max_swap', 'mdata_exec_timeout', 'quota',
            'ram', 'tmpfs', 'vcpus', 'virtio_txburst', 'virtio_txtimer',
            'vnc_port', 'zfs_data_recsize', 'zfs_filesystem_limit',
            'zfs_io_priority', 'zfs_root_recsize', 'zfs_snapshot_limit'
        ],
        'dict': ['customer_metadata', 'internal_metadata', 'routes'],
        'list': ['disks', 'nics', 'resolvers', 'filesystems']
    }

    # Start with the options that are not as trivial as those above.
    options = dict(
        state=dict(
            default='running',
            type='str',
            choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted']
        ),
        name=dict(
            default=None, type='str',
            aliases=['alias']
        ),
        brand=dict(
            default='joyent',
            type='str',
            choices=['joyent', 'joyent-minimal', 'kvm', 'lx']
        ),
        cpu_type=dict(
            default='qemu64',
            type='str',
            choices=['host', 'qemu64']
        ),
        # Regular strings, however these require additional options.
        spice_password=dict(type='str', no_log=True),
        vnc_password=dict(type='str', no_log=True),
    )

    # Add our 'simple' options to options dict.
    # (loop variable 'type' shadows the builtin; kept as-is.)
    for type in properties:
        for p in properties[type]:
            option = dict(default=None, type=type)
            options[p] = option

    module = AnsibleModule(
        argument_spec=options,
        supports_check_mode=True,
        required_one_of=[['name', 'uuid']]
    )

    module.vmadm = module.get_bin_path('vmadm', required=True)

    p = module.params
    uuid = p['uuid']
    state = p['state']

    # Translate the state parameter into something we can use later on.
    # ('state' choices guarantee exactly one branch matches.)
    if state in ['present', 'running']:
        vm_state = 'running'
    elif state in ['stopped', 'created']:
        vm_state = 'stopped'
    elif state in ['absent', 'deleted']:
        vm_state = 'deleted'
    elif state in ['restarted', 'rebooted']:
        vm_state = 'rebooted'

    result = {'state': state}

    # While it's possible to refer to a given VM by it's `alias`, it's easier
    # to operate on VMs by their UUID. So if we're not given a `uuid`, look
    # it up.
    if not uuid:
        uuid = get_vm_uuid(module, p['name'])
        # Bit of a chicken and egg problem here for VMs with state == deleted.
        # If they're going to be removed in this play, we have to lookup the
        # uuid. If they're already deleted there's nothing to look up.
        # So if state == deleted and get_vm_uuid() returned '', the VM is
        # already deleted and there's nothing else to do.
        if uuid is None and vm_state == 'deleted':
            result['name'] = p['name']
            module.exit_json(**result)

    validate_uuids(module)

    if p['name']:
        result['name'] = p['name']
    result['uuid'] = uuid

    # The special UUID '*' operates on every VM on the host.
    if uuid == '*':
        result['changed'] = manage_all_vms(module, vm_state)
        module.exit_json(**result)

    # The general flow is as follows:
    # - first the current state of the VM is obtained by it's UUID.
    # - If the state was not found and the desired state is 'deleted', return.
    # - If the state was not found, it means the VM has to be created.
    #   Subsequently the VM will be set to the desired state (i.e. stopped)
    # - Otherwise, it means the VM exists already and we operate on it's
    #   state (i.e. reboot it.)
    #
    # In the future it should be possible to query the VM for a particular
    # property as a valid state (i.e. queried) so the result can be
    # registered.
    # Also, VMs should be able to get their properties updated.
    # Managing VM snapshots should be part of a standalone module.

    # First obtain the VM state to determine what needs to be done with it.
    current_vm_state = get_vm_prop(module, uuid, 'state')

    # First handle the case where the VM should be deleted and is not present.
    if not current_vm_state and vm_state == 'deleted':
        result['changed'] = False
    elif module.check_mode:
        # Shortcut for check mode, if there is no VM yet, it will need to be created.
        # Or, if the VM is not in the desired state yet, it needs to transition.
        # NOTE(review): this compares the live VM state against the raw
        # 'state' parameter (e.g. 'present'), not the translated vm_state,
        # and re-queries a property already held in current_vm_state —
        # confirm intended behaviour against the vmadm helpers.
        if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state):
            result['changed'] = True
        else:
            result['changed'] = False

        module.exit_json(**result)

    # No VM was found that matched the given ID (alias or uuid), so we create it.
    elif not current_vm_state:
        result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
    else:
        # VM was found, operate on its state directly.
        result['changed'] = vm_state_transition(module, uuid, vm_state)

    module.exit_json(**result)
def main():
    """Deploy software (or files) from git checkouts.

    Clones the repository when absent, otherwise fetches/updates it to the
    requested version, handling check mode, local modifications, submodules
    and optional archive creation. Exits via module.exit_json /
    module.fail_json on every path.
    """
    module = AnsibleModule(argument_spec=dict(
        dest=dict(type='path'),
        repo=dict(required=True, aliases=['name']),
        version=dict(default='HEAD'),
        remote=dict(default='origin'),
        refspec=dict(default=None),
        reference=dict(default=None),
        force=dict(default='no', type='bool'),
        depth=dict(default=None, type='int'),
        clone=dict(default='yes', type='bool'),
        update=dict(default='yes', type='bool'),
        verify_commit=dict(default='no', type='bool'),
        accept_hostkey=dict(default='no', type='bool'),
        key_file=dict(default=None, type='path', required=False),
        ssh_opts=dict(default=None, required=False),
        executable=dict(default=None, type='path'),
        bare=dict(default='no', type='bool'),
        recursive=dict(default='yes', type='bool'),
        track_submodules=dict(default='no', type='bool'),
        umask=dict(default=None, type='raw'),
        archive=dict(type='path'),
    ), supports_check_mode=True)

    dest = module.params['dest']
    repo = module.params['repo']
    version = module.params['version']
    remote = module.params['remote']
    refspec = module.params['refspec']
    force = module.params['force']
    depth = module.params['depth']
    update = module.params['update']
    allow_clone = module.params['clone']
    bare = module.params['bare']
    verify_commit = module.params['verify_commit']
    reference = module.params['reference']
    git_path = module.params['executable'] or module.get_bin_path('git', True)
    key_file = module.params['key_file']
    ssh_opts = module.params['ssh_opts']
    umask = module.params['umask']
    archive = module.params['archive']

    result = dict(changed=False, warnings=list())

    # accept_hostkey is sugar for the corresponding ssh option.
    if module.params['accept_hostkey']:
        if ssh_opts is not None:
            if "-o StrictHostKeyChecking=no" not in ssh_opts:
                ssh_opts += " -o StrictHostKeyChecking=no"
        else:
            ssh_opts = "-o StrictHostKeyChecking=no"

    # evaluate and set the umask before doing anything else
    if umask is not None:
        if not isinstance(umask, string_types):
            module.fail_json(
                msg="umask must be defined as a quoted octal integer")
        try:
            umask = int(umask, 8)
        except ValueError as e:
            # FIX: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; int(str, 8) can only raise ValueError here
            # (the isinstance() guard above rules out TypeError).
            module.fail_json(msg="umask must be an octal integer",
                             details=str(e))
        os.umask(umask)

    # Certain features such as depth require a file:/// protocol for path based urls
    # so force a protocol here ...
    if repo.startswith('/'):
        repo = 'file://' + repo

    # We screenscrape a huge amount of git commands so use C locale anytime we
    # call run_command()
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    gitconfig = None
    if not dest and allow_clone:
        module.fail_json(
            msg="the destination directory must be specified unless clone=no")
    elif dest:
        dest = os.path.abspath(dest)
        if bare:
            gitconfig = os.path.join(dest, 'config')
        else:
            gitconfig = os.path.join(dest, '.git', 'config')

    # create a wrapper script and export
    # GIT_SSH=<path> as an environment variable
    # for git to use the wrapper script
    ssh_wrapper = write_ssh_wrapper()
    set_git_ssh(ssh_wrapper, key_file, ssh_opts)
    module.add_cleanup_file(path=ssh_wrapper)

    git_version_used = git_version(git_path, module)

    if depth is not None and git_version_used < LooseVersion('1.9.1'):
        result['warnings'].append(
            "Your git version is too old to fully support the depth argument. Falling back to full checkouts."
        )
        depth = None

    recursive = module.params['recursive']
    track_submodules = module.params['track_submodules']

    result.update(before=None)

    local_mods = False
    # (removed an unused `need_fetch` local that was assigned but never read)
    if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
        # if there is no git configuration, do a clone operation unless:
        # * the user requested no clone (they just want info)
        # * we're doing a check mode test
        # In those cases we do an ls-remote
        if module.check_mode or not allow_clone:
            remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
            result.update(changed=True, after=remote_head)
            if module._diff:
                diff = get_diff(module, git_path, dest, repo, remote, depth,
                                bare, result['before'], result['after'])
                if diff:
                    result['diff'] = diff
            module.exit_json(**result)
        # there's no git config, so clone
        clone(git_path, module, repo, dest, remote, depth, version, bare,
              reference, refspec, verify_commit)
    elif not update:
        # Just return having found a repo already in the dest path
        # this does no checking that the repo is the actual repo
        # requested.
        result['before'] = get_version(module, git_path, dest)
        result.update(after=result['before'])
        module.exit_json(**result)
    else:
        # else do a pull
        local_mods = has_local_mods(module, git_path, dest, bare)
        result['before'] = get_version(module, git_path, dest)
        if local_mods:
            # failure should happen regardless of check mode
            if not force:
                module.fail_json(
                    msg="Local modifications exist in repository (force=no).",
                    **result)
            # if force and in non-check mode, do a reset
            if not module.check_mode:
                reset(git_path, module, dest)
                result.update(changed=True, msg='Local modifications exist.')

        # exit if already at desired sha version
        if module.check_mode:
            remote_url = get_remote_url(git_path, module, dest, remote)
            remote_url_changed = remote_url and remote_url != repo and unfrackgitpath(
                remote_url) != unfrackgitpath(repo)
        else:
            remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
        result.update(remote_url_changed=remote_url_changed)

        if module.check_mode:
            remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
            result.update(changed=(result['before'] != remote_head or remote_url_changed),
                          after=remote_head)
            # FIXME: This diff should fail since the new remote_head is not fetched yet?!
            if module._diff:
                diff = get_diff(module, git_path, dest, repo, remote, depth,
                                bare, result['before'], result['after'])
                if diff:
                    result['diff'] = diff
            module.exit_json(**result)
        else:
            fetch(git_path, module, repo, dest, version, remote, depth, bare,
                  refspec, git_version_used)

        result['after'] = get_version(module, git_path, dest)

    # switch to version specified regardless of whether
    # we got new revisions from the repository
    if not bare:
        switch_version(git_path, module, dest, remote, version, verify_commit, depth)

    # Deal with submodules
    submodules_updated = False
    if recursive and not bare:
        submodules_updated = submodules_fetch(git_path, module, remote,
                                              track_submodules, dest)
        if submodules_updated:
            result.update(submodules_changed=submodules_updated)

            if module.check_mode:
                result.update(changed=True, after=remote_head)
                module.exit_json(**result)

            # Switch to version specified
            submodule_update(git_path, module, dest, track_submodules, force=force)

    # determine if we changed anything
    result['after'] = get_version(module, git_path, dest)
    if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
        result.update(changed=True)

    if module._diff:
        diff = get_diff(module, git_path, dest, repo, remote, depth, bare,
                        result['before'], result['after'])
        if diff:
            result['diff'] = diff

    if archive:
        # Git archive is not supported by all git servers, so
        # we will first clone and perform git archive from local directory
        if module.check_mode:
            result.update(changed=True)
            module.exit_json(**result)

        create_archive(git_path, module, dest, archive, version, repo, result)

    # cleanup the wrapper script
    if ssh_wrapper:
        try:
            os.remove(ssh_wrapper)
        except OSError:
            # No need to fail if the file already doesn't exist
            pass

    module.exit_json(**result)
def main():
    """Run puppet (agent or apply) and map its exit codes to module results.

    Builds the puppet command line from module params (optionally wrapped
    in `timeout`), runs it, and translates puppet's detailed exit codes
    (0/1/2/124) into exit_json / fail_json outcomes.
    """
    module = AnsibleModule(
        argument_spec=dict(
            timeout=dict(type='str', default='30m'),
            puppetmaster=dict(type='str'),
            modulepath=dict(type='str'),
            manifest=dict(type='str'),
            noop=dict(required=False, type='bool'),
            logdest=dict(type='str', default='stdout', choices=['all', 'stdout', 'syslog']),
            # internal code to work with --diff, do not use
            show_diff=dict(type='bool', default=False, aliases=['show-diff']),
            facts=dict(type='dict'),
            facter_basename=dict(type='str', default='ansible'),
            environment=dict(type='str'),
            certname=dict(type='str'),
            tags=dict(type='list'),
            execute=dict(type='str'),
            summarize=dict(type='bool', default=False),
            debug=dict(type='bool', default=False),
            verbose=dict(type='bool', default=False),
            use_srv_records=dict(type='bool'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ('puppetmaster', 'manifest'),
            ('puppetmaster', 'manifest', 'execute'),
            ('puppetmaster', 'modulepath'),
        ],
    )
    p = module.params

    global PUPPET_CMD
    PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])

    if not PUPPET_CMD:
        module.fail_json(
            msg="Could not find puppet. Please ensure it is installed.")

    global TIMEOUT_CMD
    TIMEOUT_CMD = module.get_bin_path("timeout", False)

    if p['manifest']:
        if not os.path.exists(p['manifest']):
            module.fail_json(msg="Manifest file %(manifest)s not found."
                             % dict(manifest=p['manifest']))

    # Check if puppet is disabled here
    # (agent mode only: manifest implies `puppet apply`)
    if not p['manifest']:
        rc, stdout, stderr = module.run_command(
            PUPPET_CMD + " config print agent_disabled_lockfile")
        if os.path.exists(stdout.strip()):
            module.fail_json(msg="Puppet agent is administratively disabled.",
                             disabled=True)
        elif rc != 0:
            module.fail_json(msg="Puppet agent state could not be determined.")

    # Persist user-supplied facts so facter exposes them to the run.
    if module.params['facts'] and not module.check_mode:
        _write_structured_data(_get_facter_dir(),
                               module.params['facter_basename'],
                               module.params['facts'])

    # Wrap puppet in `timeout -s 9 <duration>` when available.
    if TIMEOUT_CMD:
        base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
            timeout_cmd=TIMEOUT_CMD,
            timeout=shlex_quote(p['timeout']),
            puppet_cmd=PUPPET_CMD)
    else:
        base_cmd = PUPPET_CMD

    if not p['manifest'] and not p['execute']:
        # Agent mode: one-shot foreground run with detailed exit codes.
        cmd = ("%(base_cmd)s agent --onetime"
               " --no-daemonize --no-usecacheonfailure --no-splay"
               " --detailed-exitcodes --verbose --color 0") % dict(
                   base_cmd=base_cmd)
        if p['puppetmaster']:
            cmd += " --server %s" % shlex_quote(p['puppetmaster'])
        if p['show_diff']:
            cmd += " --show_diff"
        if p['environment']:
            cmd += " --environment '%s'" % p['environment']
        if p['tags']:
            cmd += " --tags '%s'" % ','.join(p['tags'])
        if p['certname']:
            cmd += " --certname='%s'" % p['certname']
        if module.check_mode:
            cmd += " --noop"
            # NOTE(review): as written, the use_srv_records flags are only
            # appended during check mode, which looks misplaced — upstream
            # handles use_srv_records independently of check mode; confirm.
            if p['use_srv_records'] is not None:
                if not p['use_srv_records']:
                    cmd += " --no-use_srv_records"
                else:
                    cmd += " --use_srv_records"
        elif 'noop' in p:
            # NOTE(review): 'noop' is always a key of p (it is in the
            # argument_spec), so this elif always fires outside check mode
            # and appends --no-noop even when the user never set noop.
            if p['noop']:
                cmd += " --noop"
            else:
                cmd += " --no-noop"
    else:
        # Apply mode: run a manifest file or an ad-hoc --execute snippet.
        cmd = "%s apply --detailed-exitcodes " % base_cmd
        if p['logdest'] == 'syslog':
            cmd += "--logdest syslog "
        if p['logdest'] == 'all':
            cmd += " --logdest syslog --logdest stdout"
        if p['modulepath']:
            cmd += "--modulepath='%s'" % p['modulepath']
        if p['environment']:
            cmd += "--environment '%s' " % p['environment']
        if p['certname']:
            cmd += " --certname='%s'" % p['certname']
        if p['tags']:
            cmd += " --tags '%s'" % ','.join(p['tags'])
        if module.check_mode:
            cmd += "--noop "
        elif 'noop' in p:
            # NOTE(review): same always-true 'noop' in p concern as above.
            if p['noop']:
                cmd += " --noop"
            else:
                cmd += " --no-noop"
        if p['execute']:
            cmd += " --execute '%s'" % p['execute']
        else:
            cmd += shlex_quote(p['manifest'])
        if p['summarize']:
            cmd += " --summarize"
        if p['debug']:
            cmd += " --debug"
        if p['verbose']:
            cmd += " --verbose"
    rc, stdout, stderr = module.run_command(cmd)

    if rc == 0:
        # success
        module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
    elif rc == 1:
        # rc==1 could be because it's disabled
        # rc==1 could also mean there was a compilation failure
        disabled = "administratively disabled" in stdout
        if disabled:
            msg = "puppet is disabled"
        else:
            msg = "puppet did not run"
        module.exit_json(rc=rc, disabled=disabled, msg=msg,
                         error=True, stdout=stdout, stderr=stderr)
    elif rc == 2:
        # success with changes
        module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
    elif rc == 124:
        # timeout
        module.exit_json(rc=rc, msg="%s timed out" % cmd,
                         stdout=stdout, stderr=stderr)
    else:
        # failure
        module.fail_json(rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
                         stdout=stdout, stderr=stderr)
def main():
    """Manage a Mercurial (hg) working copy: clone, pull, update, clean.

    Reports before/after revisions and whether anything changed; all
    failures are surfaced through module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            repo=dict(required=True, aliases=['name']),
            dest=dict(type='path'),
            revision=dict(default=None, aliases=['version']),
            force=dict(default='no', type='bool'),
            purge=dict(default='no', type='bool'),
            update=dict(default='yes', type='bool'),
            clone=dict(default='yes', type='bool'),
            executable=dict(default=None),
        ),
    )

    params = module.params
    repo = params['repo']
    dest = params['dest']
    revision = params['revision']
    force = params['force']
    purge = params['purge']
    update = params['update']
    clone = params['clone']
    hg_path = params['executable'] or module.get_bin_path('hg', True)
    if dest is not None:
        hgrc = os.path.join(dest, '.hg/hgrc')

    # Starting point: no known revision, nothing changed or cleaned yet.
    before = ''
    changed = False
    cleaned = False

    if not dest and (clone or update):
        module.fail_json(msg="the destination directory must be specified unless clone=no and update=no")

    hg = Hg(module, dest, repo, revision, hg_path)

    # clone=no together with update=no is a pure remote query: report the
    # remote tip and stop without touching the working copy.
    if not clone and not update:
        remote_rev = hg.get_remote_revision()
        module.exit_json(after=remote_rev, changed=False)

    # A missing hgrc means there is no checkout at dest yet; otherwise we
    # operate on the existing repository.
    if not os.path.exists(hgrc):
        if not clone:
            module.exit_json(changed=False)
        code, _clone_out, error = hg.clone()
        if code != 0:
            module.fail_json(msg=error)
    elif not update:
        # Repository already present and no update requested: just record it.
        before = hg.get_revision()
    elif hg.at_revision:
        # Already at the requested revision — skip the pull, but still honour
        # force/purge housekeeping.
        before = hg.get_revision()
        cleaned = hg.cleanup(force, purge)
    else:
        # Record current state, clean as requested, then pull and update.
        before = hg.get_revision()
        cleaned = hg.cleanup(force, purge)
        for step in (hg.pull, hg.update):
            code, _step_out, error = step()
            if code != 0:
                module.fail_json(msg=error)

    after = hg.get_revision()
    changed = (before != after) or cleaned
    module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned)
def main():
    """Extract files from an ISO image into a destination directory.

    Prefers 7z for extraction; falls back to loop-mounting the image.
    Only copies files whose checksum differs from the destination (or all
    requested files when force=yes). Exits via exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            image=dict(type='path', required=True, aliases=['path', 'src']),
            dest=dict(type='path', required=True),
            files=dict(type='list', required=True),
            force=dict(type='bool', default=True, aliases=['thirsty']),
            executable=dict(type='path'),  # No default on purpose
        ),
        supports_check_mode=True,
    )
    image = module.params['image']
    dest = module.params['dest']
    files = module.params['files']
    force = module.params['force']
    executable = module.params['executable']

    result = dict(
        changed=False,
        dest=dest,
        image=image,
    )

    # We want to know if the user provided it or not, so we set default here
    if executable is None:
        executable = '7z'

    binary = module.get_bin_path(executable, None)

    # When executable was provided and binary not found, warn user !
    if module.params['executable'] is not None and not binary:
        module.warn(
            "Executable '%s' is not found on the system, trying to mount ISO instead."
            % executable)

    if not os.path.exists(dest):
        module.fail_json(msg="Directory '%s' does not exist" % dest)

    # FIX: check the image path itself; the previous code tested
    # os.path.dirname(image), so a missing image in an existing directory
    # slipped past this guard despite the error message's claim.
    if not os.path.exists(image):
        module.fail_json(msg="ISO image '%s' does not exist" % image)

    result['files'] = []
    extract_files = list(files)

    if not force:
        # Check if we have to process any files based on existence
        for f in files:
            dest_file = os.path.join(dest, os.path.basename(f))
            if os.path.exists(dest_file):
                result['files'].append(
                    dict(
                        checksum=None,
                        dest=dest_file,
                        src=f,
                    ))
                extract_files.remove(f)

    if not extract_files:
        module.exit_json(**result)

    tmp_dir = tempfile.mkdtemp()

    # Use 7zip when we have a binary, otherwise try to mount
    if binary:
        cmd = '%s x "%s" -o"%s" %s' % (binary, image, tmp_dir, ' '.join(
            [quote(f) for f in extract_files]))
    else:
        cmd = 'mount -o loop,ro "%s" "%s"' % (image, tmp_dir)

    rc, out, err = module.run_command(cmd)
    if rc != 0:
        result.update(dict(
            cmd=cmd,
            rc=rc,
            stderr=err,
            stdout=out,
        ))
        shutil.rmtree(tmp_dir)

        if binary:
            module.fail_json(
                msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir),
                **result)
        else:
            module.fail_json(
                msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." % (image, tmp_dir, executable),
                **result)

    try:
        for f in extract_files:
            tmp_src = os.path.join(tmp_dir, f)
            if not os.path.exists(tmp_src):
                module.fail_json(msg="Failed to extract '%s' from ISO image" % f,
                                 **result)

            src_checksum = module.sha1(tmp_src)

            dest_file = os.path.join(dest, os.path.basename(f))

            if os.path.exists(dest_file):
                dest_checksum = module.sha1(dest_file)
            else:
                dest_checksum = None

            result['files'].append(
                dict(
                    checksum=src_checksum,
                    dest=dest_file,
                    src=f,
                ))

            # Copy only when content actually differs (and not in check mode).
            if src_checksum != dest_checksum:
                if not module.check_mode:
                    shutil.copy(tmp_src, dest_file)

                result['changed'] = True
    finally:
        # Unmount (mount fallback only) and always remove the scratch dir.
        if not binary:
            module.run_command('umount "%s"' % tmp_dir)

        shutil.rmtree(tmp_dir)

    module.exit_json(**result)
def main():
    """Manage iptables/ip6tables rules, chain policies and table flushes.

    Exactly one of three operations runs per invocation: flush a table,
    set a chain policy, or ensure a single rule is present/absent.
    """
    module = AnsibleModule(
        supports_check_mode=True,
        argument_spec=dict(
            table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            action=dict(type='str', default='append', choices=['append', 'insert']),
            ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
            chain=dict(type='str'),
            rule_num=dict(type='str'),
            protocol=dict(type='str'),
            wait=dict(type='str'),
            source=dict(type='str'),
            to_source=dict(type='str'),
            destination=dict(type='str'),
            to_destination=dict(type='str'),
            match=dict(type='list', elements='str', default=[]),
            tcp_flags=dict(type='dict',
                           options=dict(
                               flags=dict(type='list', elements='str'),
                               flags_set=dict(type='list', elements='str'))),
            jump=dict(type='str'),
            gateway=dict(type='str'),
            log_prefix=dict(type='str'),
            log_level=dict(
                type='str',
                choices=['0', '1', '2', '3', '4', '5', '6', '7',
                         'emerg', 'alert', 'crit', 'error',
                         'warning', 'notice', 'info', 'debug'],
                default=None,
            ),
            goto=dict(type='str'),
            in_interface=dict(type='str'),
            out_interface=dict(type='str'),
            fragment=dict(type='str'),
            set_counters=dict(type='str'),
            source_port=dict(type='str'),
            destination_port=dict(type='str'),
            to_ports=dict(type='str'),
            set_dscp_mark=dict(type='str'),
            set_dscp_mark_class=dict(type='str'),
            comment=dict(type='str'),
            ctstate=dict(type='list', elements='str', default=[]),
            src_range=dict(type='str'),
            dst_range=dict(type='str'),
            limit=dict(type='str'),
            limit_burst=dict(type='str'),
            uid_owner=dict(type='str'),
            gid_owner=dict(type='str'),
            reject_with=dict(type='str'),
            icmp_type=dict(type='str'),
            syn=dict(type='str', default='ignore', choices=['ignore', 'match', 'negate']),
            flush=dict(type='bool', default=False),
            policy=dict(type='str', choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
        ),
        mutually_exclusive=(
            ['set_dscp_mark', 'set_dscp_mark_class'],
            ['flush', 'policy'],
        ),
        required_if=[
            ['jump', 'TEE', ['gateway']],
            ['jump', 'tee', ['gateway']],
        ])

    # Result skeleton; 'rule' is the rendered argument string for reporting.
    args = dict(
        changed=False,
        failed=False,
        ip_version=module.params['ip_version'],
        table=module.params['table'],
        chain=module.params['chain'],
        flush=module.params['flush'],
        rule=' '.join(construct_rule(module.params)),
        state=module.params['state'],
    )

    ip_version = module.params['ip_version']
    # BINS maps 'ipv4'/'ipv6' to the matching iptables binary name.
    iptables_path = module.get_bin_path(BINS[ip_version], True)

    # Check if chain option is required
    if args['flush'] is False and args['chain'] is None:
        module.fail_json(
            msg="Either chain or flush parameter must be specified.")

    # Logging options imply (or require) the LOG jump target.
    if module.params.get('log_prefix', None) or module.params.get('log_level', None):
        if module.params['jump'] is None:
            module.params['jump'] = 'LOG'
        elif module.params['jump'] != 'LOG':
            module.fail_json(
                msg="Logging options can only be used with the LOG jump target."
            )

    # Check if wait option is supported: old iptables has no -w at all;
    # mid-range versions support -w but not a seconds argument, so the
    # user-supplied duration is blanked out.
    iptables_version = LooseVersion(get_iptables_version(iptables_path, module))

    if iptables_version >= LooseVersion(IPTABLES_WAIT_SUPPORT_ADDED):
        if iptables_version < LooseVersion(IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED):
            module.params['wait'] = ''
    else:
        module.params['wait'] = None

    # Flush the table
    if args['flush'] is True:
        args['changed'] = True
        if not module.check_mode:
            flush_table(iptables_path, module, module.params)

    # Set the policy
    elif module.params['policy']:
        current_policy = get_chain_policy(iptables_path, module, module.params)
        if not current_policy:
            module.fail_json(msg='Can\'t detect current policy')

        changed = current_policy != module.params['policy']
        args['changed'] = changed
        if changed and not module.check_mode:
            set_chain_policy(iptables_path, module, module.params)

    else:
        # Ensure a single rule is present/absent.
        insert = (module.params['action'] == 'insert')
        rule_is_present = check_present(iptables_path, module, module.params)
        should_be_present = (args['state'] == 'present')

        # Check if target is up to date
        args['changed'] = (rule_is_present != should_be_present)
        if args['changed'] is False:
            # Target is already up to date
            module.exit_json(**args)

        # Check only; don't modify
        if not module.check_mode:
            if should_be_present:
                if insert:
                    insert_rule(iptables_path, module, module.params)
                else:
                    append_rule(iptables_path, module, module.params)
            else:
                remove_rule(iptables_path, module, module.params)

    module.exit_json(**args)
def main():
    """Entry point for the parted Ansible module.

    Builds the desired partition layout from the module parameters, compares
    it against the current state of the device (as reported by parted) and
    issues the minimal parted script needed to converge, honouring check
    mode.  Exits via ``module.exit_json`` with the device/partition status
    and the parted script that was (or would be) executed.
    """
    global module, units_si, units_iec, parted_exec

    changed = False
    output_script = ""
    script = ""
    module = AnsibleModule(
        argument_spec=dict(
            device=dict(type='str', required=True),
            align=dict(type='str', default='optimal',
                       choices=['cylinder', 'minimal', 'none', 'optimal']),
            number=dict(type='int'),

            # unit <unit> command
            unit=dict(type='str', default='KiB', choices=parted_units),

            # mklabel <label-type> command
            label=dict(type='str', default='msdos', choices=[
                'aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos',
                'pc98', 'sun'
            ]),

            # mkpart <part-type> [<fs-type>] <start> <end> command
            part_type=dict(type='str', default='primary',
                           choices=['extended', 'logical', 'primary']),
            part_start=dict(type='str', default='0%'),
            part_end=dict(type='str', default='100%'),

            # name <partition> <name> command
            name=dict(type='str'),

            # set <partition> <flag> <state> command
            flags=dict(type='list'),

            # rm/mkpart command
            state=dict(type='str', default='info',
                       choices=['absent', 'info', 'present']),
        ),
        required_if=[
            ['state', 'present', ['number']],
            ['state', 'absent', ['number']],
        ],
        supports_check_mode=True,
    )
    # Force a stable, parseable locale for every parted invocation so the
    # output can be machine-read regardless of the host's locale settings.
    module.run_command_environ_update = {
        'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'
    }

    # Data extraction
    device = module.params['device']
    align = module.params['align']
    number = module.params['number']
    unit = module.params['unit']
    label = module.params['label']
    part_type = module.params['part_type']
    part_start = module.params['part_start']
    part_end = module.params['part_end']
    name = module.params['name']
    state = module.params['state']
    flags = module.params['flags']

    # Parted executable (fail early if it is not installed)
    parted_exec = module.get_bin_path('parted', True)

    # Conditioning
    if number is not None and number < 1:
        # FIX: message previously read "greater then 0"
        module.fail_json(msg="The partition number must be greater than 0.")
    if not check_size_format(part_start):
        module.fail_json(
            # FIX: the two sentences used to be concatenated without a space
            msg="The argument 'part_start' doesn't respect required format. "
                "The size unit is case sensitive.",
            err=parse_unit(part_start)
        )
    if not check_size_format(part_end):
        module.fail_json(
            msg="The argument 'part_end' doesn't respect required format. "
                "The size unit is case sensitive.",
            err=parse_unit(part_end)
        )

    # Read the current disk information
    current_device = get_device_info(device, unit)
    current_parts = current_device['partitions']

    if state == 'present':
        # Assign label if required
        if current_device['generic'].get('table', None) != label:
            script += "mklabel %s " % label

        # Create partition if required
        if part_type and not part_exists(current_parts, 'num', number):
            script += "mkpart %s %s %s " % (part_type, part_start, part_end)

        # Set the unit of the run
        if unit and script:
            script = "unit %s %s" % (unit, script)

        # Execute the script and update the data structure.
        # This will create the partition for the next steps
        if script:
            output_script += script
            parted(script, device, align)
            changed = True
            script = ""

            current_parts = get_device_info(device, unit)['partitions']

        if part_exists(current_parts, 'num', number) or module.check_mode:
            partition = {'flags': []}  # Empty structure for the check-mode
            if not module.check_mode:
                partition = [p for p in current_parts if p['num'] == number][0]

            # Assign name to the partition
            if name is not None and partition.get('name', None) != name:
                # Wrap double quotes in single quotes so the shell doesn't
                # strip the double quotes as those need to be included in
                # the arg passed to parted
                script += 'name %s \'"%s"\' ' % (number, name)

            # Manage flags
            if flags:
                # Parted infers boot with esp, if you assign esp, boot is set
                # and if boot is unset, esp is also unset.
                if 'esp' in flags and 'boot' not in flags:
                    flags.append('boot')

                # Compute only the changes in flags status
                flags_off = list(set(partition['flags']) - set(flags))
                flags_on = list(set(flags) - set(partition['flags']))

                for f in flags_on:
                    script += "set %s %s on " % (number, f)

                for f in flags_off:
                    script += "set %s %s off " % (number, f)

            # Set the unit of the run
            if unit and script:
                script = "unit %s %s" % (unit, script)

            # Execute the script
            if script:
                output_script += script
                changed = True
                parted(script, device, align)

    elif state == 'absent':
        # Remove the partition
        if part_exists(current_parts, 'num', number) or module.check_mode:
            script = "rm %s " % number
            output_script += script
            changed = True
            parted(script, device, align)

    elif state == 'info':
        output_script = "unit '%s' print " % unit

    # Final status of the device
    final_device_status = get_device_info(device, unit)
    module.exit_json(
        changed=changed,
        disk=final_device_status['generic'],
        partitions=final_device_status['partitions'],
        script=output_script.strip())