def main():
    """Module entry point: build an OpenAFS source distribution archive."""
    global log, logdir, results, module

    # Results reported back to Ansible on exit.
    results = {
        'changed': False,
        'version': '',
        'files': [],
    }
    argument_spec = dict(
        sdist=dict(type='path', required=True),
        topdir=dict(type='path', default='openafs'),
        logdir=dict(type='path', default=None),
        # Optional explicit paths to external tools.
        git=dict(type='path', default=None),
        tar=dict(type='path', default=None),
        gzip=dict(type='path', default=None),
        bzip2=dict(type='path', default=None),
        md5sum=dict(type='path', default=None),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)
    log = Logger(module_name)
    log.info('Starting %s', module_name)

    params = module.params
    sdist = expand_path(params['sdist'])
    topdir = expand_path(params['topdir'])
    logdir = expand_path(params['logdir'])

    # Building the source distribution always reports a change.
    make_sdist(topdir, sdist)
    results['changed'] = True
    module.exit_json(**results)
def main():
    """Relabel OpenAFS paths for SELinux, recording progress in local facts."""
    results = {'changed': False}
    module = AnsibleModule(argument_spec=dict(), supports_check_mode=False)
    log = Logger(module_name)
    log.info('Starting %s', module_name)

    def restorecon(*args):
        # Resolve the binary each call; fail the whole module on any error.
        tool = module.get_bin_path('restorecon', required=True)
        cmdargs = [tool] + list(args)
        cmdline = ' '.join(cmdargs)
        log.info("Running: %s", cmdline)
        rc, out, err = module.run_command(cmdargs)
        if rc == 0:
            return
        log.error("Command failed: %s, rc=%d, err=%s", cmdline, rc, err)
        module.fail_json(msg="Command failed", cmd=cmdline, out=out, err=err)

    # Previously relabelled paths are remembered in the local facts file so
    # restorecon is only run once per path.
    factsfile = '/etc/ansible/facts.d/openafs.fact'
    try:
        with open(factsfile) as fp:
            facts = json.load(fp)
    except Exception:
        facts = {}

    relabelled = facts.get('relabelled', [])
    changed = []

    # Top-level directories: create when missing, relabel recursively once.
    for path in top_dirs:
        if not os.path.exists(path):
            os.makedirs(path)
        if path not in relabelled:
            restorecon('-i', '-r', path)
            changed.append(path)

    # Vice partitions and their AlwaysAttach markers: relabel each once.
    for path in glob.glob('/vicep*'):
        if path not in relabelled:
            restorecon('-i', path)
            changed.append(path)
    for path in glob.glob('/vicep*/AlwaysAttach'):
        if path not in relabelled:
            restorecon(path)
            changed.append(path)

    if changed:
        facts['relabelled'] = sorted(set(relabelled) | set(changed))
        factsdir = os.path.dirname(factsfile)
        if not os.path.exists(factsdir):
            os.makedirs(factsdir)
        with open(factsfile, 'w') as fp:
            json.dump(facts, fp, indent=2)
        results['changed'] = True

    log.info('Results: %s', pprint.pformat(results))
    module.exit_json(**results)
def main():
    """Module entry point: set or update entries in the local OpenAFS facts file.

    With state=set, incoming facts overwrite existing keys.  With
    state=update, dicts are merged, lists are merged, and scalars are
    replaced.  The merged facts are written back to
    <factsdir>/openafs.fact and also returned as ansible_local facts for
    the current play.
    """
    results = dict(
        changed=False,
        ansible_facts={'ansible_local': {'openafs': {}}},
    )
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(choices=['set', 'update'], default='update'),
            factsdir=dict(type='path', default='/etc/ansible/facts.d'),
            facts=dict(type='dict', default={}),
        ),
        supports_check_mode=False,
    )
    log = Logger(module_name)
    log.info('Starting %s', module_name)
    state = module.params['state']
    factsdir = module.params['factsdir']
    factsfile = os.path.join(factsdir, 'openafs.fact')

    # Load current facts; treat a missing or unreadable file as empty.
    try:
        with open(factsfile) as fp:
            facts = json.load(fp)
    except Exception:
        facts = {}

    # Snapshot before merging so we can detect changes afterwards.
    signature_before = signature(facts)
    for key, value in module.params['facts'].items():
        if state == 'set':
            facts[key] = value
        elif state == 'update':
            if key not in facts:
                facts[key] = value
            elif isinstance(facts[key], dict) and isinstance(value, dict):
                facts[key].update(value)
            elif isinstance(facts[key], list) and isinstance(value, list):
                # FIX: was facts[key].append(value), which nested the
                # incoming list inside the stored list instead of merging
                # its elements (inconsistent with the dict branch above).
                facts[key].extend(value)
            else:
                facts[key] = value
        else:
            module.fail_json(msg='Internal error: unknown state %s' % state)

    # Write the merged facts, then re-read to compare what actually landed.
    if not os.path.exists(factsdir):
        os.makedirs(factsdir)
    with open(factsfile, 'w') as fp:
        json.dump(facts, fp, indent=2)
    with open(factsfile) as fp:
        facts = json.load(fp)
    signature_after = signature(facts)
    if signature_before != signature_after:
        log.info("Facts file '%s' changed.", factsfile)
        results['changed'] = True
    # Update local facts in the current play.
    results['ansible_facts']['ansible_local']['openafs'] = facts
    log.debug('results={0}'.format(results))
    module.exit_json(**results)
def main():
    """Module entry point: build OpenAFS RPM packages from a source dist."""
    global log, logdir, results, module

    results = {
        'changed': False,
        'logfiles': [],
        'version': {},
    }
    module = AnsibleModule(
        argument_spec=dict(
            build=dict(choices=['all', 'source', 'userspace', 'modules'],
                       default='all'),
            sdist=dict(type='path', required=True),
            spec=dict(type='str', default=None),
            relnotes=dict(type='str', default=None),
            changelog=dict(type='str', default=None),
            csdb=dict(type='path', default=None),
            patchdir=dict(type='path', default=None),
            kernvers=dict(default=None),
            topdir=dict(type='path', default='~/rpmbuild'),
            logdir=dict(type='path', default=None),
            tar=dict(type='path', default=None),
            tar_extra_options=dict(type='str', default=''),
        ),
        supports_check_mode=False,
    )
    log = Logger(module_name)
    log.info('Starting %s', module_name)

    params = module.params
    build = params['build']
    kernvers = params['kernvers']
    # Path-like parameters are expanded; kernvers is passed through as-is.
    sdist, spec, relnotes, changelog, csdb, patchdir, topdir, logdir = (
        expand_path(params[key]) for key in (
            'sdist', 'spec', 'relnotes', 'changelog', 'csdb',
            'patchdir', 'topdir', 'logdir'))

    # Set up logging: default the log directory to the rpmbuild BUILD dir.
    logdir = os.path.abspath(logdir) if logdir else os.path.join(topdir, 'BUILD')
    if not os.path.isdir(logdir):
        os.makedirs(logdir)
        results['changed'] = True

    create_workspace(topdir, sdist, spec, relnotes, changelog, csdb, patchdir)
    rpmbuild(topdir, build, kernvers)
    results['changed'] = True
    module.exit_json(**results)
def main():
    """Module entry point: wait for the OpenAFS databases to reach quorum.

    Polls the local ubik servers (port 7002 and 7003 — reported as 'pr'
    and 'vl' in the results, so presumably ptserver/PRDB and
    vlserver/VLDB) with udebug until both report quorum or the timeout
    expires.
    """
    results = dict(changed=False, )
    module = AnsibleModule(
        argument_spec=dict(
            timeout=dict(type='int', default=600),
            delay=dict(type='int', default=0),
            sleep=dict(type='int', default=20),
            fail_on_timeout=dict(type='bool', default=False)
        ),
        supports_check_mode=False,
    )
    log = Logger(module_name)
    log.info('Starting %s', module_name)
    timeout = module.params['timeout']
    delay = module.params['delay']
    sleep = module.params['sleep']
    fail_on_timeout = module.params['fail_on_timeout']
    # Clamp nonsensical parameters rather than failing.
    if delay < 0:
        log.warning('Ignoring negative delay parameter.')
        delay = 0
    if sleep < 1:
        log.warning('Ignoring out of range sleep parameter.')
        sleep = 1

    def lookup_command(name):
        """
        Lookup an OpenAFS command from local facts file. Try the PATH
        if not found in the local facts.
        """
        try:
            with open('/etc/ansible/facts.d/openafs.fact') as f:
                facts = json.load(f)
            cmd = facts['bins'][name]
        except Exception:
            cmd = module.get_bin_path(name)
        if not cmd:
            module.fail_json(msg='Unable to locate %s command.' % name)
        return cmd

    def check_quorum(port):
        """
        Run udebug to check for quorum.

        Returns a status dict with at least 'port' and 'quorum' (bool);
        other keys ('sync', 'flags', 'sync_host', 'db_version',
        'udebug') are filled in from the udebug output when present.
        """
        status = {'port': port, 'quorum': False}
        udebug = lookup_command('udebug')
        args = [udebug, '-server', 'localhost', '-port', str(port)]
        log.info('Running: %s', ' '.join(args))
        rc, out, err = module.run_command(args)
        log.debug("Ran udebug: rc=%d, out=%s, err=%s", rc, out, err)
        if rc != 0:
            # A failed udebug just means "no quorum yet"; the caller retries.
            log.warning("Failed udebug: rc=%d, out=%s, err=%s", rc, out, err)
            return status
        status['udebug'] = out
        # Scrape the fields of interest out of the udebug report.
        for line in out.splitlines():
            m = re.match(r'I am sync site', line)
            if m:
                status['sync'] = True
                log.info('Local host is sync site.')
                continue
            m = re.match(r'Recovery state ([0-9a-f]+)', line)
            if m:
                status['flags'] = m.group(1)
                continue
            m = re.match(r'Sync host (\S+) was set \d+ secs ago', line)
            if m:
                # 0.0.0.0 means no sync site has been elected yet.
                if m.group(1) != '0.0.0.0':
                    status['sync_host'] = m.group(1)
                    log.info('Remote host is sync site: %s',
                             status['sync_host'])
                continue
            m = re.match(r"Sync site's db version is (\d+)\.(\d+)", line)
            if m:
                status['db_version'] = (int(m.group(1)), int(m.group(2)))
                continue
        # Check recovery flags if this is the sync site, otherwise check for a
        # remote sync site address.
        if 'sync' in status and status['sync']:
            if 'flags' in status and status['flags'] in ('1f', 'f'):
                status['quorum'] = True
        elif 'sync_host' in status:
            status['quorum'] = True
        return status

    #
    # Wait for PRDB and VLDB quorum.
    #
    if delay:
        time.sleep(delay)
    now = int(time.time())
    expires = now + timeout
    retries = 0
    while True:
        pr = check_quorum(7002)
        vl = check_quorum(7003)
        if pr['quorum'] and vl['quorum']:
            log.info('Databases have quorum.')
            results['pr'] = pr
            results['vl'] = vl
            break
        now = int(time.time())
        if now > expires:
            # On timeout either fail the module or fall through with
            # changed=False, depending on fail_on_timeout.
            if fail_on_timeout:
                log.error('Timeout expired.')
                module.fail_json(msg='Timeout expired.')
            else:
                log.warning('Timeout expired.')
            break
        log.info('Will retry in %d seconds.' % sleep)
        time.sleep(sleep)
        retries += 1
    results['retries'] = retries
    log.info('Results: %s', pprint.pformat(results))
    module.exit_json(**results)
def main():
    """Module entry point: install OpenAFS binaries onto the local host.

    Installs either from a transarc-style 'dest' tree (when one is found
    under `path`) or by copying a DESTDIR-style tree onto '/', then runs
    the platform-specific post-install steps (ldconfig/depmod on Linux,
    crle and kernel driver copy on Solaris).
    """
    global log
    results = dict(
        changed=False,
        msg='',
        ansible_facts={},
        logfiles=[],
        kmods=[],
        bins={},
        dirs={},
    )
    module = AnsibleModule(argument_spec=dict(
        path=dict(type='path', required=True, aliases=['destdir']),
        exclude=dict(type='list', default=[]),
        sysname=dict(type='str', default=None),
        components=dict(type='list', default=['common', 'client', 'server']),
        ldconfig=dict(type='path', default='/sbin/ldconfig'),
        depmod=dict(type='path', default='/sbin/depmod'),
    ), supports_check_mode=False)
    log = Logger(module_name)
    log.info('Starting %s', module_name)
    path = module.params['path']
    exclude = module.params['exclude']
    components = module.params['components']
    ldconfig = module.params['ldconfig']
    depmod = module.params['depmod']
    if not os.path.isdir(path):
        msg = 'Directory not found: %s' % path
        log.error(msg)
        module.fail_json(msg=msg)
    sysname = module.params['sysname']
    # A transarc-style dest directory takes precedence over a plain tree copy.
    destdir = find_destdir(path, sysname)
    if destdir:
        log.info("Installing %s from path '%s'."
                 % (','.join(components), destdir))
        files = install_dest(destdir, components)
        results['dirs'] = TRANSARC_INSTALL_DIRS
    else:
        log.info('Copying files from %s to /' % path)
        files = copy_tree(path, '/', exclude)
        # Installation directories come from the optional build metadata file.
        dirs = {}
        filename = os.path.join(path, '.build-info.json')
        if os.path.exists(filename):
            log.info("Reading metadata file '%s'.", filename)
            with open(filename) as f:
                build_info = json.load(f)
            log.info("build_info=%s", pprint.pformat(build_info))
            dirs = build_info.get('dirs', {})
            log.info("dirs=%s", pprint.pformat(dirs))
        results['dirs'] = dirs
    # Record which installed files are executables, and whether any changed.
    for f in files:
        fn, changed = f  # each entry is a (filename, changed) pair
        if changed:
            results['changed'] = True
        try:
            mode = os.stat(fn).st_mode
            if stat.S_ISREG(mode) and (mode & stat.S_IXUSR):
                results['bins'][os.path.basename(fn)] = fn
        except IOError as e:
            log.error('Failed to stat installed file "%s: %s".'
                      % (fn, e))
    if platform.system() == 'Linux':
        results['kmods'] = find_by_suffix(files, '.ko')
        if results['changed']:
            # Register any new shared-library directories and refresh the
            # loader cache.
            log.info('Updating shared object cache.')
            libdirs = directories(find_by_suffix(files, '.so'))
            if libdirs and os.path.exists('/etc/ld.so.conf.d'):
                with open('/etc/ld.so.conf.d/openafs.conf', 'w') as f:
                    for libdir in libdirs:
                        f.write('%s\n' % libdir)
            module.run_command([ldconfig], check_rc=True)
        if results['changed']:
            if results['kmods']:
                log.info('Updating module dependencies.')
                module.run_command([depmod, '-a'], check_rc=True)
    elif platform.system() == 'SunOS':
        if results['changed']:
            # Add shared-library directories to the runtime link path.
            libdirs = directories(find_by_suffix(files, '.so'))
            for libdir in libdirs:
                log.info('Configuring runtime link path: %s', libdir)
                module.run_command(['crle', '-64', '-u', '-l', libdir],
                                   check_rc=True)
        results['kmods'] = find_by_suffix(files, 'libafs64.o')
        if results['kmods']:
            kmod = results['kmods'][0]
        else:
            kmod = None
        if kmod:
            # Install/update the kernel driver only when its contents differ.
            driver = solaris_driver_path()
            if not os.path.exists(driver):
                update = True
                log.debug('Driver to be installed.')
            elif filecmp.cmp(kmod, driver, shallow=True):
                update = False
                log.debug('Driver is already up to date.')
            else:
                update = True
                log.debug('Driver to be updated.')
            if update:
                log.info('Copying "%s" to "%s"', kmod, driver)
                shutil.copy2(kmod, driver)
                results['kmods'].append(driver)
                results['changed'] = True
    msg = 'Install completed'
    log.info(msg)
    results['msg'] = msg
    log.info('Results: %s', pprint.pformat(results))
    module.exit_json(**results)
def main():
    """Module entry point: manage an OpenAFS protection database user.

    With state=present, ensures the user (and its non-system groups and
    memberships) exist; with state=absent, deletes the user.  Uses the
    pts command with retries to tolerate servers that are still starting
    up or have not yet elected quorum.
    """
    results = dict(changed=False, )
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['present', 'absent'],
                       default='present'),
            user=dict(type='str', aliases=['name']),
            id=dict(type='int', default=0),
            groups=dict(type='list', default=[], aliases=['group']),
            localauth=dict(type='bool', default=False),
            auth_user=dict(type='str', default='admin'),
            auth_keytab=dict(type='str', default='admin.keytab'),
        ),
        supports_check_mode=False,
    )
    log = Logger(module_name)
    log.info('Starting %s', module_name)
    state = module.params['state']
    user = module.params['user']
    userid = module.params['id']
    groups = set(module.params['groups'])
    localauth = module.params['localauth']
    auth_user = module.params['auth_user']
    auth_keytab = module.params['auth_keytab']
    # Convert k4 to k5 name.
    if '.' in auth_user and '/' not in auth_user:
        auth_user = auth_user.replace('.', '/')

    def die(msg):
        """Log the error and fail the module."""
        log.error(msg)
        module.fail_json(msg=msg)

    def lookup_command(name):
        """Lookup a command path from the local facts file, then the PATH."""
        try:
            with open('/etc/ansible/facts.d/openafs.fact') as f:
                facts = json.load(f)
            cmd = facts['bins'][name]
        except Exception:
            cmd = module.get_bin_path(name)
        if not cmd:
            module.fail_json(msg='Unable to locate %s command.' % name)
        return cmd

    def run_command(args):
        """
        Run a command, failing the module on a non-zero exit code.
        """
        cmdline = ' '.join(args)
        log.debug('Running: %s', cmdline)
        rc, out, err = module.run_command(args)
        log.debug('Ran: %s, rc=%d, out=%s, err=%s', cmdline, rc, out, err)
        if rc != 0:
            die('Failed: %s, rc=%d, out=%s, err=%s' % (cmdline, rc, out, err))

    def login():
        """
        Get a token for authenicated access (kinit from keytab, then aklog).
        """
        kinit = lookup_command('kinit')
        aklog = lookup_command('aklog')
        if not os.path.exists(auth_keytab):
            die('keytab %s not found.' % auth_keytab)
        run_command([kinit, '-k', '-t', auth_keytab, auth_user])
        run_command([aklog, '-d'])

    def run_pts(args, is_done):
        """
        Run a pts command with retries.

        `is_done(rc, out, err)` decides when the command has succeeded
        (it may also record side effects such as results['changed']).
        Transient errors are retried up to 120 times, 2 seconds apart.
        """
        def should_retry(err):
            # NOTE(review): 'reponding' looks like a typo for 'responding';
            # verify against the actual pts error text before changing it.
            if "server or network not reponding" in err:
                return True
            if "no quorum elected" in err:
                return True
            if "invalid RPC (RX) operation" in err:
                return True  # May occur during server startup.
            if "Couldn't read/write the database" in err:
                return True  # May occur during server startup.
            if "User or group doesn't exist" in err:
                return True  # Retry not found!
            return False

        pts = lookup_command('pts')
        args.insert(0, pts)
        if localauth:
            args.append('-localauth')
        cmdline = ' '.join(args)
        retries = 120
        while True:
            log.debug('Running: %s', cmdline)
            rc, out, err = module.run_command(args)
            log.debug('Ran: %s, rc=%d, out=%s, err=%s', cmdline, rc, out, err)
            if is_done(rc, out, err):
                return out
            if retries == 0 or not should_retry(err):
                log.error("Failed: %s, rc=%d, err=%s", cmdline, rc, err)
                # NOTE(review): fail_json is given a positional dict here;
                # the Ansible convention is keyword args (msg=...) —
                # confirm this reports as intended.
                module.fail_json(
                    dict(msg='Command failed.',
                         cmdline=cmdline, rc=rc, out=out, err=err))
            log.warning("Failed: %s, rc=%d, err=%s; %d retr%s left.",
                        cmdline, rc, err, retries,
                        ('ies' if retries > 1 else 'y'))
            retries -= 1
            time.sleep(2)

    def pts_examine(name):
        """
        Return the entry of an existing user as a dict of parsed fields.
        """
        pts_fields = {
            'name': r'Name: ([^,]+),',
            'id': r'id: (\d+),',
            'owner': r'owner: ([^,]+),',
            'creator': r'creator: ([^,]+),',
            'flags': r'flags: (.....),',
            'quota': r'group quota: (\d+|unlimited)\.'
        }

        def is_done(rc, out, err):
            if rc == 0:
                return True
            if rc == 1 and "User or group doesn't exist" in err:
                log.warning("User %s not found.", name)
                return False  # Retry
            return False

        out = run_pts(['examine', '-nameorid', name], is_done)
        entry = {}
        # NOTE(review): this loop variable shadows the `name` parameter;
        # harmless today because is_done is only called before the loop.
        for name, pattern in pts_fields.items():
            m = re.search(pattern, out)
            value = m.group(1) if m else None
            if name == 'id':
                value = int(value)
            elif name == 'quota':
                if value == 'unlimited':
                    continue  # no quota
                value = int(value)
            entry[name] = value
        return entry

    def pts_membership(name):
        """
        Lookup user groups from the indented lines of `pts membership`.
        """
        def is_done(rc, out, err):
            return rc == 0

        out = run_pts(['membership', '-nameorid', name], is_done)
        members = set()
        for line in out.splitlines():
            m = re.search(r'^ (\S+)', line)
            if m:
                members.add(m.group(1))
        return list(members)

    def pts_createuser(name, userid):
        """
        Ensure a user exists (idempotent: "already exists" is success).
        """
        def is_done(rc, out, err):
            if rc == 0:
                results['changed'] = True
                return True
            if rc == 1 and "Entry for name already exists" in err:
                return True
            if rc == 1 and "Entry for id already exists" in err:
                # Only treat an id collision as success when it is the
                # same name/id pair we were asked to create.
                pattern = r'unable to create user (\S+) with id (\d+)'
                m = re.search(pattern, err)
                if m and m.group(1) == name and int(m.group(2)) == userid:
                    return True
            return False

        cmd = ['createuser', '-name', name]
        if userid:
            cmd.extend(['-id', str(userid)])
        run_pts(cmd, is_done)

    def pts_creategroup(name):
        """
        Ensure a group exists (idempotent: "already exists" is success).
        """
        def is_done(rc, out, err):
            if rc == 0:
                results['changed'] = True
                return True
            if rc == 1 and "already exists" in err:
                return True
            return False

        run_pts(['creategroup', '-name', name], is_done)

    def pts_adduser(user, group):
        """
        Ensure user is member of the group (idempotent).
        """
        def is_done(rc, out, err):
            if rc == 0:
                results['changed'] = True
                return True
            if rc == 1 and "already exists" in err:
                return True
            return False

        run_pts(['adduser', '-user', user, '-group', group], is_done)

    def pts_delete(name):
        """
        Ensure user is absent.
        """
        def is_done(rc, out, err):
            # NOTE(review): both branches require rc == 0; a missing user
            # typically makes pts exit non-zero, which would be retried by
            # should_retry ("User or group doesn't exist") until retries
            # run out — confirm this is the intended behavior.
            if rc == 0 and err == '':
                results['changed'] = True
                return True
            if rc == 0 and "User or group doesn't exist" in err:
                log.warning("User %s not found.", name)
                return True
            return False

        run_pts(['delete', '-nameorid', name], is_done)

    # With -localauth we act directly as the superuser; otherwise obtain
    # a token first.
    if not localauth:
        login()
    if state == 'present':
        pts_createuser(user, userid)
        for group in groups:
            # system: groups are predefined and are never created here.
            if not group.startswith('system:'):
                pts_creategroup(group)
            pts_adduser(user, group)
        results['user'] = pts_examine(user)
        results['user']['groups'] = pts_membership(user)
    elif state == 'absent':
        pts_delete(user)
    else:
        module.fail_json(msg="Internal error: invalid state %s" % state)
    log.debug('Results: %s' % pprint.pformat(results))
    log.info('Exiting %s' % module_name)
    module.exit_json(**results)
def main():
    """Module entry point: configure and build OpenAFS from a source tree.

    Runs regen.sh/configure/make in the project (or out-of-tree build)
    directory, optionally cleaning first, then collects the configured
    sysname, installation directories, built kernel modules, and
    transarc/DESTDIR artifacts into the module results.
    """
    global log
    results = dict(
        changed=False,
        msg='',
        projectdir=None,
        logfiles=[],
        sysname='',
        kmods=[],
        install_dirs={},
    )
    global module
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str'),  # Not used.
            projectdir=dict(type='path', required=True),
            builddir=dict(type='path'),
            logdir=dict(type='path'),
            destdir=dict(type='path'),
            # Processing options
            make=dict(type='path'),
            clean=dict(type='bool', default=False),
            jobs=dict(type='int', fallback=(cpu_count, [])),
            # Build options.
            with_version=dict(type='str', default=None, aliases=['version']),
            with_transarc_paths=dict(type='bool', default=False,
                                     aliases=['transarc_paths']),
            with_debug_symbols=dict(type='bool', default=True),
            # FIX: was `defaults=False` — argument_spec uses the key
            # `default`, so with_rxgk silently had no default value.
            with_rxgk=dict(type='bool', default=False),
            # What to build.
            build_manpages=dict(type='bool', default=True,
                                aliases=['manpages']),
            build_userspace=dict(type='bool', default=True),
            build_module=dict(type='bool', default=True),
            build_terminal_programs=dict(type='bool', default=True),
            build_fuse_client=dict(type='bool', default=True),
            build_bindings=dict(type='bool', default=True),
            # Explicit configure and target options.
            configure_options=dict(type='raw', default=None),
            configure_environment=dict(type='dict', default=None),
            target=dict(type='str', default=None),
        ),
        supports_check_mode=False,
    )
    log = Logger(module_name)
    log.info('Starting %s', module_name)
    projectdir = module.params['projectdir']
    builddir = module.params['builddir']
    logdir = module.params['logdir']
    clean = module.params['clean']
    make = module.params['make']
    build_manpages = module.params['build_manpages']
    with_version = module.params['with_version']
    configure_environment = module.params['configure_environment']
    if not (os.path.exists(projectdir) and os.path.isdir(projectdir)):
        module.fail_json(msg='projectdir directory not found: %s'
                             % projectdir)
    results['projectdir'] = os.path.abspath(projectdir)
    # Find `make` if not specified.
    if not make:
        make = module.get_bin_path('make', required=True)
    #
    # Setup build logging.
    #
    if not logdir:
        logdir = os.path.join(projectdir, '.ansible')
    if not os.path.isdir(logdir):
        os.makedirs(logdir)
        results['changed'] = True
    results['logdir'] = logdir
    #
    # Setup paths.
    #
    if builddir:
        builddir = abspath(projectdir, builddir)
    else:
        builddir = projectdir
    results['builddir'] = builddir
    log.debug("builddir='%s'", builddir)
    gitdir = os.path.abspath(os.path.join(projectdir, '.git'))
    if not (os.path.exists(gitdir) and os.path.isdir(gitdir)):
        gitdir = None
    log.debug("gitdir='%s'.", gitdir)
    #
    # Setup environment.
    #
    solariscc = lookup_fact('solariscc')
    if solariscc:
        os.environ['SOLARISCC'] = solariscc
        os.environ['UT_NO_USAGE_TRACKING'] = '1'
        os.environ['SUNW_NO_UPDATE_NOTIFY'] = '1'
    #
    # Clean previous build.
    #
    if clean and gitdir:
        clean_command = [
            module.get_bin_path('git', required=True),
            'clean', '-f', '-d', '-x', '--exclude=.ansible',
        ]
        log.info('Running git clean.')
        run_command('clean', clean_command, projectdir, logdir, results)
    #
    # Clean out of tree build files.
    #
    if clean and builddir != projectdir and os.path.exists(builddir):
        if builddir == '/':
            module.fail_json(msg='Refusing to remove "/" builddir!')
        log.info('Removing old build directory %s' % builddir)
        shutil.rmtree(builddir)
    #
    # Setup build directory. (This must be done after the clean step.)
    #
    if not os.path.isdir(builddir):
        log.info('Creating build directory %s' % builddir)
        os.makedirs(builddir)
    #
    # Set the version string, if supplied.
    #
    if with_version:
        version_file = os.path.join(projectdir, '.version')
        log.info('Writing version %s to file %s'
                 % (with_version, version_file))
        with open(version_file, 'w') as f:
            f.write(with_version)
    #
    # Report the version string. This is read from the .version file if
    # present, otherwise it is generated from `git describe`.
    #
    cwd = os.path.join(builddir, 'build-tools')
    rc, out, err = module.run_command(['./git-version', builddir], cwd=cwd)
    if rc != 0:
        log.info('Unable to determine version string.')
    else:
        results['version'] = out
    #
    # Run autoconf.
    #
    if os.path.exists(os.path.join(projectdir, 'configure')):
        log.info('Skipping regen.sh: configure found.')
    else:
        regen_command = [os.path.join(projectdir, 'regen.sh')]
        if not build_manpages:
            regen_command.append('-q')
        run_command('regen', regen_command, projectdir, logdir, results)
    #
    # Run configure.
    #
    run_command('configure', configure_command(module, results),
                builddir, logdir, results,
                extra_env=configure_environment)
    results['sysname'] = configured_sysname(builddir)
    log.info("configured sysname is '%s'.", results['sysname'])
    #
    # Get installation directories.
    #
    with open(os.path.join(builddir, '.Makefile.dirs'), 'w') as f:
        f.write(MAKEFILE_PATHS)
    rc, out, err = module.run_command([make, '-f', '.Makefile.dirs'],
                                      cwd=builddir)
    if rc != 0:
        module.fail_json(msg='Failed to find installation directories: %s'
                             % err)
    for line in out.splitlines():
        line = line.rstrip()
        if '=' in line:
            name, value = line.split('=', 1)
            if value.startswith('//'):
                # Cleanup leading double slashes.
                value = value.replace('//', '/', 1)
            results['install_dirs'][name] = value
    #
    # Run make clean if we did not run git clean.
    #
    if clean and not gitdir:
        run_command('make', [make, 'clean'], builddir, logdir, results)
    #
    # Run make.
    #
    run_command('make', make_command(module, results), builddir, logdir,
                results)
    #
    # `make` may silently fail to build a kernel module for the running kernel
    # version (or any version). Let's fail early instead of finding out later
    # when we try to start the cache manager.
    #
    if is_linux():
        results['kmods'] = find_kmods_linux(builddir)
    elif is_solaris():
        results['kmods'] = find_kmods_solaris(builddir)
    else:
        log.warning('Unable to find kernel modules; unknown platform %s'
                    % platform.system())
    if is_kmod_expected(results):
        if is_linux():
            kmod_check_linux(module, results)
        elif is_solaris():
            kmod_check_solaris(module, results)
    #
    # Copy the transarc-style distribution tree into a DESTDIR file tree
    # for installation.
    #
    if 'destdir' in results and 'target' in results and \
            results['target'] in ('dest', 'dest_nolibafs',
                                  'dest_only_libafs'):
        log.info('Copying transarc-style distribution files to %s'
                 % results['destdir'])
        sysname = configured_sysname(builddir)
        if not sysname:
            module.fail_json(msg='Unable to get destdir; sysname not found.')
        dest = os.path.join(builddir, sysname, 'dest')
        if not os.path.isdir(dest):
            module.fail_json(msg='Missing dest directory: %s' % dest)
        copy_tree(dest, results['destdir'])
        results['changed'] = True
    #
    # Copy security key utilities to a standard location.
    #
    if 'destdir' in results and 'target' in results and \
            results['target'] in ('install', 'install_nolibafs',
                                  'dest', 'dest_nolibafs'):
        log.info('Copying security key utilities to %s'
                 % results['destdir'])
        for p in ('asetkey', 'akeyconvert'):
            src = os.path.join(builddir, 'src', 'aklog', p)
            dst = os.path.join(results['destdir'], 'usr', 'sbin')
            if os.path.isfile(src):
                if not os.path.isdir(dst):
                    os.makedirs(dst)
                log.debug('shutil.copy2("%s", "%s")' % (src, dst))
                shutil.copy2(src, dst)
                results['changed'] = True
    #
    # Save configured build paths in a meta-data file for installation.
    #
    if 'destdir' in results and 'target' in results and \
            results['target'] in ('install', 'install_nolibafs'):
        filename = os.path.join(results['destdir'], '.build-info.json')
        build_info = {'dirs': results['install_dirs']}
        with open(filename, 'w') as f:
            f.write(json.dumps(build_info, indent=4))
    log.debug('Results: %s' % pprint.pformat(results))
    results['msg'] = 'Build completed'
    log.info(results['msg'])
    #
    # Save results.
    #
    with open(os.path.join(logdir, 'results.json'), 'w') as f:
        f.write(json.dumps(results, indent=4))
    module.exit_json(**results)
def main():
    """
    Ansible module entry point: ensure an OpenAFS volume is present or
    absent, optionally mounting it, setting ACLs, and adding read-only
    replica sites.  Communicates with the cell via the `vos` and `fs`
    commands, retrying on transient server/database errors.
    """
    results = dict(changed=False, )
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['present', 'absent'],
                       default='present'),
            volume=dict(type='str', aliases=['name']),
            server=dict(type='str', default=None),
            partition=dict(type='str', default=None),
            mount=dict(type='str', default=None,
                       aliases=['mountpoint', 'mtpt']),
            acl=dict(type='list', default=[], aliases=['acls', 'rights']),
            quota=dict(type='int', default=0),
            replicas=dict(type='int', default=0),
            localauth=dict(type='bool', default=False),
            auth_user=dict(type='str', default='admin'),
            auth_keytab=dict(type='str', default='admin.keytab'),
        ),
        supports_check_mode=False,
    )
    log = Logger(module_name)
    log.info('Starting %s', module_name)
    state = module.params['state']
    volume = module.params['volume']
    server = module.params['server']
    partition = module.params['partition']
    mount = module.params['mount']
    acl = module.params['acl']
    quota = module.params['quota']
    replicas = module.params['replicas']
    localauth = module.params['localauth']
    auth_user = module.params['auth_user']
    auth_keytab = module.params['auth_keytab']

    # Convert k4 to k5 name.
    if '.' in auth_user and '/' not in auth_user:
        auth_user = auth_user.replace('.', '/')

    # NOTE(review): "asolute" is a typo for "absolute" in this user-facing
    # error message (runtime string; not altered by this doc pass).
    if mount and not mount.startswith('/'):
        module.fail_json(
            msg='Invalid parameter: mount must be an asolute path; %s'
                % mount)

    def die(msg):
        """Log the error message and fail the Ansible task."""
        log.error(msg)
        module.fail_json(msg=msg)

    def lookup_command(name):
        """
        Lookup an OpenAFS command. First search the installation facts,
        then the PATH.  Results are cached in the module-level _commands
        dict.
        """
        global _commands
        if name in _commands:
            return _commands[name]
        try:
            with open('/etc/ansible/facts.d/openafs.fact') as f:
                facts = json.load(f)
            cmd = facts['bins'][name]
        except Exception:
            # Facts missing or command not recorded; fall back to PATH.
            cmd = module.get_bin_path(name)
        if not cmd:
            die('Unable to locate %s command.' % name)
        _commands[name] = cmd
        return cmd

    def lookup_directory(name):
        """
        Lookup an OpenAFS directory from the local facts file.
        """
        try:
            with open('/etc/ansible/facts.d/openafs.fact') as f:
                facts = json.load(f)
            dir = facts['dirs'][name]
        except Exception:
            module.fail_json(msg='Unable to locate %s directory.' % name)
        return dir

    def run_command(cmd, *args):
        """
        Run a command.  Fails the task on a non-zero exit code and
        returns the command's stdout.
        """
        cmdargs = [cmd] + list(args)
        # NOTE(review): this joins only *args, so the logged/reported
        # command line omits the command name itself; presumably this
        # should be ' '.join(cmdargs) — confirm before changing.
        cmdline = ' '.join(args)
        rc, out, err = module.run_command(cmdargs)
        log.debug('command=%s, rc=%d, out=%s, err=%s', cmdline, rc, out,
                  err)
        if rc != 0:
            die('Command failed: %s, rc=%d, out=%s, err=%s'
                % (cmdline, rc, out, err))
        return out

    def login():
        """
        Get a token for authenticated access by running kinit with the
        configured keytab, then aklog.
        """
        log.debug("login()")
        if not os.path.exists(auth_keytab):
            die('keytab %s not found.' % auth_keytab)
        kinit = lookup_command('kinit')
        aklog = lookup_command('aklog')
        run_command(kinit, '-k', '-t', auth_keytab, auth_user)
        run_command(aklog, '-d')

    def vos(args, done=None, retry=None):
        """
        Run a vos command with retries.

        `done(rc, out, err)` decides success; `retry(rc, out, err)`
        decides whether a failure is transient.  Retries up to 120 times
        with a 5 second sleep between attempts.
        """
        def _done(rc, out, err):
            return rc == 0

        def _retry(rc, out, err):
            # NOTE(review): "not reponding" appears misspelled (vs.
            # "not responding") here and in every other retry matcher in
            # this function's siblings; if the actual vos message is
            # spelled correctly this match never fires — verify against
            # OpenAFS output before fixing, since the typo is repeated
            # consistently.
            if "server or network not reponding" in err:
                return True
            if "no quorum elected" in err:
                return True
            if "invalid RPC (RX) operation" in err:
                return True  # May occur during server startup.
            if "Couldn't read/write the database" in err:
                return True  # May occur during server startup.
            return False

        if done is None:
            done = _done
        if retry is None:
            retry = _retry
        args.insert(0, lookup_command('vos'))
        if localauth:
            args.append('-localauth')
        cmdline = ' '.join(args)
        retries = 120
        while True:
            rc, out, err = module.run_command(args)
            log.debug('command=%s, rc=%d, out=%s, err=%s', cmdline, rc,
                      out, err)
            if done(rc, out, err):
                return out
            if retries == 0 or not retry(rc, out, err):
                die("Command failed: %s, rc=%d, err=%s"
                    % (cmdline, rc, err))
            log.warning("Failed: %s, rc=%d, err=%s; %d retr%s left.",
                        cmdline, rc, err, retries,
                        ('ies' if retries > 1 else 'y'))
            retries -= 1
            time.sleep(5)

    def fs(*args):
        """
        Run the fs command and return the stdout.
        """
        return run_command(lookup_command('fs'), *args)

    def vos_listaddrs():
        """
        Retrieve the list of registered server UUIDs from the VLDB.

        Returns a list of dicts: [{'uuid': ..., 'addrs': [...]}, ...].
        """
        log.debug("vos_listaddrs()")

        def done(rc, out, err):
            return rc == 0 and out != ''

        def retry(rc, out, err):
            if "server or network not reponding" in err:
                return True
            if "no quorum elected" in err:
                return True
            if "invalid RPC (RX) operation" in err:
                return True  # May occur during server startup.
            if "Couldn't read/write the database" in err:
                return True  # May occur during server startup.
            if out == '':
                return True  # No results; servers not registered yet?
            return False

        out = vos(['listaddrs', '-noresolve', '-printuuid'], done=done,
                  retry=retry)
        servers = []
        uuid = None
        addrs = []
        for line in out.splitlines():
            m = re.match(r'UUID: (\S+)', line)
            if m:
                uuid = m.group(1)
                addrs = []
                continue
            # Address lines are expected to start at column 0; indented
            # lines fall through.
            m = re.match(r'(\S+)', line)
            if m:
                addrs.append(m.group(1))
                continue
            m = re.match(r'$', line)
            if m:
                # Records are terminated with a blank line.
                servers.append(dict(uuid=uuid, addrs=addrs))
                uuid = None
                addrs = []
        return servers

    def vos_listpart(server):
        """
        Retrieve the list of available partitions on the given server.

        Returns partition letters (e.g. ['a', 'b']) without the '/vicep'
        prefix.
        """
        log.debug("vos_listpart(server='%s')", server)

        def done(rc, out, err):
            return rc == 0 and 'The partitions on the server are:' in out

        def retry(rc, out, err):
            if "Possible communication failure" in err:
                return True
            if "server or network not reponding" in err:
                return True
            if "invalid RPC (RX) operation" in err:
                return True  # May occur during server startup.
            if "Could not fetch the list of partitions" in err:
                return True
            return False

        out = vos(['listpart', '-server', server], done=done, retry=retry)
        parts = re.findall(r'/vicep([a-z]+)', out)
        log.debug('partitions=%s', parts)
        return parts

    def get_entry(name, retry_not_found=True):
        """
        Return the VLDB entry of an existing volume as a dict with the
        volume ids ('rw', 'ro', 'bk', 'rc'), the 'name', and the list of
        'sites'.  When retry_not_found is True, a "no such entry" result
        is retried (useful while waiting for the volume to appear).
        """
        log.debug("get_entry(name='%s')", name)
        entry = {'sites': []}
        vos_fields = {
            'rw': r'RWrite: (\d+)',
            'ro': r'ROnly: (\d+)',
            'bk': r'Backup: (\d+)',
            'rc': r'RClone: (\d+)'
        }

        def done(rc, out, err):
            if rc == 0:
                return True
            if "no such entry" in err:
                if retry_not_found:
                    log.warning("Volume %s not found.", name)
                    return False  # Retry.
                else:
                    return True  # Volume is not present.
            return False

        def retry(rc, out, err):
            if "server or network not reponding" in err:
                return True
            if "no quorum elected" in err:
                return True
            if "invalid RPC (RX) operation" in err:
                return True  # May occur during server startup.
            if "Couldn't read/write the database" in err:
                return True  # May occur during server startup.
            if "no such entry" in err:
                if retry_not_found:
                    return True  # Retry not found!
            return False

        out = vos(['listvldb', '-name', name, '-noresolve', '-nosort'],
                  done, retry)
        for line in out.splitlines():
            if line == '':
                continue  # Skip blank lines
            # Unindented line: the volume name header.
            m = re.match(r'(\S+)', line)
            if m:
                entry['name'] = m.group(1)
                continue
            # NOTE(review): the loop variable below shadows the `name`
            # parameter; harmless for the done/retry closures (already
            # invoked) but fragile — consider renaming.
            for name, pattern in vos_fields.items():
                m = re.search(pattern, line)
                if m:
                    entry[name] = int(m.group(1))
            m = re.search(r'server (\S+) partition (\S+) (RO|RW) Site(.*)',
                          line)
            if m:
                site = {
                    'server': m.group(1),
                    'partition': m.group(2).replace('/vicep', ''),
                    'type': m.group(3).lower(),
                    'flags': m.group(4).replace('--', '').lower().strip()
                }
                entry['sites'].append(site)
        return entry

    def vos_create(name, server, partition, quota):
        """
        Ensure the volume exists; `vos create` treats an already-existing
        volume as success.
        """
        log.debug(
            "vos_create(name='%s', server='%s', partition='%s', "
            "quota='%d')", name, server, partition, quota)

        def done(rc, out, err):
            if rc == 0:
                log.info('changed: vos create returned 0')
                results['changed'] = True
                return True
            if rc == 255 and "already exists" in err:
                log.info("Volume '%s' already exists.", name)
                return True
            return False

        def retry(rc, out, err):
            if "Possible communication failure" in err:
                return True
            if "server or network not reponding" in err:
                return True
            if "no quorum elected" in err:
                return True
            if "invalid RPC (RX) operation" in err:
                return True  # May occur during server startup.
            if "Couldn't read/write the database" in err:
                return True  # May occur during server startup.
            if "Could not fetch the list of partitions" in err:
                return True
            return False

        vos([
            'create', '-server', server, '-partition', partition,
            '-name', name, '-maxquota', str(quota)
        ], done, retry)

    def lookup_index(fileservers, addr):
        """Return the fileservers index whose address list contains addr,
        or None if not found."""
        for i in fileservers:
            for a in fileservers[i]['addrs']:
                if a == addr:
                    return i
        return None

    def determine_sites(name, nreplicas):
        """
        Determine the fileserver addresses and partitions for read-only
        sites, returning the (addr, partition) pairs still to be added.
        """
        # NOTE(review): log message says 'determine_ro_sites' (runtime
        # string; old function name?) — not changed by this doc pass.
        log.debug("determine_ro_sites(name='%s', nreplicas=%d)", name,
                  nreplicas)
        # Use a simple integer server index key to reference fileservers.
        fileservers = {}
        for i, entry in enumerate(vos_listaddrs()):
            if not entry['addrs']:
                log.warning(
                    "No addresses found for fileserver %d; "
                    "ignoring.", i)
                continue
            fileservers[i] = entry
        log.debug('determine_sites: fileservers=%s',
                  pprint.pformat(fileservers))
        # Convert the ipv4 addresses of the rw and ro sites to the server
        # index.
        entry = get_entry(name)
        rw = None  # rw (index, partition) tuple
        ro = []  # list of (index, partition) tuples for ro sites
        for s in entry['sites']:
            i = lookup_index(fileservers, s['server'])
            if s['type'] == 'rw':
                rw = (i, s['partition'])
            elif s['type'] == 'ro':
                ro.append((i, s['partition']))
        log.debug('determine_sites: rw=%s, ro=%s', pprint.pformat(rw),
                  pprint.pformat(ro))

        # Assemble a list of server indexes to matching our goal state.
        # Start with the existing ro sites.
        goal = list(ro)
        all_ = sorted(fileservers.keys())
        taken = [s[0] for s in goal]

        # First, add the read-only clone, if one is not already present.
        if len(goal) < nreplicas and rw:
            clone = rw  # Same server and partition as the rw volume.
            if not clone[0] in taken:
                goal.append(clone)

        # Add remote read-only sites, if needed. Additional read-onlies
        # are added in listaddrs order.
        if len(goal) < nreplicas:
            available = []
            taken = [s[0] for s in goal]
            for i in all_:
                if i not in taken:
                    available.append(i)
            while len(goal) < nreplicas and available:
                # Partition chosen later (first available on that server).
                goal.append((available.pop(0), None))
        log.debug('determine_sites: goal=%s', pprint.pformat(goal))

        # Finally, get the addresses and partitions to be added. Order is
        # important here, since we want to add the clone first.
        sites = []
        ro_indexes = [s[0] for s in ro]
        for s in goal:
            i, part = s
            if i not in ro_indexes:
                addr = fileservers[i]['addrs'][0]
                if not part:
                    parts = vos_listpart(addr)
                    part = parts[0]
                sites.append((addr, part))
        log.debug('determine_sites: sites=%s', pprint.pformat(sites))
        return sites

    def vos_addsite(name, server, partition):
        """Add a read-only site for the volume; an existing RO site on
        the partition counts as success."""
        log.debug("vos_addsite(name='%s', server='%s', partition='%s')",
                  name, server, partition)

        def done(rc, out, err):
            if rc == 0:
                log.info('changed: vos addsite returned 0')
                results['changed'] = True
                return True
            if 'RO already exists on partition' in err:
                return True
            return False

        vos([
            'addsite', '-server', server, '-partition', partition,
            '-id', name
        ], done)

    def vos_release(name):
        """Release the volume to its read-only sites, then flush the
        local cache's volume information with `fs checkv`."""
        log.debug("vos_release(name='%s')", name)

        def done(rc, out, err):
            if rc == 0:
                log.info('changed: vos release returned 0')
                results['changed'] = True
                return True
            if 'has no replicas - release operation is meaningless!' in err:
                return True
            return False

        vos(['release', '-id', name, '-verbose'], done)
        fs('checkv')

    def vos_remove(name, server=None, partition=None):
        """
        Ensure volume is absent.  server/partition restrict the removal
        to a single site (used for read-only replicas).
        """
        log.debug("vos_remove(name='%s', server='%s', partition='%s')",
                  name, server, partition)

        def done(rc, out, err):
            if rc == 0 and err == '':
                log.info('changed: vos remove returned 0')
                results['changed'] = True
                return True
            if "no such entry" in err:
                log.warning("Volume %s not found.", name)
                return True
            if rc == 0 and "Can't find volume name" in err:
                log.warning("Volume %s not found.", name)
                return True
            return False

        def retry(rc, out, err):
            if "server or network not reponding" in err:
                return True
            if "no quorum elected" in err:
                return True
            if "invalid RPC (RX) operation" in err:
                return True  # May occur during server startup.
            if "Couldn't read/write the database" in err:
                return True  # May occur during server startup.
            return False

        args = ['remove', '-id', str(name)]
        if server:
            args.extend(['-server', server])
        if partition:
            args.extend(['-partition', partition])
        vos(args, done, retry)

    def get_cell_name():
        """
        Get the current cell name. Assumes this node is a client.
        """
        global _cell  # Cached value.
        if _cell is None:
            out = fs('wscell')
            m = re.search(r"This workstation belongs to cell '(.*)'", out)
            if m:
                _cell = m.group(1)
                log.info("Cell name is '%s'.", _cell)
            if not _cell:
                die("Cell name not found.")
        return _cell

    def get_dynroot_mode():
        """
        Returns true if the client dynroot is enabled.

        When the dynamic root (-dynroot, -dynroot-sparse) and the fake
        stat (-fakestat, -fakestat-all) modes are in effect, use the
        special directory named /afs/.:mount to mount the root.cell
        volume and to set root.afs access rights. The afsd command
        arguments are saved as an installation fact to provide a portable
        way to lookup the client startup options.
        """
        global _dynroot  # Cached value.
        if _dynroot is None:
            try:
                with open('/etc/ansible/facts.d/openafs.fact') as f:
                    facts = json.load(f)
                options = facts['client_options']
            except Exception as e:
                die('Unable to determine dynroot mode: '
                    'afsd options not found; %s' % e)
            options = set(options.split(' '))
            dynroot = set(['-dynroot', '-dynroot-sparse'])
            fakestat = set(['-fakestat', '-fakestat-all'])
            # Truthy only when both a dynroot flag and a fakestat flag
            # are present in the saved afsd options.
            _dynroot = (dynroot & options) and (fakestat & options)
        return _dynroot

    def get_afs_root():
        """
        Get the afs root directory from the client cacheinfo file. The
        root directory is conventionally '/afs'.
        """
        global _afsroot  # Cached value.
        if _afsroot is None:
            path = os.path.join(lookup_directory('viceetcdir'),
                                'cacheinfo')
            with open(path) as f:
                cacheinfo = f.read()
            # cacheinfo format: <mount>:<cachedir>:<cachesize>
            m = re.match(r'(.*):(.*):(.*)', cacheinfo)
            if m:
                _afsroot = m.group(1)
            if not _afsroot:
                die("Failed to parse cacheinfo file '%s'." % path)
        return _afsroot

    def split_dir(path):
        """
        Split a path to get the parent and directory.

        Example: split_dir('/afs/example.com/test') ->
                 ('/afs/example.com', 'test')
        """
        components = path.split('/')
        dirname = components.pop(-1)
        return '/'.join(components), dirname

    def get_acls(path):
        """
        Get positive and negative acls for a given path.

        Returns a tuple of dictionaries mapping name -> set of rights
        characters.
        """
        out = fs('listacl', '-path', path)
        acls = {'normal': {}, 'negative': {}}
        for line in out.splitlines():
            # NOTE(review): 'Accces' is misspelled (fs prints
            # "Access list for ..."), so this skip never matches; the
            # header line is instead ignored because it does not match
            # the indented-entry regex below.  NOTE(review): `kind` is
            # unbound if an entry line precedes the 'Normal rights:'
            # header — relies on fs output ordering.
            if line.startswith('Accces list for'):
                continue
            if line == 'Normal rights:':
                kind = 'normal'
                continue
            if line == 'Negative rights:':
                kind = 'negative'
                continue
            m = re.match(r' (\S+) (\S+)', line)
            if m:
                name = m.group(1)
                rights = set(m.group(2))
                acls[kind][name] = rights
        return acls['normal'], acls['negative']

    class ExtraRights:
        """
        Context manager to add rights temporarily to allow the system
        administrator to mount and unmount volumes.
        """
        def __init__(self, rights, path, name='system:administrators'):
            self.rights = set(rights)
            self.path = path
            self.name = name
            acl = get_acls(self.path)[0]
            self.existing = acl.get(self.name, set(''))
            self.augmented = self.existing | self.rights

        def __enter__(self):
            # Only touch the ACL when the extra rights are not already
            # held.
            if self.existing != self.augmented:
                log.info("Adding temporary rights '%s %s' to directory "
                         "'%s.", self.name, ''.join(self.rights),
                         self.path)
                rights = ''.join(self.augmented)
                fs('setacl', '-dir', self.path, '-acl', self.name, rights)
            return self

        def __exit__(self, *exc):
            if self.existing != self.augmented:
                log.info("Removing temporary rights '%s %s' to directory "
                         "'%s'", self.name, ''.join(self.rights),
                         self.path)
                rights = ''.join(self.existing)
                if not rights:
                    rights = 'none'  # fs syntax for an empty rights set.
                fs('setacl', '-dir', self.path, '-acl', self.name, rights)

    def is_read_only(path):
        """
        Check to see if the given path is to a read-only volume.
        """
        out = fs('examine', '-path', path)
        m = re.search(r'Volume status for vid = (\d+) named (\S+)', out)
        if not m:
            die("Unable to examine path '%s'." % path)
        name = m.group(2)
        return name.endswith('.readonly') or name.endswith('.backup')

    def make_mounts(volume, path, vcell=None, rw=False):
        """
        Make a mount point (and a read/write mount point for root.cell or
        when rw=True), then release the parent volume if it was changed.
        """
        log.debug("make_mounts(volume='%s, path='%s', vcell='%s')",
                  volume, path, vcell)
        afsroot = get_afs_root()
        cell = get_cell_name()
        dynroot = get_dynroot_mode()
        parent_changed = False

        # The root.afs volume is a special case. In dynroot mode, the rw
        # root.afs vnodes are accessed via the synthetic
        # '/afs/.:mount/<cell>:root.afs' path.
        if volume == 'root.afs' and path == afsroot:
            log.info("Skipping root.afs mount on '%s'." % path)
            return

        # The root.cell volume is a special case in dynroot mode.
        if volume == 'root.cell':
            # Be sure to create a cellular mount point for root.cell.
            if not vcell:
                vcell = cell
            canonical_path = os.path.join(afsroot, vcell)
            canonical_path_rw = os.path.join(afsroot, '.' + vcell)
            if path in (canonical_path, canonical_path_rw):
                # /afs/.:mount/example.com:root.afs/example.com ->
                # root.cell
                if dynroot:
                    path = os.path.join(afsroot, '.:mount',
                                        ':'.join([vcell, 'root.afs']),
                                        vcell)
                    log.info("Mounting volume '%s' with dynroot path "
                             "'%s'" % (volume, path))

        # Switch to the read/write path when available
        parent, dirname = split_dir(path)
        root_path = os.path.join(afsroot, cell)
        root_path_rw = os.path.join(afsroot, '.' + cell)
        if parent.startswith(root_path):
            parent_rw = parent.replace(root_path, root_path_rw)
            if os.path.exists(parent_rw):
                parent = parent_rw
                path = os.path.join(parent_rw, dirname)
                log.info("Mounting volume '%s' with read/write path "
                         "'%s'" % (volume, path))

        # Create the regular and read/only mount points if not present.
        path_reg = os.path.join(parent, dirname)
        if not os.path.exists(path_reg):
            log.info("Creating new mount point '%s' for volume '%s'.",
                     path_reg, volume)
            args = ['mkmount', '-dir', path_reg, '-vol', volume]
            if vcell:
                args.extend(['-cell', vcell])
            # Temporarily grant insert+admin on the parent directory.
            with ExtraRights('ia', parent):
                fs(*args)
            log.info('changed: mounted volume %s on path %s.', volume,
                     path_reg)
            results['changed'] = True
            results['mount'] = path_reg
            parent_changed = True

        # Create a rw mount point if this is root.cell, or requested by
        # caller.
        if volume == 'root.cell' or rw:
            path_rw = os.path.join(parent, '.' + dirname)
            if not os.path.exists(path_rw):
                log.info("Creating new mount point '%s' for volume '%s'.",
                         path_rw, volume)
                args = ['mkmount', '-dir', path_rw, '-vol', volume, '-rw']
                if vcell:
                    args.extend(['-cell', vcell])
                with ExtraRights('ia', parent):
                    fs(*args)
                log.info(
                    'changed: mounted volume %s on path %s with '
                    'read/write flag.', volume, path_rw)
                results['changed'] = True
                results['mount'] = path_rw
                parent_changed = True

        # Release the parent volume if we changed it.
        if parent_changed:
            out = fs('getfid', '-path', parent)
            m = re.search(r'File .* \((\d+)\.\d+\.\d+\)', out)
            if not m:
                die("Failed to find parent volume id for mount path "
                    "'%s'." % path)
            parent_id = m.group(1)
            log.info("Releasing parent volume '%s'.", parent_id)
            vos_release(parent_id)

    def remove_mounts(volume, path):
        """
        Remove regular and read/write mount points.
        """
        log.debug("remove_mounts(volume='%s', path='%s')", volume, path)
        afsroot = get_afs_root()
        cell = get_cell_name()
        dynroot = get_dynroot_mode()
        if not os.path.exists(path):
            log.info("Mount '%s' already absent.", path)
            return

        # Unmounting root.afs is a no-op.
        if volume == 'root.afs':
            log.info("Skipping fs rmmount of root.afs")
            return

        # The root.cell volume is a special case.
        if volume == 'root.cell' and dynroot:
            canonical_path = os.path.join(afsroot, cell)
            canonical_path_rw = os.path.join(afsroot, '.' + cell)
            if path in (canonical_path, canonical_path_rw):
                path = os.path.join(afsroot, '.:mount',
                                    ':'.join([cell, 'root.afs']), cell)
                log.info("Unmounting volume '%s' with dynroot path "
                         "'%s'" % (volume, path))

        # Switch to the read/write path when available
        parent, dirname = split_dir(path)
        root_path = os.path.join(afsroot, cell)
        root_path_rw = os.path.join(afsroot, '.' + cell)
        if parent.startswith(root_path):
            parent_rw = parent.replace(root_path, root_path_rw)
            if os.path.exists(parent_rw):
                parent = parent_rw
                log.info("Unmounting volume '%s' with read/write parent "
                         "path '%s'" % (volume, parent))

        # Remove the regular and read/only mount points when present.
        parent_changed = False
        paths = [
            os.path.join(parent, dirname),
            os.path.join(parent, '.' + dirname),
        ]
        for p in paths:
            if os.path.exists(p):
                # Temporarily grant delete rights on the parent.
                with ExtraRights('d', parent):
                    fs('rmmount', '-dir', p)
                log.info('changed: removed mount %s', p)
                results['changed'] = True
                parent_changed = True

        # Release the parent volume if we changed it.
        if parent_changed:
            out = fs('getfid', '-path', parent)
            m = re.search(r'File .* \((\d+)\.\d+\.\d+\)', out)
            if not m:
                die("Failed to find parent volume id for mount "
                    "path '%s'." % path)
            parent_id = m.group(1)
            log.info("Releasing parent volume '%s'.", parent_id)
            vos_release(parent_id)

    def parse_acl_param(acl):
        """
        Convert a list of strings (each containing two words separated by
        one or more spaces) or dictionaries into a list of terms to be
        passed to fs setacl.
        """
        if not isinstance(acl, list):
            die('Internal: acl param is not a list')
        terms = []
        for a in acl:
            if isinstance(a, dict):
                for n, r in a.items():
                    terms.extend([n, r])
            else:
                m = re.match(r'\s*(\S+)\s+(\S+)', a)
                if m:
                    terms.extend(list(m.groups()))
                else:
                    die("Invalid acl term '%s'." % a)
        log.debug('acl=%s', pprint.pformat(terms))
        return terms

    def set_acl(volume, path, acl):
        """
        Set the acl on a path and check for a change.

        This function assumes the user is a member of
        system:administrators (or already has 'a' rights on the
        directory.)
        """
        log.debug("set_acl(volume='%s', path='%s', acl='%s')", volume,
                  path, acl)
        acl = parse_acl_param(acl)
        afsroot = get_afs_root()  # e.g. /afs
        cell = get_cell_name()  # e.g. example.com
        dynroot = get_dynroot_mode()

        # The root.afs volume is a special case.
        if volume == 'root.afs' and path == afsroot:
            if dynroot:
                path = os.path.join(afsroot, '.:mount',
                                    ':'.join([cell, 'root.afs']))
                log.info("Setting '%s' acl with dynroot path '%s'." %
                         (volume, path))
            else:
                # No dynroot: We need to use temporary rw mount point if
                # the root.afs volume has been released. For now, just
                # set the acls before the release.
                if is_read_only(path):
                    log.info("Skipping acl change of root.afs on path "
                             "'%s'.", path)
                    return

        # Switch to the read/write path when available
        root_path = os.path.join(afsroot, cell)
        root_path_rw = os.path.join(afsroot, '.' + cell)
        if path.startswith(root_path):
            path_rw = path.replace(root_path, root_path_rw)
            if os.path.exists(path_rw):
                log.info("Setting acl with rw path '%s'.", path_rw)
                path = path_rw
            else:
                log.warning("path_rw='%s' does not exist.", path_rw)

        log.info("Setting acl '%s' on path '%s'.", ' '.join(acl), path)
        old = get_acls(path)
        fs('setacl', '-clear', '-dir', path, '-acl', *acl)
        new = get_acls(path)
        results['acl'] = new
        if new != old:
            log.info('changed: acl from=%s to=%s', pprint.pformat(old),
                     pprint.pformat(new))
            results['changed'] = True

    #
    # Ensure volume is present/absent.
    #
    if state == 'present':
        if not localauth:
            login()
        if not server:
            servers = vos_listaddrs()
            if not servers:
                die('No fileservers found.')
            server = servers[0]['addrs'][0]  # Pick the first one found.
        if not partition:
            partitions = vos_listpart(server)
            if not partitions:
                die('No partitions found on server %s.' % server)
            partition = partitions[0]  # Pick the first one found.
        vos_create(volume, server, partition, quota)
        if mount:
            make_mounts(volume, mount)
        if mount and acl:
            set_acl(volume, mount, acl)
        if replicas:
            for addr, part in determine_sites(volume, replicas):
                vos_addsite(volume, addr, part)
        entry = get_entry(volume)
        if volume != 'root.afs':
            # Defer root.afs release until root.cell is mounted.
            for s in entry['sites']:
                if s['flags'] != '':
                    # A site is flagged (e.g. not released); release and
                    # re-read the entry.
                    vos_release(volume)
                    entry = get_entry(volume)
                    break
        results['volume'] = entry
    elif state == 'absent':
        if not localauth:
            login()
        if mount:
            remove_mounts(volume, mount)
        entry = get_entry(volume, retry_not_found=False)
        if 'ro' in entry:
            # Remove each read-only site before removing the rw volume.
            ro = entry['ro']
            for s in entry['sites']:
                if s['type'] == 'ro':
                    vos_remove(ro, s['server'], s['partition'])
        vos_remove(volume)
    else:
        die("Internal error: invalid state %s" % state)
    log.debug('Results: %s' % pprint.pformat(results))
    log.info('Exiting %s' % module_name)
    module.exit_json(**results)
def main():
    """
    Ansible module entry point: manage a Kerberos principal and its
    keytab with kadmin.local (or a configured kadmin), and maintain the
    principal's entry in the kadm5.acl file.
    """
    global log
    results = dict(
        changed=False,
        debug=[],
    )
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['present', 'absent', 'rekey'],
                       default='present'),
            principal=dict(type='str', required=True),
            password=dict(type='str', no_log=True),
            enctypes=dict(type='list',
                          aliases=['enctype', 'encryption_type',
                                   'encryption_types', 'keysalts']),
            acl=dict(type='str'),
            keytab_name=dict(type='str'),
            keytabs=dict(type='path',
                         default='/var/lib/ansible-openafs/keytabs'),
            kadmin=dict(type='path'),
        ),
        supports_check_mode=False,
    )
    log = Logger(module_name)
    log.info('Starting %s', module_name)
    state = module.params['state']
    principal = module.params['principal']
    password = module.params['password']
    enctypes = module.params['enctypes']
    acl = module.params['acl']
    keytab_name = module.params['keytab_name']
    keytabs = module.params['keytabs']
    kadmin = module.params['kadmin']

    # Separate an explicit realm, if given.
    if '@' in principal:
        principal, realm = principal.split('@', 1)
    else:
        realm = None

    # Convert k4 to k5 name.
    if '.' in principal and '/' not in principal:
        principal = principal.replace('.', '/')

    # Default keytab file name is derived from the principal name.
    if not keytab_name:
        keytab_name = principal.replace('/', '.')
    if not keytab_name.endswith('.keytab'):
        keytab_name += '.keytab'
    keytab = '%s/%s' % (keytabs, keytab_name)

    if not kadmin:
        kadmin = module.get_bin_path('kadmin.local', required=True)

    facts = load_facts()  # Read our installation facts.
    results['principal'] = principal
    results['kadmin'] = kadmin
    if realm:
        results['realm'] = realm

    def die(msg):
        """Log the error and fail the task, returning accumulated
        results."""
        log.error("%s: %s", msg, results)
        results['msg'] = msg
        module.fail_json(**results)

    def run(*cmd):
        """Run a kadmin query; the first element of cmd is the kadmin
        command name, the rest its arguments.  Returns (stdout, stderr).

        NOTE(review): the full query (including a '-pw <password>'
        argument from add_principal) is recorded in results['debug'] —
        consider redacting before returning it to the controller.
        """
        query = ' '.join(cmd)
        args = [kadmin]
        if realm:
            args.extend(['-r', realm])
        args.extend(['-q', query])
        rc, out, err = module.run_command(args)
        results['debug'].append(dict(cmd=args, rc=rc, out=out, err=err))
        if rc != 0:
            die('%s failed' % cmd[0])
        return out, err

    def delete_keytab():
        """Remove the keytab file if it exists."""
        if os.path.exists(keytab):
            os.remove(keytab)
            results['debug'].append(dict(cmd='rm %s' % keytab))

    def get_principal():
        """Return the principal's metadata lines, or None when the
        principal does not exist."""
        metadata = None
        out, err = run('get_principal', principal)
        if 'Principal does not exist' not in err:
            metadata = out.splitlines()
        return metadata

    def add_principal():
        """Create the principal, with the given password or a random
        key."""
        args = []
        if password:
            args.extend(['-pw', password])
        else:
            args.append('-randkey')
        if enctypes:
            args.extend(['-e', '"%s"' % ','.join(enctypes)])
        args.append(principal)
        run('add_principal', *args)
        results['changed'] = True

    def delete_principal():
        """Delete the principal without prompting."""
        run('delete_principal', '-force', principal)
        results['changed'] = True

    def ktadd(rekey=False):
        """Write the principal's keys to the keytab file.  When rekey is
        False, -norandkey preserves the existing keys; when True, new
        random keys are generated (optionally with the given
        enctypes)."""
        if not os.path.exists(keytabs):
            os.makedirs(keytabs)
        args = ['-keytab', keytab]
        if rekey:
            if enctypes:
                args.extend(['-e', '"%s"' % ','.join(enctypes)])
        else:
            args.append('-norandkey')
        args.append(principal)
        run('ktadd', *args)
        if not os.path.exists(keytab):
            die('Failed to create keytab; file not found.')
        results['changed'] = True

    def read_acl_file():
        """
        Slurp acl file into a list of lines.
        """
        kadm5_acl = facts.get('kadm5_acl', None)
        if not kadm5_acl:
            die('Unable to read kadm5.acl; path not found in local '
                'facts.')
        log.info('Reading %s', kadm5_acl)
        with open(kadm5_acl) as fh:
            lines = fh.readlines()
        return lines

    def write_acl_file(lines):
        """Write the given lines back to the kadm5.acl file and mark the
        task changed."""
        kadm5_acl = facts.get('kadm5_acl', None)
        if not kadm5_acl:
            die('Unable to write kadm5.acl; path not found in local '
                'facts.')
        log.info('Updating %s', kadm5_acl)
        with open(kadm5_acl, 'w') as fh:
            for line in lines:
                fh.write(line)
        results['changed'] = True

    def add_acl(principal, permissions):
        """
        Add permissions for a principal to the ACL file.  An existing
        entry for the principal is updated in place; comments and blank
        lines are preserved.
        """
        found = False
        output = []
        for line in read_acl_file():
            m = re.match(r'^\s*#', line)
            if m:
                output.append(line)
                continue
            m = re.match(r'^\s*$', line)
            if m:
                output.append(line)
                continue
            m = re.match(r'^\s*(\S+)\s+(\S+)', line)
            if m:
                # Note: To keep this simple, we don't bother with the
                # wildcard matching.
                if m.group(1) == principal and m.group(2) == permissions:
                    # Already present.
                    log.debug("Permissions '%s' for principal '%s' "
                              "already present in acl file.",
                              permissions, principal)
                    return
                if m.group(1) == principal:
                    # Update in place.
                    found = True
                    line = '%s %s\n' % (principal, permissions)
                    log.info("Updating line in acl file: '%s'" % (line))
                output.append(line)
                continue
            output.append(line)
        if not found:
            line = '%s %s\n' % (principal, permissions)
            log.info("Adding line to acl file: '%s'" % (line))
            output.append(line)
        write_acl_file(output)

    def remove_acl(principal):
        """
        Remove the permissions for a principal.  The file is rewritten
        only when a matching entry was found.
        """
        found = False
        output = []
        for line in read_acl_file():
            m = re.match(r'^\s*#', line)
            if m:
                output.append(line)
                continue
            m = re.match(r'^\s*$', line)
            if m:
                output.append(line)
                continue
            m = re.match(r'^\s*(\S+)\s+(\S+)', line)
            if m:
                if m.group(1) == principal:
                    found = True
                    # remove this line
                    log.info("Removing line from acl file: '%s'"
                             % (line))
                    continue
            output.append(line)
        if found:
            write_acl_file(output)

    # Dispatch on the requested state.
    if state == 'present':
        metadata = get_principal()
        if not metadata:
            delete_keytab()  # Remove stale keytab, if present.
            add_principal()
            metadata = get_principal()
            if not metadata:
                die('Failed to add principal.')
        if acl:
            add_acl(principal, acl)
        if not os.path.exists(keytab):
            ktadd()
        results['metadata'] = metadata
        results['keytab'] = keytab
    elif state == 'rekey':
        if get_principal():
            ktadd(rekey=True)
        else:
            delete_keytab()  # Remove stale keytab, if present.
            add_principal()
            ktadd()
        metadata = get_principal()
        if not metadata:
            die('Failed to add principal.')
        results['metadata'] = metadata
        results['keytab'] = keytab
    elif state == 'absent':
        delete_keytab()
        if get_principal():
            delete_principal()
            if get_principal():
                die('Failed to delete principal.')
        remove_acl(principal)
    else:
        die('Internal error; invalid state: %s' % state)

    log.info('Results: %s', pprint.pformat(results))
    module.exit_json(**results)
def main():
    """
    Ansible module entry point: build and install a local SELinux policy
    module from its .te and .fc sources when the installed version does
    not match the version declared in the .te file.
    """
    results = dict(
        changed=False,
    )
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['present'], default='present'),
            name=dict(type='str', default='openafs'),
            path=dict(type='path', default='/var/lib/ansible-openafs'),
        ),
        supports_check_mode=False,
    )
    log = Logger(module_name)
    log.info('Starting %s', module_name)
    name = module.params['name']
    path = module.params['path']

    def die(msg):
        """Log the error message and fail the task."""
        log.error(msg)
        module.fail_json(msg=msg)

    def run_command(cmd, *args):
        """Locate and run an external command; fail the task on a
        non-zero exit code and return the command's stdout."""
        cmdargs = [module.get_bin_path(cmd, required=True)] + list(args)
        cmdline = ' '.join(cmdargs)
        log.info("Running: %s", cmdline)
        rc, out, err = module.run_command(cmdargs)
        if rc != 0:
            die("Command failed: %s, rc=%d, err=%s" % (cmdline, rc, err))
        return out

    # Thin wrappers over the SELinux policy toolchain.
    def checkmodule(*args):
        return run_command('checkmodule', *args)

    def semodule_package(*args):
        return run_command('semodule_package', *args)

    def semodule(*args):
        return run_command('semodule', *args)

    if not name:
        die("Module name is required.")

    # Both the type-enforcement and file-context sources must exist.
    te = os.path.join(path, '%s.te' % name)
    fc = os.path.join(path, '%s.fc' % name)
    missing = [src for src in (te, fc) if not os.path.exists(src)]
    if missing:
        die("Input file '%s' not found." % missing[0])

    # Find the currently installed version of the module, if any, from
    # the 'name version' listing produced by semodule.
    current_version = None
    for entry in semodule('-lstandard').splitlines():
        found = re.match(r'\s*(\S+)\s+(\S+)\s*', entry)
        if found and found.group(1) == name:
            current_version = found.group(2)
            break
    if current_version:
        log.info("%s module version is '%s'.", name, current_version)
    else:
        log.info("%s module is not installed.", name)

    # Extract the target version from the 'module <name> <version>;'
    # declaration in the .te source.
    target_version = None
    try:
        with open(te) as source:
            for entry in source:
                found = re.match(r'\s*module\s+(\S+)\s+(\S+)\s*;', entry)
                if found and found.group(1) == name:
                    target_version = found.group(2)
                    break
    except Exception as e:
        die("Failed to read te file: %s" % str(e))
    if not target_version:
        die("SELinux module version number not found in file '%s'." % te)
    log.info("%s module target version is '%s'.", name, target_version)

    # Rebuild and install the policy package when nothing is installed
    # yet or the installed version differs from the target.
    if current_version is None or current_version != target_version:
        mod = os.path.join(path, '%s.mod' % name)
        pp = os.path.join(path, '%s.pp' % name)
        checkmodule('-M', '-m', '-o', mod, te)
        semodule_package('-o', pp, '-m', mod, '-f', fc)
        semodule('-i', pp)
        results['changed'] = True

    log.info('Results: %s', pprint.pformat(results))
    module.exit_json(**results)
def main():
    """
    Wait until this fileserver's uuid is registered in the VLDB.

    Polls ``vos listaddrs`` for the uuid read from the locally written
    ``sysid`` file, optionally sending SIGXCPU to the fileserver process
    between polls to expedite re-registration (the fileserver also retries
    on its own every 5 minutes). Fails when ``timeout`` seconds elapse.
    """
    results = dict(changed=False, )
    module = AnsibleModule(
        argument_spec=dict(timeout=dict(type='int', default=600),
                           delay=dict(type='int', default=0),
                           sleep=dict(type='int', default=20),
                           signal=dict(type='bool', default=True)),
        supports_check_mode=False,
    )
    log = Logger(module_name)
    log.info('Starting %s', module_name)
    timeout = module.params['timeout']
    delay = module.params['delay']
    sleep = module.params['sleep']
    signal = module.params['signal']
    # Clamp out-of-range parameters rather than failing; warn so the
    # playbook author can see the adjustment.
    if delay < 0:
        log.warning('Ignoring negative delay parameter.')
        delay = 0
    if sleep < 1:
        log.warning('Ignoring out of range sleep parameter.')
        sleep = 1

    def lookup_command(name):
        """
        Lookup an OpenAFS command from local facts file.
        Try the PATH if not found in the local facts.
        """
        try:
            with open('/etc/ansible/facts.d/openafs.fact') as f:
                facts = json.load(f)
            cmd = facts['bins'][name]
        except Exception as e:
            log.warning("Unable to load facts: %s", e)
            cmd = module.get_bin_path(name)
        if not cmd:
            module.fail_json(msg='Unable to locate %s command.' % name)
        return cmd

    def lookup_directory(name):
        """
        Lookup an OpenAFS directory from the local facts file.
        """
        try:
            with open('/etc/ansible/facts.d/openafs.fact') as f:
                facts = json.load(f)
            dir = facts['dirs'][name]
        except Exception as e:
            log.warning("Unable to load facts: %s", e)
            module.fail_json(msg='Unable to locate %s directory.' % name)
        return dir

    def run_command(args, done=None, retry=None):
        """
        Run an afs command with retries.

        :param args: command argv list; '-localauth' is appended
        :param done: predicate (rc, out, err) -> bool; success when true
        :param retry: predicate (rc, out, err) -> bool; retry when true
        :returns: command stdout on success
        """
        def _done(rc, out, err):
            return rc == 0

        def _retry(rc, out, err):
            # NOTE(review): 'reponding' looks like a typo for 'responding'
            # in the matched error text; as written this pattern may never
            # match vos output -- confirm against actual error text before
            # changing the string.
            if "server or network not reponding" in err:
                return True
            if "no quorum elected" in err:
                return True
            if "invalid RPC (RX) operation" in err:
                return True  # May occur during server startup.
            if "Couldn't read/write the database" in err:
                return True  # May occur during server startup.
            if "no such entry" in err:
                return True  # Retry not found!
            return False

        if done is None:
            done = _done
        if retry is None:
            retry = _retry
        # Build a new list instead of mutating the caller's argv.
        args = list(args) + ['-localauth']
        cmdline = ' '.join(args)
        retries = 120
        while True:
            log.debug('Running: %s', cmdline)
            rc, out, err = module.run_command(args)
            log.debug('Ran: %s, rc=%d, out=%s, err=%s', cmdline, rc, out, err)
            if done(rc, out, err):
                return out
            if retries == 0 or not retry(rc, out, err):
                log.error("Failed: %s, rc=%d, err=%s", cmdline, rc, err)
                # Bug fix: fail_json() accepts keyword arguments only; the
                # original passed a dict positionally, which raised
                # TypeError instead of reporting the command failure.
                module.fail_json(msg='Command failed.', cmdline=cmdline,
                                 rc=rc, out=out, err=err)
            log.warning("Failed: %s, rc=%d, err=%s; %d retr%s left.",
                        cmdline, rc, err, retries,
                        ('ies' if retries > 1 else 'y'))
            retries -= 1
            time.sleep(5)

    def vos_listaddrs():
        """
        Retrieve the server uuid and addreses from the VLDB.

        :returns: list of dicts with 'uuid' and 'addrs' keys, one per
                  registered server
        """
        def done(rc, out, err):
            return rc == 0

        def retry(rc, out, err):
            # Same transient errors as the default retry predicate, but
            # without 'no such entry': an empty VLDB is a valid result here.
            # NOTE(review): see the 'reponding' spelling note in run_command.
            if "server or network not reponding" in err:
                return True
            if "no quorum elected" in err:
                return True
            if "invalid RPC (RX) operation" in err:
                return True  # May occur during server startup.
            if "Couldn't read/write the database" in err:
                return True  # May occur during server startup.
            return False

        vos = lookup_command('vos')
        out = run_command([vos, 'listaddrs', '-noresolve', '-printuuid'],
                          done=done, retry=retry)
        servers = []
        uuid = None
        addrs = []
        for line in out.splitlines():
            m = re.match(r'UUID: (\S+)', line)
            if m:
                uuid = UUID.parse(m.group(1))
                addrs = []
                continue
            m = re.match(r'(\S+)', line)
            if m:
                addrs.append(m.group(1))
                continue
            m = re.match(r'$', line)
            if m:
                # Records are terminated with a blank line.
                servers.append(dict(uuid=uuid, addrs=addrs))
                uuid = None
                addrs = []
        log.debug("servers=%s", servers)
        return servers

    def lookup_uuid():
        """
        Retreive the fileserver UUID value from the sysid file created
        by the fileserver process.

        :returns: the uuid, or None while the sysid file does not exist yet
        """
        path = os.path.join(lookup_directory('afslocaldir'), 'sysid')
        if not os.path.exists(path):
            # The sysid file is created by the filserver process.
            log.info("Waiting for sysid file '%s'.", path)
            return None
        log.debug("Reading sysid file '%s'.", path)
        sysid = Sysid(path)
        log.debug('sysid=%s', sysid)
        return sysid.uuid

    def lookup_bnode():
        """
        Lookup the active fileserver bnode name; 'fs', 'dafs', or None.
        """
        path = os.path.join(lookup_directory('afsbosconfigdir'), 'BosConfig')
        log.debug("Reading BosConfig file '%s'.", path)
        with open(path) as f:
            bosconfig = f.read()
        bnodes = re.findall(r'bnode (fs|dafs) \S+ 1', bosconfig)
        if len(bnodes) == 0:
            log.warning('No active fileserver bnodes found in BosConfig.')
            return None
        if len(bnodes) > 1:
            log.warning('Too many fileserver bnodes found in BosConfig.')
            return None
        bnode = bnodes[0]
        log.debug('fileserver bnode is %s', bnode)
        return bnode

    def lookup_pid():
        """
        Lookup the fileserver process pid or return None if not found.
        """
        bnode = lookup_bnode()
        if not bnode:
            return None
        path = os.path.join(lookup_directory('afslocaldir'),
                            '%s.file.pid' % bnode)
        try:
            log.debug("Reading pid file '%s'.", path)
            with open(path) as f:
                pid = int(f.read())
        except IOError as e:
            log.warning("Unable to read pid file '%s'; %s", path, e)
            return None
        except ValueError as e:
            log.warning("Unable to convert pid file '%s' contents to int; %s",
                        path, e)
            return None
        log.debug('fileserver pid is %d', pid)
        return pid

    #
    # Wait for VLDB registration. We check for our uuid in the VLDB, and if
    # not present, send a signal to the fileserver to expedite the
    # registration. The fileserver will retry to register every 5 minutes
    # as well.
    #
    if delay:
        time.sleep(delay)
    now = int(time.time())
    expires = now + timeout
    retries = 0
    while True:
        uuid = lookup_uuid()
        if uuid:
            servers = vos_listaddrs()
            registered_uuids = [s['uuid'] for s in servers]
            if uuid in registered_uuids:
                results['uuid'] = str(uuid)
                log.info('Fileserver uuid %s is registered.', uuid)
                break
        # Skip the signal on the first pass to give the fileserver a chance
        # to register on its own.
        if signal and retries > 0:
            pid = lookup_pid()
            if pid:
                log.info('Running: kill -XCPU %d', pid)
                module.run_command(['kill', '-XCPU', '%d' % pid])
        now = int(time.time())
        if now > expires:
            log.error('Timeout expired.')
            module.fail_json(msg='Timeout expired')
        # Lazy %-args for logging instead of eager string formatting.
        log.info('Will retry in %d seconds.', sleep)
        time.sleep(sleep)
        retries += 1
    results['retries'] = retries
    log.info('Results: %s', pprint.pformat(results))
    module.exit_json(**results)