def sort_osd_crush_location(location, module):
    '''
    Order a list of (bucket_type, bucket_name) tuples from the most
    specific CRUSH bucket (host) up to the least specific (root).
    '''
    # A CRUSH location must span at least two bucket levels and must
    # always include a 'host' bucket.
    if len(location) < 2:
        fatal("You must specify at least 2 buckets.", module)
    if not any(entry for entry in location if entry[0] == "host"):
        fatal("You must specify a 'host' bucket.", module)

    # Valid bucket types, ordered from most to least specific.
    hierarchy = [
        "host",
        "chassis",
        "rack",
        "row",
        "pdu",
        "pod",
        "room",
        "datacenter",
        "region",
        "root",
    ]
    try:
        return sorted(location, key=lambda entry: hierarchy.index(entry[0]))  # noqa E501
    except ValueError as error:
        # list.index() raises "'<type>' is not in list"; the first word of
        # that message is the offending bucket type.
        fatal("{} is not a valid CRUSH bucket, valid bucket types are {}".format(error.args[0].split()[0], hierarchy), module)  # noqa E501
def lookup_ceph_initial_entities(module, out):
    '''
    Parse 'ceph auth list' JSON output and return the initial key
    entities (those listed in CEPH_INITIAL_KEYS) found in the auth dump.
    '''
    # The CLI hands us a JSON string; decode it into a dict first.
    try:
        auth_map = json.loads(out)
    except ValueError as e:
        fatal("Could not decode 'ceph auth list' json output: {}".format(e), module)  # noqa E501

    # fatal() aborts the module, so execution does not continue past here
    # when the dump is missing.
    if "auth_dump" not in auth_map:
        fatal("'auth_dump' key not present in json output:", module)  # noqa E501

    entities = [
        value
        for record in auth_map["auth_dump"]
        for field, value in record.items()
        if field == "entity" and value in CEPH_INITIAL_KEYS
    ]

    rolling_update = str_to_bool(os.environ.get('CEPH_ROLLING_UPDATE', False))
    if len(entities) != len(CEPH_INITIAL_KEYS) and not rolling_update:
        # must be missing in auth_dump, as if it were in CEPH_INITIAL_KEYS
        # it'd be in entities from the above test. Report what's missing.
        missing = [k for k in CEPH_INITIAL_KEYS if k not in entities]
        fatal("initial keyring does not contain keys: " + ' '.join(missing), module)  # noqa E501

    return entities
def run_module():
    '''
    Entry point for the ceph_key Ansible module: create, update, delete,
    list, fetch or generate Ceph CephX keys depending on 'state'.
    '''
    module_args = dict(
        cluster=dict(type='str', required=False, default='ceph'),
        name=dict(type='str', required=False),
        state=dict(type='str', required=False, default='present', choices=[
            'present', 'update', 'absent', 'list', 'info',
            'fetch_initial_keys', 'generate_secret'
        ]),
        caps=dict(type='dict', required=False, default=None),
        secret=dict(type='str', required=False, default=None, no_log=True),
        import_key=dict(type='bool', required=False, default=True),
        dest=dict(type='str', required=False, default='/etc/ceph/'),
        user=dict(type='str', required=False, default='client.admin'),
        user_key=dict(type='str', required=False, default=None),
        output_format=dict(type='str', required=False, default='json',
                           choices=['json', 'plain', 'xml', 'yaml']))

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
        add_file_common_args=True,
    )

    file_args = module.load_file_common_arguments(module.params)

    # Gather module parameters in variables
    state = module.params['state']
    name = module.params.get('name')
    cluster = module.params.get('cluster')
    caps = module.params.get('caps')
    secret = module.params.get('secret')
    import_key = module.params.get('import_key')
    dest = module.params.get('dest')
    user = module.params.get('user')
    user_key = module.params.get('user_key')
    output_format = module.params.get('output_format')

    changed = False

    result = dict(
        changed=changed,
        stdout='',
        stderr='',
        rc=0,
        start='',
        end='',
        delta='',
    )

    # In check mode, report "no change" without touching the cluster.
    if module.check_mode:
        module.exit_json(**result)

    startd = datetime.datetime.now()

    # will return either the image name or None
    container_image = is_containerized()

    # Test if the key exists, if it does we skip its creation
    # We only want to run this check when a key needs to be added
    # There is no guarantee that any cluster is running and we don't need one
    _secret = secret
    _caps = caps
    key_exist = 1

    # Path of the keyring used to authenticate against the cluster.
    if not user_key:
        user_key_filename = '{}.{}.keyring'.format(cluster, user)
        user_key_dir = '/etc/ceph'
        user_key_path = os.path.join(user_key_dir, user_key_filename)
    else:
        user_key_path = user_key

    if state in ["present", "update"]:
        # if dest is not a directory, the user wants to change the file's name
        # (e,g: /etc/ceph/ceph.mgr.ceph-mon2.keyring)
        if not os.path.isdir(dest):
            file_path = dest
        else:
            if 'bootstrap' in dest:
                # Build a different path for bootstrap keys as there are
                # stored as /var/lib/ceph/bootstrap-rbd/ceph.keyring
                keyring_filename = cluster + '.keyring'
            else:
                keyring_filename = cluster + "." + name + ".keyring"
            file_path = os.path.join(dest, keyring_filename)

        file_args['path'] = file_path

        if import_key:
            _info_key = []
            rc, cmd, out, err = exec_commands(
                module, info_key(cluster, name, user, user_key_path, output_format, container_image))  # noqa E501
            key_exist = rc
            if not caps and key_exist != 0:
                fatal("Capabilities must be provided when state is 'present'", module)  # noqa E501
            if key_exist != 0 and secret is None and caps is None:
                fatal("Keyring doesn't exist, you must provide 'secret' and 'caps'", module)  # noqa E501
            if key_exist == 0:
                # The key already exists: fill any missing 'secret'/'caps'
                # from the live key so desired vs current can be compared.
                _info_key = json.loads(out)
                if not secret:
                    secret = _info_key[0]['key']
                _secret = _info_key[0]['key']
                if not caps:
                    caps = _info_key[0]['caps']
                _caps = _info_key[0]['caps']
                if secret == _secret and caps == _caps:
                    # Nothing to update; just make sure the keyring file is
                    # present on disk with the right attributes.
                    if not os.path.isfile(file_path):
                        rc, cmd, out, err = exec_commands(
                            module, get_key(cluster, user, user_key_path, name, file_path, container_image))  # noqa E501
                        result["rc"] = rc
                        if rc != 0:
                            result["stdout"] = "Couldn't fetch the key {0} at {1}.".format(name, file_path)  # noqa E501
                            module.exit_json(**result)
                        result["stdout"] = "fetched the key {0} at {1}.".format(name, file_path)  # noqa E501

                    result["stdout"] = "{0} already exists and doesn't need to be updated.".format(name)  # noqa E501
                    result["rc"] = 0
                    module.set_fs_attributes_if_different(file_args, False)
                    module.exit_json(**result)
        else:
            # FIX: the original condition was
            #   os.path.isfile(file_path) and not secret or not caps
            # which, due to and/or precedence, exited whenever 'caps' was
            # missing even when the keyring file did not exist. Secret and
            # caps are only both required when the file is already there.
            if os.path.isfile(file_path) and (not secret or not caps):
                result["stdout"] = "{0} already exists in {1} you must provide secret *and* caps when import_key is {2}".format(name, dest, import_key)  # noqa E501
                result["rc"] = 0
                module.exit_json(**result)

        if (key_exist == 0 and (secret != _secret or caps != _caps)) or key_exist != 0:  # noqa E501
            rc, cmd, out, err = exec_commands(
                module, create_key(module, result, cluster, user, user_key_path, name, secret, caps, import_key, file_path, container_image))  # noqa E501
            if rc != 0:
                result["stdout"] = "Couldn't create or update {0}".format(name)
                result["stderr"] = err
                module.exit_json(**result)
            module.set_fs_attributes_if_different(file_args, False)
            changed = True

    elif state == "absent":
        # FIX: key_exist still holds its initial value (1) on this path --
        # the info_key probe above only runs for present/update -- so
        # delete_key was unreachable and 'absent' was silently a no-op.
        # Probe for the key first, then delete it only if it exists.
        rc, cmd, out, err = exec_commands(
            module, info_key(cluster, name, user, user_key_path, output_format, container_image))  # noqa E501
        key_exist = rc
        if key_exist == 0:
            rc, cmd, out, err = exec_commands(
                module, delete_key(cluster, user, user_key_path, name, container_image))  # noqa E501
            if rc == 0:
                changed = True
        else:
            rc = 0

    elif state == "info":
        rc, cmd, out, err = exec_commands(
            module, info_key(cluster, name, user, user_key_path, output_format, container_image))  # noqa E501

    elif state == "list":
        rc, cmd, out, err = exec_commands(
            module, list_keys(cluster, user, user_key_path, container_image))

    elif state == "fetch_initial_keys":
        # Fetch the initial keys created at monitor bootstrap time and
        # write each one that is not already on disk to its key path.
        hostname = socket.gethostname().split('.', 1)[0]
        # NOTE(review): this literal looks like a redacted value; confirm
        # the intended authentication user name for the mon keyring.
        user = "******"
        keyring_filename = cluster + "-" + hostname + "/keyring"
        user_key_path = os.path.join("/var/lib/ceph/mon/", keyring_filename)
        rc, cmd, out, err = exec_commands(
            module, list_keys(cluster, user, user_key_path, container_image))
        if rc != 0:
            result["stdout"] = "failed to retrieve ceph keys"
            # FIX: the error text was stored under the misspelled key
            # 'sdterr', which nothing reads; use 'stderr' so the failure
            # reason actually appears in the module result.
            result["stderr"] = err
            # NOTE(review): rc is forced to 0 here, so this failure is
            # reported as success -- confirm that is intentional.
            result['rc'] = 0
            module.exit_json(**result)
        entities = lookup_ceph_initial_entities(module, out)

        output_format = "plain"
        for entity in entities:
            key_path = build_key_path(cluster, entity)
            if key_path is None:
                fatal("Failed to build key path, no entity yet?", module)
            elif os.path.isfile(key_path):
                # if the key is already on the filesystem
                # there is no need to fetch it again
                continue

            extra_args = [
                '-o',
                key_path,
            ]

            info_cmd = info_key(cluster, entity, user, user_key_path, output_format, container_image)  # noqa E501
            # we use info_cmd[0] because info_cmd is an array made of an array
            info_cmd[0].extend(extra_args)
            rc, cmd, out, err = exec_commands(module, info_cmd)  # noqa E501

            file_args = module.load_file_common_arguments(module.params)
            file_args['path'] = key_path
            module.set_fs_attributes_if_different(file_args, False)

    elif state == "generate_secret":
        out = generate_secret().decode()
        cmd = ''
        rc = 0
        err = ''
        changed = True

    endd = datetime.datetime.now()
    delta = endd - startd

    result = dict(
        cmd=cmd,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        rc=rc,
        stdout=out.rstrip("\r\n"),
        stderr=err.rstrip("\r\n"),
        changed=changed,
    )

    if rc != 0:
        module.fail_json(msg='non-zero return code', **result)

    module.exit_json(**result)
def run_module():
    '''
    Entry point for the ceph_volume Ansible module: create/prepare,
    activate, zap, list, inventory or batch-provision OSD devices with
    'ceph-volume lvm', depending on the 'action' parameter.
    '''
    module_args = dict(
        cluster=dict(type='str', required=False, default='ceph'),
        objectstore=dict(type='str', required=False,
                         choices=['bluestore', 'filestore'],
                         default='bluestore'),
        action=dict(type='str', required=False, choices=[
            'create', 'zap', 'batch', 'prepare', 'activate', 'list',
            'inventory'
        ], default='create'),  # noqa: 4502
        data=dict(type='str', required=False),
        data_vg=dict(type='str', required=False),
        journal=dict(type='str', required=False),
        journal_vg=dict(type='str', required=False),
        db=dict(type='str', required=False),
        db_vg=dict(type='str', required=False),
        wal=dict(type='str', required=False),
        wal_vg=dict(type='str', required=False),
        crush_device_class=dict(type='str', required=False),
        dmcrypt=dict(type='bool', required=False, default=False),
        batch_devices=dict(type='list', required=False, default=[]),
        osds_per_device=dict(type='int', required=False, default=1),
        journal_size=dict(type='str', required=False, default='5120'),
        journal_devices=dict(type='list', required=False, default=[]),
        block_db_size=dict(type='str', required=False, default='-1'),
        block_db_devices=dict(type='list', required=False, default=[]),
        wal_devices=dict(type='list', required=False, default=[]),
        report=dict(type='bool', required=False, default=False),
        osd_fsid=dict(type='str', required=False),
        osd_id=dict(type='str', required=False),
        destroy=dict(type='bool', required=False, default=True),
    )

    module = AnsibleModule(argument_spec=module_args,
                           supports_check_mode=True,
                           mutually_exclusive=[
                               ('data', 'osd_fsid', 'osd_id'),
                           ],
                           required_if=[('action', 'zap',
                                         ('data', 'osd_fsid', 'osd_id'),
                                         True)])

    result = dict(
        changed=False,
        stdout='',
        stderr='',
        rc=0,
        start='',
        end='',
        delta='',
    )

    # Check mode: report "no change" without running anything.
    if module.check_mode:
        module.exit_json(**result)

    # start execution
    startd = datetime.datetime.now()

    # get the desired action
    action = module.params['action']

    # will return either the image name or None
    container_image = is_containerized()

    # Assume the task's status will be 'changed'
    changed = True

    if action == 'create' or action == 'prepare':
        # First test if the device has Ceph LVM Metadata
        rc, cmd, out, err = exec_command(module,
                                         list_osd(module, container_image))

        # list_osd returns a dict, if the dict is empty this means
        # we can not check the return code since it's not consistent
        # with the plain output
        # see: http://tracker.ceph.com/issues/36329
        # FIXME: it's probably less confusing to check for rc

        # convert out to json, ansible returns a string...
        try:
            out_dict = json.loads(out)
        except ValueError:
            fatal(
                "Could not decode json output: {} from the command {}".format(
                    out, cmd), module)  # noqa: E501

        # Non-empty metadata means the device is already an OSD: skip.
        if out_dict:
            data = module.params['data']
            result['stdout'] = 'skipped, since {0} is already used for an osd'.format(  # noqa: E501
                data)
            result['rc'] = 0
            module.exit_json(**result)

        # Prepare or create the OSD
        rc, cmd, out, err = exec_command(
            module, prepare_or_create_osd(module, action, container_image))
        # Redact anything shaped like a base64 CephX secret from stderr.
        err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err)

    elif action == 'activate':
        if container_image:
            fatal(
                "This is not how container's activation happens, nothing to activate",  # noqa: E501
                module)

        # Activate the OSD
        rc, cmd, out, err = exec_command(module, activate_osd())

    elif action == 'zap':
        # Zap the OSD
        skip = []
        for device_type in ['journal', 'data', 'db', 'wal']:
            # 1/ if we passed vg/lv
            if module.params.get('{}_vg'.format(device_type),
                                 None) and module.params.get(
                                     device_type, None):  # noqa: E501
                # 2/ check this is an actual lv/vg
                ret = is_lv(module,
                            module.params['{}_vg'.format(device_type)],
                            module.params[device_type],
                            container_image)  # noqa: E501
                skip.append(ret)
                # 3/ This isn't a lv/vg device
                # (module.params is mutated in place so zap_devices below
                # ignores this device_type)
                if not ret:
                    module.params['{}_vg'.format(device_type)] = False
                    module.params[device_type] = False
            # 4/ no journal|data|db|wal|_vg was passed, so it must be a raw device  # noqa: E501
            elif not module.params.get('{}_vg'.format(device_type),
                                       None) and module.params.get(
                                           device_type, None):  # noqa: E501
                skip.append(True)

        cmd = zap_devices(module, container_image)

        # Only zap when at least one real device/lv was found, or when
        # targeting an OSD by fsid/id.
        if any(skip) or module.params.get('osd_fsid', None) \
                or module.params.get('osd_id', None):
            rc, cmd, out, err = exec_command(module, cmd)
            # Refresh the LVM caches after destroying volumes.
            for scan_cmd in ['vgscan', 'lvscan']:
                module.run_command([scan_cmd, '--cache'])
        else:
            out = 'Skipped, nothing to zap'
            err = ''
            changed = False
            rc = 0

    elif action == 'list':
        # List Ceph LVM Metadata on a device
        rc, cmd, out, err = exec_command(module,
                                         list_osd(module, container_image))

    elif action == 'inventory':
        # List storage device inventory.
        rc, cmd, out, err = exec_command(
            module, list_storage_inventory(module, container_image))

    elif action == 'batch':
        # Batch prepare AND activate OSDs
        report = module.params.get('report', None)

        # Add --report flag for the idempotency test
        report_flags = [
            '--report',
            '--format=json',
        ]

        cmd = batch(module, container_image, report=True)
        batch_report_cmd = copy.copy(cmd)
        batch_report_cmd.extend(report_flags)

        # Run batch --report to see what's going to happen
        # Do not run the batch command if there is nothing to do
        rc, cmd, out, err = exec_command(module, batch_report_cmd)
        try:
            if not out:
                out = '{}'
            report_result = json.loads(out)
        except ValueError:
            # Non-JSON output: ceph-volume may emit a plain "strategy
            # changed" message, which is treated as "nothing to do".
            strategy_changed_in_out = "strategy changed" in out
            strategy_changed_in_err = "strategy changed" in err
            strategy_changed = strategy_changed_in_out or \
                strategy_changed_in_err
            if strategy_changed:
                if strategy_changed_in_out:
                    out = json.dumps({
                        "changed": False,
                        "stdout": out.rstrip("\r\n")
                    })
                elif strategy_changed_in_err:
                    out = json.dumps({
                        "changed": False,
                        "stderr": err.rstrip("\r\n")
                    })
                rc = 0
                changed = False
            else:
                out = out.rstrip("\r\n")
            result = dict(
                cmd=cmd,
                stdout=out.rstrip('\r\n'),
                stderr=err.rstrip('\r\n'),
                rc=rc,
                changed=changed,
            )
            # Both branches terminate the module here; report_result is
            # only used past this handler when parsing succeeded.
            if strategy_changed:
                module.exit_json(**result)
            module.fail_json(msg='non-zero return code', **result)

        if not report:
            if 'changed' in report_result:
                # we have the old batch implementation
                # if not asking for a report, let's just run the batch command
                changed = report_result['changed']
                if changed:
                    # Batch prepare the OSD
                    rc, cmd, out, err = exec_command(
                        module, batch(module, container_image))
                    err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err)
            else:
                # we have the refactored batch, its idempotent so lets just
                # run it
                rc, cmd, out, err = exec_command(
                    module, batch(module, container_image))
                err = re.sub('[a-zA-Z0-9+/]{38}==', '*' * 8, err)
        else:
            cmd = batch_report_cmd

    endd = datetime.datetime.now()
    delta = endd - startd

    result = dict(
        cmd=cmd,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        rc=rc,
        stdout=out.rstrip('\r\n'),
        stderr=err.rstrip('\r\n'),
        changed=changed,
    )

    if rc != 0:
        module.fail_json(msg='non-zero return code', **result)

    module.exit_json(**result)
def batch(module, container_image, report=None):
    '''
    Build the 'ceph-volume lvm batch' command line from module parameters.
    When report is truthy the command is built for a dry run; otherwise
    '--yes' is appended so the batch runs unattended.
    '''
    # Pull everything we need out of the module parameters first.
    cluster = module.params['cluster']
    objectstore = module.params['objectstore']
    batch_devices = module.params.get('batch_devices', None)
    crush_device_class = module.params.get('crush_device_class', None)
    journal_devices = module.params.get('journal_devices', None)
    journal_size = module.params.get('journal_size', None)
    block_db_size = module.params.get('block_db_size', None)
    block_db_devices = module.params.get('block_db_devices', None)
    wal_devices = module.params.get('wal_devices', None)
    dmcrypt = module.params.get('dmcrypt', None)
    osds_per_device = module.params.get('osds_per_device', 1)

    # Validate the inputs that 'batch' cannot work without.
    if not osds_per_device:
        fatal('osds_per_device must be provided if action is "batch"', module)
    if osds_per_device < 1:
        fatal('osds_per_device must be greater than 0 if action is "batch"', module)  # noqa: E501
    if not batch_devices:
        fatal('batch_devices must be provided if action is "batch"', module)

    # Build the CLI
    cmd = build_cmd(['lvm', 'batch'], container_image, cluster)
    cmd.append('--%s' % objectstore)
    if not report:
        cmd.append('--yes')
    if container_image:
        cmd.append('--prepare')

    if crush_device_class:
        cmd.extend(['--crush-device-class', crush_device_class])
    if dmcrypt:
        cmd.append('--dmcrypt')
    if osds_per_device > 1:
        cmd.extend(['--osds-per-device', str(osds_per_device)])
    if objectstore == 'filestore':
        cmd.extend(['--journal-size', journal_size])
    if objectstore == 'bluestore' and block_db_size != '-1':
        cmd.extend(['--block-db-size', block_db_size])

    cmd.extend(batch_devices)

    # Each auxiliary device list only applies to its matching objectstore.
    for devices, flag, store in (
            (journal_devices, '--journal-devices', 'filestore'),
            (block_db_devices, '--db-devices', 'bluestore'),
            (wal_devices, '--wal-devices', 'bluestore')):
        if devices and objectstore == store:
            cmd.append(flag)
            cmd.extend(devices)

    return cmd
def run_module():
    '''
    Entry point for the RGW zone Ansible module: create, update, remove
    or query a RADOS Gateway zone depending on 'state'.
    '''
    module_args = dict(
        cluster=dict(type='str', required=False, default='ceph'),
        name=dict(type='str', required=True),
        state=dict(type='str', required=False,
                   choices=['present', 'absent', 'info'],
                   default='present'),  # noqa: E501
        # FIX: these three previously used the misspelled keyword
        # 'require', which AnsibleModule silently ignores, so 'realm' and
        # 'zonegroup' were never actually enforced as mandatory.
        realm=dict(type='str', required=True),
        zonegroup=dict(type='str', required=True),
        endpoints=dict(type='list', required=False, default=[]),
        access_key=dict(type='str', required=False, no_log=True),
        secret_key=dict(type='str', required=False, no_log=True),
        default=dict(type='bool', required=False, default=False),
        master=dict(type='bool', required=False, default=False),
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
    )

    # Gather module parameters in variables
    name = module.params.get('name')
    state = module.params.get('state')
    endpoints = module.params.get('endpoints')
    access_key = module.params.get('access_key')
    secret_key = module.params.get('secret_key')

    # Check mode: report "no change" without running anything.
    if module.check_mode:
        module.exit_json(
            changed=False,
            stdout='',
            stderr='',
            rc=0,
            start='',
            end='',
            delta='',
        )

    startd = datetime.datetime.now()
    changed = False

    # will return either the image name or None
    container_image = is_containerized()

    if state == "present":
        rc, cmd, out, err = exec_commands(
            module, get_zone(module, container_image=container_image))  # noqa: E501
        if rc == 0:
            # The zone exists: compare its current definition with the
            # requested one and only modify when they differ.
            zone = json.loads(out)
            _rc, _cmd, _out, _err = exec_commands(
                module, get_realm(module, container_image=container_image))  # noqa: E501
            if _rc != 0:
                fatal(_err, module)
            realm = json.loads(_out)
            _rc, _cmd, _out, _err = exec_commands(
                module, get_zonegroup(module, container_image=container_image))  # noqa: E501
            if _rc != 0:
                fatal(_err, module)
            zonegroup = json.loads(_out)
            if not access_key:
                access_key = ''
            if not secret_key:
                secret_key = ''
            # FIX: the generator previously reused the name 'zone' for its
            # iteration variable, shadowing the zone dict above; renamed
            # for clarity (behavior unchanged -- genexps have their own
            # scope). NOTE(review): next() raises StopIteration if this
            # zone is not listed in the zonegroup -- confirm that is an
            # acceptable failure mode.
            current = {
                'endpoints': next(zg_zone['endpoints']
                                  for zg_zone in zonegroup['zones']
                                  if zg_zone['name'] == name),  # noqa: E501
                'access_key': zone['system_key']['access_key'],
                'secret_key': zone['system_key']['secret_key'],
                'realm_id': zone['realm_id']
            }
            asked = {
                'endpoints': endpoints,
                'access_key': access_key,
                'secret_key': secret_key,
                'realm_id': realm['id']
            }
            if current != asked:
                rc, cmd, out, err = exec_commands(
                    module, modify_zone(module, container_image=container_image))  # noqa: E501
                changed = True
        else:
            rc, cmd, out, err = exec_commands(
                module, create_zone(module, container_image=container_image))  # noqa: E501
            changed = True

    elif state == "absent":
        rc, cmd, out, err = exec_commands(
            module, get_zone(module, container_image=container_image))  # noqa: E501
        if rc == 0:
            rc, cmd, out, err = exec_commands(
                module, remove_zone(module, container_image=container_image))  # noqa: E501
            changed = True
        else:
            # Nothing to remove; report success.
            rc = 0
            out = "Zone {} doesn't exist".format(name)

    elif state == "info":
        rc, cmd, out, err = exec_commands(
            module, get_zone(module, container_image=container_image))  # noqa: E501

    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err,
                startd=startd, changed=changed)  # noqa: E501
def run_module():
    '''
    Entry point for the RGW zonegroup Ansible module: create, update,
    remove or query a RADOS Gateway zonegroup depending on 'state'.
    '''
    module_args = dict(
        cluster=dict(type='str', required=False, default='ceph'),
        name=dict(type='str', required=True),
        state=dict(type='str', required=False,
                   choices=['present', 'absent', 'info'],
                   default='present'),
        # FIX: these two previously used the misspelled keyword 'require',
        # which AnsibleModule silently ignores, so 'realm' was never
        # actually enforced as mandatory.
        realm=dict(type='str', required=True),
        endpoints=dict(type='list', required=False, default=[]),
        default=dict(type='bool', required=False, default=False),
        master=dict(type='bool', required=False, default=False),
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True,
    )

    # Gather module parameters in variables
    name = module.params.get('name')
    state = module.params.get('state')
    endpoints = module.params.get('endpoints')
    # radosgw-admin reports is_master as a lowercase string, so the bool
    # parameter is normalized the same way for comparison below.
    master = str(module.params.get('master')).lower()

    # Check mode: report "no change" without running anything.
    if module.check_mode:
        module.exit_json(
            changed=False,
            stdout='',
            stderr='',
            rc=0,
            start='',
            end='',
            delta='',
        )

    startd = datetime.datetime.now()
    changed = False

    # will return either the image name or None
    container_image = is_containerized()

    if state == "present":
        rc, cmd, out, err = exec_commands(
            module, get_zonegroup(module, container_image=container_image))
        if rc == 0:
            # The zonegroup exists: compare its current definition with the
            # requested one and only modify when they differ.
            zonegroup = json.loads(out)
            _rc, _cmd, _out, _err = exec_commands(
                module, get_realm(module, container_image=container_image))
            if _rc != 0:
                fatal(_err, module)
            realm = json.loads(_out)
            current = {
                'endpoints': zonegroup['endpoints'],
                'master': zonegroup.get('is_master', 'false'),
                'realm_id': zonegroup['realm_id']
            }
            asked = {
                'endpoints': endpoints,
                'master': master,
                'realm_id': realm['id']
            }
            if current != asked:
                rc, cmd, out, err = exec_commands(
                    module, modify_zonegroup(module, container_image=container_image))  # noqa: E501
                changed = True
        else:
            rc, cmd, out, err = exec_commands(
                module, create_zonegroup(module, container_image=container_image))  # noqa: E501
            changed = True

    elif state == "absent":
        rc, cmd, out, err = exec_commands(
            module, get_zonegroup(module, container_image=container_image))
        if rc == 0:
            rc, cmd, out, err = exec_commands(
                module, remove_zonegroup(module, container_image=container_image))  # noqa: E501
            changed = True
        else:
            # Nothing to remove; report success.
            rc = 0
            out = "Zonegroup {} doesn't exist".format(name)

    elif state == "info":
        rc, cmd, out, err = exec_commands(
            module, get_zonegroup(module, container_image=container_image))

    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err,
                startd=startd, changed=changed)
def run_module():
    '''
    Entry point for the Ceph dashboard user Ansible module: create,
    update, remove or query a dashboard user depending on 'state'.
    '''
    module_args = dict(
        cluster=dict(type='str', required=False, default='ceph'),
        name=dict(type='str', required=True),
        state=dict(type='str', required=False,
                   choices=['present', 'absent', 'info'],
                   default='present'),  # noqa: E501
        password=dict(type='str', required=False, no_log=True),
        roles=dict(
            type='list',
            required=False,
            choices=[
                'administrator', 'read-only', 'block-manager', 'rgw-manager',
                'cluster-manager', 'pool-manager', 'cephfs-manager'
            ],  # noqa: E501
            default=[]),
    )

    # 'password' is only mandatory when creating/updating the user.
    module = AnsibleModule(argument_spec=module_args,
                           supports_check_mode=True,
                           required_if=[['state', 'present', ['password']]])

    # Gather module parameters in variables
    name = module.params.get('name')
    state = module.params.get('state')
    roles = module.params.get('roles')
    password = module.params.get('password')

    # Check mode: report "no change" without running anything.
    if module.check_mode:
        module.exit_json(
            changed=False,
            stdout='',
            stderr='',
            rc=0,
            start='',
            end='',
            delta='',
        )

    startd = datetime.datetime.now()
    changed = False

    # will return either the image name or None
    container_image = is_containerized()

    if state == "present":
        rc, cmd, out, err = exec_command(
            module, get_user(module, container_image=container_image))  # noqa: E501
        if rc == 0:
            # User exists: reconcile roles, then (re)set the password.
            user = json.loads(out)
            # Sort both sides so the comparison ignores ordering.
            user['roles'].sort()
            roles.sort()
            if user['roles'] != roles:
                rc, cmd, out, err = exec_command(
                    module, set_roles(module, container_image=container_image))  # noqa: E501
                changed = True
            # NOTE(review): the password is reset on every run without
            # toggling 'changed' -- confirm that is intentional.
            rc, cmd, out, err = exec_command(
                module, set_password(module, container_image=container_image),
                stdin=password)  # noqa: E501
        else:
            # User does not exist: create it, then assign its roles.
            rc, cmd, out, err = exec_command(
                module, create_user(module, container_image=container_image),
                stdin=password)  # noqa: E501
            if rc != 0:
                fatal(err, module)
            rc, cmd, out, err = exec_command(
                module, set_roles(module, container_image=container_image))  # noqa: E501
            changed = True

    elif state == "absent":
        rc, cmd, out, err = exec_command(
            module, get_user(module, container_image=container_image))  # noqa: E501
        if rc == 0:
            rc, cmd, out, err = exec_command(
                module, remove_user(module, container_image=container_image))  # noqa: E501
            changed = True
        else:
            # Nothing to remove; report success.
            rc = 0
            out = "Dashboard User {} doesn't exist".format(name)

    elif state == "info":
        rc, cmd, out, err = exec_command(
            module, get_user(module, container_image=container_image))  # noqa: E501

    exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err,
                startd=startd, changed=changed)  # noqa: E501