def disk_zap(args):
    """
    Destroy the partition table and content of each (hostname, disk) pair
    given on the command line, via ``ceph-disk zap`` on the remote host.

    Raises RuntimeError when a pair is missing either hostname or disk.
    """
    for hostname, disk, journal in args.disk:
        # Guard clause: both pieces are mandatory for a zap.
        if not (hostname and disk):
            raise RuntimeError(
                'zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        # Wipe the beginning of the device before handing it to ceph-disk.
        distro.conn.remote_module.zeroing(disk)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [ceph_disk_executable, 'zap', disk],
        )

        # once all is done, call partprobe (or partx)
        # On RHEL and CentOS distros, calling partprobe forces a reboot of
        # the server. Since we are not resizing partitons we rely on
        # calling partx
        if distro.normalized_name.startswith(('centos', 'red')):
            LOG.info('calling partx on zapped device %s', disk)
            LOG.info('re-reading known partitions will display errors')
            reread_command = ['partx', '-a', disk]
        else:
            LOG.debug('Calling partprobe on zapped device %s', disk)
            reread_command = ['partprobe', disk]
        remoto.process.run(distro.conn, reread_command)

        distro.conn.exit()
def disk_list(args, cfg):
    """
    Run ``ceph-disk list`` on every host named in ``args.disk`` and let the
    remote output flow through the connection logger.
    """
    for hostname, disk, journal in args.disk:
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )
        LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))

        # ceph-disk does the actual enumeration on the remote host.
        list_command = [
            system.executable_path(distro.conn, 'ceph-disk'),
            'list',
        ]
        remoto.process.run(distro.conn, list_command)
        distro.conn.exit()
def osd_tree(conn, cluster):
    """
    Return the parsed JSON output of ``ceph osd tree`` for *cluster*.

    ``--format=json`` emits booleans as the strings ``"true"``/``"false"``
    (see Issue #8108), e.g.::

        {
            "epoch": 8,
            "num_osds": 1,
            "num_up_osds": 1,
            "num_in_osds": "1",
            "full": "false",
            "nearfull": "false"
        }

    Top-level string booleans are therefore converted to real ``bool``
    values before the dictionary is returned. An empty dict is returned
    when the output is not valid JSON.
    """
    ceph_executable = system.executable_path(conn, 'ceph')
    out, err, code = remoto.process.check(
        conn,
        [
            ceph_executable,
            '--cluster={cluster}'.format(cluster=cluster),
            'osd',
            'tree',
            '--format=json',
        ],
    )

    # Only the JSON parse can raise ValueError; keep the try minimal.
    try:
        tree = json.loads(''.join(out))
    except ValueError:
        return {}

    # convert boolean strings to actual booleans because
    # --format=json fails to do this properly
    for key, value in tree.items():
        if value == 'true':
            tree[key] = True
        elif value == 'false':
            tree[key] = False
    return tree
def executable(self):
    """Return the remote path of the ``ceph`` binary, or None if absent."""
    try:
        path = system.executable_path(self.conn, "ceph")
    except ExecutableNotFound:
        path = None
    return path
def executable(self):
    """Locate ``ceph`` on the remote host; None when it cannot be found."""
    try:
        found = system.executable_path(self.conn, 'ceph')
    except ExecutableNotFound:
        return None
    return found
def osd_list(args, cfg): monitors = mon.get_mon_initial_members(args, error_on_empty=True, _cfg=cfg) # get the osd tree from a monitor host mon_host = monitors[0] distro = hosts.get(mon_host, username=args.username, callbacks=[packages.ceph_is_installed]) tree = osd_tree(distro.conn, args.cluster) distro.conn.exit() interesting_files = ['active', 'magic', 'whoami', 'journal_uuid'] for hostname, disk, journal in args.disk: distro = hosts.get(hostname, username=args.username) remote_module = distro.conn.remote_module osds = distro.conn.remote_module.listdir(constants.osd_path) ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk') output, err, exit_code = remoto.process.check(distro.conn, [ ceph_disk_executable, 'list', ]) for _osd in osds: osd_path = os.path.join(constants.osd_path, _osd) journal_path = os.path.join(osd_path, 'journal') _id = int(_osd.split('-')[-1]) # split on dash, get the id osd_name = 'osd.%s' % _id metadata = {} json_blob = {} # piggy back from ceph-disk and get the mount point device = get_osd_mount_point(output, osd_name) if device: metadata['device'] = device # read interesting metadata from files for f in interesting_files: osd_f_path = os.path.join(osd_path, f) if remote_module.path_exists(osd_f_path): metadata[f] = remote_module.readline(osd_f_path) # do we have a journal path? if remote_module.path_exists(journal_path): metadata['journal path'] = remote_module.get_realpath( journal_path) # is this OSD in osd tree? for blob in tree['nodes']: if blob.get('id') == _id: # matches our OSD json_blob = blob print_osd( distro.conn.logger, hostname, osd_path, json_blob, metadata, ) distro.conn.exit()