def osd_list(args, cfg):
    """Print a summary for every OSD on the hosts named in ``args.disk``.

    The cluster OSD tree is fetched once from the first monitor; then each
    remote host is inspected for OSD directories under ``constants.osd_path``,
    enriched with ``ceph-disk list`` output, and printed via ``print_osd``.
    """
    monitors = mon.get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    # Ask the first monitor host for the cluster-wide OSD tree.
    mon_host = monitors[0]
    distro = hosts.get(mon_host, username=args.username)
    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    interesting_files = ['active', 'magic', 'whoami', 'journal_uuid']

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module
        osd_dirs = remote_module.listdir(constants.osd_path)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        output, err, exit_code = remoto.process.check(
            distro.conn,
            [ceph_disk_executable, 'list'],
        )

        for osd_dir in osd_dirs:
            osd_path = os.path.join(constants.osd_path, osd_dir)
            journal_path = os.path.join(osd_path, 'journal')
            osd_id = int(osd_dir.split('-')[-1])  # dir names end in "-<id>"
            osd_name = 'osd.%s' % osd_id
            metadata = {}

            # Piggy-back on the ceph-disk output to find the mount point.
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata['device'] = device

            # Collect the small metadata files present for this OSD.
            for fname in interesting_files:
                candidate = os.path.join(osd_path, fname)
                if remote_module.path_exists(candidate):
                    metadata[fname] = remote_module.readline(candidate)

            # Resolve the journal symlink when one exists.
            if remote_module.path_exists(journal_path):
                metadata['journal path'] = remote_module.get_realpath(
                    journal_path)

            # Look this OSD up in the tree; empty dict when it is absent.
            matching = [n for n in tree['nodes'] if n.get('id') == osd_id]
            json_blob = matching[-1] if matching else {}

            print_osd(
                distro.conn.logger,
                hostname,
                osd_path,
                json_blob,
                metadata,
            )
        distro.conn.exit()
def osd_list(args, cfg):
    """List OSDs across the hosts supplied in ``args.disk``.

    Pulls the OSD tree from the first monitor, then walks each host's
    OSD directories, merging file metadata, journal paths, and the
    matching tree node into one printed record per OSD.
    """
    monitors = mon.get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    # One connection to the first monitor is enough to grab the OSD tree.
    distro = hosts.get(monitors[0], username=args.username)
    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    wanted_files = ("active", "magic", "whoami", "journal_uuid")

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module
        entries = remote_module.listdir(constants.osd_path)

        disk_tool = system.executable_path(distro.conn, "ceph-disk")
        output, err, exit_code = remoto.process.check(
            distro.conn, [disk_tool, "list"]
        )

        for entry in entries:
            osd_path = os.path.join(constants.osd_path, entry)
            journal_path = os.path.join(osd_path, "journal")
            # Directory names carry the numeric id after the last dash.
            osd_id = int(entry.split("-")[-1])
            osd_name = "osd.%s" % osd_id
            metadata = {}
            json_blob = {}

            # Mount point comes from parsing the ceph-disk listing.
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata["device"] = device

            # Pull in whichever of the well-known metadata files exist.
            for wanted in wanted_files:
                path = os.path.join(osd_path, wanted)
                if remote_module.path_exists(path):
                    metadata[wanted] = remote_module.readline(path)

            if remote_module.path_exists(journal_path):
                metadata["journal path"] = remote_module.get_realpath(
                    journal_path
                )

            # Match this OSD against the tree we fetched earlier.
            for node in tree["nodes"]:
                if node.get("id") == osd_id:
                    json_blob = node

            print_osd(distro.conn.logger, hostname, osd_path, json_blob,
                      metadata)
        distro.conn.exit()
def osd_list(args, cfg):
    """Display a per-OSD summary for every host listed in ``args.disk``.

    After verifying Ceph is installed on the first monitor (via the
    ``ceph_is_installed`` callback) and fetching the OSD tree from it,
    each remote host is scanned for OSD directories and one summary is
    printed per OSD found.
    """
    monitors = mon.get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    # Fetch the OSD tree from the first monitor host; the callback
    # ensures Ceph is actually installed there before we try.
    mon_host = monitors[0]
    distro = hosts.get(
        mon_host,
        username=args.username,
        callbacks=[packages.ceph_is_installed],
    )
    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    interesting_files = ['active', 'magic', 'whoami', 'journal_uuid']

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module
        osd_dirs = remote_module.listdir(constants.osd_path)

        ceph_disk = system.executable_path(distro.conn, 'ceph-disk')
        output, err, exit_code = remoto.process.check(
            distro.conn, [ceph_disk, 'list']
        )

        for osd_dir in osd_dirs:
            osd_path = os.path.join(constants.osd_path, osd_dir)
            journal_path = os.path.join(osd_path, 'journal')
            # The numeric id sits after the final dash of the dir name.
            osd_id = int(osd_dir.split('-')[-1])
            osd_name = 'osd.%s' % osd_id
            metadata = {}

            # The mount point is scraped from the ceph-disk listing.
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata['device'] = device

            # Read each well-known metadata file that exists.
            for fname in interesting_files:
                fpath = os.path.join(osd_path, fname)
                if remote_module.path_exists(fpath):
                    metadata[fname] = remote_module.readline(fpath)

            # Record the real journal location when a journal exists.
            if remote_module.path_exists(journal_path):
                metadata['journal path'] = remote_module.get_realpath(
                    journal_path)

            # Last matching tree node wins; default to an empty dict.
            hits = [n for n in tree['nodes'] if n.get('id') == osd_id]
            json_blob = hits[-1] if hits else {}

            print_osd(
                distro.conn.logger,
                hostname,
                osd_path,
                json_blob,
                metadata,
            )
        distro.conn.exit()
def test_multiple_item_if_mon_not_none(self):
    """A comma-separated 'mon initial members' option yields all hosts."""
    conf = make_fake_conf()
    conf.add_section('global')
    conf.set('global', 'mon initial members', 'AAAA, BBBB')
    members = mon.get_mon_initial_members(Mock(), False, conf)
    assert set(members) == {'AAAA', 'BBBB'}
def test_return_if_mon_none_and_empty_false(self):
    """With no monitors configured and error_on_empty=False, None comes back."""
    conf = make_fake_conf()
    result = mon.get_mon_initial_members(Mock(), False, conf)
    assert result is None
def test_assert_if_mon_none_and_empty_True(self):
    """With no monitors configured and error_on_empty=True, NeedHostError raises."""
    conf = make_fake_conf()
    with pytest.raises(exc.NeedHostError):
        mon.get_mon_initial_members(Mock(), True, conf)
def osd_list(args, cfg):
    """Enumerate and print every OSD on the hosts given in ``args.disk``.

    The OSD tree is obtained once from the first monitor (which is also
    checked for a Ceph installation), after which each remote host's OSD
    directories are inspected and summarized with ``print_osd``.
    """
    monitors = mon.get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    # Run `ceph --cluster=<cluster> osd tree --format=json` on the first
    # monitor to obtain OSD information for the whole cluster.
    mon_host = monitors[0]
    distro = hosts.get(
        mon_host,
        username=args.username,
        callbacks=[packages.ceph_is_installed],
    )
    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    interesting_files = ['active', 'magic', 'whoami', 'journal_uuid']

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module

        # OSD directory names found under constants.osd_path on the host.
        osd_dirs = remote_module.listdir(constants.osd_path)

        # Run `ceph-disk list` to obtain disk and partition information.
        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        output, err, exit_code = remoto.process.check(
            distro.conn,
            [ceph_disk_executable, 'list'],
        )

        for osd_dir in osd_dirs:
            # e.g. an OSD path such as <osd_path>/ceph-0.
            osd_path = os.path.join(constants.osd_path, osd_dir)
            journal_path = os.path.join(osd_path, 'journal')
            # The OSD id follows the last dash in the directory name.
            osd_id = int(osd_dir.split('-')[-1])
            osd_name = 'osd.%s' % osd_id
            metadata = {}
            json_blob = {}

            # Match the OSD name against the ceph-disk listing to find
            # the backing device / mount point.
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata['device'] = device

            # Read the active, magic, whoami and journal_uuid files when
            # they exist in the OSD directory.
            for fname in interesting_files:
                fpath = os.path.join(osd_path, fname)
                if remote_module.path_exists(fpath):
                    metadata[fname] = remote_module.readline(fpath)

            # Resolve the journal path if a journal is present.
            if remote_module.path_exists(journal_path):
                metadata['journal path'] = remote_module.get_realpath(
                    journal_path)

            # Find this OSD's node in the tree fetched earlier.
            for node in tree['nodes']:
                if node.get('id') == osd_id:
                    json_blob = node

            # Emit the assembled OSD summary.
            print_osd(
                distro.conn.logger,
                hostname,
                osd_path,
                json_blob,
                metadata,
            )
        distro.conn.exit()