Code example #1
def disk_zap(args):

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        distro.conn.remote_module.zeroing(disk)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                'zap',
                disk,
            ],
        )

        # once all is done, call partprobe (or partx)
        # On RHEL and CentOS distros, calling partprobe forces a reboot of the
        # server. Since we are not resizing partitions we rely on calling
        # partx
        if distro.normalized_name.startswith(('centos', 'red')):
            LOG.info('calling partx on zapped device %s', disk)
            LOG.info('re-reading known partitions will display errors')
            partx_executable = system.executable_path(distro.conn, 'partx')
            remoto.process.run(
                distro.conn,
                [
                    partx_executable,
                    '-a',
                    disk,
                ],
            )

        else:
            LOG.debug('Calling partprobe on zapped device %s', disk)
            partprobe_executable = system.executable_path(distro.conn, 'partprobe')
            remoto.process.run(
                distro.conn,
                [
                    partprobe_executable,
                    disk,
                ],
            )

        distro.conn.exit()
Code example #2
File: osd.py Project: AvengerMoJo/ceph-deploy
def disk_zap(args):

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        distro.conn.remote_module.zeroing(disk)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                'zap',
                disk,
            ],
        )

        # once all is done, call partprobe (or partx)
        # On RHEL and CentOS distros, calling partprobe forces a reboot of the
        # server. Since we are not resizing partitions we rely on calling
        # partx
        if distro.normalized_name.startswith(('centos', 'red')):
            LOG.info('calling partx on zapped device %s', disk)
            LOG.info('re-reading known partitions will display errors')
            partx_executable = system.executable_path(distro.conn, 'partx')
            remoto.process.run(
                distro.conn,
                [
                    partx_executable,
                    '-a',
                    disk,
                ],
            )

        else:
            LOG.debug('Calling partprobe on zapped device %s', disk)
            partprobe_executable = system.executable_path(distro.conn, 'partprobe')
            remoto.process.run(
                distro.conn,
                [
                    partprobe_executable,
                    disk,
                ],
            )

        distro.conn.exit()
Code example #3
File: osd.py Project: tzhanglinx/ceph-deploy
def disk_zap(args):

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug("zapping %s on %s", disk, hostname)
        distro = hosts.get(hostname, username=args.username)
        LOG.info("Distro info: %s %s %s", distro.name, distro.release, distro.codename)

        distro.conn.remote_module.zeroing(disk)

        ceph_disk_executable = system.executable_path(distro.conn, "ceph-disk")
        remoto.process.run(distro.conn, [ceph_disk_executable, "zap", disk])

        # once all is done, call partprobe (or partx)
        # On RHEL and CentOS distros, calling partprobe forces a reboot of the
        # server. Since we are not resizing partitions we rely on calling
        # partx
        if distro.normalized_name.startswith(("centos", "red")):
            LOG.info("calling partx on zapped device %s", disk)
            LOG.info("re-reading known partitions will display errors")
            remoto.process.run(distro.conn, ["partx", "-a", disk])

        else:
            LOG.debug("Calling partprobe on zapped device %s", disk)
            remoto.process.run(distro.conn, ["partprobe", disk])

        distro.conn.exit()
Code example #4
def disk_zap(args):

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError(
                'zap command needs both HOSTNAME and DISK but got "%s %s"' %
                (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(hostname,
                           username=args.username,
                           callbacks=[packages.ceph_is_installed])
        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)

        distro.conn.remote_module.zeroing(disk)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                'zap',
                disk,
            ],
        )

        distro.conn.exit()
Code example #5
File: osd.py Project: codenrhoden/ceph-deploy
def disk_zap(args):

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        distro.conn.remote_module.zeroing(disk)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                'zap',
                disk,
            ],
        )

        distro.conn.exit()
Code example #6
def osd_list(args, cfg):
    monitors = mon.get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    # get the osd tree from a monitor host
    mon_host = monitors[0]
    distro = hosts.get(mon_host, username=args.username)
    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    interesting_files = ['active', 'magic', 'whoami', 'journal_uuid']

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module
        osds = distro.conn.remote_module.listdir(constants.osd_path)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        output, err, exit_code = remoto.process.check(distro.conn, [
            ceph_disk_executable,
            'list',
        ])

        for _osd in osds:
            osd_path = os.path.join(constants.osd_path, _osd)
            journal_path = os.path.join(osd_path, 'journal')
            _id = int(_osd.split('-')[-1])  # split on dash, get the id
            osd_name = 'osd.%s' % _id
            metadata = {}
            json_blob = {}

            # piggy back from ceph-disk and get the mount point
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata['device'] = device

            # read interesting metadata from files
            for f in interesting_files:
                osd_f_path = os.path.join(osd_path, f)
                if remote_module.path_exists(osd_f_path):
                    metadata[f] = remote_module.readline(osd_f_path)

            # do we have a journal path?
            if remote_module.path_exists(journal_path):
                metadata['journal path'] = remote_module.get_realpath(
                    journal_path)

            # is this OSD in osd tree?
            for blob in tree['nodes']:
                if blob.get('id') == _id:  # matches our OSD
                    json_blob = blob

            print_osd(
                distro.conn.logger,
                hostname,
                osd_path,
                json_blob,
                metadata,
            )

        distro.conn.exit()
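
The osd_list example above piggybacks on the output of "ceph-disk list" to work out which device an OSD is mounted on. As a rough illustration of that step, the hypothetical helper below shows how such a lookup could work, assuming "ceph-disk list" prints lines such as "/dev/sdb1 ceph data, active, cluster ceph, osd.0, journal /dev/sdb2". It is only a sketch; the real get_osd_mount_point helper in ceph-deploy may parse the output differently.

def get_osd_mount_point_sketch(output, osd_name):
    # Hypothetical sketch, not the actual ceph-deploy helper: scan the
    # ceph-disk list output for the line mentioning the given osd name
    # (e.g. 'osd.0') and return the device, i.e. the first token on it.
    for line in output:
        if osd_name in line:
            return line.split()[0]
    return None
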
Code example #7
def osd_list(args, cfg):
    for hostname in args.host:
        distro = hosts.get(hostname,
                           username=args.username,
                           callbacks=[packages.ceph_is_installed])
        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)

        LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))
        ceph_volume_executable = system.executable_path(
            distro.conn, 'ceph-volume')
        if args.debug:
            remoto.process.run(distro.conn, [
                ceph_volume_executable,
                'lvm',
                'list',
            ],
                               env={'CEPH_VOLUME_DEBUG': '1'})
        else:
            remoto.process.run(
                distro.conn,
                [
                    ceph_volume_executable,
                    'lvm',
                    'list',
                ],
            )
        distro.conn.exit()
Code example #8
File: osd.py Project: tzhanglinx/ceph-deploy
def osd_tree(conn, cluster):
    """
    Check the status of an OSD. Make sure all are up and in

    What good output would look like::

        {
            "epoch": 8,
            "num_osds": 1,
            "num_up_osds": 1,
            "num_in_osds": "1",
            "full": "false",
            "nearfull": "false"
        }

    Note how the booleans are actually strings, so we need to take that into
    account and fix it before returning the dictionary. Issue #8108
    """
    ceph_executable = system.executable_path(conn, "ceph")
    command = [ceph_executable, "--cluster={cluster}".format(cluster=cluster), "osd", "tree", "--format=json"]

    out, err, code = remoto.process.check(conn, command)

    try:
        loaded_json = json.loads("".join(out))
        # convert boolean strings to actual booleans because
        # --format=json fails to do this properly
        for k, v in loaded_json.items():
            if v == "true":
                loaded_json[k] = True
            elif v == "false":
                loaded_json[k] = False
        return loaded_json
    except ValueError:
        return {}
Code example #9
File: osd.py Project: tzhanglinx/ceph-deploy
def disk_list(args, cfg):
    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        LOG.info("Distro info: %s %s %s", distro.name, distro.release, distro.codename)

        LOG.debug("Listing disks on {hostname}...".format(hostname=hostname))
        ceph_disk_executable = system.executable_path(distro.conn, "ceph-disk")
        remoto.process.run(distro.conn, [ceph_disk_executable, "list"])
        distro.conn.exit()
Code example #10
File: osd.py Project: ceph/ceph-deploy
def create_osd(
        conn,
        cluster,
        data,
        journal,
        zap,
        fs_type,
        dmcrypt,
        dmcrypt_dir,
        storetype,
        block_wal,
        block_db,
        **kw):
    """
    Run on osd node, creates an OSD from a data disk.
    """
    ceph_volume_executable = system.executable_path(conn, 'ceph-volume')
    args = [
        ceph_volume_executable,
        '--cluster', cluster,
        'lvm',
        'create',
        '--%s' % storetype,
        '--data', data
    ]
    if zap:
        LOG.warning('zapping is no longer supported when preparing')
    if dmcrypt:
        args.append('--dmcrypt')
        # TODO: re-enable dmcrypt support once ceph-volume grows it
        LOG.warning('dmcrypt is currently not supported')

    if storetype == 'bluestore':
        if block_wal:
            args.append('--block.wal')
            args.append(block_wal)
        if block_db:
            args.append('--block.db')
            args.append(block_db)
    elif storetype == 'filestore':
        if not journal:
            raise RuntimeError('A journal lv or GPT partition must be specified when using filestore')
        args.append('--journal')
        args.append(journal)

    if kw.get('debug'):
        remoto.process.run(
            conn,
            args,
            extend_env={'CEPH_VOLUME_DEBUG': '1'}
        )

    else:
        remoto.process.run(
            conn,
            args
        )
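
For reference, tracing the argument list that create_osd above builds for a bluestore OSD gives a command along the following lines. The call shown is invented for illustration and the resulting command is reconstructed from the code, not captured from a real run:

# Hypothetical call:
#   create_osd(conn, 'ceph', '/dev/sdb', None, False, 'xfs',
#              False, None, 'bluestore', '/dev/sdc1', '/dev/sdc2')
#
# Command assembled for the remote host (roughly):
#   ceph-volume --cluster ceph lvm create --bluestore \
#       --data /dev/sdb --block.wal /dev/sdc1 --block.db /dev/sdc2
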
Code example #11
File: osd.py Project: codenrhoden/ceph-deploy
def prepare_disk(
        conn,
        cluster,
        disk,
        journal,
        activate_prepared_disk,
        init,
        zap,
        fs_type,
        dmcrypt,
        dmcrypt_dir,
        storetype):
    """
    Run on osd node, prepares a data disk for use.
    """
    ceph_disk_executable = system.executable_path(conn, 'ceph-disk')
    args = [
        ceph_disk_executable,
        '-v',
        'prepare',
        ]
    if zap:
        args.append('--zap-disk')
    if dmcrypt:
        args.append('--dmcrypt')
        if dmcrypt_dir is not None:
            args.append('--dmcrypt-key-dir')
            args.append(dmcrypt_dir)
    if storetype:
        args.append('--' + storetype)
    args.extend([
        '--cluster',
        cluster,
        '--fs-type',
        fs_type,
        '--',
        disk,
    ])

    if journal is not None:
        args.append(journal)

    remoto.process.run(
        conn,
        args
    )

    if activate_prepared_disk:
        # we don't simply run activate here because we don't know
        # which partition ceph-disk prepare created as the data
        # volume.  instead, we rely on udev to do the activation and
        # just give it a kick to ensure it wakes up.  we also enable
        # ceph.target, the other key piece of activate.
        if init == 'systemd':
            system.enable_service(conn, "ceph.target")
        elif init == 'sysvinit':
            system.enable_service(conn, "ceph")
Code example #12
def create_osd(
        conn,
        cluster,
        data,
        journal,
        zap,
        fs_type,
        dmcrypt,
        dmcrypt_dir,
        storetype,
        block_wal,
        block_db,
        **kw):
    """
    Run on osd node, creates an OSD from a data disk.
    """
    ceph_volume_executable = system.executable_path(conn, 'ceph-volume')
    args = [
        ceph_volume_executable,
        '--cluster', cluster,
        'lvm',
        'create',
        '--%s' % storetype,
        '--data', data
    ]
    if zap:
        LOG.warning('zapping is no longer supported when preparing')
    if dmcrypt:
        args.append('--dmcrypt')
        # TODO: re-enable dmcrypt support once ceph-volume grows it
        LOG.warning('dmcrypt is currently not supported')

    if storetype == 'bluestore':
        if block_wal:
            args.append('--block.wal')
            args.append(block_wal)
        if block_db:
            args.append('--block.db')
            args.append(block_db)
    elif storetype == 'filestore':
        if not journal:
            raise RuntimeError('A journal lv or GPT partition must be specified when using filestore')
        args.append('--journal')
        args.append(journal)

    if kw.get('debug'):
        remoto.process.run(
            conn,
            args,
            env={'CEPH_VOLUME_DEBUG': '1'}
        )

    else:
        remoto.process.run(
            conn,
            args
        )
Code example #13
def prepare_disk(
        conn,
        cluster,
        disk,
        journal,
        activate_prepared_disk,
        init,
        zap,
        fs_type,
        dmcrypt,
        dmcrypt_dir):
    """
    Run on osd node, prepares a data disk for use.
    """
    ceph_disk_executable = system.executable_path(conn, 'ceph-disk')
    args = [
        ceph_disk_executable,
        '-v',
        'prepare',
        ]
    if zap:
        args.append('--zap-disk')
    if dmcrypt:
        args.append('--dmcrypt')
        if dmcrypt_dir is not None:
            args.append('--dmcrypt-key-dir')
            args.append(dmcrypt_dir)
    args.extend([
        '--cluster',
        cluster,
        '--fs-type',
        fs_type,
        '--',
        disk,
    ])

    if journal is not None:
        args.append(journal)

    remoto.process.run(
        conn,
        args
    )

    if activate_prepared_disk:
        # we don't simply run activate here because we don't know
        # which partition ceph-disk prepare created as the data
        # volume.  instead, we rely on udev to do the activation and
        # just give it a kick to ensure it wakes up.  we also enable
        # ceph.target, the other key piece of activate.
        if init == 'systemd':
            system.enable_service(conn, "ceph.target")
        elif init == 'sysvinit':
            system.enable_service(conn, "ceph")
Code example #14
def osd_status_check(conn, cluster):
    """
    Check the status of an OSD. Make sure all are up and in

    What good output would look like::

        {
            "epoch": 8,
            "num_osds": 1,
            "num_up_osds": 1,
            "num_in_osds": "1",
            "full": "false",
            "nearfull": "false"
        }

    Note how the booleans are actually strings, so we need to take that into
    account and fix it before returning the dictionary. Issue #8108
    """
    ceph_executable = system.executable_path(conn, 'ceph')
    command = [
        ceph_executable,
        '--cluster={cluster}'.format(cluster=cluster),
        'osd',
        'stat',
        '--format=json',
    ]

    try:
        out, err, code = remoto.process.check(
            conn,
            command,
        )
    except TypeError:
        # XXX This is a bug in remoto. If the other end disconnects with a timeout
        # it will return a None, and here we are expecting a 3 item tuple, not a None
        # so it will break with a TypeError. Once remoto fixes this, we no longer need
        # this try/except.
        return {}

    try:
        loaded_json = json.loads(b''.join(out).decode('utf-8'))
        # convert boolean strings to actual booleans because
        # --format=json fails to do this properly
        for k, v in loaded_json.items():
            if v == 'true':
                loaded_json[k] = True
            elif v == 'false':
                loaded_json[k] = False
        return loaded_json
    except ValueError:
        return {}
Code example #15
File: osd.py Project: codenrhoden/ceph-deploy
def osd_status_check(conn, cluster):
    """
    Check the status of an OSD. Make sure all are up and in

    What good output would look like::

        {
            "epoch": 8,
            "num_osds": 1,
            "num_up_osds": 1,
            "num_in_osds": "1",
            "full": "false",
            "nearfull": "false"
        }

    Note how the booleans are actually strings, so we need to take that into
    account and fix it before returning the dictionary. Issue #8108
    """
    ceph_executable = system.executable_path(conn, 'ceph')
    command = [
        ceph_executable,
        '--cluster={cluster}'.format(cluster=cluster),
        'osd',
        'stat',
        '--format=json',
    ]

    try:
        out, err, code = remoto.process.check(
            conn,
            command,
        )
    except TypeError:
        # XXX This is a bug in remoto. If the other end disconnects with a timeout
        # it will return a None, and here we are expecting a 3 item tuple, not a None
        # so it will break with a TypeError. Once remoto fixes this, we no longer need
        # this try/except.
        return {}

    try:
        loaded_json = json.loads(b''.join(out).decode('utf-8'))
        # convert boolean strings to actual booleans because
        # --format=json fails to do this properly
        for k, v in loaded_json.items():
            if v == 'true':
                loaded_json[k] = True
            elif v == 'false':
                loaded_json[k] = False
        return loaded_json
    except ValueError:
        return {}
Code example #16
File: osd.py Project: tzhanglinx/ceph-deploy
def osd_list(args, cfg):
    monitors = mon.get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    # get the osd tree from a monitor host
    mon_host = monitors[0]
    distro = hosts.get(mon_host, username=args.username)
    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    interesting_files = ["active", "magic", "whoami", "journal_uuid"]

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module
        osds = distro.conn.remote_module.listdir(constants.osd_path)

        ceph_disk_executable = system.executable_path(distro.conn, "ceph-disk")
        output, err, exit_code = remoto.process.check(distro.conn, [ceph_disk_executable, "list"])

        for _osd in osds:
            osd_path = os.path.join(constants.osd_path, _osd)
            journal_path = os.path.join(osd_path, "journal")
            _id = int(_osd.split("-")[-1])  # split on dash, get the id
            osd_name = "osd.%s" % _id
            metadata = {}
            json_blob = {}

            # piggy back from ceph-disk and get the mount point
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata["device"] = device

            # read interesting metadata from files
            for f in interesting_files:
                osd_f_path = os.path.join(osd_path, f)
                if remote_module.path_exists(osd_f_path):
                    metadata[f] = remote_module.readline(osd_f_path)

            # do we have a journal path?
            if remote_module.path_exists(journal_path):
                metadata["journal path"] = remote_module.get_realpath(journal_path)

            # is this OSD in osd tree?
            for blob in tree["nodes"]:
                if blob.get("id") == _id:  # matches our OSD
                    json_blob = blob

            print_osd(distro.conn.logger, hostname, osd_path, json_blob, metadata)

        distro.conn.exit()
Code example #17
File: osd.py Project: codenrhoden/ceph-deploy
def activate(args, cfg):
    LOG.debug(
        'Activating cluster %s disks %s',
        args.cluster,
        # join elements of t with ':', t's with ' '
        # allow None in elements of t; print as empty
        ' '.join(':'.join((s or '') for s in t) for t in args.disk),
        )

    for hostname, disk, journal in args.disk:

        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        LOG.debug('activating host %s disk %s', hostname, disk)
        LOG.debug('will use init type: %s', distro.init)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                '-v',
                'activate',
                '--mark-init',
                distro.init,
                '--mount',
                disk,
            ],
        )
        # give the OSD a few seconds to start
        time.sleep(5)
        catch_osd_errors(distro.conn, distro.conn.logger, args)

        if distro.init == 'systemd':
            system.enable_service(distro.conn, "ceph.target")
        elif distro.init == 'sysvinit':
            system.enable_service(distro.conn, "ceph")

        distro.conn.exit()
Code example #18
File: osd.py Project: zyt19941113/ceph-deploy
def activate(args, cfg):
    LOG.debug(
        'Activating cluster %s disks %s',
        args.cluster,
        # join elements of t with ':', t's with ' '
        # allow None in elements of t; print as empty
        ' '.join(':'.join((s or '') for s in t) for t in args.disk),
        )

    for hostname, disk, journal in args.disk:

        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        LOG.debug('activating host %s disk %s', hostname, disk)
        LOG.debug('will use init type: %s', distro.init)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                '-v',
                'activate',
                '--mark-init',
                distro.init,
                '--mount',
                disk,
            ],
        )
        # give the OSD a few seconds to start
        time.sleep(5)
        catch_osd_errors(distro.conn, distro.conn.logger, args)

        if distro.init == 'systemd':
            system.enable_service(distro.conn, "ceph.target")
        elif distro.init == 'sysvinit':
            system.enable_service(distro.conn, "ceph")

        distro.conn.exit()
Code example #19
def disk_list(args, cfg):
    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)

        LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))
        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                'list',
            ],
        )
        distro.conn.exit()
Code example #20
def disk_zap(args):

    hostname = args.host
    for disk in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        distro.conn.remote_module.zeroing(disk)

        ceph_volume_executable = system.executable_path(distro.conn, 'ceph-volume')
        if args.debug:
            remoto.process.run(
                distro.conn,
                [
                    ceph_volume_executable,
                    'lvm',
                    'zap',
                    disk,
                ],
                env={'CEPH_VOLUME_DEBUG': '1'}
            )
        else:
            remoto.process.run(
                distro.conn,
                [
                    ceph_volume_executable,
                    'lvm',
                    'zap',
                    disk,
                ],
            )

        distro.conn.exit()
Code example #21
File: osd.py Project: ceph/ceph-deploy
def disk_zap(args):

    hostname = args.host
    for disk in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        distro.conn.remote_module.zeroing(disk)

        ceph_volume_executable = system.executable_path(distro.conn, 'ceph-volume')
        if args.debug:
            remoto.process.run(
                distro.conn,
                [
                    ceph_volume_executable,
                    'lvm',
                    'zap',
                    disk,
                ],
                env={'CEPH_VOLUME_DEBUG': '1'}
            )
        else:
            remoto.process.run(
                distro.conn,
                [
                    ceph_volume_executable,
                    'lvm',
                    'zap',
                    disk,
                ],
            )

        distro.conn.exit()
Code example #22
def osd_tree(conn, cluster):
    """
    Check the status of an OSD. Make sure all are up and in

    What good output would look like::

        {
            "epoch": 8,
            "num_osds": 1,
            "num_up_osds": 1,
            "num_in_osds": "1",
            "full": "false",
            "nearfull": "false"
        }

    Note how the booleans are actually strings, so we need to take that into
    account and fix it before returning the dictionary. Issue #8108
    """
    ceph_executable = system.executable_path(conn, 'ceph')
    command = [
        ceph_executable,
        '--cluster={cluster}'.format(cluster=cluster),
        'osd',
        'tree',
        '--format=json',
    ]

    out, err, code = remoto.process.check(
        conn,
        command,
    )

    try:
        loaded_json = json.loads(b''.join(out).decode('utf-8'))
        # convert boolean strings to actual booleans because
        # --format=json fails to do this properly
        for k, v in loaded_json.items():
            if v == 'true':
                loaded_json[k] = True
            elif v == 'false':
                loaded_json[k] = False
        return loaded_json
    except ValueError:
        return {}
Code example #23
File: osd.py Project: ceph/ceph-deploy
def osd_list(args, cfg):
    for hostname in args.host:
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))
        ceph_volume_executable = system.executable_path(distro.conn, 'ceph-volume')
        if args.debug:
            remoto.process.run(
                distro.conn,
                [
                    ceph_volume_executable,
                    'lvm',
                    'list',
                ],
                env={'CEPH_VOLUME_DEBUG': '1'}
            )
        else:
            remoto.process.run(
                distro.conn,
                [
                    ceph_volume_executable,
                    'lvm',
                    'list',
                ],
            )
        distro.conn.exit()
Code example #24
File: osd.py Project: liujun01203/ceph-deploy
def disk_list(args, cfg):
    for hostname, disk, journal in args.disk:
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))
        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                'list',
            ],
        )
        distro.conn.exit()
Code example #25
File: test_system.py Project: 1oscar/ceph-deploy
    def test_cannot_find_executable(self):
        fake_conn = Mock()
        fake_conn.remote_module.which = Mock(return_value=None)
        with raises(exc.ExecutableNotFound):
            system.executable_path(fake_conn, 'foo')
Code example #26
File: test_system.py Project: willxiong/ceph-deploy
    def test_returns_path(self):
        fake_conn = Mock()
        fake_conn.remote_module.which = Mock(return_value='/path')
        result = system.executable_path(fake_conn, 'foo')
        assert result == '/path'
Code example #27
File: test_system.py Project: willxiong/ceph-deploy
    def test_cannot_find_executable(self):
        fake_conn = Mock()
        fake_conn.remote_module.which = Mock(return_value=None)
        with raises(exc.ExecutableNotFound):
            system.executable_path(fake_conn, 'foo')
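
Taken together, the test_system.py examples above pin down the contract of system.executable_path: it asks the remote host's which() for the binary and raises ExecutableNotFound when nothing comes back. A minimal sketch of that behaviour, written only against those tests (the exception's constructor arguments are an assumption, and the real ceph-deploy implementation may differ), could look like this:

def executable_path(conn, executable):
    # Sketch based on the tests above: return the remote path reported by
    # which(), or raise ExecutableNotFound if the executable is missing.
    path = conn.remote_module.which(executable)
    if not path:
        # constructor arguments are assumed, not taken from the tests
        raise exc.ExecutableNotFound(executable, conn.hostname)
    return path
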
Code example #28
File: osd.py Project: liujun01203/ceph-deploy
def osd_list(args, cfg):
    monitors = mon.get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    # get the osd tree from a monitor host
    mon_host = monitors[0]
    distro = hosts.get(
        mon_host,
        username=args.username,
        callbacks=[packages.ceph_is_installed]
    )

    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    interesting_files = ['active', 'magic', 'whoami', 'journal_uuid']

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module
        osds = distro.conn.remote_module.listdir(constants.osd_path)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        output, err, exit_code = remoto.process.check(
            distro.conn,
            [
                ceph_disk_executable,
                'list',
            ]
        )

        for _osd in osds:
            osd_path = os.path.join(constants.osd_path, _osd)
            journal_path = os.path.join(osd_path, 'journal')
            _id = int(_osd.split('-')[-1])  # split on dash, get the id
            osd_name = 'osd.%s' % _id
            metadata = {}
            json_blob = {}

            # piggy back from ceph-disk and get the mount point
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata['device'] = device

            # read interesting metadata from files
            for f in interesting_files:
                osd_f_path = os.path.join(osd_path, f)
                if remote_module.path_exists(osd_f_path):
                    metadata[f] = remote_module.readline(osd_f_path)

            # do we have a journal path?
            if remote_module.path_exists(journal_path):
                metadata['journal path'] = remote_module.get_realpath(journal_path)

            # is this OSD in osd tree?
            for blob in tree['nodes']:
                if blob.get('id') == _id:  # matches our OSD
                    json_blob = blob

            print_osd(
                distro.conn.logger,
                hostname,
                osd_path,
                json_blob,
                metadata,
            )

        distro.conn.exit()
Code example #29
File: test_system.py Project: 1oscar/ceph-deploy
    def test_returns_path(self):
        fake_conn = Mock()
        fake_conn.remote_module.which = Mock(return_value='/path')
        result = system.executable_path(fake_conn, 'foo')
        assert result == '/path'
Code example #30
def osd_list(args, cfg):
    # FIXME: this portion should probably be abstracted. We do the same in
    # mon.py
    cfg = conf.ceph.load(args)
    mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
    monitors = re.split(r'[,\s]+', mon_initial_members)

    if not monitors:
        raise exc.NeedHostError(
            'could not find `mon initial members` defined in ceph.conf')

    # get the osd tree from a monitor host
    mon_host = monitors[0]
    distro = hosts.get(mon_host, username=args.username)
    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    interesting_files = ['active', 'magic', 'whoami', 'journal_uuid']

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module
        osds = distro.conn.remote_module.listdir(constants.osd_path)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        output, err, exit_code = remoto.process.check(distro.conn, [
            ceph_disk_executable,
            'list',
        ])

        for _osd in osds:
            osd_path = os.path.join(constants.osd_path, _osd)
            journal_path = os.path.join(osd_path, 'journal')
            _id = int(_osd.split('-')[-1])  # split on dash, get the id
            osd_name = 'osd.%s' % _id
            metadata = {}
            json_blob = {}

            # piggy back from ceph-disk and get the mount point
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata['device'] = device

            # read interesting metadata from files
            for f in interesting_files:
                osd_f_path = os.path.join(osd_path, f)
                if remote_module.path_exists(osd_f_path):
                    metadata[f] = remote_module.readline(osd_f_path)

            # do we have a journal path?
            if remote_module.path_exists(journal_path):
                metadata['journal path'] = remote_module.get_realpath(
                    journal_path)

            # is this OSD in osd tree?
            for blob in tree['nodes']:
                if blob.get('id') == _id:  # matches our OSD
                    json_blob = blob

            print_osd(
                distro.conn.logger,
                hostname,
                osd_path,
                json_blob,
                metadata,
            )

        distro.conn.exit()
Code example #31
File: packages.py Project: SUSE/ceph-deploy
    def executable(self):
        try:
            return system.executable_path(self.conn, 'ceph')
        except ExecutableNotFound:
            return None
Code example #32
    def executable(self):
        try:
            return system.executable_path(self.conn, 'ceph')
        except ExecutableNotFound:
            return None
Code example #33
File: osd.py Project: hl10502/ceph-deploy
def osd_list(args, cfg):
    monitors = mon.get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    # get the osd tree from a monitor host
    mon_host = monitors[0]
    distro = hosts.get(mon_host,
                       username=args.username,
                       callbacks=[packages.ceph_is_installed])

    # run the ceph --cluster=ceph osd tree --format=json command to get the osd information
    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    interesting_files = ['active', 'magic', 'whoami', 'journal_uuid']

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module
        # get the osd names under the OSD directory /var/run/ceph/osd
        osds = distro.conn.remote_module.listdir(constants.osd_path)

        # run the ceph-disk list command to get disk and partition information
        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        output, err, exit_code = remoto.process.check(distro.conn, [
            ceph_disk_executable,
            'list',
        ])

        # loop over the OSDs
        for _osd in osds:
            # osd path, e.g. /var/run/ceph/osd/ceph-0
            osd_path = os.path.join(constants.osd_path, _osd)
            # journal path
            journal_path = os.path.join(osd_path, 'journal')
            # the OSD id
            _id = int(_osd.split('-')[-1])  # split on dash, get the id
            osd_name = 'osd.%s' % _id
            metadata = {}
            json_blob = {}

            # piggy back from ceph-disk and get the mount point
            # match the ceph-disk list output against the osd name to get the disk device
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata['device'] = device

            # read interesting metadata from files
            # read the active, magic, whoami and journal_uuid files under the OSD
            for f in interesting_files:
                osd_f_path = os.path.join(osd_path, f)
                if remote_module.path_exists(osd_f_path):
                    metadata[f] = remote_module.readline(osd_f_path)

            # do we have a journal path?
            # get the journal path
            if remote_module.path_exists(journal_path):
                metadata['journal path'] = remote_module.get_realpath(
                    journal_path)

            # is this OSD in osd tree?
            for blob in tree['nodes']:
                if blob.get('id') == _id:  # matches our OSD
                    json_blob = blob

            # print the OSD information
            print_osd(
                distro.conn.logger,
                hostname,
                osd_path,
                json_blob,
                metadata,
            )

        distro.conn.exit()
Code example #34
File: osd.py Project: jebtang/ceph-deploy
def osd_list(args, cfg):
    # FIXME: this portion should probably be abstracted. We do the same in
    # mon.py
    cfg = conf.ceph.load(args)
    mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
    monitors = re.split(r'[,\s]+', mon_initial_members)

    if not monitors:
        raise exc.NeedHostError(
            'could not find `mon initial members` defined in ceph.conf'
        )

    # get the osd tree from a monitor host
    mon_host = monitors[0]
    distro = hosts.get(mon_host, username=args.username)
    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    interesting_files = ['active', 'magic', 'whoami', 'journal_uuid']

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module
        osds = distro.conn.remote_module.listdir(constants.osd_path)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        output, err, exit_code = remoto.process.check(
            distro.conn,
            [
                ceph_disk_executable,
                'list',
            ]
        )

        for _osd in osds:
            osd_path = os.path.join(constants.osd_path, _osd)
            journal_path = os.path.join(osd_path, 'journal')
            _id = int(_osd.split('-')[-1])  # split on dash, get the id
            osd_name = 'osd.%s' % _id
            metadata = {}
            json_blob = {}

            # piggy back from ceph-disk and get the mount point
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata['device'] = device

            # read interesting metadata from files
            for f in interesting_files:
                osd_f_path = os.path.join(osd_path, f)
                if remote_module.path_exists(osd_f_path):
                    metadata[f] = remote_module.readline(osd_f_path)

            # do we have a journal path?
            if remote_module.path_exists(journal_path):
                metadata['journal path'] = remote_module.get_realpath(journal_path)

            # is this OSD in osd tree?
            for blob in tree['nodes']:
                if blob.get('id') == _id:  # matches our OSD
                    json_blob = blob

            print_osd(
                distro.conn.logger,
                hostname,
                osd_path,
                json_blob,
                metadata,
            )

        distro.conn.exit()