Example 1
    def test_get_unsupported_message_release(self):
        fake_get_connection = self.make_fake_connection(('Solaris', 'Tijuana', '12'))
        with patch('ceph_deploy.hosts.get_connection', fake_get_connection):
            with raises(exc.UnsupportedPlatform) as error:
                hosts.get('myhost')

    assert str(error.value) == 'Platform is not supported: Solaris 12 Tijuana'
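For context, a minimal sketch of what the make_fake_connection helper used above could look like (written as a plain function here; the Mock wiring is an assumption, not the project's actual fixture). It stands in for ceph_deploy.hosts.get_connection so that no SSH happens and platform detection returns the canned tuple:

try:
    from unittest import mock  # Python 3
except ImportError:
    import mock  # Python 2 backport

def make_fake_connection(platform_information=None):
    # the fake connection reports the canned platform tuple instead of
    # probing a real host
    get_connection = mock.Mock()
    connection = mock.Mock()
    connection.remote_module.platform_information = mock.Mock(
        return_value=platform_information)
    get_connection.return_value = connection
    return get_connection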
Example 2
def osd_list(args, cfg):
    monitors = mon.get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    # get the osd tree from a monitor host
    mon_host = monitors[0]
    distro = hosts.get(mon_host, username=args.username)
    tree = osd_tree(distro.conn, args.cluster)
    distro.conn.exit()

    interesting_files = ["active", "magic", "whoami", "journal_uuid"]

    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        remote_module = distro.conn.remote_module
        osds = remote_module.listdir(constants.osd_path)

        ceph_disk_executable = system.executable_path(distro.conn, "ceph-disk")
        output, err, exit_code = remoto.process.check(distro.conn, [ceph_disk_executable, "list"])

        for _osd in osds:
            osd_path = os.path.join(constants.osd_path, _osd)
            journal_path = os.path.join(osd_path, "journal")
            _id = int(_osd.split("-")[-1])  # split on dash, get the id
            osd_name = "osd.%s" % _id
            metadata = {}
            json_blob = {}

            # piggy back from ceph-disk and get the mount point
            device = get_osd_mount_point(output, osd_name)
            if device:
                metadata["device"] = device

            # read interesting metadata from files
            for f in interesting_files:
                osd_f_path = os.path.join(osd_path, f)
                if remote_module.path_exists(osd_f_path):
                    metadata[f] = remote_module.readline(osd_f_path)

            # do we have a journal path?
            if remote_module.path_exists(journal_path):
                metadata["journal path"] = remote_module.get_realpath(journal_path)

            # is this OSD in osd tree?
            for blob in tree["nodes"]:
                if blob.get("id") == _id:  # matches our OSD
                    json_blob = blob

            print_osd(distro.conn.logger, hostname, osd_path, json_blob, metadata)

        distro.conn.exit()
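get_osd_mount_point is not shown above; here is a plausible sketch, assuming ceph-disk list emits lines such as "/dev/sda1 ceph data, active, cluster ceph, osd.0, journal /dev/sda2" (that output format is an assumption). It returns None when no line matches, which is why the callsite guards with "if device:".

def get_osd_mount_point(output, osd_name):
    # scan the captured `ceph-disk list` lines for the one naming this OSD
    # and return its leading device path; comparing whole comma-separated
    # fields avoids matching osd.1 against osd.10
    for line in output:
        fields = [field.strip() for field in line.split(',')]
        if osd_name in fields:
            return line.split()[0]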
Example 3
def get_admin_keyring(args):

    topath = 'client.admin.keyring'
    frompath = '/Ceph/Meta/Keyring/client.admin.keyring'

    errors = 0
    for hostname in args.mon:
        try:
            LOG.debug('Checking %s for %s', hostname, frompath)
            distro = hosts.get(hostname, username=args.username)
            conf_file_contents = distro.conn.remote_module.get_file(frompath)

            if conf_file_contents is not None:
                LOG.debug('Got %s from %s', frompath, hostname)
                if os.path.exists(topath):
                    with open(topath, 'rb') as f:
                        existing = f.read()
                        if existing != conf_file_contents and not args.overwrite_conf:
                            LOG.error(
                                'local keyring file %s exists with different content; use --overwrite-conf to overwrite'
                                % topath)
                            raise RuntimeError('refusing to overwrite %s' % topath)

                with open(topath, 'wb') as f:
                    f.write(conf_file_contents)
                return
            distro.conn.exit()
            LOG.debug('Empty or missing %s on %s', frompath, hostname)
        except Exception:
            LOG.error('Unable to pull %s from %s', frompath, hostname)
        finally:
            errors += 1

    raise exc.GenericError(
        'Failed to fetch client.admin.keyring from %d hosts' % errors)
Example 4
def install_repo(args):
    """
    For a user that wants to install only the repository (and avoid
    installing Ceph and its dependencies).
    """
    cd_conf = getattr(args, 'cd_conf', None)

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            # XXX this should get removed once ceph packages are split for
            # upstream. If default_release is True, it means that the user is
            # trying to install on a RHEL machine and should expect to get RHEL
            # packages. Otherwise, it will need to specify either a specific
            # version, or repo, or a development branch. Other distro users should
            # not see any differences.
            use_rhceph=args.default_release,
        )
        rlogger = logging.getLogger(hostname)

        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)

        custom_repo(distro, args, cd_conf, rlogger, install_ceph=False)
Example 5
def install_repo(args):
    """
    For a user that wants to install only the repository (and avoid
    installing Ceph and its dependencies).
    """
    cd_conf = getattr(args, 'cd_conf', None)

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            # XXX this should get removed once Ceph packages are split for
            # upstream. If default_release is True, it means that the user is
            # trying to install on a RHEL machine and should expect to get RHEL
            # packages. Otherwise, it will need to specify either a specific
            # version, or repo, or a development branch. Other distro users should
            # not see any differences.
            use_rhceph=args.default_release,
        )
        rlogger = logging.getLogger(hostname)

        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        custom_repo(distro, args, cd_conf, rlogger, install_ceph=False)
Example 6
def remove(args, purge):
    LOG.info('note that some dependencies *will not* be removed because they can cause issues with qemu-kvm')
    LOG.info('like: librbd1 and librados2')
    remove_action = 'Uninstalling'
    if purge:
        remove_action = 'Purging'
    LOG.debug(
        '%s on cluster %s hosts %s',
        remove_action,
        args.cluster,
        ' '.join(args.host),
        )

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)

        distro = hosts.get(
            hostname,
            username=args.username,
            use_rhceph=True)
        LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
        rlogger = logging.getLogger(hostname)
        rlogger.info('%s Ceph on %s' % (remove_action, hostname))
        distro.uninstall(distro, purge=purge)
        distro.conn.exit()
Example 7
def ssh_copy_keys(hostname, username=None):
    LOG.info("making sure passwordless SSH succeeds")
    if ssh.can_connect_passwordless(hostname):
        return

    LOG.warning("could not connect via SSH")

    # Create the key if it doesn't exist:
    id_rsa_pub_file = os.path.expanduser(u"~/.ssh/id_rsa.pub")
    id_rsa_file = id_rsa_pub_file.split(".pub")[0]
    if not os.path.exists(id_rsa_file):
        LOG.info("creating a passwordless id_rsa.pub key file")
        with get_local_connection(LOG) as conn:
            remoto.process.run(conn, ["ssh-keygen", "-t", "rsa", "-N", "", "-f", id_rsa_file])

    # Get the contents of id_rsa.pub and push it to the host
    LOG.info("will connect again with password prompt")
    distro = hosts.get(hostname, username)  # XXX Add username
    auth_keys_path = ".ssh/authorized_keys"
    if not distro.conn.remote_module.path_exists(auth_keys_path):
        distro.conn.logger.warning(".ssh/authorized_keys does not exist, will skip adding keys")
    else:
        LOG.info("adding public keys to authorized_keys")
        with open(id_rsa_pub_file, "r") as id_rsa:
            contents = id_rsa.read()
        distro.conn.remote_module.append_to_file(auth_keys_path, contents)
    distro.conn.exit()
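ssh.can_connect_passwordless is not shown; a minimal sketch of the idea, assuming it shells out to ssh with BatchMode so that a password prompt turns into a fast failure (the real helper may route through remoto instead):

import subprocess

def can_connect_passwordless(hostname):
    # BatchMode=yes disables interactive prompts, so the probe exits
    # non-zero unless key-based authentication already works
    command = ['ssh', '-o', 'BatchMode=yes', hostname, 'true']
    return subprocess.call(command) == 0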
Example 8
def osd_list(args, cfg):
    for hostname in args.host:
        distro = hosts.get(hostname,
                           username=args.username,
                           callbacks=[packages.ceph_is_installed])
        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)

        LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))
        ceph_volume_executable = system.executable_path(
            distro.conn, 'ceph-volume')
        if args.debug:
            remoto.process.run(distro.conn, [
                ceph_volume_executable,
                'lvm',
                'list',
            ],
                               env={'CEPH_VOLUME_DEBUG': '1'})
        else:
            remoto.process.run(
                distro.conn,
                [
                    ceph_volume_executable,
                    'lvm',
                    'list',
                ],
            )
        distro.conn.exit()
Example 9
def disk_zap(args):

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        distro.conn.remote_module.zeroing(disk)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                'zap',
                disk,
            ],
        )

        distro.conn.exit()
Example 10
def activate(args, cfg):
    LOG.debug(
        "Activating cluster %s disks %s",
        args.cluster,
        # join elements of t with ':', t's with ' '
        # allow None in elements of t; print as empty
        " ".join(":".join((s or "") for s in t) for t in args.disk),
    )

    for hostname, disk, journal in args.disk:

        distro = hosts.get(hostname, username=args.username)
        LOG.info("Distro info: %s %s %s", distro.name, distro.release, distro.codename)

        LOG.debug("activating host %s disk %s", hostname, disk)
        LOG.debug("will use init type: %s", distro.init)

        remoto.process.run(distro.conn, ["ceph-disk", "-v", "activate", "--mark-init", distro.init, "--mount", disk])
        # give the OSD a few seconds to start
        time.sleep(5)
        catch_osd_errors(distro.conn, distro.conn.logger, args)

        if distro.is_el:
            system.enable_service(distro.conn)

        distro.conn.exit()
Example 11
def disk_zap(args):
    cfg = conf.ceph.load(args)

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(hostname, username=args.username)
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        # NOTE: this mirrors ceph-disk-prepare --zap-disk DEV
        # zero the device
        distro.conn.remote_module.zeroing(disk)

        process.run(
            distro.conn,
            [
                'sgdisk',
                '--zap-all',
                '--clear',
                '--mbrtogpt',
                '--',
                disk,
            ],
        )
        distro.conn.exit()
Example 12
def admin(args):
    cfg = conf.ceph.load(args)
    conf_data = StringIO()
    cfg.write(conf_data)

    try:
        with open("%s.client.admin.keyring" % args.cluster, "rb") as f:
            keyring = f.read()
    except IOError:
        raise RuntimeError("%s.client.admin.keyring not found" % args.cluster)

    errors = 0
    for hostname in args.client:
        LOG.debug("Pushing admin keys and conf to %s", hostname)
        try:
            distro = hosts.get(hostname, username=args.username)

            distro.conn.remote_module.write_conf(args.cluster, conf_data.getvalue(), args.overwrite_conf)

            distro.conn.remote_module.write_file("/etc/ceph/%s.client.admin.keyring" % args.cluster, keyring, 0o600)

            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError("Failed to configure %d admin hosts" % errors)
Example 13
def disk_zap(args):

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug("zapping %s on %s", disk, hostname)
        distro = hosts.get(hostname, username=args.username)
        LOG.info("Distro info: %s %s %s", distro.name, distro.release, distro.codename)

        distro.conn.remote_module.zeroing(disk)

        ceph_disk_executable = system.executable_path(distro.conn, "ceph-disk")
        remoto.process.run(distro.conn, [ceph_disk_executable, "zap", disk])

        # once all is done, call partprobe (or partx)
        # On RHEL and CentOS distros, calling partprobe forces a reboot of the
        # server. Since we are not resizing partitions we rely on calling
        # partx
        if distro.normalized_name.startswith(("centos", "red")):
            LOG.info("calling partx on zapped device %s", disk)
            LOG.info("re-reading known partitions will display errors")
            remoto.process.run(distro.conn, ["partx", "-a", disk])

        else:
            LOG.debug("Calling partprobe on zapped device %s", disk)
            remoto.process.run(distro.conn, ["partprobe", disk])

        distro.conn.exit()
Example 14
def mon_add(args):
    cfg = conf.ceph.load(args)

    # args.mon is a list with only one entry
    mon_host = args.mon[0]

    try:
        with open('{cluster}.mon.keyring'.format(cluster=args.cluster),
                  'rb') as f:
            monitor_keyring = f.read()
    except IOError:
        raise RuntimeError(
            'mon keyring not found; run \'new\' to create a new cluster')

    LOG.info('ensuring configuration of new mon host: %s', mon_host)
    args.client = args.mon
    admin.admin(args)
    LOG.debug(
        'Adding mon to cluster %s, host %s',
        args.cluster,
        mon_host,
    )

    mon_section = 'mon.%s' % mon_host
    cfg_mon_addr = cfg.safe_get(mon_section, 'mon addr')

    if args.address:
        LOG.debug('using mon address via --address %s' % args.address)
        mon_ip = args.address
    elif cfg_mon_addr:
        LOG.debug('using mon address via configuration: %s' % cfg_mon_addr)
        mon_ip = cfg_mon_addr
    else:
        mon_ip = net.get_nonlocal_ip(mon_host)
        LOG.debug('using mon address by resolving host: %s' % mon_ip)

    try:
        LOG.debug('detecting platform for host %s ...', mon_host)
        distro = hosts.get(mon_host,
                           username=args.username,
                           callbacks=[packages.ceph_is_installed])
        LOG.info('distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)
        rlogger = logging.getLogger(mon_host)

        # ensure remote hostname is good to go
        hostname_is_compatible(distro.conn, rlogger, mon_host)
        rlogger.debug('adding mon to %s', mon_host)
        args.address = mon_ip
        distro.mon.add(distro, args, monitor_keyring)

        # tell me the status of the deployed mon
        time.sleep(2)  # give some room to start
        catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args)
        mon_status(distro.conn, rlogger, mon_host, args)
        distro.conn.exit()

    except RuntimeError as e:
        LOG.error(e)
        raise exc.GenericError('Failed to add monitor to host:  %s' % mon_host)
Example 15
def mon_destroy(args):
    errors = 0
    for (name, host) in mon_hosts(args.mon):
        try:
            LOG.debug('Removing mon from %s', name)

            distro = hosts.get(
                host,
                username=args.username,
                callbacks=[packages.ceph_is_installed]
            )
            hostname = distro.conn.remote_module.shortname()

            destroy_mon(
                distro.conn,
                args.cluster,
                hostname,
            )
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to destroy %d monitors' % errors)
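The (name, host) pairs come from a mon_hosts generator that is not shown. A sketch consistent with how these examples use it, assuming each CLI argument is either HOST or NAME:HOST and that a bare FQDN is shortened to its first label for the name:

def mon_hosts(mons):
    # yield (name, host) for each monitor argument; 'NAME:HOST' pins the
    # name explicitly, otherwise it is derived from the hostname
    for m in mons:
        if ':' in m:
            name, host = m.split(':', 1)
        else:
            name, host = m.split('.')[0], m
        yield name, host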
Example 16
def config_pull(args):

    topath = '{cluster}.conf'.format(cluster=args.cluster)
    frompath = '/etc/ceph/{cluster}.conf'.format(cluster=args.cluster)

    errors = 0
    for hostname in args.client:
        try:
            LOG.debug('Checking %s for %s', hostname, frompath)
            distro = hosts.get(hostname, username=args.username)
            conf_file_contents = distro.conn.remote_module.get_file(frompath)

            if conf_file_contents is not None:
                LOG.debug('Got %s from %s', frompath, hostname)
                if os.path.exists(topath):
                    with open(topath, 'rb') as f:
                        existing = f.read()
                        if existing != conf_file_contents and not args.overwrite_conf:
                            LOG.error('local config file %s exists with different content; use --overwrite-conf to overwrite' % topath)
                            raise RuntimeError('refusing to overwrite %s' % topath)

                with open(topath, 'wb') as f:
                    f.write(conf_file_contents)
                return
            distro.conn.exit()
            LOG.debug('Empty or missing %s on %s', frompath, hostname)
        except Exception:
            LOG.error('Unable to pull %s from %s', frompath, hostname)
        finally:
            errors += 1

    raise exc.GenericError('Failed to fetch config from %d hosts' % errors)
Example 17
def activate(args, cfg):
    LOG.debug(
        'Activating cluster %s disks %s',
        args.cluster,
        # join elements of t with ':', t's with ' '
        # allow None in elements of t; print as empty
        ' '.join(':'.join((s or '') for s in t) for t in args.disk),
    )

    for hostname, disk, journal in args.disk:

        distro = hosts.get(hostname, username=args.username)
        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)

        LOG.debug('activating host %s disk %s', hostname, disk)
        LOG.debug('will use init type: %s', distro.init)

        remoto.process.run(
            distro.conn,
            [
                'ceph-disk-activate',
                '--mark-init',
                distro.init,
                '--mount',
                disk,
            ],
        )
        # give the OSD a few seconds to start
        time.sleep(5)
        catch_osd_errors(distro.conn, distro.conn.logger, args)
        distro.conn.exit()
Example 18
def disk_zap(args):
    cfg = conf.ceph.load(args)

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError(
                'zap command needs both HOSTNAME and DISK but got "%s %s"' %
                (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(hostname, username=args.username)
        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)

        # NOTE: this mirrors ceph-disk-prepare --zap-disk DEV
        # zero the device
        distro.conn.remote_module.zeroing(disk)

        process.run(
            distro.conn,
            [
                'sgdisk',
                '--zap-all',
                '--clear',
                '--mbrtogpt',
                '--',
                disk,
            ],
        )
        distro.conn.exit()
Example 19
def disk_zap(args):

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError(
                'zap command needs both HOSTNAME and DISK but got "%s %s"' %
                (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(hostname,
                           username=args.username,
                           callbacks=[packages.ceph_is_installed])
        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)

        distro.conn.remote_module.zeroing(disk)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                'zap',
                disk,
            ],
        )

        distro.conn.exit()
Example 20
def config_push(args):
    conf_data = conf.ceph.load_raw(args)

    errors = 0
    for hostname in args.client:
        LOG.debug('Pushing config to %s', hostname)
        try:
            distro = hosts.get(hostname, username=args.username)

            distro.conn.remote_module.write_conf(
                args.cluster,
                conf_data,
                args.overwrite_conf,
            )

            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to config %d hosts' % errors)
    # start the OSDs that were synced during the verify process
    osds = []
    if os.path.exists('/root/osds'):
        with open('/root/osds', 'r') as f:
            osds = f.readline()
        cmd = '/etc/init.d/ceph -a -c /etc/ceph/ceph.conf start %s' % osds.strip('\n')
        LOG.debug("execute: %s", cmd)
        (ret, msg) = commands.getstatusoutput(cmd)
        os.unlink('/root/osds')
Example 21
def mon_destroy(args):
    errors = 0
    for (name, host) in mon_hosts(args.mon):
        try:
            LOG.debug('Removing mon from %s', name)

            distro = hosts.get(
                host,
                username=args.username,
                callbacks=[packages.ceph_is_installed]
            )
            hostname = distro.conn.remote_module.shortname()

            # destroy the mon
            destroy_mon(
                distro.conn,
                args.cluster,
                hostname,
            )
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to destroy %d monitors' % errors)
Example 22
def remove(args, purge):
    LOG.info(
        'note that some dependencies *will not* be removed because they can cause issues with qemu-kvm'
    )
    LOG.info('like: librbd1 and librados2')
    remove_action = 'Uninstalling'
    if purge:
        remove_action = 'Purging'
    LOG.debug(
        '%s on cluster %s hosts %s',
        remove_action,
        args.cluster,
        ' '.join(args.host),
    )

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)

        distro = hosts.get(hostname, username=args.username, use_rhceph=True)
        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)
        rlogger = logging.getLogger(hostname)
        rlogger.info('%s Ceph on %s' % (remove_action, hostname))
        distro.uninstall(distro, purge=purge)
        distro.conn.exit()
Example 23
def mds_create(args):
    cfg = conf.ceph.load(args)
    LOG.debug(
        'Deploying mds, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.mds),
    )

    key = get_bootstrap_mds_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    failed_on_rhel = False

    for hostname, name in args.mds:
        try:
            distro = hosts.get(hostname, username=args.username)
            rlogger = distro.conn.logger
            LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                     distro.codename)

            LOG.debug('remote host will use %s', distro.init)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('deploying mds bootstrap to %s', hostname)
                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data.getvalue(),
                    args.overwrite_conf,
                )

                path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
                    cluster=args.cluster, )

                if not distro.conn.remote_module.path_exists(path):
                    rlogger.warning(
                        'mds keyring does not exist yet, creating one')
                    distro.conn.remote_module.write_keyring(path, key)

            create_mds(distro, name, args.cluster, distro.init)
            distro.conn.exit()
        except RuntimeError as e:
            if distro.normalized_name == 'redhat':
                LOG.error('this feature may not yet be available for %s %s' %
                          (distro.name, distro.release))
                failed_on_rhel = True
            LOG.error(e)
            errors += 1

    if errors:
        if failed_on_rhel:
            # because users only read the last few lines :(
            LOG.error(
                'RHEL RHCS systems do not have the ability to deploy MDS yet')

        raise exc.GenericError('Failed to create %d MDSs' % errors)
Example 24
def disk_zap(args):

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        distro.conn.remote_module.zeroing(disk)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                'zap',
                disk,
            ],
        )

        # once all is done, call partprobe (or partx)
        # On RHEL and CentOS distros, calling partprobe forces a reboot of the
        # server. Since we are not resizing partitions we rely on calling
        # partx
        if distro.normalized_name.startswith(('centos', 'red')):
            LOG.info('calling partx on zapped device %s', disk)
            LOG.info('re-reading known partitions will display errors')
            partx_executable = system.executable_path(distro.conn, 'partx')
            remoto.process.run(
                distro.conn,
                [
                    partx_executable,
                    '-a',
                    disk,
                ],
            )

        else:
            LOG.debug('Calling partprobe on zapped device %s', disk)
            partprobe_executable = system.executable_path(distro.conn, 'partprobe')
            remoto.process.run(
                distro.conn,
                [
                    partprobe_executable,
                    disk,
                ],
            )

        distro.conn.exit()
Example 25
def mon_add(args):
    cfg = conf.ceph.load(args)

    if not args.mon:
        raise exc.NeedHostError()
    mon_host = args.mon[0]

    try:
        with open('{cluster}.mon.keyring'.format(cluster=args.cluster),
                  'rb') as f:
            monitor_keyring = f.read()
    except IOError:
        raise RuntimeError(
            'mon keyring not found; run \'new\' to create a new cluster'
        )

    LOG.info('ensuring configuration of new mon host: %s', mon_host)
    args.client = [mon_host]
    admin.admin(args)
    LOG.debug(
        'Adding mon to cluster %s, host %s',
        args.cluster,
        mon_host,
    )

    mon_section = 'mon.%s' % mon_host
    cfg_mon_addr = cfg.safe_get(mon_section, 'mon addr')

    if args.address:
        LOG.debug('using mon address via --address %s' % args.address)
        mon_ip = args.address
    elif cfg_mon_addr:
        LOG.debug('using mon address via configuration: %s' % cfg_mon_addr)
        mon_ip = cfg_mon_addr
    else:
        mon_ip = net.get_nonlocal_ip(mon_host)
        LOG.debug('using mon address by resolving host: %s' % mon_ip)

    try:
        LOG.debug('detecting platform for host %s ...', mon_host)
        distro = hosts.get(mon_host, username=args.username)
        LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
        rlogger = logging.getLogger(mon_host)

        # ensure remote hostname is good to go
        hostname_is_compatible(distro.conn, rlogger, mon_host)
        rlogger.debug('adding mon to %s', mon_host)
        args.address = mon_ip
        distro.mon.add(distro, args, monitor_keyring)

        # tell me the status of the deployed mon
        time.sleep(2)  # give some room to start
        catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args)
        mon_status(distro.conn, rlogger, mon_host, args)
        distro.conn.exit()

    except RuntimeError as e:
        LOG.error(e)
        raise exc.GenericError('Failed to add monitor to host:  %s' % mon_host)
Example 26
def disk_zap(args):

    for hostname, disk, journal in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        distro.conn.remote_module.zeroing(disk)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                'zap',
                disk,
            ],
        )

        # once all is done, call partprobe (or partx)
        # On RHEL and CentOS distros, calling partprobe forces a reboot of the
        # server. Since we are not resizing partitions we rely on calling
        # partx
        if distro.normalized_name.startswith(('centos', 'red')):
            LOG.info('calling partx on zapped device %s', disk)
            LOG.info('re-reading known partitions will display errors')
            partx_executable = system.executable_path(distro.conn, 'partx')
            remoto.process.run(
                distro.conn,
                [
                    partx_executable,
                    '-a',
                    disk,
                ],
            )

        else:
            LOG.debug('Calling partprobe on zapped device %s', disk)
            partprobe_executable = system.executable_path(distro.conn, 'partprobe')
            remoto.process.run(
                distro.conn,
                [
                    partprobe_executable,
                    disk,
                ],
            )

        distro.conn.exit()
Example 27
def prepare(args, cfg, activate_prepared_disk):
    LOG.debug(
        'Preparing cluster %s disks %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.disk),
    )

    key = get_bootstrap_osd_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, disk, journal in args.disk:
        try:
            if disk is None:
                raise exc.NeedDiskError(hostname)

            distro = hosts.get(hostname, username=args.username)
            LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                     distro.codename)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('Deploying osd to %s', hostname)

                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(args.cluster,
                                                     conf_data.getvalue(),
                                                     args.overwrite_conf)

                create_osd(distro.conn, args.cluster, key)

            LOG.debug('Preparing host %s disk %s journal %s activate %s',
                      hostname, disk, journal, activate_prepared_disk)

            prepare_disk(
                distro.conn,
                cluster=args.cluster,
                disk=disk,
                journal=journal,
                activate_prepared_disk=activate_prepared_disk,
                zap=args.zap_disk,
                fs_type=args.fs_type,
                dmcrypt=args.dmcrypt,
                dmcrypt_dir=args.dmcrypt_key_dir,
            )

            # give the OSD a few seconds to start
            time.sleep(5)
            catch_osd_errors(distro.conn, distro.conn.logger, args)
            LOG.debug('Host %s is now ready for osd use.', hostname)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d OSDs' % errors)
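The args.disk triples iterated above, and the ':'.join(...) expression used for logging, imply a HOST:DISK[:JOURNAL] argument format. A sketch of the corresponding parser (the helper name is hypothetical); missing parts come back as None, which is why the logging joins guard every element with (x or ''):

def parse_disk_arg(arg):
    # split 'HOST:DISK[:JOURNAL]' into the (hostname, disk, journal)
    # triple that prepare/activate/zap iterate over
    parts = (arg.split(':') + [None, None])[:3]
    return parts[0], parts[1], parts[2]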
Example 28
def rgw_create(args):
    cfg = conf.ceph.load(args)
    LOG.debug(
        'Deploying rgw, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.rgw),
        )

    key = get_bootstrap_rgw_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, name in args.rgw:
        try:
            distro = hosts.get(hostname, username=args.username)
            rlogger = distro.conn.logger
            LOG.info(
                'Distro info: %s %s %s',
                distro.name,
                distro.release,
                distro.codename
            )
            LOG.debug('remote host will use %s', distro.init)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('deploying rgw bootstrap to %s', hostname)
                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data.getvalue(),
                    args.overwrite_conf,
                )

                path = '/var/lib/ceph/bootstrap-rgw/{cluster}.keyring'.format(
                    cluster=args.cluster,
                )

                if not distro.conn.remote_module.path_exists(path):
                    rlogger.warning('rgw keyring does not exist yet, creating one')
                    distro.conn.remote_module.write_keyring(path, key)

            create_rgw(distro, name, args.cluster, distro.init)
            distro.conn.exit()
            LOG.info(
                ('The Ceph Object Gateway (RGW) is now running on host %s and '
                 'default port %s'),
                hostname,
                '7480'
            )
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d RGWs' % errors)
Example 29
def disk_list(args, cfg):
    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        LOG.info("Distro info: %s %s %s", distro.name, distro.release, distro.codename)

        LOG.debug("Listing disks on {hostname}...".format(hostname=hostname))
        ceph_disk_executable = system.executable_path(distro.conn, "ceph-disk")
        remoto.process.run(distro.conn, [ceph_disk_executable, "list"])
        distro.conn.exit()
Example 30
def mds_create(args):
    cfg = conf.ceph.load(args)
    LOG.debug(
        'Deploying mds, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.mds),
        )

    if not args.mds:
        raise exc.NeedHostError()

    key = get_bootstrap_mds_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, name in args.mds:
        try:
            distro = hosts.get(hostname, username=args.username)
            rlogger = distro.conn.logger
            LOG.info(
                'Distro info: %s %s %s',
                distro.name,
                distro.release,
                distro.codename
            )
            LOG.debug('remote host will use %s', distro.init)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('deploying mds bootstrap to %s', hostname)
                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data.getvalue(),
                    args.overwrite_conf,
                )

                path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
                    cluster=args.cluster,
                )

                if not distro.conn.remote_module.path_exists(path):
                    rlogger.warning('mds keyring does not exist yet, creating one')
                    distro.conn.remote_module.write_keyring(path, key)

            create_mds(distro, name, args.cluster, distro.init)
            distro.conn.exit()
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d MDSs' % errors)
Example 31
def mds_create(args):
    cfg = conf.ceph.load(args)
    LOG.debug(
        'Deploying mds, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.mds),
        )

    if not args.mds:
        raise exc.NeedHostError()

    key = get_bootstrap_mds_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, name in args.mds:
        try:
            distro = hosts.get(hostname, username=args.username)
            rlogger = distro.conn.logger
            LOG.info(
                'Distro info: %s %s %s',
                distro.name,
                distro.release,
                distro.codename
            )
            LOG.debug('remote host will use %s', distro.init)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('deploying mds bootstrap to %s', hostname)
                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data.getvalue(),
                    args.overwrite_conf,
                )

                path = '/var/lib/ceph/bootstrap-mds/{cluster}.keyring'.format(
                    cluster=args.cluster,
                )

                if not distro.conn.remote_module.path_exists(path):
                    rlogger.warning('mds keyring does not exist yet, creating one')
                    distro.conn.remote_module.write_keyring(path, key)

            create_mds(distro.conn, name, args.cluster, distro.init)
            distro.conn.exit()
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d MDSs' % errors)
Example 32
def mon_create(args):

    cfg = conf.ceph.load(args)
    if not args.mon:
        args.mon = get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    if args.keyrings:
        monitor_keyring = concatenate_keyrings(args)
    else:
        keyring_path = '{cluster}.mon.keyring'.format(cluster=args.cluster)
        try:
            monitor_keyring = files.read_file(keyring_path)
        except IOError:
            LOG.warning('keyring (%s) not found, creating a new one' %
                        keyring_path)
            new_mon_keyring(args)
            monitor_keyring = files.read_file(keyring_path)

    LOG.debug(
        'Deploying mon, cluster %s hosts %s',
        args.cluster,
        ' '.join(args.mon),
    )

    errors = 0
    for (name, host) in mon_hosts(args.mon):
        try:
            # TODO add_bootstrap_peer_hint
            LOG.debug('detecting platform for host %s ...', name)
            distro = hosts.get(host,
                               username=args.username,
                               callbacks=[packages.ceph_is_installed])
            LOG.info('distro info: %s %s %s', distro.name, distro.release,
                     distro.codename)
            rlogger = logging.getLogger(name)

            # ensure remote hostname is good to go
            hostname_is_compatible(distro.conn, rlogger, name)
            rlogger.debug('deploying mon to %s', name)
            distro.mon.create(distro, args, monitor_keyring)

            # tell me the status of the deployed mon
            time.sleep(2)  # give some room to start
            mon_status(distro.conn, rlogger, name, args)
            catch_mon_errors(distro.conn, rlogger, name, cfg, args)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)
Example 33
def mon_create(args):

    cfg = conf.ceph.load(args)
    if not args.mon:
        args.mon = get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    if args.keyrings:
        monitor_keyring = concatenate_keyrings(args)
    else:
        keyring_path = '{cluster}.mon.keyring'.format(cluster=args.cluster)
        try:
            monitor_keyring = files.read_file(keyring_path)
        except IOError:
            LOG.warning('keyring (%s) not found, creating a new one' % keyring_path)
            new_mon_keyring(args)
            monitor_keyring = files.read_file(keyring_path)

    LOG.debug(
        'Deploying mon, cluster %s hosts %s',
        args.cluster,
        ' '.join(args.mon),
        )

    errors = 0
    for (name, host) in mon_hosts(args.mon):
        try:
            # TODO add_bootstrap_peer_hint
            LOG.debug('detecting platform for host %s ...', name)
            distro = hosts.get(
                host,
                username=args.username,
                callbacks=[packages.ceph_is_installed]
            )
            LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
            rlogger = logging.getLogger(name)

            # ensure remote hostname is good to go
            hostname_is_compatible(distro.conn, rlogger, name)
            rlogger.debug('deploying mon to %s', name)
            distro.mon.create(distro, args, monitor_keyring)

            # tell me the status of the deployed mon
            time.sleep(2)  # give some room to start
            mon_status(distro.conn, rlogger, name, args)
            catch_mon_errors(distro.conn, rlogger, name, cfg, args)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)
Example 34
def connect(args):
    for hostname in args.hosts:
        distro = hosts.get(hostname, username=args.username)
        if not distro_is_supported(distro.normalized_name):
            raise exc.UnsupportedPlatform(
                distro.distro_name,
                distro.codename,
                distro.release
            )

        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )
        LOG.info('assuming that a repository with Calamari packages is already configured.')
        LOG.info('Refer to the docs for examples (http://ceph.com/ceph-deploy/docs/conf.html)')

        rlogger = logging.getLogger(hostname)

        # Emplace minion config prior to installation so that it is present
        # when the minion first starts.
        minion_config_dir = os.path.join('/etc/salt/', 'minion.d')
        minion_config_file = os.path.join(minion_config_dir, 'calamari.conf')

        rlogger.debug('creating config dir: %s' % minion_config_dir)
        distro.conn.remote_module.makedir(minion_config_dir, [errno.EEXIST])

        rlogger.debug(
            'creating the calamari salt config: %s' % minion_config_file
        )
        distro.conn.remote_module.write_file(
            minion_config_file,
            ('master: %s\n' % args.master).encode('utf-8')
        )

        distro.packager.install('salt-minion')
        distro.packager.install('diamond')

        # redhat/centos need to get the service started
        if distro.normalized_name in ['redhat', 'centos']:
            remoto.process.run(
                distro.conn,
                ['chkconfig', 'salt-minion', 'on']
            )

            remoto.process.run(
                distro.conn,
                ['service', 'salt-minion', 'start']
            )

        distro.conn.exit()
Example 35
def mon_create_initial(args):
    # get mon_initial_members from ceph.conf
    mon_initial_members = get_mon_initial_members(args, error_on_empty=True)

    # create them normally through mon_create
    args.mon = mon_initial_members
    # create the mons
    mon_create(args)

    # make the sets to be able to compare later
    mon_in_quorum = set()
    mon_members = set(mon_initial_members)

    for host in mon_initial_members:
        mon_name = 'mon.%s' % host
        LOG.info('processing monitor %s', mon_name)
        sleeps = [20, 20, 15, 10, 10, 5]
        tries = 5
        rlogger = logging.getLogger(host)
        distro = hosts.get(
            host,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )

        while tries:
            # check the mon's status
            status = mon_status_check(distro.conn, rlogger, host, args)
            has_reached_quorum = status.get('state', '') in ['peon', 'leader']
            if not has_reached_quorum:
                LOG.warning('%s monitor is not yet in quorum, tries left: %s' % (mon_name, tries))
                tries -= 1
                sleep_seconds = sleeps.pop()
                LOG.warning('waiting %s seconds before retrying', sleep_seconds)
                time.sleep(sleep_seconds)  # Magic number
            else:
                mon_in_quorum.add(host)
                LOG.info('%s monitor has reached quorum!', mon_name)
                break
        distro.conn.exit()

    # the cluster's mon_in_quorum must exactly match mon_initial_members from ceph.conf
    if mon_in_quorum == mon_members:
        LOG.info('all initial monitors are running and have formed quorum')
        LOG.info('Running gatherkeys...')
        # call gatherkeys.gatherkeys() to collect the keys used to configure new nodes
        gatherkeys.gatherkeys(args)
    else:
        LOG.error('Some monitors have still not reached quorum:')
        for host in mon_members - mon_in_quorum:
            LOG.error('%s', host)
        raise SystemExit('cluster may not be in a healthy state')
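One subtlety in the retry loop above: list.pop() with no index removes from the end, so the waits between retries grow rather than shrink, and the sixth entry is headroom that tries=5 never consumes. A quick check:

sleeps = [20, 20, 15, 10, 10, 5]
tries = 5
while tries:
    tries -= 1
    print(sleeps.pop())  # 5, 10, 10, 15, 20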
Example 36
def mon_create(args):

    cfg = conf.ceph.load(args)
    if not args.mon:
        mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
        args.mon = re.split(r'[,\s]+', mon_initial_members)

    if not args.mon:
        raise exc.NeedHostError()

    if args.keyrings:
        monitor_keyring = concatenate_keyrings(args)
    else:
        try:
            with open('{cluster}.mon.keyring'.format(cluster=args.cluster),
                      'rb') as f:
                monitor_keyring = f.read()
        except IOError:
            raise RuntimeError('mon keyring not found; run \'new\' to create a new cluster')

    LOG.debug(
        'Deploying mon, cluster %s hosts %s',
        args.cluster,
        ' '.join(args.mon),
        )

    errors = 0
    for (name, host) in mon_hosts(args.mon):
        try:
            # TODO add_bootstrap_peer_hint
            LOG.debug('detecting platform for host %s ...', name)
            distro = hosts.get(host, username=args.username)
            LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
            rlogger = logging.getLogger(name)

            # ensure remote hostname is good to go
            hostname_is_compatible(distro.conn, rlogger, name)
            rlogger.debug('deploying mon to %s', name)
            distro.mon.create(distro, args, monitor_keyring)

            # tell me the status of the deployed mon
            time.sleep(2)  # give some room to start
            mon_status(distro.conn, rlogger, name, args)
            catch_mon_errors(distro.conn, rlogger, name, cfg, args)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)
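Example 36 inlines what get_mon_initial_members (used by Examples 32, 33, and 35) presumably wraps. A sketch consistent with that inline version; conf and exc are the same modules the examples already reference, and the error_on_empty behavior is an assumption:

import re

def get_mon_initial_members(args, error_on_empty=False, _cfg=None):
    # read 'mon_initial_members' from the cluster conf and split on
    # commas/whitespace, mirroring the inline code in Example 36
    cfg = _cfg or conf.ceph.load(args)
    raw = cfg.safe_get('global', 'mon_initial_members')
    members = re.split(r'[,\s]+', raw) if raw else []
    if error_on_empty and not members:
        raise exc.NeedHostError()  # assumed error type
    return members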
Example 37
def mon_add(args):
    cfg = conf.ceph.load(args)

    # args.mon is a list with only one entry
    mon_host = args.mon[0]

    try:
        with file("{cluster}.mon.keyring".format(cluster=args.cluster), "rb") as f:
            monitor_keyring = f.read()
    except IOError:
        raise RuntimeError("mon keyring not found; run 'new' to create a new cluster")

    LOG.info("ensuring configuration of new mon host: %s", mon_host)
    args.client = args.mon
    admin.admin(args)
    LOG.debug("Adding mon to cluster %s, host %s", args.cluster, mon_host)

    mon_section = "mon.%s" % mon_host
    cfg_mon_addr = cfg.safe_get(mon_section, "mon addr")

    if args.address:
        LOG.debug("using mon address via --address %s" % args.address)
        mon_ip = args.address
    elif cfg_mon_addr:
        LOG.debug("using mon address via configuration: %s" % cfg_mon_addr)
        mon_ip = cfg_mon_addr
    else:
        mon_ip = net.get_nonlocal_ip(mon_host)
        LOG.debug("using mon address by resolving host: %s" % mon_ip)

    try:
        LOG.debug("detecting platform for host %s ...", mon_host)
        distro = hosts.get(mon_host, username=args.username)
        LOG.info("distro info: %s %s %s", distro.name, distro.release, distro.codename)
        rlogger = logging.getLogger(mon_host)

        # ensure remote hostname is good to go
        hostname_is_compatible(distro.conn, rlogger, mon_host)
        rlogger.debug("adding mon to %s", mon_host)
        args.address = mon_ip
        distro.mon.add(distro, args, monitor_keyring)

        # tell me the status of the deployed mon
        time.sleep(2)  # give some room to start
        catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args)
        mon_status(distro.conn, rlogger, mon_host, args)
        distro.conn.exit()

    except RuntimeError as e:
        LOG.error(e)
        raise exc.GenericError("Failed to add monitor to host:  %s" % mon_host)
Example 38
def disk_list(args, cfg):
    command = ['fdisk', '-l']

    for hostname in args.host:
        distro = hosts.get(hostname,
                           username=args.username,
                           callbacks=[packages.ceph_is_installed])
        out, err, code = remoto.process.check(
            distro.conn,
            command,
        )
        for line in out:
            if line.startswith('Disk /'):
                distro.conn.logger.info(line)
Example 39
def rgw_create(args):
    cfg = conf.ceph.load(args)
    LOG.debug(
        'Deploying rgw, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.rgw),
    )

    key = get_bootstrap_rgw_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, name in args.rgw:
        try:
            distro = hosts.get(hostname, username=args.username)
            rlogger = distro.conn.logger
            LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                     distro.codename)
            LOG.debug('remote host will use %s', distro.init)

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('deploying rgw bootstrap to %s', hostname)
                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data.getvalue(),
                    args.overwrite_conf,
                )

                path = '/var/lib/ceph/bootstrap-rgw/{cluster}.keyring'.format(
                    cluster=args.cluster, )

                if not distro.conn.remote_module.path_exists(path):
                    rlogger.warning(
                        'rgw keyring does not exist yet, creating one')
                    distro.conn.remote_module.write_keyring(path, key)

            create_rgw(distro, name, args.cluster, distro.init)
            distro.conn.exit()
            LOG.info(
                ('The Ceph Object Gateway (RGW) is now running on host %s and '
                 'default port %s'), hostname, '7480')
        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d RGWs' % errors)
Example 40
def activate(args, cfg):
    LOG.debug(
        'Activating cluster %s disks %s',
        args.cluster,
        # join elements of t with ':', t's with ' '
        # allow None in elements of t; print as empty
        ' '.join(':'.join((s or '') for s in t) for t in args.disk),
        )

    for hostname, disk, journal in args.disk:

        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        LOG.debug('activating host %s disk %s', hostname, disk)
        LOG.debug('will use init type: %s', distro.init)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                '-v',
                'activate',
                '--mark-init',
                distro.init,
                '--mount',
                disk,
            ],
        )
        # give the OSD a few seconds to start
        time.sleep(5)
        catch_osd_errors(distro.conn, distro.conn.logger, args)

        if distro.init == 'systemd':
            system.enable_service(distro.conn, "ceph.target")
        elif distro.init == 'sysvinit':
            system.enable_service(distro.conn, "ceph")

        distro.conn.exit()
Example 41
def activate(args, cfg):
    LOG.debug(
        'Activating cluster %s disks %s',
        args.cluster,
        # join elements of t with ':', t's with ' '
        # allow None in elements of t; print as empty
        ' '.join(':'.join((s or '') for s in t) for t in args.disk),
        )

    for hostname, disk, journal in args.disk:

        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        LOG.debug('activating host %s disk %s', hostname, disk)
        LOG.debug('will use init type: %s', distro.init)

        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                '-v',
                'activate',
                '--mark-init',
                distro.init,
                '--mount',
                disk,
            ],
        )
        # give the OSD a few seconds to start
        time.sleep(5)
        catch_osd_errors(distro.conn, distro.conn.logger, args)

        if distro.init == 'systemd':
            system.enable_service(distro.conn, "ceph.target")
        elif distro.init == 'sysvinit':
            system.enable_service(distro.conn, "ceph")

        distro.conn.exit()
Example no. 42
def disk_list(args, cfg):
    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)

        LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))
        remoto.process.run(
            distro.conn,
            [
                'ceph-disk',
                'list',
            ],
        )
        distro.conn.exit()
Example no. 43
def purge(args):
    LOG.debug(
        'Purging from cluster %s hosts %s',
        args.cluster,
        ' '.join(args.host),
        )

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)

        distro = hosts.get(hostname, username=args.username)
        LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
        rlogger = logging.getLogger(hostname)
        rlogger.info('purging host ... %s' % hostname)
        distro.uninstall(distro.conn, purge=True)
        distro.conn.exit()
Example no. 44
def disk_list(args, cfg):
    command = ['fdisk', '-l']

    for hostname in args.host:
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        out, err, code = remoto.process.check(
            distro.conn,
            command,
        )
        for line in out:
            if line.startswith('Disk /'):
                distro.conn.logger.info(line)
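The same "Disk /" filtering can be reproduced locally without remoto; a minimal sketch, assuming a Linux host with fdisk on the PATH (root privileges are usually needed to see every device):

import subprocess

# run fdisk -l locally and keep only the per-device summary lines
out = subprocess.run(['fdisk', '-l'], capture_output=True, text=True).stdout
for line in out.splitlines():
    if line.startswith('Disk /'):
        print(line)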
Example no. 45
def uninstall(args):
    LOG.debug(
        'Uninstalling on cluster %s hosts %s',
        args.cluster,
        ' '.join(args.host),
        )

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)

        distro = hosts.get(hostname, username=args.username)
        LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
        rlogger = logging.getLogger(hostname)
        rlogger.info('uninstalling ceph on %s' % hostname)
        distro.uninstall(distro.conn)
        distro.conn.exit()
Example no. 46
def install_repo(args):
    """
    For a user who wants to install only the repository (and avoid
    installing ceph and its dependencies).
    """
    cd_conf = getattr(args, 'cd_conf', None)

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)
        distro = hosts.get(hostname, username=args.username)
        rlogger = logging.getLogger(hostname)

        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)

        # install the repo on every host in args.host
        custom_repo(distro, args, cd_conf, rlogger, install_ceph=False)
Example no. 47
def mon_create_initial(args):
    mon_initial_members = get_mon_initial_members(args, error_on_empty=True)

    # create them normally through mon_create
    args.mon = mon_initial_members
    mon_create(args)

    # build the sets so we can compare them later
    mon_in_quorum = set()
    mon_members = set(mon_initial_members)

    for host in mon_initial_members:
        mon_name = 'mon.%s' % host
        LOG.info('processing monitor %s', mon_name)
        sleeps = [20, 20, 15, 10, 10, 5]
        tries = 5
        rlogger = logging.getLogger(host)
        distro = hosts.get(
            host,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )

        while tries:
            status = mon_status_check(distro.conn, rlogger, host, args)
            has_reached_quorum = status.get('state', '') in ['peon', 'leader']
            if not has_reached_quorum:
                LOG.warning('%s monitor is not yet in quorum, tries left: %s' % (mon_name, tries))
                tries -= 1
                sleep_seconds = sleeps.pop()
                LOG.warning('waiting %s seconds before retrying', sleep_seconds)
                time.sleep(sleep_seconds)
            else:
                mon_in_quorum.add(host)
                LOG.info('%s monitor has reached quorum!', mon_name)
                break
        distro.conn.exit()

    if mon_in_quorum == mon_members:
        LOG.info('all initial monitors are running and have formed quorum')
        LOG.info('Running gatherkeys...')
        gatherkeys.gatherkeys(args)
    else:
        LOG.error('Some monitors have still not reached quorum:')
        for host in mon_members - mon_in_quorum:
            LOG.error('%s', host)
        raise SystemExit('cluster may not be in a healthy state')
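The quorum wait above is a retry loop with a varying sleep schedule; a standalone sketch of the same pattern, where check_fn and the timings are placeholders rather than ceph-deploy API:

import time

def wait_until(check_fn, sleeps=(20, 20, 15, 10, 10, 5), tries=5):
    # tries must not exceed len(sleeps); pop() takes from the end of the
    # list, so the shortest waits run first and the longest run last
    remaining = list(sleeps)
    while tries:
        if check_fn():
            return True
        tries -= 1
        time.sleep(remaining.pop())
    return False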
Example no. 48
def disk_zap(args):

    hostname = args.host
    for disk in args.disk:
        if not disk or not hostname:
            raise RuntimeError('zap command needs both HOSTNAME and DISK but got "%s %s"' % (hostname, disk))
        LOG.debug('zapping %s on %s', disk, hostname)
        distro = hosts.get(
            hostname,
            username=args.username,
            callbacks=[packages.ceph_is_installed]
        )
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        distro.conn.remote_module.zeroing(disk)

        ceph_volume_executable = system.executable_path(distro.conn, 'ceph-volume')
        if args.debug:
            remoto.process.run(
                distro.conn,
                [
                    ceph_volume_executable,
                    'lvm',
                    'zap',
                    disk,
                ],
                env={'CEPH_VOLUME_DEBUG': '1'}
            )
        else:
            remoto.process.run(
                distro.conn,
                [
                    ceph_volume_executable,
                    'lvm',
                    'zap',
                    disk,
                ],
            )

        distro.conn.exit()
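The two remoto.process.run branches above differ only in the env argument; they could be collapsed as sketched below. Whether remoto.process.run accepts env=None here is an assumption that should be verified against the remoto version in use:

# hypothetical refactor of the if/else above; verify env=None is accepted
env = {'CEPH_VOLUME_DEBUG': '1'} if args.debug else None
remoto.process.run(
    distro.conn,
    [ceph_volume_executable, 'lvm', 'zap', disk],
    env=env,
)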
Example no. 50
def mon_destroy(args):
    errors = 0
    for (name, host) in mon_hosts(args.mon):
        try:
            LOG.debug("Removing mon from %s", name)

            distro = hosts.get(host, username=args.username)
            hostname = distro.conn.remote_module.shortname()

            destroy_mon(distro.conn, args.cluster, hostname)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError("Failed to destroy %d monitors" % errors)
Example no. 51
def uninstall(args):
    LOG.info('note that some dependencies *will not* be removed because they can cause issues with qemu-kvm')
    LOG.info('like: librbd1 and librados2')
    LOG.debug(
        'Uninstalling on cluster %s hosts %s',
        args.cluster,
        ' '.join(args.host),
        )

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)

        distro = hosts.get(hostname, username=args.username)
        LOG.info('Distro info: %s %s %s', distro.name, distro.release, distro.codename)
        rlogger = logging.getLogger(hostname)
        rlogger.info('uninstalling ceph on %s' % hostname)
        distro.uninstall(distro.conn)
        distro.conn.exit()
Example no. 52
def disk_list(args, cfg):
    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname,
                           username=args.username,
                           callbacks=[packages.ceph_is_installed])
        LOG.info('Distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)

        LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))
        ceph_disk_executable = system.executable_path(distro.conn, 'ceph-disk')
        remoto.process.run(
            distro.conn,
            [
                ceph_disk_executable,
                'list',
            ],
        )
        distro.conn.exit()
Example no. 54
def ssh_copy_keys(hostname, username=None):
    LOG.info('making sure passwordless SSH succeeds')
    if ssh.can_connect_passwordless(hostname):
        return

    LOG.warning('could not connect via SSH')

    # Create the key if it doesn't exist:
    id_rsa_pub_file = os.path.expanduser(u'~/.ssh/id_rsa.pub')
    id_rsa_file = id_rsa_pub_file.split('.pub')[0]
    if not os.path.exists(id_rsa_file):
        LOG.info('creating a passwordless id_rsa.pub key file')
        with get_local_connection(LOG) as conn:
            remoto.process.run(
                conn,
                [
                    'ssh-keygen',
                    '-t',
                    'rsa',
                    '-N',
                    "",
                    '-f',
                    id_rsa_file,
                ]
            )

    # Get the contents of id_rsa.pub and push it to the host
    LOG.info('will connect again with password prompt')
    distro = hosts.get(hostname, username=username)
    auth_keys_path = '.ssh/authorized_keys'
    if not distro.conn.remote_module.path_exists(auth_keys_path):
        distro.conn.logger.warning(
            '.ssh/authorized_keys does not exist, will skip adding keys'
        )
    else:
        LOG.info('adding public keys to authorized_keys')
        with open(os.path.expanduser('~/.ssh/id_rsa.pub'), 'r') as id_rsa:
            contents = id_rsa.read()
        distro.conn.remote_module.append_to_file(
            auth_keys_path,
            contents
        )
    distro.conn.exit()
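The remote key-generation step maps to a plain subprocess call when run locally; a minimal sketch, assuming OpenSSH's ssh-keygen is on the PATH (-N '' requests an empty passphrase):

import os
import subprocess

key_path = os.path.expanduser('~/.ssh/id_rsa')
if not os.path.exists(key_path):
    # create a passwordless RSA keypair, mirroring the remote call above
    subprocess.run(
        ['ssh-keygen', '-t', 'rsa', '-N', '', '-f', key_path],
        check=True,
    )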
Example no. 55
def disk_list(args, cfg):
    for hostname, disk, journal in args.disk:
        distro = hosts.get(hostname, username=args.username)
        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))
        remoto.process.run(
            distro.conn,
            [
                'ceph-disk',
                'list',
            ],
        )
        distro.conn.exit()
Example no. 56
def fetch_file(args, frompath, topath, _hosts):
    if os.path.exists(topath):
        LOG.debug('Have %s', topath)
        return True
    else:
        for hostname in _hosts:
            filepath = frompath.format(hostname=hostname)
            LOG.debug('Checking %s for %s', hostname, filepath)
            distro = hosts.get(hostname, username=args.username)
            key = distro.conn.remote_module.get_file(filepath)

            if key is not None:
                LOG.debug('Got %s key from %s.', topath, hostname)
                with open(topath, 'w') as f:
                    f.write(key)
                    return True
            distro.conn.exit()
            LOG.warning('Unable to find %s on %s', filepath, hostname)
    return False
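A hypothetical usage sketch; the keyring path template and monitor names are illustrative, not values taken from this codebase, and args is assumed to carry a username attribute as in the surrounding examples:

found = fetch_file(
    args,
    frompath='/var/lib/ceph/mon/ceph-{hostname}/keyring',
    topath='ceph.mon.keyring',
    _hosts=['mon1', 'mon2'],
)
if not found:
    raise RuntimeError('could not fetch the keyring from any monitor')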
Example no. 57
def repo(args):
    cd_conf = getattr(args, 'cd_conf', None)

    for hostname in args.host:
        LOG.debug('Detecting platform for host %s ...', hostname)
        distro = hosts.get(
            hostname,
            username=args.username
        )
        rlogger = logging.getLogger(hostname)

        LOG.info(
            'Distro info: %s %s %s',
            distro.name,
            distro.release,
            distro.codename
        )

        if args.remove:
            distro.packager.remove_repo(args.repo_name)
        else:
            install_repo(distro, args, cd_conf, rlogger)