Example no. 1
def mon_add(args):
    cfg = conf.ceph.load(args)

    # args.mon is a list with only one entry
    mon_host = args.mon[0]

    try:
        with file('{cluster}.mon.keyring'.format(cluster=args.cluster),
                  'rb') as f:
            monitor_keyring = f.read()
    except IOError:
        raise RuntimeError(
            'mon keyring not found; run \'new\' to create a new cluster')

    LOG.info('ensuring configuration of new mon host: %s', mon_host)
    args.client = args.mon
    admin.admin(args)
    LOG.debug(
        'Adding mon to cluster %s, host %s',
        args.cluster,
        mon_host,
    )

    mon_section = 'mon.%s' % mon_host
    cfg_mon_addr = cfg.safe_get(mon_section, 'mon addr')

    if args.address:
        LOG.debug('using mon address via --address %s' % args.address)
        mon_ip = args.address
    elif cfg_mon_addr:
        LOG.debug('using mon address via configuration: %s' % cfg_mon_addr)
        mon_ip = cfg_mon_addr
    else:
        mon_ip = net.get_nonlocal_ip(mon_host)
        LOG.debug('using mon address by resolving host: %s' % mon_ip)

    try:
        LOG.debug('detecting platform for host %s ...', mon_host)
        distro = hosts.get(mon_host,
                           username=args.username,
                           callbacks=[packages.ceph_is_installed])
        LOG.info('distro info: %s %s %s', distro.name, distro.release,
                 distro.codename)
        rlogger = logging.getLogger(mon_host)

        # ensure remote hostname is good to go
        hostname_is_compatible(distro.conn, rlogger, mon_host)
        rlogger.debug('adding mon to %s', mon_host)
        args.address = mon_ip
        distro.mon.add(distro, args, monitor_keyring)

        # tell me the status of the deployed mon
        time.sleep(2)  # give some room to start
        catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args)
        mon_status(distro.conn, rlogger, mon_host, args)
        distro.conn.exit()

    except RuntimeError as e:
        LOG.error(e)
        raise exc.GenericError('Failed to add monitor to host: %s' % mon_host)
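A minimal sketch of the namespace this variant expects, assuming an argparse-style args object; the field names come from the attributes the function reads (mon, cluster, address, username, client), while the values and the driver itself are hypothetical.

# Hypothetical driver for the mon_add() variant above; field names mirror the
# attributes the function reads, values are illustrative only.
import argparse

args = argparse.Namespace(
    mon=['mon-host-1'],   # single-entry list, as the comment in the function notes
    cluster='ceph',       # used to locate '{cluster}.mon.keyring'
    address=None,         # optional override for the monitor IP
    username=None,        # ssh user passed through to hosts.get()
    client=None,          # overwritten with args.mon before admin.admin() runs
)
# mon_add(args)  # would also need the surrounding ceph-deploy modules (conf, hosts, admin, ...)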
Example no. 2
def mon_add(args):
    cfg = conf.ceph.load(args)

    if not args.mon:
        raise exc.NeedHostError()
    mon_host = args.mon[0]

    try:
        with file('{cluster}.mon.keyring'.format(cluster=args.cluster),
                  'rb') as f:
            monitor_keyring = f.read()
    except IOError:
        raise RuntimeError(
            'mon keyring not found; run \'new\' to create a new cluster'
        )

    LOG.info('ensuring configuration of new mon host: %s', mon_host)
    args.client = [mon_host]
    admin.admin(args)
    LOG.debug(
        'Adding mon to cluster %s, host %s',
        args.cluster,
        mon_host,
    )

    mon_section = 'mon.%s' % mon_host
    cfg_mon_addr = cfg.safe_get(mon_section, 'mon addr')

    if args.address:
        LOG.debug('using mon address via --address %s' % args.address)
        mon_ip = args.address
    elif cfg_mon_addr:
        LOG.debug('using mon address via configuration: %s' % cfg_mon_addr)
        mon_ip = cfg_mon_addr
    else:
        mon_ip = net.get_nonlocal_ip(mon_host)
        LOG.debug('using mon address by resolving host: %s' % mon_ip)

    try:
        LOG.debug('detecting platform for host %s ...', mon_host)
        distro = hosts.get(mon_host, username=args.username)
        LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
        rlogger = logging.getLogger(mon_host)

        # ensure remote hostname is good to go
        hostname_is_compatible(distro.conn, rlogger, mon_host)
        rlogger.debug('adding mon to %s', mon_host)
        args.address = mon_ip
        distro.mon.add(distro, args, monitor_keyring)

        # tell me the status of the deployed mon
        time.sleep(2)  # give some room to start
        catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args)
        mon_status(distro.conn, rlogger, mon_host, args)
        distro.conn.exit()

    except RuntimeError as e:
        LOG.error(e)
        raise exc.GenericError('Failed to add monitor to host: %s' % mon_host)
Example no. 3
def mon_add(args):
    cfg = conf.ceph.load(args)

    # args.mon is a list with only one entry
    mon_host = args.mon[0]

    try:
        with file("{cluster}.mon.keyring".format(cluster=args.cluster), "rb") as f:
            monitor_keyring = f.read()
    except IOError:
        raise RuntimeError("mon keyring not found; run 'new' to create a new cluster")

    LOG.info("ensuring configuration of new mon host: %s", mon_host)
    args.client = args.mon
    admin.admin(args)
    LOG.debug("Adding mon to cluster %s, host %s", args.cluster, mon_host)

    mon_section = "mon.%s" % mon_host
    cfg_mon_addr = cfg.safe_get(mon_section, "mon addr")

    if args.address:
        LOG.debug("using mon address via --address %s" % args.address)
        mon_ip = args.address
    elif cfg_mon_addr:
        LOG.debug("using mon address via configuration: %s" % cfg_mon_addr)
        mon_ip = cfg_mon_addr
    else:
        mon_ip = net.get_nonlocal_ip(mon_host)
        LOG.debug("using mon address by resolving host: %s" % mon_ip)

    try:
        LOG.debug("detecting platform for host %s ...", mon_host)
        distro = hosts.get(mon_host, username=args.username)
        LOG.info("distro info: %s %s %s", distro.name, distro.release, distro.codename)
        rlogger = logging.getLogger(mon_host)

        # ensure remote hostname is good to go
        hostname_is_compatible(distro.conn, rlogger, mon_host)
        rlogger.debug("adding mon to %s", mon_host)
        args.address = mon_ip
        distro.mon.add(distro, args, monitor_keyring)

        # tell me the status of the deployed mon
        time.sleep(2)  # give some room to start
        catch_mon_errors(distro.conn, rlogger, mon_host, cfg, args)
        mon_status(distro.conn, rlogger, mon_host, args)
        distro.conn.exit()

    except RuntimeError as e:
        LOG.error(e)
        raise exc.GenericError("Failed to add monitor to host:  %s" % mon_host)
Example no. 4
def new(args):
    if args.ceph_conf:
        raise RuntimeError(
            'will not create a Ceph conf file if attempting to re-use with `--ceph-conf` flag'
        )
    LOG.debug('Creating new cluster named %s', args.cluster)
    cfg = conf.ceph.CephConf()
    cfg.add_section('global')
    cfg.add_section('client')
    cfg.add_section('mon')
    cfg.add_section('osd')

    fsid = args.fsid or uuid.uuid4()
    cfg.set('global', 'fsid', str(fsid))

    # if networks were passed in, let's set them in the
    # global section
    #    if args.public_network:
    #        cfg.set('global', 'public network', str(args.public_network))

    #    if args.cluster_network:
    #        cfg.set('global', 'cluster network', str(args.cluster_network))

    mon_initial_members = []
    mon_host = []

    for (name, host) in mon_hosts(args.mon):
        # Try to ensure we can ssh in properly before anything else
        if args.ssh_copykey:
            ssh_copy_keys(host, args.username)

        # Now get the non-local IPs from the remote node
        distro = hosts.get(host, username=args.username)
        remote_ips = net.ip_addresses(distro.conn)
        ceph_dir = '/Ceph'
        ceph_mon_dir = '/Ceph/Data/Mon'
        ceph_meta_dir = '/Ceph/Meta/Keyring'
        if not distro.conn.remote_module.path_exists(ceph_dir):
            distro.conn.remote_module.create_ceph_path(ceph_dir, 167, 167)
            distro.conn.remote_module.create_mon_path(ceph_mon_dir, 167, 167)
            distro.conn.remote_module.create_meta_path(ceph_meta_dir, 167, 167)
        # custom cluster names on sysvinit hosts won't work
        if distro.init == 'sysvinit' and args.cluster != 'ceph':
            LOG.error(
                'custom cluster names are not supported on sysvinit hosts')
            raise exc.ClusterNameError(
                'host %s does not support custom cluster names' % host)

        distro.conn.exit()

        # Validate subnets if we received any
        if args.public_network or args.cluster_network:
            validate_host_ip(remote_ips,
                             [args.public_network, args.cluster_network])

        # Pick the IP that matches the public cluster (if we were told to do
        # so) otherwise pick the first, non-local IP
        LOG.debug('Resolving host %s', host)
        if args.public_network:
            ip = get_public_network_ip(remote_ips, args.public_network)
        else:
            ip = net.get_nonlocal_ip(host)
        LOG.debug('Monitor %s at %s', name, ip)
        mon_initial_members.append(name)
        try:
            socket.inet_pton(socket.AF_INET6, ip)
            mon_host.append("[" + ip + "]")
            LOG.info('Monitors are IPv6, binding Messenger traffic on IPv6')
            cfg.set('global', 'ms bind ipv6', 'true')
        except socket.error:
            mon_host.append(ip)

    LOG.debug('Monitor initial members are %s', mon_initial_members)
    LOG.debug('Monitor addrs are %s', mon_host)

    #cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
    # no spaces here, see http://tracker.newdream.net/issues/3145
    #cfg.set('global', 'mon host', ','.join(mon_host))

    cfg.set('global', 'keyring', '/Ceph/Meta/Keyring/$name.keyring')
    # override undesirable defaults, needed until bobtail

    # http://tracker.ceph.com/issues/6788
    cfg.set('global', 'auth cluster required', 'cephx')
    cfg.set('global', 'auth service required', 'cephx')
    cfg.set('global', 'auth client required', 'cephx')

    cfg.set('client', 'rbd cache', 'false')
    cfg.set('client', 'client cache size', '16384000')
    cfg.set('client', 'client_oc_size', '838860800')
    cfg.set('client', 'admin socket', '/var/run/ceph/rbd-client-$pid.asok')
    cfg.set('client', 'log file', '/var/log/ceph/ceph.client.log')

    cfg.set('mon', 'mon clock drift allowed', '0.5')
    cfg.set('mon', 'mon debug dump transactions', 'false')
    cfg.set('mon', 'mon osd max split count', '10000')
    cfg.set('mon', 'mon data', '/Ceph/Data/Mon/mon.$id')

    cfg.set('osd', 'osd crush update on start', 'false')
    cfg.set('osd', 'osd journal size', '10240')
    cfg.set('osd', 'osd new ceph', 'false')
    cfg.set('osd', 'osd max backfills', '1')
    cfg.set('osd', 'osd recovery max active', '1')
    cfg.set('osd', 'osd deep scrub interval', '209018880000')
    cfg.set('osd', 'osd scrub begin hour', '0')
    cfg.set('osd', 'osd scrub end hour', '8')
    cfg.set('osd', 'osd deep scrub primary write', 'false')
    cfg.set('osd', 'osd deep scrub replica write', 'false')
    cfg.set('osd', 'osd max object name len', '256')

    path = '{name}.conf'.format(name=args.cluster, )

    new_mon_keyring(args)

    LOG.debug('Writing initial config to %s...', path)
    tmp = '%s.tmp' % path
    with open(tmp, 'w') as f:
        cfg.write(f)
    try:
        os.rename(tmp, path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(path)
        else:
            raise
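Given these settings, the '{cluster}.conf' written at the end would look roughly like the trimmed fragment below; the fsid is a placeholder, and note that 'mon initial members' and 'mon host' remain commented out in this variant, so they do not appear.

[global]
fsid = <uuid generated or taken from args.fsid>
keyring = /Ceph/Meta/Keyring/$name.keyring
auth cluster required = cephx
auth service required = cephx
auth client required = cephx

[client]
rbd cache = false
client cache size = 16384000

[mon]
mon clock drift allowed = 0.5
mon data = /Ceph/Data/Mon/mon.$id

[osd]
osd journal size = 10240
osd max object name len = 256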
Example no. 5
def new(args):
    if args.ceph_conf:
        raise RuntimeError(
            'will not create a Ceph conf file if attempting to re-use with `--ceph-conf` flag'
        )
    LOG.debug('Creating new cluster named %s', args.cluster)
    cfg = conf.ceph.CephConf()
    cfg.add_section('global')

    fsid = args.fsid or uuid.uuid4()
    cfg.set('global', 'fsid', str(fsid))

    # if networks were passed in, let's set them in the
    # global section
    if args.public_network:
        cfg.set('global', 'public network', str(args.public_network))

    if args.cluster_network:
        cfg.set('global', 'cluster network', str(args.cluster_network))

    mon_initial_members = []
    mon_host = []

    for (name, host) in mon_hosts(args.mon):
        # Try to ensure we can ssh in properly before anything else
        if args.ssh_copykey:
            ssh_copy_keys(host, args.username)

        # Now get the non-local IPs from the remote node
        distro = hosts.get(host, username=args.username)
        remote_ips = net.ip_addresses(distro.conn)

        # custom cluster names on sysvinit hosts won't work
        if distro.init == 'sysvinit' and args.cluster != 'ceph':
            LOG.error(
                'custom cluster names are not supported on sysvinit hosts')
            raise exc.ClusterNameError(
                'host %s does not support custom cluster names' % host)

        distro.conn.exit()

        # Validate subnets if we received any
        if args.public_network or args.cluster_network:
            validate_host_ip(remote_ips,
                             [args.public_network, args.cluster_network])

        # Pick the IP that matches the public cluster (if we were told to do
        # so) otherwise pick the first, non-local IP
        LOG.debug('Resolving host %s', host)
        if args.public_network:
            ip = get_public_network_ip(remote_ips, args.public_network)
        else:
            ip = net.get_nonlocal_ip(host)
        LOG.debug('Monitor %s at %s', name, ip)
        mon_initial_members.append(name)
        try:
            socket.inet_pton(socket.AF_INET6, ip)
            mon_host.append("[" + ip + "]")
            LOG.info('Monitors are IPv6, binding Messenger traffic on IPv6')
            cfg.set('global', 'ms bind ipv6', 'true')
        except socket.error:
            mon_host.append(ip)

    LOG.debug('Monitor initial members are %s', mon_initial_members)
    LOG.debug('Monitor addrs are %s', mon_host)

    cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
    # no spaces here, see http://tracker.newdream.net/issues/3145
    cfg.set('global', 'mon host', ','.join(mon_host))

    # override undesirable defaults, needed until bobtail

    # http://tracker.ceph.com/issues/6788
    cfg.set('global', 'auth cluster required', 'cephx')
    cfg.set('global', 'auth service required', 'cephx')
    cfg.set('global', 'auth client required', 'cephx')

    path = '{name}.conf'.format(name=args.cluster, )

    new_mon_keyring(args)

    LOG.debug('Writing initial config to %s...', path)
    tmp = '%s.tmp' % path
    with open(tmp, 'w') as f:
        cfg.write(f)
    try:
        os.rename(tmp, path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(path)
        else:
            raise
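The write-to-a-.tmp-file-then-rename ending shared by these variants can be read as one small reusable idiom; below is a sketch of it, not part of ceph-deploy, with write_atomic and exists_error as names invented here.

import errno
import os

def write_atomic(path, render, exists_error=RuntimeError):
    # Sketch of the idiom used above: render into '<path>.tmp', then rename
    # into place; EEXIST (raised on platforms where rename will not overwrite)
    # is mapped to a dedicated exception, anything else is re-raised.
    tmp = '%s.tmp' % path
    with open(tmp, 'w') as f:
        render(f)
    try:
        os.rename(tmp, path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exists_error(path)
        raise

# e.g. write_atomic(path, cfg.write, exc.ClusterExistsError)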
Example no. 6
def new(args):
    if args.ceph_conf:
        raise RuntimeError('will not create a ceph conf file if attempting to re-use with `--ceph-conf` flag')
    LOG.debug('Creating new cluster named %s', args.cluster)
    cfg = conf.ceph.CephConf()
    cfg.add_section('global')

    fsid = args.fsid or uuid.uuid4()
    cfg.set('global', 'fsid', str(fsid))

    # if networks were passed in, let's set them in the
    # global section
    if args.public_network:
        cfg.set('global', 'public network', str(args.public_network))

    if args.cluster_network:
        cfg.set('global', 'cluster network', str(args.cluster_network))

    mon_initial_members = []
    mon_host = []

    for (name, host) in mon_hosts(args.mon):
        # Try to ensure we can ssh in properly before anything else
        if args.ssh_copykey:
            ssh_copy_keys(host, args.username)

        # Now get the non-local IPs from the remote node
        distro = hosts.get(host, username=args.username)
        remote_ips = net.ip_addresses(distro.conn)
        distro.conn.exit()

        # Validate subnets if we received any
        if args.public_network or args.cluster_network:
            validate_host_ip(remote_ips, [args.public_network, args.cluster_network])

        # Pick the IP that matches the public cluster (if we were told to do
        # so) otherwise pick the first, non-local IP
        LOG.debug('Resolving host %s', host)
        if args.public_network:
            ip = get_public_network_ip(remote_ips, args.public_network)
        else:
            ip = net.get_nonlocal_ip(host)
        LOG.debug('Monitor %s at %s', name, ip)
        mon_initial_members.append(name)
        try:
            socket.inet_pton(socket.AF_INET6, ip)
            mon_host.append("[" + ip + "]")
            LOG.info('Monitors are IPv6, binding Messenger traffic on IPv6')
            cfg.set('global', 'ms bind ipv6', 'true')
        except socket.error:
            mon_host.append(ip)

    LOG.debug('Monitor initial members are %s', mon_initial_members)
    LOG.debug('Monitor addrs are %s', mon_host)

    cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
    # no spaces here, see http://tracker.newdream.net/issues/3145
    cfg.set('global', 'mon host', ','.join(mon_host))

    # override undesirable defaults, needed until bobtail

    # http://tracker.ceph.com/issues/6788
    cfg.set('global', 'auth cluster required', 'cephx')
    cfg.set('global', 'auth service required', 'cephx')
    cfg.set('global', 'auth client required', 'cephx')

    # http://tracker.newdream.net/issues/3138
    cfg.set('global', 'filestore xattr use omap', 'true')

    path = '{name}.conf'.format(
        name=args.cluster,
        )

    # FIXME: create a random key
    LOG.debug('Creating a random mon key...')
    mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % generate_auth_key()

    keypath = '{name}.mon.keyring'.format(
        name=args.cluster,
        )

    LOG.debug('Writing initial config to %s...', path)
    tmp = '%s.tmp' % path
    with file(tmp, 'w') as f:
        cfg.write(f)
    try:
        os.rename(tmp, path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(path)
        else:
            raise

    LOG.debug('Writing monitor keyring to %s...', keypath)
    tmp = '%s.tmp' % keypath
    with file(tmp, 'w') as f:
        f.write(mon_keyring)
    try:
        os.rename(tmp, keypath)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(keypath)
        else:
            raise
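The socket.inet_pton() probe in the monitor loop is what decides whether an address gets the bracketed IPv6 form; the same check isolated as a sketch, with format_mon_addr as a name invented for illustration.

import socket

def format_mon_addr(ip):
    # Wrap the address in brackets only if it parses as IPv6, mirroring
    # the check used when building the 'mon host' list above.
    try:
        socket.inet_pton(socket.AF_INET6, ip)
        return '[' + ip + ']'
    except socket.error:
        return ip

# format_mon_addr('10.0.0.1')  -> '10.0.0.1'
# format_mon_addr('fd00::1')   -> '[fd00::1]'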
Example no. 7
def new(args):
    LOG.debug('Creating new cluster named %s', args.cluster)
    cfg = conf.ceph.CephConf()
    cfg.add_section('global')

    fsid = uuid.uuid4()
    cfg.set('global', 'fsid', str(fsid))

    mon_initial_members = []
    mon_host = []

    for (name, host) in mon_hosts(args.mon):
        LOG.debug('Resolving host %s', host)
        ip = net.get_nonlocal_ip(host)
        LOG.debug('Monitor %s at %s', name, ip)
        mon_initial_members.append(name)
        try:
            socket.inet_pton(socket.AF_INET6, ip)
            mon_host.append("[" + ip + "]")
            LOG.info('Monitors are IPv6, binding Messenger traffic on IPv6')
            cfg.set('global', 'ms bind ipv6', 'true')
        except socket.error:
            mon_host.append(ip)

        if args.ssh_copykey:
            ssh_copy_keys(host, args.username)

    LOG.debug('Monitor initial members are %s', mon_initial_members)
    LOG.debug('Monitor addrs are %s', mon_host)

    cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
    # no spaces here, see http://tracker.newdream.net/issues/3145
    cfg.set('global', 'mon host', ','.join(mon_host))

    # override undesirable defaults, needed until bobtail

    # http://tracker.ceph.com/issues/6788
    cfg.set('global', 'auth cluster required', 'cephx')
    cfg.set('global', 'auth service required', 'cephx')
    cfg.set('global', 'auth client required', 'cephx')

    # http://tracker.newdream.net/issues/3138
    cfg.set('global', 'filestore xattr use omap', 'true')

    path = '{name}.conf'.format(name=args.cluster, )

    # FIXME: create a random key
    LOG.debug('Creating a random mon key...')
    mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % generate_auth_key()

    keypath = '{name}.mon.keyring'.format(name=args.cluster, )

    LOG.debug('Writing initial config to %s...', path)
    tmp = '%s.tmp' % path
    with file(tmp, 'w') as f:
        cfg.write(f)
    try:
        os.rename(tmp, path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(path)
        else:
            raise

    LOG.debug('Writing monitor keyring to %s...', keypath)
    tmp = '%s.tmp' % keypath
    with file(tmp, 'w') as f:
        f.write(mon_keyring)
    try:
        os.rename(tmp, keypath)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(keypath)
        else:
            raise
Example no. 8
def new(args):
    LOG.debug('Creating new cluster named %s', args.cluster)
    cfg = conf.ceph.CephConf()
    cfg.add_section('global')

    fsid = uuid.uuid4()
    cfg.set('global', 'fsid', str(fsid))

    mon_initial_members = []
    mon_host = []

    for (name, host) in mon_hosts(args.mon):
        LOG.debug('Resolving host %s', host)
        ip = net.get_nonlocal_ip(host)
        LOG.debug('Monitor %s at %s', name, ip)
        mon_initial_members.append(name)
        try:
            socket.inet_pton(socket.AF_INET6, ip)
            mon_host.append("[" + ip + "]")
            LOG.info('Monitors are IPv6, binding Messenger traffic on IPv6')
            cfg.set('global', 'ms bind ipv6', 'true')
        except socket.error:
            mon_host.append(ip)

        if args.ssh_copykey:
            ssh_copy_keys(host, args.username)

    LOG.debug('Monitor initial members are %s', mon_initial_members)
    LOG.debug('Monitor addrs are %s', mon_host)

    cfg.set('global', 'mon initial members', ', '.join(mon_initial_members))
    # no spaces here, see http://tracker.newdream.net/issues/3145
    cfg.set('global', 'mon host', ','.join(mon_host))

    # override undesirable defaults, needed until bobtail

    # http://tracker.ceph.com/issues/6788
    cfg.set('global', 'auth cluster required', 'cephx')
    cfg.set('global', 'auth service required', 'cephx')
    cfg.set('global', 'auth client required', 'cephx')

    # http://tracker.newdream.net/issues/3138
    cfg.set('global', 'filestore xattr use omap', 'true')

    path = '{name}.conf'.format(
        name=args.cluster,
        )

    # FIXME: create a random key
    LOG.debug('Creating a random mon key...')
    mon_keyring = '[mon.]\nkey = %s\ncaps mon = allow *\n' % generate_auth_key()

    keypath = '{name}.mon.keyring'.format(
        name=args.cluster,
        )

    LOG.debug('Writing initial config to %s...', path)
    tmp = '%s.tmp' % path
    with file(tmp, 'w') as f:
        cfg.write(f)
    try:
        os.rename(tmp, path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(path)
        else:
            raise

    LOG.debug('Writing monitor keyring to %s...', keypath)
    tmp = '%s.tmp' % keypath
    with file(tmp, 'w') as f:
        f.write(mon_keyring)
    try:
        os.rename(tmp, keypath)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(keypath)
        else:
            raise
Example no. 9
def new(args):
    if args.ceph_conf:
        raise RuntimeError("will not create a ceph conf file if attemtping to re-use with `--ceph-conf` flag")
    LOG.debug("Creating new cluster named %s", args.cluster)
    cfg = conf.ceph.CephConf()
    cfg.add_section("global")

    fsid = args.fsid or uuid.uuid4()
    cfg.set("global", "fsid", str(fsid))

    mon_initial_members = []
    mon_host = []

    for (name, host) in mon_hosts(args.mon):
        LOG.debug("Resolving host %s", host)
        ip = net.get_nonlocal_ip(host)
        LOG.debug("Monitor %s at %s", name, ip)
        mon_initial_members.append(name)
        try:
            socket.inet_pton(socket.AF_INET6, ip)
            mon_host.append("[" + ip + "]")
            LOG.info("Monitors are IPv6, binding Messenger traffic on IPv6")
            cfg.set("global", "ms bind ipv6", "true")
        except socket.error:
            mon_host.append(ip)

        if args.ssh_copykey:
            ssh_copy_keys(host, args.username)

    LOG.debug("Monitor initial members are %s", mon_initial_members)
    LOG.debug("Monitor addrs are %s", mon_host)

    cfg.set("global", "mon initial members", ", ".join(mon_initial_members))
    # no spaces here, see http://tracker.newdream.net/issues/3145
    cfg.set("global", "mon host", ",".join(mon_host))

    # override undesirable defaults, needed until bobtail

    # http://tracker.ceph.com/issues/6788
    cfg.set("global", "auth cluster required", "cephx")
    cfg.set("global", "auth service required", "cephx")
    cfg.set("global", "auth client required", "cephx")

    # http://tracker.newdream.net/issues/3138
    cfg.set("global", "filestore xattr use omap", "true")

    path = "{name}.conf".format(name=args.cluster)

    # FIXME: create a random key
    LOG.debug("Creating a random mon key...")
    mon_keyring = "[mon.]\nkey = %s\ncaps mon = allow *\n" % generate_auth_key()

    keypath = "{name}.mon.keyring".format(name=args.cluster)

    LOG.debug("Writing initial config to %s...", path)
    tmp = "%s.tmp" % path
    with file(tmp, "w") as f:
        cfg.write(f)
    try:
        os.rename(tmp, path)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(path)
        else:
            raise

    LOG.debug("Writing monitor keyring to %s...", keypath)
    tmp = "%s.tmp" % keypath
    with file(tmp, "w") as f:
        f.write(mon_keyring)
    try:
        os.rename(tmp, keypath)
    except OSError as e:
        if e.errno == errno.EEXIST:
            raise exc.ClusterExistsError(keypath)
        else:
            raise
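For reference, the mon_keyring string assembled above expands to a keyring file of the following shape; the key value is a placeholder, and generate_auth_key() is assumed to return a base64-encoded secret.

[mon.]
key = <base64 secret from generate_auth_key()>
caps mon = allow *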
Example no. 10
def prepare(args, cfg, activate_prepared_disk):
    LOG.debug(
        'Preparing cluster %s disks %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.disk),
        )
    if args.filestore:
        if args.fs_type == 'f2fs':
            LOG.debug('start run prepare_all_disk')

            admin_keyring_path = '/Ceph/Meta/Keyring/client.admin.keyring'
            admin_key = get_admin_key()

            errors = 0
            # hostname, pIP, cIP, disk, journal, strategy, mode, nvme_def_used, nvme_tt_nums, ssd_def_used,ssd_tt_used, hdd_def_used, hdd_tt_nums, cache_nums, cache_dis_type
            for hostname, pIP, cIP, disk, journal, strategy, mode, nvme_def_used, nvme_tt_nums, ssd_def_used,ssd_tt_used, hdd_def_used, hdd_tt_nums, cache_nums, cache_dis_type in args.disk:
                try:
                    distro = hosts.get(
                        hostname,
                        username=args.username,
                        callbacks=[packages.ceph_is_installed]
                    )

                    LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
                    rlogger = logging.getLogger(hostname)

                    # get client.admin.keyring from manager node
                    if not distro.conn.remote_module.path_exists(admin_keyring_path):
                        LOG.debug('client.admin.keyring does not exist yet, creating one')
                        distro.conn.remote_module.write_keyring(admin_keyring_path, admin_key)

                    # ensure remote hostname is good to go
                    #hostname_is_compatible(distro.conn, rlogger, hostname)
                    localIP = net.get_nonlocal_ip(hostname)
                    LOG.debug('get host ip: %s', localIP)
                    LOG.debug('Create:add mon to ceph.conf')
                    distro.mon.create_all(distro, args, hostname, pIP, cIP, disk, journal, strategy, mode, nvme_def_used, nvme_tt_nums, ssd_def_used,ssd_tt_used, hdd_def_used, hdd_tt_nums, cache_nums, cache_dis_type)
                    distro.conn.exit()
                except RuntimeError as e:
                    LOG.error(e)
                    errors += 1

            # start sync all osd ceph.conf
            conf_data = conf.ceph.load_raw(args)
            errnos = 0
            for hostname, pIP, cIP, disk, journal, strategy, mode, nvme_def_used, nvme_tt_nums, ssd_def_used, ssd_tt_used, hdd_def_used, hdd_tt_nums, cache_nums, cache_dis_type in args.disk:
                LOG.debug('sync configfile for host %s ...', hostname)
                try:
                    distro = hosts.get(hostname, username=args.username)
                    distro.conn.remote_module.write_conf(
                        args.cluster,
                        conf_data,
                        args.overwrite_conf,
                    )
                    distro.conn.exit()
                except RuntimeError as e:
                    LOG.error(e)
                    errnos += 1
            if errnos:
                raise exc.GenericError('Failed to sync configfile to %d hosts' % errnos)
            return

    hosts_in_danger = exceeds_max_osds(args)

    if hosts_in_danger:
        LOG.warning('if ``kernel.pid_max`` is not increased to a high enough value')
        LOG.warning('the following hosts will encounter issues:')
        for host, count in hosts_in_danger.items():
            LOG.warning('Host: %8s, OSDs: %s' % (host, count))

    key = get_bootstrap_osd_key(cluster=args.cluster)

    bootstrapped = set()
    errors = 0
    for hostname, disk, journal in args.disk:
        localIP = net.get_nonlocal_ip(hostname)
        LOG.debug('hostname:%s ; ip:%s' % (hostname, localIP))
        try:
            if disk is None:
                raise exc.NeedDiskError(hostname)

            distro = hosts.get(
                hostname,
                username=args.username,
                callbacks=[packages.ceph_is_installed]
            )
            LOG.info(
                'Distro info: %s %s %s',
                distro.name,
                distro.release,
                distro.codename
            )

            if hostname not in bootstrapped:
                bootstrapped.add(hostname)
                LOG.debug('Deploying osd to %s', hostname)

                conf_data = conf.ceph.load_raw(args)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data,
                    args.overwrite_conf
                )

                create_osd_keyring(distro.conn, args.cluster, key)

            LOG.debug('Preparing host %s disk %s journal %s activate %s',
                      hostname, disk, journal, activate_prepared_disk)

            storetype = None
            if args.bluestore:
                storetype = 'bluestore'
            if args.filestore:
                storetype = 'filestore'

            prepare_disk(
                distro.conn,
                cluster=args.cluster,
                disk=disk,
                journal=journal,
                activate_prepared_disk=activate_prepared_disk,
                init=distro.init,
                zap=args.zap_disk,
                fs_type=args.fs_type,
                dmcrypt=args.dmcrypt,
                dmcrypt_dir=args.dmcrypt_key_dir,
                storetype=storetype,
                block_wal=args.block_wal,
                block_db=args.block_db
            )

            # give the OSD a few seconds to start
            time.sleep(5)
            catch_osd_errors(distro.conn, distro.conn.logger, args)
            LOG.debug('Host %s is now ready for osd use.', hostname)
            distro.conn.exit()

        except RuntimeError as e:
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d OSDs' % errors)
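A sketch of the args.disk shapes this prepare() variant consumes, derived from its two unpacking loops; the hostnames and device paths are made up.

# Regular path: (hostname, disk, journal) triples.
disk_args = [
    ('osd-host-1', '/dev/sdb', '/dev/sdc1'),
    ('osd-host-1', '/dev/sdd', None),   # journal may be absent
]

# filestore + f2fs path: 15-field tuples per entry, in the order unpacked above:
# (hostname, pIP, cIP, disk, journal, strategy, mode, nvme_def_used,
#  nvme_tt_nums, ssd_def_used, ssd_tt_used, hdd_def_used, hdd_tt_nums,
#  cache_nums, cache_dis_type)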
Example no. 11
def mon_create(args):

    cfg = conf.ceph.load(args)
    if not args.mon:
        args.mon = get_mon_initial_members(args, error_on_empty=True, _cfg=cfg)

    if args.keyrings:
        monitor_keyring = concatenate_keyrings(args)
    else:
        keyring_path = '/root/{cluster}.mon.keyring'.format(cluster=args.cluster)
        try:
            monitor_keyring = files.read_file(keyring_path)
        except IOError:
            LOG.warning('keyring (%s) not found, creating a new one' % keyring_path)
            new_mon_keyring(args)
            monitor_keyring = files.read_file(keyring_path)

    LOG.debug(
        'Deploying mon, cluster %s hosts %s',
        args.cluster,
        ' '.join(args.mon),
        )

    errors = 0
    mon_no = None
    for (name, host) in mon_hosts(args.mon):
        try:
            # TODO add_bootstrap_peer_hint
            LOG.debug('detecting platform for host %s ...', name)
            distro = hosts.get(
                host,
                username=args.username,
                callbacks=[packages.ceph_is_installed]
            )
            LOG.info('distro info: %s %s %s', distro.name, distro.release, distro.codename)
            rlogger = logging.getLogger(name)

            # ensure remote hostname is good to go
            hostname_is_compatible(distro.conn, rlogger, name)
            LOG.debug('write mon config to /etc/ceph.conf')
            mon_ip = net.get_nonlocal_ip(name)
            LOG.debug('get host ip : %s', mon_ip)
            LOG.debug('Create:add mon to ceph.conf')
            mon_no = add_mon_conf(name, mon_ip)
            rlogger.debug('deploying mon to %s, mon_index: %s', name, mon_no.split('.')[1])
            distro.mon.create(distro, args, monitor_keyring, mon_no.split('.')[1])

            # tell me the status of the deployed mon
            time.sleep(5)  # give some room to start
            mon_status(distro.conn, rlogger, mon_no.split('.')[1], args)
            catch_mon_errors(distro.conn, rlogger, name, mon_no.split('.')[1], cfg, args)
            distro.conn.exit()

        except RuntimeError as e:
            del_conf(mon_no)
            LOG.error(e)
            errors += 1

    if errors:
        raise exc.GenericError('Failed to create %d monitors' % errors)
    # cp clientkey for create osd
    cmd = "cp /Ceph/Meta/Keyring/client.admin.keyring /root/"
    (ret, out) = commands.getstatusoutput(cmd)
    # start sync all monitors ceph.conf
    conf_data = conf.ceph.load_raw(args)
    errnos = 0
    for (name, host) in mon_hosts(args.mon):
        LOG.debug('sync configfile for host %s ...', name)
        try:
            distro = hosts.get(host, username=args.username)
            distro.conn.remote_module.write_conf(
                args.cluster,
                conf_data,
                args.overwrite_conf,
            )
            distro.conn.exit()
        except RuntimeError as e:
            LOG.error(e)
            errnos += 1
    if errnos:
        raise exc.GenericError('Failed to sync configfile to %d monitors' % errnos)
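Both loops in mon_create() (deploy, then config sync) follow the same per-host error-accounting pattern; a generic sketch of that idiom follows, where run_per_host and deploy_one are illustrative names rather than ceph-deploy API, and a plain RuntimeError stands in for exc.GenericError.

import logging

LOG = logging.getLogger(__name__)

def run_per_host(pairs, action, failure_message):
    # Run action(name, host) for each pair, log and count RuntimeErrors,
    # and fail once at the end -- the pattern mon_create() uses twice above.
    errors = 0
    for name, host in pairs:
        try:
            action(name, host)
        except RuntimeError as e:
            LOG.error(e)
            errors += 1
    if errors:
        raise RuntimeError(failure_message % errors)

# e.g. run_per_host(mon_hosts(args.mon), deploy_one, 'Failed to create %d monitors')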