Example #1
def _config_add_monitors(config, monitors):
    for m, v in monitors.iteritems():
        section = 'mon.' + m
        config.add_section(section)
        config.set(section, 'host', v['name'])
        config.set(section, 'mon addr',
                   '%s:%s' % (v['address'], v.get('port', _DEFAULT_MON_PORT)))
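
A minimal usage sketch for the helper above, assuming the function is in scope, that `_DEFAULT_MON_PORT` is Ceph's standard monitor port (6789), and a hypothetical monitors dict in the shape the later docstrings describe ({ID: {'name': ..., 'address': ..., 'port': ...}}):

import sys
import ConfigParser

_DEFAULT_MON_PORT = 6789  # assumption: Ceph's standard monitor port

monitors = {
    'a': {'name': 'mon-host-1', 'address': '10.0.0.11', 'port': 6789},
    'b': {'name': 'mon-host-2', 'address': '10.0.0.12'},  # falls back to _DEFAULT_MON_PORT
}

config = ConfigParser.RawConfigParser()
_config_add_monitors(config, monitors)
config.write(sys.stdout)
# Writes sections such as:
# [mon.a]
# host = mon-host-1
# mon addr = 10.0.0.11:6789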
Example #2
def add_ceph_mon(cluster_name, minions):
    conf_file = _CEPH_CLUSTER_CONF_DIR + "/" + cluster_name + "/" + cluster_name + ".conf"
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get("global", "public network"))
    check_minion_networks(minions, public_network)

    used_mon_ids = set([id.strip() for id in config.get("mon", "mon initial members").split(",")])
    unused_mon_ids = list(set(_MON_ID_LIST) - used_mon_ids)
    unused_mon_ids.sort()

    mon_id_map, monitors = _get_mon_id_map(unused_mon_ids, minions)

    mon_initial_members = list(used_mon_ids) + list(monitors)
    mon_initial_members.sort()
    config.set("mon", "mon initial members", ", ".join(mon_initial_members))

    _config_add_monitors(config, monitors)

    with open(conf_file, "wb") as f:
        config.write(f)

    pillar_data = _add_ceph_mon_pillar_data(mon_id_map, cluster_name, monitors)
    pillar = {"usm": pillar_data}

    out = run_state(local, minions, "add_ceph_mon", expr_form="list", kwarg={"pillar": pillar})
    if out:
        return out

    return sync_ceph_conf(cluster_name, minions)
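
For context, the monitor-ID bookkeeping in the function above is plain set arithmetic over a fixed pool of IDs; a small sketch, assuming `_MON_ID_LIST` is simply the lowercase alphabet (the actual constant is not shown in these examples):

import string

_MON_ID_LIST = list(string.ascii_lowercase)      # assumption: 'a'..'z'
used_mon_ids = set(['a', 'b'])                   # IDs already in 'mon initial members'

unused_mon_ids = list(set(_MON_ID_LIST) - used_mon_ids)
unused_mon_ids.sort()
print unused_mon_ids[:3]                         # ['c', 'd', 'e'] -> candidates for _get_mon_id_map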
Example #4
def AddMon(cluster_name, minions, ctxt=""):
    # convert list of minions to below dict
    # {MINION_ID: {'public_ip': IP_ADDRESS,
    #              'cluster_ip': IP_ADDRESS}, ...}
    d = {}
    for m in minions:
        d.update({m['Node']: {'public_ip': m['PublicIP4'],
                              'cluster_ip': m['ClusterIP4']}})
    minion_set = minions
    minions = d

    conf_file = (_CEPH_CLUSTER_CONF_DIR + '/' + cluster_name + '/' +
                 cluster_name + '.conf')
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get('global', 'public network'))
    _check_minion_networks(minions, public_network)

    used_mon_ids = set([id.strip() for id in config.get(
        'mon', 'mon initial members').split(',')])
    unused_mon_ids = list(set(_MON_ID_LIST) - used_mon_ids)
    unused_mon_ids.sort()

    mon_id_map, monitors = _get_mon_id_map(unused_mon_ids, minions)

    mon_initial_members = list(used_mon_ids) + list(monitors)
    mon_initial_members.sort()
    config.set('mon', 'mon initial members', ', '.join(mon_initial_members))

    _config_add_monitors(config, monitors)

    with open(conf_file, 'wb') as f:
        config.write(f)

    pillar_data = _add_ceph_mon_pillar_data(mon_id_map, cluster_name, monitors)
    pillar = {'skyring': pillar_data}

    local = salt.client.LocalClient()
    out = run_state(local, minions, 'add_ceph_mon', expr_form='list',
                    kwarg={'pillar': pillar})
    if out:
        log.error('%s-add_mon failed for %s. error=%s' %
                  (ctxt, minion_set, out))
        raise Exception('add_mon failed for %s. error=%s' %
                        (minion_set, out))

    out = sync_ceph_conf(cluster_name, minions)
    if out:
        log.error("%s-sync_ceph_conf failed to %s. error=%s" %
                  (ctxt, minions, out))
        raise Exception("sync_ceph_conf failed to %s. error=%s" %
                        (minions, out))

    return True
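
A hypothetical call showing the input shape AddMon expects before the conversion at the top of the function (cluster name, hostname, and addresses are made up):

minions = [
    {'Node': 'ceph-node2.example.com',
     'PublicIP4': '10.0.0.12',
     'ClusterIP4': '192.168.0.12'},
]
AddMon('ceph', minions, ctxt='req-1234')    # returns True or raises on failure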
Example #5
def add_ceph_osd(cluster_name, minions):
    """
    :: minions = {MINION_ID: {'public_ip': IP_ADDRESS,
                              'cluster_ip': IP_ADDRESS,
                              'host_name': HOSTNAME,
                              'devices': {DEVICE: FSTYPE, ...}}, ...}

    """
    conf_file = _CEPH_CLUSTER_CONF_DIR + "/" + cluster_name + "/" + cluster_name + ".conf"
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get("global", "public network"))
    if config.has_option("global", "cluster network"):
        cluster_network = IPNetwork(config.get("global", "cluster network"))
    else:
        cluster_network = None
    public_network, cluster_network = check_minion_networks(
        minions, public_network, cluster_network, check_cluster_network=True
    )

    pillar_data = {}
    for minion, v in minions.iteritems():
        pillar_data[minion] = {
            "cluster_name": cluster_name,
            "cluster_id": config.get("global", "fsid"),
            "devices": v["devices"],
        }
    pillar = {"usm": pillar_data}

    out = run_state(local, minions, "prepare_ceph_osd", expr_form="list", kwarg={"pillar": pillar})
    if out:
        return out

    out = local.cmd(minions, "cmd.run_all", ["ceph-disk activate-all"], expr_form="list")

    osd_map = {}
    failed_minions = {}
    for minion, v in out.iteritems():
        osds = []

        if v.get("retcode") != 0:
            failed_minions[minion] = v
            continue

        for line in v["stdout"].splitlines():
            if line.startswith("=== "):
                osds.append(line.split("=== ")[1].strip())
                break
        osd_map[minion] = osds

    config.set("global", "cluster network", cluster_network)
    for minion, osds in osd_map.iteritems():
        name = minions[minion].get("host_name", utils.get_short_hostname(minion))
        for osd in osds:
            config.add_section(osd)
            config.set(osd, "host", name)
            config.set(osd, "public addr", minions[minion]["public_ip"])
            config.set(osd, "cluster addr", minions[minion]["cluster_ip"])

    with open(conf_file, "wb") as f:
        config.write(f)

    sync_ceph_conf(cluster_name, minions)

    return failed_minions
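
The osd_map loop above keys off output lines that begin with "=== "; a small illustration of that parsing, using a hypothetical stdout (the exact header format comes from ceph-disk and may differ between versions):

stdout = "=== osd.0\nStarting Ceph osd.0 on ceph-node1...\n=== osd.1\n..."

osds = []
for line in stdout.splitlines():
    if line.startswith("=== "):
        osds.append(line.split("=== ")[1].strip())
        break                # the original loop stops after the first header
print osds                   # ['osd.0'] -> later used as a config section name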
Example #6
def _gen_ceph_cluster_conf(
    conf_file,
    cluster_name,
    fsid,
    monitors,
    public_network,
    osd_journal_size=1024,
    osd_pool_default_size=2,
    osd_pool_default_min_size=1,
    osd_pool_default_pg_num=128,
    osd_pool_default_pgp_num=128,
    osd_crush_chooseleaf_type=1,
):
    """
    :: monitors = {ID: {'name': SHORT_HOSTNAME, 'address': IP_ADDR,
                        'port': INT}, ...}
    """
    config = ConfigParser.RawConfigParser()

    config.add_section("global")
    config.set("global", "fsid", fsid)
    config.set("global", "public network", public_network)
    config.set("global", "auth cluster required", "cephx")
    config.set("global", "auth service required", "cephx")
    config.set("global", "auth client required", "cephx")
    config.set("global", "osd journal size", osd_journal_size)
    config.set("global", "filestore xattr use omap", "true")
    config.set("global", "osd pool default size", osd_pool_default_size)
    config.set("global", "osd pool default min size", osd_pool_default_min_size)
    config.set("global", "osd pool default pg num", osd_pool_default_pg_num)
    config.set("global", "osd pool default pgp num", osd_pool_default_pgp_num)
    config.set("global", "osd crush chooseleaf type", osd_crush_chooseleaf_type)

    config.add_section("mon")
    config.set("mon", "mon initial members", ", ".join(monitors))
    _config_add_monitors(config, monitors)

    with open(conf_file, "wb") as f:
        config.write(f)
    return True
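
A usage sketch for the generator above, assuming `_config_add_monitors` is in scope and using made-up path, fsid, network, and monitor values in the format documented in the docstring:

monitors = {
    'a': {'name': 'ceph-node1', 'address': '10.0.0.11', 'port': 6789},
}
_gen_ceph_cluster_conf(
    '/tmp/ceph.conf',                            # hypothetical output path
    'ceph',
    'b979e781-6226-4217-b4e6-0f8927a36a68',      # hypothetical fsid
    monitors,
    '10.0.0.0/24',
    osd_pool_default_size=3,
)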
Example #7
def _config_add_monitors(config, monitors):
    for m, v in monitors.iteritems():
        section = "mon." + m
        config.add_section(section)
        config.set(section, "host", v["name"])
        config.set(section, "mon addr", "%s:%s" % (v["address"], v.get("port", _DEFAULT_MON_PORT)))
Example #8
def AddOSD(cluster_name, minions, ctxt=""):
    # convert minions dict to below dict
    # {MINION_ID: {'public_ip': IP_ADDRESS,
    #              'cluster_ip': IP_ADDRESS,
    #              'devices': {DEVICE: FSTYPE, ...}}, ...}
    d = {minions['Node']: {'public_ip': minions['PublicIP4'],
                           'cluster_ip': minions['ClusterIP4'],
                           'devices': {
                               minions['Device']: minions['FSType'],
                           }}}
    minions = d

    conf_file = (_CEPH_CLUSTER_CONF_DIR + '/' + cluster_name + '/' +
                 cluster_name + '.conf')
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get('global', 'public network'))
    if config.has_option('global', 'cluster network'):
        cluster_network = IPNetwork(config.get('global', 'cluster network'))
    else:
        cluster_network = None
    public_network, cluster_network = _check_minion_networks(
        minions, public_network, cluster_network, check_cluster_network=True)

    pillar_data = {}
    for minion, v in minions.iteritems():
        pillar_data[minion] = {'cluster_name': cluster_name,
                               'cluster_id': config.get('global', 'fsid'),
                               'devices': v['devices']}
    pillar = {'skyring': pillar_data}

    local = salt.client.LocalClient()
    out = run_state(local, minions, 'prepare_ceph_osd', expr_form='list',
                    kwarg={'pillar': pillar})
    if out:
        log.error("%s-prepare_osd failed for %s. error=%s" %
                  (ctxt, minions, out))
        raise Exception("prepare_osd failed for %s. error=%s" %
                        (minions, out))

    for minion, v in minions.iteritems():
        count = 0
        found = False
        failed_devices = []
        while count < 6:
            out = local.cmd(minion, 'cmd.run_all', ['ls -l /dev/disk/by-parttypeuuid'])
            time.sleep(15)
            for key, value in v['devices'].iteritems():
                val_to_check = key.split('/')[-1]
                found = False
                for line in out[minion]["stdout"].splitlines():
                    if val_to_check in line:
                        found = True
                        if key in failed_devices:
                            failed_devices.remove(key)
                        break
                if not found:
                    if key not in failed_devices:
                        failed_devices.append(key)
                    break
            if found:
                break
            count += 1
        if len(failed_devices) != 0:
            log.error("%s-prepare_osd failed for %s" % (ctxt, failed_devices))
            raise Exception("prepare_osd failed for %s" % failed_devices)

    out = local.cmd(minions, 'cmd.run_all', ['ceph-disk activate-all'],
                    expr_form='list')

    osd_map = {}
    failed_minions = {}
    for minion, v in out.iteritems():
        osds = []

        if v.get('retcode') != 0:
            failed_minions[minion] = v
            continue

        for line in v['stdout'].splitlines():
            if line.startswith('=== '):
                osds.append(line.split('=== ')[1].strip())
                break
        osd_map[minion] = osds

    config.set('global', 'cluster network', cluster_network)
    for minion, osds in osd_map.iteritems():
        name = _get_short_hostname(minion)
        for osd in osds:
            config.add_section(osd)
            config.set(osd, 'host', name)
            config.set(osd, 'public addr', minions[minion]['public_ip'])
            config.set(osd, 'cluster addr', minions[minion]['cluster_ip'])

    with open(conf_file, 'wb') as f:
        config.write(f)

    out = sync_ceph_conf(cluster_name, minions)
    if out:
        log.error("%s-sync_cepH-conf failed for %s. error=%s" %
                  (ctxt, minions, out))
        #raise Exception("sync_ceph_conf failed for %s. error=%s" %
        #                (minions, out))

    if failed_minions:
        log.error('%s-add_osd failed. error=%s' % (ctxt, failed_minions))
        raise Exception('add_osd failed. error=%s' % failed_minions)

    return osd_map
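
The retry loop in the middle of AddOSD polls "ls -l /dev/disk/by-parttypeuuid" up to six times until every prepared device shows up in the listing. The same check pulled out as a standalone sketch (the helper name and return convention are mine, not from the source; `local` is a salt.client.LocalClient):

import time

def _wait_for_partitions(local, minion, devices, attempts=6, delay=15):
    # Poll the minion until each device's basename appears in the by-parttypeuuid listing.
    pending = set(dev.split('/')[-1] for dev in devices)
    for _ in range(attempts):
        out = local.cmd(minion, 'cmd.run_all', ['ls -l /dev/disk/by-parttypeuuid'])
        listing = out.get(minion, {}).get('stdout', '')
        pending = set(d for d in pending if d not in listing)
        if not pending:
            return True
        time.sleep(delay)
    return False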
Example #9
def _gen_ceph_cluster_conf(conf_file, cluster_name, fsid, monitors,
                           public_network,
                           osd_journal_size=1024,
                           osd_pool_default_size=2,
                           osd_pool_default_min_size=1,
                           osd_pool_default_pg_num=128,
                           osd_pool_default_pgp_num=128,
                           osd_crush_chooseleaf_type=1):
    '''
    :: monitors = {ID: {'name': SHORT_HOSTNAME, 'address': IP_ADDR,
                        'port': INT}, ...}
    '''
    config = ConfigParser.RawConfigParser()

    config.add_section('global')
    config.set('global', 'fsid', fsid)
    config.set('global', 'public network', public_network)
    config.set('global', 'auth cluster required', 'cephx')
    config.set('global', 'auth service required', 'cephx')
    config.set('global', 'auth client required', 'cephx')
    config.set('global', 'osd journal size', osd_journal_size)
    config.set('global', 'filestore xattr use omap', 'true')
    config.set('global', 'osd pool default size', osd_pool_default_size)
    config.set('global', 'osd pool default min size',
               osd_pool_default_min_size)
    config.set('global', 'osd pool default pg num', osd_pool_default_pg_num)
    config.set('global', 'osd pool default pgp num', osd_pool_default_pgp_num)
    config.set('global', 'osd crush chooseleaf type',
               osd_crush_chooseleaf_type)

    config.add_section('mon')
    config.set('mon', 'mon initial members', ', '.join(monitors))
    _config_add_monitors(config, monitors)

    with open(conf_file, 'wb') as f:
        config.write(f)
    return True
Example #10
def AddOSD(cluster_name, minions, ctxt=""):
    # convert minions dict to below dict
    # {MINION_ID: {'public_ip': IP_ADDRESS,
    #              'cluster_ip': IP_ADDRESS,
    #              'devices': {DEVICE: FSTYPE, ...}}, ...}
    d = {
        minions['Node']: {
            'public_ip': minions['PublicIP4'],
            'cluster_ip': minions['ClusterIP4'],
            'devices': {
                minions['Device']: minions['FSType'],
            }
        }
    }
    minions = d

    conf_file = (_CEPH_CLUSTER_CONF_DIR + '/' + cluster_name + '/' +
                 cluster_name + '.conf')
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get('global', 'public network'))
    if config.has_option('global', 'cluster network'):
        cluster_network = IPNetwork(config.get('global', 'cluster network'))
    else:
        cluster_network = None
    public_network, cluster_network = _check_minion_networks(
        minions, public_network, cluster_network, check_cluster_network=True)

    pillar_data = {}
    for minion, v in minions.iteritems():
        pillar_data[minion] = {
            'cluster_name': cluster_name,
            'cluster_id': config.get('global', 'fsid'),
            'devices': v['devices']
        }
    pillar = {'skyring': pillar_data}

    local = salt.client.LocalClient()
    out = run_state(local,
                    minions,
                    'prepare_ceph_osd',
                    expr_form='list',
                    kwarg={'pillar': pillar})
    if out:
        log.error("%s-prepare_osd failed for %s. error=%s" %
                  (ctxt, minions, out))
        raise Exception("prepare_osd failed for %s. error=%s" % (minions, out))

    for minion, v in minions.iteritems():
        count = 0
        found = False
        failed_devices = []
        while count < 6:
            out = local.cmd(minion, 'cmd.run_all',
                            ['ls -l /dev/disk/by-parttypeuuid'])
            time.sleep(15)
            for key, value in v['devices'].iteritems():
                val_to_check = key.split('/')[-1]
                found = False
                for line in out[minion]["stdout"].splitlines():
                    if val_to_check in line:
                        found = True
                        if key in failed_devices:
                            failed_devices.remove(key)
                        break
                if not found:
                    if key not in failed_devices:
                        failed_devices.append(key)
                    break
            if found:
                break
            count += 1
        if len(failed_devices) != 0:
            log.error("%s-prepare_osd failed for %s" % (ctxt, failed_devices))
            raise Exception("prepare_osd failed for %s" % failed_devices)

    out = local.cmd(minions,
                    'cmd.run_all', ['ceph-disk activate-all'],
                    expr_form='list')

    osd_map = {}
    failed_minions = {}
    for minion, v in out.iteritems():
        osds = []

        if v.get('retcode') != 0:
            failed_minions[minion] = v
            continue

        for line in v['stdout'].splitlines():
            if line.startswith('=== '):
                osds.append(line.split('=== ')[1].strip())
                break
        osd_map[minion] = osds

    config.set('global', 'cluster network', cluster_network)
    for minion, osds in osd_map.iteritems():
        name = _get_short_hostname(minion)
        for osd in osds:
            config.add_section(osd)
            config.set(osd, 'host', name)
            config.set(osd, 'public addr', minions[minion]['public_ip'])
            config.set(osd, 'cluster addr', minions[minion]['cluster_ip'])

    with open(conf_file, 'wb') as f:
        config.write(f)

    out = sync_ceph_conf(cluster_name, minions)
    if out:
        log.error("%s-sync_cepH-conf failed for %s. error=%s" %
                  (ctxt, minions, out))
        #raise Exception("sync_ceph_conf failed for %s. error=%s" %
        #                (minions, out))

    if failed_minions:
        log.error('%s-add_osd failed. error=%s' % (ctxt, failed_minions))
        raise Exception('add_osd failed. error=%s' % failed_minions)

    return osd_map
Example #11
def AddMon(cluster_name, minions, ctxt=""):
    # convert list of minions to below dict
    # {MINION_ID: {'public_ip': IP_ADDRESS,
    #              'cluster_ip': IP_ADDRESS}, ...}
    d = {}
    for m in minions:
        d.update({
            m['Node']: {
                'public_ip': m['PublicIP4'],
                'cluster_ip': m['ClusterIP4']
            }
        })
    minion_set = minions
    minions = d

    conf_file = (_CEPH_CLUSTER_CONF_DIR + '/' + cluster_name + '/' +
                 cluster_name + '.conf')
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)

    public_network = IPNetwork(config.get('global', 'public network'))
    _check_minion_networks(minions, public_network)

    used_mon_ids = set([
        id.strip()
        for id in config.get('mon', 'mon initial members').split(',')
    ])
    unused_mon_ids = list(set(_MON_ID_LIST) - used_mon_ids)
    unused_mon_ids.sort()

    mon_id_map, monitors = _get_mon_id_map(unused_mon_ids, minions)

    mon_initial_members = list(used_mon_ids) + list(monitors)
    mon_initial_members.sort()
    config.set('mon', 'mon initial members', ', '.join(mon_initial_members))

    _config_add_monitors(config, monitors)

    with open(conf_file, 'wb') as f:
        config.write(f)

    pillar_data = _add_ceph_mon_pillar_data(mon_id_map, cluster_name, monitors)
    pillar = {'skyring': pillar_data}

    local = salt.client.LocalClient()
    out = run_state(local,
                    minions,
                    'add_ceph_mon',
                    expr_form='list',
                    kwarg={'pillar': pillar})
    if out:
        log.error('%s-add_mon failed for %s. error=%s' %
                  (ctxt, minion_set, out))
        raise Exception('add_mon failed for %s. error=%s' % (minion_set, out))

    out = sync_ceph_conf(cluster_name, minions)
    if out:
        log.error("%s-sync_ceph_conf failed to %s. error=%s" %
                  (ctxt, minions, out))
        raise Exception("sync_ceph_conf failed to %s. error=%s" %
                        (minions, out))

    return True
Example #12
def _gen_ceph_cluster_conf(conf_file,
                           cluster_name,
                           fsid,
                           monitors,
                           public_network,
                           osd_journal_size=1024,
                           osd_pool_default_size=2,
                           osd_pool_default_min_size=1,
                           osd_pool_default_pg_num=128,
                           osd_pool_default_pgp_num=128,
                           osd_crush_chooseleaf_type=1):
    '''
    :: monitors = {ID: {'name': SHORT_HOSTNAME, 'address': IP_ADDR,
                        'port': INT}, ...}
    '''
    config = ConfigParser.RawConfigParser()

    config.add_section('global')
    config.set('global', 'fsid', fsid)
    config.set('global', 'public network', public_network)
    config.set('global', 'auth cluster required', 'cephx')
    config.set('global', 'auth service required', 'cephx')
    config.set('global', 'auth client required', 'cephx')
    config.set('global', 'osd journal size', osd_journal_size)
    config.set('global', 'filestore xattr use omap', 'true')
    config.set('global', 'osd pool default size', osd_pool_default_size)
    config.set('global', 'osd pool default min size',
               osd_pool_default_min_size)
    config.set('global', 'osd pool default pg num', osd_pool_default_pg_num)
    config.set('global', 'osd pool default pgp num', osd_pool_default_pgp_num)
    config.set('global', 'osd crush chooseleaf type',
               osd_crush_chooseleaf_type)

    config.add_section('mon')
    config.set('mon', 'mon initial members', ', '.join(monitors))
    _config_add_monitors(config, monitors)

    with open(conf_file, 'wb') as f:
        config.write(f)
    return True