Code Example #1
def create_pools(ctx, clients):
    """Create replicated or erasure coded data pools for rgw."""

    log.info('Creating data pools')
    for client in clients:
        log.debug("Obtaining remote for client {}".format(client))
        (remote, ) = ctx.cluster.only(client).remotes.iterkeys()
        data_pool = 'default.rgw.buckets.data'
        cluster_name, daemon_type, client_id = teuthology.split_role(client)

        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client,
                           ctx.rgw.data_pool_pg_size,
                           ctx.rgw.erasure_code_profile, cluster_name, 'rgw')
        else:
            create_replicated_pool(remote, data_pool,
                                   ctx.rgw.data_pool_pg_size, cluster_name,
                                   'rgw')

        index_pool = 'default.rgw.buckets.index'
        create_replicated_pool(remote, index_pool, ctx.rgw.index_pool_pg_size,
                               cluster_name, 'rgw')

        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64 * 1024 * 1024, cluster_name)
    log.debug('Pools created')
    yield
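All of these snippets lean on two helpers from teuthology's rados utilities. Their real definitions live in qa/tasks/util/rados.py and vary by branch; what follows is a minimal sketch of the assumed behavior (the exact `remote.run` arguments and the `application` handling are assumptions, not the canonical code):

def create_replicated_pool(remote, name, pgnum, cluster_name='ceph',
                           application=None):
    # Assumed behavior: create a replicated pool via `ceph osd pool create`.
    remote.run(args=['sudo', 'ceph', '--cluster', cluster_name,
                     'osd', 'pool', 'create', name, str(pgnum), str(pgnum)])
    if application:
        remote.run(args=['sudo', 'ceph', '--cluster', cluster_name,
                         'osd', 'pool', 'application', 'enable',
                         name, application])


def create_ec_pool(remote, name, profile_name, pgnum, profile=None,
                   cluster_name='ceph', application=None):
    # Assumed behavior: register an erasure-code profile, then create an
    # erasure-coded pool that uses it. Note that in the example above the
    # client role string doubles as the profile name.
    profile_args = ['{}={}'.format(k, v) for k, v in (profile or {}).items()]
    remote.run(args=['sudo', 'ceph', '--cluster', cluster_name,
                     'osd', 'erasure-code-profile', 'set', profile_name] +
                    profile_args)
    remote.run(args=['sudo', 'ceph', '--cluster', cluster_name,
                     'osd', 'pool', 'create', name, str(pgnum), str(pgnum),
                     'erasure', profile_name])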
Code Example #2
File: rgw_multisite.py Project: IlsooByun/ceph
def create_zone_pools(ctx, zone):
    """ Create the data_pool for each placement type """
    gateway = zone.gateways[0]
    cluster = zone.cluster
    for pool_config in zone.data.get('placement_pools', []):
        pool_name = pool_config['val']['storage_classes']['STANDARD']['data_pool']
        if ctx.rgw.ec_data_pool:
            create_ec_pool(gateway.remote, pool_name, zone.name, 64,
                           ctx.rgw.erasure_code_profile, cluster.name, 'rgw')
        else:
            create_replicated_pool(gateway.remote, pool_name, 64, cluster.name, 'rgw')
Code Example #3
def create_zone_pools(ctx, zone):
    """ Create the data_pool for each placement type """
    gateway = zone.gateways[0]
    cluster = zone.cluster
    for pool_config in zone.data.get('placement_pools', []):
        pool_name = pool_config['val']['data_pool']
        if ctx.rgw.ec_data_pool:
            create_ec_pool(gateway.remote, pool_name, zone.name, 64,
                           ctx.rgw.erasure_code_profile, cluster.name, 'rgw')
        else:
            create_replicated_pool(gateway.remote, pool_name, 64, cluster.name, 'rgw')
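Examples #2 and #3 differ only in where `data_pool` lives inside each `placement_pools` entry: newer zone formats nest it under per-storage-class dicts, older formats keep it directly in `val`. An illustrative sketch of the two shapes the loops expect (pool names are made up):

# Newer layout (Example #2): data_pool nested under a storage class.
placement_pools_new = [{
    'key': 'default-placement',
    'val': {
        'index_pool': 'zone-a.rgw.buckets.index',
        'storage_classes': {
            'STANDARD': {'data_pool': 'zone-a.rgw.buckets.data'},
        },
    },
}]

# Older layout (Example #3): data_pool directly in 'val'.
placement_pools_old = [{
    'key': 'default-placement',
    'val': {
        'index_pool': 'zone-a.rgw.buckets.index',
        'data_pool': 'zone-a.rgw.buckets.data',
    },
}]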
Code Example #4
File: rgw.py Project: andrewschoen/ceph-qa-suite
def create_nonregion_pools(ctx, config, regions):
    """Create replicated or erasure coded data pools for rgw."""
    if regions:
        yield
        return

    log.info('creating data pools')
    for client in config.keys():
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        data_pool = '.rgw.buckets'
        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client, 64,
                           ctx.rgw.erasure_code_profile)
        else:
            create_replicated_pool(remote, data_pool, 64)
        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64*1024*1024)
    yield
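The bare `yield` statements (including the early `yield` plus `return` when `regions` is set) mark these functions as generator-based context managers: everything before the `yield` is setup, and teuthology composes them into a task stack. A minimal sketch of how such a task is typically driven, assuming the usual `@contextlib.contextmanager` wrapping and teuthology's `contextutil.nested` helper:

import contextlib
from teuthology import contextutil


@contextlib.contextmanager
def task(ctx, config):
    regions = {}  # illustrative: no regions configured
    # Subtasks enter in order and unwind in reverse when the block exits.
    with contextutil.nested(
            lambda: create_nonregion_pools(ctx=ctx, config=config,
                                           regions=regions),
    ):
        yield  # the pools exist while the rest of the job runs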
Code Example #5
def create_pools(ctx, config):
    """Create replicated or erasure coded data pools for rgw."""

    log.info('Creating data pools')
    for client in config.keys():
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        data_pool = '.rgw.buckets'
        cluster_name, daemon_type, client_id = teuthology.split_role(client)

        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client, 64,
                           ctx.rgw.erasure_code_profile, cluster_name)
        else:
            create_replicated_pool(remote, data_pool, 64, cluster_name)
        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64*1024*1024, cluster_name)
    log.debug('Pools created')
    yield
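`teuthology.split_role` is what turns a role string like 'client.0' into the `(cluster_name, daemon_type, client_id)` triple used above. A hedged sketch of its behavior; the default-cluster rule is inferred from how `cluster_name` is used in these examples, not quoted from teuthology:

def split_role(role):
    # Roles look like 'client.0', or 'backup.client.0' for a named cluster.
    # Assumed: a role without a cluster prefix belongs to cluster 'ceph'.
    parts = role.split('.')
    if len(parts) == 2:
        parts = ['ceph'] + parts
    cluster_name, daemon_type, daemon_id = parts
    return cluster_name, daemon_type, daemon_id


assert split_role('client.0') == ('ceph', 'client', '0')
assert split_role('backup.client.1') == ('backup', 'client', '1')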
Code Example #6
File: rgw.py Project: dreamsxin/ceph
def create_pools(ctx, clients):
    """Create replicated or erasure coded data pools for rgw."""

    log.info('Creating data pools')
    for client in clients:
        log.debug("Obtaining remote for client {}".format(client))
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        data_pool = 'default.rgw.buckets.data'
        cluster_name, daemon_type, client_id = teuthology.split_role(client)

        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client, ctx.rgw.data_pool_pg_size,
                           ctx.rgw.erasure_code_profile, cluster_name, 'rgw')
        else:
            create_replicated_pool(remote, data_pool, ctx.rgw.data_pool_pg_size, cluster_name, 'rgw')

        index_pool = 'default.rgw.buckets.index'
        create_replicated_pool(remote, index_pool, ctx.rgw.index_pool_pg_size, cluster_name, 'rgw')

        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64*1024*1024, cluster_name)
    log.debug('Pools created')
    yield
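Examples #1 and #6 read every pool parameter from `ctx.rgw` rather than hard-coding 64 PGs. A sketch of the attributes those snippets assume the rgw task has populated (attribute names come from the code above; the values and the `argparse.Namespace` container are illustrative):

import argparse

ctx = argparse.Namespace()
ctx.rgw = argparse.Namespace(
    ec_data_pool=False,                     # erasure-code the data pool?
    erasure_code_profile={'k': 2, 'm': 1},  # profile used when EC is on
    cache_pools=False,                      # add a cache tier on the data pool?
    data_pool_pg_size=64,                   # pg_num for *.rgw.buckets.data
    index_pool_pg_size=64,                  # pg_num for *.rgw.buckets.index
)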
Code Example #7
File: ceph_objectstore_tool.py Project: zxgm/ceph
def task(ctx, config):
    """
    Run ceph_objectstore_tool test

    The config should be as follows::

        ceph_objectstore_tool:
          objects: 20 # <number of objects>
          pgnum: 12
    """

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'ceph_objectstore_tool task only accepts a dict for configuration'

    log.info('Beginning ceph_objectstore_tool...')

    log.debug(config)
    log.debug(ctx)
    clients = ctx.cluster.only(teuthology.is_type('client'))
    assert len(clients.remotes) > 0, 'Must specify at least 1 client'
    (cli_remote, _) = clients.remotes.popitem()
    log.debug(cli_remote)

    # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys()))
    # client = clients.popitem()
    # log.info(client)
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    log.info("OSDS")
    log.info(osds)
    log.info(osds.remotes)

    manager = ctx.managers['ceph']
    while (len(manager.get_osd_status()['up']) != len(
            manager.get_osd_status()['raw'])):
        time.sleep(10)
    while (len(manager.get_osd_status()['in']) != len(
            manager.get_osd_status()['up'])):
        time.sleep(10)
    manager.raw_cluster_cmd('osd', 'set', 'noout')
    manager.raw_cluster_cmd('osd', 'set', 'nodown')

    PGNUM = config.get('pgnum', 12)
    log.info("pgnum: {num}".format(num=PGNUM))

    ERRORS = 0

    REP_POOL = "rep_pool"
    REP_NAME = "REPobject"
    create_replicated_pool(cli_remote, REP_POOL, PGNUM)
    ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME)

    EC_POOL = "ec_pool"
    EC_NAME = "ECobject"
    create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
    ERRORS += test_objectstore(ctx,
                               config,
                               cli_remote,
                               EC_POOL,
                               EC_NAME,
                               ec=True)

    if ERRORS == 0:
        log.info("TEST PASSED")
    else:
        log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))

    assert ERRORS == 0

    try:
        yield
    finally:
        log.info('Ending ceph_objectstore_tool')
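One caveat: the task sets the `noout` and `nodown` flags before testing but never clears them, so a failed or interrupted run leaves the cluster flagged. If a variant of this task needs to restore the flags, the `finally` block is the natural place; a sketch:

    try:
        yield
    finally:
        # Assumed cleanup: undo the flags set before the test ran.
        manager.raw_cluster_cmd('osd', 'unset', 'noout')
        manager.raw_cluster_cmd('osd', 'unset', 'nodown')
        log.info('Ending ceph_objectstore_tool')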
Code Example #8
File: rgw.py Project: andrewschoen/ceph-qa-suite
def configure_regions_and_zones(ctx, config, regions, role_endpoints):
    """
    Configure regions and zones from rados and rgw.
    """
    if not regions:
        log.debug(
            'In rgw.configure_regions_and_zones() and regions is None. '
            'Bailing')
        yield
        return

    log.info('Configuring regions and zones...')

    log.debug('config is %r', config)
    log.debug('regions are %r', regions)
    log.debug('role_endpoints = %r', role_endpoints)
    # extract the zone info
    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                       for client, c_config in config.iteritems()])
    log.debug('role_zones = %r', role_zones)

    # extract the user info and append it to the payload tuple for the given
    # client
    for client, c_config in config.iteritems():
        if not c_config:
            user_info = None
        else:
            user_info = extract_user_info(c_config)

        (region, zone, zone_info) = role_zones[client]
        role_zones[client] = (region, zone, zone_info, user_info)

    region_info = dict([
        (region_name, extract_region_info(region_name, r_config))
        for region_name, r_config in regions.iteritems()])

    fill_in_endpoints(region_info, role_zones, role_endpoints)

    # clear out the old defaults
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    # removing these objects from .rgw.root and the per-zone root pools
    # may or may not matter
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'region_info.default'])
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'zone_info.default'])

    for client in config.iterkeys():
        for role, (_, zone, zone_info, user_info) in role_zones.iteritems():
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'region_info.default'])
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'zone_info.default'])

            (remote,) = ctx.cluster.only(role).remotes.keys()
            for pool_info in zone_info['placement_pools']:
                remote.run(args=['ceph', 'osd', 'pool', 'create',
                                 pool_info['val']['index_pool'], '64', '64'])
                if ctx.rgw.ec_data_pool:
                    create_ec_pool(remote, pool_info['val']['data_pool'],
                                   zone, 64, ctx.rgw.erasure_code_profile)
                else:
                    create_replicated_pool(
                        remote, pool_info['val']['data_pool'],
                        64)

            rgwadmin(ctx, client,
                     cmd=['-n', client, 'zone', 'set', '--rgw-zone', zone],
                     stdin=StringIO(json.dumps(dict(
                         zone_info.items() + user_info.items()))),
                     check_status=True)

        for region, info in region_info.iteritems():
            region_json = json.dumps(info)
            log.debug('region info is: %s', region_json)
            rgwadmin(ctx, client,
                     cmd=['-n', client, 'region', 'set'],
                     stdin=StringIO(region_json),
                     check_status=True)
            if info['is_master']:
                rgwadmin(ctx, client,
                         cmd=['-n', client,
                              'region', 'default',
                              '--rgw-region', region],
                         check_status=True)

        rgwadmin(ctx, client, cmd=['-n', client, 'regionmap', 'update'])
    yield
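The `rgwadmin(...)` helper used throughout presumably wraps the `radosgw-admin` CLI, passing `cmd` as arguments and `stdin` as a JSON payload, with `check_status=True` asserting a zero exit code. A rough stand-alone equivalent of the `zone set` call above (the stand-in values and the direct subprocess call are assumptions, not the helper's internals):

import json
import subprocess

# Illustrative stand-ins for what the loop above supplies.
client, zone = 'client.0', 'zone-a'
zone_payload = {'domain_root': '.zone-a.rgw.root', 'placement_pools': []}

proc = subprocess.Popen(
    ['radosgw-admin', '-n', client, 'zone', 'set', '--rgw-zone', zone],
    stdin=subprocess.PIPE)
proc.communicate(json.dumps(zone_payload).encode())
assert proc.returncode == 0  # what check_status=True enforces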
Code Example #9
def configure_regions_and_zones(ctx, config, regions, role_endpoints, realm):
    """
    Configure regions and zones from rados and rgw.
    """
    if not regions:
        log.debug(
            'In rgw.configure_regions_and_zones() and regions is None. '
            'Bailing')
        yield
        return

    if not realm:
        log.debug(
            'In rgw.configure_regions_and_zones() and realm is None. '
            'Bailing')
        yield
        return

    log.info('Configuring regions and zones...')

    log.debug('config is %r', config)
    log.debug('regions are %r', regions)
    log.debug('role_endpoints = %r', role_endpoints)
    log.debug('realm is %r', realm)
    # extract the zone info
    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                       for client, c_config in config.iteritems()])
    log.debug('role_zones = %r', role_zones)

    # extract the user info and append it to the payload tuple for the given
    # client
    for client, c_config in config.iteritems():
        if not c_config:
            user_info = None
        else:
            user_info = extract_user_info(c_config)

        (region, zone, zone_info) = role_zones[client]
        role_zones[client] = (region, zone, zone_info, user_info)

    region_info = dict([
        (region_name, extract_region_info(region_name, r_config))
        for region_name, r_config in regions.iteritems()])

    fill_in_endpoints(region_info, role_zones, role_endpoints)

    # clear out the old defaults
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    # removing these objects from .rgw.root and the per-zone root pools
    # may or may not matter
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'region_info.default'])
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'zone_info.default'])

    # read master zonegroup and master_zone
    for zonegroup, zg_info in region_info.iteritems():
        if zg_info['is_master']:
            master_zonegroup = zonegroup
            master_zone = zg_info['master_zone']
            break

    for client in config.iterkeys():
        (zonegroup, zone, zone_info, user_info) = role_zones[client]
        if zonegroup == master_zonegroup and zone == master_zone:
            master_client = client
            break

    log.debug('master zonegroup = %r', master_zonegroup)
    log.debug('master zone = %r', master_zone)
    log.debug('master client = %r', master_client)
    log.debug('config %r ', config)

    (ret, out) = rgwadmin(ctx, master_client,
                          cmd=['realm', 'create', '--rgw-realm', realm, '--default'])
    log.debug('realm create ret %r exists %r', -ret, errno.EEXIST)
    # success is a clean create (0) or an already-existing realm (-EEXIST)
    assert ret == 0 or ret == -errno.EEXIST
    if ret == -errno.EEXIST:
        log.debug('realm %r exists', realm)

    for client in config.iterkeys():
        for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems():
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'region_info.default'])
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'zone_info.default'])

            (remote,) = ctx.cluster.only(role).remotes.keys()
            for pool_info in zone_info['placement_pools']:
                remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
                                 pool_info['val']['index_pool'], '64', '64'])
                if ctx.rgw.ec_data_pool:
                    create_ec_pool(remote, pool_info['val']['data_pool'],
                                   zone, 64, ctx.rgw.erasure_code_profile)
                else:
                    create_replicated_pool(
                        remote, pool_info['val']['data_pool'],
                        64)
            zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
            log.debug('zone info is: %r', zone_json)
            rgwadmin(ctx, client,
                     cmd=['zone', 'set', '--rgw-zonegroup', zonegroup,
                          '--rgw-zone', zone],
                     stdin=StringIO(zone_json),
                     check_status=True)

        for region, info in region_info.iteritems():
            region_json = json.dumps(info)
            log.debug('region info is: %s', region_json)
            rgwadmin(ctx, client,
                     cmd=['zonegroup', 'set'],
                     stdin=StringIO(region_json),
                     check_status=True)
            if info['is_master']:
                rgwadmin(ctx, client,
                         cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup],
                         check_status=True)

        (zonegroup, zone, zone_info, user_info) = role_zones[client]
        rgwadmin(ctx, client,
                 cmd=['zone', 'default', zone],
                 check_status=True)

    rgwadmin(ctx, master_client,
             cmd=['-n', master_client, 'period', 'update', '--commit'],
             check_status=True)

    yield
Code Example #10
File: rgw.py Project: cooboos/ceph-qa-suite
def configure_multisite_regions_and_zones(ctx, config, regions, role_endpoints, realm, master_client):
    """
    Configure multisite regions and zones from rados and rgw.
    """
    if not regions:
        log.debug(
            'In rgw.configure_multisite_regions_and_zones() and regions is None. '
            'Bailing')
        yield
        return

    if not realm:
        log.debug(
            'In rgw.configure_multisite_regions_and_zones() and realm is None. '
            'Bailing')
        yield
        return

    log.info('Configuring multisite regions and zones...')

    log.debug('config is %r', config)
    log.debug('regions are %r', regions)
    log.debug('role_endpoints = %r', role_endpoints)
    log.debug('realm is %r', realm)
    # extract the zone info
    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                       for client, c_config in config.iteritems()])
    log.debug('role_zones = %r', role_zones)

    # extract the user info and append it to the payload tuple for the given
    # client
    for client, c_config in config.iteritems():
        if not c_config:
            user_info = None
        else:
            user_info = extract_user_info(c_config)

        (region, zone, zone_info) = role_zones[client]
        role_zones[client] = (region, zone, zone_info, user_info)

    region_info = dict([
        (region_name, extract_region_info(region_name, r_config))
        for region_name, r_config in regions.iteritems()])

    fill_in_endpoints(region_info, role_zones, role_endpoints)

    # clear out the old defaults
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()

    # read master zonegroup and master_zone
    for zonegroup, zg_info in region_info.iteritems():
        if zg_info['is_master']:
            master_zonegroup = zonegroup
            master_zone = zg_info['master_zone']
            break

    log.debug('master zonegroup = %r', master_zonegroup)
    log.debug('master zone = %r', master_zone)
    log.debug('master client = %r', master_client)

    rgwadmin(ctx, master_client,
             cmd=['realm', 'create', '--rgw-realm', realm, '--default'],
             check_status=True)

    for region, info in region_info.iteritems():
        region_json = json.dumps(info)
        log.debug('region info is: %s', region_json)
        rgwadmin(ctx, master_client,
                 cmd=['zonegroup', 'set'],
                 stdin=StringIO(region_json),
                 check_status=True)

    rgwadmin(ctx, master_client,
             cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup],
             check_status=True)

    for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems():
        (remote,) = ctx.cluster.only(role).remotes.keys()
        for pool_info in zone_info['placement_pools']:
            remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
                             pool_info['val']['index_pool'], '64', '64'])
            if ctx.rgw.ec_data_pool:
                create_ec_pool(remote, pool_info['val']['data_pool'],
                               zone, 64, ctx.rgw.erasure_code_profile)
            else:
                create_replicated_pool(remote, pool_info['val']['data_pool'], 64)

    (zonegroup, zone, zone_info, user_info) = role_zones[master_client]
    zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
    log.debug("zone info is: %r", zone_json)
    rgwadmin(ctx, master_client,
             cmd=['zone', 'set', '--rgw-zonegroup', zonegroup,
                  '--rgw-zone', zone],
             stdin=StringIO(zone_json),
             check_status=True)

    rgwadmin(ctx, master_client,
             cmd=['-n', master_client, 'zone', 'default', zone],
             check_status=True)

    rgwadmin(ctx, master_client,
             cmd=['-n', master_client, 'period', 'update', '--commit'],
             check_status=True)

    yield
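The final `period update --commit` is what actually publishes the staged realm, zonegroup, and zone definitions; until it runs, the earlier `set` calls only modify the staging period. As a reading aid, the order of radosgw-admin operations this example drives, with illustrative names:

bootstrap_order = [
    ['realm', 'create', '--rgw-realm', 'myrealm', '--default'],
    ['zonegroup', 'set'],                    # JSON on stdin, per zonegroup
    ['zonegroup', 'default', '--rgw-zonegroup', 'us'],
    ['zone', 'set', '--rgw-zonegroup', 'us', '--rgw-zone', 'us-east'],
    ['zone', 'default', 'us-east'],
    ['period', 'update', '--commit'],        # publish everything at once
]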
Code Example #11
File: rgw.py Project: jackyding2679/librbd-ssd-cache
def configure_multisite_regions_and_zones(ctx, config, regions, role_endpoints,
                                          realm, master_client):
    """
    Configure multisite regions and zones from rados and rgw.
    """
    if not regions:
        log.debug(
            'In rgw.configure_multisite_regions_and_zones() and regions is None. '
            'Bailing')
        yield
        return

    if not realm:
        log.debug(
            'In rgw.configure_multisite_regions_and_zones() and realm is None. '
            'Bailing')
        yield
        return

    log.info('Configuring multisite regions and zones...')

    log.debug('config is %r', config)
    log.debug('regions are %r', regions)
    log.debug('role_endpoints = %r', role_endpoints)
    log.debug('realm is %r', realm)

    # extract the zone info
    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                       for client, c_config in config.iteritems()])
    log.debug('role_zones = %r', role_zones)

    # extract the user info and append it to the payload tuple for the given
    # client
    for client, c_config in config.iteritems():
        if not c_config:
            user_info = None
        else:
            user_info = extract_user_info(c_config)

        (region, zone, zone_info) = role_zones[client]
        role_zones[client] = (region, zone, zone_info, user_info)

    region_info = dict([(region_name,
                         extract_region_info(region_name, r_config))
                        for region_name, r_config in regions.iteritems()])

    fill_in_endpoints(region_info, role_zones, role_endpoints)

    # clear out the old defaults
    cluster_name, daemon_type, client_id = teuthology.split_role(master_client)
    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
    (mon, ) = ctx.cluster.only(first_mon).remotes.iterkeys()

    # read master zonegroup and master_zone
    for zonegroup, zg_info in region_info.iteritems():
        if zg_info['is_master']:
            master_zonegroup = zonegroup
            master_zone = zg_info['master_zone']
            break

    log.debug('master zonegroup = %r', master_zonegroup)
    log.debug('master zone = %r', master_zone)
    log.debug('master client = %r', master_client)

    rgwadmin(ctx,
             master_client,
             cmd=['realm', 'create', '--rgw-realm', realm, '--default'],
             check_status=True)

    for region, info in region_info.iteritems():
        region_json = json.dumps(info)
        log.debug('region info is: %s', region_json)
        rgwadmin(ctx,
                 master_client,
                 cmd=['zonegroup', 'set'],
                 stdin=StringIO(region_json),
                 check_status=True)

    rgwadmin(ctx,
             master_client,
             cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup],
             check_status=True)

    for role, (zonegroup, zone, zone_info,
               user_info) in role_zones.iteritems():
        (remote, ) = ctx.cluster.only(role).remotes.keys()
        for pool_info in zone_info['placement_pools']:
            remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
                             pool_info['val']['index_pool'], '64', '64',
                             '--cluster', cluster_name])
            if ctx.rgw.ec_data_pool:
                create_ec_pool(remote, pool_info['val']['data_pool'], zone, 64,
                               ctx.rgw.erasure_code_profile, cluster_name)
            else:
                create_replicated_pool(remote, pool_info['val']['data_pool'],
                                       64, cluster_name)

    (zonegroup, zone, zone_info, user_info) = role_zones[master_client]
    zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
    log.debug("zone info is: %r", zone_json)
    rgwadmin(
        ctx,
        master_client,
        cmd=['zone', 'set', '--rgw-zonegroup', zonegroup, '--rgw-zone', zone],
        stdin=StringIO(zone_json),
        check_status=True)

    rgwadmin(ctx,
             master_client,
             cmd=['zone', 'default', '--rgw-zone', zone],
             check_status=True)

    rgwadmin(ctx,
             master_client,
             cmd=['period', 'update', '--commit'],
             check_status=True)

    yield
Code Example #12
File: rgw.py Project: big-henry/ceph
def configure_regions_and_zones(ctx, config, regions, role_endpoints, realm):
    """
    Configure regions and zones from rados and rgw.
    """
    if not regions:
        log.debug(
            'In rgw.configure_regions_and_zones() and regions is None. '
            'Bailing')
        configure_compression_in_default_zone(ctx, config)
        yield
        return

    if not realm:
        log.debug(
            'In rgw.configure_regions_and_zones() and realm is None. '
            'Bailing')
        configure_compression_in_default_zone(ctx, config)
        yield
        return

    log.info('Configuring regions and zones...')

    log.debug('config is %r', config)
    log.debug('regions are %r', regions)
    log.debug('role_endpoints = %r', role_endpoints)
    log.debug('realm is %r', realm)

    # extract the zone info
    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                       for client, c_config in config.iteritems()])
    log.debug('role_zones = %r', role_zones)

    # extract the user info and append it to the payload tuple for the given
    # client
    for client, c_config in config.iteritems():
        if not c_config:
            user_info = None
        else:
            user_info = extract_user_info(c_config)

        (region, zone, zone_info) = role_zones[client]
        role_zones[client] = (region, zone, zone_info, user_info)

    region_info = dict([
        (region_name, extract_region_info(region_name, r_config))
        for region_name, r_config in regions.iteritems()])

    fill_in_endpoints(region_info, role_zones, role_endpoints)

    # clear out the old defaults
    cluster_name, daemon_type, client_id = teuthology.split_role(client)
    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    # removing these objects from .rgw.root and the per-zone root pools
    # may or may not matter
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'region_info.default', '--cluster', cluster_name])
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'zone_info.default', '--cluster', cluster_name])

    # read master zonegroup and master_zone
    for zonegroup, zg_info in region_info.iteritems():
        if zg_info['is_master']:
            master_zonegroup = zonegroup
            master_zone = zg_info['master_zone']
            break

    for client in config.iterkeys():
        (zonegroup, zone, zone_info, user_info) = role_zones[client]
        if zonegroup == master_zonegroup and zone == master_zone:
            master_client = client
            break

    log.debug('master zonegroup = %r', master_zonegroup)
    log.debug('master zone = %r', master_zone)
    log.debug('master client = %r', master_client)
    log.debug('config %r ', config)

    (ret, out) = rgwadmin(ctx, master_client,
                          cmd=['realm', 'create', '--rgw-realm', realm, '--default'])
    log.debug('realm create ret %r exists %r', -ret, errno.EEXIST)
    # success is a clean create (0) or an already-existing realm (-EEXIST)
    assert ret == 0 or ret == -errno.EEXIST
    if ret == -errno.EEXIST:
        log.debug('realm %r exists', realm)

    for client in config.iterkeys():
        for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems():
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'region_info.default', '--cluster', cluster_name])
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'zone_info.default', '--cluster', cluster_name])

            (remote,) = ctx.cluster.only(role).remotes.keys()
            for pool_info in zone_info['placement_pools']:
                remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
                                 pool_info['val']['index_pool'], '64', '64', '--cluster', cluster_name])
                if ctx.rgw.ec_data_pool:
                    create_ec_pool(remote, pool_info['val']['data_pool'],
                                   zone, 64, ctx.rgw.erasure_code_profile, cluster_name)
                else:
                    create_replicated_pool(
                        remote, pool_info['val']['data_pool'],
                        64, cluster_name)
            zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
            log.debug('zone info is: %r', zone_json)
            rgwadmin(ctx, client,
                     cmd=['zone', 'set', '--rgw-zonegroup', zonegroup,
                          '--rgw-zone', zone],
                     stdin=StringIO(zone_json),
                     check_status=True)

        for region, info in region_info.iteritems():
            region_json = json.dumps(info)
            log.debug('region info is: %s', region_json)
            rgwadmin(ctx, client,
                     cmd=['zonegroup', 'set'],
                     stdin=StringIO(region_json),
                     check_status=True)
            if info['is_master']:
                rgwadmin(ctx, client,
                         cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup],
                         check_status=True)

        (zonegroup, zone, zone_info, user_info) = role_zones[client]
        rgwadmin(ctx, client,
                 cmd=['zone', 'default', '--rgw-zone', zone],
                 check_status=True)

    # the period commit must go through the master client
    rgwadmin(ctx, master_client,
             cmd=['period', 'update', '--commit'],
             check_status=True)

    yield