Example #1
def create_pools(ctx, clients):
    """Create replicated or erasure coded data pools for rgw."""

    log.info('Creating data pools')
    for client in clients:
        log.debug("Obtaining remote for client {}".format(client))
        (remote, ) = ctx.cluster.only(client).remotes.iterkeys()
        data_pool = 'default.rgw.buckets.data'
        cluster_name, daemon_type, client_id = teuthology.split_role(client)

        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client,
                           ctx.rgw.data_pool_pg_size,
                           ctx.rgw.erasure_code_profile, cluster_name, 'rgw')
        else:
            create_replicated_pool(remote, data_pool,
                                   ctx.rgw.data_pool_pg_size, cluster_name,
                                   'rgw')

        index_pool = 'default.rgw.buckets.index'
        create_replicated_pool(remote, index_pool, ctx.rgw.index_pool_pg_size,
                               cluster_name, 'rgw')

        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64 * 1024 * 1024, cluster_name)
    log.debug('Pools created')
    yield
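
Every example on this page exercises create_replicated_pool (and, on the erasure-coded path, create_ec_pool) without showing its definition. As a rough orientation, here is a minimal sketch of what such a helper might look like, with the signature inferred purely from the call sites in these examples; the real teuthology helper may differ in its details:

def create_replicated_pool(remote, name, pgnum, cluster_name='ceph',
                           application=None):
    """Sketch: create a replicated pool on the given remote via the ceph CLI."""
    remote.run(args=['sudo', 'ceph', '--cluster', cluster_name,
                     'osd', 'pool', 'create', name, str(pgnum), str(pgnum),
                     'replicated'])
    if application:
        # Newer Ceph releases expect pools to be tagged with an application.
        remote.run(args=['sudo', 'ceph', '--cluster', cluster_name,
                         'osd', 'pool', 'application', 'enable',
                         name, application])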
Example #2
def create_zone_pools(ctx, zone):
    """ Create the data_pool for each placement type """
    gateway = zone.gateways[0]
    cluster = zone.cluster
    for pool_config in zone.data.get('placement_pools', []):
        pool_name = pool_config['val']['storage_classes']['STANDARD']['data_pool']
        if ctx.rgw.ec_data_pool:
            create_ec_pool(gateway.remote, pool_name, zone.name, 64,
                           ctx.rgw.erasure_code_profile, cluster.name, 'rgw')
        else:
            create_replicated_pool(gateway.remote, pool_name, 64, cluster.name, 'rgw')
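
The nested lookup above implies placement data shaped roughly like the following; every name and value here is purely illustrative. Example #3 below handles the older, flat layout in which 'data_pool' sits directly under 'val':

# Hypothetical shape of one zone.data['placement_pools'] entry, as implied by
# the storage-class lookup in Example #2.
pool_config = {
    'key': 'default-placement',
    'val': {
        'storage_classes': {
            'STANDARD': {'data_pool': 'default.rgw.buckets.data'},
        },
    },
}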
Example #3
def create_zone_pools(ctx, zone):
    """ Create the data_pool for each placement type """
    gateway = zone.gateways[0]
    cluster = zone.cluster
    for pool_config in zone.data.get('placement_pools', []):
        pool_name = pool_config['val']['data_pool']
        if ctx.rgw.ec_data_pool:
            create_ec_pool(gateway.remote, pool_name, zone.name, 64,
                           ctx.rgw.erasure_code_profile, cluster.name, 'rgw')
        else:
            create_replicated_pool(gateway.remote, pool_name, 64, cluster.name, 'rgw')
Example #4
def create_nonregion_pools(ctx, config, regions):
    """Create replicated or erasure coded data pools for rgw."""
    if regions:
        yield
        return

    log.info('creating data pools')
    for client in config.keys():
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        data_pool = '.rgw.buckets'
        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client, 64,
                           ctx.rgw.erasure_code_profile)
        else:
            create_replicated_pool(remote, data_pool, 64)
        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64*1024*1024)
    yield
Example #6
def create_pools(ctx, config):
    """Create replicated or erasure coded data pools for rgw."""

    log.info('Creating data pools')
    for client in config.keys():
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        data_pool = '.rgw.buckets'
        cluster_name, daemon_type, client_id = teuthology.split_role(client)

        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client, 64,
                           ctx.rgw.erasure_code_profile, cluster_name)
        else:
            create_replicated_pool(remote, data_pool, 64, cluster_name)
        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64*1024*1024, cluster_name)
    log.debug('Pools created')
    yield
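
Note the pool name: '.rgw.buckets' is the older default-zone layout, whereas Examples #1 and #7 create the newer 'default.rgw.buckets.data' pool.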
Example #7
def create_pools(ctx, clients):
    """Create replicated or erasure coded data pools for rgw."""

    log.info('Creating data pools')
    for client in clients:
        log.debug("Obtaining remote for client {}".format(client))
        (remote,) = ctx.cluster.only(client).remotes.iterkeys()
        data_pool = 'default.rgw.buckets.data'
        cluster_name, daemon_type, client_id = teuthology.split_role(client)

        if ctx.rgw.ec_data_pool:
            create_ec_pool(remote, data_pool, client, ctx.rgw.data_pool_pg_size,
                           ctx.rgw.erasure_code_profile, cluster_name, 'rgw')
        else:
            create_replicated_pool(remote, data_pool, ctx.rgw.data_pool_pg_size, cluster_name, 'rgw')

        index_pool = 'default.rgw.buckets.index'
        create_replicated_pool(remote, index_pool, ctx.rgw.index_pool_pg_size, cluster_name, 'rgw')

        if ctx.rgw.cache_pools:
            create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
                              64*1024*1024, cluster_name)
    log.debug('Pools created')
    yield
Example #8
def task(ctx, config):
    """
    Run ceph_objectstore_tool test

    The config should be as follows::

        ceph_objectstore_tool:
          objects: 20 # <number of objects>
          pgnum: 12
    """

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'ceph_objectstore_tool task only accepts a dict for configuration'

    log.info('Beginning ceph_objectstore_tool...')

    log.debug(config)
    log.debug(ctx)
    clients = ctx.cluster.only(teuthology.is_type('client'))
    assert len(clients.remotes) > 0, 'Must specify at least 1 client'
    (cli_remote, _) = clients.remotes.popitem()
    log.debug(cli_remote)

    # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys()))
    # client = clients.popitem()
    # log.info(client)
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    log.info("OSDS")
    log.info(osds)
    log.info(osds.remotes)

    manager = ctx.managers['ceph']
    while (len(manager.get_osd_status()['up']) != len(
            manager.get_osd_status()['raw'])):
        time.sleep(10)
    while (len(manager.get_osd_status()['in']) != len(
            manager.get_osd_status()['up'])):
        time.sleep(10)
    manager.raw_cluster_cmd('osd', 'set', 'noout')
    manager.raw_cluster_cmd('osd', 'set', 'nodown')

    PGNUM = config.get('pgnum', 12)
    log.info("pgnum: {num}".format(num=PGNUM))

    ERRORS = 0

    REP_POOL = "rep_pool"
    REP_NAME = "REPobject"
    create_replicated_pool(cli_remote, REP_POOL, PGNUM)
    ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME)

    EC_POOL = "ec_pool"
    EC_NAME = "ECobject"
    create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
    ERRORS += test_objectstore(ctx,
                               config,
                               cli_remote,
                               EC_POOL,
                               EC_NAME,
                               ec=True)

    if ERRORS == 0:
        log.info("TEST PASSED")
    else:
        log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))

    assert ERRORS == 0

    try:
        yield
    finally:
        log.info('Ending ceph_objectstore_tool')
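
Tasks like this one are generator functions: everything before the yield runs as setup, and the code after it (here, the finally block) runs as teardown once the generator is resumed. A simplified sketch of that driving pattern, assuming the runner wraps tasks with contextlib; the real teuthology runner is considerably more involved:

import contextlib

def drive_task(task_fn, ctx, config):
    # Sketch only: wrap a generator-style task so its setup runs on entry
    # and its teardown runs on exit.
    with contextlib.contextmanager(task_fn)(ctx, config):
        pass  # the test body already ran before the yield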
Example #9
def task(ctx, config):
    """
    Run ceph_objectstore_tool test

    The config should be as follows::

        ceph_objectstore_tool:
          objects: 20 # <number of objects>
          pgnum: 12
    """

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'ceph_objectstore_tool task only accepts a dict for configuration'

    log.info('Beginning ceph_objectstore_tool...')

    log.debug(config)
    log.debug(ctx)
    clients = ctx.cluster.only(teuthology.is_type('client'))
    assert len(clients.remotes) > 0, 'Must specify at least 1 client'
    (cli_remote, _) = clients.remotes.popitem()
    log.debug(cli_remote)

    # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys()))
    # client = clients.popitem()
    # log.info(client)
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    log.info("OSDS")
    log.info(osds)
    log.info(osds.remotes)

    manager = ctx.managers['ceph']
    while (len(manager.get_osd_status()['up']) !=
           len(manager.get_osd_status()['raw'])):
        time.sleep(10)
    while (len(manager.get_osd_status()['in']) !=
           len(manager.get_osd_status()['up'])):
        time.sleep(10)
    manager.raw_cluster_cmd('osd', 'set', 'noout')
    manager.raw_cluster_cmd('osd', 'set', 'nodown')

    PGNUM = config.get('pgnum', 12)
    log.info("pgnum: {num}".format(num=PGNUM))

    ERRORS = 0

    REP_POOL = "rep_pool"
    REP_NAME = "REPobject"
    create_replicated_pool(cli_remote, REP_POOL, PGNUM)
    ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME)

    EC_POOL = "ec_pool"
    EC_NAME = "ECobject"
    create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
    ERRORS += test_objectstore(ctx, config, cli_remote,
                               EC_POOL, EC_NAME, ec=True)

    if ERRORS == 0:
        log.info("TEST PASSED")
    else:
        log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))

    assert ERRORS == 0

    try:
        yield
    finally:
        log.info('Ending ceph_objectstore_tool')
Example #10
def configure_regions_and_zones(ctx, config, regions, role_endpoints):
    """
    Configure regions and zones from rados and rgw.
    """
    if not regions:
        log.debug(
            'In rgw.configure_regions_and_zones() and regions is None. '
            'Bailing')
        yield
        return

    log.info('Configuring regions and zones...')

    log.debug('config is %r', config)
    log.debug('regions are %r', regions)
    log.debug('role_endpoints = %r', role_endpoints)
    # extract the zone info
    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                       for client, c_config in config.iteritems()])
    log.debug('role_zones = %r', role_zones)

    # extract the user info and append it to the payload tuple for the given
    # client
    for client, c_config in config.iteritems():
        if not c_config:
            user_info = None
        else:
            user_info = extract_user_info(c_config)

        (region, zone, zone_info) = role_zones[client]
        role_zones[client] = (region, zone, zone_info, user_info)

    region_info = dict([
        (region_name, extract_region_info(region_name, r_config))
        for region_name, r_config in regions.iteritems()])

    fill_in_endpoints(region_info, role_zones, role_endpoints)

    # clear out the old defaults
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    # removing these objects from .rgw.root and the per-zone root pools
    # may or may not matter
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'region_info.default'])
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'zone_info.default'])

    for client in config.iterkeys():
        for role, (_, zone, zone_info, user_info) in role_zones.iteritems():
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'region_info.default'])
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'zone_info.default'])

            (remote,) = ctx.cluster.only(role).remotes.keys()
            for pool_info in zone_info['placement_pools']:
                remote.run(args=['ceph', 'osd', 'pool', 'create',
                                 pool_info['val']['index_pool'], '64', '64'])
                if ctx.rgw.ec_data_pool:
                    create_ec_pool(remote, pool_info['val']['data_pool'],
                                   zone, 64, ctx.rgw.erasure_code_profile)
                else:
                    create_replicated_pool(
                        remote, pool_info['val']['data_pool'],
                        64)

            rgwadmin(ctx, client,
                     cmd=['-n', client, 'zone', 'set', '--rgw-zone', zone],
                     stdin=StringIO(json.dumps(dict(
                         zone_info.items() + user_info.items()))),
                     check_status=True)

        for region, info in region_info.iteritems():
            region_json = json.dumps(info)
            log.debug('region info is: %s', region_json)
            rgwadmin(ctx, client,
                     cmd=['-n', client, 'region', 'set'],
                     stdin=StringIO(region_json),
                     check_status=True)
            if info['is_master']:
                rgwadmin(ctx, client,
                         cmd=['-n', client,
                              'region', 'default',
                              '--rgw-region', region],
                         check_status=True)

        rgwadmin(ctx, client, cmd=['-n', client, 'regionmap', 'update'])
    yield
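
The region_info entries consulted here and in the following examples (for instance info['is_master'] and, later, zg_info['master_zone']) suggest per-region dictionaries shaped roughly as below; only the keys these examples actually read are shown, and all values are hypothetical:

# Illustrative region_info mapping as implied by the lookups in these examples.
region_info = {
    'us': {
        'is_master': True,
        'master_zone': 'us-east',
    },
}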
Example #11
def configure_regions_and_zones(ctx, config, regions, role_endpoints, realm):
    """
    Configure regions and zones from rados and rgw.
    """
    if not regions:
        log.debug(
            'In rgw.configure_regions_and_zones() and regions is None. '
            'Bailing')
        yield
        return

    if not realm:
        log.debug(
            'In rgw.configure_regions_and_zones() and realm is None. '
            'Bailing')
        yield
        return

    log.info('Configuring regions and zones...')

    log.debug('config is %r', config)
    log.debug('regions are %r', regions)
    log.debug('role_endpoints = %r', role_endpoints)
    log.debug('realm is %r', realm)
    # extract the zone info
    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                       for client, c_config in config.iteritems()])
    log.debug('role_zones = %r', role_zones)

    # extract the user info and append it to the payload tuple for the given
    # client
    for client, c_config in config.iteritems():
        if not c_config:
            user_info = None
        else:
            user_info = extract_user_info(c_config)

        (region, zone, zone_info) = role_zones[client]
        role_zones[client] = (region, zone, zone_info, user_info)

    region_info = dict([
        (region_name, extract_region_info(region_name, r_config))
        for region_name, r_config in regions.iteritems()])

    fill_in_endpoints(region_info, role_zones, role_endpoints)

    # clear out the old defaults
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    # removing these objects from .rgw.root and the per-zone root pools
    # may or may not matter
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'region_info.default'])
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'zone_info.default'])

    # read master zonegroup and master_zone
    for zonegroup, zg_info in region_info.iteritems():
        if zg_info['is_master']:
            master_zonegroup = zonegroup
            master_zone = zg_info['master_zone']
            break

    for client in config.iterkeys():
        (zonegroup, zone, zone_info, user_info) = role_zones[client]
        if zonegroup == master_zonegroup and zone == master_zone:
            master_client = client
            break

    log.debug('master zonegroup = %r', master_zonegroup)
    log.debug('master zone = %r', master_zone)
    log.debug('master client = %r', master_client)
    log.debug('config %r ', config)

    (ret, out) = rgwadmin(ctx, master_client,
                          cmd=['realm', 'create', '--rgw-realm', realm, '--default'])
    log.debug('realm create ret %r exists %r', -ret, errno.EEXIST)
    assert ret == 0 or ret == -errno.EEXIST
    if ret == -errno.EEXIST:
        log.debug('realm %r exists', realm)

    for client in config.iterkeys():
        for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems():
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'region_info.default'])
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'zone_info.default'])

            (remote,) = ctx.cluster.only(role).remotes.keys()
            for pool_info in zone_info['placement_pools']:
                remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
                                 pool_info['val']['index_pool'], '64', '64'])
                if ctx.rgw.ec_data_pool:
                    create_ec_pool(remote, pool_info['val']['data_pool'],
                                   zone, 64, ctx.rgw.erasure_code_profile)
                else:
                    create_replicated_pool(
                        remote, pool_info['val']['data_pool'],
                        64)
            zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
            log.debug('zone info is: %r', zone_json)
            rgwadmin(ctx, client,
                 cmd=['zone', 'set', '--rgw-zonegroup', zonegroup,
                      '--rgw-zone', zone],
                 stdin=StringIO(zone_json),
                 check_status=True)

        for region, info in region_info.iteritems():
            region_json = json.dumps(info)
            log.debug('region info is: %s', region_json)
            rgwadmin(ctx, client,
                     cmd=['zonegroup', 'set'],
                     stdin=StringIO(region_json),
                     check_status=True)
            if info['is_master']:
                rgwadmin(ctx, client,
                         cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup],
                         check_status=True)

        (zonegroup, zone, zone_info, user_info) = role_zones[client]
        rgwadmin(ctx, client,
                 cmd=['zone', 'default', zone],
                 check_status=True)

    rgwadmin(ctx, master_client,
             cmd=['-n', master_client, 'period', 'update', '--commit'],
             check_status=True)

    yield
Example #12
def configure_multisite_regions_and_zones(ctx, config, regions, role_endpoints, realm, master_client):
    """
    Configure multisite regions and zones from rados and rgw.
    """
    if not regions:
        log.debug(
            'In rgw.configure_multisite_regions_and_zones() and regions is None. '
            'Bailing')
        yield
        return

    if not realm:
        log.debug(
            'In rgw.configure_multisite_regions_and_zones() and realm is None. '
            'Bailing')
        yield
        return

    log.info('Configuring multisite regions and zones...')

    log.debug('config is %r', config)
    log.debug('regions are %r', regions)
    log.debug('role_endpoints = %r', role_endpoints)
    log.debug('realm is %r', realm)
    # extract the zone info
    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                       for client, c_config in config.iteritems()])
    log.debug('role_zones = %r', role_zones)

    # extract the user info and append it to the payload tuple for the given
    # client
    for client, c_config in config.iteritems():
        if not c_config:
            user_info = None
        else:
            user_info = extract_user_info(c_config)

        (region, zone, zone_info) = role_zones[client]
        role_zones[client] = (region, zone, zone_info, user_info)

    region_info = dict([
        (region_name, extract_region_info(region_name, r_config))
        for region_name, r_config in regions.iteritems()])

    fill_in_endpoints(region_info, role_zones, role_endpoints)

    # clear out the old defaults
    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()

    # read master zonegroup and master_zone
    for zonegroup, zg_info in region_info.iteritems():
        if zg_info['is_master']:
            master_zonegroup = zonegroup
            master_zone = zg_info['master_zone']
            break

    log.debug('master zonegroup = %r', master_zonegroup)
    log.debug('master zone = %r', master_zone)
    log.debug('master client = %r', master_client)

    rgwadmin(ctx, master_client,
             cmd=['realm', 'create', '--rgw-realm', realm, '--default'],
             check_status=True)

    for region, info in region_info.iteritems():
        region_json = json.dumps(info)
        log.debug('region info is: %s', region_json)
        rgwadmin(ctx, master_client,
                 cmd=['zonegroup', 'set'],
                 stdin=StringIO(region_json),
                 check_status=True)

    rgwadmin(ctx, master_client,
             cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup],
             check_status=True)

    for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems():
        (remote,) = ctx.cluster.only(role).remotes.keys()
        for pool_info in zone_info['placement_pools']:
            remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
                             pool_info['val']['index_pool'], '64', '64'])
            if ctx.rgw.ec_data_pool:
                create_ec_pool(remote, pool_info['val']['data_pool'],
                               zone, 64, ctx.rgw.erasure_code_profile)
            else:
                create_replicated_pool(remote, pool_info['val']['data_pool'], 64)

    (zonegroup, zone, zone_info, user_info) = role_zones[master_client]
    zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
    log.debug("zone info is: %r", zone_json)
    rgwadmin(ctx, master_client,
             cmd=['zone', 'set', '--rgw-zonegroup', zonegroup,
                  '--rgw-zone', zone],
             stdin=StringIO(zone_json),
             check_status=True)

    rgwadmin(ctx, master_client,
             cmd=['-n', master_client, 'zone', 'default', zone],
             check_status=True)

    rgwadmin(ctx, master_client,
             cmd=['-n', master_client, 'period', 'update', '--commit'],
             check_status=True)

    yield
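
The call sequence above mirrors the usual multisite bootstrap order as it appears in the code itself: create and default the realm, set and default the zonegroup, create the placement pools, set and default the master zone, and finally commit a period so the configuration takes effect.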
Example #13
def task(ctx, config):
    """
    Run ceph_objectstore_tool test

    The config should be as follows::

        ceph_objectstore_tool:
          objects: <number of objects>
    """

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'ceph_objectstore_tool task only accepts a dict for configuration'
    TEUTHDIR = teuthology.get_testdir(ctx)

    # clients = config['clients']
    # assert len(clients) > 0,
    #    'ceph_objectstore_tool task needs at least 1 client'

    REP_POOL = "rep_pool"
    REP_NAME = "REPobject"
    # EC_POOL = "ec_pool"
    # EC_NAME = "ECobject"
    NUM_OBJECTS = config.get('objects', 10)
    ERRORS = 0
    DATADIR = os.path.join(TEUTHDIR, "data")
    # Put a test dir below the data dir
    # TESTDIR = os.path.join(DATADIR, "test")
    DATALINECOUNT = 10000
    # PROFNAME = "testecprofile"

    log.info('Beginning ceph_objectstore_tool...')
    log.info("objects: {num}".format(num=NUM_OBJECTS))

    log.debug(config)
    log.debug(ctx)
    clients = ctx.cluster.only(teuthology.is_type('client'))
    assert len(clients.remotes) > 0, 'Must specify at least 1 client'
    (cli_remote, _) = clients.remotes.popitem()
    log.debug(cli_remote)

    # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys()))
    # client = clients.popitem()
    # log.info(client)
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    log.info("OSDS")
    log.info(osds)
    log.info(osds.remotes)

    first_mon = teuthology.get_first_mon(ctx, config)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    manager = ceph_manager.CephManager(
        mon,
        ctx=ctx,
        config=config,
        logger=log.getChild('ceph_manager'),
        )
    ctx.manager = manager

    # ctx.manager.raw_cluster_cmd('osd', 'pool', 'create', REP_POOL, '12', '12', 'replicated')
    create_replicated_pool(cli_remote, REP_POOL, 12)
    REPID = get_pool_id(ctx, REP_POOL)

    log.debug("repid={num}".format(num=REPID))

    while len(manager.get_osd_status()['up']) != len(manager.get_osd_status()['raw']):
        time.sleep(10)
    while len(manager.get_osd_status()['in']) != len(manager.get_osd_status()['up']):
        time.sleep(10)
    manager.raw_cluster_cmd('osd', 'set', 'noout')
    manager.raw_cluster_cmd('osd', 'set', 'nodown')

    db = {}

    LOCALDIR = tempfile.mkdtemp("cod")

    cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR, REP_NAME, DATALINECOUNT)
    allremote = []
    allremote.append(cli_remote)
    allremote += osds.remotes.keys()
    allremote = list(set(allremote))
    for remote in allremote:
        cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, REP_NAME, DATALINECOUNT)

    ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR, REP_NAME, DATALINECOUNT, REP_POOL, db)

    pgs = {}
    jsontext = manager.raw_cluster_cmd('pg', 'dump_json')
    pgdump = json.loads(jsontext)
    PGS = [str(p["pgid"]) for p in pgdump["pg_stats"] if p["pgid"].find(str(REPID) + ".") == 0]
    for stats in pgdump["pg_stats"]:
        if stats["pgid"] in PGS:
            for osd in stats["acting"]:
                if osd not in pgs:
                    pgs[osd] = []
                pgs[osd].append(stats["pgid"])


    log.info(pgs)
    log.info(db)

    for osd in manager.get_osd_status()['up']:
        manager.kill_osd(osd)
    time.sleep(5)

    pgswithobjects = set()
    objsinpg = {}

    # Test --op list and generate json for all objects
    log.info("Test --op list by generating json for all objects")
    prefix = "sudo ceph_objectstore_tool --data-path {fpath} --journal-path {jpath} ".format(fpath=FSPATH, jpath=JPATH)
    for remote in osds.remotes.iterkeys():
        log.debug(remote)
        log.debug(osds.remotes[remote])
        for role in osds.remotes[remote]:
            if string.find(role, "osd.") != 0:
                continue
            osdid = int(role.split('.')[1])
            log.info("process osd.{id} on {remote}".format(id=osdid, remote=remote))
            for pg in pgs[osdid]:
                cmd = (prefix + "--op list --pgid {pg}").format(id=osdid, pg=pg)
                proc = remote.run(args=cmd.split(), check_status=False, stdout=StringIO())
                # proc.wait()
                if proc.exitstatus != 0:
                    log.error("Bad exit status {ret} from --op list request".format(ret=proc.exitstatus))
                    ERRORS += 1
                else:
                    data = proc.stdout.getvalue()
                    if len(data):
                        # This pg has some objects in it
                        pgswithobjects.add(pg)
                        pglines = data.split('\n')
                        # All copies of a pg are the same so we can overwrite
                        objsinpg[pg] = []
                        while pglines:
                            # Drop any blank lines
                            if (len(pglines[-1]) == 0):
                                pglines.pop()
                                continue
                            objjson = pglines.pop()
                            name = json.loads(objjson)['oid']
                            objsinpg[pg].append(name)
                            db[name]["pgid"] = pg
                            db[name]["json"] = objjson

    log.info(db)
    log.info(pgswithobjects)
    log.info(objsinpg)

    # Test get-bytes
    log.info("Test get-bytes and set-bytes")
    for basename in db.keys():
        file = os.path.join(DATADIR, basename)
        JSON = db[basename]["json"]
        GETNAME = os.path.join(DATADIR, "get")
        SETNAME = os.path.join(DATADIR, "set")

        for remote in osds.remotes.iterkeys():
            for role in osds.remotes[remote]:
                if string.find(role, "osd.") != 0:
                    continue
                osdid = int(role.split('.')[1])

                pg = db[basename]['pgid']
                if pg in pgs[osdid]:
                    cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
                    cmd.append(run.Raw("'{json}'".format(json=JSON)))
                    cmd += "get-bytes {fname}".format(fname=GETNAME).split()
                    proc = remote.run(args=cmd, check_status=False)
                    if proc.exitstatus != 0:
                        remote.run(args="rm -f {getfile}".format(getfile=GETNAME).split())
                        log.error("Bad exit status {ret}".format(ret=proc.exitstatus))
                        ERRORS += 1
                        continue
                    cmd = "diff -q {file} {getfile}".format(file=file, getfile=GETNAME)
                    proc = remote.run(args=cmd.split())
                    if proc.exitstatus != 0:
                        log.error("Data from get-bytes differ")
                        # log.debug("Got:")
                        # cat_file(logging.DEBUG, GETNAME)
                        # log.debug("Expected:")
                        # cat_file(logging.DEBUG, file)
                        ERRORS += 1
                    remote.run(args="rm -f {getfile}".format(getfile=GETNAME).split())

                    data = "put-bytes going into {file}\n".format(file=file)
                    teuthology.write_file(remote, SETNAME, data)
                    cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
                    cmd.append(run.Raw("'{json}'".format(json=JSON)))
                    cmd += "set-bytes {fname}".format(fname=SETNAME).split()
                    proc = remote.run(args=cmd, check_status=False)
                    proc.wait()
                    if proc.exitstatus != 0:
                        log.info("set-bytes failed for object {obj} in pg {pg} osd.{id} ret={ret}".format(obj=basename, pg=pg, id=osdid, ret=proc.exitstatus))
                        ERRORS += 1

                    cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
                    cmd.append(run.Raw("'{json}'".format(json=JSON)))
                    cmd += "get-bytes -".split()
                    proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
                    proc.wait()
                    if proc.exitstatus != 0:
                        log.error("get-bytes after set-bytes ret={ret}".format(ret=proc.exitstatus))
                        ERRORS += 1
                    else:
                        if data != proc.stdout.getvalue():
                            log.error("Data inconsistent after set-bytes, got:")
                            log.error(proc.stdout.getvalue())
                            ERRORS += 1

                    cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
                    cmd.append(run.Raw("'{json}'".format(json=JSON)))
                    cmd += "set-bytes {fname}".format(fname=file).split()
                    proc = remote.run(args=cmd, check_status=False)
                    proc.wait()
                    if proc.exitstatus != 0:
                        log.info("set-bytes failed for object {obj} in pg {pg} osd.{id} ret={ret}".format(obj=basename, pg=pg, id=osdid, ret=proc.exitstatus))
                        ERRORS += 1

    log.info("Test list-attrs get-attr")
    for basename in db.keys():
        file = os.path.join(DATADIR, basename)
        JSON = db[basename]["json"]
        GETNAME = os.path.join(DATADIR, "get")
        SETNAME = os.path.join(DATADIR, "set")

        for remote in osds.remotes.iterkeys():
            for role in osds.remotes[remote]:
                if string.find(role, "osd.") != 0:
                    continue
                osdid = int(role.split('.')[1])

                pg = db[basename]['pgid']
                if pg in pgs[osdid]:
                    cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
                    cmd.append(run.Raw("'{json}'".format(json=JSON)))
                    cmd += ["list-attrs"]
                    proc = remote.run(args=cmd, check_status=False, stdout=StringIO(), stderr=StringIO())
                    proc.wait()
                    if proc.exitstatus != 0:
                        log.error("Bad exit status {ret}".format(ret=proc.exitstatus))
                        ERRORS += 1
                        continue
                    keys = proc.stdout.getvalue().split()
                    values = dict(db[basename]["xattr"])

                    for key in keys:
                        if key == "_" or key == "snapset":
                            continue
                        key = key.strip("_")
                        if key not in values:
                            log.error("The key {key} should be present".format(key=key))
                            ERRORS += 1
                            continue
                        exp = values.pop(key)
                        cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
                        cmd.append(run.Raw("'{json}'".format(json=JSON)))
                        cmd += "get-attr {key}".format(key="_" + key).split()
                        proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
                        proc.wait()
                        if proc.exitstatus != 0:
                            log.error("get-attr failed with {ret}".format(ret=proc.exitstatus))
                            ERRORS += 1
                            continue
                        val = proc.stdout.getvalue()
                        if exp != val:
                            log.error("For key {key} got value {got} instead of {expected}".format(key=key, got=val, expected=exp))
                            ERRORS += 1
                    if len(values) != 0:
                        log.error("Not all keys found, remaining keys:")
                        log.error(values)

    log.info("Test pg info")
    for remote in osds.remotes.iterkeys():
        for role in osds.remotes[remote]:
            if string.find(role, "osd.") != 0:
                continue
            osdid = int(role.split('.')[1])

            for pg in pgs[osdid]:
                cmd = (prefix + "--op info --pgid {pg}").format(id=osdid, pg=pg).split()
                proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
                proc.wait()
                if proc.exitstatus != 0:
                    log.error("Failure of --op info command with {ret}".format(proc.exitstatus))
                    ERRORS += 1
                    continue
                info = proc.stdout.getvalue()
                if not str(pg) in info:
                    log.error("Bad data from info: {info}".format(info=info))
                    ERRORS += 1

    log.info("Test pg logging")
    for remote in osds.remotes.iterkeys():
        for role in osds.remotes[remote]:
            if string.find(role, "osd.") != 0:
                continue
            osdid = int(role.split('.')[1])

            for pg in pgs[osdid]:
                cmd = (prefix + "--op log --pgid {pg}").format(id=osdid, pg=pg).split()
                proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
                proc.wait()
                if proc.exitstatus != 0:
                    log.error("Getting log failed for pg {pg} from osd.{id} with {ret}".format(pg=pg, id=osdid, ret=proc.exitstatus))
                    ERRORS += 1
                    continue
                HASOBJ = pg in pgswithobjects
                MODOBJ = "modify" in proc.stdout.getvalue()
                if HASOBJ != MODOBJ:
                    log.error("Bad log for pg {pg} from osd.{id}".format(pg=pg, id=osdid))
                    MSG = "" if HASOBJ else "NOT "
                    log.error("Log should {msg}have a modify entry".format(msg=MSG))
                    ERRORS += 1

    log.info("Test pg export")
    EXP_ERRORS = 0
    for remote in osds.remotes.iterkeys():
        for role in osds.remotes[remote]:
            if string.find(role, "osd.") != 0:
                continue
            osdid = int(role.split('.')[1])

            for pg in pgs[osdid]:
                fpath = os.path.join(DATADIR, "osd{id}.{pg}".format(id=osdid, pg=pg))

                cmd = (prefix + "--op export --pgid {pg} --file {file}").format(id=osdid, pg=pg, file=fpath)
                proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
                proc.wait()
                if proc.exitstatus != 0:
                    log.error("Exporting failed for pg {pg} on osd.{id} with {ret}".format(pg=pg, id=osdid, ret=proc.exitstatus))
                    EXP_ERRORS += 1

    ERRORS += EXP_ERRORS

    log.info("Test pg removal")
    RM_ERRORS = 0
    for remote in osds.remotes.iterkeys():
        for role in osds.remotes[remote]:
            if string.find(role, "osd.") != 0:
                continue
            osdid = int(role.split('.')[1])

            for pg in pgs[osdid]:
                cmd = (prefix + "--op remove --pgid {pg}").format(pg=pg, id=osdid)
                proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
                proc.wait()
                if proc.exitstatus != 0:
                    log.error("Removing failed for pg {pg} on osd.{id} with {ret}".format(pg=pg, id=osdid, ret=proc.exitstatus))
                    RM_ERRORS += 1

    ERRORS += RM_ERRORS

    IMP_ERRORS = 0
    if EXP_ERRORS == 0 and RM_ERRORS == 0:
        log.info("Test pg import")

        for remote in osds.remotes.iterkeys():
            for role in osds.remotes[remote]:
                if string.find(role, "osd.") != 0:
                    continue
                osdid = int(role.split('.')[1])

                for pg in pgs[osdid]:
                    fpath = os.path.join(DATADIR, "osd{id}.{pg}".format(id=osdid, pg=pg))

                    cmd = (prefix + "--op import --file {file}").format(id=osdid, file=fpath)
                    proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
                    proc.wait()
                    if proc.exitstatus != 0:
                        log.error("Import failed from {file} with {ret}".format(file=fpath, ret=proc.exitstatus))
                        IMP_ERRORS += 1
    else:
        log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")

    ERRORS += IMP_ERRORS

    if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
        log.info("Restarting OSDs....")
        # They will still appear to be up because nodown is set
        for osd in manager.get_osd_status()['up']:
            manager.revive_osd(osd)
        # Wait for health?
        time.sleep(5)
        # Let the scrub that runs after the test verify consistency of all copies
        log.info("Verify replicated import data")
        objects = range(1, NUM_OBJECTS + 1)
        for i in objects:
            NAME = REP_NAME + "{num}".format(num=i)
            TESTNAME = os.path.join(DATADIR, "gettest")
            REFNAME = os.path.join(DATADIR, NAME)

            proc = rados(ctx, cli_remote, ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False)

            ret = proc.wait()
            if ret != 0:
                log.error("After import, rados get failed with {ret}".format(ret=ret))
                ERRORS += 1
                continue

            cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME, ref=REFNAME)
            proc = cli_remote.run(args=cmd, check_status=False)
            proc.wait()
            if proc.exitstatus != 0:
                log.error("Data comparison failed for {obj}".format(obj=NAME))
                ERRORS += 1

    if ERRORS == 0:
        log.info("TEST PASSED")
    else:
        log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))

    try:
        yield
    finally:
        log.info('Ending ceph_objectstore_tool')
Example #14
def configure_multisite_regions_and_zones(ctx, config, regions, role_endpoints,
                                          realm, master_client):
    """
    Configure multisite regions and zones from rados and rgw.
    """
    if not regions:
        log.debug(
            'In rgw.configure_multisite_regions_and_zones() and regions is None. '
            'Bailing')
        yield
        return

    if not realm:
        log.debug(
            'In rgw.configure_multisite_regions_and_zones() and realm is None. '
            'Bailing')
        yield
        return

    log.info('Configuring multisite regions and zones...')

    log.debug('config is %r', config)
    log.debug('regions are %r', regions)
    log.debug('role_endpoints = %r', role_endpoints)
    log.debug('realm is %r', realm)

    # extract the zone info
    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                       for client, c_config in config.iteritems()])
    log.debug('role_zones = %r', role_zones)

    # extract the user info and append it to the payload tuple for the given
    # client
    for client, c_config in config.iteritems():
        if not c_config:
            user_info = None
        else:
            user_info = extract_user_info(c_config)

        (region, zone, zone_info) = role_zones[client]
        role_zones[client] = (region, zone, zone_info, user_info)

    region_info = dict([(region_name,
                         extract_region_info(region_name, r_config))
                        for region_name, r_config in regions.iteritems()])

    fill_in_endpoints(region_info, role_zones, role_endpoints)

    # clear out the old defaults
    cluster_name, daemon_type, client_id = teuthology.split_role(master_client)
    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
    (mon, ) = ctx.cluster.only(first_mon).remotes.iterkeys()

    # read master zonegroup and master_zone
    for zonegroup, zg_info in region_info.iteritems():
        if zg_info['is_master']:
            master_zonegroup = zonegroup
            master_zone = zg_info['master_zone']
            break

    log.debug('master zonegroup = %r', master_zonegroup)
    log.debug('master zone = %r', master_zone)
    log.debug('master client = %r', master_client)

    rgwadmin(ctx,
             master_client,
             cmd=['realm', 'create', '--rgw-realm', realm, '--default'],
             check_status=True)

    for region, info in region_info.iteritems():
        region_json = json.dumps(info)
        log.debug('region info is: %s', region_json)
        rgwadmin(ctx,
                 master_client,
                 cmd=['zonegroup', 'set'],
                 stdin=StringIO(region_json),
                 check_status=True)

    rgwadmin(ctx,
             master_client,
             cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup],
             check_status=True)

    for role, (zonegroup, zone, zone_info,
               user_info) in role_zones.iteritems():
        (remote, ) = ctx.cluster.only(role).remotes.keys()
        for pool_info in zone_info['placement_pools']:
            remote.run(args=[
                'sudo', 'ceph', 'osd', 'pool', 'create', pool_info['val']
                ['index_pool'], '64', '64', '--cluster', cluster_name
            ])
            if ctx.rgw.ec_data_pool:
                create_ec_pool(remote, pool_info['val']['data_pool'], zone, 64,
                               ctx.rgw.erasure_code_profile, cluster_name)
            else:
                create_replicated_pool(remote, pool_info['val']['data_pool'],
                                       64, cluster_name)

    (zonegroup, zone, zone_info, user_info) = role_zones[master_client]
    zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
    log.debug("zone info is: %r", zone_json)
    rgwadmin(
        ctx,
        master_client,
        cmd=['zone', 'set', '--rgw-zonegroup', zonegroup, '--rgw-zone', zone],
        stdin=StringIO(zone_json),
        check_status=True)

    rgwadmin(ctx,
             master_client,
             cmd=['zone', 'default', '--rgw-zone', zone],
             check_status=True)

    rgwadmin(ctx,
             master_client,
             cmd=['period', 'update', '--commit'],
             check_status=True)

    yield
Example #15
def configure_regions_and_zones(ctx, config, regions, role_endpoints, realm):
    """
    Configure regions and zones from rados and rgw.
    """
    if not regions:
        log.debug(
            'In rgw.configure_regions_and_zones() and regions is None. '
            'Bailing')
        configure_compression_in_default_zone(ctx, config)
        yield
        return

    if not realm:
        log.debug(
            'In rgw.configure_regions_and_zones() and realm is None. '
            'Bailing')
        configure_compression_in_default_zone(ctx, config)
        yield
        return

    log.info('Configuring regions and zones...')

    log.debug('config is %r', config)
    log.debug('regions are %r', regions)
    log.debug('role_endpoints = %r', role_endpoints)
    log.debug('realm is %r', realm)

    # extract the zone info
    role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                       for client, c_config in config.iteritems()])
    log.debug('role_zones = %r', role_zones)

    # extract the user info and append it to the payload tuple for the given
    # client
    for client, c_config in config.iteritems():
        if not c_config:
            user_info = None
        else:
            user_info = extract_user_info(c_config)

        (region, zone, zone_info) = role_zones[client]
        role_zones[client] = (region, zone, zone_info, user_info)

    region_info = dict([
        (region_name, extract_region_info(region_name, r_config))
        for region_name, r_config in regions.iteritems()])

    fill_in_endpoints(region_info, role_zones, role_endpoints)

    # clear out the old defaults
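    # NOTE: 'client' below is whatever value the loop variable above was left
    # holding after its final iteration; this only works if every client
    # belongs to the same cluster.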
    cluster_name, daemon_type, client_id = teuthology.split_role(client)
    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
    (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
    # removing these objects from .rgw.root and the per-zone root pools
    # may or may not matter
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'region_info.default', '--cluster', cluster_name])
    rados(ctx, mon,
          cmd=['-p', '.rgw.root', 'rm', 'zone_info.default', '--cluster', cluster_name])

    # read master zonegroup and master_zone
    for zonegroup, zg_info in region_info.iteritems():
        if zg_info['is_master']:
            master_zonegroup = zonegroup
            master_zone = zg_info['master_zone']
            break

    for client in config.iterkeys():
        (zonegroup, zone, zone_info, user_info) = role_zones[client]
        if zonegroup == master_zonegroup and zone == master_zone:
            master_client = client
            break

    log.debug('master zonegroup = %r', master_zonegroup)
    log.debug('master zone = %r', master_zone)
    log.debug('master client = %r', master_client)
    log.debug('config %r ', config)

    (ret, out) = rgwadmin(ctx, client,
                          cmd=['realm', 'create', '--rgw-realm', realm, '--default'])
    log.debug('realm create ret %r exists %r', -ret, errno.EEXIST)
    assert ret == 0 or ret == -errno.EEXIST
    if ret == -errno.EEXIST:
        log.debug('realm %r exists', realm)

    for client in config.iterkeys():
        for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems():
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'region_info.default', '--cluster', cluster_name])
            rados(ctx, mon,
                  cmd=['-p', zone_info['domain_root'],
                       'rm', 'zone_info.default', '--cluster', cluster_name])

            (remote,) = ctx.cluster.only(role).remotes.keys()
            for pool_info in zone_info['placement_pools']:
                remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
                                 pool_info['val']['index_pool'], '64', '64', '--cluster', cluster_name])
                if ctx.rgw.ec_data_pool:
                    create_ec_pool(remote, pool_info['val']['data_pool'],
                                   zone, 64, ctx.rgw.erasure_code_profile, cluster_name)
                else:
                    create_replicated_pool(
                        remote, pool_info['val']['data_pool'],
                        64, cluster_name)
            zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
            log.debug('zone info is: %r', zone_json)
            rgwadmin(ctx, client,
                 cmd=['zone', 'set', '--rgw-zonegroup', zonegroup,
                      '--rgw-zone', zone],
                 stdin=StringIO(zone_json),
                 check_status=True)

        for region, info in region_info.iteritems():
            region_json = json.dumps(info)
            log.debug('region info is: %s', region_json)
            rgwadmin(ctx, client,
                     cmd=['zonegroup', 'set'],
                     stdin=StringIO(region_json),
                     check_status=True)
            if info['is_master']:
                rgwadmin(ctx, client,
                         cmd=['zonegroup', 'default', '--rgw-zonegroup', master_zonegroup],
                         check_status=True)

        (zonegroup, zone, zone_info, user_info) = role_zones[client]
        rgwadmin(ctx, client,
                 cmd=['zone', 'default', '--rgw-zone', zone],
                 check_status=True)

    # this used to take master_client; it needs updating accordingly
    rgwadmin(ctx, client,
             cmd=['period', 'update', '--commit'],
             check_status=True)

    yield