Example #1
def _test_configure_endpoints(dest_url, dest_region, dest_zone,
                              expected_src_url, expected_src_region,
                              expected_src_zone, specified_src_url=None,
                              meta_only=False):
    # parse the destination, and either a caller-specified source or an
    # empty placeholder endpoint for configure_endpoints() to fill in
    dest = client.parse_endpoint(dest_url)
    if specified_src_url is not None:
        src = client.parse_endpoint(specified_src_url)
    else:
        src = client.Endpoint(None, None, None)
    region_map = client.RegionMap(REGION_MAP)
    client.configure_endpoints(region_map, dest, src, meta_only)
    # both endpoints should now carry the expected region and zone names
    assert dest.region.name == dest_region
    assert dest.zone.name == dest_zone
    assert src == client.parse_endpoint(expected_src_url)
    assert src.region.name == expected_src_region
    assert src.zone.name == expected_src_zone
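
A minimal usage sketch for the helper above, assuming the test module's REGION_MAP contains a single region 'us' with zones 'us-west' and 'us-east'; every URL, region name, and zone name here is hypothetical:

def test_configure_endpoints_same_region():
    # hypothetical values; they must match whatever REGION_MAP the test
    # module actually defines
    _test_configure_endpoints(
        dest_url='http://us-west.example.com:80',
        dest_region='us',
        dest_zone='us-west',
        expected_src_url='http://us-east.example.com:80',
        expected_src_region='us',
        expected_src_zone='us-east',
    )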
Example #2
def main():
    # root (a.k.a. 'parent') and agent loggers
    root_logger = logging.getLogger()

    # allow all levels at root_logger, handlers control individual levels
    root_logger.setLevel(logging.DEBUG)

    # Console handler, meant only for user-facing information
    console_loglevel = logging.INFO

    sh = logging.StreamHandler()
    sh.setFormatter(util.log.color_format())
    # the console level is set here, before options are read from the
    # arguments, so that any errors raised during parsing are still reported
    sh.setLevel(console_loglevel)

    agent_logger = logging.getLogger('radosgw_agent')
    agent_logger.addHandler(sh)

    # After initial logging is configured, now parse args
    args = parse_args()

    # File handler
    log_file = args.log_file or 'radosgw-agent.log'
    try:
        fh = logging.handlers.WatchedFileHandler(log_file)
    except IOError as err:
        agent_logger.warning('unable to use log location: %s' % log_file)
        agent_logger.warning(err)
        agent_logger.warning('will fall back to ./radosgw-agent.log')
        # if the location is unusable, fall back to the cwd
        fh = logging.handlers.WatchedFileHandler('radosgw-agent.log')

    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter(util.log.BASE_FORMAT))

    root_logger.addHandler(fh)

    if args.verbose:
        console_loglevel = logging.DEBUG
    elif args.quiet:
        console_loglevel = logging.WARN

    # now that the actual log level has been parsed from the arguments,
    # reset it in the handler
    sh.setLevel(console_loglevel)

    # after logging is set up, ensure that the arguments are present in the
    # config object
    set_args_to_config(args)

    log_header()
    dest = args.destination
    dest.access_key = args.dest_access_key
    dest.secret_key = args.dest_secret_key
    src = args.source or client.Endpoint(None, None, None)
    if args.src_zone:
        src.zone = args.src_zone
    dest_conn = client.connection(dest)

    try:
        region_map = client.get_region_map(dest_conn)
    except AgentError:
        # anything that we know about and are correctly raising should
        # just get raised so that the decorator can handle it
        raise
    except Exception as error:
        # otherwise, wrap the error in an exception that nicely explains
        # what happened
        raise RegionMapError(error)

    client.configure_endpoints(region_map, dest, src, args.metadata_only)

    src.access_key = args.src_access_key
    src.secret_key = args.src_secret_key

    if config['args']['versioned']:
        log.debug('versioned flag enabled, overriding versioning check')
        config['use_versioning'] = True
    else:
        config['use_versioning'] = check_versioning(src)

    if args.test_server_host:
        log.warning('TEST MODE - do not run unless you are testing this program')
        TestHandler.src = src
        TestHandler.dest = dest
        TestHandler.num_workers = args.num_workers
        TestHandler.lock_timeout = args.lock_timeout
        TestHandler.max_entries = args.max_entries
        TestHandler.rgw_data_log_window = args.rgw_data_log_window
        TestHandler.object_sync_timeout = args.object_sync_timeout
        server = HTTPServer((args.test_server_host, args.test_server_port),
                            TestHandler)
        server.serve_forever()
        sys.exit()

    if args.sync_scope == 'full':
        meta_cls = sync.MetaSyncerFull
        data_cls = sync.DataSyncerFull
    else:
        meta_cls = sync.MetaSyncerInc
        data_cls = sync.DataSyncerInc

    meta_syncer = meta_cls(src, dest, args.max_entries)
    data_syncer = data_cls(src,
                           dest,
                           args.max_entries,
                           rgw_data_log_window=args.rgw_data_log_window,
                           object_sync_timeout=args.object_sync_timeout)

    # fetch metadata logs first, since data logs need to wait before becoming
    # usable: rgw has a window of data log updates during which the bucket
    # index log may still be updated without the data log getting a new entry
    # for the bucket
    sync.prepare_sync(meta_syncer, args.prepare_error_delay)
    if not args.metadata_only:
        sync.prepare_sync(data_syncer, args.prepare_error_delay)

    if args.sync_scope == 'full':
        log.info('syncing all metadata')
        meta_syncer.sync(args.num_workers, args.lock_timeout)
        if not args.metadata_only:
            log.info('syncing all data')
            data_syncer.sync(args.num_workers, args.lock_timeout)
        log.info('Finished full sync. Check logs to see any issues that '
                 'incremental sync will retry.')
    else:
        sync.incremental_sync(meta_syncer, data_syncer, args.num_workers,
                              args.lock_timeout, args.incremental_sync_delay,
                              args.metadata_only, args.prepare_error_delay)
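
The two-stage logging setup in this example (a console handler attached before argument parsing, a file handler attached after) can be reduced to a standalone sketch; the logger configuration below is illustrative, uses only the standard library, and the log-file path is hypothetical:

import logging
import logging.handlers

root = logging.getLogger()
root.setLevel(logging.DEBUG)      # root lets everything through; handlers filter

console = logging.StreamHandler()
console.setLevel(logging.INFO)    # user-facing output only
root.addHandler(console)

# ... parse arguments here; any early errors are already visible on the console ...

fh = logging.handlers.WatchedFileHandler('agent.log')  # hypothetical path
fh.setLevel(logging.DEBUG)        # full detail goes to the file
root.addHandler(fh)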
Example #3
def main():
    args = parse_args()
    log = logging.getLogger()
    log_level = logging.INFO
    lib_log_level = logging.WARN
    if args.verbose:
        log_level = logging.DEBUG
        lib_log_level = logging.DEBUG
    elif args.quiet:
        log_level = logging.WARN
    logging.basicConfig(level=log_level)
    logging.getLogger('boto').setLevel(lib_log_level)
    logging.getLogger('requests').setLevel(lib_log_level)

    if args.log_file is not None:
        handler = logging.handlers.WatchedFileHandler(
            filename=args.log_file,
            )
        formatter = logging.Formatter(
            fmt='%(asctime)s.%(msecs)03d %(process)d:%(levelname)s:%(name)s:%(message)s',
            datefmt='%Y-%m-%dT%H:%M:%S',
            )
        handler.setFormatter(formatter)
        logging.getLogger().addHandler(handler)

    dest = args.destination
    dest.access_key = args.dest_access_key
    dest.secret_key = args.dest_secret_key
    src = args.source or client.Endpoint(None, None, None)
    if args.src_zone:
        src.zone = args.src_zone
    dest_conn = client.connection(dest)

    try:
        region_map = client.get_region_map(dest_conn)
    except Exception:
        log.exception('Could not retrieve region map from destination')
        sys.exit(1)

    try:
        client.configure_endpoints(region_map, dest, src, args.metadata_only)
    except client.ClientException as e:
        log.error(e)
        sys.exit(1)

    src.access_key = args.src_access_key
    src.secret_key = args.src_secret_key

    if args.test_server_host:
        log.warning('TEST MODE - do not run unless you are testing this program')
        TestHandler.src = src
        TestHandler.dest = dest
        TestHandler.num_workers = args.num_workers
        TestHandler.lock_timeout = args.lock_timeout
        TestHandler.max_entries = args.max_entries
        TestHandler.rgw_data_log_window = args.rgw_data_log_window
        TestHandler.object_sync_timeout = args.object_sync_timeout
        server = HTTPServer((args.test_server_host, args.test_server_port),
                            TestHandler)
        server.serve_forever()
        sys.exit()

    if args.sync_scope == 'full':
        meta_cls = sync.MetaSyncerFull
        data_cls = sync.DataSyncerFull
    else:
        meta_cls = sync.MetaSyncerInc
        data_cls = sync.DataSyncerInc

    meta_syncer = meta_cls(src, dest, args.max_entries)
    data_syncer = data_cls(src, dest, args.max_entries,
                           rgw_data_log_window=args.rgw_data_log_window,
                           object_sync_timeout=args.object_sync_timeout)

    # fetch metadata logs first, since data logs need to wait before becoming
    # usable: rgw has a window of data log updates during which the bucket
    # index log may still be updated without the data log getting a new entry
    # for the bucket
    sync.prepare_sync(meta_syncer, args.prepare_error_delay)
    if not args.metadata_only:
        sync.prepare_sync(data_syncer, args.prepare_error_delay)

    if args.sync_scope == 'full':
        log.info('syncing all metadata')
        meta_syncer.sync(args.num_workers, args.lock_timeout)
        if not args.metadata_only:
            log.info('syncing all data')
            data_syncer.sync(args.num_workers, args.lock_timeout)
        log.info('Finished full sync. Check logs to see any issues that '
                 'incremental sync will retry.')
    else:
        sync.incremental_sync(meta_syncer, data_syncer,
                              args.num_workers,
                              args.lock_timeout,
                              args.incremental_sync_delay,
                              args.metadata_only,
                              args.prepare_error_delay)
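
Compared with the previous example, this version configures logging with a single basicConfig() call and then quiets noisy third-party libraries individually. A minimal sketch of that pattern, with the library names taken from the example above:

import logging

logging.basicConfig(level=logging.INFO)
# keep chatty HTTP libraries at WARN so they don't drown out agent messages
for name in ('boto', 'requests'):
    logging.getLogger(name).setLevel(logging.WARN)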