コード例 #1
0
ファイル: sync.py プロジェクト: kri5/radosgw-agent
 def __init__(self, type_, src, dest, daemon_id):
     """Record the sync type and daemon id, and open one rgw connection
     per endpoint (source and destination)."""
     # what kind of syncer this is, and which daemon instance owns it
     self._type = type_
     self.daemon_id = daemon_id
     # keep both raw endpoints plus a live connection for each side
     self.src = src
     self.src_conn = client.connection(src)
     self.dest = dest
     self.dest_conn = client.connection(dest)
コード例 #2
0
 def __init__(self, src, dest, max_entries, *args, **kwargs):
     """Common syncer state; subclasses supply worker_cls and num_shards."""
     self.daemon_id = DAEMON_ID
     self.max_entries = max_entries
     # optional per-object timeout; None when the caller did not pass one
     self.object_sync_timeout = kwargs.get('object_sync_timeout')
     # endpoints and a live connection to each
     self.src = src
     self.dest = dest
     self.src_conn = client.connection(src)
     self.dest_conn = client.connection(dest)
     # placeholders populated by the subclass constructor
     self.worker_cls = None
     self.num_shards = None
コード例 #3
0
ファイル: sync.py プロジェクト: Ved-vampir/radosgw-agent
 def __init__(self, src, dest, max_entries, *args, **kwargs):
     """Shared syncer initialization; worker_cls/num_shards are set later
     by the concrete subclass."""
     self.daemon_id = DAEMON_ID
     self.max_entries = max_entries
     # optional timeout for syncing a single object (None if absent)
     self.object_sync_timeout = kwargs.get('object_sync_timeout')
     # source/destination endpoints with one open connection apiece
     self.src = src
     self.dest = dest
     self.src_conn = client.connection(src)
     self.dest_conn = client.connection(dest)
     # to be filled in by the subclass constructor
     self.worker_cls = None
     self.num_shards = None
コード例 #4
0
ファイル: worker.py プロジェクト: benagricola/radosgw-agent
    def __init__(self, work_queue, result_queue, log_lock_time, src, dest, **kwargs):
        """Set up a worker: its work/result queues, shard-lock state, and
        one rgw connection per endpoint."""
        super(Worker, self).__init__()
        # queues used to receive work items and to report outcomes
        self.work_queue = work_queue
        self.result_queue = result_queue
        # shard-lock bookkeeping; no lock is held initially
        self.log_lock_time = log_lock_time
        self.lock = None
        # identifier for locks taken by this process ("hostname:pid")
        self.local_lock_id = socket.gethostname() + ":" + str(os.getpid())
        # endpoints plus the two connection objects
        self.src = src
        self.dest = dest
        self.src_conn = client.connection(src)
        self.dest_conn = client.connection(dest)
コード例 #5
0
ファイル: worker.py プロジェクト: toabctl/radosgw-agent
    def __init__(self, work_queue, result_queue, log_lock_time,
                 src, dest, **kwargs):
        """Initialize the worker process: queues, lock state, and a live
        connection to both the source and destination gateways."""
        super(Worker, self).__init__()
        # input/output queues for this worker
        self.work_queue = work_queue
        self.result_queue = result_queue
        # lock duration for shard log locks; none held yet
        self.log_lock_time = log_lock_time
        self.lock = None
        # per-process lock id of the form "hostname:pid"
        self.local_lock_id = socket.gethostname() + ':' + str(os.getpid())
        # endpoints and their connection objects
        self.src = src
        self.dest = dest
        self.src_conn = client.connection(src)
        self.dest_conn = client.connection(dest)
コード例 #6
0
ファイル: test_client.py プロジェクト: scibian/radosgw-agent
    def test_url_response(self):
        """A JSON response body is decoded and returned as a dict."""
        # any GET against localhost:8888 answers with a small JSON doc
        httpretty.register_uri(
            httpretty.GET,
            re.compile("http://localhost:8888/(.*)"),
            body='{"msg": "ok"}',
            content_type="application/json",
        )
        conn = client.connection(
            client.Endpoint('localhost', 8888, False, 'key', 'secret'),
            True,
        )
        # the path deliberately mixes a pre-encoded '%7E' with a raw '~'
        assert client.request(conn, 'get', '/%7E~', _retries=0) == {'msg': 'ok'}
コード例 #7
0
    def test_url_response(self):
        """client.request decodes a JSON body into a plain dict."""
        # stub every GET on localhost:8888 with a fixed JSON payload
        httpretty.register_uri(
            httpretty.GET,
            re.compile("http://localhost:8888/(.*)"),
            body='{"msg": "ok"}',
            content_type="application/json",
        )
        conn = client.connection(
            client.Endpoint('localhost', 8888, False, 'key', 'secret'),
            True,
        )
        reply = client.request(conn, 'get', '/%7E~', _retries=0)
        # decoded JSON, not raw text
        assert reply == {'msg': 'ok'}
コード例 #8
0
    def test_url_bad(self):
        """A 500 status from the server makes client.request raise HttpError."""
        # every GET returns an empty JSON body with a 500 status code
        httpretty.register_uri(
            httpretty.GET,
            re.compile("http://localhost:8888/(.*)"),
            body='{}',
            content_type="application/json",
            status=500,
        )
        conn = client.connection(
            client.Endpoint('localhost', 8888, False, 'key', 'secret'),
            True,
        )
        # no retries, so the failure surfaces immediately
        with py.test.raises(exc.HttpError):
            client.request(conn, 'get', '/%7E~', _retries=0)
コード例 #9
0
    def test_url(self):
        """The request path is percent-encoded exactly once before sending."""
        httpretty.register_uri(
            httpretty.GET,
            re.compile("http://localhost:8888/(.*)"),
            body='{}',
            content_type="application/json",
        )
        conn = client.connection(
            client.Endpoint('localhost', 8888, False, 'key', 'secret'),
            True,
        )
        client.request(conn, 'get', '/%7E~', _retries=0)
        # the already-encoded '%7E' is escaped again ('%257E') while the
        # raw '~' is encoded once ('%7E')
        assert httpretty.last_request().path == '/%257E%7E'
コード例 #10
0
ファイル: test_client.py プロジェクト: scibian/radosgw-agent
    def test_url_bad(self):
        """Server errors (HTTP 500) surface as exc.HttpError."""
        # stub the endpoint so that every GET fails with status 500
        httpretty.register_uri(
            httpretty.GET,
            re.compile("http://localhost:8888/(.*)"),
            body='{}',
            content_type="application/json",
            status=500,
        )
        conn = client.connection(
            client.Endpoint('localhost', 8888, False, 'key', 'secret'),
            True,
        )
        with py.test.raises(exc.HttpError):
            # _retries=0: fail fast instead of retrying
            client.request(conn, 'get', '/%7E~', _retries=0)
コード例 #11
0
ファイル: test_client.py プロジェクト: scibian/radosgw-agent
    def test_url(self):
        """Verify single-pass url-encoding of the request path."""
        httpretty.register_uri(
            httpretty.GET,
            re.compile("http://localhost:8888/(.*)"),
            body='{}',
            content_type="application/json",
        )
        conn = client.connection(
            client.Endpoint('localhost', 8888, False, 'key', 'secret'),
            True,
        )
        client.request(conn, 'get', '/%7E~', _retries=0)
        sent = httpretty.last_request()
        # '%7E' becomes '%257E' (escaped '%'), '~' becomes '%7E'
        assert sent.path == '/%257E%7E'
コード例 #12
0
ファイル: cli.py プロジェクト: hnuzhoulin/radosgw-agent
def main():
    """Agent entry point: configure logging, connect to the destination
    gateway, resolve endpoints from the region map, then run a full or
    incremental metadata/data sync.

    NOTE(review): relies on module-level names (parse_args, client, sync,
    TestHandler, HTTPServer) defined elsewhere in this file/package.
    """
    args = parse_args()
    log = logging.getLogger()
    # defaults: INFO for the agent, WARN for chatty third-party libraries
    log_level = logging.INFO
    lib_log_level = logging.WARN
    if args.verbose:
        log_level = logging.DEBUG
        lib_log_level = logging.DEBUG
    elif args.quiet:
        log_level = logging.WARN
    logging.basicConfig(level=log_level)
    # boto and requests get their own (usually quieter) level
    logging.getLogger('boto').setLevel(lib_log_level)
    logging.getLogger('requests').setLevel(lib_log_level)

    # optional log file; WatchedFileHandler reopens the file if it is
    # rotated out from under the process (e.g. by logrotate)
    if args.log_file is not None:
        handler = logging.handlers.WatchedFileHandler(
            filename=args.log_file,
            )
        formatter = logging.Formatter(
            fmt='%(asctime)s.%(msecs)03d %(process)d:%(levelname)s:%(name)s:%(message)s',
            datefmt='%Y-%m-%dT%H:%M:%S',
            )
        handler.setFormatter(formatter)
        logging.getLogger().addHandler(handler)

    # attach credentials to the destination endpoint from the CLI args
    dest = args.destination
    dest.access_key = args.dest_access_key
    dest.secret_key = args.dest_secret_key
    # source may be omitted; an empty Endpoint is filled in by
    # configure_endpoints() from the region map below
    src = args.source or client.Endpoint(None, None, None)
    if args.src_zone:
        src.zone = args.src_zone
    dest_conn = client.connection(dest)

    try:
        region_map = client.get_region_map(dest_conn)
    except Exception:
        # cannot proceed without the region map; log traceback and bail
        log.exception('Could not retrieve region map from destination')
        sys.exit(1)

    try:
        client.configure_endpoints(region_map, dest, src, args.metadata_only)
    except client.ClientException as e:
        log.error(e)
        sys.exit(1)

    src.access_key = args.src_access_key
    src.secret_key = args.src_secret_key

    # test mode: serve sync operations over HTTP instead of syncing here
    # NOTE(review): log.warn is a deprecated alias of log.warning
    if args.test_server_host:
        log.warn('TEST MODE - do not run unless you are testing this program')
        TestHandler.src = src
        TestHandler.dest = dest
        TestHandler.num_workers = args.num_workers
        TestHandler.lock_timeout = args.lock_timeout
        TestHandler.max_entries = args.max_entries
        TestHandler.rgw_data_log_window = args.rgw_data_log_window
        TestHandler.object_sync_timeout = args.object_sync_timeout
        server = HTTPServer((args.test_server_host, args.test_server_port),
                            TestHandler)
        # serves requests until the process is interrupted
        server.serve_forever()
        sys.exit()

    # choose syncer implementations based on the requested scope
    if args.sync_scope == 'full':
        meta_cls = sync.MetaSyncerFull
        data_cls = sync.DataSyncerFull
    else:
        meta_cls = sync.MetaSyncerInc
        data_cls = sync.DataSyncerInc

    meta_syncer = meta_cls(src, dest, args.max_entries)
    data_syncer = data_cls(src, dest, args.max_entries,
                           rgw_data_log_window=args.rgw_data_log_window,
                           object_sync_timeout=args.object_sync_timeout)

    # fetch logs first since data logs need to wait before becoming usable
    # due to rgw's window of data log updates during which the bucket index
    # log may still be updated without the data log getting a new entry for
    # the bucket
    sync.prepare_sync(meta_syncer, args.prepare_error_delay)
    if not args.metadata_only:
        sync.prepare_sync(data_syncer, args.prepare_error_delay)

    if args.sync_scope == 'full':
        log.info('syncing all metadata')
        meta_syncer.sync(args.num_workers, args.lock_timeout)
        if not args.metadata_only:
            log.info('syncing all data')
            data_syncer.sync(args.num_workers, args.lock_timeout)
        log.info('Finished full sync. Check logs to see any issues that '
                 'incremental sync will retry.')
    else:
        # incremental mode alternates metadata/data passes with a delay
        sync.incremental_sync(meta_syncer, data_syncer,
                              args.num_workers,
                              args.lock_timeout,
                              args.incremental_sync_delay,
                              args.metadata_only,
                              args.prepare_error_delay)
コード例 #13
0
ファイル: cli.py プロジェクト: Ved-vampir/radosgw-agent
def main():
    """Agent entry point: set up console and file logging, resolve the
    source/destination endpoints via the region map, then run a full or
    incremental sync.

    NOTE(review): relies on module-level names (parse_args, client, sync,
    util, config, log, AgentError, RegionMapError, set_args_to_config,
    check_versioning, log_header, TestHandler, HTTPServer) defined
    elsewhere in this file/package.
    """
    # root (a.k.a. 'parent') and agent loggers
    root_logger = logging.getLogger()

    # allow all levels at root_logger, handlers control individual levels
    root_logger.setLevel(logging.DEBUG)

    # Console handler, meant only for user-facing information
    console_loglevel = logging.INFO

    sh = logging.StreamHandler()
    sh.setFormatter(util.log.color_format())
    # this console level set here before reading options from the arguments
    # so that we can get errors if they pop up before
    sh.setLevel(console_loglevel)

    agent_logger = logging.getLogger('radosgw_agent')
    agent_logger.addHandler(sh)

    # After initial logging is configured, now parse args
    args = parse_args()

    # File handler; falls back to the current directory if the requested
    # location cannot be opened
    log_file = args.log_file or 'radosgw-agent.log'
    try:
        fh = logging.handlers.WatchedFileHandler(log_file)
    except IOError as err:
        agent_logger.warning('unable to use log location: %s' % log_file)
        agent_logger.warning(err)
        agent_logger.warning('will fallback to ./radosgw-agent.log')
        # if the location is not present, fallback to cwd
        fh = logging.handlers.WatchedFileHandler('radosgw-agent.log')

    # the log file always records everything, regardless of console level
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter(util.log.BASE_FORMAT))

    root_logger.addHandler(fh)

    # --verbose / --quiet adjust only the console verbosity
    if args.verbose:
        console_loglevel = logging.DEBUG
    elif args.quiet:
        console_loglevel = logging.WARN

    # now that we have parsed the actual log level we need to
    # reset it in the handler
    sh.setLevel(console_loglevel)

    # after logging is set, ensure that the arguments are present in the
    # config object
    set_args_to_config(args)

    log_header()
    # attach credentials to the destination endpoint
    dest = args.destination
    dest.access_key = args.dest_access_key
    dest.secret_key = args.dest_secret_key
    # source may be omitted; configure_endpoints() fills it in below
    src = args.source or client.Endpoint(None, None, None)
    if args.src_zone:
        src.zone = args.src_zone
    dest_conn = client.connection(dest)

    try:
        region_map = client.get_region_map(dest_conn)
    except AgentError:
        # anything that we know about and are correctly raising should
        # just get raised so that the decorator can handle it
        raise
    except Exception as error:
        # otherwise, we have the below exception that will nicely deal with
        # explaining what happened
        raise RegionMapError(error)

    client.configure_endpoints(region_map, dest, src, args.metadata_only)

    src.access_key = args.src_access_key
    src.secret_key = args.src_secret_key

    # --versioned forces versioned-object sync; otherwise probe the source
    if config['args']['versioned']:
        log.debug('versioned flag enabled, overriding versioning check')
        config['use_versioning'] = True
    else:
        config['use_versioning'] = check_versioning(src)

    # test mode: serve sync operations over HTTP instead of syncing here
    # NOTE(review): log.warn is a deprecated alias of log.warning
    if args.test_server_host:
        log.warn('TEST MODE - do not run unless you are testing this program')
        TestHandler.src = src
        TestHandler.dest = dest
        TestHandler.num_workers = args.num_workers
        TestHandler.lock_timeout = args.lock_timeout
        TestHandler.max_entries = args.max_entries
        TestHandler.rgw_data_log_window = args.rgw_data_log_window
        TestHandler.object_sync_timeout = args.object_sync_timeout
        server = HTTPServer((args.test_server_host, args.test_server_port),
                            TestHandler)
        # serves requests until the process is interrupted
        server.serve_forever()
        sys.exit()

    # choose syncer implementations based on the requested scope
    if args.sync_scope == 'full':
        meta_cls = sync.MetaSyncerFull
        data_cls = sync.DataSyncerFull
    else:
        meta_cls = sync.MetaSyncerInc
        data_cls = sync.DataSyncerInc

    meta_syncer = meta_cls(src, dest, args.max_entries)
    data_syncer = data_cls(src, dest, args.max_entries,
                           rgw_data_log_window=args.rgw_data_log_window,
                           object_sync_timeout=args.object_sync_timeout)

    # fetch logs first since data logs need to wait before becoming usable
    # due to rgw's window of data log updates during which the bucket index
    # log may still be updated without the data log getting a new entry for
    # the bucket
    sync.prepare_sync(meta_syncer, args.prepare_error_delay)
    if not args.metadata_only:
        sync.prepare_sync(data_syncer, args.prepare_error_delay)

    if args.sync_scope == 'full':
        log.info('syncing all metadata')
        meta_syncer.sync(args.num_workers, args.lock_timeout)
        if not args.metadata_only:
            log.info('syncing all data')
            data_syncer.sync(args.num_workers, args.lock_timeout)
        log.info('Finished full sync. Check logs to see any issues that '
                 'incremental sync will retry.')
    else:
        # incremental mode alternates metadata/data passes with a delay
        sync.incremental_sync(meta_syncer, data_syncer,
                              args.num_workers,
                              args.lock_timeout,
                              args.incremental_sync_delay,
                              args.metadata_only,
                              args.prepare_error_delay)
コード例 #14
0
#sh
#access_key = '9N6JSFR97Z2LL3F8KZAT'
#secret_key = 'ncLzrQRFY9qG0TLMglv1UwjpBl+DJohpdJ0ZQL8G'

def url_safe(component):
    """Return *component* percent-encoded for safe use in a URL path.

    Text values (Python 2 ``basestring``: str or unicode) are encoded to
    UTF-8 bytes first; anything else is stringified with str().
    """
    raw = component.encode('utf8') if isinstance(component, basestring) else str(component)
    return urllib.quote(raw)

# Ad-hoc test script: open a connection to a radosgw endpoint and prepare
# the pieces of an intra-region object sync request.
# NOTE(review): access_key/secret_key must be defined earlier in this file
# (sample values appear commented out near the top).
logging.basicConfig(filename="boto.log", level=logging.DEBUG)
host='cn-sz-radosgw-test1'

# destination endpoint: region 'cn', zone 'cn-sz', plain HTTP on port 80
dest=client.Endpoint(host, 80, False, access_key, secret_key, 'cn', 'cn-sz')
conn=client.connection(dest)
#conn = boto.s3.connection.S3Connection(
#        aws_access_key_id = access_key,
#        aws_secret_access_key = secret_key,
#        host = 'cn-sz-radosgw-test1',
#        is_secure=False,
        #calling_format = boto.s3.connection.OrdinaryCallingFormat(),
#	calling_format = 'boto.s3.connection.OrdinaryCallingFormat'
#        )
# identifiers for the object to sync and the agent performing the sync
bucket_name = 'docker-image-bucket'
obj_name='test/images/34e94e67e63a0f079d9336b3c2a52e814d138e5b3f1f614a0cfe273814ed7c0a/json'
src_zone='cn-sh'
client_id='radosgw-agent'
op_id='cn-sh-radosgw-test1'
#client.sync_object_intra_region(conn, bucket_name, obj_name, src_zone, client_id, op_id)
path = u'{bucket}/{object}'.format(
コード例 #15
0
ファイル: test_client.py プロジェクト: scibian/radosgw-agent
 def setup(self):
     self.connection = client.connection(
         client.Endpoint('localhost', 8888, False, 'key', 'secret'),
         True,
     )
     self.body = """
コード例 #16
0
#secret_key = 'ncLzrQRFY9qG0TLMglv1UwjpBl+DJohpdJ0ZQL8G'


def url_safe(component):
    """Percent-encode *component* so it can be embedded in a URL.

    Python 2 text types (``basestring``) are UTF-8 encoded first; other
    values go through str() before quoting.
    """
    if not isinstance(component, basestring):
        encoded = str(component)
    else:
        encoded = component.encode('utf8')
    return urllib.quote(encoded)


# Ad-hoc test script: connect to a radosgw endpoint and set up the
# arguments for an intra-region object sync call.
# NOTE(review): access_key/secret_key must be defined earlier in this file
# (a sample secret_key is commented out near the top).
logging.basicConfig(filename="boto.log", level=logging.DEBUG)
host = 'cn-sz-radosgw-test1'

# destination endpoint: region 'cn', zone 'cn-sz', plain HTTP on port 80
dest = client.Endpoint(host, 80, False, access_key, secret_key, 'cn', 'cn-sz')
conn = client.connection(dest)
#conn = boto.s3.connection.S3Connection(
#        aws_access_key_id = access_key,
#        aws_secret_access_key = secret_key,
#        host = 'cn-sz-radosgw-test1',
#        is_secure=False,
#calling_format = boto.s3.connection.OrdinaryCallingFormat(),
#	calling_format = 'boto.s3.connection.OrdinaryCallingFormat'
#        )
# identifiers for the object to sync and the agent performing the sync
bucket_name = 'docker-image-bucket'
obj_name = 'test/images/34e94e67e63a0f079d9336b3c2a52e814d138e5b3f1f614a0cfe273814ed7c0a/json'
src_zone = 'cn-sh'
client_id = 'radosgw-agent'
op_id = 'cn-sh-radosgw-test1'
#client.sync_object_intra_region(conn, bucket_name, obj_name, src_zone, client_id, op_id)
path = u'{bucket}/{object}'.format(
コード例 #17
0
def main():
    """Agent entry point: configure console/file logging, resolve
    endpoints from the destination's region map, then run a full or
    incremental metadata/data sync.

    NOTE(review): relies on module-level names (parse_args, client, sync,
    util, config, log, AgentError, RegionMapError, set_args_to_config,
    check_versioning, log_header, TestHandler, HTTPServer) defined
    elsewhere in this file/package.
    """
    # root (a.k.a. 'parent') and agent loggers
    root_logger = logging.getLogger()

    # allow all levels at root_logger, handlers control individual levels
    root_logger.setLevel(logging.DEBUG)

    # Console handler, meant only for user-facing information
    console_loglevel = logging.INFO

    sh = logging.StreamHandler()
    sh.setFormatter(util.log.color_format())
    # this console level set here before reading options from the arguments
    # so that we can get errors if they pop up before
    sh.setLevel(console_loglevel)

    agent_logger = logging.getLogger('radosgw_agent')
    agent_logger.addHandler(sh)

    # After initial logging is configured, now parse args
    args = parse_args()

    # File handler; falls back to the current directory when the
    # requested location cannot be opened
    log_file = args.log_file or 'radosgw-agent.log'
    try:
        fh = logging.handlers.WatchedFileHandler(log_file)
    except IOError as err:
        agent_logger.warning('unable to use log location: %s' % log_file)
        agent_logger.warning(err)
        agent_logger.warning('will fallback to ./radosgw-agent.log')
        # if the location is not present, fallback to cwd
        fh = logging.handlers.WatchedFileHandler('radosgw-agent.log')

    # the log file always records everything, regardless of console level
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter(util.log.BASE_FORMAT))

    root_logger.addHandler(fh)

    # --verbose / --quiet adjust only the console verbosity
    if args.verbose:
        console_loglevel = logging.DEBUG
    elif args.quiet:
        console_loglevel = logging.WARN

    # now that we have parsed the actual log level we need to
    # reset it in the handler
    sh.setLevel(console_loglevel)

    # after logging is set, ensure that the arguments are present in the
    # config object
    set_args_to_config(args)

    log_header()
    # attach credentials to the destination endpoint
    dest = args.destination
    dest.access_key = args.dest_access_key
    dest.secret_key = args.dest_secret_key
    # source may be omitted; configure_endpoints() fills it in below
    src = args.source or client.Endpoint(None, None, None)
    if args.src_zone:
        src.zone = args.src_zone
    dest_conn = client.connection(dest)

    try:
        region_map = client.get_region_map(dest_conn)
    except AgentError:
        # anything that we know about and are correctly raising should
        # just get raised so that the decorator can handle it
        raise
    except Exception as error:
        # otherwise, we have the below exception that will nicely deal with
        # explaining what happened
        raise RegionMapError(error)

    client.configure_endpoints(region_map, dest, src, args.metadata_only)

    src.access_key = args.src_access_key
    src.secret_key = args.src_secret_key

    # --versioned forces versioned-object sync; otherwise probe the source
    if config['args']['versioned']:
        log.debug('versioned flag enabled, overriding versioning check')
        config['use_versioning'] = True
    else:
        config['use_versioning'] = check_versioning(src)

    # test mode: serve sync operations over HTTP instead of syncing here
    # NOTE(review): log.warn is a deprecated alias of log.warning
    if args.test_server_host:
        log.warn('TEST MODE - do not run unless you are testing this program')
        TestHandler.src = src
        TestHandler.dest = dest
        TestHandler.num_workers = args.num_workers
        TestHandler.lock_timeout = args.lock_timeout
        TestHandler.max_entries = args.max_entries
        TestHandler.rgw_data_log_window = args.rgw_data_log_window
        TestHandler.object_sync_timeout = args.object_sync_timeout
        server = HTTPServer((args.test_server_host, args.test_server_port),
                            TestHandler)
        # serves requests until the process is interrupted
        server.serve_forever()
        sys.exit()

    # choose syncer implementations based on the requested scope
    if args.sync_scope == 'full':
        meta_cls = sync.MetaSyncerFull
        data_cls = sync.DataSyncerFull
    else:
        meta_cls = sync.MetaSyncerInc
        data_cls = sync.DataSyncerInc

    meta_syncer = meta_cls(src, dest, args.max_entries)
    data_syncer = data_cls(src,
                           dest,
                           args.max_entries,
                           rgw_data_log_window=args.rgw_data_log_window,
                           object_sync_timeout=args.object_sync_timeout)

    # fetch logs first since data logs need to wait before becoming usable
    # due to rgw's window of data log updates during which the bucket index
    # log may still be updated without the data log getting a new entry for
    # the bucket
    sync.prepare_sync(meta_syncer, args.prepare_error_delay)
    if not args.metadata_only:
        sync.prepare_sync(data_syncer, args.prepare_error_delay)

    if args.sync_scope == 'full':
        log.info('syncing all metadata')
        meta_syncer.sync(args.num_workers, args.lock_timeout)
        if not args.metadata_only:
            log.info('syncing all data')
            data_syncer.sync(args.num_workers, args.lock_timeout)
        log.info('Finished full sync. Check logs to see any issues that '
                 'incremental sync will retry.')
    else:
        # incremental mode alternates metadata/data passes with a delay
        sync.incremental_sync(meta_syncer, data_syncer, args.num_workers,
                              args.lock_timeout, args.incremental_sync_delay,
                              args.metadata_only, args.prepare_error_delay)
コード例 #18
0
 def setup(self):
     self.connection = client.connection(
         client.Endpoint('localhost', 8888, False, 'key', 'secret'),
         True,
     )
     self.body = """