def test_endpoint_equality():
    """Endpoints for the same host compare equal across port/scheme variants."""
    # a.org reached via the default https port, explicit 443/https,
    # and explicit 80/http.
    implicit = client.Endpoint('a.org', None, True)
    explicit_https = client.Endpoint('a.org', 443, True)
    explicit_http = client.Endpoint('a.org', 80, False)
    pairs = [
        (implicit, explicit_https),
        (explicit_https, explicit_http),
        (explicit_http, implicit),
    ]
    for lhs, rhs in pairs:
        assert lhs == rhs
def test_endpoint_inequality():
    """A differing host, port, or scheme makes endpoints unequal."""
    reference = client.Endpoint('a.org', 80, True)
    variants = (
        client.Endpoint('b.org', 80, True),      # host differs
        client.Endpoint('a.org', 81, True),      # port differs
        client.Endpoint('a.org', 8080, False),   # port and scheme differ
    )
    for other in variants:
        assert reference != other
def test_url_response(self):
    """A 200 JSON response body is decoded and returned as a dict."""
    httpretty.register_uri(
        httpretty.GET,
        re.compile("http://localhost:8888/(.*)"),
        content_type="application/json",
        body='{"msg": "ok"}',
    )
    conn = client.connection(
        client.Endpoint('localhost', 8888, False, 'key', 'secret'),
        True,
    )
    payload = client.request(conn, 'get', '/%7E~', _retries=0)
    assert payload == {'msg': 'ok'}
def test_url_bad(self):
    """A 500 status from the server makes client.request raise HttpError."""
    httpretty.register_uri(
        httpretty.GET,
        re.compile("http://localhost:8888/(.*)"),
        status=500,
        content_type="application/json",
        body='{}',
    )
    conn = client.connection(
        client.Endpoint('localhost', 8888, False, 'key', 'secret'),
        True,
    )
    with py.test.raises(exc.HttpError):
        client.request(conn, 'get', '/%7E~', _retries=0)
def test_url(self):
    """Path characters are percent-escaped exactly once before sending."""
    httpretty.register_uri(
        httpretty.GET,
        re.compile("http://localhost:8888/(.*)"),
        content_type="application/json",
        body='{}',
    )
    conn = client.connection(
        client.Endpoint('localhost', 8888, False, 'key', 'secret'),
        True,
    )
    client.request(conn, 'get', '/%7E~', _retries=0)
    # The literal '%' is escaped to '%25' while the already-quoted '%7E'
    # for '~' must not be double-escaped.
    assert httpretty.last_request().path == '/%257E%7E'
def _test_configure_endpoints(dest_url, dest_region, dest_zone,
                              expected_src_url, expected_src_region,
                              expected_src_zone, specified_src_url=None,
                              meta_only=False):
    """Run configure_endpoints and verify the resolved regions and zones.

    When *specified_src_url* is None, an empty Endpoint stands in for the
    source and configure_endpoints is expected to fill it in from the map.
    """
    dest = client.parse_endpoint(dest_url)
    src = (client.parse_endpoint(specified_src_url)
           if specified_src_url is not None
           else client.Endpoint(None, None, None))
    region_map = client.RegionMap(REGION_MAP)
    client.configure_endpoints(region_map, dest, src, meta_only)
    assert (dest.region.name, dest.zone.name) == (dest_region, dest_zone)
    assert src == client.parse_endpoint(expected_src_url)
    assert (src.region.name, src.zone.name) == (expected_src_region,
                                                expected_src_zone)
def main():
    """Entry point for the agent.

    Configures logging, parses CLI arguments, resolves source/destination
    endpoints from the region map, then either serves the HTTP test
    handler or runs a full/incremental metadata (and optionally data) sync.
    """
    # root (a.k.a. 'parent') and agent loggers
    root_logger = logging.getLogger()
    # allow all levels at root_logger, handlers control individual levels
    root_logger.setLevel(logging.DEBUG)

    # Console handler, meant only for user-facing information
    console_loglevel = logging.INFO
    sh = logging.StreamHandler()
    sh.setFormatter(util.log.color_format())
    # this console level set here before reading options from the arguments
    # so that we can get errors if they pop up before
    sh.setLevel(console_loglevel)

    agent_logger = logging.getLogger('radosgw_agent')
    agent_logger.addHandler(sh)

    # After initial logging is configured, now parse args
    args = parse_args()

    # File handler; defaults to ./radosgw-agent.log when no path was given
    log_file = args.log_file or 'radosgw-agent.log'
    try:
        fh = logging.handlers.WatchedFileHandler(log_file)
    except IOError as err:
        agent_logger.warning('unable to use log location: %s' % log_file)
        agent_logger.warning(err)
        agent_logger.warning('will fallback to ./radosgw-agent.log')
        # if the location is not present, fallback to cwd
        fh = logging.handlers.WatchedFileHandler('radosgw-agent.log')
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(logging.Formatter(util.log.BASE_FORMAT))
    root_logger.addHandler(fh)

    # --verbose / --quiet only adjust the console handler; the file
    # handler always records DEBUG
    if args.verbose:
        console_loglevel = logging.DEBUG
    elif args.quiet:
        console_loglevel = logging.WARN
    # now that we have parsed the actual log level we need
    # reset it in the handler
    sh.setLevel(console_loglevel)

    # after logging is set ensure that the arguments are present in the
    # config object
    set_args_to_config(args)

    log_header()
    dest = args.destination
    dest.access_key = args.dest_access_key
    dest.secret_key = args.dest_secret_key
    # source may be omitted on the command line; start from an empty
    # Endpoint and let configure_endpoints fill it in below
    src = args.source or client.Endpoint(None, None, None)
    if args.src_zone:
        src.zone = args.src_zone
    dest_conn = client.connection(dest)

    try:
        region_map = client.get_region_map(dest_conn)
    except AgentError:
        # anything that we know about and are correctly raising should
        # just get raised so that the decorator can handle it
        raise
    except Exception as error:
        # otherwise, we have the below exception that will nicely deal with
        # explaining what happened
        raise RegionMapError(error)

    client.configure_endpoints(region_map, dest, src, args.metadata_only)

    src.access_key = args.src_access_key
    src.secret_key = args.src_secret_key

    # versioning: either forced by the CLI flag or probed from the source
    if config['args']['versioned']:
        log.debug('versioned flag enabled, overriding versioning check')
        config['use_versioning'] = True
    else:
        config['use_versioning'] = check_versioning(src)

    if args.test_server_host:
        # TEST MODE: expose sync operations over HTTP instead of running
        # them; blocks in serve_forever() and never reaches the sync code
        log.warn('TEST MODE - do not run unless you are testing this program')
        TestHandler.src = src
        TestHandler.dest = dest
        TestHandler.num_workers = args.num_workers
        TestHandler.lock_timeout = args.lock_timeout
        TestHandler.max_entries = args.max_entries
        TestHandler.rgw_data_log_window = args.rgw_data_log_window
        TestHandler.object_sync_timeout = args.object_sync_timeout
        server = HTTPServer((args.test_server_host, args.test_server_port),
                            TestHandler)
        server.serve_forever()
        sys.exit()

    # pick full vs incremental syncer implementations
    if args.sync_scope == 'full':
        meta_cls = sync.MetaSyncerFull
        data_cls = sync.DataSyncerFull
    else:
        meta_cls = sync.MetaSyncerInc
        data_cls = sync.DataSyncerInc

    meta_syncer = meta_cls(src, dest, args.max_entries)
    data_syncer = data_cls(src, dest, args.max_entries,
                           rgw_data_log_window=args.rgw_data_log_window,
                           object_sync_timeout=args.object_sync_timeout)

    # fetch logs first since data logs need to wait before becoming usable
    # due to rgw's window of data log updates during which the bucket index
    # log may still be updated without the data log getting a new entry for
    # the bucket
    sync.prepare_sync(meta_syncer, args.prepare_error_delay)
    if not args.metadata_only:
        sync.prepare_sync(data_syncer, args.prepare_error_delay)

    if args.sync_scope == 'full':
        log.info('syncing all metadata')
        meta_syncer.sync(args.num_workers, args.lock_timeout)
        if not args.metadata_only:
            log.info('syncing all data')
            data_syncer.sync(args.num_workers, args.lock_timeout)
        log.info('Finished full sync. Check logs to see any issues that '
                 'incremental sync will retry.')
    else:
        sync.incremental_sync(meta_syncer, data_syncer,
                              args.num_workers,
                              args.lock_timeout,
                              args.incremental_sync_delay,
                              args.metadata_only,
                              args.prepare_error_delay)
#access_key = '9N6JSFR97Z2LL3F8KZAT' #secret_key = 'ncLzrQRFY9qG0TLMglv1UwjpBl+DJohpdJ0ZQL8G' def url_safe(component): if isinstance(component, basestring): string = component.encode('utf8') else: string = str(component) return urllib.quote(string) logging.basicConfig(filename="boto.log", level=logging.DEBUG) host = 'cn-sz-radosgw-test1' dest = client.Endpoint(host, 80, False, access_key, secret_key, 'cn', 'cn-sz') conn = client.connection(dest) #conn = boto.s3.connection.S3Connection( # aws_access_key_id = access_key, # aws_secret_access_key = secret_key, # host = 'cn-sz-radosgw-test1', # is_secure=False, #calling_format = boto.s3.connection.OrdinaryCallingFormat(), # calling_format = 'boto.s3.connection.OrdinaryCallingFormat' # ) bucket_name = 'docker-image-bucket' obj_name = 'test/images/34e94e67e63a0f079d9336b3c2a52e814d138e5b3f1f614a0cfe273814ed7c0a/json' src_zone = 'cn-sh' client_id = 'radosgw-agent' op_id = 'cn-sh-radosgw-test1' #client.sync_object_intra_region(conn, bucket_name, obj_name, src_zone, client_id, op_id)
def setup(self): self.connection = client.connection( client.Endpoint('localhost', 8888, False, 'key', 'secret'), True, ) self.body = """
def test_endpoint_port_specified():
    """An explicitly supplied port is kept verbatim on the endpoint."""
    for explicit_port in (80, 443):
        endpoint = client.Endpoint('example.org', explicit_port, True)
        assert endpoint.port == explicit_port
def test_endpoint_default_port():
    """Without an explicit port, the scheme decides: 443 https, 80 http."""
    for is_secure, expected_port in ((True, 443), (False, 80)):
        endpoint = client.Endpoint('example.org', None, is_secure)
        assert endpoint.port == expected_port