def test_url(self):
    httpretty.register_uri(
        httpretty.GET,
        re.compile("http://localhost:8888/(.*)"),
        body='{}',
        content_type="application/json",
    )
    connection = client.connection(
        client.Endpoint('localhost', 8888, False, 'key', 'secret'),
        True,
    )
    client.request(connection, 'get', '/%7E~', _retries=0)
    server_request = httpretty.last_request()
    # the client quotes the path again: the already-encoded '%7E'
    # becomes '%257E', and the literal '~' becomes '%7E'
    assert server_request.path == '/%257E%7E'
def set_datalog_work_bound(self, shard_num, time_to_use):
    (ret, out) = client.request(
        self.source_conn,
        ['replica_log', 'set', 'work_bound'],
        {"id": shard_num, 'type': self._type,
         'marker': 'FIIK',  # placeholder marker value
         })
    if 200 != ret:
        print 'set work bound failed, returned http code: ', ret
def test_url_bad(self):
    httpretty.register_uri(
        httpretty.GET,
        re.compile("http://localhost:8888/(.*)"),
        body='{}',
        content_type="application/json",
        status=500,
    )
    connection = client.connection(
        client.Endpoint('localhost', 8888, False, 'key', 'secret'),
        True,
    )
    with py.test.raises(exc.HttpError):
        client.request(connection, 'get', '/%7E~', _retries=0)
def process_entries_for_data_log_shard(self, shard_num, entries):
    ret = 200

    # we need this due to a bug in rgw that isn't auto-filling in
    # sensible defaults when start-time is omitted
    really_old_time = "2010-10-10 12:12:00"

    # NOTE rgw deals in UTC time. Make sure you adjust
    # your calls accordingly
    sync_start_time = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")

    # sync each entry / tag pair;
    # bail on any bucket where a non-200 status is returned
    for bucket_name in entries:
        if self.relock_log:
            ret = self.acquire_log_lock(self.source_conn,
                                        self.local_lock_id,
                                        self.source_zone, shard_num)
            if 200 != ret:
                print 'error acquiring lock for shard ', shard_num, \
                      ' lock_id: ', self.local_lock_id, \
                      ' in zone ', self.source_zone, \
                      ' in process_entries_for_data_log_shard(). ' \
                      ' Returned http code ', ret
                # log unlocking and adding the return value to the
                # result queue will be handled by the calling function
                return ret

        ret = self.sync_bucket(shard_num, bucket_name)
        if 200 != ret:
            print 'sync_bucket() failed for bucket ', bucket_name, \
                  ', returned http code: ', ret
            # if there is an error, release the log lock and bail
            ret = self.release_log_lock(self.source_conn,
                                        self.local_lock_id,
                                        self.source_zone, shard_num)
            return ret

    # TODO trim the log and then unlock it
    # trim the log for this shard now that its buckets are synced
    (ret, out) = client.request(
        self.source_conn,
        ['log', 'trim', 'id=' + str(shard_num)],
        {'id': shard_num, 'type': 'data',
         'start-time': really_old_time,
         'end-time': sync_start_time})
    if 200 != ret:
        print 'data log trim for shard ', shard_num, \
              ' returned http code ', ret
        # normally we would unlock and return a value here, but since
        # that's going to happen next, we effectively just fall through
        # into it

    ret = self.release_log_lock(self.source_conn, self.local_lock_id,
                                self.source_zone, shard_num)
    return ret
def test_url_response(self):
    httpretty.register_uri(
        httpretty.GET,
        re.compile("http://localhost:8888/(.*)"),
        body='{"msg": "ok"}',
        content_type="application/json",
    )
    result = client.request(self.connection, 'get', '/%7E~')
    assert result == {'msg': 'ok'}
def run(self):
    while True:  # keep looping until we break
        shard_num = self.work_queue.get()
        if shard_num is None:
            log.debug('process %s is done with all available shards',
                      self.processName)
            break

        log.debug('%s is processing shard %s', self.processName, shard_num)

        # first, lock the data log
        ret = self.acquire_log_lock(self.source_conn, self.local_lock_id,
                                    self.source_zone, shard_num)
        if 200 != ret:
            print 'acquire_log_lock() failed, returned http code: ', ret
            self.result_queue.put((self.processID, shard_num, ret))
            continue

        # get the log for this data log shard
        (ret, out) = client.request(self.source_conn,
                                    ['log', 'list', 'id=' + str(shard_num)],
                                    {'type': 'data', 'id': shard_num})
        if 200 != ret:
            print 'data list for shard ', shard_num, \
                  ' failed, returned http code: ', ret
            # we hit an error getting the data to sync.
            # Bail and unlock the log
            self.release_log_lock(self.source_conn, self.local_lock_id,
                                  self.source_zone, shard_num)
            self.result_queue.put((self.processID, shard_num, ret))
            continue

        log.debug('data list for shard %s returned %s', shard_num, ret)

        log_entry_list = out()
        log.debug('shard %s has %s entries', shard_num, len(log_entry_list))

        # filter the entries so that a given entry only shows up once
        # to be synced
        buckets_to_sync = self.sort_and_filter_entries(log_entry_list)

        ret = self.process_entries_for_data_log_shard(shard_num,
                                                      buckets_to_sync)

        self.result_queue.put((self.processID, shard_num, ret))
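# --- Hedged sketch (not part of the original source) ---
# run() above consumes shard numbers from work_queue until it sees a
# None sentinel, and reports (processID, shard_num, ret) tuples on
# result_queue. The driver below illustrates that queue protocol with
# a stub worker; the names stub_worker/drive_workers and the worker
# count are assumptions for illustration only.
import multiprocessing

def stub_worker(work_queue, result_queue, process_id):
    # stand-in for the real run() loop; reports success for each shard
    while True:
        shard_num = work_queue.get()
        if shard_num is None:
            break
        result_queue.put((process_id, shard_num, 200))

def drive_workers(num_workers=4, num_shards=8):
    work_queue = multiprocessing.Queue()
    result_queue = multiprocessing.Queue()
    workers = [multiprocessing.Process(target=stub_worker,
                                       args=(work_queue, result_queue, i))
               for i in range(num_workers)]
    for w in workers:
        w.start()
    for shard_num in range(num_shards):
        work_queue.put(shard_num)
    for _ in range(num_workers):
        # one None sentinel per worker so every run() loop breaks
        work_queue.put(None)
    # collect exactly one result per shard, then reap the workers
    results = [result_queue.get() for _ in range(num_shards)]
    for w in workers:
        w.join()
    return results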
def test_url_response(self):
    httpretty.register_uri(
        httpretty.GET,
        re.compile("http://localhost:8888/(.*)"),
        body='{"msg": "ok"}',
        content_type="application/json",
    )
    connection = client.connection(
        client.Endpoint('localhost', 8888, False, 'key', 'secret'),
        True,
    )
    result = client.request(connection, 'get', '/%7E~', _retries=0)
    assert result == {'msg': 'ok'}
# aws_secret_access_key = secret_key,
# host = 'cn-sz-radosgw-test1',
# is_secure=False,
# calling_format = boto.s3.connection.OrdinaryCallingFormat(),
# calling_format = 'boto.s3.connection.OrdinaryCallingFormat'
# )
bucket_name = 'docker-image-bucket'
obj_name = 'test/images/34e94e67e63a0f079d9336b3c2a52e814d138e5b3f1f614a0cfe273814ed7c0a/json'
src_zone = 'cn-sh'
client_id = 'radosgw-agent'
op_id = 'cn-sh-radosgw-test1'

# client.sync_object_intra_region(conn, bucket_name, obj_name, src_zone,
#                                 client_id, op_id)
path = u'{bucket}/{object}'.format(
    bucket=bucket_name,
    object=obj_name,
)
params = {
    'rgwx-source-zone': src_zone,
    'rgwx-client-id': client_id,
    'rgwx-op-id': op_id,
}
client.request(conn, 'put', path,
               params=params,
               headers={
                   'x-amz-copy-source': url_safe('%s/%s' % (bucket_name,
                                                            obj_name)),
               },
               expect_json=False)
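# 'url_safe' is not defined in this fragment. A minimal stand-in
# consistent with how it is used above (percent-encoding the
# x-amz-copy-source value) might look like this; an assumption for
# illustration, not the project's actual helper:
import urllib

def url_safe(s):
    return urllib.quote(s)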
def sync_bucket(self, shard_num, bucket_name):
    ret = 200

    # There is not an explicit bucket-index log lock. This is covered
    # by the lock on the datalog for this shard
    just_the_bucket = bucket_name.split(':')[0]
    print 'just the bucket: ', just_the_bucket

    # get the bilog for this bucket
    (ret, out) = client.request(self.source_conn,
                                ['log', 'list', 'type=bucket-index'],
                                {"bucket": bucket_name,
                                 'bucket-instance': bucket_name})
    if 200 != ret:
        print 'get bucket-index for bucket ', bucket_name, \
              ' failed, returned http code: ', ret
        return ret

    bucket_events = out()
    print 'bilog for bucket ', bucket_name, ' has ', \
          len(bucket_events), ' entries'

    # first, make sure the events are sorted in index_ver order
    sorted_events = sorted(bucket_events,
                           key=lambda entry: entry['index_ver'])

    for event in sorted_events:
        # make sure we still have the lock
        if self.relock_log:
            ret = self.acquire_log_lock(self.source_conn,
                                        self.local_lock_id,
                                        self.source_zone, shard_num)
            if 200 != ret:
                print 'error acquiring lock for shard ', shard_num, \
                      ' lock_id: ', self.local_lock_id, \
                      ' in zone ', self.source_zone, \
                      ' in sync_bucket(). ' \
                      ' Returned http code ', ret
                # log unlocking and adding the return value to the
                # result queue will be handled by the calling function
                return ret

        if event['state'] == 'complete':
            print ' applying: ', event
            if event['op'] == 'write':
                print 'copying object ', bucket_name + '/' + event['object']
                # sync this operation from source to destination.
                # issue this against the destination rgw, since the
                # operation is implemented as a 'pull' of the object
                #
                # TODO put real values in for rgwx-client-id and rgwx-op-id
                (ret, out) = client.request(
                    self.dest_conn,
                    ['object', 'add', bucket_name + '/' + event['object']],
                    {"rgwx-source-zone": self.source_zone,
                     "rgwx-client-id": 'joe bucks awesome client',
                     "rgwx-op-id": "42"})
            elif event['op'] == 'del':
                print 'deleting object ', bucket_name + '/' + event['object']
                # delete this object from the destination
                (ret, out) = client.request(
                    self.dest_conn,
                    ['object', 'rm', bucket_name + '/' + event['object']])
            else:
                print 'no idea what op this is: ', event['op']
                ret = 500

            if ret < 200 or ret > 299:  # all 200 - 299 codes are success
                print 'sync of object ', event['object'], \
                      ' failed, returned http code: ', ret, \
                      '. Bailing'
                return ret

    return ret