def test_main(self):
    container = 'container-%s' % uuid4()
    client.put_container(self.url, self.token, container)
    apart, anodes = self.account_ring.get_nodes(self.account)
    anode = anodes[0]
    cpart, cnodes = self.container_ring.get_nodes(self.account, container)
    cnode = cnodes[0]
    kill(self.pids[self.port2server[cnode['port']]], SIGTERM)
    obj = 'object-%s' % uuid4()
    client.put_object(self.url, self.token, container, obj, '')
    self.pids[self.port2server[cnode['port']]] = \
        Popen(['chase-container-server',
               '/etc/chase/container-server/%d.conf' %
               ((cnode['port'] - 6001) / 10)]).pid
    sleep(2)
    self.assert_(not direct_client.direct_get_container(
        cnode, cpart, self.account, container)[1])
    ps = []
    for n in xrange(1, 5):
        ps.append(Popen(['chase-object-updater',
                         '/etc/chase/object-server/%d.conf' % n, 'once']))
    for p in ps:
        p.wait()
    objs = [o['name'] for o in direct_client.direct_get_container(
        cnode, cpart, self.account, container)[1]]
    self.assert_(obj in objs)
def _run(self, thread):
    if time.time() - self.heartbeat >= 15:
        self.heartbeat = time.time()
        self._log_status('PUTS')
    name = uuid.uuid4().hex
    if self.object_sources:
        source = random.choice(self.files)
    else:
        source = '0' * self.object_size
    device = random.choice(self.devices)
    partition = str(random.randint(1, 3000))
    container_name = random.choice(self.containers)
    with self.connection() as conn:
        try:
            if self.use_proxy:
                client.put_object(self.url, self.token, container_name,
                                  name, source,
                                  content_length=len(source),
                                  http_conn=conn)
            else:
                node = {'ip': self.ip, 'port': self.port, 'device': device}
                direct_client.direct_put_object(node, partition,
                                                self.account, container_name,
                                                name, source,
                                                content_length=len(source))
        except client.ClientException, e:
            self.logger.debug(str(e))
            self.failures += 1
        else:
def _setup_data_file(self, container, obj, data):
    client.put_container(self.url, self.token, container)
    client.put_object(self.url, self.token, container, obj, data)
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    self.assertEquals(odata, data)
    opart, onodes = self.object_ring.get_nodes(self.account, container, obj)
    onode = onodes[0]
    node_id = (onode["port"] - 6000) / 10
    device = onode["device"]
    hash_str = hash_path(self.account, container, obj)
    obj_server_conf = readconf("/etc/chase/object-server/%s.conf" % node_id)
    devices = obj_server_conf["app:object-server"]["devices"]
    obj_dir = "%s/%s/objects/%s/%s/%s/" % (devices, device, opart,
                                           hash_str[-3:], hash_str)
    data_file = self._get_data_file_path(obj_dir)
    return onode, opart, data_file
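# A hypothetical usage sketch, not part of the original suite: the helper
# above returns the first primary node, its partition, and the on-disk .data
# path, so a probe test can damage exactly one replica before exercising the
# auditor or replicator. The container/object names (and the use of os) are
# assumptions for illustration only.
def _example_remove_first_replica(self):
    import os
    onode, opart, data_file = self._setup_data_file(
        'probe-container', 'probe-object', 'VERIFY')
    # Remove the .data file on the first primary to simulate a lost replica.
    os.unlink(data_file)
    return onode, opart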
def test_main(self):
    container1 = 'container1'
    client.put_container(self.url, self.token, container1)
    container2 = 'container2'
    client.put_container(self.url, self.token, container2)
    headers, containers = client.get_account(self.url, self.token)
    self.assertEquals(headers['x-account-container-count'], '2')
    self.assertEquals(headers['x-account-object-count'], '0')
    self.assertEquals(headers['x-account-bytes-used'], '0')
    found1 = False
    found2 = False
    for c in containers:
        if c['name'] == container1:
            found1 = True
            self.assertEquals(c['count'], 0)
            self.assertEquals(c['bytes'], 0)
        elif c['name'] == container2:
            found2 = True
            self.assertEquals(c['count'], 0)
            self.assertEquals(c['bytes'], 0)
    self.assert_(found1)
    self.assert_(found2)
    client.put_object(self.url, self.token, container2, 'object1', '1234')
    headers, containers = client.get_account(self.url, self.token)
    self.assertEquals(headers['x-account-container-count'], '2')
    self.assertEquals(headers['x-account-object-count'], '0')
    self.assertEquals(headers['x-account-bytes-used'], '0')
    found1 = False
    found2 = False
    for c in containers:
        if c['name'] == container1:
            found1 = True
            self.assertEquals(c['count'], 0)
            self.assertEquals(c['bytes'], 0)
        elif c['name'] == container2:
            found2 = True
            self.assertEquals(c['count'], 0)
            self.assertEquals(c['bytes'], 0)
    self.assert_(found1)
    self.assert_(found2)
    get_to_final_state()
    headers, containers = client.get_account(self.url, self.token)
    self.assertEquals(headers['x-account-container-count'], '2')
    self.assertEquals(headers['x-account-object-count'], '1')
    self.assertEquals(headers['x-account-bytes-used'], '4')
    found1 = False
    found2 = False
    for c in containers:
        if c['name'] == container1:
            found1 = True
            self.assertEquals(c['count'], 0)
            self.assertEquals(c['bytes'], 0)
        elif c['name'] == container2:
            found2 = True
            self.assertEquals(c['count'], 1)
            self.assertEquals(c['bytes'], 4)
    self.assert_(found1)
    self.assert_(found2)
    apart, anodes = self.account_ring.get_nodes(self.account)
    kill(self.pids[self.port2server[anodes[0]['port']]], SIGTERM)
    client.delete_container(self.url, self.token, container1)
    client.put_object(self.url, self.token, container2, 'object2', '12345')
    headers, containers = client.get_account(self.url, self.token)
    self.assertEquals(headers['x-account-container-count'], '1')
    self.assertEquals(headers['x-account-object-count'], '1')
    self.assertEquals(headers['x-account-bytes-used'], '4')
    found1 = False
    found2 = False
    for c in containers:
        if c['name'] == container1:
            found1 = True
        elif c['name'] == container2:
            found2 = True
            self.assertEquals(c['count'], 1)
            self.assertEquals(c['bytes'], 4)
    self.assert_(not found1)
    self.assert_(found2)
    ps = []
    for n in xrange(1, 5):
        ps.append(Popen(['chase-container-updater',
                         '/etc/chase/container-server/%d.conf' % n, 'once']))
    for p in ps:
        p.wait()
    headers, containers = client.get_account(self.url, self.token)
    self.assertEquals(headers['x-account-container-count'], '1')
    self.assertEquals(headers['x-account-object-count'], '2')
    self.assertEquals(headers['x-account-bytes-used'], '9')
    found1 = False
    found2 = False
    for c in containers:
        if c['name'] == container1:
            found1 = True
        elif c['name'] == container2:
            found2 = True
            self.assertEquals(c['count'], 2)
            self.assertEquals(c['bytes'], 9)
    self.assert_(not found1)
    self.assert_(found2)
    self.pids[self.port2server[anodes[0]['port']]] = \
        Popen(['chase-account-server',
               '/etc/chase/account-server/%d.conf' %
               ((anodes[0]['port'] - 6002) / 10)]).pid
    sleep(2)
    # These are the earlier counts and bytes because the first node doesn't
    # have the newest updates yet.
    headers, containers = \
        direct_client.direct_get_account(anodes[0], apart, self.account)
    self.assertEquals(headers['x-account-container-count'], '2')
    self.assertEquals(headers['x-account-object-count'], '1')
    self.assertEquals(headers['x-account-bytes-used'], '4')
    found1 = False
    found2 = False
    for c in containers:
        if c['name'] == container1:
            found1 = True
        elif c['name'] == container2:
            found2 = True
            # This is the earlier count and bytes because the first node
            # doesn't have the newest updates yet.
            self.assertEquals(c['count'], 1)
            self.assertEquals(c['bytes'], 4)
    # This is okay because the first node hasn't got the update that
    # container1 was deleted yet.
    self.assert_(found1)
    self.assert_(found2)
    get_to_final_state()
    headers, containers = \
        direct_client.direct_get_account(anodes[0], apart, self.account)
    self.assertEquals(headers['x-account-container-count'], '1')
    self.assertEquals(headers['x-account-object-count'], '2')
    self.assertEquals(headers['x-account-bytes-used'], '9')
    found1 = False
    found2 = False
    for c in containers:
        if c['name'] == container1:
            found1 = True
        elif c['name'] == container2:
            found2 = True
            self.assertEquals(c['count'], 2)
            self.assertEquals(c['bytes'], 9)
    self.assert_(not found1)
    self.assert_(found2)
def test_main(self):
    container = 'container-%s' % uuid4()
    client.put_container(self.url, self.token, container)
    apart, anodes = self.account_ring.get_nodes(self.account)
    cpart, cnodes = self.container_ring.get_nodes(self.account, container)
    cnode = cnodes[0]
    obj = 'object-%s' % uuid4()
    opart, onodes = self.object_ring.get_nodes(self.account, container, obj)
    onode = onodes[0]
    kill(self.pids[self.port2server[onode['port']]], SIGTERM)
    client.put_object(self.url, self.token, container, obj, 'VERIFY')
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Object GET did not return VERIFY, instead it '
                        'returned: %s' % repr(odata))
    # Kill all primaries to ensure GET handoff works
    for node in onodes[1:]:
        kill(self.pids[self.port2server[node['port']]], SIGTERM)
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Object GET did not return VERIFY, instead it '
                        'returned: %s' % repr(odata))
    for node in onodes[1:]:
        self.pids[self.port2server[node['port']]] = Popen([
            'chase-object-server',
            '/etc/chase/object-server/%d.conf' %
            ((node['port'] - 6000) / 10)]).pid
    sleep(2)
    # We've indirectly verified the handoff node has the object, but let's
    # directly verify it.
    another_onode = self.object_ring.get_more_nodes(opart).next()
    odata = direct_client.direct_get_object(another_onode, opart,
                                            self.account, container,
                                            obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Direct object GET did not return VERIFY, instead '
                        'it returned: %s' % repr(odata))
    objs = [o['name'] for o in
            client.get_container(self.url, self.token, container)[1]]
    if obj not in objs:
        raise Exception('Container listing did not know about object')
    for cnode in cnodes:
        objs = [o['name'] for o in direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1]]
        if obj not in objs:
            raise Exception(
                'Container server %s:%s did not know about object' %
                (cnode['ip'], cnode['port']))
    self.pids[self.port2server[onode['port']]] = Popen([
        'chase-object-server',
        '/etc/chase/object-server/%d.conf' %
        ((onode['port'] - 6000) / 10)]).pid
    sleep(2)
    exc = False
    try:
        direct_client.direct_get_object(onode, opart, self.account,
                                        container, obj)
    except Exception:
        exc = True
    if not exc:
        raise Exception('Previously downed object server had test object')
    # Run the extra server last so it'll remove its extra partition
    ps = []
    for n in onodes:
        ps.append(Popen(['chase-object-replicator',
                         '/etc/chase/object-server/%d.conf' %
                         ((n['port'] - 6000) / 10), 'once']))
    for p in ps:
        p.wait()
    call(['chase-object-replicator',
          '/etc/chase/object-server/%d.conf' %
          ((another_onode['port'] - 6000) / 10), 'once'])
    odata = direct_client.direct_get_object(onode, opart, self.account,
                                            container, obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Direct object GET did not return VERIFY, instead '
                        'it returned: %s' % repr(odata))
    exc = False
    try:
        direct_client.direct_get_object(another_onode, opart, self.account,
                                        container, obj)
    except Exception:
        exc = True
    if not exc:
        raise Exception('Handoff object server still had test object')
    # Because POST has changed to a COPY by default, POSTs will succeed on
    # all up nodes now if at least one up node has the object.
    # kill(self.pids[self.port2server[onode['port']]], SIGTERM)
    # client.post_object(self.url, self.token, container, obj,
    #                    headers={'x-object-meta-probe': 'value'})
    # oheaders = client.head_object(self.url, self.token, container, obj)
    # if oheaders.get('x-object-meta-probe') != 'value':
    #     raise Exception('Metadata incorrect, was %s' % repr(oheaders))
    # exc = False
    # try:
    #     direct_client.direct_get_object(another_onode, opart,
    #                                     self.account, container, obj)
    # except Exception:
    #     exc = True
    # if not exc:
    #     raise Exception('Handoff server claimed it had the object when '
    #                     'it should not have it')
    # self.pids[self.port2server[onode['port']]] = Popen([
    #     'chase-object-server',
    #     '/etc/chase/object-server/%d.conf' %
    #     ((onode['port'] - 6000) / 10)]).pid
    # sleep(2)
    # oheaders = direct_client.direct_get_object(onode, opart, self.account,
    #                                            container, obj)[0]
    # if oheaders.get('x-object-meta-probe') == 'value':
    #     raise Exception('Previously downed object server had the new '
    #                     'metadata when it should not have it')
    # # Run the extra server last so it'll remove its extra partition
    # ps = []
    # for n in onodes:
    #     ps.append(Popen(['chase-object-replicator',
    #                      '/etc/chase/object-server/%d.conf' %
    #                      ((n['port'] - 6000) / 10), 'once']))
    # for p in ps:
    #     p.wait()
    # call(['chase-object-replicator',
    #       '/etc/chase/object-server/%d.conf' %
    #       ((another_onode['port'] - 6000) / 10), 'once'])
    # oheaders = direct_client.direct_get_object(onode, opart, self.account,
    #                                            container, obj)[0]
    # if oheaders.get('x-object-meta-probe') != 'value':
    #     raise Exception(
    #         'Previously downed object server did not have the new '
    #         'metadata')
    kill(self.pids[self.port2server[onode['port']]], SIGTERM)
    client.delete_object(self.url, self.token, container, obj)
    exc = False
    try:
        client.head_object(self.url, self.token, container, obj)
    except Exception:
        exc = True
    if not exc:
        raise Exception('Regular object HEAD was still successful')
    objs = [o['name'] for o in
            client.get_container(self.url, self.token, container)[1]]
    if obj in objs:
        raise Exception('Container listing still knew about object')
    for cnode in cnodes:
        objs = [o['name'] for o in direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1]]
        if obj in objs:
            raise Exception(
                'Container server %s:%s still knew about object' %
                (cnode['ip'], cnode['port']))
    self.pids[self.port2server[onode['port']]] = Popen([
        'chase-object-server',
        '/etc/chase/object-server/%d.conf' %
        ((onode['port'] - 6000) / 10)]).pid
    sleep(2)
    direct_client.direct_get_object(onode, opart, self.account, container,
                                    obj)
    # Run the extra server last so it'll remove its extra partition
    ps = []
    for n in onodes:
        ps.append(Popen(['chase-object-replicator',
                         '/etc/chase/object-server/%d.conf' %
                         ((n['port'] - 6000) / 10), 'once']))
    for p in ps:
        p.wait()
    call(['chase-object-replicator',
          '/etc/chase/object-server/%d.conf' %
          ((another_onode['port'] - 6000) / 10), 'once'])
    exc = False
    try:
        direct_client.direct_get_object(another_onode, opart, self.account,
                                        container, obj)
    except Exception:
        exc = True
    if not exc:
        raise Exception('Handoff object server still had the object')
class TestRunningWithEachTypeDown(unittest.TestCase):

    def setUp(self):
        self.pids, self.port2server, self.account_ring, self.container_ring, \
            self.object_ring, self.url, self.token, self.account = \
            reset_environment()

    def tearDown(self):
        kill_pids(self.pids)

    def test_main(self):
        # TODO: This test randomly passes or fails; need to find out why.
        return
        apart, anodes = self.account_ring.get_nodes(self.account)
        kill(self.pids[self.port2server[anodes[0]['port']]], SIGTERM)
        cpart, cnodes = \
            self.container_ring.get_nodes(self.account, 'container1')
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)
        opart, onodes = \
            self.object_ring.get_nodes(self.account, 'container1', 'object1')
        kill(self.pids[self.port2server[onodes[0]['port']]], SIGTERM)
        try:
            client.put_container(self.url, self.token, 'container1')
        except client.ClientException, err:
            # This might 503 if one of the up container nodes tries to update
            # the down account node. It'll still be saved on one node, but we
            # can't assure the user.
            pass
        client.put_object(self.url, self.token, 'container1', 'object1',
                          '1234')
        get_to_final_state()
        headers, containers = client.head_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        found1 = False
        for obj in client.get_container(self.url, self.token,
                                        'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        self.assert_(found1)
        self.pids[self.port2server[anodes[0]['port']]] = \
            Popen(['chase-account-server',
                   '/etc/chase/account-server/%d.conf' %
                   ((anodes[0]['port'] - 6002) / 10)]).pid
        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                   ((cnodes[0]['port'] - 6001) / 10)]).pid
        self.pids[self.port2server[onodes[0]['port']]] = \
            Popen(['chase-object-server',
                   '/etc/chase/object-server/%d.conf' %
                   ((onodes[0]['port'] - 6000) / 10)]).pid
        sleep(2)
        headers, containers = client.head_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
        # The account node was previously down.
        self.assert_(not found1)
        found1 = False
        for obj in client.get_container(self.url, self.token,
                                        'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        # The first container node 404s, but the proxy will try the next node
        # and succeed.
        self.assert_(found1)
        get_to_final_state()
        headers, containers = client.head_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        found1 = False
        for obj in client.get_container(self.url, self.token,
                                        'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        self.assert_(found1)
class TestContainerFailures(unittest.TestCase):

    def setUp(self):
        self.pids, self.port2server, self.account_ring, self.container_ring, \
            self.object_ring, self.url, self.token, self.account = \
            reset_environment()

    def tearDown(self):
        kill_pids(self.pids)

    def test_first_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                   ((cnodes[0]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't got the update that the
        # object was deleted yet.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated account server happens to
        # be the first account server, this will pass; otherwise the first
        # account server will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])
        object2 = 'object2'
        # This will work because at least one (in this case, just one)
        # account server has to indicate the container exists for the put to
        # continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])

    def test_second_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[1]['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.pids[self.port2server[cnodes[1]['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                   ((cnodes[1]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated account server happens to
        # be the first account server, this will pass; otherwise the first
        # account server will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])
        object2 = 'object2'
        # This will work because at least one (in this case, just one)
        # account server has to indicate the container exists for the put to
        # continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        self.assert_(object1 not in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])

    def test_first_two_nodes_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        for x in xrange(2):
            kill(self.pids[self.port2server[cnodes[x]['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        for x in xrange(2):
            self.pids[self.port2server[cnodes[x]['port']]] = \
                Popen(['chase-container-server',
                       '/etc/chase/container-server/%d.conf' %
                       ((cnodes[x]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't got the update that the
        # object was deleted yet.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # This fails because all three nodes have to indicate deletion before
        # we tell the user it worked. Since the first node 409s (it hasn't
        # got the update that the object was deleted yet), the whole request
        # must 503 (until everything is synced up, at which point the delete
        # would work).
        exc = None
        try:
            client.delete_container(self.url, self.token, container)
        except client.ClientException, err:
            exc = err
        self.assert_(exc)
        self.assertEquals(exc.http_status, 503)
        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated account server happens to
        # be the first account server, this will pass; otherwise the first
        # account server will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])
        object2 = 'object2'
        # This will work because at least one (in this case, just one)
        # account server has to indicate the container exists for the put to
        # continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
def test_ok(self):
    c.http_connection = self.fake_http_connection(200)
    args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
    value = c.put_object(*args)
    self.assertTrue(isinstance(value, basestring))
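# A companion sketch, not in the original tests: assuming fake_http_connection
# takes a status code and that c.put_object raises c.ClientException for
# non-2xx responses, the failure path could be exercised the same way.
def test_server_error(self):
    c.http_connection = self.fake_http_connection(500)
    args = ('http://www.test.com', 'asdf', 'asdf', 'asdf', 'asdf')
    self.assertRaises(c.ClientException, c.put_object, *args)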
    if exc:
        raise exc
    raise Exception(_('Unknown exception trying to GET: '
                      '%(node)r %(account)r %(container)r %(object)r') %
                    {'node': node, 'part': part,
                     'account': info['account'],
                     'container': info['container'],
                     'object': row['name']})
    for key in ('date', 'last-modified'):
        if key in headers:
            del headers[key]
    if 'etag' in headers:
        headers['etag'] = headers['etag'].strip('"')
    headers['x-timestamp'] = row['created_at']
    headers['x-container-sync-key'] = sync_key
    put_object(sync_to, name=row['name'], headers=headers,
               contents=_Iter2FileLikeObject(body), proxy=self.proxy)
    self.container_puts += 1
except ClientException, err:
    if err.http_status == 401:
        self.logger.info(_('Unauth %(sync_from)r '
                           '=> %(sync_to)r'),
                         {'sync_from': '%s/%s' %
                             (quote(info['account']),
                              quote(info['container'])),
                          'sync_to': sync_to})
    elif err.http_status == 404:
        self.logger.info(_('Not found %(sync_from)r '
                           '=> %(sync_to)r'),
                         {'sync_from': '%s/%s' %
                             (quote(info['account']),
                              quote(info['container'])),
                          'sync_to': sync_to})
    else: