def test_main(self):
    container = 'container-%s' % uuid4()
    client.put_container(self.url, self.token, container)
    apart, anodes = self.account_ring.get_nodes(self.account)
    cpart, cnodes = self.container_ring.get_nodes(self.account, container)
    cnode = cnodes[0]
    obj = 'object-%s' % uuid4()
    opart, onodes = self.object_ring.get_nodes(
        self.account, container, obj)
    onode = onodes[0]
    kill(self.pids[self.port2server[onode['port']]], SIGTERM)
    client.put_object(self.url, self.token, container, obj, 'VERIFY')
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Object GET did not return VERIFY, instead it '
                        'returned: %s' % repr(odata))
    # Kill all primaries to ensure GET handoff works
    for node in onodes[1:]:
        kill(self.pids[self.port2server[node['port']]], SIGTERM)
    odata = client.get_object(self.url, self.token, container, obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Object GET did not return VERIFY, instead it '
                        'returned: %s' % repr(odata))
    for node in onodes[1:]:
        self.pids[self.port2server[node['port']]] = Popen([
            'chase-object-server',
            '/etc/chase/object-server/%d.conf' %
            ((node['port'] - 6000) / 10)]).pid
    sleep(2)
    # We've indirectly verified the handoff node has the object, but
    # let's directly verify it.
    another_onode = self.object_ring.get_more_nodes(opart).next()
    odata = direct_client.direct_get_object(
        another_onode, opart, self.account, container, obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Direct object GET did not return VERIFY, '
                        'instead it returned: %s' % repr(odata))
    objs = [o['name'] for o in
            client.get_container(self.url, self.token, container)[1]]
    if obj not in objs:
        raise Exception('Container listing did not know about object')
    for cnode in cnodes:
        objs = [o['name'] for o in
                direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]]
        if obj not in objs:
            raise Exception(
                'Container server %s:%s did not know about object' %
                (cnode['ip'], cnode['port']))
    self.pids[self.port2server[onode['port']]] = Popen([
        'chase-object-server',
        '/etc/chase/object-server/%d.conf' %
        ((onode['port'] - 6000) / 10)]).pid
    sleep(2)
    exc = False
    try:
        direct_client.direct_get_object(onode, opart, self.account,
                                        container, obj)
    except Exception:
        exc = True
    if not exc:
        raise Exception('Previously downed object server had test object')
    # Run the extra server last so it'll remove its extra partition
    ps = []
    for n in onodes:
        ps.append(Popen(['chase-object-replicator',
                         '/etc/chase/object-server/%d.conf' %
                         ((n['port'] - 6000) / 10), 'once']))
    for p in ps:
        p.wait()
    call(['chase-object-replicator',
          '/etc/chase/object-server/%d.conf' %
          ((another_onode['port'] - 6000) / 10), 'once'])
    odata = direct_client.direct_get_object(onode, opart, self.account,
                                            container, obj)[-1]
    if odata != 'VERIFY':
        raise Exception('Direct object GET did not return VERIFY, '
                        'instead it returned: %s' % repr(odata))
    exc = False
    try:
        direct_client.direct_get_object(another_onode, opart,
                                        self.account, container, obj)
    except Exception:
        exc = True
    if not exc:
        raise Exception('Handoff object server still had test object')
    # Because POST has changed to a COPY by default, POSTs will now
    # succeed on all up nodes if at least one up node has the object.
    # kill(self.pids[self.port2server[onode['port']]], SIGTERM)
    # client.post_object(self.url, self.token, container, obj,
    #                    headers={'x-object-meta-probe': 'value'})
    # oheaders = client.head_object(self.url, self.token, container, obj)
    # if oheaders.get('x-object-meta-probe') != 'value':
    #     raise Exception('Metadata incorrect, was %s' % repr(oheaders))
    # exc = False
    # try:
    #     direct_client.direct_get_object(another_onode, opart,
    #                                     self.account, container, obj)
    # except Exception:
    #     exc = True
    # if not exc:
    #     raise Exception('Handoff server claimed it had the object when '
    #                     'it should not have it')
    # self.pids[self.port2server[onode['port']]] = Popen([
    #     'chase-object-server',
    #     '/etc/chase/object-server/%d.conf' %
    #     ((onode['port'] - 6000) / 10)]).pid
    # sleep(2)
    # oheaders = direct_client.direct_get_object(
    #     onode, opart, self.account, container, obj)[0]
    # if oheaders.get('x-object-meta-probe') == 'value':
    #     raise Exception('Previously downed object server had the new '
    #                     'metadata when it should not have it')
    # # Run the extra server last so it'll remove its extra partition
    # ps = []
    # for n in onodes:
    #     ps.append(Popen(['chase-object-replicator',
    #                      '/etc/chase/object-server/%d.conf' %
    #                      ((n['port'] - 6000) / 10), 'once']))
    # for p in ps:
    #     p.wait()
    # call(['chase-object-replicator',
    #       '/etc/chase/object-server/%d.conf' %
    #       ((another_onode['port'] - 6000) / 10), 'once'])
    # oheaders = direct_client.direct_get_object(
    #     onode, opart, self.account, container, obj)[0]
    # if oheaders.get('x-object-meta-probe') != 'value':
    #     raise Exception(
    #         'Previously downed object server did not have the new '
    #         'metadata')
    kill(self.pids[self.port2server[onode['port']]], SIGTERM)
    client.delete_object(self.url, self.token, container, obj)
    exc = False
    try:
        client.head_object(self.url, self.token, container, obj)
    except Exception:
        exc = True
    if not exc:
        raise Exception('Regular object HEAD was still successful')
    objs = [o['name'] for o in
            client.get_container(self.url, self.token, container)[1]]
    if obj in objs:
        raise Exception('Container listing still knew about object')
    for cnode in cnodes:
        objs = [o['name'] for o in
                direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]]
        if obj in objs:
            raise Exception(
                'Container server %s:%s still knew about object' %
                (cnode['ip'], cnode['port']))
    self.pids[self.port2server[onode['port']]] = Popen([
        'chase-object-server',
        '/etc/chase/object-server/%d.conf' %
        ((onode['port'] - 6000) / 10)]).pid
    sleep(2)
    # The previously downed object server missed the delete, so it should
    # still serve the object on a direct GET.
    direct_client.direct_get_object(onode, opart, self.account,
                                    container, obj)
    # Run the extra server last so it'll remove its extra partition
    ps = []
    for n in onodes:
        ps.append(Popen(['chase-object-replicator',
                         '/etc/chase/object-server/%d.conf' %
                         ((n['port'] - 6000) / 10), 'once']))
    for p in ps:
        p.wait()
    call(['chase-object-replicator',
          '/etc/chase/object-server/%d.conf' %
          ((another_onode['port'] - 6000) / 10), 'once'])
    exc = False
    try:
        direct_client.direct_get_object(another_onode, opart,
                                        self.account, container, obj)
    except Exception:
        exc = True
    if not exc:
        raise Exception('Handoff object server still had the object')
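# The '(port - 6000) / 10' arithmetic above reflects the probe cluster's
# layout: object servers listen on ports 6010, 6020, ... and read
# /etc/chase/object-server/1.conf, 2.conf, ...; container and account
# servers follow the same scheme offset by 6001 and 6002. A hypothetical
# helper (not part of the original suite) that captures that mapping:
def config_path(server_type, port):
    # server_type is 'object', 'container', or 'account'.
    base = {'object': 6000, 'container': 6001, 'account': 6002}[server_type]
    return '/etc/chase/%s-server/%d.conf' % (server_type, (port - base) / 10)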
class TestContainerFailures(unittest.TestCase):

    def setUp(self):
        self.pids, self.port2server, self.account_ring, \
            self.container_ring, self.object_ring, self.url, self.token, \
            self.account = reset_environment()

    def tearDown(self):
        kill_pids(self.pids)

    def test_first_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        cpart, cnodes = self.container_ring.get_nodes(self.account,
                                                      container)
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                   ((cnodes[0]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't received the update
        # that the object was deleted yet.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # Unfortunately, the following might pass or fail, depending on
        # the position of the account server associated with the first
        # container server we had killed. If the associated one happens
        # to be the first account server, this'll pass; otherwise the
        # first account server will serve the listing and not have the
        # container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])
        object2 = 'object2'
        # This will work because at least one (in this case, just one)
        # account server has to indicate the container exists for the
        # put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is
        # okay.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        get_to_final_state()
        # The container should still be in the account listing.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And our object2 should still exist, while object1's delete
        # should have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])

    def test_second_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        cpart, cnodes = self.container_ring.get_nodes(self.account,
                                                      container)
        kill(self.pids[self.port2server[cnodes[1]['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.pids[self.port2server[cnodes[1]['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                   ((cnodes[1]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        # Unfortunately, the following might pass or fail, depending on
        # the position of the account server associated with the
        # container server we had killed. If the associated one happens
        # to be the first account server, this'll pass; otherwise the
        # first account server will serve the listing and not have the
        # container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])
        object2 = 'object2'
        # This will work because at least one (in this case, just one)
        # account server has to indicate the container exists for the
        # put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # The first node was up for the delete, so it knows object1 is
        # gone.
        self.assert_(object1 not in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        get_to_final_state()
        # The container should still be in the account listing.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And our object2 should still exist, while object1's delete
        # should have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])

    def test_first_two_nodes_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        cpart, cnodes = self.container_ring.get_nodes(self.account,
                                                      container)
        for x in xrange(2):
            kill(self.pids[self.port2server[cnodes[x]['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        for x in xrange(2):
            self.pids[self.port2server[cnodes[x]['port']]] = \
                Popen(['chase-container-server',
                       '/etc/chase/container-server/%d.conf' %
                       ((cnodes[x]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't received the update
        # that the object was deleted yet.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # This fails because all three nodes have to indicate deletion
        # before we tell the user it worked. Since the first node 409s
        # (it hasn't received the update that the object was deleted
        # yet), the whole request must 503 (until everything is synced
        # up, at which point the delete would work).
        exc = None
        try:
            client.delete_container(self.url, self.token, container)
        except client.ClientException, err:
            exc = err
        self.assert_(exc)
        self.assertEquals(exc.http_status, 503)
        # Unfortunately, the following might pass or fail, depending on
        # the position of the account server associated with the first
        # container server we had killed. If the associated one happens
        # to be the first account server, this'll pass; otherwise the
        # first account server will serve the listing and not have the
        # container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])
        object2 = 'object2'
        # This will work because at least one (in this case, just one)
        # account server has to indicate the container exists for the
        # put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is
        # okay.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(
                         cnodes[0], cpart, self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        get_to_final_state()
        # Our container delete never "finalized" because we started
        # using the container again before the delete settled.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And our object2 should still exist, while object1's delete
        # should have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token,
                                          container)[1]])

    def _get_db_file_path(self, obj_dir):
        # Return the path to the newest database file under obj_dir.
        files = sorted(os.listdir(obj_dir), reverse=True)
        for filename in files:
            if filename.endswith('.db'):
                return os.path.join(obj_dir, filename)
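    # Each test above repeats the same kill-and-restart dance for a
    # container server. A consolidated sketch of the restart step
    # (hypothetical; not part of the original suite, though it relies only
    # on the pids/port2server bookkeeping the tests already use):
    def _restart_container_server(self, cnode):
        # Re-launch the container server that owns cnode's port, record
        # its new pid, and give it a moment to come up.
        self.pids[self.port2server[cnode['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                   ((cnode['port'] - 6001) / 10)]).pid
        sleep(2)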
def test_no_content(self):
    c.http_connection = self.fake_http_connection(204)
    value = c.get_container('http://www.test.com', 'asdf', 'asdf')[1]
    self.assertEquals(value, [])
class TestRunningWithEachTypeDown(unittest.TestCase):

    def setUp(self):
        self.pids, self.port2server, self.account_ring, \
            self.container_ring, self.object_ring, self.url, self.token, \
            self.account = reset_environment()

    def tearDown(self):
        kill_pids(self.pids)

    def test_main(self):
        # TODO: This test "randomly" passes or fails; need to find out why.
        return
        apart, anodes = self.account_ring.get_nodes(self.account)
        kill(self.pids[self.port2server[anodes[0]['port']]], SIGTERM)
        cpart, cnodes = \
            self.container_ring.get_nodes(self.account, 'container1')
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)
        opart, onodes = \
            self.object_ring.get_nodes(self.account, 'container1',
                                       'object1')
        kill(self.pids[self.port2server[onodes[0]['port']]], SIGTERM)
        try:
            client.put_container(self.url, self.token, 'container1')
        except client.ClientException:
            # This might 503 if one of the up container nodes tries to
            # update the down account node. The container will still be
            # saved on one node, but we can't assure the user.
            pass
        client.put_object(self.url, self.token, 'container1', 'object1',
                          '1234')
        get_to_final_state()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        found1 = False
        for obj in client.get_container(self.url, self.token,
                                        'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        self.assert_(found1)
        self.pids[self.port2server[anodes[0]['port']]] = \
            Popen(['chase-account-server',
                   '/etc/chase/account-server/%d.conf' %
                   ((anodes[0]['port'] - 6002) / 10)]).pid
        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                   ((cnodes[0]['port'] - 6001) / 10)]).pid
        self.pids[self.port2server[onodes[0]['port']]] = \
            Popen(['chase-object-server',
                   '/etc/chase/object-server/%d.conf' %
                   ((onodes[0]['port'] - 6000) / 10)]).pid
        sleep(2)
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
        # The account node that serves the listing was previously down,
        # so it doesn't know about the container yet.
        self.assert_(not found1)
        found1 = False
        for obj in client.get_container(self.url, self.token,
                                        'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        # The first container node 404s, but the proxy will try the next
        # node and succeed.
        self.assert_(found1)
        get_to_final_state()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        found1 = False
        for obj in client.get_container(self.url, self.token,
                                        'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        self.assert_(found1)
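# These classes depend on reset_environment(), kill_pids(), and
# get_to_final_state() from the probe tests' common module. As a rough,
# hypothetical sketch (the daemon names and the four-servers-per-type
# layout are assumptions, not confirmed by this file), get_to_final_state()
# presumably runs a 'once' pass of each consistency daemon over every
# server config so async updates and replication settle, in the same style
# as the inline replicator calls above:
def get_to_final_state_sketch():
    for daemon, server in (('chase-account-replicator', 'account'),
                           ('chase-container-updater', 'container'),
                           ('chase-container-replicator', 'container'),
                           ('chase-object-updater', 'object'),
                           ('chase-object-replicator', 'object')):
        # Start one 'once' pass per numbered config, then wait for all.
        ps = [Popen([daemon, '/etc/chase/%s-server/%d.conf' % (server, n),
                     'once']) for n in xrange(1, 5)]
        for p in ps:
            p.wait()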