    def test_main(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        apart, anodes = self.account_ring.get_nodes(self.account)
        anode = anodes[0]
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        kill(self.pids[self.port2server[cnode['port']]], SIGTERM)
        obj = 'object-%s' % uuid4()
        client.put_object(self.url, self.token, container, obj, '')
        self.pids[self.port2server[cnode['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                   ((cnode['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(not direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1])
        ps = []
        for n in xrange(1, 5):
            ps.append(Popen(['chase-object-updater',
                             '/etc/chase/object-server/%d.conf' % n, 'once']))
        for p in ps:
            p.wait()
        objs = [o['name'] for o in direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1]]
        self.assert_(obj in objs)
    def test_main(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        apart, anodes = self.account_ring.get_nodes(self.account)
        anode = anodes[0]
        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        kill(self.pids[self.port2server[cnode['port']]], SIGTERM)
        obj = 'object-%s' % uuid4()
        client.put_object(self.url, self.token, container, obj, '')
        self.pids[self.port2server[cnode['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                   ((cnode['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(not direct_client.direct_get_container(
            cnode, cpart, self.account, container)[1])
        ps = []
        for n in xrange(1, 5):
            ps.append(
                Popen([
                    'chase-object-updater',
                    '/etc/chase/object-server/%d.conf' % n, 'once'
                ]))
        for p in ps:
            p.wait()
        objs = [
            o['name'] for o in direct_client.direct_get_container(
                cnode, cpart, self.account, container)[1]
        ]
        self.assert_(obj in objs)
Example #3
    def test_first_two_nodes_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        for x in xrange(2):
            kill(self.pids[self.port2server[cnodes[x]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        for x in xrange(2):
            self.pids[self.port2server[cnodes[x]['port']]] = \
                Popen(['chase-container-server',
                       '/etc/chase/container-server/%d.conf' %
                        ((cnodes[x]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't gotten the update that
        # the object was deleted yet.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])

        # This fails because all three nodes have to indicate deletion before
        # we tell the user it worked. Since the first node 409s (it hasn't
        # gotten the update that the object was deleted yet), the whole
        # request must 503 (until everything is synced up, at which point the
        # delete would work).
        exc = None
        try:
            client.delete_container(self.url, self.token, container)
        except client.ClientException, err:
            exc = err
Example #4
    def test_first_two_nodes_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        for x in xrange(2):
            kill(self.pids[self.port2server[cnodes[x]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])

        for x in xrange(2):
            self.pids[self.port2server[cnodes[x]['port']]] = \
                Popen(['chase-container-server',
                       '/etc/chase/container-server/%d.conf' %
                        ((cnodes[x]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't gotten the update that
        # the object was deleted yet.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(cnodes[0], cpart,
                     self.account, container)[1]])

        # This fails because all three nodes have to indicate deletion before
        # we tell the user it worked. Since the first node 409s (it hasn't
        # gotten the update that the object was deleted yet), the whole
        # request must 503 (until everything is synced up, at which point the
        # delete would work).
        exc = None
        try:
            client.delete_container(self.url, self.token, container)
        except client.ClientException, err:
            exc = err
Example #5
    def test_first_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])

        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                    ((cnodes[0]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't gotten the update that
        # the object was deleted yet.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(cnodes[0], cpart,
                     self.account, container)[1]])

        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated server happens to be the
        # first account server, this will pass; otherwise the first account
        # server will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(cnodes[0], cpart,
                     self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And so our object2 should still exist, and object1's delete should
        # have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])
Example #6
        self.assert_(exc)
        self.assertEquals(exc.http_status, 503)
        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated server happens to be the
        # first account server, this will pass; otherwise the first account
        # server will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #     client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        self.assert_(object1 not in [o['name'] for o in
                     direct_client.direct_get_container(cnodes[0], cpart,
                     self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And so our object2 should still exist, and object1's delete should
        # have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])
Example #7
    def test_main(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        apart, anodes = self.account_ring.get_nodes(self.account)

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(
            self.account, container, obj)
        onode = onodes[0]
        kill(self.pids[self.port2server[onode['port']]], SIGTERM)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Kill all primaries to ensure GET handoff works
        for node in onodes[1:]:
            kill(self.pids[self.port2server[node['port']]], SIGTERM)
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        for node in onodes[1:]:
            self.pids[self.port2server[node['port']]] = Popen([
                'chase-object-server',
                '/etc/chase/object-server/%d.conf' %
                ((node['port'] - 6000) / 10)]).pid
        sleep(2)
        # We've indirectly verified the handoff node has the object, but let's
        # directly verify it.
        another_onode = self.object_ring.get_more_nodes(opart).next()
        odata = direct_client.direct_get_object(another_onode, opart,
                    self.account, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(cnode, cpart,
                        self.account, container)[1]]
            if obj not in objs:
                raise Exception(
                    'Container server %s:%s did not know about object' %
                    (cnode['ip'], cnode['port']))
        self.pids[self.port2server[onode['port']]] = Popen([
            'chase-object-server',
            '/etc/chase/object-server/%d.conf' %
            ((onode['port'] - 6000) / 10)]).pid
        sleep(2)
        exc = False
        try:
            direct_client.direct_get_object(onode, opart, self.account,
                                            container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Previously downed object server had test object')
        # Run the extra server last so it'll remove its extra partition
        ps = []
        for n in onodes:
            ps.append(Popen(['chase-object-replicator',
                             '/etc/chase/object-server/%d.conf' %
                             ((n['port'] - 6000) / 10), 'once']))
        for p in ps:
            p.wait()
        call(['chase-object-replicator',
              '/etc/chase/object-server/%d.conf' %
              ((another_onode['port'] - 6000) / 10), 'once'])
        odata = direct_client.direct_get_object(onode, opart, self.account,
                                                container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        exc = False
        try:
            direct_client.direct_get_object(another_onode, opart, self.account,
                                            container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Handoff object server still had test object')

# Because POST has changed to a COPY by default, POSTs will succeed on all up
# nodes now if at least one up node has the object.
#       kill(self.pids[self.port2server[onode['port']]], SIGTERM)
#       client.post_object(self.url, self.token, container, obj,
#                          headers={'x-object-meta-probe': 'value'})
#       oheaders = client.head_object(self.url, self.token, container, obj)
#       if oheaders.get('x-object-meta-probe') != 'value':
#           raise Exception('Metadata incorrect, was %s' % repr(oheaders))
#       exc = False
#       try:
#           direct_client.direct_get_object(another_onode, opart, self.account,
#                                           container, obj)
#       except Exception:
#           exc = True
#       if not exc:
#           raise Exception('Handoff server claimed it had the object when '
#                           'it should not have it')
#       self.pids[self.port2server[onode['port']]] = Popen([
#           'chase-object-server',
#           '/etc/chase/object-server/%d.conf' %
#           ((onode['port'] - 6000) / 10)]).pid
#       sleep(2)
#       oheaders = direct_client.direct_get_object(onode, opart, self.account,
#                                                   container, obj)[0]
#       if oheaders.get('x-object-meta-probe') == 'value':
#           raise Exception('Previously downed object server had the new '
#                           'metadata when it should not have it')
#       # Run the extra server last so it'll remove its extra partition
#       ps = []
#       for n in onodes:
#           ps.append(Popen(['chase-object-replicator',
#                            '/etc/chase/object-server/%d.conf' %
#                            ((n['port'] - 6000) / 10), 'once']))
#       for p in ps:
#           p.wait()
#       call(['chase-object-replicator',
#             '/etc/chase/object-server/%d.conf' %
#             ((another_onode['port'] - 6000) / 10), 'once'])
#       oheaders = direct_client.direct_get_object(onode, opart, self.account,
#                                                   container, obj)[0]
#       if oheaders.get('x-object-meta-probe') != 'value':
#           raise Exception(
#               'Previously downed object server did not have the new metadata')

        kill(self.pids[self.port2server[onode['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, obj)
        exc = False
        try:
            client.head_object(self.url, self.token, container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Regular object HEAD was still successful')
        objs = [o['name'] for o in
                client.get_container(self.url, self.token, container)[1]]
        if obj in objs:
            raise Exception('Container listing still knew about object')
        for cnode in cnodes:
            objs = [o['name'] for o in
                    direct_client.direct_get_container(
                        cnode, cpart, self.account, container)[1]]
            if obj in objs:
                raise Exception(
                    'Container server %s:%s still knew about object' %
                    (cnode['ip'], cnode['port']))
        self.pids[self.port2server[onode['port']]] = Popen([
            'chase-object-server',
            '/etc/chase/object-server/%d.conf' %
            ((onode['port'] - 6000) / 10)]).pid
        sleep(2)
        direct_client.direct_get_object(onode, opart, self.account, container,
                                        obj)
        # Run the extra server last so it'll remove its extra partition
        ps = []
        for n in onodes:
            ps.append(Popen(['chase-object-replicator',
                             '/etc/chase/object-server/%d.conf' %
                             ((n['port'] - 6000) / 10), 'once']))
        for p in ps:
            p.wait()
        call(['chase-object-replicator',
              '/etc/chase/object-server/%d.conf' %
              ((another_onode['port'] - 6000) / 10), 'once'])
        exc = False
        try:
            direct_client.direct_get_object(another_onode, opart, self.account,
                                            container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Handoff object server still had the object')
Example #8
    def test_first_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                    ((cnodes[0]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't gotten the update that
        # the object was deleted yet.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])

        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated server happens to be the
        # first account server, this will pass; otherwise the first account
        # server will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # And so our object2 should still exist, and object1's delete should
        # have finalized.
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])
Example #9
class TestContainerFailures(unittest.TestCase):
    def setUp(self):
        self.pids, self.port2server, self.account_ring, self.container_ring, \
            self.object_ring, self.url, self.token, self.account = \
                reset_environment()

    def tearDown(self):
        kill_pids(self.pids)

    def test_first_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                    ((cnodes[0]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't gotten the update that
        # the object was deleted yet.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])

        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated server happens to be the
        # first account server, this will pass; otherwise the first account
        # server will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # And so our object2 should still exist, and object1's delete should
        # have finalized.
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

    def test_second_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[1]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        self.pids[self.port2server[cnodes[1]['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                    ((cnodes[1]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated server happens to be the
        # first account server, this will pass; otherwise the first account
        # server will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #     client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        self.assert_(object1 not in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # And so our object2 should still exist, and object1's delete should
        # have finalized.
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

    def test_first_two_nodes_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        for x in xrange(2):
            kill(self.pids[self.port2server[cnodes[x]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        for x in xrange(2):
            self.pids[self.port2server[cnodes[x]['port']]] = \
                Popen(['chase-container-server',
                       '/etc/chase/container-server/%d.conf' %
                        ((cnodes[x]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't gotten the update that
        # the object was deleted yet.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])

        # This fails because all three nodes have to indicate deletion before
        # we tell the user it worked. Since the first node 409s (it hasn't
        # gotten the update that the object was deleted yet), the whole
        # request must 503 (until everything is synced up, at which point the
        # delete would work).
        exc = None
        try:
            client.delete_container(self.url, self.token, container)
        except client.ClientException, err:
            exc = err
        self.assert_(exc)
        self.assertEquals(exc.http_status, 503)
        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated server happens to be the
        # first account server, this will pass; otherwise the first account
        # server will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # And so our object2 should still exist, and object1's delete should
        # have finalized.
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])
Example #10
        self.assert_(exc)
        self.assertEquals(exc.http_status, 503)
        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated server happens to be the
        # first account server, this will pass; otherwise the first account
        # server will serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #     client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        self.assert_(object1 not in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # And so our object2 should still exist, and object1's delete should
        # have finalized.
Example #11
    def test_main(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        apart, anodes = self.account_ring.get_nodes(self.account)

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes[0]
        obj = 'object-%s' % uuid4()
        opart, onodes = self.object_ring.get_nodes(self.account, container,
                                                   obj)
        onode = onodes[0]
        kill(self.pids[self.port2server[onode['port']]], SIGTERM)
        client.put_object(self.url, self.token, container, obj, 'VERIFY')
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        # Kill all primaries to ensure GET handoff works
        for node in onodes[1:]:
            kill(self.pids[self.port2server[node['port']]], SIGTERM)
        odata = client.get_object(self.url, self.token, container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Object GET did not return VERIFY, instead it '
                            'returned: %s' % repr(odata))
        for node in onodes[1:]:
            self.pids[self.port2server[node['port']]] = Popen([
                'chase-object-server',
                '/etc/chase/object-server/%d.conf' %
                ((node['port'] - 6000) / 10)
            ]).pid
        sleep(2)
        # We've indirectly verified the handoff node has the object, but let's
        # directly verify it.
        another_onode = self.object_ring.get_more_nodes(opart).next()
        odata = direct_client.direct_get_object(another_onode, opart,
                                                self.account, container,
                                                obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        objs = [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ]
        if obj not in objs:
            raise Exception('Container listing did not know about object')
        for cnode in cnodes:
            objs = [
                o['name'] for o in direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]
            ]
            if obj not in objs:
                raise Exception(
                    'Container server %s:%s did not know about object' %
                    (cnode['ip'], cnode['port']))
        self.pids[self.port2server[onode['port']]] = Popen([
            'chase-object-server',
            '/etc/chase/object-server/%d.conf' % ((onode['port'] - 6000) / 10)
        ]).pid
        sleep(2)
        exc = False
        try:
            direct_client.direct_get_object(onode, opart, self.account,
                                            container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Previously downed object server had test object')
        # Run the extra server last so it'll remove its extra partition
        ps = []
        for n in onodes:
            ps.append(
                Popen([
                    'chase-object-replicator',
                    '/etc/chase/object-server/%d.conf' %
                    ((n['port'] - 6000) / 10), 'once'
                ]))
        for p in ps:
            p.wait()
        call([
            'chase-object-replicator',
            '/etc/chase/object-server/%d.conf' %
            ((another_onode['port'] - 6000) / 10), 'once'
        ])
        odata = direct_client.direct_get_object(onode, opart, self.account,
                                                container, obj)[-1]
        if odata != 'VERIFY':
            raise Exception('Direct object GET did not return VERIFY, instead '
                            'it returned: %s' % repr(odata))
        exc = False
        try:
            direct_client.direct_get_object(another_onode, opart, self.account,
                                            container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Handoff object server still had test object')


# Because POST has changed to a COPY by default, POSTs will succeed on all up
# nodes now if at least one up node has the object.
#       kill(self.pids[self.port2server[onode['port']]], SIGTERM)
#       client.post_object(self.url, self.token, container, obj,
#                          headers={'x-object-meta-probe': 'value'})
#       oheaders = client.head_object(self.url, self.token, container, obj)
#       if oheaders.get('x-object-meta-probe') != 'value':
#           raise Exception('Metadata incorrect, was %s' % repr(oheaders))
#       exc = False
#       try:
#           direct_client.direct_get_object(another_onode, opart, self.account,
#                                           container, obj)
#       except Exception:
#           exc = True
#       if not exc:
#           raise Exception('Handoff server claimed it had the object when '
#                           'it should not have it')
#       self.pids[self.port2server[onode['port']]] = Popen([
#           'chase-object-server',
#           '/etc/chase/object-server/%d.conf' %
#           ((onode['port'] - 6000) / 10)]).pid
#       sleep(2)
#       oheaders = direct_client.direct_get_object(onode, opart, self.account,
#                                                   container, obj)[0]
#       if oheaders.get('x-object-meta-probe') == 'value':
#           raise Exception('Previously downed object server had the new '
#                           'metadata when it should not have it')
#       # Run the extra server last so it'll remove its extra partition
#       ps = []
#       for n in onodes:
#           ps.append(Popen(['chase-object-replicator',
#                            '/etc/chase/object-server/%d.conf' %
#                            ((n['port'] - 6000) / 10), 'once']))
#       for p in ps:
#           p.wait()
#       call(['chase-object-replicator',
#             '/etc/chase/object-server/%d.conf' %
#             ((another_onode['port'] - 6000) / 10), 'once'])
#       oheaders = direct_client.direct_get_object(onode, opart, self.account,
#                                                   container, obj)[0]
#       if oheaders.get('x-object-meta-probe') != 'value':
#           raise Exception(
#               'Previously downed object server did not have the new metadata')

        kill(self.pids[self.port2server[onode['port']]], SIGTERM)
        client.delete_object(self.url, self.token, container, obj)
        exc = False
        try:
            client.head_object(self.url, self.token, container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Regular object HEAD was still successful')
        objs = [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ]
        if obj in objs:
            raise Exception('Container listing still knew about object')
        for cnode in cnodes:
            objs = [
                o['name'] for o in direct_client.direct_get_container(
                    cnode, cpart, self.account, container)[1]
            ]
            if obj in objs:
                raise Exception(
                    'Container server %s:%s still knew about object' %
                    (cnode['ip'], cnode['port']))
        self.pids[self.port2server[onode['port']]] = Popen([
            'chase-object-server',
            '/etc/chase/object-server/%d.conf' % ((onode['port'] - 6000) / 10)
        ]).pid
        sleep(2)
        direct_client.direct_get_object(onode, opart, self.account, container,
                                        obj)
        # Run the extra server last so it'll remove its extra partition
        ps = []
        for n in onodes:
            ps.append(
                Popen([
                    'chase-object-replicator',
                    '/etc/chase/object-server/%d.conf' %
                    ((n['port'] - 6000) / 10), 'once'
                ]))
        for p in ps:
            p.wait()
        call([
            'chase-object-replicator',
            '/etc/chase/object-server/%d.conf' %
            ((another_onode['port'] - 6000) / 10), 'once'
        ])
        exc = False
        try:
            direct_client.direct_get_object(another_onode, opart, self.account,
                                            container, obj)
        except Exception:
            exc = True
        if not exc:
            raise Exception('Handoff object server still had the object')
Example #12
    def reap_container(self, account, account_partition, account_nodes,
                       container):
        """
        Deletes the data and the container itself for the given container. This
        will call :func:`reap_object` up to sqrt(self.concurrency) times
        concurrently for the objects in the container.

        If there is any exception while deleting a single object, the process
        will continue for any other objects in the container and the failed
        objects will be tried again the next time this function is called with
        the same parameters.

        If there is any exception while listing the objects for deletion, the
        process will stop (but will be tried again the next time this function
        is called with the same parameters). This can happen because the
        listing comes from querying just the primary remote container server.

        Once deletion has been attempted for all objects, the reaper attempts
        to delete the container itself by sending a delete request to all
        container nodes. The format of the delete request is such that each
        container server will update a corresponding account server, removing
        the container from the account's listing.

        This function returns nothing and should raise no exceptions; it only
        updates the various self.stats_* values to reflect what occurred.

        :param account: The name of the account for the container.
        :param account_partition: The partition for the account on the account
                                  ring.
        :param account_nodes: The primary node dicts for the account.
        :param container: The name of the container to delete.

        * See also: :func:`chase.common.ring.Ring.get_nodes` for a description
          of the account node dicts.
        """
        account_nodes = list(account_nodes)
        part, nodes = self.get_container_ring().get_nodes(account, container)
        node = nodes[-1]
        pool = GreenPool(size=self.object_concurrency)
        marker = ''
        while True:
            objects = None
            try:
                objects = direct_get_container(node, part, account, container,
                        marker=marker, conn_timeout=self.conn_timeout,
                        response_timeout=self.node_timeout)[1]
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
            except ClientException, err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
            if not objects:
                break
            try:
                for obj in objects:
                    if isinstance(obj['name'], unicode):
                        obj['name'] = obj['name'].encode('utf8')
                    pool.spawn(self.reap_object, account, container, part,
                               nodes, obj['name'])
                pool.waitall()
            except (Exception, Timeout):
                self.logger.exception(_('Exception with objects for container '
                    '%(container)s for account %(account)s'),
                    {'container': container, 'account': account})
            marker = objects[-1]['name']
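
A note on the loop above: it pages through the container listing with a
marker, asking each pass only for names after the last one seen, so the walk
ends when a page comes back empty. A minimal, self-contained sketch of the
same pattern (the names list, page size, and list_page helper here are made
up for illustration, not part of the code above):

    PAGE_SIZE = 2
    names = sorted(['a', 'b', 'c', 'd', 'e'])

    def list_page(marker):
        # Mimics a container GET with ?marker=...: returns up to PAGE_SIZE
        # names strictly greater than `marker`, in sorted order.
        after = [n for n in names if n > marker]
        return after[:PAGE_SIZE]

    marker = ''
    seen = []
    while True:
        page = list_page(marker)
        if not page:
            break
        seen.extend(page)  # stand-in for spawning per-object deletes
        marker = page[-1]  # resume after the last name seen
    assert seen == names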
Example #13
    def reap_container(self, account, account_partition, account_nodes,
                       container):
        """
        Deletes the data and the container itself for the given container. This
        will call :func:`reap_object` up to sqrt(self.concurrency) times
        concurrently for the objects in the container.

        If there is any exception while deleting a single object, the process
        will continue for any other objects in the container and the failed
        objects will be tried again the next time this function is called with
        the same parameters.

        If there is any exception while listing the objects for deletion, the
        process will stop (but will be tried again the next time this function
        is called with the same parameters). This can happen because the
        listing comes from querying just the primary remote container server.

        Once deletion has been attempted for all objects, the reaper attempts
        to delete the container itself by sending a delete request to all
        container nodes. The format of the delete request is such that each
        container server will update a corresponding account server, removing
        the container from the account's listing.

        This function returns nothing and should raise no exceptions; it only
        updates the various self.stats_* values to reflect what occurred.

        :param account: The name of the account for the container.
        :param account_partition: The partition for the account on the account
                                  ring.
        :param account_nodes: The primary node dicts for the account.
        :param container: The name of the container to delete.

        * See also: :func:`chase.common.ring.Ring.get_nodes` for a description
          of the account node dicts.
        """
        account_nodes = list(account_nodes)
        part, nodes = self.get_container_ring().get_nodes(account, container)
        node = nodes[-1]
        pool = GreenPool(size=self.object_concurrency)
        marker = ''
        while True:
            objects = None
            try:
                objects = direct_get_container(
                    node,
                    part,
                    account,
                    container,
                    marker=marker,
                    conn_timeout=self.conn_timeout,
                    response_timeout=self.node_timeout)[1]
                self.stats_return_codes[2] = \
                    self.stats_return_codes.get(2, 0) + 1
            except ClientException, err:
                if self.logger.getEffectiveLevel() <= DEBUG:
                    self.logger.exception(
                        _('Exception with %(ip)s:%(port)s/%(device)s'), node)
                self.stats_return_codes[err.http_status / 100] = \
                    self.stats_return_codes.get(err.http_status / 100, 0) + 1
            if not objects:
                break
            try:
                for obj in objects:
                    if isinstance(obj['name'], unicode):
                        obj['name'] = obj['name'].encode('utf8')
                    pool.spawn(self.reap_object, account, container, part,
                               nodes, obj['name'])
                pool.waitall()
            except (Exception, Timeout):
                self.logger.exception(
                    _('Exception with objects for container '
                      '%(container)s for account %(account)s'), {
                          'container': container,
                          'account': account
                      })
            marker = objects[-1]['name']
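
For context, a caller would look up the account's partition and primary nodes
on the account ring and pass them through with the container name. A rough
usage sketch, assuming a `reaper` instance of the class this method belongs to
and a get_account_ring() counterpart to the get_container_ring() used above
(both assumptions; the snippets here do not show them):

    account = 'AUTH_test'
    # Hypothetical: get_account_ring() is assumed to mirror
    # get_container_ring() from reap_container above.
    part, nodes = reaper.get_account_ring().get_nodes(account)
    reaper.reap_container(account, part, nodes, 'container-to-delete')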