Example 1
 def test_two_nodes_fail(self):
     # Create container1
     # Kill container1 servers excepting one of the primaries
     # Delete container1 directly to the one primary still up
     # Restart other container1 servers
     # Get to a final state
     # Assert all container1 servers indicate container1 is gone (happens
     #   because the one node that knew about the delete replicated to the
     #   others.)
     # Assert account level also indicates container1 is gone
     container1 = 'container-%s' % uuid4()
     cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
     client.put_container(self.url, self.token, container1)
     cnp_port = kill_nonprimary_server(cnodes, self.port2server, self.pids)
     kill_server(cnodes[0]['port'], self.port2server, self.pids)
     kill_server(cnodes[1]['port'], self.port2server, self.pids)
     direct_client.direct_delete_container(cnodes[2], cpart, self.account,
                                           container1)
     start_server(cnodes[0]['port'], self.port2server, self.pids)
     start_server(cnodes[1]['port'], self.port2server, self.pids)
     start_server(cnp_port, self.port2server, self.pids)
     get_to_final_state()
     for cnode in cnodes:
         exc = None
         try:
             direct_client.direct_get_container(cnode, cpart, self.account,
                                                container1)
         except client.ClientException as err:
             exc = err
         self.assertTrue(exc is not None)
         self.assertEquals(exc.http_status, 404)
     headers, containers = client.get_account(self.url, self.token)
     self.assertEquals(headers['x-account-container-count'], '0')
     self.assertEquals(headers['x-account-object-count'], '0')
     self.assertEquals(headers['x-account-bytes-used'], '0')
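For context: get_to_final_state() (used throughout these examples) is a probe-test helper that drives Swift's background daemons until the cluster converges. A minimal sketch of the idea, assuming swift's Manager wrapper; the exact sequence of passes varies between Swift releases:

    from swift.common.manager import Manager

    def get_to_final_state():
        # one replicator pass pushes DB rows and objects around, one
        # updater pass flushes pending container/account updates, and a
        # final replicator pass settles the results everywhere
        replicators = Manager(['account-replicator', 'container-replicator',
                               'object-replicator'])
        updaters = Manager(['container-updater', 'object-updater'])
        replicators.once()
        updaters.once()
        replicators.once()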
Example 3
 def test_one_node_fails(self):
     # Create container1
     # Kill container1 servers excepting two of the primaries
     # Delete container1
     # Restart other container1 primary server
     # Create container1/object1 (allowed because at least one server
     #   thinks the container exists)
     # Get to a final state
     # Assert all container1 servers indicate container1 is alive and
     #   well with object1
     # Assert account level also indicates container1 is alive and
     #   well with object1
     container1 = 'container-%s' % uuid4()
     cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
     client.put_container(self.url, self.token, container1)
     kill_nonprimary_server(cnodes, self.port2server, self.pids)
     kill_server(cnodes[0]['port'], self.port2server, self.pids)
     client.delete_container(self.url, self.token, container1)
     start_server(cnodes[0]['port'], self.port2server, self.pids)
     client.put_object(self.url, self.token, container1, 'object1', '123')
     get_to_final_state()
     for cnode in cnodes:
         self.assertEquals(
             [o['name'] for o in direct_client.direct_get_container(
                 cnode, cpart, self.account, container1)[1]],
             ['object1'])
     headers, containers = client.get_account(self.url, self.token)
     self.assertEquals(headers['x-account-container-count'], '1')
     self.assertEquals(headers['x-account-object-count'], '1')
     self.assertEquals(headers['x-account-bytes-used'], '3')
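The kill_server/start_server helpers come from the probe-test harness. A hypothetical sketch of their shape (the pids bookkeeping and the timeout are assumptions, not the real implementation):

    import os
    import signal
    import socket
    import time

    def kill_server(port, port2server, pids):
        # hypothetical: map the port to a server name, SIGTERM its pid,
        # then wait for the port to stop accepting connections
        server = port2server[port]
        os.kill(pids[server], signal.SIGTERM)
        deadline = time.time() + 30
        while time.time() < deadline:
            try:
                socket.create_connection(('127.0.0.1', port), timeout=1).close()
            except socket.error:
                return
            time.sleep(0.1)
        raise Exception('server on port %s would not die' % port)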
Example 5
    def test_sync(self):
        all_objects = []
        # upload some containers
        for policy in ENABLED_POLICIES:
            container = 'container-%s-%s' % (policy.name, uuid.uuid4())
            client.put_container(self.url, self.token, container,
                                 headers={'X-Storage-Policy': policy.name})
            obj = 'object-%s' % uuid.uuid4()
            body = 'test-body'
            client.put_object(self.url, self.token, container, obj, body)
            all_objects.append((policy, container, obj))

        Manager(['container-updater']).once()

        headers = client.head_account(self.url, self.token)

        self.assertEqual(int(headers['x-account-container-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-object-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-bytes-used']),
                         len(ENABLED_POLICIES) * len(body))

        part, nodes = self.account_ring.get_nodes(self.account)
        for node in nodes:
            direct_delete_account(node, part, self.account)

        Manager(['account-reaper']).once()

        get_to_final_state()

        for policy, container, obj in all_objects:
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEquals(err.http_status, 404)
                else:
                    self.fail('Found un-reaped /%s/%s on %r' %
                              (self.account, container, cnode))
            object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
            part, nodes = object_ring.get_nodes(self.account, container, obj)
            for node in nodes:
                try:
                    direct_get_object(node, part, self.account,
                                      container, obj)
                except ClientException as err:
                    self.assertEquals(err.http_status, 404)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
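POLICIES.get_object_ring(policy.idx, '/etc/swift/') used above loads the per-policy object ring: each storage policy has its own ring file, so objects in different policies land on different device sets. A small usage sketch, assuming a standard /etc/swift layout:

    from swift.common.storage_policy import POLICIES

    # policy 0 reads object.ring.gz, policy 1 reads object-1.ring.gz, ...
    for policy in POLICIES:
        ring = POLICIES.get_object_ring(policy.idx, '/etc/swift')
        part, nodes = ring.get_nodes('AUTH_test', 'a-container', 'an-object')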
Example 6
    def test_sysmeta_after_replication_with_prior_post(self):
        sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
        usermeta = {'x-object-meta-bar': 'meta-bar'}
        self.brain.put_container(policy_index=0)
        # put object
        self._put_object()

        # put user meta to first server subset
        self.brain.stop_handoff_half()
        self._post_object(headers=usermeta)
        metadata = self._get_object_metadata()
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        self.brain.start_handoff_half()

        # put newer object with sysmeta to second server subset
        self.brain.stop_primary_half()
        self._put_object(headers=sysmeta)
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        self.brain.start_primary_half()

        # run replicator
        get_to_final_state()

        # check stale user metadata is not replicated to first server subset
        # and sysmeta is unchanged
        self.brain.stop_primary_half()
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        for key in usermeta:
            self.assertFalse(key in metadata)
        self.brain.start_primary_half()

        # check stale user metadata is removed from second server subset
        # and sysmeta is replicated
        self.brain.stop_handoff_half()
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        for key in usermeta:
            self.assertFalse(key in metadata)
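The distinction this test exercises: x-object-sysmeta-* headers travel with the object's data and, for objects, can only be set on PUT, while x-object-meta-* user metadata is replaced wholesale by every POST. Swift ships predicates for the two namespaces; a quick sketch:

    from swift.common.request_helpers import is_sys_meta, is_user_meta

    assert is_sys_meta('object', 'x-object-sysmeta-foo')
    assert is_user_meta('object', 'x-object-meta-bar')
    assert not is_user_meta('object', 'x-object-sysmeta-foo')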
    def test_sysmeta_after_replication_with_subsequent_post(self):
        sysmeta = {'x-object-sysmeta-foo': 'sysmeta-foo'}
        usermeta = {'x-object-meta-bar': 'meta-bar'}
        self.brain.put_container(policy_index=0)
        # put object
        self._put_object()
        # put newer object with sysmeta to first server subset
        self.brain.stop_primary_half()
        self._put_object(headers=sysmeta)
        metadata = self._get_object_metadata()
        for key in sysmeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], sysmeta[key])
        self.brain.start_primary_half()

        # post some user meta to second server subset
        self.brain.stop_handoff_half()
        self._post_object(usermeta)
        metadata = self._get_object_metadata()
        for key in usermeta:
            self.assertTrue(key in metadata)
            self.assertEqual(metadata[key], usermeta[key])
        for key in sysmeta:
            self.assertFalse(key in metadata)
        self.brain.start_handoff_half()

        # run replicator
        get_to_final_state()

        # check user metadata has been replicated to first server subset
        # and sysmeta is unchanged
        self.brain.stop_primary_half()
        metadata = self._get_object_metadata()
        expected = dict(sysmeta)
        expected.update(usermeta)
        for key in expected.keys():
            self.assertTrue(key in metadata, key)
            self.assertEqual(metadata[key], expected[key])
        self.brain.start_primary_half()

        # check user metadata and sysmeta both on second server subset
        self.brain.stop_handoff_half()
        metadata = self._get_object_metadata()
        for key in expected.keys():
            self.assertTrue(key in metadata, key)
            self.assertEqual(metadata[key], expected[key])
    def test_object_delete_is_replicated(self):
        self.brain.put_container(policy_index=0)
        # put object
        self._put_object()

        # put newer object with sysmeta to first server subset
        self.brain.stop_primary_half()
        self._put_object()
        self.brain.start_primary_half()

        # delete object on second server subset
        self.brain.stop_handoff_half()
        self._delete_object()
        self.brain.start_handoff_half()

        # run replicator
        get_to_final_state()

        # check object deletion has been replicated on first server set
        self.brain.stop_primary_half()
        self._get_object(expect_statuses=(4,))
        self.brain.start_primary_half()

        # check object deletion persists on second server set
        self.brain.stop_handoff_half()
        self._get_object(expect_statuses=(4,))

        # put newer object to second server set
        self._put_object()
        self.brain.start_handoff_half()

        # run replicator
        get_to_final_state()

        # check new object has been replicated on first server set
        self.brain.stop_primary_half()
        self._get_object()
        self.brain.start_primary_half()

        # check new object persists on second server set
        self.brain.stop_handoff_half()
        self._get_object()
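What test_object_delete_is_replicated relies on: replication resolves conflicting versions purely by timestamp, and a tombstone (delete marker) is just another version that can win or lose like data. A toy model of the rule, not Swift code:

    def resolve(versions):
        # versions: (timestamp, kind) pairs, kind is 'data' or 'tombstone';
        # the newest timestamp wins regardless of kind
        return max(versions)[1]

    assert resolve([(2, 'tombstone'), (1, 'data')]) == 'tombstone'
    assert resolve([(3, 'data'), (2, 'tombstone')]) == 'data'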
Example 12
    def test_main(self):
        # Create container1 and container2
        # Assert account level sees them
        # Create container2/object1
        # Assert account level doesn't see it yet
        # Get to final state
        # Assert account level now sees the container2/object1
        # Kill account servers excepting two of the primaries
        # Delete container1
        # Assert account level knows container1 is gone but doesn't know about
        #   container2/object2 yet
        # Put container2/object2
        # Run container updaters
        # Assert account level now knows about container2/object2
        # Restart other primary account server
        # Assert that server doesn't know about container1's deletion or the
        #   new container2/object2 yet
        # Get to final state
        # Assert that server is now up to date

        container1 = 'container1'
        client.put_container(self.url, self.token, container1)
        container2 = 'container2'
        client.put_container(self.url, self.token, container2)
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
        self.assertEquals(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)

        client.put_object(self.url, self.token, container2, 'object1', '1234')
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
        self.assertEquals(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)

        get_to_final_state()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        self.assert_(found2)

        apart, anodes = self.account_ring.get_nodes(self.account)
        kill_nonprimary_server(anodes, self.port2server, self.pids)
        kill_server(anodes[0]['port'], self.port2server, self.pids)

        client.delete_container(self.url, self.token, container1)
        client.put_object(self.url, self.token, container2, 'object2', '12345')
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(not found1)
        self.assert_(found2)

        processes = []
        for node in xrange(1, 5):
            processes.append(Popen([
                'swift-container-updater',
                self.configs['container'] % node,
                'once']))
        for process in processes:
            process.wait()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '2')
        self.assertEquals(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 2)
                self.assertEquals(container['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)

        start_server(anodes[0]['port'], self.port2server, self.pids)

        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        self.assert_(found2)

        get_to_final_state()
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '2')
        self.assertEquals(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 2)
                self.assertEquals(container['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)
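The four Popen calls above run one swift-container-updater pass per container server by invoking the CLI directly; Example 18 below is the same test rewritten to let the Manager wrapper do this in a single call:

    from swift.common.manager import Manager

    # equivalent single-call form of the Popen loop above
    Manager(['container-updater']).once()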
Example 13
    def test_expirer_object_split_brain(self):
        old_policy = random.choice(list(POLICIES))
        wrong_policy = random.choice([p for p in POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        get_to_final_state()

        # validate object is expired
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assert_('x-backend-timestamp' in metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                break
        else:
            self.fail('Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                self.fail('Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assert_('x-backend-timestamp' in metadata)
                self.assert_(Timestamp(metadata['x-backend-timestamp']) >
                             create_timestamp)
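Background on the expiry headers used above: X-Delete-After: N is shorthand that the proxy converts into an absolute X-Delete-At epoch timestamp, and the object-expirer daemon works off a queue of such entries kept in a hidden account. A sketch of the conversion:

    import time

    # X-Delete-After is relative seconds; the stored form is X-Delete-At,
    # an absolute Unix timestamp
    delete_after = 2
    x_delete_at = int(time.time()) + delete_after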
Example 14
    def test_reconciler_move_object_twice(self):
        # select some policies
        old_policy = random.choice(list(POLICIES))
        new_policy = random.choice([p for p in POLICIES if p != old_policy])

        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()

        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        old_container_node_ids = [
            node['id'] for node, metadata in head_responses if int(old_policy)
            == int(metadata['X-Backend-Storage-Policy-Index'])
        ]
        self.assertEqual(2, len(old_container_node_ids))

        # hopefully memcache still has the new policy cached
        self.brain.put_object()
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        client = InternalClient(conf_file, 'probe-test', 3)
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()

        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
            tuple(server.once(number=n + 1) for n in old_container_node_ids)

        # verify entry in the queue for the "misplaced" new_policy
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # verify object in old_policy
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # verify object is *not* in new_policy
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})

        get_to_final_state()

        # verify entry in the queue
        client = InternalClient(conf_file, 'probe-test', 3)
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # and now it flops back
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # make sure the queue is settled
        get_to_final_state()
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                self.fail('Found unexpected object %r in the queue' % obj)
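The .misplaced_objects queue entries asserted twice above encode the policy index and the object path in the row name; a helper distilling the format this test expects:

    def misplaced_entry_name(policy_index, account, container, obj):
        # format asserted above: '<policy>:/<account>/<container>/<object>'
        return '%d:/%s/%s/%s' % (policy_index, account, container, obj)

    assert misplaced_entry_name(1, 'AUTH_test', 'c', 'o') == '1:/AUTH_test/c/o'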
Example 15
    def test_merge_storage_policy_index(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object()
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assert_(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(self.account,
                                                self.container_name,
                                                self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node,
                        part,
                        self.account,
                        self.container_name,
                        self.object_name,
                        headers={
                            'X-Backend-Storage-Policy-Index': policy_index
                        })
                except direct_client.ClientException as err:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' %
                      (self.account, self.container_name, self.object_name,
                       found_policy_indexes))
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assert_(
            len(found_policy_indexes) == 1,
            'primary nodes disagree about policy index %r' % head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node,
                    part,
                    self.account,
                    self.container_name,
                    self.object_name,
                    headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index
                    })
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' %
                          (self.account, self.container_name, self.object_name,
                           orig_policy_index))
        # use proxy to access object (bad container info might be cached...)
        timeout = time.time() + TIMEOUT
        while time.time() < timeout:
            try:
                metadata = client.head_object(self.url, self.token,
                                              self.container_name,
                                              self.object_name)
            except ClientException as err:
                if err.http_status != HTTP_NOT_FOUND:
                    raise
                time.sleep(1)
            else:
                break
        else:
            self.fail('could not HEAD /%s/%s/%s/ from policy %s '
                      'after %s seconds.' %
                      (self.account, self.container_name, self.object_name,
                       expected_policy_index, TIMEOUT))
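The closing retry loop (HEAD through the proxy until stale cached container info ages out) is a recurring probe-test pattern; a hypothetical generic form of it:

    import time

    def wait_for(check, timeout, interval=1):
        # hypothetical helper: poll check() until it returns truthily or
        # the deadline passes
        deadline = time.time() + timeout
        while time.time() < deadline:
            if check():
                return True
            time.sleep(interval)
        return False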
Example 16
 def test_reconcile_delete(self):
     # generic split brain
     self.brain.stop_primary_half()
     self.brain.put_container()
     self.brain.put_object()
     self.brain.start_primary_half()
     self.brain.stop_handoff_half()
     self.brain.put_container()
     self.brain.delete_object()
     self.brain.start_handoff_half()
     # make sure we have some manner of split brain
     container_part, container_nodes = self.container_ring.get_nodes(
         self.account, self.container_name)
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for
             node, metadata in head_responses)
     self.assert_(
         len(found_policy_indexes) > 1,
         'primary nodes did not disagree about policy index %r' %
         head_responses)
     # find our object
     orig_policy_index = ts_policy_index = None
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(self.account,
                                             self.container_name,
                                             self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node,
                     part,
                     self.account,
                     self.container_name,
                     self.object_name,
                     headers={
                         'X-Backend-Storage-Policy-Index': policy_index
                     })
             except direct_client.ClientException as err:
                 if 'x-backend-timestamp' in err.http_headers:
                     ts_policy_index = policy_index
                     break
             else:
                 orig_policy_index = policy_index
                 break
     if orig_policy_index is None:
         self.fail('Unable to find /%s/%s/%s in %r' %
                   (self.account, self.container_name, self.object_name,
                    found_policy_indexes))
     if ts_policy_index is None:
         self.fail('Unable to find tombstone /%s/%s/%s in %r' %
                   (self.account, self.container_name, self.object_name,
                    found_policy_indexes))
     get_to_final_state()
     Manager(['container-reconciler']).once()
     # validate containers
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     new_found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for node,
             metadata in head_responses)
     self.assert_(
         len(new_found_policy_indexes) == 1,
         'primary nodes disagree about policy index %r' % dict(
             (node['port'], metadata['X-Backend-Storage-Policy-Index'])
             for node, metadata in head_responses))
     expected_policy_index = new_found_policy_indexes.pop()
     self.assertEqual(orig_policy_index, expected_policy_index)
     # validate object fully deleted
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(self.account,
                                             self.container_name,
                                             self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node,
                     part,
                     self.account,
                     self.container_name,
                     self.object_name,
                     headers={
                         'X-Backend-Storage-Policy-Index': policy_index
                     })
             except direct_client.ClientException as err:
                 if err.http_status == HTTP_NOT_FOUND:
                     continue
                 raise
             else:
                 self.fail('Found /%s/%s/%s in %s on %s' %
                           (self.account, self.container_name,
                            self.object_name, orig_policy_index, node))
Example 17
    def test_reconcile_manifest(self):
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              part_name,
                              contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        policy = random.choice(list(POLICIES))
        self.brain.put_container(policy.idx)
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        wrong_policy = random.choice([p for p in POLICIES if p is not policy])
        self.brain.put_container(wrong_policy.idx)
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        try:
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')
        except ClientException as err:
            # so as it works out, you can't really upload a multi-part
            # manifest for objects that are currently misplaced - you have to
            # wait until they're all available - which is about the same as
            # some other failure that causes data to be unavailable to the
            # proxy at the time of upload
            self.assertEqual(err.http_status, 400)
        else:
            self.fail('expected the misplaced-segment manifest PUT to 400')

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }

        direct_manifest_data = map(translate_direct, manifest_data)
        headers = {
            'x-container-host':
            ','.join('%s:%s' % (n['ip'], n['port'])
                     for n in self.container_ring.devs),
            'x-container-device':
            ','.join(n['device'] for n in self.container_ring.devs),
            'x-container-partition':
            container_part,
            'X-Backend-Storage-Policy-Index':
            wrong_policy.idx,
            'X-Static-Large-Object':
            'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node,
                part,
                self.account,
                self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url,
            self.token,
            self.container_name,
            direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEquals(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name,
                                           direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(
            body,
            ''.join('VERIFY%0.2d' % i + '\x00' * 1048576 for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name, self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
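translate_direct above captures the two shapes of an SLO manifest entry: the user-facing form submitted with multipart-manifest=put uses path/etag/size_bytes, while the stored form returned by multipart-manifest=get uses name/hash/bytes. Side by side (values are placeholders):

    # user-facing entry (what clients PUT)
    user_entry = {'path': '/segments/part-00', 'etag': 'etag-of-part',
                  'size_bytes': 9}
    # stored entry (what GET ?multipart-manifest=get returns)
    stored_entry = {'name': '/segments/part-00', 'hash': 'etag-of-part',
                    'bytes': 9}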
Example 18
    def test_main(self):
        # Create container1 and container2
        # Assert account level sees them
        # Create container2/object1
        # Assert account level doesn't see it yet
        # Get to final state
        # Assert account level now sees the container2/object1
        # Kill account servers excepting two of the primaries
        # Delete container1
        # Assert account level knows container1 is gone but doesn't know about
        #   container2/object2 yet
        # Put container2/object2
        # Run container updaters
        # Assert account level now knows about container2/object2
        # Restart other primary account server
        # Assert that server doesn't know about container1's deletion or the
        #   new container2/object2 yet
        # Get to final state
        # Assert that server is now up to date

        container1 = 'container1'
        client.put_container(self.url, self.token, container1)
        container2 = 'container2'
        client.put_container(self.url, self.token, container2)
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
        self.assertEquals(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)

        client.put_object(self.url, self.token, container2, 'object1', '1234')
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
        self.assertEquals(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)

        get_to_final_state()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        self.assert_(found2)

        apart, anodes = self.account_ring.get_nodes(self.account)
        kill_nonprimary_server(anodes, self.port2server, self.pids)
        kill_server(anodes[0]['port'], self.port2server, self.pids)

        client.delete_container(self.url, self.token, container1)
        client.put_object(self.url, self.token, container2, 'object2', '12345')
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(not found1)
        self.assert_(found2)

        Manager(['container-updater']).once()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '2')
        self.assertEquals(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 2)
                self.assertEquals(container['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)

        start_server(anodes[0]['port'], self.port2server, self.pids)

        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        self.assert_(found2)

        get_to_final_state()
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '2')
        self.assertEquals(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 2)
                self.assertEquals(container['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)
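
The examples above repeatedly call get_to_final_state() between failure
injections and assertions. Its body is not part of these snippets; as a
rough sketch, assuming the helper simply stops the background daemons and
runs explicit replication/updater passes via the same Manager class used
elsewhere in these tests, it could look like:

    # Sketch only: the real helper lives in the probe tests' common
    # module; this just illustrates the assumed replicate/update cycle.
    from swift.common.manager import Manager

    def get_to_final_state():
        replicators = Manager(['account-replicator', 'container-replicator',
                               'object-replicator'])
        replicators.stop()  # keep daemons from racing the explicit passes
        updaters = Manager(['container-updater', 'object-updater'])
        updaters.stop()
        replicators.once()  # spread rows between nodes
        updaters.once()     # flush pending account/container updates
        replicators.once()  # settle the results of those updates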
Example No. 19
0
    def test_main(self):
        # Create container1 and container2
        container1 = 'container1'
        client.put_container(self.url, self.token, container1)
        container2 = 'container2'
        client.put_container(self.url, self.token, container2)

        # Assert account level sees them
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
        self.assertEquals(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)

        # Create container2/object1
        client.put_object(self.url, self.token, container2, 'object1', '1234')

        # Assert account level doesn't see it yet
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
        self.assertEquals(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)

        # Get to final state
        get_to_final_state()

        # Assert account level now sees the container2/object1
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
                self.assertEquals(container['count'], 0)
                self.assertEquals(container['bytes'], 0)
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        self.assert_(found2)

        apart, anodes = self.account_ring.get_nodes(self.account)
        kill_nonprimary_server(anodes, self.port2server, self.pids)
        kill_server(anodes[0]['port'], self.port2server, self.pids)
        # Kill account servers excepting two of the primaries

        # Delete container1
        client.delete_container(self.url, self.token, container1)

        # Put container2/object2
        client.put_object(self.url, self.token, container2, 'object2', '12345')

        # Assert account level knows container1 is gone but doesn't know about
        #   container2/object2 yet
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(not found1)
        self.assert_(found2)

        # Run container updaters
        Manager(['container-updater']).once()

        # Assert account level now knows about container2/object2
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '2')
        self.assertEquals(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 2)
                self.assertEquals(container['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)

        # Restart other primary account server
        start_server(anodes[0]['port'], self.port2server, self.pids)

        # Assert that server doesn't know about container1's deletion or the
        #   new container2/object2 yet
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        self.assert_(found2)

        # Get to final state
        get_to_final_state()

        # Assert that server is now up to date
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '2')
        self.assertEquals(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for container in containers:
            if container['name'] == container1:
                found1 = True
            elif container['name'] == container2:
                found2 = True
                self.assertEquals(container['count'], 2)
                self.assertEquals(container['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)
    def test_first_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['swift-container-server',
                   '/etc/swift/container-server/%d.conf' %
                    ((cnodes[0]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't got the update that the
        # object was deleted yet.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])

        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated happens to be the first
        # account server, this'll pass, otherwise the first account server will
        # serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # And, so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

    def _get_db_file_path(self, obj_dir):
        files = sorted(os.listdir(obj_dir), reverse=True)
        for filename in files:
            if filename.endswith('db'):
                return os.path.join(obj_dir, filename)
Example No. 23
0
    def test_expirer_object_split_brain(self):
        old_policy = random.choice(list(POLICIES))
        wrong_policy = random.choice([p for p in POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assert_('x-backend-timestamp' in metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account, self.container_name):
            if self.object_name == obj['name']:
                break
        else:
            self.fail('Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account, self.container_name):
            if self.object_name == obj['name']:
                self.fail('Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in POLICIES:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4, ),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assert_('x-backend-timestamp' in metadata)
                self.assert_(
                    Timestamp(metadata['x-backend-timestamp']) >
                    create_timestamp)
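
The expirer test above depends on the X-Delete-After request header being
turned into an absolute X-Delete-At timestamp when the object is PUT. A
hedged sketch of that conversion, with an illustrative function name
rather than Swift's actual internals:

    import time

    def resolve_delete_after(headers):
        # Relative X-Delete-After becomes an absolute X-Delete-At epoch
        # at PUT time; the expirer later removes objects whose
        # X-Delete-At has passed. Illustrative only.
        if 'X-Delete-After' in headers:
            delta = int(headers.pop('X-Delete-After'))
            headers['X-Delete-At'] = str(int(time.time()) + delta)
        return headers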
class TestRunningWithEachTypeDown(unittest.TestCase):
    def setUp(self):
        self.pids, self.port2server, self.account_ring, self.container_ring, \
            self.object_ring, self.url, self.token, self.account = \
                reset_environment()

    def tearDown(self):
        kill_pids(self.pids)

    def test_main(self):
        # TODO: This test "randomly" passes or fails; need to find out why
        return
        apart, anodes = self.account_ring.get_nodes(self.account)
        kill(self.pids[self.port2server[anodes[0]['port']]], SIGTERM)
        cpart, cnodes = \
            self.container_ring.get_nodes(self.account, 'container1')
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)
        opart, onodes = \
            self.object_ring.get_nodes(self.account, 'container1', 'object1')
        kill(self.pids[self.port2server[onodes[0]['port']]], SIGTERM)

        try:
            client.put_container(self.url, self.token, 'container1')
        except client.ClientException as err:
            # This might 503 if one of the up container nodes tries to update
            # the down account node. It'll still be saved on one node, but we
            # can't assure the user.
            pass
        client.put_object(self.url, self.token, 'container1', 'object1',
                          '1234')
        get_to_final_state()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        found1 = False
        for obj in client.get_container(self.url, self.token, 'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        self.assert_(found1)

        self.pids[self.port2server[anodes[0]['port']]] = \
            Popen(['swift-account-server',
                   '/etc/swift/account-server/%d.conf' %
                    ((anodes[0]['port'] - 6002) / 10)]).pid
        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['swift-container-server',
                   '/etc/swift/container-server/%d.conf' %
                    ((cnodes[0]['port'] - 6001) / 10)]).pid
        self.pids[self.port2server[onodes[0]['port']]] = \
            Popen(['swift-object-server',
                   '/etc/swift/object-server/%d.conf' %
                    ((onodes[0]['port'] - 6000) / 10)]).pid
        sleep(2)
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
        # The account node was previously down.
        self.assert_(not found1)
        found1 = False
        for obj in client.get_container(self.url, self.token, 'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        # The first container node 404s, but the proxy will try the next node
        # and succeed.
        self.assert_(found1)

        get_to_final_state()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        for container in containers:
            if container['name'] == 'container1':
                found1 = True
                self.assertEquals(container['count'], 1)
                self.assertEquals(container['bytes'], 4)
        self.assert_(found1)
        found1 = False
        for obj in client.get_container(self.url, self.token, 'container1')[1]:
            if obj['name'] == 'object1':
                found1 = True
                self.assertEquals(obj['bytes'], 4)
        self.assert_(found1)
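
The Popen restarts above all recover a server's config index from its
port. Assuming the standard SAIO-style layout these probe tests use
(object servers from port 6000, container from 6001, account from 6002,
stepping by ten per node), the mapping can be sketched as:

    # Sketch of the assumed port convention; node N's object server
    # listens on 6000 + 10 * N, container on 6001 + 10 * N, account on
    # 6002 + 10 * N (Python 2 integer division, as in the tests).
    BASE_PORT = {'object': 6000, 'container': 6001, 'account': 6002}

    def conf_path(server_type, port):
        node = (port - BASE_PORT[server_type]) / 10
        return '/etc/swift/%s-server/%d.conf' % (server_type, node)

    # e.g. conf_path('container', 6021) -> '/etc/swift/container-server/2.conf'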
Example No. 25
0
    def test_main(self):
        container1 = 'container1'
        client.put_container(self.url, self.token, container1)
        container2 = 'container2'
        client.put_container(self.url, self.token, container2)
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
        self.assertEquals(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
                self.assertEquals(c['count'], 0)
                self.assertEquals(c['bytes'], 0)
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 0)
                self.assertEquals(c['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)

        client.put_object(self.url, self.token, container2, 'object1', '1234')
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
        self.assertEquals(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
                self.assertEquals(c['count'], 0)
                self.assertEquals(c['bytes'], 0)
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 0)
                self.assertEquals(c['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)

        get_to_final_state()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
                self.assertEquals(c['count'], 0)
                self.assertEquals(c['bytes'], 0)
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 1)
                self.assertEquals(c['bytes'], 4)
        self.assert_(found1)
        self.assert_(found2)

        apart, anodes = self.account_ring.get_nodes(self.account)
        kill(self.pids[self.port2server[anodes[0]['port']]], SIGTERM)

        client.delete_container(self.url, self.token, container1)
        client.put_object(self.url, self.token, container2, 'object2', '12345')
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 1)
                self.assertEquals(c['bytes'], 4)
        self.assert_(not found1)
        self.assert_(found2)

        ps = []
        for n in xrange(1, 5):
            ps.append(Popen(['swift-container-updater',
                             '/etc/swift/container-server/%d.conf' % n,
                             'once']))
        for p in ps:
            p.wait()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '2')
        self.assertEquals(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 2)
                self.assertEquals(c['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)

        self.pids[self.port2server[anodes[0]['port']]] = \
            Popen(['swift-account-server',
                   '/etc/swift/account-server/%d.conf' %
                    ((anodes[0]['port'] - 6002) / 10)]).pid
        sleep(2)
        # These are the earlier counts and bytes because the first node
        # doesn't have the newest updates yet.
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
            elif c['name'] == container2:
                found2 = True
                # These are the earlier count and bytes because the first
                # node doesn't have the newest updates yet.
                self.assertEquals(c['count'], 1)
                self.assertEquals(c['bytes'], 4)
        # This is okay because the first node hasn't got the update that
        # container1 was deleted yet.
        self.assert_(found1)
        self.assert_(found2)

        get_to_final_state()
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '2')
        self.assertEquals(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 2)
                self.assertEquals(c['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)
    def test_reconcile_delete(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.put_object()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.delete_object()
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assert_(len(found_policy_indexes) > 1,
                     'primary nodes did not disagree about policy index %r' %
                     head_responses)
        # find our object
        orig_policy_index = ts_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={'X-Backend-Storage-Policy-Index':
                                 policy_index})
                except direct_client.ClientException as err:
                    if 'x-backend-timestamp' in err.http_headers:
                        ts_policy_index = policy_index
                        break
                else:
                    orig_policy_index = policy_index
                    break
        # policy index 0 is falsey, so compare against None explicitly
        if orig_policy_index is None:
            self.fail('Unable to find /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        if ts_policy_index is None:
            self.fail('Unable to find tombstone /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        new_found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for node,
                metadata in head_responses)
        self.assert_(len(new_found_policy_indexes) == 1,
                     'primary nodes disagree about policy index %r' %
                     dict((node['port'],
                           metadata['X-Backend-Storage-Policy-Index'])
                          for node, metadata in head_responses))
        expected_policy_index = new_found_policy_indexes.pop()
        self.assertEqual(orig_policy_index, expected_policy_index)
        # validate object fully deleted
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={'X-Backend-Storage-Policy-Index':
                                 policy_index})
                except direct_client.ClientException as err:
                    if err.http_status == HTTP_NOT_FOUND:
                        continue
                    raise  # don't let unexpected errors pass as "deleted"
                else:
                    self.fail('Found /%s/%s/%s in %s on %s' % (
                        self.account, self.container_name, self.object_name,
                        orig_policy_index, node))
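
The policy-hunting loops above tell a live object apart from a tombstone
by whether direct_head_object raises with x-backend-timestamp among the
error's headers. Extracted into a standalone helper (hypothetical name,
same calls as the test):

    def has_tombstone(node, part, account, container, obj, policy_index):
        # A deleted object 404s on HEAD, but its tombstone still reports
        # x-backend-timestamp; that distinguishes "deleted" from "never
        # written" in this policy.
        try:
            direct_client.direct_head_object(
                node, part, account, container, obj,
                headers={'X-Backend-Storage-Policy-Index': policy_index})
        except direct_client.ClientException as err:
            return 'x-backend-timestamp' in err.http_headers
        return False  # HEAD succeeded, so the object is live here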
    def test_first_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])

        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['chase-container-server',
                   '/etc/chase/container-server/%d.conf' %
                    ((cnodes[0]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't got the update that the
        # object was deleted yet.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(cnodes[0], cpart,
                     self.account, container)[1]])

        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated happens to be the first
        # account server, this'll pass, otherwise the first account server will
        # serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [o['name'] for o in
                     direct_client.direct_get_container(cnodes[0], cpart,
                     self.account, container)[1]])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(container in [c['name'] for c in
                     client.get_account(self.url, self.token)[1]])
        # And, so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])
        self.assert_(object2 in [o['name'] for o in
                     client.get_container(self.url, self.token, container)[1]])
    def test_reconcile_manifest(self):
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url, self.token, self.container_name,
                              part_name, contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        policy = random.choice(ENABLED_POLICIES)
        self.brain.put_container(policy.idx)
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p is not policy])
        self.brain.put_container(wrong_policy.idx)
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        try:
            client.put_object(self.url, self.token, self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')
        except ClientException as err:
            # so as it works out, you can't really upload a multi-part
            # manifest for objects that are currently misplaced - you have to
            # wait until they're all available - which is about the same as
            # some other failure that causes data to be unavailable to the
            # proxy at the time of upload
            self.assertEqual(err.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }
        direct_manifest_data = map(translate_direct, manifest_data)
        headers = {
            'x-container-host': ','.join('%s:%s' % (n['ip'], n['port']) for n
                                         in self.container_ring.devs),
            'x-container-device': ','.join(n['device'] for n in
                                           self.container_ring.devs),
            'x-container-partition': container_part,
            'X-Backend-Storage-Policy-Index': wrong_policy.idx,
            'X-Static-Large-Object': 'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node, part, self.account, self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEquals(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(body, ''.join('VERIFY%0.2d' % i + '\x00' * 1048576
                                       for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
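
The x-container-host/device/partition headers built above tell each
object server which container servers to notify about the new object.
The test broadcasts to every device in the ring, which works on a small
test cluster; a hedged sketch that targets only the container's actual
primary nodes instead:

    def container_update_headers(container_ring, account, container):
        # Hypothetical helper: aim the object server's container updates
        # at the container's primary nodes only.
        part, nodes = container_ring.get_nodes(account, container)
        return {
            'x-container-partition': part,
            'x-container-host': ','.join(
                '%(ip)s:%(port)s' % n for n in nodes),
            'x-container-device': ','.join(n['device'] for n in nodes),
        }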
class TestContainerFailures(unittest.TestCase):
    def setUp(self):
        self.pids, self.port2server, self.account_ring, self.container_ring, \
            self.object_ring, self.url, self.token, self.account = \
                reset_environment()

    def tearDown(self):
        kill_pids(self.pids)

    def test_first_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[0]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        self.pids[self.port2server[cnodes[0]['port']]] = \
            Popen(['swift-container-server',
                   '/etc/swift/container-server/%d.conf' %
                    ((cnodes[0]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't got the update that the
        # object was deleted yet.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])

        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated happens to be the first
        # account server, this'll pass, otherwise the first account server will
        # serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # And, so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

    def test_second_node_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        kill(self.pids[self.port2server[cnodes[1]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        self.pids[self.port2server[cnodes[1]['port']]] = \
            Popen(['swift-container-server',
                   '/etc/swift/container-server/%d.conf' %
                    ((cnodes[1]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated happens to be the first
        # account server, this'll pass, otherwise the first account server will
        # serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #     client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        self.assert_(object1 not in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # And, so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

    def test_first_two_nodes_fail(self):
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])

        object1 = 'object1'
        client.put_object(self.url, self.token, container, object1, 'test')
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        for x in xrange(2):
            kill(self.pids[self.port2server[cnodes[x]['port']]], SIGTERM)

        client.delete_object(self.url, self.token, container, object1)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        for x in xrange(2):
            self.pids[self.port2server[cnodes[x]['port']]] = \
                Popen(['swift-container-server',
                       '/etc/swift/container-server/%d.conf' %
                        ((cnodes[x]['port'] - 6001) / 10)]).pid
        sleep(2)
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # This is okay because the first node hasn't got the update that the
        # object was deleted yet.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])

        # This fails because all three nodes have to indicate deletion before
        # we tell the user it worked. Since the first node 409s (it hasn't got
        # the update that the object was deleted yet), the whole request must
        # 503 (until everything is synced up, at which point the delete would
        # work).
        exc = None
        try:
            client.delete_container(self.url, self.token, container)
        except client.ClientException as err:
            exc = err
        self.assert_(exc)
        self.assertEquals(exc.http_status, 503)
        # Unfortunately, the following might pass or fail, depending on the
        # position of the account server associated with the first container
        # server we had killed. If the associated happens to be the first
        # account server, this'll pass, otherwise the first account server will
        # serve the listing and not have the container.
        # self.assert_(container in [c['name'] for c in
        #              client.get_account(self.url, self.token)[1]])

        object2 = 'object2'
        # This will work because at least one (in this case, just one) account
        # server has to indicate the container exists for the put to continue.
        client.put_object(self.url, self.token, container, object2, 'test')
        # First node still doesn't know object1 was deleted yet; this is okay.
        self.assert_(object1 in [
            o['name'] for o in direct_client.direct_get_container(
                cnodes[0], cpart, self.account, container)[1]
        ])
        # And, of course, our new object2 exists.
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])

        get_to_final_state()
        # Our container delete never "finalized" because we started using it
        # before the delete settled.
        self.assert_(
            container in
            [c['name'] for c in client.get_account(self.url, self.token)[1]])
        # And, so our object2 should still exist and object1's delete should
        # have finalized.
        self.assert_(object1 not in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])
        self.assert_(object2 in [
            o['name']
            for o in client.get_container(self.url, self.token, container)[1]
        ])
    def test_reconciler_move_object_twice(self):
        # select some policies
        old_policy = random.choice(ENABLED_POLICIES)
        new_policy = random.choice([p for p in ENABLED_POLICIES
                                    if p != old_policy])

        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()

        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        old_container_node_ids = [
            node['id'] for node, metadata in head_responses
            if int(old_policy) ==
            int(metadata['X-Backend-Storage-Policy-Index'])]
        self.assertEqual(2, len(old_container_node_ids))

        # hopefully memcache still has the new policy cached
        self.brain.put_object()
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        client = InternalClient(conf_file, 'probe-test', 3)
        client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()

        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
            tuple(server.once(number=n + 1) for n in old_container_node_ids)

        # verify entry in the queue for the "misplaced" new_policy
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # verify object in old_policy
        client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # verify object is *not* in new_policy
        client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})

        get_to_final_state()

        # verify entry in the queue, now for the "misplaced" old_policy copy
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # and now it flops back
        client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # make sure the queue is settled
        get_to_final_state()
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                self.fail('Found unexpected object %r in the queue' % obj)
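
    # The '.misplaced_objects' rows asserted above follow the reconciler's
    # '<policy_index>:/<account>/<container>/<object>' naming convention.
    # A minimal parser for such rows (a hypothetical helper, not part of
    # the probe tests):
    @staticmethod
    def _parse_misplaced_entry(name):
        """Split a queue row name into (policy_index, account, container,
        obj), e.g. '1:/AUTH_test/c/o' -> (1, 'AUTH_test', 'c', 'o')."""
        policy, _, path = name.partition(':')
        account, container, obj = path.strip('/').split('/', 2)
        return int(policy), account, container, obj
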
Example n. 31
    def test_main(self):
        container1 = 'container1'
        client.put_container(self.url, self.token, container1)
        container2 = 'container2'
        client.put_container(self.url, self.token, container2)
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
        self.assertEquals(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
                self.assertEquals(c['count'], 0)
                self.assertEquals(c['bytes'], 0)
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 0)
                self.assertEquals(c['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)

        client.put_object(self.url, self.token, container2, 'object1', '1234')
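        # the account's stats only change once the container updater has
        # reported the new container totals, so everything still reads zero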
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '0')
        self.assertEquals(headers['x-account-bytes-used'], '0')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
                self.assertEquals(c['count'], 0)
                self.assertEquals(c['bytes'], 0)
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 0)
                self.assertEquals(c['bytes'], 0)
        self.assert_(found1)
        self.assert_(found2)

        get_to_final_state()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
                self.assertEquals(c['count'], 0)
                self.assertEquals(c['bytes'], 0)
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 1)
                self.assertEquals(c['bytes'], 4)
        self.assert_(found1)
        self.assert_(found2)

        apart, anodes = self.account_ring.get_nodes(self.account)
        kill(self.pids[self.port2server[anodes[0]['port']]], SIGTERM)

        client.delete_container(self.url, self.token, container1)
        client.put_object(self.url, self.token, container2, 'object2', '12345')
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 1)
                self.assertEquals(c['bytes'], 4)
        self.assert_(not found1)
        self.assert_(found2)

        # run the container updaters on all four nodes so container2's new
        # totals reach the account databases
        ps = []
        for n in xrange(1, 5):
            ps.append(
                Popen([
                    'chase-container-updater',
                    '/etc/chase/container-server/%d.conf' % n, 'once'
                ]))
        for p in ps:
            p.wait()
        headers, containers = client.get_account(self.url, self.token)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '2')
        self.assertEquals(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 2)
                self.assertEquals(c['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)

        self.pids[self.port2server[anodes[0]['port']]] = \
            Popen(['chase-account-server',
                   '/etc/chase/account-server/%d.conf' %
                    ((anodes[0]['port'] - 6002) / 10)]).pid
        sleep(2)
        # These are the earlier counts and bytes because the first node
        # doesn't have the newest updates yet.
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '2')
        self.assertEquals(headers['x-account-object-count'], '1')
        self.assertEquals(headers['x-account-bytes-used'], '4')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
            elif c['name'] == container2:
                found2 = True
                # These are the earlier count and bytes because the first
                # node doesn't have the newest updates yet.
                self.assertEquals(c['count'], 1)
                self.assertEquals(c['bytes'], 4)
        # This is okay because the first node hasn't yet received the update
        # that container1 was deleted.
        self.assert_(found1)
        self.assert_(found2)

        get_to_final_state()
        headers, containers = \
            direct_client.direct_get_account(anodes[0], apart, self.account)
        self.assertEquals(headers['x-account-container-count'], '1')
        self.assertEquals(headers['x-account-object-count'], '2')
        self.assertEquals(headers['x-account-bytes-used'], '9')
        found1 = False
        found2 = False
        for c in containers:
            if c['name'] == container1:
                found1 = True
            elif c['name'] == container2:
                found2 = True
                self.assertEquals(c['count'], 2)
                self.assertEquals(c['bytes'], 9)
        self.assert_(not found1)
        self.assert_(found2)
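
    # The repeated found1/found2 scans over the account listing above could
    # be factored into a helper along these lines (a sketch; the method name
    # and signature are hypothetical, not part of the original test):
    def _assert_container_listing(self, containers, expectations):
        """expectations maps container name -> (count, bytes), or to None
        when the container must be absent from the listing."""
        seen = dict((c['name'], c) for c in containers)
        for name, expected in expectations.items():
            if expected is None:
                self.assert_(name not in seen)
            else:
                self.assert_(name in seen)
                count, nbytes = expected
                self.assertEquals(seen[name]['count'], count)
                self.assertEquals(seen[name]['bytes'], nbytes)
    # e.g.: self._assert_container_listing(
    #     containers, {container1: None, container2: (2, 9)})
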
    def test_merge_storage_policy_index(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object()
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assert_(len(found_policy_indexes) > 1,
                     'primary nodes did not disagree about policy index %r' %
                     head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={'X-Backend-Storage-Policy-Index':
                                 policy_index})
                except direct_client.ClientException:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
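        # this else belongs to the for loop: it only runs if no policy had
        # a copy of the object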
        else:
            self.fail('Unable to find /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assert_(len(found_policy_indexes) == 1,
                     'primary nodes disagree about policy index %r' %
                     head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node, part, self.account, self.container_name,
                    self.object_name, headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index})
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' % (
                    self.account, self.container_name, self.object_name,
                    orig_policy_index))
        # use proxy to access object (bad container info might be cached...)
        timeout = time.time() + TIMEOUT
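        # poll the proxy until the HEAD succeeds; the while loop's else
        # clause fails the test if the deadline passes without a break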
        while time.time() < timeout:
            try:
                metadata = client.head_object(self.url, self.token,
                                              self.container_name,
                                              self.object_name)
            except ClientException as err:
                if err.http_status != HTTP_NOT_FOUND:
                    raise
                time.sleep(1)
            else:
                break
        else:
            self.fail('could not HEAD /%s/%s/%s from policy %s '
                      'after %s seconds.' % (
                          self.account, self.container_name, self.object_name,
                          expected_policy_index, TIMEOUT))