Example No. 1
    def test_sync_unexpired_object_metadata(self):
        # verify that metadata can be sync'd to a frag that has missed a POST
        # and consequently that frag appears to be expired, when in fact the
        # POST removed the x-delete-at header
        client.put_container(self.url, self.token, self.container_name,
                             headers={'x-storage-policy': self.policy.name})
        opart, onodes = self.object_ring.get_nodes(
            self.account, self.container_name, self.object_name)
        delete_at = int(time.time() + 3)
        contents = 'body-%s' % uuid.uuid4()
        headers = {'x-delete-at': delete_at}
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name, headers=headers, contents=contents)
        # fail a primary
        post_fail_node = random.choice(onodes)
        post_fail_path = self.device_dir('object', post_fail_node)
        self.kill_drive(post_fail_path)
        # post over w/o x-delete-at
        client.post_object(self.url, self.token, self.container_name,
                           self.object_name, {'content-type': 'something-new'})
        # revive failed primary
        self.revive_drive(post_fail_path)
        # wait for the delete_at to pass, and check that it thinks the object
        # is expired
        timeout = time.time() + 5
        err = None
        while time.time() < timeout:
            try:
                direct_client.direct_head_object(
                    post_fail_node, opart, self.account, self.container_name,
                    self.object_name, headers={
                        'X-Backend-Storage-Policy-Index': int(self.policy)})
            except direct_client.ClientException as client_err:
                if client_err.http_status != 404:
                    raise
                # keep a reference; Python 3 clears the except variable on exit
                err = client_err
                break
            else:
                time.sleep(0.1)
        else:
            self.fail('Failed to get a 404 from node with expired object')
        self.assertEqual(err.http_status, 404)
        self.assertIn('X-Backend-Timestamp', err.http_headers)

        # but from the proxy we've got the whole story
        headers, body = client.get_object(self.url, self.token,
                                          self.container_name,
                                          self.object_name)
        self.assertNotIn('X-Delete-At', headers)
        self.reconstructor.once()

        # ... and all the nodes have the final unexpired state
        for node in onodes:
            headers = direct_client.direct_head_object(
                node, opart, self.account, self.container_name,
                self.object_name, headers={
                    'X-Backend-Storage-Policy-Index': int(self.policy)})
            self.assertNotIn('X-Delete-At', headers)
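
Note: the examples in this listing share one call shape: a ring node dict, the partition from object_ring.get_nodes(), the account/container/object names, and optionally backend headers, with ClientException raised for non-2xx responses. The following is a minimal sketch of that pattern, not taken from any single example; the node dict, port, account, container, and object names are placeholder assumptions.

    from swift.common import direct_client

    # placeholder node dict; in the examples it comes from
    # object_ring.get_nodes() or the ring's device list
    node = {'ip': '127.0.0.1', 'port': 6200, 'device': 'sda1'}
    part = 0  # placeholder partition
    try:
        # HEAD the object directly on one storage node, bypassing the proxy
        resp_headers = direct_client.direct_head_object(
            node, part, 'AUTH_test', 'test-container', 'test-object',
            headers={'X-Backend-Storage-Policy-Index': 0})
    except direct_client.ClientException as err:
        # non-2xx responses (e.g. 404 for a missing or expired object) arrive
        # as ClientException with the status and response headers attached
        print(err.http_status, err.http_headers.get('X-Backend-Timestamp'))
    else:
        print(resp_headers.get('x-timestamp'))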
Example No. 2
    def test_direct_head_object_error(self):
        with mocked_http_conn(500) as conn:
            with self.assertRaises(ClientException) as raised:
                direct_client.direct_head_object(self.node, self.part,
                                                 self.account, self.container,
                                                 self.obj)
            self.assertEqual(conn.host, self.node['ip'])
            self.assertEqual(conn.port, self.node['port'])
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(raised.exception.http_status, 500)
        self.assertTrue('HEAD' in str(raised.exception))
Example No. 3
    def test_direct_head_object_error(self):
        with mocked_http_conn(500) as conn:
            with self.assertRaises(ClientException) as raised:
                direct_client.direct_head_object(
                    self.node, self.part, self.account, self.container,
                    self.obj)
            self.assertEqual(conn.host, self.node['ip'])
            self.assertEqual(conn.port, self.node['port'])
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(raised.exception.http_status, 500)
        self.assertTrue('HEAD' in str(raised.exception))
Example No. 4
    def test_direct_head_object_error(self):
        with mocked_http_conn(500) as conn:
            try:
                direct_client.direct_head_object(
                    self.node, self.part, self.account, self.container,
                    self.obj)
            except ClientException as client_err:
                # keep a reference; Python 3 clears the except variable on exit
                err = client_err
            else:
                self.fail('ClientException not raised')
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(err.http_status, 500)
        self.assertTrue('HEAD' in str(err))
Example No. 5
    def run_quarantine_zero_byte_head(self):
        container = 'container-zbyte-%s' % uuid4()
        obj = 'object-zbyte-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
        metadata = read_metadata(data_file)
        unlink(data_file)

        with open(data_file, 'w') as fpointer:
            write_metadata(fpointer, metadata)
        try:
            direct_client.direct_head_object(onode, opart, self.account,
                                             container, obj, conn_timeout=1,
                                             response_timeout=1)
            raise Exception("Did not quarantine object")
        except client.ClientException as err:
            self.assertEqual(err.http_status, 404)
Example No. 6
    def run_quarantine_zero_byte_head(self):
        container = 'container-zbyte-%s' % uuid4()
        obj = 'object-zbyte-%s' % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj, 'DATA')
        metadata = read_metadata(data_file)
        unlink(data_file)

        with open(data_file, 'w') as fpointer:
            write_metadata(fpointer, metadata)
        try:
            direct_client.direct_head_object(onode, opart, self.account,
                                             container, obj, conn_timeout=1,
                                             response_timeout=1)
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
Example No. 7
    def test_direct_head_object_not_found(self):
        important_timestamp = Timestamp.now().internal
        stub_headers = {'X-Backend-Important-Timestamp': important_timestamp}
        with mocked_http_conn(404, headers=stub_headers) as conn:
            with self.assertRaises(ClientException) as raised:
                direct_client.direct_head_object(self.node, self.part,
                                                 self.account, self.container,
                                                 self.obj)
            self.assertEqual(conn.host, self.node['ip'])
            self.assertEqual(conn.port, self.node['port'])
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(raised.exception.http_status, 404)
        self.assertEqual(
            raised.exception.http_headers['x-backend-important-timestamp'],
            important_timestamp)
Example No. 8
    def test_direct_head_object_not_found(self):
        important_timestamp = Timestamp.now().internal
        stub_headers = {'X-Backend-Important-Timestamp': important_timestamp}
        with mocked_http_conn(404, headers=stub_headers) as conn:
            with self.assertRaises(ClientException) as raised:
                direct_client.direct_head_object(
                    self.node, self.part, self.account, self.container,
                    self.obj)
            self.assertEqual(conn.host, self.node['ip'])
            self.assertEqual(conn.port, self.node['port'])
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(raised.exception.http_status, 404)
        self.assertEqual(
            raised.exception.http_headers['x-backend-important-timestamp'],
            important_timestamp)
Example No. 9
    def test_direct_head_object_not_found(self):
        important_timestamp = Timestamp(time.time()).internal
        stub_headers = {'X-Backend-Important-Timestamp': important_timestamp}
        with mocked_http_conn(404, headers=stub_headers) as conn:
            try:
                direct_client.direct_head_object(
                    self.node, self.part, self.account, self.container,
                    self.obj)
            except ClientException as client_err:
                # keep a reference; Python 3 clears the except variable on exit
                err = client_err
            else:
                self.fail('ClientException not raised')
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(err.http_status, 404)
        self.assertEqual(err.http_headers['x-backend-important-timestamp'],
                         important_timestamp)
Example No. 10
    def test_ec_missing_all_durable_fragments(self):
        # This test asserts that when the proxy has enough fragments to
        # reconstruct the object but none of them are marked durable, the
        # proxy should return a 404.

        container_name = 'container-%s' % uuid4()
        object_name = 'object-%s' % uuid4()

        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, container_name,
                             headers=headers)

        # PUT object, should go to primary nodes
        client.put_object(self.url, self.token, container_name,
                          object_name, contents=b'object contents')

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(
            self.account, container_name, object_name)

        # sanity test
        odata = client.get_object(self.url, self.token, container_name,
                                  object_name)[-1]
        self.assertEqual(b'object contents', odata)

        # make all fragments non-durable
        for node in onodes:
            part_dir = self.storage_dir(node, part=opart)
            for dirs, subdirs, files in os.walk(part_dir):
                for fname in files:
                    if fname.endswith('.data'):
                        non_durable_fname = fname.replace('#d', '')
                        os.rename(os.path.join(dirs, fname),
                                  os.path.join(dirs, non_durable_fname))
                        break
            headers = direct_client.direct_head_object(
                node, opart, self.account, container_name, object_name,
                headers={
                    'X-Backend-Storage-Policy-Index': self.policy.idx,
                    'X-Backend-Fragment-Preferences': json.dumps([])})
            self.assertNotIn('X-Backend-Durable-Timestamp', headers)

        # Now a new GET should return *404* because all fragments
        # are non-durable, even if they are reconstructable
        try:
            client.get_object(self.url, self.token, container_name,
                              object_name)
        except client.ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
Example No. 11
    def run_quarantine_zero_byte_head(self):
        container = "container-zbyte-%s" % uuid4()
        obj = "object-zbyte-%s" % uuid4()
        onode, opart, data_file = self._setup_data_file(container, obj, "DATA")
        metadata = read_metadata(data_file)
        unlink(data_file)

        with open(data_file, "w") as fpointer:
            write_metadata(fpointer, metadata)
        try:
            direct_client.direct_head_object(
                onode,
                opart,
                self.account,
                container,
                obj,
                conn_timeout=1,
                response_timeout=1,
                headers={"X-Backend-Storage-Policy-Index": self.policy.idx},
            )
            raise Exception("Did not quarantine object")
        except ClientException as err:
            self.assertEqual(err.http_status, 404)
Example No. 12
    def _check_node(self, node, part, etag, headers_post):
        # get fragment archive etag
        headers, fragment_archive_etag = self.direct_get(node, part)
        self.assertIn('X-Backend-Durable-Timestamp', headers)  # sanity check
        durable_timestamp = headers['X-Backend-Durable-Timestamp']

        # make the data file non-durable on the selected node
        part_dir = self.storage_dir('object', node, part=part)
        for dirs, subdirs, files in os.walk(part_dir):
            for fname in files:
                if fname.endswith('.data'):
                    non_durable_fname = fname.replace('#d', '')
                    os.rename(os.path.join(dirs, fname),
                              os.path.join(dirs, non_durable_fname))
        try:
            os.remove(os.path.join(part_dir, 'hashes.pkl'))
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # sanity check that fragment is no longer durable
        headers = direct_client.direct_head_object(
            node,
            part,
            self.account,
            self.container_name,
            self.object_name,
            headers={
                'X-Backend-Storage-Policy-Index': int(self.policy),
                'X-Backend-Fragment-Preferences': json.dumps([])
            })
        self.assertNotIn('X-Backend-Durable-Timestamp', headers)

        # fire up reconstructor to propagate durable state
        self.reconstructor.once()

        # fragment is still exactly as it was before!
        headers, fragment_archive_etag_2 = self.direct_get(node, part)
        self.assertEqual(fragment_archive_etag, fragment_archive_etag_2)
        self.assertIn('X-Backend-Durable-Timestamp', headers)
        self.assertEqual(durable_timestamp,
                         headers['X-Backend-Durable-Timestamp'])

        # check meta
        meta = client.head_object(self.url, self.token, self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertIn(key, meta)
            self.assertEqual(meta[key], headers_post[key])
Example No. 13
    def test_direct_head_object(self):
        headers = HeaderKeyDict({'x-foo': 'bar'})

        with mocked_http_conn(200, headers) as conn:
            resp = direct_client.direct_head_object(
                self.node, self.part, self.account, self.container,
                self.obj, headers=headers)
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.obj_path)

        self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
        self.assertEqual('bar', conn.req_headers.get('x-foo'))
        self.assertTrue('x-timestamp' not in conn.req_headers,
                        'x-timestamp was in HEAD request headers')
        self.assertEqual(headers, resp)
Example No. 14
 def _check_nodes(self, opart, onodes, container_name, object_name):
     found_frags = defaultdict(int)
     req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
     for node in onodes + list(self.object_ring.get_more_nodes(opart)):
         try:
             headers = direct_client.direct_head_object(
                 node, opart, self.account, container_name,
                 object_name, headers=req_headers)
         except socket.error as e:
             if e.errno != errno.ECONNREFUSED:
                 raise
         except direct_client.DirectClientException as e:
             if e.http_status != 404:
                 raise
         else:
             found_frags[headers['X-Object-Sysmeta-Ec-Frag-Index']] += 1
     return found_frags
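
For context, a helper like _check_nodes above is typically called after failing or reviving drives to see how an object's fragments are spread across primaries and handoffs. A hypothetical call site, assuming the same probe-test fixture and a 4+2 erasure-coding policy (both assumptions, not stated in the example):

     # hypothetical usage from another test method in the same class
     found_frags = self._check_nodes(opart, onodes, container_name, object_name)
     # with a 4+2 policy and no failures, all six fragment indexes should be
     # found exactly once on the primaries
     self.assertEqual(6, len(found_frags))
     self.assertEqual({1}, set(found_frags.values()))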
Example No. 15
    def test_direct_head_object(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        name = 'o'
        headers = {'key': 'value'}

        was_http_connector = direct_client.http_connect
        direct_client.http_connect = mock_http_connect(200, headers)

        resp = direct_client.direct_head_object(node, part, account,
                                                container, name)
        headers.update({'user-agent': 'direct-client %s' % os.getpid()})
        self.assertEqual(headers, resp)

        direct_client.http_connect = was_http_connector
Example No. 16
 def _check_nodes(self, opart, onodes, container_name, object_name):
     found_frags = defaultdict(int)
     req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
     for node in onodes + list(self.object_ring.get_more_nodes(opart)):
         try:
             headers = direct_client.direct_head_object(
                 node, opart, self.account, container_name,
                 object_name, headers=req_headers)
         except socket.error as e:
             if e.errno != errno.ECONNREFUSED:
                 raise
         except direct_client.DirectClientException as e:
             if e.http_status != 404:
                 raise
         else:
             found_frags[headers['X-Object-Sysmeta-Ec-Frag-Index']] += 1
     return found_frags
Example No. 17
    def test_reconcile_symlink(self):
        if 'symlink' not in self.cluster_info:
            raise unittest.SkipTest(
                "Symlink not enabled in proxy; can't test "
                "symlink reconciliation")
        wrong_policy = random.choice(ENABLED_POLICIES)
        policy = random.choice([p for p in ENABLED_POLICIES
                                if p is not wrong_policy])
        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some target data
        client.put_object(self.url, self.token, self.container_name, 'target',
                          contents='this is the target data')

        # write the symlink
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        client.put_object(
            self.url, self.token, self.container_name, 'symlink',
            headers={
                'X-Symlink-Target': '%s/target' % self.container_name,
                'Content-Type': 'application/symlink',
            })

        # at this point we have a broken symlink (the container_info has the
        # proxy looking for the target in the wrong policy)
        with self.assertRaises(ClientException) as ctx:
            client.get_object(self.url, self.token, self.container_name,
                              'symlink')
        self.assertEqual(ctx.exception.http_status, 404)

        # of course the symlink itself is fine
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name, 'symlink',
                                           query_string='symlink=get')
        self.assertEqual(metadata['x-symlink-target'],
                         '%s/target' % self.container_name)
        self.assertEqual(metadata['content-type'], 'application/symlink')
        self.assertEqual(body, '')
        # ... although in the wrong policy
        object_ring = POLICIES.get_object_ring(int(wrong_policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, 'symlink')
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node, part, self.account, self.container_name, 'symlink',
                headers={'X-Backend-Storage-Policy-Index': int(wrong_policy)})
            self.assertEqual(metadata['X-Object-Sysmeta-Symlink-Target'],
                             '%s/target' % self.container_name)

        # let the reconciler run
        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # now the symlink works
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name, 'symlink')
        self.assertEqual(body, 'this is the target data')
        # and it's in the correct policy
        object_ring = POLICIES.get_object_ring(int(policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, 'symlink')
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node, part, self.account, self.container_name, 'symlink',
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            self.assertEqual(metadata['X-Object-Sysmeta-Symlink-Target'],
                             '%s/target' % self.container_name)
Example No. 18
    def test_ec_handoff_overwrite(self):
        container_name = 'container-%s' % uuid4()
        object_name = 'object-%s' % uuid4()

        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, container_name,
                             headers=headers)

        # PUT object
        old_contents = Body()
        client.put_object(self.url, self.token, container_name,
                          object_name, contents=old_contents)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(
            self.account, container_name, object_name)

        # shutdown one of the primary data nodes
        failed_primary = random.choice(onodes)
        failed_primary_device_path = self.device_dir(failed_primary)
        # first read its ec etag value for future reference - this may not
        # equal old_contents.etag if for example the proxy has crypto enabled
        req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
        headers = direct_client.direct_head_object(
            failed_primary, opart, self.account, container_name,
            object_name, headers=req_headers)
        old_backend_etag = headers['X-Object-Sysmeta-EC-Etag']

        self.kill_drive(failed_primary_device_path)

        # overwrite our object with some new data
        new_contents = Body()
        client.put_object(self.url, self.token, container_name,
                          object_name, contents=new_contents)
        self.assertNotEqual(new_contents.etag, old_contents.etag)

        # restore failed primary device
        self.revive_drive(failed_primary_device_path)

        # sanity - failed node has old contents
        req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
        headers = direct_client.direct_head_object(
            failed_primary, opart, self.account, container_name,
            object_name, headers=req_headers)
        self.assertEqual(headers['X-Object-Sysmeta-EC-Etag'],
                         old_backend_etag)

        # we have 1 primary with wrong old etag, and we should have 5 with
        # new etag plus a handoff with the new etag, so killing 2 other
        # primaries forces proxy to try to GET from all primaries plus handoff.
        other_nodes = [n for n in onodes if n != failed_primary]
        random.shuffle(other_nodes)
        # grab the value of the new content's ec etag for future reference
        headers = direct_client.direct_head_object(
            other_nodes[0], opart, self.account, container_name,
            object_name, headers=req_headers)
        new_backend_etag = headers['X-Object-Sysmeta-EC-Etag']
        for node in other_nodes[:2]:
            self.kill_drive(self.device_dir(node))

        # sanity, after taking out two primaries we should be down to
        # only four primaries, one of which has the old etag - but we
        # also have a handoff with the new etag out there
        found_frags = defaultdict(int)
        req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
        for node in onodes + list(self.object_ring.get_more_nodes(opart)):
            try:
                headers = direct_client.direct_head_object(
                    node, opart, self.account, container_name,
                    object_name, headers=req_headers)
            except Exception:
                continue
            found_frags[headers['X-Object-Sysmeta-EC-Etag']] += 1
        self.assertEqual(found_frags, {
            new_backend_etag: 4,  # this should be enough to rebuild!
            old_backend_etag: 1,
        })

        # clear node error limiting
        Manager(['proxy']).restart()

        resp_etag = self.get_object(container_name, object_name)
        self.assertEqual(resp_etag, new_contents.etag)
Example No. 19
    def test_merge_storage_policy_index(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents='VERIFY')
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(self.account,
                                                self.container_name,
                                                self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node,
                        part,
                        self.account,
                        self.container_name,
                        self.object_name,
                        headers={
                            'X-Backend-Storage-Policy-Index': policy_index
                        })
                except direct_client.ClientException as err:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' %
                      (self.account, self.container_name, self.object_name,
                       found_policy_indexes))
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) == 1,
            'primary nodes disagree about policy index %r' % head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node,
                    part,
                    self.account,
                    self.container_name,
                    self.object_name,
                    headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index
                    })
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' %
                          (self.account, self.container_name, self.object_name,
                           orig_policy_index))
        # verify that the object data read by external client is correct
        headers, data = self._get_object_patiently(expected_policy_index)
        self.assertEqual('VERIFY', data)
        self.assertEqual('custom-meta', headers['x-object-meta-test'])
Example No. 20
    def test_merge_storage_policy_index(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents='VERIFY')
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={'X-Backend-Storage-Policy-Index':
                                 policy_index})
                except direct_client.ClientException as err:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(len(found_policy_indexes) == 1,
                        'primary nodes disagree about policy index %r' %
                        head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node, part, self.account, self.container_name,
                    self.object_name, headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index})
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' % (
                    self.account, self.container_name, self.object_name,
                    orig_policy_index))
        # verify that the object data read by external client is correct
        headers, data = self._get_object_patiently(expected_policy_index)
        self.assertEqual('VERIFY', data)
        self.assertEqual('custom-meta', headers['x-object-meta-test'])
Example No. 21
 def test_reconcile_delete(self):
     # generic split brain
     self.brain.stop_primary_half()
     self.brain.put_container()
     self.brain.put_object()
     self.brain.start_primary_half()
     self.brain.stop_handoff_half()
     self.brain.put_container()
     self.brain.delete_object()
     self.brain.start_handoff_half()
     # make sure we have some manner of split brain
     container_part, container_nodes = self.container_ring.get_nodes(
         self.account, self.container_name)
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for
             node, metadata in head_responses)
     self.assertTrue(
         len(found_policy_indexes) > 1,
         'primary nodes did not disagree about policy index %r' %
         head_responses)
     # find our object
     orig_policy_index = ts_policy_index = None
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(
             self.account, self.container_name, self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node, part, self.account, self.container_name,
                     self.object_name,
                     headers={'X-Backend-Storage-Policy-Index':
                              policy_index})
             except direct_client.ClientException as err:
                 if 'x-backend-timestamp' in err.http_headers:
                     ts_policy_index = policy_index
                     break
             else:
                 orig_policy_index = policy_index
                 break
     if not orig_policy_index:
         self.fail('Unable to find /%s/%s/%s in %r' % (
             self.account, self.container_name, self.object_name,
             found_policy_indexes))
     if not ts_policy_index:
         self.fail('Unable to find tombstone /%s/%s/%s in %r' % (
             self.account, self.container_name, self.object_name,
             found_policy_indexes))
     self.get_to_final_state()
     Manager(['container-reconciler']).once()
     # validate containers
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     new_found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for node,
             metadata in head_responses)
     self.assertTrue(len(new_found_policy_indexes) == 1,
                     'primary nodes disagree about policy index %r' %
                     dict((node['port'],
                          metadata['X-Backend-Storage-Policy-Index'])
                          for node, metadata in head_responses))
     expected_policy_index = new_found_policy_indexes.pop()
     self.assertEqual(orig_policy_index, expected_policy_index)
     # validate object fully deleted
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(
             self.account, self.container_name, self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node, part, self.account, self.container_name,
                     self.object_name,
                     headers={'X-Backend-Storage-Policy-Index':
                              policy_index})
             except direct_client.ClientException as err:
                 if err.http_status == HTTP_NOT_FOUND:
                     continue
             else:
                 self.fail('Found /%s/%s/%s in %s on %s' % (
                     self.account, self.container_name, self.object_name,
                     orig_policy_index, node))
Example No. 22
    def test_ec_handoff_overwrite(self):
        container_name = 'container-%s' % uuid4()
        object_name = 'object-%s' % uuid4()

        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, container_name,
                             headers=headers)

        # PUT object
        old_contents = Body()
        client.put_object(self.url, self.token, container_name,
                          object_name, contents=old_contents)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(
            self.account, container_name, object_name)

        # shutdown one of the primary data nodes
        failed_primary = random.choice(onodes)
        failed_primary_device_path = self.device_dir('object', failed_primary)
        self.kill_drive(failed_primary_device_path)

        # overwrite our object with some new data
        new_contents = Body()
        client.put_object(self.url, self.token, container_name,
                          object_name, contents=new_contents)
        self.assertNotEqual(new_contents.etag, old_contents.etag)

        # restore failed primary device
        self.revive_drive(failed_primary_device_path)

        # sanity - failed node has old contents
        req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
        headers = direct_client.direct_head_object(
            failed_primary, opart, self.account, container_name,
            object_name, headers=req_headers)
        self.assertEqual(headers['X-Object-Sysmeta-EC-Etag'],
                         old_contents.etag)

        # we have 1 primary with wrong old etag, and we should have 5 with
        # new etag plus a handoff with the new etag, so killing 2 other
        # primaries forces proxy to try to GET from all primaries plus handoff.
        other_nodes = [n for n in onodes if n != failed_primary]
        random.shuffle(other_nodes)
        for node in other_nodes[:2]:
            self.kill_drive(self.device_dir('object', node))

        # sanity, after taking out two primaries we should be down to
        # only four primaries, one of which has the old etag - but we
        # also have a handoff with the new etag out there
        found_frags = defaultdict(int)
        req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
        for node in onodes + list(self.object_ring.get_more_nodes(opart)):
            try:
                headers = direct_client.direct_head_object(
                    node, opart, self.account, container_name,
                    object_name, headers=req_headers)
            except Exception:
                continue
            found_frags[headers['X-Object-Sysmeta-EC-Etag']] += 1
        self.assertEqual(found_frags, {
            new_contents.etag: 4,  # this should be enough to rebuild!
            old_contents.etag: 1,
        })

        # clear node error limiting
        Manager(['proxy']).restart()

        resp_etag = self.get_object(container_name, object_name)
        self.assertEqual(resp_etag, new_contents.etag)
Example No. 23
    def test_reconcile_symlink(self):
        if 'symlink' not in self.cluster_info:
            raise unittest.SkipTest("Symlink not enabled in proxy; can't test "
                                    "symlink reconciliation")
        wrong_policy = random.choice(ENABLED_POLICIES)
        policy = random.choice(
            [p for p in ENABLED_POLICIES if p is not wrong_policy])
        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some target data
        target_name = self.get_object_name('target')
        self.brain.client.put_object(self.container_name, target_name, {},
                                     b'this is the target data')

        # write the symlink
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        symlink_name = self.get_object_name('symlink')
        self.brain.client.put_object(
            self.container_name, symlink_name, {
                'X-Symlink-Target': '%s/%s' %
                (self.container_name, target_name),
                'Content-Type': 'application/symlink',
            }, b'')

        # at this point we have a broken symlink (the container_info has the
        # proxy looking for the target in the wrong policy)
        with self.assertRaises(ClientException) as ctx:
            self.brain.client.get_object(self.container_name, symlink_name)
        self.assertEqual(ctx.exception.http_status, 404)

        # of course the symlink itself is fine
        metadata, body = self.brain.client.get_object(
            self.container_name, symlink_name, query_string='symlink=get')
        self.assertEqual(
            metadata['x-symlink-target'],
            utils.quote('%s/%s' % (self.container_name, target_name)))
        self.assertEqual(metadata['content-type'], 'application/symlink')
        self.assertEqual(body, b'')
        # ... although in the wrong policy
        object_ring = POLICIES.get_object_ring(int(wrong_policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            symlink_name)
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node,
                part,
                self.account,
                self.container_name,
                symlink_name,
                headers={'X-Backend-Storage-Policy-Index': int(wrong_policy)})
            self.assertEqual(
                metadata['X-Object-Sysmeta-Symlink-Target'],
                utils.quote('%s/%s' % (self.container_name, target_name)))

        # let the reconciler run
        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        self.brain.client.post_container(self.container_name, {})

        # now the symlink works
        metadata, body = self.brain.client.get_object(self.container_name,
                                                      symlink_name)
        self.assertEqual(body, b'this is the target data')
        # and it's in the correct policy
        object_ring = POLICIES.get_object_ring(int(policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            symlink_name)
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node,
                part,
                self.account,
                self.container_name,
                symlink_name,
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            self.assertEqual(
                metadata['X-Object-Sysmeta-Symlink-Target'],
                utils.quote('%s/%s' % (self.container_name, target_name)))
Example No. 24
 def test_reconcile_delete(self):
     # generic split brain
     self.brain.stop_primary_half()
     self.brain.put_container()
     self.brain.put_object()
     self.brain.start_primary_half()
     self.brain.stop_handoff_half()
     self.brain.put_container()
     self.brain.delete_object()
     self.brain.start_handoff_half()
     # make sure we have some manner of split brain
     container_part, container_nodes = self.container_ring.get_nodes(
         self.account, self.container_name)
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for
             node, metadata in head_responses)
     self.assert_(
         len(found_policy_indexes) > 1,
         'primary nodes did not disagree about policy index %r' %
         head_responses)
     # find our object
     orig_policy_index = ts_policy_index = None
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(self.account,
                                             self.container_name,
                                             self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node,
                     part,
                     self.account,
                     self.container_name,
                     self.object_name,
                     headers={
                         'X-Backend-Storage-Policy-Index': policy_index
                     })
             except direct_client.ClientException as err:
                 if 'x-backend-timestamp' in err.http_headers:
                     ts_policy_index = policy_index
                     break
             else:
                 orig_policy_index = policy_index
                 break
     if not orig_policy_index:
         self.fail('Unable to find /%s/%s/%s in %r' %
                   (self.account, self.container_name, self.object_name,
                    found_policy_indexes))
     if not ts_policy_index:
         self.fail('Unable to find tombstone /%s/%s/%s in %r' %
                   (self.account, self.container_name, self.object_name,
                    found_policy_indexes))
     get_to_final_state()
     Manager(['container-reconciler']).once()
     # validate containers
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     new_found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for node,
             metadata in head_responses)
     self.assert_(
         len(new_found_policy_indexes) == 1,
         'primary nodes disagree about policy index %r' % dict(
             (node['port'], metadata['X-Backend-Storage-Policy-Index'])
             for node, metadata in head_responses))
     expected_policy_index = new_found_policy_indexes.pop()
     self.assertEqual(orig_policy_index, expected_policy_index)
     # validate object fully deleted
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(self.account,
                                             self.container_name,
                                             self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node,
                     part,
                     self.account,
                     self.container_name,
                     self.object_name,
                     headers={
                         'X-Backend-Storage-Policy-Index': policy_index
                     })
             except direct_client.ClientException as err:
                 if err.http_status == HTTP_NOT_FOUND:
                     continue
             else:
                 self.fail('Found /%s/%s/%s in %s on %s' %
                           (self.account, self.container_name,
                            self.object_name, orig_policy_index, node))
Example No. 25
    def authorize(self, req):
        ret = super(ZodiacAuth, self).authorize(req)
        if ret is None and req.method == "GET":
            # passed swauth rules, now check zodiac rules

            # split the path
            # this should be safe since it was already checked against an error
            # in the super call
            version, account, container, obj = split_path(req.path, 1, 4, True)

            # grab our acl to use
            acl = self.zodiac_acl

            # get the current zodiac sign for this access request
            access_sign = zodiac_sign_datetime(datetime.datetime.now())

            # get the client ip
            client_addr = get_remote_client(req)

            if container:
                # there is a container so let's try to get the timestamp
                container_nodes = self.app.container_ring.get_nodes(account, container)
                if container_nodes:
                    # direct head requests return a timestamp whereas calls
                    # to the proxy do not. this might not be the best thing
                    # to do. open to suggestions.
                    try:
                        container_meta = direct_head_container(
                            container_nodes[1][0], container_nodes[0], account, container
                        )
                    except ClientException:
                        return ret
                    container_date = datetime.datetime.fromtimestamp(float(container_meta["x-timestamp"]))
                    container_sign = zodiac_sign_datetime(container_date)

                    # ensure the container sign has access rules
                    if container_sign in acl and access_sign in acl[container_sign]:
                        if client_addr not in acl[container_sign][access_sign]:
                            ret = self.denied_response(req)
                    else:
                        # sign missing from acl rules or access sign not present
                        ret = self.denied_response(req)

                    if ret is None and obj:
                        # we passed the container permissions and there is an
                        # object.
                        # get the object's store sign and check permissions
                        obj_nodes = self.app.container_ring.get_nodes(account, container, obj)
                        if obj_nodes:
                            try:
                                obj_meta = direct_head_object(
                                    obj_nodes[1][0], container_nodes[0], account, container, obj
                                )
                            except ClientException:
                                return ret
                            obj_date = datetime.datetime.fromtimestamp(float(obj_meta["x-timestamp"]))
                            obj_sign = zodiac_sign_datetime(obj_date)

                            # ensure the object sign has access rules
                            if obj_sign in acl and access_sign in acl[obj_sign]:
                                if client_addr not in acl[obj_sign][access_sign]:
                                    ret = self.denied_response(req)
                            else:
                                # object sign missing from acl rules or
                                # access sign not present
                                ret = self.denied_response(req)
        return ret
Example No. 26
    def test_sync_unexpired_object_metadata(self):
        # verify that metadata can be sync'd to a frag that has missed a POST
        # and consequently that frag appears to be expired, when in fact the
        # POST removed the x-delete-at header
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers={'x-storage-policy': self.policy.name})
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        delete_at = int(time.time() + 3)
        contents = ('body-%s' % uuid.uuid4()).encode()
        headers = {'x-delete-at': delete_at}
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          headers=headers,
                          contents=contents)
        # fail a primary
        post_fail_node = random.choice(onodes)
        post_fail_path = self.device_dir(post_fail_node)
        self.kill_drive(post_fail_path)
        # post over w/o x-delete-at
        client.post_object(self.url, self.token, self.container_name,
                           self.object_name, {'content-type': 'something-new'})
        # revive failed primary
        self.revive_drive(post_fail_path)
        # wait for the delete_at to pass, and check that it thinks the object
        # is expired
        timeout = time.time() + 5
        err = None
        while time.time() < timeout:
            try:
                direct_client.direct_head_object(
                    post_fail_node,
                    opart,
                    self.account,
                    self.container_name,
                    self.object_name,
                    headers={
                        'X-Backend-Storage-Policy-Index': int(self.policy)
                    })
            except direct_client.ClientException as client_err:
                if client_err.http_status != 404:
                    raise
                err = client_err
                break
            else:
                time.sleep(0.1)
        else:
            self.fail('Failed to get a 404 from node with expired object')
        self.assertEqual(err.http_status, 404)
        self.assertIn('X-Backend-Timestamp', err.http_headers)

        # but from the proxy we've got the whole story
        headers, body = client.get_object(self.url, self.token,
                                          self.container_name,
                                          self.object_name)
        self.assertNotIn('X-Delete-At', headers)
        self.reconstructor.once()

        # ... and all the nodes have the final unexpired state
        for node in onodes:
            headers = direct_client.direct_head_object(
                node,
                opart,
                self.account,
                self.container_name,
                self.object_name,
                headers={'X-Backend-Storage-Policy-Index': int(self.policy)})
            self.assertNotIn('X-Delete-At', headers)
Example No. 27
    def test_merge_storage_policy_index(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object()
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assert_(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(self.account,
                                                self.container_name,
                                                self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node,
                        part,
                        self.account,
                        self.container_name,
                        self.object_name,
                        headers={
                            'X-Backend-Storage-Policy-Index': policy_index
                        })
                except direct_client.ClientException:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' %
                      (self.account, self.container_name, self.object_name,
                       found_policy_indexes))
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) == 1,
            'primary nodes disagree about policy index %r' % head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node,
                    part,
                    self.account,
                    self.container_name,
                    self.object_name,
                    headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index
                    })
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' %
                          (self.account, self.container_name, self.object_name,
                           orig_policy_index))
        # use proxy to access object (bad container info might be cached...)
        timeout = time.time() + TIMEOUT
        while time.time() < timeout:
            try:
                metadata = client.head_object(self.url, self.token,
                                              self.container_name,
                                              self.object_name)
            except ClientException as err:
                if err.http_status != HTTP_NOT_FOUND:
                    raise
                time.sleep(1)
            else:
                break
        else:
            self.fail('could not HEAD /%s/%s/%s/ from policy %s '
                      'after %s seconds.' %
                      (self.account, self.container_name, self.object_name,
                       expected_policy_index, TIMEOUT))
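
The heart of the split-brain setup above is comparing the X-Backend-Storage-Policy-Index reported by each container replica. A minimal sketch of that check, assuming a standard /etc/swift ring and placeholder account/container names:

from swift.common import direct_client
from swift.common.ring import Ring

# Placeholder fixtures -- the ring path and names here are assumptions.
container_ring = Ring('/etc/swift', ring_name='container')
part, nodes = container_ring.get_nodes('AUTH_test', 'split-brain-container')

policy_indexes = set()
for node in nodes:
    metadata = direct_client.direct_head_container(
        node, part, 'AUTH_test', 'split-brain-container')
    policy_indexes.add(metadata['X-Backend-Storage-Policy-Index'])

# More than one distinct index means the replicas disagree and the
# container-reconciler still has work to do.
print('split brain' if len(policy_indexes) > 1 else 'consistent')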