Example #1
    def test_ec_primary_timeout(self):
        container_name = 'container-%s' % uuid4()
        object_name = 'object-%s' % uuid4()

        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             container_name,
                             headers=headers)

        # PUT object, should go to primary nodes
        old_contents = Body()
        client.put_object(self.url,
                          self.token,
                          container_name,
                          object_name,
                          contents=old_contents)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   container_name, object_name)

        # shut down three of the primary data nodes
        for i in range(3):
            failed_primary = onodes[i]
            failed_primary_device_path = self.device_dir(
                'object', failed_primary)
            self.kill_drive(failed_primary_device_path)

        # Indirectly (i.e., through proxy) try to GET object, it should return
        # a 503, since all primaries will Timeout and handoffs return a 404.
        try:
            client.get_object(self.url, self.token, container_name,
                              object_name)
        except client.ClientException as err:
            self.assertEqual(err.http_status, 503)
        else:
            self.fail("Expected ClientException but didn't get it")

        # Send a delete to write down tombstones in the handoff nodes
        client.delete_object(self.url, self.token, container_name, object_name)

        # Now a new GET should return 404 because the handoff nodes
        # return a 404 with a Tombstone.
        try:
            client.get_object(self.url, self.token, container_name,
                              object_name)
        except client.ClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail("Expected ClientException but didn't get it")
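Every example in this set streams its uploads through a Body helper from the probe-test module. A minimal sketch of such a helper, assuming it only needs to yield fixed-size chunks and keep an MD5 etag of everything it has produced (the real harness implementation may differ in details):

from hashlib import md5


class Body(object):
    """Iterable upload body that remembers the MD5 etag of what it yielded."""

    def __init__(self, total=3.5 * 2 ** 20):
        self.total = int(total)              # approximate bytes to produce
        self.hasher = md5()
        self.size = 0
        self.chunk = b'test' * 16 * 2 ** 10  # 64 KiB per chunk

    @property
    def etag(self):
        return self.hasher.hexdigest()

    def __iter__(self):
        return self

    def __next__(self):
        if self.size > self.total:
            raise StopIteration()
        self.size += len(self.chunk)
        self.hasher.update(self.chunk)
        return self.chunk

This is why the tests can compare contents.etag against whatever the proxy or an object server returns.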
Example #2
    def test_ec_handoff_duplicate_available(self):
        container_name = 'container-%s' % uuid4()
        object_name = 'object-%s' % uuid4()

        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(
            self.account, container_name, object_name)

        # find both primary servers that have both of their devices in
        # the primary node list
        group_nodes_by_config = defaultdict(list)
        for n in onodes:
            group_nodes_by_config[self.config_number(n)].append(n)
        double_disk_primary = []
        for config_number, node_list in group_nodes_by_config.items():
            if len(node_list) > 1:
                double_disk_primary.append((config_number, node_list))

        # sanity, in a 4+2 with 8 disks two servers will be doubled
        self.assertEqual(len(double_disk_primary), 2)

        # shut down the first double primary
        primary0_config_number, primary0_node_list = double_disk_primary[0]
        Manager(['object-server']).stop(number=primary0_config_number)

        # PUT object
        contents = Body()
        client.put_object(self.url, self.token, container_name,
                          object_name, contents=contents)

        # sanity fetch two frags on handoffs
        handoff_frags = []
        for node in self.object_ring.get_more_nodes(opart):
            headers, data = direct_client.direct_get_object(
                node, opart, self.account, container_name, object_name,
                headers={'X-Backend-Storage-Policy-Index': int(self.policy)}
            )
            handoff_frags.append((node, headers, data))

        # bring the first double primary back, and fail the other one
        Manager(['object-server']).start(number=primary0_config_number)
        primary1_config_number, primary1_node_list = double_disk_primary[1]
        Manager(['object-server']).stop(number=primary1_config_number)

        # we can still GET the object
        resp_etag = self.get_object(container_name, object_name)
        self.assertEqual(resp_etag, contents.etag)

        # now start to "revert" the first handoff frag
        node = primary0_node_list[0]
        handoff_node, headers, data = handoff_frags[0]
        # N.B. object server api returns quoted ETag
        headers['ETag'] = headers['Etag'].strip('"')
        headers['X-Backend-Storage-Policy-Index'] = int(self.policy)
        direct_client.direct_put_object(
            node, opart,
            self.account, container_name, object_name,
            contents=data, headers=headers)

        # sanity - check available frags
        frag2count = self._check_nodes(opart, onodes,
                                       container_name, object_name)
        # ... five frags total
        self.assertEqual(sum(frag2count.values()), 5)
        # ... only 4 unique indexes
        self.assertEqual(len(frag2count), 4)

        # we can still GET the object
        resp_etag = self.get_object(container_name, object_name)
        self.assertEqual(resp_etag, contents.etag)

        # ... but we need both handoffs or we get an error
        for handoff_node, hdrs, data in handoff_frags:
            Manager(['object-server']).stop(
                number=self.config_number(handoff_node))
            with self.assertRaises(Exception) as cm:
                self.get_object(container_name, object_name)
            self.assertIn(cm.exception.http_status, (404, 503))
            Manager(['object-server']).start(
                number=self.config_number(handoff_node))

        # fix everything
        Manager(['object-server']).start(number=primary1_config_number)
        Manager(["object-reconstructor"]).once()

        # sanity - check available frags
        frag2count = self._check_nodes(opart, onodes,
                                       container_name, object_name)
        # ... six frags total
        self.assertEqual(sum(frag2count.values()), 6)
        # ... all six unique
        self.assertEqual(len(frag2count), 6)
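The assertion above that a GET still succeeds with five fragments but only four unique indexes follows from the 4+2 erasure code itself: any four distinct fragments are enough to decode the object. A standalone illustration using PyECLib directly (an assumption for clarity; the probe tests never call PyECLib themselves, and this presumes the liberasurecode_rs_vand backend is installed):

from pyeclib.ec_iface import ECDriver

# 4 data + 2 parity fragments, matching a typical 4+2 EC storage policy
ec = ECDriver(k=4, m=2, ec_type='liberasurecode_rs_vand')

original = b'some object data ' * 1024
fragments = ec.encode(original)   # 6 fragments, indexes 0..5
assert len(fragments) == 6

# any 4 distinct fragments decode back to the original body; a duplicate
# of an index already held does not count towards the 4
assert ec.decode(fragments[2:]) == original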
Example #3
    def test_ec_handoff_overwrite(self):
        container_name = 'container-%s' % uuid4()
        object_name = 'object-%s' % uuid4()

        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url, self.token, container_name,
                             headers=headers)

        # PUT object
        old_contents = Body()
        client.put_object(self.url, self.token, container_name,
                          object_name, contents=old_contents)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(
            self.account, container_name, object_name)

        # shut down one of the primary data nodes
        failed_primary = random.choice(onodes)
        failed_primary_device_path = self.device_dir(failed_primary)
        # first read its ec etag value for future reference - this may not
        # equal old_contents.etag if for example the proxy has crypto enabled
        req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
        headers = direct_client.direct_head_object(
            failed_primary, opart, self.account, container_name,
            object_name, headers=req_headers)
        old_backend_etag = headers['X-Object-Sysmeta-EC-Etag']

        self.kill_drive(failed_primary_device_path)

        # overwrite our object with some new data
        new_contents = Body()
        client.put_object(self.url, self.token, container_name,
                          object_name, contents=new_contents)
        self.assertNotEqual(new_contents.etag, old_contents.etag)

        # restore failed primary device
        self.revive_drive(failed_primary_device_path)

        # sanity - failed node has old contents
        req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
        headers = direct_client.direct_head_object(
            failed_primary, opart, self.account, container_name,
            object_name, headers=req_headers)
        self.assertEqual(headers['X-Object-Sysmeta-EC-Etag'],
                         old_backend_etag)

        # we have 1 primary with wrong old etag, and we should have 5 with
        # new etag plus a handoff with the new etag, so killing 2 other
        # primaries forces proxy to try to GET from all primaries plus handoff.
        other_nodes = [n for n in onodes if n != failed_primary]
        random.shuffle(other_nodes)
        # grab the value of the new content's ec etag for future reference
        headers = direct_client.direct_head_object(
            other_nodes[0], opart, self.account, container_name,
            object_name, headers=req_headers)
        new_backend_etag = headers['X-Object-Sysmeta-EC-Etag']
        for node in other_nodes[:2]:
            self.kill_drive(self.device_dir(node))

        # sanity, after taking out two primaries we should be down to
        # only four primaries, one of which has the old etag - but we
        # also have a handoff with the new etag out there
        found_frags = defaultdict(int)
        req_headers = {'X-Backend-Storage-Policy-Index': int(self.policy)}
        for node in onodes + list(self.object_ring.get_more_nodes(opart)):
            try:
                headers = direct_client.direct_head_object(
                    node, opart, self.account, container_name,
                    object_name, headers=req_headers)
            except Exception:
                continue
            found_frags[headers['X-Object-Sysmeta-EC-Etag']] += 1
        self.assertEqual(found_frags, {
            new_backend_etag: 4,  # this should be enough to rebuild!
            old_backend_etag: 1,
        })

        # clear node error limiting
        Manager(['proxy']).restart()

        resp_etag = self.get_object(container_name, object_name)
        self.assertEqual(resp_etag, new_contents.etag)
Example #4
    def test_revert_object(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        hnodes = self.object_ring.get_more_nodes(opart)

        # kill 2 (a parity count) of the primary nodes so we can force
        # data onto handoffs; we do that by renaming dev dirs to
        # induce 507s
        p_dev1 = self.device_dir(onodes[0])
        p_dev2 = self.device_dir(onodes[1])
        self.kill_drive(p_dev1)
        self.kill_drive(p_dev2)

        # PUT object
        contents = Body()
        headers = {'x-object-meta-foo': 'meta-foo'}
        headers_post = {'x-object-meta-bar': 'meta-bar'}
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=contents,
                          headers=headers)
        client.post_object(self.url,
                           self.token,
                           self.container_name,
                           self.object_name,
                           headers=headers_post)
        # (Some versions of?) swiftclient will mutate the headers dict on post
        headers_post.pop('X-Auth-Token', None)

        # these primaries can't serve the data any more, we expect 507
        # here and not 404 because we're using mount_check to kill nodes
        for onode in (onodes[0], onodes[1]):
            try:
                self.direct_get(onode, opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 507)
            else:
                self.fail('Node data on %r was not fully destroyed!' %
                          (onode, ))

        # now take out another primary
        p_dev3 = self.device_dir(onodes[2])
        self.kill_drive(p_dev3)

        # this node can't serve the data any more
        try:
            self.direct_get(onodes[2], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 507)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[2],))

        # make sure we can still GET the object and it's correct;
        # we're now pulling from handoffs and reconstructing
        etag = self.proxy_get()
        self.assertEqual(etag, contents.etag)

        # rename the dev dirs so they don't 507 anymore
        self.revive_drive(p_dev1)
        self.revive_drive(p_dev2)
        self.revive_drive(p_dev3)

        # fire up reconstructor on handoff nodes only
        for hnode in hnodes:
            hnode_id = (hnode['port'] - 6000) // 10
            self.reconstructor.once(number=hnode_id)

        # spot check that the revived primaries have data again
        for onode in (onodes[0], onodes[2]):
            self.direct_get(onode, opart)

        # check meta
        meta = client.head_object(self.url, self.token, self.container_name,
                                  self.object_name)
        for key in headers_post:
            self.assertIn(key, meta)
            self.assertEqual(meta[key], headers_post[key])

        # handoffs are empty
        for hnode in hnodes:
            try:
                self.direct_get(hnode, opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 404)
            else:
                self.fail('Node data on %r was not fully destroyed!' %
                          (hnode, ))
Example #5
    def test_reconstruct_from_reverted_fragment_archive(self):
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)

        # find a primary server that only has one of its devices in the
        # primary node list
        group_nodes_by_config = defaultdict(list)
        for n in onodes:
            group_nodes_by_config[self.config_number(n)].append(n)
        for config_number, node_list in group_nodes_by_config.items():
            if len(node_list) == 1:
                break
        else:
            self.fail('ring balancing did not use all available nodes')
        primary_node = node_list[0]

        # ... and 507 its device
        primary_device = self.device_dir(primary_node)
        self.kill_drive(primary_device)

        # PUT object
        contents = Body()
        etag = client.put_object(self.url,
                                 self.token,
                                 self.container_name,
                                 self.object_name,
                                 contents=contents)
        self.assertEqual(contents.etag, etag)

        # fix the primary device and sanity GET
        self.revive_drive(primary_device)
        self.assertEqual(etag, self.proxy_get())

        # find a handoff holding the fragment
        for hnode in self.object_ring.get_more_nodes(opart):
            try:
                reverted_fragment_etag = self.direct_get(hnode, opart)
            except direct_client.DirectClientException as err:
                if err.http_status != 404:
                    raise
            else:
                break
        else:
            self.fail('Unable to find handoff fragment!')

        # we'll force the handoff device to revert instead of potentially
        # racing with rebuild by deleting any other fragments that may be on
        # the same server
        handoff_fragment_etag = None
        for node in onodes:
            if self.is_local_to(node, hnode):
                # we'll keep track of the etag of this fragment we're removing
                # in case we need it later (cue foreshadowing music)...
                try:
                    handoff_fragment_etag = self.direct_get(node, opart)
                except direct_client.DirectClientException as err:
                    if err.http_status != 404:
                        raise
                    # this just means our handoff device was on the same
                    # machine as the primary!
                    continue
                # use the primary node's device - not the hnode device
                part_dir = self.storage_dir(node, part=opart)
                shutil.rmtree(part_dir, True)

        # revert from handoff device with reconstructor
        self.reconstructor.once(number=self.config_number(hnode))

        # verify fragment reverted to primary server
        self.assertEqual(reverted_fragment_etag,
                         self.direct_get(primary_node, opart))

        # now we'll remove some data on one of the primary node's partners
        partner = random.choice(
            reconstructor._get_partners(primary_node['index'], onodes))

        try:
            rebuilt_fragment_etag = self.direct_get(partner, opart)
        except direct_client.DirectClientException as err:
            if err.http_status != 404:
                raise
            # partner already had its fragment removed
            if (handoff_fragment_etag is not None
                    and self.is_local_to(hnode, partner)):
                # oh, well that makes sense then...
                rebuilt_fragment_etag = handoff_fragment_etag
            else:
                # I wonder what happened?
                self.fail('Partner inexplicably missing fragment!')
        part_dir = self.storage_dir(partner, part=opart)
        shutil.rmtree(part_dir, True)

        # sanity, it's gone
        try:
            self.direct_get(partner, opart)
        except direct_client.DirectClientException as err:
            if err.http_status != 404:
                raise
        else:
            self.fail('successful GET of removed partner fragment archive!?')

        # and force the primary node to do a rebuild
        self.reconstructor.once(number=self.config_number(primary_node))

        # and validate the partner's rebuilt_fragment_etag
        try:
            self.assertEqual(rebuilt_fragment_etag,
                             self.direct_get(partner, opart))
        except direct_client.DirectClientException as err:
            if err.http_status != 404:
                raise
            else:
                self.fail('Did not find rebuilt fragment on partner node')
Example #6
    def test_delete_propagate(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        hnodes = list(
            itertools.islice(self.object_ring.get_more_nodes(opart), 2))

        # PUT object
        contents = Body()
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=contents)

        # now let's shut down a couple of primaries
        failed_nodes = random.sample(onodes, 2)
        for node in failed_nodes:
            self.kill_drive(self.device_dir(node))

        # Write tombstones over the nodes that are still online
        client.delete_object(self.url, self.token, self.container_name,
                             self.object_name)

        # spot check the primary nodes that are still online
        delete_timestamp = None
        for node in onodes:
            if node in failed_nodes:
                continue
            try:
                self.direct_get(node, opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 404)
                delete_timestamp = err.http_headers['X-Backend-Timestamp']
            else:
                self.fail('Node data on %r was not fully destroyed!' %
                          (node, ))

        # run the reconstructor on the handoff node multiple times until
        # tombstone is pushed out - each handoff node syncs to a few
        # primaries each time
        iterations = 0
        while iterations < 52:
            self.reconstructor.once(number=self.config_number(hnodes[0]))
            iterations += 1
            # see if the tombstone is reverted
            try:
                self.direct_get(hnodes[0], opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 404)
                if 'X-Backend-Timestamp' not in err.http_headers:
                    # this means the tombstone is *gone* so it's reverted
                    break
        else:
            self.fail('Still found tombstone on %r after %s iterations' %
                      (hnodes[0], iterations))

        # tombstone is still on the *second* handoff
        try:
            self.direct_get(hnodes[1], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
            self.assertEqual(err.http_headers['X-Backend-Timestamp'],
                             delete_timestamp)
        else:
            self.fail('Found obj data on %r' % hnodes[1])

        # repair the primaries
        self.revive_drive(self.device_dir(failed_nodes[0]))
        self.revive_drive(self.device_dir(failed_nodes[1]))

        # run reconstructor on second handoff
        self.reconstructor.once(number=self.config_number(hnodes[1]))

        # verify tombstone is reverted on the first pass
        try:
            self.direct_get(hnodes[1], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
            self.assertNotIn('X-Backend-Timestamp', err.http_headers)
        else:
            self.fail('Found obj data on %r' % hnodes[1])

        # sanity make sure proxy get can't find it
        try:
            self.proxy_get()
        except Exception as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Proxy GET unexpectedly succeeded for deleted object')
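The direct_get helper used throughout these tests belongs to the probe harness. Roughly, and this is an assumption about its shape rather than the exact upstream code, it wraps direct_client.direct_get_object with the backend policy header and returns the MD5 of the fragment body:

from hashlib import md5

from swift.common import direct_client


def direct_get(node, part, account, container, obj, policy_index):
    """Fetch one fragment archive straight from an object server and
    return its MD5; 404/507 surface as DirectClientException."""
    req_headers = {'X-Backend-Storage-Policy-Index': policy_index}
    headers, body = direct_client.direct_get_object(
        node, part, account, container, obj,
        headers=req_headers, resp_chunk_size=64 * 2 ** 10)
    hasher = md5()
    for chunk in body:
        hasher.update(chunk)
    return hasher.hexdigest()

Because DirectClientException carries the backend response headers, the tests above can inspect err.http_headers['X-Backend-Timestamp'] on a 404 to tell a node that still holds a tombstone from one that has nothing at all.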
Example #7
    def test_delete_propagate(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        hnodes = list(
            itertools.islice(self.object_ring.get_more_nodes(opart), 2))

        # PUT object
        contents = Body()
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=contents)

        # now let's shut down a couple of primaries
        failed_nodes = random.sample(onodes, 2)
        for node in failed_nodes:
            self.kill_drive(self.device_dir('object', node))

        # Write tombstones over the nodes that are still online
        client.delete_object(self.url, self.token, self.container_name,
                             self.object_name)

        # spot check the primary nodes that are still online
        delete_timestamp = None
        for node in onodes:
            if node in failed_nodes:
                continue
            try:
                self.direct_get(node, opart)
            except direct_client.DirectClientException as err:
                self.assertEqual(err.http_status, 404)
                delete_timestamp = err.http_headers['X-Backend-Timestamp']
            else:
                self.fail('Node data on %r was not fully destroyed!' %
                          (node, ))

        # repair the first primary
        self.revive_drive(self.device_dir('object', failed_nodes[0]))

        # run the reconstructor on the *second* handoff node
        self.reconstructor.once(number=self.config_number(hnodes[1]))

        # make sure its tombstone was pushed out
        try:
            self.direct_get(hnodes[1], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
            self.assertNotIn('X-Backend-Timestamp', err.http_headers)
        else:
            self.fail('Found obj data on %r' % hnodes[1])

        # ... and it's on the first failed (now repaired) primary
        try:
            self.direct_get(failed_nodes[0], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
            self.assertEqual(err.http_headers['X-Backend-Timestamp'],
                             delete_timestamp)
        else:
            self.fail('Found obj data on %r' % failed_nodes[0])

        # repair the second primary
        self.revive_drive(self.device_dir('object', failed_nodes[1]))

        # run the reconstructor on the *first* handoff node
        self.reconstructor.once(number=self.config_number(hnodes[0]))

        # make sure its tombstone was pushed out
        try:
            self.direct_get(hnodes[0], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
            self.assertNotIn('X-Backend-Timestamp', err.http_headers)
        else:
            self.fail('Found obj data on %r' % hnodes[0])

        # ... and now it's on the second failed primary too!
        try:
            self.direct_get(failed_nodes[1], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
            self.assertEqual(err.http_headers['X-Backend-Timestamp'],
                             delete_timestamp)
        else:
            self.fail('Found obj data on %r' % failed_nodes[1])

        # sanity make sure proxy get can't find it
        try:
            self.proxy_get()
        except Exception as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Proxy GET unexpectedly succeeded for deleted object')
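Example #8 below exercises non-durable fragments (written with X-Backend-No-Commit). On disk the difference shows up in the .data filename: an EC fragment archive carries a fragment index and, once committed, a durable marker. A small sketch of that naming convention, given here as an illustrative assumption (the authoritative parsing lives in swift's EC diskfile code):

import re

# Assumed EC .data naming, e.g.:
#   1693427712.12345#2#d.data  -> frag index 2, durable
#   1693427712.12345#2.data    -> frag index 2, not yet durable
EC_DATA_RE = re.compile(
    r'^(?P<ts>\d+\.\d+)#(?P<frag_index>\d+)(?P<durable>#d)?\.data$')


def parse_ec_data_filename(name):
    match = EC_DATA_RE.match(name)
    if not match:
        return None
    return {'timestamp': match.group('ts'),
            'frag_index': int(match.group('frag_index')),
            'durable': bool(match.group('durable'))}


assert parse_ec_data_filename('1693427712.12345#2#d.data')['durable']
assert not parse_ec_data_filename('1693427712.12345#2.data')['durable']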
Example #8
    def test_handoff_non_durable(self):
        # verify that reconstructor reverts non-durable frags from handoff to
        # primary (and also durable frag of same object on same handoff) and
        # cleans up non-durable data files on handoffs after revert
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        pdevs = [self.device_dir(onode) for onode in onodes]
        hnodes = list(
            itertools.islice(self.object_ring.get_more_nodes(opart), 2))

        # kill a primary node so we can force data onto a handoff
        self.kill_drive(pdevs[0])

        # PUT object at t1
        contents = Body(total=3.5 * 2**20)
        headers = {'x-object-meta-foo': 'meta-foo'}
        headers_post = {'x-object-meta-bar': 'meta-bar'}
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=contents,
                          headers=headers)
        client.post_object(self.url,
                           self.token,
                           self.container_name,
                           self.object_name,
                           headers=headers_post)
        # (Some versions of?) swiftclient will mutate the headers dict on post
        headers_post.pop('X-Auth-Token', None)

        # this primary can't serve the data; we expect 507 here and not 404
        # because we're using mount_check to kill nodes
        self.assert_direct_get_fails(onodes[0], opart, 507)
        # these primaries and first handoff do have the data
        for onode in (onodes[1:]):
            self.assert_direct_get_succeeds(onode, opart)
        _hdrs, older_frag_etag = self.assert_direct_get_succeeds(
            hnodes[0], opart)
        self.assert_direct_get_fails(hnodes[1], opart, 404)

        # make sure we can GET the object; 5 primaries + 1 handoff have frags
        headers, older_obj_etag = self.proxy_get()
        self.assertEqual(contents.etag, older_obj_etag)
        self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))

        # PUT object at t2; make all frags non-durable so that the previous
        # durable frags at t1 remain on object server; use InternalClient so
        # that x-backend-no-commit is passed through
        internal_client = self.make_internal_client()
        contents2 = Body(total=2.5 * 2**20)  # different content
        self.assertNotEqual(contents2.etag, older_obj_etag)  # sanity check
        headers = {
            'x-backend-no-commit': 'True',
            'x-object-meta-bar': 'meta-bar-new'
        }
        internal_client.upload_object(contents2, self.account,
                                      self.container_name.decode('utf8'),
                                      self.object_name.decode('utf8'), headers)
        # GET should still return the older durable object
        headers, obj_etag = self.proxy_get()
        self.assertEqual(older_obj_etag, obj_etag)
        self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))
        # on handoff we have older durable and newer non-durable
        _hdrs, frag_etag = self.assert_direct_get_succeeds(hnodes[0], opart)
        self.assertEqual(older_frag_etag, frag_etag)
        _hdrs, newer_frag_etag = self.assert_direct_get_succeeds(
            hnodes[0], opart, require_durable=False)
        self.assertNotEqual(older_frag_etag, newer_frag_etag)

        # now make all the newer frags durable only on the 5 primaries
        self.assertEqual(5, self.make_durable(onodes[1:], opart))
        # now GET will return the newer object
        headers, newer_obj_etag = self.proxy_get()
        self.assertEqual(contents2.etag, newer_obj_etag)
        self.assertNotEqual(older_obj_etag, newer_obj_etag)
        self.assertEqual('meta-bar-new', headers.get('x-object-meta-bar'))

        # fix the 507'ing primary
        self.revive_drive(pdevs[0])

        # fire up reconstructor on handoff node only; commit_window is
        # set to zero to ensure the nondurable handoff frag is purged
        hnode_id = self.config_number(hnodes[0])
        self.run_custom_daemon(ObjectReconstructor, 'object-reconstructor',
                               hnode_id, {'commit_window': '0'})

        # primary now has only the newer non-durable frag
        self.assert_direct_get_fails(onodes[0], opart, 404)
        _hdrs, frag_etag = self.assert_direct_get_succeeds(
            onodes[0], opart, require_durable=False)
        self.assertEqual(newer_frag_etag, frag_etag)

        # handoff has only the older durable
        _hdrs, frag_etag = self.assert_direct_get_succeeds(hnodes[0], opart)
        self.assertEqual(older_frag_etag, frag_etag)
        headers, frag_etag = self.assert_direct_get_succeeds(
            hnodes[0], opart, require_durable=False)
        self.assertEqual(older_frag_etag, frag_etag)
        self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))

        # fire up reconstructor on handoff node only, again
        self.reconstructor.once(number=hnode_id)

        # primary now has the newer non-durable frag and the older durable frag
        headers, frag_etag = self.assert_direct_get_succeeds(onodes[0], opart)
        self.assertEqual(older_frag_etag, frag_etag)
        self.assertEqual('meta-bar', headers.get('x-object-meta-bar'))
        headers, frag_etag = self.assert_direct_get_succeeds(
            onodes[0], opart, require_durable=False)
        self.assertEqual(newer_frag_etag, frag_etag)
        self.assertEqual('meta-bar-new', headers.get('x-object-meta-bar'))

        # handoff has nothing
        self.assert_direct_get_fails(hnodes[0],
                                     opart,
                                     404,
                                     require_durable=False)

        # kill all but first two primaries
        for pdev in pdevs[2:]:
            self.kill_drive(pdev)
        # fire up reconstructor on the remaining primary[1]; without the
        # other primaries, primary[1] cannot rebuild the frag but it can let
        # primary[0] know that its non-durable frag can be made durable
        self.reconstructor.once(number=self.config_number(onodes[1]))

        # first primary now has a *durable* *newer* frag - it *was* useful to
        # sync the non-durable!
        headers, frag_etag = self.assert_direct_get_succeeds(onodes[0], opart)
        self.assertEqual(newer_frag_etag, frag_etag)
        self.assertEqual('meta-bar-new', headers.get('x-object-meta-bar'))

        # revive primaries (in case we want to debug)
        for pdev in pdevs[2:]:
            self.revive_drive(pdev)
Example #9
    def test_delete_propogate(self):
        # create EC container
        headers = {'X-Storage-Policy': self.policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

        # get our node lists
        opart, onodes = self.object_ring.get_nodes(self.account,
                                                   self.container_name,
                                                   self.object_name)
        hnodes = self.object_ring.get_more_nodes(opart)
        p_dev2 = self.device_dir('object', onodes[1])

        # PUT object
        contents = Body()
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=contents)

        # now let's shut one down
        self.kill_drive(p_dev2)

        # delete on the ones that are left
        client.delete_object(self.url, self.token, self.container_name,
                             self.object_name)

        # spot check a node
        try:
            self.direct_get(onodes[0], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[0], ))

        # re-enable the failed node
        self.revive_drive(p_dev2)

        # propagate the delete...
        # fire up reconstructor on handoff nodes only
        for hnode in hnodes:
            hnode_id = (hnode['port'] - 6000) // 10
            self.reconstructor.once(number=hnode_id)

        # check the failed node to make sure it's gone
        try:
            self.direct_get(onodes[1], opart)
        except direct_client.DirectClientException as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Node data on %r was not fully destroyed!' %
                      (onodes[1],))

        # make sure proxy get can't find it
        try:
            self.proxy_get()
        except Exception as err:
            self.assertEqual(err.http_status, 404)
        else:
            self.fail('Proxy GET unexpectedly succeeded for deleted object')
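The (hnode['port'] - 6000) // 10 expression used above assumes the legacy SAIO layout, where the four object servers listen on ports 6010, 6020, 6030 and 6040, so the config number can be read straight off the port; the newer examples call self.config_number(node) instead, which resolves the server properly. A sketch of that assumption:

def config_number_from_port(port):
    """Map a legacy SAIO object-server port to its config number,
    e.g. 6030 -> the server run from object-server/3.conf."""
    return (port - 6000) // 10


assert config_number_from_port(6010) == 1
assert config_number_from_port(6040) == 4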