Example No. 1
    def _check_reload(self, server_name, ip, port):
        manager = Manager([server_name])
        manager.start()

        starting_pids = {
            pid
            for server in manager.servers
            for (_, pid) in server.iter_pid_files()
        }

        body = b'test' * 10
        conn = httplib.HTTPConnection('%s:%s' % (ip, port))

        # sanity request
        putrequest(conn, 'PUT', 'blah', headers={'Content-Length': len(body)})
        conn.send(body)
        resp = conn.getresponse()
        self.assertEqual(resp.status // 100, 4)
        resp.read()

        # Start the request before reloading...
        putrequest(conn, 'PUT', 'blah', headers={'Content-Length': len(body)})

        manager.reload()

        post_reload_pids = {
            pid
            for server in manager.servers
            for (_, pid) in server.iter_pid_files()
        }

        # none of the pids we started with are being tracked after reload
        msg = 'expected all pids from %r to have died, but found %r' % (
            starting_pids, post_reload_pids)
        self.assertFalse(starting_pids & post_reload_pids, msg)

        # ... and make sure we can finish what we were doing, and even
        # start part of a new request
        conn.send(body)
        resp = conn.getresponse()
        self.assertEqual(resp.status // 100, 4)
        # We can even read the body
        self.assertTrue(resp.read())

        # After this, we're in a funny spot. With eventlet 0.22.0, the
        # connection's now closed, but with prior versions we could keep
        # going indefinitely. See https://bugs.launchpad.net/swift/+bug/1792615

        # Close our connection, to make sure old eventlet shuts down
        conn.close()

        # sanity
        post_close_pids = {
            pid
            for server in manager.servers
            for (_, pid) in server.iter_pid_files()
        }
        self.assertEqual(post_reload_pids, post_close_pids)
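
The reload checks in this listing all follow the same pattern: capture the set of worker PIDs before and after Manager.reload() and assert the two sets are disjoint, while keeping an in-flight HTTP request open across the reload to show it still completes. Below is a minimal sketch of that PID bookkeeping, assuming Manager comes from swift.common.manager (the snippets do not show their imports) and using 'proxy-server' purely as an illustrative server name.

from swift.common.manager import Manager

def tracked_pids(manager):
    # iter_pid_files() yields (pid_file, pid) pairs, as in the examples above
    return {pid for server in manager.servers
            for (_, pid) in server.iter_pid_files()}

manager = Manager(['proxy-server'])
manager.start()
before = tracked_pids(manager)
manager.reload()                       # graceful reload: old workers retire
after = tracked_pids(manager)
assert not (before & after), 'expected all pre-reload pids to have died'
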
Example No. 2
 def test_main(self):
     reconciler = Manager(['container-reconciler'])
     with spawn_services(self.ip_ports) as q:
         reconciler.start()
         # wait for the reconciler to connect
         q.get()
         # once it's hung in our connection - send it sig term
         print('Attempting to stop reconciler!')
         reconciler.stop()
     self.assertEqual(1, reconciler.status())
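
Example No. 2 exercises the simpler Manager lifecycle: start the daemon, stop it with SIGTERM while spawn_services (a test helper not shown here) holds its backend connection open, then confirm it is gone. A minimal sketch of that start/stop/status cycle, again assuming swift.common.manager.Manager:

from swift.common.manager import Manager

reconciler = Manager(['container-reconciler'])
reconciler.start()                 # launch the daemon process(es)
err = reconciler.stop()            # send SIGTERM; a truthy return means stop failed
assert not err
assert reconciler.status() == 1    # the test above treats 1 as "not running"
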
Example No. 4
    def _check_reload(self, server_name, ip, port):
        manager = Manager([server_name])
        manager.start()

        starting_pids = set(pid for server in manager.servers
                            for (_, pid) in server.iter_pid_files())

        body = 'test' * 10
        conn = httplib.HTTPConnection('%s:%s' % (ip, port))

        # sanity request
        putrequest(conn, 'PUT', 'blah',
                   headers={'Content-Length': len(body)})
        conn.send(body)
        resp = conn.getresponse()
        self.assertEqual(resp.status // 100, 4)
        resp.read()

        # Start the request before reloading...
        putrequest(conn, 'PUT', 'blah',
                   headers={'Content-Length': len(body)})

        manager.reload()

        post_reload_pids = set(pid for server in manager.servers
                               for (_, pid) in server.iter_pid_files())

        # none of the pids we started with are being tracked after reload
        msg = 'expected all pids from %r to have died, but found %r' % (
            starting_pids, post_reload_pids)
        self.assertFalse(starting_pids & post_reload_pids, msg)

        # ... and make sure we can finish what we were doing, and even
        # start part of a new request
        conn.send(body)
        resp = conn.getresponse()
        self.assertEqual(resp.status // 100, 4)
        # We can even read the body
        self.assertTrue(resp.read())

        # After this, we're in a funny spot. With eventlet 0.22.0, the
        # connection's now closed, but with prior versions we could keep
        # going indefinitely. See https://bugs.launchpad.net/swift/+bug/1792615

        # Close our connection, to make sure old eventlet shuts down
        conn.close()

        # sanity
        post_close_pids = set(pid for server in manager.servers
                              for (_, pid) in server.iter_pid_files())
        self.assertEqual(post_reload_pids, post_close_pids)
Example No. 5
    def _check_reload(self, server_name, ip, port):
        manager = Manager([server_name])
        manager.start()

        starting_pids = set(pid for server in manager.servers
                            for (_, pid) in server.iter_pid_files())

        body = 'test' * 10
        conn = httplib.HTTPConnection('%s:%s' % (ip, port))

        # sanity request
        putrequest(conn, 'PUT', 'blah',
                   headers={'Content-Length': len(body)})
        conn.send(body)
        resp = conn.getresponse()
        self.assertEqual(resp.status // 100, 4)
        resp.read()

        manager.reload()

        post_reload_pids = set(pid for server in manager.servers
                               for (_, pid) in server.iter_pid_files())

        # none of the pids we started with are being tracked after reload
        msg = 'expected all pids from %r to have died, but found %r' % (
            starting_pids, post_reload_pids)
        self.assertFalse(starting_pids & post_reload_pids, msg)

        # ... and yet we can keep using the same connection!
        putrequest(conn, 'PUT', 'blah',
                   headers={'Content-Length': len(body)})
        conn.send(body)
        resp = conn.getresponse()
        self.assertEqual(resp.status // 100, 4)
        resp.read()

        # close our connection
        conn.close()

        # sanity
        post_close_pids = set(pid for server in manager.servers
                              for (_, pid) in server.iter_pid_files())
        self.assertEqual(post_reload_pids, post_close_pids)
Example No. 7
class TestObjectExpirer(ReplProbeTest):
    def setUp(self):
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def _check_obj_in_container_listing(self):
        for obj in self.client.iter_objects(self.account, self.container_name):

            if self.object_name == obj['name']:
                return True

        return False

    @unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice(
            [p for p in ENABLED_POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertIn('x-backend-timestamp', metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4, ),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
                                   create_timestamp)

    def test_expirer_object_should_not_be_expired(self):

        # The current object-expirer uses the x-if-delete-at header to verify
        # that an object may be deleted. If an object either has no
        # x-delete-at metadata, or has an x-delete-at value different from
        # the x-if-delete-at value, the expirer's DELETE fails with
        # 412 Precondition Failed. However, if some of the object's replicas
        # are on handoff nodes, the expirer can still write a tombstone whose
        # timestamp equals x-delete-at, and consistency is later resolved in
        # favor of the newer timestamp (in particular, an overwrite without
        # x-delete-at). This test asserts that, at a minimum, an overwritten
        # object whose timestamp is later than the original expiration date
        # remains safe.

        def put_object(headers):
            # use the internal client to PUT objects so that the X-Timestamp
            # header takes effect
            headers['Content-Length'] = '0'
            path = self.client.make_path(self.account, self.container_name,
                                         self.object_name)
            try:
                self.client.make_request('PUT', path, headers, (2, ))
            except UnexpectedResponse as e:
                self.fail('Expected 201 for PUT object but got %s' %
                          e.resp.status)

        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated)
        #   < T(expirer_executed)
        # The recreated obj should appear in any split-brain case

        obj_brain.put_container()

        # T(obj_deleted with x-delete-at)
        # object-server accepts req only if X-Delete-At is later than 'now'
        # so here, T(obj_created) < T(obj_deleted with x-delete-at)
        now = time.time()
        delete_at = int(now + 2.0)
        recreate_at = delete_at + 1.0
        put_object(headers={
            'X-Delete-At': str(delete_at),
            'X-Timestamp': Timestamp(now).normal
        })

        # stop some object servers to create a situation where the
        # object-expirer can put its tombstone on the primary nodes.
        obj_brain.stop_primary_half()

        # increment the X-Timestamp explicitly
        # (will be T(obj_deleted with x-delete-at) < T(obj_recreated))
        put_object(
            headers={
                'X-Object-Meta-Expired': 'False',
                'X-Timestamp': Timestamp(recreate_at).normal
            })

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # sanity, the newer object is still there
        try:
            metadata = self.client.get_object_metadata(self.account,
                                                       self.container_name,
                                                       self.object_name)
        except UnexpectedResponse as e:
            self.fail('Expected 200 for HEAD object but got %s' %
                      e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

        # some object servers recovered
        obj_brain.start_primary_half()

        # sleep until after recreate_at
        while time.time() <= recreate_at:
            time.sleep(0.1)
        # Now, expirer runs at the time after obj is recreated
        self.expirer.once()

        # verify that original object was deleted by expirer
        obj_brain.stop_handoff_half()
        try:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4, ))
        except UnexpectedResponse as e:
            self.fail('Expected 404 for HEAD object but got %s' %
                      e.resp.status)
        obj_brain.start_handoff_half()

        # and inconsistent state of objects is recovered by replicator
        Manager(['object-replicator']).once()

        # check if you can get recreated object
        try:
            metadata = self.client.get_object_metadata(self.account,
                                                       self.container_name,
                                                       self.object_name)
        except UnexpectedResponse as e:
            self.fail('Expected 200 for HEAD object but got %s' %
                      e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

    def _test_expirer_delete_outdated_object_version(self, object_exists):
        # This test simulates a case where the expirer tries to delete
        # an outdated version of an object.
        # One case is where the expirer gets a 404, whereas the newest version
        # of the object is offline.
        # Another case is where the expirer gets a 412, since the old version
        # of the object mismatches the expiration time sent by the expirer.
        # In any of these cases, the expirer should retry deleting the object
        # later, for as long as a reclaim age has not passed.
        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        obj_brain.put_container()

        if object_exists:
            obj_brain.put_object()

        # currently, the object either doesn't exist, or does not have
        # an expiration

        # stop primary servers and put a newer version of the object, this
        # time with an expiration. only the handoff servers will have
        # the new version
        obj_brain.stop_primary_half()
        now = time.time()
        delete_at = int(now + 2.0)
        obj_brain.put_object({'X-Delete-At': str(delete_at)})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()

        # update object record in the container listing
        Manager(['container-replicator']).once()

        # take handoff servers down, and bring up the outdated primary servers
        obj_brain.start_primary_half()
        obj_brain.stop_handoff_half()

        # wait until object expiration time
        while time.time() <= delete_at:
            time.sleep(0.1)

        # run expirer against the outdated servers. it should fail since
        # the outdated version does not match the expiration time
        self.expirer.once()

        # bring all servers up, and run replicator to update servers
        obj_brain.start_handoff_half()
        Manager(['object-replicator']).once()

        # verify the deletion has failed by checking the container listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # run expirer again, delete should now succeed
        self.expirer.once()

        # this is mainly to paper over lp bug #1652323
        self.get_to_final_state()

        # verify the deletion by checking the container listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

    def test_expirer_delete_returns_outdated_404(self):
        self._test_expirer_delete_outdated_object_version(object_exists=False)

    def test_expirer_delete_returns_outdated_412(self):
        self._test_expirer_delete_outdated_object_version(object_exists=True)
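
All of the expirer tests above rely on the same X-Delete-At mechanics: the header must carry a Unix timestamp in the future, and the tests busy-wait until that time has passed before running the expirer. A small sketch of that setup (helper calls omitted; the header is passed to put_object() exactly as in the tests):

import time

delete_at = int(time.time() + 2.0)           # object becomes expirable then
headers = {'X-Delete-At': str(delete_at)}    # attached to the object PUT

# ... PUT the object with these headers, then wait until it is expirable ...
while time.time() <= delete_at:
    time.sleep(0.1)
# at this point Manager(['object-expirer']).once() may delete the object
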
Example No. 8
class TestObjectExpirer(unittest.TestCase):

    def setUp(self):
        if len(POLICIES) < 2:
            raise SkipTest('Need more than one policy')

        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        (self.pids, self.port2server, self.account_ring, self.container_ring,
         self.object_ring, self.policy, self.url, self.token,
         self.account, self.configs) = reset_environment()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def test_expirer_object_split_brain(self):
        old_policy = random.choice(list(POLICIES))
        wrong_policy = random.choice([p for p in POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assert_('x-backend-timestamp' in metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                break
        else:
            self.fail('Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                self.fail('Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assert_('x-backend-timestamp' in metadata)
                self.assert_(Timestamp(metadata['x-backend-timestamp']) >
                             create_timestamp)
Example No. 9
class BrainSplitter(object):

    __metaclass__ = meta_command

    def __init__(self,
                 url,
                 token,
                 container_name='test',
                 object_name='test',
                 server_type='container',
                 policy=None):
        self.url = url
        self.token = token
        self.account = utils.split_path(urlparse(url).path, 2, 2)[1]
        self.container_name = container_name
        self.object_name = object_name
        server_list = ['%s-server' % server_type] if server_type else ['all']
        self.servers = Manager(server_list)
        policies = list(ENABLED_POLICIES)
        random.shuffle(policies)
        self.policies = itertools.cycle(policies)

        o = object_name if server_type == 'object' else None
        c = container_name if server_type in ('object', 'container') else None
        if server_type in ('container', 'account'):
            if policy:
                raise TypeError('Metadata server brains do not '
                                'support specific storage policies')
            self.policy = None
            self.ring = ring.Ring('/etc/swift/%s.ring.gz' % server_type)
        elif server_type == 'object':
            if not policy:
                raise TypeError('Object BrainSplitters need to '
                                'specify the storage policy')
            self.policy = policy
            policy.load_ring('/etc/swift')
            self.ring = policy.object_ring
        else:
            raise ValueError('Unknown server_type: %r' % server_type)
        self.server_type = server_type

        part, nodes = self.ring.get_nodes(self.account, c, o)

        node_ids = [n['id'] for n in nodes]
        if all(n_id in node_ids for n_id in (0, 1)):
            self.primary_numbers = (1, 2)
            self.handoff_numbers = (3, 4)
        else:
            self.primary_numbers = (3, 4)
            self.handoff_numbers = (1, 2)

    @command
    def start_primary_half(self):
        """
        start servers 1 & 2
        """
        tuple(self.servers.start(number=n) for n in self.primary_numbers)

    @command
    def stop_primary_half(self):
        """
        stop servers 1 & 2
        """
        tuple(self.servers.stop(number=n) for n in self.primary_numbers)

    @command
    def start_handoff_half(self):
        """
        start servers 3 & 4
        """
        tuple(self.servers.start(number=n) for n in self.handoff_numbers)

    @command
    def stop_handoff_half(self):
        """
        stop servers 3 & 4
        """
        tuple(self.servers.stop(number=n) for n in self.handoff_numbers)

    @command
    def put_container(self, policy_index=None):
        """
        put container with next storage policy
        """
        policy = next(self.policies)
        if policy_index is not None:
            policy = POLICIES.get_by_index(int(policy_index))
            if not policy:
                raise ValueError('Unknown policy with index %s' % policy_index)
        headers = {'X-Storage-Policy': policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

    @command
    def delete_container(self):
        """
        delete container
        """
        client.delete_container(self.url, self.token, self.container_name)

    @command
    def put_object(self, headers=None):
        """
        issue put for zero byte test object
        """
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          headers=headers)

    @command
    def delete_object(self):
        """
        issue delete for test object
        """
        try:
            client.delete_object(self.url, self.token, self.container_name,
                                 self.object_name)
        except ClientException as err:
            if err.http_status != HTTP_NOT_FOUND:
                raise
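
A condensed sketch of how the expirer tests drive BrainSplitter to create the container split-brain follows. The import paths, url, and token are assumptions for illustration only; in the probe tests they come from ReplProbeTest (self.url, self.token) and the test environment.

import random
from test.probe.brain import BrainSplitter       # assumed module path
from test.probe.common import ENABLED_POLICIES   # assumed module path

url = 'http://127.0.0.1:8080/v1/AUTH_test'       # placeholder SAIO endpoint
token = 'AUTH_placeholder-token'                 # placeholder auth token

old_policy, wrong_policy = random.sample(list(ENABLED_POLICIES), 2)
brain = BrainSplitter(url, token, 'container-x', 'object-y')

brain.stop_primary_half()                  # only handoff container servers up
brain.put_container(int(old_policy))       # container created with policy A
brain.put_object(headers={'X-Delete-After': 2})
brain.start_primary_half()

brain.stop_handoff_half()                  # now only the primaries are up
brain.put_container(int(wrong_policy))     # same container, conflicting policy
brain.start_handoff_half()                 # the two halves now disagree
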
Example No. 10
class BaseBrain(object):
    def _setup(self, account, container_name, object_name,
               server_type, policy):
        self.account = account
        self.container_name = container_name
        self.object_name = object_name
        server_list = ['%s-server' % server_type] if server_type else ['all']
        self.servers = Manager(server_list)
        policies = list(ENABLED_POLICIES)
        random.shuffle(policies)
        self.policies = itertools.cycle(policies)

        o = object_name if server_type == 'object' else None
        c = container_name if server_type in ('object', 'container') else None
        if server_type in ('container', 'account'):
            if policy:
                raise TypeError('Metadata server brains do not '
                                'support specific storage policies')
            self.policy = None
            self.ring = ring.Ring(
                '/etc/swift/%s.ring.gz' % server_type)
        elif server_type == 'object':
            if not policy:
                raise TypeError('Object BrainSplitters need to '
                                'specify the storage policy')
            self.policy = policy
            policy.load_ring('/etc/swift')
            self.ring = policy.object_ring
        else:
            raise ValueError('Unknown server_type: %r' % server_type)
        self.server_type = server_type

        self.part, self.nodes = self.ring.get_nodes(self.account, c, o)

        self.node_numbers = [n['id'] + 1 for n in self.nodes]
        if 1 in self.node_numbers and 2 in self.node_numbers:
            self.primary_numbers = (1, 2)
            self.handoff_numbers = (3, 4)
        else:
            self.primary_numbers = (3, 4)
            self.handoff_numbers = (1, 2)

    @command
    def start_primary_half(self):
        """
        start servers 1 & 2
        """
        tuple(self.servers.start(number=n) for n in self.primary_numbers)

    @command
    def stop_primary_half(self):
        """
        stop servers 1 & 2
        """
        tuple(self.servers.stop(number=n) for n in self.primary_numbers)

    @command
    def start_handoff_half(self):
        """
        start servers 3 & 4
        """
        tuple(self.servers.start(number=n) for n in self.handoff_numbers)

    @command
    def stop_handoff_half(self):
        """
        stop servers 3 & 4
        """
        tuple(self.servers.stop(number=n) for n in self.handoff_numbers)

    @command
    def put_container(self, policy_index=None):
        """
        put container with next storage policy
        """

        if policy_index is not None:
            policy = POLICIES.get_by_index(int(policy_index))
            if not policy:
                raise ValueError('Unknown policy with index %s' % policy_index)
        elif not self.policy:
            policy = next(self.policies)
        else:
            policy = self.policy

        headers = {'X-Storage-Policy': policy.name}
        self.client.put_container(self.container_name, headers=headers)

    @command
    def delete_container(self):
        """
        delete container
        """
        self.client.delete_container(self.container_name)

    @command
    def put_object(self, headers=None, contents=None):
        """
        issue put for test object
        """
        self.client.put_object(self.container_name, self.object_name,
                               headers=headers, contents=contents)

    @command
    def delete_object(self):
        """
        issue delete for test object
        """
        self.client.delete_object(self.container_name, self.object_name)

    @command
    def get_object(self):
        """
        issue GET for test object
        """
        return self.client.get_object(self.container_name, self.object_name)
Example No. 11
class TestObjectExpirer(ReplProbeTest):

    def setUp(self):
        if len(ENABLED_POLICIES) < 2:
            raise SkipTest('Need more than one policy')

        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertTrue('x-backend-timestamp' in metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                break
        else:
            self.fail('Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):
            if self.object_name == obj['name']:
                self.fail('Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertTrue('x-backend-timestamp' in metadata)
                self.assertTrue(Timestamp(metadata['x-backend-timestamp']) >
                                create_timestamp)

    def test_expirer_object_should_not_be_expired(self):
        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated)
        #   < T(expirer_executed)
        # The recreated obj should appear in any split-brain case

        # T(obj_created)
        first_created_at = time.time()
        # T(obj_deleted with x-delete-at)
        # object-server accepts req only if X-Delete-At is later than 'now'
        delete_at = int(time.time() + 1.5)
        # T(obj_recreated)
        recreated_at = time.time() + 2.0
        # T(expirer_executed) - 'now'
        sleep_for_expirer = 2.01

        obj_brain.put_container(int(self.policy))
        obj_brain.put_object(
            headers={'X-Delete-At': delete_at,
                     'X-Timestamp': Timestamp(first_created_at).internal})

        # some object servers stopped
        obj_brain.stop_primary_half()
        obj_brain.put_object(
            headers={'X-Timestamp': Timestamp(recreated_at).internal,
                     'X-Object-Meta-Expired': 'False'})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # some object servers recovered
        obj_brain.start_primary_half()
        # sleep to make sure expirer runs at the time after obj is recreated
        time.sleep(sleep_for_expirer)
        self.expirer.once()
        # inconsistent state of objects is recovered
        Manager(['object-replicator']).once()

        # check if you can get recreated object
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name)
        self.assertIn('x-object-meta-expired', metadata)
Example No. 12
class BrainSplitter(object):

    __metaclass__ = meta_command

    def __init__(self, url, token, container_name='test', object_name='test',
                 server_type='container', policy=None):
        self.url = url
        self.token = token
        self.account = utils.split_path(urlparse(url).path, 2, 2)[1]
        self.container_name = container_name
        self.object_name = object_name
        server_list = ['%s-server' % server_type] if server_type else ['all']
        self.servers = Manager(server_list)
        policies = list(ENABLED_POLICIES)
        random.shuffle(policies)
        self.policies = itertools.cycle(policies)

        o = object_name if server_type == 'object' else None
        c = container_name if server_type in ('object', 'container') else None
        if server_type in ('container', 'account'):
            if policy:
                raise TypeError('Metadata server brains do not '
                                'support specific storage policies')
            self.policy = None
            self.ring = ring.Ring(
                '/etc/swift/%s.ring.gz' % server_type)
        elif server_type == 'object':
            if not policy:
                raise TypeError('Object BrainSplitters need to '
                                'specify the storage policy')
            self.policy = policy
            policy.load_ring('/etc/swift')
            self.ring = policy.object_ring
        else:
            raise ValueError('Unknown server_type: %r' % server_type)
        self.server_type = server_type

        part, nodes = self.ring.get_nodes(self.account, c, o)

        node_ids = [n['id'] for n in nodes]
        if all(n_id in node_ids for n_id in (0, 1)):
            self.primary_numbers = (1, 2)
            self.handoff_numbers = (3, 4)
        else:
            self.primary_numbers = (3, 4)
            self.handoff_numbers = (1, 2)

    @command
    def start_primary_half(self):
        """
        start servers 1 & 2
        """
        tuple(self.servers.start(number=n) for n in self.primary_numbers)

    @command
    def stop_primary_half(self):
        """
        stop servers 1 & 2
        """
        tuple(self.servers.stop(number=n) for n in self.primary_numbers)

    @command
    def start_handoff_half(self):
        """
        start servers 3 & 4
        """
        tuple(self.servers.start(number=n) for n in self.handoff_numbers)

    @command
    def stop_handoff_half(self):
        """
        stop servers 3 & 4
        """
        tuple(self.servers.stop(number=n) for n in self.handoff_numbers)

    @command
    def put_container(self, policy_index=None):
        """
        put container with next storage policy
        """
        policy = next(self.policies)
        if policy_index is not None:
            policy = POLICIES.get_by_index(int(policy_index))
            if not policy:
                raise ValueError('Unknown policy with index %s' % policy_index)
        headers = {'X-Storage-Policy': policy.name}
        client.put_container(self.url, self.token, self.container_name,
                             headers=headers)

    @command
    def delete_container(self):
        """
        delete container
        """
        client.delete_container(self.url, self.token, self.container_name)

    @command
    def put_object(self, headers=None):
        """
        issue put for zero byte test object
        """
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name, headers=headers)

    @command
    def delete_object(self):
        """
        issue delete for test object
        """
        try:
            client.delete_object(self.url, self.token, self.container_name,
                                 self.object_name)
        except ClientException as err:
            if err.http_status != HTTP_NOT_FOUND:
                raise
Example No. 13
class BrainSplitter(object):

    __metaclass__ = meta_command

    def __init__(self, url, token, container_name='test', object_name='test'):
        self.url = url
        self.token = token
        self.account = utils.split_path(urlparse(url).path, 2, 2)[1]
        self.container_name = container_name
        self.object_name = object_name
        self.servers = Manager(['container-server'])
        policies = list(POLICIES)
        random.shuffle(policies)
        self.policies = itertools.cycle(policies)

        container_part, container_nodes = ring.Ring(
            '/etc/swift/container.ring.gz').get_nodes(self.account,
                                                      self.container_name)
        container_node_ids = [n['id'] for n in container_nodes]
        if all(n_id in container_node_ids for n_id in (0, 1)):
            self.primary_numbers = (1, 2)
            self.handoff_numbers = (3, 4)
        else:
            self.primary_numbers = (3, 4)
            self.handoff_numbers = (1, 2)

    @command
    def start_primary_half(self):
        """
        start container servers 1 & 2
        """
        tuple(self.servers.start(number=n) for n in self.primary_numbers)

    @command
    def stop_primary_half(self):
        """
        stop container servers 1 & 2
        """
        tuple(self.servers.stop(number=n) for n in self.primary_numbers)

    @command
    def start_handoff_half(self):
        """
        start container servers 3 & 4
        """
        tuple(self.servers.start(number=n) for n in self.handoff_numbers)

    @command
    def stop_handoff_half(self):
        """
        stop container servers 3 & 4
        """
        tuple(self.servers.stop(number=n) for n in self.handoff_numbers)

    @command
    def put_container(self, policy_index=None):
        """
        put container with next storage policy
        """
        policy = next(self.policies)
        if policy_index is not None:
            policy = POLICIES.get_by_index(int(policy_index))
            if not policy:
                raise ValueError('Unknown policy with index %s' % policy_index)
        headers = {'X-Storage-Policy': policy.name}
        client.put_container(self.url,
                             self.token,
                             self.container_name,
                             headers=headers)

    @command
    def delete_container(self):
        """
        delete container
        """
        client.delete_container(self.url, self.token, self.container_name)

    @command
    def put_object(self, headers=None):
        """
        issue put for zero byte test object
        """
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          headers=headers)

    @command
    def delete_object(self):
        """
        issue delete for test object
        """
        try:
            client.delete_object(self.url, self.token, self.container_name,
                                 self.object_name)
        except ClientException as err:
            if err.http_status != HTTP_NOT_FOUND:
                raise
Example No. 14
class BrainSplitter(object):

    __metaclass__ = meta_command

    def __init__(self, url, token, container_name='test', object_name='test'):
        self.url = url
        self.token = token
        self.account = utils.split_path(urlparse(url).path, 2, 2)[1]
        self.container_name = container_name
        self.object_name = object_name
        self.servers = Manager(['container-server'])
        policies = list(POLICIES)
        random.shuffle(policies)
        self.policies = itertools.cycle(policies)

        container_part, container_nodes = ring.Ring(
            '/etc/swift/container.ring.gz').get_nodes(
                self.account, self.container_name)
        container_node_ids = [n['id'] for n in container_nodes]
        if all(n_id in container_node_ids for n_id in (0, 1)):
            self.primary_numbers = (1, 2)
            self.handoff_numbers = (3, 4)
        else:
            self.primary_numbers = (3, 4)
            self.handoff_numbers = (1, 2)

    @command
    def start_primary_half(self):
        """
        start container servers 1 & 2
        """
        tuple(self.servers.start(number=n) for n in self.primary_numbers)

    @command
    def stop_primary_half(self):
        """
        stop container servers 1 & 2
        """
        tuple(self.servers.stop(number=n) for n in self.primary_numbers)

    @command
    def start_handoff_half(self):
        """
        start container servers 3 & 4
        """
        tuple(self.servers.start(number=n) for n in self.handoff_numbers)

    @command
    def stop_handoff_half(self):
        """
        stop container servers 3 & 4
        """
        tuple(self.servers.stop(number=n) for n in self.handoff_numbers)

    @command
    def put_container(self, policy_index=None):
        """
        put container with next storage policy
        """
        policy = next(self.policies)
        if policy_index is not None:
            policy = POLICIES.get_by_index(int(policy_index))
            if not policy:
                raise ValueError('Unknown policy with index %s' % policy_index)
        headers = {'X-Storage-Policy': policy.name}
        client.put_container(self.url, self.token, self.container_name,
                             headers=headers)

    @command
    def delete_container(self):
        """
        delete container
        """
        client.delete_container(self.url, self.token, self.container_name)

    @command
    def put_object(self, headers=None):
        """
        issue put for zero byte test object
        """
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name, headers=headers)

    @command
    def delete_object(self):
        """
        issue delete for test object
        """
        try:
            client.delete_object(self.url, self.token, self.container_name,
                                 self.object_name)
        except ClientException as err:
            if err.http_status != HTTP_NOT_FOUND:
                raise
Example No. 15
class TestObjectExpirer(ReplProbeTest):
    def setUp(self):
        if len(ENABLED_POLICIES) < 2:
            raise SkipTest("Need more than one policy")

        self.expirer = Manager(["object-expirer"])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest("Unable to verify object-expirer service")

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, "probe-test", 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = "container-%s" % uuid.uuid4()
        self.object_name = "object-%s" % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name, self.object_name)

    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={"X-Delete-After": 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={"X-Backend-Storage-Policy-Index": int(old_policy)},
        )
        create_timestamp = Timestamp(metadata["x-timestamp"])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(["object-updater"]).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(["container-updater"]).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4,),
            headers={"X-Backend-Storage-Policy-Index": int(old_policy)},
        )
        self.assertTrue("x-backend-timestamp" in metadata)
        self.assertEqual(Timestamp(metadata["x-backend-timestamp"]), create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account, self.container_name):
            if self.object_name == obj["name"]:
                break
        else:
            self.fail("Did not find listing for %s" % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account, self.container_name):
            if self.object_name == obj["name"]:
                self.fail("Found listing for %s" % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4,),
                headers={"X-Backend-Storage-Policy-Index": int(policy)},
            )
            if "x-backend-timestamp" in metadata:
                if found_in_policy:
                    self.fail("found object in %s and also %s" % (found_in_policy, policy))
                found_in_policy = policy
                self.assertTrue("x-backend-timestamp" in metadata)
                self.assertTrue(Timestamp(metadata["x-backend-timestamp"]) > create_timestamp)
Example No. 16
class TestObjectExpirer(unittest.TestCase):
    def setUp(self):
        if len(POLICIES) < 2:
            raise SkipTest('Need more than one policy')

        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        (self.pids, self.port2server, self.account_ring, self.container_ring,
         self.object_ring, self.policy, self.url, self.token, self.account,
         self.configs) = reset_environment()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def test_expirer_object_split_brain(self):
        old_policy = random.choice(list(POLICIES))
        wrong_policy = random.choice([p for p in POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assert_('x-backend-timestamp' in metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        for obj in self.client.iter_objects(self.account, self.container_name):
            if self.object_name == obj['name']:
                break
        else:
            self.fail('Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        for obj in self.client.iter_objects(self.account, self.container_name):
            if self.object_name == obj['name']:
                self.fail('Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in POLICIES:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4, ),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(
                    Timestamp(metadata['x-backend-timestamp']),
                    create_timestamp)
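
A quick aside on the expiry headers these tests rely on: a PUT with X-Delete-After: N is converted by the cluster into an absolute X-Delete-At (roughly now + N), which a subsequent HEAD reports back. The snippet below is a minimal standalone sketch of that round trip, assuming a SAIO-style install with the default tempauth credentials; the auth URL, credentials, and the container/object names are placeholders, not part of the tests above.

import time

from swiftclient import client

# assumption: SAIO defaults; substitute your own endpoint and credentials
url, token = client.get_auth('http://127.0.0.1:8080/auth/v1.0',
                             'test:tester', 'testing')

client.put_container(url, token, 'expiry-demo')
before = time.time()
client.put_object(url, token, 'expiry-demo', 'obj', contents=b'',
                  headers={'X-Delete-After': '2'})

# swiftclient lower-cases response header names; the stored expiry is
# absolute, close to int(before) + 2
headers = client.head_object(url, token, 'expiry-demo', 'obj')
print('x-delete-at = %s (expected about %d)'
      % (headers['x-delete-at'], int(before) + 2))
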
Example No. 17
0
class TestObjectExpirer(ReplProbeTest):

    def setUp(self):
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def _check_obj_in_container_listing(self):
        for obj in self.client.iter_objects(self.account,
                                            self.container_name):

            if self.object_name == obj['name']:
                return True

        return False

    @unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertIn('x-backend-timestamp', metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
                                   create_timestamp)

    def test_expirer_doesnt_make_async_pendings(self):
        # The object expirer cleans up its own queue. The inner loop
        # basically looks like this:
        #
        #    for obj in stuff_to_delete:
        #        delete_the_object(obj)
        #        remove_the_queue_entry(obj)
        #
        # By default, upon receipt of a DELETE request for an expiring
        # object, the object servers will create async_pending records to
        # clean the expirer queue. Since the expirer cleans its own queue,
        # this is unnecessary. The expirer can make requests in such a way
        # that the object server does not write out any async pendings; this
        # test asserts that this is the case. (A standalone sketch of
        # counting async pendings on disk follows this example.)

        # Make an expiring object in each policy
        for policy in ENABLED_POLICIES:
            container_name = "expirer-test-%d" % policy.idx
            container_headers = {'X-Storage-Policy': policy.name}
            client.put_container(self.url, self.token, container_name,
                                 headers=container_headers)

            now = time.time()
            delete_at = int(now + 2.0)
            client.put_object(
                self.url, self.token, container_name, "some-object",
                headers={'X-Delete-At': str(delete_at),
                         'X-Timestamp': Timestamp(now).normal},
                contents='dontcare')

        time.sleep(2.0)
        # make sure auto-created expirer-queue containers get in the account
        # listing so the expirer can find them
        Manager(['container-updater']).once()

        # Make sure there's no async_pendings anywhere. Probe tests only run
        # on single-node installs anyway, so this set should be small enough
        # that an exhaustive check doesn't take too long.
        all_obj_nodes = self.get_all_object_nodes()
        pendings_before = self.gather_async_pendings(all_obj_nodes)

        # expire the objects
        Manager(['object-expirer']).once()
        pendings_after = self.gather_async_pendings(all_obj_nodes)
        self.assertEqual(pendings_after, pendings_before)

    def test_expirer_object_should_not_be_expired(self):

        # The object-expirer sends an x-if-delete-at header so that the
        # object server can confirm the object is still eligible for
        # expiration. If an object has no x-delete-at metadata, or its
        # x-delete-at differs from the x-if-delete-at value, the expirer's
        # DELETE fails with 412 Precondition Failed.
        # However, if some replicas are on handoff nodes, the expirer can
        # write a tombstone whose timestamp equals x-delete-at, and
        # replication will later resolve the inconsistency in favor of the
        # newer timestamp (in particular, an overwrite without x-delete-at).
        # This test asserts that, at least, an overwritten object with a
        # timestamp later than the original expiration time is safe.

        def put_object(headers):
            # use internal client to PUT objects so that X-Timestamp in headers
            # is effective
            headers['Content-Length'] = '0'
            path = self.client.make_path(
                self.account, self.container_name, self.object_name)
            try:
                self.client.make_request('PUT', path, headers, (2,))
            except UnexpectedResponse as e:
                self.fail(
                    'Expected 201 for PUT object but got %s' % e.resp.status)

        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated)
        #   < T(expirer_executed)
        # The recreated obj should appear in any split-brain case

        obj_brain.put_container()

        # T(obj_deleted with x-delete-at)
        # object-server accepts req only if X-Delete-At is later than 'now'
        # so here, T(obj_created) < T(obj_deleted with x-delete-at)
        now = time.time()
        delete_at = int(now + 2.0)
        recreate_at = delete_at + 1.0
        put_object(headers={'X-Delete-At': str(delete_at),
                            'X-Timestamp': Timestamp(now).normal})

        # stop some object servers to create a situation where the
        # object-expirer can put a tombstone on the primary nodes.
        obj_brain.stop_primary_half()

        # increment the X-Timestamp explicitly
        # (will be T(obj_deleted with x-delete-at) < T(obj_recreated))
        put_object(headers={'X-Object-Meta-Expired': 'False',
                            'X-Timestamp': Timestamp(recreate_at).normal})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # sanity, the newer object is still there
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name)
        except UnexpectedResponse as e:
            self.fail(
                'Expected 200 for HEAD object but got %s' % e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

        # some object servers recovered
        obj_brain.start_primary_half()

        # sleep until after recreate_at
        while time.time() <= recreate_at:
            time.sleep(0.1)
        # Now, expirer runs at the time after obj is recreated
        self.expirer.once()

        # verify that original object was deleted by expirer
        obj_brain.stop_handoff_half()
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,))
        except UnexpectedResponse as e:
            self.fail(
                'Expected 404 for HEAD object but got %s' % e.resp.status)
        obj_brain.start_handoff_half()

        # and the inconsistent object state is recovered by the replicator
        Manager(['object-replicator']).once()

        # check that the recreated object can still be fetched
        try:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name)
        except UnexpectedResponse as e:
            self.fail(
                'Expected 200 for HEAD object but got %s' % e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

    def _test_expirer_delete_outdated_object_version(self, object_exists):
        # This test simulates a case where the expirer tries to delete
        # an outdated version of an object.
        # One case is where the expirer gets a 404 because the newest version
        # of the object is offline.
        # Another case is where the expirer gets a 412, since the old version
        # of the object does not match the expiration time sent by the
        # expirer.
        # In either case, the expirer should retry deleting the object
        # later, for as long as a reclaim age has not passed.
        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        obj_brain.put_container()

        if object_exists:
            obj_brain.put_object()

        # currently, the object either doesn't exist, or does not have
        # an expiration

        # stop primary servers and put a newer version of the object, this
        # time with an expiration. only the handoff servers will have
        # the new version
        obj_brain.stop_primary_half()
        now = time.time()
        delete_at = int(now + 2.0)
        obj_brain.put_object({'X-Delete-At': str(delete_at)})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()

        # update object record in the container listing
        Manager(['container-replicator']).once()

        # take handoff servers down, and bring up the outdated primary servers
        obj_brain.start_primary_half()
        obj_brain.stop_handoff_half()

        # wait until object expiration time
        while time.time() <= delete_at:
            time.sleep(0.1)

        # run expirer against the outdated servers. it should fail since
        # the outdated version does not match the expiration time
        self.expirer.once()

        # bring all servers up, and run replicator to update servers
        obj_brain.start_handoff_half()
        Manager(['object-replicator']).once()

        # verify the deletion has failed by checking the container listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # run expirer again, delete should now succeed
        self.expirer.once()

        # verify the deletion by checking the container listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

    def test_expirer_delete_returns_outdated_404(self):
        self._test_expirer_delete_outdated_object_version(object_exists=False)

    def test_expirer_delete_returns_outdated_412(self):
        self._test_expirer_delete_outdated_object_version(object_exists=True)
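
For reference, test_expirer_doesnt_make_async_pendings above leans on the probe-test helpers get_all_object_nodes and gather_async_pendings. The sketch below shows, in rough standalone form, what counting async pendings amounts to: walking the async_pending directories in which object servers park container updates they could not deliver synchronously. It assumes a SAIO-style layout with object devices somewhere under /srv; it is an illustration, not the probe-test helper itself.

import glob
import os


def count_async_pendings(devices_glob='/srv/node*/*'):
    """Count pickled async container updates across all object devices."""
    # adjust devices_glob to wherever your object devices are mounted
    total = 0
    for device in glob.glob(devices_glob):
        # storage policy 0 uses 'async_pending'; other policies use
        # 'async_pending-<policy index>'
        for async_dir in glob.glob(os.path.join(device, 'async_pending*')):
            for _path, _dirs, files in os.walk(async_dir):
                total += len(files)
    return total

The test's core assertion then reduces to taking a count before the expirer pass and checking that it has not grown afterwards.
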
Example No. 18
0
class TestObjectExpirer(ReplProbeTest):
    def setUp(self):
        self.expirer = Manager(['object-expirer'])
        self.expirer.start()
        err = self.expirer.stop()
        if err:
            raise unittest.SkipTest('Unable to verify object-expirer service')

        conf_files = []
        for server in self.expirer.servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        self.client = InternalClient(conf_file, 'probe-test', 3)

        super(TestObjectExpirer, self).setUp()
        self.container_name = 'container-%s' % uuid.uuid4()
        self.object_name = 'object-%s' % uuid.uuid4()
        self.brain = BrainSplitter(self.url, self.token, self.container_name,
                                   self.object_name)

    def _check_obj_in_container_listing(self):
        for obj in self.client.iter_objects(self.account, self.container_name):

            if self.object_name == obj['name']:
                return True

        return False

    @unittest.skipIf(len(ENABLED_POLICIES) < 2, "Need more than one policy")
    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice(
            [p for p in ENABLED_POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertIn('x-backend-timestamp', metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4, ),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
                                   create_timestamp)

    def test_expirer_doesnt_make_async_pendings(self):
        # The object expirer cleans up its own queue. The inner loop
        # basically looks like this:
        #
        #    for obj in stuff_to_delete:
        #        delete_the_object(obj)
        #        remove_the_queue_entry(obj)
        #
        # By default, upon receipt of a DELETE request for an expiring
        # object, the object servers will create async_pending records to
        # clean the expirer queue. Since the expirer cleans its own queue,
        # this is unnecessary. The expirer can make requests in such a way
        # that the object server does not write out any async pendings; this
        # test asserts that this is the case.

        # Make an expiring object in each policy
        for policy in ENABLED_POLICIES:
            container_name = "expirer-test-%d" % policy.idx
            container_headers = {'X-Storage-Policy': policy.name}
            client.put_container(self.url,
                                 self.token,
                                 container_name,
                                 headers=container_headers)

            now = time.time()
            delete_at = int(now + 2.0)
            client.put_object(self.url,
                              self.token,
                              container_name,
                              "some-object",
                              headers={
                                  'X-Delete-At': str(delete_at),
                                  'X-Timestamp': Timestamp(now).normal
                              },
                              contents='dontcare')

        time.sleep(2.0)
        # make sure auto-created expirer-queue containers get in the account
        # listing so the expirer can find them
        Manager(['container-updater']).once()

        # Make sure there's no async_pendings anywhere. Probe tests only run
        # on single-node installs anyway, so this set should be small enough
        # that an exhaustive check doesn't take too long.
        all_obj_nodes = self.get_all_object_nodes()
        pendings_before = self.gather_async_pendings(all_obj_nodes)

        # expire the objects
        Manager(['object-expirer']).once()
        pendings_after = self.gather_async_pendings(all_obj_nodes)
        self.assertEqual(pendings_after, pendings_before)

    def test_expirer_object_should_not_be_expired(self):

        # The object-expirer sends an x-if-delete-at header so that the
        # object server can confirm the object is still eligible for
        # expiration. If an object has no x-delete-at metadata, or its
        # x-delete-at differs from the x-if-delete-at value, the expirer's
        # DELETE fails with 412 Precondition Failed.
        # However, if some replicas are on handoff nodes, the expirer can
        # write a tombstone whose timestamp equals x-delete-at, and
        # replication will later resolve the inconsistency in favor of the
        # newer timestamp (in particular, an overwrite without x-delete-at).
        # This test asserts that, at least, an overwritten object with a
        # timestamp later than the original expiration time is safe.
        # (A minimal sketch of this conditional delete follows this example.)

        def put_object(headers):
            # use internal client to PUT objects so that X-Timestamp in headers
            # is effective
            headers['Content-Length'] = '0'
            path = self.client.make_path(self.account, self.container_name,
                                         self.object_name)
            try:
                self.client.make_request('PUT', path, headers, (2, ))
            except UnexpectedResponse as e:
                self.fail('Expected 201 for PUT object but got %s' %
                          e.resp.status)

        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        # T(obj_created) < T(obj_deleted with x-delete-at) < T(obj_recreated)
        #   < T(expirer_executed)
        # The recreated obj should appear in any split-brain case

        obj_brain.put_container()

        # T(obj_deleted with x-delete-at)
        # object-server accepts req only if X-Delete-At is later than 'now'
        # so here, T(obj_created) < T(obj_deleted with x-delete-at)
        now = time.time()
        delete_at = int(now + 2.0)
        recreate_at = delete_at + 1.0
        put_object(headers={
            'X-Delete-At': str(delete_at),
            'X-Timestamp': Timestamp(now).normal
        })

        # stop some object servers to create a situation where the
        # object-expirer can put a tombstone on the primary nodes.
        obj_brain.stop_primary_half()

        # increment the X-Timestamp explicitly
        # (will be T(obj_deleted with x-delete-at) < T(obj_recreated))
        put_object(
            headers={
                'X-Object-Meta-Expired': 'False',
                'X-Timestamp': Timestamp(recreate_at).normal
            })

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # sanity, the newer object is still there
        try:
            metadata = self.client.get_object_metadata(self.account,
                                                       self.container_name,
                                                       self.object_name)
        except UnexpectedResponse as e:
            self.fail('Expected 200 for HEAD object but got %s' %
                      e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

        # some object servers recovered
        obj_brain.start_primary_half()

        # sleep until after recreate_at
        while time.time() <= recreate_at:
            time.sleep(0.1)
        # Now, expirer runs at the time after obj is recreated
        self.expirer.once()

        # verify that original object was deleted by expirer
        obj_brain.stop_handoff_half()
        try:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4, ))
        except UnexpectedResponse as e:
            self.fail('Expected 404 for HEAD object but got %s' %
                      e.resp.status)
        obj_brain.start_handoff_half()

        # and the inconsistent object state is recovered by the replicator
        Manager(['object-replicator']).once()

        # check that the recreated object can still be fetched
        try:
            metadata = self.client.get_object_metadata(self.account,
                                                       self.container_name,
                                                       self.object_name)
        except UnexpectedResponse as e:
            self.fail('Expected 200 for HEAD object but got %s' %
                      e.resp.status)

        self.assertIn('x-object-meta-expired', metadata)

    def _test_expirer_delete_outdated_object_version(self, object_exists):
        # This test simulates a case where the expirer tries to delete
        # an outdated version of an object.
        # One case is where the expirer gets a 404 because the newest version
        # of the object is offline.
        # Another case is where the expirer gets a 412, since the old version
        # of the object does not match the expiration time sent by the
        # expirer.
        # In either case, the expirer should retry deleting the object
        # later, for as long as a reclaim age has not passed.
        obj_brain = BrainSplitter(self.url, self.token, self.container_name,
                                  self.object_name, 'object', self.policy)

        obj_brain.put_container()

        if object_exists:
            obj_brain.put_object()

        # currently, the object either doesn't exist, or does not have
        # an expiration

        # stop primary servers and put a newer version of the object, this
        # time with an expiration. only the handoff servers will have
        # the new version
        obj_brain.stop_primary_half()
        now = time.time()
        delete_at = int(now + 2.0)
        obj_brain.put_object({'X-Delete-At': str(delete_at)})

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()

        # update object record in the container listing
        Manager(['container-replicator']).once()

        # take handoff servers down, and bring up the outdated primary servers
        obj_brain.start_primary_half()
        obj_brain.stop_handoff_half()

        # wait until object expiration time
        while time.time() <= delete_at:
            time.sleep(0.1)

        # run expirer against the outdated servers. it should fail since
        # the outdated version does not match the expiration time
        self.expirer.once()

        # bring all servers up, and run replicator to update servers
        obj_brain.start_handoff_half()
        Manager(['object-replicator']).once()

        # verify the deletion has failed by checking the container listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # run expirer again, delete should now succeed
        self.expirer.once()

        # verify the deletion by checking the container listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

    def test_expirer_delete_returns_outdated_404(self):
        self._test_expirer_delete_outdated_object_version(object_exists=False)

    def test_expirer_delete_returns_outdated_412(self):
        self._test_expirer_delete_outdated_object_version(object_exists=True)

    def test_slo_async_delete(self):
        if not self.cluster_info.get('slo', {}).get('allow_async_delete'):
            raise unittest.SkipTest('allow_async_delete not enabled')

        segment_container = self.container_name + '_segments'
        client.put_container(self.url, self.token, self.container_name, {})
        client.put_container(self.url, self.token, segment_container, {})
        client.put_object(self.url, self.token, segment_container, 'segment_1',
                          b'1234')
        client.put_object(self.url, self.token, segment_container, 'segment_2',
                          b'5678')
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          'slo',
                          json.dumps([
                              {
                                  'path': segment_container + '/segment_1'
                              },
                              {
                                  'data': 'Cg=='
                              },
                              {
                                  'path': segment_container + '/segment_2'
                              },
                          ]),
                          query_string='multipart-manifest=put')
        _, body = client.get_object(self.url, self.token, self.container_name,
                                    'slo')
        self.assertEqual(body, b'1234\n5678')

        client.delete_object(
            self.url,
            self.token,
            self.container_name,
            'slo',
            query_string='multipart-manifest=delete&async=true')

        # Object's deleted
        _, objects = client.get_container(self.url, self.token,
                                          self.container_name)
        self.assertEqual(objects, [])
        with self.assertRaises(client.ClientException) as caught:
            client.get_object(self.url, self.token, self.container_name, 'slo')
        self.assertEqual(404, caught.exception.http_status)

        # But segments are still around and accessible
        _, objects = client.get_container(self.url, self.token,
                                          segment_container)
        self.assertEqual([o['name'] for o in objects],
                         ['segment_1', 'segment_2'])
        _, body = client.get_object(self.url, self.token, segment_container,
                                    'segment_1')
        self.assertEqual(body, b'1234')
        _, body = client.get_object(self.url, self.token, segment_container,
                                    'segment_2')
        self.assertEqual(body, b'5678')

        # make sure auto-created expirer-queue containers get in the account
        # listing so the expirer can find them
        Manager(['container-updater']).once()
        self.expirer.once()

        # Now the expirer has cleaned up the segments
        _, objects = client.get_container(self.url, self.token,
                                          segment_container)
        self.assertEqual(objects, [])
        with self.assertRaises(client.ClientException) as caught:
            client.get_object(self.url, self.token, segment_container,
                              'segment_1')
        self.assertEqual(404, caught.exception.http_status)
        with self.assertRaises(client.ClientException) as caught:
            client.get_object(self.url, self.token, segment_container,
                              'segment_2')
        self.assertEqual(404, caught.exception.http_status)
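
Finally, the 412 behaviour that test_expirer_object_should_not_be_expired and the outdated-version tests depend on comes from the conditional DELETE the expirer issues. The sketch below reuses the InternalClient make_path/make_request calls already shown in these examples to demonstrate the idea; the helper name and the choice of acceptable statuses are illustrative, not the expirer's actual code.

from swift.common.internal_client import UnexpectedResponse


def conditional_expire(internal_client, account, container, obj, delete_at):
    """DELETE an object only if its X-Delete-At still matches delete_at."""
    path = internal_client.make_path(account, container, obj)
    try:
        internal_client.make_request(
            'DELETE', path, {'X-If-Delete-At': str(delete_at)}, (2,))
        return True
    except UnexpectedResponse:
        # 412 Precondition Failed: the stored X-Delete-At is missing or
        # different (e.g. the object was overwritten); 404: already gone.
        # Either way the object must not be treated as expired here.
        return False
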