Example #1
0
    def test_sync(self):
        all_objects = []
        # upload some containers
        for policy in ENABLED_POLICIES:
            container = 'container-%s-%s' % (policy.name, uuid.uuid4())
            client.put_container(self.url, self.token, container,
                                 headers={'X-Storage-Policy': policy.name})
            obj = 'object-%s' % uuid.uuid4()
            body = 'test-body'
            client.put_object(self.url, self.token, container, obj, body)
            all_objects.append((policy, container, obj))

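        # run the container-updater so the new containers and objects are
        # reported up to the account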
        Manager(['container-updater']).once()

        headers = client.head_account(self.url, self.token)

        self.assertEqual(int(headers['x-account-container-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-object-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-bytes-used']),
                         len(ENABLED_POLICIES) * len(body))

        part, nodes = self.account_ring.get_nodes(self.account)
        for node in nodes:
            direct_delete_account(node, part, self.account)

        Manager(['account-reaper']).once()

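        # run replicators and updaters so the account deletion propagates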
        get_to_final_state()

        for policy, container, obj in all_objects:
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                else:
                    self.fail('Found un-reaped /%s/%s on %r' %
                              (self.account, container, cnode))
            object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
            part, nodes = object_ring.get_nodes(self.account, container, obj)
            for node in nodes:
                try:
                    direct_get_object(node, part, self.account,
                                      container, obj)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
Example #3
0
    def test_direct_head_container_deleted(self):
        important_timestamp = Timestamp.now().internal
        headers = HeaderKeyDict({'X-Backend-Important-Timestamp':
                                 important_timestamp})

        with mocked_http_conn(404, headers) as conn:
            with self.assertRaises(ClientException) as raised:
                direct_client.direct_head_container(
                    self.node, self.part, self.account, self.container)
            self.assertEqual(conn.host, self.node['ip'])
            self.assertEqual(conn.port, self.node['port'])
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.container_path)

        self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
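        # the 404 response should surface as a ClientException that still
        # carries the backend headers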
        self.assertEqual(raised.exception.http_status, 404)
        self.assertEqual(raised.exception.http_headers, headers)
Example #4
0
 def _eat_client_exception(*args):
     try:
         return direct_head_container(*args)
     except ClientException as err:
         if err.http_status == 404:
             return err.http_headers
     except (Timeout, socket.error):
         pass
Example #6
0
    def test_direct_head_container_error(self):
        headers = HeaderKeyDict(key='value')

        with mocked_http_conn(503, headers) as conn:
            with self.assertRaises(ClientException) as raised:
                direct_client.direct_head_container(
                    self.node, self.part, self.account, self.container)
            # check request
            self.assertEqual(conn.host, self.node['ip'])
            self.assertEqual(conn.port, self.node['port'])
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.container_path)

        self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
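        # the raised exception should carry the backend's status and headers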
        self.assertEqual(raised.exception.http_status, 503)
        self.assertEqual(raised.exception.http_headers, headers)
        self.assertTrue('HEAD' in str(raised.exception))
Example #7
0
    def test_delayed_reap(self):
        # define reapers configured to delay reaping by 3 seconds
        account_reapers = []
        for conf_file in self.configs['account-server'].values():
            conf = utils.readconf(conf_file, 'account-reaper')
            conf['delay_reaping'] = '3'
            account_reapers.append(reaper.AccountReaper(conf))

        self.assertTrue(account_reapers)

        # run reaper, and make sure that nothing is reaped
        for account_reaper in account_reapers:
            account_reaper.run_once()

        for policy, container, obj in self.all_objects:
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException:
                    self.fail(
                        "Nothing should be reaped. Container should exist")

            part, nodes = policy.object_ring.get_nodes(self.account, container,
                                                       obj)
            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
            for node in nodes:
                try:
                    direct_get_object(node,
                                      part,
                                      self.account,
                                      container,
                                      obj,
                                      headers=headers)
                except ClientException:
                    self.fail("Nothing should be reaped. Object should exist")

        # wait 3 seconds, run reaper, and make sure that all is reaped
        sleep(3)
        for account_reaper in account_reapers:
            account_reaper.run_once()

        self._verify_account_reaped()
Example #8
0
    def test_direct_head_container_error(self):
        headers = HeaderKeyDict(key='value')

        with mocked_http_conn(503, headers) as conn:
            with self.assertRaises(ClientException) as raised:
                direct_client.direct_head_container(self.node, self.part,
                                                    self.account,
                                                    self.container)
            # check request
            self.assertEqual(conn.host, self.node['ip'])
            self.assertEqual(conn.port, self.node['port'])
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.container_path)

        self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
        self.assertEqual(raised.exception.http_status, 503)
        self.assertEqual(raised.exception.http_headers, headers)
        self.assertTrue('HEAD' in str(raised.exception))
Example #9
0
    def test_direct_head_container_deleted(self):
        important_timestamp = Timestamp.now().internal
        headers = HeaderKeyDict(
            {'X-Backend-Important-Timestamp': important_timestamp})

        with mocked_http_conn(404, headers) as conn:
            with self.assertRaises(ClientException) as raised:
                direct_client.direct_head_container(self.node, self.part,
                                                    self.account,
                                                    self.container)
            self.assertEqual(conn.host, self.node['ip'])
            self.assertEqual(conn.port, self.node['port'])
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.container_path)

        self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
        self.assertEqual(raised.exception.http_status, 404)
        self.assertEqual(raised.exception.http_headers, headers)
Example #10
0
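 # USE_REPLICATION_NETWORK_HEADER is the X-Backend-Use-Replication-Network
 # request header, which asks direct_client to use the node's replication
 # ip/port (see test_direct_head_container_replication_net below)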
 def _eat_client_exception(*args):
     try:
         return direct_head_container(
             *args, headers={USE_REPLICATION_NETWORK_HEADER: 'true'})
     except ClientException as err:
         if err.http_status == 404:
             return err.http_headers
     except (Timeout, socket.error):
         pass
Example #11
0
    def test_direct_head_container_error(self):
        headers = HeaderKeyDict(key='value')

        with mocked_http_conn(503, headers) as conn:
            try:
                direct_client.direct_head_container(
                    self.node, self.part, self.account, self.container)
            except ClientException as err:
                pass
            else:
                self.fail('ClientException not raised')
            # check request
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.container_path)

        self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
        self.assertEqual(err.http_status, 503)
        self.assertEqual(err.http_headers, headers)
        self.assertTrue('HEAD' in str(err))
Example #12
0
    def test_direct_head_container_deleted(self):
        important_timestamp = Timestamp(time.time()).internal
        headers = HeaderKeyDict({'X-Backend-Important-Timestamp':
                                 important_timestamp})

        with mocked_http_conn(404, headers) as conn:
            try:
                direct_client.direct_head_container(
                    self.node, self.part, self.account, self.container)
            except Exception as err:
                self.assertTrue(isinstance(err, ClientException))
            else:
                self.fail('ClientException not raised')
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.container_path)

        self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
        self.assertEqual(err.http_status, 404)
        self.assertEqual(err.http_headers, headers)
Example #13
0
    def test_delayed_reap(self):
        # define reapers configured to delay reaping by 3 seconds
        account_reapers = []
        for conf_file in self.configs['account-server'].values():
            conf = utils.readconf(conf_file, 'account-reaper')
            conf['delay_reaping'] = '3'
            account_reapers.append(reaper.AccountReaper(conf))

        self.assertTrue(account_reapers)

        # run reaper, and make sure that nothing is reaped
        for account_reaper in account_reapers:
            account_reaper.run_once()

        for policy, container, obj in self.all_objects:
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException:
                    self.fail(
                        "Nothing should be reaped. Container should exist")

            part, nodes = policy.object_ring.get_nodes(self.account,
                                                       container, obj)
            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
            for node in nodes:
                try:
                    direct_get_object(node, part, self.account,
                                      container, obj, headers=headers)
                except ClientException:
                    self.fail("Nothing should be reaped. Object should exist")

        # wait 3 seconds, run reaper, and make sure that all is reaped
        sleep(3)
        for account_reaper in account_reapers:
            account_reaper.run_once()

        self._verify_account_reaped()
Example #14
0
    def test_direct_head_container(self):
        headers = HeaderKeyDict(key='value')

        with mocked_http_conn(200, headers) as conn:
            resp = direct_client.direct_head_container(
                self.node, self.part, self.account, self.container)
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.container_path)

        self.assertEqual(conn.req_headers['user-agent'],
                         self.user_agent)
        self.assertEqual(headers, resp)
Example #15
0
    def test_direct_head_container(self):
        node = {'ip': '1.2.3.4', 'port': '6000', 'device': 'sda'}
        part = '0'
        account = 'a'
        container = 'c'
        headers = {'key': 'value'}

        was_http_connector = direct_client.http_connect
        direct_client.http_connect = mock_http_connect(200, headers)

        resp = direct_client.direct_head_container(node, part, account, container)

        headers.update({'user-agent': 'direct-client %s' % os.getpid()})
        self.assertEqual(headers, resp)

        direct_client.http_connect = was_http_connector
Example #17
0
    def test_direct_head_container_replication_net(self):
        headers = HeaderKeyDict(key='value')

        with mocked_http_conn(200, headers) as conn:
            resp = direct_client.direct_head_container(
                self.node,
                self.part,
                self.account,
                self.container,
                headers={'X-Backend-Use-Replication-Network': 'on'})
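            # with the replication-network header set, the request should go
            # to the node's replication ip/port rather than the client-facing
            # ones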
            self.assertEqual(conn.host, self.node['replication_ip'])
            self.assertEqual(conn.port, self.node['replication_port'])
            self.assertNotEqual(conn.host, self.node['ip'])
            self.assertNotEqual(conn.port, self.node['port'])
            self.assertEqual(conn.method, 'HEAD')
            self.assertEqual(conn.path, self.container_path)

        self.assertEqual(conn.req_headers['user-agent'], self.user_agent)
        self.assertEqual(headers, resp)
Example #18
0
    def test_merge_storage_policy_index(self):
        # generic split brain
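        # (stop half the primaries, create the container, then recreate it
        # and write an object while only the other half is up, so different
        # primaries can end up with different storage policy indexes)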
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents='VERIFY')
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={'X-Backend-Storage-Policy-Index':
                                 policy_index})
                except direct_client.ClientException as err:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(len(found_policy_indexes) == 1,
                        'primary nodes disagree about policy index %r' %
                        head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node, part, self.account, self.container_name,
                    self.object_name, headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index})
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' % (
                    self.account, self.container_name, self.object_name,
                    orig_policy_index))
        # verify that the object data read by external client is correct
        headers, data = self._get_object_patiently(expected_policy_index)
        self.assertEqual('VERIFY', data)
        self.assertEqual('custom-meta', headers['x-object-meta-test'])
Example #19
0
    def test_reconciler_move_object_twice(self):
        # select some policies
        old_policy = random.choice(ENABLED_POLICIES)
        new_policy = random.choice(
            [p for p in ENABLED_POLICIES if p != old_policy])

        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()

        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = [
            (node,
             direct_client.direct_head_container(node, container_part,
                                                 self.account,
                                                 self.container_name))
            for node in container_nodes
        ]
        old_container_nodes = [
            node for node, metadata in head_responses if int(old_policy) ==
            int(metadata['X-Backend-Storage-Policy-Index'])
        ]
        self.assertEqual(2, len(old_container_nodes))

        # hopefully memcache still has the new policy cached
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents=b'VERIFY')
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        int_client = InternalClient(conf_file, 'probe-test', 3)
        int_client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()

        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
            for node in old_container_nodes:
                server.once(number=self.config_number(node))

        # verify entry in the queue for the "misplaced" new_policy
        for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
            for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # verify object in old_policy
        int_client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # verify object is *not* in new_policy
        int_client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})

        self.get_to_final_state()

        # verify entry in the queue
        for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
            for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # and now it flops back
        int_client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # make sure the queue is settled
        self.get_to_final_state()
        for container in int_client.iter_containers(MISPLACED_OBJECTS_ACCOUNT):
            for obj in int_client.iter_objects(MISPLACED_OBJECTS_ACCOUNT,
                                               container['name']):
                self.fail('Found unexpected object %r in the queue' % obj)

        # verify that the object data read by external client is correct
        headers, data = self._get_object_patiently(int(new_policy))
        self.assertEqual(b'VERIFY', data)
        self.assertEqual('custom-meta', headers['x-object-meta-test'])
Example #20
0
    def test_metadata_sync(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url,
                             self.token,
                             container,
                             headers={
                                 'X-Storage-Policy': self.policy.name,
                                 'X-Container-Meta-A': '1',
                                 'X-Container-Meta-B': '1',
                                 'X-Container-Meta-C': '1'
                             })

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes.pop()
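        # set aside one container server (cnode); the remaining two stay in
        # cnodes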
        # 2 of 3 container servers are temporarily down
        for node in cnodes:
            kill_server((node['ip'], node['port']), self.ipport2server)

        # Put some meta on the lone server, to make sure it's merged properly
        # This will 503 (since we don't have a quorum), but we don't care (!)
        try:
            client.post_container(self.url,
                                  self.token,
                                  container,
                                  headers={
                                      'X-Container-Meta-A': '2',
                                      'X-Container-Meta-B': '2',
                                      'X-Container-Meta-D': '2'
                                  })
        except ClientException:
            pass

        # object updates come to only one container server
        for _ in range(self.object_puts):
            obj = 'object-%s' % uuid4()
            client.put_object(self.url, self.token, container, obj, 'VERIFY')

        # the 2 container servers come back up
        for node in cnodes:
            start_server((node['ip'], node['port']), self.ipport2server)
        # but now the container server which got the object updates is down
        kill_server((cnode['ip'], cnode['port']), self.ipport2server)

        # Metadata update will be applied to 2 container servers
        # (equal to quorum)
        client.post_container(self.url,
                              self.token,
                              container,
                              headers={
                                  'X-Container-Meta-B': '3',
                                  'X-Container-Meta-E': '3'
                              })
        # the container server which got the object updates comes back up
        start_server((cnode['ip'], cnode['port']), self.ipport2server)

        # other nodes have no objects
        for node in cnodes:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            self.assertIn(resp_headers.get('x-container-object-count'),
                          (None, '0', 0))

        # If the container-replicator on the node which got the object
        # updates runs first, the db file may be replicated by rsync to the
        # other container servers. In that case, the db file carries no
        # information about the metadata, so metadata should be synced
        # before replication
        Manager(['container-replicator']).once(
            number=self.config_number(cnode))

        expected_meta = {
            'x-container-meta-a': '2',
            'x-container-meta-b': '2',
            'x-container-meta-c': '1',
            'x-container-meta-d': '2',
        }

        # the node that got the object updates still lacks the latest meta
        resp_headers = direct_client.direct_head_container(
            cnode, cpart, self.account, container)
        for header, value in expected_meta.items():
            self.assertIn(header, resp_headers)
            self.assertEqual(value, resp_headers[header])
        self.assertNotIn(resp_headers.get('x-container-object-count'),
                         (None, '0', 0))

        expected_meta = {
            'x-container-meta-a': '2',
            'x-container-meta-b': '3',
            'x-container-meta-c': '1',
            'x-container-meta-d': '2',
            'x-container-meta-e': '3',
        }

        # other nodes still have the meta, as well as objects
        for node in cnodes:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            for header, value in expected_meta.items():
                self.assertIn(header, resp_headers)
                self.assertEqual(value, resp_headers[header])
            self.assertNotIn(resp_headers.get('x-container-object-count'),
                             (None, '0', 0))

        # and after a full replicator pass on the remaining nodes
        for node in cnodes:
            Manager(['container-replicator']).once(
                number=self.config_number(node))

        # ... all is right
        for node in cnodes + [cnode]:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            for header, value in expected_meta.items():
                self.assertIn(header, resp_headers)
                self.assertEqual(value, resp_headers[header])
            self.assertNotIn(resp_headers.get('x-container-object-count'),
                             (None, '0', 0))
Example #21
0
    def test_merge_storage_policy_index(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object()
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(self.account,
                                                self.container_name,
                                                self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node,
                        part,
                        self.account,
                        self.container_name,
                        self.object_name,
                        headers={
                            'X-Backend-Storage-Policy-Index': policy_index
                        })
                except direct_client.ClientException as err:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' %
                      (self.account, self.container_name, self.object_name,
                       found_policy_indexes))
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) == 1,
            'primary nodes disagree about policy index %r' % head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node,
                    part,
                    self.account,
                    self.container_name,
                    self.object_name,
                    headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index
                    })
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' %
                          (self.account, self.container_name, self.object_name,
                           orig_policy_index))
        # use proxy to access object (bad container info might be cached...)
        timeout = time.time() + TIMEOUT
        while time.time() < timeout:
            try:
                metadata = client.head_object(self.url, self.token,
                                              self.container_name,
                                              self.object_name)
            except ClientException as err:
                if err.http_status != HTTP_NOT_FOUND:
                    raise
                time.sleep(1)
            else:
                break
        else:
            self.fail('could not HEAD /%s/%s/%s/ from policy %s '
                      'after %s seconds.' %
                      (self.account, self.container_name, self.object_name,
                       expected_policy_index, TIMEOUT))
Example #22
0
    def test_reconciler_move_object_twice(self):
        # select some policies
        old_policy = random.choice(list(POLICIES))
        new_policy = random.choice([p for p in POLICIES if p != old_policy])

        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()

        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        old_container_node_ids = [
            node['id'] for node, metadata in head_responses if int(old_policy)
            == int(metadata['X-Backend-Storage-Policy-Index'])
        ]
        self.assertEqual(2, len(old_container_node_ids))

        # hopefully memcache still has the new policy cached
        self.brain.put_object()
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        client = InternalClient(conf_file, 'probe-test', 3)
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()

        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
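            # node ids are 0-based; the server config numbers are assumed to
            # start at 1, hence number=n + 1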
            tuple(server.once(number=n + 1) for n in old_container_node_ids)

        # verify entry in the queue for the "misplaced" new_policy
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # verify object in old_policy
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # verify object is *not* in new_policy
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})

        get_to_final_state()

        # verify entry in the queue
        client = InternalClient(conf_file, 'probe-test', 3)
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # and now it flops back
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # make sure the queue is settled
        get_to_final_state()
        for container in client.iter_containers('.misplaced_objects'):
            for obj in client.iter_objects('.misplaced_objects',
                                           container['name']):
                self.fail('Found unexpected object %r in the queue' % obj)
Example #23
0
    def test_sync(self):
        all_objects = []
        # upload some containers
        for policy in ENABLED_POLICIES:
            container = 'container-%s-%s' % (policy.name, uuid.uuid4())
            client.put_container(self.url, self.token, container,
                                 headers={'X-Storage-Policy': policy.name})
            obj = 'object-%s' % uuid.uuid4()
            body = 'test-body'
            client.put_object(self.url, self.token, container, obj, body)
            all_objects.append((policy, container, obj))

        Manager(['container-updater']).once()

        headers = client.head_account(self.url, self.token)

        self.assertEqual(int(headers['x-account-container-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-object-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-bytes-used']),
                         len(ENABLED_POLICIES) * len(body))

        part, nodes = self.account_ring.get_nodes(self.account)
        for node in nodes:
            direct_delete_account(node, part, self.account)

        # run the reaper
        Manager(['account-reaper']).once()

        for policy, container, obj in all_objects:
            # verify that any container deletes were at same timestamp
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            delete_times = set()
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get(
                        'X-Backend-DELETE-Timestamp')
                    # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)

                else:
                    # Container replicas may not yet be deleted if we have a
                    # policy with object replicas < container replicas, so
                    # ignore successful HEAD. We'll check for all replicas to
                    # be deleted again after running the replicators.
                    pass
            self.assertEqual(1, len(delete_times), delete_times)

            # verify that all object deletes were at same timestamp
            object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
            part, nodes = object_ring.get_nodes(self.account, container, obj)
            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
            delete_times = set()
            for node in nodes:
                try:
                    direct_get_object(node, part, self.account,
                                      container, obj, headers=headers)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get('X-Backend-Timestamp')
                    # 'X-Backend-Timestamp' confirms obj was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
            self.assertEqual(1, len(delete_times))

        # run replicators and updaters
        self.get_to_final_state()

        for policy, container, obj in all_objects:
            # verify that ALL container replicas are now deleted
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            delete_times = set()
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get(
                        'X-Backend-DELETE-Timestamp')
                    # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s on %r' %
                              (self.account, container, cnode))

            # sanity check that object state is still consistent...
            object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
            part, nodes = object_ring.get_nodes(self.account, container, obj)
            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
            delete_times = set()
            for node in nodes:
                try:
                    direct_get_object(node, part, self.account,
                                      container, obj, headers=headers)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get('X-Backend-Timestamp')
                    # 'X-Backend-Timestamp' confirms obj was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
            self.assertEqual(1, len(delete_times))
Example #24
0
    def test_merge_storage_policy_index(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents='VERIFY')
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(self.account,
                                                self.container_name,
                                                self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node,
                        part,
                        self.account,
                        self.container_name,
                        self.object_name,
                        headers={
                            'X-Backend-Storage-Policy-Index': policy_index
                        })
                except direct_client.ClientException as err:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' %
                      (self.account, self.container_name, self.object_name,
                       found_policy_indexes))
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) == 1,
            'primary nodes disagree about policy index %r' % head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node,
                    part,
                    self.account,
                    self.container_name,
                    self.object_name,
                    headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index
                    })
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' %
                          (self.account, self.container_name, self.object_name,
                           orig_policy_index))
        # verify that the object data read by external client is correct
        headers, data = self._get_object_patiently(expected_policy_index)
        self.assertEqual('VERIFY', data)
        self.assertEqual('custom-meta', headers['x-object-meta-test'])
Example #25
0
    def test_merge_storage_policy_index(self):
        # generic split brain
        self.brain.stop_primary_half()
        self.brain.put_container()
        self.brain.start_primary_half()
        self.brain.stop_handoff_half()
        self.brain.put_container()
        self.brain.put_object()
        self.brain.start_handoff_half()
        # make sure we have some manner of split brain
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) > 1,
            'primary nodes did not disagree about policy index %r' %
            head_responses)
        # find our object
        orig_policy_index = None
        for policy_index in found_policy_indexes:
            object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
            part, nodes = object_ring.get_nodes(
                self.account, self.container_name, self.object_name)
            for node in nodes:
                try:
                    direct_client.direct_head_object(
                        node, part, self.account, self.container_name,
                        self.object_name,
                        headers={'X-Backend-Storage-Policy-Index':
                                 policy_index})
                except direct_client.ClientException as err:
                    continue
                orig_policy_index = policy_index
                break
            if orig_policy_index is not None:
                break
        else:
            self.fail('Unable to find /%s/%s/%s in %r' % (
                self.account, self.container_name, self.object_name,
                found_policy_indexes))
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # validate containers
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        found_policy_indexes = \
            set(metadata['X-Backend-Storage-Policy-Index'] for
                node, metadata in head_responses)
        self.assertTrue(
            len(found_policy_indexes) == 1,
            'primary nodes disagree about policy index %r' %
            head_responses)

        expected_policy_index = found_policy_indexes.pop()
        self.assertNotEqual(orig_policy_index, expected_policy_index)
        # validate object placement
        orig_policy_ring = POLICIES.get_object_ring(orig_policy_index,
                                                    '/etc/swift')
        for node in orig_policy_ring.devs:
            try:
                direct_client.direct_head_object(
                    node, part, self.account, self.container_name,
                    self.object_name, headers={
                        'X-Backend-Storage-Policy-Index': orig_policy_index})
            except direct_client.ClientException as err:
                if err.http_status == HTTP_NOT_FOUND:
                    continue
                raise
            else:
                self.fail('Found /%s/%s/%s in %s' % (
                    self.account, self.container_name, self.object_name,
                    orig_policy_index))
        # use proxy to access object (bad container info might be cached...)
        timeout = time.time() + TIMEOUT
        while time.time() < timeout:
            try:
                metadata = client.head_object(self.url, self.token,
                                              self.container_name,
                                              self.object_name)
            except ClientException as err:
                if err.http_status != HTTP_NOT_FOUND:
                    raise
                time.sleep(1)
            else:
                break
        else:
            self.fail('could not HEAD /%s/%s/%s/ from policy %s '
                      'after %s seconds.' % (
                          self.account, self.container_name, self.object_name,
                          expected_policy_index, TIMEOUT))
Example #26
0
    def test_metadata_sync(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy': self.policy.name,
                                      'X-Container-Meta-A': '1',
                                      'X-Container-Meta-B': '1',
                                      'X-Container-Meta-C': '1'})

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes.pop()
        # 2 of 3 container servers are temporarily down
        for node in cnodes:
            kill_server((node['ip'], node['port']),
                        self.ipport2server)

        # Put some meta on the lone server, to make sure it's merged properly
        # This will 503 (since we don't have a quorum), but we don't care (!)
        try:
            client.post_container(self.url, self.token, container,
                                  headers={'X-Container-Meta-A': '2',
                                           'X-Container-Meta-B': '2',
                                           'X-Container-Meta-D': '2'})
        except ClientException:
            pass

        # object updates come to only one container server
        for _ in range(self.object_puts):
            obj = 'object-%s' % uuid4()
            client.put_object(self.url, self.token, container, obj, 'VERIFY')

        # the two stopped container servers come back up
        for node in cnodes:
            start_server((node['ip'], node['port']),
                         self.ipport2server)
        # but the container server that got the object updates is taken down
        kill_server((cnode['ip'], cnode['port']),
                    self.ipport2server)

        # Metadata update will be applied to 2 container servers
        # (equal to quorum)
        client.post_container(self.url, self.token, container,
                              headers={'X-Container-Meta-B': '3',
                                       'X-Container-Meta-E': '3'})
        # the container server that got the object updates comes back up
        start_server((cnode['ip'], cnode['port']),
                     self.ipport2server)

        # other nodes have no objects
        for node in cnodes:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            self.assertIn(resp_headers.get('x-container-object-count'),
                          (None, '0', 0))

        # If the container-replicator on the node that got the object updates
        # runs first, its db file may be rsynced whole to the other container
        # servers. That db does not have the latest metadata, so the metadata
        # must be synced before replication.
        Manager(['container-replicator']).once(
            number=self.config_number(cnode))

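        # A and D come from the post to the lone server, B and E from the
        # later quorum post (so B ends up as '3'), and C is untouched since
        # the container was created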
        expected_meta = {
            'x-container-meta-a': '2',
            'x-container-meta-b': '3',
            'x-container-meta-c': '1',
            'x-container-meta-d': '2',
            'x-container-meta-e': '3',
        }

        # node that got the object updates now has the meta
        resp_headers = direct_client.direct_head_container(
            cnode, cpart, self.account, container)
        for header, value in expected_meta.items():
            self.assertIn(header, resp_headers)
            self.assertEqual(value, resp_headers[header])
        self.assertNotIn(resp_headers.get('x-container-object-count'),
                         (None, '0', 0))

        # other nodes still have the meta, as well as objects
        for node in cnodes:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            for header, value in expected_meta.items():
                self.assertIn(header, resp_headers)
                self.assertEqual(value, resp_headers[header])
            self.assertNotIn(resp_headers.get('x-container-object-count'),
                             (None, '0', 0))

        # and after full pass on remaining nodes
        for node in cnodes:
            Manager(['container-replicator']).once(
                number=self.config_number(node))

        # ... all is right
        for node in cnodes + [cnode]:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            for header, value in expected_meta.items():
                self.assertIn(header, resp_headers)
                self.assertEqual(value, resp_headers[header])
            self.assertNotIn(resp_headers.get('x-container-object-count'),
                             (None, '0', 0))
Example #27
0
 def test_reconcile_delete(self):
     # generic split brain
     self.brain.stop_primary_half()
     self.brain.put_container()
     self.brain.put_object()
     self.brain.start_primary_half()
     self.brain.stop_handoff_half()
     self.brain.put_container()
     self.brain.delete_object()
     self.brain.start_handoff_half()
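     # the object data is now recorded under one container policy index and
     # the tombstone under another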
     # make sure we have some manner of split brain
     container_part, container_nodes = self.container_ring.get_nodes(
         self.account, self.container_name)
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for
             node, metadata in head_responses)
     self.assertTrue(
         len(found_policy_indexes) > 1,
         'primary nodes did not disagree about policy index %r' %
         head_responses)
     # find our object
     orig_policy_index = ts_policy_index = None
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(
             self.account, self.container_name, self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node, part, self.account, self.container_name,
                     self.object_name,
                     headers={'X-Backend-Storage-Policy-Index':
                              policy_index})
             except direct_client.ClientException as err:
                 if 'x-backend-timestamp' in err.http_headers:
                     ts_policy_index = policy_index
                     break
             else:
                 orig_policy_index = policy_index
                 break
     if not orig_policy_index:
         self.fail('Unable to find /%s/%s/%s in %r' % (
             self.account, self.container_name, self.object_name,
             found_policy_indexes))
     if not ts_policy_index:
         self.fail('Unable to find tombstone /%s/%s/%s in %r' % (
             self.account, self.container_name, self.object_name,
             found_policy_indexes))
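     # settle the container dbs, then let the reconciler process the
     # misplaced row; the newer tombstone should win in every policy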
     self.get_to_final_state()
     Manager(['container-reconciler']).once()
     # validate containers
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     new_found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for node,
             metadata in head_responses)
     self.assertTrue(len(new_found_policy_indexes) == 1,
                     'primary nodes disagree about policy index %r' %
                     dict((node['port'],
                          metadata['X-Backend-Storage-Policy-Index'])
                          for node, metadata in head_responses))
     expected_policy_index = new_found_policy_indexes.pop()
     self.assertEqual(orig_policy_index, expected_policy_index)
     # validate object fully deleted
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(
             self.account, self.container_name, self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node, part, self.account, self.container_name,
                     self.object_name,
                     headers={'X-Backend-Storage-Policy-Index':
                              policy_index})
             except direct_client.ClientException as err:
                 if err.http_status == HTTP_NOT_FOUND:
                     continue
                 raise
             else:
                 self.fail('Found /%s/%s/%s in %s on %s' % (
                     self.account, self.container_name, self.object_name,
                     orig_policy_index, node))
Example #28
0
    def authorize(self, req):
        ret = super(ZodiacAuth, self).authorize(req)
        if ret is None and req.method == "GET":
            # passed swauth rules, now check zodiac rules

            # split the path
            # this should be safe since the path was already validated in the
            # super call
            version, account, container, obj = split_path(req.path, 1, 4, True)

            # grab our acl to use
            acl = self.zodiac_acl

            # get the current zodiac sign for this access request
            access_sign = zodiac_sign_datetime(datetime.datetime.now())

            # get the client ip
            client_addr = get_remote_client(req)

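            # access is granted only if the client ip is listed in the acl
            # for the stored item's zodiac sign under the current sign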
            if container:
                # there is a container so let's try to get the timestamp
                container_nodes = self.app.container_ring.get_nodes(account, container)
                if container_nodes:
                    # direct head requests return a timestamp whereas calls
                    # to the proxy do not. this might not be the best thing
                    # to do. open to suggestions.
                    try:
                        container_meta = direct_head_container(
                            container_nodes[1][0], container_nodes[0], account, container
                        )
                    except ClientException:
                        return ret
                    container_date = datetime.datetime.fromtimestamp(float(container_meta["x-timestamp"]))
                    container_sign = zodiac_sign_datetime(container_date)

                    # ensure the container sign has access rules
                    if container_sign in acl and access_sign in acl[container_sign]:
                        if client_addr not in acl[container_sign][access_sign]:
                            ret = self.denied_response(req)
                    else:
                        # sign missing from acl rules or access sign not present
                        ret = self.denied_response(req)

                    if ret is None and obj:
                        # we passed the container permissions and there is an
                        # object.
                        # get the object's store sign and check permissions
                        obj_nodes = self.app.container_ring.get_nodes(account, container, obj)
                        if obj_nodes:
                            try:
                                obj_meta = direct_head_object(
                                    obj_nodes[1][0], obj_nodes[0], account, container, obj
                                )
                            except ClientException:
                                return ret
                            obj_date = datetime.datetime.fromtimestamp(float(obj_meta["x-timestamp"]))
                            obj_sign = zodiac_sign_datetime(obj_date)

                            # ensure the object sign has access rules
                            if obj_sign in acl and access_sign in acl[obj_sign]:
                                if client_addr not in acl[obj_sign][access_sign]:
                                    ret = self.denied_response(req)
                            else:
                                # object sign missing from acl rules or
                                # access sign not present
                                ret = self.denied_response(req)
        return ret
Example #29
0
    def test_reconciler_move_object_twice(self):
        # select some policies
        old_policy = random.choice(ENABLED_POLICIES)
        new_policy = random.choice([p for p in ENABLED_POLICIES
                                    if p != old_policy])

        # setup a split brain
        self.brain.stop_handoff_half()
        # get old_policy on two primaries
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.start_handoff_half()
        self.brain.stop_primary_half()
        # force a recreate on handoffs
        self.brain.put_container(policy_index=int(old_policy))
        self.brain.delete_container()
        self.brain.put_container(policy_index=int(new_policy))
        self.brain.put_object()  # populate memcache with new_policy
        self.brain.start_primary_half()

        # at this point two primaries have old policy
        container_part, container_nodes = self.container_ring.get_nodes(
            self.account, self.container_name)
        head_responses = []
        for node in container_nodes:
            metadata = direct_client.direct_head_container(
                node, container_part, self.account, self.container_name)
            head_responses.append((node, metadata))
        old_container_node_ids = [
            node['id'] for node, metadata in head_responses
            if int(old_policy) ==
            int(metadata['X-Backend-Storage-Policy-Index'])]
        self.assertEqual(2, len(old_container_node_ids))

        # hopefully memcache still has the new policy cached
        self.brain.put_object(headers={'x-object-meta-test': 'custom-meta'},
                              contents='VERIFY')
        # double-check object correctly written to new policy
        conf_files = []
        for server in Manager(['container-reconciler']).servers:
            conf_files.extend(server.conf_files())
        conf_file = conf_files[0]
        int_client = InternalClient(conf_file, 'probe-test', 3)
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
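        # ...and that it is absent from the old policy (only a 4xx response
        # is acceptable)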
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # shutdown the containers that know about the new policy
        self.brain.stop_handoff_half()

        # and get rows enqueued from old nodes
        for server_type in ('container-replicator', 'container-updater'):
            server = Manager([server_type])
            tuple(server.once(number=n + 1) for n in old_container_node_ids)

        # verify entry in the queue for the "misplaced" new_policy
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (new_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

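        # the reconciler moves the object into old_policy, the index the
        # running (old) container servers report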
        Manager(['container-reconciler']).once()

        # verify object in old_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # verify object is *not* in new_policy
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})

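        # once the handoff half replicates back in, the containers converge
        # on new_policy, so the copy now sitting in old_policy is enqueued as
        # misplaced again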
        self.get_to_final_state()

        # verify entry in the queue
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                expected = '%d:/%s/%s/%s' % (old_policy, self.account,
                                             self.container_name,
                                             self.object_name)
                self.assertEqual(obj['name'], expected)

        Manager(['container-reconciler']).once()

        # and now it flops back
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(new_policy)})
        int_client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})

        # make sure the queue is settled
        self.get_to_final_state()
        for container in int_client.iter_containers('.misplaced_objects'):
            for obj in int_client.iter_objects('.misplaced_objects',
                                               container['name']):
                self.fail('Found unexpected object %r in the queue' % obj)

        # verify that the object data read by external client is correct
        headers, data = self._get_object_patiently(int(new_policy))
        self.assertEqual('VERIFY', data)
        self.assertEqual('custom-meta', headers['x-object-meta-test'])
Example #30
0
    def _verify_account_reaped(self):
        for policy, container, obj in self.all_objects:
            # verify that any container deletes were at same timestamp
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            delete_times = set()
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get(
                        'X-Backend-DELETE-Timestamp')
                    # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    # Container replicas may not yet be deleted if we have a
                    # policy with object replicas < container replicas, so
                    # ignore successful HEAD. We'll check for all replicas to
                    # be deleted again after running the replicators.
                    pass
            self.assertEqual(1, len(delete_times), delete_times)

            # verify that all object deletes were at same timestamp
            part, nodes = policy.object_ring.get_nodes(self.account,
                                                       container, obj)
            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
            delete_times = set()
            for node in nodes:
                try:
                    direct_get_object(node, part, self.account,
                                      container, obj, headers=headers)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get('X-Backend-Timestamp')
                    # 'X-Backend-Timestamp' confirms obj was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
            self.assertEqual(1, len(delete_times))

        # run replicators and updaters
        self.get_to_final_state()

        for policy, container, obj in self.all_objects:
            # verify that ALL container replicas are now deleted
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            delete_times = set()
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get(
                        'X-Backend-DELETE-Timestamp')
                    # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s on %r' %
                              (self.account, container, cnode))
            self.assertEqual(1, len(delete_times))

            # sanity check that object state is still consistent...
            part, nodes = policy.object_ring.get_nodes(self.account,
                                                       container, obj)
            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
            delete_times = set()
            for node in nodes:
                try:
                    direct_get_object(node, part, self.account,
                                      container, obj, headers=headers)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get('X-Backend-Timestamp')
                    # 'X-Backend-Timestamp' confirms obj was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
            self.assertEqual(1, len(delete_times))
Example #31
0
    def test_sync(self):
        all_objects = []
        # upload some containers
        for policy in ENABLED_POLICIES:
            container = 'container-%s-%s' % (policy.name, uuid.uuid4())
            client.put_container(self.url,
                                 self.token,
                                 container,
                                 headers={'X-Storage-Policy': policy.name})
            obj = 'object-%s' % uuid.uuid4()
            body = 'test-body'
            client.put_object(self.url, self.token, container, obj, body)
            all_objects.append((policy, container, obj))

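        # run the container-updater so the account stats reflect the new
        # containers and objects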
        Manager(['container-updater']).once()

        headers = client.head_account(self.url, self.token)

        self.assertEqual(int(headers['x-account-container-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-object-count']),
                         len(ENABLED_POLICIES))
        self.assertEqual(int(headers['x-account-bytes-used']),
                         len(ENABLED_POLICIES) * len(body))

        part, nodes = self.account_ring.get_nodes(self.account)
        for node in nodes:
            direct_delete_account(node, part, self.account)

        # run the reaper
        Manager(['account-reaper']).once()

        for policy, container, obj in all_objects:
            # verify that any container deletes were at same timestamp
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            delete_times = set()
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get(
                        'X-Backend-DELETE-Timestamp')
                    # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    # Container replicas may not yet be deleted if we have a
                    # policy with object replicas < container replicas, so
                    # ignore successful HEAD. We'll check for all replicas to
                    # be deleted again after running the replicators.
                    pass
            self.assertEqual(1, len(delete_times), delete_times)

            # verify that all object deletes were at same timestamp
            object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
            part, nodes = object_ring.get_nodes(self.account, container, obj)
            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
            delete_times = set()
            for node in nodes:
                try:
                    direct_get_object(node,
                                      part,
                                      self.account,
                                      container,
                                      obj,
                                      headers=headers)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get('X-Backend-Timestamp')
                    # 'X-Backend-Timestamp' confirms obj was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
            self.assertEqual(1, len(delete_times))

        # run replicators and updaters
        self.get_to_final_state()

        for policy, container, obj in all_objects:
            # verify that ALL container replicas are now deleted
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            delete_times = set()
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get(
                        'X-Backend-DELETE-Timestamp')
                    # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s on %r' %
                              (self.account, container, cnode))
            self.assertEqual(1, len(delete_times))

            # sanity check that object state is still consistent...
            object_ring = POLICIES.get_object_ring(policy.idx, '/etc/swift/')
            part, nodes = object_ring.get_nodes(self.account, container, obj)
            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
            delete_times = set()
            for node in nodes:
                try:
                    direct_get_object(node,
                                      part,
                                      self.account,
                                      container,
                                      obj,
                                      headers=headers)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get('X-Backend-Timestamp')
                    # 'X-Backend-Timestamp' confirms obj was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
            self.assertEqual(1, len(delete_times))
Example #32
0
    def _verify_account_reaped(self):
        for policy, container, obj in self.all_objects:
            # verify that any container deletes were at same timestamp
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            delete_times = set()
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get(
                        'X-Backend-DELETE-Timestamp')
                    # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    # Container replicas may not yet be deleted if we have a
                    # policy with object replicas < container replicas, so
                    # ignore successful HEAD. We'll check for all replicas to
                    # be deleted again after running the replicators.
                    pass
            self.assertEqual(1, len(delete_times), delete_times)

            # verify that all object deletes were at same timestamp
            part, nodes = policy.object_ring.get_nodes(self.account, container,
                                                       obj)
            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
            delete_times = set()
            for node in nodes:
                try:
                    direct_get_object(node,
                                      part,
                                      self.account,
                                      container,
                                      obj,
                                      headers=headers)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get('X-Backend-Timestamp')
                    # 'X-Backend-Timestamp' confirms obj was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
            self.assertEqual(1, len(delete_times))

        # run replicators and updaters
        self.get_to_final_state()

        for policy, container, obj in self.all_objects:
            # verify that ALL container replicas are now deleted
            cpart, cnodes = self.container_ring.get_nodes(
                self.account, container)
            delete_times = set()
            for cnode in cnodes:
                try:
                    direct_head_container(cnode, cpart, self.account,
                                          container)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get(
                        'X-Backend-DELETE-Timestamp')
                    # 'X-Backend-DELETE-Timestamp' confirms it was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s on %r' %
                              (self.account, container, cnode))
            self.assertEqual(1, len(delete_times))

            # sanity check that object state is still consistent...
            part, nodes = policy.object_ring.get_nodes(self.account, container,
                                                       obj)
            headers = {'X-Backend-Storage-Policy-Index': int(policy)}
            delete_times = set()
            for node in nodes:
                try:
                    direct_get_object(node,
                                      part,
                                      self.account,
                                      container,
                                      obj,
                                      headers=headers)
                except ClientException as err:
                    self.assertEqual(err.http_status, 404)
                    delete_time = err.http_headers.get('X-Backend-Timestamp')
                    # 'X-Backend-Timestamp' confirms obj was deleted
                    self.assertTrue(delete_time)
                    delete_times.add(delete_time)
                else:
                    self.fail('Found un-reaped /%s/%s/%s on %r in %s!' %
                              (self.account, container, obj, node, policy))
            self.assertEqual(1, len(delete_times))
Example #33
0
 def test_reconcile_delete(self):
     # generic split brain
     self.brain.stop_primary_half()
     self.brain.put_container()
     self.brain.put_object()
     self.brain.start_primary_half()
     self.brain.stop_handoff_half()
     self.brain.put_container()
     self.brain.delete_object()
     self.brain.start_handoff_half()
     # make sure we have some manner of split brain
     container_part, container_nodes = self.container_ring.get_nodes(
         self.account, self.container_name)
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for
             node, metadata in head_responses)
     self.assertTrue(
         len(found_policy_indexes) > 1,
         'primary nodes did not disagree about policy index %r' %
         head_responses)
     # find our object
     orig_policy_index = ts_policy_index = None
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(self.account,
                                             self.container_name,
                                             self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node,
                     part,
                     self.account,
                     self.container_name,
                     self.object_name,
                     headers={
                         'X-Backend-Storage-Policy-Index': policy_index
                     })
             except direct_client.ClientException as err:
                 if 'x-backend-timestamp' in err.http_headers:
                     ts_policy_index = policy_index
                     break
             else:
                 orig_policy_index = policy_index
                 break
     if not orig_policy_index:
         self.fail('Unable to find /%s/%s/%s in %r' %
                   (self.account, self.container_name, self.object_name,
                    found_policy_indexes))
     if not ts_policy_index:
         self.fail('Unable to find tombstone /%s/%s/%s in %r' %
                   (self.account, self.container_name, self.object_name,
                    found_policy_indexes))
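     # settle the container dbs, then let the reconciler process the
     # misplaced row; the newer tombstone should win in every policy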
     get_to_final_state()
     Manager(['container-reconciler']).once()
     # validate containers
     head_responses = []
     for node in container_nodes:
         metadata = direct_client.direct_head_container(
             node, container_part, self.account, self.container_name)
         head_responses.append((node, metadata))
     new_found_policy_indexes = \
         set(metadata['X-Backend-Storage-Policy-Index'] for node,
             metadata in head_responses)
     self.assertTrue(
         len(new_found_policy_indexes) == 1,
         'primary nodes disagree about policy index %r' % dict(
             (node['port'], metadata['X-Backend-Storage-Policy-Index'])
             for node, metadata in head_responses))
     expected_policy_index = new_found_policy_indexes.pop()
     self.assertEqual(orig_policy_index, expected_policy_index)
     # validate object fully deleted
     for policy_index in found_policy_indexes:
         object_ring = POLICIES.get_object_ring(policy_index, '/etc/swift')
         part, nodes = object_ring.get_nodes(self.account,
                                             self.container_name,
                                             self.object_name)
         for node in nodes:
             try:
                 direct_client.direct_head_object(
                     node,
                     part,
                     self.account,
                     self.container_name,
                     self.object_name,
                     headers={
                         'X-Backend-Storage-Policy-Index': policy_index
                     })
             except direct_client.ClientException as err:
                 if err.http_status == HTTP_NOT_FOUND:
                     continue
                 raise
             else:
                 self.fail('Found /%s/%s/%s in %s on %s' %
                           (self.account, self.container_name,
                            self.object_name, orig_policy_index, node))