Example #1
    def test_run_once_with_disk_unmounted(self, mock_ismount):
        mock_ismount.return_value = False
        cu = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'})
        cu.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(0))
        os.mkdir(async_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # mount_check == False means no call to ismount
        self.assertEqual([], mock_ismount.mock_calls)

        cu = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'TrUe',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'}, logger=self.logger)
        odd_dir = os.path.join(async_dir, 'not really supposed '
                               'to be here')
        os.mkdir(odd_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertTrue(os.path.exists(odd_dir))  # skipped - not mounted!
        # mount_check == True means ismount was checked
        self.assertEqual([
            mock.call(self.sda1),
        ], mock_ismount.mock_calls)
        self.assertEqual(cu.logger.get_increment_counts(), {'errors': 1})
Example #2
    def test_run_once_with_disk_unmounted(self, mock_ismount):
        mock_ismount.return_value = False
        cu = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'})
        cu.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
        os.mkdir(async_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # mount_check == False means no call to ismount
        self.assertEqual([], mock_ismount.mock_calls)

        cu = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'TrUe',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'}, logger=self.logger)
        odd_dir = os.path.join(async_dir, 'not really supposed '
                               'to be here')
        os.mkdir(odd_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertTrue(os.path.exists(odd_dir))  # skipped - not mounted!
        # mount_check == True means ismount was checked
        self.assertEqual([
            mock.call(self.sda1),
        ], mock_ismount.mock_calls)
        self.assertEqual(cu.logger.get_increment_counts(), {'errors': 1})
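A side note on the 'false' / 'TrUe' values used for mount_check in these
tests: Swift parses boolean config strings case-insensitively. A minimal
sketch, assuming swift.common.utils.config_true_value behaves as in current
Swift releases:

from swift.common.utils import config_true_value

# '1', 'true', 'yes', 'on', 't', 'y' (any case) count as true.
config_true_value('false')  # False: device dirs are used without ismount
config_true_value('TrUe')   # True: the ismount/check_drive path is taken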
Example #3
    def test_obj_put_async_root_update_redirected_previous_success(self):
        policies = list(POLICIES)
        random.shuffle(policies)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)
        dfmanager = DiskFileManager(conf, daemon.logger)

        ts_obj = next(self.ts_iter)
        self._write_async_update(dfmanager, ts_obj, policies[0])
        orig_async_path, orig_async_data = self._check_async_file(async_dir)

        # run once
        with mocked_http_conn(
                507, 200, 507) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()

        self._check_update_requests(conn.requests, ts_obj, policies[0])
        self.assertEqual(['/sda1/0/a/c/o'] * 3,
                         [req['path'] for req in conn.requests])
        self.assertEqual(
            {'failures': 1, 'async_pendings': 1},
            daemon.logger.get_increment_counts())
        async_path, async_data = self._check_async_file(async_dir)
        self.assertEqual(dict(orig_async_data, successes=[1]), async_data)

        # run again - expect 3 redirected updates despite previous success
        ts_redirect = next(self.ts_iter)
        resp_headers_1 = {'Location': '/.shards_a/c_shard_1/o',
                          'X-Backend-Redirect-Timestamp': ts_redirect.internal}
        fake_responses = (
            # 2 root updates redirected, then 3 updates to the shard succeed
            [(301, resp_headers_1)] * 2 + [(200, {})] * 3)
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(
                *fake_status_codes, headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()

        self._check_update_requests(conn.requests[:2], ts_obj, policies[0])
        self._check_update_requests(conn.requests[2:], ts_obj, policies[0])
        root_part = daemon.container_ring.get_part('a/c')
        shard_1_part = daemon.container_ring.get_part('.shards_a/c_shard_1')
        self.assertEqual(
            ['/sda1/%s/a/c/o' % root_part] * 2 +
            ['/sda1/%s/.shards_a/c_shard_1/o' % shard_1_part] * 3,
            [req['path'] for req in conn.requests])
        self.assertEqual(
            {'redirects': 1, 'successes': 1, 'failures': 1, 'unlinks': 1,
             'async_pendings': 1},
            daemon.logger.get_increment_counts())
        self.assertFalse(os.listdir(async_dir))  # no async file
Example #4
    def test_obj_put_legacy_updates(self):
        ts = (normalize_timestamp(t) for t in itertools.count(int(time())))
        policy = POLICIES.get_by_index(0)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        async_dir = os.path.join(self.sda1, get_async_dir(policy))
        os.mkdir(async_dir)

        account, container, obj = 'a', 'c', 'o'
        # write an async
        for op in ('PUT', 'DELETE'):
            self.logger._clear()
            daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
            dfmanager = DiskFileManager(conf, daemon.logger)
            # don't include storage-policy-index in headers_out pickle
            headers_out = HeaderKeyDict({
                'x-size': 0,
                'x-content-type': 'text/plain',
                'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
                'x-timestamp': next(ts),
            })
            data = {
                'op': op,
                'account': account,
                'container': container,
                'obj': obj,
                'headers': headers_out
            }
            dfmanager.pickle_async_update(self.sda1, account, container, obj,
                                          data, next(ts), policy)

            request_log = []

            def capture(*args, **kwargs):
                request_log.append((args, kwargs))

            # run once
            fake_status_codes = [200, 200, 200]
            with mocked_http_conn(*fake_status_codes, give_connect=capture):
                daemon.run_once()
            self.assertEqual(len(fake_status_codes), len(request_log))
            for request_args, request_kwargs in request_log:
                ip, part, method, path, headers, qs, ssl = request_args
                self.assertEqual(method, op)
                self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                                 str(int(policy)))
            self.assertEqual(daemon.logger.get_increment_counts(), {
                'successes': 1,
                'unlinks': 1,
                'async_pendings': 1
            })
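The test above deliberately omits X-Backend-Storage-Policy-Index from the
pickled headers, yet still expects the outgoing requests to carry the right
index: the policy is recoverable from the name of the async directory the
pending file sits in. A sketch of that mapping, assuming Swift's
split_policy_string helper from swift.common.storage_policy:

from swift.common.storage_policy import split_policy_string

# Returns (base_name, policy); the directory suffix encodes the policy
# index (a PolicyError is raised if no such policy is configured).
base, policy = split_policy_string('async_pending')    # policy index 0
base, policy = split_policy_string('async_pending-1')  # policy index 1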
Example #5
    def test_obj_put_async_root_update_redirected(self):
        policies = list(POLICIES)
        random.shuffle(policies)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)
        dfmanager = DiskFileManager(conf, daemon.logger)

        ts_obj = next(self.ts_iter)
        self._write_async_update(dfmanager, ts_obj, policies[0])

        # run once
        ts_redirect_1 = next(self.ts_iter)
        ts_redirect_2 = next(self.ts_iter)
        fake_responses = [
            # first round of update attempts, newest redirect should be chosen
            (200, {}),
            (301, {
                'Location': '/.shards_a/c_shard_new/o',
                'X-Backend-Redirect-Timestamp': ts_redirect_2.internal
            }),
            (301, {
                'Location': '/.shards_a/c_shard_old/o',
                'X-Backend-Redirect-Timestamp': ts_redirect_1.internal
            }),
            # second round of update attempts
            (200, {}),
            (200, {}),
            (200, {}),
        ]
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(*fake_status_codes,
                              headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()

        self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
        self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
        self.assertEqual(['/sda1/0/a/c/o'] * 3 +
                         ['/sda1/0/.shards_a/c_shard_new/o'] * 3,
                         [req['path'] for req in conn.requests])
        self.assertEqual(
            {
                'redirects': 1,
                'successes': 1,
                'unlinks': 1,
                'async_pendings': 1
            }, daemon.logger.get_increment_counts())
        self.assertFalse(os.listdir(async_dir))  # no async file
Example #6
    def test_sweep_logs_multiple_policies(self):
        for policy in _mocked_policies:
            asyncdir = os.path.join(self.sda1, get_async_dir(policy.idx))
            prefix_dir = os.path.join(asyncdir, 'abc')
            mkpath(prefix_dir)

            for o, t in [('abc', 123), ('def', 234), ('ghi', 345)]:
                ohash = hash_path('account', 'container%d' % policy.idx, o)
                o_path = os.path.join(prefix_dir,
                                      ohash + '-' + normalize_timestamp(t))
                write_pickle({}, o_path)

        class MockObjectUpdater(object_updater.ObjectUpdater):
            def process_object_update(self, update_path, device, policy):
                os.unlink(update_path)
                self.stats.successes += 1
                self.stats.unlinks += 1

        logger = FakeLogger()
        ou = MockObjectUpdater(
            {
                'devices': self.devices_dir,
                'mount_check': 'false',
                'swift_dir': self.testdir,
                'interval': '1',
                'concurrency': '1',
                'report_interval': '10.0',
                'node_timeout': '5'
            },
            logger=logger)

        now = [time()]

        def mock_time():
            rv = now[0]
            now[0] += 0.01
            return rv

        with mock.patch('swift.obj.updater.time',
                        mock.MagicMock(time=mock_time)):
            ou.object_sweep(self.sda1)

        completion_lines = [
            l for l in logger.get_lines_for_level('info')
            if "sweep complete" in l
        ]

        self.assertEqual(len(completion_lines), 1)
        self.assertIn("sweep complete", completion_lines[0])
        self.assertIn(
            "6 successes, 0 failures, 0 quarantines, 6 unlinks, 0 errors, "
            "0 redirects", completion_lines[0])
Example #7
 def pickle_async_update(self, device, account, container, obj, data, timestamp, policy):
     # This method invokes swiftonfile's writepickle method.
     # Is patching just write_pickle and calling parent method better ?
     device_path = self.construct_dev_path(device)
     async_dir = os.path.join(device_path, get_async_dir(policy))
     ohash = hash_path(account, container, obj)
     self.threadpools[device].run_in_thread(
         write_pickle,
         data,
         os.path.join(async_dir, ohash[-3:], ohash + "-" + normalize_timestamp(timestamp)),
         os.path.join(device_path, "tmp"),
     )
     self.logger.increment("async_pendings")
Example #8
 def pickle_async_update(self, device, account, container, obj, data,
                         timestamp, policy):
     # This should be using the JSON blob stuff instead of a pickle.
     # Didn't we deprecate it?
     device_path = self.construct_dev_path(device)
     async_dir = os.path.join(device_path, get_async_dir(policy))
     ohash = hash_path(account, container, obj)
     write_pickle(
         data,
         os.path.join(async_dir, ohash[-3:], ohash + '-' +
                      normalize_timestamp(timestamp)),
         os.path.join(device_path, 'tmp'))
     self.logger.increment('async_pendings')
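For reference, get_async_dir maps a storage policy (or a bare index) to the
per-policy async-pending directory name. The values below follow Swift's
naming convention, given a policy with that index is configured:

from swift.obj.diskfile import get_async_dir

get_async_dir(0)  # 'async_pending'   -- legacy / policy-0 directory
get_async_dir(1)  # 'async_pending-1' -- index suffix for non-zero policies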
Example #9
 def pickle_async_update(self, device, account, container, obj, data,
                         timestamp, policy):
     # This method invokes swiftonfile's writepickle method.
     # Is patching just write_pickle and calling parent method better ?
     device_path = self.construct_dev_path(device)
     async_dir = os.path.join(device_path, get_async_dir(policy))
     ohash = hash_path(account, container, obj)
     self.threadpools[device].run_in_thread(
         write_pickle, data,
         os.path.join(async_dir, ohash[-3:],
                      ohash + '-' + normalize_timestamp(timestamp)),
         os.path.join(device_path, 'tmp'))
     self.logger.increment('async_pendings')
Example #10
    def test_obj_put_async_updates(self):
        ts = (normalize_timestamp(t) for t in
              itertools.count(int(time())))
        policy = random.choice(list(POLICIES))
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policy))
        os.mkdir(async_dir)

        # write an async
        dfmanager = DiskFileManager(conf, daemon.logger)
        account, container, obj = 'a', 'c', 'o'
        op = 'PUT'
        headers_out = swob.HeaderKeyDict({
            'x-size': 0,
            'x-content-type': 'text/plain',
            'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
            'x-timestamp': next(ts),
            'X-Backend-Storage-Policy-Index': int(policy),
        })
        data = {'op': op, 'account': account, 'container': container,
                'obj': obj, 'headers': headers_out}
        dfmanager.pickle_async_update(self.sda1, account, container, obj,
                                      data, next(ts), policy)

        request_log = []

        def capture(*args, **kwargs):
            request_log.append((args, kwargs))

        # run once
        fake_status_codes = [
            200,  # object update success
            200,  # object update success
            200,  # object update success
        ]
        with mocked_http_conn(*fake_status_codes, give_connect=capture):
            daemon.run_once()
        self.assertEqual(len(fake_status_codes), len(request_log))
        for request_args, request_kwargs in request_log:
            ip, part, method, path, headers, qs, ssl = request_args
            self.assertEqual(method, 'PUT')
            self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                             str(int(policy)))
        self.assertEqual(daemon.logger.get_increment_counts(),
                         {'successes': 1, 'unlinks': 1, 'async_pendings': 1})
Example #11
    def test_sweep_logs_multiple_policies(self):
        for policy in _mocked_policies:
            asyncdir = os.path.join(self.sda1, get_async_dir(policy.idx))
            prefix_dir = os.path.join(asyncdir, 'abc')
            mkpath(prefix_dir)

            for o, t in [('abc', 123), ('def', 234), ('ghi', 345)]:
                ohash = hash_path('account', 'container%d' % policy.idx, o)
                o_path = os.path.join(prefix_dir, ohash + '-' +
                                      normalize_timestamp(t))
                write_pickle({}, o_path)

        class MockObjectUpdater(object_updater.ObjectUpdater):
            def process_object_update(self, update_path, device, policy):
                os.unlink(update_path)
                self.stats.successes += 1
                self.stats.unlinks += 1

        logger = FakeLogger()
        ou = MockObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'report_interval': '10.0',
            'node_timeout': '5'}, logger=logger)

        now = [time()]

        def mock_time():
            rv = now[0]
            now[0] += 0.01
            return rv

        with mock.patch('swift.obj.updater.time',
                        mock.MagicMock(time=mock_time)):
            ou.object_sweep(self.sda1)

        completion_lines = [l for l in logger.get_lines_for_level('info')
                            if "sweep complete" in l]

        self.assertEqual(len(completion_lines), 1)
        self.assertIn("sweep complete", completion_lines[0])
        self.assertIn(
            "6 successes, 0 failures, 0 quarantines, 6 unlinks, 0 errors, "
            "0 redirects",
            completion_lines[0])
Example #12
    def test_obj_put_async_root_update_redirected(self):
        policies = list(POLICIES)
        random.shuffle(policies)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)
        dfmanager = DiskFileManager(conf, daemon.logger)

        ts_obj = next(self.ts_iter)
        self._write_async_update(dfmanager, ts_obj, policies[0])

        # run once
        ts_redirect_1 = next(self.ts_iter)
        ts_redirect_2 = next(self.ts_iter)
        fake_responses = [
            # first round of update attempts, newest redirect should be chosen
            (200, {}),
            (301, {'Location': '/.shards_a/c_shard_new/o',
                   'X-Backend-Redirect-Timestamp': ts_redirect_2.internal}),
            (301, {'Location': '/.shards_a/c_shard_old/o',
                   'X-Backend-Redirect-Timestamp': ts_redirect_1.internal}),
            # second round of update attempts
            (200, {}),
            (200, {}),
            (200, {}),
        ]
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(
                *fake_status_codes, headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()

        self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
        self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
        self.assertEqual(['/sda1/0/a/c/o'] * 3 +
                         ['/sda1/0/.shards_a/c_shard_new/o'] * 3,
                         [req['path'] for req in conn.requests])
        self.assertEqual(
            {'redirects': 1, 'successes': 1,
             'unlinks': 1, 'async_pendings': 1},
            daemon.logger.get_increment_counts())
        self.assertFalse(os.listdir(async_dir))  # no async file
Example #13
    def test_obj_put_legacy_updates(self):
        ts = (normalize_timestamp(t) for t in itertools.count(int(time())))
        policy = POLICIES.get_by_index(0)
        # setup updater
        conf = {"devices": self.devices_dir, "mount_check": "false", "swift_dir": self.testdir}
        async_dir = os.path.join(self.sda1, get_async_dir(policy.idx))
        os.mkdir(async_dir)

        account, container, obj = "a", "c", "o"
        # write an async
        for op in ("PUT", "DELETE"):
            self.logger._clear()
            daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
            dfmanager = DiskFileManager(conf, daemon.logger)
            # don't include storage-policy-index in headers_out pickle
            headers_out = swob.HeaderKeyDict(
                {
                    "x-size": 0,
                    "x-content-type": "text/plain",
                    "x-etag": "d41d8cd98f00b204e9800998ecf8427e",
                    "x-timestamp": ts.next(),
                }
            )
            data = {"op": op, "account": account, "container": container, "obj": obj, "headers": headers_out}
            dfmanager.pickle_async_update(self.sda1, account, container, obj, data, next(ts), policy.idx)

            request_log = []

            def capture(*args, **kwargs):
                request_log.append((args, kwargs))

            # run once
            fake_status_codes = [200, 200, 200]
            with mocked_http_conn(*fake_status_codes, give_connect=capture):
                daemon.run_once()
            self.assertEqual(len(fake_status_codes), len(request_log))
            for request_args, request_kwargs in request_log:
                ip, part, method, path, headers, qs, ssl = request_args
                self.assertEqual(method, op)
                self.assertEqual(headers["X-Backend-Storage-Policy-Index"], str(policy.idx))
            self.assertEqual(daemon.logger.get_increment_counts(), {"successes": 1, "unlinks": 1, "async_pendings": 1})
Example #14
    def test_run_once_with_disk_unmounted(self, mock_check_drive):
        mock_check_drive.side_effect = ValueError
        ou = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'
        })
        ou.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
        os.mkdir(async_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # each run calls check_device
        self.assertEqual([
            mock.call(self.devices_dir, 'sda1', False),
            mock.call(self.devices_dir, 'sda1', False),
        ], mock_check_drive.mock_calls)
        mock_check_drive.reset_mock()

        ou = object_updater.ObjectUpdater(
            {
                'devices': self.devices_dir,
                'mount_check': 'TrUe',
                'swift_dir': self.testdir,
                'interval': '1',
                'concurrency': '1',
                'node_timeout': '15'
            },
            logger=self.logger)
        odd_dir = os.path.join(async_dir, 'not really supposed ' 'to be here')
        os.mkdir(odd_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertTrue(os.path.exists(odd_dir))  # skipped - not mounted!
        self.assertEqual([
            mock.call(self.devices_dir, 'sda1', True),
        ], mock_check_drive.mock_calls)
        self.assertEqual(ou.logger.get_increment_counts(), {})
Example #15
    def _check_obj_put_async_update_bad_redirect_headers(self, headers):
        policies = list(POLICIES)
        random.shuffle(policies)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)
        dfmanager = DiskFileManager(conf, daemon.logger)

        ts_obj = next(self.ts_iter)
        self._write_async_update(dfmanager, ts_obj, policies[0])
        orig_async_path, orig_async_data = self._check_async_file(async_dir)

        fake_responses = [
            (301, headers),
            (301, headers),
            (301, headers),
        ]
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(*fake_status_codes,
                              headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()

        self._check_update_requests(conn.requests, ts_obj, policies[0])
        self.assertEqual(['/sda1/0/a/c/o'] * 3,
                         [req['path'] for req in conn.requests])
        self.assertEqual({
            'failures': 1,
            'async_pendings': 1
        }, daemon.logger.get_increment_counts())
        # async file still intact
        async_path, async_data = self._check_async_file(async_dir)
        self.assertEqual(orig_async_path, async_path)
        self.assertEqual(orig_async_data, async_data)
        return daemon
Example #16
    def _check_obj_put_async_update_bad_redirect_headers(self, headers):
        policies = list(POLICIES)
        random.shuffle(policies)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)
        dfmanager = DiskFileManager(conf, daemon.logger)

        ts_obj = next(self.ts_iter)
        self._write_async_update(dfmanager, ts_obj, policies[0])
        orig_async_path, orig_async_data = self._check_async_file(async_dir)

        fake_responses = [
            (301, headers),
            (301, headers),
            (301, headers),
        ]
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(
                *fake_status_codes, headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()

        self._check_update_requests(conn.requests, ts_obj, policies[0])
        self.assertEqual(['/sda1/0/a/c/o'] * 3,
                         [req['path'] for req in conn.requests])
        self.assertEqual(
            {'failures': 1, 'async_pendings': 1},
            daemon.logger.get_increment_counts())
        # async file still intact
        async_path, async_data = self._check_async_file(async_dir)
        self.assertEqual(orig_async_path, async_path)
        self.assertEqual(orig_async_data, async_data)
        return daemon
Example #17
    def test_run_once_with_disk_unmounted(self, mock_ismount):
        mock_ismount.return_value = False
        cu = object_updater.ObjectUpdater(
            {
                "devices": self.devices_dir,
                "mount_check": "false",
                "swift_dir": self.testdir,
                "interval": "1",
                "concurrency": "1",
                "node_timeout": "15",
            }
        )
        cu.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(0))
        os.mkdir(async_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # mount_check == False means no call to ismount
        self.assertEqual([], mock_ismount.mock_calls)

        cu = object_updater.ObjectUpdater(
            {
                "devices": self.devices_dir,
                "mount_check": "TrUe",
                "swift_dir": self.testdir,
                "interval": "1",
                "concurrency": "1",
                "node_timeout": "15",
            },
            logger=self.logger,
        )
        odd_dir = os.path.join(async_dir, "not really supposed " "to be here")
        os.mkdir(odd_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertTrue(os.path.exists(odd_dir))  # skipped - not mounted!
        # mount_check == True means ismount was checked
        self.assertEqual([mock.call(self.sda1)], mock_ismount.mock_calls)
        self.assertEqual(cu.logger.get_increment_counts(), {"errors": 1})
Example #18
    def test_run_once_with_disk_unmounted(self, mock_check_drive):
        mock_check_drive.side_effect = ValueError
        ou = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'})
        ou.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
        os.mkdir(async_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # each run calls check_device
        self.assertEqual([
            mock.call(self.devices_dir, 'sda1', False),
            mock.call(self.devices_dir, 'sda1', False),
        ], mock_check_drive.mock_calls)
        mock_check_drive.reset_mock()

        ou = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'TrUe',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'}, logger=self.logger)
        odd_dir = os.path.join(async_dir, 'not really supposed '
                               'to be here')
        os.mkdir(odd_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertTrue(os.path.exists(odd_dir))  # skipped - not mounted!
        self.assertEqual([
            mock.call(self.devices_dir, 'sda1', True),
        ], mock_check_drive.mock_calls)
        self.assertEqual(ou.logger.get_increment_counts(), {})
Example #19
    def test_run_once(self, mock_check_drive):
        mock_check_drive.side_effect = lambda r, d, mc: os.path.join(r, d)
        ou = object_updater.ObjectUpdater(
            {
                'devices': self.devices_dir,
                'mount_check': 'false',
                'swift_dir': self.testdir,
                'interval': '1',
                'concurrency': '1',
                'node_timeout': '15'
            },
            logger=self.logger)
        ou.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
        os.mkdir(async_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # each run calls check_device
        self.assertEqual([
            mock.call(self.devices_dir, 'sda1', False),
            mock.call(self.devices_dir, 'sda1', False),
        ], mock_check_drive.mock_calls)
        mock_check_drive.reset_mock()

        ou = object_updater.ObjectUpdater(
            {
                'devices': self.devices_dir,
                'mount_check': 'TrUe',
                'swift_dir': self.testdir,
                'interval': '1',
                'concurrency': '1',
                'node_timeout': '15'
            },
            logger=self.logger)
        odd_dir = os.path.join(async_dir, 'not really supposed ' 'to be here')
        os.mkdir(odd_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertEqual([
            mock.call(self.devices_dir, 'sda1', True),
        ], mock_check_drive.mock_calls)

        ohash = hash_path('a', 'c', 'o')
        odir = os.path.join(async_dir, ohash[-3:])
        mkdirs(odir)
        older_op_path = os.path.join(
            odir, '%s-%s' % (ohash, normalize_timestamp(time() - 1)))
        op_path = os.path.join(odir,
                               '%s-%s' % (ohash, normalize_timestamp(time())))
        for path in (op_path, older_op_path):
            with open(path, 'wb') as async_pending:
                pickle.dump(
                    {
                        'op': 'PUT',
                        'account': 'a',
                        'container': 'c',
                        'obj': 'o',
                        'headers': {
                            'X-Container-Timestamp': normalize_timestamp(0)
                        }
                    }, async_pending)
        ou.run_once()
        self.assertFalse(os.path.exists(older_op_path))
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(), {
            'failures': 1,
            'unlinks': 1
        })
        self.assertIsNone(pickle.load(open(op_path, 'rb')).get('successes'))

        bindsock = listen_zero()

        def accepter(sock, return_code):
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEqual(inc.readline(),
                                     'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
                    headers = HeaderKeyDict()
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0]] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assertTrue('x-container-timestamp' in headers)
                    self.assertTrue(
                        'X-Backend-Storage-Policy-Index' in headers)
            except BaseException as err:
                return err
            return None

        def accept(return_codes):
            try:
                events = []
                for code in return_codes:
                    with Timeout(3):
                        sock, addr = bindsock.accept()
                        events.append(spawn(accepter, sock, code))
                for event in events:
                    err = event.wait()
                    if err:
                        raise err
            except BaseException as err:
                return err
            return None

        event = spawn(accept, [201, 500, 500])
        for dev in ou.get_container_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]

        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(), {'failures': 1})
        self.assertEqual(
            [0], pickle.load(open(op_path, 'rb')).get('successes'))

        event = spawn(accept, [404, 201])
        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(), {'failures': 1})
        self.assertEqual(
            [0, 2], pickle.load(open(op_path, 'rb')).get('successes'))

        event = spawn(accept, [201])
        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err

        # we remove the async_pending and its containing suffix dir, but not
        # anything above that
        self.assertFalse(os.path.exists(op_path))
        self.assertFalse(os.path.exists(os.path.dirname(op_path)))
        self.assertTrue(
            os.path.exists(os.path.dirname(os.path.dirname(op_path))))
        self.assertEqual(ou.logger.get_increment_counts(), {
            'unlinks': 1,
            'successes': 1
        })
Example #20
    def test_obj_put_async_update_redirection_loop(self):
        policies = list(POLICIES)
        random.shuffle(policies)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)
        dfmanager = DiskFileManager(conf, daemon.logger)

        ts_obj = next(self.ts_iter)
        self._write_async_update(dfmanager, ts_obj, policies[0])
        orig_async_path, orig_async_data = self._check_async_file(async_dir)

        # run once
        ts_redirect = next(self.ts_iter)

        resp_headers_1 = {'Location': '/.shards_a/c_shard_1/o',
                          'X-Backend-Redirect-Timestamp': ts_redirect.internal}
        resp_headers_2 = {'Location': '/.shards_a/c_shard_2/o',
                          'X-Backend-Redirect-Timestamp': ts_redirect.internal}
        fake_responses = (
            # 1st round of redirects, 2nd round of redirects
            [(301, resp_headers_1)] * 3 + [(301, resp_headers_2)] * 3)
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(
                *fake_status_codes, headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()
        self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
        self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
        # only *one* set of redirected requests is attempted per cycle
        root_part = daemon.container_ring.get_part('a/c')
        shard_1_part = daemon.container_ring.get_part('.shards_a/c_shard_1')
        shard_2_part = daemon.container_ring.get_part('.shards_a/c_shard_2')
        shard_3_part = daemon.container_ring.get_part('.shards_a/c_shard_3')
        self.assertEqual(['/sda1/%s/a/c/o' % root_part] * 3 +
                         ['/sda1/%s/.shards_a/c_shard_1/o' % shard_1_part] * 3,
                         [req['path'] for req in conn.requests])
        self.assertEqual(
            {'redirects': 2, 'async_pendings': 1},
            daemon.logger.get_increment_counts())
        # update failed, we still have pending file with most recent redirect
        # response Location header value added to data
        async_path, async_data = self._check_async_file(async_dir)
        self.assertEqual(orig_async_path, async_path)
        self.assertEqual(
            dict(orig_async_data, container_path='.shards_a/c_shard_2',
                 redirect_history=['.shards_a/c_shard_1',
                                   '.shards_a/c_shard_2']),
            async_data)

        # next cycle, more redirects! first is to previously visited location
        resp_headers_3 = {'Location': '/.shards_a/c_shard_3/o',
                          'X-Backend-Redirect-Timestamp': ts_redirect.internal}
        fake_responses = (
            # 1st round of redirects, 2nd round of redirects
            [(301, resp_headers_1)] * 3 + [(301, resp_headers_3)] * 3)
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(
                *fake_status_codes, headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()
        self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
        self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
        # first try the previously persisted container path, response to that
        # creates a loop so ignore and send to root
        self.assertEqual(
            ['/sda1/%s/.shards_a/c_shard_2/o' % shard_2_part] * 3 +
            ['/sda1/%s/a/c/o' % root_part] * 3,
            [req['path'] for req in conn.requests])
        self.assertEqual(
            {'redirects': 4, 'async_pendings': 1},
            daemon.logger.get_increment_counts())
        # update failed, we still have pending file with most recent redirect
        # response Location header value from root added to persisted data
        async_path, async_data = self._check_async_file(async_dir)
        self.assertEqual(orig_async_path, async_path)
        # note: redirect_history was reset when falling back to root
        self.assertEqual(
            dict(orig_async_data, container_path='.shards_a/c_shard_3',
                 redirect_history=['.shards_a/c_shard_3']),
            async_data)

        # next cycle, more redirects! first is to a location visited previously
        # but not since last fall back to root, so that location IS tried;
        # second is to a location visited since last fall back to root so that
        # location is NOT tried
        fake_responses = (
            # 1st round of redirects, 2nd round of redirects
            [(301, resp_headers_1)] * 3 + [(301, resp_headers_3)] * 3)
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(
                *fake_status_codes, headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()
        self._check_update_requests(conn.requests, ts_obj, policies[0])
        self.assertEqual(
            ['/sda1/%s/.shards_a/c_shard_3/o' % shard_3_part] * 3 +
            ['/sda1/%s/.shards_a/c_shard_1/o' % shard_1_part] * 3,
            [req['path'] for req in conn.requests])
        self.assertEqual(
            {'redirects': 6, 'async_pendings': 1},
            daemon.logger.get_increment_counts())
        # update failed, we still have pending file, but container_path is None
        # because most recent redirect location was a repeat
        async_path, async_data = self._check_async_file(async_dir)
        self.assertEqual(orig_async_path, async_path)
        self.assertEqual(
            dict(orig_async_data, container_path=None,
                 redirect_history=[]),
            async_data)

        # next cycle, persisted container path is None so update should go to
        # root, this time it succeeds
        fake_responses = [(200, {})] * 3
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(
                *fake_status_codes, headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()
        self._check_update_requests(conn.requests, ts_obj, policies[0])
        self.assertEqual(['/sda1/%s/a/c/o' % root_part] * 3,
                         [req['path'] for req in conn.requests])
        self.assertEqual(
            {'redirects': 6, 'successes': 1, 'unlinks': 1,
             'async_pendings': 1},
            daemon.logger.get_increment_counts())
        self.assertFalse(os.listdir(async_dir))  # no async file
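A rough sketch, with hypothetical names rather than Swift's actual code, of
the loop-detection rule the assertions above exercise: a redirect is only
followed if its container path has not been seen since the updater last
fell back to root, and a repeated location clears the persisted path:

def next_container_path(redirect_history, redirected_path):
    # Hypothetical helper mirroring the behaviour asserted above.
    if redirected_path in redirect_history:
        # Loop detected: fall back to the root container, reset history.
        return None, []
    return redirected_path, redirect_history + [redirected_path]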
Example #21
    def test_obj_put_async_update_redirection_loop(self):
        policies = list(POLICIES)
        random.shuffle(policies)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)
        dfmanager = DiskFileManager(conf, daemon.logger)

        ts_obj = next(self.ts_iter)
        self._write_async_update(dfmanager, ts_obj, policies[0])
        orig_async_path, orig_async_data = self._check_async_file(async_dir)

        # run once
        ts_redirect = next(self.ts_iter)

        resp_headers_1 = {
            'Location': '/.shards_a/c_shard_1/o',
            'X-Backend-Redirect-Timestamp': ts_redirect.internal
        }
        resp_headers_2 = {
            'Location': '/.shards_a/c_shard_2/o',
            'X-Backend-Redirect-Timestamp': ts_redirect.internal
        }
        fake_responses = (
            # 1st round of redirects, 2nd round of redirects
            [(301, resp_headers_1)] * 3 + [(301, resp_headers_2)] * 3)
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(*fake_status_codes,
                              headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()
        self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
        self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
        # only *one* set of redirected requests is attempted per cycle
        root_part = daemon.container_ring.get_part('a/c')
        shard_1_part = daemon.container_ring.get_part('.shards_a/c_shard_1')
        shard_2_part = daemon.container_ring.get_part('.shards_a/c_shard_2')
        shard_3_part = daemon.container_ring.get_part('.shards_a/c_shard_3')
        self.assertEqual(['/sda1/%s/a/c/o' % root_part] * 3 +
                         ['/sda1/%s/.shards_a/c_shard_1/o' % shard_1_part] * 3,
                         [req['path'] for req in conn.requests])
        self.assertEqual({
            'redirects': 2,
            'async_pendings': 1
        }, daemon.logger.get_increment_counts())
        # update failed, we still have pending file with most recent redirect
        # response Location header value added to data
        async_path, async_data = self._check_async_file(async_dir)
        self.assertEqual(orig_async_path, async_path)
        self.assertEqual(
            dict(orig_async_data,
                 container_path='.shards_a/c_shard_2',
                 redirect_history=[
                     '.shards_a/c_shard_1', '.shards_a/c_shard_2'
                 ]), async_data)

        # next cycle, more redirects! first is to previously visited location
        resp_headers_3 = {
            'Location': '/.shards_a/c_shard_3/o',
            'X-Backend-Redirect-Timestamp': ts_redirect.internal
        }
        fake_responses = (
            # 1st round of redirects, 2nd round of redirects
            [(301, resp_headers_1)] * 3 + [(301, resp_headers_3)] * 3)
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(*fake_status_codes,
                              headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()
        self._check_update_requests(conn.requests[:3], ts_obj, policies[0])
        self._check_update_requests(conn.requests[3:], ts_obj, policies[0])
        # first try the previously persisted container path, response to that
        # creates a loop so ignore and send to root
        self.assertEqual(
            ['/sda1/%s/.shards_a/c_shard_2/o' % shard_2_part] * 3 +
            ['/sda1/%s/a/c/o' % root_part] * 3,
            [req['path'] for req in conn.requests])
        self.assertEqual({
            'redirects': 4,
            'async_pendings': 1
        }, daemon.logger.get_increment_counts())
        # update failed, we still have pending file with most recent redirect
        # response Location header value from root added to persisted data
        async_path, async_data = self._check_async_file(async_dir)
        self.assertEqual(orig_async_path, async_path)
        # note: redirect_history was reset when falling back to root
        self.assertEqual(
            dict(orig_async_data,
                 container_path='.shards_a/c_shard_3',
                 redirect_history=['.shards_a/c_shard_3']), async_data)

        # next cycle, more redirects! first is to a location visited previously
        # but not since last fall back to root, so that location IS tried;
        # second is to a location visited since last fall back to root so that
        # location is NOT tried
        fake_responses = (
            # 1st round of redirects, 2nd round of redirects
            [(301, resp_headers_1)] * 3 + [(301, resp_headers_3)] * 3)
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(*fake_status_codes,
                              headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()
        self._check_update_requests(conn.requests, ts_obj, policies[0])
        self.assertEqual(
            ['/sda1/%s/.shards_a/c_shard_3/o' % shard_3_part] * 3 +
            ['/sda1/%s/.shards_a/c_shard_1/o' % shard_1_part] * 3,
            [req['path'] for req in conn.requests])
        self.assertEqual({
            'redirects': 6,
            'async_pendings': 1
        }, daemon.logger.get_increment_counts())
        # update failed, we still have pending file, but container_path is None
        # because most recent redirect location was a repeat
        async_path, async_data = self._check_async_file(async_dir)
        self.assertEqual(orig_async_path, async_path)
        self.assertEqual(
            dict(orig_async_data, container_path=None, redirect_history=[]),
            async_data)

        # next cycle, persisted container path is None so update should go to
        # root, this time it succeeds
        fake_responses = [(200, {})] * 3
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(*fake_status_codes,
                              headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()
        self._check_update_requests(conn.requests, ts_obj, policies[0])
        self.assertEqual(['/sda1/%s/a/c/o' % root_part] * 3,
                         [req['path'] for req in conn.requests])
        self.assertEqual(
            {
                'redirects': 6,
                'successes': 1,
                'unlinks': 1,
                'async_pendings': 1
            }, daemon.logger.get_increment_counts())
        self.assertFalse(os.listdir(async_dir))  # no async file
Example #22
    def test_run_once(self, mock_check_drive):
        mock_check_drive.side_effect = lambda r, d, mc: os.path.join(r, d)
        ou = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'}, logger=self.logger)
        ou.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
        os.mkdir(async_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # each run calls check_device
        self.assertEqual([
            mock.call(self.devices_dir, 'sda1', False),
            mock.call(self.devices_dir, 'sda1', False),
        ], mock_check_drive.mock_calls)
        mock_check_drive.reset_mock()

        ou = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'TrUe',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'}, logger=self.logger)
        odd_dir = os.path.join(async_dir, 'not really supposed '
                               'to be here')
        os.mkdir(odd_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertEqual([
            mock.call(self.devices_dir, 'sda1', True),
        ], mock_check_drive.mock_calls)

        ohash = hash_path('a', 'c', 'o')
        odir = os.path.join(async_dir, ohash[-3:])
        mkdirs(odir)
        older_op_path = os.path.join(
            odir,
            '%s-%s' % (ohash, normalize_timestamp(time() - 1)))
        op_path = os.path.join(
            odir,
            '%s-%s' % (ohash, normalize_timestamp(time())))
        for path in (op_path, older_op_path):
            with open(path, 'wb') as async_pending:
                pickle.dump({'op': 'PUT', 'account': 'a',
                             'container': 'c',
                             'obj': 'o', 'headers': {
                                 'X-Container-Timestamp':
                                 normalize_timestamp(0)}},
                            async_pending)
        ou.run_once()
        self.assertFalse(os.path.exists(older_op_path))
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(),
                         {'failures': 1, 'unlinks': 1})
        self.assertIsNone(pickle.load(open(op_path, 'rb')).get('successes'))

        bindsock = listen_zero()

        def accepter(sock, return_code):
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEqual(inc.readline(),
                                     'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
                    headers = HeaderKeyDict()
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0]] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assertTrue('x-container-timestamp' in headers)
                    self.assertTrue('X-Backend-Storage-Policy-Index' in
                                    headers)
            except BaseException as err:
                return err
            return None

        def accept(return_codes):
            try:
                events = []
                for code in return_codes:
                    with Timeout(3):
                        sock, addr = bindsock.accept()
                        events.append(
                            spawn(accepter, sock, code))
                for event in events:
                    err = event.wait()
                    if err:
                        raise err
            except BaseException as err:
                return err
            return None

        event = spawn(accept, [201, 500, 500])
        for dev in ou.get_container_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]

        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(),
                         {'failures': 1})
        self.assertEqual([0],
                         pickle.load(open(op_path, 'rb')).get('successes'))

        event = spawn(accept, [404, 201])
        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(),
                         {'failures': 1})
        self.assertEqual([0, 2],
                         pickle.load(open(op_path, 'rb')).get('successes'))

        event = spawn(accept, [201])
        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err

        # we remove the async_pending and its containing suffix dir, but not
        # anything above that
        self.assertFalse(os.path.exists(op_path))
        self.assertFalse(os.path.exists(os.path.dirname(op_path)))
        self.assertTrue(os.path.exists(os.path.dirname(os.path.dirname(
            op_path))))
        self.assertEqual(ou.logger.get_increment_counts(),
                         {'unlinks': 1, 'successes': 1})
Example #23
    def test_run_once(self, mock_ismount):
        mock_ismount.return_value = True
        cu = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'}, logger=self.logger)
        cu.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
        os.mkdir(async_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # mount_check == False means no call to ismount
        self.assertEqual([], mock_ismount.mock_calls)

        cu = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'TrUe',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'}, logger=self.logger)
        odd_dir = os.path.join(async_dir, 'not really supposed '
                               'to be here')
        os.mkdir(odd_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertFalse(os.path.exists(odd_dir))
        # mount_check == True means ismount was checked
        self.assertEqual([
            mock.call(self.sda1),
        ], mock_ismount.mock_calls)

        ohash = hash_path('a', 'c', 'o')
        odir = os.path.join(async_dir, ohash[-3:])
        mkdirs(odir)
        older_op_path = os.path.join(
            odir,
            '%s-%s' % (ohash, normalize_timestamp(time() - 1)))
        op_path = os.path.join(
            odir,
            '%s-%s' % (ohash, normalize_timestamp(time())))
        for path in (op_path, older_op_path):
            with open(path, 'wb') as async_pending:
                pickle.dump({'op': 'PUT', 'account': 'a',
                             'container': 'c',
                             'obj': 'o', 'headers': {
                                 'X-Container-Timestamp':
                                 normalize_timestamp(0)}},
                            async_pending)
        cu.run_once()
        self.assertFalse(os.path.exists(older_op_path))
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(),
                         {'failures': 1, 'unlinks': 1})
        self.assertIsNone(pickle.load(open(op_path)).get('successes'))

        bindsock = listen(('127.0.0.1', 0))

        def accepter(sock, return_code):
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEqual(inc.readline(),
                                     'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
                    headers = swob.HeaderKeyDict()
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0]] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assertIn('x-container-timestamp', headers)
                    self.assertIn('X-Backend-Storage-Policy-Index', headers)
            except BaseException as err:
                return err
            return None

        def accept(return_codes):
            codes = iter(return_codes)
            try:
                events = []
                for x in range(len(return_codes)):
                    with Timeout(3):
                        sock, addr = bindsock.accept()
                        events.append(
                            spawn(accepter, sock, next(codes)))
                for event in events:
                    err = event.wait()
                    if err:
                        raise err
            except BaseException as err:
                return err
            return None

        event = spawn(accept, [201, 500, 500])
        for dev in cu.get_container_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]

        cu.logger._clear()
        cu.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(),
                         {'failures': 1})
        self.assertEqual([0],
                         pickle.load(open(op_path)).get('successes'))

        event = spawn(accept, [404, 500])
        cu.logger._clear()
        cu.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(),
                         {'failures': 1})
        self.assertEqual([0, 1],
                         pickle.load(open(op_path)).get('successes'))

        event = spawn(accept, [201])
        cu.logger._clear()
        cu.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertFalse(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(),
                         {'unlinks': 1, 'successes': 1})
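
The deliberately odd 'TrUe' value above works because Swift parses boolean
config strings case-insensitively. A rough paraphrase of that parsing (the
exact set of accepted strings is an assumption, not a verbatim copy of
swift.common.utils.config_true_value):

TRUE_VALUES = {'true', '1', 'yes', 'on', 't', 'y'}

def config_true_value(value):
    # case-insensitive: 'TrUe', 'YES', 'On' all count as true
    return value is True or (
        isinstance(value, str) and value.lower() in TRUE_VALUES)

assert config_true_value('TrUe')
assert not config_true_value('false')
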
Example No. 24
    def test_obj_put_async_updates(self):
        ts_iter = make_timestamp_iter()
        policies = list(POLICIES)
        random.shuffle(policies)

        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)

        def do_test(headers_out, expected, container_path=None):
            # write an async
            dfmanager = DiskFileManager(conf, daemon.logger)
            self._write_async_update(dfmanager, next(ts_iter), policies[0],
                                     headers=headers_out,
                                     container_path=container_path)
            request_log = []

            def capture(*args, **kwargs):
                request_log.append((args, kwargs))

            # run once
            fake_status_codes = [
                200,  # object update success
                200,  # object update success
                200,  # object update success
            ]
            with mocked_http_conn(*fake_status_codes, give_connect=capture):
                daemon.run_once()
            self.assertEqual(len(fake_status_codes), len(request_log))
            for request_args, request_kwargs in request_log:
                ip, part, method, path, headers, qs, ssl = request_args
                self.assertEqual(method, 'PUT')
                self.assertDictEqual(expected, headers)
            self.assertEqual(
                daemon.logger.get_increment_counts(),
                {'successes': 1, 'unlinks': 1, 'async_pendings': 1})
            self.assertFalse(os.listdir(async_dir))
            daemon.logger.clear()

        ts = next(ts_iter)
        # use a dict rather than HeaderKeyDict so we can vary the case of the
        # pickled headers
        headers_out = {
            'x-size': 0,
            'x-content-type': 'text/plain',
            'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
            'x-timestamp': ts.normal,
            'X-Backend-Storage-Policy-Index': int(policies[0]),
            'User-Agent': 'object-server %s' % os.getpid()
        }
        expected = {
            'X-Size': '0',
            'X-Content-Type': 'text/plain',
            'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e',
            'X-Timestamp': ts.normal,
            'X-Backend-Storage-Policy-Index': str(int(policies[0])),
            'User-Agent': 'object-updater %s' % os.getpid(),
            'X-Backend-Accept-Redirect': 'true',
        }
        # always expect X-Backend-Accept-Redirect to be true
        do_test(headers_out, expected, container_path='.shards_a/shard_c')
        do_test(headers_out, expected)

        # ...unless X-Backend-Accept-Redirect is already set
        expected['X-Backend-Accept-Redirect'] = 'false'
        headers_out_2 = dict(headers_out)
        headers_out_2['X-Backend-Accept-Redirect'] = 'false'
        do_test(headers_out_2, expected)

        # updater should add policy header if missing
        expected['X-Backend-Accept-Redirect'] = 'true'
        headers_out['X-Backend-Storage-Policy-Index'] = None
        do_test(headers_out, expected)

        # updater should not overwrite a mismatched policy header
        headers_out['X-Backend-Storage-Policy-Index'] = int(policies[1])
        expected['X-Backend-Storage-Policy-Index'] = str(int(policies[1]))
        do_test(headers_out, expected)

        # check for case insensitivity
        headers_out['user-agent'] = headers_out.pop('User-Agent')
        headers_out['x-backend-storage-policy-index'] = headers_out.pop(
            'X-Backend-Storage-Policy-Index')
        do_test(headers_out, expected)
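
Note how the lower-case keys pickled by the object server come back
title-cased in the expected PUT headers. A stand-in for that key
normalization (HeaderKeyDict does this internally; this helper is only
illustrative, not the real implementation):

def canonical_header(name):
    # 'x-backend-storage-policy-index' -> 'X-Backend-Storage-Policy-Index'
    return '-'.join(part.capitalize() for part in name.split('-'))

assert canonical_header('x-etag') == 'X-Etag'
assert (canonical_header('x-backend-storage-policy-index') ==
        'X-Backend-Storage-Policy-Index')
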
Example No. 25
    def object_sweep(self, device):
        """
        If there are async pendings on the device, walk each one and update.

        :param device: path to device
        """
        start_time = time.time()
        # loop through async pending dirs for all policies
        for asyncdir in self._listdir(device):
            # skip stuff like "accounts", "containers", etc.
            if not (asyncdir == ASYNCDIR_BASE or
                    asyncdir.startswith(ASYNCDIR_BASE + '-')):
                continue

            # we only care about directories
            async_pending = os.path.join(device, asyncdir)
            if not os.path.isdir(async_pending):
                continue

            if asyncdir == ASYNCDIR_BASE:
                policy_idx = 0
            else:
                _junk, policy_idx = asyncdir.split('-', 1)
                try:
                    policy_idx = int(policy_idx)
                    get_async_dir(policy_idx)
                except ValueError:
                    self.logger.warning(_('Directory %s does not map to a '
                                          'valid policy') % asyncdir)
                    continue

            for prefix in self._listdir(async_pending):
                prefix_path = os.path.join(async_pending, prefix)
                if not os.path.isdir(prefix_path):
                    continue
                last_obj_hash = None
                for update in sorted(self._listdir(prefix_path), reverse=True):
                    update_path = os.path.join(prefix_path, update)
                    if not os.path.isfile(update_path):
                        continue
                    try:
                        obj_hash, timestamp = update.split('-')
                    except ValueError:
                        self.logger.increment('errors')
                        self.logger.error(
                            _('ERROR async pending file with unexpected '
                              'name %s')
                            % (update_path))
                        continue
                    if obj_hash == last_obj_hash:
                        self.logger.increment("unlinks")
                        os.unlink(update_path)
                    else:
                        self.process_object_update(update_path, device,
                                                   policy_idx)
                        last_obj_hash = obj_hash
                    time.sleep(self.slowdown)
                try:
                    os.rmdir(prefix_path)
                except OSError:
                    pass
            self.logger.timing_since('timing', start_time)
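
For orientation, a sketch of the on-disk layout object_sweep walks; the
concrete names below are illustrative assumptions, not taken from the method:

# <device>/async_pending[-<policy>]/<suffix>/<obj_hash>-<timestamp>
import os
import tempfile

device = tempfile.mkdtemp()
for asyncdir in ('async_pending', 'async_pending-1'):
    prefix_path = os.path.join(device, asyncdir, 'abc')  # hash suffix dir
    os.makedirs(prefix_path)
    # one pending update per object hash; older duplicates get unlinked
    name = '%s-%s' % ('a' * 32, '0000001234.00000')
    open(os.path.join(prefix_path, name), 'wb').close()
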
Example No. 26
    def test_run_once(self, mock_ismount):
        mock_ismount.return_value = True
        cu = object_updater.ObjectUpdater(
            {
                'devices': self.devices_dir,
                'mount_check': 'false',
                'swift_dir': self.testdir,
                'interval': '1',
                'concurrency': '1',
                'node_timeout': '15'
            },
            logger=self.logger)
        cu.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
        os.mkdir(async_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # mount_check == False means no call to ismount
        self.assertEqual([], mock_ismount.mock_calls)

        cu = object_updater.ObjectUpdater(
            {
                'devices': self.devices_dir,
                'mount_check': 'TrUe',
                'swift_dir': self.testdir,
                'interval': '1',
                'concurrency': '1',
                'node_timeout': '15'
            },
            logger=self.logger)
        odd_dir = os.path.join(async_dir, 'not really supposed ' 'to be here')
        os.mkdir(odd_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertFalse(os.path.exists(odd_dir))
        # mount_check == True means ismount was checked
        self.assertEqual([
            mock.call(self.sda1),
        ], mock_ismount.mock_calls)

        ohash = hash_path('a', 'c', 'o')
        odir = os.path.join(async_dir, ohash[-3:])
        mkdirs(odir)
        older_op_path = os.path.join(
            odir, '%s-%s' % (ohash, normalize_timestamp(time() - 1)))
        op_path = os.path.join(odir,
                               '%s-%s' % (ohash, normalize_timestamp(time())))
        for path in (op_path, older_op_path):
            with open(path, 'wb') as async_pending:
                pickle.dump(
                    {
                        'op': 'PUT',
                        'account': 'a',
                        'container': 'c',
                        'obj': 'o',
                        'headers': {
                            'X-Container-Timestamp': normalize_timestamp(0)
                        }
                    }, async_pending)
        cu.run_once()
        self.assertFalse(os.path.exists(older_op_path))
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(), {
            'failures': 1,
            'unlinks': 1
        })
        self.assertIsNone(pickle.load(open(op_path)).get('successes'))

        bindsock = listen(('127.0.0.1', 0))

        def accepter(sock, return_code):
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEqual(inc.readline(),
                                     'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
                    headers = swob.HeaderKeyDict()
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0]] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assertIn('x-container-timestamp', headers)
                    self.assertIn('X-Backend-Storage-Policy-Index', headers)
            except BaseException as err:
                return err
            return None

        def accept(return_codes):
            codes = iter(return_codes)
            try:
                events = []
                for x in range(len(return_codes)):
                    with Timeout(3):
                        sock, addr = bindsock.accept()
                        events.append(spawn(accepter, sock, next(codes)))
                for event in events:
                    err = event.wait()
                    if err:
                        raise err
            except BaseException as err:
                return err
            return None

        event = spawn(accept, [201, 500, 500])
        for dev in cu.get_container_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]

        cu.logger._clear()
        cu.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(), {'failures': 1})
        self.assertEqual([0], pickle.load(open(op_path)).get('successes'))

        event = spawn(accept, [404, 500])
        cu.logger._clear()
        cu.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(), {'failures': 1})
        self.assertEqual([0, 1], pickle.load(open(op_path)).get('successes'))

        event = spawn(accept, [201])
        cu.logger._clear()
        cu.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertFalse(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(), {
            'unlinks': 1,
            'successes': 1
        })
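
The successes list asserted in this test records which container-node indexes
have already accepted the update, so later passes only retry the rest; a 404
evidently counts as done. A toy restatement of that bookkeeping (the success
criterion is inferred from the asserted values, not copied from the updater):

def record_successes(successes, statuses, node_indexes):
    # pass 1: [201, 500, 500] over nodes [0, 1, 2] records node 0;
    # pass 2: [404, 500] over nodes [1, 2] also records node 1.
    for node, status in zip(node_indexes, statuses):
        if 200 <= status < 300 or status == 404:  # assumption: 404 counts
            successes.append(node)
    return successes

successes = record_successes([], [201, 500, 500], [0, 1, 2])
assert successes == [0]
successes = record_successes(successes, [404, 500], [1, 2])
assert successes == [0, 1]
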
Example No. 27
    def test_run_once(self, mock_ismount):
        mock_ismount.return_value = True
        cu = object_updater.ObjectUpdater(
            {
                "devices": self.devices_dir,
                "mount_check": "false",
                "swift_dir": self.testdir,
                "interval": "1",
                "concurrency": "1",
                "node_timeout": "15",
            },
            logger=self.logger,
        )
        cu.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(0))
        os.mkdir(async_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # mount_check == False means no call to ismount
        self.assertEqual([], mock_ismount.mock_calls)

        cu = object_updater.ObjectUpdater(
            {
                "devices": self.devices_dir,
                "mount_check": "TrUe",
                "swift_dir": self.testdir,
                "interval": "1",
                "concurrency": "1",
                "node_timeout": "15",
            },
            logger=self.logger,
        )
        odd_dir = os.path.join(async_dir, "not really supposed " "to be here")
        os.mkdir(odd_dir)
        cu.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertFalse(os.path.exists(odd_dir))
        # mount_check == True means ismount was checked
        self.assertEqual([mock.call(self.sda1)], mock_ismount.mock_calls)

        ohash = hash_path("a", "c", "o")
        odir = os.path.join(async_dir, ohash[-3:])
        mkdirs(odir)
        older_op_path = os.path.join(odir, "%s-%s" % (ohash, normalize_timestamp(time() - 1)))
        op_path = os.path.join(odir, "%s-%s" % (ohash, normalize_timestamp(time())))
        for path in (op_path, older_op_path):
            with open(path, "wb") as async_pending:
                pickle.dump(
                    {
                        "op": "PUT",
                        "account": "a",
                        "container": "c",
                        "obj": "o",
                        "headers": {"X-Container-Timestamp": normalize_timestamp(0)},
                    },
                    async_pending,
                )
        cu.run_once()
        self.assertFalse(os.path.exists(older_op_path))
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(), {"failures": 1, "unlinks": 1})
        self.assertIsNone(pickle.load(open(op_path)).get("successes"))

        bindsock = listen(("127.0.0.1", 0))

        def accepter(sock, return_code):
            try:
                with Timeout(3):
                    inc = sock.makefile("rb")
                    out = sock.makefile("wb")
                    out.write("HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n" % return_code)
                    out.flush()
                    self.assertEqual(inc.readline(), "PUT /sda1/0/a/c/o HTTP/1.1\r\n")
                    headers = swob.HeaderKeyDict()
                    line = inc.readline()
                    while line and line != "\r\n":
                        headers[line.split(":")[0]] = line.split(":")[1].strip()
                        line = inc.readline()
                    self.assertTrue("x-container-timestamp" in headers)
                    self.assertTrue("X-Backend-Storage-Policy-Index" in headers)
            except BaseException as err:
                return err
            return None

        def accept(return_codes):
            codes = iter(return_codes)
            try:
                events = []
                for x in range(len(return_codes)):
                    with Timeout(3):
                        sock, addr = bindsock.accept()
                        events.append(spawn(accepter, sock, next(codes)))
                for event in events:
                    err = event.wait()
                    if err:
                        raise err
            except BaseException as err:
                return err
            return None

        event = spawn(accept, [201, 500, 500])
        for dev in cu.get_container_ring().devs:
            if dev is not None:
                dev["port"] = bindsock.getsockname()[1]

        cu.logger._clear()
        cu.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(), {"failures": 1})
        self.assertEqual([0], pickle.load(open(op_path)).get("successes"))

        event = spawn(accept, [404, 500])
        cu.logger._clear()
        cu.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(), {"failures": 1})
        self.assertEqual([0, 1], pickle.load(open(op_path)).get("successes"))

        event = spawn(accept, [201])
        cu.logger._clear()
        cu.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertFalse(os.path.exists(op_path))
        self.assertEqual(cu.logger.get_increment_counts(), {"unlinks": 1, "successes": 1})
Example No. 28
def async_key(policy, hashpath, timestamp):
    async_policy = diskfile.get_async_dir(policy)
    return "%s.%s.%s" % (async_policy, hashpath, timestamp)
Example No. 29
    def test_obj_put_async_updates(self):
        ts_iter = make_timestamp_iter()
        policies = list(POLICIES)
        random.shuffle(policies)

        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)

        def do_test(headers_out, expected, container_path=None):
            # write an async
            dfmanager = DiskFileManager(conf, daemon.logger)
            self._write_async_update(dfmanager,
                                     next(ts_iter),
                                     policies[0],
                                     headers=headers_out,
                                     container_path=container_path)
            request_log = []

            def capture(*args, **kwargs):
                request_log.append((args, kwargs))

            # run once
            fake_status_codes = [
                200,  # object update success
                200,  # object update success
                200,  # object update success
            ]
            with mocked_http_conn(*fake_status_codes, give_connect=capture):
                daemon.run_once()
            self.assertEqual(len(fake_status_codes), len(request_log))
            for request_args, request_kwargs in request_log:
                ip, part, method, path, headers, qs, ssl = request_args
                self.assertEqual(method, 'PUT')
                self.assertDictEqual(expected, headers)
            self.assertEqual(daemon.logger.get_increment_counts(), {
                'successes': 1,
                'unlinks': 1,
                'async_pendings': 1
            })
            self.assertFalse(os.listdir(async_dir))
            daemon.logger.clear()

        ts = next(ts_iter)
        # use a dict rather than HeaderKeyDict so we can vary the case of the
        # pickled headers
        headers_out = {
            'x-size': 0,
            'x-content-type': 'text/plain',
            'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
            'x-timestamp': ts.normal,
            'X-Backend-Storage-Policy-Index': int(policies[0]),
            'User-Agent': 'object-server %s' % os.getpid()
        }
        expected = {
            'X-Size': '0',
            'X-Content-Type': 'text/plain',
            'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e',
            'X-Timestamp': ts.normal,
            'X-Backend-Storage-Policy-Index': str(int(policies[0])),
            'User-Agent': 'object-updater %s' % os.getpid(),
            'X-Backend-Accept-Redirect': 'true',
        }
        # always expect X-Backend-Accept-Redirect to be true
        do_test(headers_out, expected, container_path='.shards_a/shard_c')
        do_test(headers_out, expected)

        # ...unless X-Backend-Accept-Redirect is already set
        expected['X-Backend-Accept-Redirect'] = 'false'
        headers_out_2 = dict(headers_out)
        headers_out_2['X-Backend-Accept-Redirect'] = 'false'
        do_test(headers_out_2, expected)

        # updater should add policy header if missing
        expected['X-Backend-Accept-Redirect'] = 'true'
        headers_out['X-Backend-Storage-Policy-Index'] = None
        do_test(headers_out, expected)

        # updater should not overwrite a mismatched policy header
        headers_out['X-Backend-Storage-Policy-Index'] = int(policies[1])
        expected['X-Backend-Storage-Policy-Index'] = str(int(policies[1]))
        do_test(headers_out, expected)

        # check for case insensitivity
        headers_out['user-agent'] = headers_out.pop('User-Agent')
        headers_out['x-backend-storage-policy-index'] = headers_out.pop(
            'X-Backend-Storage-Policy-Index')
        do_test(headers_out, expected)
Example No. 30
    def test_obj_put_async_shard_update_redirected_twice(self):
        policies = list(POLICIES)
        random.shuffle(policies)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)
        dfmanager = DiskFileManager(conf, daemon.logger)

        ts_obj = next(self.ts_iter)
        self._write_async_update(dfmanager, ts_obj, policies[0],
                                 container_path='.shards_a/c_shard_older')
        orig_async_path, orig_async_data = self._check_async_file(async_dir)

        # run once
        ts_redirect_1 = next(self.ts_iter)
        ts_redirect_2 = next(self.ts_iter)
        ts_redirect_3 = next(self.ts_iter)
        fake_responses = [
            # 1st round of redirects, newest redirect should be chosen
            (301, {'Location': '/.shards_a/c_shard_old/o',
                   'X-Backend-Redirect-Timestamp': ts_redirect_1.internal}),
            (301, {'Location': '/.shards_a/c_shard_new/o',
                   'X-Backend-Redirect-Timestamp': ts_redirect_2.internal}),
            (301, {'Location': '/.shards_a/c_shard_old/o',
                   'X-Backend-Redirect-Timestamp': ts_redirect_1.internal}),
            # 2nd round of redirects
            (301, {'Location': '/.shards_a/c_shard_newer/o',
                   'X-Backend-Redirect-Timestamp': ts_redirect_3.internal}),
            (301, {'Location': '/.shards_a/c_shard_newer/o',
                   'X-Backend-Redirect-Timestamp': ts_redirect_3.internal}),
            (301, {'Location': '/.shards_a/c_shard_newer/o',
                   'X-Backend-Redirect-Timestamp': ts_redirect_3.internal}),
        ]
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(
                *fake_status_codes, headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()

        self._check_update_requests(conn.requests, ts_obj, policies[0])
        # only *one* set of redirected requests is attempted per cycle
        older_part = daemon.container_ring.get_part('.shards_a/c_shard_older')
        new_part = daemon.container_ring.get_part('.shards_a/c_shard_new')
        newer_part = daemon.container_ring.get_part('.shards_a/c_shard_newer')
        self.assertEqual(
            ['/sda1/%s/.shards_a/c_shard_older/o' % older_part] * 3 +
            ['/sda1/%s/.shards_a/c_shard_new/o' % new_part] * 3,
            [req['path'] for req in conn.requests])
        self.assertEqual(
            {'redirects': 2, 'async_pendings': 1},
            daemon.logger.get_increment_counts())
        # update failed, we still have pending file with most recent redirect
        # response Location header value added to data
        async_path, async_data = self._check_async_file(async_dir)
        self.assertEqual(orig_async_path, async_path)
        self.assertEqual(
            dict(orig_async_data, container_path='.shards_a/c_shard_newer',
                 redirect_history=['.shards_a/c_shard_new',
                                   '.shards_a/c_shard_newer']),
            async_data)

        # next cycle, should get latest redirect from pickled async update
        fake_responses = [(200, {})] * 3
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(
                *fake_status_codes, headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()

        self._check_update_requests(conn.requests, ts_obj, policies[0])
        self.assertEqual(
            ['/sda1/%s/.shards_a/c_shard_newer/o' % newer_part] * 3,
            [req['path'] for req in conn.requests])
        self.assertEqual(
            {'redirects': 2, 'successes': 1, 'unlinks': 1,
             'async_pendings': 1},
            daemon.logger.get_increment_counts())
        self.assertFalse(os.listdir(async_dir))  # no async file
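
The first cycle above pins down a selection rule: when several 301s arrive in
one cycle, the Location with the newest X-Backend-Redirect-Timestamp wins.
Restated in isolation (a sketch of the rule, not the updater's code):

redirects = [  # (container path, redirect timestamp) pairs from one cycle
    ('.shards_a/c_shard_old', 1.0),
    ('.shards_a/c_shard_new', 2.0),
    ('.shards_a/c_shard_old', 1.0),
]
best_path, _ts = max(redirects, key=lambda pair: pair[1])
assert best_path == '.shards_a/c_shard_new'
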
Example No. 31
    def object_sweep(self, device):
        """
        If there are async pendings on the device, walk each one and update.

        :param device: path to device
        """
        start_time = time.time()
        # loop through async pending dirs for all policies
        for asyncdir in os.listdir(device):
            # skip stuff like "accounts", "containers", etc.
            if not (asyncdir == ASYNCDIR_BASE or
                    asyncdir.startswith(ASYNCDIR_BASE + '-')):
                continue

            # we only care about directories
            async_pending = os.path.join(device, asyncdir)
            if not os.path.isdir(async_pending):
                continue

            if asyncdir == ASYNCDIR_BASE:
                policy_idx = 0
            else:
                _junk, policy_idx = asyncdir.split('-', 1)
                try:
                    policy_idx = int(policy_idx)
                    get_async_dir(policy_idx)
                except ValueError:
                    self.logger.warning(_('Directory %s does not map to a '
                                          'valid policy') % asyncdir)
                    continue

            for prefix in os.listdir(async_pending):
                prefix_path = os.path.join(async_pending, prefix)
                if not os.path.isdir(prefix_path):
                    continue
                last_obj_hash = None
                for update in sorted(os.listdir(prefix_path), reverse=True):
                    update_path = os.path.join(prefix_path, update)
                    if not os.path.isfile(update_path):
                        continue
                    try:
                        obj_hash, timestamp = update.split('-')
                    except ValueError:
                        self.logger.increment('errors')
                        self.logger.error(
                            _('ERROR async pending file with unexpected '
                              'name %s')
                            % (update_path))
                        continue
                    if obj_hash == last_obj_hash:
                        self.logger.increment("unlinks")
                        os.unlink(update_path)
                    else:
                        self.process_object_update(update_path, device,
                                                   policy_idx)
                        last_obj_hash = obj_hash
                    time.sleep(self.slowdown)
                try:
                    os.rmdir(prefix_path)
                except OSError:
                    pass
            self.logger.timing_since('timing', start_time)
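
This variant calls os.listdir directly, where the copy in Example No. 25 goes
through self._listdir, presumably a defensive wrapper so one unreadable
directory does not abort the whole sweep. A plausible sketch of such a
wrapper (an assumption, including the logging call, not the project's code):

import os

def _listdir(self, path):
    try:
        return os.listdir(path)
    except OSError as err:
        # log and keep sweeping other directories
        self.logger.error('ERROR: Unable to access %s: %s', path, err)
        return []
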
Example No. 32
    def test_obj_put_async_shard_update_redirected_twice(self):
        policies = list(POLICIES)
        random.shuffle(policies)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)
        dfmanager = DiskFileManager(conf, daemon.logger)

        ts_obj = next(self.ts_iter)
        self._write_async_update(dfmanager,
                                 ts_obj,
                                 policies[0],
                                 container_path='.shards_a/c_shard_older')
        orig_async_path, orig_async_data = self._check_async_file(async_dir)

        # run once
        ts_redirect_1 = next(self.ts_iter)
        ts_redirect_2 = next(self.ts_iter)
        ts_redirect_3 = next(self.ts_iter)
        fake_responses = [
            # 1st round of redirects, newest redirect should be chosen
            (301, {
                'Location': '/.shards_a/c_shard_old/o',
                'X-Backend-Redirect-Timestamp': ts_redirect_1.internal
            }),
            (301, {
                'Location': '/.shards_a/c_shard_new/o',
                'X-Backend-Redirect-Timestamp': ts_redirect_2.internal
            }),
            (301, {
                'Location': '/.shards_a/c_shard_old/o',
                'X-Backend-Redirect-Timestamp': ts_redirect_1.internal
            }),
            # 2nd round of redirects
            (301, {
                'Location': '/.shards_a/c_shard_newer/o',
                'X-Backend-Redirect-Timestamp': ts_redirect_3.internal
            }),
            (301, {
                'Location': '/.shards_a/c_shard_newer/o',
                'X-Backend-Redirect-Timestamp': ts_redirect_3.internal
            }),
            (301, {
                'Location': '/.shards_a/c_shard_newer/o',
                'X-Backend-Redirect-Timestamp': ts_redirect_3.internal
            }),
        ]
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(*fake_status_codes,
                              headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()

        self._check_update_requests(conn.requests, ts_obj, policies[0])
        # only *one* set of redirected requests is attempted per cycle
        older_part = daemon.container_ring.get_part('.shards_a/c_shard_older')
        new_part = daemon.container_ring.get_part('.shards_a/c_shard_new')
        newer_part = daemon.container_ring.get_part('.shards_a/c_shard_newer')
        self.assertEqual(
            ['/sda1/%s/.shards_a/c_shard_older/o' % older_part] * 3 +
            ['/sda1/%s/.shards_a/c_shard_new/o' % new_part] * 3,
            [req['path'] for req in conn.requests])
        self.assertEqual({
            'redirects': 2,
            'async_pendings': 1
        }, daemon.logger.get_increment_counts())
        # update failed, we still have pending file with most recent redirect
        # response Location header value added to data
        async_path, async_data = self._check_async_file(async_dir)
        self.assertEqual(orig_async_path, async_path)
        self.assertEqual(
            dict(orig_async_data,
                 container_path='.shards_a/c_shard_newer',
                 redirect_history=[
                     '.shards_a/c_shard_new', '.shards_a/c_shard_newer'
                 ]), async_data)

        # next cycle, should get latest redirect from pickled async update
        fake_responses = [(200, {})] * 3
        fake_status_codes, fake_headers = zip(*fake_responses)
        with mocked_http_conn(*fake_status_codes,
                              headers=fake_headers) as conn:
            with mock.patch('swift.obj.updater.dump_recon_cache'):
                daemon.run_once()

        self._check_update_requests(conn.requests, ts_obj, policies[0])
        self.assertEqual(['/sda1/%s/.shards_a/c_shard_newer/o' % newer_part] *
                         3, [req['path'] for req in conn.requests])
        self.assertEqual(
            {
                'redirects': 2,
                'successes': 1,
                'unlinks': 1,
                'async_pendings': 1
            }, daemon.logger.get_increment_counts())
        self.assertFalse(os.listdir(async_dir))  # no async file
Example No. 33
def async_key(policy_index, hashpath, timestamp):
    async_policy = diskfile.get_async_dir(policy_index)
    return '%s.%s.%s' % (async_policy, hashpath, timestamp)