Esempio n. 1
0
    def test_validate_ring(self):
        """EC policies whose ring replica count does not equal
        ec_ndata + ec_nparity must fail ring validation with a
        descriptive message."""
        bad_policies = [
            ECStoragePolicy(0, 'ec8-2', ec_type='jerasure_rs_vand',
                            ec_ndata=8, ec_nparity=2,
                            object_ring=FakeRing(replicas=8),
                            is_default=True),
            ECStoragePolicy(1, 'ec10-4', ec_type='jerasure_rs_vand',
                            ec_ndata=10, ec_nparity=4,
                            object_ring=FakeRing(replicas=10)),
            ECStoragePolicy(2, 'ec4-2', ec_type='jerasure_rs_vand',
                            ec_ndata=4, ec_nparity=2,
                            object_ring=FakeRing(replicas=7)),
        ]
        collection = StoragePolicyCollection(bad_policies)

        for policy in collection:
            expected_nodes = policy.ec_ndata + policy.ec_nparity
            msg = ('EC ring for policy %s needs to be configured with '
                   'exactly %d nodes.' % (policy.name, expected_nodes))
            self.assertRaisesWithMessage(RingValidationError, msg,
                                         policy._validate_ring)
Esempio n. 2
0
 def test_quorum_size_erasure_coding(self):
     """Quorum of an EC policy equals ec_ndata plus the backend
     driver's minimum required parity fragments."""
     cases = [
         ECStoragePolicy(10, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE,
                         ec_ndata=8, ec_nparity=2),
         ECStoragePolicy(11, 'df10-6', ec_type='flat_xor_hd_4',
                         ec_ndata=10, ec_nparity=6),
     ]
     for policy in cases:
         min_parity = policy.pyeclib_driver.min_parity_fragments_needed()
         self.assertEqual(policy.ec_ndata + min_parity, policy.quorum)
Esempio n. 3
0
def patch_policies(thing_or_policies=None, legacy_only=False,
                   with_ec_default=False, fake_ring_args=None):
    """Patch the global storage policies for a test.

    May be used as a decorator factory or called directly on a "thing"
    (test class/function).  If *thing_or_policies* is itself a policy
    collection (or any iterable of policies), patch with exactly those
    policies; otherwise pick a canned policy set based on the flags.

    :param thing_or_policies: a policy iterable/collection, or the thing
                              to decorate, or None to get a decorator
    :param legacy_only: use a single legacy replication policy
    :param with_ec_default: use an EC policy as the default
    :param fake_ring_args: per-policy kwargs for the fake rings
    """
    if isinstance(thing_or_policies, (
            Iterable, storage_policy.StoragePolicyCollection)):
        # Explicit policies supplied; patch with them as-is.
        return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args)

    # Select a canned default policy set from the flags.
    if legacy_only:
        default_policies = [StoragePolicy(0, name='legacy', is_default=True)]
        default_ring_args = [{}]
    elif with_ec_default:
        default_policies = [
            ECStoragePolicy(0, name='ec', is_default=True,
                            ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10,
                            ec_nparity=4, ec_segment_size=4096),
            StoragePolicy(1, name='unu'),
        ]
        default_ring_args = [{'replicas': 14}, {}]
    else:
        default_policies = [
            StoragePolicy(0, name='nulo', is_default=True),
            StoragePolicy(1, name='unu'),
        ]
        default_ring_args = [{}, {}]

    decorator = PatchPolicies(
        default_policies,
        fake_ring_args=fake_ring_args or default_ring_args)

    if thing_or_policies:
        # it's a thing, we return the wrapped thing instead of the decorator
        return decorator(thing_or_policies)
    return decorator
Esempio n. 4
0
 def test_storage_policy_repr(self):
     """repr() of each policy and of the collection is informative.

     Verifies that class name, default/deprecated flags, policy name
     and (for EC policies) EC parameters all appear in repr(), and that
     the collection repr embeds one line per policy.
     """
     test_policies = [
         StoragePolicy(0, 'aay', True),
         StoragePolicy(1, 'bee', False),
         StoragePolicy(2, 'cee', False),
         ECStoragePolicy(10,
                         'ten',
                         ec_type='jerasure_rs_vand',
                         ec_ndata=10,
                         ec_nparity=3)
     ]
     policies = StoragePolicyCollection(test_policies)
     for policy in policies:
         policy_repr = repr(policy)
         # assertIn replaces the deprecated assert_(x in y) alias
         # (assert_ was removed from unittest in modern Python).
         self.assertIn(policy.__class__.__name__, policy_repr)
         self.assertIn('is_default=%s' % policy.is_default, policy_repr)
         self.assertIn('is_deprecated=%s' % policy.is_deprecated,
                       policy_repr)
         self.assertIn(policy.name, policy_repr)
         if policy.policy_type == EC_POLICY:
             self.assertIn('ec_type=%s' % policy.ec_type, policy_repr)
             self.assertIn('ec_ndata=%s' % policy.ec_ndata, policy_repr)
             self.assertIn('ec_nparity=%s' % policy.ec_nparity,
                           policy_repr)
             self.assertIn('ec_segment_size=%s' % policy.ec_segment_size,
                           policy_repr)
     collection_repr = repr(policies)
     collection_repr_lines = collection_repr.splitlines()
     # First line names the collection class; each inner line holds
     # exactly one policy's repr.
     self.assertIn(policies.__class__.__name__, collection_repr_lines[0])
     self.assertEqual(len(policies), len(collection_repr_lines[1:-1]))
     for policy, line in zip(policies, collection_repr_lines[1:-1]):
         self.assertIn(repr(policy), line)
     with patch_policies(policies):
         self.assertEqual(repr(POLICIES), collection_repr)
Esempio n. 5
0
 def test_policies_type_attribute(self):
     """policy_type is REPL_POLICY for replication policies and
     EC_POLICY for erasure-coded ones."""
     test_policies = [
         StoragePolicy(0, 'zero', is_default=True),
         StoragePolicy(1, 'one'),
         StoragePolicy(2, 'two'),
         StoragePolicy(3, 'three', is_deprecated=True),
         ECStoragePolicy(10,
                         'ten',
                         ec_type='jerasure_rs_vand',
                         ec_ndata=10,
                         ec_nparity=3),
     ]
     policies = StoragePolicyCollection(test_policies)
     # assertEqual replaces the deprecated assertEquals alias (removed
     # from unittest in modern Python).
     for index in (0, 1, 2, 3):
         self.assertEqual(policies.get_by_index(index).policy_type,
                          REPL_POLICY)
     self.assertEqual(policies.get_by_index(10).policy_type, EC_POLICY)
Esempio n. 6
0
from test.unit import FakeLogger, patch_policies, make_timestamp_iter, \
    DEFAULT_TEST_EC_TYPE
from swift.obj import auditor
from swift.obj.diskfile import DiskFile, write_metadata, invalidate_hash, \
    get_data_dir, DiskFileManager, ECDiskFileManager, AuditLocation, \
    clear_auditor_status, get_auditor_status
from swift.common.utils import mkdirs, normalize_timestamp, Timestamp
from swift.common.storage_policy import ECStoragePolicy, StoragePolicy, \
    POLICIES

# Policy set installed by tests in this module: two replication policies
# (policy 1 is the default) and a small 2+1 EC policy using the default
# test EC backend with a 4 KiB segment size.
_mocked_policies = [
    StoragePolicy(0, 'zero', False),
    StoragePolicy(1, 'one', True),
    ECStoragePolicy(2,
                    'two',
                    ec_type=DEFAULT_TEST_EC_TYPE,
                    ec_ndata=2,
                    ec_nparity=1,
                    ec_segment_size=4096),
]


def works_only_once(callable_thing, exception):
    """Wrap *callable_thing* so it succeeds on the first call only.

    The first invocation delegates to *callable_thing* and returns its
    result; every subsequent invocation raises *exception*.

    :param callable_thing: callable to delegate the first call to
    :param exception: exception (class or instance) raised on later calls
    :returns: the wrapping function
    """
    called = [False]  # mutable cell so the closure can flip the flag

    def only_once(*a, **kw):
        if called[0]:
            raise exception
        else:
            called[0] = True
            return callable_thing(*a, **kw)

    # BUG FIX: the wrapper was never returned, so callers received None
    # instead of the once-working callable.
    return only_once
Esempio n. 7
0
def setup_servers(the_object_server=object_server, extra_conf=None):
    """
    Setup proxy, account, container and object servers using a set of fake
    rings and policies.

    :param the_object_server: The object server module to use (optional,
                              defaults to swift.obj.server)
    :param extra_conf: A dict of config options that will update the basic
                       config passed to all server instances.
    :returns: A dict containing the following entries:
                  orig_POLICIES: the value of storage_policy.POLICIES prior to
                                 it being patched with fake policies
                  orig_SysLogHandler: the value of utils.SysLogHandler prior to
                                      it being patched
                  testdir: root directory used for test files
                  test_POLICIES: a StoragePolicyCollection of fake policies
                  test_servers: a tuple of test server instances
                  test_sockets: a tuple of sockets used by test servers
                  test_coros: a tuple of greenthreads in which test servers are
                              running
    """
    # Remember the globals we are about to patch so callers can restore them.
    context = {
        "orig_POLICIES": storage_policy._POLICIES,
        "orig_SysLogHandler": utils.SysLogHandler}

    utils.HASH_PATH_SUFFIX = b'endcap'
    utils.SysLogHandler = mock.MagicMock()
    # Since we're starting up a lot here, we're going to test more than
    # just chunked puts; we're also going to test parts of
    # proxy_server.Application we couldn't get to easily otherwise.
    context["testdir"] = _testdir = \
        os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked')
    mkdirs(_testdir)
    rmtree(_testdir)
    # One fake device directory per drive.
    for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1',
                  'sdf1', 'sdg1', 'sdh1', 'sdi1', 'sdj1',
                  'sdk1', 'sdl1'):
        mkdirs(os.path.join(_testdir, drive, 'tmp'))
    conf = {'devices': _testdir, 'swift_dir': _testdir,
            'mount_check': 'false', 'allowed_headers':
            'content-encoding, x-object-manifest, content-disposition, foo',
            'allow_versions': 't'}
    if extra_conf:
        conf.update(extra_conf)
    # One listening socket per server instance; ports are OS-assigned.
    prolis = listen_zero()
    acc1lis = listen_zero()
    acc2lis = listen_zero()
    con1lis = listen_zero()
    con2lis = listen_zero()
    obj1lis = listen_zero()
    obj2lis = listen_zero()
    obj3lis = listen_zero()
    obj4lis = listen_zero()
    obj5lis = listen_zero()
    obj6lis = listen_zero()
    objsocks = [obj1lis, obj2lis, obj3lis, obj4lis, obj5lis, obj6lis]
    context["test_sockets"] = \
        (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis,
         obj4lis, obj5lis, obj6lis)
    # Account and container rings each get two devices.
    account_ring_path = os.path.join(_testdir, 'account.ring.gz')
    account_devs = [
        {'port': acc1lis.getsockname()[1]},
        {'port': acc2lis.getsockname()[1]},
    ]
    write_fake_ring(account_ring_path, *account_devs)
    container_ring_path = os.path.join(_testdir, 'container.ring.gz')
    container_devs = [
        {'port': con1lis.getsockname()[1]},
        {'port': con2lis.getsockname()[1]},
    ]
    write_fake_ring(container_ring_path, *container_devs)
    # Install the fake policy set: three replication policies, one EC
    # policy, and one EC policy with duplicated fragments.
    storage_policy._POLICIES = storage_policy.StoragePolicyCollection([
        StoragePolicy(0, 'zero', True),
        StoragePolicy(1, 'one', False),
        StoragePolicy(2, 'two', False),
        ECStoragePolicy(3, 'ec', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=2, ec_nparity=1, ec_segment_size=4096),
        ECStoragePolicy(4, 'ec-dup', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=2, ec_nparity=1, ec_segment_size=4096,
                        ec_duplication_factor=2)])
    obj_rings = {
        0: ('sda1', 'sdb1'),
        1: ('sdc1', 'sdd1'),
        2: ('sde1', 'sdf1'),
        # sdg1, sdh1, sdi1 taken by policy 3 (see below)
    }
    for policy_index, devices in obj_rings.items():
        policy = storage_policy.POLICIES[policy_index]
        obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz')
        obj_devs = [
            {'port': objsock.getsockname()[1], 'device': dev}
            for objsock, dev in zip(objsocks, devices)]
        write_fake_ring(obj_ring_path, *obj_devs)

    # write_fake_ring can't handle a 3-element ring, and the EC policy needs
    # at least 6 devs to work with (ec_k=2, ec_m=1, duplication_factor=2),
    # so we do it manually
    devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1',
             'port': obj1lis.getsockname()[1]},
            {'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1',
             'port': obj2lis.getsockname()[1]},
            {'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1',
             'port': obj3lis.getsockname()[1]},
            {'id': 3, 'zone': 0, 'device': 'sdj1', 'ip': '127.0.0.1',
             'port': obj4lis.getsockname()[1]},
            {'id': 4, 'zone': 0, 'device': 'sdk1', 'ip': '127.0.0.1',
             'port': obj5lis.getsockname()[1]},
            {'id': 5, 'zone': 0, 'device': 'sdl1', 'ip': '127.0.0.1',
             'port': obj6lis.getsockname()[1]}]
    pol3_replica2part2dev_id = [[0, 1, 2, 0],
                                [1, 2, 0, 1],
                                [2, 0, 1, 2]]
    pol4_replica2part2dev_id = [[0, 1, 2, 3],
                                [1, 2, 3, 4],
                                [2, 3, 4, 5],
                                [3, 4, 5, 0],
                                [4, 5, 0, 1],
                                [5, 0, 1, 2]]
    # Serialize the hand-built EC rings directly as gzipped pickles.
    obj3_ring_path = os.path.join(
        _testdir, storage_policy.POLICIES[3].ring_name + '.ring.gz')
    part_shift = 30
    with closing(GzipFile(obj3_ring_path, 'wb')) as fh:
        pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh)

    obj4_ring_path = os.path.join(
        _testdir, storage_policy.POLICIES[4].ring_name + '.ring.gz')
    part_shift = 30
    with closing(GzipFile(obj4_ring_path, 'wb')) as fh:
        pickle.dump(RingData(pol4_replica2part2dev_id, devs, part_shift), fh)

    prosrv = proxy_server.Application(conf, logger=debug_logger('proxy'))
    for policy in storage_policy.POLICIES:
        # make sure all the rings are loaded
        prosrv.get_object_ring(policy.idx)
    # don't lose this one!
    context["test_POLICIES"] = storage_policy._POLICIES
    # Instantiate every backend server with the shared conf.
    acc1srv = account_server.AccountController(
        conf, logger=debug_logger('acct1'))
    acc2srv = account_server.AccountController(
        conf, logger=debug_logger('acct2'))
    con1srv = container_server.ContainerController(
        conf, logger=debug_logger('cont1'))
    con2srv = container_server.ContainerController(
        conf, logger=debug_logger('cont2'))
    obj1srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj1'))
    obj2srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj2'))
    obj3srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj3'))
    obj4srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj4'))
    obj5srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj5'))
    obj6srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj6'))
    context["test_servers"] = \
        (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv,
         obj4srv, obj5srv, obj6srv)
    nl = NullLogger()
    logging_prosv = proxy_logging.ProxyLoggingMiddleware(
        listing_formats.ListingFilter(prosrv), conf, logger=prosrv.logger)
    # Spawn each WSGI server in its own greenthread.
    prospa = spawn(wsgi.server, prolis, logging_prosv, nl)
    acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl)
    acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl)
    con1spa = spawn(wsgi.server, con1lis, con1srv, nl)
    con2spa = spawn(wsgi.server, con2lis, con2srv, nl)
    obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl)
    obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl)
    obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl)
    obj4spa = spawn(wsgi.server, obj4lis, obj4srv, nl)
    obj5spa = spawn(wsgi.server, obj5lis, obj5srv, nl)
    obj6spa = spawn(wsgi.server, obj6lis, obj6srv, nl)
    context["test_coros"] = \
        (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa,
         obj4spa, obj5spa, obj6spa)
    # Create account
    ts = normalize_timestamp(time.time())
    partition, nodes = prosrv.account_ring.get_nodes('a')
    for node in nodes:
        conn = swift.proxy.controllers.obj.http_connect(node['ip'],
                                                        node['port'],
                                                        node['device'],
                                                        partition, 'PUT', '/a',
                                                        {'X-Timestamp': ts,
                                                         'x-trans-id': 'test'})
        resp = conn.getresponse()
        assert(resp.status == 201)
    # Create another account
    # used for account-to-account tests
    ts = normalize_timestamp(time.time())
    partition, nodes = prosrv.account_ring.get_nodes('a1')
    for node in nodes:
        conn = swift.proxy.controllers.obj.http_connect(node['ip'],
                                                        node['port'],
                                                        node['device'],
                                                        partition, 'PUT',
                                                        '/a1',
                                                        {'X-Timestamp': ts,
                                                         'x-trans-id': 'test'})
        resp = conn.getresponse()
        assert(resp.status == 201)
    # Create containers, 1 per test policy
    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
             'Connection: close\r\nX-Auth-Token: t\r\n'
             'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
        exp, headers[:len(exp)])
    # Create container in other account
    # used for account-to-account tests
    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write('PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n'
             'Connection: close\r\nX-Auth-Token: t\r\n'
             'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
        exp, headers[:len(exp)])

    # Container on policy 'one'.
    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write(
        'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n'
        'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n'
        'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, \
        "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])

    # Container on policy 'two'.
    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write(
        'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n'
        'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n'
        'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, \
        "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
    return context
Esempio n. 8
0
class TestRelinker(unittest.TestCase):
    """Tests for the relinker used during partition-power increases."""

    def setUp(self):
        # Build a 6-device ring (part power 8) and a single on-disk object
        # whose partition changes when the partition power is increased.
        skip_if_no_xattrs()
        self.logger = FakeLogger()
        self.testdir = tempfile.mkdtemp()
        self.devices = os.path.join(self.testdir, 'node')
        shutil.rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)
        os.mkdir(self.devices)

        self.rb = ring.RingBuilder(8, 6.0, 1)

        for i in range(6):
            ip = "127.0.0.%s" % i
            self.rb.add_dev({
                'id': i,
                'region': 0,
                'zone': 0,
                'weight': 1,
                'ip': ip,
                'port': 10000,
                'device': 'sda1'
            })
        self.rb.rebalance(seed=1)

        self.existing_device = 'sda1'
        os.mkdir(os.path.join(self.devices, self.existing_device))
        self.objects = os.path.join(self.devices, self.existing_device,
                                    'objects')
        os.mkdir(self.objects)
        self._hash = utils.hash_path('a/c/o')
        digest = binascii.unhexlify(self._hash)
        # Partition before the power increase: top 8 bits of the hash
        # (matches the part power 8 used above).
        part = struct.unpack_from('>I', digest)[0] >> 24
        # Partition after the increase to power 9: top 9 bits.
        self.next_part = struct.unpack_from('>I', digest)[0] >> 23
        self.objdir = os.path.join(self.objects, str(part), self._hash[-3:],
                                   self._hash)
        os.makedirs(self.objdir)
        self.object_fname = "1278553064.00000.data"
        self.objname = os.path.join(self.objdir, self.object_fname)
        with open(self.objname, "wb") as dummy:
            dummy.write(b"Hello World!")
            write_metadata(dummy, {'name': '/a/c/o', 'Content-Length': '12'})

        test_policies = [StoragePolicy(0, 'platin', True)]
        storage_policy._POLICIES = StoragePolicyCollection(test_policies)

        # Where the object is expected to live after relinking.
        self.expected_dir = os.path.join(self.objects, str(self.next_part),
                                         self._hash[-3:], self._hash)
        self.expected_file = os.path.join(self.expected_dir, self.object_fname)

    def _save_ring(self):
        # Write the builder out as a ring file for each policy.
        rd = self.rb.get_ring()
        for policy in POLICIES:
            rd.save(os.path.join(self.testdir,
                                 '%s.ring.gz' % policy.ring_name))
            # Enforce ring reloading in relinker
            policy.object_ring = None

    def tearDown(self):
        # Remove scratch files and restore the real storage policies.
        shutil.rmtree(self.testdir, ignore_errors=1)
        storage_policy.reload_storage_policies()

    def test_relink(self):
        # relink() must hardlink the object into its next-power partition.
        self.rb.prepare_increase_partition_power()
        self._save_ring()
        relinker.relink(self.testdir, self.devices, True)

        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))

        # Same inode => it's a hardlink, not a copy.
        stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
        stat_new = os.stat(self.expected_file)
        self.assertEqual(stat_old.st_ino, stat_new.st_ino)

    def _common_test_cleanup(self, relink=True):
        # Create a ring that has prev_part_power set
        self.rb.prepare_increase_partition_power()
        self.rb.increase_partition_power()
        self._save_ring()

        os.makedirs(self.expected_dir)

        if relink:
            # Create a hardlink to the original object name. This is expected
            # after a normal relinker run
            os.link(os.path.join(self.objdir, self.object_fname),
                    self.expected_file)

    def test_cleanup(self):
        # After a successful relink, cleanup removes only the old name.
        self._common_test_cleanup()
        self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True))

        # Old objectname should be removed, new should still exist
        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))
        self.assertFalse(
            os.path.isfile(os.path.join(self.objdir, self.object_fname)))

    def test_cleanup_not_yet_relinked(self):
        # Cleanup must refuse (rc 1) and keep the old file if the object
        # was never relinked to the new location.
        self._common_test_cleanup(relink=False)
        self.assertEqual(1, relinker.cleanup(self.testdir, self.devices, True))

        self.assertTrue(
            os.path.isfile(os.path.join(self.objdir, self.object_fname)))

    def test_cleanup_deleted(self):
        self._common_test_cleanup()

        # Pretend the object got deleted inbetween and there is a tombstone
        fname_ts = self.expected_file[:-4] + "ts"
        os.rename(self.expected_file, fname_ts)

        self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True))

    def test_cleanup_doesnotexist(self):
        self._common_test_cleanup()

        # Pretend the file in the new place got deleted inbetween
        os.remove(self.expected_file)

        # Cleanup should fail and log a warning about the missing file.
        self.assertEqual(
            1, relinker.cleanup(self.testdir, self.devices, True, self.logger))
        self.assertEqual(self.logger.get_lines_for_level('warning'), [
            'Error cleaning up %s: %s' %
            (self.objname, repr(exceptions.DiskFileNotExist()))
        ])

    @patch_policies([
        ECStoragePolicy(0,
                        name='platin',
                        is_default=True,
                        ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=4,
                        ec_nparity=2)
    ])
    def test_cleanup_non_durable_fragment(self):
        self._common_test_cleanup()

        # Switch the policy type so that actually all fragments are non-durable
        # and raise a DiskFileNotExist in EC in this test. However, if the
        # counterpart exists in the new location, this is ok - it will be fixed
        # by the reconstructor later on
        self.assertEqual(
            0, relinker.cleanup(self.testdir, self.devices, True, self.logger))
        self.assertEqual(self.logger.get_lines_for_level('warning'), [])

    def test_cleanup_quarantined(self):
        self._common_test_cleanup()
        # Pretend the object in the new place got corrupted
        with open(self.expected_file, "wb") as obj:
            obj.write(b'trash')

        # Corrupt data fails the audit, is quarantined, and cleanup
        # reports failure.
        self.assertEqual(
            1, relinker.cleanup(self.testdir, self.devices, True, self.logger))

        self.assertIn('failed audit and was quarantined',
                      self.logger.get_lines_for_level('warning')[0])
0
class TestRelinker(unittest.TestCase):
    def setUp(self):
        """Build a 6-device ring and create a single test object."""
        skip_if_no_xattrs()
        self.logger = FakeLogger()
        self.testdir = tempfile.mkdtemp()
        self.devices = os.path.join(self.testdir, 'node')
        shutil.rmtree(self.testdir, ignore_errors=True)
        os.mkdir(self.testdir)
        os.mkdir(self.devices)

        self.rb = ring.RingBuilder(PART_POWER, 6.0, 1)
        for dev_id in range(6):
            self.rb.add_dev({'id': dev_id,
                             'region': 0,
                             'zone': 0,
                             'weight': 1,
                             'ip': "127.0.0.%s" % dev_id,
                             'port': 10000,
                             'device': 'sda1'})
        self.rb.rebalance(seed=1)

        self.existing_device = 'sda1'
        os.mkdir(os.path.join(self.devices, self.existing_device))
        self.objects = os.path.join(
            self.devices, self.existing_device, 'objects')
        self._setup_object()

    def _setup_object(self, condition=None):
        # Create a test object whose pre- and post-power-increase
        # partitions differ.  Retries with random object names until the
        # two partitions are different and *condition* (if given) accepts
        # the old partition; fails the test after 50 attempts.
        attempts = []
        for _ in range(50):
            account = 'a'
            container = 'c'
            obj = 'o-' + str(uuid.uuid4())
            self._hash = utils.hash_path(account, container, obj)
            digest = binascii.unhexlify(self._hash)
            # Top bits of the hash give the partition; >>24 and >>23 take
            # 8 resp. 9 bits (assumes PART_POWER == 8 -- TODO confirm).
            self.part = struct.unpack_from('>I', digest)[0] >> 24
            self.next_part = struct.unpack_from('>I', digest)[0] >> 23
            path = os.path.join(os.path.sep, account, container, obj)
            # There's 1/512 chance that both old and new parts will be 0;
            # that's not a terribly interesting case, as there's nothing to do
            attempts.append((self.part, self.next_part, 2**PART_POWER))
            if (self.part != self.next_part
                    and (condition(self.part) if condition else True)):
                break
        else:
            # for/else: no suitable object was found in 50 tries.
            self.fail(
                'Failed to setup object satisfying test preconditions %s' %
                attempts)

        # Lay the object down under its current (pre-increase) partition.
        shutil.rmtree(self.objects, ignore_errors=True)
        os.mkdir(self.objects)
        self.objdir = os.path.join(self.objects, str(self.part),
                                   self._hash[-3:], self._hash)
        os.makedirs(self.objdir)
        self.object_fname = utils.Timestamp.now().internal + ".data"

        self.objname = os.path.join(self.objdir, self.object_fname)
        with open(self.objname, "wb") as dummy:
            dummy.write(b"Hello World!")
            write_metadata(dummy, {'name': path, 'Content-Length': '12'})

        self.policy = StoragePolicy(0, 'platinum', True)
        storage_policy._POLICIES = StoragePolicyCollection([self.policy])

        # Convenience paths for both the old and the expected new location.
        self.part_dir = os.path.join(self.objects, str(self.part))
        self.suffix_dir = os.path.join(self.part_dir, self._hash[-3:])
        self.next_part_dir = os.path.join(self.objects, str(self.next_part))
        self.next_suffix_dir = os.path.join(self.next_part_dir,
                                            self._hash[-3:])
        self.expected_dir = os.path.join(self.next_suffix_dir, self._hash)
        self.expected_file = os.path.join(self.expected_dir, self.object_fname)

    def _save_ring(self):
        """Serialize the ring builder to a ring file for every policy."""
        self.rb._ring = None  # drop the cached ring so get_ring() rebuilds
        ring_data = self.rb.get_ring()
        for policy in POLICIES:
            ring_path = os.path.join(
                self.testdir, '%s.ring.gz' % policy.ring_name)
            ring_data.save(ring_path)
            # Enforce ring reloading in relinker
            policy.object_ring = None

    def tearDown(self):
        # Remove the scratch tree and restore the real storage policies
        # that _setup_object replaced.
        shutil.rmtree(self.testdir, ignore_errors=True)
        storage_policy.reload_storage_policies()

    @contextmanager
    def _mock_listdir(self):
        """Patch swift.common.utils.listdir so that listing the objects
        directory raises OSError while all other paths work normally."""
        real_listdir = utils.listdir

        def failing_listdir(path):
            if path == self.objects:
                raise OSError
            return real_listdir(path)

        with mock.patch('swift.common.utils.listdir', failing_listdir):
            yield

    def _do_test_relinker_drop_privileges(self, command):
        """Verify relinker.main drops privileges for *command*.

        Covers: no user configured (no drop), --user on the CLI, CLI
        --user taking precedence over the conf-file user, and the
        conf-file user alone.  drop_privileges must always be called
        before the command runs.
        """
        @contextmanager
        def do_mocks():
            # attach mocks to call_capture so that call order can be asserted
            call_capture = mock.Mock()
            with mock.patch('swift.cli.relinker.drop_privileges') as mock_dp:
                with mock.patch('swift.cli.relinker.' + command,
                                return_value=0) as mock_command:
                    call_capture.attach_mock(mock_dp, 'drop_privileges')
                    call_capture.attach_mock(mock_command, command)
                    yield call_capture

        # no user option
        with do_mocks() as capture:
            self.assertEqual(0, relinker.main([command]))
        self.assertEqual([(command, mock.ANY, mock.ANY)], capture.method_calls)

        # cli option --user
        with do_mocks() as capture:
            self.assertEqual(0, relinker.main([command, '--user', 'cli_user']))
        self.assertEqual([('drop_privileges', ('cli_user', ), {}),
                          (command, mock.ANY, mock.ANY)], capture.method_calls)

        # cli option --user takes precedence over conf file user
        # BUG FIX: the mocked conf previously returned a redacted
        # placeholder ('******') instead of a real user name.
        with do_mocks() as capture:
            with mock.patch('swift.cli.relinker.readconf',
                            return_value={'user': 'conf_user'}):
                self.assertEqual(
                    0,
                    relinker.main([command, 'conf_file', '--user',
                                   'cli_user']))
        self.assertEqual([('drop_privileges', ('cli_user', ), {}),
                          (command, mock.ANY, mock.ANY)], capture.method_calls)

        # conf file user
        # BUG FIX: with the placeholder value the assertion below, which
        # expects ('conf_user', ), could never pass.
        with do_mocks() as capture:
            with mock.patch('swift.cli.relinker.readconf',
                            return_value={'user': 'conf_user'}):
                self.assertEqual(0, relinker.main([command, 'conf_file']))
        self.assertEqual([('drop_privileges', ('conf_user', ), {}),
                          (command, mock.ANY, mock.ANY)], capture.method_calls)

    def test_relinker_drop_privileges(self):
        """Exercise privilege dropping for both relinker subcommands."""
        for command in ('relink', 'cleanup'):
            self._do_test_relinker_drop_privileges(command)

    def _do_test_relinker_files_per_second(self, command):
        # Verify handling of the --files-per-second option for *command*:
        # absent or zero -> no rate limiting; positive -> locations are
        # wrapped in a RateLimitedIterator; negative -> argparse error.
        # no files per second
        with mock.patch('swift.cli.relinker.RateLimitedIterator') as it:
            self.assertEqual(
                0,
                relinker.main([
                    command,
                    '--swift-dir',
                    self.testdir,
                    '--devices',
                    self.devices,
                    '--skip-mount',
                ]))
        it.assert_not_called()

        # zero files per second
        with mock.patch('swift.cli.relinker.RateLimitedIterator') as it:
            self.assertEqual(
                0,
                relinker.main([
                    command, '--swift-dir', self.testdir, '--devices',
                    self.devices, '--skip-mount', '--files-per-second', '0'
                ]))
        it.assert_not_called()

        # positive files per second
        locations = iter([])
        with mock.patch('swift.cli.relinker.audit_location_generator',
                        return_value=locations):
            with mock.patch('swift.cli.relinker.RateLimitedIterator') as it:
                self.assertEqual(
                    0,
                    relinker.main([
                        command, '--swift-dir', self.testdir, '--devices',
                        self.devices, '--skip-mount', '--files-per-second',
                        '1.23'
                    ]))
        it.assert_called_once_with(locations, 1.23)

        # negative files per second
        err = StringIO()
        with mock.patch('sys.stderr', err):
            with self.assertRaises(SystemExit) as cm:
                relinker.main([
                    command, '--swift-dir', self.testdir, '--devices',
                    self.devices, '--skip-mount', '--files-per-second', '-1'
                ])
        self.assertEqual(2, cm.exception.code)  # NB exit code 2 from argparse
        self.assertIn('--files-per-second: invalid non_negative_float value',
                      err.getvalue())

    def test_relink_files_per_second(self):
        """Exercise --files-per-second handling for the relink command."""
        self.rb.prepare_increase_partition_power()
        self._save_ring()
        self._do_test_relinker_files_per_second('relink')

    def test_cleanup_files_per_second(self):
        """Exercise --files-per-second handling for the cleanup command."""
        self._common_test_cleanup()
        self._do_test_relinker_files_per_second('cleanup')

    def test_conf_file(self):
        """Verify conf-file loading and its interaction with CLI options.

        Checks that conf-file values are passed through to relink() and
        DiskFileRouter, that --debug and other CLI options override the
        conf file, and that defaults apply when no conf file is given.
        """
        config = """
        [DEFAULT]
        swift_dir = %s
        devices = /test/node
        mount_check = false
        reclaim_age = 5184000

        [object-relinker]
        log_level = WARNING
        log_name = test-relinker
        """ % self.testdir
        conf_file = os.path.join(self.testdir, 'relinker.conf')
        with open(conf_file, 'w') as f:
            f.write(dedent(config))

        # cite conf file on command line
        with mock.patch('swift.cli.relinker.relink') as mock_relink:
            relinker.main(['relink', conf_file, '--device', 'sdx', '--debug'])
        exp_conf = {
            '__file__': mock.ANY,
            'swift_dir': self.testdir,
            'devices': '/test/node',
            'mount_check': False,
            'reclaim_age': '5184000',
            'files_per_second': 0.0,
            'log_name': 'test-relinker',
            'log_level': 'DEBUG',
        }
        mock_relink.assert_called_once_with(exp_conf, mock.ANY, device='sdx')
        logger = mock_relink.call_args[0][1]
        # --debug overrides conf file
        self.assertEqual(logging.DEBUG, logger.getEffectiveLevel())
        self.assertEqual('test-relinker', logger.logger.name)

        # check the conf is passed to DiskFileRouter
        self._save_ring()
        with mock.patch('swift.cli.relinker.diskfile.DiskFileRouter',
                        side_effect=DiskFileRouter) as mock_dfr:
            relinker.main(['relink', conf_file, '--device', 'sdx', '--debug'])
        mock_dfr.assert_called_once_with(exp_conf, mock.ANY)

        # flip mount_check, no --debug...
        config = """
        [DEFAULT]
        swift_dir = test/swift/dir
        devices = /test/node
        mount_check = true

        [object-relinker]
        log_level = WARNING
        log_name = test-relinker
        files_per_second = 11.1
        """
        with open(conf_file, 'w') as f:
            f.write(dedent(config))
        with mock.patch('swift.cli.relinker.relink') as mock_relink:
            relinker.main(['relink', conf_file, '--device', 'sdx'])
        mock_relink.assert_called_once_with(
            {
                '__file__': mock.ANY,
                'swift_dir': 'test/swift/dir',
                'devices': '/test/node',
                'mount_check': True,
                'files_per_second': 11.1,
                'log_name': 'test-relinker',
                'log_level': 'WARNING',
            },
            mock.ANY,
            device='sdx')
        logger = mock_relink.call_args[0][1]
        # without --debug the conf file's WARNING level applies
        self.assertEqual(logging.WARNING, logger.getEffectiveLevel())
        self.assertEqual('test-relinker', logger.logger.name)

        # override with cli options...
        with mock.patch('swift.cli.relinker.relink') as mock_relink:
            relinker.main([
                'relink', conf_file, '--device', 'sdx', '--debug',
                '--swift-dir', 'cli-dir', '--devices', 'cli-devs',
                '--skip-mount-check', '--files-per-second', '2.2'
            ])
        mock_relink.assert_called_once_with(
            {
                '__file__': mock.ANY,
                'swift_dir': 'cli-dir',
                'devices': 'cli-devs',
                'mount_check': False,
                'files_per_second': 2.2,
                'log_level': 'DEBUG',
                'log_name': 'test-relinker',
            },
            mock.ANY,
            device='sdx')

        # no conf file: defaults plus CLI options only
        with mock.patch('swift.cli.relinker.relink') as mock_relink, \
                mock.patch('logging.basicConfig') as mock_logging_config:
            relinker.main([
                'relink', '--device', 'sdx', '--swift-dir', 'cli-dir',
                '--devices', 'cli-devs', '--skip-mount-check'
            ])
        mock_relink.assert_called_once_with(
            {
                'swift_dir': 'cli-dir',
                'devices': 'cli-devs',
                'mount_check': False,
                'files_per_second': 0.0,
                'log_level': 'INFO',
            },
            mock.ANY,
            device='sdx')
        mock_logging_config.assert_called_once_with(format='%(message)s',
                                                    level=logging.INFO,
                                                    filename=None)

        with mock.patch('swift.cli.relinker.relink') as mock_relink, \
                mock.patch('logging.basicConfig') as mock_logging_config:
            relinker.main([
                'relink', '--device', 'sdx', '--debug', '--swift-dir',
                'cli-dir', '--devices', 'cli-devs', '--skip-mount-check'
            ])
        mock_relink.assert_called_once_with(
            {
                'swift_dir': 'cli-dir',
                'devices': 'cli-devs',
                'mount_check': False,
                'files_per_second': 0.0,
                'log_level': 'DEBUG',
            },
            mock.ANY,
            device='sdx')
        # --debug is now effective
        mock_logging_config.assert_called_once_with(format='%(message)s',
                                                    level=logging.DEBUG,
                                                    filename=None)

    def test_relink_first_quartile_no_rehash(self):
        """Relinking a lower-half partition does not rehash suffixes.

        The new suffix is only invalidated (hashes.invalid) during relink;
        rehashing is deferred to cleanup.
        """
        # we need object name in lower half of current part
        self._setup_object(lambda part: part < 2**(PART_POWER - 1))
        self.assertLess(self.next_part, 2**PART_POWER)
        self.rb.prepare_increase_partition_power()
        self._save_ring()

        with mock.patch('swift.obj.diskfile.DiskFileManager._hash_suffix',
                        return_value='foo') as mock_hash_suffix:
            self.assertEqual(
                0,
                relinker.main([
                    'relink',
                    '--swift-dir',
                    self.testdir,
                    '--devices',
                    self.devices,
                    '--skip-mount',
                ]))
        # ... and no rehash
        self.assertEqual([], mock_hash_suffix.call_args_list)

        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))

        # new file is a hard link to the original (same inode)
        stat_old = os.stat(os.path.join(self.objdir, self.object_fname))
        stat_new = os.stat(self.expected_file)
        self.assertEqual(stat_old.st_ino, stat_new.st_ino)
        # Invalidated now, rehashed during cleanup
        with open(os.path.join(self.next_part_dir, 'hashes.invalid')) as fp:
            self.assertEqual(fp.read(), self._hash[-3:] + '\n')
        self.assertFalse(
            os.path.exists(os.path.join(self.next_part_dir, 'hashes.pkl')))

    def test_relink_second_quartile_does_rehash(self):
        """Relinking an upper-half partition rehashes the new suffix dir."""
        # we need a part in upper half of current part power
        self._setup_object(lambda part: part >= 2**(PART_POWER - 1))
        self.assertGreaterEqual(self.next_part, 2**PART_POWER)
        self.assertTrue(self.rb.prepare_increase_partition_power())
        self._save_ring()

        with mock.patch('swift.obj.diskfile.DiskFileManager._hash_suffix',
                        return_value='foo') as mock_hash_suffix:
            self.assertEqual(
                0,
                relinker.main([
                    'relink',
                    '--swift-dir',
                    self.testdir,
                    '--devices',
                    self.devices,
                    '--skip-mount',
                ]))
        # we rehash the new suffix dirs as we go
        self.assertEqual([mock.call(self.next_suffix_dir, policy=self.policy)],
                         mock_hash_suffix.call_args_list)

        # Invalidated and rehashed during relinking
        with open(os.path.join(self.next_part_dir, 'hashes.invalid')) as fp:
            self.assertEqual(fp.read(), '')
        with open(os.path.join(self.next_part_dir, 'hashes.pkl'), 'rb') as fp:
            hashes = pickle.load(fp)
        self.assertIn(self._hash[-3:], hashes)
        self.assertEqual('foo', hashes[self._hash[-3:]])
        self.assertFalse(
            os.path.exists(os.path.join(self.part_dir, 'hashes.invalid')))

    def test_relink_no_applicable_policy(self):
        """Relink exits with status 2 when no part power increase is pending."""
        # NB: the ring is saved *without* preparing a part power increase
        self._save_ring()
        argv = ['relink', '--swift-dir', self.testdir,
                '--devices', self.devices]
        with mock.patch.object(relinker.logging, 'getLogger',
                               return_value=self.logger):
            rc = relinker.main(argv)
        self.assertEqual(2, rc)
        self.assertEqual(self.logger.get_lines_for_level('warning'),
                         ['No policy found to increase the partition power.'])

    def test_relink_not_mounted(self):
        """Unmounted devices are skipped and relink exits non-zero."""
        self.rb.prepare_increase_partition_power()
        self._save_ring()
        argv = ['relink', '--swift-dir', self.testdir,
                '--devices', self.devices]
        with mock.patch.object(relinker.logging, 'getLogger',
                               return_value=self.logger):
            rc = relinker.main(argv)
        self.assertEqual(1, rc)
        self.assertEqual(
            self.logger.get_lines_for_level('warning'),
            ['Skipping sda1 as it is not mounted', '1 disks were unmounted'])

    def test_relink_listdir_error(self):
        """A partition listdir failure is logged and relink exits non-zero."""
        self.rb.prepare_increase_partition_power()
        self._save_ring()
        argv = ['relink', '--swift-dir', self.testdir,
                '--devices', self.devices, '--skip-mount-check']
        with mock.patch.object(relinker.logging, 'getLogger',
                               return_value=self.logger), \
                self._mock_listdir():
            rc = relinker.main(argv)
        self.assertEqual(1, rc)
        self.assertEqual(self.logger.get_lines_for_level('warning'), [
            'Skipping %s because ' % self.objects,
            'There were 1 errors listing partition directories'
        ])

    def test_relink_device_filter(self):
        """--device restricts relinking to the named device."""
        self.rb.prepare_increase_partition_power()
        self._save_ring()
        argv = ['relink', '--swift-dir', self.testdir,
                '--devices', self.devices, '--skip-mount',
                '--device', self.existing_device]
        self.assertEqual(0, relinker.main(argv))

        # the object was relinked into the new partition dir...
        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))
        # ...as a hard link to the original file (same inode)
        old_stat = os.stat(os.path.join(self.objdir, self.object_fname))
        new_stat = os.stat(self.expected_file)
        self.assertEqual(old_stat.st_ino, new_stat.st_ino)

    def test_relink_device_filter_invalid(self):
        """--device naming an unknown device relinks nothing."""
        self.rb.prepare_increase_partition_power()
        self._save_ring()
        argv = ['relink', '--swift-dir', self.testdir,
                '--devices', self.devices, '--skip-mount',
                '--device', 'none']
        self.assertEqual(0, relinker.main(argv))

        # nothing should have been relinked
        self.assertFalse(os.path.isdir(self.expected_dir))
        self.assertFalse(os.path.isfile(self.expected_file))

    def _common_test_cleanup(self, relink=True):
        """Prepare state for cleanup tests.

        Prepares and then completes a partition power increase, optionally
        running the relink step in between (mimicking a finished relink
        phase).

        :param relink: if True, run relinker.relink before increasing the
            partition power
        """
        # Create a ring that has prev_part_power set
        self.rb.prepare_increase_partition_power()
        self._save_ring()

        if relink:
            conf = {
                'swift_dir': self.testdir,
                'devices': self.devices,
                'mount_check': False,
                'files_per_second': 0
            }
            self.assertEqual(
                0,
                relinker.relink(conf,
                                logger=self.logger,
                                device=self.existing_device))
        self.rb.increase_partition_power()
        self._save_ring()

    def test_cleanup_first_quartile_does_rehash(self):
        """Cleanup of a lower-half partition removes the old location and
        rehashes the new suffix (invalidated earlier during relink)."""
        # we need object name in lower half of current part
        self._setup_object(lambda part: part < 2**(PART_POWER - 1))
        self.assertLess(self.next_part, 2**PART_POWER)
        self._common_test_cleanup()

        # don't mock re-hash for variety (and so we can assert side-effects)
        self.assertEqual(
            0,
            relinker.main([
                'cleanup',
                '--swift-dir',
                self.testdir,
                '--devices',
                self.devices,
                '--skip-mount',
            ]))

        # Old objectname should be removed, new should still exist
        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))
        self.assertFalse(
            os.path.isfile(os.path.join(self.objdir, self.object_fname)))
        self.assertFalse(os.path.exists(self.part_dir))

        # hashes.invalid is empty and hashes.pkl contains the new suffix,
        # i.e. the rehash happened during cleanup
        with open(os.path.join(self.next_part_dir, 'hashes.invalid')) as fp:
            self.assertEqual(fp.read(), '')
        with open(os.path.join(self.next_part_dir, 'hashes.pkl'), 'rb') as fp:
            hashes = pickle.load(fp)
        self.assertIn(self._hash[-3:], hashes)

        # create an object in a first quartile partition and pretend it should
        # be there; check that cleanup does not fail and does not remove the
        # partition!
        self._setup_object(lambda part: part < 2**(PART_POWER - 1))
        with mock.patch('swift.cli.relinker.replace_partition_in_path',
                        lambda *args: args[0]):
            self.assertEqual(
                0,
                relinker.main([
                    'cleanup',
                    '--swift-dir',
                    self.testdir,
                    '--devices',
                    self.devices,
                    '--skip-mount',
                ]))
        self.assertTrue(os.path.exists(self.objname))

    def test_cleanup_second_quartile_no_rehash(self):
        """Cleanup of an upper-half partition rehashes only the old suffix.

        The old suffix dir is rehashed before the old partition is removed;
        the new suffix dir was already rehashed during relink and is not
        rehashed again.
        """
        # we need a part in upper half of current part power
        self._setup_object(lambda part: part >= 2**(PART_POWER - 1))
        # NB: the setup predicate admits equality, so the sanity check must
        # too -- assertGreater would fail spuriously when the chosen part
        # lands exactly on the boundary (cf. the >= check at the matching
        # relink test above)
        self.assertGreaterEqual(self.part, 2**(PART_POWER - 1))
        self._common_test_cleanup()

        def fake_hash_suffix(suffix_dir, policy):
            # check that the suffix dir is empty and remove it just like the
            # real _hash_suffix
            self.assertEqual([self._hash], os.listdir(suffix_dir))
            hash_dir = os.path.join(suffix_dir, self._hash)
            self.assertEqual([], os.listdir(hash_dir))
            os.rmdir(hash_dir)
            os.rmdir(suffix_dir)
            raise PathNotDir()

        with mock.patch('swift.obj.diskfile.DiskFileManager._hash_suffix',
                        side_effect=fake_hash_suffix) as mock_hash_suffix:
            self.assertEqual(
                0,
                relinker.main([
                    'cleanup',
                    '--swift-dir',
                    self.testdir,
                    '--devices',
                    self.devices,
                    '--skip-mount',
                ]))

        # the old suffix dir is rehashed before the old partition is removed,
        # but the new suffix dir is not rehashed
        self.assertEqual([mock.call(self.suffix_dir, policy=self.policy)],
                         mock_hash_suffix.call_args_list)

        # Old objectname should be removed, new should still exist
        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))
        self.assertFalse(
            os.path.isfile(os.path.join(self.objdir, self.object_fname)))
        self.assertFalse(os.path.exists(self.part_dir))

        with open(
                os.path.join(self.objects, str(self.next_part),
                             'hashes.invalid')) as fp:
            self.assertEqual(fp.read(), '')
        with open(
                os.path.join(self.objects, str(self.next_part), 'hashes.pkl'),
                'rb') as fp:
            hashes = pickle.load(fp)
        self.assertIn(self._hash[-3:], hashes)

    def test_cleanup_no_applicable_policy(self):
        """Cleanup exits with status 2 when no part power increase is pending."""
        # NB: the ring is saved *without* preparing a part power increase
        self._save_ring()
        argv = ['cleanup', '--swift-dir', self.testdir,
                '--devices', self.devices]
        with mock.patch.object(relinker.logging, 'getLogger',
                               return_value=self.logger):
            rc = relinker.main(argv)
        self.assertEqual(2, rc)
        self.assertEqual(self.logger.get_lines_for_level('warning'),
                         ['No policy found to increase the partition power.'])

    def test_cleanup_not_mounted(self):
        """Unmounted devices are skipped and cleanup exits non-zero."""
        self._common_test_cleanup()
        argv = ['cleanup', '--swift-dir', self.testdir,
                '--devices', self.devices]
        with mock.patch.object(relinker.logging, 'getLogger',
                               return_value=self.logger):
            rc = relinker.main(argv)
        self.assertEqual(1, rc)
        self.assertEqual(
            self.logger.get_lines_for_level('warning'),
            ['Skipping sda1 as it is not mounted', '1 disks were unmounted'])

    def test_cleanup_listdir_error(self):
        """A partition listdir failure is logged and cleanup exits non-zero."""
        self._common_test_cleanup()
        argv = ['cleanup', '--swift-dir', self.testdir,
                '--devices', self.devices, '--skip-mount-check']
        with mock.patch.object(relinker.logging, 'getLogger',
                               return_value=self.logger), \
                self._mock_listdir():
            rc = relinker.main(argv)
        self.assertEqual(1, rc)
        self.assertEqual(self.logger.get_lines_for_level('warning'), [
            'Skipping %s because ' % self.objects,
            'There were 1 errors listing partition directories'
        ])

    def test_cleanup_device_filter(self):
        """--device restricts cleanup to the named device."""
        self._common_test_cleanup()
        argv = ['cleanup', '--swift-dir', self.testdir,
                '--devices', self.devices, '--skip-mount',
                '--device', self.existing_device]
        self.assertEqual(0, relinker.main(argv))

        # the old object name is gone, the relinked copy remains
        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))
        self.assertFalse(
            os.path.isfile(os.path.join(self.objdir, self.object_fname)))

    def test_cleanup_device_filter_invalid(self):
        """--device naming an unknown device cleans up nothing."""
        self._common_test_cleanup()
        argv = ['cleanup', '--swift-dir', self.testdir,
                '--devices', self.devices, '--skip-mount',
                '--device', 'none']
        self.assertEqual(0, relinker.main(argv))

        # both the old and the relinked object names survive
        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))
        self.assertTrue(
            os.path.isfile(os.path.join(self.objdir, self.object_fname)))

    def test_relink_cleanup(self):
        """Full relink-then-cleanup pass, checking the per-device state file.

        Verifies that relink records completed partitions, and that cleanup
        replaces (not rewrites) the state file, resetting state for the new
        part_power/next_part_power tuple.
        """
        state_file = os.path.join(self.devices, self.existing_device,
                                  'relink.objects.json')

        self.rb.prepare_increase_partition_power()
        self._save_ring()
        self.assertEqual(
            0,
            relinker.main([
                'relink',
                '--swift-dir',
                self.testdir,
                '--devices',
                self.devices,
                '--skip-mount',
            ]))
        state = {str(self.part): True}
        with open(state_file, 'rt') as f:
            orig_inode = os.stat(state_file).st_ino
            self.assertEqual(
                json.load(f), {
                    "part_power": PART_POWER,
                    "next_part_power": PART_POWER + 1,
                    "state": state
                })

        self.rb.increase_partition_power()
        self.rb._ring = None  # Force builder to reload ring
        self._save_ring()
        with open(state_file, 'rt') as f:
            # Keep the state file open during cleanup so the inode can't be
            # released/re-used when it gets unlinked
            self.assertEqual(orig_inode, os.stat(state_file).st_ino)
            self.assertEqual(
                0,
                relinker.main([
                    'cleanup',
                    '--swift-dir',
                    self.testdir,
                    '--devices',
                    self.devices,
                    '--skip-mount',
                ]))
            # cleanup wrote a fresh state file (new inode)
            self.assertNotEqual(orig_inode, os.stat(state_file).st_ino)
        if self.next_part < 2**PART_POWER:
            state[str(self.next_part)] = True
        with open(state_file, 'rt') as f:
            # NB: part_power/next_part_power tuple changed, so state was reset
            # (though we track prev_part_power for an efficient clean up)
            self.assertEqual(
                json.load(f), {
                    "prev_part_power": PART_POWER,
                    "part_power": PART_POWER + 1,
                    "next_part_power": PART_POWER + 1,
                    "state": state
                })

    def test_devices_filter_filtering(self):
        """devices_filter returns the set of devices matching the filter."""
        # no filter selects every listed device
        self.assertEqual(
            set([self.existing_device]),
            relinker.devices_filter(None, "", [self.existing_device]))

        # a matching filter selects only the named device
        self.assertEqual(
            set([self.existing_device]),
            relinker.devices_filter(self.existing_device, "",
                                    [self.existing_device, 'sda2']))

        # a non-matching filter selects nothing
        self.assertEqual(
            set(),
            relinker.devices_filter('none', "", [self.existing_device]))

    def test_hook_pre_post_device_locking(self):
        """hook_pre_device takes an exclusive per-device lock file and
        hook_post_device releases it."""
        locks = [None]
        device_path = os.path.join(self.devices, self.existing_device)
        datadir = 'object'
        lock_file = os.path.join(device_path, '.relink.%s.lock' % datadir)

        # The first run gets the lock
        states = {"state": {}}
        relinker.hook_pre_device(locks, states, datadir, device_path)
        self.assertNotEqual([None], locks)

        # A following run would block
        with self.assertRaises(IOError) as raised:
            with open(lock_file, 'a') as f:
                fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        self.assertEqual(errno.EAGAIN, raised.exception.errno)

        # hook_post_device releases the lock and resets the locks list
        relinker.hook_post_device(locks, "")
        self.assertEqual([None], locks)

        # ... so taking the flock now succeeds
        with open(lock_file, 'a') as f:
            fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)

    def test_state_file(self):
        """Walk the relink/cleanup state machine via the state file.

        Exercises partitions_filter, hook_post_partition and
        hook_pre_device across interrupted relink and cleanup runs,
        checking the JSON state file after each step, and verifying the
        state resets when the part power tuple changes or the file is
        corrupt.
        """
        device_path = os.path.join(self.devices, self.existing_device)
        datadir = 'objects'
        datadir_path = os.path.join(device_path, datadir)
        state_file = os.path.join(device_path, 'relink.%s.json' % datadir)

        def call_partition_filter(part_power, next_part_power, parts):
            # Partition 312 will be ignored because it must have been created
            # by the relinker
            return relinker.partitions_filter(states, part_power,
                                              next_part_power, datadir_path,
                                              parts)

        # Start relinking
        states = {
            "part_power": PART_POWER,
            "next_part_power": PART_POWER + 1,
            "state": {}
        }

        # Load the states: As it starts, it must be empty
        locks = [None]
        relinker.hook_pre_device(locks, states, datadir, device_path)
        self.assertEqual({}, states["state"])
        os.close(locks[0])  # Release the lock

        # Partition 312 is ignored because it must have been created with the
        # next_part_power, so it does not need to be relinked
        # 96 and 227 are reverse ordered
        # 'auditor_status.json' is ignored because it's not a partition name
        self.assertEqual(
            ['227', '96'],
            call_partition_filter(PART_POWER, PART_POWER + 1,
                                  ['96', '227', '312', 'auditor_status.json']))
        self.assertEqual(states["state"], {'96': False, '227': False})

        pol = POLICIES[0]
        mgr = DiskFileRouter({
            'devices': self.devices,
            'mount_check': False
        }, self.logger)[pol]

        # Ack partition 96
        relinker.hook_post_partition(states, relinker.STEP_RELINK, pol, mgr,
                                     os.path.join(datadir_path, '96'))
        self.assertEqual(states["state"], {'96': True, '227': False})
        with open(state_file, 'rt') as f:
            self.assertEqual(
                json.load(f), {
                    "part_power": PART_POWER,
                    "next_part_power": PART_POWER + 1,
                    "state": {
                        '96': True,
                        '227': False
                    }
                })

        # Restart relinking after only part 96 was done
        self.assertEqual(['227'],
                         call_partition_filter(PART_POWER, PART_POWER + 1,
                                               ['96', '227', '312']))
        self.assertEqual(states["state"], {'96': True, '227': False})

        # Ack partition 227
        relinker.hook_post_partition(states, relinker.STEP_RELINK, pol, mgr,
                                     os.path.join(datadir_path, '227'))
        self.assertEqual(states["state"], {'96': True, '227': True})
        with open(state_file, 'rt') as f:
            self.assertEqual(
                json.load(f), {
                    "part_power": PART_POWER,
                    "next_part_power": PART_POWER + 1,
                    "state": {
                        '96': True,
                        '227': True
                    }
                })

        # If the process restarts, it reloads the state
        locks = [None]
        states = {
            "part_power": PART_POWER,
            "next_part_power": PART_POWER + 1,
            "state": {},
        }
        relinker.hook_pre_device(locks, states, datadir, device_path)
        self.assertEqual(
            states, {
                "part_power": PART_POWER,
                "next_part_power": PART_POWER + 1,
                "state": {
                    '96': True,
                    '227': True
                }
            })
        os.close(locks[0])  # Release the lock

        # Start cleanup -- note that part_power and next_part_power now match!
        states = {
            "part_power": PART_POWER + 1,
            "next_part_power": PART_POWER + 1,
            "state": {},
        }
        # ...which means our state file was ignored
        relinker.hook_pre_device(locks, states, datadir, device_path)
        self.assertEqual(
            states, {
                "prev_part_power": PART_POWER,
                "part_power": PART_POWER + 1,
                "next_part_power": PART_POWER + 1,
                "state": {}
            })
        os.close(locks[0])  # Release the lock

        self.assertEqual(['227', '96'],
                         call_partition_filter(PART_POWER + 1, PART_POWER + 1,
                                               ['96', '227', '312']))
        # Ack partition 227
        relinker.hook_post_partition(states, relinker.STEP_CLEANUP, pol, mgr,
                                     os.path.join(datadir_path, '227'))
        self.assertEqual(states["state"], {'96': False, '227': True})
        with open(state_file, 'rt') as f:
            self.assertEqual(
                json.load(f), {
                    "prev_part_power": PART_POWER,
                    "part_power": PART_POWER + 1,
                    "next_part_power": PART_POWER + 1,
                    "state": {
                        '96': False,
                        '227': True
                    }
                })

        # Restart cleanup after only part 227 was done
        self.assertEqual(['96'],
                         call_partition_filter(PART_POWER + 1, PART_POWER + 1,
                                               ['96', '227', '312']))
        self.assertEqual(states["state"], {'96': False, '227': True})

        # Ack partition 96
        relinker.hook_post_partition(states, relinker.STEP_CLEANUP, pol, mgr,
                                     os.path.join(datadir_path, '96'))
        self.assertEqual(states["state"], {'96': True, '227': True})
        with open(state_file, 'rt') as f:
            self.assertEqual(
                json.load(f), {
                    "prev_part_power": PART_POWER,
                    "part_power": PART_POWER + 1,
                    "next_part_power": PART_POWER + 1,
                    "state": {
                        '96': True,
                        '227': True
                    }
                })

        # At the end, the state is still accurate
        locks = [None]
        states = {
            "prev_part_power": PART_POWER,
            "part_power": PART_POWER + 1,
            "next_part_power": PART_POWER + 1,
            "state": {},
        }
        relinker.hook_pre_device(locks, states, datadir, device_path)
        self.assertEqual(states["state"], {'96': True, '227': True})
        os.close(locks[0])  # Release the lock

        # If the part_power/next_part_power tuple differs, restart from scratch
        locks = [None]
        states = {
            "part_power": PART_POWER + 1,
            "next_part_power": PART_POWER + 2,
            "state": {},
        }
        relinker.hook_pre_device(locks, states, datadir, device_path)
        self.assertEqual(states["state"], {})
        self.assertFalse(os.path.exists(state_file))
        os.close(locks[0])  # Release the lock

        # If the file gets corrupted, restart from scratch
        with open(state_file, 'wt') as f:
            f.write('NOT JSON')
        locks = [None]
        states = {
            "part_power": PART_POWER,
            "next_part_power": PART_POWER + 1,
            "state": {}
        }
        relinker.hook_pre_device(locks, states, datadir, device_path)
        self.assertEqual(states["state"], {})
        self.assertFalse(os.path.exists(state_file))
        os.close(locks[0])  # Release the lock

    def test_cleanup_not_yet_relinked(self):
        """Cleanup must fail — and keep the file — if relinking never ran."""
        self._common_test_cleanup(relink=False)
        argv = [
            'cleanup',
            '--swift-dir', self.testdir,
            '--devices', self.devices,
            '--skip-mount',
        ]
        # cleanup exits non-zero because the new-partition link is missing
        self.assertEqual(1, relinker.main(argv))

        # the original file must still be present in the old partition
        old_file = os.path.join(self.objdir, self.object_fname)
        self.assertTrue(os.path.isfile(old_file))

    def test_cleanup_deleted(self):
        """Cleanup succeeds when the relinked data file became a tombstone."""
        self._common_test_cleanup()

        # Pretend the object got deleted in between and there is a tombstone
        tombstone = self.expected_file[:-4] + "ts"
        os.rename(self.expected_file, tombstone)

        argv = [
            'cleanup',
            '--swift-dir', self.testdir,
            '--devices', self.devices,
            '--skip-mount',
        ]
        self.assertEqual(0, relinker.main(argv))

    def test_cleanup_reapable(self):
        """A reapable tombstone in the old partition is removed by cleanup."""
        # relink a tombstone
        tombstone = self.objname[:-4] + "ts"
        os.rename(self.objname, tombstone)
        self.objname = tombstone
        self.expected_file = self.expected_file[:-4] + "ts"
        self._common_test_cleanup()
        self.assertTrue(os.path.exists(self.expected_file))  # sanity check

        argv = [
            'cleanup',
            '--swift-dir', self.testdir,
            '--devices', self.devices,
            '--skip-mount',
        ]
        with mock.patch.object(relinker.logging, 'getLogger',
                               return_value=self.logger), \
                mock.patch('time.time', return_value=1e11):  # far, far future
            self.assertEqual(0, relinker.main(argv))
        for level in ('error', 'warning'):
            self.assertEqual([], self.logger.get_lines_for_level(level))
        self.assertIn("Found reapable on-disk file: %s" % self.objname,
                      self.logger.get_lines_for_level('debug'))
        # self.expected_file may or may not exist; it depends on whether the
        # object was in the upper-half of the partition space. ultimately,
        # that part doesn't really matter much -- but we definitely *don't*
        # want self.objname around polluting the old partition space.
        self.assertFalse(os.path.exists(self.objname))

    def test_cleanup_doesnotexist(self):
        """Cleanup warns and fails if the relinked file has disappeared."""
        self._common_test_cleanup()

        # Pretend the file in the new place got deleted inbetween
        os.remove(self.expected_file)

        argv = [
            'cleanup',
            '--swift-dir', self.testdir,
            '--devices', self.devices,
            '--skip-mount',
        ]
        with mock.patch.object(relinker.logging,
                               'getLogger',
                               return_value=self.logger):
            self.assertEqual(1, relinker.main(argv))
        expected_warning = 'Error cleaning up %s: %s' % (
            self.objname, repr(exceptions.DiskFileNotExist()))
        self.assertEqual([expected_warning],
                         self.logger.get_lines_for_level('warning'))

    @patch_policies([
        ECStoragePolicy(0,
                        name='platinum',
                        is_default=True,
                        ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=4,
                        ec_nparity=2)
    ])
    def test_cleanup_diskfile_error(self):
        """Replicated data audited under an EC policy warns but still cleans.

        The data file was written under a replicated policy; with the policy
        patched to EC the diskfile has no fragment index, so each audit pass
        (old partition, new partition, rehash) logs a warning while cleanup
        itself exits 0.
        """
        self._common_test_cleanup()

        # Switch the policy type so all fragments raise DiskFileError.
        with mock.patch.object(relinker.logging,
                               'getLogger',
                               return_value=self.logger):
            self.assertEqual(
                0,
                relinker.main([
                    'cleanup',
                    '--swift-dir',
                    self.testdir,
                    '--devices',
                    self.devices,
                    '--skip-mount',
                ]))
        log_lines = self.logger.get_lines_for_level('warning')
        self.assertEqual(3, len(log_lines),
                         'Expected 3 log lines, got %r' % log_lines)
        # BUG FIX: the original checked log_lines[0] twice and log_lines[1]
        # once, never inspecting log_lines[2] despite asserting 3 lines.
        # Once to check the old partition space...
        self.assertIn('Bad fragment index: None', log_lines[0])
        # ... again for the new partition ...
        self.assertIn('Bad fragment index: None', log_lines[1])
        # ... and one last time for the rehash
        self.assertIn('Bad fragment index: None', log_lines[2])

    def test_cleanup_quarantined(self):
        """A corrupt relinked file is quarantined; cleanup reports failure."""
        self._common_test_cleanup()
        # Pretend the object in the new place got corrupted
        with open(self.expected_file, "wb") as obj:
            obj.write(b'trash')

        argv = [
            'cleanup',
            '--swift-dir', self.testdir,
            '--devices', self.devices,
            '--skip-mount',
        ]
        with mock.patch.object(relinker.logging, 'getLogger',
                               return_value=self.logger):
            self.assertEqual(1, relinker.main(argv))

        log_lines = self.logger.get_lines_for_level('warning')
        self.assertEqual(2, len(log_lines),
                         'Expected 2 log lines, got %r' % log_lines)
        self.assertIn(
            'metadata content-length 12 does not match '
            'actual object size 5', log_lines[0])
        self.assertIn('failed audit and was quarantined', log_lines[1])

    def test_rehashing(self):
        """Verify which suffixes are invalidated and which partitions are
        rehashed during a relink followed by a cleanup.

        invalidate_hash and DiskFileManager.get_hashes are wrapped (not
        replaced), so the real work still happens while every call is
        recorded into ``calls`` for the ordering assertions below.
        """
        calls = []

        @contextmanager
        def do_mocks():
            # keep the originals so the wrappers can delegate to them
            orig_invalidate = relinker.diskfile.invalidate_hash
            orig_get_hashes = DiskFileManager.get_hashes

            def mock_invalidate(suffix_dir):
                calls.append(('invalidate', suffix_dir))
                return orig_invalidate(suffix_dir)

            def mock_get_hashes(self, *args):
                calls.append(('get_hashes', ) + args)
                return orig_get_hashes(self, *args)

            with mock.patch.object(relinker.diskfile, 'invalidate_hash',
                                   mock_invalidate), \
                    mock.patch.object(DiskFileManager, 'get_hashes',
                                      mock_get_hashes):
                yield

        with do_mocks():
            self.rb.prepare_increase_partition_power()
            self._save_ring()
            self.assertEqual(
                0,
                relinker.main([
                    'relink',
                    '--swift-dir',
                    self.testdir,
                    '--devices',
                    self.devices,
                    '--skip-mount',
                ]))
            expected = [('invalidate', self.next_suffix_dir)]
            # upper-half partitions additionally rehash the sibling pair of
            # new partitions during relink
            if self.part >= 2**(PART_POWER - 1):
                expected.extend([
                    ('get_hashes', self.existing_device, self.next_part & ~1,
                     [], POLICIES[0]),
                    ('get_hashes', self.existing_device, self.next_part | 1,
                     [], POLICIES[0]),
                ])

            self.assertEqual(calls, expected)
            # Depending on partition, there may or may not be a get_hashes here
            self.rb._ring = None  # Force builder to reload ring
            self.rb.increase_partition_power()
            self._save_ring()
            self.assertEqual(
                0,
                relinker.main([
                    'cleanup',
                    '--swift-dir',
                    self.testdir,
                    '--devices',
                    self.devices,
                    '--skip-mount',
                ]))
            # lower-half partitions only get their new partition rehashed
            # during cleanup
            if self.part < 2**(PART_POWER - 1):
                expected.append(('get_hashes', self.existing_device,
                                 self.next_part, [], POLICIES[0]))
            expected.extend([
                ('invalidate', self.suffix_dir),
                ('get_hashes', self.existing_device, self.part, [],
                 POLICIES[0]),
            ])
            self.assertEqual(calls, expected)
# Esempio n. 10 (web-scrape artifact separating two unrelated code excerpts)
# 0
class TestStoragePolicies(unittest.TestCase):
    def _conf(self, conf_str):
        """Build a ConfigParser from a (possibly indented) literal string.

        Leading whitespace is stripped from every line so tests can use
        indented triple-quoted config snippets.
        """
        conf_str = "\n".join(line.strip() for line in conf_str.split("\n"))
        conf = ConfigParser()
        # readfp() was deprecated in Python 3.2 and removed in 3.12;
        # prefer read_file() where it exists, fall back on old interpreters.
        reader = getattr(conf, 'read_file', None) or conf.readfp
        reader(StringIO.StringIO(conf_str))
        return conf

    def assertRaisesWithMessage(self, exc_class, message, f, *args, **kwargs):
        """Assert f(*args, **kwargs) raises exc_class and that the
        exception's string form contains ``message`` as a substring.
        """
        try:
            f(*args, **kwargs)
        except exc_class as err:
            err_msg = str(err)
            # assert_() is a deprecated alias removed in Python 3.12
            self.assertTrue(
                message in err_msg, 'Error message %r did not '
                'have expected substring %r' % (err_msg, message))
        else:
            self.fail('%r did not raise %s' % (message, exc_class.__name__))

    def test_policy_baseclass_instantiate(self):
        """BaseStoragePolicy is abstract: direct construction must fail."""
        expected = "Can't instantiate BaseStoragePolicy"
        self.assertRaisesWithMessage(
            TypeError, expected, BaseStoragePolicy, 1, 'one')

    @patch_policies([
        StoragePolicy(0, 'zero', is_default=True),
        StoragePolicy(1, 'one'),
        StoragePolicy(2, 'two'),
        StoragePolicy(3, 'three', is_deprecated=True),
        ECStoragePolicy(10,
                        'ten',
                        ec_type='jerasure_rs_vand',
                        ec_ndata=10,
                        ec_nparity=4),
    ])
    def test_swift_info(self):
        """get_policy_info() exposes every policy except deprecated ones."""
        # the deprecated 'three' should not exist in expect
        expect = [{
            'default': True,
            'name': 'zero'
        }, {
            'name': 'two'
        }, {
            'name': 'one'
        }, {
            'name': 'ten'
        }]
        swift_info = POLICIES.get_policy_info()
        # assertEquals() is a deprecated alias removed in Python 3.12
        self.assertEqual(sorted(expect, key=lambda k: k['name']),
                         sorted(swift_info, key=lambda k: k['name']))

    @patch_policies
    def test_get_policy_string(self):
        """get_policy_string() appends '-<index>' only for non-zero policies."""
        # assertEquals() is a deprecated alias removed in Python 3.12
        self.assertEqual(get_policy_string('something', 0), 'something')
        self.assertEqual(get_policy_string('something', None), 'something')
        self.assertEqual(get_policy_string('something', ''), 'something')
        self.assertEqual(get_policy_string('something', 1), 'something-1')
        # an index with no matching policy is rejected
        self.assertRaises(PolicyError, get_policy_string, 'something', 99)

    @patch_policies
    def test_split_policy_string(self):
        """split_policy_string() is the inverse of get_policy_string()."""
        expectations = {
            'something': ('something', POLICIES[0]),
            'something-1': ('something', POLICIES[1]),
            'tmp': ('tmp', POLICIES[0]),
            'objects': ('objects', POLICIES[0]),
            'tmp-1': ('tmp', POLICIES[1]),
            'objects-1': ('objects', POLICIES[1]),
            'objects-': PolicyError,
            'objects-0': PolicyError,
            'objects--1': ('objects-', POLICIES[1]),
            'objects-+1': PolicyError,
            'objects--': PolicyError,
            'objects-foo': PolicyError,
            'objects--bar': PolicyError,
            'objects-+bar': PolicyError,
            # questionable, demonstrated as inverse of get_policy_string
            'objects+0': ('objects+0', POLICIES[0]),
            '': ('', POLICIES[0]),
            '0': ('0', POLICIES[0]),
            '-1': ('', POLICIES[1]),
        }
        for policy_string, expected in expectations.items():
            if expected is not PolicyError:
                self.assertEqual(expected, split_policy_string(policy_string))
                # round-trip: should be inverse of get_policy_string
                self.assertEqual(policy_string, get_policy_string(*expected))
                continue
            try:
                invalid = split_policy_string(policy_string)
            except PolicyError:
                pass  # good
            else:
                self.fail('The string %r returned %r '
                          'instead of raising a PolicyError' %
                          (policy_string, invalid))

    def test_defaults(self):
        """Sanity-check the patched global POLICIES collection accessors."""
        self.assertTrue(len(POLICIES) > 0)

        # test class functions
        default_policy = POLICIES.default
        # assert_() is a deprecated alias removed in Python 3.12;
        # assertEqual also gives a real failure message for the comparisons
        self.assertTrue(default_policy.is_default)
        zero_policy = POLICIES.get_by_index(0)
        self.assertEqual(zero_policy.idx, 0)
        zero_policy_by_name = POLICIES.get_by_name(zero_policy.name)
        self.assertEqual(zero_policy_by_name.idx, 0)

    def test_storage_policy_repr(self):
        """repr() of policies and collections names every key attribute."""
        test_policies = [
            StoragePolicy(0, 'aay', True),
            StoragePolicy(1, 'bee', False),
            StoragePolicy(2, 'cee', False),
            ECStoragePolicy(10,
                            'ten',
                            ec_type='jerasure_rs_vand',
                            ec_ndata=10,
                            ec_nparity=3)
        ]
        policies = StoragePolicyCollection(test_policies)
        for policy in policies:
            policy_repr = repr(policy)
            # assert_() is a deprecated alias removed in Python 3.12;
            # assertIn also reports both operands on failure
            self.assertIn(policy.__class__.__name__, policy_repr)
            self.assertIn('is_default=%s' % policy.is_default, policy_repr)
            self.assertIn('is_deprecated=%s' % policy.is_deprecated,
                          policy_repr)
            self.assertIn(policy.name, policy_repr)
            if policy.policy_type == EC_POLICY:
                self.assertIn('ec_type=%s' % policy.ec_type, policy_repr)
                self.assertIn('ec_ndata=%s' % policy.ec_ndata, policy_repr)
                self.assertIn('ec_nparity=%s' % policy.ec_nparity,
                              policy_repr)
                self.assertIn('ec_segment_size=%s' % policy.ec_segment_size,
                              policy_repr)
        collection_repr = repr(policies)
        collection_repr_lines = collection_repr.splitlines()
        self.assertIn(policies.__class__.__name__, collection_repr_lines[0])
        self.assertEqual(len(policies), len(collection_repr_lines[1:-1]))
        for policy, line in zip(policies, collection_repr_lines[1:-1]):
            self.assertIn(repr(policy), line)
        with patch_policies(policies):
            self.assertEqual(repr(POLICIES), collection_repr)

    def test_validate_policies_defaults(self):
        """Default-policy resolution across several collection shapes."""
        # 0 explicit default
        test_policies = [
            StoragePolicy(0, 'zero', True),
            StoragePolicy(1, 'one', False),
            StoragePolicy(2, 'two', False)
        ]
        policies = StoragePolicyCollection(test_policies)
        # assertEquals() is a deprecated alias removed in Python 3.12
        self.assertEqual(policies.default, test_policies[0])
        self.assertEqual(policies.default.name, 'zero')

        # non-zero explicit default
        test_policies = [
            StoragePolicy(0, 'zero', False),
            StoragePolicy(1, 'one', False),
            StoragePolicy(2, 'two', True)
        ]
        policies = StoragePolicyCollection(test_policies)
        self.assertEqual(policies.default, test_policies[2])
        self.assertEqual(policies.default.name, 'two')

        # multiple defaults
        test_policies = [
            StoragePolicy(0, 'zero', False),
            StoragePolicy(1, 'one', True),
            StoragePolicy(2, 'two', True)
        ]
        self.assertRaisesWithMessage(PolicyError, 'Duplicate default',
                                     StoragePolicyCollection, test_policies)

        # nothing specified
        test_policies = []
        policies = StoragePolicyCollection(test_policies)
        self.assertEqual(policies.default, policies[0])
        self.assertEqual(policies.default.name, 'Policy-0')

        # no default specified with only policy index 0
        test_policies = [StoragePolicy(0, 'zero')]
        policies = StoragePolicyCollection(test_policies)
        self.assertEqual(policies.default, policies[0])

        # no default specified with multiple policies
        test_policies = [
            StoragePolicy(0, 'zero', False),
            StoragePolicy(1, 'one', False),
            StoragePolicy(2, 'two', False)
        ]
        self.assertRaisesWithMessage(PolicyError,
                                     'Unable to find default policy',
                                     StoragePolicyCollection, test_policies)

    def test_deprecate_policies(self):
        """Deprecated policies stay in the collection, but a usable default
        must still exist."""
        # deprecation specified
        test_policies = [
            StoragePolicy(0, 'zero', True),
            StoragePolicy(1, 'one', False),
            StoragePolicy(2, 'two', False, is_deprecated=True)
        ]
        policies = StoragePolicyCollection(test_policies)
        # assertEquals() is a deprecated alias removed in Python 3.12
        self.assertEqual(policies.default, test_policies[0])
        self.assertEqual(policies.default.name, 'zero')
        self.assertEqual(len(policies), 3)

        # multiple policies requires default
        test_policies = [
            StoragePolicy(0, 'zero', False),
            StoragePolicy(1, 'one', False, is_deprecated=True),
            StoragePolicy(2, 'two', False)
        ]
        self.assertRaisesWithMessage(PolicyError,
                                     'Unable to find default policy',
                                     StoragePolicyCollection, test_policies)

    def test_validate_policies_indexes(self):
        """A collection with duplicate policy indexes must be rejected."""
        dup_index_policies = [
            StoragePolicy(0, 'zero', True),
            StoragePolicy(1, 'one', False),
            StoragePolicy(1, 'two', False)
        ]
        self.assertRaises(PolicyError, StoragePolicyCollection,
                          dup_index_policies)

    def test_validate_policy_params(self):
        """Constructor validation: index, reserved name, deprecation, names."""
        StoragePolicy(0, 'name')  # sanity
        # bogus indexes
        for bad_index in ('x', -1):
            self.assertRaises(PolicyError, FakeStoragePolicy,
                              bad_index, 'name')

        # non-zero Policy-0
        self.assertRaisesWithMessage(PolicyError, 'reserved',
                                     FakeStoragePolicy, 1, 'policy-0')
        # deprecate default
        self.assertRaisesWithMessage(PolicyError,
                                     'Deprecated policy can not be default',
                                     FakeStoragePolicy,
                                     1,
                                     'Policy-1',
                                     is_default=True,
                                     is_deprecated=True)
        # weird names
        bad_names = (
            '',
            'name_foo',
            'name\nfoo',
            'name foo',
            u'name \u062a',
            'name \xd8\xaa',
        )
        for bad_name in bad_names:
            self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                         FakeStoragePolicy, 1, bad_name)

    def test_validate_policies_names(self):
        """A collection with duplicate policy names must be rejected."""
        dup_name_policies = [
            StoragePolicy(0, 'zero', True),
            StoragePolicy(1, 'zero', False),
            StoragePolicy(2, 'two', False)
        ]
        self.assertRaises(PolicyError, StoragePolicyCollection,
                          dup_name_policies)

    def test_validate_policies_type_default(self):
        """Without an explicit type a policy gets its class default type."""
        # no type specified - make sure the policy is initialized to
        # DEFAULT_POLICY_TYPE
        test_policy = FakeStoragePolicy(0, 'zero', True)
        # assertEquals() is a deprecated alias removed in Python 3.12
        self.assertEqual(test_policy.policy_type, 'fake')

    def test_validate_policies_type_invalid(self):
        """A policy subclass advertising an unregistered type must fail."""
        class BogusStoragePolicy(FakeStoragePolicy):
            # 'bogus' is not a registered policy type
            policy_type = 'bogus'

        self.assertRaisesWithMessage(PolicyError, 'Invalid type',
                                     BogusStoragePolicy, 1, 'one')

    def test_policies_type_attribute(self):
        """policy_type distinguishes replication from erasure-coding."""
        test_policies = [
            StoragePolicy(0, 'zero', is_default=True),
            StoragePolicy(1, 'one'),
            StoragePolicy(2, 'two'),
            StoragePolicy(3, 'three', is_deprecated=True),
            ECStoragePolicy(10,
                            'ten',
                            ec_type='jerasure_rs_vand',
                            ec_ndata=10,
                            ec_nparity=3),
        ]
        policies = StoragePolicyCollection(test_policies)
        # assertEquals() is a deprecated alias removed in Python 3.12
        for repl_index in (0, 1, 2, 3):
            self.assertEqual(policies.get_by_index(repl_index).policy_type,
                             REPL_POLICY)
        self.assertEqual(policies.get_by_index(10).policy_type, EC_POLICY)

    def test_names_are_normalized(self):
        """Name comparisons are case-insensitive but case-preserving."""
        # the same name in a different case counts as a duplicate
        self.assertRaises(PolicyError, StoragePolicyCollection, [
            StoragePolicy(0, 'zero', True),
            StoragePolicy(1, 'ZERO', False)
        ])

        policies = StoragePolicyCollection(
            [StoragePolicy(0, 'zEro', True),
             StoragePolicy(1, 'One', False)])

        cases = (
            (policies[0], 'zEro', ('zero', 'ZERO', 'zErO', 'ZeRo')),
            (policies[1], 'One', ('one', 'ONE', 'oNe', 'OnE')),
        )
        for policy, stored_name, spellings in cases:
            for name in spellings:
                found = policies.get_by_name(name)
                # lookup is normalized; the stored name keeps its case
                self.assertEqual(policy, found)
                self.assertEqual(found.name, stored_name)

    def test_deprecated_default(self):
        """A policy may not be both deprecated and the default."""
        conf_text = """
        [storage-policy:1]
        name = one
        deprecated = yes
        default = yes
        """
        expected_msg = "Deprecated policy can not be default"
        self.assertRaisesWithMessage(PolicyError, expected_msg,
                                     parse_storage_policies,
                                     self._conf(conf_text))

    def test_multiple_policies_with_no_policy_index_zero(self):
        """Policy-0 is only implicit when no other policies are configured."""
        conf_text = """
        [storage-policy:1]
        name = one
        default = yes
        """
        # Policy-0 will not be implicitly added if other policies are defined
        self.assertRaisesWithMessage(
            PolicyError, "must specify a storage policy section "
            "for policy index 0", parse_storage_policies,
            self._conf(conf_text))

    def test_no_default(self):
        """Default selection rules when parsing storage-policy config.

        Also fixes two latent bugs: the original used two-argument
        ``assert_(x, y)``, which only checks the truthiness of ``x`` (``y``
        is the failure message), so these values were never actually
        compared.
        """
        orig_conf = self._conf("""
        [storage-policy:0]
        name = zero
        [storage-policy:1]
        name = one
        default = yes
        """)

        policies = parse_storage_policies(orig_conf)
        self.assertEqual(policies.default, policies[1])
        # BUG FIX: assert_(policies[0].name, 'Policy-0') compared nothing;
        # policy 0 is explicitly named 'zero' in this conf
        self.assertEqual(policies[0].name, 'zero')

        bad_conf = self._conf("""
        [storage-policy:0]
        name = zero
        [storage-policy:1]
        name = one
        deprecated = yes
        """)

        # multiple polices and no explicit default
        self.assertRaisesWithMessage(PolicyError, "Unable to find default",
                                     parse_storage_policies, bad_conf)

        good_conf = self._conf("""
        [storage-policy:0]
        name = Policy-0
        default = yes
        [storage-policy:1]
        name = one
        deprecated = yes
        """)

        policies = parse_storage_policies(good_conf)
        self.assertEqual(policies.default, policies[0])
        # BUG FIX: assert_(x, True) only checked truthiness of x
        self.assertTrue(policies[1].is_deprecated)

    def test_parse_storage_policies(self):
        """Exhaustive parse_storage_policies() validation: bad indexes,
        bad names, bad EC options, then one fully valid configuration.

        Only change from the original: assertEquals() is a deprecated
        alias removed in Python 3.12 and is replaced by assertEqual().
        """
        # ValueError when deprecating policy 0
        bad_conf = self._conf("""
        [storage-policy:0]
        name = zero
        deprecated = yes

        [storage-policy:1]
        name = one
        deprecated = yes
        """)

        self.assertRaisesWithMessage(
            PolicyError, "Unable to find policy that's not deprecated",
            parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:]
        name = zero
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                     parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:-1]
        name = zero
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                     parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:x]
        name = zero
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                     parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:x-1]
        name = zero
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                     parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:x]
        name = zero
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                     parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:x:1]
        name = zero
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid index',
                                     parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:1]
        name = zero
        boo = berries
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid option',
                                     parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:0]
        name =
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                     parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:3]
        name = Policy-0
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                     parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:1]
        name = policY-0
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                     parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:0]
        name = one
        [storage-policy:1]
        name = ONE
        """)

        self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
                                     parse_storage_policies, bad_conf)

        bad_conf = self._conf("""
        [storage-policy:0]
        name = good_stuff
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                     parse_storage_policies, bad_conf)

        # policy_type = erasure_coding

        # missing ec_type, ec_num_data_fragments and ec_num_parity_fragments
        bad_conf = self._conf("""
        [storage-policy:0]
        name = zero
        [storage-policy:1]
        name = ec10-4
        policy_type = erasure_coding
        """)

        self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
                                     parse_storage_policies, bad_conf)

        # missing ec_type, but other options valid...
        bad_conf = self._conf("""
        [storage-policy:0]
        name = zero
        [storage-policy:1]
        name = ec10-4
        policy_type = erasure_coding
        ec_num_data_fragments = 10
        ec_num_parity_fragments = 4
        """)

        self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
                                     parse_storage_policies, bad_conf)

        # ec_type specified, but invalid...
        bad_conf = self._conf("""
        [storage-policy:0]
        name = zero
        default = yes
        [storage-policy:1]
        name = ec10-4
        policy_type = erasure_coding
        ec_type = garbage_alg
        ec_num_data_fragments = 10
        ec_num_parity_fragments = 4
        """)

        self.assertRaisesWithMessage(
            PolicyError, 'Wrong ec_type garbage_alg for policy '
            'ec10-4, should be one of "%s"' % (', '.join(VALID_EC_TYPES)),
            parse_storage_policies, bad_conf)

        # missing and invalid ec_num_parity_fragments
        bad_conf = self._conf("""
        [storage-policy:0]
        name = zero
        [storage-policy:1]
        name = ec10-4
        policy_type = erasure_coding
        ec_type = jerasure_rs_vand
        ec_num_data_fragments = 10
        """)

        self.assertRaisesWithMessage(PolicyError,
                                     'Invalid ec_num_parity_fragments',
                                     parse_storage_policies, bad_conf)

        for num_parity in ('-4', '0', 'x'):
            bad_conf = self._conf("""
            [storage-policy:0]
            name = zero
            [storage-policy:1]
            name = ec10-4
            policy_type = erasure_coding
            ec_type = jerasure_rs_vand
            ec_num_data_fragments = 10
            ec_num_parity_fragments = %s
            """ % num_parity)

            self.assertRaisesWithMessage(PolicyError,
                                         'Invalid ec_num_parity_fragments',
                                         parse_storage_policies, bad_conf)

        # missing and invalid ec_num_data_fragments
        bad_conf = self._conf("""
        [storage-policy:0]
        name = zero
        [storage-policy:1]
        name = ec10-4
        policy_type = erasure_coding
        ec_type = jerasure_rs_vand
        ec_num_parity_fragments = 4
        """)

        self.assertRaisesWithMessage(PolicyError,
                                     'Invalid ec_num_data_fragments',
                                     parse_storage_policies, bad_conf)

        for num_data in ('-10', '0', 'x'):
            bad_conf = self._conf("""
            [storage-policy:0]
            name = zero
            [storage-policy:1]
            name = ec10-4
            policy_type = erasure_coding
            ec_type = jerasure_rs_vand
            ec_num_data_fragments = %s
            ec_num_parity_fragments = 4
            """ % num_data)

            self.assertRaisesWithMessage(PolicyError,
                                         'Invalid ec_num_data_fragments',
                                         parse_storage_policies, bad_conf)

        # invalid ec_object_segment_size
        for segment_size in ('-4', '0', 'x'):
            bad_conf = self._conf("""
            [storage-policy:0]
            name = zero
            [storage-policy:1]
            name = ec10-4
            policy_type = erasure_coding
            ec_object_segment_size = %s
            ec_type = jerasure_rs_vand
            ec_num_data_fragments = 10
            ec_num_parity_fragments = 4
            """ % segment_size)

            self.assertRaisesWithMessage(PolicyError,
                                         'Invalid ec_object_segment_size',
                                         parse_storage_policies, bad_conf)

        # Additional section added to ensure parser ignores other sections
        conf = self._conf("""
        [some-other-section]
        foo = bar
        [storage-policy:0]
        name = zero
        [storage-policy:5]
        name = one
        default = yes
        [storage-policy:6]
        name = duplicate-sections-are-ignored
        [storage-policy:6]
        name = apple
        """)
        policies = parse_storage_policies(conf)

        self.assertEqual(True, policies.get_by_index(5).is_default)
        self.assertEqual(False, policies.get_by_index(0).is_default)
        self.assertEqual(False, policies.get_by_index(6).is_default)

        self.assertEqual("object", policies.get_by_name("zero").ring_name)
        self.assertEqual("object-5", policies.get_by_name("one").ring_name)
        self.assertEqual("object-6", policies.get_by_name("apple").ring_name)

        self.assertEqual(0, int(policies.get_by_name('zero')))
        self.assertEqual(5, int(policies.get_by_name('one')))
        self.assertEqual(6, int(policies.get_by_name('apple')))

        self.assertEqual("zero", policies.get_by_index(0).name)
        self.assertEqual("zero", policies.get_by_index("0").name)
        self.assertEqual("one", policies.get_by_index(5).name)
        self.assertEqual("apple", policies.get_by_index(6).name)
        self.assertEqual("zero", policies.get_by_index(None).name)
        self.assertEqual("zero", policies.get_by_index('').name)

        self.assertEqual(policies.get_by_index(0), policies.legacy)

    def test_reload_invalid_storage_policies(self):
        """Reloading a config with duplicate policy indexes must abort.

        The parser treats ``[storage-policy:0]`` and ``[storage-policy:00]``
        as the same index, so reloading this configuration is expected to
        raise SystemExit with a duplicate-index error message.
        """
        conf = self._conf("""
        [storage-policy:0]
        name = zero
        [storage-policy:00]
        name = double-zero
        """)
        with NamedTemporaryFile() as f:
            conf.write(f)
            f.flush()
            with mock.patch('swift.common.storage_policy.SWIFT_CONF_FILE',
                            new=f.name):
                try:
                    reload_storage_policies()
                except SystemExit as e:
                    err_msg = str(e)
                else:
                    self.fail('SystemExit not raised')
        parts = [
            'Invalid Storage Policy Configuration',
            'Duplicate index',
        ]
        for expected in parts:
            # assertIn replaces the deprecated assert_() alias and produces
            # a useful failure message on its own.
            self.assertIn(expected, err_msg)

    def test_storage_policy_ordering(self):
        """Policies sort by their integer index and compare against ints."""
        collection = StoragePolicyCollection([
            StoragePolicy(0, 'zero', is_default=True),
            StoragePolicy(503, 'error'),
            StoragePolicy(204, 'empty'),
            StoragePolicy(404, 'missing'),
        ])
        ordered_indexes = [int(p) for p in sorted(list(collection))]
        self.assertEqual([0, 204, 404, 503], ordered_indexes)

        # rich comparisons against plain integers work too
        p503 = collection[503]
        self.assertTrue(501 < p503 < 507)

    def test_get_object_ring(self):
        """get_object_ring() lazily loads and then caches per-policy rings."""
        test_policies = [
            StoragePolicy(0, 'aay', True),
            StoragePolicy(1, 'bee', False),
            StoragePolicy(2, 'cee', False)
        ]
        policies = StoragePolicyCollection(test_policies)

        class NamedFakeRing(FakeRing):
            # Capture the ring_name the collection asked for so the test can
            # verify each policy loads the ring that belongs to it.
            def __init__(self, swift_dir, ring_name=None):
                self.ring_name = ring_name
                super(NamedFakeRing, self).__init__()

        with mock.patch('swift.common.storage_policy.Ring', new=NamedFakeRing):
            for policy in policies:
                self.assertFalse(policy.object_ring)
                ring = policies.get_object_ring(int(policy), '/path/not/used')
                self.assertEqual(ring.ring_name, policy.ring_name)
                self.assertTrue(policy.object_ring)
                # assertIsInstance replaces the deprecated assert_() alias
                self.assertIsInstance(policy.object_ring, NamedFakeRing)

        def blow_up(*args, **kwargs):
            raise Exception('kaboom!')

        # Once loaded, the cached ring must be reused without ever
        # constructing Ring() again - blow_up would raise if it were.
        with mock.patch('swift.common.storage_policy.Ring', new=blow_up):
            for policy in policies:
                policy.load_ring('/path/not/used')
                expected = policies.get_object_ring(int(policy),
                                                    '/path/not/used')
                self.assertEqual(policy.object_ring, expected)

        # bad policy index
        self.assertRaises(PolicyError, policies.get_object_ring, 99,
                          '/path/not/used')

    def test_bind_ports_cache(self):
        """BindPortsCache returns the set of ports for devices on this node.

        The test stubs RingData.load so three policy rings ('object',
        'object-1', 'object-2') come from in-memory device tables, and
        verifies that the cache (a) only returns ports of devices whose IP
        is one of ours, (b) does not reload rings whose mtime is unchanged,
        (c) reloads when mtimes change, and (d) tolerates a missing ring
        file.  The exact order of mock calls is asserted, so do not reorder
        statements here.
        """
        test_policies = [
            StoragePolicy(0, 'aay', True),
            StoragePolicy(1, 'bee', False),
            StoragePolicy(2, 'cee', False)
        ]

        # bind_ip is my_ips[1]; devices on other_ips must never contribute
        # ports to the result.
        my_ips = ['1.2.3.4', '2.3.4.5']
        other_ips = ['3.4.5.6', '4.5.6.7']
        bind_ip = my_ips[1]
        devs_by_ring_name1 = {
            'object': [  # 'aay'
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': my_ips[0],
                    'port': 6006
                }, {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': other_ips[0],
                    'port': 6007
                }, {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': my_ips[1],
                    'port': 6008
                }, None, {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': other_ips[1],
                    'port': 6009
                }
            ],
            'object-1': [  # 'bee'
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': my_ips[1],
                    'port': 6006
                },  # dupe
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': other_ips[0],
                    'port': 6010
                },
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': my_ips[1],
                    'port': 6011
                },
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': other_ips[1],
                    'port': 6012
                }
            ],
            'object-2': [  # 'cee'
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': my_ips[0],
                    'port': 6010
                },  # on our IP and a not-us IP
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': other_ips[0],
                    'port': 6013
                },
                None,
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': my_ips[1],
                    'port': 6014
                },
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': other_ips[1],
                    'port': 6015
                }
            ],
        }
        # A second generation of device tables, used after the ring files'
        # mtimes are bumped to prove the cache reloads.
        devs_by_ring_name2 = {
            'object': [  # 'aay'
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': my_ips[0],
                    'port': 6016
                }, {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': other_ips[1],
                    'port': 6019
                }
            ],
            'object-1': [  # 'bee'
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': my_ips[1],
                    'port': 6016
                },  # dupe
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': other_ips[1],
                    'port': 6022
                }
            ],
            'object-2': [  # 'cee'
                {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': my_ips[0],
                    'port': 6020
                }, {
                    'id': 0,
                    'zone': 0,
                    'region': 1,
                    'ip': other_ips[1],
                    'port': 6025
                }
            ],
        }
        ring_files = [
            ring_name + '.ring.gz' for ring_name in sorted(devs_by_ring_name1)
        ]

        def _fake_load(gz_path, stub_objs, metadata_only=False):
            # Serve devices from the stub table keyed by ring name
            # (basename minus the '.ring.gz' suffix).
            return RingData(devs=stub_objs[os.path.basename(gz_path)[:-8]],
                            replica2part2dev_id=[],
                            part_shift=24)

        with mock.patch(
            'swift.common.storage_policy.RingData.load'
        ) as mock_ld, \
                patch_policies(test_policies), \
                mock.patch('swift.common.storage_policy.whataremyips') \
                as mock_whataremyips, \
                temptree(ring_files) as tempdir:
            mock_whataremyips.return_value = my_ips

            cache = BindPortsCache(tempdir, bind_ip)

            self.assertEqual([
                mock.call(bind_ip),
            ], mock_whataremyips.mock_calls)
            mock_whataremyips.reset_mock()

            # First call: all three rings get loaded (metadata_only).
            mock_ld.side_effect = partial(_fake_load,
                                          stub_objs=devs_by_ring_name1)
            self.assertEqual(set([
                6006,
                6008,
                6011,
                6010,
                6014,
            ]), cache.all_bind_ports_for_node())
            self.assertEqual([
                mock.call(os.path.join(tempdir, ring_files[0]),
                          metadata_only=True),
                mock.call(os.path.join(tempdir, ring_files[1]),
                          metadata_only=True),
                mock.call(os.path.join(tempdir, ring_files[2]),
                          metadata_only=True),
            ], mock_ld.mock_calls)
            mock_ld.reset_mock()

            # Unchanged mtimes: cached answer, no ring reloads.
            mock_ld.side_effect = partial(_fake_load,
                                          stub_objs=devs_by_ring_name2)
            self.assertEqual(set([
                6006,
                6008,
                6011,
                6010,
                6014,
            ]), cache.all_bind_ports_for_node())
            self.assertEqual([], mock_ld.mock_calls)

            # but when all the file mtimes are made different, it'll
            # reload
            for gz_file in [os.path.join(tempdir, n) for n in ring_files]:
                os.utime(gz_file, (88, 88))

            self.assertEqual(set([
                6016,
                6020,
            ]), cache.all_bind_ports_for_node())
            self.assertEqual([
                mock.call(os.path.join(tempdir, ring_files[0]),
                          metadata_only=True),
                mock.call(os.path.join(tempdir, ring_files[1]),
                          metadata_only=True),
                mock.call(os.path.join(tempdir, ring_files[2]),
                          metadata_only=True),
            ], mock_ld.mock_calls)
            mock_ld.reset_mock()

            # Don't do something stupid like crash if a ring file is missing.
            os.unlink(os.path.join(tempdir, 'object-2.ring.gz'))

            self.assertEqual(set([
                6016,
                6020,
            ]), cache.all_bind_ports_for_node())
            self.assertEqual([], mock_ld.mock_calls)

        # whataremyips() is only called in the constructor
        self.assertEqual([], mock_whataremyips.mock_calls)

    def test_singleton_passthrough(self):
        """While patched, the POLICIES singleton exposes the test policies."""
        test_policies = [
            StoragePolicy(0, 'aay', True),
            StoragePolicy(1, 'bee', False),
            StoragePolicy(2, 'cee', False)
        ]
        with patch_policies(test_policies):
            for pol in POLICIES:
                # indexing the singleton by int must return the same object
                looked_up = POLICIES[int(pol)]
                self.assertEqual(looked_up, pol)

    def test_quorum_size_replication(self):
        """Replication quorum is a majority of the ring's replica count."""
        expected_sizes = {1: 1, 2: 2, 3: 2, 4: 3, 5: 3}
        for replicas, quorum in sorted(expected_sizes.items()):
            policy = StoragePolicy(
                0, 'zero', object_ring=FakeRing(replicas=replicas))
            self.assertEqual(quorum, policy.quorum)

    def test_quorum_size_erasure_coding(self):
        """EC quorum is ndata plus the driver's minimum parity fragments."""
        test_ec_policies = [
            ECStoragePolicy(10, 'ec8-2', ec_type='jerasure_rs_vand',
                            ec_ndata=8, ec_nparity=2),
            ECStoragePolicy(11, 'df10-6', ec_type='flat_xor_hd_4',
                            ec_ndata=10, ec_nparity=6),
        ]
        for policy in test_ec_policies:
            # pyeclib knows how many parity fragments the scheme needs on
            # top of the data fragments for a write to be durable
            min_parity = policy.pyeclib_driver.min_parity_fragments_needed()
            self.assertEqual(policy.ec_ndata + min_parity, policy.quorum)

    def test_validate_ring(self):
        """_validate_ring() rejects rings whose replica count != k + m."""
        # Every ring below is deliberately sized differently from
        # ec_ndata + ec_nparity, so validation must fail for all of them.
        test_policies = [
            ECStoragePolicy(0, 'ec8-2', ec_type='jerasure_rs_vand',
                            ec_ndata=8, ec_nparity=2,
                            object_ring=FakeRing(replicas=8),
                            is_default=True),
            ECStoragePolicy(1, 'ec10-4', ec_type='jerasure_rs_vand',
                            ec_ndata=10, ec_nparity=4,
                            object_ring=FakeRing(replicas=10)),
            ECStoragePolicy(2, 'ec4-2', ec_type='jerasure_rs_vand',
                            ec_ndata=4, ec_nparity=2,
                            object_ring=FakeRing(replicas=7)),
        ]
        policies = StoragePolicyCollection(test_policies)

        for policy in policies:
            expected_nodes = policy.ec_ndata + policy.ec_nparity
            msg = 'EC ring for policy %s needs to be configured with ' \
                  'exactly %d nodes.' % (policy.name, expected_nodes)
            self.assertRaisesWithMessage(RingValidationError, msg,
                                         policy._validate_ring)

    def test_storage_policy_get_info(self):
        """get_info() exposes policy metadata, full or abbreviated.

        With config=True all fields (type, EC parameters, flags) are
        included; with config=False only the client-visible subset is
        returned, and default/deprecated flags appear only when set.
        """
        test_policies = [
            StoragePolicy(0, 'zero', is_default=True),
            StoragePolicy(1, 'one', is_deprecated=True),
            ECStoragePolicy(10,
                            'ten',
                            ec_type='jerasure_rs_vand',
                            ec_ndata=10,
                            ec_nparity=3),
            ECStoragePolicy(11,
                            'done',
                            is_deprecated=True,
                            ec_type='jerasure_rs_vand',
                            ec_ndata=10,
                            ec_nparity=3),
        ]
        policies = StoragePolicyCollection(test_policies)
        # expected info keyed by (policy index, config flag)
        expected = {
            # default replication
            (0, True): {
                'name': 'zero',
                'default': True,
                'deprecated': False,
                'policy_type': REPL_POLICY
            },
            (0, False): {
                'name': 'zero',
                'default': True,
            },
            # deprecated replication
            (1, True): {
                'name': 'one',
                'default': False,
                'deprecated': True,
                'policy_type': REPL_POLICY
            },
            (1, False): {
                'name': 'one',
                'deprecated': True,
            },
            # enabled ec
            (10, True): {
                'name': 'ten',
                'default': False,
                'deprecated': False,
                'policy_type': EC_POLICY,
                'ec_type': 'jerasure_rs_vand',
                'ec_num_data_fragments': 10,
                'ec_num_parity_fragments': 3,
                'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
            },
            (10, False): {
                'name': 'ten',
            },
            # deprecated ec
            (11, True): {
                'name': 'done',
                'default': False,
                'deprecated': True,
                'policy_type': EC_POLICY,
                'ec_type': 'jerasure_rs_vand',
                'ec_num_data_fragments': 10,
                'ec_num_parity_fragments': 3,
                'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
            },
            (11, False): {
                'name': 'done',
                'deprecated': True,
            },
        }
        # show whole-dict diffs on failure
        self.maxDiff = None
        for policy in policies:
            expected_info = expected[(int(policy), True)]
            self.assertEqual(policy.get_info(config=True), expected_info)
            expected_info = expected[(int(policy), False)]
            self.assertEqual(policy.get_info(config=False), expected_info)
# Esempio n. 11
# 0
 def test_storage_policy_get_info(self):
     test_policies = [
         StoragePolicy(0, 'zero', is_default=True),
         StoragePolicy(1, 'one', is_deprecated=True),
         ECStoragePolicy(10,
                         'ten',
                         ec_type='jerasure_rs_vand',
                         ec_ndata=10,
                         ec_nparity=3),
         ECStoragePolicy(11,
                         'done',
                         is_deprecated=True,
                         ec_type='jerasure_rs_vand',
                         ec_ndata=10,
                         ec_nparity=3),
     ]
     policies = StoragePolicyCollection(test_policies)
     expected = {
         # default replication
         (0, True): {
             'name': 'zero',
             'default': True,
             'deprecated': False,
             'policy_type': REPL_POLICY
         },
         (0, False): {
             'name': 'zero',
             'default': True,
         },
         # deprecated replication
         (1, True): {
             'name': 'one',
             'default': False,
             'deprecated': True,
             'policy_type': REPL_POLICY
         },
         (1, False): {
             'name': 'one',
             'deprecated': True,
         },
         # enabled ec
         (10, True): {
             'name': 'ten',
             'default': False,
             'deprecated': False,
             'policy_type': EC_POLICY,
             'ec_type': 'jerasure_rs_vand',
             'ec_num_data_fragments': 10,
             'ec_num_parity_fragments': 3,
             'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
         },
         (10, False): {
             'name': 'ten',
         },
         # deprecated ec
         (11, True): {
             'name': 'done',
             'default': False,
             'deprecated': True,
             'policy_type': EC_POLICY,
             'ec_type': 'jerasure_rs_vand',
             'ec_num_data_fragments': 10,
             'ec_num_parity_fragments': 3,
             'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE,
         },
         (11, False): {
             'name': 'done',
             'deprecated': True,
         },
     }
     self.maxDiff = None
     for policy in policies:
         expected_info = expected[(int(policy), True)]
         self.assertEqual(policy.get_info(config=True), expected_info)
         expected_info = expected[(int(policy), False)]
         self.assertEqual(policy.get_info(config=False), expected_info)
# Esempio n. 12
# 0
class TestRelinker(unittest.TestCase):
    def setUp(self):
        """Build a one-device temp layout with a single on-disk object.

        Creates a 6-device ring builder at PART_POWER, one real device
        directory ('sda1') holding one object for 'a/c/o', and replaces the
        global policy collection with a single replication policy.  Also
        precomputes where that object will live after a partition-power
        increase (``expected_dir`` / ``expected_file``).
        """
        # write_metadata needs xattr support on the temp filesystem
        skip_if_no_xattrs()
        self.logger = FakeLogger()
        self.testdir = tempfile.mkdtemp()
        self.devices = os.path.join(self.testdir, 'node')
        shutil.rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)
        os.mkdir(self.devices)

        self.rb = ring.RingBuilder(PART_POWER, 6.0, 1)

        for i in range(6):
            ip = "127.0.0.%s" % i
            self.rb.add_dev({'id': i, 'region': 0, 'zone': 0, 'weight': 1,
                             'ip': ip, 'port': 10000, 'device': 'sda1'})
        self.rb.rebalance(seed=1)

        self.existing_device = 'sda1'
        os.mkdir(os.path.join(self.devices, self.existing_device))
        self.objects = os.path.join(self.devices, self.existing_device,
                                    'objects')
        os.mkdir(self.objects)
        self._hash = utils.hash_path('a/c/o')
        digest = binascii.unhexlify(self._hash)
        # Partition under the current part power, and the partition the same
        # object lands in after the power is increased by one (one more bit
        # of the hash is kept).
        self.part = struct.unpack_from('>I', digest)[0] >> 24
        self.next_part = struct.unpack_from('>I', digest)[0] >> 23
        self.objdir = os.path.join(
            self.objects, str(self.part), self._hash[-3:], self._hash)
        os.makedirs(self.objdir)
        self.object_fname = "1278553064.00000.data"
        self.objname = os.path.join(self.objdir, self.object_fname)
        with open(self.objname, "wb") as dummy:
            dummy.write(b"Hello World!")
            write_metadata(dummy, {'name': '/a/c/o', 'Content-Length': '12'})

        # swap in a single test policy; tearDown() restores the real ones
        test_policies = [StoragePolicy(0, 'platin', True)]
        storage_policy._POLICIES = StoragePolicyCollection(test_policies)

        self.expected_dir = os.path.join(
            self.objects, str(self.next_part), self._hash[-3:], self._hash)
        self.expected_file = os.path.join(self.expected_dir, self.object_fname)

    def _save_ring(self):
        """Write the builder's current ring for every policy to testdir."""
        ring_data = self.rb.get_ring()
        for policy in POLICIES:
            gz_path = os.path.join(self.testdir,
                                   '%s.ring.gz' % policy.ring_name)
            ring_data.save(gz_path)
            # Enforce ring reloading in relinker
            policy.object_ring = None

    def tearDown(self):
        # Remove the scratch tree and restore the real on-disk policy
        # configuration that setUp() replaced.
        shutil.rmtree(self.testdir, ignore_errors=1)
        storage_policy.reload_storage_policies()

    def test_relink(self):
        """relink() hard-links objects into the next-part-power location."""
        self.rb.prepare_increase_partition_power()
        self._save_ring()
        relinker.relink(self.testdir, self.devices, True)

        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))

        # Same inode means a hard link was made, not a copy.
        old_stat = os.stat(os.path.join(self.objdir, self.object_fname))
        new_stat = os.stat(self.expected_file)
        self.assertEqual(old_stat.st_ino, new_stat.st_ino)

    def test_relink_device_filter(self):
        """relink() still works when filtered to the matching device."""
        self.rb.prepare_increase_partition_power()
        self._save_ring()
        relinker.relink(self.testdir, self.devices, True,
                        device=self.existing_device)

        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))

        # Same inode means a hard link was made, not a copy.
        old_stat = os.stat(os.path.join(self.objdir, self.object_fname))
        self.assertEqual(old_stat.st_ino, os.stat(self.expected_file).st_ino)

    def test_relink_device_filter_invalid(self):
        """A filter naming an unknown device must relink nothing at all."""
        self.rb.prepare_increase_partition_power()
        self._save_ring()
        relinker.relink(self.testdir, self.devices, True, device='none')

        # neither the new directory nor the new file may exist
        self.assertFalse(os.path.isdir(self.expected_dir))
        self.assertFalse(os.path.isfile(self.expected_file))

    def _common_test_cleanup(self, relink=True):
        """Prepare a post-power-increase layout for the cleanup tests.

        :param relink: if True, also create the hard link in the new
                       location that a normal relinker run would have left
        """
        # Create a ring that has prev_part_power set
        self.rb.prepare_increase_partition_power()
        self.rb.increase_partition_power()
        self._save_ring()

        os.makedirs(self.expected_dir)

        if relink:
            # Create a hardlink to the original object name. This is expected
            # after a normal relinker run
            os.link(os.path.join(self.objdir, self.object_fname),
                    self.expected_file)

    def test_cleanup(self):
        """After relinking, cleanup() removes the old-partition file."""
        self._common_test_cleanup()
        self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True))

        # The new location survives; the original hard link is gone.
        old_file = os.path.join(self.objdir, self.object_fname)
        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))
        self.assertFalse(os.path.isfile(old_file))

    def test_cleanup_device_filter(self):
        """cleanup() filtered to the matching device still removes old files."""
        self._common_test_cleanup()
        self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True,
                                             device=self.existing_device))

        # The new location survives; the original hard link is gone.
        old_file = os.path.join(self.objdir, self.object_fname)
        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))
        self.assertFalse(os.path.isfile(old_file))

    def test_cleanup_device_filter_invalid(self):
        """cleanup() filtered to an unknown device must touch nothing."""
        self._common_test_cleanup()
        self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True,
                                             device='none'))

        # Both the old and the new file must still be present.
        old_file = os.path.join(self.objdir, self.object_fname)
        self.assertTrue(os.path.isdir(self.expected_dir))
        self.assertTrue(os.path.isfile(self.expected_file))
        self.assertTrue(os.path.isfile(old_file))

    def test_relink_cleanup(self):
        """A full relink-then-cleanup cycle keeps the state file accurate.

        The per-device state file records a ``[relinked, cleaned]`` flag
        pair per partition.
        """
        state_file = os.path.join(self.devices, self.existing_device,
                                  'relink.objects.json')

        self.rb.prepare_increase_partition_power()
        self._save_ring()
        relinker.relink(self.testdir, self.devices, True)
        # after relink only the original partition is marked [relinked]
        with open(state_file, 'rt') as f:
            self.assertEqual(json.load(f), {str(self.part): [True, False]})

        self.rb.increase_partition_power()
        self.rb._ring = None  # Force builder to reload ring
        self._save_ring()
        relinker.cleanup(self.testdir, self.devices, True)
        # after cleanup both the old and new partitions are fully done
        with open(state_file, 'rt') as f:
            self.assertEqual(json.load(f),
                             {str(self.part): [True, True],
                              str(self.next_part): [True, True]})

    def test_devices_filter_filtering(self):
        """devices_filter() intersects found devices with the filter."""
        # No filter: every device comes back.
        self.assertEqual(
            set([self.existing_device]),
            relinker.devices_filter(None, "", [self.existing_device]))

        # Matching filter: only the requested device is returned.
        self.assertEqual(
            set([self.existing_device]),
            relinker.devices_filter(self.existing_device, "",
                                    [self.existing_device, 'sda2']))

        # Non-matching filter: nothing is returned.
        self.assertEqual(
            set(),
            relinker.devices_filter('none', "", [self.existing_device]))

    def test_hook_pre_post_device_locking(self):
        """hook_pre_device() takes an exclusive per-device flock that
        hook_post_device() releases.

        Uses a second, direct flock attempt (LOCK_NB) to prove the lock is
        actually held, then verifies it can be taken again after release.
        """
        locks = [None]
        device_path = os.path.join(self.devices, self.existing_device)
        datadir = 'object'
        lock_file = os.path.join(device_path, '.relink.%s.lock' % datadir)

        # The first run gets the lock
        relinker.hook_pre_device(locks, {}, datadir, device_path)
        self.assertNotEqual([None], locks)

        # A following run would block
        with self.assertRaises(IOError) as raised:
            with open(lock_file, 'a') as f:
                fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        self.assertEqual(errno.EAGAIN, raised.exception.errno)

        # Another must not get the lock, so it must return an empty list
        relinker.hook_post_device(locks, "")
        self.assertEqual([None], locks)

        # now that the lock is released, a non-blocking flock succeeds
        with open(lock_file, 'a') as f:
            fcntl.flock(f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)

    def test_state_file(self):
        """partitions_filter / hook_post_partition persist progress to disk.

        Walks a complete relink step and then a cleanup step, partition by
        partition, checking after each ack that the in-memory ``states``
        dict and the JSON state file agree, that restarts resume from the
        recorded state, and that a corrupt state file resets to scratch.
        Each partition maps to a ``[relinked, cleaned]`` flag pair.
        """
        device_path = os.path.join(self.devices, self.existing_device)
        datadir = 'objects'
        datadir_path = os.path.join(device_path, datadir)
        state_file = os.path.join(device_path, 'relink.%s.json' % datadir)

        def call_partition_filter(step, parts):
            # Partition 312 will be ignored because it must have been created
            # by the relinker
            return relinker.partitions_filter(states, step,
                                              PART_POWER, PART_POWER + 1,
                                              datadir_path, parts)

        # Start relinking
        states = {}

        # Load the states: As it starts, it must be empty
        locks = [None]
        relinker.hook_pre_device(locks, states, datadir, device_path)
        self.assertEqual({}, states)
        os.close(locks[0])  # Release the lock

        # Partition 312 is ignored because it must have been created with the
        # next_part_power, so it does not need to be relinked
        # 96 and 227 are reverse ordered
        # auditor_status_ALL.json is ignored because it's not a partition
        self.assertEqual(['227', '96'],
                         call_partition_filter(relinker.STEP_RELINK,
                                               ['96', '227', '312',
                                                'auditor_status.json']))
        self.assertEqual(states, {'96': [False, False], '227': [False, False]})

        # Ack partition 96
        relinker.hook_post_partition(states, relinker.STEP_RELINK,
                                     os.path.join(datadir_path, '96'))
        self.assertEqual(states, {'96': [True, False], '227': [False, False]})
        with open(state_file, 'rt') as f:
            self.assertEqual(json.load(f), {'96': [True, False],
                                            '227': [False, False]})

        # Restart relinking after only part 96 was done
        self.assertEqual(['227'],
                         call_partition_filter(relinker.STEP_RELINK,
                                               ['96', '227', '312']))
        self.assertEqual(states, {'96': [True, False], '227': [False, False]})

        # Ack partition 227
        relinker.hook_post_partition(states, relinker.STEP_RELINK,
                                     os.path.join(datadir_path, '227'))
        self.assertEqual(states, {'96': [True, False], '227': [True, False]})
        with open(state_file, 'rt') as f:
            self.assertEqual(json.load(f), {'96': [True, False],
                                            '227': [True, False]})

        # If the process restarts, it reload the state
        locks = [None]
        states = {}
        relinker.hook_pre_device(locks, states, datadir, device_path)
        self.assertEqual(states, {'96': [True, False], '227': [True, False]})
        os.close(locks[0])  # Release the lock

        # Start cleanup
        self.assertEqual(['227', '96'],
                         call_partition_filter(relinker.STEP_CLEANUP,
                                               ['96', '227', '312']))
        # Ack partition 227
        relinker.hook_post_partition(states, relinker.STEP_CLEANUP,
                                     os.path.join(datadir_path, '227'))
        self.assertEqual(states, {'96': [True, False], '227': [True, True]})
        with open(state_file, 'rt') as f:
            self.assertEqual(json.load(f), {'96': [True, False],
                                            '227': [True, True]})

        # Restart cleanup after only part 227 was done
        self.assertEqual(['96'],
                         call_partition_filter(relinker.STEP_CLEANUP,
                                               ['96', '227', '312']))
        self.assertEqual(states, {'96': [True, False], '227': [True, True]})

        # Ack partition 96
        relinker.hook_post_partition(states, relinker.STEP_CLEANUP,
                                     os.path.join(datadir_path, '96'))
        self.assertEqual(states, {'96': [True, True], '227': [True, True]})
        with open(state_file, 'rt') as f:
            self.assertEqual(json.load(f), {'96': [True, True],
                                            '227': [True, True]})

        # At the end, the state is still accurate
        locks = [None]
        states = {}
        relinker.hook_pre_device(locks, states, datadir, device_path)
        self.assertEqual(states, {'96': [True, True], '227': [True, True]})
        os.close(locks[0])  # Release the lock

        # If the file gets corrupted, restart from scratch
        with open(state_file, 'wt') as f:
            f.write('NOT JSON')
        locks = [None]
        states = {}
        relinker.hook_pre_device(locks, states, datadir, device_path)
        self.assertEqual(states, {})
        os.close(locks[0])  # Release the lock

    def test_cleanup_not_yet_relinked(self):
        """Cleanup must report an error and keep the original file when
        the object was never relinked to its new partition location."""
        self._common_test_cleanup(relink=False)
        # Exactly one error: the counterpart in the new location is missing.
        self.assertEqual(1, relinker.cleanup(self.testdir, self.devices, True))

        # The original object file must be left in place.
        old_location = os.path.join(self.objdir, self.object_fname)
        self.assertTrue(os.path.isfile(old_location))

    def test_cleanup_deleted(self):
        """A tombstone in the new location is acceptable: cleanup should
        complete without reporting any errors."""
        self._common_test_cleanup()

        # Simulate a deletion that happened after relinking: swap the
        # .data file in the new location for a .ts tombstone.
        tombstone = self.expected_file[:-4] + "ts"
        os.rename(self.expected_file, tombstone)

        self.assertEqual(0, relinker.cleanup(self.testdir, self.devices, True))

    def test_cleanup_doesnotexist(self):
        """If the relinked file vanished before cleanup runs, cleanup
        reports one error and logs a DiskFileNotExist warning."""
        self._common_test_cleanup()

        # Remove the file from the new location behind cleanup's back.
        os.remove(self.expected_file)

        errors = relinker.cleanup(
            self.testdir, self.devices, True, self.logger)
        self.assertEqual(1, errors)
        expected_warning = 'Error cleaning up %s: %s' % (
            self.objname, repr(exceptions.DiskFileNotExist()))
        self.assertEqual([expected_warning],
                         self.logger.get_lines_for_level('warning'))

    @patch_policies(
        [ECStoragePolicy(0, name='platin', is_default=True,
                         ec_type=DEFAULT_TEST_EC_TYPE,
                         ec_ndata=4, ec_nparity=2)])
    def test_cleanup_non_durable_fragment(self):
        """With an EC policy patched in, every fragment is non-durable and
        the diskfile layer raises DiskFileNotExist. That is fine as long
        as the counterpart exists in the new location — the reconstructor
        will repair durability later — so cleanup must succeed quietly."""
        self._common_test_cleanup()

        result = relinker.cleanup(
            self.testdir, self.devices, True, self.logger)
        self.assertEqual(0, result)
        self.assertEqual([], self.logger.get_lines_for_level('warning'))

    def test_cleanup_quarantined(self):
        """A corrupt object in the new location fails the audit during
        cleanup, gets quarantined, and is reported as one error."""
        self._common_test_cleanup()

        # Corrupt the relinked copy so its audit fails.
        with open(self.expected_file, "wb") as fp:
            fp.write(b'trash')

        errors = relinker.cleanup(
            self.testdir, self.devices, True, self.logger)
        self.assertEqual(1, errors)

        warnings = self.logger.get_lines_for_level('warning')
        self.assertIn('failed audit and was quarantined', warnings[0])
# NOTE: scraped-page artifact ("Esempio n. 13" separator) removed;
# the following test belongs to a different example/file section.
    def test_multiple_names_EC(self):
        """Exercise alias handling for EC storage policies.

        Covers: rejection of a name duplicated across policies, lookup of
        a policy by any of its aliases, the same behaviors when policies
        come from a parsed conf file, a primary name repeated under its
        own policy's aliases (tolerated), and a duplicated alias within a
        single policy (rejected).
        """
        # checking duplicate names on insert: 'ec8-2' is the primary name
        # of policy 0 and also listed as an alias of policy 1, so building
        # the collection must fail
        test_policies_ec = [
            ECStoragePolicy(
                0, 'ec8-2',
                aliases='zeus, jupiter',
                ec_type=DEFAULT_TEST_EC_TYPE,
                ec_ndata=8, ec_nparity=2,
                object_ring=FakeRing(replicas=8),
                is_default=True),
            ECStoragePolicy(
                1, 'ec10-4',
                aliases='ec8-2',
                ec_type=DEFAULT_TEST_EC_TYPE,
                ec_ndata=10, ec_nparity=4,
                object_ring=FakeRing(replicas=10))]

        self.assertRaises(PolicyError, StoragePolicyCollection,
                          test_policies_ec)

        # checking correct retrieval using other names: each policy gets
        # distinct aliases, so the collection builds successfully
        good_test_policies_EC = [
            ECStoragePolicy(0, 'ec8-2', aliases='zeus, jupiter',
                            ec_type=DEFAULT_TEST_EC_TYPE,
                            ec_ndata=8, ec_nparity=2,
                            object_ring=FakeRing(replicas=8),
                            is_default=True),
            ECStoragePolicy(1, 'ec10-4', aliases='athena, minerva',
                            ec_type=DEFAULT_TEST_EC_TYPE,
                            ec_ndata=10, ec_nparity=4,
                            object_ring=FakeRing(replicas=10)),
            ECStoragePolicy(2, 'ec4-2', aliases='poseidon, neptune',
                            ec_type=DEFAULT_TEST_EC_TYPE,
                            ec_ndata=4, ec_nparity=2,
                            object_ring=FakeRing(replicas=7)),
        ]
        ec_policies = StoragePolicyCollection(good_test_policies_EC)

        # primary name and every alias resolve to the same policy object
        for name in ('ec8-2', 'zeus', 'jupiter'):
            self.assertEqual(ec_policies.get_by_name(name), ec_policies[0])
        for name in ('ec10-4', 'athena', 'minerva'):
            self.assertEqual(ec_policies.get_by_name(name), ec_policies[1])

        # Testing parsing of conf files/text: aliases declared in config
        # must behave the same as ones passed to the constructor
        good_ec_conf = self._conf("""
        [storage-policy:0]
        name = ec8-2
        aliases = zeus, jupiter
        policy_type = erasure_coding
        ec_type = %(ec_type)s
        default = yes
        ec_num_data_fragments = 8
        ec_num_parity_fragments = 2
        [storage-policy:1]
        name = ec10-4
        aliases = poseidon, neptune
        policy_type = erasure_coding
        ec_type = %(ec_type)s
        ec_num_data_fragments = 10
        ec_num_parity_fragments = 4
        """ % {'ec_type': DEFAULT_TEST_EC_TYPE})

        ec_policies = parse_storage_policies(good_ec_conf)
        self.assertEqual(ec_policies.get_by_name('ec8-2'),
                         ec_policies[0])
        self.assertEqual(ec_policies.get_by_name('ec10-4'),
                         ec_policies.get_by_name('poseidon'))

        name_repeat_ec_conf = self._conf("""
        [storage-policy:0]
        name = ec8-2
        aliases = ec8-2
        policy_type = erasure_coding
        ec_type = %(ec_type)s
        default = yes
        ec_num_data_fragments = 8
        ec_num_parity_fragments = 2
        """ % {'ec_type': DEFAULT_TEST_EC_TYPE})
        # The parse below should not raise: repeating the primary name
        # under its own policy's aliases is permitted during construction,
        # but only because automated testing requires it.
        ec_policies = parse_storage_policies(name_repeat_ec_conf)

        # A genuinely duplicated alias within one policy is still an error.
        bad_ec_conf = self._conf("""
        [storage-policy:0]
        name = ec8-2
        aliases = zeus, zeus
        policy_type = erasure_coding
        ec_type = %(ec_type)s
        default = yes
        ec_num_data_fragments = 8
        ec_num_parity_fragments = 2
        """ % {'ec_type': DEFAULT_TEST_EC_TYPE})
        self.assertRaisesWithMessage(PolicyError,
                                     'is already assigned to this policy',
                                     parse_storage_policies, bad_ec_conf)