Example #1
    def setup_bad_zero_byte(self, with_ts=False):
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        ts_file_path = ''
        if with_ts:
            name_hash = hash_path('a', 'c', 'o')
            dir_path = os.path.join(
                self.devices, 'sda',
                storage_directory(get_data_dir(0), '0', name_hash))
            ts_file_path = os.path.join(dir_path, '99999.ts')
            if not os.path.exists(dir_path):
                mkdirs(dir_path)
            fp = open(ts_file_path, 'w')
            write_metadata(fp, {'X-Timestamp': '99999', 'name': '/a/c/o'})
            fp.close()

        etag = md5()
        with self.disk_file.create() as writer:
            etag = etag.hexdigest()
            metadata = {
                'ETag': etag,
                'X-Timestamp': str(normalize_timestamp(time.time())),
                'Content-Length': 10,
            }
            writer.put(metadata)
            etag = md5()
            etag = etag.hexdigest()
            metadata['ETag'] = etag
            write_metadata(writer._fd, metadata)
        return ts_file_path
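A sketch of how a test might consume this helper; run_once and its
zero_byte_fps keyword are assumptions about the auditor's interface,
not shown in the example above:

    # Hypothetical usage: audit the corrupt object created by the helper.
    # The auditor should quarantine it, since the metadata claims a
    # Content-Length of 10 but the data file on disk is zero bytes.
    ts_path = self.setup_bad_zero_byte()
    self.auditor.run_once(zero_byte_fps=50)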
Example #2
 def test_delete_partition(self):
     df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
     mkdirs(df.datadir)
     part_path = os.path.join(self.objects, '1')
     self.assertTrue(os.access(part_path, os.F_OK))
     self.replicator.replicate()
     self.assertFalse(os.access(part_path, os.F_OK))
Example #3
    @contextmanager
    def mkstemp(self, size=None):
        """
        Contextmanager to make a temporary file.

        :param size: optional initial size of file to allocate on disk
        :raises DiskFileNoSpace: if a size is specified and fallocate fails
        """
        if not os.path.exists(self.tmpdir):
            mkdirs(self.tmpdir)
        fd, self.tmppath = mkstemp(dir=self.tmpdir)
        try:
            if size is not None and size > 0:
                try:
                    fallocate(fd, size)
                except OSError:
                    raise DiskFileNoSpace()
            yield fd
        finally:
            try:
                os.close(fd)
            except OSError:
                pass
            tmppath, self.tmppath = self.tmppath, None
            try:
                os.unlink(tmppath)
            except OSError:
                pass
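A minimal sketch of a caller, assuming the method is decorated with
@contextmanager as above and lives on a DiskFile-like object df. Note
that the finally block swallows the unlink error on purpose: a
successful put() renames the temp file into the datadir first, so the
cleanup unlink finds nothing to remove:

    # Hypothetical caller: write into the temp fd; close and unlink
    # happen in the context manager's finally block.
    with df.mkstemp(size=10) as fd:
        os.write(fd, '1234567890')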
Example #4
    def setUp(self):
        self.testdir = os.path.join(mkdtemp(), "tmp_test_object_auditor")
        self.devices = os.path.join(self.testdir, "node")
        self.rcache = os.path.join(self.testdir, "object.recon")
        self.logger = FakeLogger()
        rmtree(self.testdir, ignore_errors=1)
        mkdirs(os.path.join(self.devices, "sda"))
        os.mkdir(os.path.join(self.devices, "sdb"))

        # policy 0
        self.objects = os.path.join(self.devices, "sda", get_data_dir(POLICIES[0]))
        self.objects_2 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[0]))
        os.mkdir(self.objects)
        # policy 1
        self.objects_p1 = os.path.join(self.devices, "sda", get_data_dir(POLICIES[1]))
        self.objects_2_p1 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[1]))
        os.mkdir(self.objects_p1)

        self.parts = {}
        self.parts_p1 = {}
        for part in ["0", "1", "2", "3"]:
            self.parts[part] = os.path.join(self.objects, part)
            self.parts_p1[part] = os.path.join(self.objects_p1, part)
            os.mkdir(os.path.join(self.objects, part))
            os.mkdir(os.path.join(self.objects_p1, part))

        self.conf = dict(devices=self.devices, mount_check="false", object_size_stats="10,100,1024,10240")
        self.df_mgr = DiskFileManager(self.conf, self.logger)

        # diskfiles for policy 0, 1
        self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES[0])
        self.disk_file_p1 = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES[1])
Example #5
    def setUp(self):
        self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
        self.devices = os.path.join(self.testdir, 'node')
        self.rcache = os.path.join(self.testdir, 'object.recon')
        self.logger = FakeLogger()
        rmtree(self.testdir, ignore_errors=1)
        mkdirs(os.path.join(self.devices, 'sda'))
        os.mkdir(os.path.join(self.devices, 'sdb'))

        # policy 0
        self.objects = os.path.join(self.devices, 'sda', get_data_dir(0))
        self.objects_2 = os.path.join(self.devices, 'sdb', get_data_dir(0))
        os.mkdir(self.objects)
        # policy 1
        self.objects_p1 = os.path.join(self.devices, 'sda', get_data_dir(1))
        self.objects_2_p1 = os.path.join(self.devices, 'sdb', get_data_dir(1))
        os.mkdir(self.objects_p1)

        self.parts = {}
        self.parts_p1 = {}
        for part in ['0', '1', '2', '3']:
            self.parts[part] = os.path.join(self.objects, part)
            self.parts_p1[part] = os.path.join(self.objects_p1, part)
            os.mkdir(os.path.join(self.objects, part))
            os.mkdir(os.path.join(self.objects_p1, part))

        self.conf = dict(
            devices=self.devices,
            mount_check='false',
            object_size_stats='10,100,1024,10240')
        self.df_mgr = DiskFileManager(self.conf, self.logger)

        # diskfiles for policy 0, 1
        self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o', 0)
        self.disk_file_p1 = self.df_mgr.get_diskfile('sda', '0', 'a', 'c',
                                                     'o', 1)
Example #6
    @contextmanager
    def create(self, size=None):
        """
        Context manager to create a file. We create a temporary file first, and
        then return a DiskFileWriter object to encapsulate the state.

        .. note::

            An implementation is not required to perform on-disk
            preallocations even if the parameter is specified. But if it does
            and it fails, it must raise a `DiskFileNoSpace` exception.

        :param size: optional initial size of file to explicitly allocate on
                     disk
        :raises DiskFileNoSpace: if a size is specified and allocation fails
        """
        if not exists(self._tmpdir):
            mkdirs(self._tmpdir)
        fd, tmppath = mkstemp(dir=self._tmpdir)
        try:
            if size is not None and size > 0:
                try:
                    fallocate(fd, size)
                except OSError:
                    raise DiskFileNoSpace()
            yield DiskFileWriter(self._name, self._datadir, fd, tmppath,
                                 self._bytes_per_sync, self._threadpool)
        finally:
            try:
                os.close(fd)
            except OSError:
                pass
            try:
                os.unlink(tmppath)
            except OSError:
                pass
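A sketch of the intended call pattern, mirroring Example #1 above;
writer.put() appears there, while write() is an assumed DiskFileWriter
method inferred from its role:

    # Hypothetical usage: stream data through the writer, then commit
    # the temp file into the datadir together with its metadata.
    with df.create(size=len(data)) as writer:
        writer.write(data)
        writer.put(metadata)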
Example #7
    def test_invalidate_hash(self):

        def assertFileData(file_path, data):
            with open(file_path, 'r') as fp:
                fdata = fp.read()
                self.assertEquals(pickle.loads(fdata), pickle.loads(data))

        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
        mkdirs(df.datadir)
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        hashes_file = os.path.join(self.objects, '0',
                                   object_replicator.HASH_FILE)
        # test that the exception from a non-existent file is caught
        self.assertEquals(object_replicator.invalidate_hash(whole_path_from),
                          None)
        # test that hashes get cleared
        check_pickle_data = pickle.dumps({data_dir: None},
                                         object_replicator.PICKLE_PROTOCOL)
        for data_hash in [{data_dir: None}, {data_dir: 'abcdefg'}]:
            with open(hashes_file, 'wb') as fp:
                pickle.dump(data_hash, fp, object_replicator.PICKLE_PROTOCOL)
            object_replicator.invalidate_hash(whole_path_from)
            assertFileData(hashes_file, check_pickle_data)
Example #8
    def test_quarantine_same_file(self):
        df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
                                    FakeLogger())
        mkdirs(df.datadir)
        f = open(os.path.join(df.datadir,
                              normalize_timestamp(time()) + '.data'), 'wb')
        setxattr(f.fileno(), diskfile.METADATA_KEY,
                 pickle.dumps({}, diskfile.PICKLE_PROTOCOL))
        df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
                                    FakeLogger())
        new_dir = df.quarantine()
        quar_dir = os.path.join(self.testdir, 'sda1', 'quarantined',
                                'objects', os.path.basename(os.path.dirname(
                                                            df.data_file)))
        self.assert_(os.path.isdir(quar_dir))
        self.assertEquals(quar_dir, new_dir)
        # have to remake the datadir and file
        mkdirs(df.datadir)
        f = open(os.path.join(df.datadir,
                              normalize_timestamp(time()) + '.data'), 'wb')
        setxattr(f.fileno(), diskfile.METADATA_KEY,
                 pickle.dumps({}, diskfile.PICKLE_PROTOCOL))

        df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
                                    FakeLogger(), keep_data_fp=True)
        double_uuid_path = df.quarantine()
        self.assert_(os.path.isdir(double_uuid_path))
        self.assert_('-' in os.path.basename(double_uuid_path))
Example #9
 def setup_partition(self, pool, partition):
     path = os.path.join(self.root, pool, self.srvdir, partition)
     if self.fs_per_part:
          fs = '%s/%s/%s' % (self.topfs, self.srvdir, partition)
         zfs_create(pool, fs, path)
     else:
         mkdirs(path)
Example #10
    def add_synced_container(self, broker):
        """
        Adds the container db represented by broker to the list of synced
        containers.

        :param broker: An instance of ContainerBroker representing the
                       container to add.
        """
        sync_file = self._container_to_synced_container_path(broker.db_file)
        stat = None
        try:
            stat = os.stat(sync_file)
        except OSError as oserr:
            if oserr.errno != errno.ENOENT:
                raise oserr

        if stat is not None:
            return

        sync_path = os.path.dirname(sync_file)
        mkdirs(sync_path)

        try:
            os.symlink(broker.db_file, sync_file)
        except OSError as oserr:
            if oserr.errno != errno.EEXIST or not os.path.islink(sync_file):
                raise oserr
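A hedged sketch of how the resulting links might be walked later; it
assumes only what the method above establishes, namely one symlink per
synced container under sync_path:

    # Hypothetical scan: resolve each symlink back to its container db.
    for name in os.listdir(sync_path):
        link = os.path.join(sync_path, name)
        if os.path.islink(link):
            db_file = os.readlink(link)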
Example #11
    def test_run_once_recover_from_timeout(self):
        replicator = object_replicator.ObjectReplicator(
            dict(swift_dir=self.testdir, devices=self.devices,
                mount_check='false', timeout='300', stats_interval='1'))
        was_connector = object_replicator.http_connect
        was_get_hashes = object_replicator.get_hashes
        was_execute = tpool.execute
        self.get_hash_count = 0
        try:

            def fake_get_hashes(*args, **kwargs):
                self.get_hash_count += 1
                if self.get_hash_count == 3:
                    # raise timeout on last call to get hashes
                    raise Timeout()
                return 2, {'abc': 'def'}

            def fake_exc(tester, *args, **kwargs):
                if 'Error syncing partition' in args[0]:
                    tester.i_failed = True

            self.i_failed = False
            object_replicator.http_connect = mock_http_connect(200)
            object_replicator.get_hashes = fake_get_hashes
            replicator.logger.exception = \
                lambda *args, **kwargs: fake_exc(self, *args, **kwargs)
            # Write some files into '1' and run replicate; they should be
            # moved to the other partitions and then the node should get
            # deleted.
            cur_part = '1'
            df = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o',
                          FakeLogger())
            mkdirs(df.datadir)
            f = open(os.path.join(df.datadir,
                                  normalize_timestamp(time.time()) + '.data'),
                     'wb')
            f.write('1234567890')
            f.close()
            ohash = hash_path('a', 'c', 'o')
            data_dir = ohash[-3:]
            whole_path_from = os.path.join(self.objects, cur_part, data_dir)
            process_arg_checker = []
            nodes = [node for node in
                     self.ring.get_part_nodes(int(cur_part))
                         if node['ip'] not in _ips()]
            for node in nodes:
                rsync_mod = '%s::object/sda/objects/%s' % (node['ip'],
                                                           cur_part)
                process_arg_checker.append(
                    (0, '', ['rsync', whole_path_from, rsync_mod]))
            self.assertTrue(os.access(os.path.join(self.objects,
                                                   '1', data_dir, ohash),
                                      os.F_OK))
            with _mock_process(process_arg_checker):
                replicator.run_once()
            self.assertFalse(process_errors)
            self.assertFalse(self.i_failed)
        finally:
            object_replicator.http_connect = was_connector
            object_replicator.get_hashes = was_get_hashes
            tpool.execute = was_execute
Example #12
    def find_and_process(self):
        src_filename = time.strftime(self.filename_format)
        working_dir = os.path.join(self.target_dir, ".%s-stats_tmp" % self.stats_type)
        shutil.rmtree(working_dir, ignore_errors=True)
        mkdirs(working_dir)
        tmp_filename = os.path.join(working_dir, src_filename)
        hasher = hashlib.md5()
        try:
            with open(tmp_filename, "wb") as statfile:
                statfile.write(self.get_header())
                for device in os.listdir(self.devices):
                    if self.mount_check and not check_mount(self.devices, device):
                        self.logger.error(_("Device %s is not mounted, skipping.") % device)
                        continue
                    db_dir = os.path.join(self.devices, device, self.data_dir)
                    if not os.path.exists(db_dir):
                        self.logger.debug(_("Path %s does not exist, skipping.") % db_dir)
                        continue
                    for root, dirs, files in os.walk(db_dir, topdown=False):
                        for filename in files:
                            if filename.endswith(".db"):
                                db_path = os.path.join(root, filename)
                                try:
                                    line_data = self.get_data(db_path)
                                except sqlite3.Error, err:
                                    self.logger.info(_("Error accessing db %s: %s") % (db_path, err))
                                    continue
                                if line_data:
                                    statfile.write(line_data)
                                    hasher.update(line_data)

            src_filename += hasher.hexdigest()
            renamer(tmp_filename, os.path.join(self.target_dir, src_filename))
Example #13
    @contextmanager
    def writer(self, size=None):
        """
        Context manager to write a file. We create a temporary file first, and
        then return a DiskWriter object to encapsulate the state.

        :param size: optional initial size of file to explicitly allocate on
                     disk
        :raises DiskFileNoSpace: if a size is specified and allocation fails
        """
        if not os.path.exists(self.tmpdir):
            mkdirs(self.tmpdir)
        fd, tmppath = mkstemp(dir=self.tmpdir)
        try:
            if size is not None and size > 0:
                try:
                    fallocate(fd, size)
                except OSError:
                    raise DiskFileNoSpace()
            yield DiskWriter(self, fd, tmppath, self.threadpool)
        finally:
            try:
                os.close(fd)
            except OSError:
                pass
            try:
                os.unlink(tmppath)
            except OSError:
                pass
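A sketch of the failure path promised by the docstring: if fallocate
cannot reserve the requested size, DiskFileNoSpace propagates out of
the with statement. The caller names here (content_length, chunks,
HTTPInsufficientStorage, device) are hypothetical, and dw.write() is
an assumed DiskWriter method:

    # Hypothetical caller turning allocation failure into an error reply.
    try:
        with df.writer(size=content_length) as dw:
            for chunk in chunks:
                dw.write(chunk)
    except DiskFileNoSpace:
        return HTTPInsufficientStorage(drive=device)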
Example #14
 def test_run_once_recover_from_failure(self):
     replicator = object_replicator.ObjectReplicator(
         dict(swift_dir=self.testdir, devices=self.devices, mount_check="false", timeout="300", stats_interval="1")
     )
     was_connector = object_replicator.http_connect
     try:
         object_replicator.http_connect = mock_http_connect(200)
          # Write some files into '1' and run replicate; they should be
          # moved to the other partitions and then the node should get
          # deleted.
         cur_part = "1"
         df = DiskFile(self.devices, "sda", cur_part, "a", "c", "o", FakeLogger())
         mkdirs(df.datadir)
         f = open(os.path.join(df.datadir, normalize_timestamp(time.time()) + ".data"), "wb")
         f.write("1234567890")
         f.close()
         ohash = hash_path("a", "c", "o")
         data_dir = ohash[-3:]
         whole_path_from = os.path.join(self.objects, cur_part, data_dir)
         process_arg_checker = []
         nodes = [node for node in self.ring.get_part_nodes(int(cur_part)) if node["ip"] not in _ips()]
         for node in nodes:
             rsync_mod = "%s::object/sda/objects/%s" % (node["ip"], cur_part)
             process_arg_checker.append((0, "", ["rsync", whole_path_from, rsync_mod]))
         self.assertTrue(os.access(os.path.join(self.objects, "1", data_dir, ohash), os.F_OK))
         with _mock_process(process_arg_checker):
             replicator.run_once()
         self.assertFalse(process_errors)
         for i, result in [("0", True), ("1", False), ("2", True), ("3", True)]:
             self.assertEquals(
                 os.access(os.path.join(self.objects, i, object_replicator.HASH_FILE), os.F_OK), result
             )
     finally:
         object_replicator.http_connect = was_connector
Example #15
    def test_run_once(self):
        replicator = object_replicator.ObjectReplicator(
            dict(swift_dir=self.testdir, devices=self.devices, mount_check="false", timeout="300", stats_interval="1")
        )
        was_connector = object_replicator.http_connect
        object_replicator.http_connect = mock_http_connect(200)
        cur_part = "0"
        df = DiskFile(self.devices, "sda", cur_part, "a", "c", "o", FakeLogger())
        mkdirs(df.datadir)
        f = open(os.path.join(df.datadir, normalize_timestamp(time.time()) + ".data"), "wb")
        f.write("1234567890")
        f.close()
        ohash = hash_path("a", "c", "o")
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, cur_part, data_dir)
        process_arg_checker = []
        nodes = [node for node in self.ring.get_part_nodes(int(cur_part)) if node["ip"] not in _ips()]
        for node in nodes:
            rsync_mod = "%s::object/sda/objects/%s" % (node["ip"], cur_part)
            process_arg_checker.append((0, "", ["rsync", whole_path_from, rsync_mod]))
        with _mock_process(process_arg_checker):
            replicator.run_once()
        self.assertFalse(process_errors)

        object_replicator.http_connect = was_connector
Example #16
 def setUp(self):
     self.orig_hp = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
     utils.HASH_PATH_PREFIX = 'info'
     utils.HASH_PATH_SUFFIX = 'info'
     self.testdir = os.path.join(mkdtemp(), 'tmp_test_cli_info')
     utils.mkdirs(self.testdir)
     rmtree(self.testdir)
     utils.mkdirs(os.path.join(self.testdir, 'sda1'))
     utils.mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
     utils.mkdirs(os.path.join(self.testdir, 'sdb1'))
     utils.mkdirs(os.path.join(self.testdir, 'sdb1', 'tmp'))
     self.account_ring_path = os.path.join(self.testdir, 'account.ring.gz')
     with closing(GzipFile(self.account_ring_path, 'wb')) as f:
         pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                     [{'id': 0, 'zone': 0, 'device': 'sda1',
                       'ip': '127.0.0.1', 'port': 42},
                      {'id': 1, 'zone': 1, 'device': 'sdb1',
                       'ip': '127.0.0.2', 'port': 43}], 30),
                     f)
     self.container_ring_path = os.path.join(self.testdir,
                                             'container.ring.gz')
     with closing(GzipFile(self.container_ring_path, 'wb')) as f:
         pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                     [{'id': 0, 'zone': 0, 'device': 'sda1',
                       'ip': '127.0.0.3', 'port': 42},
                      {'id': 1, 'zone': 1, 'device': 'sdb1',
                       'ip': '127.0.0.4', 'port': 43}], 30),
                     f)
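The pickled RingData written above can be read back through the normal
ring interface; a sketch using the same constructor style as Example
#38 below ('AUTH_test' is an illustrative account name):

    # Load the serialized account ring and look up a placement.
    account_ring = ring.Ring(self.testdir, ring_name='account')
    part, nodes = account_ring.get_nodes('AUTH_test')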
Example #17
 def initialize(self):
     print "initialize start"
     utils.mkdirs(os.path.dirname(self.db_file))
     self.conn = sqlite3.connect(self.db_file, check_same_thread=False)
     self.conn.executescript(
         """
         CREATE TABLE data_crawlers (
             dev_path TEXT,
             datatype TEXT,
             slowdown FLOAT DEFAULT 0.001,
             cycle INTEGER DEFAULT 0,
             UNIQUE (dev_path, datatype)  
         );
         CREATE TABLE data_crawler_runtime (
             dev_path TEXT,
             datatype TEXT,
             cycle INTEGER,
             interval FLOAT,
             slowdown FLOAT, 
             parts INTEGER, 
             timestamp TEXT,
             UNIQUE (dev_path, datatype)  
         );
         """
     )
     self.conn.commit()
     print "initialize DONE"
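A sketch of registering a crawler row against the schema above; the
values are illustrative:

    # INSERT OR IGNORE respects the UNIQUE (dev_path, datatype)
    # constraint, so re-registering an existing crawler is a no-op.
    self.conn.execute(
        'INSERT OR IGNORE INTO data_crawlers (dev_path, datatype) '
        'VALUES (?, ?)', ('/srv/node/sda', 'object'))
    self.conn.commit()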
Example #18
 def test_delete_partition_with_handoff_delete_fail_in_other_region(self):
     with mock.patch('swift.obj.replicator.http_connect',
                     mock_http_connect(200)):
         df = self.df_mgr.get_diskfile('sda', '1', 'a', 'c', 'o')
         mkdirs(df._datadir)
         f = open(os.path.join(df._datadir,
                               normalize_timestamp(time.time()) + '.data'),
                  'wb')
         f.write('1234567890')
         f.close()
         ohash = hash_path('a', 'c', 'o')
         data_dir = ohash[-3:]
         whole_path_from = os.path.join(self.objects, '1', data_dir)
         part_path = os.path.join(self.objects, '1')
         self.assertTrue(os.access(part_path, os.F_OK))
         ring = self.replicator.get_object_ring(0)
         nodes = [node for node in
                  ring.get_part_nodes(1)
                  if node['ip'] not in _ips()]
         process_arg_checker = []
         for node in nodes:
             rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
             if node['region'] != 1:
                  # force the rsync calls for the other region to fail
                 ret_code = 1
             else:
                 ret_code = 0
             process_arg_checker.append(
                 (ret_code, '', ['rsync', whole_path_from, rsync_mod]))
         with _mock_process(process_arg_checker):
             self.replicator.replicate()
         # The file should still exist
         self.assertTrue(os.access(part_path, os.F_OK))
Example #19
 def test_run_once_1(self):
     conf = dict(swift_dir=self.testdir, devices=self.devices,
                 mount_check='false', timeout='300', stats_interval='1')
     replicator = object_replicator.ObjectReplicator(conf)
     was_connector = object_replicator.http_connect
     object_replicator.http_connect = mock_http_connect(200)
     cur_part = '0'
     df = self.df_mgr.get_diskfile('sda', cur_part, 'a', 'c', 'o',
                                   policy_idx=1)
     mkdirs(df._datadir)
     f = open(os.path.join(df._datadir,
                           normalize_timestamp(time.time()) + '.data'),
              'wb')
     f.write('1234567890')
     f.close()
     ohash = hash_path('a', 'c', 'o')
     data_dir = ohash[-3:]
     whole_path_from = os.path.join(self.objects_1, cur_part, data_dir)
     process_arg_checker = []
     ring = replicator.get_object_ring(1)
     nodes = [node for node in
              ring.get_part_nodes(int(cur_part))
              if node['ip'] not in _ips()]
     rsync_mods = tuple(['%s::object/sda/objects-1/%s' %
                         (node['ip'], cur_part) for node in nodes])
     for node in nodes:
         process_arg_checker.append(
             (0, '', ['rsync', whole_path_from, rsync_mods]))
     with _mock_process(process_arg_checker):
         replicator.run_once()
     self.assertFalse(process_errors)
     object_replicator.http_connect = was_connector
Example #20
 def test_delete_partition_with_handoff_delete_failures(self):
     with mock.patch('swift.obj.replicator.http_connect',
                     mock_http_connect(200)):
         self.replicator.handoff_delete = 2
         df = diskfile.DiskFile(self.devices,
                                'sda', '1', 'a', 'c', 'o', FakeLogger())
         mkdirs(df.datadir)
         print df.datadir
         f = open(os.path.join(df.datadir,
                               normalize_timestamp(time.time()) + '.data'),
                  'wb')
         f.write('1234567890')
         f.close()
         ohash = hash_path('a', 'c', 'o')
         data_dir = ohash[-3:]
         whole_path_from = os.path.join(self.objects, '1', data_dir)
         part_path = os.path.join(self.objects, '1')
         self.assertTrue(os.access(part_path, os.F_OK))
         nodes = [node for node in
                  self.ring.get_part_nodes(1)
                  if node['ip'] not in _ips()]
         process_arg_checker = []
         for i, node in enumerate(nodes):
             rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
             if i in (0, 1):
                 # force two of the rsync calls to fail
                 ret_code = 1
             else:
                 ret_code = 0
             process_arg_checker.append(
                 (ret_code, '', ['rsync', whole_path_from, rsync_mod]))
         with _mock_process(process_arg_checker):
             self.replicator.replicate()
         # The file should still exist
         self.assertTrue(os.access(part_path, os.F_OK))
Example #21
    def setup_bad_zero_byte(self, with_ts=False):
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        ts_file_path = ''
        if with_ts:

            name_hash = hash_path('a', 'c', 'o')
            dir_path = os.path.join(self.devices, 'sda',
                               storage_directory(DATADIR, '0', name_hash))
            ts_file_path = os.path.join(dir_path, '99999.ts')
            if not os.path.exists(dir_path):
                mkdirs(dir_path)
            fp = open(ts_file_path, 'w')
            fp.close()

        etag = md5()
        with self.disk_file.mkstemp() as (fd, tmppath):
            etag = etag.hexdigest()
            metadata = {
                'ETag': etag,
                'X-Timestamp': str(normalize_timestamp(time.time())),
                'Content-Length': 10,
            }
            self.disk_file.put(fd, tmppath, metadata)
            etag = md5()
            etag = etag.hexdigest()
            metadata['ETag'] = etag
            write_metadata(fd, metadata)
        if self.disk_file.data_file:
            return self.disk_file.data_file
        return ts_file_path
Example #22
    def test_run_once(self):
        replicator = object_replicator.ObjectReplicator(
            dict(swift_dir=self.testdir, devices=self.devices,
                mount_check='false', timeout='300', stats_interval='1'))
        was_connector = object_replicator.http_connect
        object_replicator.http_connect = mock_http_connect(200)
        cur_part = '0'
        df = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o',
                      FakeLogger())
        mkdirs(df.datadir)
        f = open(os.path.join(df.datadir,
                              normalize_timestamp(time.time()) + '.data'),
                 'wb')
        f.write('1234567890')
        f.close()
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, cur_part, data_dir)
        process_arg_checker = []
        nodes = [node for node in
                 self.ring.get_part_nodes(int(cur_part))
                     if node['ip'] not in _ips()]
        for node in nodes:
            rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], cur_part)
            process_arg_checker.append(
                (0, '', ['rsync', whole_path_from, rsync_mod]))
        with _mock_process(process_arg_checker):
            replicator.run_once()
        self.assertFalse(process_errors)

        object_replicator.http_connect = was_connector
Example #23
 def test_delete_partition(self):
     with mock.patch('swift.obj.replicator.http_connect',
                     mock_http_connect(200)):
         df = diskfile.DiskFile(self.devices,
                                'sda', '1', 'a', 'c', 'o', FakeLogger())
         mkdirs(df.datadir)
         print df.datadir
         f = open(os.path.join(df.datadir,
                               normalize_timestamp(time.time()) + '.data'),
                  'wb')
         f.write('1234567890')
         f.close()
         ohash = hash_path('a', 'c', 'o')
         data_dir = ohash[-3:]
         whole_path_from = os.path.join(self.objects, '1', data_dir)
         part_path = os.path.join(self.objects, '1')
         self.assertTrue(os.access(part_path, os.F_OK))
         nodes = [node for node in
                  self.ring.get_part_nodes(1)
                  if node['ip'] not in _ips()]
         process_arg_checker = []
         for node in nodes:
             rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
             process_arg_checker.append(
                 (0, '', ['rsync', whole_path_from, rsync_mod]))
         with _mock_process(process_arg_checker):
             self.replicator.replicate()
         self.assertFalse(os.access(part_path, os.F_OK))
Example #24
    def setup_bad_zero_byte(self, with_ts=False):
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        ts_file_path = ""
        if with_ts:

            name_hash = hash_path("a", "c", "o")
            dir_path = os.path.join(self.devices, "sda", storage_directory(DATADIR, "0", name_hash))
            ts_file_path = os.path.join(dir_path, "99999.ts")
            if not os.path.exists(dir_path):
                mkdirs(dir_path)
            fp = open(ts_file_path, "w")
            fp.close()

        etag = md5()
        with self.disk_file.mkstemp() as (fd, tmppath):
            etag = etag.hexdigest()
            metadata = {"ETag": etag, "X-Timestamp": str(normalize_timestamp(time.time())), "Content-Length": 10}
            self.disk_file.put(fd, tmppath, metadata)
            etag = md5()
            etag = etag.hexdigest()
            metadata["ETag"] = etag
            write_metadata(fd, metadata)
        if self.disk_file.data_file:
            return self.disk_file.data_file
        return ts_file_path
Example #25
    def test_hash_suffix_multi_file_two(self):
        df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
                               FakeLogger())
        mkdirs(df.datadir)
        for tdiff in [1, 50, 100, 500]:
            suffs = ['.meta', '.data']
            if tdiff > 50:
                suffs.append('.ts')
            for suff in suffs:
                f = open(
                    os.path.join(
                        df.datadir,
                        normalize_timestamp(int(time()) - tdiff) + suff),
                    'wb')
                f.write('1234567890')
                f.close()

        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        hsh_path = os.listdir(whole_path_from)[0]
        whole_hsh_path = os.path.join(whole_path_from, hsh_path)

        diskfile.hash_suffix(whole_path_from, 99)
        # only the meta and data should be left
        self.assertEquals(len(os.listdir(whole_hsh_path)), 2)
Example #26
    def setUp(self):
        super(TestBaseSsync, self).setUp()
        self.device = 'dev'
        self.partition = '9'
        # sender side setup
        self.tx_testdir = os.path.join(self.tmpdir, 'tmp_test_ssync_sender')
        utils.mkdirs(os.path.join(self.tx_testdir, self.device))
        self.daemon = FakeReplicator(self.tx_testdir)

        # rx side setup
        self.rx_testdir = os.path.join(self.tmpdir, 'tmp_test_ssync_receiver')
        utils.mkdirs(os.path.join(self.rx_testdir, self.device))
        conf = {
            'devices': self.rx_testdir,
            'mount_check': 'false',
            'replication_one_per_device': 'false',
            'log_requests': 'false'}
        self.rx_controller = server.ObjectController(conf)
        self.ts_iter = (Timestamp(t)
                        for t in itertools.count(int(time.time())))
        self.rx_ip = '127.0.0.1'
        sock = eventlet.listen((self.rx_ip, 0))
        self.rx_server = eventlet.spawn(
            eventlet.wsgi.server, sock, self.rx_controller, utils.NullLogger())
        self.rx_port = sock.getsockname()[1]
        self.rx_node = {'replication_ip': self.rx_ip,
                        'replication_port': self.rx_port,
                        'device': self.device}
Example #27
    def _create_expiring_tracker_object(self, object_path):
        try:

            # Check if gsexpiring volume is present in ring
            if not any(d.get('device', None) == self.expiring_objects_account
                       for d in self.object_ring.devs):
                raise Exception("%s volume not in ring" %
                                self.expiring_objects_account)

            # Check if gsexpiring is mounted.
            expiring_objects_account_path = \
                os.path.join(self.devices, self.expiring_objects_account)
            mount_check = self._diskfile_router['junk'].mount_check
            if mount_check and not do_ismount(expiring_objects_account_path):
                raise Exception("Path %s doesn't exist or is not a mount "
                                "point" % expiring_objects_account_path)

            # Create object directory
            object_dir = os.path.dirname(object_path)
            try:
                mkdirs(object_dir)
            except OSError as err:
                if err.errno != errno.EEXIST:  # tolerate a mkdir race
                    raise

            # Create zero-byte file
            try:
                os.mknod(object_path)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        except Exception as e:
            self.logger.error("Creation of tracker object %s failed: %s" %
                              (object_path, str(e)))
Example #28
 def _create_ondisk_file(self, df, data, timestamp, ext=".data"):
     mkdirs(df.datadir)
     timestamp = normalize_timestamp(timestamp)
     data_file = os.path.join(df.datadir, timestamp + ext)
     with open(data_file, "wb") as f:
         f.write(data)
         md = {"X-Timestamp": timestamp}
         setxattr(f.fileno(), diskfile.METADATA_KEY, pickle.dumps(md, diskfile.PICKLE_PROTOCOL))
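The metadata stored in the xattr can be read back symmetrically; a
sketch, assuming getxattr is the counterpart of the setxattr imported
by these tests:

    # Recover the pickled metadata written by _create_ondisk_file.
    from xattr import getxattr
    md = pickle.loads(getxattr(data_file, diskfile.METADATA_KEY))
    assert md['X-Timestamp'] == timestamp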
Example #29
    def setup_datadir(self):
        """
        Setup datadir, devices/device/datadir

        :returns: path to datadir
        """
        path = os.path.join(self.devices, self.device, self.datadir)
        mkdirs(path)
        return path
Example #30
    def setup_tmp(self):
        """
        Setup tmp, devices/device/tmp

        :returns: path to tmp
        """
        path = os.path.join(self.devices, self.device, 'tmp')
        mkdirs(path)
        return path
Example #31
    def test_hash_suffix_multi_file_one(self):
        df = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
        mkdirs(df._datadir)
        for tdiff in [1, 50, 100, 500]:
            for suff in ['.meta', '.data', '.ts']:
                f = open(
                    os.path.join(
                        df._datadir,
                        normalize_timestamp(int(time()) - tdiff) + suff), 'wb')
                f.write('1234567890')
                f.close()

        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        hsh_path = os.listdir(whole_path_from)[0]
        whole_hsh_path = os.path.join(whole_path_from, hsh_path)

        diskfile.hash_suffix(whole_path_from, 99)
        # only the tombstone should be left
        self.assertEquals(len(os.listdir(whole_hsh_path)), 1)
Example #32
    def test_get_hashes_unmodified_and_zero_bytes(self):
        df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
                               FakeLogger())
        mkdirs(df.datadir)
        part = os.path.join(self.objects, '0')
        open(os.path.join(part, diskfile.HASH_FILE), 'w').close()
        # Now the hash file is zero bytes.
        i = [0]

        def _getmtime(filename):
            i[0] += 1
            return 1

        with unit_mock({'swift.obj.diskfile.getmtime': _getmtime}):
            hashed, hashes = diskfile.get_hashes(part, recalculate=[])
        # getmtime will not actually get called: pickle.load raises an
        # exception first, and force_rewrite then short-circuits the check
        # that decides whether to write out a fresh hashes_file.
        self.assertEquals(i[0], 0)
        self.assertTrue('a83' in hashes)
Example #33
    def test_hash_suffix_hash_dir_is_file_quarantine(self):
        df = diskfile.DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
                               FakeLogger())
        mkdirs(os.path.dirname(df.datadir))
        open(df.datadir, 'wb').close()
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        orig_quarantine_renamer = diskfile.quarantine_renamer
        called = [False]

        def wrapped(*args, **kwargs):
            called[0] = True
            return orig_quarantine_renamer(*args, **kwargs)

        try:
            diskfile.quarantine_renamer = wrapped
            diskfile.hash_suffix(whole_path_from, 101)
        finally:
            diskfile.quarantine_renamer = orig_quarantine_renamer
        self.assertTrue(called[0])
Example #34
    def test_get_hashes_modified(self):
        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
        mkdirs(df.datadir)
        with open(
                os.path.join(df.datadir,
                             normalize_timestamp(time.time()) + '.ts'),
                'wb') as f:
            f.write('1234567890')
        part = os.path.join(self.objects, '0')
        hashed, hashes = object_replicator.get_hashes(part)
        i = [0]

        def getmtime(filename):
            if i[0] < 3:
                i[0] += 1
            return i[0]

        with mock({'os.path.getmtime': getmtime}):
            hashed, hashes = object_replicator.get_hashes(part,
                                                          recalculate=['a83'])
        self.assertEquals(i[0], 3)
Example #35
 def dispatch(self, replicate_args, args):
     if not hasattr(args, 'pop'):
         return HTTPBadRequest(body='Invalid object type')
     op = args.pop(0)
     drive, partition, hsh = replicate_args
     if self.mount_check and not ismount(os.path.join(self.root, drive)):
         return Response(status='507 %s is not mounted' % drive)
     db_file = os.path.join(self.root, drive,
                            storage_directory(self.datadir, partition, hsh),
                            hsh + '.db')
     if op == 'rsync_then_merge':
         return self.rsync_then_merge(drive, db_file, args)
     if op == 'complete_rsync':
         return self.complete_rsync(drive, db_file, args)
     else:
         # someone might be about to rsync a db to us,
         # make sure there's a tmp dir to receive it.
         mkdirs(os.path.join(self.root, drive, 'tmp'))
         if not os.path.exists(db_file):
             return HTTPNotFound()
         return getattr(self, op)(self.broker_class(db_file), args)
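A sketch of the argument shapes dispatch() expects, inferred from the
unpacking at the top of the method; the concrete values are
hypothetical:

    # replicate_args names the db by drive, partition and hash;
    # args[0] selects the operation and the rest are its parameters.
    resp = rpc.dispatch(('sda1', '0', hsh), ['complete_rsync', tmp_filename])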
Example #36
    def setUp(self):
        self.app = proxy.Application(None,
                                     FakeMemcache(),
                                     logger=debug_logger('proxy-ut'),
                                     account_ring=FakeRing(replicas=1),
                                     container_ring=FakeRing(replicas=1))
        monkey_patch_mimetools()
        self.tmpdir = mkdtemp()
        self.testdir = os.path.join(self.tmpdir,
                                    'tmp_test_object_server_ObjectController')
        mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
        conf = {'devices': self.testdir, 'mount_check': 'false'}
        self.obj_ctlr = object_server.ObjectController(
            conf, logger=debug_logger('obj-ut'))

        http_connect = get_http_connect(fake_http_connect(200),
                                        fake_http_connect(200),
                                        FakeServerConnection(self.obj_ctlr))

        swift.proxy.controllers.base.http_connect = http_connect
        swift.proxy.controllers.obj.http_connect = http_connect
Example #37
 def test_run_once_recover_from_failure(self):
     replicator = object_replicator.ObjectReplicator(
         dict(swift_dir=self.testdir, devices=self.devices,
             mount_check='false', timeout='300', stats_interval='1'))
     was_connector = object_replicator.http_connect
     object_replicator.http_connect = mock_http_connect(200)
      # Write some files into '1' and run replicate; they should be
      # moved to the other partitions and then the node should get
      # deleted.
     cur_part = '1'
     df = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o')
     mkdirs(df.datadir)
     f = open(os.path.join(df.datadir,
                           normalize_timestamp(time.time()) + '.data'),
              'wb')
     f.write('1234567890')
     f.close()
     ohash = hash_path('a', 'c', 'o')
     data_dir = ohash[-3:]
     whole_path_from = os.path.join(self.objects, cur_part, data_dir)
     process_arg_checker = []
      nodes = [node for node in
               self.ring.get_part_nodes(int(cur_part))
               if node['ip'] not in _ips()]
     for node in nodes:
         rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], cur_part)
         process_arg_checker.append(
             (0, '', ['rsync', whole_path_from, rsync_mod]))
     self.assertTrue(os.access(os.path.join(self.objects,
                                            '1', data_dir, ohash),
                               os.F_OK))
     with _mock_process(process_arg_checker):
         replicator.run_once()
     self.assertFalse(process_errors)
     for i, result in [('0', True), ('1', False),
                       ('2', True), ('3', True)]:
         self.assertEquals(os.access(
                 os.path.join(self.objects,
                              i, object_replicator.HASH_FILE),
                 os.F_OK), result)
     object_replicator.http_connect = was_connector
Example #38
    def test_print_obj_curl_command_ipv6(self):
        # Note: policy 3 has IPv6 addresses in its ring
        datafile3 = os.path.join(
            self.testdir,
            'sda', 'objects-3', '1', 'ea8',
            'db4449e025aca992307c7c804a67eea8', '1402017884.18202.data')
        utils.mkdirs(os.path.dirname(datafile3))
        with open(datafile3, 'wb') as fp:
            md = {'name': '/AUTH_admin/c/obj',
                  'Content-Type': 'application/octet-stream',
                  'ETag': 'd41d8cd98f00b204e9800998ecf8427e',
                  'Content-Length': 0}
            write_metadata(fp, md)

        object_ring = ring.Ring(self.testdir, ring_name='object-3')
        part, nodes = object_ring.get_nodes('AUTH_admin', 'c', 'obj')
        node = nodes[0]

        out = StringIO()
        hash_dir = os.path.dirname(datafile3)
        file_name = os.path.basename(datafile3)

        # Change working directory to object hash dir
        cwd = os.getcwd()
        try:
            os.chdir(hash_dir)
            with mock.patch('sys.stdout', out):
                print_obj(file_name, swift_dir=self.testdir)
        finally:
            os.chdir(cwd)

        exp_curl = (
            'curl -g -I -XHEAD '
            '"http://[{host}]:{port}'
            '/{device}/{part}/AUTH_admin/c/obj" ').format(
                host=node['ip'],
                port=node['port'],
                device=node['device'],
                part=part)
        self.assertIn(exp_curl, out.getvalue())
Example #39
    def find_and_process(self):
        src_filename = datetime.now(self.time_zone).strftime(
            self.filename_format)
        working_dir = os.path.join(self.target_dir,
                                   '.%s-stats_tmp' % self.stats_type)
        shutil.rmtree(working_dir, ignore_errors=True)
        mkdirs(working_dir)
        tmp_filename = os.path.join(working_dir, src_filename)
        hasher = hashlib.md5()
        try:
            with open(tmp_filename, 'wb') as statfile:
                statfile.write(self.get_header())
                for device in os.listdir(self.devices):
                    if self.mount_check and not check_mount(
                            self.devices, device):
                        self.logger.error(
                            _("Device %s is not mounted, skipping.") % device)
                        continue
                    db_dir = os.path.join(self.devices, device, self.data_dir)
                    if not os.path.exists(db_dir):
                        self.logger.debug(
                            _("Path %s does not exist, skipping.") % db_dir)
                        continue
                    for root, dirs, files in os.walk(db_dir, topdown=False):
                        for filename in files:
                            if filename.endswith('.db'):
                                db_path = os.path.join(root, filename)
                                try:
                                    line_data = self.get_data(db_path)
                                except sqlite3.Error, err:
                                    self.logger.info(
                                        _("Error accessing db %s: %s") %
                                        (db_path, err))
                                    continue
                                if line_data:
                                    statfile.write(line_data)
                                    hasher.update(line_data)

            src_filename += hasher.hexdigest()
            renamer(tmp_filename, os.path.join(self.target_dir, src_filename))
Example #40
    def setUp(self):
        self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
        self.devices = os.path.join(self.testdir, 'node')
        self.rcache = os.path.join(self.testdir, 'object.recon')
        self.logger = FakeLogger()
        rmtree(self.testdir, ignore_errors=1)
        mkdirs(os.path.join(self.devices, 'sda'))
        os.mkdir(os.path.join(self.devices, 'sdb'))

        # policy 0
        self.objects = os.path.join(self.devices, 'sda',
                                    get_data_dir(POLICIES[0]))
        self.objects_2 = os.path.join(self.devices, 'sdb',
                                      get_data_dir(POLICIES[0]))
        os.mkdir(self.objects)
        # policy 1
        self.objects_p1 = os.path.join(self.devices, 'sda',
                                       get_data_dir(POLICIES[1]))
        self.objects_2_p1 = os.path.join(self.devices, 'sdb',
                                         get_data_dir(POLICIES[1]))
        os.mkdir(self.objects_p1)

        self.parts = {}
        self.parts_p1 = {}
        for part in ['0', '1', '2', '3']:
            self.parts[part] = os.path.join(self.objects, part)
            self.parts_p1[part] = os.path.join(self.objects_p1, part)
            os.mkdir(os.path.join(self.objects, part))
            os.mkdir(os.path.join(self.objects_p1, part))

        self.conf = dict(
            devices=self.devices,
            mount_check='false',
            object_size_stats='10,100,1024,10240')
        self.df_mgr = DiskFileManager(self.conf, self.logger)

        # diskfiles for policy 0, 1
        self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o',
                                                  policy=POLICIES[0])
        self.disk_file_p1 = self.df_mgr.get_diskfile('sda', '0', 'a', 'c',
                                                     'o', policy=POLICIES[1])
Example #41
    def test_invalidate_hash(self):
        def assertFileData(file_path, data):
            with open(file_path, 'r') as fp:
                fdata = fp.read()
                self.assertEquals(pickle.loads(fdata), pickle.loads(data))

        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
        mkdirs(df.datadir)
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        hashes_file = os.path.join(self.objects, '0', object_base.HASH_FILE)
        # test that the exception from a non-existent file is caught
        self.assertEquals(object_base.invalidate_hash(whole_path_from), None)
        # test that hashes get cleared
        check_pickle_data = pickle.dumps({data_dir: None},
                                         object_base.PICKLE_PROTOCOL)
        for data_hash in [{data_dir: None}, {data_dir: 'abcdefg'}]:
            with open(hashes_file, 'wb') as fp:
                pickle.dump(data_hash, fp, object_base.PICKLE_PROTOCOL)
            object_base.invalidate_hash(whole_path_from)
            assertFileData(hashes_file, check_pickle_data)
Example #42
    def setUp(self):
        self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
        self.devices = os.path.join(self.testdir, 'node')
        self.logger = FakeLogger()
        rmtree(self.testdir, ignore_errors=1)
        mkdirs(os.path.join(self.devices, 'sda'))
        self.objects = os.path.join(self.devices, 'sda', 'objects')

        os.mkdir(os.path.join(self.devices, 'sdb'))
        self.objects_2 = os.path.join(self.devices, 'sdb', 'objects')

        os.mkdir(self.objects)
        self.parts = {}
        for part in ['0', '1', '2', '3']:
            self.parts[part] = os.path.join(self.objects, part)
            os.mkdir(os.path.join(self.objects, part))

        self.conf = dict(devices=self.devices,
                         mount_check='false',
                         object_size_stats='10,100,1024,10240')
        self.disk_file = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
                                  self.logger)
Example #43
 def test_run_once_1(self):
     conf = dict(swift_dir=self.testdir,
                 devices=self.devices,
                 mount_check='false',
                 timeout='300',
                 stats_interval='1')
     replicator = object_replicator.ObjectReplicator(conf)
     was_connector = object_replicator.http_connect
     object_replicator.http_connect = mock_http_connect(200)
     cur_part = '0'
     df = self.df_mgr.get_diskfile('sda',
                                   cur_part,
                                   'a',
                                   'c',
                                   'o',
                                   policy_idx=1)
     mkdirs(df._datadir)
     f = open(
         os.path.join(df._datadir,
                      normalize_timestamp(time.time()) + '.data'), 'wb')
     f.write('1234567890')
     f.close()
     ohash = hash_path('a', 'c', 'o')
     data_dir = ohash[-3:]
     whole_path_from = os.path.join(self.objects_1, cur_part, data_dir)
     process_arg_checker = []
     ring = replicator.get_object_ring(1)
     nodes = [
         node for node in ring.get_part_nodes(int(cur_part))
         if node['ip'] not in _ips()
     ]
     for node in nodes:
         rsync_mod = '%s::object/sda/objects-1/%s' % (node['ip'], cur_part)
         process_arg_checker.append(
             (0, '', ['rsync', whole_path_from, rsync_mod]))
     with _mock_process(process_arg_checker):
         replicator.run_once()
     self.assertFalse(process_errors)
     object_replicator.http_connect = was_connector
Example #44
    def setUp(self):
        skip_if_no_xattrs()
        self.app = proxy.Application(None,
                                     logger=debug_logger('proxy-ut'),
                                     account_ring=FakeRing(replicas=1),
                                     container_ring=FakeRing(replicas=1))
        self.copy_app = ServerSideCopyMiddleware(self.app, {})
        self.tmpdir = mkdtemp()
        self.testdir = os.path.join(self.tmpdir,
                                    'tmp_test_object_server_ObjectController')
        mkdirs(os.path.join(self.testdir, 'sda', 'tmp'))
        conf = {'devices': self.testdir, 'mount_check': 'false'}
        self.obj_ctlr = object_server.ObjectController(
            conf, logger=debug_logger('obj-ut'))

        http_connect = get_http_connect(fake_http_connect(200),
                                        fake_http_connect(200),
                                        FakeServerConnection(self.obj_ctlr))

        self.orig_base_http_connect = swift.proxy.controllers.base.http_connect
        self.orig_obj_http_connect = swift.proxy.controllers.obj.http_connect
        swift.proxy.controllers.base.http_connect = http_connect
        swift.proxy.controllers.obj.http_connect = http_connect
Example #45
    def test_hash_suffix_multi_file_two(self):
        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
        mkdirs(df.datadir)
        for tdiff in [1, 50, 100, 500]:
            suffs = ['.meta', '.data']
            if tdiff > 50:
                suffs.append('.ts')
            for suff in suffs:
                f = open(os.path.join(df.datadir,
                        normalize_timestamp(int(time.time()) - tdiff) + suff),
                         'wb')
                f.write('1234567890')
                f.close()

        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        hsh_path = os.listdir(whole_path_from)[0]
        whole_hsh_path = os.path.join(whole_path_from, hsh_path)

        object_replicator.hash_suffix(whole_path_from, 99)
        # only the meta and data should be left
        self.assertEquals(len(os.listdir(whole_hsh_path)), 2)
Example #46
    def test_quarantine_same_file(self):
        df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
                               FakeLogger())
        mkdirs(df.datadir)
        f = open(
            os.path.join(df.datadir,
                         normalize_timestamp(time()) + '.data'), 'wb')
        setxattr(f.fileno(), diskfile.METADATA_KEY,
                 pickle.dumps({}, diskfile.PICKLE_PROTOCOL))
        df = diskfile.DiskFile(self.testdir, 'sda1', '0', 'a', 'c', 'o',
                               FakeLogger())
        new_dir = df.quarantine()
        quar_dir = os.path.join(
            self.testdir, 'sda1', 'quarantined', 'objects',
            os.path.basename(os.path.dirname(df.data_file)))
        self.assert_(os.path.isdir(quar_dir))
        self.assertEquals(quar_dir, new_dir)
        # have to remake the datadir and file
        mkdirs(df.datadir)
        f = open(
            os.path.join(df.datadir,
                         normalize_timestamp(time()) + '.data'), 'wb')
        setxattr(f.fileno(), diskfile.METADATA_KEY,
                 pickle.dumps({}, diskfile.PICKLE_PROTOCOL))

        df = diskfile.DiskFile(self.testdir,
                               'sda1',
                               '0',
                               'a',
                               'c',
                               'o',
                               FakeLogger(),
                               keep_data_fp=True)
        double_uuid_path = df.quarantine()
        self.assert_(os.path.isdir(double_uuid_path))
        self.assert_('-' in os.path.basename(double_uuid_path))
Example #47
 def setUp(self):
     self.orig_hp = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
     utils.HASH_PATH_PREFIX = 'info'
     utils.HASH_PATH_SUFFIX = 'info'
     self.testdir = os.path.join(mkdtemp(), 'tmp_test_cli_info')
     utils.mkdirs(self.testdir)
     rmtree(self.testdir)
     utils.mkdirs(os.path.join(self.testdir, 'sda1'))
     utils.mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
     utils.mkdirs(os.path.join(self.testdir, 'sdb1'))
     utils.mkdirs(os.path.join(self.testdir, 'sdb1', 'tmp'))
     self.account_ring_path = os.path.join(self.testdir, 'account.ring.gz')
     account_devs = [
         {'ip': '127.0.0.1', 'port': 42},
         {'ip': '127.0.0.2', 'port': 43},
     ]
     write_fake_ring(self.account_ring_path, *account_devs)
     self.container_ring_path = os.path.join(self.testdir,
                                             'container.ring.gz')
     container_devs = [
         {'ip': '127.0.0.3', 'port': 42},
         {'ip': '127.0.0.4', 'port': 43},
     ]
     write_fake_ring(self.container_ring_path, *container_devs)
     self.object_ring_path = os.path.join(self.testdir, 'object.ring.gz')
     object_devs = [
         {'ip': '127.0.0.3', 'port': 42},
         {'ip': '127.0.0.4', 'port': 43},
     ]
     write_fake_ring(self.object_ring_path, *object_devs)
     # another ring for policy 1
     self.one_ring_path = os.path.join(self.testdir, 'object-1.ring.gz')
     write_fake_ring(self.one_ring_path, *object_devs)
     # ... and another for policy 2
     self.two_ring_path = os.path.join(self.testdir, 'object-2.ring.gz')
     write_fake_ring(self.two_ring_path, *object_devs)
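Pinning HASH_PATH_PREFIX/HASH_PATH_SUFFIX makes object placement
deterministic across runs, because hash_path mixes both values into the md5
it computes. A rough sketch of the computation (not the exact
implementation):

    from hashlib import md5

    def sketch_hash_path(account, container, obj):
        # prefix and suffix are utils.HASH_PATH_PREFIX/_SUFFIX ('info' here)
        return md5('info' + '/' + '/'.join((account, container, obj))
                   + 'info').hexdigest()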
Example #48
 def setUp(self):
     self.testdir = os.path.join(mkdtemp(), 'tmp_test_cli_find_shards')
     utils.mkdirs(self.testdir)
     rmtree(self.testdir)
     self.shard_data = [
         {'index': 0, 'lower': '', 'upper': 'obj09', 'object_count': 10},
         {'index': 1, 'lower': 'obj09', 'upper': 'obj19',
          'object_count': 10},
         {'index': 2, 'lower': 'obj19', 'upper': 'obj29',
          'object_count': 10},
         {'index': 3, 'lower': 'obj29', 'upper': 'obj39',
          'object_count': 10},
         {'index': 4, 'lower': 'obj39', 'upper': 'obj49',
          'object_count': 10},
         {'index': 5, 'lower': 'obj49', 'upper': 'obj59',
          'object_count': 10},
         {'index': 6, 'lower': 'obj59', 'upper': 'obj69',
          'object_count': 10},
         {'index': 7, 'lower': 'obj69', 'upper': 'obj79',
          'object_count': 10},
         {'index': 8, 'lower': 'obj79', 'upper': 'obj89',
          'object_count': 10},
         {'index': 9, 'lower': 'obj89', 'upper': '', 'object_count': 10},
     ]
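The shard_data fixture describes contiguous ranges over the container
namespace: each range's upper bound is the next one's lower bound, with ''
marking the open ends. A quick consistency check over the fixture:

    # ranges must tile the namespace with no gaps or overlaps
    for prev, cur in zip(self.shard_data, self.shard_data[1:]):
        assert prev['upper'] == cur['lower']
    assert self.shard_data[0]['lower'] == ''     # open at the low end
    assert self.shard_data[-1]['upper'] == ''    # open at the high end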
Example #49
 def test_delete_partition_with_handoff_delete_failures(self):
     with mock.patch('swift.obj.replicator.http_connect',
                     mock_http_connect(200)):
         self.replicator.handoff_delete = 2
         df = diskfile.DiskFile(self.devices, 'sda', '1', 'a', 'c', 'o',
                                FakeLogger())
         mkdirs(df.datadir)
         f = open(
             os.path.join(df.datadir,
                          normalize_timestamp(time.time()) + '.data'), 'wb')
         f.write('1234567890')
         f.close()
         ohash = hash_path('a', 'c', 'o')
         data_dir = ohash[-3:]
         whole_path_from = os.path.join(self.objects, '1', data_dir)
         part_path = os.path.join(self.objects, '1')
         self.assertTrue(os.access(part_path, os.F_OK))
         nodes = [
             node for node in self.ring.get_part_nodes(1)
             if node['ip'] not in _ips()
         ]
         process_arg_checker = []
         for i, node in enumerate(nodes):
             rsync_mod = '%s::object/sda/objects/%s' % (node['ip'], 1)
             if i in (0, 1):
                 # force two of the rsync calls to fail
                 ret_code = 1
             else:
                 ret_code = 0
             process_arg_checker.append(
                 (ret_code, '', ['rsync', whole_path_from, rsync_mod]))
         with _mock_process(process_arg_checker):
             self.replicator.replicate()
         # The file should still exist
         self.assertTrue(os.access(part_path, os.F_OK))
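With handoff_delete = 2 the replicator only removes a handoff partition
after at least two successful syncs; two of the three rsyncs are forced to
fail above, so only one succeeds and the partition survives. A minimal
sketch of that decision (sync_results is hypothetical, not Swift's actual
code):

    sync_results = [False, False, True]     # the three mocked rsync calls
    successes = sum(1 for ok in sync_results if ok)            # == 1
    delete_handoff = successes >= 2   # 1 >= handoff_delete -> False, keep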
Example #50
 def setUp(self):
     """ Set up for testing swift.object_server.ObjectController """
     self.testdir = os.path.join(mkdtemp(),
                                 'tmp_test_object_server_ObjectController')
     mkdirs(self.testdir)
     rmtree(self.testdir)
     mkdirs(os.path.join(self.testdir, 'sda1'))
     mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
     self.controller = container_server.ContainerController(
         {'devices': self.testdir, 'mount_check': 'false'})
Example #51
    def collect_parts(self, override_devices=None, override_partitions=None):
        """
        Helper for getting partitions in the top level reconstructor
        """
        override_devices = override_devices or []
        override_partitions = override_partitions or []
        ips = whataremyips(self.bind_ip)
        ec_policies = (policy for policy in POLICIES
                       if policy.policy_type == EC_POLICY)

        policy2devices = {}

        for policy in ec_policies:
            self.load_object_ring(policy)
            local_devices = list(
                six.moves.filter(
                    lambda dev: dev and is_local_device(
                        ips, self.port, dev['replication_ip'], dev[
                            'replication_port']), policy.object_ring.devs))

            if override_devices:
                local_devices = list(
                    six.moves.filter(
                        lambda dev_info: dev_info['device'] in
                        override_devices, local_devices))

            policy2devices[policy] = local_devices
            self.device_count += len(local_devices)

        all_parts = []

        for policy, local_devices in policy2devices.items():
            df_mgr = self._df_router[policy]
            for local_dev in local_devices:
                dev_path = df_mgr.get_dev_path(local_dev['device'])
                if not dev_path:
                    self.logger.warning(_('%s is not mounted'),
                                        local_dev['device'])
                    continue
                data_dir = get_data_dir(policy)
                obj_path = join(dev_path, data_dir)
                tmp_path = join(dev_path, get_tmp_dir(int(policy)))
                unlink_older_than(tmp_path, time.time() - df_mgr.reclaim_age)
                if not os.path.exists(obj_path):
                    try:
                        mkdirs(obj_path)
                    except Exception:
                        self.logger.exception('Unable to create %s' % obj_path)
                    continue
                try:
                    partitions = os.listdir(obj_path)
                except OSError:
                    self.logger.exception('Unable to list partitions in %r' %
                                          obj_path)
                    continue

                self.part_count += len(partitions)
                for partition in partitions:
                    part_path = join(obj_path, partition)
                    if partition in ('auditor_status_ALL.json',
                                     'auditor_status_ZBF.json'):
                        continue
                    if not partition.isdigit():
                        self.logger.warning(
                            'Unexpected entity in data dir: %r' % part_path)
                        self.delete_partition(part_path)
                        self.reconstruction_part_count += 1
                        continue
                    partition = int(partition)
                    if override_partitions and (partition
                                                not in override_partitions):
                        continue
                    part_info = {
                        'local_dev': local_dev,
                        'policy': policy,
                        'partition': partition,
                        'part_path': part_path,
                    }
                    all_parts.append(part_info)
        random.shuffle(all_parts)
        return all_parts
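This variant collects the whole list and shuffles it, so work fanned out to
workers doesn't walk one device's partitions in order. A usage sketch
(the reconstructor instance and worker function are hypothetical):

    for part_info in reconstructor.collect_parts(override_devices=['sda']):
        # each entry carries the device, policy, partition number and path
        process_partition(part_info['part_path'])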
Example #52
    def build_replication_jobs(self,
                               policy,
                               ips,
                               override_devices=None,
                               override_partitions=None):
        """
        Helper function for collect_jobs to build jobs for replication
        using replication style storage policy
        """
        jobs = []
        self.all_devs_info.update([(dev['replication_ip'], dev['device'])
                                   for dev in policy.object_ring.devs if dev])
        data_dir = get_data_dir(policy)
        found_local = False
        for local_dev in [
                dev for dev in policy.object_ring.devs if
            (dev and is_local_device(ips, self.port, dev['replication_ip'],
                                     dev['replication_port']) and
             (override_devices is None or dev['device'] in override_devices))
        ]:
            found_local = True
            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, data_dir)
            tmp_path = join(dev_path, get_tmp_dir(policy))
            if self.mount_check and not ismount(dev_path):
                self._add_failure_stats([
                    (failure_dev['replication_ip'], failure_dev['device'])
                    for failure_dev in policy.object_ring.devs if failure_dev
                ])
                self.logger.warning(_('%s is not mounted'),
                                    local_dev['device'])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception('ERROR creating %s' % obj_path)
                continue
            for partition in os.listdir(obj_path):
                if (override_partitions is not None
                        and partition not in override_partitions):
                    continue

                if (partition.startswith('auditor_status_')
                        and partition.endswith('.json')):
                    # ignore auditor status files
                    continue

                part_nodes = None
                try:
                    job_path = join(obj_path, partition)
                    part_nodes = policy.object_ring.get_part_nodes(
                        int(partition))
                    nodes = [
                        node for node in part_nodes
                        if node['id'] != local_dev['id']
                    ]
                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             obj_path=obj_path,
                             nodes=nodes,
                             delete=len(nodes) > len(part_nodes) - 1,
                             policy=policy,
                             partition=partition,
                             region=local_dev['region']))
                except ValueError:
                    if part_nodes:
                        self._add_failure_stats([
                            (failure_dev['replication_ip'],
                             failure_dev['device']) for failure_dev in nodes
                        ])
                    else:
                        self._add_failure_stats([
                            (failure_dev['replication_ip'],
                             failure_dev['device'])
                            for failure_dev in policy.object_ring.devs
                            if failure_dev
                        ])
                    continue
        if not found_local:
            self.logger.error(
                "Can't find itself in policy with index %d with"
                " ips %s and with port %s in ring file, not"
                " replicating", int(policy), ", ".join(ips), self.port)
        return jobs
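Each job bundles everything one partition sync needs: the local path, the
remote nodes (the local device filtered out), and a delete flag that is
true only for handoff partitions, i.e. when there are more remote nodes
than replicas minus one. A sketch of consuming the jobs (the helpers are
hypothetical):

    for job in replicator.build_replication_jobs(policy, ips):
        sync_partition(job['path'], job['nodes'])
        if job['delete']:
            # handoff partition: safe to remove once the syncs succeed
            remove_partition(job['path'])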
Example #53
    def test_run_once(self, mock_check_drive):
        mock_check_drive.side_effect = lambda r, d, mc: os.path.join(r, d)
        ou = object_updater.ObjectUpdater(
            {
                'devices': self.devices_dir,
                'mount_check': 'false',
                'swift_dir': self.testdir,
                'interval': '1',
                'concurrency': '1',
                'node_timeout': '15'
            },
            logger=self.logger)
        ou.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
        os.mkdir(async_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # each run calls check_device
        self.assertEqual([
            mock.call(self.devices_dir, 'sda1', False),
            mock.call(self.devices_dir, 'sda1', False),
        ], mock_check_drive.mock_calls)
        mock_check_drive.reset_mock()

        ou = object_updater.ObjectUpdater(
            {
                'devices': self.devices_dir,
                'mount_check': 'TrUe',
                'swift_dir': self.testdir,
                'interval': '1',
                'concurrency': '1',
                'node_timeout': '15'
            },
            logger=self.logger)
        odd_dir = os.path.join(async_dir, 'not really supposed to be here')
        os.mkdir(odd_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertEqual([
            mock.call(self.devices_dir, 'sda1', True),
        ], mock_check_drive.mock_calls)

        ohash = hash_path('a', 'c', 'o')
        odir = os.path.join(async_dir, ohash[-3:])
        mkdirs(odir)
        older_op_path = os.path.join(
            odir, '%s-%s' % (ohash, normalize_timestamp(time() - 1)))
        op_path = os.path.join(odir,
                               '%s-%s' % (ohash, normalize_timestamp(time())))
        for path in (op_path, older_op_path):
            with open(path, 'wb') as async_pending:
                pickle.dump(
                    {
                        'op': 'PUT',
                        'account': 'a',
                        'container': 'c',
                        'obj': 'o',
                        'headers': {
                            'X-Container-Timestamp': normalize_timestamp(0)
                        }
                    }, async_pending)
        ou.run_once()
        self.assertTrue(not os.path.exists(older_op_path))
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(), {
            'failures': 1,
            'unlinks': 1
        })
        self.assertIsNone(pickle.load(open(op_path)).get('successes'))

        bindsock = listen_zero()

        def accepter(sock, return_code):
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEqual(inc.readline(),
                                     'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
                    headers = HeaderKeyDict()
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0]] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assertTrue('x-container-timestamp' in headers)
                    self.assertTrue(
                        'X-Backend-Storage-Policy-Index' in headers)
            except BaseException as err:
                return err
            return None

        def accept(return_codes):
            try:
                events = []
                for code in return_codes:
                    with Timeout(3):
                        sock, addr = bindsock.accept()
                        events.append(spawn(accepter, sock, code))
                for event in events:
                    err = event.wait()
                    if err:
                        raise err
            except BaseException as err:
                return err
            return None

        event = spawn(accept, [201, 500, 500])
        for dev in ou.get_container_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]

        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(), {'failures': 1})
        self.assertEqual([0], pickle.load(open(op_path)).get('successes'))

        event = spawn(accept, [404, 201])
        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(), {'failures': 1})
        self.assertEqual([0, 2], pickle.load(open(op_path)).get('successes'))

        event = spawn(accept, [201])
        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err

        # we remove the async_pending and its containing suffix dir, but not
        # anything above that
        self.assertFalse(os.path.exists(op_path))
        self.assertFalse(os.path.exists(os.path.dirname(op_path)))
        self.assertTrue(
            os.path.exists(os.path.dirname(os.path.dirname(op_path))))
        self.assertEqual(ou.logger.get_increment_counts(), {
            'unlinks': 1,
            'successes': 1
        })
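The async pendings written above live at
async_dir/<last 3 hex chars of the hash>/<hash>-<timestamp>, each a pickled
dict to which the updater adds a 'successes' list as container nodes accept
the update. A small sketch for inspecting one (reusing op_path from the
test):

    with open(op_path, 'rb') as f:
        pending = pickle.load(f)
    # e.g. {'op': 'PUT', 'account': 'a', ..., 'successes': [0, 2]}
    assert pending['op'] == 'PUT'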
Example #54
def setup_servers(the_object_server=object_server, extra_conf=None):
    """
    Set up proxy, account, container and object servers using a set of fake
    rings and policies.

    :param the_object_server: The object server module to use (optional,
                              defaults to swift.obj.server)
    :param extra_conf: A dict of config options that will update the basic
                       config passed to all server instances.
    :returns: A dict containing the following entries:
                  orig_POLICIES: the value of storage_policy.POLICIES prior to
                                 it being patched with fake policies
                  orig_SysLogHandler: the value of utils.SysLogHandler prior to
                                      it being patched
                  testdir: root directory used for test files
                  test_POLICIES: a StoragePolicyCollection of fake policies
                  test_servers: a tuple of test server instances
                  test_sockets: a tuple of sockets used by test servers
                  test_coros: a tuple of greenthreads in which test servers are
                              running
    """
    context = {
        "orig_POLICIES": storage_policy._POLICIES,
        "orig_SysLogHandler": utils.SysLogHandler}

    utils.HASH_PATH_SUFFIX = b'endcap'
    utils.SysLogHandler = mock.MagicMock()
    # Since we're starting up a lot here, we're going to test more than
    # just chunked puts; we're also going to test parts of
    # proxy_server.Application we couldn't get to easily otherwise.
    context["testdir"] = _testdir = \
        os.path.join(mkdtemp(), 'tmp_test_proxy_server_chunked')
    mkdirs(_testdir)
    rmtree(_testdir)
    for drive in ('sda1', 'sdb1', 'sdc1', 'sdd1', 'sde1',
                  'sdf1', 'sdg1', 'sdh1', 'sdi1', 'sdj1',
                  'sdk1', 'sdl1'):
        mkdirs(os.path.join(_testdir, drive, 'tmp'))
    conf = {'devices': _testdir, 'swift_dir': _testdir,
            'mount_check': 'false', 'allowed_headers':
            'content-encoding, x-object-manifest, content-disposition, foo',
            'allow_versions': 't'}
    if extra_conf:
        conf.update(extra_conf)
    prolis = listen_zero()
    acc1lis = listen_zero()
    acc2lis = listen_zero()
    con1lis = listen_zero()
    con2lis = listen_zero()
    obj1lis = listen_zero()
    obj2lis = listen_zero()
    obj3lis = listen_zero()
    obj4lis = listen_zero()
    obj5lis = listen_zero()
    obj6lis = listen_zero()
    objsocks = [obj1lis, obj2lis, obj3lis, obj4lis, obj5lis, obj6lis]
    context["test_sockets"] = \
        (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis, obj3lis,
         obj4lis, obj5lis, obj6lis)
    account_ring_path = os.path.join(_testdir, 'account.ring.gz')
    account_devs = [
        {'port': acc1lis.getsockname()[1]},
        {'port': acc2lis.getsockname()[1]},
    ]
    write_fake_ring(account_ring_path, *account_devs)
    container_ring_path = os.path.join(_testdir, 'container.ring.gz')
    container_devs = [
        {'port': con1lis.getsockname()[1]},
        {'port': con2lis.getsockname()[1]},
    ]
    write_fake_ring(container_ring_path, *container_devs)
    storage_policy._POLICIES = storage_policy.StoragePolicyCollection([
        StoragePolicy(0, 'zero', True),
        StoragePolicy(1, 'one', False),
        StoragePolicy(2, 'two', False),
        ECStoragePolicy(3, 'ec', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=2, ec_nparity=1, ec_segment_size=4096),
        ECStoragePolicy(4, 'ec-dup', ec_type=DEFAULT_TEST_EC_TYPE,
                        ec_ndata=2, ec_nparity=1, ec_segment_size=4096,
                        ec_duplication_factor=2)])
    obj_rings = {
        0: ('sda1', 'sdb1'),
        1: ('sdc1', 'sdd1'),
        2: ('sde1', 'sdf1'),
        # sdg1-sdi1 taken by policy 3; policy 4 uses sdg1-sdl1 (see below)
    }
    for policy_index, devices in obj_rings.items():
        policy = storage_policy.POLICIES[policy_index]
        obj_ring_path = os.path.join(_testdir, policy.ring_name + '.ring.gz')
        obj_devs = [
            {'port': objsock.getsockname()[1], 'device': dev}
            for objsock, dev in zip(objsocks, devices)]
        write_fake_ring(obj_ring_path, *obj_devs)

    # write_fake_ring can't handle a 3-element ring, and the EC policy needs
    # at least 6 devs to work with (ec_k=2, ec_m=1, duplication_factor=2),
    # so we do it manually
    devs = [{'id': 0, 'zone': 0, 'device': 'sdg1', 'ip': '127.0.0.1',
             'port': obj1lis.getsockname()[1]},
            {'id': 1, 'zone': 0, 'device': 'sdh1', 'ip': '127.0.0.1',
             'port': obj2lis.getsockname()[1]},
            {'id': 2, 'zone': 0, 'device': 'sdi1', 'ip': '127.0.0.1',
             'port': obj3lis.getsockname()[1]},
            {'id': 3, 'zone': 0, 'device': 'sdj1', 'ip': '127.0.0.1',
             'port': obj4lis.getsockname()[1]},
            {'id': 4, 'zone': 0, 'device': 'sdk1', 'ip': '127.0.0.1',
             'port': obj5lis.getsockname()[1]},
            {'id': 5, 'zone': 0, 'device': 'sdl1', 'ip': '127.0.0.1',
             'port': obj6lis.getsockname()[1]}]
    pol3_replica2part2dev_id = [[0, 1, 2, 0],
                                [1, 2, 0, 1],
                                [2, 0, 1, 2]]
    pol4_replica2part2dev_id = [[0, 1, 2, 3],
                                [1, 2, 3, 4],
                                [2, 3, 4, 5],
                                [3, 4, 5, 0],
                                [4, 5, 0, 1],
                                [5, 0, 1, 2]]
    obj3_ring_path = os.path.join(
        _testdir, storage_policy.POLICIES[3].ring_name + '.ring.gz')
    part_shift = 30
    with closing(GzipFile(obj3_ring_path, 'wb')) as fh:
        pickle.dump(RingData(pol3_replica2part2dev_id, devs, part_shift), fh)

    obj4_ring_path = os.path.join(
        _testdir, storage_policy.POLICIES[4].ring_name + '.ring.gz')
    part_shift = 30
    with closing(GzipFile(obj4_ring_path, 'wb')) as fh:
        pickle.dump(RingData(pol4_replica2part2dev_id, devs, part_shift), fh)

    prosrv = proxy_server.Application(conf, logger=debug_logger('proxy'))
    for policy in storage_policy.POLICIES:
        # make sure all the rings are loaded
        prosrv.get_object_ring(policy.idx)
    # don't lose this one!
    context["test_POLICIES"] = storage_policy._POLICIES
    acc1srv = account_server.AccountController(
        conf, logger=debug_logger('acct1'))
    acc2srv = account_server.AccountController(
        conf, logger=debug_logger('acct2'))
    con1srv = container_server.ContainerController(
        conf, logger=debug_logger('cont1'))
    con2srv = container_server.ContainerController(
        conf, logger=debug_logger('cont2'))
    obj1srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj1'))
    obj2srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj2'))
    obj3srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj3'))
    obj4srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj4'))
    obj5srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj5'))
    obj6srv = the_object_server.ObjectController(
        conf, logger=debug_logger('obj6'))
    context["test_servers"] = \
        (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv, obj3srv,
         obj4srv, obj5srv, obj6srv)
    nl = NullLogger()
    logging_prosv = proxy_logging.ProxyLoggingMiddleware(
        listing_formats.ListingFilter(prosrv), conf, logger=prosrv.logger)
    prospa = spawn(wsgi.server, prolis, logging_prosv, nl)
    acc1spa = spawn(wsgi.server, acc1lis, acc1srv, nl)
    acc2spa = spawn(wsgi.server, acc2lis, acc2srv, nl)
    con1spa = spawn(wsgi.server, con1lis, con1srv, nl)
    con2spa = spawn(wsgi.server, con2lis, con2srv, nl)
    obj1spa = spawn(wsgi.server, obj1lis, obj1srv, nl)
    obj2spa = spawn(wsgi.server, obj2lis, obj2srv, nl)
    obj3spa = spawn(wsgi.server, obj3lis, obj3srv, nl)
    obj4spa = spawn(wsgi.server, obj4lis, obj4srv, nl)
    obj5spa = spawn(wsgi.server, obj5lis, obj5srv, nl)
    obj6spa = spawn(wsgi.server, obj6lis, obj6srv, nl)
    context["test_coros"] = \
        (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa, obj3spa,
         obj4spa, obj5spa, obj6spa)
    # Create account
    ts = normalize_timestamp(time.time())
    partition, nodes = prosrv.account_ring.get_nodes('a')
    for node in nodes:
        conn = swift.proxy.controllers.obj.http_connect(node['ip'],
                                                        node['port'],
                                                        node['device'],
                                                        partition, 'PUT', '/a',
                                                        {'X-Timestamp': ts,
                                                         'x-trans-id': 'test'})
        resp = conn.getresponse()
        assert(resp.status == 201)
    # Create another account
    # used for account-to-account tests
    ts = normalize_timestamp(time.time())
    partition, nodes = prosrv.account_ring.get_nodes('a1')
    for node in nodes:
        conn = swift.proxy.controllers.obj.http_connect(node['ip'],
                                                        node['port'],
                                                        node['device'],
                                                        partition, 'PUT',
                                                        '/a1',
                                                        {'X-Timestamp': ts,
                                                         'x-trans-id': 'test'})
        resp = conn.getresponse()
        assert(resp.status == 201)
    # Create containers, 1 per test policy
    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write('PUT /v1/a/c HTTP/1.1\r\nHost: localhost\r\n'
             'Connection: close\r\nX-Auth-Token: t\r\n'
             'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
        exp, headers[:len(exp)])
    # Create container in other account
    # used for account-to-account tests
    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write('PUT /v1/a1/c1 HTTP/1.1\r\nHost: localhost\r\n'
             'Connection: close\r\nX-Auth-Token: t\r\n'
             'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, "Expected '%s', encountered '%s'" % (
        exp, headers[:len(exp)])

    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write(
        'PUT /v1/a/c1 HTTP/1.1\r\nHost: localhost\r\n'
        'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: one\r\n'
        'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, \
        "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])

    sock = connect_tcp(('localhost', prolis.getsockname()[1]))
    fd = sock.makefile()
    fd.write(
        'PUT /v1/a/c2 HTTP/1.1\r\nHost: localhost\r\n'
        'Connection: close\r\nX-Auth-Token: t\r\nX-Storage-Policy: two\r\n'
        'Content-Length: 0\r\n\r\n')
    fd.flush()
    headers = readuntil2crlfs(fd)
    exp = 'HTTP/1.1 201'
    assert headers[:len(exp)] == exp, \
        "Expected '%s', encountered '%s'" % (exp, headers[:len(exp)])
    return context
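A typical caller unpacks the context and restores the patched globals
afterwards; a minimal sketch (the restore steps are written out inline
since the matching teardown helper isn't part of this listing):

    context = setup_servers()
    prosrv = context['test_servers'][0]        # the proxy Application
    # ... drive requests at context['test_sockets'][0] ...
    storage_policy._POLICIES = context['orig_POLICIES']
    utils.SysLogHandler = context['orig_SysLogHandler']
    rmtree(os.path.dirname(context['testdir']))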
Example #55
    def collect_parts(self, override_devices=None, override_partitions=None):
        """
        Helper for yielding partitions in the top level reconstructor
        """
        override_devices = override_devices or []
        override_partitions = override_partitions or []
        ips = whataremyips(self.bind_ip)
        for policy in POLICIES:
            if policy.policy_type != EC_POLICY:
                continue
            self._diskfile_mgr = self._df_router[policy]
            self.load_object_ring(policy)
            data_dir = get_data_dir(policy)
            local_devices = list(
                six.moves.filter(
                    lambda dev: dev and is_local_device(
                        ips, self.port, dev['replication_ip'], dev[
                            'replication_port']), policy.object_ring.devs))

            if override_devices:
                self.device_count = len(override_devices)
            else:
                self.device_count = len(local_devices)

            for local_dev in local_devices:
                if override_devices and (local_dev['device']
                                         not in override_devices):
                    continue
                self.reconstruction_device_count += 1
                dev_path = self._df_router[policy].get_dev_path(
                    local_dev['device'])
                if not dev_path:
                    self.logger.warning(_('%s is not mounted'),
                                        local_dev['device'])
                    continue
                obj_path = join(dev_path, data_dir)
                tmp_path = join(dev_path, get_tmp_dir(int(policy)))
                unlink_older_than(tmp_path, time.time() - self.reclaim_age)
                if not os.path.exists(obj_path):
                    try:
                        mkdirs(obj_path)
                    except Exception:
                        self.logger.exception('Unable to create %s' % obj_path)
                    continue
                try:
                    partitions = os.listdir(obj_path)
                except OSError:
                    self.logger.exception('Unable to list partitions in %r' %
                                          obj_path)
                    continue

                self.part_count += len(partitions)
                for partition in partitions:
                    part_path = join(obj_path, partition)
                    if not (partition.isdigit() and os.path.isdir(part_path)):
                        self.logger.warning(
                            'Unexpected entity in data dir: %r' % part_path)
                        remove_file(part_path)
                        self.reconstruction_part_count += 1
                        continue
                    partition = int(partition)
                    if override_partitions and (partition
                                                not in override_partitions):
                        continue
                    part_info = {
                        'local_dev': local_dev,
                        'policy': policy,
                        'partition': partition,
                        'part_path': part_path,
                    }
                    yield part_info
                    self.reconstruction_part_count += 1
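Unlike the list-building variant in Example #51, this collect_parts is a
generator: part_info dicts are yielded as they are found, so an early exit
doesn't pay for a full device scan. Consumption looks the same either way
(the reconstructor instance and worker are hypothetical):

    for part_info in reconstructor.collect_parts(override_partitions=[0, 3]):
        reconstruct_partition(part_info)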
Example #56
def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
    """
    If SWIFT_TEST_POLICY is set:
    - look in swift.conf file for specified policy
    - move this to be policy-0 but preserving its options
    - copy its ring file to test dir, changing its devices to suit
      in process testing, and renaming it to suit policy-0
    Otherwise, create a default ring file.
    """
    conf = ConfigParser()
    conf.read(swift_conf)
    sp_prefix = 'storage-policy:'

    try:
        # policy index 0 will be created if no policy exists in conf
        policies = parse_storage_policies(conf)
    except PolicyError as e:
        raise InProcessException(e)

    # clear all policies from test swift.conf before adding test policy back
    for policy in policies:
        conf.remove_section(sp_prefix + str(policy.idx))

    if policy_specified:
        policy_to_test = policies.get_by_name(policy_specified)
        if policy_to_test is None:
            raise InProcessException('Failed to find policy name "%s"'
                                     % policy_specified)
        _info('Using specified policy %s' % policy_to_test.name)
    else:
        policy_to_test = policies.default
        _info('Defaulting to policy %s' % policy_to_test.name)

    # make policy_to_test be policy index 0 and default for the test config
    sp_zero_section = sp_prefix + '0'
    conf.add_section(sp_zero_section)
    for (k, v) in policy_to_test.get_info(config=True).items():
        conf.set(sp_zero_section, k, v)
    conf.set(sp_zero_section, 'default', True)

    with open(swift_conf, 'w') as fp:
        conf.write(fp)

    # look for a source ring file
    ring_file_src = ring_file_test = 'object.ring.gz'
    if policy_to_test.idx:
        ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
    try:
        ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
                                                   use_sample=False)
    except InProcessException as e:
        if policy_specified:
            raise InProcessException('Failed to find ring file %s'
                                     % ring_file_src)
        ring_file_src = None

    ring_file_test = os.path.join(testdir, ring_file_test)
    if ring_file_src:
        # copy source ring file to a policy-0 test ring file, re-homing servers
        _info('Using source ring file %s' % ring_file_src)
        ring_data = ring.RingData.load(ring_file_src)
        obj_sockets = []
        for dev in ring_data.devs:
            device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
            utils.mkdirs(os.path.join(_testdir, device))
            utils.mkdirs(os.path.join(_testdir, device, 'tmp'))
            obj_socket = listen_zero()
            obj_sockets.append(obj_socket)
            dev['port'] = obj_socket.getsockname()[1]
            dev['ip'] = '127.0.0.1'
            dev['device'] = device
            dev['replication_port'] = dev['port']
            dev['replication_ip'] = dev['ip']
        ring_data.save(ring_file_test)
    else:
        # make default test ring, 3 replicas, 4 partitions, 3 devices
        # which will work for a replication policy or a 2+1 EC policy
        _info('No source object ring file, creating 3rep/4part/3dev ring')
        obj_sockets = [listen_zero() for _ in (0, 1, 2)]
        replica2part2dev_id = [[0, 1, 2, 0],
                               [1, 2, 0, 1],
                               [2, 0, 1, 2]]
        devs = [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                 'port': obj_sockets[0].getsockname()[1]},
                {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                 'port': obj_sockets[1].getsockname()[1]},
                {'id': 2, 'zone': 2, 'device': 'sdc1', 'ip': '127.0.0.1',
                 'port': obj_sockets[2].getsockname()[1]}]
        ring_data = ring.RingData(replica2part2dev_id, devs, 30)
        with closing(GzipFile(ring_file_test, 'wb')) as f:
            pickle.dump(ring_data, f)

    for dev in ring_data.devs:
        _debug('Ring file dev: %s' % dev)

    return obj_sockets
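The SWIFT_TEST_POLICY switch the docstring refers to is read from the
environment before this runs; setting it picks which policy gets promoted
to policy-0 for the in-process run. A sketch (the policy name must match
one in the swift.conf under test; 'ec' is illustrative):

    import os
    os.environ['SWIFT_TEST_POLICY'] = 'ec'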
Example #57
    def test_run_parallel_audit(self):
        class StopForever(Exception):
            pass

        class Bogus(Exception):
            pass

        loop_error = Bogus('exception')

        class LetMeOut(BaseException):
            pass

        class ObjectAuditorMock(object):
            check_args = ()
            check_kwargs = {}
            check_device_dir = None
            fork_called = 0
            master = 0
            wait_called = 0

            def mock_run(self, *args, **kwargs):
                self.check_args = args
                self.check_kwargs = kwargs
                if 'zero_byte_fps' in kwargs:
                    self.check_device_dir = kwargs.get('device_dirs')

            def mock_sleep_stop(self):
                raise StopForever('stop')

            def mock_sleep_continue(self):
                return

            def mock_audit_loop_error(self,
                                      parent,
                                      zbo_fps,
                                      override_devices=None,
                                      **kwargs):
                raise loop_error

            def mock_fork(self):
                self.fork_called += 1
                if self.master:
                    return self.fork_called
                else:
                    return 0

            def mock_wait(self):
                self.wait_called += 1
                return (self.wait_called, 0)

        for i in string.ascii_letters[2:26]:
            mkdirs(os.path.join(self.devices, 'sd%s' % i))

        my_auditor = auditor.ObjectAuditor(
            dict(devices=self.devices,
                 mount_check='false',
                 zero_byte_files_per_second=89,
                 concurrency=1))

        mocker = ObjectAuditorMock()
        my_auditor.logger.exception = mock.MagicMock()
        real_audit_loop = my_auditor.audit_loop
        my_auditor.audit_loop = mocker.mock_audit_loop_error
        my_auditor.run_audit = mocker.mock_run
        was_fork = os.fork
        was_wait = os.wait
        os.fork = mocker.mock_fork
        os.wait = mocker.mock_wait
        try:
            my_auditor._sleep = mocker.mock_sleep_stop
            my_auditor.run_once(zero_byte_fps=50)
            my_auditor.logger.exception.assert_called_once_with(
                'ERROR auditing: %s', loop_error)
            my_auditor.logger.exception.reset_mock()
            self.assertRaises(StopForever, my_auditor.run_forever)
            my_auditor.logger.exception.assert_called_once_with(
                'ERROR auditing: %s', loop_error)
            my_auditor.audit_loop = real_audit_loop

            self.assertRaises(StopForever,
                              my_auditor.run_forever,
                              zero_byte_fps=50)
            self.assertEqual(mocker.check_kwargs['zero_byte_fps'], 50)
            self.assertEqual(mocker.fork_called, 0)

            self.assertRaises(SystemExit, my_auditor.run_once)
            self.assertEqual(mocker.fork_called, 1)
            self.assertEqual(mocker.check_kwargs['zero_byte_fps'], 89)
            self.assertEqual(mocker.check_device_dir, [])
            self.assertEqual(mocker.check_args, ())

            device_list = ['sd%s' % i for i in string.ascii_letters[2:10]]
            device_string = ','.join(device_list)
            device_string_bogus = device_string + ',bogus'

            mocker.fork_called = 0
            self.assertRaises(SystemExit,
                              my_auditor.run_once,
                              devices=device_string_bogus)
            self.assertEqual(mocker.fork_called, 1)
            self.assertEqual(mocker.check_kwargs['zero_byte_fps'], 89)
            self.assertEqual(sorted(mocker.check_device_dir), device_list)

            mocker.master = 1

            mocker.fork_called = 0
            self.assertRaises(StopForever, my_auditor.run_forever)
            # Fork is called 2 times since the zbf process is forked just
            # once before self._sleep() is called and StopForever is raised
            # Also wait is called just once before StopForever is raised
            self.assertEqual(mocker.fork_called, 2)
            self.assertEqual(mocker.wait_called, 1)

            my_auditor._sleep = mocker.mock_sleep_continue
            my_auditor.audit_loop = works_only_once(my_auditor.audit_loop,
                                                    LetMeOut())

            my_auditor.concurrency = 2
            mocker.fork_called = 0
            mocker.wait_called = 0
            self.assertRaises(LetMeOut, my_auditor.run_forever)
            # Fork is called no. of devices + (no. of devices)/2 + 1 times
            # since zbf process is forked (no.of devices)/2 + 1 times
            no_devices = len(os.listdir(self.devices))
            self.assertEqual(mocker.fork_called,
                             no_devices + no_devices / 2 + 1)
            self.assertEqual(mocker.wait_called,
                             no_devices + no_devices / 2 + 1)

        finally:
            os.fork = was_fork
            os.wait = was_wait
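To make the final fork/wait arithmetic concrete: assuming the auditor setUp
created sda and sdb under self.devices, the loop above adds sdc through
sdz, so os.listdir(self.devices) has 26 entries:

    no_devices = 26                 # sda, sdb + sdc..sdz from the loop
    assert no_devices + no_devices / 2 + 1 == 40   # Python 2 int division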
Example #58
    def test_run_once_recover_from_timeout(self):
        replicator = object_replicator.ObjectReplicator(
            dict(swift_dir=self.testdir,
                 devices=self.devices,
                 mount_check='false',
                 timeout='300',
                 stats_interval='1'))
        was_connector = object_replicator.http_connect
        was_get_hashes = object_replicator.get_hashes
        was_execute = tpool.execute
        self.get_hash_count = 0
        try:

            def fake_get_hashes(*args, **kwargs):
                self.get_hash_count += 1
                if self.get_hash_count == 3:
                    # raise timeout on last call to get hashes
                    raise Timeout()
                return 2, {'abc': 'def'}

            def fake_exc(tester, *args, **kwargs):
                if 'Error syncing partition' in args[0]:
                    tester.i_failed = True

            self.i_failed = False
            object_replicator.http_connect = mock_http_connect(200)
            object_replicator.get_hashes = fake_get_hashes
            replicator.logger.exception = \
                lambda *args, **kwargs: fake_exc(self, *args, **kwargs)
            # Write some files into '1' and run replicate; they should be
            # moved to the other partitions and then the node should get
            # deleted.
            cur_part = '1'
            df = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o',
                          FakeLogger())
            mkdirs(df.datadir)
            f = open(
                os.path.join(df.datadir,
                             normalize_timestamp(time.time()) + '.data'), 'wb')
            f.write('1234567890')
            f.close()
            ohash = hash_path('a', 'c', 'o')
            data_dir = ohash[-3:]
            whole_path_from = os.path.join(self.objects, cur_part, data_dir)
            process_arg_checker = []
            nodes = [node for node in
                     self.ring.get_part_nodes(int(cur_part))
                     if node['ip'] not in _ips()]
            for node in nodes:
                rsync_mod = '%s::object/sda/objects/%s' % (node['ip'],
                                                           cur_part)
                process_arg_checker.append(
                    (0, '', ['rsync', whole_path_from, rsync_mod]))
            self.assertTrue(
                os.access(os.path.join(self.objects, '1', data_dir, ohash),
                          os.F_OK))
            with _mock_process(process_arg_checker):
                replicator.run_once()
            self.assertFalse(process_errors)
            self.assertFalse(self.i_failed)
        finally:
            object_replicator.http_connect = was_connector
            object_replicator.get_hashes = was_get_hashes
            tpool.execute = was_execute
Example #59
    def initialize(self, put_timestamp=None):
        """
        Create the DB

        :param put_timestamp: timestamp of initial PUT request
        """
        if self.db_file == ':memory:':
            tmp_db_file = None
            conn = get_db_connection(self.db_file, self.timeout)
        else:
            mkdirs(self.db_dir)
            fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
            os.close(fd)
            conn = sqlite3.connect(tmp_db_file,
                                   check_same_thread=False,
                                   factory=GreenDBConnection,
                                   timeout=0)
        # creating dbs implicitly does a lot of transactions, so we
        # pick fast, unsafe options here and do a big fsync at the end.
        with closing(conn.cursor()) as cur:
            cur.execute('PRAGMA synchronous = OFF')
            cur.execute('PRAGMA temp_store = MEMORY')
            cur.execute('PRAGMA journal_mode = MEMORY')
        conn.create_function('chexor', 3, chexor)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.executescript("""
            CREATE TABLE outgoing_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TABLE incoming_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
        """)
        if not put_timestamp:
            put_timestamp = normalize_timestamp(0)
        self._initialize(conn, put_timestamp)
        conn.commit()
        if tmp_db_file:
            conn.close()
            with open(tmp_db_file, 'r+b') as fp:
                os.fsync(fp.fileno())
            with lock_parent_directory(self.db_file, self.pending_timeout):
                if os.path.exists(self.db_file):
                    # It's as if there was a "condition" where different parts
                    # of the system were "racing" each other.
                    raise DatabaseAlreadyExists(self.db_file)
                renamer(tmp_db_file, self.db_file)
            self.conn = get_db_connection(self.db_file, self.timeout)
        else:
            self.conn = conn
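initialize is meant to run once per broker; a minimal sketch, assuming an
AccountBroker-style subclass that supplies _initialize (class and db path
are illustrative):

    broker = AccountBroker('/srv/node/sda1/accounts/0/abc/<hash>/<hash>.db')
    broker.initialize(normalize_timestamp(time.time()))
    # a second initialize against the same db_file raises
    # DatabaseAlreadyExists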
Example #60
def in_process_setup(the_object_server=object_server):
    _info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
    _info('Using object_server class: %s' % the_object_server.__name__)
    conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')
    show_debug_logs = os.environ.get('SWIFT_TEST_DEBUG_LOGS')

    if conf_src_dir is not None:
        if not os.path.isdir(conf_src_dir):
            msg = 'Config source %s is not a dir' % conf_src_dir
            raise InProcessException(msg)
        _info('Using config source dir: %s' % conf_src_dir)

    # If SWIFT_TEST_IN_PROCESS_CONF specifies a config source dir then
    # prefer config files from there, otherwise read config from source tree
    # sample files. A mixture of files from the two sources is allowed.
    proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
    _info('Using proxy config from %s' % proxy_conf)
    swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
    _info('Using swift config from %s' % swift_conf_src)

    monkey_patch_mimetools()

    global _testdir
    _testdir = os.path.join(mkdtemp(), 'tmp_functional')
    utils.mkdirs(_testdir)
    rmtree(_testdir)
    utils.mkdirs(os.path.join(_testdir, 'sda1'))
    utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
    utils.mkdirs(os.path.join(_testdir, 'sdc1'))
    utils.mkdirs(os.path.join(_testdir, 'sdc1', 'tmp'))

    swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
    _info('prepared swift.conf: %s' % swift_conf)

    # Call the associated method for the value of
    # 'SWIFT_TEST_IN_PROCESS_CONF_LOADER', if one exists
    conf_loader_label = os.environ.get(
        'SWIFT_TEST_IN_PROCESS_CONF_LOADER')
    if conf_loader_label is not None:
        try:
            conf_loader = conf_loaders[conf_loader_label]
            _debug('Calling method %s mapped to conf loader %s' %
                   (conf_loader.__name__, conf_loader_label))
        except KeyError as missing_key:
            raise InProcessException('No function mapped for conf loader %s' %
                                     missing_key)

        try:
            # Pass-in proxy_conf, swift_conf files
            proxy_conf, swift_conf = conf_loader(proxy_conf, swift_conf)
            _debug('Now using proxy conf %s' % proxy_conf)
            _debug('Now using swift conf %s' % swift_conf)
        except Exception as err:  # noqa
            raise InProcessException(err)

    obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)

    # load new swift.conf file
    if set_swift_dir(os.path.dirname(swift_conf)):
        constraints.reload_constraints()
        storage_policy.reload_storage_policies()

    global config
    if constraints.SWIFT_CONSTRAINTS_LOADED:
        # Use the swift constraints that are loaded for the test framework
        # configuration
        _c = dict((k, str(v))
                  for k, v in constraints.EFFECTIVE_CONSTRAINTS.items())
        config.update(_c)
    else:
        # In-process swift constraints were not loaded; something's wrong
        raise SkipTest

    global _test_socks
    _test_socks = []
    # We create the proxy server listening socket to get its port number so
    # that we can add it as the "auth_port" value for the functional test
    # clients.
    prolis = listen_zero()
    _test_socks.append(prolis)

    # The following set of configuration values is used both for the
    # functional test frame work and for the various proxy, account, container
    # and object servers.
    config.update({
        # Values needed by the various in-process swift servers
        'devices': _testdir,
        'swift_dir': _testdir,
        'mount_check': 'false',
        'client_timeout': '4',
        'allow_account_management': 'true',
        'account_autocreate': 'true',
        'allow_versions': 'True',
        'allow_versioned_writes': 'True',
        # Below are values used by the functional test framework, as well as
        # by the various in-process swift servers
        'auth_host': '127.0.0.1',
        'auth_port': str(prolis.getsockname()[1]),
        'auth_ssl': 'no',
        'auth_prefix': '/auth/',
        # Primary functional test account (needs admin access to the
        # account)
        'account': 'test',
        'username': '******',
        'password': '******',
        # User on a second account (needs admin access to the account)
        'account2': 'test2',
        'username2': 'tester2',
        'password2': 'testing2',
        # User on same account as first, but without admin access
        'username3': 'tester3',
        'password3': 'testing3',
        # Service user and prefix (emulates glance, cinder, etc. user)
        'account5': 'test5',
        'username5': 'tester5',
        'password5': 'testing5',
        'service_prefix': 'SERVICE',
        # For tempauth middleware. Update reseller_prefix
        'reseller_prefix': 'AUTH, SERVICE',
        'SERVICE_require_group': 'service',
        # Reseller admin user (needs reseller_admin_role)
        'account6': 'test6',
        'username6': 'tester6',
        'password6': 'testing6'
    })

    # If an env var explicitly specifies the proxy-server object_post_as_copy
    # option then use its value, otherwise leave default config unchanged.
    object_post_as_copy = os.environ.get(
        'SWIFT_TEST_IN_PROCESS_OBJECT_POST_AS_COPY')
    if object_post_as_copy is not None:
        object_post_as_copy = config_true_value(object_post_as_copy)
        config['object_post_as_copy'] = str(object_post_as_copy)
        _debug('Setting object_post_as_copy to %r' % object_post_as_copy)

    acc1lis = listen_zero()
    acc2lis = listen_zero()
    con1lis = listen_zero()
    con2lis = listen_zero()
    _test_socks += [acc1lis, acc2lis, con1lis, con2lis] + obj_sockets

    account_ring_path = os.path.join(_testdir, 'account.ring.gz')
    with closing(GzipFile(account_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': acc1lis.getsockname()[1]},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': acc2lis.getsockname()[1]}], 30),
                    f)
    container_ring_path = os.path.join(_testdir, 'container.ring.gz')
    with closing(GzipFile(container_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': con1lis.getsockname()[1]},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': con2lis.getsockname()[1]}], 30),
                    f)

    eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging requests by the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_request = lambda *a: None
    logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')
    # Redirect logging other messages by the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
    # Default to only 4 seconds for in-process functional test runs
    eventlet.wsgi.WRITE_TIMEOUT = 4

    def get_logger_name(name):
        if show_debug_logs:
            return debug_logger(name)
        else:
            return None

    acc1srv = account_server.AccountController(
        config, logger=get_logger_name('acct1'))
    acc2srv = account_server.AccountController(
        config, logger=get_logger_name('acct2'))
    con1srv = container_server.ContainerController(
        config, logger=get_logger_name('cont1'))
    con2srv = container_server.ContainerController(
        config, logger=get_logger_name('cont2'))

    objsrvs = [
        (obj_sockets[index],
         the_object_server.ObjectController(
             config, logger=get_logger_name('obj%d' % (index + 1))))
        for index in range(len(obj_sockets))
    ]

    if show_debug_logs:
        logger = debug_logger('proxy')

    def get_logger(name, *args, **kwargs):
        return logger

    with mock.patch('swift.common.utils.get_logger', get_logger):
        with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
                        FakeMemcacheMiddleware):
            try:
                app = loadapp(proxy_conf, global_conf=config)
            except Exception as e:
                raise InProcessException(e)

    nl = utils.NullLogger()
    global proxy_srv
    proxy_srv = prolis
    prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
    acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl)
    acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
    con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
    con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)

    objspa = [eventlet.spawn(eventlet.wsgi.server, objsrv[0], objsrv[1], nl)
              for objsrv in objsrvs]

    global _test_coros
    _test_coros = \
        (prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)

    # Create accounts "test" and "test2"
    def create_account(act):
        ts = utils.normalize_timestamp(time())
        account_ring = Ring(_testdir, ring_name='account')
        partition, nodes = account_ring.get_nodes(act)
        for node in nodes:
            # Note: we are just using the http_connect method in the object
            # controller here to talk to the account server nodes.
            conn = swift.proxy.controllers.obj.http_connect(
                node['ip'], node['port'], node['device'], partition, 'PUT',
                '/' + act, {'X-Timestamp': ts, 'x-trans-id': act})
            resp = conn.getresponse()
            assert resp.status == 201, 'Unable to create account: %s\n%s' % (
                resp.status, resp.body)

    create_account('AUTH_test')
    create_account('AUTH_test2')
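With this config the functional test clients authenticate against the
in-process proxy itself; a sketch of the auth endpoint they would derive
(tempauth's /auth/v1.0 convention assumed):

    auth_url = 'http://%s:%s%sv1.0' % (config['auth_host'],
                                       config['auth_port'],
                                       config['auth_prefix'])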