Example 1
    def test_run_once(self):
        replicator = object_replicator.ObjectReplicator(
            dict(chase_dir=self.testdir, devices=self.devices,
                mount_check='false', timeout='300', stats_interval='1'))
        was_connector = object_replicator.http_connect
        object_replicator.http_connect = mock_http_connect(200)
        cur_part = '0'
        df = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o',
                      FakeLogger())
        mkdirs(df.datadir)
        f = open(os.path.join(df.datadir,
                              normalize_timestamp(time.time()) + '.data'),
                 'wb')
        f.write('1234567890')
        f.close()
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, cur_part, data_dir)
        process_arg_checker = []
        nodes = [node for node in
                 self.ring.get_part_nodes(int(cur_part))
                 if node['ip'] not in _ips()]
        for node in nodes:
            rsync_mod = '[%s]::object/sda/objects/%s' % (node['ip'], cur_part)
            process_arg_checker.append(
                (0, '', ['rsync', whole_path_from, rsync_mod]))
        with _mock_process(process_arg_checker):
            replicator.run_once()
        self.assertFalse(process_errors)

        object_replicator.http_connect = was_connector
Example 2
    def test_run_once_recover_from_timeout(self):
        replicator = object_replicator.ObjectReplicator(
            dict(chase_dir=self.testdir, devices=self.devices,
                mount_check='false', timeout='300', stats_interval='1'))
        was_connector = object_replicator.http_connect
        was_get_hashes = object_replicator.get_hashes
        was_execute = tpool.execute
        self.get_hash_count = 0
        try:

            def fake_get_hashes(*args, **kwargs):
                self.get_hash_count += 1
                if self.get_hash_count == 3:
                    # raise timeout on last call to get hashes
                    raise Timeout()
                return 2, {'abc': 'def'}

            def fake_exc(tester, *args, **kwargs):
                if 'Error syncing partition' in args[0]:
                    tester.i_failed = True

            self.i_failed = False
            object_replicator.http_connect = mock_http_connect(200)
            object_replicator.get_hashes = fake_get_hashes
            replicator.logger.exception = \
                lambda *args, **kwargs: fake_exc(self, *args, **kwargs)
            # Write some files into '1' and run replicate - they should be
            # moved to the other partitions and then the node should get
            # deleted.
            cur_part = '1'
            df = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o',
                          FakeLogger())
            mkdirs(df.datadir)
            f = open(os.path.join(df.datadir,
                                  normalize_timestamp(time.time()) + '.data'),
                     'wb')
            f.write('1234567890')
            f.close()
            ohash = hash_path('a', 'c', 'o')
            data_dir = ohash[-3:]
            whole_path_from = os.path.join(self.objects, cur_part, data_dir)
            process_arg_checker = []
            nodes = [node for node in
                     self.ring.get_part_nodes(int(cur_part))
                     if node['ip'] not in _ips()]
            for node in nodes:
                rsync_mod = '[%s]::object/sda/objects/%s' % (node['ip'],
                                                             cur_part)
                process_arg_checker.append(
                    (0, '', ['rsync', whole_path_from, rsync_mod]))
            self.assertTrue(os.access(os.path.join(self.objects,
                                                   '1', data_dir, ohash),
                                      os.F_OK))
            with _mock_process(process_arg_checker):
                replicator.run_once()
            self.assertFalse(process_errors)
            self.assertFalse(self.i_failed)
        finally:
            object_replicator.http_connect = was_connector
            object_replicator.get_hashes = was_get_hashes
            tpool.execute = was_execute
Example 3
    def setup_bad_zero_byte(self, with_ts=False):
        self.auditor = auditor.ObjectAuditor(self.conf)
        self.auditor.log_time = 0
        ts_file_path = ''
        if with_ts:
            name_hash = hash_path('a', 'c', 'o')
            dir_path = os.path.join(self.devices, 'sda',
                                    storage_directory(DATADIR, '0', name_hash))
            ts_file_path = os.path.join(dir_path, '99999.ts')
            if not os.path.exists(dir_path):
                mkdirs(dir_path)
            fp = open(ts_file_path, 'w')
            fp.close()

        # Write a zero-byte .data file whose metadata claims a 10-byte
        # body - the mismatch is what makes the object "bad".
        etag = md5()
        with self.disk_file.mkstemp() as (fd, tmppath):
            etag = etag.hexdigest()
            metadata = {
                'ETag': etag,
                'X-Timestamp': str(normalize_timestamp(time.time())),
                'Content-Length': 10,
            }
            self.disk_file.put(fd, tmppath, metadata)
            etag = md5()
            etag = etag.hexdigest()
            metadata['ETag'] = etag
            write_metadata(fd, metadata)
        if self.disk_file.data_file:
            return self.disk_file.data_file
        return ts_file_path
Example 4
    def test_hash_suffix_multi_file_two(self):
        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
        mkdirs(df.datadir)
        for tdiff in [1, 50, 100, 500]:
            suffs = ['.meta', '.data']
            if tdiff > 50:
                suffs.append('.ts')
            for suff in suffs:
                f = open(
                    os.path.join(
                        df.datadir,
                        normalize_timestamp(int(time.time()) - tdiff) + suff),
                    'wb')
                f.write('1234567890')
                f.close()

        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        hsh_path = os.listdir(whole_path_from)[0]
        whole_hsh_path = os.path.join(whole_path_from, hsh_path)

        object_replicator.hash_suffix(whole_path_from, 99)
        # only the meta and data should be left
        self.assertEquals(len(os.listdir(whole_hsh_path)), 2)
Example 5
    def test_invalidate_hash(self):

        def assertFileData(file_path, data):
            with open(file_path, 'r') as fp:
                fdata = fp.read()
                self.assertEquals(pickle.loads(fdata), pickle.loads(data))

        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
        mkdirs(df.datadir)
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        hashes_file = os.path.join(self.objects, '0',
                                   object_replicator.HASH_FILE)
        # test that the exception from a nonexistent file is caught
        self.assertEquals(object_replicator.invalidate_hash(whole_path_from),
                          None)
        # test that hashes get cleared
        check_pickle_data = pickle.dumps({data_dir: None},
                                         object_replicator.PICKLE_PROTOCOL)
        for data_hash in [{data_dir: None}, {data_dir: 'abcdefg'}]:
            with open(hashes_file, 'wb') as fp:
                pickle.dump(data_hash, fp, object_replicator.PICKLE_PROTOCOL)
            object_replicator.invalidate_hash(whole_path_from)
            assertFileData(hashes_file, check_pickle_data)
Example 6
    def test_delete_partition(self):
        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
        mkdirs(df.datadir)
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        part_path = os.path.join(self.objects, '1')
        self.assertTrue(os.access(part_path, os.F_OK))
        self.replicator.replicate()
        self.assertFalse(os.access(part_path, os.F_OK))
Example 7
    def test_run_once(self):
        cu = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'chase_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15',
            })
        cu.run_once()
        async_dir = os.path.join(self.sda1, object_server.ASYNCDIR)
        os.mkdir(async_dir)
        cu.run_once()
        self.assert_(os.path.exists(async_dir))

        odd_dir = os.path.join(async_dir, 'not really supposed to be here')
        os.mkdir(odd_dir)
        cu.run_once()
        self.assert_(os.path.exists(async_dir))
        self.assert_(not os.path.exists(odd_dir))

        ohash = hash_path('a', 'c', 'o')
        odir = os.path.join(async_dir, ohash[-3:])
        mkdirs(odir)
        op_path = os.path.join(odir,
                               '%s-%s' % (ohash, normalize_timestamp(time())))
        pickle.dump({'op': 'PUT', 'account': 'a', 'container': 'c',
                     'obj': 'o', 'headers': {
                         'X-Container-Timestamp': normalize_timestamp(0)}},
                    open(op_path, 'wb'))
        cu.run_once()
        self.assert_(os.path.exists(op_path))

        bindsock = listen(('127.0.0.1', 0))

        def accepter(sock, return_code):
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEquals(inc.readline(),
                                      'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
                    headers = {}
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0].lower()] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assert_('x-container-timestamp' in headers)
            except BaseException, err:
                return err
            return None
Example 8
    def test_mkdirs(self):
        testroot = os.path.join(os.path.dirname(__file__), 'mkdirs')
        try:
            os.unlink(testroot)
        except Exception:
            pass
        rmtree(testroot, ignore_errors=1)
        self.assert_(not os.path.exists(testroot))
        utils.mkdirs(testroot)
        self.assert_(os.path.exists(testroot))
        utils.mkdirs(testroot)
        self.assert_(os.path.exists(testroot))
        rmtree(testroot, ignore_errors=1)

        testdir = os.path.join(testroot, 'one/two/three')
        self.assert_(not os.path.exists(testdir))
        utils.mkdirs(testdir)
        self.assert_(os.path.exists(testdir))
        utils.mkdirs(testdir)
        self.assert_(os.path.exists(testdir))
        rmtree(testroot, ignore_errors=1)

        open(testroot, 'wb').close()
        self.assert_(not os.path.exists(testdir))
        self.assertRaises(OSError, utils.mkdirs, testdir)
        os.unlink(testroot)
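
For reference, the utils.mkdirs helper exercised by this test (and called throughout the examples above) is essentially an idempotent wrapper around os.makedirs. The following is a minimal sketch consistent with the assertions above, not necessarily the exact chase implementation:

    import errno
    import os

    def mkdirs(path):
        # Create path and any missing parents; calling it again on an
        # existing directory is a no-op, matching the repeated
        # utils.mkdirs() calls asserted above.
        if not os.path.isdir(path):
            try:
                os.makedirs(path)
            except OSError as err:
                # Losing a race with a concurrent creator is fine; anything
                # else (e.g. a regular file occupying the path, as in the
                # final assertRaises) propagates as OSError.
                if err.errno != errno.EEXIST or not os.path.isdir(path):
                    raise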
Example 9
    def test_object_audit_no_meta(self):
        timestamp = str(normalize_timestamp(time.time()))
        path = os.path.join(self.disk_file.datadir, timestamp + '.data')
        mkdirs(self.disk_file.datadir)
        fp = open(path, 'w')
        fp.write('0' * 1024)
        fp.close()
        invalidate_hash(os.path.dirname(self.disk_file.datadir))
        self.auditor = auditor.AuditorWorker(self.conf)
        pre_quarantines = self.auditor.quarantines
        self.auditor.object_audit(
            os.path.join(self.disk_file.datadir, timestamp + '.data'),
            'sda', '0')
        self.assertEquals(self.auditor.quarantines, pre_quarantines + 1)
Example 10
    def test_run_once_recover_from_failure(self):
        replicator = object_replicator.ObjectReplicator(
            dict(chase_dir=self.testdir,
                 devices=self.devices,
                 mount_check='false',
                 timeout='300',
                 stats_interval='1'))
        was_connector = object_replicator.http_connect
        try:
            object_replicator.http_connect = mock_http_connect(200)
            # Write some files into '1' and run replicate - they should be
            # moved to the other partitions and then the node should get
            # deleted.
            cur_part = '1'
            df = DiskFile(self.devices, 'sda', cur_part, 'a', 'c', 'o',
                          FakeLogger())
            mkdirs(df.datadir)
            f = open(
                os.path.join(df.datadir,
                             normalize_timestamp(time.time()) + '.data'), 'wb')
            f.write('1234567890')
            f.close()
            ohash = hash_path('a', 'c', 'o')
            data_dir = ohash[-3:]
            whole_path_from = os.path.join(self.objects, cur_part, data_dir)
            process_arg_checker = []
            nodes = [node for node in
                     self.ring.get_part_nodes(int(cur_part))
                     if node['ip'] not in _ips()]
            for node in nodes:
                rsync_mod = '[%s]::object/sda/objects/%s' % (node['ip'],
                                                             cur_part)
                process_arg_checker.append(
                    (0, '', ['rsync', whole_path_from, rsync_mod]))
            self.assertTrue(
                os.access(os.path.join(self.objects, '1', data_dir, ohash),
                          os.F_OK))
            with _mock_process(process_arg_checker):
                replicator.run_once()
            self.assertFalse(process_errors)
            for i, result in [('0', True), ('1', False), ('2', True),
                              ('3', True)]:
                self.assertEquals(
                    os.access(
                        os.path.join(self.objects, i,
                                     object_replicator.HASH_FILE), os.F_OK),
                    result)
        finally:
            object_replicator.http_connect = was_connector
Example 11
    def test_hash_suffix_one_file(self):
        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
        mkdirs(df.datadir)
        f = open(
            os.path.join(df.datadir,
                         normalize_timestamp(time.time() - 100) + '.ts'), 'wb')
        f.write('1234567890')
        f.close()
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        object_replicator.hash_suffix(whole_path_from, 101)
        self.assertEquals(len(os.listdir(self.parts['0'])), 1)

        object_replicator.hash_suffix(whole_path_from, 99)
        self.assertEquals(len(os.listdir(self.parts['0'])), 0)
Example 12
    @contextmanager  # assumes ``from contextlib import contextmanager``
    def mkstemp(self):
        """Context manager to create and clean up a temporary file."""
        if not os.path.exists(self.tmpdir):
            mkdirs(self.tmpdir)
        fd, tmppath = mkstemp(dir=self.tmpdir)
        try:
            yield fd, tmppath
        finally:
            try:
                os.close(fd)
            except OSError:
                pass
            try:
                os.unlink(tmppath)
            except OSError:
                pass
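
A brief usage sketch for this context manager, with disk_file and metadata standing in for the DiskFile instance and metadata dict from the setup_bad_zero_byte example above:

    # The temp file is created under the DiskFile's tmp dir and handed to
    # the caller; put() renames it into the datadir, so the unlink in the
    # finally block simply hits ENOENT and is swallowed.
    with disk_file.mkstemp() as (fd, tmppath):
        os.write(fd, '1234567890')
        disk_file.put(fd, tmppath, metadata)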
Example 13
    def test_get_hashes(self):
        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
        mkdirs(df.datadir)
        with open(os.path.join(df.datadir,
                               normalize_timestamp(time.time()) + '.ts'),
                  'wb') as f:
            f.write('1234567890')
        part = os.path.join(self.objects, '0')
        hashed, hashes = object_replicator.get_hashes(part)
        self.assertEquals(hashed, 1)
        self.assert_('a83' in hashes)
        hashed, hashes = object_replicator.get_hashes(part, do_listdir=True)
        self.assertEquals(hashed, 0)
        self.assert_('a83' in hashes)
        hashed, hashes = object_replicator.get_hashes(part,
                                                      recalculate=['a83'])
        self.assertEquals(hashed, 1)
        self.assert_('a83' in hashes)
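
The 'a83' literal asserted here is not magic: it is the three-character suffix directory derived from the object's path hash, the same ohash[-3:] the other examples compute:

    # Under the hashing configuration these tests run with:
    ohash = hash_path('a', 'c', 'o')
    assert ohash[-3:] == 'a83'  # the suffix dir get_hashes reports on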
Example 14
    def test_hash_suffix_hash_dir_is_file_quarantine(self):
        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
        mkdirs(os.path.dirname(df.datadir))
        open(df.datadir, 'wb').close()
        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        orig_quarantine_renamer = object_replicator.quarantine_renamer
        called = [False]

        def wrapped(*args, **kwargs):
            called[0] = True
            return orig_quarantine_renamer(*args, **kwargs)

        try:
            object_replicator.quarantine_renamer = wrapped
            object_replicator.hash_suffix(whole_path_from, 101)
        finally:
            object_replicator.quarantine_renamer = orig_quarantine_renamer
        self.assertTrue(called[0])
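
For orientation, quarantine_renamer moves the offending hash directory (here actually a file) into the device's quarantine area. A hedged sketch of that behaviour; the directory layout and lack of collision handling are simplifying assumptions:

    import os

    def quarantine_renamer(device_path, corrupted_file_path):
        # Move the hash dir containing the corrupted entry aside so the
        # replicator and auditor stop tripping over it (sketch only).
        from_dir = os.path.dirname(corrupted_file_path)
        to_dir = os.path.join(device_path, 'quarantined', 'objects',
                              os.path.basename(from_dir))
        if not os.path.isdir(os.path.dirname(to_dir)):
            os.makedirs(os.path.dirname(to_dir))
        os.rename(from_dir, to_dir)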
Example 15
    def test_hash_suffix_multi_file_one(self):
        df = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', FakeLogger())
        mkdirs(df.datadir)
        for tdiff in [1, 50, 100, 500]:
            for suff in ['.meta', '.data', '.ts']:
                f = open(
                    os.path.join(
                        df.datadir,
                        normalize_timestamp(int(time.time()) - tdiff) + suff),
                    'wb')
                f.write('1234567890')
                f.close()

        ohash = hash_path('a', 'c', 'o')
        data_dir = ohash[-3:]
        whole_path_from = os.path.join(self.objects, '0', data_dir)
        hsh_path = os.listdir(whole_path_from)[0]
        whole_hsh_path = os.path.join(whole_path_from, hsh_path)

        object_replicator.hash_suffix(whole_path_from, 99)
        # only the tombstone should be left
        self.assertEquals(len(os.listdir(whole_hsh_path)), 1)
Example 16
    def setUp(self):
        self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
        self.devices = os.path.join(self.testdir, 'node')
        self.logger = FakeLogger()
        rmtree(self.testdir, ignore_errors=1)
        mkdirs(os.path.join(self.devices, 'sda'))
        self.objects = os.path.join(self.devices, 'sda', 'objects')

        os.mkdir(os.path.join(self.devices, 'sdb'))
        self.objects_2 = os.path.join(self.devices, 'sdb', 'objects')

        os.mkdir(self.objects)
        self.parts = {}
        for part in ['0', '1', '2', '3']:
            self.parts[part] = os.path.join(self.objects, part)
            os.mkdir(os.path.join(self.objects, part))

        self.conf = dict(devices=self.devices, mount_check='false')
        self.disk_file = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o',
                                  self.logger)
Example 17
    def dispatch(self, replicate_args, args):
        if not hasattr(args, 'pop'):
            return HTTPBadRequest(body='Invalid object type')
        op = args.pop(0)
        drive, partition, hsh = replicate_args
        if self.mount_check and \
                not os.path.ismount(os.path.join(self.root, drive)):
            return Response(status='507 %s is not mounted' % drive)
        db_file = os.path.join(self.root, drive,
                               storage_directory(self.datadir, partition, hsh),
                               hsh + '.db')
        if op == 'rsync_then_merge':
            return self.rsync_then_merge(drive, db_file, args)
        if op == 'complete_rsync':
            return self.complete_rsync(drive, db_file, args)
        else:
            # Someone might be about to rsync a db to us; make sure
            # there's a tmp dir to receive it.
            mkdirs(os.path.join(self.root, drive, 'tmp'))
            if not os.path.exists(db_file):
                return HTTPNotFound()
            return getattr(self, op)(self.broker_class(db_file), args)
Example 18
    def setUp(self):
        """Set up for testing chase.container_server.ContainerController."""
        self.testdir = os.path.join(mkdtemp(),
                                    'tmp_test_object_server_ObjectController')
        mkdirs(self.testdir)
        rmtree(self.testdir)
        mkdirs(os.path.join(self.testdir, 'sda1'))
        mkdirs(os.path.join(self.testdir, 'sda1', 'tmp'))
        self.controller = container_server.ContainerController(
            {'devices': self.testdir, 'mount_check': 'false'})
Example 19
    def REPLICATE(self, request):
        """
        Handle REPLICATE requests for the Chase Object Server.  This is used
        by the object replicator to get hashes for directories.
        """
        try:
            device, partition, suffix = split_path(
                unquote(request.path), 2, 3, True)
        except ValueError, e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')
        if self.mount_check and not check_mount(self.devices, device):
            return Response(status='507 %s is not mounted' % device)
        path = os.path.join(self.devices, device, DATADIR, partition)
        if not os.path.exists(path):
            mkdirs(path)
        suffixes = suffix.split('-') if suffix else []
        _junk, hashes = tpool.execute(tpooled_get_hashes, path,
                                      recalculate=suffixes)
        # See tpooled_get_hashes "Hack".
        if isinstance(hashes, BaseException):
            raise hashes
        return Response(body=pickle.dumps(hashes))

    def __call__(self, env, start_response):
        """WSGI Application entry point for the Chase Object Server."""
        start_time = time.time()
        req = Request(env)
        self.logger.txn_id = req.headers.get('x-trans-id', None)
        if not check_utf8(req.path_info):
            res = HTTPPreconditionFailed(body='Invalid UTF8')
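
On the calling side of this handler, the object replicator issues the REPLICATE request and unpickles the suffix hashes from the response body. A hedged sketch, with the http_connect signature assumed from the mocks used in the replicator tests above:

    # Ask a peer object server for its suffix hashes, then unpickle them
    # for comparison against the local ones.
    conn = http_connect(node['ip'], node['port'], node['device'],
                        partition, 'REPLICATE', '-'.join(suffixes))
    resp = conn.getresponse()
    remote_hashes = pickle.loads(resp.read())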