def test_passphrase(self):
    self.mkfs()

    passphrase_new = 'sd982jhd'

    proc = subprocess.Popen(self.s3ql_cmd_argv('s3qladm') +
                            [ '--quiet', '--fatal-warnings', '--log', 'none',
                              '--authfile', '/dev/null', 'passphrase',
                              self.storage_url ],
                            stdin=subprocess.PIPE, universal_newlines=True)

    print(self.passphrase, file=proc.stdin)
    print(passphrase_new, file=proc.stdin)
    print(passphrase_new, file=proc.stdin)
    proc.stdin.close()
    self.assertEqual(proc.wait(), 0)

    plain_backend = local.Backend(self.storage_url, None, None)
    backend = BetterBackend(passphrase_new.encode(), ('zlib', 6), plain_backend)
    backend.fetch('s3ql_passphrase')  # will fail with wrong pw
def setUp(self):
    self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
    plain_backend = local.Backend('local://' + self.backend_dir, None, None)
    self.backend_pool = BackendPool(
        lambda: BetterBackend(b'schwubl', ('zlib', 6), plain_backend))
    self.backend = self.backend_pool.pop_conn()
    self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
    self.max_obj_size = 1024

    # Destructors are not guaranteed to run, and we can't unlink
    # the file immediately because apsw refers to it by name.
    # Therefore, we unlink the file manually in tearDown()
    self.dbfile = tempfile.NamedTemporaryFile(delete=False)

    self.db = Connection(self.dbfile.name)
    create_tables(self.db)
    init_tables(self.db)

    # Tested methods assume that they are called from
    # file system request handler
    llfuse.lock.acquire()

    cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                       self.max_obj_size * 5)
    self.block_cache = cache
    self.server = fs.Operations(cache, self.db, self.max_obj_size,
                                InodeCache(self.db, 0))
    self.server.init()

    # Monkeypatch around the need for removal and upload threads
    cache.to_remove = DummyQueue(cache)

    class DummyDistributor:
        def put(self, arg):
            cache._do_upload(*arg)
    cache.to_upload = DummyDistributor()

    # Keep track of unused filenames
    self.name_cnt = 0
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # /dev/urandom may be slow, so we cache the data first
    log.info('Preparing test data...')
    rnd_fh = tempfile.TemporaryFile()
    with open('/dev/urandom', 'rb', 0) as src:
        copied = 0
        while copied < 50 * 1024 * 1024:
            buf = src.read(BUFSIZE)
            rnd_fh.write(buf)
            copied += len(buf)

    log.info('Measuring throughput to cache...')
    backend_dir = tempfile.mkdtemp()
    mnt_dir = tempfile.mkdtemp()
    atexit.register(shutil.rmtree, backend_dir)
    atexit.register(shutil.rmtree, mnt_dir)

    write_time = 0
    size = 50 * 1024 * 1024
    while write_time < 3:
        log.debug('Write took %.3g seconds, retrying', write_time)
        subprocess.check_call(['mkfs.s3ql', '--plain', 'local://%s' % backend_dir,
                               '--quiet', '--force', '--cachedir', options.cachedir])
        subprocess.check_call(['mount.s3ql', '--threads', '1', '--quiet',
                               '--cachesize', '%d' % (2 * size / 1024),
                               '--log', '%s/mount.log' % backend_dir,
                               '--cachedir', options.cachedir,
                               'local://%s' % backend_dir, mnt_dir])
        try:
            size *= 2
            with open('%s/bigfile' % mnt_dir, 'wb', 0) as dst:
                rnd_fh.seek(0)
                write_time = time.time()
                copied = 0
                while copied < size:
                    buf = rnd_fh.read(BUFSIZE)
                    if not buf:
                        rnd_fh.seek(0)
                        continue
                    dst.write(buf)
                    copied += len(buf)

            write_time = time.time() - write_time
            os.unlink('%s/bigfile' % mnt_dir)
        finally:
            subprocess.check_call(['umount.s3ql', mnt_dir])

    fuse_speed = copied / write_time
    log.info('Cache throughput: %d KiB/sec', fuse_speed / 1024)

    # Upload random data to prevent effects of compression
    # on the network layer
    log.info('Measuring raw backend throughput..')
    try:
        backend = get_backend(options, plain=True)
    except DanglingStorageURLError as exc:
        raise QuietError(str(exc))

    upload_time = 0
    size = 512 * 1024
    while upload_time < 10:
        size *= 2
        def do_write(dst):
            rnd_fh.seek(0)
            stamp = time.time()
            copied = 0
            while copied < size:
                buf = rnd_fh.read(BUFSIZE)
                if not buf:
                    rnd_fh.seek(0)
                    continue
                dst.write(buf)
                copied += len(buf)
            return (copied, stamp)
        (upload_size, upload_time) = backend.perform_write(do_write, 's3ql_testdata')
        upload_time = time.time() - upload_time

    backend_speed = upload_size / upload_time
    log.info('Backend throughput: %d KiB/sec', backend_speed / 1024)
    backend.delete('s3ql_testdata')

    src = options.file
    size = os.fstat(options.file.fileno()).st_size
    log.info('Test file size: %.2f MiB', (size / 1024 ** 2))

    in_speed = dict()
    out_speed = dict()
    for alg in ALGS:
        log.info('compressing with %s...', alg)
        backend = BetterBackend('pass', alg,
                                Backend('local://' + backend_dir, None, None))
        def do_write(dst):  #pylint: disable=E0102
            src.seek(0)
            stamp = time.time()
            while True:
                buf = src.read(BUFSIZE)
                if not buf:
                    break
                dst.write(buf)
            return (dst, stamp)
        (dst_fh, stamp) = backend.perform_write(do_write, 's3ql_testdata')
        dt = time.time() - stamp
        in_speed[alg] = size / dt
        out_speed[alg] = dst_fh.get_obj_size() / dt

        log.info('%s compression speed: %d KiB/sec per thread (in)',
                 alg, in_speed[alg] / 1024)
        log.info('%s compression speed: %d KiB/sec per thread (out)',
                 alg, out_speed[alg] / 1024)

    print('')

    threads = set([1, 2, 4, 8])
    cores = os.sysconf('SC_NPROCESSORS_ONLN')
    if cores != -1:
        threads.add(cores)
    if options.threads:
        threads.add(options.threads)

    print('%-26s' % 'Threads:',
          ('%12d' * len(threads)) % tuple(sorted(threads)))

    for alg in ALGS:
        speeds = []
        limits = []
        for t in sorted(threads):
            if fuse_speed > t * in_speed[alg]:
                limit = 'CPU'
                speed = t * in_speed[alg]
            else:
                limit = 'S3QL/FUSE'
                speed = fuse_speed

            if speed / in_speed[alg] * out_speed[alg] > backend_speed:
                limit = 'uplink'
                speed = backend_speed * in_speed[alg] / out_speed[alg]

            limits.append(limit)
            speeds.append(speed / 1024)

        print('%-26s' % ('Max FS throughput (%s):' % alg),
              ('%7d KiB/s' * len(threads)) % tuple(speeds))
        print('%-26s' % '..limited by:',
              ('%12s' * len(threads)) % tuple(limits))

    print('')
    print('All numbers assume that the test file is representative and that',
          'there are enough processor cores to run all active threads in parallel.',
          'To compensate for network latency, you should use about twice as',
          'many upload threads as indicated by the above table.\n', sep='\n')
def _wrap_backend(self):
    return BetterBackend(b'schlurz', ('zlib', 6), self.plain_backend)
def _wrap_backend(self):
    return BetterBackend(b'schluz', (None, 0), self.plain_backend)
def _wrap_backend(self):
    return BetterBackend(None, ('lzma', 6), self.plain_backend)
def main(args=None):
    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # /dev/urandom may be slow, so we cache the data first
    log.info('Preparing test data...')
    rnd_fh = tempfile.TemporaryFile()
    with open('/dev/urandom', 'rb', 0) as src:
        copied = 0
        while copied < 50 * 1024 * 1024:
            buf = src.read(BUFSIZE)
            rnd_fh.write(buf)
            copied += len(buf)

    log.info('Measuring throughput to cache...')
    backend_dir = tempfile.mkdtemp(prefix='s3ql-benchmark-')
    mnt_dir = tempfile.mkdtemp(prefix='s3ql-mnt')
    atexit.register(shutil.rmtree, backend_dir)
    atexit.register(shutil.rmtree, mnt_dir)

    block_sizes = [2 ** b for b in range(12, 18)]
    for blocksize in block_sizes:
        write_time = 0
        size = 50 * 1024 * 1024
        while write_time < 3:
            log.debug('Write took %.3g seconds, retrying', write_time)
            subprocess.check_call([exec_prefix + 'mkfs.s3ql', '--plain',
                                   'local://%s' % backend_dir, '--quiet',
                                   '--force', '--cachedir', options.cachedir])
            subprocess.check_call([exec_prefix + 'mount.s3ql', '--threads', '1',
                                   '--quiet', '--cachesize', '%d' % (2 * size / 1024),
                                   '--log', '%s/mount.log' % backend_dir,
                                   '--cachedir', options.cachedir,
                                   'local://%s' % backend_dir, mnt_dir])
            try:
                size *= 2
                with open('%s/bigfile' % mnt_dir, 'wb', 0) as dst:
                    rnd_fh.seek(0)
                    write_time = time.time()
                    copied = 0
                    while copied < size:
                        buf = rnd_fh.read(blocksize)
                        if not buf:
                            rnd_fh.seek(0)
                            continue
                        dst.write(buf)
                        copied += len(buf)

                write_time = time.time() - write_time
                os.unlink('%s/bigfile' % mnt_dir)
            finally:
                subprocess.check_call([exec_prefix + 'umount.s3ql', mnt_dir])

        fuse_speed = copied / write_time
        log.info('Cache throughput with %3d KiB blocks: %d KiB/sec',
                 blocksize / 1024, fuse_speed / 1024)

    # Upload random data to prevent effects of compression
    # on the network layer
    log.info('Measuring raw backend throughput..')
    try:
        backend = get_backend(options, plain=True)
    except DanglingStorageURLError as exc:
        raise QuietError(str(exc)) from None

    upload_time = 0
    size = 512 * 1024
    while upload_time < 10:
        size *= 2
        def do_write(dst):
            rnd_fh.seek(0)
            stamp = time.time()
            copied = 0
            while copied < size:
                buf = rnd_fh.read(BUFSIZE)
                if not buf:
                    rnd_fh.seek(0)
                    continue
                dst.write(buf)
                copied += len(buf)
            return (copied, stamp)
        (upload_size, upload_time) = backend.perform_write(do_write, 's3ql_testdata')
        upload_time = time.time() - upload_time

    backend_speed = upload_size / upload_time
    log.info('Backend throughput: %d KiB/sec', backend_speed / 1024)
    backend.delete('s3ql_testdata')

    src = options.file
    size = os.fstat(options.file.fileno()).st_size
    log.info('Test file size: %.2f MiB', (size / 1024 ** 2))

    in_speed = dict()
    out_speed = dict()
    for alg in ALGS:
        log.info('compressing with %s-6...', alg)
        backend = BetterBackend(b'pass', (alg, 6),
                                Backend('local://' + backend_dir, None, None))
        def do_write(dst):  #pylint: disable=E0102
            src.seek(0)
            stamp = time.time()
            while True:
                buf = src.read(BUFSIZE)
                if not buf:
                    break
                dst.write(buf)
            return (dst, stamp)
        (dst_fh, stamp) = backend.perform_write(do_write, 's3ql_testdata')
        dt = time.time() - stamp
        in_speed[alg] = size / dt
        out_speed[alg] = dst_fh.get_obj_size() / dt

        log.info('%s compression speed: %d KiB/sec per thread (in)',
                 alg, in_speed[alg] / 1024)
        log.info('%s compression speed: %d KiB/sec per thread (out)',
                 alg, out_speed[alg] / 1024)

    print('')
    print('With %d KiB blocks, maximum performance for different compression'
          % (block_sizes[-1] / 1024),
          'algorithms and thread counts is:', '', sep='\n')

    threads = set([1, 2, 4, 8])
    cores = os.sysconf('SC_NPROCESSORS_ONLN')
    if cores != -1:
        threads.add(cores)
    if options.threads:
        threads.add(options.threads)

    print('%-26s' % 'Threads:',
          ('%12d' * len(threads)) % tuple(sorted(threads)))

    for alg in ALGS:
        speeds = []
        limits = []
        for t in sorted(threads):
            if fuse_speed > t * in_speed[alg]:
                limit = 'CPU'
                speed = t * in_speed[alg]
            else:
                limit = 'S3QL/FUSE'
                speed = fuse_speed

            if speed / in_speed[alg] * out_speed[alg] > backend_speed:
                limit = 'uplink'
                speed = backend_speed * in_speed[alg] / out_speed[alg]

            limits.append(limit)
            speeds.append(speed / 1024)

        print('%-26s' % ('Max FS throughput (%s):' % alg),
              ('%7d KiB/s' * len(threads)) % tuple(speeds))
        print('%-26s' % '..limited by:',
              ('%12s' * len(threads)) % tuple(limits))

    print('')
    print('All numbers assume that the test file is representative and that',
          'there are enough processor cores to run all active threads in parallel.',
          'To compensate for network latency, you should use about twice as',
          'many upload threads as indicated by the above table.\n', sep='\n')