Example no. 1
    def bench_net_throughput(self, config, redis: RedisCluster, java, nodes_trace):
        """
        This benchmark proceeds like _bench_checksum, but additionally captures the network traffic to and from the
        Redis servers.
        """
        dumpcap = ['/usr/bin/dumpcap', '-q', '-i', 'any', '-f', 'tcp port 6379', '-s', '64', '-w']

        print("Starting dumpcap")
        isoformat = datetime.today().isoformat()
        capture_dir = '/opt/erasuretester/results/'
        write_capture_file = 'capture_%s_%s_write.pcapng' % (isoformat, config[0])
        sleep(5)
        dumpcap_proc = subprocess.Popen(dumpcap + [capture_dir + write_capture_file])
        sleep(10)
        print("Uncompressing archive")
        tar_proc = subprocess.Popen(['tar', '-xvf', '/opt/erasuretester/httpd.tar.bz2', '-C', self.mount],
                                    stdout=subprocess.PIPE, bufsize=1)
        self._show_subprocess_percent(tar_proc, 2614)
        sleep(10)
        kill_pid(dumpcap_proc)
        subprocess.check_call(['chmod', '666', capture_dir + write_capture_file])

        measures = []
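        # Each entry of nodes_trace is a tuple whose first element is the target
        # number of Redis nodes for that step of the trace.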
        for redis_size in (x[0] for x in nodes_trace):
            redis.scale(redis_size, brutal=True)
            capture_file = 'capture_%s_%s_read_%d.pcapng' % (isoformat, config[0], redis_size)

            dumpcap_proc = subprocess.Popen(dumpcap + [capture_dir + capture_file])
            sleep(10)
            print('Checking files...')
            sha_proc = subprocess.Popen(
                ['sha256sum', '-c', '/opt/erasuretester/httpd.sha256'],
                stdout=subprocess.PIPE, bufsize=1)
            sha_output = self._show_subprocess_percent(sha_proc, 2517)
            ok_files = len([x for x in sha_output if b' OK' in x])
            failed_files = len([x for x in sha_output if b' FAILED' in x])

            sleep(10)
            kill_pid(dumpcap_proc)
            subprocess.check_call(['chmod', '666', capture_dir + capture_file])

            measures.append({
                'ok': ok_files,
                'failed': failed_files,
                'capture': capture_file,
                'redis_initial': config[1],
                'redis_current': redis_size
            })

        return {
            'write_capture': write_capture_file,
            'measures': measures
        }
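The method only records the names of the capture files; the actual traffic volume has to be extracted from the .pcapng files afterwards. Below is a minimal post-processing sketch that is not part of the original code, assuming Wireshark's capinfos tool is installed on the host (the helper name summarize_capture is invented here):

import os
import subprocess


def summarize_capture(capture_path):
    """Report the on-disk size of a capture and, when Wireshark's capinfos
    is available, its textual summary (packet counts, byte counts, rates)."""
    summary = {'file': capture_path, 'size_bytes': os.path.getsize(capture_path)}
    try:
        summary['capinfos'] = subprocess.check_output(['capinfos', capture_path]).decode()
    except (OSError, subprocess.CalledProcessError):
        # capinfos not installed, or the capture file is unreadable
        summary['capinfos'] = None
    return summary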
Example no. 2
    def _bench_checksum(self, config, redis: RedisCluster, java, nodes_trace, archive_name, sha_name, tar_log_lines, sha_log_lines):
        """
        This benchmark does the following:
         1. Extract a tar archive into the erasure-coded filesystem
         2. Check the integrity of each file stored in the erasure-coded filesystem
         3. Adjust the size of the cluster according to the trace (or stop once the trace is exhausted)
         4. Go to 2
        """
        print('Uncompressing archive (%s)...' % archive_name)
        tar_proc = subprocess.Popen(['tar', '-xvf', '/opt/erasuretester/%s' % archive_name, '-C', self.mount],
                                    stdout=subprocess.PIPE, bufsize=1)
        self._show_subprocess_percent(tar_proc, tar_log_lines)

        results = dict()
        for redis_size in (x[0] for x in nodes_trace):
            redis.scale(redis_size, brutal=True)
            print('Flushing read cache...')
            java.flush_read_cache()
            print('Waiting 5 seconds for things to stabilize...')
            sleep(5)

            print('Checking files...')
            sha_proc = subprocess.Popen(
                ['sha256sum', '-c', '/opt/erasuretester/%s' % sha_name],
                stdout=subprocess.PIPE, bufsize=1)
            sha_output = self._show_subprocess_percent(sha_proc, sha_log_lines)
            ok_files = len([x for x in sha_output if b' OK' in x])
            failed_files = len([x for x in sha_output if b' FAILED' in x])
            print('   Checked. %d correct, %d failed' % (ok_files, failed_files))
            inter_results = {
                'RS0': config[1],
                'RS': redis.cluster_size,
                'OK Files': ok_files,
                'Failed files': failed_files,
                'Failure ratio': failed_files / (ok_files + failed_files),
            }
            results['RS=%d' % redis.cluster_size] = inter_results

            if ok_files == 0:  # No point in continuing once every file fails
                break
        return results
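None of the examples on this page includes self._show_subprocess_percent. The sketch below is an assumption inferred from how the callers use it: it streams the subprocess' stdout, prints a rough completion percentage against an expected line count, and returns the captured lines so the caller can count b' OK' / b' FAILED' entries.

    def _show_subprocess_percent(self, proc, expected_lines):
        """Hypothetical helper: stream a subprocess' stdout, display progress as a
        percentage of the expected line count, and return all captured lines."""
        lines = []
        for line in proc.stdout:  # the Popen objects use stdout=PIPE, bufsize=1
            lines.append(line)
            percent = min(100, 100 * len(lines) // max(expected_lines, 1))
            print('\r%3d %%' % percent, end='', flush=True)
        proc.wait()
        print()
        return lines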
Example no. 3
    def bench_repair(self, config, redis: RedisCluster, java, nodes_trace):
        start1 = time()

        print('Uncompressing httpd...')
        tar_proc = subprocess.Popen(['tar', '-xvf', '/opt/erasuretester/httpd.tar.bz2', '-C', self.mount],
                                    stdout=subprocess.PIPE, bufsize=1)
        self._show_subprocess_percent(tar_proc, 2614)
        print('Uncompressing bc...')
        tar_proc = subprocess.Popen(['tar', '-xvf', '/opt/erasuretester/bc.tar.gz', '-C', self.mount],
                                    stdout=subprocess.PIPE, bufsize=1)
        self._show_subprocess_percent(tar_proc, 94)
        print('Uncompressing 10bytes...')
        tar_proc = subprocess.Popen(['tar', '-xvf', '/opt/erasuretester/10bytes.tar.bz2', '-C', self.mount],
                                    stdout=subprocess.PIPE, bufsize=1)
        self._show_subprocess_percent(tar_proc, 1001)

        start2 = time()
        last_size = -1
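        # Walk the trace: skip steps where the cluster size does not change, and
        # repair files only after a scale-down, when data blocks may have been lost.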
        for redis_size in (x[0] for x in nodes_trace):
            if redis_size == last_size:
                sleep(1)
                continue
            redis.scale(redis_size, brutal=True)

            if redis_size < last_size:
                print("Flushing read cache")
                java.flush_read_cache()
                print("Repairing all files")
                java.repair_all_files()
            last_size = redis_size

        return {
            'start': start1,
            'trace_start': start2,
            'end': time()
        }
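bench_repair returns raw time() timestamps rather than durations. A small, hypothetical helper (repair_durations is not part of the original code) showing how that result can be reduced to elapsed times:

def repair_durations(result):
    """Convert the timestamps returned by bench_repair into elapsed seconds."""
    return {
        'extract_seconds': result['trace_start'] - result['start'],  # tar extraction phase
        'trace_seconds': result['end'] - result['trace_start'],      # scaling / repair phase
        'total_seconds': result['end'] - result['start'],
    }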