def main():
    """Entry point: subscribe to the channel named on the command line and consume its messages."""
    cli_args = commandLineArguments()
    # Invoke queue and message
    cluster = RedisCluster()
    subscription = cluster.subscribe(cli_args['channel'])
    get_message(subscription)
def main():
    """Entry point: publish the command-line message on the command-line channel."""
    cli_args = commandLineArguments()
    # Invoke queue and message
    cluster = RedisCluster()
    print(cluster.ping())
    cluster.publish(cli_args["channel"], cli_args["message"])
def bench_net_throughput(self, config, redis: RedisCluster, java, nodes_trace):
    """
    This benchmark proceeds similarly to _bench_checksum, but captures the network
    traffic coming and going to Redis servers.

    A dumpcap capture runs while the archive is written into the FS, then one
    capture per cluster size while the files are integrity-checked.
    """
    # Base dumpcap invocation; the output file path is appended per capture.
    dumpcap_cmd = ['/usr/bin/dumpcap', '-q', '-i', 'any', '-f', 'tcp port 6379', '-s', '64', '-w']
    print("Starting dumpcap")
    timestamp = datetime.today().isoformat()
    results_dir = '/opt/erasuretester/results/'
    write_capture_file = 'capture_%s_%s_write.pcapng' % (timestamp, config[0])
    sleep(5)
    capture_proc = subprocess.Popen(dumpcap_cmd + [results_dir + write_capture_file])
    sleep(10)
    print("Uncompressing archive")
    tar_proc = subprocess.Popen(['tar', '-xvf', '/opt/erasuretester/httpd.tar.bz2', '-C', self.mount],
                                stdout=subprocess.PIPE, bufsize=1)
    self._show_subprocess_percent(tar_proc, 2614)
    sleep(10)
    kill_pid(capture_proc)
    # Make the capture readable for post-processing outside the container.
    subprocess.check_call(['chmod', '666', results_dir + write_capture_file])

    measures = []
    for cluster_size in (entry[0] for entry in nodes_trace):
        redis.scale(cluster_size, brutal=True)
        read_capture_file = 'capture_%s_%s_read_%d.pcapng' % (timestamp, config[0], cluster_size)
        capture_proc = subprocess.Popen(dumpcap_cmd + [results_dir + read_capture_file])
        sleep(10)
        print('Checking files...')
        sha_proc = subprocess.Popen(
            ['sha256sum', '-c', '/opt/erasuretester/httpd.sha256'],
            stdout=subprocess.PIPE, bufsize=1)
        sha_output = self._show_subprocess_percent(sha_proc, 2517)
        ok_count = sum(1 for line in sha_output if b' OK' in line)
        failed_count = sum(1 for line in sha_output if b' FAILED' in line)
        sleep(10)
        kill_pid(capture_proc)
        subprocess.check_call(['chmod', '666', results_dir + read_capture_file])
        measures.append({
            'ok': ok_count,
            'failed': failed_count,
            'capture': read_capture_file,
            'redis_initial': config[1],
            'redis_current': cluster_size
        })

    return {
        'write_capture': write_capture_file,
        'measures': measures
    }
def run_benchmarks(self):
    """Run every configured benchmark over every trace/erasure configuration combination."""
    for trace_config in self.redis_trace_configs:
        for ec in self.erasure_configs:
            # Repeat each (bench, params) pair execute_times times.
            schedule = list(zip(self.benches, self.bench_params)) * self.execute_times
            for bench, bench_param in schedule:
                nodes_trace = NodesTrace(**trace_config)
                initial_redis_size = nodes_trace.initial_size()
                with RedisCluster(initial_redis_size) as redis:
                    # A zero-sized cluster means the in-memory backend is exercised.
                    storage_backend = 'Jedis' if initial_redis_size > 0 else 'Memory'
                    config = [ec['code'], initial_redis_size, storage_backend,
                              ec['stripe'], ec['parity'], ec['src']]
                    print("Running with " + str(config))
                    (params, env) = self._get_java_params(redis, *config)
                    with JavaProgram(params, env) as java:
                        try:
                            self._run_benchmark(bench, bench_param, config, redis, java, nodes_trace)
                        except Exception:
                            # One crashed benchmark must not abort the whole campaign.
                            logging.exception("The benchmark crashed, continuing with the rest...")
    self.save_results_to_file()
def _bench_checksum(self, config, redis: RedisCluster, java, nodes_trace,
                    archive_name, sha_name, tar_log_lines, sha_log_lines):
    """
    This benchmark does the following:

    1. Extract a tar archive into the erasure coded filesystem
    2. Check the integrity of each file stored in the erasure coded filesystem
    3. Adjust the size of the cluster according to the trace (or quit if exhausted)
    4. Goto 2

    :param config: benchmark configuration list; config[1] is the initial Redis size
    :param redis: the Redis cluster under test, rescaled at each trace step
    :param java: handle to the Java FS process (used to flush its read cache)
    :param nodes_trace: iterable of tuples whose first element is the target cluster size
    :param archive_name: tar archive (under /opt/erasuretester) to extract
    :param sha_name: sha256sum manifest (under /opt/erasuretester) to verify against
    :param tar_log_lines: expected tar output line count, for progress display
    :param sha_log_lines: expected sha256sum output line count, for progress display
    :return: dict keyed by 'RS=<size>' with OK/failed counts and failure ratio
    """
    print('Uncompressing archive (%s)...' % archive_name)
    tar_proc = subprocess.Popen(['tar', '-xvf', '/opt/erasuretester/%s' % archive_name, '-C', self.mount],
                                stdout=subprocess.PIPE, bufsize=1)
    self._show_subprocess_percent(tar_proc, tar_log_lines)

    results = dict()
    for redis_size in (x[0] for x in nodes_trace):
        redis.scale(redis_size, brutal=True)
        print('Flushing read cache...')
        java.flush_read_cache()
        print('Waiting 5 seconds for things to stabilize...')
        sleep(5)
        print('Checking files...')
        sha_proc = subprocess.Popen(
            ['sha256sum', '-c', '/opt/erasuretester/%s' % sha_name],
            stdout=subprocess.PIPE, bufsize=1)
        sha_output = self._show_subprocess_percent(sha_proc, sha_log_lines)
        ok_files = len([x for x in sha_output if b' OK' in x])
        failed_files = len([x for x in sha_output if b' FAILED' in x])
        print('    Checked. %d correct, %d failed' % (ok_files, failed_files))
        # Guard against sha256sum producing no recognizable output lines:
        # the original code divided by (ok + failed) unconditionally, which
        # raised ZeroDivisionError when both counts were zero.
        checked_files = ok_files + failed_files
        inter_results = {
            'RS0': config[1],
            'RS': redis.cluster_size,
            'OK Files': ok_files,
            'Failed files': failed_files,
            'Failure ratio': failed_files / checked_files if checked_files else 0.0,
        }
        results['RS=%d' % redis.cluster_size] = inter_results
        if ok_files == 0:
            # It's no use to continue
            break
    return results
def bench_repair(self, config, redis: RedisCluster, java, nodes_trace):
    """Extract test archives, then walk the trace, repairing all files after each shrink."""
    wall_start = time()
    # (label for the log message, archive path, expected tar output line count)
    archives = (
        ('httpd', '/opt/erasuretester/httpd.tar.bz2', 2614),
        ('bc', '/opt/erasuretester/bc.tar.gz', 94),
        ('10bytes', '/opt/erasuretester/10bytes.tar.bz2', 1001),
    )
    for label, archive_path, line_count in archives:
        print('Uncompressing %s...' % label)
        extract_proc = subprocess.Popen(['tar', '-xvf', archive_path, '-C', self.mount],
                                        stdout=subprocess.PIPE, bufsize=1)
        self._show_subprocess_percent(extract_proc, line_count)

    trace_start = time()
    previous_size = -1
    for cluster_size in (entry[0] for entry in nodes_trace):
        if cluster_size == previous_size:
            # No size change at this trace step; just let time pass.
            sleep(1)
            continue
        redis.scale(cluster_size, brutal=True)
        if cluster_size < previous_size:
            # The cluster shrank: drop cached reads and repair everything.
            print("Flushing read cache")
            java.flush_read_cache()
            print("Repairing all files")
            java.repair_all_files()
        previous_size = cluster_size

    return {
        'start': wall_start,
        'trace_start': trace_start,
        'end': time()
    }