def write_video(frames, outfpath):
    """Write a stack of frames to a video file via ffmpeg.

    Frames are dumped as JPEG images into a scratch directory next to
    ``outfpath``, converted into a video with ffmpeg, and the scratch
    directory is then removed.

    Args:
        frames: array-like of shape (num_frames, H, W, C); each frame is
            cast to uint8 before writing.
        outfpath: destination video path; parent directories are created.
    """
    # Following instructions from
    # https://github.com/ContinuumIO/anaconda-issues/issues/223#issuecomment-285523938  # noQA
    misc.mkdir_p(osp.dirname(outfpath))
    # NOTE: a dead `if 0:` branch that tried cv2.VideoWriter ("clearly
    # didn't work" per its own comment) was removed; only the
    # frames-then-ffmpeg path below was ever executed.
    frame_dir = outfpath + '_frames'
    misc.mkdir_p(frame_dir)
    for i in range(frames.shape[0]):
        cv2.imwrite(osp.join(frame_dir, '{0:06d}.jpg'.format(i)),
                    frames[i].astype(np.uint8))
    # Convert the frames to a video, then delete the frames.
    # NOTE(review): assumes 24 / cfg.TEST.SAMPLE_RATE yields a sensible
    # CRF value for ffmpeg — confirm against cfg defaults.
    misc.run_cmd(
        'ffmpeg -loglevel panic -i {0}/%06d.jpg -crf {2} {1}'.format(
            frame_dir, outfpath, 24 / cfg.TEST.SAMPLE_RATE))
    misc.run_cmd('rm -r {}'.format(frame_dir))
def _init(self, scale):
    """Recreate the database (drop + create) and populate it with the
    given scale.

    Resets the per-scale results bucket, drops and recreates the
    database, runs pgbench initialization over the unix socket, saves
    the pgbench output to a log file, and records the init duration.
    """
    # initialize results for this dataset scale
    self._results['results'] = {
        'init': None,
        'runs': [],
        'warmup': None,
    }
    log("recreating '%s' database" % (self._dbname,))
    run_cmd(['dropdb', '--if-exists', self._dbname], env=self._env)
    run_cmd(['createdb', self._dbname], env=self._env)
    log("initializing pgbench '%s' with scale %s" % (self._dbname, scale))
    # BUG FIX: the '-i' (initialize) flag was missing, so this command
    # ran a benchmark against an empty database instead of populating it
    # (compare the other _init variant in this file, which passes '-i').
    r = run_cmd(['pgbench', '-i', '-s', str(scale), '-h', SOCKET_PATH,
                 '-p', '5432', self._dbname],
                env=self._env, cwd=self._outdir)
    with open(BASE_PATH + '/pgbench_log.txt', 'w+') as file:
        file.write("pgbench log: \n")
        file.write(r[1].decode("utf-8"))
    # remember the init duration
    self._results['results']['init'] = r[2]
def _run(self, run, scale, duration, nclients=1, njobs=1, read_only=False,
         aggregate=True, csv_queue=None):
    'run pgbench on the database (either a warmup or actual benchmark run)'
    # Create a separate directory for each pgbench run
    if read_only:
        rtag = "ro"
    else:
        rtag = "rw"
    rdir = "%s/pgbench-%s-%d-%d-%s" % (self._outdir, rtag, scale,
                                       nclients, str(run))
    if not os.path.exists(rdir):
        os.mkdir(rdir)
    args = [
        'pgbench',
        '-c', str(nclients),
        '-j', str(njobs),
        '-T', str(duration)
    ]
    # aggregate on per second resolution
    if aggregate:
        args.extend(['-l', '--aggregate-interval', '1'])
    if read_only:
        args.extend(['-S'])
    args.extend([self._dbname])
    # do an explicit checkpoint before each run
    run_cmd(['psql', self._dbname, '-c', 'checkpoint'], env=self._env)
    log("pgbench: clients=%d, jobs=%d, aggregate=%s, read-only=%s, "
        "duration=%d" % (nclients, njobs, aggregate, read_only, duration))
    start = time.time()
    r = run_cmd(args, env=self._env, cwd=rdir)
    end = time.time()
    r = PgBench._parse_results(r[1])
    r.update({'read-only': read_only})
    r.update({'start': start, 'end': end})
    if csv_queue is not None:
        # BUG FIX: 'mode', 'latency' and 'tps' were undefined names here
        # and raised NameError whenever a csv_queue was supplied. Use the
        # run tag and the parsed pgbench results instead.
        # NOTE(review): assumes _parse_results yields 'latency' and 'tps'
        # keys (it demonstrably yields 'scale') — confirm against
        # _parse_results.
        csv_queue.put([
            start, end, r['scale'], nclients, njobs, rtag, duration,
            r['latency'], r['tps']
        ])
    return r
def _collect_system_info(self):
    'collect cpuinfo, meminfo, mounts'
    log("Collecting system info")
    # Snapshot toolchain versions plus CPU / OS / memory / disk state
    # into one nested dict (same call order as before: compiler, cpu,
    # os, memory, disk, then the current process).
    system = {
        'compiler': {
            'make': run_cmd(['make', '--version'], env=self._env),
            'gcc': run_cmd(['gcc', '--version'], env=self._env),
        },
        'cpu': {
            'information': get_cpu_info(),
            'number': psutil.cpu_count(),
            'times': psutil.cpu_times(percpu=False),
            'percent': psutil.cpu_times_percent(percpu=False),
            'stats': psutil.cpu_stats(),
            'load_avg': psutil.getloadavg(),
        },
        'os': {
            'architecture': platform.architecture(),
            'processor': platform.processor(),
            'release': platform.release(),
            'version': platform.version(),
            'libc': platform.libc_ver(),
        },
        'memory': {
            'virtual': psutil.virtual_memory(),
            'swap': psutil.swap_memory(),
            'mounts': psutil.disk_partitions(),
        },
        'disk': {
            'usage': psutil.disk_usage('/'),
            'io': psutil.disk_io_counters(perdisk=False, nowrap=True),
        },
    }
    # Stats for the benchmark driver process itself.
    process = psutil.Process()
    system['process'] = {
        'io': process.io_counters(),
        'context_switches': process.num_ctx_switches(),
        'cpu_times': process.cpu_times(),
        'threads': process.num_threads(),
        'cpu_percent': process.cpu_percent(),
        'memory': process.memory_info(),
        'memory_percent': process.memory_percent(),
    }
    return system
def stop(self):
    """Stop the collectd daemon by killing the pid from its pidfile.

    Logs a message instead of raising when the pidfile is absent.
    """
    log("stopping collectd")
    try:
        # FIX: use a context manager so the pidfile handle is closed
        # (the previous code leaked the open file object).
        with open(COLLECTD_PIDFILE, 'r') as pidfile:
            pid = pidfile.read().strip()
        run_cmd(['kill', pid])
    except FileNotFoundError:
        log('collectd pid not found - processes may still be running')
def _collect_sysctl(self):
    """Gather the kernel configuration dump (stdout of `sysctl -a`)."""
    log("collecting sysctl")
    return run_cmd(['sysctl', '-a'], env=self._env)[1]
def run_custem_script(self):
    """Run pgbench with every registered custom script file.

    Each script path is resolved to an absolute path and passed via
    ``-f``; the benchmark database name is appended last. Returns the
    raw run_cmd result.
    """
    args = ['pgbench']
    for script in self._scriptFileList:
        print(script)
        args.extend(['-f', os.path.abspath(script)])
    args.append(self._dbname)
    return run_cmd(args, env=self._env)
def _init(self, scale):
    """Recreate the database (drop + create) and populate it with the
    given scale, recording how long initialization took."""
    # fresh results bucket for this dataset scale
    self._results['results'] = {
        'init': None,
        'runs': [],
        'warmup': None,
    }
    log("recreating '%s' database" % (self._dbname,))
    for command in (['dropdb', '--if-exists', self._dbname],
                    ['createdb', self._dbname]):
        run_cmd(command, env=self._env)
    log("initializing pgbench '%s' with scale %s" % (self._dbname, scale))
    result = run_cmd(['pgbench', '-i', '-s', str(scale), self._dbname],
                     env=self._env, cwd=self._outdir)
    # remember the init duration
    self._results['results']['init'] = result[2]
def start(self):
    """Launch the collectd daemon with our config file and pidfile."""
    log("starting collectd")
    command = 'collectd -C {} -P {}'.format(COLLECTD_CONFIG,
                                            COLLECTD_PIDFILE)
    run_cmd(command.split(' '), env=self._env)
def stop(self):
    """Stop collectd by killing the pid recorded in its pidfile.

    FIXES over the previous version: the pidfile handle is now closed
    via a context manager, and a missing pidfile is logged instead of
    raising FileNotFoundError (matching the guarded stop() variant that
    already exists in this file).
    """
    log("stopping collectd")
    try:
        with open(COLLECTD_PIDFILE, 'r') as pidfile:
            pid = pidfile.read().strip()
        run_cmd(['kill', pid])
    except FileNotFoundError:
        log('collectd pid not found - processes may still be running')