def test_stash_envlog0001(mock_platform):
    """stash_envlog() should copy a scratch env log into an
    '<config>_envlogs' directory, naming the file after the de-mangled
    benchmark key and the pexec index."""

    config = Config(os.path.join(TEST_DIR, "example.krun"))
    expect = "A=1\nB=2\nC=3\n"

    # Write an environment dump to a scratch file, as the wrapper would.
    with NamedTemporaryFile(prefix="kruntest-", delete=False) as tmp_fh:
        tmp_fh.write(expect)
        tmp_path = tmp_fh.name

    stash_envlog(tmp_path, config, mock_platform, "bench:vm:variant", 1337)

    log_dir = os.path.join(TEST_DIR, "example_envlogs")
    log_path = os.path.join(log_dir, "bench__vm__variant__1337.env")
    with open(log_path) as log_fh:
        contents = log_fh.read()

    # Clean up before asserting so that the stash dir is gone either way.
    os.unlink(log_path)
    os.rmdir(log_dir)

    assert contents == expect
def run(self, mailer, dry_run=False):
    """Runs this job (execution).

    Invokes the VM wrapper for one process execution of this benchmark,
    parses its results, and (when appropriate) stashes the environment
    log the wrapper recorded.

    Arguments:
        mailer -- used to notify the operator of re-runs and failures.
        dry_run -- when True, skip throttle-count checks and result
                   parsing and return empty measurements.

    Returns a 3-tuple (measurements, instr_data, flag), where flag is one
    of: "C" (completed), "T" (timed out), "O" (outstanding, i.e. needs
    re-running), or "E" (execution failed).
    """

    flag = None
    entry_point = self.sched.config.VARIANTS[self.variant]
    vm_def = self.vm_info["vm_def"]
    vm_def.dry_run = dry_run

    # Set heap limit
    heap_limit_kb = self.sched.config.HEAP_LIMIT
    stack_limit_kb = self.sched.config.STACK_LIMIT
    in_proc_iters = self.vm_info["n_iterations"]

    # Snapshot CPU throttle counters so we can detect throttling that
    # happened during the benchmark (checked below).
    if not dry_run:
        self.sched.platform.collect_starting_throttle_counts()

    stdout, stderr, rc, envlog_filename, timed_out = \
        vm_def.run_exec(entry_point, in_proc_iters, self.parameter,
                        heap_limit_kb, stack_limit_kb, self.key,
                        self.key_pexec_idx)

    if timed_out:
        # Wrapper was killed; there are no results to parse.
        measurements = self.empty_measurements
        instr_data = {}
        flag = "T"
    elif not dry_run:
        try:
            self.sched.platform.check_throttle_counts(self.sched.manifest)
            measurements = util.check_and_parse_execution_results(
                stdout, stderr, rc, self.sched.config, self.key,
                instrument=vm_def.instrument)
            flag = "C"
        except util.RerunExecution as e:
            # Transient problem (e.g. throttling): mail the operator and
            # leave the execution outstanding so it is attempted again.
            subject = ("Benchmark needs to be re-run: %s (exec_idx=%s)" %
                       (self.key, self.sched.manifest.next_exec_idx))
            util.log_and_mail(mailer, warn, subject,
                              e.message, manifest=self.sched.manifest,
                              bypass_limiter=True)
            measurements = self.empty_measurements
            flag = "O"  # i.e. still outstanding
        except util.ExecutionFailed as e:
            # Hard failure: record it and move on.
            util.log_and_mail(mailer, error, "Benchmark failure: %s" %
                              self.key, e.message,
                              manifest=self.sched.manifest)
            measurements = self.empty_measurements
            flag = "E"

        # Collect instrumentation data
        if vm_def.instrument and flag == "C":
            instr_data = vm_def.get_instr_data()
            # Sanity check: one instrumentation record per in-process
            # iteration, for every instrumentation key.
            for k, v in instr_data.iteritems():
                assert len(instr_data[k]) == in_proc_iters
        else:
            # The benchmark either failed, needs to be re-run, or had
            # instrumentation turned off.
            instr_data = {}
    else:
        # Dry run: pretend the execution completed with no data.
        measurements = self.empty_measurements
        instr_data = {}
        flag = "C"

    # We print the status *after* benchmarking, so that I/O cannot be
    # committed during benchmarking. In production, we will be rebooting
    # before the next execution, so we are grand.
    info("Finished '%s(%d)' (%s variant) under '%s'" %
         (self.benchmark, self.parameter, self.variant, self.vm_name))

    # Move the environment log out of /tmp.
    #
    # We don't do this for re-runs (O) as the log for the re-run pexec is
    # the one we want.
    #
    # We don't do this for timeouts (T) because the wrapper script is
    # killed upon timeout, and thus doesn't get a chance to log the
    # environment.
    if not dry_run and flag not in ("O", "T"):
        key_exec_num = self.sched.manifest.completed_exec_counts[self.key]
        util.stash_envlog(envlog_filename, self.sched.config,
                          self.sched.platform, self.key, key_exec_num)

    # Every path above must have set a flag by now.
    assert flag is not None
    return measurements, instr_data, flag