def test_testcases(base_dir, soluzione, language, assume=None):
    """Evaluate a solution on every testcase of a task and print a report.

    base_dir (str): directory of the task, laid out as expected by
        YamlLoader (the contest directory is its parent).
    soluzione (str): path, relative to base_dir, of the solution to test
        (a compiled executable, or a generator program for OutputOnly
        tasks).
    language (str): language code of the solution (ignored for
        OutputOnly tasks).
    assume (str|None): if not None, the automatic answer given to the
        "stop on repeated timeouts?" prompt instead of reading stdin.

    return: zip of (points, comment, info) per evaluated testcase.
    """
    # Module-level caches: the task and the FileCacher survive across
    # calls so repeated invocations don't reload everything.
    global task, file_cacher

    # Use a disabled FileCacher with a FSBackend in order to avoid to fill
    # the database with junk and to save up space.
    if file_cacher is None:
        file_cacher = FileCacher(path=os.path.join(config.cache_dir,
                                                   'cmsMake'),
                                 enabled=False)

    # Load the task (only once; cached in the module-level global).
    if task is None:
        loader = YamlLoader(os.path.realpath(os.path.join(base_dir, "..")),
                            file_cacher)
        # Normally we should import the contest before, but YamlLoader
        # accepts get_task() even without previous get_contest() calls
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Prepare the EvaluationJob
    dataset = task.active_dataset
    if dataset.task_type != "OutputOnly":
        # Compiled-solution task: store the executable in the cacher and
        # build one EvaluationJob per testcase around it.
        digest = file_cacher.put_file_from_path(
            os.path.join(base_dir, soluzione),
            "Solution %s for task %s" % (soluzione, task.name))
        executables = {task.name: Executable(filename=task.name,
                                             digest=digest)}
        jobs = [(t, EvaluationJob(
            language=language,
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            executables=executables,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit)) for t in dataset.testcases]
        tasktype = get_task_type(dataset=dataset)
    else:
        # OutputOnly task: run `soluzione` as a generator over each input
        # to produce the output files, then evaluate those.
        print("Generating outputs...", end='')
        files = {}
        for t in sorted(dataset.testcases.keys()):
            with file_cacher.get_file(dataset.testcases[t].input) as fin:
                with TemporaryFile() as fout:
                    print(str(t), end='')
                    # NOTE(review): `call` here is presumably
                    # subprocess.call imported at file top — confirm.
                    call(soluzione, stdin=fin, stdout=fout, cwd=base_dir)
                    # Rewind before re-reading the generated output.
                    fout.seek(0)
                    digest = file_cacher.put_file_from_fobj(fout)
            outname = "output_%s.txt" % t
            files[outname] = File(filename=outname, digest=digest)
        jobs = [(t, EvaluationJob(
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            files=files,
            input=dataset.testcases[t].input,
            output=dataset.testcases[t].output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit)) for t in dataset.testcases]
        # Tag each job with its testcase codename so the task type can
        # pick the right output file.
        for k, job in jobs:
            job._key = k
        tasktype = get_task_type(dataset=dataset)
        print()

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for jobinfo in sorted(jobs):
        print(jobinfo[0], end='')
        sys.stdout.flush()
        job = jobinfo[1]
        # Skip the testcase if we decide to consider everything to
        # timeout
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Evaluate testcase
        last_status = status
        tasktype.evaluate(job, file_cacher)
        if dataset.task_type != "OutputOnly":
            status = job.plus["exit_status"]
            info.append("Time: %5.3f Wall: %5.3f Memory: %s" %
                        (job.plus["execution_time"],
                         job.plus["execution_wall_clock_time"],
                         mem_human(job.plus["execution_memory"])))
        else:
            # No sandbox run happened for OutputOnly, so there are no
            # resource statistics to report.
            status = "ok"
            info.append("N/A")
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(jobinfo[0])

        # If we saw two consecutive timeouts, ask whether we want to
        # consider everything to timeout
        if ask_again and status == "timeout" and last_status == "timeout":
            print()
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                # Non-interactive mode: echo and use the preset answer.
                print(assume)
                tmp = assume
            else:
                # raw_input: this code targets Python 2.
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                # Answered no: stop asking for the rest of this run.
                ask_again = False

    # Result pretty printing: align comments and resource info columns.
    print()
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (i, p, c, b) in zip(tcnames, points, comments, info):
        print("%s) %5.2lf --- %s [%s]" % (i, p, c.ljust(clen),
                                          b.center(ilen)))

    return zip(points, comments, info)
def test_testcases(base_dir, soluzione, language, assume=None):
    """Run a compiled solution over all testcases of a task and report.

    base_dir (str): directory of the task, laid out as expected by
        YamlLoader (the contest directory is its parent).
    soluzione (str): path, relative to base_dir, of the compiled
        solution to evaluate.
    language (str): language code of the solution.
    assume (str|None): if not None, the automatic answer given to the
        "stop on repeated timeouts?" prompt instead of reading stdin.

    return: zip of (points, comment, info) per evaluated testcase.
    """
    # Both caches live at module level so repeated calls reuse them.
    global task, file_cacher

    # A null-backed FileCacher keeps the junk produced here out of the
    # database.
    if file_cacher is None:
        file_cacher = FileCacher(null=True)

    # Load the task definition on first use.
    # TODO - This implies copying a lot of data to the FileCacher,
    # which is annoying if you have to do it continuously; it would be
    # better to use a persistent cache (although local, possibly
    # filesystem-based instead of database-based) and somehow detect
    # when the task has already been loaded
    if task is None:
        loader = YamlLoader(
            os.path.realpath(os.path.join(base_dir, "..")),
            file_cacher)
        # Normally we should import the contest before, but YamlLoader
        # accepts get_task() even without previous get_contest() calls
        task = loader.get_task(os.path.split(os.path.realpath(base_dir))[1])

    # Store the executable once, then build one EvaluationJob per
    # testcase around it.
    dataset = task.active_dataset
    digest = file_cacher.put_file_from_path(
        os.path.join(base_dir, soluzione),
        "Solution %s for task %s" % (soluzione, task.name))
    executables = {task.name: Executable(filename=task.name, digest=digest)}
    jobs = []
    for codename in dataset.testcases:
        testcase = dataset.testcases[codename]
        jobs.append((codename, EvaluationJob(
            language=language,
            task_type=dataset.task_type,
            task_type_parameters=json.loads(dataset.task_type_parameters),
            managers=dict(dataset.managers),
            executables=executables,
            input=testcase.input,
            output=testcase.output,
            time_limit=dataset.time_limit,
            memory_limit=dataset.memory_limit)))
    tasktype = get_task_type(dataset=dataset)

    ask_again = True
    last_status = "ok"
    status = "ok"
    stop = False
    info = []
    points = []
    comments = []
    tcnames = []
    for codename, job in sorted(jobs):
        print(codename, end='')
        sys.stdout.flush()

        # Once the user opted to stop, mark every remaining testcase as
        # timed out without actually running it.
        if stop:
            info.append("Time limit exceeded")
            points.append(0.0)
            comments.append("Timeout.")
            continue

        # Run the testcase in the sandbox and collect its statistics.
        last_status = status
        tasktype.evaluate(job, file_cacher)
        status = job.plus["exit_status"]
        info.append("Time: %5.3f Wall: %5.3f Memory: %s" %
                    (job.plus["execution_time"],
                     job.plus["execution_wall_clock_time"],
                     mem_human(job.plus["execution_memory"])))
        points.append(float(job.outcome))
        comments.append(format_status_text(job.text))
        tcnames.append(codename)

        # After two consecutive timeouts, offer to give up on the rest.
        if ask_again and status == "timeout" and last_status == "timeout":
            print()
            print("Want to stop and consider everything to timeout? [y/N]",
                  end='')
            if assume is not None:
                # Non-interactive mode: echo and use the preset answer.
                print(assume)
                tmp = assume
            else:
                # raw_input: this code targets Python 2.
                tmp = raw_input().lower()
            if tmp in ['y', 'yes']:
                stop = True
            else:
                ask_again = False

    # Pretty-print the results with the comment and info columns aligned.
    print()
    clen = max(len(c) for c in comments)
    ilen = max(len(i) for i in info)
    for (name, score, comment, extra) in zip(tcnames, points, comments, info):
        print("%s) %5.2lf --- %s [%s]" % (name, score, comment.ljust(clen),
                                          extra.center(ilen)))

    return zip(points, comments, info)