def evaluate(self, job, file_cacher):
    """See TaskType.evaluate."""
    # Create the sandbox.
    sandbox = create_sandbox(file_cacher)

    # Prepare the execution.
    executable_filename = job.executables.keys()[0]
    language = job.language
    commands = get_evaluation_commands(language, executable_filename)
    executables_to_get = {
        executable_filename: job.executables[executable_filename].digest
    }
    input_filename, output_filename = self.parameters[1]
    stdin_redirect = None
    stdout_redirect = None
    files_allowing_write = []
    if input_filename == "":
        input_filename = "input.txt"
        stdin_redirect = input_filename
    if output_filename == "":
        output_filename = "output.txt"
        stdout_redirect = output_filename
    else:
        files_allowing_write.append(output_filename)
    files_to_get = {
        input_filename: job.input
    }

    # Put the required files into the sandbox.
    for filename, digest in executables_to_get.iteritems():
        sandbox.create_file_from_storage(filename, digest, executable=True)
    for filename, digest in files_to_get.iteritems():
        sandbox.create_file_from_storage(filename, digest)

    # Actually perform the execution.
    success, plus = evaluation_step(
        sandbox,
        commands,
        job.time_limit,
        job.memory_limit,
        writable_files=files_allowing_write,
        stdin_redirect=stdin_redirect,
        stdout_redirect=stdout_redirect)

    job.sandboxes = [sandbox.path]
    job.plus = plus

    outcome = None
    text = None

    # Error in the sandbox: nothing to do!
    if not success:
        pass

    # Contestant's error: the marks won't be good.
    elif not is_evaluation_passed(plus):
        outcome = 0.0
        text = human_evaluation_message(plus)
        if job.get_output:
            job.user_output = None

    # Otherwise, advance to checking the solution.
    else:

        # Check that the output file was created.
        if not sandbox.file_exists(output_filename):
            outcome = 0.0
            text = [N_("Evaluation didn't produce file %s"),
                    output_filename]
            if job.get_output:
                job.user_output = None

        else:
            # If requested, put the output file into the storage.
            if job.get_output:
                job.user_output = sandbox.get_file_to_storage(
                    output_filename,
                    "Output file in job %s" % job.info,
                    trunc_len=100 * 1024)

            # If just asked to execute, fill text and set a dummy
            # outcome.
            if job.only_execution:
                outcome = 0.0
                text = [N_("Execution completed successfully")]

            # Otherwise evaluate the output file.
            else:

                # Put the reference solution into the sandbox.
                sandbox.create_file_from_storage(
                    "res.txt",
                    job.output)

                # Check the solution with white_diff.
                if self.parameters[2] == "diff":
                    outcome, text = white_diff_step(
                        sandbox, output_filename, "res.txt")

                # Check the solution with a comparator.
                elif self.parameters[2] == "comparator":
                    manager_filename = "checker"
                    if manager_filename not in job.managers:
                        logger.error("Configuration error: missing or "
                                     "invalid comparator (it must be "
                                     "named 'checker')",
                                     extra={"operation": job.info})
                        success = False
                    else:
                        sandbox.create_file_from_storage(
                            manager_filename,
                            job.managers[manager_filename].digest,
                            executable=True)
                        # Rewrite the input file. The untrusted
                        # contestant program should not be able to
                        # modify it; however, the grader may destroy
                        # the input file to prevent the contestant's
                        # program from directly accessing it. Since we
                        # cannot create files that already exist in
                        # the sandbox, we try removing the file first.
                        try:
                            sandbox.remove_file(input_filename)
                        except OSError:
                            # Let us be extra sure that the file was
                            # actually removed and we did not mess up
                            # the permissions.
                            assert not sandbox.file_exists(input_filename)
                        sandbox.create_file_from_storage(
                            input_filename,
                            job.input)
                        # Allow using any number of processes (e.g.,
                        # one may want to write a bash checker that
                        # calls other processes). Set to a high but
                        # finite number to avoid fork-bombing the
                        # worker.
                        sandbox.max_processes = 1000
                        success, _ = evaluation_step(
                            sandbox,
                            [["./%s" % manager_filename,
                              input_filename, "res.txt",
                              output_filename]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox)
                            except ValueError as e:
                                logger.error("Invalid output from "
                                             "comparator: %s", e.message,
                                             extra={"operation": job.info})
                                success = False

                else:
                    raise ValueError("Unrecognized third parameter"
                                     " `%s' for Batch tasktype." %
                                     self.parameters[2])
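

# For reference, a minimal sketch of the parameter layout evaluate() consumes
# above. Only parameters[1] and parameters[2] are read by this method; the
# first entry (the compilation mode) is an assumption about the surrounding
# task type and is unused here.
example_parameters = [
    "alone",                      # assumed: compilation mode (unused above)
    ["input.txt", "output.txt"],  # parameters[1]: I/O filenames; "" means
                                  # redirect via stdin/stdout instead
    "comparator",                 # parameters[2]: "diff" (white_diff_step)
                                  # or "comparator" (custom checker)
]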
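

# A minimal sketch of a comparator matching the invocation above, i.e.
#     ./checker <input file> <reference output> <contestant output>
# It assumes (this is not shown in the snippet itself) that
# extract_outcome_and_text() reads the outcome, a float, from the checker's
# stdout and the human-readable message from its stderr; the token-wise
# comparison below is only a stand-in for task-specific checking logic.
import sys


def example_checker():
    # argv[1] is the input file; it is not needed for this toy check.
    res_file, output_file = sys.argv[2], sys.argv[3]
    with open(res_file) as res, open(output_file) as out:
        correct = res.read().split() == out.read().split()
    sys.stdout.write("1.0\n" if correct else "0.0\n")
    sys.stderr.write("Output is correct\n" if correct
                     else "Output isn't correct\n")


if __name__ == "__main__":
    example_checker()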