def eval(self, function, *args, **kwargs):
    """Run *function* in a forked child inside this sandbox and return its result.

    The child process enters the sandbox (``with self``), calls
    ``function(*args, **kwargs)``, serializes the result as JSON and ships it
    back to the parent through a pipe.  The parent deserializes the payload
    and returns it.

    Raises:
        ValueError: if the child was killed by a signal (restricted syscall
            or resource overuse), exited with a non-zero code, or produced
            output that is not valid JSON.
    """
    r_pipe, w_pipe = os.pipe()
    pid = os.fork()
    if pid == 0:
        # --- child: evaluate the function inside the sandbox ---
        with self:
            valid = True
            os.close(r_pipe)  # the child only writes
            result = function(*args, **kwargs)
            try:
                payload = json.dumps(utils.jsoniblify(result))
            except Exception:  # result is not JSON-serializable
                payload = json.dumps({"invalidJSON": str(result)})
                valid = False
            # A file object flushes the whole payload; a single os.write()
            # call may only write part of a large buffer (short write).
            with os.fdopen(w_pipe, "wb") as w_file:
                w_file.write(payload.encode("UTF-8"))
            sys.exit(0 if valid else 1)
    else:
        # --- parent: read the child's answer, then reap it ---
        os.close(w_pipe)  # the parent only reads
        with os.fdopen(r_pipe, "r") as r_file:
            content = r_file.read()
        (_pid, status) = os.waitpid(pid, 0)
        if status & 0xff != 0:  # low byte non-zero: killed by a signal
            raise ValueError("The function tried to access to restricted syscalls or used too much resources: status={}".format(status))
        if (status & 0xff00) != 0:  # high byte: non-zero exit code
            raise ValueError("The subprocess exited with resultcode {}; returned content: {}".format(status >> 8, content))
        try:
            return json.loads(content)
        except ValueError:
            raise ValueError("Content {} does not follow JSON format".format(content))
def main(argv):
    """Command-line entry point: build the input dictionary, run the test and
    dump the JSON result.

    Args:
        argv: list of command-line arguments (without the program name).

    Returns:
        0 on success, 1 when the test reports an error, -1 on a usage or I/O
        problem (kept as-is for callers relying on these codes).
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dict', action="append", default=[],
                        help="Use the specified JSON string as the input dictionary (prefix by @ to specify a path, @- for stdin)")
    parser.add_argument("-e", "--entry", action="append", default=[],
                        help="Add a new entry 'key=value' in the dictionary (prefix the value by @ to specify a file where a string must be read, # to copy the value from another previously specified entry)")
    parser.add_argument("-s", "--sandbox", choices=list(taxonomy.SANDBOXES), default=taxonomy.DEFAULT_SANDBOX,
                        help="Use a preset sandbox configuration")
    parser.add_argument("-o", "--output", default="-",
                        help="File where the output must be written (by default stdout)")
    parser.add_argument("-b", "--bench", choices=list(taxonomy.BENCHS), action="append", default=[],
                        help="Specify a whitelisted bench")
    parser.add_argument("-l", "--language", default=None,
                        help="Print the internationalized strings in the given language (2 letter ISO code like en, fr, de...)")
    parser.add_argument("-r", "--rootdir", default=None,
                        help="Root directory under where local files must be searched")
    args = parser.parse_args(argv)

    data = OrderedDict()

    # configure the logger
    logging.getLogger("").setLevel(logging.DEBUG)
    logging.getLogger("").addHandler(logging.StreamHandler())

    # merge every -d/--dict argument into the data dictionary
    for d in args.dict:
        try:
            data.update(load_dictionary(d))
        except Exception as e:
            traceback.print_exc()
            print("Cannot read the dictionary specified with {} due to {}".format(d, e), file=sys.stderr)
            return -1

    # apply every -e/--entry "key=value" override
    for entry in args.entry:
        try:
            k, v = entry.split("=", 1)
            data[k] = read_string(v, data)
        except Exception as e:
            print("Cannot add the new entry {} in the dictionary due to {}".format(entry, e), file=sys.stderr)
            return -1
    # the dictionary is ready!

    try:
        dump_file = sys.stdout if args.output == "-" else open(args.output, "w")
    except OSError:  # narrowed from a bare except: only opening can fail here
        print("Cannot open the output file {}".format(args.output), file=sys.stderr)
        return -1

    output = run_test(data, taxonomy.SANDBOXES[args.sandbox], args.bench, rootdir=args.rootdir)
    if args.language:
        output = jsoniblify(i18nize(output), language=args.language)

    # only close files we opened ourselves: the original `with dump_file:`
    # closed sys.stdout when the output was "-"
    try:
        print(json.dumps(output, indent=True), file=dump_file)
    finally:
        if dump_file is not sys.stdout:
            dump_file.close()

    if not output or output.get("error", False):
        return 1
    return 0
def create(self, grade, message=None, weight=1.0, attributes=None):
    """Normalize a grade/message/weight triple and build a TestVerdict.

    Args:
        grade: bool, None, str or number. True/False map to 1.0/0.0, None to
            -1.0 (fatal failure); a str is used as the message with grade 0.0;
            anything not convertible to float becomes -2.0 (exception).
        message: optional message; defaults to the class message matching the
            grade; formatted with *attributes* when it is a str or i18n.
        weight: verdict weight, coerced to float (1.0 on failure).
        attributes: mapping used to format the message (defaults to {}).

    Returns:
        TestVerdict: the normalized verdict.
    """
    if attributes is None:
        attributes = {}

    # normalize the grade to a float
    if grade is True:
        grade = 1.0
    elif grade is False:
        grade = 0.0
    elif grade is None:
        grade = -1.0
    elif isinstance(grade, str):
        # a bare string is a failure message
        message = grade
        grade = 0.0
    try:
        grade = float(grade)
    except (TypeError, ValueError):  # narrowed from a bare except
        grade = -2.0  # problem when expliciting the verdict

    # pick the default message matching the grade bucket
    if message is None:
        if grade < -1.0:
            message = self.DEFAULT_EXCEPTION_MESSAGE
        elif grade < 0.0:
            message = self.DEFAULT_FATAL_FAILURE_MESSAGE
        elif grade == 0.0:
            message = self.DEFAULT_FAILURE_MESSAGE
        elif grade < 1.0:
            message = self.DEFAULT_PARTIAL_SUCCESS_MESSAGE.format(grade)
        elif grade >= 1.0:  # kept as elif: NaN grades fall through with message=None
            message = self.DEFAULT_SUCCESS_MESSAGE

    if isinstance(message, (str, i18n)):
        message = message.format(**attributes)
    try:
        message = jsoniblify(message)
    except Exception:  # message not serializable: drop it
        message = None

    try:
        weight = float(weight)
    except (TypeError, ValueError):  # narrowed from a bare except
        weight = 1.0
    return TestVerdict(grade, message, weight)
def eval(self, function, *args, **kwargs):
    """Evaluate the function in this sandbox.

    Forks a child that enters the sandbox, runs ``function(*args, **kwargs)``,
    serializes the result as JSON and writes it to a pipe; the parent reads
    the pipe, waits for the child and returns the decoded result.

    Raises:
        ValueError: if the child was killed by a signal, exited non-zero, or
            produced non-JSON output.
    """
    r_pipe, w_pipe = os.pipe()
    pid = os.fork()
    if pid == 0:
        # child process: run the function inside the sandbox context
        with self:
            valid = True
            os.close(r_pipe)  # the child only writes
            result = function(*args, **kwargs)
            try:
                result2 = json.dumps(utils.jsoniblify(result))
            except:
                # the result cannot be serialized: report it as a string
                result2 = json.dumps({"invalidJSON": str(result)})
                valid = False
            os.write(w_pipe, result2.encode("UTF-8"))
            os.close(w_pipe)
            # exit code signals serialization success to the parent
            sys.exit(0 if valid else 1)
    else:
        # parent process: read the answer then reap the child
        os.close(w_pipe)  # the parent only reads
        r_pipe = os.fdopen(r_pipe, "r")
        with r_pipe:
            content = r_pipe.read()
        (_pid, status) = os.waitpid(pid, 0)
        if status & 0xff != 0:
            # low byte of the wait status: the child was killed by a signal
            raise ValueError(
                "The function tried to access to restricted syscalls or used too much resources: status={}"
                .format(status))
        if (status & 0xff00) != 0:
            # high byte: the child exited with a non-zero code
            raise ValueError(
                "The subprocess exited with resultcode {}; returned content: {}"
                .format(status >> 8, content))
        try:
            return json.loads(content)
        except:
            raise ValueError(
                "Content {} does not follow JSON format".format(
                    content))
def to_dict(self):
    """Convert the verdict to a JSONizable dictionary form, adding the
    per-test ``results`` entry to the parent representation."""
    # Python 3 zero-argument super() instead of the Py2-style explicit form
    d = super().to_dict()
    d.update(results=jsoniblify(self.results))
    return d
def to_dict(self):
    """Convert the result to a JSONizable dictionary form"""
    status_name = str(self.status.name).lower()
    return dict(status=status_name, grades=jsoniblify(self.grades))
def run_test(data, sandbox, bench_whitelist, rootdir=None):
    """
    Dictionary of the test instance is provided by the data argument
    The result output is returned as a dictionary

    Some common problems are specified with an entry in the dictionary (with an explanation as value):
    - formatException: the PL dictionary does not follow the format (e.g. no grader specified)
    - resourceException: the grading test consumed too much resource (CPU time and/or memory, dangerous syscalls...)
    - platformException: due to a bug in the bench test code
    """
    # select and instantiate the bench named in the data dictionary
    try:
        benchname = data["bench"]  # select the bench to use
        if benchname not in bench_whitelist:  # check if the grader is authorized
            return {"error": True,
                    "resourceException": "The specified bench {} is not authorized".format(benchname)}
        bench = taxonomy.BENCHS[benchname](data)
        bench.set_rootdir(rootdir)
    except Exception as e:
        return {"error": True,
                "formatException": "The specified bench is not available: {}".format(data.get("bench", None)),
                "exception": str(e)}

    # the sandbox resources may be lowered using the data dictionary
    try:
        if "memoryLimit" in data:
            sandbox.memory_limit = min(sandbox.memory_limit, int(data["memoryLimit"]))
        if "cpuLimit" in data:
            sandbox.cpu_limit = min(sandbox.cpu_limit, int(data["cpuLimit"]))
    except (TypeError, ValueError):  # narrowed from a bare except
        return {"error": True, "formatException": "Adjusting resource limits failed"}

    pipe_r, pipe_w = os.pipe()
    print("Forking...", file=sys.stderr)
    sys.stdout.flush()
    sys.stderr.flush()
    pid = os.fork()
    if pid > 0:  # parent case
        # wait for the end of execution
        os.close(pipe_w)
        pipe_r = os.fdopen(pipe_r, "r")  # wrap with a file object
        read_output = pipe_r.read(config.MAX_OUTPUT_SIZE)
        if pipe_r.read(1):  # data remain to be read
            # closing the pipe unblocks the child (BrokenPipeError on its
            # side) so we can reap it instead of leaving a zombie behind
            pipe_r.close()
            os.waitpid(pid, 0)
            return {"error": True, "platformException": "The test bench is too talkative",
                    "output": read_output}
        # close through the file object; os.close(pipe_r.fileno()) left the
        # file object with a dangling fd that was closed again on GC
        pipe_r.close()
        resultcode = os.waitpid(pid, 0)[1]
        sig = resultcode & 0x00ff  # termination signal (low byte of the wait status)
        print("Resultcode: {}".format(resultcode), file=sys.stderr)
        if sig == signal.SIGXCPU:
            return {"error": True, "resourceException": "Code consumed too much CPU time"}
        elif sig in (signal.SIGKILL, signal.SIGSYS):
            return {"error": True,
                    "resourceException": "Grader process was killed due to anormal syscalls or overconsumption of resources (memory, time...)"}
        elif sig == signal.SIGSEGV:
            return {"error": True, "platformException": "A segmentation error was detected"}
        try:
            return json.loads(read_output, object_pairs_hook=OrderedDict)
        except ValueError:  # json decoding failed
            return {"error": True,
                    "platformException": "The produced output does not follow the JSON format",
                    "output": read_output}
    else:
        # child case: run the bench inside the sandbox and send back the JSON result
        os.close(pipe_r)
        pipe_w = os.fdopen(pipe_w, "w")  # wrap with a file object
        try:
            bench.prepare()
            with sandbox:
                result = bench.execute()
                if hasattr(result, "to_dict"):
                    result = result.to_dict()
                result = jsoniblify(result)
        except Exception as e:
            if not sandbox.enabled:
                traceback.print_exc()
            result = {"error": True, "platformException": "Exception while running the test bench",
                      "cause": str(e)}
        try:
            output = json.dumps(result)
        except Exception:
            # serialize the fallback too: printing the raw dict would give the
            # parent a Python repr instead of JSON
            output = json.dumps({"error": True, "platformException": "Cannot JSONify the result",
                                 "output": str(result)})
        try:
            print(output, file=pipe_w)
        except BrokenPipeError:
            pass  # since we are too talkative the parent process does not listen to us anymore
        pipe_w.close()
        sys.exit(0)
def to_dict(self):
    """Convert the result to a JSONizable dictionary form"""
    serialized_message = jsoniblify(self.message)
    return OrderedDict((("grade", self.grade), ("message", serialized_message)))
def to_dict(self):
    """Convert the result to a JSONizable dictionary form"""
    result = {}
    result["status"] = str(self.status.name).lower()
    result["grades"] = jsoniblify(self.grades)
    return result
import argparse

# Command-line interface: parse the options, load the PL dictionary
# (optionally sandboxed) and dump it as JSON on stdout.
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--root",
                    help="Root directory (that forbids climbing upon it)")
parser.add_argument("-l", "--language", default=None,
                    help="Filter the internationalized entries by keeping only those matching the language")
parser.add_argument("-d", "--disable-sandbox", action="store_true", default=False,
                    help="Disable the sandbox mode to generate the assignment (useful for debug purposes)")
parser.add_argument("filepath",
                    help="Filepath to the PL-formatted file (- to read from STDIN)")
args = parser.parse_args(sys.argv[1:])

dictionary = load_dictionary(args.filepath, args.root,
                             sandbox=not args.disable_sandbox)
print(json.dumps(jsoniblify(dictionary, language=args.language)))