def destroy_cache(self):
    """Completely remove and destroy the cache.

    Nothing that could have been created by this object will be
    left on disk. After that, this instance isn't usable anymore.

    """
    rmtree(self.file_dir)

def delete(self):
    """Delete the directory where the sandbox operated.

    """
    logger.debug("Deleting sandbox in %s" % self.path)

    # Delete the working directory.
    rmtree(self.path)

def delete(self):
    """Delete the directory where the sandbox operated.

    """
    logger.debug("Deleting sandbox in %s.", self.path)

    # Delete the working directory.
    rmtree(self.path)

def delete(self):
    """Delete the directory where the sandbox operated.

    """
    logger.debug("Deleting sandbox in %s" % self.path)

    # Tell isolate to cleanup the sandbox.
    box_cmd = [self.box_exec] + (["--cg"] if self.cgroup else []) \
        + ["--box-id=%d" % self.box_id]
    subprocess.call(box_cmd + ["--cleanup"])

    # Delete the working directory.
    rmtree(self.outer_temp_dir)

def delete(self):
    """Delete the directory where the sandbox operated.

    """
    logger.debug("Deleting sandbox in %s.", self.path)

    # Tell isolate to cleanup the sandbox.
    if not self._has_cleanedup:
        self.cleanup()

    # Delete the working directory.
    rmtree(self.outer_temp_dir)

def delete(self):
    """Delete the directory where the sandbox operated.

    """
    logger.debug("Deleting sandbox in %s.", self.path)

    # Tell isolate to cleanup the sandbox.
    box_cmd = [self.box_exec] + (["--cg"] if self.cgroup else []) \
        + ["--box-id=%d" % self.box_id]
    subprocess.call(box_cmd + ["--cleanup"])

    # Delete the working directory.
    rmtree(self.outer_temp_dir)

def __init__(self, service=None, path=None, null=False):
    """Initialize.

    By default the database-powered backend will be used, but this
    can be changed using the parameters.

    service (Service|None): the service we are running for. Only
        used if present to determine the location of the
        file-system cache (and to provide the shard number to the
        Sandbox... sigh!).
    path (string|None): if specified, back the FileCacher with a
        file system-based storage instead of the default
        database-based one. The specified directory will be used
        as root for the storage and it will be created if it
        doesn't exist.
    null (bool): if True, back the FileCacher with a NullBackend,
        that just discards every file it receives. This setting
        takes priority over path.

    """
    self.service = service

    if null:
        self.backend = NullBackend()
    elif path is None:
        self.backend = DBBackend()
    else:
        self.backend = FSBackend(path)

    if service is None:
        self.file_dir = tempfile.mkdtemp(dir=config.temp_dir)
        # Delete this directory on exit since it has a random name
        # and won't be used again.
        atexit.register(lambda: rmtree(self.file_dir))
    else:
        self.file_dir = os.path.join(
            config.cache_dir,
            "fs-cache-%s-%d" % (service.name, service.shard))

    self.temp_dir = os.path.join(self.file_dir, "_temp")

    if not mkdir(config.cache_dir) or not mkdir(config.temp_dir) \
            or not mkdir(self.file_dir) or not mkdir(self.temp_dir):
        logger.error("Cannot create necessary directories.")
        raise RuntimeError("Cannot create necessary directories.")

    atexit.register(lambda: rmtree(self.temp_dir))

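# Usage sketch (illustrative, not from the source): exercising the three
# backend modes documented in the constructor above. The import path is
# an assumption based on CMS's usual layout.
from cms.db.filecacher import FileCacher  # assumed module path

db_cacher = FileCacher()                      # default: DBBackend
fs_cacher = FileCacher(path="/tmp/fs-store")  # FSBackend rooted there
null_cacher = FileCacher(null=True)           # NullBackend, discards all
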
def execute(self, entry):
    """Print a print job.

    This is the core of PrintingService.

    entry (QueueEntry): the entry containing the operation to
        perform.

    """
    # TODO: automatically re-enqueue in case of a recoverable
    # error.
    printjob_id = entry.item.printjob_id

    with SessionGen() as session:
        # Obtain print job.
        printjob = PrintJob.get_from_id(printjob_id, session)
        if printjob is None:
            raise ValueError("Print job %d not found in the database."
                             % printjob_id)
        user = printjob.participation.user
        contest = printjob.participation.contest
        timezone = get_timezone(user, contest)
        timestr = format_datetime(printjob.timestamp, timezone)
        filename = printjob.filename

        # Check if it's ready to be printed.
        if printjob.done:
            logger.info("Print job %d was already sent to the printer.",
                        printjob_id)

        directory = tempfile.mkdtemp(dir=config.temp_dir)
        logger.info("Preparing print job in directory %s", directory)

        # Take the base name just to be sure.
        relname = "source_" + os.path.basename(filename)
        source = os.path.join(directory, relname)
        with open(source, "wb") as file_:
            self.file_cacher.get_file_to_fobj(printjob.digest, file_)

        if filename.endswith(".pdf") and config.pdf_printing_allowed:
            source_pdf = source
        else:
            # Convert text to ps.
            source_ps = os.path.join(directory, "source.ps")
            cmd = ["a2ps",
                   source,
                   "--delegate=no",
                   "--output=" + source_ps,
                   "--medium=%s" % config.paper_size.capitalize(),
                   "--portrait",
                   "--columns=1",
                   "--rows=1",
                   "--pages=1-%d" % config.max_pages_per_job,
                   "--header=",
                   "--footer=",
                   "--left-footer=",
                   "--right-footer=",
                   "--center-title=" + filename,
                   "--left-title=" + timestr]
            ret = subprocess.call(cmd, cwd=directory)
            if ret != 0:
                raise Exception(
                    "Failed to convert text file to ps with command: %s "
                    "(error %d)" % (pretty_print_cmdline(cmd), ret))

            if not os.path.exists(source_ps):
                logger.warning("Unable to convert from text to ps.")
                printjob.done = True
                printjob.status = json.dumps([N_("Invalid file")])
                session.commit()
                rmtree(directory)
                return

            # Convert ps to pdf.
            source_pdf = os.path.join(directory, "source.pdf")
            cmd = ["ps2pdf",
                   "-sPAPERSIZE=%s" % config.paper_size.lower(),
                   source_ps]
            ret = subprocess.call(cmd, cwd=directory)
            if ret != 0:
                raise Exception(
                    "Failed to convert ps file to pdf with command: %s "
                    "(error %d)" % (pretty_print_cmdline(cmd), ret))

        # Find out the number of pages.
        with open(source_pdf, "rb") as file_:
            pdfreader = PdfFileReader(file_)
            page_count = pdfreader.getNumPages()

        logger.info("Preparing %d page(s) (plus the title page)",
                    page_count)

        if page_count > config.max_pages_per_job:
            logger.info("Too many pages.")
            printjob.done = True
            printjob.status = json.dumps(
                [N_("Print job has too many pages")])
            session.commit()
            rmtree(directory)
            return

        # Add the title page.
        title_tex = os.path.join(directory, "title_page.tex")
        title_pdf = os.path.join(directory, "title_page.pdf")
        with open(title_tex, "w") as f:
            f.write(self.template_loader.load("title_page.tex")
                    .generate(user=user, filename=filename,
                              timestr=timestr,
                              page_count=page_count,
                              paper_size=config.paper_size))
        cmd = ["pdflatex",
               "-interaction", "nonstopmode",
               title_tex]
        ret = subprocess.call(cmd, cwd=directory)
        if ret != 0:
            raise Exception(
                "Failed to create title page with command: %s "
                "(error %d)" % (pretty_print_cmdline(cmd), ret))

        pdfmerger = PdfFileMerger()
        with open(title_pdf, "rb") as file_:
            pdfmerger.append(file_)
        with open(source_pdf, "rb") as file_:
            pdfmerger.append(file_)
        result = os.path.join(directory, "document.pdf")
        with open(result, "wb") as file_:
            pdfmerger.write(file_)

        try:
            printer_connection = cups.Connection()
            printer_connection.printFile(
                config.printer, result,
                "Printout %d" % printjob_id, {})
        except cups.IPPError as error:
            logger.error("Unable to print: `%s'.", error)
        else:
            printjob.done = True
            printjob.status = json.dumps([N_("Sent to printer")])
            session.commit()
        finally:
            rmtree(directory)

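# Standalone sketch of the page-counting and merging steps above, using
# the same PyPDF2 1.x API (PdfFileReader/PdfFileMerger) the service
# relies on; the file names here are made up for illustration.
from PyPDF2 import PdfFileMerger, PdfFileReader

with open("source.pdf", "rb") as fobj:
    print(PdfFileReader(fobj).getNumPages())

merger = PdfFileMerger()
with open("title_page.pdf", "rb") as title, \
        open("source.pdf", "rb") as body:
    merger.append(title)   # title page first
    merger.append(body)    # then the document itself
    with open("document.pdf", "wb") as out:
        merger.write(out)
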
def evaluate(self, job, file_cacher):
    """See TaskType.evaluate."""
    if len(self.parameters) <= 0:
        num_processes = 1
    else:
        num_processes = self.parameters[0]
    indices = range(num_processes)

    # Create sandboxes and FIFOs.
    sandbox_mgr = create_sandbox(file_cacher, job.multithreaded_sandbox)
    sandbox_user = [
        create_sandbox(file_cacher, job.multithreaded_sandbox)
        for i in indices]
    fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
    fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
    fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
    for i in indices:
        os.mkfifo(fifo_in[i])
        os.mkfifo(fifo_out[i])
        os.chmod(fifo_dir[i], 0o755)
        os.chmod(fifo_in[i], 0o666)
        os.chmod(fifo_out[i], 0o666)

    # First step: we start the manager.
    manager_filename = "manager"
    manager_command = ["./%s" % manager_filename]
    for i in indices:
        manager_command.append(fifo_in[i])
        manager_command.append(fifo_out[i])
    manager_executables_to_get = {
        manager_filename: job.managers[manager_filename].digest
    }
    manager_files_to_get = {
        "input.txt": job.input
    }
    manager_allow_dirs = fifo_dir
    for filename, digest in manager_executables_to_get.iteritems():
        sandbox_mgr.create_file_from_storage(
            filename, digest, executable=True)
    for filename, digest in manager_files_to_get.iteritems():
        sandbox_mgr.create_file_from_storage(filename, digest)
    manager = evaluation_step_before_run(
        sandbox_mgr,
        manager_command,
        num_processes * job.time_limit,
        0,
        allow_dirs=manager_allow_dirs,
        writable_files=["output.txt"],
        stdin_redirect="input.txt")

    # Second step: we start the user submission compiled with the
    # stub.
    language = get_language(job.language)
    executable_filename = job.executables.keys()[0]
    executables_to_get = {
        executable_filename: job.executables[executable_filename].digest
    }
    processes = [None for i in indices]
    for i in indices:
        args = [fifo_out[i], fifo_in[i]]
        if num_processes != 1:
            args.append(str(i))
        commands = language.get_evaluation_commands(
            executable_filename,
            main="stub",
            args=args)
        user_allow_dirs = [fifo_dir[i]]
        for filename, digest in executables_to_get.iteritems():
            sandbox_user[i].create_file_from_storage(
                filename, digest, executable=True)
        # Assumes that the actual execution of the user solution is
        # the last command in commands, and that the previous ones
        # are "setup" steps that don't need tight control.
        if len(commands) > 1:
            evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
        processes[i] = evaluation_step_before_run(
            sandbox_user[i],
            commands[-1],
            job.time_limit,
            job.memory_limit,
            allow_dirs=user_allow_dirs)

    # Consume output.
    wait_without_std(processes + [manager])
    # TODO: check exit codes with translate_box_exitcode.

    user_results = [evaluation_step_after_run(s) for s in sandbox_user]
    success_user = all(r[0] for r in user_results)
    plus_user = reduce(merge_evaluation_results,
                       [r[1] for r in user_results])
    success_mgr, unused_plus_mgr = \
        evaluation_step_after_run(sandbox_mgr)

    if plus_user["exit_status"] == Sandbox.EXIT_OK and \
            plus_user["execution_time"] >= job.time_limit:
        plus_user["exit_status"] = Sandbox.EXIT_TIMEOUT

    # Merge results.
    job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
    job.plus = plus_user

    # If at least one evaluation had problems, we report the
    # problems.
    if not success_user or not success_mgr:
        success, outcome, text = False, None, None
    # If the user sandbox detected some problem (timeout, ...),
    # the outcome is 0.0 and the text describes that problem.
    elif not is_evaluation_passed(plus_user):
        success = True
        outcome, text = 0.0, human_evaluation_message(plus_user)
    # Otherwise, we use the manager to obtain the outcome.
    else:
        success = True
        outcome, text = extract_outcome_and_text(sandbox_mgr)

    # If asked so, save the output file, provided that it exists.
    if job.get_output:
        if sandbox_mgr.file_exists("output.txt"):
            job.user_output = sandbox_mgr.get_file_to_storage(
                "output.txt", "Output file in job %s" % job.info)
        else:
            job.user_output = None

    # Whatever happened, we conclude.
    job.success = success
    job.outcome = "%s" % outcome if outcome is not None else None
    job.text = text

    delete_sandbox(sandbox_mgr, job.success)
    for s in sandbox_user:
        delete_sandbox(s, job.success)
    if not config.keep_sandbox:
        for d in fifo_dir:
            rmtree(d)

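# Toy demonstration (not from the source) of the FIFO handshake the
# evaluation above sets up: opening a named pipe blocks until the peer
# opens the other end, which is what lets the manager and the user stub
# synchronize without any extra IPC machinery.
import os
import tempfile

demo_dir = tempfile.mkdtemp()
demo_fifo = os.path.join(demo_dir, "in0")
os.mkfifo(demo_fifo)

if os.fork() == 0:
    # Child: plays the user process, writes one message and exits.
    with open(demo_fifo, "w") as pipe:
        pipe.write("42\n")
    os._exit(0)
else:
    # Parent: plays the manager; open() blocks until the child opens
    # the write end, then the read returns the message.
    with open(demo_fifo) as pipe:
        assert pipe.readline().strip() == "42"
    os.wait()
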
def do_export(self):
    """Run the actual export code."""
    logger.info("Starting export.")

    export_dir = self.export_target
    archive_info = get_archive_info(self.export_target)

    if archive_info["write_mode"] != "":
        # We are able to write to this archive.
        if os.path.exists(self.export_target):
            logger.critical("The specified file already exists, "
                            "I won't overwrite it.")
            return False
        export_dir = os.path.join(tempfile.mkdtemp(),
                                  archive_info["basename"])

    logger.info("Creating dir structure.")
    try:
        os.mkdir(export_dir)
    except OSError:
        logger.critical("The specified directory already exists, "
                        "I won't overwrite it.")
        return False

    files_dir = os.path.join(export_dir, "files")
    descr_dir = os.path.join(export_dir, "descriptions")
    os.mkdir(files_dir)
    os.mkdir(descr_dir)

    with SessionGen() as session:
        # Export files.
        logger.info("Exporting files.")
        if self.dump_files:
            for contest_id in self.contests_ids:
                contest = Contest.get_from_id(contest_id, session)
                files = contest.enumerate_files(self.skip_submissions,
                                                self.skip_user_tests,
                                                self.skip_generated)
                for file_ in files:
                    if not self.safe_get_file(
                            file_,
                            os.path.join(files_dir, file_),
                            os.path.join(descr_dir, file_)):
                        return False

        # Export data in JSON format.
        if self.dump_model:
            logger.info("Exporting data to a JSON file.")

            # We use strings because they'll be the keys of a JSON
            # object.
            self.ids = {}
            self.queue = []

            data = dict()

            for cls, lst in [(Contest, self.contests_ids),
                             (User, self.users_ids),
                             (Task, self.tasks_ids)]:
                for i in lst:
                    obj = cls.get_from_id(i, session)
                    self.get_id(obj)

            # Specify the "root" of the data graph.
            data["_objects"] = self.ids.values()

            while len(self.queue) > 0:
                obj = self.queue.pop(0)
                data[self.ids[obj.sa_identity_key]] = \
                    self.export_object(obj)

            data["_version"] = model_version

            with io.open(os.path.join(export_dir,
                                      "contest.json"), "wb") as fout:
                json.dump(data, fout, encoding="utf-8",
                          indent=4, sort_keys=True)

    # If the admin requested export to file, we do that.
    if archive_info["write_mode"] != "":
        archive = tarfile.open(self.export_target,
                               archive_info["write_mode"])
        archive.add(export_dir, arcname=archive_info["basename"])
        archive.close()
        rmtree(export_dir)

    logger.info("Export finished.")
    return True

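# Minimal sketch of the id/queue pattern driving do_export above:
# get_id() hands out a fresh string key for each unseen object and
# enqueues it, so the while-loop serializes the whole reachable graph
# exactly once. This helper is illustrative, not the source's own
# implementation.
def get_id(ids, queue, obj):
    key = obj.sa_identity_key
    if key not in ids:
        ids[key] = "%d" % len(ids)  # JSON object keys must be strings
        queue.append(obj)           # schedule for export_object()
    return ids[key]
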
def evaluate(self, job, file_cacher):
    """See TaskType.evaluate."""
    if len(self.parameters) <= 0:
        num_processes = 1
    else:
        num_processes = self.parameters[0]
    indices = range(num_processes)

    # Create sandboxes and FIFOs.
    sandbox_mgr = create_sandbox(file_cacher)
    sandbox_user = [create_sandbox(file_cacher) for i in indices]
    fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
    fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
    fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
    for i in indices:
        os.mkfifo(fifo_in[i])
        os.mkfifo(fifo_out[i])
        os.chmod(fifo_dir[i], 0o755)
        os.chmod(fifo_in[i], 0o666)
        os.chmod(fifo_out[i], 0o666)

    # First step: we start the manager.
    manager_filename = "manager"
    manager_command = ["./%s" % manager_filename]
    for i in indices:
        manager_command.append(fifo_in[i])
        manager_command.append(fifo_out[i])
    manager_executables_to_get = {
        manager_filename: job.managers[manager_filename].digest
    }
    manager_files_to_get = {
        "input.txt": job.input
    }
    manager_allow_dirs = fifo_dir
    for filename, digest in manager_executables_to_get.iteritems():
        sandbox_mgr.create_file_from_storage(
            filename, digest, executable=True)
    for filename, digest in manager_files_to_get.iteritems():
        sandbox_mgr.create_file_from_storage(filename, digest)
    manager = evaluation_step_before_run(
        sandbox_mgr,
        manager_command,
        num_processes * job.time_limit,
        0,
        allow_dirs=manager_allow_dirs,
        writable_files=["output.txt"],
        stdin_redirect="input.txt")

    # Second step: we start the user submission compiled with the
    # stub.
    executable_filename = job.executables.keys()[0]
    executables_to_get = {
        executable_filename: job.executables[executable_filename].digest
    }
    processes = [None for i in indices]
    for i in indices:
        command = ["./%s" % executable_filename,
                   fifo_out[i], fifo_in[i]]
        if num_processes != 1:
            command.append(str(i))
        user_allow_dirs = [fifo_dir[i]]
        for filename, digest in executables_to_get.iteritems():
            sandbox_user[i].create_file_from_storage(
                filename, digest, executable=True)
        processes[i] = evaluation_step_before_run(
            sandbox_user[i],
            command,
            job.time_limit,
            job.memory_limit,
            allow_dirs=user_allow_dirs)

    # Consume output.
    wait_without_std(processes + [manager])
    # TODO: check exit codes with translate_box_exitcode.

    user_results = [evaluation_step_after_run(s) for s in sandbox_user]
    success_user = all(r[0] for r in user_results)
    plus_user = reduce(merge_evaluation_results,
                       [r[1] for r in user_results])
    success_mgr, unused_plus_mgr = \
        evaluation_step_after_run(sandbox_mgr)

    if plus_user["exit_status"] == Sandbox.EXIT_OK and \
            plus_user["execution_time"] >= job.time_limit:
        plus_user["exit_status"] = Sandbox.EXIT_TIMEOUT

    # Merge results.
    job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
    job.plus = plus_user

    # If at least one evaluation had problems, we report the
    # problems.
    if not success_user or not success_mgr:
        success, outcome, text = False, None, None
    # If the user sandbox detected some problem (timeout, ...),
    # the outcome is 0.0 and the text describes that problem.
    elif not is_evaluation_passed(plus_user):
        success = True
        outcome, text = 0.0, human_evaluation_message(plus_user)
    # Otherwise, we use the manager to obtain the outcome.
    else:
        success = True
        outcome, text = extract_outcome_and_text(sandbox_mgr)

    # If asked so, save the output file, provided that it exists.
    if job.get_output:
        if sandbox_mgr.file_exists("output.txt"):
            job.user_output = sandbox_mgr.get_file_to_storage(
                "output.txt", "Output file in job %s" % job.info)
        else:
            job.user_output = None

    # Whatever happened, we conclude.
    job.success = success
    job.outcome = "%s" % outcome if outcome is not None else None
    job.text = text

    delete_sandbox(sandbox_mgr)
    for s in sandbox_user:
        delete_sandbox(s)
    if not config.keep_sandbox:
        for d in fifo_dir:
            rmtree(d)

def do_import(self):
    """Run the actual import code."""
    logger.info("Starting import.")

    if not os.path.isdir(self.import_source):
        if self.import_source.endswith(".zip"):
            archive = zipfile.ZipFile(self.import_source, "r")
            file_names = archive.infolist()
            self.import_dir = tempfile.mkdtemp()
            archive.extractall(self.import_dir)
        elif self.import_source.endswith(".tar.gz") \
                or self.import_source.endswith(".tgz") \
                or self.import_source.endswith(".tar.bz2") \
                or self.import_source.endswith(".tbz2") \
                or self.import_source.endswith(".tar"):
            archive = tarfile.open(name=self.import_source)
            file_names = archive.getnames()
        elif self.import_source.endswith(".tar.xz") \
                or self.import_source.endswith(".txz"):
            try:
                import lzma
            except ImportError:
                logger.critical("LZMA compression format not "
                                "supported. Please install package "
                                "lzma.")
                return False
            archive = tarfile.open(
                fileobj=lzma.LZMAFile(self.import_source))
            file_names = archive.getnames()
        else:
            logger.critical("Unable to import from %s." %
                            self.import_source)
            return False

        root = find_root_of_archive(file_names)
        if root is None:
            logger.critical("Cannot find a root directory in %s." %
                            self.import_source)
            return False

        self.import_dir = tempfile.mkdtemp()
        archive.extractall(self.import_dir)
        self.import_dir = os.path.join(self.import_dir, root)

    if self.drop:
        logger.info("Dropping and recreating the database.")
        try:
            if not (drop_db() and init_db()):
                logger.critical(
                    "Unexpected error while dropping "
                    "and recreating the database.",
                    exc_info=True)
                return False
        except Exception as error:
            logger.critical("Unable to access DB.\n%r" % error)
            return False

    with SessionGen() as session:

        # Import the contest in JSON format.
        if self.load_model:
            logger.info("Importing the contest from a JSON file.")

            with io.open(os.path.join(self.import_dir,
                                      "contest.json"), "rb") as fin:
                # TODO - Throughout all the code we'll assume the
                # input is correct without actually doing any
                # validations. Thus, for example, we're not
                # checking that the decoded object is a dict...
                self.datas = json.load(fin, encoding="utf-8")

            # If the dump has been exported using a data model
            # different than the current one (that is, a previous
            # one) we try to update it.
            # If no "_version" field is found we assume it's a v1.0
            # export (before the new dump format was introduced).
            dump_version = self.datas.get("_version", 0)

            if dump_version < model_version:
                logger.warning(
                    "The dump you're trying to import has been created "
                    "by an old version of CMS. It may take a while to "
                    "adapt it to the current data model. You can use "
                    "cmsDumpUpdater to update the on-disk dump and "
                    "speed up future imports.")

            if dump_version > model_version:
                logger.critical(
                    "The dump you're trying to import has been created "
                    "by a version of CMS newer than this one and there "
                    "is no way to adapt it to the current data model. "
                    "You probably need to update CMS to handle it. It's "
                    "impossible to proceed with the importation.")
                return False

            for version in range(dump_version, model_version):
                # Update from version to version+1.
                updater = __import__(
                    "cmscontrib.updaters.update_%d" % (version + 1),
                    globals(), locals(),
                    ["Updater"]).Updater(self.datas)
                self.datas = updater.run()
                self.datas["_version"] = version + 1

            assert self.datas["_version"] == model_version

            self.objs = dict()
            for id_, data in self.datas.iteritems():
                if not id_.startswith("_"):
                    self.objs[id_] = self.import_object(data)
            for id_, data in self.datas.iteritems():
                if not id_.startswith("_"):
                    self.add_relationships(data, self.objs[id_])

            for k, v in list(self.objs.iteritems()):
                # Skip submissions if requested.
                if self.skip_submissions and isinstance(v, Submission):
                    del self.objs[k]
                # Skip user_tests if requested.
                if self.skip_user_tests and isinstance(v, UserTest):
                    del self.objs[k]
                # Skip generated data if requested.
                if self.skip_generated and \
                        isinstance(v, (SubmissionResult,
                                       UserTestResult)):
                    del self.objs[k]

            contest_id = list()
            contest_files = set()

            # Add each base object and all its dependencies.
            for id_ in self.datas["_objects"]:
                contest = self.objs[id_]

                # We explicitly add only the contest since all child
                # objects will be automatically added by cascade.
                # Adding each object individually would also add
                # orphaned objects like the ones that depended on
                # submissions or user_tests that we (possibly)
                # removed above.
                session.add(contest)
                session.flush()

                contest_id += [contest.id]
                contest_files |= contest.enumerate_files(
                    self.skip_submissions,
                    self.skip_user_tests,
                    self.skip_generated)

            session.commit()
        else:
            contest_id = None
            contest_files = None

        # Import files.
        if self.load_files:
            logger.info("Importing files.")

            files_dir = os.path.join(self.import_dir, "files")
            descr_dir = os.path.join(self.import_dir, "descriptions")

            files = set(os.listdir(files_dir))
            descr = set(os.listdir(descr_dir))

            if not descr <= files:
                logger.warning("Some files do not have an associated "
                               "description.")
            if not files <= descr:
                logger.warning("Some descriptions do not have an "
                               "associated file.")

            if not (contest_files is None or files <= contest_files):
                # FIXME Check if it's because this is a light import
                # or because we're skipping submissions or user_tests.
                logger.warning("The dump contains some files that are "
                               "not needed by the contest.")
            if not (contest_files is None or contest_files <= files):
                # The reason for this could be that it was a light
                # export that's not being reimported as such.
                logger.warning("The contest needs some files that are "
                               "not contained in the dump.")

            # Limit import to files we actually need.
            if contest_files is not None:
                files &= contest_files

            for digest in files:
                file_ = os.path.join(files_dir, digest)
                desc = os.path.join(descr_dir, digest)
                if not self.safe_put_file(file_, desc):
                    logger.critical(
                        "Unable to put file `%s' in the database. "
                        "Aborting. Please remove the contest "
                        "from the database." % file_)
                    # TODO: remove contest from the database.
                    return False

    if contest_id is not None:
        logger.info("Import finished (contest id: %s)." %
                    ", ".join(str(id_) for id_ in contest_id))
    else:
        logger.info("Import finished.")

    # If we extracted an archive, we remove it.
    if self.import_dir != self.import_source:
        rmtree(self.import_dir)

    return True

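# Equivalent sketch (illustrative) of the dynamic-import update loop
# above, written with importlib instead of raw __import__; it assumes
# the same "cmscontrib.updaters.update_<N>" module naming convention.
import importlib

def upgrade_dump(datas, dump_version, model_version):
    for version in range(dump_version, model_version):
        # Each updater module lifts a dump from version N to N+1.
        module = importlib.import_module(
            "cmscontrib.updaters.update_%d" % (version + 1))
        datas = module.Updater(datas).run()
        datas["_version"] = version + 1
    return datas
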
def destroy_cache(self):
    """Completely destroys the cache.

    The FileCacher is not usable anymore.

    """
    rmtree(self.base_dir)

def evaluate(self, job, file_cacher):
    """See TaskType.evaluate."""
    # Create sandboxes and FIFOs.
    sandbox_mgr = create_sandbox(file_cacher)
    sandbox_user = create_sandbox(file_cacher)
    fifo_dir = tempfile.mkdtemp(dir=config.temp_dir)
    fifo_in = os.path.join(fifo_dir, "in")
    fifo_out = os.path.join(fifo_dir, "out")
    os.mkfifo(fifo_in)
    os.mkfifo(fifo_out)
    os.chmod(fifo_dir, 0o755)
    os.chmod(fifo_in, 0o666)
    os.chmod(fifo_out, 0o666)

    # First step: we start the manager.
    manager_filename = "manager"
    manager_command = ["./%s" % manager_filename, fifo_in, fifo_out]
    manager_executables_to_get = {
        manager_filename: job.managers[manager_filename].digest
    }
    manager_files_to_get = {
        "input.txt": job.input
    }
    manager_allow_dirs = [fifo_dir]
    for filename, digest in manager_executables_to_get.iteritems():
        sandbox_mgr.create_file_from_storage(
            filename, digest, executable=True)
    for filename, digest in manager_files_to_get.iteritems():
        sandbox_mgr.create_file_from_storage(filename, digest)
    manager = evaluation_step_before_run(
        sandbox_mgr,
        manager_command,
        job.time_limit,
        0,
        allow_dirs=manager_allow_dirs,
        writable_files=["output.txt"],
        stdin_redirect="input.txt")

    # Second step: we start the user submission compiled with the
    # stub.
    executable_filename = job.executables.keys()[0]
    command = ["./%s" % executable_filename, fifo_out, fifo_in]
    executables_to_get = {
        executable_filename: job.executables[executable_filename].digest
    }
    user_allow_dirs = [fifo_dir]
    for filename, digest in executables_to_get.iteritems():
        sandbox_user.create_file_from_storage(
            filename, digest, executable=True)
    process = evaluation_step_before_run(
        sandbox_user,
        command,
        job.time_limit,
        job.memory_limit,
        allow_dirs=user_allow_dirs)

    # Consume output.
    wait_without_std([process, manager])
    # TODO: check exit codes with translate_box_exitcode.

    success_user, plus_user = \
        evaluation_step_after_run(sandbox_user)
    success_mgr, unused_plus_mgr = \
        evaluation_step_after_run(sandbox_mgr)

    job.sandboxes = [sandbox_user.path, sandbox_mgr.path]
    job.plus = plus_user

    # If at least one evaluation had problems, we report the
    # problems.
    if not success_user or not success_mgr:
        success, outcome, text = False, None, None
    # If the user sandbox detected some problem (timeout, ...),
    # the outcome is 0.0 and the text describes that problem.
    elif not is_evaluation_passed(plus_user):
        success = True
        outcome, text = 0.0, human_evaluation_message(plus_user)
    # Otherwise, we use the manager to obtain the outcome.
    else:
        success = True
        outcome, text = extract_outcome_and_text(sandbox_mgr)

    # If asked so, save the output file, provided that it exists.
    if job.get_output:
        if sandbox_mgr.file_exists("output.txt"):
            job.user_output = sandbox_mgr.get_file_to_storage(
                "output.txt", "Output file in job %s" % job.info)
        else:
            job.user_output = None

    # Whatever happened, we conclude.
    job.success = success
    job.outcome = "%s" % outcome if outcome is not None else None
    job.text = text

    delete_sandbox(sandbox_mgr)
    delete_sandbox(sandbox_user)
    if not config.keep_sandbox:
        rmtree(fifo_dir)

def evaluate(self, job, file_cacher):
    """See TaskType.evaluate."""
    if len(self.parameters) <= 0:
        num_processes = 1
    else:
        num_processes = self.parameters[0]
    indices = range(num_processes)

    # Create sandboxes and FIFOs.
    sandbox_mgr = create_sandbox(file_cacher, job.multithreaded_sandbox)
    sandbox_user = [
        create_sandbox(file_cacher, job.multithreaded_sandbox)
        for i in indices]
    fifo_dir = [tempfile.mkdtemp(dir=config.temp_dir) for i in indices]
    fifo_in = [os.path.join(fifo_dir[i], "in%d" % i) for i in indices]
    fifo_out = [os.path.join(fifo_dir[i], "out%d" % i) for i in indices]
    for i in indices:
        os.mkfifo(fifo_in[i])
        os.mkfifo(fifo_out[i])
        os.chmod(fifo_dir[i], 0o755)
        os.chmod(fifo_in[i], 0o666)
        os.chmod(fifo_out[i], 0o666)

    # First step: we start the manager.
    manager_filename = "manager"
    manager_command = ["./%s" % manager_filename]
    for i in indices:
        manager_command.append(fifo_in[i])
        manager_command.append(fifo_out[i])
    manager_executables_to_get = {
        manager_filename: job.managers[manager_filename].digest
    }
    manager_files_to_get = {
        "input.txt": job.input
    }
    manager_allow_dirs = fifo_dir
    for filename, digest in manager_executables_to_get.iteritems():
        sandbox_mgr.create_file_from_storage(
            filename, digest, executable=True)
    for filename, digest in manager_files_to_get.iteritems():
        sandbox_mgr.create_file_from_storage(filename, digest)
    manager = evaluation_step_before_run(
        sandbox_mgr,
        manager_command,
        num_processes * job.time_limit,
        0,
        allow_dirs=manager_allow_dirs,
        writable_files=["output.txt"],
        stdin_redirect="input.txt",
        stdout_redirect="output.txt")

    # Second step: we start the user submission compiled with the
    # grader.
    language = get_language(job.language)
    executable_filename = job.executables.keys()[0]
    executables_to_get = {
        executable_filename: job.executables[executable_filename].digest
    }
    processes = [None for i in indices]
    for i in indices:
        args = [fifo_out[i], fifo_in[i]]
        if num_processes != 1:
            args.append(str(i))
        commands = language.get_evaluation_commands(
            executable_filename,
            main="grader",
            args=args)
        user_allow_dirs = [fifo_dir[i]]
        for filename, digest in executables_to_get.iteritems():
            sandbox_user[i].create_file_from_storage(
                filename, digest, executable=True)
        # Assumes that the actual execution of the user solution is
        # the last command in commands, and that the previous ones
        # are "setup" steps that don't need tight control.
        if len(commands) > 1:
            evaluation_step(sandbox_user[i], commands[:-1], 10, 256)
        processes[i] = evaluation_step_before_run(
            sandbox_user[i],
            commands[-1],
            job.time_limit,
            job.memory_limit,
            allow_dirs=user_allow_dirs)

    # Consume output.
    wait_without_std(processes + [manager])
    # TODO: check exit codes with translate_box_exitcode.

    user_results = [evaluation_step_after_run(s) for s in sandbox_user]
    success_user = all(r[0] for r in user_results)
    plus_user = reduce(merge_evaluation_results,
                       [r[1] for r in user_results])
    success_mgr, unused_plus_mgr = \
        evaluation_step_after_run(sandbox_mgr)

    if plus_user["exit_status"] == Sandbox.EXIT_OK and \
            plus_user["execution_time"] >= job.time_limit:
        plus_user["exit_status"] = Sandbox.EXIT_TIMEOUT

    # Merge results.
    job.sandboxes = [s.path for s in sandbox_user] + [sandbox_mgr.path]
    job.plus = plus_user

    # If at least one evaluation had problems, we report the
    # problems.
    if not success_user or not success_mgr:
        success, outcome, text = False, None, None
    # If the user sandbox detected some problem (timeout, ...),
    # the outcome is 0.0 and the text describes that problem.
    elif not is_evaluation_passed(plus_user):
        success = True
        outcome, text = 0.0, human_evaluation_message(plus_user)
        if job.get_output:
            job.user_output = None
    # Otherwise, we use the manager to obtain the outcome.
    else:
        success = True
        outcome = None
        text = None
        input_filename = "input.txt"
        output_filename = "output.txt"

        # Check that the output file was created.
        if not sandbox_mgr.file_exists(output_filename):
            outcome = 0.0
            text = [N_("Evaluation didn't produce file %s"),
                    output_filename]
            if job.get_output:
                job.user_output = None
        else:
            # If asked so, put the output file into the storage.
            if job.get_output:
                job.user_output = sandbox_mgr.get_file_to_storage(
                    output_filename,
                    "Output file in job %s" % job.info,
                    trunc_len=1024 * 1024 * 10)

            # If just asked to execute, fill text and set dummy
            # outcome.
            if job.only_execution:
                outcome = 0.0
                text = [N_("Execution completed successfully")]

            # Otherwise evaluate the output file.
            else:
                # Put the reference solution into the sandbox.
                sandbox_mgr.create_file_from_storage(
                    "res.txt", job.output)

                # Check the solution with white_diff.
                if self.parameters[1] == "diff":
                    outcome, text = white_diff_step(
                        sandbox_mgr, output_filename, "res.txt")

                # Check the solution with a comparator.
                elif self.parameters[1] == "comparator":
                    manager_filename = "checker"
                    if manager_filename not in job.managers:
                        logger.error(
                            "Configuration error: missing or "
                            "invalid comparator (it must be "
                            "named 'checker')",
                            extra={"operation": job.info})
                        success = False
                    else:
                        sandbox_mgr.create_file_from_storage(
                            manager_filename,
                            job.managers[manager_filename].digest,
                            executable=True)
                        # Rewrite input file. The untrusted
                        # contestant program should not be able to
                        # modify it; however, the grader may
                        # destroy the input file to prevent the
                        # contestant's program from directly
                        # accessing it. Since we cannot create
                        # files already existing in the sandbox,
                        # we try removing the file first.
                        try:
                            sandbox_mgr.remove_file(input_filename)
                        except OSError:
                            # Let us be extra sure that the file
                            # was actually removed and we did not
                            # mess up with permissions.
                            assert not sandbox_mgr.file_exists(
                                input_filename)
                        sandbox_mgr.create_file_from_storage(
                            input_filename, job.input)

                        # Allow using any number of processes
                        # (because e.g. one may want to write a
                        # bash checker that calls other
                        # processes). Set to a high but finite
                        # number to avoid fork-bombing the worker.
                        sandbox_mgr.max_processes = 1000

                        success, _ = evaluation_step(
                            sandbox_mgr,
                            [["./%s" % manager_filename,
                              input_filename, "res.txt",
                              output_filename]])
                        if success:
                            try:
                                outcome, text = \
                                    extract_outcome_and_text(sandbox_mgr)
                            except ValueError as e:
                                logger.error(
                                    "Invalid output from "
                                    "comparator: %s", e.message,
                                    extra={"operation": job.info})
                                success = False
                else:
                    raise ValueError(
                        "Unrecognized second parameter `%s' "
                        "for Communication tasktype." %
                        self.parameters[1])

    # Whatever happened, we conclude.
    job.success = success
    job.outcome = "%s" % outcome if outcome is not None else None
    job.text = text

    delete_sandbox(sandbox_mgr, job.success)
    for s in sandbox_user:
        delete_sandbox(s, job.success)
    if not config.keep_sandbox:
        for d in fifo_dir:
            rmtree(d)

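# Hedged sketch of a tiny "checker" manager satisfying the contract the
# comparator branch above expects: it is invoked as
#   ./checker input.txt res.txt output.txt
# and, by the CMS convention assumed here, writes the outcome (a float)
# to stdout and a human-readable message to stderr, which is what
# extract_outcome_and_text() reads back from the sandbox.
import sys

def main():
    _input_file, reference_file, output_file = sys.argv[1:4]
    # Toy policy: token-wise comparison of contestant vs. reference.
    with open(reference_file) as ref, open(output_file) as out:
        ok = ref.read().split() == out.read().split()
    sys.stdout.write("%.1f\n" % (1.0 if ok else 0.0))
    sys.stderr.write("Output is correct\n" if ok
                     else "Output isn't correct\n")

if __name__ == "__main__":
    main()
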