Example 1
class DumpExporter(object):

    """This service exports every data that CMS knows. The process of
    exporting and importing again should be idempotent.

    """

    def __init__(self, contest_ids, export_target,
                 dump_files, dump_model, skip_generated,
                 skip_submissions, skip_user_tests):
        if contest_ids is None:
            with SessionGen() as session:
                contests = session.query(Contest).all()
                self.contests_ids = [contest.id for contest in contests]
                users = session.query(User).all()
                self.users_ids = [user.id for user in users]
                tasks = session.query(Task)\
                    .filter(Task.contest_id.is_(None)).all()
                self.tasks_ids = [task.id for task in tasks]
        else:
            # FIXME: this is ATM broken, because if you export a contest, you
            # then export the users who participated in it and then all of the
            # contests those users participated in.
            self.contests_ids = contest_ids
            self.users_ids = []
            self.tasks_ids = []
        self.dump_files = dump_files
        self.dump_model = dump_model
        self.skip_generated = skip_generated
        self.skip_submissions = skip_submissions
        self.skip_user_tests = skip_user_tests
        self.export_target = export_target

        # If the target is not provided, we use a default name based on the date.
        if export_target == "":
            self.export_target = "dump_%s.tar.gz" % date.today().isoformat()
            logger.warning("export_target not given, using \"%s\"",
                           self.export_target)

        self.file_cacher = FileCacher()

    def do_export(self):
        """Run the actual export code."""
        logger.info("Starting export.")

        export_dir = self.export_target
        archive_info = get_archive_info(self.export_target)

        if archive_info["write_mode"] != "":
            # We are able to write to this archive.
            if os.path.exists(self.export_target):
                logger.critical("The specified file already exists, "
                                "I won't overwrite it.")
                return False
            export_dir = os.path.join(tempfile.mkdtemp(),
                                      archive_info["basename"])

        logger.info("Creating dir structure.")
        try:
            os.mkdir(export_dir)
        except OSError:
            logger.critical("The specified directory already exists, "
                            "I won't overwrite it.")
            return False

        files_dir = os.path.join(export_dir, "files")
        descr_dir = os.path.join(export_dir, "descriptions")
        os.mkdir(files_dir)
        os.mkdir(descr_dir)

        with SessionGen() as session:
            # Export files.
            logger.info("Exporting files.")
            if self.dump_files:
                for contest_id in self.contests_ids:
                    contest = Contest.get_from_id(contest_id, session)
                    files = contest.enumerate_files(self.skip_submissions,
                                                    self.skip_user_tests,
                                                    self.skip_generated)
                    for file_ in files:
                        if not self.safe_get_file(file_,
                                                  os.path.join(files_dir,
                                                               file_),
                                                  os.path.join(descr_dir,
                                                               file_)):
                            return False

            # Export data in JSON format.
            if self.dump_model:
                logger.info("Exporting data to a JSON file.")

                # We use strings because they'll be the keys of a JSON
                # object
                self.ids = {}
                self.queue = []
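                # self.queue drives a breadth-first walk of the object
                # graph: get_id() assigns an ID to each object the first
                # time it is seen and enqueues it; the loop below pops
                # and exports it.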

                data = dict()

                for cls, lst in [(Contest, self.contests_ids),
                                 (User, self.users_ids),
                                 (Task, self.tasks_ids)]:
                    for i in lst:
                        obj = cls.get_from_id(i, session)
                        self.get_id(obj)

                # Specify the "root" of the data graph
                data["_objects"] = self.ids.values()

                while len(self.queue) > 0:
                    obj = self.queue.pop(0)
                    data[self.ids[obj.sa_identity_key]] = \
                        self.export_object(obj)

                data["_version"] = model_version

                with io.open(os.path.join(export_dir,
                                          "contest.json"), "wb") as fout:
                    json.dump(data, fout, encoding="utf-8",
                              indent=4, sort_keys=True)

        # If the admin requested export to file, we do that.
        if archive_info["write_mode"] != "":
            archive = tarfile.open(self.export_target,
                                   archive_info["write_mode"])
            archive.add(export_dir, arcname=archive_info["basename"])
            archive.close()
            rmtree(export_dir)

        logger.info("Export finished.")

        return True

    def get_id(self, obj):
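        """Return the file-local string ID of obj, assigning a new one
        (and queueing obj for later export) on first encounter."""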
        obj_key = obj.sa_identity_key
        if obj_key not in self.ids:
            # We use strings because they'll be the keys of a JSON object
            self.ids[obj_key] = "%d" % len(self.ids)
            self.queue.append(obj)

        return self.ids[obj_key]

    def export_object(self, obj):

        """Export the given object, returning a JSON-encodable dict.

        The returned dict will contain a "_class" item (the name of the
        class of the given object), an item for each column property
        (with a value properly translated to a JSON-compatible type)
        and an item for each relationship property (which will be an ID
        or a collection of IDs).

        The IDs used in the exported dict aren't related to the ones
        used in the DB: they are newly generated and their scope is
        limited to the exported file only. They are shared among all
        classes (that is, two objects can never share the same ID, even
        if they are of different classes).

        If, when exporting the relationship, we find an object without
        an ID we generate a new ID, assign it to the object and append
        the object to the queue of objects to export.

        The self.skip_submissions flag controls whether we export
        submissions (and all other objects that can be reached only by
        passing through a submission) or not.

        """

        cls = type(obj)

        data = {"_class": cls.__name__}

        for prp in cls._col_props:
            col, = prp.columns
            col_type = type(col.type)

            val = getattr(obj, prp.key)
            if col_type in \
                    [Boolean, Integer, Float, Unicode, RepeatedUnicode, Enum]:
                data[prp.key] = val
            elif col_type is String:
                data[prp.key] = \
                    val.decode('latin1') if val is not None else None
            elif col_type is DateTime:
                data[prp.key] = \
                    make_timestamp(val) if val is not None else None
            elif col_type is Interval:
                data[prp.key] = \
                    val.total_seconds() if val is not None else None
            else:
                raise RuntimeError("Unknown SQLAlchemy column type: %s"
                                   % col_type)

        for prp in cls._rel_props:
            other_cls = prp.mapper.class_

            # Skip submissions if requested
            if self.skip_submissions and other_cls is Submission:
                continue

            # Skip user_tests if requested
            if self.skip_user_tests and other_cls is UserTest:
                continue

            # Skip generated data if requested
            if self.skip_generated and other_cls in (SubmissionResult,
                                                     UserTestResult):
                continue

            val = getattr(obj, prp.key)
            if val is None:
                data[prp.key] = None
            elif isinstance(val, other_cls):
                data[prp.key] = self.get_id(val)
            elif isinstance(val, list):
                data[prp.key] = list(self.get_id(i) for i in val)
            elif isinstance(val, dict):
                data[prp.key] = \
                    dict((k, self.get_id(v)) for k, v in val.iteritems())
            else:
                raise RuntimeError("Unknown SQLAlchemy relationship type: %s"
                                   % type(val))

        return data

    def safe_get_file(self, digest, path, descr_path=None):

        """Get file from FileCacher ensuring that the digest is
        correct.

        digest (string): the digest of the file to retrieve.
        path (string): the path where to save the file.
        descr_path (string): the path where to save the description.

        return (bool): True if all ok, False if something wrong.

        """

        # TODO - Probably this method could be merged in FileCacher

        # First get the file
        try:
            self.file_cacher.get_file_to_path(digest, path)
        except Exception:
            logger.error("File %s could not retrieved from file server.",
                         digest, exc_info=True)
            return False

        # Then check the digest
        calc_digest = sha1sum(path)
        if digest != calc_digest:
            logger.critical("File %s has wrong hash %s.",
                            digest, calc_digest)
            return False

        # If applicable, retrieve also the description
        if descr_path is not None:
            with io.open(descr_path, 'wt', encoding='utf-8') as fout:
                fout.write(self.file_cacher.describe(digest))

        return True
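
A minimal driver for the example above, sketched under the assumption
that the CMS imports used by the class are available; the argument
values are illustrative, not prescriptive.

    import sys

    exporter = DumpExporter(contest_ids=None, export_target="",
                            dump_files=True, dump_model=True,
                            skip_generated=False, skip_submissions=False,
                            skip_user_tests=False)
    if not exporter.do_export():
        sys.exit(1)  # do_export() returns False on failure

With export_target="", the class itself falls back to a dated
dump_YYYY-MM-DD.tar.gz name, as shown in its constructor.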
Example 2
class SpoolExporter(object):
    """This service creates a tree structure "similar" to the one used
    in Italian IOI repository for storing the results of a contest.

    """
    def __init__(self, contest_id, spool_dir):
        self.contest_id = contest_id
        self.spool_dir = spool_dir
        self.upload_dir = os.path.join(self.spool_dir, "upload")
        self.contest = None
        self.submissions = None

        self.file_cacher = FileCacher()

    def run(self):
        """Interface to make the class do its job."""
        return self.do_export()

    def do_export(self):
        """Run the actual export code.

        """
        logger.operation = "exporting contest %s" % self.contest_id
        logger.info("Starting export.")

        logger.info("Creating dir structure.")
        try:
            os.mkdir(self.spool_dir)
        except OSError:
            logger.critical("The specified directory already exists, "
                            "I won't overwrite it.")
            return False
        os.mkdir(self.upload_dir)

        with SessionGen() as session:
            self.contest = Contest.get_from_id(self.contest_id, session)
            self.submissions = sorted(
                (submission
                 for submission in self.contest.get_submissions()
                 if not submission.user.hidden),
                key=lambda submission: submission.timestamp)

            # Creating users' directory.
            for user in self.contest.users:
                if not user.hidden:
                    os.mkdir(os.path.join(self.upload_dir, user.username))

            try:
                self.export_submissions()
                self.export_ranking()
            except Exception:
                logger.critical("Generic error.", exc_info=True)
                return False

        logger.info("Export finished.")
        logger.operation = ""

        return True

    def export_submissions(self):
        """Export submissions' source files.

        """
        logger.info("Exporting submissions.")

        queue_file = codecs.open(os.path.join(self.spool_dir, "queue"), "w",
                                 encoding="utf-8")
        for submission in sorted(self.submissions, key=lambda x: x.timestamp):
            logger.info("Exporting submission %s." % submission.id)
            username = submission.user.username
            task = submission.task.name
            timestamp = time.mktime(submission.timestamp.timetuple())

            # Get source files to the spool directory.
            submission_dir = os.path.join(
                self.upload_dir, username, "%s.%d.%s" %
                (task, timestamp, submission.language))
            os.mkdir(submission_dir)
            for filename, file_ in submission.files.iteritems():
                self.file_cacher.get_file_to_path(
                    file_.digest,
                    os.path.join(submission_dir, filename))
            last_submission_dir = os.path.join(
                self.upload_dir, username, "%s.%s" %
                (task, submission.language))
            try:
                os.unlink(last_submission_dir)
            except OSError:
                pass
            os.symlink(os.path.basename(submission_dir), last_submission_dir)
            print("./upload/%s/%s.%d.%s" %
                  (username, task, timestamp, submission.language),
                  file=queue_file)

            # Write results file for the submission.
            active_dataset = submission.task.active_dataset
            result = submission.get_result(active_dataset)
            if result.evaluated():
                res_file = codecs.open(os.path.join(
                    self.spool_dir,
                    "%d.%s.%s.%s.res" % (timestamp, username,
                                         task, submission.language)),
                    "w", encoding="utf-8")
                res2_file = codecs.open(
                    os.path.join(self.spool_dir,
                                 "%s.%s.%s.res" % (username, task,
                                                   submission.language)),
                    "w", encoding="utf-8")
                total = 0.0
                for evaluation in result.evaluations:
                    outcome = float(evaluation.outcome)
                    total += outcome
                    line = "Executing on file with codename '%s' %s (%.4f)" % \
                        (evaluation.testcase.codename,
                         evaluation.text, outcome)
                    print(line, file=res_file)
                    print(line, file=res2_file)
                line = "Score: %.6f" % total
                print(line, file=res_file)
                print(line, file=res2_file)
                res_file.close()
                res2_file.close()

        print(file=queue_file)
        queue_file.close()

    def export_ranking(self):
        """Exports the ranking in csv and txt (human-readable) form.

        """
        logger.info("Exporting ranking.")

        # Create the structure to store the scores.
        scores = dict((user.username, 0.0)
                      for user in self.contest.users
                      if not user.hidden)
        task_scores = dict((task.id, dict((user.username, 0.0)
                                          for user in self.contest.users
                                          if not user.hidden))
                           for task in self.contest.tasks)
        last_scores = dict((task.id, dict((user.username, 0.0)
                                          for user in self.contest.users
                                          if not user.hidden))
                           for task in self.contest.tasks)

        # Make the score type compute the scores.
        scorers = {}
        for task in self.contest.tasks:
            scorers[task.id] = get_score_type(dataset=task.active_dataset)

        for submission in self.submissions:
            active_dataset = submission.task.active_dataset
            result = submission.get_result(active_dataset)
            scorers[submission.task_id].add_submission(
                submission.id, submission.timestamp,
                submission.user.username,
                result.evaluated(),
                dict((ev.codename,
                      {"outcome": ev.outcome,
                       "text": ev.text,
                       "time": ev.execution_time,
                       "memory": ev.execution_memory})
                     for ev in result.evaluations),
                submission.tokened())

        # Put together all the scores.
        for submission in self.submissions:
            task_id = submission.task_id
            username = submission.user.username
            details = scorers[task_id].pool[submission.id]
            last_scores[task_id][username] = details["score"]
            if details["tokened"]:
                task_scores[task_id][username] = max(
                    task_scores[task_id][username],
                    details["score"])

        # Merge tokened and last submissions.
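        # (i.e. a user's score on a task is the maximum between the best
        # tokened submission and the last submission overall)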
        for username in scores:
            for task_id in task_scores:
                task_scores[task_id][username] = max(
                    task_scores[task_id][username],
                    last_scores[task_id][username])
            # print(username, [task_scores[task_id][username]
            #                  for task_id in task_scores])
            scores[username] = sum(task_scores[task_id][username]
                                   for task_id in task_scores)

        sorted_usernames = sorted(scores.keys(),
                                  key=lambda username: (scores[username],
                                                        username),
                                  reverse=True)
        sorted_tasks = sorted(self.contest.tasks,
                              key=lambda task: task.num)

        ranking_file = codecs.open(
            os.path.join(self.spool_dir, "classifica.txt"),
            "w", encoding="utf-8")
        ranking_csv = codecs.open(
            os.path.join(self.spool_dir, "classifica.csv"),
            "w", encoding="utf-8")

        # Write rankings' header.
        n_tasks = len(sorted_tasks)
        print("Classifica finale del contest `%s'" %
              self.contest.description, file=ranking_file)
        points_line = " %10s" * n_tasks
        csv_points_line = ",%s" * n_tasks
        print(("%20s %10s" % ("Utente", "Totale")) +
              (points_line % tuple([t.name for t in sorted_tasks])),
              file=ranking_file)
        print(("%s,%s" % ("utente", "totale")) +
              (csv_points_line % tuple([t.name for t in sorted_tasks])),
              file=ranking_csv)

        # Write rankings' content.
        points_line = " %10.3f" * n_tasks
        csv_points_line = ",%.6f" * n_tasks
        for username in sorted_usernames:
            user_scores = [task_scores[task.id][username]
                           for task in sorted_tasks]
            print(("%20s %10.3f" % (username, scores[username])) +
                  (points_line % tuple(user_scores)),
                  file=ranking_file)
            print(("%s,%.6f" % (username, scores[username])) +
                  (csv_points_line % tuple(user_scores)),
                  file=ranking_csv)

        ranking_file.close()
        ranking_csv.close()
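
The same kind of hedged usage sketch applies here: run() simply
forwards to do_export() and returns a bool, so a driver (with made-up
argument values) could look like this.

    import sys

    exporter = SpoolExporter(contest_id=1, spool_dir="./spool")
    if not exporter.run():
        sys.exit(1)
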
Example 3
class DumpExporter:

    """This service exports every data that CMS knows. The process of
    exporting and importing again should be idempotent.

    """

    def __init__(self, contest_ids, export_target,
                 dump_files, dump_model, skip_generated,
                 skip_submissions, skip_user_tests, skip_users,
                 skip_print_jobs):
        if contest_ids is None:
            with SessionGen() as session:
                contests = session.query(Contest).all()
                self.contests_ids = [contest.id for contest in contests]
                if not skip_users:
                    users = session.query(User).all()
                    self.users_ids = [user.id for user in users]
                else:
                    self.users_ids = []
                tasks = session.query(Task)\
                    .filter(Task.contest_id.is_(None)).all()
                self.tasks_ids = [task.id for task in tasks]
        else:
            # FIXME: this is ATM broken, because if you export a contest, you
            # then export the users who participated in it and then all of the
            # contests those users participated in.
            self.contests_ids = contest_ids
            self.users_ids = []
            self.tasks_ids = []
        self.dump_files = dump_files
        self.dump_model = dump_model
        self.skip_generated = skip_generated
        self.skip_submissions = skip_submissions
        self.skip_user_tests = skip_user_tests
        self.skip_users = skip_users
        self.skip_print_jobs = skip_print_jobs
        self.export_target = export_target

        # If the target is not provided, we use a default name based on the date.
        if len(export_target) == 0:
            self.export_target = "dump_%s.tar.gz" % date.today().isoformat()
            logger.warning("export_target not given, using \"%s\"",
                           self.export_target)

        self.file_cacher = FileCacher()

    def do_export(self):
        """Run the actual export code."""
        logger.info("Starting export.")

        export_dir = self.export_target
        archive_info = get_archive_info(self.export_target)

        if archive_info["write_mode"] != "":
            # We are able to write to this archive.
            if os.path.exists(self.export_target):
                logger.critical("The specified file already exists, "
                                "I won't overwrite it.")
                return False
            export_dir = os.path.join(tempfile.mkdtemp(),
                                      archive_info["basename"])

        logger.info("Creating dir structure.")
        try:
            os.mkdir(export_dir)
        except OSError:
            logger.critical("The specified directory already exists, "
                            "I won't overwrite it.")
            return False

        files_dir = os.path.join(export_dir, "files")
        descr_dir = os.path.join(export_dir, "descriptions")
        os.mkdir(files_dir)
        os.mkdir(descr_dir)

        with SessionGen() as session:
            # Export files.
            logger.info("Exporting files.")
            if self.dump_files:
                for contest_id in self.contests_ids:
                    contest = Contest.get_from_id(contest_id, session)
                    files = enumerate_files(
                        session, contest,
                        skip_submissions=self.skip_submissions,
                        skip_user_tests=self.skip_user_tests,
                        skip_users=self.skip_users,
                        skip_print_jobs=self.skip_print_jobs,
                        skip_generated=self.skip_generated)
                    for file_ in files:
                        if not self.safe_get_file(file_,
                                                  os.path.join(files_dir,
                                                               file_),
                                                  os.path.join(descr_dir,
                                                               file_)):
                            return False

            # Export data in JSON format.
            if self.dump_model:
                logger.info("Exporting data to a JSON file.")

                # We use strings because they'll be the keys of a JSON
                # object
                self.ids = {}
                self.queue = []

                data = dict()

                for cls, lst in [(Contest, self.contests_ids),
                                 (User, self.users_ids),
                                 (Task, self.tasks_ids)]:
                    for i in lst:
                        obj = cls.get_from_id(i, session)
                        self.get_id(obj)

                # Specify the "root" of the data graph
                data["_objects"] = list(self.ids.values())

                while len(self.queue) > 0:
                    obj = self.queue.pop(0)
                    data[self.ids[obj.sa_identity_key]] = \
                        self.export_object(obj)

                data["_version"] = model_version

                destination = os.path.join(export_dir, "contest.json")
                with open(destination, "wt", encoding="utf-8") as fout:
                    json.dump(data, fout, indent=4, sort_keys=True)

        # If the admin requested export to file, we do that.
        if archive_info["write_mode"] != "":
            with tarfile.open(self.export_target,
                              archive_info["write_mode"]) as archive:
                archive.add(export_dir, arcname=archive_info["basename"])
            rmtree(export_dir)

        logger.info("Export finished.")

        return True

    def get_id(self, obj):
        obj_key = obj.sa_identity_key
        if obj_key not in self.ids:
            # We use strings because they'll be the keys of a JSON object
            self.ids[obj_key] = "%d" % len(self.ids)
            self.queue.append(obj)

        return self.ids[obj_key]

    def export_object(self, obj):

        """Export the given object, returning a JSON-encodable dict.

        The returned dict will contain a "_class" item (the name of the
        class of the given object), an item for each column property
        (with a value properly translated to a JSON-compatible type)
        and an item for each relationship property (which will be an ID
        or a collection of IDs).

        The IDs used in the exported dict aren't related to the ones
        used in the DB: they are newly generated and their scope is
        limited to the exported file only. They are shared among all
        classes (that is, two objects can never share the same ID, even
        if they are of different classes).

        If, when exporting the relationship, we find an object without
        an ID we generate a new ID, assign it to the object and append
        the object to the queue of objects to export.

        The self.skip_submissions flag controls whether we export
        submissions (and all other objects that can be reached only by
        passing through a submission) or not.

        """

        cls = type(obj)

        data = {"_class": cls.__name__}

        for prp in cls._col_props:
            col, = prp.columns

            val = getattr(obj, prp.key)
            data[prp.key] = encode_value(col.type, val)

        user_related_classes = [
            User, Admin, UserTest, Submission, PrintJob,
            Message, Question, Announcement, Participation]
        for prp in cls._rel_props:
            other_cls = prp.mapper.class_

            # Skip submissions if requested
            if self.skip_submissions and other_cls is Submission:
                continue

            # Skip user_tests if requested
            if self.skip_user_tests and other_cls is UserTest:
                continue

            # Skip user-related data if requested
            if self.skip_users and other_cls in user_related_classes:
                continue

            # Skip print jobs if requested
            if self.skip_print_jobs and other_cls is PrintJob:
                continue

            # Skip generated data if requested
            if self.skip_generated and other_cls in (SubmissionResult,
                                                     UserTestResult):
                continue

            val = getattr(obj, prp.key)
            if val is None:
                data[prp.key] = None
            elif isinstance(val, other_cls):
                data[prp.key] = self.get_id(val)
            elif isinstance(val, list):
                data[prp.key] = list(self.get_id(i) for i in val)
            elif isinstance(val, dict):
                data[prp.key] = \
                    dict((k, self.get_id(v)) for k, v in val.items())
            else:
                raise RuntimeError("Unknown SQLAlchemy relationship type: %s"
                                   % type(val))

        return data

    def safe_get_file(self, digest, path, descr_path=None):

        """Get file from FileCacher ensuring that the digest is
        correct.

        digest (string): the digest of the file to retrieve.
        path (string): the path where to save the file.
        descr_path (string): the path where to save the description.

        return (bool): True if all ok, False if something wrong.

        """

        # TODO - Probably this method could be merged in FileCacher

        # First get the file
        try:
            self.file_cacher.get_file_to_path(digest, path)
        except Exception:
            logger.error("File %s could not retrieved from file server.",
                         digest, exc_info=True)
            return False

        # Then check the digest
        calc_digest = path_digest(path)
        if digest != calc_digest:
            logger.critical("File %s has wrong hash %s.",
                            digest, calc_digest)
            return False

        # If applicable, retrieve also the description
        if descr_path is not None:
            with open(descr_path, 'wt', encoding='utf-8') as fout:
                fout.write(self.file_cacher.describe(digest))

        return True
Example 4
def main():
    """Parse arguments and launch process.

    """
    parser = argparse.ArgumentParser(
        description="Export CMS submissions to a folder.\n",
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("-c",
                        "--contest-id",
                        action="store",
                        type=int,
                        help="id of contest (default: all contests)")
    parser.add_argument("-t",
                        "--task-id",
                        action="store",
                        type=int,
                        help="id of task (default: all tasks)")
    parser.add_argument("-u",
                        "--user-id",
                        action="store",
                        type=int,
                        help="id of user (default: all users)")
    parser.add_argument("-s",
                        "--submission-id",
                        action="store",
                        type=int,
                        help="id of submission (default: all submissions)")
    parser.add_argument("--utf8",
                        action="store_true",
                        help="if set, the files will be encoded in utf8"
                        " when possible")
    parser.add_argument("--add-info",
                        action="store_true",
                        help="if set, information on the submission will"
                        " be added in the first lines of each file")
    parser.add_argument("--min-score",
                        action="store",
                        type=float,
                        help="ignore submissions which scored strictly"
                        " less than this (default: 0.0)",
                        default=0.0)
    parser.add_argument("--filename",
                        action="store",
                        type=utf8_decoder,
                        help="the filename format to use\n"
                        "Variables:\n"
                        "  id: submission id\n"
                        "  file: filename without extension\n"
                        "  ext: filename extension\n"
                        "  time: submission timestamp\n"
                        "  user: username\n"
                        "  task: taskname\n"
                        "  score: raw score\n"
                        " (default: {id}.{file}{ext})",
                        default="{id}.{file}{ext}")
    parser.add_argument("output_dir",
                        action="store",
                        type=utf8_decoder,
                        help="directory where to save the submissions")
    parser.add_argument("-y",
                        "--yes",
                        action="store_true",
                        help="if set, confirmation will not be shown")

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument("--unique",
                       action="store_true",
                       help="if set, only the earliest best submission"
                       " will be exported for each (user, task)")
    group.add_argument("--best",
                       action="store_true",
                       help="if set, only the best submissions will be"
                       " exported for each (user, task)")

    args = parser.parse_args()

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    if not os.path.isdir(args.output_dir):
        logger.critical("The output-dir parameter must point to a directory")
        return 1

    cacher = FileCacher()

    with SessionGen() as session:
        q = session.query(Submission)\
            .join(Submission.task)\
            .join(Submission.files)\
            .join(Submission.results)\
            .join(SubmissionResult.dataset)\
            .join(Submission.participation)\
            .join(Participation.user)\
            .filter(Dataset.id == Task.active_dataset_id)\
            .filter(SubmissionResult.score >= args.min_score)\
            .with_entities(Submission.id, Submission.language,
                           Submission.timestamp,
                           SubmissionResult.score,
                           File.filename, File.digest,
                           User.id, User.username, User.first_name,
                           User.last_name,
                           Task.id, Task.name)

        if args.contest_id:
            q = q.filter(Participation.contest_id == args.contest_id)

        if args.task_id:
            q = q.filter(Submission.task_id == args.task_id)

        if args.user_id:
            q = q.filter(Participation.user_id == args.user_id)

        if args.submission_id:
            q = q.filter(Submission.id == args.submission_id)

        results = q.all()

        if args.unique or args.best:
            results = filter_top_scoring(results, args.unique)

        print("%s file(s) will be created." % len(results))
        if not args.yes and (input("Continue? [Y/n] ").strip().lower()
                             not in ["y", ""]):
            return 0

        done = 0
        for row in results:
            s_id, s_language, s_timestamp, sr_score, f_filename, f_digest, \
                u_id, u_name, u_fname, u_lname, t_id, t_name = row

            timef = s_timestamp.strftime('%Y%m%dT%H%M%S')

            ext = languagemanager.get_language(s_language).source_extension \
                if s_language else '.txt'
            filename_base, filename_ext = os.path.splitext(
                f_filename.replace('.%l', ext))

            # "name" is a deprecated specifier with the same meaning as "file"
            filename = args.filename.format(id=s_id,
                                            file=filename_base,
                                            name=filename_base,
                                            ext=filename_ext,
                                            time=timef,
                                            user=u_name,
                                            task=t_name,
                                            score=sr_score)
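            # E.g. with the default format "{id}.{file}{ext}" this could
            # produce "42.solution.cpp" (illustrative values).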
            filename = os.path.join(args.output_dir, filename)
            if os.path.exists(filename):
                logger.warning("Skipping file '%s' because it already exists",
                               filename)
                continue
            filedir = os.path.dirname(filename)
            if not os.path.exists(filedir):
                os.makedirs(filedir)
            if not os.path.isdir(filedir):
                logger.warning("%s is not a directory, skipped.", filedir)
                continue

            if not (args.utf8 or args.add_info):

                cacher.get_file_to_path(f_digest, filename)

            else:

                content_bytes = cacher.get_file_content(f_digest)

                if args.utf8:
                    try:
                        content = utf8_decoder(content_bytes)
                        content_bytes = content.encode("utf-8")
                    except TypeError:
                        logger.critical(
                            "Could not guess encoding of file "
                            "'%s'. Aborting.", filename)
                        return 1

                if args.add_info:

                    template_str = TEMPLATE[ext] % (u_name, u_fname, u_lname,
                                                    t_name, sr_score,
                                                    s_timestamp)
                    template_bytes = template_str.encode("utf-8")

                    content_bytes = template_bytes + content_bytes

                with io.open(filename, 'wb') as f_out:
                    f_out.write(content_bytes)

            done += 1
            print(done, "/", len(results))

    return 0
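
The entry point above is a plain argparse CLI. Assuming it is installed
as a script named, say, cmsExportSubmissions (the name is hypothetical
here), an invocation exercising the options defined above might be:

    cmsExportSubmissions --contest-id 1 --best --utf8 ./exported
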
Example 5
class SpoolExporter(object):
    """This service creates a tree structure "similar" to the one used
    in Italian IOI repository for storing the results of a contest.

    """
    def __init__(self, contest_id, spool_dir):
        self.contest_id = contest_id
        self.spool_dir = spool_dir
        self.upload_dir = os.path.join(self.spool_dir, "upload")
        self.contest = None
        self.submissions = None

        self.file_cacher = FileCacher()

    def run(self):
        """Interface to make the class do its job."""
        return self.do_export()

    def do_export(self):
        """Run the actual export code.

        """
        logger.operation = "exporting contest %s" % self.contest_id
        logger.info("Starting export.")

        logger.info("Creating dir structure.")
        try:
            os.mkdir(self.spool_dir)
        except OSError:
            logger.critical("The specified directory already exists, "
                            "I won't overwrite it.")
            return False
        os.mkdir(self.upload_dir)

        with SessionGen() as session:
            self.contest = Contest.get_from_id(self.contest_id, session)
            self.submissions = sorted(
                (submission for submission in self.contest.get_submissions()
                 if not submission.participation.hidden),
                key=lambda submission: submission.timestamp)

            # Creating users' directory.
            for participation in self.contest.participations:
                if not participation.hidden:
                    os.mkdir(
                        os.path.join(self.upload_dir,
                                     participation.user.username))

            try:
                self.export_submissions()
                self.export_ranking()
            except Exception:
                logger.critical("Generic error.", exc_info=True)
                return False

        logger.info("Export finished.")
        logger.operation = ""

        return True

    def export_submissions(self):
        """Export submissions' source files.

        """
        logger.info("Exporting submissions.")

        queue_file = io.open(os.path.join(self.spool_dir, "queue"),
                             "w",
                             encoding="utf-8")
        for submission in sorted(self.submissions, key=lambda x: x.timestamp):
            logger.info("Exporting submission %s.", submission.id)
            username = submission.participation.user.username
            task = submission.task.name
            timestamp = time.mktime(submission.timestamp.timetuple())

            # Get source files to the spool directory.
            ext = languagemanager.get_language(submission.language)\
                .source_extension
            submission_dir = os.path.join(self.upload_dir, username,
                                          "%s.%d.%s" % (task, timestamp, ext))
            os.mkdir(submission_dir)
            for filename, file_ in iteritems(submission.files):
                self.file_cacher.get_file_to_path(
                    file_.digest,
                    os.path.join(submission_dir, filename.replace(".%l", ext)))
            last_submission_dir = os.path.join(self.upload_dir, username,
                                               "%s.%s" % (task, ext))
            try:
                os.unlink(last_submission_dir)
            except OSError:
                pass
            os.symlink(os.path.basename(submission_dir), last_submission_dir)
            print("./upload/%s/%s.%d.%s" % (username, task, timestamp, ext),
                  file=queue_file)

            # Write results file for the submission.
            active_dataset = submission.task.active_dataset
            result = submission.get_result(active_dataset)
            if result.evaluated():
                res_file = io.open(os.path.join(
                    self.spool_dir,
                    "%d.%s.%s.%s.res" % (timestamp, username, task, ext)),
                                   "w",
                                   encoding="utf-8")
                res2_file = io.open(os.path.join(
                    self.spool_dir, "%s.%s.%s.res" % (username, task, ext)),
                                    "w",
                                    encoding="utf-8")
                total = 0.0
                for evaluation in result.evaluations:
                    outcome = float(evaluation.outcome)
                    total += outcome
                    line = "Executing on file with codename '%s' %s (%.4f)" % \
                        (evaluation.testcase.codename,
                         evaluation.text, outcome)
                    print(line, file=res_file)
                    print(line, file=res2_file)
                line = "Score: %.6f" % total
                print(line, file=res_file)
                print(line, file=res2_file)
                res_file.close()
                res2_file.close()

        print("", file=queue_file)
        queue_file.close()

    def export_ranking(self):
        """Exports the ranking in csv and txt (human-readable) form.

        """
        logger.info("Exporting ranking.")

        # Create the structure to store the scores.
        scores = dict((participation.user.username, 0.0)
                      for participation in self.contest.participations
                      if not participation.hidden)
        task_scores = dict(
            (task.id,
             dict((participation.user.username, 0.0)
                  for participation in self.contest.participations
                  if not participation.hidden)) for task in self.contest.tasks)

        is_partial = False
        for task in self.contest.tasks:
            for participation in self.contest.participations:
                if participation.hidden:
                    continue
                score, partial = task_score(participation, task)
                is_partial = is_partial or partial
                task_scores[task.id][participation.user.username] = score
                scores[participation.user.username] += score
        if is_partial:
            logger.warning("Some of the scores are not definitive.")

        sorted_usernames = sorted(iterkeys(scores),
                                  key=lambda username:
                                  (scores[username], username),
                                  reverse=True)
        sorted_tasks = sorted(self.contest.tasks, key=lambda task: task.num)

        ranking_file = io.open(os.path.join(self.spool_dir, "ranking.txt"),
                               "w",
                               encoding="utf-8")
        ranking_csv = io.open(os.path.join(self.spool_dir, "ranking.csv"),
                              "w",
                              encoding="utf-8")

        # Write rankings' header.
        n_tasks = len(sorted_tasks)
        print("Final Ranking of Contest `%s'" % self.contest.description,
              file=ranking_file)
        points_line = " %10s" * n_tasks
        csv_points_line = ",%s" * n_tasks
        print(("%20s %10s" % ("User", "Total")) +
              (points_line % tuple([t.name for t in sorted_tasks])),
              file=ranking_file)
        print(("%s,%s" % ("user", "total")) +
              (csv_points_line % tuple([t.name for t in sorted_tasks])),
              file=ranking_csv)

        # Write rankings' content.
        points_line = " %10.3f" * n_tasks
        csv_points_line = ",%.6f" * n_tasks
        for username in sorted_usernames:
            user_scores = [
                task_scores[task.id][username] for task in sorted_tasks
            ]
            print(("%20s %10.3f" % (username, scores[username])) +
                  (points_line % tuple(user_scores)),
                  file=ranking_file)
            print(("%s,%.6f" % (username, scores[username])) +
                  (csv_points_line % tuple(user_scores)),
                  file=ranking_csv)

        ranking_file.close()
        ranking_csv.close()
Example 6
class SpoolExporter:
    """This service creates a tree structure "similar" to the one used
    in Italian IOI repository for storing the results of a contest.

    """
    def __init__(self, contest_id, spool_dir):
        self.contest_id = contest_id
        self.spool_dir = spool_dir
        self.upload_dir = os.path.join(self.spool_dir, "upload")
        self.contest = None
        self.submissions = None

        self.file_cacher = FileCacher()

    def run(self):
        """Interface to make the class do its job."""
        return self.do_export()

    def do_export(self):
        """Run the actual export code.

        """
        logger.operation = "exporting contest %s" % self.contest_id
        logger.info("Starting export.")

        logger.info("Creating dir structure.")
        try:
            os.mkdir(self.spool_dir)
        except OSError:
            logger.critical("The specified directory already exists, "
                            "I won't overwrite it.")
            return False
        os.mkdir(self.upload_dir)

        with SessionGen() as session:
            self.contest = Contest.get_from_id(self.contest_id, session)
            self.submissions = \
                get_submissions(session, contest_id=self.contest_id) \
                .filter(not_(Participation.hidden)) \
                .order_by(Submission.timestamp).all()

            # Creating users' directory.
            for participation in self.contest.participations:
                if not participation.hidden:
                    os.mkdir(os.path.join(
                        self.upload_dir, participation.user.username))

            try:
                self.export_submissions()
                self.export_ranking()
            except Exception:
                logger.critical("Generic error.", exc_info=True)
                return False

        logger.info("Export finished.")
        logger.operation = ""

        return True

    def export_submissions(self):
        """Export submissions' source files.

        """
        logger.info("Exporting submissions.")

        with open(os.path.join(self.spool_dir, "queue"),
                  "wt", encoding="utf-8") as queue_file:
            for submission in sorted(self.submissions,
                                     key=lambda x: x.timestamp):
                logger.info("Exporting submission %s.", submission.id)
                username = submission.participation.user.username
                task = submission.task.name
                timestamp = time.mktime(submission.timestamp.timetuple())

                # Get source files to the spool directory.
                ext = languagemanager.get_language(submission.language)\
                    .source_extension
                submission_dir = os.path.join(
                    self.upload_dir, username,
                    "%s.%d.%s" % (task, timestamp, ext))
                os.mkdir(submission_dir)
                for filename, file_ in submission.files.items():
                    self.file_cacher.get_file_to_path(
                        file_.digest,
                        os.path.join(submission_dir,
                                     filename.replace(".%l", ext)))
                last_submission_dir = os.path.join(
                    self.upload_dir, username, "%s.%s" % (task, ext))
                try:
                    os.unlink(last_submission_dir)
                except OSError:
                    pass
                os.symlink(os.path.basename(submission_dir),
                           last_submission_dir)
                print("./upload/%s/%s.%d.%s" % (username, task, timestamp, ext),
                      file=queue_file)

                # Write results file for the submission.
                active_dataset = submission.task.active_dataset
                result = submission.get_result(active_dataset)
                if result.evaluated():
                    with open(os.path.join(self.spool_dir,
                                           "%d.%s.%s.%s.res"
                                           % (timestamp, username, task, ext)),
                              "wt", encoding="utf-8") as res_file, \
                            open(os.path.join(self.spool_dir,
                                              "%s.%s.%s.res"
                                              % (username, task, ext)),
                                 "wt", encoding="utf-8") as res2_file:
                        total = 0.0
                        for evaluation in result.evaluations:
                            outcome = float(evaluation.outcome)
                            total += outcome
                            line = (
                                "Executing on file with codename '%s' %s (%.4f)"
                                % (evaluation.testcase.codename,
                                   evaluation.text, outcome))
                            print(line, file=res_file)
                            print(line, file=res2_file)
                        line = "Score: %.6f" % total
                        print(line, file=res_file)
                        print(line, file=res2_file)

            print("", file=queue_file)

    def export_ranking(self):
        """Exports the ranking in csv and txt (human-readable) form.

        """
        logger.info("Exporting ranking.")

        # Create the structure to store the scores.
        scores = dict((participation.user.username, 0.0)
                      for participation in self.contest.participations
                      if not participation.hidden)
        task_scores = dict(
            (task.id, dict((participation.user.username, 0.0)
                           for participation in self.contest.participations
                           if not participation.hidden))
            for task in self.contest.tasks)

        is_partial = False
        for task in self.contest.tasks:
            for participation in self.contest.participations:
                if participation.hidden:
                    continue
                score, partial = task_score(participation, task)
                is_partial = is_partial or partial
                task_scores[task.id][participation.user.username] = score
                scores[participation.user.username] += score
        if is_partial:
            logger.warning("Some of the scores are not definitive.")

        sorted_usernames = sorted(scores.keys(),
                                  key=lambda username: (scores[username],
                                                        username),
                                  reverse=True)
        sorted_tasks = sorted(self.contest.tasks,
                              key=lambda task: task.num)

        with open(os.path.join(self.spool_dir, "ranking.txt"),
                  "wt", encoding="utf-8") as ranking_file, \
                open(os.path.join(self.spool_dir, "ranking.csv"),
                     "wt", encoding="utf-8") as ranking_csv:

            # Write rankings' header.
            n_tasks = len(sorted_tasks)
            print("Final Ranking of Contest `%s'" %
                  self.contest.description, file=ranking_file)
            points_line = " %10s" * n_tasks
            csv_points_line = ",%s" * n_tasks
            print(("%20s %10s" % ("User", "Total")) +
                  (points_line % tuple([t.name for t in sorted_tasks])),
                  file=ranking_file)
            print(("%s,%s" % ("user", "total")) +
                  (csv_points_line % tuple([t.name for t in sorted_tasks])),
                  file=ranking_csv)

            # Write rankings' content.
            points_line = " %10.3f" * n_tasks
            csv_points_line = ",%.6f" * n_tasks
            for username in sorted_usernames:
                user_scores = [task_scores[task.id][username]
                               for task in sorted_tasks]
                print(("%20s %10.3f" % (username, scores[username])) +
                      (points_line % tuple(user_scores)),
                      file=ranking_file)
                print(("%s,%.6f" % (username, scores[username])) +
                      (csv_points_line % tuple(user_scores)),
                      file=ranking_csv)
Example 7
class SpoolExporter(object):
    """This service creates a tree structure "similar" to the one used
    in Italian IOI repository for storing the results of a contest.

    """
    def __init__(self, contest_id, spool_dir):
        self.contest_id = contest_id
        self.spool_dir = spool_dir
        self.upload_dir = os.path.join(self.spool_dir, "upload")
        self.contest = None
        self.submissions = None

        self.file_cacher = FileCacher()

    def run(self):
        """Interface to make the class do its job."""
        return self.do_export()

    def do_export(self):
        """Run the actual export code.

        """
        logger.operation = "exporting contest %s" % self.contest_id
        logger.info("Starting export.")

        logger.info("Creating dir structure.")
        try:
            os.mkdir(self.spool_dir)
        except OSError:
            logger.critical("The specified directory already exists, "
                            "I won't overwrite it.")
            return False
        os.mkdir(self.upload_dir)

        with SessionGen() as session:
            self.contest = Contest.get_from_id(self.contest_id, session)
            self.submissions = sorted(
                (submission for submission in self.contest.get_submissions()
                 if not submission.user.hidden),
                key=lambda submission: submission.timestamp)

            # Creating users' directory.
            for user in self.contest.users:
                if not user.hidden:
                    os.mkdir(os.path.join(self.upload_dir, user.username))

            try:
                self.export_submissions()
                self.export_ranking()
            except Exception:
                logger.critical("Generic error.", exc_info=True)
                return False

        logger.info("Export finished.")
        logger.operation = ""

        return True

    def export_submissions(self):
        """Export submissions' source files.

        """
        logger.info("Exporting submissions.")

        queue_file = codecs.open(os.path.join(self.spool_dir, "queue"),
                                 "w",
                                 encoding="utf-8")
        for submission in sorted(self.submissions, key=lambda x: x.timestamp):
            logger.info("Exporting submission %s." % submission.id)
            username = submission.user.username
            task = submission.task.name
            timestamp = time.mktime(submission.timestamp.timetuple())

            # Get source files to the spool directory.
            submission_dir = os.path.join(
                self.upload_dir, username,
                "%s.%d.%s" % (task, timestamp, submission.language))
            os.mkdir(submission_dir)
            for filename, file_ in submission.files.iteritems():
                self.file_cacher.get_file_to_path(
                    file_.digest, os.path.join(submission_dir, filename))
            last_submission_dir = os.path.join(
                self.upload_dir, username,
                "%s.%s" % (task, submission.language))
            try:
                os.unlink(last_submission_dir)
            except OSError:
                pass
            os.symlink(os.path.basename(submission_dir), last_submission_dir)
            print >> queue_file, "./upload/%s/%s.%d.%s" % \
                (username, task, timestamp, submission.language)

            # Write results file for the submission.
            active_dataset = submission.task.active_dataset
            result = submission.get_result(active_dataset)
            if result.evaluated():
                res_file = codecs.open(os.path.join(
                    self.spool_dir, "%d.%s.%s.%s.res" %
                    (timestamp, username, task, submission.language)),
                                       "w",
                                       encoding="utf-8")
                res2_file = codecs.open(os.path.join(
                    self.spool_dir,
                    "%s.%s.%s.res" % (username, task, submission.language)),
                                        "w",
                                        encoding="utf-8")
                total = 0.0
                for evaluation in result.evaluations:
                    outcome = float(evaluation.outcome)
                    total += outcome
                    line = "Executing on file with codename '%s' %s (%.4f)" % \
                        (evaluation.testcase.codename,
                         evaluation.text, outcome)
                    print >> res_file, line
                    print >> res2_file, line
                line = "Score: %.6f" % total
                print >> res_file, line
                print >> res2_file, line
                res_file.close()
                res2_file.close()

        print >> queue_file
        queue_file.close()

    def export_ranking(self):
        """Exports the ranking in csv and txt (human-readable) form.

        """
        logger.info("Exporting ranking.")

        # Create the structure to store the scores.
        scores = dict((user.username, 0.0) for user in self.contest.users
                      if not user.hidden)
        task_scores = dict((task.id,
                            dict((user.username, 0.0)
                                 for user in self.contest.users
                                 if not user.hidden))
                           for task in self.contest.tasks)
        last_scores = dict((task.id,
                            dict((user.username, 0.0)
                                 for user in self.contest.users
                                 if not user.hidden))
                           for task in self.contest.tasks)

        # Make the score type compute the scores.
        scorers = {}
        for task in self.contest.tasks:
            scorers[task.id] = get_score_type(dataset=task.active_dataset)

        for submission in self.submissions:
            active_dataset = submission.task.active_dataset
            result = submission.get_result(active_dataset)
            scorers[submission.task_id].add_submission(
                submission.id, submission.timestamp, submission.user.username,
                result.evaluated(),
                dict((ev.codename, {
                    "outcome": ev.outcome,
                    "text": ev.text,
                    "time": ev.execution_time,
                    "memory": ev.execution_memory
                }) for ev in result.evaluations), submission.tokened())

        # Put together all the scores.
        for submission in self.submissions:
            task_id = submission.task_id
            username = submission.user.username
            details = scorers[task_id].pool[submission.id]
            last_scores[task_id][username] = details["score"]
            if details["tokened"]:
                task_scores[task_id][username] = max(
                    task_scores[task_id][username], details["score"])

        # Merge tokened and last submissions.
        for username in scores:
            for task_id in task_scores:
                task_scores[task_id][username] = max(
                    task_scores[task_id][username],
                    last_scores[task_id][username])
            #print username, [task_scores[task_id][username]
            #                        for task_id in task_scores]
            scores[username] = sum(task_scores[task_id][username]
                                   for task_id in task_scores)

        sorted_usernames = sorted(scores.keys(),
                                  key=lambda username:
                                  (scores[username], username),
                                  reverse=True)
        sorted_tasks = sorted(self.contest.tasks, key=lambda task: task.num)

        ranking_file = codecs.open(os.path.join(self.spool_dir,
                                                "classifica.txt"),
                                   "w",
                                   encoding="utf-8")
        ranking_csv = codecs.open(os.path.join(self.spool_dir,
                                               "classifica.csv"),
                                  "w",
                                  encoding="utf-8")

        # Write rankings' header.
        n_tasks = len(sorted_tasks)
        print >> ranking_file, "Classifica finale del contest `%s'" % \
            self.contest.description
        points_line = " %10s" * n_tasks
        csv_points_line = ",%s" * n_tasks
        print >> ranking_file, ("%20s %10s" % ("Utente", "Totale")) + \
            (points_line % tuple([t.name for t in sorted_tasks]))
        print >> ranking_csv, ("%s,%s" % ("utente", "totale")) + \
            (csv_points_line % tuple([t.name for t in sorted_tasks]))

        # Write rankings' content.
        points_line = " %10.3f" * n_tasks
        csv_points_line = ",%.6f" * n_tasks
        for username in sorted_usernames:
            user_scores = [
                task_scores[task.id][username] for task in sorted_tasks
            ]
            print >> ranking_file, ("%20s %10.3f" % (
                    username,
                    scores[username])) + \
                    (points_line % tuple(user_scores))
            print >> ranking_csv, ("%s,%.6f" % (
                    username,
                    scores[username])) + \
                    (csv_points_line % tuple(user_scores))

        ranking_file.close()
        ranking_csv.close()
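
The merging logic in export_ranking above implements one rule: a user's final score on a task is the maximum between the best score among their tokened submissions and the score of their last submission. A minimal sketch of just that rule, with made-up data (tuples are (task, score, tokened) in timestamp order):

submissions = [
    ("taskA", 90.0, True),   # tokened: counts toward the best tokened score
    ("taskA", 40.0, False),  # last submission on taskA, not tokened
    ("taskB", 20.0, True),
    ("taskB", 75.0, False),  # last submission on taskB
]

best_tokened = {}
last = {}
for task, score, tokened in submissions:
    last[task] = score
    if tokened:
        best_tokened[task] = max(best_tokened.get(task, 0.0), score)

final = dict((task, max(best_tokened.get(task, 0.0), last[task]))
             for task in last)
print(final)  # {'taskA': 90.0, 'taskB': 75.0}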
Example no. 8
class ContestExporter(object):

    """This service exports every data about the contest that CMS
    knows. The process of exporting and importing again should be
    idempotent.

    """

    def __init__(self, contest_id, export_target,
                 dump_files, dump_model, skip_generated,
                 skip_submissions, skip_user_tests):
        self.contest_id = contest_id
        self.dump_files = dump_files
        self.dump_model = dump_model
        self.skip_generated = skip_generated
        self.skip_submissions = skip_submissions
        self.skip_user_tests = skip_user_tests

        # If target is not provided, we use the contest's name.
        if export_target == "":
            with SessionGen() as session:
                contest = Contest.get_from_id(self.contest_id, session)
                self.export_target = "dump_%s.tar.gz" % contest.name
                logger.warning("export_target not given, using \"%s\""
                               % self.export_target)
        else:
            self.export_target = export_target

        self.file_cacher = FileCacher()

    def do_export(self):
        """Run the actual export code."""
        logger.info("Starting export.")

        export_dir = self.export_target
        archive_info = get_archive_info(self.export_target)

        if archive_info["write_mode"] != "":
            # We are able to write to this archive.
            if os.path.exists(self.export_target):
                logger.critical("The specified file already exists, "
                                "I won't overwrite it.")
                return False
            export_dir = os.path.join(tempfile.mkdtemp(),
                                      archive_info["basename"])

        logger.info("Creating dir structure.")
        try:
            os.mkdir(export_dir)
        except OSError:
            logger.critical("The specified directory already exists, "
                            "I won't overwrite it.")
            return False

        files_dir = os.path.join(export_dir, "files")
        descr_dir = os.path.join(export_dir, "descriptions")
        os.mkdir(files_dir)
        os.mkdir(descr_dir)

        with SessionGen() as session:

            contest = Contest.get_from_id(self.contest_id, session)

            # Export files.
            if self.dump_files:
                logger.info("Exporting files.")
                files = contest.enumerate_files(self.skip_submissions,
                                                self.skip_user_tests,
                                                self.skip_generated)
                for file_ in files:
                    if not self.safe_get_file(file_,
                                              os.path.join(files_dir, file_),
                                              os.path.join(descr_dir, file_)):
                        return False

            # Export the contest in JSON format.
            if self.dump_model:
                logger.info("Exporting the contest to a JSON file.")

                # We use strings because they'll be the keys of a JSON
                # object; the contest will have ID 0.
                self.ids = {contest.sa_identity_key: "0"}
                self.queue = [contest]

                data = dict()
                while self.queue:
                    obj = self.queue.pop(0)
                    data[self.ids[obj.sa_identity_key]] = self.export_object(obj)

                # Specify the "root" of the data graph
                data["_objects"] = ["0"]

                data["_version"] = model_version

                with io.open(os.path.join(export_dir,
                                          "contest.json"), "wb") as fout:
                    json.dump(data, fout, encoding="utf-8",
                              indent=4, sort_keys=True)

        # If the admin requested export to file, we do that.
        if archive_info["write_mode"] != "":
            archive = tarfile.open(self.export_target,
                                   archive_info["write_mode"])
            archive.add(export_dir, arcname=archive_info["basename"])
            archive.close()
            rmtree(export_dir)

        logger.info("Export finished.")

        return True

    def get_id(self, obj):
        obj_key = obj.sa_identity_key
        if obj_key not in self.ids:
            # We use strings because they'll be the keys of a JSON object
            self.ids[obj_key] = str(len(self.ids))
            self.queue.append(obj)

        return self.ids[obj_key]

    def export_object(self, obj):
        """Export the given object, returning a JSON-encodable dict.

        The returned dict will contain a "_class" item (the name of the
        class of the given object), an item for each column property
        (with a value properly translated to a JSON-compatible type)
        and an item for each relationship property (which will be an ID
        or a collection of IDs).

        The IDs used in the exported dict aren't related to the ones
        used in the DB: they are newly generated and their scope is
        limited to the exported file only. They are drawn from a single
        ID space shared among all classes (that is, two objects never
        share the same ID, even if they are of different classes).

        If, when exporting the relationship, we find an object without
        an ID we generate a new ID, assign it to the object and append
        the object to the queue of objects to export.

        The self.skip_submissions flag controls whether we export
        submissions (and all other objects that can be reached only by
        passing through a submission) or not.

        """

        cls = type(obj)

        data = {"_class": cls.__name__}

        for prp in cls._col_props:
            col, = prp.columns
            col_type = type(col.type)

            val = getattr(obj, prp.key)
            if col_type in [Boolean, Integer, Float, Unicode, RepeatedUnicode]:
                data[prp.key] = val
            elif col_type is String:
                data[prp.key] = \
                    val.decode('latin1') if val is not None else None
            elif col_type is DateTime:
                data[prp.key] = \
                    make_timestamp(val) if val is not None else None
            elif col_type is Interval:
                data[prp.key] = \
                    val.total_seconds() if val is not None else None
            else:
                raise RuntimeError("Unknown SQLAlchemy column type: %s"
                                   % col_type)

        for prp in cls._rel_props:
            other_cls = prp.mapper.class_

            # Skip submissions if requested
            if self.skip_submissions and other_cls is Submission:
                continue

            # Skip user_tests if requested
            if self.skip_user_tests and other_cls is UserTest:
                continue

            # Skip generated data if requested
            if self.skip_generated and other_cls in (SubmissionResult,
                                                     UserTestResult):
                continue

            val = getattr(obj, prp.key)
            if val is None:
                data[prp.key] = None
            elif isinstance(val, other_cls):
                data[prp.key] = self.get_id(val)
            elif isinstance(val, list):
                data[prp.key] = list(self.get_id(i) for i in val)
            elif isinstance(val, dict):
                data[prp.key] = \
                    dict((k, self.get_id(v)) for k, v in val.iteritems())
            else:
                raise RuntimeError("Unknown SQLAlchemy relationship type: %s"
                                   % type(val))

        return data

    def safe_get_file(self, digest, path, descr_path=None):
        """Get a file from FileCacher, ensuring that the digest is
        correct.

        digest (string): the digest of the file to retrieve.
        path (string): the path where to save the file.
        descr_path (string): the path where to save the description.

        return (bool): True if everything went well, False otherwise.

        """

        # TODO - Probably this method could be merged into FileCacher.

        # First get the file
        try:
            self.file_cacher.get_file_to_path(digest, path)
        except Exception as error:
            logger.error("File %s could not retrieved from file server (%r)."
                         % (digest, error))
            return False

        # Then check the digest
        calc_digest = sha1sum(path)
        if digest != calc_digest:
            logger.critical("File %s has wrong hash %s."
                            % (digest, calc_digest))
            return False

        # If requested, also retrieve the file's description.
        if descr_path is not None:
            with io.open(descr_path, 'wt', encoding='utf-8') as fout:
                fout.write(self.file_cacher.describe(digest))

        return True
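
For reference, safe_get_file above relies on a sha1sum helper to recompute the digest of the retrieved file; the helper itself is not shown in this example. A minimal sketch of what such a helper could look like, assuming the digests are plain SHA-1 hex strings (hashlib-based, chunked so large files are not loaded into memory at once):

import hashlib

def sha1sum(path, chunk_size=8192):
    """Return the SHA-1 hex digest of the file at path."""
    sha = hashlib.sha1()
    with open(path, "rb") as fobj:
        for chunk in iter(lambda: fobj.read(chunk_size), b""):
            sha.update(chunk)
    return sha.hexdigest()

# Usage sketch, mirroring safe_get_file's check:
# expected_digest = "..."  # the digest the file is stored under
# if sha1sum("/tmp/retrieved_file") != expected_digest:
#     raise ValueError("digest mismatch, file corrupted in transit")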