Example #1
    def get(self, task_name, submission_num):
        participation = self.current_user

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        submission = self.sql_session.query(Submission)\
            .filter(Submission.participation == participation)\
            .filter(Submission.task == task)\
            .order_by(Submission.timestamp)\
            .offset(int(submission_num) - 1)\
            .first()
        if submission is None:
            raise tornado.web.HTTPError(404)

        sr = submission.get_result(task.active_dataset)
        score_type = get_score_type(dataset=task.active_dataset)

        details = None
        if sr is not None:
            if submission.tokened():
                details = sr.score_details
            else:
                details = sr.public_score_details

            if sr.scored():
                details = score_type.get_html_details(details, self._)
            else:
                details = None

        self.render("submission_details.html", sr=sr, details=details)
Example #2
def safe_get_score_type(env, *args, **kwargs):
    try:
        return get_score_type(*args, **kwargs)
    # The score type's constructor is called, which may raise any
    # arbitrary exception, hence we stay as general as possible.
    except Exception as err:
        return env.undefined("ScoreType not found: %s" % err)
Example #3
def safe_get_score_type(env, *args, **kwargs):
    try:
        return get_score_type(*args, **kwargs)
    # The score type's constructor is called, which may raise any
    # arbitrary exception, hence we stay as general as possible.
    except Exception as err:
        return env.undefined("ScoreType not found: %s" % err)
Example #4
    def get(self, task_name, submission_num):
        participation = self.current_user

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)
        if self.contest.restrict_level \
                and self.current_user.user.level != task.level \
                and self.current_user.user.level != "x" \
                and task.level != "x":
            raise tornado.web.HTTPError(404)

        submission = self.sql_session.query(Submission)\
            .filter(Submission.participation == participation)\
            .filter(Submission.task == task)\
            .order_by(Submission.timestamp)\
            .offset(int(submission_num) - 1)\
            .first()
        if submission is None:
            raise tornado.web.HTTPError(404)

        sr = submission.get_result(task.active_dataset)
        data = dict()

        if sr is None:
            # implicit compiling state while result is not created
            data["status"] = SubmissionResult.COMPILING
        else:
            data["status"] = sr.get_status()

        if data["status"] == SubmissionResult.COMPILING:
            data["status_text"] = self._("Compiling...")
        elif data["status"] == SubmissionResult.COMPILATION_FAILED:
            data["status_text"] = "%s <a class=\"details\">%s</a>" % (
                self._("Compilation failed"), self._("details"))
        elif data["status"] == SubmissionResult.EVALUATING:
            data["status_text"] = self._("Evaluating...")
        elif data["status"] == SubmissionResult.SCORING:
            data["status_text"] = self._("Scoring...")
        elif data["status"] == SubmissionResult.SCORED:
            data["status_text"] = "%s <a class=\"details\">%s</a>" % (
                self._("Evaluated"), self._("details"))

            score_type = get_score_type(dataset=task.active_dataset)
            if score_type.max_public_score > 0:
                data["max_public_score"] = \
                    round(score_type.max_public_score, task.score_precision)
                data["public_score"] = \
                    round(sr.public_score, task.score_precision)
                data["public_score_message"] = score_type.format_score(
                    sr.public_score, score_type.max_public_score,
                    sr.public_score_details, task.score_precision, self._)
            if submission.token is not None:
                data["max_score"] = \
                    round(score_type.max_score, task.score_precision)
                data["score"] = \
                    round(sr.score, task.score_precision)
                data["score_message"] = score_type.format_score(
                    sr.score, score_type.max_score, sr.score_details,
                    task.score_precision, self._)

        self.write(data)
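For reference, a hedged sketch of the JSON this handler produces for a scored, tokened submission. The keys come from the code above; all values (including the numeric status code) are illustrative assumptions.

example_response = {
    "status": 3,  # SubmissionResult.SCORED (numeric value assumed)
    "status_text": 'Evaluated <a class="details">details</a>',
    "max_public_score": 50.0,           # present only if max_public_score > 0
    "public_score": 30.0,
    "public_score_message": "30 / 50",  # output of format_score (assumed)
    "max_score": 100.0,                 # present only if a token was used
    "score": 60.0,
    "score_message": "60 / 100",        # output of format_score (assumed)
}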
Example #5
    def get(self, task_name, submission_num):
        participation = self.current_user

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        submission = self.sql_session.query(Submission)\
            .filter(Submission.participation == participation)\
            .filter(Submission.task == task)\
            .order_by(Submission.timestamp)\
            .offset(int(submission_num) - 1)\
            .first()
        if submission is None:
            raise tornado.web.HTTPError(404)

        sr = submission.get_result(task.active_dataset)
        score_type = get_score_type(dataset=task.active_dataset)

        details = None
        if sr is not None:
            if submission.tokened():
                details = sr.score_details
            else:
                details = sr.public_score_details

            if sr.scored():
                details = score_type.get_html_details(details, self._)
            else:
                details = None

        self.render("submission_details.html",
                    sr=sr,
                    details=details)
Example #6
    def get(self, task_name, submission_num):
        participation = self.current_user

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)
        if self.contest.restrict_level \
                and self.current_user.user.level != task.level \
                and self.current_user.user.level != "x" \
                and task.level != "x":
            raise tornado.web.HTTPError(404)

        submission = self.sql_session.query(Submission)\
            .filter(Submission.participation == participation)\
            .filter(Submission.task == task)\
            .order_by(Submission.timestamp)\
            .offset(int(submission_num) - 1)\
            .first()
        if submission is None:
            raise tornado.web.HTTPError(404)

        sr = submission.get_result(task.active_dataset)
        data = dict()

        if sr is None:
            # implicit compiling state while result is not created
            data["status"] = SubmissionResult.COMPILING
        else:
            data["status"] = sr.get_status()

        if data["status"] == SubmissionResult.COMPILING:
            data["status_text"] = self._("Compiling...")
        elif data["status"] == SubmissionResult.COMPILATION_FAILED:
            data["status_text"] = "%s <a class=\"details\">%s</a>" % (
                self._("Compilation failed"), self._("details"))
        elif data["status"] == SubmissionResult.EVALUATING:
            data["status_text"] = self._("Evaluating...")
        elif data["status"] == SubmissionResult.SCORING:
            data["status_text"] = self._("Scoring...")
        elif data["status"] == SubmissionResult.SCORED:
            data["status_text"] = "%s <a class=\"details\">%s</a>" % (
                self._("Evaluated"), self._("details"))

            score_type = get_score_type(dataset=task.active_dataset)
            if score_type.max_public_score > 0:
                data["max_public_score"] = \
                    round(score_type.max_public_score, task.score_precision)
                data["public_score"] = \
                    round(sr.public_score, task.score_precision)
                data["public_score_message"] = score_type.format_score(
                    sr.public_score, score_type.max_public_score,
                    sr.public_score_details, task.score_precision, self._)
            if submission.token is not None:
                data["max_score"] = \
                    round(score_type.max_score, task.score_precision)
                data["score"] = \
                    round(sr.score, task.score_precision)
                data["score_message"] = score_type.format_score(
                    sr.score, score_type.max_score,
                    sr.score_details, task.score_precision, self._)

        self.write(data)
Example #7
    def execute(self, entry):
        """Assign a score to a submission result.

        This is the core of ScoringService: here we retrieve the result
        from the database, check if it is in the correct status,
        instantiate its ScoreType, compute its score, store it back in
        the database and tell ProxyService to update RWS if needed.

        entry (QueueEntry): entry containing the operation to perform.

        """
        operation = entry.item
        with SessionGen() as session:
            # Obtain submission.
            submission = Submission.get_from_id(operation.submission_id, session)
            if submission is None:
                raise ValueError("Submission %d not found in the database." % operation.submission_id)

            # Obtain dataset.
            dataset = Dataset.get_from_id(operation.dataset_id, session)
            if dataset is None:
                raise ValueError("Dataset %d not found in the database." % operation.dataset_id)

            # Obtain submission result.
            submission_result = submission.get_result(dataset)

            # It means it was not even compiled (for some reason).
            if submission_result is None:
                raise ValueError(
                    "Submission result %d(%d) was not found." % (operation.submission_id, operation.dataset_id)
                )

            # Check if it's ready to be scored.
            if not submission_result.needs_scoring():
                if submission_result.scored():
                    logger.info(
                        "Submission result %d(%d) is already scored.", operation.submission_id, operation.dataset_id
                    )
                    return
                else:
                    raise ValueError(
                        "The state of the submission result "
                        "%d(%d) doesn't allow scoring." % (operation.submission_id, operation.dataset_id)
                    )

            # Instantiate the score type.
            score_type = get_score_type(dataset=dataset)

            # Compute score and fill it in the database.
            submission_result.score, \
                submission_result.score_details, \
                submission_result.public_score, \
                submission_result.public_score_details, \
                submission_result.ranking_score_details = \
                score_type.compute_score(submission_result)

            # Store it.
            session.commit()

            # If dataset is the active one, update RWS.
            if dataset is submission.task.active_dataset:
                self.proxy_service.submission_scored(submission_id=submission.id)
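The unpacking above implies that compute_score returns a 5-tuple: (score, score_details, public_score, public_score_details, ranking_score_details). A stand-in illustrating just that contract (not a real CMS score type):

class NullScoreType(object):
    def compute_score(self, submission_result):
        # score, score_details, public_score, public_score_details,
        # ranking_score_details -- all zero/empty in this stub.
        return 0.0, [], 0.0, [], []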
Example #8
    def initialize(self):
        """Send basic data to all the rankings.

        It's data that's supposed to be sent before the contest, that's
        needed to understand what we're talking about when we send
        submissions: contest, users, tasks.

        No support for teams, flags and faces.

        """
        logger.info("Initializing rankings.")

        with SessionGen() as session:
            contest = Contest.get_from_id(self.contest_id, session)

            if contest is None:
                logger.error(
                    "Received request for unexistent contest "
                    "id %s.", self.contest_id)
                raise KeyError("Contest not found.")

            contest_id = encode_id(contest.name)
            contest_data = {
                "name": contest.description,
                "begin": int(make_timestamp(contest.start)),
                "end": int(make_timestamp(contest.stop)),
                "score_precision": contest.score_precision
            }

            users = dict()

            for user in contest.users:
                if not user.hidden:
                    users[encode_id(user.username)] = \
                        {"f_name": user.first_name,
                         "l_name": user.last_name,
                         "team": None}

            tasks = dict()

            for task in contest.tasks:
                score_type = get_score_type(dataset=task.active_dataset)
                tasks[encode_id(task.name)] = \
                    {"short_name": task.name,
                     "name": task.title,
                     "contest": encode_id(contest.name),
                     "order": task.num,
                     "max_score": score_type.max_score,
                     "extra_headers": score_type.ranking_headers,
                     "score_precision": task.score_precision,
                     "score_mode": task.score_mode}

        self.enqueue(
            ProxyOperation(ProxyExecutor.CONTEST_TYPE,
                           {contest_id: contest_data}))
        self.enqueue(ProxyOperation(ProxyExecutor.USER_TYPE, users))
        self.enqueue(ProxyOperation(ProxyExecutor.TASK_TYPE, tasks))
Example #9
    def initialize(self):
        """Send basic data to all the rankings.

        It's data that's supposed to be sent before the contest, that's
        needed to understand what we're talking about when we send
        submissions: contest, users, tasks.

        No support for teams, flags and faces.

        """
        logger.info("Initializing rankings.")

        with SessionGen() as session:
            contest = Contest.get_from_id(self.contest_id, session)

            if contest is None:
                logger.error("Received request for unexistent contest "
                             "id %s.", self.contest_id)
                raise KeyError("Contest not found.")

            contest_id = encode_id(contest.name)
            contest_data = {
                "name": contest.description,
                "begin": int(make_timestamp(contest.start)),
                "end": int(make_timestamp(contest.stop)),
                "score_precision": contest.score_precision}

            users = dict()

            for participation in contest.participations:
                user = participation.user
                if not participation.hidden:
                    users[encode_id(user.username)] = \
                        {"f_name": user.first_name,
                         "l_name": user.last_name,
                         "team": None}

            tasks = dict()

            for task in contest.tasks:
                score_type = get_score_type(dataset=task.active_dataset)
                tasks[encode_id(task.name)] = \
                    {"short_name": task.name,
                     "name": task.title,
                     "contest": encode_id(contest.name),
                     "order": task.num,
                     "max_score": score_type.max_score,
                     "extra_headers": score_type.ranking_headers,
                     "score_precision": task.score_precision,
                     "score_mode": task.score_mode}

        self.enqueue(ProxyOperation(ProxyExecutor.CONTEST_TYPE,
                                    {contest_id: contest_data}))
        self.enqueue(ProxyOperation(ProxyExecutor.USER_TYPE, users))
        self.enqueue(ProxyOperation(ProxyExecutor.TASK_TYPE, tasks))
Example #10
    def __init__(self, shard, contest_id):
        logger.initialize(ServiceCoord("ScoringService", shard))
        Service.__init__(self, shard, custom_logger=logger)

        self.contest_id = contest_id

        # Initialize scorers, the ScoreType objects holding all
        # submissions for a given task and deciding scores.
        self.scorers = {}
        with SessionGen(commit=False) as session:
            contest = session.query(Contest).\
                      filter_by(id=contest_id).first()
            logger.info("Loaded contest %s" % contest.name)
            contest.create_empty_ranking_view(timestamp=contest.start)
            for task in contest.tasks:
                self.scorers[task.id] = get_score_type(task=task)
            session.commit()

        # If for some reason (SS switched off for a while, or broken
        # connection with ES), submissions have been left without
        # score, this is the list where you want to put their
        # ids. Note that list != [] if and only if there is an alive
        # timeout for the method "score_old_submission".
        self.submission_ids_to_score = []
        self.submission_ids_to_token = []

        # We need to load every submission at start, but we don't want
        # to invalidate every score so that we can simply load the
        # score-less submissions. So we keep a set of submissions that
        # we analyzed (for scoring and for tokens).
        self.submission_ids_scored = set()
        self.submission_ids_tokened = set()

        # Initialize ranking web servers we need to send data to.
        self.rankings = []
        for i in xrange(len(config.rankings_address)):
            address = config.rankings_address[i]
            username = config.rankings_username[i]
            password = config.rankings_password[i]
            auth = get_authorization(username, password)
            self.rankings.append(("%s:%d" % tuple(address), auth))
        self.operation_queue = []

        for ranking in self.rankings:
            self.operation_queue.append((self.initialize, [ranking]))

        self.add_timeout(self.dispatch_operations, None,
                         ScoringService.CHECK_DISPATCH_TIME,
                         immediately=True)
        self.add_timeout(self.search_jobs_not_done, None,
                         ScoringService.JOBS_NOT_DONE_CHECK_TIME,
                         immediately=True)
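A readability note on the xrange-indexed loop above: the three parallel config lists could equivalently be paired with zip(), behavior unchanged.

for address, username, password in zip(config.rankings_address,
                                       config.rankings_username,
                                       config.rankings_password):
    auth = get_authorization(username, password)
    self.rankings.append(("%s:%d" % tuple(address), auth))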
Example #11
    def _initialize_scorers(self):
        """Initialize scorers, the ScoreType objects holding all
        submissions for a given task and deciding scores, and create
        an empty ranking view for the contest.

        """
        with SessionGen(commit=False) as session:
            contest = session.query(Contest).\
                      filter_by(id=self.contest_id).first()
            for task in contest.tasks:
                try:
                    self.scorers[task.id] = get_score_type(task=task)
                except Exception as error:
                    logger.critical("Cannot get score type for task %s: %r" %
                                    (task.name, error))
                    self.exit()
            session.commit()
Example #12
    def _initialize_scorers(self):
        """Initialize scorers, the ScoreType objects holding all
        submissions for a given task and deciding scores, and create
        an empty ranking view for the contest.

        """
        with SessionGen(commit=False) as session:
            contest = session.query(Contest).\
                      filter_by(id=self.contest_id).first()
            for task in contest.tasks:
                try:
                    self.scorers[task.id] = get_score_type(task=task)
                except Exception as error:
                    logger.critical("Cannot get score type for task %s.\n%r" %
                                    (task.name, error))
                    self.exit()
            session.commit()
Example #13
    def _initialize_scorers(self):
        """Initialize scorers, the ScoreType objects holding all
        submissions for a given task and deciding scores, and create
        an empty ranking view for the contest.

        """
        with SessionGen(commit=False) as session:
            contest = Contest.get_from_id(self.contest_id, session)

            for task in contest.tasks:
                for dataset in task.datasets:
                    try:
                        self.scorers[dataset.id] = \
                            get_score_type(dataset=dataset)
                    except Exception as error:
                        logger.critical(
                            "Cannot get score type for task %s(%d): %r" %
                            (task.name, dataset.id, error))
                        self.exit()
            session.commit()
Example #14
    def score_type_object(self):
        public_testcases = {k: tc.public for k, tc in self.testcases.items()}
        if not hasattr(self, "_cached_score_type_object") \
                or self.score_type != self._cached_score_type \
                or (self.score_type_parameters
                    != self._cached_score_type_parameters) \
                or public_testcases != self._cached_public_testcases:
            # Import late to avoid a circular dependency.
            from cms.grading.scoretypes import get_score_type
            # This can raise.
            self._cached_score_type_object = get_score_type(
                self.score_type, self.score_type_parameters, public_testcases)
            # If an exception is raised these updates don't take place:
            # that way, next time this property is accessed, we get a
            # cache miss again and the same exception is raised again.
            self._cached_score_type = self.score_type
            self._cached_score_type_parameters = \
                copy.deepcopy(self.score_type_parameters)
            self._cached_public_testcases = public_testcases
        return self._cached_score_type_object
Example #15
File: task.py Project: cms-dev/cms
    def score_type_object(self):
        public_testcases = {k: tc.public
                            for k, tc in self.testcases.items()}
        if not hasattr(self, "_cached_score_type_object") \
                or self.score_type != self._cached_score_type \
                or (self.score_type_parameters
                    != self._cached_score_type_parameters) \
                or public_testcases != self._cached_public_testcases:
            # Import late to avoid a circular dependency.
            from cms.grading.scoretypes import get_score_type
            # This can raise.
            self._cached_score_type_object = get_score_type(
                self.score_type, self.score_type_parameters, public_testcases)
            # If an exception is raised these updates don't take place:
            # that way, next time this property is accessed, we get a
            # cache miss again and the same exception is raised again.
            self._cached_score_type = self.score_type
            self._cached_score_type_parameters = \
                copy.deepcopy(self.score_type_parameters)
            self._cached_public_testcases = public_testcases
        return self._cached_score_type_object
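The caching pattern used by both versions above, in isolation: rebuild only when an input changed, and update the cache keys only after a successful build, so that a failed build re-raises on the next access instead of poisoning the cache. A generic sketch, not CMS code.

import copy

class CachedFactory(object):
    def __init__(self, build):
        self._build = build

    def get(self, *key):
        if not hasattr(self, "_cached_value") or key != self._cached_key:
            self._cached_value = self._build(*key)  # may raise; cache untouched
            self._cached_key = copy.deepcopy(key)   # updated only on success
        return self._cached_value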
Example #16
    def export_ranking(self):
        """Exports the ranking in csv and txt (human-readable) form.

        """
        logger.info("Exporting ranking.")

        # Create the structure to store the scores.
        scores = dict((user.username, 0.0)
                      for user in self.contest.users
                      if not user.hidden)
        task_scores = dict((task.id, dict((user.username, 0.0)
                                          for user in self.contest.users
                                          if not user.hidden))
                           for task in self.contest.tasks)
        last_scores = dict((task.id, dict((user.username, 0.0)
                                          for user in self.contest.users
                                          if not user.hidden))
                           for task in self.contest.tasks)

        # Make the score type compute the scores.
        scorers = {}
        for task in self.contest.tasks:
            scorers[task.id] = get_score_type(dataset=task.active_dataset)

        for submission in self.submissions:
            active_dataset = submission.task.active_dataset
            result = submission.get_result(active_dataset)
            scorers[submission.task_id].add_submission(
                submission.id, submission.timestamp,
                submission.user.username,
                result.evaluated(),
                dict((ev.codename,
                      {"outcome": ev.outcome,
                       "text": ev.text,
                       "time": ev.execution_time,
                       "memory": ev.execution_memory})
                     for ev in result.evaluations),
                submission.tokened())

        # Put together all the scores.
        for submission in self.submissions:
            task_id = submission.task_id
            username = submission.user.username
            details = scorers[task_id].pool[submission.id]
            last_scores[task_id][username] = details["score"]
            if details["tokened"]:
                task_scores[task_id][username] = max(
                    task_scores[task_id][username],
                    details["score"])

        # Merge tokened and last submissions.
        for username in scores:
            for task_id in task_scores:
                task_scores[task_id][username] = max(
                    task_scores[task_id][username],
                    last_scores[task_id][username])
            # print(username, [task_scores[task_id][username]
            #                  for task_id in task_scores])
            scores[username] = sum(task_scores[task_id][username]
                                   for task_id in task_scores)

        sorted_usernames = sorted(scores.keys(),
                                  key=lambda username: (scores[username],
                                                        username),
                                  reverse=True)
        sorted_tasks = sorted(self.contest.tasks,
                              key=lambda task: task.num)

        ranking_file = codecs.open(
            os.path.join(self.spool_dir, "classifica.txt"),
            "w", encoding="utf-8")
        ranking_csv = codecs.open(
            os.path.join(self.spool_dir, "classifica.csv"),
            "w", encoding="utf-8")

        # Write rankings' header.
        n_tasks = len(sorted_tasks)
        print("Classifica finale del contest `%s'" %
              self.contest.description, file=ranking_file)
        points_line = " %10s" * n_tasks
        csv_points_line = ",%s" * n_tasks
        print(("%20s %10s" % ("Utente", "Totale")) +
              (points_line % tuple([t.name for t in sorted_tasks])),
              file=ranking_file)
        print(("%s,%s" % ("utente", "totale")) +
              (csv_points_line % tuple([t.name for t in sorted_tasks])),
              file=ranking_csv)

        # Write rankings' content.
        points_line = " %10.3f" * n_tasks
        csv_points_line = ",%.6f" * n_tasks
        for username in sorted_usernames:
            user_scores = [task_scores[task.id][username]
                           for task in sorted_tasks]
            print(("%20s %10.3f" % (username, scores[username])) +
                  (points_line % tuple(user_scores)),
                  file=ranking_file)
            print(("%s,%.6f" % (username, scores[username])) +
                  (csv_points_line % tuple(user_scores)),
                  file=ranking_csv)

        ranking_file.close()
        ranking_csv.close()
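A self-contained sketch of the merge rule applied above: a user's score on a task is the maximum of the best tokened score and the last submission's score. The (timestamp, score, tokened) input format is an assumption made for illustration.

def task_score(submissions):
    """submissions: iterable of (timestamp, score, tokened) tuples."""
    ordered = sorted(submissions)  # chronological order
    if not ordered:
        return 0.0
    last_score = ordered[-1][1]
    best_tokened = max((score for _, score, tokened in ordered if tokened),
                       default=0.0)
    return max(best_tokened, last_score)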
Example #17
    def export_ranking(self):
        """Exports the ranking in csv and txt (human-readable) form.

        """
        logger.info("Exporting ranking.")

        # Create the structure to store the scores.
        scores = dict((user.username, 0.0) for user in self.contest.users
                      if not user.hidden)
        task_scores = dict((task.id,
                            dict((user.username, 0.0)
                                 for user in self.contest.users
                                 if not user.hidden))
                           for task in self.contest.tasks)
        last_scores = dict((task.id,
                            dict((user.username, 0.0)
                                 for user in self.contest.users
                                 if not user.hidden))
                           for task in self.contest.tasks)

        # Make the score type compute the scores.
        scorers = {}
        for task in self.contest.tasks:
            scorers[task.id] = get_score_type(task=task)

        for submission in self.submissions:
            scorers[submission.task_id].add_submission(
                submission.id, submission.timestamp, submission.user.username,
                dict((ev.num, float(ev.outcome))
                     for ev in submission.evaluations), submission.tokened())

        # Put together all the scores.
        for submission in self.submissions:
            task_id = submission.task_id
            username = submission.user.username
            details = scorers[task_id].pool[submission.id]
            last_scores[task_id][username] = details["score"]
            if details["tokened"]:
                task_scores[task_id][username] = max(
                    task_scores[task_id][username], details["score"])

        # Merge tokened and last submissions.
        for username in scores:
            for task_id in task_scores:
                task_scores[task_id][username] = max(
                    task_scores[task_id][username],
                    last_scores[task_id][username])
            print username, [
                task_scores[task_id][username] for task_id in task_scores
            ]
            scores[username] = sum(task_scores[task_id][username]
                                   for task_id in task_scores)

        sorted_usernames = sorted(scores.keys(),
                                  key=lambda username:
                                  (scores[username], username),
                                  reverse=True)
        sorted_tasks = sorted(self.contest.tasks, key=lambda task: task.num)

        ranking_file = codecs.open(os.path.join(self.spool_dir,
                                                "classifica.txt"),
                                   "w",
                                   encoding="utf-8")
        ranking_csv = codecs.open(os.path.join(self.spool_dir,
                                               "classifica.csv"),
                                  "w",
                                  encoding="utf-8")

        # Write rankings' header.
        n_tasks = len(sorted_tasks)
        print >> ranking_file, "Classifica finale del contest `%s'" % \
            self.contest.description
        points_line = " %10s" * n_tasks
        csv_points_line = ",%s" * n_tasks
        print >> ranking_file, ("%20s %10s" % ("Utente", "Totale")) + \
              (points_line % tuple([t.name for t in sorted_tasks]))
        print >> ranking_csv, ("%s,%s" % ("utente", "totale")) + \
              (csv_points_line % tuple([t.name for t in sorted_tasks]))

        # Write rankings' content.
        points_line = " %10.3f" * n_tasks
        csv_points_line = ",%.6f" * n_tasks
        for username in sorted_usernames:
            user_scores = [
                task_scores[task.id][username] for task in sorted_tasks
            ]
            print >> ranking_file, ("%20s %10.3f" % (username,
                                                     scores[username])) + \
                  (points_line % tuple(user_scores))
            print >> ranking_csv, ("%s,%.6f" % (username,
                                                scores[username])) + \
                  (csv_points_line % tuple(user_scores))

        ranking_file.close()
        ranking_csv.close()
Example #18
    def _score(self, submission_id, dataset_id):
        """Assign a score to a submission result.

        This is the core of ScoringService: here we retrieve the result
        from the database, check if it is in the correct status,
        instantiate its ScoreType, compute its score, store it back in
        the database and tell ProxyService to update RWS if needed.

        submission_id (int): the id of the submission that has to be
            scored.
        dataset_id (int): the id of the dataset to use.

        """
        with SessionGen() as session:
            # Obtain submission.
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                raise ValueError("Submission %d not found in the database." %
                                 submission_id)

            # Obtain dataset.
            dataset = Dataset.get_from_id(dataset_id, session)
            if dataset is None:
                raise ValueError("Dataset %d not found in the database." %
                                 dataset_id)

            # Obtain submission result.
            submission_result = submission.get_result(dataset)

            # It means it was not even compiled (for some reason).
            if submission_result is None:
                raise ValueError("Submission result %d(%d) was not found." %
                                 (submission_id, dataset_id))

            # Check if it's ready to be scored.
            if not submission_result.needs_scoring():
                if submission_result.scored():
                    logger.info("Submission result %d(%d) is already scored.",
                                submission_id, dataset_id)
                    return
                else:
                    raise ValueError("The state of the submission result "
                                     "%d(%d) doesn't allow scoring." %
                                     (submission_id, dataset_id))

            # Instantiate the score type.
            score_type = get_score_type(dataset=dataset)

            # Compute score and fill it in the database.
            submission_result.score, \
                submission_result.score_details, \
                submission_result.public_score, \
                submission_result.public_score_details, \
                submission_result.ranking_score_details = \
                score_type.compute_score(submission_result)

            # Store it.
            session.commit()

            # If dataset is the active one, update RWS.
            if dataset is submission.task.active_dataset:
                self.proxy_service.submission_scored(
                    submission_id=submission.id)
Example #19
    def list(self):
        """Produce a list of submissions.

        Filter them using the given query parameters.

        """
        # XXX When writing this method we aimed for efficiency: we
        # wanted it to execute as few queries as possible and not to
        # transmit more data on the wire than strictly necessary.
        # Unfortunately, this made the method rather complex, and for
        # medium-sized contests there seems to be no perceivable
        # difference from a simple enormous joined-load query.

        # Load query parameters. We simply drop the ones we don't
        # understand (i.e. those that aren't integers).
        contest_ids = local.request.args.getlist("contest_id", type=int)
        user_ids = local.request.args.getlist("user_id", type=int)
        task_ids = local.request.args.getlist("task_id", type=int)
        dataset_ids = local.request.args.getlist("dataset_id", type=int)

        with SessionGen() as local.session:
            # Fetch the datasets that have been requested. This has to
            # be done first as it's needed for the only check that can
            # make the request fail (i.e. it could allow us to avoid
            # useless queries).
            if len(dataset_ids) > 0:
                q = local.session.query(Dataset)
                q = q.filter(Dataset.id.in_(dataset_ids))
                datasets = q.all()
            else:
                datasets = list()

            # Check if all parent tasks are distinct. This check also
            # catches the case of a non-existing dataset.
            if len(set(d.task_id for d in datasets)) < len(dataset_ids):
                raise BadRequest()

            # Identify the submissions we're interested in. We have the
            # files and tokens eagerly loaded too. With joinedload they
            # are loaded in the same query. This is perfectly fine for
            # tokens but not as much for files because if there's more
            # than one the entire result row will be duplicated. Using
            # subqueryload could improve that, by firing another query.
            # If we tried to also load results then, depending on how
            # we did that, we would either get them all (even the ones
            # we don't care about) or we wouldn't get the submissions
            # that have no associated result. And, also, we have yet
            # to determine all the datasets we want!
            q = local.session.query(Submission)
            if len(contest_ids) > 0:
                q = q.join(Submission.task)
                q = q.filter(Task.contest_id.in_(contest_ids))
            if len(user_ids) > 0:
                q = q.filter(Submission.user_id.in_(user_ids))
            if len(task_ids) > 0:
                q = q.filter(Submission.task_id.in_(task_ids))
            q = q.options(joinedload(Submission.files))
            q = q.options(joinedload(Submission.token))
            submissions = q.all()

            # Determine the IDs of tasks for which we need a dataset.
            tasks_need_dataset = set(s.task_id for s in submissions)
            # Remove the IDs of tasks for which we have a dataset.
            tasks_need_dataset -= set(d.task_id for d in datasets)

            # Fetch the datasets we're missing, picking the active one
            # of the tasks.
            q = local.session.query(Dataset)
            q = q.join(Task, Dataset.id == Task.active_dataset_id)
            q = q.filter(Task.id.in_(tasks_need_dataset))
            datasets.extend(q.all())

            # Determine the final list of submission and dataset IDs.
            dataset_ids = list(d.id for d in datasets)
            submission_ids = list(s.id for s in submissions)

            # We can now get the submission results.
            # We don't load executables and evaluations because we do
            # not need them. If we did, it'd probably be more efficient
            # to use a subqueryload than a joinedload.
            q = local.session.query(SubmissionResult)
            q = q.filter(SubmissionResult.submission_id.in_(submission_ids))
            q = q.filter(SubmissionResult.dataset_id.in_(dataset_ids))
            submission_results = q.all()

            # Index submission results and datasets for easy access.
            # We're sure we won't have duplicated entries.
            dataset_map = dict((d.task_id, d) for d in datasets)
            submission_results_map = dict(
                (sr.submission_id, sr) for sr in submission_results)

            # As we need testcases to initialize ScoreTypes, load them
            # in a single batch. This query is independent from the
            # previous ones but cannot be run in parallel as they need
            # to belong to the same Session, and therefore to the same
            # connection, that cannot be shared among greenlets.
            q = local.session.query(Testcase)
            q = q.filter(Testcase.dataset_id.in_(dataset_ids))
            testcases = q.all()

            # Initialize ScoreTypes. We have to pick testcases manually
            # because otherwise SQLAlchemy will fire another query.
            score_types = dict()
            for d in datasets:
                public_testcases = dict((tc.codename, tc.public)
                                        for tc in testcases
                                        if tc.dataset_id == d.id)
                score_types[d.id] = get_score_type(d.score_type,
                                                   d.score_type_parameters,
                                                   public_testcases)

            # Produce the data structure.
            result = list()

            for s in submissions:
                dataset = dataset_map[s.task_id]
                item = {
                    '_ref': "%s" % s.id,
                    'dataset': "%s" % dataset.id,
                    'user': "******" % s.user_id,
                    'task': "%s" % s.task_id,
                    'timestamp': make_timestamp(s.timestamp),
                    'language': s.language,
                    'files': dict((k, v.digest)
                                  for k, v in s.files.iteritems()),
                    'token': make_timestamp(s.token.timestamp)
                             if s.token is not None else None,
                }

                score_type = score_types[dataset.id]
                sr = submission_results_map.get(s.id)

                if sr is not None:
                    item.update({
                        'compilation_outcome':
                            {"ok": True,
                             "fail": False}.get(sr.compilation_outcome),
                        'compilation_tries': sr.compilation_tries,
                        'evaluation_outcome':
                            {"ok": True}.get(sr.evaluation_outcome),
                        'evaluation_tries': sr.evaluation_tries,
                        'score': sr.score,
                        'max_score': score_type.max_score,
                    })
                else:
                    item.update({
                        'compilation_outcome': None,
                        'compilation_tries': 0,
                        'evaluation_outcome': None,
                        'evaluation_tries': 0,
                        'score': None,
                        'max_score': score_type.max_score,
                    })

                result.append(item)

        # Encode and send.
        local.response.mimetype = "application/json"
        local.response.data = json.dumps(result)
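On the joinedload-versus-subqueryload remark in the comments above: a hedged sketch of the alternative inside the same query-building code, fetching the files collection with one extra query instead of duplicating submission rows. Both loader options are standard SQLAlchemy API.

from sqlalchemy.orm import joinedload, subqueryload

q = local.session.query(Submission)
q = q.options(subqueryload(Submission.files))  # second query, no row fan-out
q = q.options(joinedload(Submission.token))    # one-to-one: joined is fine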
Example #20
    def get(self, submission_id):
        """Retrieve a single submission.

        Query the database for the submission with the given ID, and
        the dataset given as query parameter (or the active one).

        submission_id (int): the ID of a submission.

        """
        # If it's not an integer we will ignore it. But if it's the ID
        # of a dataset that doesn't exist we'll raise a 404.
        dataset_id = local.request.args.get("dataset_id", type=int)

        with SessionGen() as local.session:
            # Load the submission, and check for existence.
            submission = Submission.get_from_id(submission_id, local.session)

            if submission is None:
                raise NotFound()

            # Load the dataset.
            if dataset_id is not None:
                dataset = Dataset.get_from_id(dataset_id, local.session)
                if dataset is None:
                    raise NotFound()
            else:
                q = local.session.query(Dataset)
                q = q.join(Task, Dataset.id == Task.active_dataset_id)
                q = q.filter(Task.id == submission.task_id)
                dataset = q.one()

            # Get the result (will fire a query).
            submission_result = submission.get_result(dataset)

            # Get the ScoreType (will fire a query for testcases).
            score_type = get_score_type(dataset=dataset)

            # Produce the data structure.
            s = submission
            sr = submission_result

            result = {
                '_ref': "%s" % s.id,
                'dataset': '%s' % dataset.id,
                'user': "******" % s.user_id,
                'task': "%s" % s.task_id,
                'timestamp': make_timestamp(s.timestamp),
                'language': s.language,
                # No files, no token: AWS doesn't need them.
            }

            if sr is not None:
                result.update({
                    'compilation_outcome':
                        {"ok": True,
                         "fail": False}.get(sr.compilation_outcome),
                    'compilation_text':
                        format_status_text(sr.compilation_text),
                    'compilation_tries': sr.compilation_tries,
                    'compilation_stdout': sr.compilation_stdout,
                    'compilation_stderr': sr.compilation_stderr,
                    'compilation_time': sr.compilation_time,
                    'compilation_wall_clock_time':
                        sr.compilation_wall_clock_time,
                    'compilation_memory': sr.compilation_memory,
                    'compilation_shard': sr.compilation_shard,
                    'compilation_sandbox': sr.compilation_sandbox,
                    'evaluation_outcome':
                        {"ok": True}.get(sr.evaluation_outcome),
                    'evaluation_tries': sr.evaluation_tries,
                    'evaluations': dict((ev.codename, {
                        'codename': ev.codename,
                        'outcome': ev.outcome,
                        'text': format_status_text(ev.text),
                        'execution_time': ev.execution_time,
                        'execution_wall_clock_time':
                            ev.execution_wall_clock_time,
                        'execution_memory': ev.execution_memory,
                        'evaluation_shard': ev.evaluation_shard,
                        'evaluation_sandbox': ev.evaluation_sandbox,
                    }) for ev in sr.evaluations),
                    'score': sr.score,
                    'max_score': score_type.max_score,
                    'score_details':
                        score_type.get_html_details(sr.score_details)
                        if sr.score is not None else None,
                })
            else:
                # Just copy all fields with None.
                result.update({
                    'compilation_outcome': None,
                    'compilation_text': None,
                    'compilation_tries': 0,
                    'compilation_stdout': None,
                    'compilation_stderr': None,
                    'compilation_time': None,
                    'compilation_wall_clock_time': None,
                    'compilation_memory': None,
                    'compilation_shard': None,
                    'compilation_sandbox': None,
                    'evaluation_outcome': None,
                    'evaluation_tries': 0,
                    'evaluations': {},
                    'score': None,
                    'max_score': score_type.max_score,
                    'score_details': None,
                })

        # Encode and send.
        local.response.mimetype = "application/json"
        local.response.data = json.dumps(result)
Example #21
    def execute(self, entry):
        """Assign a score to a submission result.

        This is the core of ScoringService: here we retrieve the result
        from the database, check if it is in the correct status,
        instantiate its ScoreType, compute its score, store it back in
        the database and tell ProxyService to update RWS if needed.

        entry (QueueEntry): entry containing the operation to perform.

        """
        operation = entry.item
        with SessionGen() as session:
            # Obtain submission.
            submission = Submission.get_from_id(operation.submission_id,
                                                session)
            if submission is None:
                raise ValueError("Submission %d not found in the database." %
                                 operation.submission_id)

            # Obtain dataset.
            dataset = Dataset.get_from_id(operation.dataset_id, session)
            if dataset is None:
                raise ValueError("Dataset %d not found in the database." %
                                 operation.dataset_id)

            # Obtain submission result.
            submission_result = submission.get_result(dataset)

            # It means it was not even compiled (for some reason).
            if submission_result is None:
                raise ValueError("Submission result %d(%d) was not found." %
                                 (operation.submission_id,
                                  operation.dataset_id))

            # Check if it's ready to be scored.
            if not submission_result.needs_scoring():
                if submission_result.scored():
                    logger.info("Submission result %d(%d) is already scored.",
                                operation.submission_id, operation.dataset_id)
                    return
                else:
                    raise ValueError("The state of the submission result "
                                     "%d(%d) doesn't allow scoring." %
                                     (operation.submission_id,
                                      operation.dataset_id))

            # Instantiate the score type.
            score_type = get_score_type(dataset=dataset)

            # Compute score and fill it in the database.
            submission_result.score, \
                submission_result.score_details, \
                submission_result.public_score, \
                submission_result.public_score_details, \
                submission_result.ranking_score_details = \
                score_type.compute_score(submission_result)

            # Round submission score to 2 decimal places
            submission_result.score = round(submission_result.score, 2)

            # Store it.
            session.commit()

            # Update statistics and access level
            score = submission_result.score
            taskscore = session.query(TaskScore)\
                .filter(TaskScore.user_id == submission.user_id)\
                .filter(TaskScore.task_id == submission.task_id).first()
            if taskscore is None:
                taskscore = TaskScore()
                taskscore.task_id = submission.task_id
                taskscore.user_id = submission.user_id
                session.add(taskscore)
            mtime = max([0] + [e.execution_time
                               for e in submission_result.evaluations])
            if score > taskscore.score:
                taskscore.score = score
                taskscore.time = mtime
            elif score == taskscore.score and mtime < taskscore.time:
                taskscore.time = mtime
            submission.task.nsubscorrect = session.query(Submission)\
                .filter(Submission.task_id == submission.task_id)\
                .filter(Submission.results.any(
                    SubmissionResult.score == 100)).count()
            submission.task.nuserscorrect = session.query(TaskScore)\
                .filter(TaskScore.task_id == submission.task_id)\
                .filter(TaskScore.score == 100).count()
            submission.user.score = sum([
                t.score for t in session.query(TaskScore)
                .filter(TaskScore.user_id == submission.user_id).all()])
            if submission.user.score >= 300 and \
               submission.user.access_level == 6:
                submission.user.access_level = 5
            submission.task.nsubs = session.query(Submission)\
                .filter(Submission.task_id == submission.task_id).count()
            submission.task.nusers = session.query(TaskScore)\
                .filter(TaskScore.task_id == submission.task_id).count()
            session.commit()

            # If dataset is the active one, update RWS.
            if dataset is submission.task.active_dataset:
                self.proxy_service.submission_scored(
                    submission_id=submission.id)
Example #22
    def execute(self, entry):
        """Assign a score to a submission result.

        This is the core of ScoringService: here we retrieve the result
        from the database, check if it is in the correct status,
        instantiate its ScoreType, compute its score, store it back in
        the database and tell ProxyService to update RWS if needed.

        entry (QueueEntry): entry containing the operation to perform.

        """
        operation = entry.item
        with SessionGen() as session:
            # Obtain submission.
            submission = Submission.get_from_id(operation.submission_id,
                                                session)
            if submission is None:
                raise ValueError("Submission %d not found in the database." %
                                 operation.submission_id)

            # Obtain dataset.
            dataset = Dataset.get_from_id(operation.dataset_id, session)
            if dataset is None:
                raise ValueError("Dataset %d not found in the database." %
                                 operation.dataset_id)

            # Obtain submission result.
            submission_result = submission.get_result(dataset)

            # It means it was not even compiled (for some reason).
            if submission_result is None:
                raise ValueError("Submission result %d(%d) was not found." %
                                 (operation.submission_id,
                                  operation.dataset_id))

            # Check if it's ready to be scored.
            if not submission_result.needs_scoring():
                if submission_result.scored():
                    logger.info("Submission result %d(%d) is already scored.",
                                operation.submission_id, operation.dataset_id)
                    return
                else:
                    raise ValueError("The state of the submission result "
                                     "%d(%d) doesn't allow scoring." %
                                     (operation.submission_id,
                                      operation.dataset_id))

            # For Codebreaker, your score depends on your previous submissions
            # to this task. So, let's get the previous submissions for this
            # task.
            previous_submissions = session.query(Submission)\
                .filter(Submission.user_id == submission.user_id,
                        Submission.task_id == submission.task_id)\
                .order_by(asc(Submission.timestamp))\
                .all()
            # Counterintuitively, because we're nice people, we don't care how
            # these submissions were scored. We only care about their
            # evaluations, which will tell us how to score them.
            # For a codebreaker, this will be in one-to-one correspondence with
            # previous submissions, since each "task" should only have the one
            # "testcase".
            previous_evaluations = [
                session.query(Evaluation)
                .filter(Evaluation.submission_id == sub.id).first()
                for sub in previous_submissions]

            assert len(previous_evaluations) == len(previous_submissions)

            # Now that we have the evaluations, we can pass these as parameters
            # to our score type
            params = [evaluation.outcome for evaluation in previous_evaluations]

            # Instantiate the score type.
            # We don't want to use the dataset since we have to pass in custom
            # params. Instead we'll just hardcode the name of the class in,
            # which is unfortunate.
            # TODO (bgbn): work out a way to make this more generic.
            score_type = get_score_type(name="AIOCCodebreakerScoreType",
                                        parameters=json.dumps(params),
                                        public_testcases=dict((k, tc.public)
                                            for k, tc in
                                            dataset.testcases.iteritems()))

            # Compute score and fill it in the database.
            submission_result.score, \
                submission_result.score_details, \
                submission_result.public_score, \
                submission_result.public_score_details, \
                submission_result.ranking_score_details = \
                score_type.compute_score(submission_result)

            # Store it.
            session.commit()

            # If dataset is the active one, update RWS.
            if dataset is submission.task.active_dataset:
                self.proxy_service.submission_scored(
                    submission_id=submission.id)
Example #23
    def new_evaluation(self, submission_id, dataset_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.
        dataset_id (int): the id of the dataset to use.

        """
        with SessionGen(commit=True) as session:
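            # Note: unlike the execute() examples below, this older-style
            # SessionGen(commit=True) commits automatically when the block
            # exits, which is why there is no explicit session.commit() here.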
            submission = Submission.get_from_id(submission_id, session)

            if submission is None:
                logger.error("[new_evaluation] Couldn't find submission %d "
                             "in the database." % submission_id)
                raise ValueError

            if submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            dataset = Dataset.get_from_id(dataset_id, session)

            if dataset is None:
                logger.error("[new_evaluation] Couldn't find dataset %d "
                             "in the database." % dataset_id)
                raise ValueError

            submission_result = submission.get_result(dataset)

            # We'll accept only submissions that either didn't compile
            # at all or that did evaluate successfully.
            if submission_result is None or not submission_result.compiled():
                logger.warning("[new_evaluation] Submission %d(%d) is "
                               "not compiled." % (submission_id, dataset_id))
                return
            elif submission_result.compilation_outcome == "ok" and \
                    not submission_result.evaluated():
                logger.warning("[new_evaluation] Submission %d(%d) is "
                               "compiled but is not evaluated." %
                               (submission_id, dataset_id))
                return

            # Assign score to the submission.
            score_type = get_score_type(dataset=dataset)
            score, details, public_score, public_details, ranking_details = \
                score_type.compute_score(submission_result)

            # Mark submission as scored.
            self.submission_results_scored.add((submission_id, dataset_id))

            # Filling submission's score info in the db.
            submission_result.score = score
            submission_result.public_score = public_score

            # And details.
            submission_result.score_details = details
            submission_result.public_score_details = public_details
            submission_result.ranking_score_details = ranking_details

            # If dataset is the active one, update RWS.
            if dataset is submission.task.active_dataset:
                self.rankings_send_score(submission)
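
The guard above accepts a result for scoring only if it either failed compilation outright or compiled and was fully evaluated. Read as a standalone predicate (the helper name is invented, not part of the source), the rule looks like this:

    def is_ready_for_scoring(submission_result):
        # No result row, or not compiled at all: nothing to score yet.
        if submission_result is None or not submission_result.compiled():
            return False
        # Compiled successfully but evaluation still pending: wait for ES.
        if submission_result.compilation_outcome == "ok" and \
                not submission_result.evaluated():
            return False
        # Either compilation failed, or compilation and evaluation are done.
        return True
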
Example #24
0
    def execute(self, entry):
        """Assign a score to a submission result.

        This is the core of ScoringService: here we retrieve the result
        from the database, check if it is in the correct status,
        instantiate its ScoreType, compute its score, store it back in
        the database and tell ProxyService to update RWS if needed.

        entry (QueueEntry): entry containing the operation to perform.

        """
        operation = entry.item
        with SessionGen() as session:
            # Obtain submission.
            submission = Submission.get_from_id(operation.submission_id,
                                                session)
            if submission is None:
                raise ValueError("Submission %d not found in the database." %
                                 operation.submission_id)

            # Obtain dataset.
            dataset = Dataset.get_from_id(operation.dataset_id, session)
            if dataset is None:
                raise ValueError("Dataset %d not found in the database." %
                                 operation.dataset_id)

            # Obtain submission result.
            submission_result = submission.get_result(dataset)

            # A None result means it was not even compiled (for some reason).
            if submission_result is None:
                raise ValueError(
                    "Submission result %d(%d) was not found." %
                    (operation.submission_id, operation.dataset_id))

            # Check if it's ready to be scored.
            if not submission_result.needs_scoring():
                if submission_result.scored():
                    logger.info("Submission result %d(%d) is already scored.",
                                operation.submission_id, operation.dataset_id)
                    return
                else:
                    raise ValueError(
                        "The state of the submission result "
                        "%d(%d) doesn't allow scoring." %
                        (operation.submission_id, operation.dataset_id))

            # For Codebreaker, your score depends on your previous submissions
            # to this task. So, let's get the previous submissions for this task.
            previous_submissions = session.query(Submission)\
                .filter(Submission.user_id == submission.user_id,
                        Submission.task_id == submission.task_id)\
                .order_by(asc(Submission.timestamp))\
                .all()
            # Counterintuitively, because we're nice people, we don't care how
            # these submissions were scored. We only care about their
            # evaluations, which will tell us how to score them.
            # For a codebreaker, this will be in one-to-one correspondence with
            # previous submissions, since each "task" should only have the one
            # "testcase".
            previous_evaluations = [
                session.query(Evaluation).filter(
                    Evaluation.submission_id == sub.id).first()
                for sub in previous_submissions
            ]

            assert len(previous_evaluations) == len(previous_submissions)

            # Now that we have the evaluations, we can pass these as parameters
            # to our score type
            params = [
                evaluation.outcome for evaluation in previous_evaluations
            ]

            # Instantiate the score type.
            # We don't want to use the dataset since we have to pass in custom
            # params. Instead we'll just hardcode the name of the class in,
            # which is unfortunate.
            # TODO (bgbn): work out a way to make this more generic.
            score_type = get_score_type(
                name="AIOCCodebreakerScoreType",
                parameters=json.dumps(params),
                public_testcases=dict(
                    (k, tc.public) for k, tc in dataset.testcases.iteritems()))

            # Compute score and fill it in the database.
            submission_result.score, \
                submission_result.score_details, \
                submission_result.public_score, \
                submission_result.public_score_details, \
                submission_result.ranking_score_details = \
                score_type.compute_score(submission_result)

            # Store it.
            session.commit()

            # If dataset is the active one, update RWS.
            if dataset is submission.task.active_dataset:
                self.proxy_service.submission_scored(
                    submission_id=submission.id)
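
One caveat in both Codebreaker listings: query(Evaluation)....first() returns None when a submission has no evaluation row, and the length assert still passes in that case, so the failure only surfaces later at evaluation.outcome. A defensive variant of that loop (a sketch, not from the source) would fail fast instead:

    previous_evaluations = []
    for sub in previous_submissions:
        evaluation = session.query(Evaluation)\
            .filter(Evaluation.submission_id == sub.id).first()
        if evaluation is None:
            # Surface the missing row here instead of crashing later
            # on evaluation.outcome.
            raise ValueError("Submission %d has no evaluation." % sub.id)
        previous_evaluations.append(evaluation)
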
Example #25
0
    def execute(self, entry):
        """Assign a score to a submission result.

        This is the core of ScoringService: here we retrieve the result
        from the database, check if it is in the correct status,
        instantiate its ScoreType, compute its score, store it back in
        the database and tell ProxyService to update RWS if needed.

        entry (QueueEntry): entry containing the operation to perform.

        """
        operation = entry.item
        with SessionGen() as session:
            # Obtain submission.
            submission = Submission.get_from_id(operation.submission_id,
                                                session)
            if submission is None:
                raise ValueError("Submission %d not found in the database." %
                                 operation.submission_id)

            # Obtain dataset.
            dataset = Dataset.get_from_id(operation.dataset_id, session)
            if dataset is None:
                raise ValueError("Dataset %d not found in the database." %
                                 operation.dataset_id)

            # Obtain submission result.
            submission_result = submission.get_result(dataset)

            # A None result means it was not even compiled (for some reason).
            if submission_result is None:
                raise ValueError(
                    "Submission result %d(%d) was not found." %
                    (operation.submission_id, operation.dataset_id))

            # Check if it's ready to be scored.
            if not submission_result.needs_scoring():
                if submission_result.scored():
                    logger.info("Submission result %d(%d) is already scored.",
                                operation.submission_id, operation.dataset_id)
                    return
                else:
                    raise ValueError(
                        "The state of the submission result "
                        "%d(%d) doesn't allow scoring." %
                        (operation.submission_id, operation.dataset_id))

            # Instantiate the score type.
            score_type = get_score_type(dataset=dataset)

            # Compute score and fill it in the database.
            submission_result.score, \
                submission_result.score_details, \
                submission_result.public_score, \
                submission_result.public_score_details, \
                ranking_score_details = \
                score_type.compute_score(submission_result)
            submission_result.ranking_score_details = \
                json.dumps(ranking_score_details)

            # Store it.
            session.commit()

            # If dataset is the active one, update RWS.
            if dataset is submission.task.active_dataset:
                logger.info("Submission scored %.1f seconds after submission",
                            (make_datetime() -
                             submission.timestamp).total_seconds())
                self.proxy_service.submission_scored(
                    submission_id=submission.id)
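
Note the one difference from the Codebreaker variant: here only ranking_score_details is JSON-encoded before storage, while the other four fields come straight out of compute_score's tuple. A small helper (hypothetical name, same behaviour as the assignments above) makes that column mapping explicit:

    import json

    def store_score(submission_result, score_tuple):
        # Unpack compute_score's five-element result into the DB columns.
        (submission_result.score,
         submission_result.score_details,
         submission_result.public_score,
         submission_result.public_score_details,
         ranking_score_details) = score_tuple
        # Only the ranking details are serialized before being stored.
        submission_result.ranking_score_details = \
            json.dumps(ranking_score_details)
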
Example #26
0
    def execute(self, entry):
        """Assign a score to a submission result.

        This is the core of ScoringService: here we retrieve the result
        from the database, check if it is in the correct status,
        instantiate its ScoreType, compute its score, store it back in
        the database and tell ProxyService to update RWS if needed.

        entry (QueueEntry): entry containing the operation to perform.

        """
        operation = entry.item
        with SessionGen() as session:
            # Obtain submission.
            submission = Submission.get_from_id(operation.submission_id,
                                                session)
            if submission is None:
                raise ValueError("Submission %d not found in the database." %
                                 operation.submission_id)

            # Obtain dataset.
            dataset = Dataset.get_from_id(operation.dataset_id, session)
            if dataset is None:
                raise ValueError("Dataset %d not found in the database." %
                                 operation.dataset_id)

            # Obtain submission result.
            submission_result = submission.get_result(dataset)

            # A None result means it was not even compiled (for some reason).
            if submission_result is None:
                raise ValueError(
                    "Submission result %d(%d) was not found." %
                    (operation.submission_id, operation.dataset_id))

            # Check if it's ready to be scored.
            if not submission_result.needs_scoring():
                if submission_result.scored():
                    logger.info("Submission result %d(%d) is already scored.",
                                operation.submission_id, operation.dataset_id)
                    return
                else:
                    raise ValueError(
                        "The state of the submission result "
                        "%d(%d) doesn't allow scoring." %
                        (operation.submission_id, operation.dataset_id))

            # Instantiate the score type.
            score_type = get_score_type(dataset=dataset)

            # Compute score and fill it in the database.
            submission_result.score, \
                submission_result.score_details, \
                submission_result.public_score, \
                submission_result.public_score_details, \
                ranking_score_details = \
                score_type.compute_score(submission_result)
            submission_result.ranking_score_details = \
                json.dumps(ranking_score_details)

            task = submission.task
            participation = submission.participation
            relevant_submissions = session.query(SubmissionResult)\
                .join(SubmissionResult.submission)\
                .filter(Submission.participation_id == participation.id)\
                .filter(Submission.task_id == task.id) \
                .filter(SubmissionResult.dataset_id == dataset.id) \
                .filter(SubmissionResult.filter_scored())\
                .order_by(Submission.timestamp.asc())\
                .all()

            changed_task_results = []
            official_submissions = [
                s for s in relevant_submissions if s.submission.official
            ]
            official_ptr = 0
            for i in range(len(relevant_submissions)):
                sr = relevant_submissions[i]
                if official_ptr < len(official_submissions) and \
                        sr == official_submissions[official_ptr]:
                    official_ptr += 1
                if sr.submission.timestamp >= submission.timestamp:
                    old_data = (sr.task_score, sr.task_score_details,
                                sr.task_public_score,
                                sr.task_public_score_details,
                                sr.task_ranking_score_details)
                    new_data = score_type.compute_total_score(
                        official_submissions[:official_ptr])
                    new_data = new_data[:4] + (json.dumps(new_data[4]), )
                    if old_data != new_data:
                        sr.task_score, \
                            sr.task_score_details, \
                            sr.task_public_score, \
                            sr.task_public_score_details, \
                            sr.task_ranking_score_details = \
                            new_data
                        changed_task_results.append(sr.submission_id)
            # Store it.
            session.commit()

            logger.metric("submission_scoring_time",
                          submission_id=submission.id,
                          dataset_id=submission_result.dataset_id,
                          language=submission.language,
                          task=submission.task_id,
                          participant=submission.participation_id,
                          value=(make_datetime() -
                                 submission.timestamp).total_seconds())

            logger.info("Submission scored %d seconds after submission",
                        (make_datetime() -
                         submission.timestamp).total_seconds())

            # If dataset is the active one, update RWS.
            if dataset is submission.task.active_dataset:
                if submission.id not in changed_task_results:
                    logger.error("Submission was recently scored but "
                                 "it isn't listed as submissions with "
                                 "a task score change")
                    changed_task_results.append(submission.id)

                for changed_submission_id in changed_task_results:
                    self.proxy_service.submission_scored(
                        submission_id=changed_submission_id)
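
The recomputation loop in this last example walks every scored result in timestamp order while advancing official_ptr over the official ones, so each result at or after the new submission is re-scored against exactly the official submissions visible at its own timestamp (itself included, when official). The same prefix pattern over plain tuples, as an illustrative sketch:

    def visible_official_prefixes(results, new_timestamp):
        """results: (timestamp, is_official) pairs, ascending by timestamp.

        Return (timestamp, official_prefix) for every result at or after
        new_timestamp, mirroring the official_ptr loop above.
        """
        official_prefix = []
        recomputed = []
        for timestamp, is_official in results:
            if is_official:
                # The prefix includes the current result when official.
                official_prefix.append(timestamp)
            if timestamp >= new_timestamp:
                recomputed.append((timestamp, list(official_prefix)))
        return recomputed
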