Code Example #1
File: EvaluationService.py  Project: vishan/cms
    def acquire_worker(self, job, side_data=None):
        """Tries to assign a job to an available worker. If no workers
        are available then this returns None, otherwise this returns
        the chosen worker.

        job (job): the job to assign to a worker
        side_data (object): object to attach to the worker for later
                            use

        returns (int): None if no workers are available, the worker
                       assigned to the job otherwise
        """
        # We look for an available worker
        try:
            shard = self.find_worker(WorkerPool.WORKER_INACTIVE, require_connection=True, random_worker=True)
        except LookupError:
            return None

        # Then we fill the info for future memory
        self._job[shard] = job
        self._start_time[shard] = make_datetime()
        self._side_data[shard] = side_data
        logger.debug("Worker %s acquired." % shard)

        # And finally we ask the worker to do the job
        action, object_id = job
        timestamp = side_data[1]
        queue_time = self._start_time[shard] - timestamp
        logger.info(
            "Asking worker %s to %s submission/user test %d "
            " (%s after submission)." % (shard, action, object_id, queue_time)
        )

        with SessionGen(commit=False) as session:
            if action == EvaluationService.JOB_TYPE_COMPILATION:
                submission = Submission.get_from_id(object_id, session)
                job_ = CompilationJob.from_submission(submission)
            elif action == EvaluationService.JOB_TYPE_EVALUATION:
                submission = Submission.get_from_id(object_id, session)
                job_ = EvaluationJob.from_submission(submission)
            elif action == EvaluationService.JOB_TYPE_TEST_COMPILATION:
                user_test = UserTest.get_from_id(object_id, session)
                job_ = CompilationJob.from_user_test(user_test)
            elif action == EvaluationService.JOB_TYPE_TEST_EVALUATION:
                user_test = UserTest.get_from_id(object_id, session)
                job_ = EvaluationJob.from_user_test(user_test)
                job_.get_output = True
                job_.only_execution = True

            self._worker[shard].execute_job(
                job_dict=job_.export_to_dict(),
                callback=self._service.action_finished.im_func,
                plus=(action, object_id, side_data, shard),
            )

        return shard
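
Note that the snippet above looks the submission or user test up without a None check; most of the other call sites on this page guard the lookup. A minimal sketch of that guard, illustrative only, reusing just the names that appear in the surrounding examples (SessionGen, Submission.get_from_id, logger):

        # Sketch (illustrative only): the None guard used by most other call
        # sites on this page, with the same SessionGen / Submission API.
        with SessionGen(commit=False) as session:
            submission = Submission.get_from_id(object_id, session)
            if submission is None:
                logger.error("Couldn't find submission %s in the database."
                             % object_id)
                return None
            # ... build the Job from `submission` as in the branches above ...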
Code Example #2
File: Worker.py  Project: invinciblejha/cms
    def get_submission_data(self, submission_id):
        """Given the id, returns the submission object and a new task
        type object of the correct type.

        submission_id (int): id of the submission.

        return (Submission, TaskType): corresponding objects.

        raise: JobException if id or task type not found.

        """
        submission = Submission.get_from_id(submission_id, self.session)
        if submission is None:
            err_msg = "Couldn't find submission %s " \
                      "in the database." % submission_id
            logger.critical(err_msg)
            raise JobException(err_msg)

        try:
            task_type = get_task_type(submission, self.file_cacher)
        except KeyError as error:
            err_msg = "Task type `%s' not known for " \
                "submission %s (error: %s)." % (
                submission.task.task_type, submission_id, error)
            logger.error(err_msg)
            raise JobException(err_msg)

        return (submission, task_type)
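
A hedged usage sketch for the method above: a caller catches JobException (or lets it propagate) and then hands the pair to the actual work. Only get_submission_data and JobException come from the snippet; the method name and surrounding logic are hypothetical:

    # Hypothetical caller sketch; only get_submission_data and JobException
    # come from the snippet above.
    def compile_submission(self, submission_id):
        try:
            submission, task_type = self.get_submission_data(submission_id)
        except JobException:
            # The failure has already been logged inside get_submission_data.
            return False
        # Hand `submission` and `task_type` to the actual compilation step here.
        return True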
Code Example #3
File: ScoringService.py  Project: beyondai/cms
    def submission_tokened(self, submission_id):
        """This RPC inform ScoringService that the user has played the
        token on a submission.

        submission_id (int): the id of the submission that changed.
        timestamp (int): the time of the token.

        """
        with SessionGen(commit=False) as session:
            submission = Submission.get_from_id(submission_id, session)

            if submission is None:
                logger.error("[submission_tokened] Received token request for "
                             "unexistent submission id %s." % submission_id)
                raise KeyError

            if submission.user.hidden:
                logger.info("[submission_tokened] Token for submission %d "
                            "not sent because user is hidden." % submission_id)
                return

            # Mark submission as tokened.
            self.submissions_tokened.add(submission_id)

            # Update RWS.
            self.rankings_send_token(submission)
Code Example #4
File: ContestWebServer.py  Project: volpino/cms
    def post(self):

        submission_id = self.get_argument("submission_id", "")

        # Decrypt submission_id.
        try:
            submission_id = decrypt_number(submission_id)
        except ValueError:
            # We reply with Forbidden if the given ID cannot be
            # decrypted.
            logger.warning(
                "User %s tried to play a token " "on an undecryptable submission_id." % self.current_user.username
            )
            raise tornado.web.HTTPError(403)

        # Find submission and check it is of the current user.
        submission = Submission.get_from_id(submission_id, self.sql_session)
        if submission is None or submission.user != self.current_user:
            logger.warning(
                "User %s tried to play a token " "on an unexisting submission_id." % self.current_user.username
            )
            raise tornado.web.HTTPError(404)

        # Don't trust the user, check again if (s)he can really play
        # the token.
        timestamp = int(time.time())
        if self.contest.tokens_available(self.current_user.username, submission.task.name, timestamp)[0] <= 0:
            logger.warning("User %s tried to play a token " "when it shouldn't." % self.current_user.username)
            # Add "no luck" notification
            self.application.service.add_notification(
                self.current_user.username,
                timestamp,
                self._("Token request discarded"),
                self._("Your request has been discarded because you have no " "tokens available."),
            )
            self.redirect("/tasks/%s" % encrypt_number(submission.task.id))
            return

        token = Token(timestamp, submission)
        self.sql_session.add(token)
        self.sql_session.commit()

        # Inform ScoringService and eventually the ranking that the
        # token has been played.
        self.application.service.scoring_service.submission_tokened(submission_id=submission_id, timestamp=timestamp)

        logger.info("Token played by user %s on task %s." % (self.current_user.username, submission.task.name))

        # Add "All ok" notification
        self.application.service.add_notification(
            self.current_user.username,
            timestamp,
            self._("Token request received"),
            self._("Your request has been received " "and applied to the submission."),
        )

        self.redirect("/tasks/%s" % encrypt_number(submission.task.id))
Code Example #5
File: ScoringService.py  Project: bblackham/cms
    def dataset_updated(self, task_id):
        """This function updates RWS with new data about a task. It should be
        called after the live dataset of a task is changed.

        task_id (int): id of the task whose dataset has changed.

        """
        with SessionGen(commit=False) as session:
            task = Task.get_from_id(task_id, session)
            dataset_id = task.active_dataset_id

        logger.info("Dataset update for task %d (dataset now is %d)." % (
            task_id, dataset_id))

        submission_ids = get_submissions(self.contest_id, task_id=task_id)

        subchanges = []
        with SessionGen(commit=False) as session:
            for submission_id in submission_ids:
                submission = Submission.get_from_id(submission_id, session)
                submission_result = SubmissionResult.get_from_id(
                    (submission_id, dataset_id), session)

                if submission_result is None:
                    # Not yet compiled, evaluated or scored.
                    score = None
                    ranking_score_details = None
                else:
                    score = submission_result.score
                    try:
                        ranking_score_details = json.loads(
                                submission_result.ranking_score_details)
                    except (json.decoder.JSONDecodeError, TypeError):
                        # It may be blank.
                        ranking_score_details = None

                # Data to send to remote rankings.
                subchange_id = "%s%ss" % \
                    (int(make_timestamp(submission.timestamp)),
                     submission_id)
                subchange_put_data = {
                    "submission": encode_id(submission_id),
                    "time": int(make_timestamp(submission.timestamp))}
                    # We're sending the unrounded score to RWS
                if score is not None:
                    subchange_put_data["score"] = score
                if ranking_score_details is not None:
                    subchange_put_data["extra"] = ranking_score_details
                subchanges.append((subchange_id, subchange_put_data))

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                for subchange_id, data in subchanges:
                    self.subchange_queue.setdefault(
                        ranking,
                        dict())[encode_id(subchange_id)] = data
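
The subchange ids built above, and in the token examples below, appear to follow a simple convention (an observation inferred from these snippets, not from project documentation): the submission's Unix timestamp, the submission id, and a one-letter suffix distinguishing the kind of change. A sketch:

# Assumed id scheme, inferred from the examples on this page:
#   "<timestamp><submission_id>s" -> a score change (this example)
#   "<timestamp><submission_id>t" -> a played token (Code Examples #6 and #7)
score_change_id = "%s%ss" % (int(make_timestamp(submission.timestamp)),
                             submission_id)
token_change_id = "%s%st" % (int(make_timestamp(submission.token.timestamp)),
                             submission_id)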
Code Example #6
File: ScoringService.py  Project: sekouperry/cms
    def submission_tokened(self, submission_id):
        """This RPC inform ScoringService that the user has played the
        token on a submission.

        submission_id (int): the id of the submission that changed.
        timestamp (int): the time of the token.

        """
        with SessionGen(commit=False) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.error("[submission_tokened] Received token request for "
                             "unexistent submission id %s." % submission_id)
                raise KeyError
            elif submission.user.hidden:
                logger.info("[submission_tokened] Token for submission %d "
                            "not sent because user is hidden." % submission_id)
                return

            # Mark submission as tokened.
            self.submissions_tokened.add(submission_id)

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))
            }
            subchange_id = "%s%st" % \
                (int(make_timestamp(submission.token.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.token.timestamp)),
                "token": True
            }

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(submission_id)] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
Code Example #7
File: ScoringService.py  Project: s546360316/cms
    def submission_tokened(self, submission_id):
        """This RPC inform ScoringService that the user has played the
        token on a submission.

        submission_id (int): the id of the submission that changed.
        timestamp (int): the time of the token.

        """
        with SessionGen(commit=False) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.error("[submission_tokened] Received token request for "
                             "unexistent submission id %s." % submission_id)
                raise KeyError
            elif submission.user.hidden:
                logger.info("[submission_tokened] Token for submission %d "
                            "not sent because user is hidden." % submission_id)
                return

            # Mark submission as tokened.
            self.submissions_tokened.add(submission_id)

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))}
            subchange_id = "%s%st" % \
                (int(make_timestamp(submission.token.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.token.timestamp)),
                "token": True}

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(submission_id)] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
Code Example #8
    def invalidate_submission(self,
                              submission_id=None,
                              user_id=None,
                              task_id=None):
        """Request for invalidating the scores of some submissions.

        The scores to be cleared are the ones regarding 1) a submission,
        or 2) all submissions of a user, or 3) all submissions of a
        task, or 4) all submissions (if all parameters are None).

        submission_id (int): id of the submission to invalidate, or
                             None.
        user_id (int): id of the user we want to invalidate, or None.
        task_id (int): id of the task we want to invalidate, or None.

        """
        logger.info("Invalidation request received.")

        submission_ids = get_submissions(self.contest_id, submission_id,
                                         user_id, task_id)

        logger.info("Submissions to invalidate: %s" % len(submission_ids))
        if len(submission_ids) == 0:
            return

        new_submission_ids = []
        with SessionGen(commit=True) as session:
            for submission_id in submission_ids:
                submission = Submission.get_from_id(submission_id, session)
                # If the submission is not evaluated, it does not have
                # a score to invalidate, and, when evaluated,
                # ScoringService will be prompted to score it. So in
                # that case we do not have to do anything.
                if submission.evaluated():
                    submission.invalidate_score()
                    new_submission_ids.append(submission_id)

        old_s = len(self.submission_ids_to_score)
        old_t = len(self.submission_ids_to_token)
        self.submission_ids_to_score |= set(new_submission_ids)
        if old_s + old_t == 0:
            self.add_timeout(self.score_old_submissions,
                             None,
                             0.5,
                             immediately=False)
Code Example #9
    def invalidate_submission(self,
                              submission_id=None,
                              user_id=None,
                              task_id=None):
        """Request for invalidating the scores of some submissions.

        The scores to be cleared are the ones regarding 1) a submission,
        or 2) all submissions of a user, or 3) all submissions of a
        task, or 4) all submissions (if all parameters are None).

        submission_id (int): id of the submission to invalidate, or
                             None.
        user_id (int): id of the user we want to invalidate, or None.
        task_id (int): id of the task we want to invalidate, or None.

        """
        logger.info("Invalidation request received.")

        submission_ids = get_submissions(
            self.contest_id,
            submission_id, user_id, task_id)

        logger.info("Submissions to invalidate: %s" % len(submission_ids))
        if len(submission_ids) == 0:
            return

        new_submission_ids = []
        with SessionGen(commit=True) as session:
            for submission_id in submission_ids:
                submission = Submission.get_from_id(submission_id, session)
                # If the submission is not evaluated, it does not have
                # a score to invalidate, and, when evaluated,
                # ScoringService will be prompted to score it. So in
                # that case we do not have to do anything.
                if submission.evaluated():
                    submission.invalidate_score()
                    new_submission_ids.append(submission_id)

        old_s = len(self.submission_ids_to_score)
        old_t = len(self.submission_ids_to_token)
        self.submission_ids_to_score |= set(new_submission_ids)
        if old_s + old_t == 0:
            self.add_timeout(self.score_old_submissions, None,
                             0.5, immediately=False)
Code Example #10
File: ScoringService.py  Project: volpino/cms
    def submission_tokened(self, submission_id, timestamp):
        """This RPC inform ScoringService that the user has played the
        token on a submission.

        submission_id (int): the id of the submission that changed.
        timestamp (int): the time of the token.

        """
        with SessionGen(commit=False) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.error("Received request for "
                             "unexistent submission id %s." % submission_id)
                raise KeyError
            if submission.user.hidden:
                return

            # Mark submission as tokened.
            self.submission_ids_tokened.add(submission_id)

            # Data to send to remote rankings.
            submission_url = "/submissions/%s" % encode_id(submission_id)
            submission_put_data = {"user": encode_id(submission.user.username),
                            "task": encode_id(submission.task.name),
                            "time": submission.timestamp}
            subchange_url = "/subchanges/%s" % encode_id("%s%st" %
                                                         (timestamp,
                                                          submission_id))
            subchange_put_data = {"submission": encode_id(submission_id),
                                  "time": timestamp,
                                  "token": True}

        # Adding operations to the queue.
        for ranking in self.rankings:
            self.operation_queue.append((send_submission,
                                         [ranking, submission_url,
                                          submission_put_data]))
            self.operation_queue.append((send_change,
                                         [ranking, subchange_url,
                                          subchange_put_data]))
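
In this older revision the queue holds (callable, argument list) pairs rather than per-ranking dictionaries. A purely hypothetical consumer loop, just to illustrate the shape of the entries appended above; the real service drains the queue elsewhere:

        # Hypothetical drain loop, illustrative only.  Each queued entry is a
        # (function, argument_list) pair such as
        # (send_submission, [ranking, submission_url, submission_put_data]).
        while self.operation_queue:
            operation, args = self.operation_queue.pop(0)
            operation(*args)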
Code Example #11
    def new_submission(self, submission_id):
        """This RPC prompts ES of the existence of a new
        submission. ES takes the right countermeasures, i.e., it
        schedules it for compilation.

        submission_id (string): the id of the new submission.

        returns (bool): True if everything went well.

        """
        with SessionGen(commit=False) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.error("[new_submission] Couldn't find submission "
                             "%s in the database." % submission_id)
                return

            if to_compile(submission):
                self.push_in_queue((EvaluationService.JOB_TYPE_COMPILATION,
                                    submission_id),
                                   EvaluationService.JOB_PRIORITY_HIGH,
                                   submission.timestamp)
Code Example #12
    def invalidate_submission(self,
                              submission_id=None,
                              user_id=None,
                              task_id=None):
        """Request for invalidating the scores of some submissions.

        The scores to be cleared are the ones regarding 1) a submission,
        or 2) all submissions of a user, or 3) all submissions of a
        task, or 4) all submissions (if all parameters are None).

        submission_id (int): id of the submission to invalidate, or
                             None.
        user_id (int): id of the user we want to invalidate, or None.
        task_id (int): id of the task we want to invalidate, or None.

        """
        logger.info("Invalidation request received.")

        submission_ids = get_submissions(self.contest_id, submission_id,
                                         user_id, task_id)

        logger.info("Submissions to invalidate: %s" % len(submission_ids))
        if len(submission_ids) == 0:
            return

        with SessionGen(commit=True) as session:
            for submission_id in submission_ids:
                submission = Submission.get_from_id(submission_id, session)
                submission.invalidate_score()

        old_s = len(self.submission_ids_to_score)
        old_t = len(self.submission_ids_to_token)
        self.submission_ids_to_score = submission_ids + \
                                       self.submission_ids_to_score
        if old_s + old_t == 0:
            self.add_timeout(self.score_old_submissions,
                             None,
                             0.5,
                             immediately=False)
Code Example #13
File: ScoringService.py  Project: volpino/cms
    def invalidate_submission(self,
                              submission_id=None,
                              user_id=None,
                              task_id=None):
        """Request for invalidating the scores of some submissions.

        The scores to be cleared are the ones regarding 1) a submission,
        or 2) all submissions of a user, or 3) all submissions of a
        task, or 4) all submissions (if all parameters are None).

        submission_id (int): id of the submission to invalidate, or
                             None.
        user_id (int): id of the user we want to invalidate, or None.
        task_id (int): id of the task we want to invalidate, or None.

        """
        logger.info("Invalidation request received.")

        submission_ids = get_submissions(
            self.contest_id,
            submission_id, user_id, task_id)

        logger.info("Submissions to invalidate: %s" % len(submission_ids))
        if len(submission_ids) == 0:
            return

        with SessionGen(commit=True) as session:
            for submission_id in submission_ids:
                submission = Submission.get_from_id(submission_id, session)
                submission.invalidate_score()

        old_s = len(self.submission_ids_to_score)
        old_t = len(self.submission_ids_to_token)
        self.submission_ids_to_score = submission_ids + \
                                       self.submission_ids_to_score
        if old_s + old_t == 0:
            self.add_timeout(self.score_old_submissions, None,
                             0.5, immediately=False)
Code Example #14
File: ScoringService.py  Project: s546360316/cms
    def new_evaluation(self, submission_id, dataset_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.
        dataset_id (int): the id of the dataset to use.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)

            if submission is None:
                logger.error("[new_evaluation] Couldn't find submission %d "
                             "in the database." % submission_id)
                raise ValueError

            dataset = Dataset.get_from_id(dataset_id, session)

            if dataset is None:
                logger.error("[new_evaluation] Couldn't find dataset %d "
                             "in the database." % dataset_id)
                raise ValueError

            submission_result = submission.get_result(dataset)

            if submission_result is None or not submission_result.compiled():
                logger.warning("[new_evaluation] Submission %d(%d) is not "
                               "compiled." %
                               (submission_id, dataset_id))
                return
            elif submission_result.compilation_outcome == "ok" and \
                    not submission_result.evaluated():
                logger.warning("[new_evaluation] Submission %d(%d) compiled "
                               "correctly but is not evaluated." %
                               (submission_id, dataset_id))
                return
            elif submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            # Assign score to the submission.
            scorer = self.scorers[dataset_id]
            scorer.add_submission(submission_id, submission.timestamp,
                                  submission.user.username,
                                  submission_result.evaluated(),
                                  dict((ev.codename,
                                        {"outcome": ev.outcome,
                                         "text": ev.text,
                                         "time": ev.execution_time,
                                         "memory": ev.memory_used})
                                       for ev in submission_result.evaluations),
                                  submission.tokened())

            # Mark submission as scored.
            self.submission_results_scored.add((submission_id, dataset_id))

            # Filling submission's score info in the db.
            submission_result.score = \
                scorer.pool[submission_id]["score"]
            submission_result.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission_result.score_details = \
                scorer.pool[submission_id]["details"]
            submission_result.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission_result.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            try:
                ranking_score_details = json.loads(
                        submission_result.ranking_score_details)
            except (TypeError, ValueError):
                # It may be blank.
                ranking_score_details = None

            # If we are not a live dataset then we can bail out here,
            # and avoid updating RWS.
            if dataset is not submission.task.active_dataset:
                return

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))}
            subchange_id = "%s%ss" % \
                (int(make_timestamp(submission.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(str(submission_id)),
                "time": int(make_timestamp(submission.timestamp)),
                # We're sending the unrounded score to RWS
                "score": submission_result.score}
            if ranking_score_details is not None:
                subchange_put_data["extra"] = ranking_score_details

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(str(submission_id))] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
Code Example #15
    def new_evaluation(self, submission_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.error("[new_evaluation] Couldn't find "
                             " submission %d in the database." %
                             submission_id)
                raise KeyError
            elif not submission.compiled():
                logger.warning("[new_evaluation] Submission %d "
                               "is not compiled." % submission_id)
                return
            elif submission.compilation_outcome == "ok" \
                    and not submission.evaluated():
                logger.warning("[new_evaluation] Submission %d compiled "
                               "correctly but is not evaluated."
                               % submission_id)
                return
            elif submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            # Assign score to the submission.
            scorer = self.scorers[submission.task_id]
            scorer.add_submission(submission_id, submission.timestamp,
                                  submission.user.username,
                                  submission.evaluated(),
                                  dict((ev.num,
                                        {"outcome": ev.outcome,
                                         "text": ev.text,
                                         "time": ev.execution_time,
                                         "memory": ev.memory_used})
                                       for ev in submission.evaluations),
                                  submission.tokened())

            # Mark submission as scored.
            self.submission_ids_scored.add(submission_id)

            # Filling submission's score info in the db.
            submission.score = scorer.pool[submission_id]["score"]
            submission.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission.score_details = scorer.pool[submission_id]["details"]
            submission.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))}
            subchange_id = "%s%ss" % \
                (int(make_timestamp(submission.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.timestamp)),
                # We're sending the unrounded score to RWS
                "score": submission.score,
                "extra": submission.ranking_score_details}

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(submission_id)] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
Code Example #16
File: ScoringService.py  Project: bblackham/cms
    def invalidate_submission(self,
                              submission_id=None,
                              user_id=None,
                              dataset_id=None,
                              task_id=None):
        """Request for invalidating the scores of some submissions.

        The scores to be cleared are the ones regarding
            1) a submission (only submission_id and dataset_id given)
         or 2) all submissions of a user for the active dataset (only user_id
               given)
         or 3) all submissions for a dataset (only dataset_id given)

        submission_id (int): id of the submission to invalidate, or
                             None.
        user_id (int): id of the user we want to invalidate, or None.
        dataset_id (int): id of the dataset we want to invalidate, or None.

        """
        logger.info("Invalidation request received.")

        # If we are invalidating a dataset, get the task so we can find all the
        # relevant submissions.
        if submission_id is None and dataset_id is not None:
            with SessionGen(commit=False) as session:
                dataset = Dataset.get_from_id(dataset_id, session)
                task_id = dataset.task_id
        else:
            task_id = None

        submission_ids = get_submissions(
            self.contest_id,
            submission_id, user_id, task_id)

        logger.info("Submissions to invalidate: %s" % len(submission_ids))
        if len(submission_ids) == 0:
            return

        new_submission_ids = []
        with SessionGen(commit=True) as session:
            for submission_id in submission_ids:
                submission = Submission.get_from_id(submission_id, session)
                if dataset_id is None:
                    this_dataset_id = submission.task.active_dataset_id
                else:
                    this_dataset_id = dataset_id
                sr = SubmissionResult.get_from_id(
                    (submission_id, this_dataset_id), session)
                # If the submission is not evaluated, it does not have
                # a score to invalidate, and, when evaluated,
                # ScoringService will be prompted to score it. So in
                # that case we do not have to do anything.
                if sr is not None and sr.evaluated():
                    sr.invalidate_score()
                    new_submission_ids.append((submission_id, this_dataset_id))

        old_s = len(self.submission_ids_to_score)
        old_t = len(self.submission_ids_to_token)
        self.submission_ids_to_score |= set(new_submission_ids)
        if old_s + old_t == 0:
            self.add_timeout(self.score_old_submissions, None,
                             0.5, immediately=False)
Code Example #17
File: ScoringService.py  Project: beyondai/cms
    def new_evaluation(self, submission_id, dataset_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.
        dataset_id (int): the id of the dataset to use.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)

            if submission is None:
                logger.error("[new_evaluation] Couldn't find submission %d "
                             "in the database." % submission_id)
                raise ValueError

            if submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            dataset = Dataset.get_from_id(dataset_id, session)

            if dataset is None:
                logger.error("[new_evaluation] Couldn't find dataset %d "
                             "in the database." % dataset_id)
                raise ValueError

            submission_result = submission.get_result(dataset)

            # We'll accept only submissions that either didn't compile
            # at all or that did evaluate successfully.
            if submission_result is None or not submission_result.compiled():
                logger.warning("[new_evaluation] Submission %d(%d) is "
                               "not compiled." % (submission_id, dataset_id))
                return
            elif submission_result.compilation_outcome == "ok" and \
                    not submission_result.evaluated():
                logger.warning("[new_evaluation] Submission %d(%d) is "
                               "compiled but is not evaluated." %
                               (submission_id, dataset_id))
                return

            # Assign score to the submission.
            score_type = get_score_type(dataset=dataset)
            score, details, public_score, public_details, ranking_details = \
                score_type.compute_score(submission_result)

            # Mark submission as scored.
            self.submission_results_scored.add((submission_id, dataset_id))

            # Filling submission's score info in the db.
            submission_result.score = score
            submission_result.public_score = public_score

            # And details.
            submission_result.score_details = details
            submission_result.public_score_details = public_details
            submission_result.ranking_score_details = ranking_details

            # If dataset is the active one, update RWS.
            if dataset is submission.task.active_dataset:
                self.rankings_send_score(submission)
Code Example #18
File: ScoringService.py  Project: kennyboy/cms
    def new_evaluation(self, submission_id):
        """This RPC inform ScoringService that ES finished the
        evaluation for a submission.

        submission_id (int): the id of the submission that changed.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.critical("[action_finished] Couldn't find "
                                " submission %d in the database" %
                                submission_id)
                return
            if submission.user.hidden:
                return

            # Assign score to the submission.
            scorer = self.scorers[submission.task_id]
            scorer.add_submission(submission_id, submission.timestamp,
                                  submission.user.username,
                                  dict((ev.num,
                                        {"outcome": float(ev.outcome),
                                         "text": ev.text,
                                         "time": ev.execution_time,
                                         "memory": ev.memory_used})
                                       for ev in submission.evaluations),
                                  submission.tokened())

            # Mark submission as scored.
            self.submission_ids_scored.add(submission_id)

            # Filling submission's score info in the db.
            submission.score = scorer.pool[submission_id]["score"]
            submission.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission.score_details = scorer.pool[submission_id]["details"]
            submission.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))}
            subchange_id = "%s%ss" % (int(make_timestamp(submission.timestamp)), submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.timestamp)),
                "score": submission.score,
                "extra": submission.ranking_score_details}

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(ranking, dict())[encode_id(submission_id)] = submission_put_data
                self.subchange_queue.setdefault(ranking, dict())[encode_id(subchange_id)] = subchange_put_data
Code Example #19
File: ContestWebServer.py  Project: volpino/cms
    def get(self, sub_id):
        submission = Submission.get_from_id(sub_id, self.sql_session)
        if submission.user.id != self.current_user.id or submission.task.contest.id != self.contest.id:
            raise tornado.web.HTTPError(403)
        self.render("submission_snippet.html", s=submission)
Code Example #20
File: EvaluationService.py  Project: vishan/cms
    def action_finished(self, data, plus, error=None):
        """Callback from a worker, to signal that is finished some
        action (compilation or evaluation).

        data (dict): a dictionary that describes a Job instance.
        plus (tuple): the tuple (job_type,
                                 object_id,
                                 side_data=(priority, timestamp),
                                 shard_of_worker)

        """
        # TODO - The next two comments are in the wrong place and
        # hard to understand anyway.

        # We notify the pool that the worker is free (even if it
        # replied with an error), but if the pool wants to disable the
        # worker, it's because it already assigned its job to someone
        # else, so we discard the data from the worker.
        job_type, object_id, side_data, shard = plus

        # If worker was ignored, do nothing.
        if self.pool.release_worker(shard):
            return

        job_success = True
        if error is not None:
            logger.error("Received error from Worker: `%s'." % error)
            job_success = False

        else:
            try:
                job = Job.import_from_dict_with_type(data)
            except:
                logger.error("[action_finished] Couldn't build Job for data" " %s." % (data))
                job_success = False

            else:
                if not job.success:
                    logger.error("Worker %s signaled action " "not successful." % shard)
                    job_success = False

        _, timestamp = side_data

        logger.info("Action %s for submission %s completed. Success: %s." % (job_type, object_id, data["success"]))

        # We get the submission from DB and update it.
        with SessionGen(commit=False) as session:

            if job_type == EvaluationService.JOB_TYPE_COMPILATION:
                submission = Submission.get_from_id(object_id, session)
                if submission is None:
                    logger.error("[action_finished] Couldn't find " "submission %d in the database." % object_id)
                    return

                submission.compilation_tries += 1

                if job_success:
                    submission.compilation_outcome = "ok" if job.compilation_success else "fail"
                    submission.compilation_text = job.text
                    submission.compilation_shard = job.shard
                    submission.compilation_sandbox = ":".join(job.sandboxes)
                    for executable in job.executables.itervalues():
                        submission.executables[executable.filename] = executable
                        session.add(executable)

                self.compilation_ended(submission)

            elif job_type == EvaluationService.JOB_TYPE_EVALUATION:
                submission = Submission.get_from_id(object_id, session)
                if submission is None:
                    logger.error("[action_finished] Couldn't find " "submission %s in the database." % object_id)
                    return

                submission.evaluation_tries += 1

                if job_success:
                    submission.evaluation_outcome = "ok"
                    for test_number, info in job.evaluations.iteritems():
                        evaluation = Evaluation(
                            text=info["text"],
                            outcome=info["outcome"],
                            num=test_number,
                            memory_used=info["plus"].get("memory_used", None),
                            execution_time=info["plus"].get("execution_time", None),
                            execution_wall_clock_time=info["plus"].get("execution_wall_clock_time", None),
                            evaluation_shard=job.shard,
                            evaluation_sandbox=":".join(info["sandboxes"]),
                            submission=submission,
                        )
                        session.add(evaluation)

                self.evaluation_ended(submission)

            elif job_type == EvaluationService.JOB_TYPE_TEST_COMPILATION:
                user_test = UserTest.get_from_id(object_id, session)
                if user_test is None:
                    logger.error("[action_finished] Couldn't find " "user test %d in the database." % object_id)
                    return

                user_test.compilation_tries += 1

                if job_success:
                    user_test.compilation_outcome = "ok" if job.compilation_success else "fail"
                    user_test.compilation_text = job.text
                    user_test.compilation_shard = job.shard
                    user_test.compilation_sandbox = ":".join(job.sandboxes)
                    for executable in job.executables.itervalues():
                        ut_executable = UserTestExecutable.import_from_dict(executable.export_to_dict())
                        user_test.executables[ut_executable.filename] = ut_executable
                        session.add(ut_executable)

                self.user_test_compilation_ended(user_test)

            elif job_type == EvaluationService.JOB_TYPE_TEST_EVALUATION:
                user_test = UserTest.get_from_id(object_id, session)
                if user_test is None:
                    logger.error("[action_finished] Couldn't find " "user test %d in the database." % object_id)
                    return

                user_test.evaluation_tries += 1

                if job_success:
                    try:
                        [evaluation] = job.evaluations.values()
                    except ValueError:
                        logger.error(
                            "[action_finished] I expected the job "
                            "for a user test to contain a single "
                            "evaluation, while instead it has %d." % (len(job.evaluations.values()))
                        )
                        return
                    user_test.evaluation_outcome = "ok"
                    user_test.evaluation_shard = job.shard
                    user_test.output = evaluation["output"]
                    user_test.evaluation_text = evaluation["text"]
                    user_test.evaluation_sandbox = ":".join(evaluation["sandboxes"])
                    user_test.memory_used = evaluation["plus"].get("memory_used", None)
                    user_test.execution_time = evaluation["plus"].get("execution_time", None)

                self.user_test_evaluation_ended(user_test)

            else:
                logger.error("Invalid job type %r." % (job_type))
                return

            session.commit()
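
For orientation, the `plus` tuple unpacked at the top of this callback is the same tuple packed by acquire_worker in Code Example #1; this is how the callback recovers which submission or user test the worker was handling:

        # Packed by acquire_worker (Code Example #1):
        #     plus=(action, object_id, side_data, shard)
        # Unpacked at the top of action_finished:
        job_type, object_id, side_data, shard = plus
        _, timestamp = side_data  # side_data carries (priority, timestamp)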
Code Example #21
    def action_finished(self, data, plus, error=None):
        """Callback from a worker, to signal that is finished some
        action (compilation or evaluation).

        data (dict): a dictionary reporting the outcome of the action
        plus (tuple): the tuple (job=(job_type, submission_id),
                                 side_data=(priority, timestamp),
                                 shard_of_worker)

        """
        # We notify the pool that the worker is free (even if it
        # replied with an error), but if the pool wants to disable the
        # worker, it's because it already assigned its job to someone
        # else, so we discard the data from the worker.
        job, side_data, shard = plus

        # If worker was ignored, do nothing.
        if self.pool.release_worker(shard):
            return

        if error is not None:
            logger.error("Received error from Worker: `%s'." % error)
            return

        if not data["success"]:
            logger.error("Worker %s signaled action not successful." % shard)
            return

        job_type, submission_id = job
        unused_priority, timestamp = side_data

        logger.info("Action %s for submission %s completed. Success: %s." %
                    (job_type, submission_id, data["success"]))

        # We get the submission from db.
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.critical("[action_finished] Couldn't find submission "
                                "%s in the database." % submission_id)
                return

            if job_type == EvaluationService.JOB_TYPE_COMPILATION:
                submission.compilation_tries += 1
                submission.compilation_outcome = data["compilation_outcome"]
                submission.compilation_text = data["compilation_text"]
                submission.compilation_shard = data["compilation_shard"]
                submission.compilation_sandbox = data["compilation_sandbox"]
                for filename, digest in data.get("executables", []):
                    session.add(Executable(digest, filename, submission))

            if job_type == EvaluationService.JOB_TYPE_EVALUATION:
                submission.evaluation_tries += 1
                submission.evaluation_outcome = "ok"
                for test_number, info in data["evaluations"].iteritems():
                    ewct = info["execution_wall_clock_time"]  # Too long... :(
                    session.add(Evaluation(
                        text=info["text"],
                        outcome=info["outcome"],
                        num=test_number,
                        memory_used=info["memory_used"],
                        execution_time=info["execution_time"],
                        execution_wall_clock_time=ewct,
                        evaluation_shard=info["evaluation_shard"],
                        evaluation_sandbox=info["evaluation_sandbox"],
                        submission=submission))

            compilation_tries = submission.compilation_tries
            compilation_outcome = submission.compilation_outcome
            evaluation_tries = submission.evaluation_tries
            evaluated = submission.evaluated()

        # Compilation.
        if job_type == EvaluationService.JOB_TYPE_COMPILATION:
            self.compilation_ended(submission_id,
                                   timestamp,
                                   compilation_tries,
                                   compilation_outcome)

        # Evaluation.
        elif job_type == EvaluationService.JOB_TYPE_EVALUATION:
            self.evaluation_ended(submission_id,
                                  timestamp,
                                  evaluation_tries,
                                  evaluated)

        # Other (i.e. error).
        else:
            logger.error("Invalid job type %r." % job_type)
            return
Code Example #22
File: ScoringService.py  Project: volpino/cms
    def new_evaluation(self, submission_id):
        """This RPC inform ScoringService that ES finished the
        evaluation for a submission.

        submission_id (int): the id of the submission that changed.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.critical("[action_finished] Couldn't find "
                                " submission %d in the database" %
                                submission_id)
                return
            if submission.user.hidden:
                return

            # Assign score to the submission.
            scorer = self.scorers[submission.task_id]
            scorer.add_submission(submission_id, submission.timestamp,
                                  submission.user.username,
                                  [float(ev.outcome)
                                   for ev in submission.evaluations],
                                  submission.tokened())

            # Mark submission as scored.
            self.submission_ids_scored.add(submission_id)

            # Update the ranking view.
            contest = session.query(Contest).\
                      filter_by(id=self.contest_id).first()
            contest.update_ranking_view(self.scorers,
                                        task=submission.task)

            # Filling submission's score info in the db.
            submission.score = scorer.pool[submission_id]["score"]
            submission.public_score = \
                scorer.pool[submission_id]["public_score"]

            details = scorer.pool[submission_id]["details"]
            if details is None:
                details = []
            submission.score_details = json.dumps(details)

            public_details = scorer.pool[submission_id]["public_details"]
            if public_details is None:
                public_details = []
            submission.public_score_details = json.dumps(public_details)

            # Data to send to remote rankings.
            submission_url = "/submissions/%s" % encode_id(submission_id)
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": submission.timestamp}
            subchange_url = "/subchanges/%s" % encode_id("%s%ss" %
                                                         (submission.timestamp,
                                                          submission_id))
            subchange_put_data = {"submission": encode_id(submission_id),
                                  "time": submission.timestamp,
                                  "score": submission.score,
                                  "extra": details}

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        for ranking in self.rankings:
            self.operation_queue.append((send_submission,
                                         [ranking, submission_url,
                                          submission_put_data]))
            self.operation_queue.append((send_change,
                                         [ranking, subchange_url,
                                          subchange_put_data]))
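
The (function, args) pairs appended to self.operation_queue above are presumably drained later by a dispatcher loop that is not part of this snippet. What follows is a minimal sketch of such a consumer under that assumption: dispatch_operations is an illustrative name, and send_submission / send_change are only assumed to raise on failure.

def dispatch_operations(operation_queue, logger):
    """Run every queued (function, args) pair once, re-queueing failures.

    A sketch of a possible consumer for the queue filled by
    new_evaluation, not the actual ScoringService dispatcher.
    """
    failed = []
    while operation_queue:
        func, args = operation_queue.pop(0)
        try:
            func(*args)
        except Exception as error:
            logger.warning("Operation %s failed (%s); will retry later." %
                           (func.__name__, error))
            failed.append((func, args))
    # Put the failed operations back so a later pass can retry them.
    operation_queue.extend(failed)
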
Code example #23
    def invalidate_submission(self,
                              submission_id=None,
                              user_id=None,
                              task_id=None,
                              level="compilation"):
        """Request for invalidating some computed data.

        The data to be cleared (compilations and evaluations, or
        evaluations only) are those regarding 1) a single submission,
        2) all submissions of a user, 3) all submissions of a task, or
        4) all submissions (if all parameters are None).

        The data are cleared, the jobs involving those submissions that
        are still enqueued are deleted, and the ones already assigned
        to workers are ignored. New appropriate jobs are then enqueued.

        submission_id (int): id of the submission to invalidate, or
                             None.
        user_id (int): id of the user we want to invalidate, or None.
        task_id (int): id of the task we want to invalidate, or None.
        level (string): 'compilation' or 'evaluation'.

        """
        logger.info("Invalidation request received.")
        if level not in ["compilation", "evaluation"]:
            err_msg = "Unexpected invalidation level `%s'." % level
            logger.warning(err_msg)
            raise ValueError(err_msg)

        submission_ids = get_submissions(
            self.contest_id,
            submission_id, user_id, task_id)

        logger.info("Submissions to invalidate for %s: %s." %
                    (level, len(submission_ids)))
        if len(submission_ids) == 0:
            return

        for submission_id in submission_ids:
            jobs = [(EvaluationService.JOB_TYPE_COMPILATION, submission_id),
                    (EvaluationService.JOB_TYPE_EVALUATION, submission_id)]
            for job in jobs:
                try:
                    self.queue.remove(job)
                except KeyError:
                    pass  # Ok, the job wasn't in the queue.
                try:
                    self.pool.ignore_job(job)
                except LookupError:
                    pass  # Ok, the job wasn't in the pool.

        # We invalidate the appropriate data and queue the jobs to
        # recompute those data.
        with SessionGen(commit=True) as session:
            for submission_id in submission_ids:
                submission = Submission.get_from_id(submission_id, session)

                if level == "compilation":
                    submission.invalidate_compilation()
                    if to_compile(submission):
                        self.push_in_queue(
                            (EvaluationService.JOB_TYPE_COMPILATION,
                             submission_id),
                            EvaluationService.JOB_PRIORITY_HIGH,
                            submission.timestamp)
                elif level == "evaluation":
                    submission.invalidate_evaluation()
                    if to_evaluate(submission):
                        self.push_in_queue(
                            (EvaluationService.JOB_TYPE_EVALUATION,
                             submission_id),
                            EvaluationService.JOB_PRIORITY_MEDIUM,
                            submission.timestamp)
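
For reference, a hedged usage sketch of the method above; `service` stands for an EvaluationService instance and the numeric ids are placeholders, not values taken from the original code.

# Re-compile (and subsequently re-evaluate) a single submission.
service.invalidate_submission(submission_id=42, level="compilation")

# Re-evaluate every submission of one task, keeping compilations.
service.invalidate_submission(task_id=7, level="evaluation")

# With all ids left as None, every submission of the contest is hit.
service.invalidate_submission(level="evaluation")

# An unexpected level is rejected.
service.invalidate_submission(level="scoring")  # raises ValueError
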
Code example #24
File: ScoringService.py  Project: sekouperry/cms
    def new_evaluation(self, submission_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.error("[new_evaluation] Couldn't find "
                             " submission %d in the database." % submission_id)
                raise KeyError
            elif not submission.compiled():
                logger.warning("[new_evaluation] Submission %d "
                               "is not compiled." % submission_id)
                return
            elif submission.compilation_outcome == "ok" \
                    and not submission.evaluated():
                logger.warning("[new_evaluation] Submission %d compiled "
                               "correctly but is not evaluated." %
                               submission_id)
                return
            elif submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            # Assign score to the submission.
            scorer = self.scorers[submission.task_id]
            scorer.add_submission(
                submission_id, submission.timestamp, submission.user.username,
                submission.evaluated(),
                dict((ev.num, {
                    "outcome": ev.outcome,
                    "text": ev.text,
                    "time": ev.execution_time,
                    "memory": ev.memory_used
                }) for ev in submission.evaluations), submission.tokened())

            # Mark submission as scored.
            self.submissions_scored.add(submission_id)

            # Filling submission's score info in the db.
            submission.score = scorer.pool[submission_id]["score"]
            submission.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission.score_details = scorer.pool[submission_id]["details"]
            submission.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))
            }
            subchange_id = "%s%ss" % \
                (int(make_timestamp(submission.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.timestamp)),
                # We're sending the unrounded score to RWS
                "score": submission.score,
                "extra": submission.ranking_score_details
            }

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(submission_id)] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
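
The entries queued per ranking above are pushed to the remote rankings by machinery outside this snippet. Below is a minimal sketch of such a flush, assuming a hypothetical put_to_ranking(ranking, path, payload) sender; flush_ranking_queues and put_to_ranking are illustrative names only, not part of the original service.

def flush_ranking_queues(service, put_to_ranking):
    """Drain the per-ranking queues filled by new_evaluation.

    service: an object exposing operation_queue_lock, submission_queue
             and subchange_queue as in the snippet above.
    put_to_ranking(ranking, path, payload): hypothetical sender that
             pushes one payload to one remote ranking.
    """
    with service.operation_queue_lock:
        # Snapshot and clear under the lock so new_evaluation can keep
        # queueing while the (possibly slow) network calls run.
        submissions = dict(service.submission_queue)
        subchanges = dict(service.subchange_queue)
        service.submission_queue.clear()
        service.subchange_queue.clear()

    for ranking, entries in submissions.items():
        for sub_id, data in entries.items():
            put_to_ranking(ranking, "/submissions/%s" % sub_id, data)
    for ranking, entries in subchanges.items():
        for change_id, data in entries.items():
            put_to_ranking(ranking, "/subchanges/%s" % change_id, data)
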