Example #1
    def rankings_send_score(self, submission):
        submission_result = submission.get_result()

        # Data to send to remote rankings.
        submission_id = str(submission.id)
        submission_data = {
            "user": encode_id(submission.user.username),
            "task": encode_id(submission.task.name),
            "time": int(make_timestamp(submission.timestamp))}

        subchange_id = \
            "%d%ss" % (make_timestamp(submission.timestamp), submission_id)
        subchange_data = {
            "submission": submission_id,
            "time": int(make_timestamp(submission.timestamp))}

        # XXX This check is probably useless.
        if submission_result is not None and submission_result.scored():
            # We're sending the unrounded score to RWS
            subchange_data["score"] = submission_result.score
            subchange_data["extra"] = \
                json.loads(submission_result.ranking_score_details)

        # Adding operations to the queue.
        for ranking in self.rankings:
            ranking.data_queue.put((ranking.SUBMISSION_TYPE,
                                    {submission_id: submission_data}))
            ranking.data_queue.put((ranking.SUBCHANGE_TYPE,
                                    {subchange_id: subchange_data}))
Example #2
File: Contest.py  Project: anantha89/cms
    def export_to_dict(self, skip_submissions=False):
        """Return object data as a dictionary.

        """
        return {
            "name": self.name,
            "description": self.description,
            "tasks": [task.export_to_dict() for task in self.tasks],
            "users": [user.export_to_dict(skip_submissions) for user in self.users],
            "token_initial": self.token_initial,
            "token_max": self.token_max,
            "token_total": self.token_total,
            "token_min_interval": self.token_min_interval.total_seconds(),
            "token_gen_time": self.token_gen_time.total_seconds() / 60,
            "token_gen_number": self.token_gen_number,
            "start": make_timestamp(self.start) if self.start is not None else None,
            "stop": make_timestamp(self.stop) if self.stop is not None else None,
            "timezone": self.timezone,
            "per_user_time": self.per_user_time.total_seconds() if self.per_user_time is not None else None,
            "max_submission_number": self.max_submission_number if self.max_submission_number is not None else None,
            "max_usertest_number": self.max_usertest_number if self.max_usertest_number is not None else None,
            "min_submission_interval": self.min_submission_interval.total_seconds()
            if self.min_submission_interval is not None
            else None,
            "min_usertest_interval": self.min_usertest_interval.total_seconds()
            if self.min_usertest_interval is not None
            else None,
            "announcements": [announcement.export_to_dict() for announcement in self.announcements],
        }
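Example #2 flattens datetime fields with make_timestamp() and interval fields with total_seconds(); an importer would presumably apply the inverse conversions. A minimal sketch of that inverse, assuming timestamps are UTC seconds (hypothetical helper, not the actual Contest.import_from_dict):

# Hypothetical inverse of the conversions used by the export above;
# illustration only.
from datetime import datetime, timedelta


def import_times(data):
    # Rebuild datetime/timedelta fields from their exported forms.
    start = (datetime.utcfromtimestamp(data["start"])
             if data["start"] is not None else None)
    token_min_interval = timedelta(seconds=data["token_min_interval"])
    return start, token_min_interval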
Example #3
    def export_to_dict(self, skip_submissions=False, skip_user_tests=False):
        """Return object data as a dictionary.

        """
        return {'name':               self.name,
                'description':        self.description,
                'tasks':              [task.export_to_dict()
                                       for task in self.tasks],
                'users':              [user.export_to_dict(skip_submissions, skip_user_tests)
                                       for user in self.users],
                'token_initial':      self.token_initial,
                'token_max':          self.token_max,
                'token_total':        self.token_total,
                'token_min_interval': self.token_min_interval.total_seconds(),
                'token_gen_time':     self.token_gen_time.total_seconds(),
                'token_gen_number':   self.token_gen_number,
                'start':              make_timestamp(self.start) if self.start is not None else None,
                'stop':               make_timestamp(self.stop) if self.stop is not None else None,
                'timezone':           self.timezone,
                'per_user_time':      self.per_user_time.total_seconds() if self.per_user_time is not None else None,
                'max_submission_number': self.max_submission_number,
                'max_user_test_number': self.max_user_test_number,
                'min_submission_interval': self.min_submission_interval.total_seconds() if self.min_submission_interval is not None else None,
                'min_user_test_interval': self.min_user_test_interval.total_seconds() if self.min_user_test_interval is not None else None,
                'announcements':      [announcement.export_to_dict()
                                       for announcement in self.announcements],
                }
Example #4
    def send_token(self, submission):
        """Send the token for the given submission to all rankings.

        Put the submission and its token subchange in all the proxy
        queues for them to be sent to rankings.

        """
        # Data to send to remote rankings.
        submission_id = str(submission.id)
        submission_data = {
            "user": encode_id(submission.user.username),
            "task": encode_id(submission.task.name),
            "time": int(make_timestamp(submission.timestamp))}

        subchange_id = "%d%st" % (make_timestamp(submission.token.timestamp),
                                  submission_id)
        subchange_data = {
            "submission": submission_id,
            "time": int(make_timestamp(submission.token.timestamp)),
            "token": True}

        # Adding operations to the queue.
        for ranking in self.rankings:
            ranking.data_queue.put((ranking.SUBMISSION_TYPE,
                                    {submission_id: submission_data}))
            ranking.data_queue.put((ranking.SUBCHANGE_TYPE,
                                    {subchange_id: subchange_data}))

        self.tokens_sent_to_rankings.add(submission.id)
Example #5
    def dataset_updated(self, task_id):
        """This function updates RWS with new data about a task. It should be
        called after the live dataset of a task is changed.

        task_id (int): id of the task whose dataset has changed.

        """
        with SessionGen(commit=False) as session:
            task = Task.get_from_id(task_id, session)
            dataset_id = task.active_dataset_id

        logger.info("Dataset update for task %d (dataset now is %d)." % (
            task_id, dataset_id))

        submission_ids = get_submissions(self.contest_id, task_id=task_id)

        subchanges = []
        with SessionGen(commit=False) as session:
            for submission_id in submission_ids:
                submission = Submission.get_from_id(submission_id, session)
                submission_result = SubmissionResult.get_from_id(
                    (submission_id, dataset_id), session)

                if submission_result is None:
                    # Not yet compiled, evaluated or scored.
                    score = None
                    ranking_score_details = None
                else:
                    score = submission_result.score
                    try:
                        ranking_score_details = json.loads(
                                submission_result.ranking_score_details)
                    except (json.decoder.JSONDecodeError, TypeError):
                        # It may be blank.
                        ranking_score_details = None

                # Data to send to remote rankings.
                subchange_id = "%s%ss" % \
                    (int(make_timestamp(submission.timestamp)),
                     submission_id)
                subchange_put_data = {
                    "submission": encode_id(submission_id),
                    "time": int(make_timestamp(submission.timestamp))}
                # We're sending the unrounded score to RWS
                if score is not None:
                    subchange_put_data["score"] = score
                if ranking_score_details is not None:
                    subchange_put_data["extra"] = ranking_score_details
                subchanges.append((subchange_id, subchange_put_data))

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                for subchange_id, data in subchanges:
                    self.subchange_queue.setdefault(
                        ranking,
                        dict())[encode_id(subchange_id)] = data
Example #6
    def initialize(self):
        """Send basic data to all the rankings.

        It's data that's supposed to be sent before the contest, that's
        needed to understand what we're talking about when we send
        submissions: contest, users, tasks.

        No support for teams, flags and faces.

        """
        logger.info("Initializing rankings.")

        with SessionGen() as session:
            contest = Contest.get_from_id(self.contest_id, session)

            if contest is None:
                logger.error("Received request for unexistent contest "
                             "id %s." % self.contest_id)
                raise KeyError

            contest_id = encode_id(contest.name)
            contest_data = {
                "name": contest.description,
                "begin": int(make_timestamp(contest.start)),
                "end": int(make_timestamp(contest.stop)),
                "score_precision": contest.score_precision}

            users = dict()

            for user in contest.users:
                if not user.hidden:
                    users[encode_id(user.username)] = \
                        {"f_name": user.first_name,
                         "l_name": user.last_name,
                         "team": None}

            tasks = dict()

            for task in contest.tasks:
                score_type = get_score_type(dataset=task.active_dataset)
                tasks[encode_id(task.name)] = \
                    {"short_name": task.name,
                     "name": task.title,
                     "contest": encode_id(contest.name),
                     "order": task.num,
                     "max_score": score_type.max_score,
                     "extra_headers": score_type.ranking_headers,
                     "score_precision": task.score_precision}

        for ranking in self.rankings:
            ranking.data_queue.put((ranking.CONTEST_TYPE,
                                    {contest_id: contest_data}))
            ranking.data_queue.put((ranking.USER_TYPE, users))
            ranking.data_queue.put((ranking.TASK_TYPE, tasks))
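For orientation, the payloads this method puts on each ranking's data_queue have roughly the following shape (the keys mirror what the code above builds; all values below are made up):

# Illustrative shapes for the CONTEST_TYPE / USER_TYPE / TASK_TYPE payloads
# queued above (hypothetical values).
contest_entry = {"con1": {"name": "Example Contest",
                          "begin": 1338537600, "end": 1338555600,
                          "score_precision": 2}}
user_entries = {"alice": {"f_name": "Alice", "l_name": "Smith", "team": None}}
task_entries = {"task1": {"short_name": "task1", "name": "Task One",
                          "contest": "con1", "order": 0, "max_score": 100.0,
                          "extra_headers": [], "score_precision": 2}}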
Example #7
File: User.py  Project: kennyboy/cms
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        return {'question_timestamp': make_timestamp(self.question_timestamp),
                'subject':            self.subject,
                'text':               self.text,
                'reply_timestamp':    make_timestamp(self.reply_timestamp)
                if self.reply_timestamp is not None else None,
                'reply_subject':      self.reply_subject,
                'reply_text':         self.reply_text,
                'ignored':            self.ignored}
Example #8
File: User.py  Project: bblackham/cms
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        return {'question_timestamp': make_timestamp(self.question_timestamp),
                'subject':            self.subject,
                'text':               self.text,
                'reply_timestamp':    make_timestamp(self.reply_timestamp)
                if self.reply_timestamp is not None else None,
                'reply_subject':      self.reply_subject,
                'reply_text':         self.reply_text,
                'ignored':            self.ignored}
Example #9
    def submission_tokened(self, submission_id):
        """This RPC inform ScoringService that the user has played the
        token on a submission.

        submission_id (int): the id of the submission that changed.
        timestamp (int): the time of the token.

        """
        with SessionGen(commit=False) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.error("[submission_tokened] Received token request for "
                             "unexistent submission id %s." % submission_id)
                raise KeyError
            elif submission.user.hidden:
                logger.info("[submission_tokened] Token for submission %d "
                            "not sent because user is hidden." % submission_id)
                return

            # Mark submission as tokened.
            self.submissions_tokened.add(submission_id)

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))
            }
            subchange_id = "%s%st" % \
                (int(make_timestamp(submission.token.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.token.timestamp)),
                "token": True
            }

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(submission_id)] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
Example #10
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        res = {
            'task': self.task.name,
            'timestamp': make_timestamp(self.timestamp),
            'files': [_file.export_to_dict()
                      for _file in self.files.itervalues()],
            'managers': [manager.export_to_dict()
                         for manager in self.managers.itervalues()],
            'input': self.input,
            'output': self.output,
            'language': self.language,
            'compilation_outcome': self.compilation_outcome,
            'compilation_tries': self.compilation_tries,
            'compilation_text': self.compilation_text,
            'compilation_shard': self.compilation_shard,
            'compilation_sandbox': self.compilation_sandbox,
            'executables': [executable.export_to_dict()
                            for executable
                            in self.executables.itervalues()],
            'evaluation_outcome': self.evaluation_outcome,
            'evaluation_text': self.evaluation_text,
            'evaluation_tries': self.evaluation_tries,
            'evaluation_shard': self.evaluation_shard,
            'evaluation_sandbox': self.evaluation_sandbox,
            'memory_used': self.memory_used,
            'execution_time': self.execution_time,
            }
        return res
Example #11
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        res = {
            'task': self.task.name,
            'timestamp': make_timestamp(self.timestamp),
            'files': [_file.export_to_dict()
                      for _file in self.files.itervalues()],
            'language': self.language,
            'compilation_outcome': self.compilation_outcome,
            'compilation_tries': self.compilation_tries,
            'compilation_text': self.compilation_text,
            'compilation_shard': self.compilation_shard,
            'compilation_sandbox': self.compilation_sandbox,
            'executables': [executable.export_to_dict()
                            for executable
                            in self.executables.itervalues()],
            'evaluation_outcome': self.evaluation_outcome,
            'evaluations': [evaluation.export_to_dict()
                            for evaluation in self.evaluations],
            'evaluation_tries': self.evaluation_tries,
            'token': self.token
            }
        if self.token is not None:
            res['token'] = self.token.export_to_dict()
        return res
Example #12
File: User.py  Project: bblackham/cms
    def export_to_dict(self, skip_submissions=False, skip_user_tests=False):
        """Return object data as a dictionary.

        """
        submissions = []
        if not skip_submissions:
            submissions = [submission.export_to_dict()
                           for submission in self.submissions]
        user_tests = []
        if not skip_user_tests:
            user_tests = [user_test.export_to_dict()
                          for user_test in self.user_tests]

        return {'first_name':    self.first_name,
                'last_name':     self.last_name,
                'username':      self.username,
                'password':      self.password,
                'email':         self.email,
                'ip':            self.ip,
                'hidden':        self.hidden,
                'primary_statements': self.primary_statements,
                'timezone':      self.timezone,
                'starting_time': make_timestamp(self.starting_time)
                if self.starting_time is not None else None,
                'extra_time':    self.extra_time.total_seconds(),
                'messages':      [message.export_to_dict()
                                  for message in self.messages],
                'questions':     [question.export_to_dict()
                                  for question in self.questions],
                'submissions':   submissions,
                'user_tests':    user_tests}
Example #13
File: UserTest.py  Project: vishan/cms
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        res = {
            "task": self.task.name,
            "timestamp": make_timestamp(self.timestamp),
            "files": [_file.export_to_dict() for _file in self.files.itervalues()],
            "managers": [manager.export_to_dict() for manager in self.managers.itervalues()],
            "input": self.input,
            "output": self.output,
            "language": self.language,
            "compilation_outcome": self.compilation_outcome,
            "compilation_tries": self.compilation_tries,
            "compilation_text": self.compilation_text,
            "compilation_shard": self.compilation_shard,
            "compilation_sandbox": self.compilation_sandbox,
            "executables": [executable.export_to_dict() for executable in self.executables.itervalues()],
            "evaluation_outcome": self.evaluation_outcome,
            "evaluation_text": self.evaluation_text,
            "evaluation_tries": self.evaluation_tries,
            "evaluation_shard": self.evaluation_shard,
            "evaluation_sandbox": self.evaluation_sandbox,
            "memory_used": self.memory_used,
            "execution_time": self.execution_time,
        }
        return res
Example #14
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        return {
            'timestamp': make_timestamp(self.timestamp)
            }
Example #15
File: Contest.py  Project: bblackham/cms
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        return {'timestamp': make_timestamp(self.timestamp),
                'subject':   self.subject,
                'text':      self.text}
Example #16
File: UserTest.py  Project: VittGam/cms
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        res = {
            'task': self.task.name,
            'timestamp': make_timestamp(self.timestamp),
            'files': [_file.export_to_dict()
                      for _file in self.files.itervalues()],
            'managers': [manager.export_to_dict()
                         for manager in self.managers.itervalues()],
            'input': self.input,
            'output': self.output,
            'language': self.language,
            'compilation_outcome': self.compilation_outcome,
            'compilation_tries': self.compilation_tries,
            'compilation_text': self.compilation_text,
            'compilation_shard': self.compilation_shard,
            'compilation_sandbox': self.compilation_sandbox,
            'executables': [executable.export_to_dict()
                            for executable
                            in self.executables.itervalues()],
            'evaluation_outcome': self.evaluation_outcome,
            'evaluation_text': self.evaluation_text,
            'evaluation_tries': self.evaluation_tries,
            'evaluation_shard': self.evaluation_shard,
            'evaluation_sandbox': self.evaluation_sandbox,
            'memory_used': self.memory_used,
            'execution_time': self.execution_time,
            }
        return res
Example #17
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        return {'timestamp': make_timestamp(self.timestamp),
                'subject':   self.subject,
                'text':      self.text}
Example #18
File: User.py  Project: kennyboy/cms
    def export_to_dict(self, skip_submissions=False):
        """Return object data as a dictionary.

        """
        submissions = []
        if not skip_submissions:
            submissions = [submission.export_to_dict()
                           for submission in self.submissions]
        return {'first_name':    self.first_name,
                'last_name':     self.last_name,
                'username':      self.username,
                'password':      self.password,
                'email':         self.email,
                'ip':            self.ip,
                'hidden':        self.hidden,
                'statements':    self.statements,
                'timezone':      self.timezone,
                'starting_time': make_timestamp(self.starting_time)
                if self.starting_time is not None else None,
                'extra_time':    self.extra_time.total_seconds(),
                'messages':      [message.export_to_dict()
                                  for message in self.messages],
                'questions':     [question.export_to_dict()
                                  for question in self.questions],
                'submissions':   submissions}
Example #19
    def submission_tokened(self, submission_id):
        """This RPC inform ScoringService that the user has played the
        token on a submission.

        submission_id (int): the id of the submission that changed.
        timestamp (int): the time of the token.

        """
        with SessionGen(commit=False) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.error("[submission_tokened] Received token request for "
                             "unexistent submission id %s." % submission_id)
                raise KeyError
            elif submission.user.hidden:
                logger.info("[submission_tokened] Token for submission %d "
                            "not sent because user is hidden." % submission_id)
                return

            # Mark submission as tokened.
            self.submissions_tokened.add(submission_id)

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))}
            subchange_id = "%s%st" % \
                (int(make_timestamp(submission.token.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.token.timestamp)),
                "token": True}

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(submission_id)] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
Example #20
    def rankings_initialize(self):
        """Send to all the rankings all the data that are supposed to be
        sent before the contest: contest, users, tasks. No support for
        teams, flags and faces.

        """
        logger.info("Initializing rankings.")

        with SessionGen(commit=False) as session:
            contest = Contest.get_from_id(self.contest_id, session)

            if contest is None:
                logger.error("Received request for unexistent contest "
                             "id %s." % self.contest_id)
                raise KeyError

            contest_id = encode_id(contest.name)
            contest_data = {
                "name": contest.description,
                "begin": int(make_timestamp(contest.start)),
                "end": int(make_timestamp(contest.stop)),
                "score_precision": contest.score_precision}

            users = dict((encode_id(user.username),
                          {"f_name": user.first_name,
                           "l_name": user.last_name,
                           "team": None})
                         for user in contest.users
                         if not user.hidden)

            tasks = dict((encode_id(task.name),
                          {"name": task.title,
                           "contest": encode_id(contest.name),
                           "max_score": 100.0,
                           "score_precision": task.score_precision,
                           "extra_headers": [],
                           "order": task.num,
                           "short_name": task.name})
                         for task in contest.tasks)

        for ranking in self.rankings:
            ranking.data_queue.put((ranking.CONTEST_TYPE,
                                    {contest_id: contest_data}))
            ranking.data_queue.put((ranking.USER_TYPE, users))
            ranking.data_queue.put((ranking.TASK_TYPE, tasks))
Example #21
    def rankings_send_token(self, submission):
        # Data to send to remote rankings.
        submission_id = str(submission.id)
        submission_data = {
            "user": encode_id(submission.user.username),
            "task": encode_id(submission.task.name),
            "time": int(make_timestamp(submission.timestamp))}

        subchange_id = \
            "%d%st" % (make_timestamp(submission.token.timestamp), submission_id)
        subchange_data = {
            "submission": submission_id,
            "time": int(make_timestamp(submission.token.timestamp)),
            "token": True}

        # Adding operations to the queue.
        for ranking in self.rankings:
            ranking.data_queue.put((ranking.SUBMISSION_TYPE,
                                    {submission_id: submission_data}))
            ranking.data_queue.put((ranking.SUBCHANGE_TYPE,
                                    {subchange_id: subchange_data}))
Example #22
    def get_status(self):
        """Returns the content of the queue. Note that the order may
        be not correct, but the first element is the one at the top.

        returns (list): a list of dictionary containing the
                        representation of the job, the priority and
                        the timestamp.
        """
        ret = []
        for data in self._queue:
            ret.append({"job": data[2], "priority": data[0], "timestamp": make_timestamp(data[1])})
        return ret
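The entries read from self._queue above are assumed to be (priority, timestamp, job) triples; here is a tiny illustration with made-up data, reusing the make_timestamp sketch from the note after Example #1:

# Made-up data illustrating the (priority, datetime, job) triples that
# get_status() expects to find in self._queue.
from datetime import datetime

_queue = [(1, datetime(2012, 6, 1, 10, 30), {"type": "compile", "id": 42}),
          (5, datetime(2012, 6, 1, 10, 31), {"type": "evaluate", "id": 42})]
status = [{"job": job, "priority": priority, "timestamp": make_timestamp(ts)}
          for priority, ts, job in _queue]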
Example #23
    def search_jobs_not_done(self):
        """Look in the database for submissions that have not been
        scored for no good reasons. Put the missing job in the queue.

        """
        # Do this only if we are not still loading old submissions
        # (from the start of the service).
        if self.scoring_old_submission:
            return True

        with SessionGen(commit=False) as session:
            new_submission_ids_to_score = set([])
            new_submission_ids_to_token = set([])
            contest = session.query(Contest).\
                      filter_by(id=self.contest_id).first()
            for submission in contest.get_submissions():
                for dataset in get_autojudge_datasets(submission.task):
                    # If a submission result does not yet exist, then we don't
                    # need to score it.
                    r = SubmissionResult.get_from_id(
                        (submission.id, dataset.id),
                        session)
                    if r is None:
                        continue

                    x = (r.submission_id, r.dataset_id)
                    if r is not None and (r.evaluated()
                        or r.compilation_outcome == "fail") \
                            and x not in self.submission_ids_scored:
                        new_submission_ids_to_score.add(x)
                    if r.submission.tokened() and r.submission_id not in \
                            self.submission_ids_tokened:
                        new_submission_ids_to_token.add(
                            (r.submission_id,
                             make_timestamp(r.submission.token.timestamp)))

        new_s = len(new_submission_ids_to_score)
        old_s = len(self.submission_ids_to_score)
        new_t = len(new_submission_ids_to_token)
        old_t = len(self.submission_ids_to_token)
        logger.info("Submissions found to score/token: %d, %d." %
                    (new_s, new_t))
        if new_s + new_t > 0:
            self.submission_ids_to_score |= new_submission_ids_to_score
            self.submission_ids_to_token |= new_submission_ids_to_token
            if old_s + old_t == 0:
                self.add_timeout(self.score_old_submissions, None,
                                 0.5, immediately=False)

        # Run forever.
        return True
Example #24
    def get_status(self):
        """Returns a dict with info about the current status of all
        workers.

        return (dict): dict of info: current job, starting time,
                       number of errors, and additional data specified
                       in the job.

        """
        result = dict()
        for shard in self._worker.keys():
            s_time = self._start_time[shard]
            s_time = make_timestamp(s_time) if s_time is not None else None
            s_data = self._side_data[shard]
            s_data = (s_data[0], make_timestamp(s_data[1])) if s_data is not None else None

            result[str(shard)] = {
                "connected": self._worker[shard].connected,
                "job": self._job[shard],
                "start_time": s_time,
                "side_data": s_data,
            }
        return result
Example #25
    def send_score(self, submission):
        """Send the score for the given submission to all rankings.

        Put the submission and its score subchange in all the proxy
        queues for them to be sent to rankings.

        """
        submission_result = submission.get_result()

        # Data to send to remote rankings.
        submission_id = str(submission.id)
        submission_data = {
            "user": encode_id(submission.user.username),
            "task": encode_id(submission.task.name),
            "time": int(make_timestamp(submission.timestamp))}

        subchange_id = "%d%ss" % (make_timestamp(submission.timestamp),
                                  submission_id)
        subchange_data = {
            "submission": submission_id,
            "time": int(make_timestamp(submission.timestamp))}

        # XXX This check is probably useless.
        if submission_result is not None and submission_result.scored():
            # We're sending the unrounded score to RWS
            subchange_data["score"] = submission_result.score
            subchange_data["extra"] = \
                json.loads(submission_result.ranking_score_details)

        # Adding operations to the queue.
        for ranking in self.rankings:
            ranking.data_queue.put((ranking.SUBMISSION_TYPE,
                                    {submission_id: submission_data}))
            ranking.data_queue.put((ranking.SUBCHANGE_TYPE,
                                    {subchange_id: subchange_data}))

        self.scores_sent_to_rankings.add(submission.id)
Example #26
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        res = {
            'task': self.task.name,
            'timestamp': make_timestamp(self.timestamp),
            'files': [_file.export_to_dict()
                      for _file in self.files.itervalues()],
            'language': self.language,
            'token': self.token,
            'results': [_sr.export_to_dict()
                        for _, _sr in sorted(self.results)],
            }
        if self.token is not None:
            res['token'] = self.token.export_to_dict()
        return res
Example #27
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        res = {
            'task': self.task.name,
            'timestamp': make_timestamp(self.timestamp),
            'files':
            [_file.export_to_dict() for _file in self.files.itervalues()],
            'language': self.language,
            'token': self.token,
            'results':
            [_sr.export_to_dict() for _, _sr in sorted(self.results)],
        }
        if self.token is not None:
            res['token'] = self.token.export_to_dict()
        return res
Example #28
    def search_jobs_not_done(self):
        """Look in the database for submissions that have not been
        scored for no good reasons. Put the missing job in the queue.

        """
        # Do this only if we are not still loading old submissions
        # (from the start of the service).
        if self.scoring_old_submission:
            return True

        with SessionGen(commit=False) as session:
            contest = session.query(Contest).\
                      filter_by(id=self.contest_id).first()

            new_submission_ids_to_score = set([])
            new_submission_ids_to_token = set([])
            for submission in contest.get_submissions():
                if (submission.evaluated()
                    or submission.compilation_outcome == "fail") \
                        and submission.id not in self.submission_ids_scored:
                    new_submission_ids_to_score.add(submission.id)
                if submission.tokened() \
                        and submission.id not in self.submission_ids_tokened:
                    new_submission_ids_to_token.add(
                        (submission.id,
                         make_timestamp(submission.token.timestamp)))

        new_s = len(new_submission_ids_to_score)
        old_s = len(self.submission_ids_to_score)
        new_t = len(new_submission_ids_to_token)
        old_t = len(self.submission_ids_to_token)
        logger.info("Submissions found to score/token: %d, %d." %
                    (new_s, new_t))
        if new_s + new_t > 0:
            self.submission_ids_to_score |= new_submission_ids_to_score
            self.submission_ids_to_token |= new_submission_ids_to_token
            if old_s + old_t == 0:
                self.add_timeout(self.score_old_submissions, None,
                                 0.5, immediately=False)

        # Run forever.
        return True
Example #29
    def new_evaluation(self, submission_id):
        """This RPC inform ScoringService that ES finished the
        evaluation for a submission.

        submission_id (int): the id of the submission that changed.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.critical("[action_finished] Couldn't find "
                                " submission %d in the database" %
                                submission_id)
                return
            if submission.user.hidden:
                return

            # Assign score to the submission.
            scorer = self.scorers[submission.task_id]
            scorer.add_submission(submission_id, submission.timestamp,
                                  submission.user.username,
                                  dict((ev.num,
                                        {"outcome": float(ev.outcome),
                                         "text": ev.text,
                                         "time": ev.execution_time,
                                         "memory": ev.memory_used})
                                       for ev in submission.evaluations),
                                  submission.tokened())

            # Mark submission as scored.
            self.submission_ids_scored.add(submission_id)

            # Filling submission's score info in the db.
            submission.score = scorer.pool[submission_id]["score"]
            submission.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission.score_details = scorer.pool[submission_id]["details"]
            submission.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))}
            subchange_id = "%s%ss" % (int(make_timestamp(submission.timestamp)), submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.timestamp)),
                "score": submission.score,
                "extra": submission.ranking_score_details}

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(ranking, dict())[encode_id(submission_id)] = submission_put_data
                self.subchange_queue.setdefault(ranking, dict())[encode_id(subchange_id)] = subchange_put_data
Example #30
    def new_evaluation(self, submission_id, dataset_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.
        dataset_id (int): the id of the dataset used.

        """
        with SessionGen(commit=True) as session:
            submission_result = SubmissionResult.get_from_id(
                (submission_id, dataset_id), session)

            if submission_result is None:
                logger.error("[new_evaluation] Couldn't find "
                             " submission %d in the database." %
                             submission_id)
                raise KeyError
            submission = submission_result.submission

            if not submission_result.compiled():
                logger.warning("[new_evaluation] Submission %d(%d) "
                               "is not compiled." % (
                               submission_id, dataset_id))
                return
            elif submission_result.compilation_outcome == "ok" \
                    and not submission_result.evaluated():
                logger.warning("[new_evaluation] Submission %d(%d) compiled "
                               "correctly but is not evaluated."
                               % (submission_id, dataset_id))
                return
            elif submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            # Assign score to the submission.
            scorer = self.scorers.get(dataset_id)
            if scorer is None:
                # We may get here because the scorer threw an exception whilst
                # initializing, or we may be scoring for the wrong contest.
                logger.error(
                    "Not scoring submission %d because scorer is broken." %
                        submission_id)
                return

            try:
                scorer.add_submission(
                    submission_id, dataset_id,
                    submission.timestamp,
                    submission.user.username,
                    submission_result.evaluated(),
                    dict((ev.num,
                          {"outcome": ev.outcome,
                           "text": ev.text,
                           "time": ev.execution_time,
                           "memory": ev.memory_used})
                         for ev in submission_result.evaluations),
                    submission.tokened())
            except:
                logger.error("Failed to score submission %d. "
                    "Scorer threw an exception: %s" % (
                        submission_id, traceback.format_exc()))
                return

            # Mark submission as scored.
            self.submission_ids_scored.add((submission_id, dataset_id))

            # Filling submission's score info in the db.
            submission_result.score = scorer.pool[submission_id]["score"]
            submission_result.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission_result.score_details = \
                scorer.pool[submission_id]["details"]
            submission_result.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission_result.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            try:
                ranking_score_details = json.loads(
                        submission_result.ranking_score_details)
            except (json.decoder.JSONDecodeError, TypeError):
                # It may be blank.
                ranking_score_details = None

            # If we are not a live dataset then we can bail out here, and avoid
            # updating RWS.
            if dataset_id != submission.task.active_dataset_id:
                return

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))}
            subchange_id = "%s%ss" % \
                (int(make_timestamp(submission.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.timestamp)),
                # We're sending the unrounded score to RWS
                "score": submission_result.score}
            if ranking_score_details is not None:
                subchange_put_data["extra"] = ranking_score_details

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(submission_id)] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
Example #31
    def export_object(self, obj):
        """Export the given object, returning a JSON-encodable dict

        The returned dict will contain a "_class" item (the name of the
        class of the given object), an item for each column property
        (with a value properly translated to a JSON-compatible type)
        and an item for each relationship property (which will be an ID
        or a collection of IDs).

        The IDs used in the exported dict aren't related to the ones
        used in the DB: they are newly generated and their scope is
        limited to the exported file only. They are shared among all
        classes (that is, two objects can never share the same ID, even
        if they are of different classes). The contest will have ID 0.

        If, when exporting the relationship, we find an object without
        an ID we generate a new ID, assign it to the object and append
        the object to the queue of objects to export.

        The self.skip_submissions flag controls whether we export
        submissions (and all other objects that can be reached only by
        passing through a submission) or not.

        """
        cls = type(obj)

        data = {"_class": cls.__name__}

        for prp in cls._col_props:
            col = prp.columns[0]
            col_type = type(col.type)

            val = getattr(obj, prp.key)
            if col_type in [Boolean, Integer, Float, String]:
                data[prp.key] = val
            elif col_type is DateTime:
                data[prp.key] = \
                    make_timestamp(val) if val is not None else None
            elif col_type is Interval:
                data[prp.key] = \
                    val.total_seconds() if val is not None else None
            else:
                raise RuntimeError("Unknown SQLAlchemy column type: %s" %
                                   col_type)

        for prp in cls._rel_props:
            other_cls = prp.mapper.class_

            # Skip submissions if requested
            if self.skip_submissions and other_cls is Submission:
                continue

            val = getattr(obj, prp.key)
            if val is None:
                data[prp.key] = None
            elif isinstance(val, other_cls):
                data[prp.key] = self.get_id(val)
            elif isinstance(val, list):
                data[prp.key] = list(self.get_id(i) for i in val)
            elif isinstance(val, dict):
                data[prp.key] = dict(
                    (k, self.get_id(v)) for k, v in val.iteritems())
            else:
                raise RuntimeError(
                    "Unknown SQLAlchemy relationship type on %s: %s" %
                    (prp.key, type(val)))

        return data
Example #32
    def export_object(self, obj):
        """Export the given object, returning a JSON-encodable dict.

        The returned dict will contain a "_class" item (the name of the
        class of the given object), an item for each column property
        (with a value properly translated to a JSON-compatible type)
        and an item for each relationship property (which will be an ID
        or a collection of IDs).

        The IDs used in the exported dict aren't related to the ones
        used in the DB: they are newly generated and their scope is
        limited to the exported file only. They are shared among all
        classes (that is, two objects can never share the same ID, even
        if they are of different classes).

        If, when exporting the relationship, we find an object without
        an ID we generate a new ID, assign it to the object and append
        the object to the queue of objects to export.

        The self.skip_submissions flag controls whether we export
        submissions (and all other objects that can be reached only by
        passing through a submission) or not.

        """

        cls = type(obj)

        data = {"_class": cls.__name__}

        for prp in cls._col_props:
            col, = prp.columns
            col_type = type(col.type)

            val = getattr(obj, prp.key)
            if col_type in [Boolean, Integer, Float, String]:
                data[prp.key] = val
            elif col_type is DateTime:
                data[prp.key] = \
                    make_timestamp(val) if val is not None else None
            elif col_type is Interval:
                data[prp.key] = \
                    val.total_seconds() if val is not None else None
            else:
                raise RuntimeError("Unknown SQLAlchemy column type: %s"
                                   % col_type)

        for prp in cls._rel_props:
            other_cls = prp.mapper.class_

            # Skip submissions if requested
            if self.skip_submissions and other_cls is Submission:
                continue

            # Skip user_tests if requested
            if self.skip_user_tests and other_cls is UserTest:
                continue

            val = getattr(obj, prp.key)
            if val is None:
                data[prp.key] = None
            elif isinstance(val, other_cls):
                data[prp.key] = self.get_id(val)
            elif isinstance(val, list):
                data[prp.key] = list(self.get_id(i) for i in val)
            elif isinstance(val, dict):
                data[prp.key] = \
                    dict((k, self.get_id(v)) for k, v in val.iteritems())
            else:
                raise RuntimeError("Unknown SQLAlchemy relationship type: %s"
                                   % type(val))

        return data
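Examples #31 and #32 dispatch on the SQLAlchemy column type when exporting (DateTime via make_timestamp, Interval via total_seconds, plain types passed through); an importer would presumably apply the inverse mapping. A hypothetical sketch of that inverse dispatch:

# Hypothetical inverse of the column-type dispatch used by export_object;
# illustration only, not the actual importer.
from datetime import datetime, timedelta
from sqlalchemy import Boolean, DateTime, Float, Integer, Interval, String


def decode_column_value(col_type, val):
    # Map an exported JSON value back to the Python value the column expects.
    if val is None:
        return None
    if col_type is DateTime:
        return datetime.utcfromtimestamp(val)
    if col_type is Interval:
        return timedelta(seconds=val)
    if col_type in [Boolean, Integer, Float, String]:
        return val
    raise RuntimeError("Unknown SQLAlchemy column type: %s" % col_type)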
Example #33
File: Contest.py  Project: anantha89/cms
    def export_to_dict(self):
        """Return object data as a dictionary.

        """
        return {"timestamp": make_timestamp(self.timestamp), "subject": self.subject, "text": self.text}
Example #34
    def new_evaluation(self, submission_id, dataset_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.
        dataset_id (int): the id of the dataset to use.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)

            if submission is None:
                logger.error("[new_evaluation] Couldn't find submission %d "
                             "in the database." % submission_id)
                raise ValueError

            dataset = Dataset.get_from_id(dataset_id, session)

            if dataset is None:
                logger.error("[new_evaluation] Couldn't find dataset %d "
                             "in the database." % dataset_id)
                raise ValueError

            submission_result = submission.get_result(dataset)

            if submission_result is None or not submission_result.compiled():
                logger.warning("[new_evaluation] Submission %d(%d) is not "
                               "compiled." %
                               (submission_id, dataset_id))
                return
            elif submission_result.compilation_outcome == "ok" and \
                    not submission_result.evaluated():
                logger.warning("[new_evaluation] Submission %d(%d) compiled "
                               "correctly but is not evaluated." %
                               (submission_id, dataset_id))
                return
            elif submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            # Assign score to the submission.
            scorer = self.scorers[dataset_id]
            scorer.add_submission(submission_id, submission.timestamp,
                                  submission.user.username,
                                  submission_result.evaluated(),
                                  dict((ev.codename,
                                        {"outcome": ev.outcome,
                                         "text": ev.text,
                                         "time": ev.execution_time,
                                         "memory": ev.memory_used})
                                       for ev in submission_result.evaluations),
                                  submission.tokened())

            # Mark submission as scored.
            self.submission_results_scored.add((submission_id, dataset_id))

            # Filling submission's score info in the db.
            submission_result.score = \
                scorer.pool[submission_id]["score"]
            submission_result.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission_result.score_details = \
                scorer.pool[submission_id]["details"]
            submission_result.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission_result.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            try:
                ranking_score_details = json.loads(
                        submission_result.ranking_score_details)
            except (TypeError, ValueError):
                # It may be blank.
                ranking_score_details = None

            # If we are not a live dataset then we can bail out here,
            # and avoid updating RWS.
            if dataset is not submission.task.active_dataset:
                return

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))}
            subchange_id = "%s%ss" % \
                (int(make_timestamp(submission.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(str(submission_id)),
                "time": int(make_timestamp(submission.timestamp)),
                # We're sending the unrounded score to RWS
                "score": submission_result.score}
            if ranking_score_details is not None:
                subchange_put_data["extra"] = ranking_score_details

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(str(submission_id))] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
Example #35
    def initialize(self, ranking, log_bridge):
        """Send to the ranking all the data that are supposed to be
        sent before the contest: contest, users, tasks. No support for
        teams, flags and faces.

        ranking ((str, str, str)): protocol, address and authorization
                                   string of ranking server.
        log_bridge (LogBridge): the bridge to use to write logs.

        raise CannotSendError in case of communication errors.

        """
        log_bridge.info("Initializing ranking %s." % ranking[1])

        try:
            connection = get_connection(ranking[:2], log_bridge)
            auth = ranking[2]

            with SessionGen(commit=False) as session:
                contest = Contest.get_from_id(self.contest_id, session)

                if contest is None:
                    log_bridge.error("Received request for unexistent contest "
                                   "id %s." % self.contest_id)
                    raise KeyError
                contest_name = contest.name
                contest_url = "/contests/%s" % encode_id(contest_name)
                contest_data = {
                    "name": contest.description,
                    "begin": int(make_timestamp(contest.start)),
                    "end": int(make_timestamp(contest.stop)),
                    "score_precision": contest.score_precision}

                users = dict((encode_id(user.username),
                              {"f_name": user.first_name,
                               "l_name": user.last_name,
                               "team": None})
                             for user in contest.users
                             if not user.hidden)

                tasks = dict((encode_id(task.name),
                              {"name": task.title,
                               "contest": encode_id(contest.name),
                               "max_score": 100.0,
                               "score_precision": task.score_precision,
                               "extra_headers": [],
                               "order": task.num,
                               "short_name": task.name})
                             for task in contest.tasks)

            safe_put_data(connection, contest_url, contest_data, auth,
                          "sending contest %s to ranking %s" %
                          (contest_name, ranking[1]), log_bridge)

            safe_put_data(connection, "/users/", users, auth,
                          "sending users to ranking %s" % ranking[1],
                          log_bridge)

            safe_put_data(connection, "/tasks/", tasks, auth,
                          "sending tasks to ranking %s" % ranking[1],
                          log_bridge)

        except CannotSendError as error:
            # Delete it to make get_connection try to create it again.
            del active_connections[ranking[1]]
            raise error
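
For reference, the `ranking` argument is just a 3-tuple as described in the docstring; a hypothetical value (address and credentials invented for illustration, not taken from the code above) might look like this:

# Hypothetical ranking tuple: (protocol, address, authorization string).
ranking = ("http", "localhost:8890", "usern4me:passw0rd")
protocol_and_address = ranking[:2]   # passed to get_connection()
auth = ranking[2]                    # passed to safe_put_data()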
Example #36
0
    def get(self, submission_id):
        """Retrieve a single submission.

        Query the database for the submission with the given ID, and
        the dataset given as query parameter (or the active one).

        submission_id (int): the ID of a submission.

        """
        # If it's not an integer we will ignore it. But if it is an
        # integer and no dataset with that ID exists, we'll raise a 404.
        dataset_id = local.request.args.get("dataset_id", type=int)

        with SessionGen() as local.session:
            # Load the submission, and check for existence.
            submission = Submission.get_from_id(submission_id, local.session)

            if submission is None:
                raise NotFound()

            # Load the dataset.
            if dataset_id is not None:
                dataset = Dataset.get_from_id(dataset_id, local.session)
                if dataset is None:
                    raise NotFound()
            else:
                q = local.session.query(Dataset)
                q = q.join(Task, Dataset.id == Task.active_dataset_id)
                q = q.filter(Task.id == submission.task_id)
                dataset = q.one()

            # Get the result (will fire a query).
            submission_result = submission.get_result(dataset)

            # Get the ScoreType (will fire a query for testcases).
            score_type = get_score_type(dataset=dataset)

            # Produce the data structure.
            s = submission
            sr = submission_result

            result = {
                '_ref': "%s" % s.id,
                'dataset': '%s' % dataset.id,
                'user': "******" % s.user_id,
                'task': "%s" % s.task_id,
                'timestamp': make_timestamp(s.timestamp),
                'language': s.language,
                # No files, no token: AWS doesn't need them.
            }

            if sr is not None:
                result.update({
                    'compilation_outcome':
                        {"ok": True,
                         "fail": False}.get(sr.compilation_outcome),
                    'compilation_text':
                        format_status_text(sr.compilation_text),
                    'compilation_tries': sr.compilation_tries,
                    'compilation_stdout': sr.compilation_stdout,
                    'compilation_stderr': sr.compilation_stderr,
                    'compilation_time': sr.compilation_time,
                    'compilation_wall_clock_time':
                        sr.compilation_wall_clock_time,
                    'compilation_memory': sr.compilation_memory,
                    'compilation_shard': sr.compilation_shard,
                    'compilation_sandbox': sr.compilation_sandbox,
                    'evaluation_outcome':
                        {"ok": True}.get(sr.evaluation_outcome),
                    'evaluation_tries': sr.evaluation_tries,
                    'evaluations': dict((ev.codename, {
                        'codename': ev.codename,
                        'outcome': ev.outcome,
                        'text': format_status_text(ev.text),
                        'execution_time': ev.execution_time,
                        'execution_wall_clock_time':
                            ev.execution_wall_clock_time,
                        'execution_memory': ev.execution_memory,
                        'evaluation_shard': ev.evaluation_shard,
                        'evaluation_sandbox': ev.evaluation_sandbox,
                    }) for ev in sr.evaluations),
                    'score': sr.score,
                    'max_score': score_type.max_score,
                    'score_details':
                        score_type.get_html_details(sr.score_details)
                        if sr.score is not None else None,
                })
            else:
                # Just copy all fields with None.
                result.update({
                    'compilation_outcome': None,
                    'compilation_text': None,
                    'compilation_tries': 0,
                    'compilation_stdout': None,
                    'compilation_stderr': None,
                    'compilation_time': None,
                    'compilation_wall_clock_time': None,
                    'compilation_memory': None,
                    'compilation_shard': None,
                    'compilation_sandbox': None,
                    'evaluation_outcome': None,
                    'evaluation_tries': 0,
                    'evaluations': {},
                    'score': None,
                    'max_score': score_type.max_score,
                    'score_details': None,
                })

        # Encode and send.
        local.response.mimetype = "application/json"
        local.response.data = json.dumps(result)
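
A hedged sketch of consuming this handler over HTTP with the `requests` library; the host, port and URL layout are assumptions for illustration, not something the snippet above defines:

# Sketch: fetch one submission, optionally pinning a dataset via the
# "dataset_id" query parameter handled above. URL and port are made up.
import requests

resp = requests.get("http://localhost:8889/submissions/42",
                    params={"dataset_id": 7})
resp.raise_for_status()
data = resp.json()
# "score" stays None until a result exists and has been scored.
print(data["score"], data["max_score"])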
Example #37
0
# The rules describing how to encode/decode column properties to/from
# JSON. They're indexed by the SQLAlchemy column type and their values
# are in the form (python type(s), decoder, encoder).
_TYPE_MAP = {
    Boolean: (bool,
              lambda x: x, lambda x: x),
    Integer: (six.integer_types,
              lambda x: x, lambda x: x),
    Float: (six.integer_types + (float,),
            lambda x: x, lambda x: x),
    String: (six.text_type,
             lambda x: x.encode('latin1'), lambda x: x.decode('latin1')),
    Unicode: (six.text_type,
              lambda x: x, lambda x: x),
    DateTime: (six.integer_types + (float,),
               lambda x: make_datetime(x), lambda x: make_timestamp(x)),
    Interval: (six.integer_types + (float,),
               lambda x: timedelta(seconds=x), lambda x: x.total_seconds()),
}


# All the URLs we need for each database class, as a template. URLs are
# the ones described in the module-level docstring, whereas endpoints
# are in the form "<class name>/<action>".
_ENTITY_TEMPLATE = RuleTemplate([
    EndpointPrefix('$name/', [
        Submount('/$path', [
            Rule("/", methods=["GET"], endpoint="list"),
            Rule("/", methods=["POST"], endpoint="create"),
            Rule("/$pkey", methods=["GET"], endpoint="retrieve"),
            Rule("/$pkey/<prp_key>", methods=["GET"], endpoint="sublist"),
Example #38
0
    def new_evaluation(self, submission_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.error("[new_evaluation] Couldn't find "
                             " submission %d in the database." %
                             submission_id)
                raise KeyError
            elif not submission.compiled():
                logger.warning("[new_evaluation] Submission %d "
                               "is not compiled." % submission_id)
                return
            elif submission.compilation_outcome == "ok" \
                    and not submission.evaluated():
                logger.warning("[new_evaluation] Submission %d compiled "
                               "correctly but is not evaluated."
                               % submission_id)
                return
            elif submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            # Assign score to the submission.
            scorer = self.scorers[submission.task_id]
            scorer.add_submission(submission_id, submission.timestamp,
                                  submission.user.username,
                                  submission.evaluated(),
                                  dict((ev.num,
                                        {"outcome": ev.outcome,
                                         "text": ev.text,
                                         "time": ev.execution_time,
                                         "memory": ev.memory_used})
                                       for ev in submission.evaluations),
                                  submission.tokened())

            # Mark submission as scored.
            self.submission_ids_scored.add(submission_id)

            # Filling submission's score info in the db.
            submission.score = scorer.pool[submission_id]["score"]
            submission.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission.score_details = scorer.pool[submission_id]["details"]
            submission.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))}
            subchange_id = "%s%ss" % \
                (int(make_timestamp(submission.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.timestamp)),
                # We're sending the unrounded score to RWS
                "score": submission.score,
                "extra": submission.ranking_score_details}

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(submission_id)] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
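
The `subchange_id` built above is simply the integer UNIX timestamp of the submission, concatenated with the submission id and a literal "s" suffix; a tiny sketch with placeholder values:

# Sketch of the subchange identifier format; 1321000000 stands in for
# int(make_timestamp(submission.timestamp)).
timestamp = 1321000000
submission_id = 42
subchange_id = "%s%ss" % (timestamp, submission_id)
print(subchange_id)  # "132100000042s"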
Example #39
0
    def new_evaluation(self, submission_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)
            if submission is None:
                logger.error("[new_evaluation] Couldn't find "
                             " submission %d in the database." % submission_id)
                raise KeyError
            elif not submission.compiled():
                logger.warning("[new_evaluation] Submission %d "
                               "is not compiled." % submission_id)
                return
            elif submission.compilation_outcome == "ok" \
                    and not submission.evaluated():
                logger.warning("[new_evaluation] Submission %d compiled "
                               "correctly but is not evaluated." %
                               submission_id)
                return
            elif submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            # Assign score to the submission.
            scorer = self.scorers[submission.task_id]
            scorer.add_submission(
                submission_id, submission.timestamp, submission.user.username,
                submission.evaluated(),
                dict((ev.num, {
                    "outcome": ev.outcome,
                    "text": ev.text,
                    "time": ev.execution_time,
                    "memory": ev.memory_used
                }) for ev in submission.evaluations), submission.tokened())

            # Mark submission as scored.
            self.submissions_scored.add(submission_id)

            # Filling submission's score info in the db.
            submission.score = scorer.pool[submission_id]["score"]
            submission.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission.score_details = scorer.pool[submission_id]["details"]
            submission.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))
            }
            subchange_id = "%s%ss" % \
                (int(make_timestamp(submission.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(submission_id),
                "time": int(make_timestamp(submission.timestamp)),
                # We're sending the unrounded score to RWS
                "score": submission.score,
                "extra": submission.ranking_score_details
            }

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(submission_id)] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
Example #40
0
    def list(self):
        """Produce a list of submissions.

        Filter them using the given query parameters.

        """
        # XXX When writing this method we aimed for efficiency: we
        # wanted it to execute as few queries as possible and not to
        # transmit more data on the wire than strictly necessary.
        # Unfortunately, this made the method rather complex, and for
        # medium-sized contests there seems to be no perceivable
        # difference from a simple enormous joined-load query.

        # Load query parameters. We simply drop the ones we don't
        # understand (i.e. those that aren't integers).
        contest_ids = local.request.args.getlist("contest_id", type=int)
        user_ids = local.request.args.getlist("user_id", type=int)
        task_ids = local.request.args.getlist("task_id", type=int)
        dataset_ids = local.request.args.getlist("dataset_id", type=int)

        with SessionGen() as local.session:
            # Fetch the datasets that have been requested. This has to
            # be done first as it's needed for the only check that can
            # make the request fail (i.e. it could allow us to avoid
            # useless queries).
            if len(dataset_ids) > 0:
                q = local.session.query(Dataset)
                q = q.filter(Dataset.id.in_(dataset_ids))
                datasets = q.all()
            else:
                datasets = list()

            # Check if all parent tasks are distinct. This check also
            # catches the case of a non-existing dataset.
            if len(set(d.task_id for d in datasets)) < len(dataset_ids):
                raise BadRequest()

            # Identify the submissions we're interested in. We have the
            # files and tokens eagerly loaded too. With joinedload they
            # are loaded in the same query. This is perfectly fine for
            # tokens but not as much for files because if there's more
            # than one the entire result row will be duplicated. Using
            # subqueryload could improve that, by firing another query.
            # If we tried to also load results then, depending on how
            # we did that, we would either get them all (even the ones
            # we don't care about) or we wouldn't get the submissions
            # that have no associated result. And, also, we have yet
            # to determine all the datasets we want!
            q = local.session.query(Submission)
            if len(contest_ids) > 0:
                q = q.join(Submission.task)
                q = q.filter(Task.contest_id.in_(contest_ids))
            if len(user_ids) > 0:
                q = q.filter(Submission.user_id.in_(user_ids))
            if len(task_ids) > 0:
                q = q.filter(Submission.task_id.in_(task_ids))
            q = q.options(joinedload(Submission.files))
            q = q.options(joinedload(Submission.token))
            submissions = q.all()

            # Determine the IDs of tasks for which we need a dataset.
            tasks_need_dataset = set(s.task_id for s in submissions)
            # Remove the IDs of tasks for which we have a dataset.
            tasks_need_dataset -= set(d.task_id for d in datasets)

            # Fetch the datasets we're missing, picking the active one
            # of the tasks.
            q = local.session.query(Dataset)
            q = q.join(Task, Dataset.id == Task.active_dataset_id)
            q = q.filter(Task.id.in_(tasks_need_dataset))
            datasets.extend(q.all())

            # Determine the final list of submission and dataset IDs.
            dataset_ids = list(d.id for d in datasets)
            submission_ids = list(s.id for s in submissions)

            # We can now get the submission results.
            # We don't load executables and evaluations because we do
            # not need them. If we did, it'd probably be more efficient
            # to use a subqueryload than a joinedload.
            q = local.session.query(SubmissionResult)
            q = q.filter(SubmissionResult.submission_id.in_(submission_ids))
            q = q.filter(SubmissionResult.dataset_id.in_(dataset_ids))
            submission_results = q.all()

            # Index submission results and datasets for easy access.
            # We're sure we won't have duplicated entries.
            dataset_map = dict((d.task_id, d) for d in datasets)
            submission_results_map = dict(
                (sr.submission_id, sr) for sr in submission_results)

            # As we need testcases to initialize ScoreTypes, load them
            # in a single batch. This query is independent from the
            # previous ones but cannot be run in parallel as they need
            # to belong to the same Session, and therefore to the same
            # connection, that cannot be shared among greenlets.
            q = local.session.query(Testcase)
            q = q.filter(Testcase.dataset_id.in_(dataset_ids))
            testcases = q.all()

            # Initialize ScoreTypes. We have to pick testcases manually
            # because otherwise SQLAlchemy will fire another query.
            score_types = dict()
            for d in datasets:
                public_testcases = dict((tc.codename, tc.public)
                                        for tc in testcases
                                        if tc.dataset_id == d.id)
                score_types[d.id] = get_score_type(d.score_type,
                                                   d.score_type_parameters,
                                                   public_testcases)

            # Produce the data structure.
            result = list()

            for s in submissions:
                dataset = dataset_map[s.task_id]
                item = {
                    '_ref': "%s" % s.id,
                    'dataset': "%s" % dataset.id,
                    'user': "******" % s.user_id,
                    'task': "%s" % s.task_id,
                    'timestamp': make_timestamp(s.timestamp),
                    'language': s.language,
                    'files': dict((k, v.digest)
                                  for k, v in s.files.iteritems()),
                    'token': make_timestamp(s.token.timestamp)
                             if s.token is not None else None,
                }

                score_type = score_types[dataset.id]
                sr = submission_results_map.get(s.id)

                if sr is not None:
                    item.update({
                        'compilation_outcome':
                            {"ok": True,
                             "fail": False}.get(sr.compilation_outcome),
                        'compilation_tries': sr.compilation_tries,
                        'evaluation_outcome':
                            {"ok": True}.get(sr.evaluation_outcome),
                        'evaluation_tries': sr.evaluation_tries,
                        'score': sr.score,
                        'max_score': score_type.max_score,
                    })
                else:
                    item.update({
                        'compilation_outcome': None,
                        'compilation_tries': 0,
                        'evaluation_outcome': None,
                        'evaluation_tries': 0,
                        'score': None,
                        'max_score': score_type.max_score,
                    })

                result.append(item)

        # Encode and send.
        local.response.mimetype = "application/json"
        local.response.data = json.dumps(result)
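
The query-parameter handling at the top of this method relies on werkzeug dropping values that fail the `type=int` conversion; a minimal sketch of that behaviour, assuming `local.request.args` is a standard werkzeug `MultiDict`:

# Sketch: getlist() with type=int silently skips values it cannot convert.
from werkzeug.datastructures import MultiDict

args = MultiDict([("user_id", "3"), ("user_id", "abc"), ("user_id", "7")])
print(args.getlist("user_id", type=int))  # [3, 7] -- "abc" is dropped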
Example #41
0
    def initialize(self, ranking, log_file):
        """Send to the ranking all the data that are supposed to be
        sent before the contest: contest, users, tasks. No support for
        teams, flags and faces.

        ranking ((str, str, str)): protocol, address and authorization
                                   string of ranking server.
        log_file (writable file object): the file to use to write logs.

        raise CannotSendError in case of communication errors.

        """
        #logger.info("Initializing ranking %s." % ranking[1])
        log_file.write("Initializing ranking %s.\n" % ranking[1])
        log_file.flush()

        try:
            connection = get_connection(ranking[:2], log_file)
            auth = ranking[2]

            with SessionGen(commit=False) as session:
                contest = Contest.get_from_id(self.contest_id, session)
                if contest is None:
                    #logger.error("Received request for unexistent contest id %s." %
                    #             self.contest_id)
                    log_file.write("Received request for unexistent contest id %s.\n" %
                                   self.contest_id)
                    log_file.flush()
                    raise KeyError
                contest_name = contest.name
                contest_url = "/contests/%s" % encode_id(contest_name)
                contest_data = {
                    "name": contest.description,
                    "begin": int(make_timestamp(contest.start)),
                    "end": int(make_timestamp(contest.stop))}

                users = dict((encode_id(user.username),
                              {"f_name": user.first_name,
                               "l_name": user.last_name,
                               "team": None})
                             for user in contest.users
                             if not user.hidden)

                tasks = dict((encode_id(task.name),
                              {"name": task.title,
                               "contest": encode_id(contest.name),
                               "max_score": 100.0,
                               "extra_headers": [],
                               "order": task.num,
                               "short_name": task.name})
                             for task in contest.tasks)

            safe_put_data(connection, contest_url, contest_data, auth,
                          "sending contest %s to ranking %s" % (contest_name, ranking[1]),
                          log_file)

            safe_put_data(connection, "/users/", users, auth,
                          "sending users to ranking %s" % ranking[1],
                          log_file)

            safe_put_data(connection, "/tasks/", tasks, auth,
                          "sending tasks to ranking %s" % ranking[1],
                          log_file)

        except CannotSendError as error:
            # Delete it to make get_connection try to create it again.
            del active_connections[ranking[1]]
            raise error