Example #1
    def build_job(self, session):
        """Produce the Job for this operation.

        Return the Job object that has to be sent to Workers to have
        them perform the operation this object describes.

        session (Session): the database session to use to fetch objects
            if necessary.

        return (Job): the job encoding of the operation, as understood
            by Workers and TaskTypes.

        """
        result = None
        dataset = Dataset.get_from_id(self.dataset_id, session)
        if self.type_ == ESOperation.COMPILATION:
            submission = Submission.get_from_id(self.object_id, session)
            result = CompilationJob.from_submission(submission, dataset)
        elif self.type_ == ESOperation.EVALUATION:
            submission = Submission.get_from_id(self.object_id, session)
            result = EvaluationJob.from_submission(
                submission, dataset, self.testcase_codename)
        elif self.type_ == ESOperation.USER_TEST_COMPILATION:
            user_test = UserTest.get_from_id(self.object_id, session)
            result = CompilationJob.from_user_test(user_test, dataset)
        elif self.type_ == ESOperation.USER_TEST_EVALUATION:
            user_test = UserTest.get_from_id(self.object_id, session)
            result = EvaluationJob.from_user_test(user_test, dataset)
        return result
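
A minimal usage sketch of the method above (hedged: the operation variable
stands for an ESOperation instance obtained elsewhere; SessionGen and
export_to_dict appear in later examples in this section):

# Sketch: build the Job for one operation and serialize it for a Worker.
# `operation` is assumed to be an ESOperation instance obtained elsewhere.
with SessionGen() as session:
    job = operation.build_job(session)
    if job is not None:
        job_dict = job.export_to_dict()  # the payload Workers understand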
Example #2
    def build_job(self, session):
        """Produce the Job for this operation.

        Return the Job object that has to be sent to Workers to have
        them perform the operation this object describes.

        session (Session): the database session to use to fetch objects
            if necessary.

        return (Job): the job encoding of the operation, as understood
            by Workers and TaskTypes.

        """
        result = None
        dataset = Dataset.get_from_id(self.dataset_id, session)
        if self.type_ == ESOperation.COMPILATION:
            submission = Submission.get_from_id(self.object_id, session)
            result = CompilationJob.from_submission(submission, dataset)
        elif self.type_ == ESOperation.EVALUATION:
            submission = Submission.get_from_id(self.object_id, session)
            result = EvaluationJob.from_submission(submission, dataset,
                                                   self.testcase_codename)
        elif self.type_ == ESOperation.USER_TEST_COMPILATION:
            user_test = UserTest.get_from_id(self.object_id, session)
            result = CompilationJob.from_user_test(user_test, dataset)
        elif self.type_ == ESOperation.USER_TEST_EVALUATION:
            user_test = UserTest.get_from_id(self.object_id, session)
            result = EvaluationJob.from_user_test(user_test, dataset)
        return result
Example #3
    def enqueue(self, operation, priority, timestamp, job=None):
        """Push an operation in the queue.

        Push an operation in the operation queue if the submission is
        not already in the queue or assigned to a worker.

        operation (ESOperation): the operation to put in the queue.
        priority (int): the priority of the operation.
        timestamp (datetime): the time of the submission.
        job (dict|None): the job associated; if None will be computed

        return (bool): True if pushed, False if not.

        """
        if job is None:
            with SessionGen() as session:
                dataset = Dataset.get_from_id(operation.dataset_id, session)
                if operation.for_submission():
                    object_ = Submission.get_from_id(operation.object_id,
                                                     session)
                else:
                    object_ = UserTest.get_from_id(operation.object_id,
                                                   session)
                job = Job.from_operation(operation, object_,
                                         dataset).export_to_dict()
        return self.queue_service.enqueue(
            operation=operation.to_list(),
            priority=priority,
            timestamp=(timestamp - EvaluationService.EPOCH).total_seconds(),
            job=job)
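
A note on the timestamp conversion above: EvaluationService.EPOCH is not
shown in these examples; assuming it is the Unix epoch, the expression
reduces to a plain seconds-since-epoch float, e.g.:

from datetime import datetime, timedelta

EPOCH = datetime(1970, 1, 1)  # assumption: mirrors EvaluationService.EPOCH
timestamp = EPOCH + timedelta(seconds=1234567890)
assert (timestamp - EPOCH).total_seconds() == 1234567890.0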
Example #4
    def acquire_worker(self, operations):
        """Tries to assign an operation to an available worker. If no workers
        are available then this returns None, otherwise this returns
        the chosen worker.

        operations ([ESOperation]): the operations to assign to a worker.

        return (int|None): None if no workers are available, the worker
            assigned to the operation otherwise.

        """
        # We look for an available worker.
        try:
            shard = self.find_worker(WorkerPool.WORKER_INACTIVE,
                                     require_connection=True,
                                     random_worker=True)
        except LookupError:
            self._workers_available_event.clear()
            return None

        # Then we record the assignment for later bookkeeping.
        self._add_operations(shard, operations)

        logger.debug("Worker %s acquired.", shard)
        self._start_time[shard] = make_datetime()

        with SessionGen() as session:
            jobs = []
            datasets = {}
            submissions = {}
            user_tests = {}
            for operation in operations:
                if operation.dataset_id not in datasets:
                    datasets[operation.dataset_id] = Dataset.get_from_id(
                        operation.dataset_id, session)
                object_ = None
                if operation.for_submission():
                    if operation.object_id not in submissions:
                        submissions[operation.object_id] = \
                            Submission.get_from_id(
                                operation.object_id, session)
                    object_ = submissions[operation.object_id]
                else:
                    if operation.object_id not in user_tests:
                        user_tests[operation.object_id] = \
                            UserTest.get_from_id(operation.object_id, session)
                    object_ = user_tests[operation.object_id]
                logger.info("Asking worker %s to `%s'.", shard, operation)

                jobs.append(
                    Job.from_operation(operation, object_,
                                       datasets[operation.dataset_id]))
            job_group_dict = JobGroup(jobs).export_to_dict()

        self._worker[shard].execute_job_group(
            job_group_dict=job_group_dict,
            callback=self._service.action_finished,
            plus=shard)
        return shard
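
The per-id dictionaries above (datasets, submissions, user_tests) are a
simple memoization pattern that avoids fetching the same row repeatedly
within one session. The same idea, isolated into a hypothetical helper
that is not part of CMS:

def get_cached(cache, key, fetch):
    # Return cache[key], computing and storing it via fetch(key) on a miss.
    if key not in cache:
        cache[key] = fetch(key)
    return cache[key]

# Usage sketch inside the loop above:
#   dataset = get_cached(datasets, operation.dataset_id,
#                        lambda did: Dataset.get_from_id(did, session))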
Example #5
    def acquire_worker(self, operations):
        """Tries to assign an operation to an available worker. If no workers
        are available then this returns None, otherwise this returns
        the chosen worker.

        operations ([ESOperation]): the operations to assign to a worker.

        return (int|None): None if no workers are available, the worker
            assigned to the operation otherwise.

        """
        # We look for an available worker.
        try:
            shard = self.find_worker(WorkerPool.WORKER_INACTIVE,
                                     require_connection=True,
                                     random_worker=True)
        except LookupError:
            self._workers_available_event.clear()
            return None

        # Then we record the assignment for later bookkeeping.
        self._add_operations(shard, operations)

        logger.debug("Worker %s acquired.", shard)
        self._start_time[shard] = make_datetime()

        with SessionGen() as session:
            jobs = []
            datasets = {}
            submissions = {}
            user_tests = {}
            for operation in operations:
                if operation.dataset_id not in datasets:
                    datasets[operation.dataset_id] = Dataset.get_from_id(
                        operation.dataset_id, session)
                object_ = None
                if operation.for_submission():
                    if operation.object_id not in submissions:
                        submissions[operation.object_id] = \
                            Submission.get_from_id(
                                operation.object_id, session)
                    object_ = submissions[operation.object_id]
                else:
                    if operation.object_id not in user_tests:
                        user_tests[operation.object_id] = \
                            UserTest.get_from_id(operation.object_id, session)
                    object_ = user_tests[operation.object_id]
                logger.info("Asking worker %s to `%s'.", shard, operation)

                jobs.append(Job.from_operation(
                    operation, object_, datasets[operation.dataset_id]))
            job_group_dict = JobGroup(jobs).export_to_dict()

        self._worker[shard].execute_job_group(
            job_group_dict=job_group_dict,
            callback=self._service.action_finished,
            plus=shard)
        return shard
Example #6
    @staticmethod
    def from_operations(operations, session):
        """Build a JobGroup from a list of operations.

        operations ([ESOperation]): the operations to build Jobs for.
        session (Session): the database session to use to fetch objects.

        return (JobGroup): the JobGroup with one Job per operation.

        """
        jobs = []
        for operation in operations:
            # The get_from_id method loads from the instance map (if the
            # object exists there), which thus acts as a cache.
            if operation.for_submission():
                object_ = Submission.get_from_id(operation.object_id, session)
            else:
                object_ = UserTest.get_from_id(operation.object_id, session)
            dataset = Dataset.get_from_id(operation.dataset_id, session)

            jobs.append(Job.from_operation(operation, object_, dataset))
        return JobGroup(jobs)
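
The comment above relies on the SQLAlchemy identity map: within a single
session, fetching the same id twice yields the same instance. A sketch of
the guarantee being assumed (not self-contained: Submission and session
come from CMS as in the snippet above; the id 42 is hypothetical):

first = Submission.get_from_id(42, session)
second = Submission.get_from_id(42, session)
assert first is second  # the identity map returns the cached instance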
Example #7
    def add_user_test(self, task=None, participation=None, **kwargs):
        """Add a user test."""
        task = task if task is not None else self.add_task()
        participation = participation \
            if participation is not None \
            else self.add_participation(contest=task.contest)
        assert task.contest == participation.contest
        args = {
            "task": task,
            "participation": participation,
            "input": unique_digest(),
            "timestamp": (task.contest.start + timedelta(0, unique_long_id())),
        }
        args.update(kwargs)
        user_test = UserTest(**args)
        self.session.add(user_test)
        return user_test
Example #8
    def new_user_test(self, user_test_id):
        """This RPC prompts ES of the existence of a new user test. ES
        takes takes the right countermeasures, i.e., it schedules it
        for compilation.

        user_test_id (int): the id of the new user test.

        returns (bool): True if everything went well.

        """
        with SessionGen() as session:
            user_test = UserTest.get_from_id(user_test_id, session)
            if user_test is None:
                logger.error("[new_user_test] Couldn't find user test %d "
                             "in the database.", user_test_id)
                return False

            self.user_test_enqueue_operations(user_test)

            session.commit()
            return True
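
For reference, CWS invokes this RPC right after committing a new user
test, as the handler example later in this section shows:

# From the CWS handler, right after self.sql_session.commit():
self.service.evaluation_service.new_user_test(user_test_id=user_test.id)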
Example #9
def accept_user_test(sql_session, file_cacher, participation, task, timestamp,
                     tornado_files, language_name):
    """Process a contestant's request to submit a user test.

    sql_session (Session): the DB session to use to fetch and add data.
    file_cacher (FileCacher): the file cacher to use to store the files.
    participation (Participation): the contestant who is submitting.
    task (Task): the task on which they are submitting.
    timestamp (datetime): the moment in time they submitted at.
    tornado_files ({str: [tornado.httputil.HTTPFile]}): the files they
        sent in.
    language_name (str|None): the language they declared their files are
        in (None means unknown and thus auto-detect).

    return (UserTest): the resulting user test, if all went well.

    raise (TestingNotAllowed): if the task doesn't allow for any tests.
    raise (UnacceptableUserTest): if the contestant wasn't allowed to
        hand in a user test, if the provided data was invalid, or if
        there were critical failures in the process.

    """
    contest = participation.contest
    assert task.contest is contest

    # Check whether the task is testable.

    task_type = task.active_dataset.task_type_object
    if not task_type.testable:
        raise TestingNotAllowed()

    # Check whether the contestant is allowed to send a test.

    if not check_max_number(sql_session,
                            contest.max_user_test_number,
                            participation,
                            contest=contest,
                            cls=UserTest):
        raise UnacceptableUserTest(
            N_("Too many tests!"),
            N_("You have reached the maximum limit of "
               "at most %d tests among all tasks.") %
            contest.max_user_test_number)

    if not check_max_number(sql_session,
                            task.max_user_test_number,
                            participation,
                            task=task,
                            cls=UserTest):
        raise UnacceptableUserTest(
            N_("Too many tests!"),
            N_("You have reached the maximum limit of "
               "at most %d tests on this task.") % task.max_user_test_number)

    if not check_min_interval(sql_session,
                              contest.min_user_test_interval,
                              timestamp,
                              participation,
                              contest=contest,
                              cls=UserTest):
        raise UnacceptableUserTest(
            N_("Tests too frequent!"),
            N_("Among all tasks, you can test again "
               "after %d seconds from last test.") %
            contest.min_user_test_interval.total_seconds())

    if not check_min_interval(sql_session,
                              task.min_user_test_interval,
                              timestamp,
                              participation,
                              task=task,
                              cls=UserTest):
        raise UnacceptableUserTest(
            N_("Tests too frequent!"),
            N_("For this task, you can test again "
               "after %d seconds from last test.") %
            task.min_user_test_interval.total_seconds())

    # Process the data we received and ensure it's valid.

    required_codenames = set(task.submission_format)
    required_codenames.update(task_type.get_user_managers())
    required_codenames.add("input")

    try:
        received_files = extract_files_from_tornado(tornado_files)
    except InvalidArchive:
        raise UnacceptableUserTest(
            N_("Invalid archive format!"),
            N_("The submitted archive could not be opened."))

    try:
        files, language = match_files_and_language(received_files,
                                                   language_name,
                                                   required_codenames,
                                                   contest.languages)
    except InvalidFilesOrLanguage:
        raise UnacceptableUserTest(N_("Invalid test format!"),
                                   N_("Please select the correct files."))

    digests = dict()
    missing_codenames = required_codenames.difference(iterkeys(files))
    if len(missing_codenames) > 0:
        if task.active_dataset.task_type_object.ALLOW_PARTIAL_SUBMISSION:
            digests = fetch_file_digests_from_previous_submission(
                sql_session,
                participation,
                task,
                language,
                missing_codenames,
                cls=UserTest)
        else:
            raise UnacceptableUserTest(N_("Invalid test format!"),
                                       N_("Please select the correct files."))

    if "input" not in files and "input" not in digests:
        raise UnacceptableUserTest(N_("Invalid test format!"),
                                   N_("Please select the correct files."))

    if any(
            len(content) > config.max_submission_length
            for codename, content in iteritems(files) if codename != "input"):
        raise UnacceptableUserTest(
            N_("Test too big!"),
            N_("Each source file must be at most %d bytes long.") %
            config.max_submission_length)
    if "input" in files and len(files["input"]) > config.max_input_length:
        raise UnacceptableUserTest(
            N_("Input too big!"),
            N_("The input file must be at most %d bytes long.") %
            config.max_input_length)

    # All checks done, submission accepted.

    if config.tests_local_copy:
        try:
            store_local_copy(config.tests_local_copy_path, participation, task,
                             timestamp, files)
        except StorageFailed:
            logger.error("Test local copy failed.", exc_info=True)

    # We now have to send all the files to the destination...
    try:
        for codename, content in iteritems(files):
            digest = file_cacher.put_file_content(
                content, "Test file %s sent by %s at %d." %
                (codename, participation.user.username,
                 make_timestamp(timestamp)))
            digests[codename] = digest

    # In case of error, the server aborts the submission
    except Exception as error:
        logger.error("Storage failed! %s", error)
        raise UnacceptableUserTest(N_("Test storage failed!"),
                                   N_("Please try again."))

    # All the files are stored, ready to submit!
    logger.info("All files stored for test sent by %s",
                participation.user.username)

    user_test = UserTest(
        timestamp=timestamp,
        language=language.name if language is not None else None,
        input=digests["input"],
        participation=participation,
        task=task)
    sql_session.add(user_test)

    for codename, digest in iteritems(digests):
        if codename == "input":
            continue
        if codename in task.submission_format:
            sql_session.add(
                UserTestFile(filename=codename,
                             digest=digest,
                             user_test=user_test))
        else:  # codename in task_type.get_user_managers()
            if language is not None:
                extension = language.source_extension
                filename = codename.replace(".%l", extension)
            else:
                filename = codename
            sql_session.add(
                UserTestManager(filename=filename,
                                digest=digest,
                                user_test=user_test))

    return user_test
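
A caller of accept_user_test would typically translate the two exception
types into user-facing errors and commit on success; a hedged sketch (the
surrounding handler and its error reporting are assumptions):

try:
    user_test = accept_user_test(sql_session, file_cacher, participation,
                                 task, timestamp, tornado_files,
                                 language_name)
    sql_session.commit()
except TestingNotAllowed:
    pass  # report that the active task type does not allow testing
except UnacceptableUserTest as err:
    pass  # err carries a translatable subject and text for the contestant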
Example #10
    def post(self, task_name):
        participation = self.current_user

        if not self.r_params["testing_enabled"]:
            raise tornado.web.HTTPError(404)

        try:
            task = self.contest.get_task(task_name)
        except KeyError:
            raise tornado.web.HTTPError(404)

        self.fallback_page = ["testing"]
        self.fallback_args = {"task_name": task.name}

        # Check that the task is testable
        task_type = get_task_type(dataset=task.active_dataset)
        if not task_type.testable:
            logger.warning("User %s tried to make test on task %s.",
                           participation.user.username, task_name)
            raise tornado.web.HTTPError(404)

        # Alias for easy access
        contest = self.contest

        # Enforce maximum number of user_tests
        try:
            if contest.max_user_test_number is not None:
                user_test_c = self.sql_session.query(func.count(UserTest.id))\
                    .join(UserTest.task)\
                    .filter(Task.contest == contest)\
                    .filter(UserTest.participation == participation)\
                    .scalar()
                if user_test_c >= contest.max_user_test_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d tests among all tasks.") %
                        contest.max_user_test_number)
            if task.max_user_test_number is not None:
                user_test_t = self.sql_session.query(func.count(UserTest.id))\
                    .filter(UserTest.task == task)\
                    .filter(UserTest.participation == participation)\
                    .scalar()
                if user_test_t >= task.max_user_test_number and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("You have reached the maximum limit of "
                               "at most %d tests on this task.") %
                        task.max_user_test_number)
        except ValueError as error:
            self._send_error(self._("Too many tests!"), str(error))
            return

        # Enforce minimum time between user_tests
        try:
            if contest.min_user_test_interval is not None:
                last_user_test_c = self.sql_session.query(UserTest)\
                    .join(UserTest.task)\
                    .filter(Task.contest == contest)\
                    .filter(UserTest.participation == participation)\
                    .order_by(UserTest.timestamp.desc())\
                    .first()
                if last_user_test_c is not None and \
                        self.timestamp - last_user_test_c.timestamp < \
                        contest.min_user_test_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("Among all tasks, you can test again "
                               "after %d seconds from last test.") %
                        contest.min_user_test_interval.total_seconds())
            # We get the last user_test even if we may not need it
            # for min_user_test_interval because we may need it later,
            # in case this is an ALLOW_PARTIAL_SUBMISSION task.
            last_user_test_t = self.sql_session.query(UserTest)\
                .filter(UserTest.participation == participation)\
                .filter(UserTest.task == task)\
                .order_by(UserTest.timestamp.desc())\
                .first()
            if task.min_user_test_interval is not None:
                if last_user_test_t is not None and \
                        self.timestamp - last_user_test_t.timestamp < \
                        task.min_user_test_interval and \
                        not self.current_user.unrestricted:
                    raise ValueError(
                        self._("For this task, you can test again "
                               "after %d seconds from last test.") %
                        task.min_user_test_interval.total_seconds())
        except ValueError as error:
            self._send_error(self._("Tests too frequent!"), str(error))
            return

        # Required files from the user.
        required = set([sfe.filename for sfe in task.submission_format] +
                       task_type.get_user_managers(task.submission_format) +
                       ["input"])

        # Ensure that the user did not submit multiple files with the
        # same name.
        if any(
                len(filename) != 1
                for filename in itervalues(self.request.files)):
            self._send_error(self._("Invalid test format!"),
                             self._("Please select the correct files."))
            return

        # If the user submitted an archive, extract it and use its
        # content as request.files. This is only valid for "output
        # only" tasks (i.e., not for submissions requiring programming
        # language identification).
        if len(self.request.files) == 1 and \
                next(iterkeys(self.request.files)) == "submission":
            if any(filename.endswith(".%l") for filename in required):
                self._send_error(self._("Invalid test format!"),
                                 self._("Please select the correct files."),
                                 task)
                return
            archive_data = self.request.files["submission"][0]
            del self.request.files["submission"]

            # Create the archive.
            archive = Archive.from_raw_data(archive_data["body"])

            if archive is None:
                self._send_error(
                    self._("Invalid archive format!"),
                    self._("The submitted archive could not be opened."))
                return

            # Extract the archive.
            unpacked_dir = archive.unpack()
            for name in archive.namelist():
                filename = os.path.basename(name)
                with open(os.path.join(unpacked_dir, name), "rb") as item:
                    body = item.read()
                self.request.files[filename] = [{
                    'filename': filename,
                    'body': body
                }]

            archive.cleanup()

        # This ensures that the user sent one file for every name in
        # submission format and no more. Less is acceptable if task
        # type says so.
        provided = set(iterkeys(self.request.files))
        if not (required == provided or (task_type.ALLOW_PARTIAL_SUBMISSION
                                         and required.issuperset(provided))):
            self._send_error(self._("Invalid test format!"),
                             self._("Please select the correct files."))
            return

        # Add submitted files. After this, files is a dictionary indexed
        # by *our* filenames (something like "output01.txt" or
        # "taskname.%l", and whose value is a couple
        # (user_assigned_filename, content).
        files = {}
        for uploaded, data in iteritems(self.request.files):
            files[uploaded] = (data[0]["filename"], data[0]["body"])

        # Read the submission language provided in the request; we
        # integrate it with the language fetched from the previous
        # submission (if we use it) and later make sure it is
        # recognized and allowed.
        submission_lang = self.get_argument("language", None)
        need_lang = any(
            our_filename.find(".%l") != -1 for our_filename in files)

        # If we allow partial submissions, implicitly we recover the
        # non-submitted files from the previous user test. And put them
        # in file_digests (i.e. like they have already been sent to FS).
        file_digests = {}
        if task_type.ALLOW_PARTIAL_SUBMISSION and \
                last_user_test_t is not None and \
                (submission_lang is None or
                 submission_lang == last_user_test_t.language):
            submission_lang = last_user_test_t.language
            for filename in required.difference(provided):
                if filename in last_user_test_t.files:
                    file_digests[filename] = \
                        last_user_test_t.files[filename].digest

        # Throw an error if the task needs a language, but we don't
        # have it or it is not allowed / recognized.
        if need_lang:
            error = None
            if submission_lang is None:
                error = self._("Cannot recognize the user test language.")
            elif submission_lang not in contest.languages:
                error = self._("Language %s not allowed in this contest.") \
                    % submission_lang
            if error is not None:
                self._send_error(self._("Invalid test!"), error)
                return

        # Check if submitted files are small enough.
        if any([
                len(f[1]) > config.max_submission_length
                for n, f in iteritems(files) if n != "input"
        ]):
            self._send_error(
                self._("Test too big!"),
                self._("Each source file must be at most %d bytes long.") %
                config.max_submission_length)
            return
        if len(files["input"][1]) > config.max_input_length:
            self._send_error(
                self._("Input too big!"),
                self._("The input file must be at most %d bytes long.") %
                config.max_input_length)
            return

        # All checks done, submission accepted.

        # Attempt to store the submission locally to be able to
        # recover a failure.
        if config.tests_local_copy:
            try:
                path = os.path.join(
                    config.tests_local_copy_path.replace(
                        "%s", config.data_dir), participation.user.username)
                if not os.path.exists(path):
                    os.makedirs(path)
                # Pickle in ASCII format produces str, not unicode,
                # therefore we open the file in binary mode.
                with io.open(
                        os.path.join(path,
                                     "%d" % make_timestamp(self.timestamp)),
                        "wb") as file_:
                    pickle.dump((self.contest.id, participation.user.id,
                                 task.id, files), file_)
            except Exception as error:
                logger.error("Test local copy failed.", exc_info=True)

        # We now have to send all the files to the destination...
        try:
            for filename in files:
                digest = self.service.file_cacher.put_file_content(
                    files[filename][1], "Test file %s sent by %s at %d." %
                    (filename, participation.user.username,
                     make_timestamp(self.timestamp)))
                file_digests[filename] = digest

        # In case of error, the server aborts the submission
        except Exception as error:
            logger.error("Storage failed! %s", error)
            self._send_error(self._("Test storage failed!"),
                             self._("Please try again."))
            return

        # All the files are stored, ready to submit!
        logger.info("All files stored for test sent by %s",
                    participation.user.username)
        user_test = UserTest(self.timestamp,
                             submission_lang,
                             file_digests["input"],
                             participation=participation,
                             task=task)

        for filename in [sfe.filename for sfe in task.submission_format]:
            digest = file_digests[filename]
            self.sql_session.add(
                UserTestFile(filename, digest, user_test=user_test))
        for filename in task_type.get_user_managers(task.submission_format):
            digest = file_digests[filename]
            if submission_lang is not None:
                extension = get_language(submission_lang).source_extension
                filename = filename.replace(".%l", extension)
            self.sql_session.add(
                UserTestManager(filename, digest, user_test=user_test))

        self.sql_session.add(user_test)
        self.sql_session.commit()
        self.service.evaluation_service.new_user_test(
            user_test_id=user_test.id)
        self.service.add_notification(
            participation.user.username, self.timestamp,
            self._("Test received"),
            self._("Your test has been received "
                   "and is currently being executed."), NOTIFICATION_SUCCESS)

        # The argument (the encrypted user test id) is not used by CWS
        # (nor does it disclose information to the user), but it is
        # useful for automatic testing to obtain the user test id.
        self.redirect(
            self.contest_url(*self.fallback_page,
                             user_test_id=encrypt_number(user_test.id),
                             **self.fallback_args))
Example #11
    def write_results(self, items):
        """Receive worker results from the cache and writes them to the DB.

        Grouping results together by object (i.e., submission result
        or user test result) and type (compilation or evaluation)
        allows this method to talk less to the DB, for example by
        retrieving datasets and submission results only once instead
        of once for every result.

        items ([(operation, Result)]): the results received by ES but
            not yet written to the db.

        """
        logger.info("Starting commit process...")

        # Reorganize the results by submission/usertest result and
        # operation type (i.e., group together the testcase
        # evaluations for the same submission and dataset).
        by_object_and_type = defaultdict(list)
        for operation, result in items:
            t = (operation.type_, operation.object_id, operation.dataset_id)
            by_object_and_type[t].append((operation, result))

        with SessionGen() as session:
            for key, operation_results in by_object_and_type.items():
                type_, object_id, dataset_id = key

                dataset = Dataset.get_from_id(dataset_id, session)
                if dataset is None:
                    logger.error("Could not find dataset %d in the database.",
                                 dataset_id)
                    continue

                # Get submission or user test results.
                if type_ in [ESOperation.COMPILATION, ESOperation.EVALUATION]:
                    object_ = Submission.get_from_id(object_id, session)
                    if object_ is None:
                        logger.error(
                            "Could not find submission %d "
                            "in the database.", object_id)
                        continue
                    object_result = object_.get_result_or_create(dataset)
                else:
                    object_ = UserTest.get_from_id(object_id, session)
                    if object_ is None:
                        logger.error(
                            "Could not find user test %d "
                            "in the database.", object_id)
                        continue
                    object_result = object_.get_result_or_create(dataset)

                self.write_results_one_object_and_type(session, object_result,
                                                       operation_results)

            logger.info("Committing evaluations...")
            session.commit()

            num_testcases_per_dataset = dict()
            for type_, object_id, dataset_id in by_object_and_type.keys():
                if type_ == ESOperation.EVALUATION:
                    if dataset_id not in num_testcases_per_dataset:
                        num_testcases_per_dataset[dataset_id] = session\
                            .query(func.count(Testcase.id))\
                            .filter(Testcase.dataset_id == dataset_id).scalar()
                    num_evaluations = session\
                        .query(func.count(Evaluation.id)) \
                        .filter(Evaluation.dataset_id == dataset_id) \
                        .filter(Evaluation.submission_id == object_id).scalar()
                    if num_evaluations == num_testcases_per_dataset[
                            dataset_id]:
                        submission_result = SubmissionResult.get_from_id(
                            (object_id, dataset_id), session)
                        submission_result.set_evaluation_outcome()

            logger.info("Committing evaluation outcomes...")
            session.commit()

            logger.info("Ending operations for %s objects...",
                        len(by_object_and_type))
            for type_, object_id, dataset_id in by_object_and_type.keys():
                if type_ == ESOperation.COMPILATION:
                    submission_result = SubmissionResult.get_from_id(
                        (object_id, dataset_id), session)
                    self.compilation_ended(submission_result)
                elif type_ == ESOperation.EVALUATION:
                    submission_result = SubmissionResult.get_from_id(
                        (object_id, dataset_id), session)
                    if submission_result.evaluated():
                        self.evaluation_ended(submission_result)
                elif type_ == ESOperation.USER_TEST_COMPILATION:
                    user_test_result = UserTestResult.get_from_id(
                        (object_id, dataset_id), session)
                    self.user_test_compilation_ended(user_test_result)
                elif type_ == ESOperation.USER_TEST_EVALUATION:
                    user_test_result = UserTestResult.get_from_id(
                        (object_id, dataset_id), session)
                    self.user_test_evaluation_ended(user_test_result)

        logger.info("Done")
Example #12
    def action_finished(self, data, plus, error=None):
        """Callback from a worker, to signal that is finished some
        action (compilation or evaluation).

        data (dict): a dictionary that describes a Job instance.
        plus (tuple): the tuple (type_,
                                 object_id,
                                 dataset_id,
                                 testcase_codename,
                                 side_data=(priority, timestamp),
                                 shard_of_worker)

        """
        # Unpack the plus tuple. It's built in the RPC call to Worker's
        # execute_job method inside WorkerPool.acquire_worker.
        type_, object_id, dataset_id, testcase_codename, _, \
            shard = plus

        # Restore operation from its fields.
        operation = ESOperation(
            type_, object_id, dataset_id, testcase_codename)

        # We notify the pool that the worker is available again for
        # further work (no matter how the current request turned out,
        # even if the worker encountered an error). If the pool
        # informs us that the data produced by the worker has to be
        # ignored (by returning True) we interrupt the execution of
        # this method and do nothing because in that case we know the
        # operation has returned to the queue and perhaps already been
        # reassigned to another worker.
        if self.get_executor().pool.release_worker(shard):
            logger.info("Ignored result from worker %s as requested.", shard)
            return

        job_success = True
        if error is not None:
            logger.error("Received error from Worker: `%s'.", error)
            job_success = False

        else:
            try:
                job = Job.import_from_dict_with_type(data)
            except Exception:
                logger.error("[action_finished] Couldn't build Job for "
                             "data %s.", data, exc_info=True)
                job_success = False

            else:
                if not job.success:
                    logger.error("Worker %s signaled action "
                                 "not successful.", shard)
                    job_success = False

        logger.info("Operation `%s' for submission %s completed. Success: %s.",
                    operation, object_id, job_success)

        # We get the submission from DB and update it.
        with SessionGen() as session:
            dataset = Dataset.get_from_id(dataset_id, session)
            if dataset is None:
                logger.error("[action_finished] Could not find "
                             "dataset %d in the database.",
                             dataset_id)
                return

            # TODO Try to move this 4-cases if-clause into a method of
            # ESOperation: I'd really like ES itself not to care about
            # which type of operation it's handling.
            if type_ == ESOperation.COMPILATION:
                submission = Submission.get_from_id(object_id, session)
                if submission is None:
                    logger.error("[action_finished] Could not find "
                                 "submission %d in the database.",
                                 object_id)
                    return

                submission_result = submission.get_result(dataset)
                if submission_result is None:
                    logger.info("[action_finished] Couldn't find "
                                "submission %d(%d) in the database. "
                                "Creating it.", object_id, dataset_id)
                    submission_result = \
                        submission.get_result_or_create(dataset)

                if job_success:
                    job.to_submission(submission_result)
                else:
                    submission_result.compilation_tries += 1

                session.commit()

                self.compilation_ended(submission_result)

            elif type_ == ESOperation.EVALUATION:
                submission = Submission.get_from_id(object_id, session)
                if submission is None:
                    logger.error("[action_finished] Could not find "
                                 "submission %d in the database.",
                                 object_id)
                    return

                submission_result = submission.get_result(dataset)
                if submission_result is None:
                    logger.error("[action_finished] Couldn't find "
                                 "submission %d(%d) in the database.",
                                 object_id, dataset_id)
                    return

                if job_success:
                    job.to_submission(submission_result)
                else:
                    submission_result.evaluation_tries += 1

                # Submission evaluation will be ended only when
                # evaluation for each testcase is available.
                evaluation_complete = (len(submission_result.evaluations) ==
                                       len(dataset.testcases))
                if evaluation_complete:
                    submission_result.set_evaluation_outcome()

                session.commit()

                if evaluation_complete:
                    self.evaluation_ended(submission_result)

            elif type_ == ESOperation.USER_TEST_COMPILATION:
                user_test = UserTest.get_from_id(object_id, session)
                if user_test is None:
                    logger.error("[action_finished] Could not find "
                                 "user test %d in the database.",
                                 object_id)
                    return

                user_test_result = user_test.get_result(dataset)
                if user_test_result is None:
                    logger.error("[action_finished] Couldn't find "
                                 "user test %d(%d) in the database. "
                                 "Creating it.", object_id, dataset_id)
                    user_test_result = \
                        user_test.get_result_or_create(dataset)

                if job_success:
                    job.to_user_test(user_test_result)
                else:
                    user_test_result.compilation_tries += 1

                session.commit()

                self.user_test_compilation_ended(user_test_result)

            elif type_ == ESOperation.USER_TEST_EVALUATION:
                user_test = UserTest.get_from_id(object_id, session)
                if user_test is None:
                    logger.error("[action_finished] Could not find "
                                 "user test %d in the database.",
                                 object_id)
                    return

                user_test_result = user_test.get_result(dataset)
                if user_test_result is None:
                    logger.error("[action_finished] Couldn't find "
                                 "user test %d(%d) in the database.",
                                 object_id, dataset_id)
                    return

                if job_success:
                    job.to_user_test(user_test_result)
                else:
                    user_test_result.evaluation_tries += 1

                session.commit()

                self.user_test_evaluation_ended(user_test_result)

            else:
                logger.error("Invalid operation type %r.", type_)
                return
Example #13
    def action_finished(self, data, plus, error=None):
        """Callback from a worker, to signal that is finished some
        action (compilation or evaluation).

        data (dict): a dictionary that describes a Job instance.
        plus (tuple): the tuple (type_,
                                 object_id,
                                 dataset_id,
                                 testcase_codename,
                                 side_data=(priority, timestamp),
                                 shard_of_worker)

        """
        # Unpack the plus tuple. It's built in the RPC call to Worker's
        # execute_job method inside WorkerPool.acquire_worker.
        type_, object_id, dataset_id, testcase_codename, _, \
            shard = plus

        # Restore operation from its fields.
        operation = ESOperation(type_, object_id, dataset_id,
                                testcase_codename)

        # We notify the pool that the worker is available again for
        # further work (no matter how the current request turned out,
        # even if the worker encountered an error). If the pool
        # informs us that the data produced by the worker has to be
        # ignored (by returning True) we interrupt the execution of
        # this method and do nothing because in that case we know the
        # operation has returned to the queue and perhaps already been
        # reassigned to another worker.
        if self.get_executor().pool.release_worker(shard):
            logger.info("Ignored result from worker %s as requested.", shard)
            return

        job_success = True
        if error is not None:
            logger.error("Received error from Worker: `%s'.", error)
            job_success = False

        else:
            try:
                job = Job.import_from_dict_with_type(data)
            except Exception:
                logger.error("Couldn't build Job for data %s.",
                             data,
                             exc_info=True)
                job_success = False

            else:
                if not job.success:
                    logger.error("Worker %s signaled action not successful.",
                                 shard)
                    job_success = False

        logger.info("`%s' completed. Success: %s.", operation, job_success)

        # We get the submission from DB and update it.
        with SessionGen() as session:
            dataset = Dataset.get_from_id(dataset_id, session)
            if dataset is None:
                logger.error("Could not find dataset %d in the database.",
                             dataset_id)
                return

            # TODO Try to move this 4-cases if-clause into a method of
            # ESOperation: I'd really like ES itself not to care about
            # which type of operation it's handling.
            if type_ == ESOperation.COMPILATION:
                submission = Submission.get_from_id(object_id, session)
                if submission is None:
                    logger.error(
                        "Could not find submission %d "
                        "in the database.", object_id)
                    return

                submission_result = submission.get_result(dataset)
                if submission_result is None:
                    logger.info(
                        "Couldn't find submission %d(%d) "
                        "in the database. Creating it.", object_id, dataset_id)
                    submission_result = \
                        submission.get_result_or_create(dataset)

                if job_success:
                    job.to_submission(submission_result)
                else:
                    submission_result.compilation_tries += 1

                session.commit()

                self.compilation_ended(submission_result)

            elif type_ == ESOperation.EVALUATION:
                submission = Submission.get_from_id(object_id, session)
                if submission is None:
                    logger.error(
                        "Could not find submission %d "
                        "in the database.", object_id)
                    return

                submission_result = submission.get_result(dataset)
                if submission_result is None:
                    logger.error(
                        "Couldn't find submission %d(%d) "
                        "in the database.", object_id, dataset_id)
                    return

                if job_success:
                    job.to_submission(submission_result)
                else:
                    submission_result.evaluation_tries += 1

                # Submission evaluation will be ended only when
                # evaluation for each testcase is available.
                evaluation_complete = (len(
                    submission_result.evaluations) == len(dataset.testcases))
                if evaluation_complete:
                    submission_result.set_evaluation_outcome()

                session.commit()

                if evaluation_complete:
                    self.evaluation_ended(submission_result)

            elif type_ == ESOperation.USER_TEST_COMPILATION:
                user_test = UserTest.get_from_id(object_id, session)
                if user_test is None:
                    logger.error(
                        "Could not find user test %d "
                        "in the database.", object_id)
                    return

                user_test_result = user_test.get_result(dataset)
                if user_test_result is None:
                    logger.error(
                        "Couldn't find user test %d(%d) "
                        "in the database. Creating it.", object_id, dataset_id)
                    user_test_result = \
                        user_test.get_result_or_create(dataset)

                if job_success:
                    job.to_user_test(user_test_result)
                else:
                    user_test_result.compilation_tries += 1

                session.commit()

                self.user_test_compilation_ended(user_test_result)

            elif type_ == ESOperation.USER_TEST_EVALUATION:
                user_test = UserTest.get_from_id(object_id, session)
                if user_test is None:
                    logger.error(
                        "Could not find user test %d "
                        "in the database.", object_id)
                    return

                user_test_result = user_test.get_result(dataset)
                if user_test_result is None:
                    logger.error(
                        "Couldn't find user test %d(%d) "
                        "in the database.", object_id, dataset_id)
                    return

                if job_success:
                    job.to_user_test(user_test_result)
                else:
                    user_test_result.evaluation_tries += 1

                session.commit()

                self.user_test_evaluation_ended(user_test_result)

            else:
                logger.error("Invalid operation type %r.", type_)
                return
Example #14
    def write_result(self, operation, job):
        """Receive worker results from QS and writes them to the DB.

        operation (dict): operation performed, exported as dict
        job (dict): job containing the result, exported as dict

        """
        logger.debug("Starting commit process...")
        operation = ESOperation.from_dict(operation)
        job = Job.import_from_dict_with_type(job)

        with SessionGen() as session:
            type_ = operation.type_
            object_id = operation.object_id
            dataset_id = operation.dataset_id

            dataset = session.query(Dataset)\
                .filter(Dataset.id == dataset_id)\
                .options(joinedload(Dataset.testcases))\
                .first()
            if dataset is None:
                logger.error("Could not find dataset %d in the database.",
                             dataset_id)
                return False, []

            # Get submission or user test, and their results.
            if type_ in [ESOperation.COMPILATION, ESOperation.EVALUATION]:
                object_ = Submission.get_from_id(object_id, session)
                if object_ is None:
                    logger.error(
                        "Could not find submission %d "
                        "in the database.", object_id)
                    return False, []
                object_result = object_.get_result_or_create(dataset)
            else:
                object_ = UserTest.get_from_id(object_id, session)
                object_result = object_.get_result_or_create(dataset)

            logger.info("Writing result to db for %s", operation)
            new_operations = []
            try:
                new_operations = self.write_results_one_row(
                    session, object_result, operation, job)
            except IntegrityError:
                logger.warning(
                    "Integrity error while inserting worker result.",
                    exc_info=True)
                # This is not an error condition, as the result is already
                # in the DB.
                return True, []

            logger.debug("Committing evaluations...")
            session.commit()

            # If we collected some new operations to do while writing
            # the results, it means we had to invalidate the submission.
            # We return immediately since we already have all the operations
            # we need to do next.
            if new_operations != []:
                return True, [[
                    op.to_dict(), priority,
                    (timestamp - EvaluationService.EPOCH).total_seconds(), job_
                ] for op, priority, timestamp, job_ in new_operations]

            if type_ == ESOperation.EVALUATION:
                if len(object_result.evaluations) == len(dataset.testcases):
                    object_result.set_evaluation_outcome()

            logger.debug("Committing evaluation outcomes...")
            session.commit()

            logger.info("Ending operations...")
            if type_ == ESOperation.COMPILATION:
                new_operations = self.compilation_ended(object_result)
            elif type_ == ESOperation.EVALUATION:
                if object_result.evaluated():
                    new_operations = self.evaluation_ended(object_result)
            elif type_ == ESOperation.USER_TEST_COMPILATION:
                new_operations = \
                    self.user_test_compilation_ended(object_result)
            elif type_ == ESOperation.USER_TEST_EVALUATION:
                new_operations = self.user_test_evaluation_ended(object_result)

        logger.debug("Done")
        return True, [[
            op.to_dict(), priority,
            (timestamp - EvaluationService.EPOCH).total_seconds(), job_
        ] for op, priority, timestamp, job_ in new_operations]
Example #15
    def write_results(self, items):
        """Receive worker results from the cache and writes them to the DB.

        Grouping results together by object (i.e., submission result
        or user test result) and type (compilation or evaluation)
        allows this method to talk less to the DB, for example by
        retrieving datasets and submission results only once instead
        of once for every result.

        items ([(operation, Result)]): the results received by ES but
            not yet written to the DB.

        """
        logger.info("Starting commit process...")

        # Reorganize the results by submission/usertest result and
        # operation type (i.e., group together the testcase
        # evaluations for the same submission and dataset).
        by_object_and_type = defaultdict(list)
        for operation, result in items:
            t = (operation.type_, operation.object_id, operation.dataset_id)
            by_object_and_type[t].append((operation, result))

        with SessionGen() as session:
            # Dictionary holding the objects we use repeatedly,
            # indexed by id, to avoid querying them multiple times.
            # TODO: this pattern is used in WorkerPool and should be
            # abstracted away.
            datasets = dict()
            subs = dict()
            srs = dict()

            for key, operation_results in iteritems(by_object_and_type):
                type_, object_id, dataset_id = key

                # Get dataset.
                if dataset_id not in datasets:
                    datasets[dataset_id] = session.query(Dataset)\
                        .filter(Dataset.id == dataset_id)\
                        .options(joinedload(Dataset.testcases))\
                        .first()
                dataset = datasets[dataset_id]
                if dataset is None:
                    logger.error("Could not find dataset %d in the database.",
                                 dataset_id)
                    continue

                # Get submission or user test, and their results.
                if type_ in [ESOperation.COMPILATION, ESOperation.EVALUATION]:
                    if object_id not in subs:
                        subs[object_id] = \
                            Submission.get_from_id(object_id, session)
                    object_ = subs[object_id]
                    if object_ is None:
                        logger.error("Could not find submission %d "
                                     "in the database.", object_id)
                        continue
                    result_id = (object_id, dataset_id)
                    if result_id not in srs:
                        srs[result_id] = object_.get_result_or_create(dataset)
                    object_result = srs[result_id]
                else:
                    # We do not cache user test results, since each
                    # user test appears at most once per batch.
                    object_ = UserTest.get_from_id(object_id, session)
                    object_result = object_.get_result_or_create(dataset)

                self.write_results_one_object_and_type(
                    session, object_result, operation_results)

            logger.info("Committing evaluations...")
            session.commit()

            for type_, object_id, dataset_id in by_object_and_type:
                if type_ == ESOperation.EVALUATION:
                    submission_result = srs[(object_id, dataset_id)]
                    dataset = datasets[dataset_id]
                    if len(submission_result.evaluations) == \
                            len(dataset.testcases):
                        submission_result.set_evaluation_outcome()

            logger.info("Committing evaluation outcomes...")
            session.commit()

            logger.info("Ending operations for %s objects...",
                        len(by_object_and_type))
            for type_, object_id, dataset_id in by_object_and_type:
                if type_ == ESOperation.COMPILATION:
                    submission_result = srs[(object_id, dataset_id)]
                    self.compilation_ended(submission_result)
                elif type_ == ESOperation.EVALUATION:
                    submission_result = srs[(object_id, dataset_id)]
                    if submission_result.evaluated():
                        self.evaluation_ended(submission_result)
                elif type_ == ESOperation.USER_TEST_COMPILATION:
                    user_test_result = UserTest\
                        .get_from_id(object_id, session)\
                        .get_result(datasets[dataset_id])
                    self.user_test_compilation_ended(user_test_result)
                elif type_ == ESOperation.USER_TEST_EVALUATION:
                    user_test_result = UserTest\
                        .get_from_id(object_id, session)\
                        .get_result(datasets[dataset_id])
                    self.user_test_evaluation_ended(user_test_result)

        logger.info("Done")
Example #18
    def write_results(self, items):
        """Receive worker results from the cache and writes them to the DB.

        Grouping results together by object (i.e., submission result
        or user test result) and type (compilation or evaluation)
        allows this method to talk less to the DB, for example by
        retrieving datasets and submission results only once instead
        of once for every result.

        items ([(operation, Result)]): the results received by ES but
            not yet written to the DB.

        """
        logger.info("Starting commit process...")

        # Reorganize the results by submission/usertest result and
        # operation type (i.e., group together the testcase
        # evaluations for the same submission and dataset).
        by_object_and_type = defaultdict(list)
        for operation, result in items:
            t = (operation.type_, operation.object_id, operation.dataset_id)
            by_object_and_type[t].append((operation, result))

        with SessionGen() as session:
            for key, operation_results in by_object_and_type.items():
                type_, object_id, dataset_id = key

                dataset = Dataset.get_from_id(dataset_id, session)
                if dataset is None:
                    logger.error("Could not find dataset %d in the database.",
                                 dataset_id)
                    continue

                # Get submission or user test results.
                if type_ in [ESOperation.COMPILATION, ESOperation.EVALUATION]:
                    object_ = Submission.get_from_id(object_id, session)
                    if object_ is None:
                        logger.error("Could not find submission %d "
                                     "in the database.", object_id)
                        continue
                    object_result = object_.get_result_or_create(dataset)
                else:
                    object_ = UserTest.get_from_id(object_id, session)
                    if object_ is None:
                        logger.error("Could not find user test %d "
                                     "in the database.", object_id)
                        continue
                    object_result = object_.get_result_or_create(dataset)

                self.write_results_one_object_and_type(
                    session, object_result, operation_results)

            logger.info("Committing evaluations...")
            session.commit()

            num_testcases_per_dataset = dict()
            for type_, object_id, dataset_id in by_object_and_type.keys():
                if type_ == ESOperation.EVALUATION:
                    if dataset_id not in num_testcases_per_dataset:
                        num_testcases_per_dataset[dataset_id] = session\
                            .query(func.count(Testcase.id))\
                            .filter(Testcase.dataset_id == dataset_id).scalar()
                    num_evaluations = session\
                        .query(func.count(Evaluation.id)) \
                        .filter(Evaluation.dataset_id == dataset_id) \
                        .filter(Evaluation.submission_id == object_id).scalar()
                    if num_evaluations == num_testcases_per_dataset[dataset_id]:
                        submission_result = SubmissionResult.get_from_id(
                            (object_id, dataset_id), session)
                        submission_result.set_evaluation_outcome()

            logger.info("Committing evaluation outcomes...")
            session.commit()

            logger.info("Ending operations for %s objects...",
                        len(by_object_and_type))
            for type_, object_id, dataset_id in by_object_and_type.keys():
                if type_ == ESOperation.COMPILATION:
                    submission_result = SubmissionResult.get_from_id(
                        (object_id, dataset_id), session)
                    self.compilation_ended(submission_result)
                elif type_ == ESOperation.EVALUATION:
                    submission_result = SubmissionResult.get_from_id(
                        (object_id, dataset_id), session)
                    if submission_result.evaluated():
                        self.evaluation_ended(submission_result)
                elif type_ == ESOperation.USER_TEST_COMPILATION:
                    user_test_result = UserTestResult.get_from_id(
                        (object_id, dataset_id), session)
                    self.user_test_compilation_ended(user_test_result)
                elif type_ == ESOperation.USER_TEST_EVALUATION:
                    user_test_result = UserTestResult.get_from_id(
                        (object_id, dataset_id), session)
                    self.user_test_evaluation_ended(user_test_result)

        logger.info("Done")