Example #1
File: Job.py Project: bblackham/cms
    def from_submission(submission, dataset_id):
        dataset = Dataset.get_from_id(dataset_id, submission.sa_session)

        job = EvaluationJob()

        # Job
        job.task_type = dataset.task_type
        job.task_type_parameters = json.loads(dataset.task_type_parameters)

        submission_result = submission.get_result(dataset_id)

        # This should have been created by now.
        assert submission_result is not None

        # EvaluationJob; dict() is required to detach the dictionaries
        # added to the Job from SQLAlchemy's control.
        job.executables = dict(submission_result.executables)
        job.testcases = dataset.testcases
        job.time_limit = dataset.time_limit
        job.memory_limit = dataset.memory_limit
        job.managers = dict(dataset.managers)
        job.files = dict(submission.files)
        job.info = "evaluate submission %d" % (submission.id)

        return job
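For context, a minimal sketch of how this factory might be invoked. SessionGen, the ids, and from_submission being reachable as a static method of EvaluationJob are assumptions for illustration, not something the snippet above guarantees:

# Hypothetical caller (sketch only) -- SessionGen, the ids and the
# static-method access are assumptions.
with SessionGen(commit=False) as session:
    submission = Submission.get_from_id(42, session)
    job = EvaluationJob.from_submission(submission, 7)
# The dict() copies mean job.executables, job.managers and job.files
# are plain dicts no longer under SQLAlchemy's control, so the job
# can be shipped to a worker after the session closes.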
Example #2
def get_score_type(submission=None, task=None, dataset_id=None):
    """Given a task, instantiate the corresponding ScoreType class.

    submission (Submission): the submission that needs the score type.
    task (Task): the task we want to score.
    dataset_id (int): the dataset id to use, or None for active.

    return (object): an instance of the correct ScoreType class.

    """
    # Validate arguments.
    if [x is not None for x in [submission, task]].count(True) != 1:
        raise ValueError("Need exactly one way to get the score type.")

    if submission is not None:
        task = submission.task

    if dataset_id is None:
        dataset_id = task.active_dataset_id

    dataset = Dataset.get_from_id(dataset_id, task.sa_session)

    score_type_name = dataset.score_type
    try:
        score_type_parameters = json.loads(dataset.score_type_parameters)
    except json.decoder.JSONDecodeError as error:
        logger.error("Cannot decode score type parameters for task "
            "%d \"%s\", dataset %d \"%s\"\n%r." % (
                task.id, task.name, dataset.id, dataset.description,
                error))
        return None

    public_testcases = dict(
        (testcase.num, testcase.public)
        for testcase in dataset.testcases)

    cls = plugin_lookup(score_type_name,
                        "cms.grading.scoretypes", "scoretypes")

    try:
        return cls(score_type_parameters, public_testcases)
    except Exception as error:
        logger.error("Cannot instantiate score type for task "
            "%d \"%s\", dataset %d \"%s\"\n%r." % (
                task.id, task.name, dataset.id, dataset.description,
                error))
        return None
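A sketch of typical call sites, assuming task and submission objects are in scope (illustrative only). Exactly one of the two lookups must be supplied, and failures are logged and surfaced as None rather than raised:

# Illustrative calls -- `task` and `submission` are assumed to exist.
score_type = get_score_type(task=task)                # active dataset
score_type = get_score_type(task=task, dataset_id=3)  # specific dataset
score_type = get_score_type(submission=submission)    # via a submission
if score_type is None:
    pass  # bad JSON parameters or a failing constructor were logged
# Passing both submission and task (or neither) raises ValueError.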
Example #3
File: Job.py Project: bblackham/cms
    def from_submission(submission, dataset_id):
        dataset = Dataset.get_from_id(dataset_id, submission.sa_session)

        job = CompilationJob()

        # Job
        job.task_type = dataset.task_type
        job.task_type_parameters = json.loads(dataset.task_type_parameters)

        # CompilationJob
        job.language = submission.language
        job.files = submission.files
        job.managers = dataset.managers
        job.info = "compile submission %d" % (submission.id)

        return job
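Note that, unlike the EvaluationJob factory in Example #1, this one assigns submission.files and dataset.managers directly, so the job keeps references to SQLAlchemy-controlled collections. A caller that needs the job to outlive the session would presumably apply the same dict() idiom; a sketch, with job, submission and dataset assumed in scope:

# Sketch only: mirror the dict() detach used by the EvaluationJob
# factory if the CompilationJob must outlive the SQLAlchemy session.
job.files = dict(submission.files)
job.managers = dict(dataset.managers)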
Example #4
    def new_evaluation(self, submission_id, dataset_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.
        dataset_id (int): the id of the dataset to use.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)

            if submission is None:
                logger.error("[new_evaluation] Couldn't find submission %d "
                             "in the database." % submission_id)
                raise ValueError

            dataset = Dataset.get_from_id(dataset_id, session)

            if dataset is None:
                logger.error("[new_evaluation] Couldn't find dataset %d "
                             "in the database." % dataset_id)
                raise ValueError

            submission_result = submission.get_result(dataset)

            if submission_result is None or not submission_result.compiled():
                logger.warning("[new_evaluation] Submission %d(%d) is not "
                               "compiled." %
                               (submission_id, dataset_id))
                return
            elif submission_result.compilation_outcome == "ok" and \
                    not submission_result.evaluated():
                logger.warning("[new_evaluation] Submission %d(%d) compiled "
                               "correctly but is not evaluated." %
                               (submission_id, dataset_id))
                return
            elif submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            # Assign score to the submission.
            scorer = self.scorers[dataset_id]
            scorer.add_submission(submission_id, submission.timestamp,
                                  submission.user.username,
                                  submission_result.evaluated(),
                                  dict((ev.codename,
                                        {"outcome": ev.outcome,
                                         "text": ev.text,
                                         "time": ev.execution_time,
                                         "memory": ev.memory_used})
                                       for ev in submission_result.evaluations),
                                  submission.tokened())

            # Mark submission as scored.
            self.submission_results_scored.add((submission_id, dataset_id))

            # Fill in the submission's score info in the db.
            submission_result.score = \
                scorer.pool[submission_id]["score"]
            submission_result.public_score = \
                scorer.pool[submission_id]["public_score"]

            # And details.
            submission_result.score_details = \
                scorer.pool[submission_id]["details"]
            submission_result.public_score_details = \
                scorer.pool[submission_id]["public_details"]
            submission_result.ranking_score_details = \
                scorer.pool[submission_id]["ranking_details"]

            try:
                ranking_score_details = json.loads(
                        submission_result.ranking_score_details)
            except (TypeError, ValueError):
                # It may be blank.
                ranking_score_details = None

            # If we are not a live dataset then we can bail out here,
            # and avoid updating RWS.
            if dataset is not submission.task.active_dataset:
                return

            # Data to send to remote rankings.
            submission_put_data = {
                "user": encode_id(submission.user.username),
                "task": encode_id(submission.task.name),
                "time": int(make_timestamp(submission.timestamp))}
            subchange_id = "%s%ss" % \
                (int(make_timestamp(submission.timestamp)),
                 submission_id)
            subchange_put_data = {
                "submission": encode_id(str(submission_id)),
                "time": int(make_timestamp(submission.timestamp)),
                # We're sending the unrounded score to RWS
                "score": submission_result.score}
            if ranking_score_details is not None:
                subchange_put_data["extra"] = ranking_score_details

        # TODO: ScoreRelative here does not work with remote
        # rankings (it does in the ranking view) because we
        # update only the user owning the submission.

        # Adding operations to the queue.
        with self.operation_queue_lock:
            for ranking in self.rankings:
                self.submission_queue.setdefault(
                    ranking,
                    dict())[encode_id(str(submission_id))] = \
                    submission_put_data
                self.subchange_queue.setdefault(
                    ranking,
                    dict())[encode_id(subchange_id)] = \
                    subchange_put_data
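As a quick check of the subchange_id format built above: it is the integer timestamp, the submission id and a literal "s" concatenated. A worked example with made-up values:

# Made-up values: timestamp 1334000000, submission id 42.
subchange_id = "%s%ss" % (1334000000, 42)
assert subchange_id == "133400000042s"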
Example #5
    def new_evaluation(self, submission_id, dataset_id):
        """This RPC inform ScoringService that ES finished the work on
        a submission (either because it has been evaluated, or because
        the compilation failed).

        submission_id (int): the id of the submission that changed.
        dataset_id (int): the id of the dataset to use.

        """
        with SessionGen(commit=True) as session:
            submission = Submission.get_from_id(submission_id, session)

            if submission is None:
                logger.error("[new_evaluation] Couldn't find submission %d "
                             "in the database." % submission_id)
                raise ValueError

            if submission.user.hidden:
                logger.info("[new_evaluation] Submission %d not scored "
                            "because user is hidden." % submission_id)
                return

            dataset = Dataset.get_from_id(dataset_id, session)

            if dataset is None:
                logger.error("[new_evaluation] Couldn't find dataset %d "
                             "in the database." % dataset_id)
                raise ValueError

            submission_result = submission.get_result(dataset)

            # We'll accept only submissions that either didn't compile
            # at all or that did evaluate successfully.
            if submission_result is None or not submission_result.compiled():
                logger.warning("[new_evaluation] Submission %d(%d) is "
                               "not compiled." % (submission_id, dataset_id))
                return
            elif submission_result.compilation_outcome == "ok" and \
                    not submission_result.evaluated():
                logger.warning("[new_evaluation] Submission %d(%d) is "
                               "compiled but is not evaluated." %
                               (submission_id, dataset_id))
                return

            # Assign score to the submission.
            score_type = get_score_type(dataset=dataset)
            score, details, public_score, public_details, ranking_details = \
                score_type.compute_score(submission_result)

            # Mark submission as scored.
            self.submission_results_scored.add((submission_id, dataset_id))

            # Fill in the submission's score info in the db.
            submission_result.score = score
            submission_result.public_score = public_score

            # And details.
            submission_result.score_details = details
            submission_result.public_score_details = public_details
            submission_result.ranking_score_details = ranking_details

            # If dataset is the active one, update RWS.
            if dataset is submission.task.active_dataset:
                self.rankings_send_score(submission)
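For reference, compute_score is expected to return a 5-tuple of (score, details, public_score, public_details, ranking_details). A skeletal, hypothetical score type illustrating just that shape; the base class, the outcome handling and the JSON-encoded ranking details are assumptions, not the project's actual contract:

# Hypothetical skeleton -- only the 5-tuple return shape is taken
# from the call above.
import json

class SumScoreType(object):  # would subclass the project's ScoreType
    def compute_score(self, submission_result):
        score = sum(float(ev.outcome)
                    for ev in submission_result.evaluations)
        details = "sum of testcase outcomes"
        public_score, public_details = score, details
        ranking_details = json.dumps([score])
        return (score, details,
                public_score, public_details, ranking_details)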