Example #1
    def submissions_status(contest_id):
        """Returns a dictionary of statistics about the number of
        submissions on a specific status in the given contest.

        There are six statuses: evaluated, compilation failed,
        evaluating, compiling, maximum number of compilation attempts
        reached, and the same for evaluations. The last two should not
        happen and require a check from the admin.

        The status of a submission is checked on its result for the
        active dataset of its task.

        contest_id (int|None): counts are restricted to this contest,
            or None for no restrictions.

        return (dict): statistics on the submissions.

        """
        # TODO: at the moment this counts all submission results for
        # the live datasets. It is interesting to show also numbers
        # for the datasets with autojudge, and for all datasets.
        stats = {}
        with SessionGen() as session:
            base_query = session\
                .query(func.count(SubmissionResult.submission_id))\
                .select_from(SubmissionResult)\
                .join(Dataset)\
                .join(Task, Dataset.task_id == Task.id)\
                .filter(Task.active_dataset_id == SubmissionResult.dataset_id)
            if contest_id is not None:
                base_query = base_query\
                    .filter(Task.contest_id == contest_id)

            compiled = base_query.filter(SubmissionResult.filter_compiled())
            evaluated = compiled.filter(SubmissionResult.filter_evaluated())
            not_compiled = base_query.filter(
                not_(SubmissionResult.filter_compiled()))
            not_evaluated = compiled.filter(
                SubmissionResult.filter_compilation_succeeded(),
                not_(SubmissionResult.filter_evaluated()))

            queries = {}
            queries['compiling'] = not_compiled.filter(
                SubmissionResult.compilation_tries <
                EvaluationService.EvaluationService.MAX_COMPILATION_TRIES)
            queries['max_compilations'] = not_compiled.filter(
                SubmissionResult.compilation_tries >=
                EvaluationService.EvaluationService.MAX_COMPILATION_TRIES)
            queries['compilation_fail'] = base_query.filter(
                SubmissionResult.filter_compilation_failed())
            queries['evaluating'] = not_evaluated.filter(
                SubmissionResult.evaluation_tries <
                EvaluationService.EvaluationService.MAX_EVALUATION_TRIES)
            queries['max_evaluations'] = not_evaluated.filter(
                SubmissionResult.evaluation_tries >=
                EvaluationService.EvaluationService.MAX_EVALUATION_TRIES)
            queries['scoring'] = evaluated.filter(
                not_(SubmissionResult.filter_scored()))
            queries['scored'] = evaluated.filter(
                SubmissionResult.filter_scored())

            total_query = session\
                .query(func.count(Submission.id))\
                .select_from(Submission)\
                .join(Task, Submission.task_id == Task.id)
            if contest_id is not None:
                total_query = total_query\
                    .filter(Task.contest_id == contest_id)
            queries['total'] = total_query

            stats = {}
            keys = list(queries.keys())
            results = queries[keys[0]].union_all(
                *(queries[key] for key in keys[1:])).all()

        for i, k in enumerate(keys):
            stats[k] = results[i][0]
        stats['compiling'] += 2 * stats['total'] - sum(stats.values())

        return stats
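
The interesting trick above is that all eight counts are fetched in a single round trip: the per-status count() queries are merged with union_all() and the rows are read back positionally, in the order the queries were listed. That ordering holds in practice for a plain UNION ALL (PostgreSQL appends the component SELECTs in order) but is not guaranteed by the SQL standard. Below is a self-contained sketch of the same pattern on a throwaway SQLite table; the Result model and its statuses are made up for illustration.

# Hypothetical model, only to demonstrate the UNION ALL counting pattern
# used by submissions_status() above.
from sqlalchemy import Column, Integer, String, create_engine, func
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Result(Base):
    __tablename__ = 'results'
    id = Column(Integer, primary_key=True)
    status = Column(String)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all(Result(status=s)
                    for s in ['scored', 'scored', 'compiling', 'evaluating'])
    session.commit()

    # One count() query per status, merged into a single UNION ALL statement;
    # the single-row results come back in the order the queries were listed.
    queries = {s: session.query(func.count(Result.id))
                         .filter(Result.status == s)
               for s in ['scored', 'compiling', 'evaluating']}
    keys = list(queries.keys())
    rows = queries[keys[0]].union_all(*(queries[k] for k in keys[1:])).all()
    stats = {k: rows[i][0] for i, k in enumerate(keys)}
    print(stats)  # expected: {'scored': 2, 'compiling': 1, 'evaluating': 1}
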
Example #2
import logging

from cms.io import PriorityQueue, QueueItem
from cms.db import Dataset, Evaluation, Submission, SubmissionResult, \
    Task, Testcase, UserTest, UserTestResult
from cms.grading.Job import CompilationJob, EvaluationJob

logger = logging.getLogger(__name__)

MAX_COMPILATION_TRIES = 3
MAX_EVALUATION_TRIES = 3
MAX_USER_TEST_COMPILATION_TRIES = 3
MAX_USER_TEST_EVALUATION_TRIES = 3

FILTER_SUBMISSION_DATASETS_TO_JUDGE = ((Dataset.id == Task.active_dataset_id) |
                                       (Dataset.autojudge.is_(True)))
FILTER_SUBMISSION_RESULTS_TO_COMPILE = (
    (~SubmissionResult.filter_compiled()) &
    (SubmissionResult.compilation_tries < MAX_COMPILATION_TRIES))
FILTER_SUBMISSION_RESULTS_TO_EVALUATE = (
    SubmissionResult.filter_compilation_succeeded() &
    (~SubmissionResult.filter_evaluated()) &
    (SubmissionResult.evaluation_tries < MAX_EVALUATION_TRIES))

FILTER_USER_TEST_DATASETS_TO_JUDGE = ((Dataset.id == Task.active_dataset_id) |
                                      (Dataset.autojudge.is_(True)))
FILTER_USER_TEST_RESULTS_TO_COMPILE = (
    (~UserTestResult.filter_compiled()) &
    (UserTestResult.compilation_tries < MAX_COMPILATION_TRIES))
FILTER_USER_TEST_RESULTS_TO_EVALUATE = (
    UserTestResult.filter_compilation_succeeded() &
    (~UserTestResult.filter_evaluated()) &
    (UserTestResult.evaluation_tries < MAX_EVALUATION_TRIES))
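
These FILTER_* constants are plain SQLAlchemy boolean expressions, so they can be reused in any query that joins the tables they mention. Below is a sketch of one way to combine them; the helper is an assumption, not code from the project, and it presumes it lives in the same module as the constants so the model imports above are in scope.

from sqlalchemy import func

from cms.db import SessionGen


def count_results_to_compile(contest_id=None):
    """Hypothetical helper: count submission results on datasets to be
    judged that still need a compilation attempt."""
    with SessionGen() as session:
        q = session\
            .query(func.count(SubmissionResult.submission_id))\
            .select_from(SubmissionResult)\
            .join(Dataset, Dataset.id == SubmissionResult.dataset_id)\
            .join(Task, Task.id == Dataset.task_id)\
            .filter(FILTER_SUBMISSION_DATASETS_TO_JUDGE)\
            .filter(FILTER_SUBMISSION_RESULTS_TO_COMPILE)
        if contest_id is not None:
            q = q.filter(Task.contest_id == contest_id)
        return q.scalar()
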
Example #3
    def submissions_status(self):
        """Returns a dictionary of statistics about the number of
        submissions on a specific status. There are seven statuses:
        evaluated, compilation failed, evaluating, compiling, maximum
        number of attempts of compilations reached, the same for
        evaluations, and finally 'I have no idea what's
        happening'. The last three should not happen and require a
        check from the admin.

        The status of a submission is checked on its result for the
        active dataset of its task.

        return (dict): statistics on the submissions.

        """
        # TODO: at the moment this counts all submission results for
        # the live datasets. It is interesting to show also numbers
        # for the datasets with autojudge, and for all datasets.
        stats = {}
        with SessionGen() as session:
            base_query = session\
                .query(func.count(SubmissionResult.submission_id))\
                .select_from(SubmissionResult)\
                .join(Dataset)\
                .join(Task, Dataset.task_id == Task.id)\
                .filter(Task.active_dataset_id == SubmissionResult.dataset_id)
            if self.contest_id is not None:
                base_query = base_query\
                    .filter(Task.contest_id == self.contest_id)

            compiled = base_query.filter(SubmissionResult.filter_compiled())
            evaluated = compiled.filter(SubmissionResult.filter_evaluated())
            not_compiled = base_query.filter(
                not_(SubmissionResult.filter_compiled()))
            not_evaluated = compiled.filter(
                SubmissionResult.filter_compilation_succeeded(),
                not_(SubmissionResult.filter_evaluated()))

            queries = {}
            queries['compiling'] = not_compiled.filter(
                SubmissionResult.compilation_tries <
                EvaluationService.MAX_COMPILATION_TRIES)
            queries['max_compilations'] = not_compiled.filter(
                SubmissionResult.compilation_tries >=
                EvaluationService.MAX_COMPILATION_TRIES)
            queries['compilation_fail'] = base_query.filter(
                SubmissionResult.filter_compilation_failed())
            queries['evaluating'] = not_evaluated.filter(
                SubmissionResult.evaluation_tries <
                EvaluationService.MAX_EVALUATION_TRIES)
            queries['max_evaluations'] = not_evaluated.filter(
                SubmissionResult.evaluation_tries >=
                EvaluationService.MAX_EVALUATION_TRIES)
            queries['scoring'] = evaluated.filter(
                not_(SubmissionResult.filter_scored()))
            queries['scored'] = evaluated.filter(
                SubmissionResult.filter_scored())
            queries['total'] = base_query

            stats = {}
            keys = queries.keys()
            results = queries[keys[0]].union_all(
                *(queries[key] for key in keys[1:])).all()

        for i in range(len(keys)):
            stats[keys[i]] = results[i][0]
        stats['invalid'] = 2 * stats['total'] - sum(stats.itervalues())

        return stats
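
The 'invalid' line at the end is worth unpacking: the seven status buckets are meant to partition 'total', so summing every entry of stats (the buckets plus 'total' itself) should come to exactly 2 * total, and whatever is missing from that identity is reported as 'invalid'. A tiny worked check with made-up numbers:

# Made-up counts, only to illustrate the bookkeeping behind 'invalid'.
stats = {'compiling': 5, 'max_compilations': 0, 'compilation_fail': 10,
         'evaluating': 3, 'max_evaluations': 0, 'scoring': 1, 'scored': 79,
         'total': 100}
# The buckets sum to 98 while total is 100, so two results fit no status.
stats['invalid'] = 2 * stats['total'] - sum(stats.values())
assert stats['invalid'] == 2
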
Example #4
import logging

from cms.db import Dataset, SubmissionResult, Task, UserTestResult


logger = logging.getLogger(__name__)


MAX_COMPILATION_TRIES = 3
MAX_EVALUATION_TRIES = 3
MAX_USER_TEST_COMPILATION_TRIES = 3
MAX_USER_TEST_EVALUATION_TRIES = 3


FILTER_SUBMISSION_DATASETS_TO_JUDGE = (
    (Dataset.id == Task.active_dataset_id) |
    (Dataset.autojudge.is_(True))
)
FILTER_SUBMISSION_RESULTS_TO_COMPILE = (
    (~SubmissionResult.filter_compiled()) &
    (SubmissionResult.compilation_tries < MAX_COMPILATION_TRIES)
)
FILTER_SUBMISSION_RESULTS_TO_EVALUATE = (
    SubmissionResult.filter_compilation_succeeded() &
    (~SubmissionResult.filter_evaluated()) &
    (SubmissionResult.evaluation_tries < MAX_EVALUATION_TRIES)
)


FILTER_USER_TEST_DATASETS_TO_JUDGE = (
    (Dataset.id == Task.active_dataset_id) |
    (Dataset.autojudge.is_(True))
)
FILTER_USER_TEST_RESULTS_TO_COMPILE = (
    (~UserTestResult.filter_compiled()) &
    (UserTestResult.compilation_tries < MAX_COMPILATION_TRIES)
)
Example #5
    def submissions_status(contest_id):
        """Returns a dictionary of statistics about the number of
        submissions on a specific status in the given contest.

        There are six statuses: evaluated, compilation failed,
        evaluating, compiling, maximum number of compilation attempts
        reached, and the same for evaluations. The last two should not
        happen and require a check from the admin.

        The status of a submission is checked on its result for the
        active dataset of its task.

        contest_id (int|None): counts are restricted to this contest,
            or None for no restrictions.

        return (dict): statistics on the submissions.

        """
        # TODO: at the moment this counts all submission results for
        # the live datasets. It is interesting to show also numbers
        # for the datasets with autojudge, and for all datasets.
        stats = {}
        with SessionGen() as session:
            base_query = session\
                .query(func.count(SubmissionResult.submission_id))\
                .select_from(SubmissionResult)\
                .join(Dataset)\
                .join(Task, Dataset.task_id == Task.id)\
                .filter(Task.active_dataset_id == SubmissionResult.dataset_id)
            if contest_id is not None:
                base_query = base_query\
                    .filter(Task.contest_id == contest_id)

            compiled = base_query.filter(SubmissionResult.filter_compiled())
            evaluated = compiled.filter(SubmissionResult.filter_evaluated())
            not_compiled = base_query.filter(
                not_(SubmissionResult.filter_compiled()))
            not_evaluated = compiled.filter(
                SubmissionResult.filter_compilation_succeeded(),
                not_(SubmissionResult.filter_evaluated()))

            queries = {}
            queries['compiling'] = not_compiled.filter(
                SubmissionResult.compilation_tries <
                EvaluationService.EvaluationService.MAX_COMPILATION_TRIES)
            queries['max_compilations'] = not_compiled.filter(
                SubmissionResult.compilation_tries >=
                EvaluationService.EvaluationService.MAX_COMPILATION_TRIES)
            queries['compilation_fail'] = base_query.filter(
                SubmissionResult.filter_compilation_failed())
            queries['evaluating'] = not_evaluated.filter(
                SubmissionResult.evaluation_tries <
                EvaluationService.EvaluationService.MAX_EVALUATION_TRIES)
            queries['max_evaluations'] = not_evaluated.filter(
                SubmissionResult.evaluation_tries >=
                EvaluationService.EvaluationService.MAX_EVALUATION_TRIES)
            queries['scoring'] = evaluated.filter(
                not_(SubmissionResult.filter_scored()))
            queries['scored'] = evaluated.filter(
                SubmissionResult.filter_scored())

            total_query = session\
                .query(func.count(Submission.id))\
                .select_from(Submission)\
                .join(Task, Submission.task_id == Task.id)
            if contest_id is not None:
                total_query = total_query\
                    .filter(Task.contest_id == contest_id)
            queries['total'] = total_query

            stats = {}
            keys = list(iterkeys(queries))
            results = queries[keys[0]].union_all(
                *(queries[key] for key in keys[1:])).all()

        for i, k in enumerate(keys):
            stats[k] = results[i][0]
        stats['compiling'] += 2 * stats['total'] - sum(itervalues(stats))

        return stats
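
For orientation, a minimal sketch of consuming the returned dictionary; the call site below is hypothetical, only the key names follow from the queries dict built above.

# Hypothetical call site for the function above.
stats = submissions_status(contest_id=None)
for key in ('compiling', 'max_compilations', 'compilation_fail',
            'evaluating', 'max_evaluations', 'scoring', 'scored', 'total'):
    print('%-17s %d' % (key, stats[key]))
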
Example #6
    def submissions_status(self):
        """Returns a dictionary of statistics about the number of
        submissions on a specific status. There are seven statuses:
        evaluated, compilation failed, evaluating, compiling, maximum
        number of attempts of compilations reached, the same for
        evaluations, and finally 'I have no idea what's
        happening'. The last three should not happen and require a
        check from the admin.

        The status of a submission is checked on its result for the
        active dataset of its task.

        return (dict): statistics on the submissions.

        """
        # TODO: at the moment this counts all submission results for
        # the live datasets. It is interesting to show also numbers
        # for the datasets with autojudge, and for all datasets.
        stats = {}
        with SessionGen() as session:
            base_query = session\
                .query(func.count(SubmissionResult.submission_id))\
                .select_from(SubmissionResult)\
                .join(Dataset)\
                .join(Task, Dataset.task_id == Task.id)\
                .filter(Task.active_dataset_id == SubmissionResult.dataset_id)\
                .filter(Task.contest_id == self.contest_id)

            compiled = base_query.filter(SubmissionResult.filter_compiled())
            evaluated = compiled.filter(SubmissionResult.filter_evaluated())
            not_compiled = base_query.filter(
                not_(SubmissionResult.filter_compiled()))
            not_evaluated = compiled.filter(
                SubmissionResult.filter_compilation_succeeded(),
                not_(SubmissionResult.filter_evaluated()))

            queries = {}
            queries['compiling'] = not_compiled.filter(
                SubmissionResult.compilation_tries <
                EvaluationService.MAX_COMPILATION_TRIES)
            queries['max_compilations'] = not_compiled.filter(
                SubmissionResult.compilation_tries >=
                EvaluationService.MAX_COMPILATION_TRIES)
            queries['compilation_fail'] = base_query.filter(
                SubmissionResult.filter_compilation_failed())
            queries['evaluating'] = not_evaluated.filter(
                SubmissionResult.evaluation_tries <
                EvaluationService.MAX_EVALUATION_TRIES)
            queries['max_evaluations'] = not_evaluated.filter(
                SubmissionResult.evaluation_tries >=
                EvaluationService.MAX_EVALUATION_TRIES)
            queries['scoring'] = evaluated.filter(
                not_(SubmissionResult.filter_scored()))
            queries['scored'] = evaluated.filter(
                SubmissionResult.filter_scored())
            queries['total'] = base_query

            stats = {}
            keys = queries.keys()
            results = queries[keys[0]].union_all(*(queries[key]
                                                   for key in keys[1:])).all()

        for i in range(len(keys)):
            stats[keys[i]] = results[i][0]
        stats['invalid'] = 2 * stats['total'] - sum(stats.itervalues())

        return stats
Example #7
def compute_contest_metrics(sql_session):

    metrics = {}
    descs = {}

    sub_full_query = sql_session.query(
        Contest.name, Task.name, Team.code, User.username,
        func.count(Submission.id))\
        .select_from(Participation)\
        .filter(not_(Participation.hidden))\
        .outerjoin(Team, Team.id == Participation.team_id)\
        .join(User, User.id == Participation.user_id)\
        .join(Contest, Contest.id == Participation.contest_id)\
        .join(Submission, Submission.participation_id == Participation.id)\
        .join(Task, Task.id == Submission.task_id)\
        .group_by(Contest.id, Task.id, Team.id, User.id)

    sub_official_counts = sub_full_query.filter(Submission.official).all()
    sub_unofficial_counts = sub_full_query.filter(not_(
        Submission.official)).all()

    descs['submissions_total'] = ('gauge', 'status = official | unofficial')
    metrics['submissions_total'] = {}
    for cs, status in [(sub_official_counts, 'official'),
                       (sub_unofficial_counts, 'unofficial')]:
        for c in cs:
            cname, taskname, teamname, uname, count = c
            key = (('contest', cname), ('task', taskname), ('team', teamname),
                   ('user', uname), ('status', status))
            metrics['submissions_total'][key] = count

    res_full_query = sql_session.query(
        Contest.name, Task.name, Team.code, User.username,
        Dataset.description, Dataset.id == Task.active_dataset_id,
        Dataset.autojudge, func.count(SubmissionResult.submission_id))\
        .select_from(Participation)\
        .filter(not_(Participation.hidden))\
        .outerjoin(Team, Team.id == Participation.team_id)\
        .join(User, User.id == Participation.user_id)\
        .join(Contest, Contest.id == Participation.contest_id)\
        .join(Submission, Submission.participation_id == Participation.id)\
        .join(Task, Task.id == Submission.task_id)\
        .join(SubmissionResult, SubmissionResult.submission_id == Submission.id)\
        .join(Dataset, Dataset.id == SubmissionResult.dataset_id)\
        .group_by(Contest.id, Task.id, Team.id, User.id, Dataset.id)

    res_compiling_query = res_full_query.filter(
        not_(SubmissionResult.filter_compiled()))
    res_evaluating_query = res_full_query.filter(
        SubmissionResult.filter_compilation_succeeded(),
        not_(SubmissionResult.filter_evaluated()))
    res_evaluated_query = res_full_query.filter(
        SubmissionResult.filter_compilation_succeeded(),
        SubmissionResult.filter_evaluated())

    res_compiling_ok = res_compiling_query.filter(
        SubmissionResult.compilation_tries <
        EvaluationService.EvaluationService.MAX_COMPILATION_TRIES)\
        .all()
    res_compiling_stop = res_compiling_query.filter(
        SubmissionResult.compilation_tries >=
        EvaluationService.EvaluationService.MAX_COMPILATION_TRIES)\
        .all()
    res_compilation_failed = res_full_query.filter(
        SubmissionResult.filter_compilation_failed())\
        .all()

    res_evaluating_ok = res_evaluating_query.filter(
        SubmissionResult.evaluation_tries <
        EvaluationService.EvaluationService.MAX_EVALUATION_TRIES)\
        .all()
    res_evaluating_stop = res_evaluating_query.filter(
        SubmissionResult.evaluation_tries >=
        EvaluationService.EvaluationService.MAX_EVALUATION_TRIES)\
        .all()
    res_scoring = res_evaluated_query.filter(
        not_(SubmissionResult.filter_scored()))\
        .all()
    res_scored = res_evaluated_query.filter(
        SubmissionResult.filter_scored())\
        .all()

    judgements_list = [
        (res_compiling_ok, 'compiling'),
        (res_compiling_stop, 'stuck_in_compilation'),
        (res_compilation_failed, 'compilation_failed'),
        (res_evaluating_ok, 'evaluating'),
        (res_evaluating_stop, 'stuck_in_evaluation'),
        (res_scoring, 'scoring'),
        (res_scored, 'scored'),
    ]

    status_list = " | ".join(map(lambda l: l[1], judgements_list))

    descs['judgements_total'] = (
        'gauge',
        'status = {}\\ndataset_status = live | active | inactive'.format(
            status_list))
    metrics['judgements_total'] = {}
    for cs, status in judgements_list:
        for c in cs:
            cname, taskname, teamname, uname, ds_desc, ds_live, ds_autojudge, count = c
            ds_status = get_dataset_status(ds_live, ds_autojudge)
            key = (('contest', cname), ('task', taskname), ('team', teamname),
                   ('user', uname), ('dataset', ds_desc),
                   ('dataset_status', ds_status), ('status', status))
            metrics['judgements_total'][key] = count

    question_query = sql_session.query(Contest.name, Team.code, User.username, func.count(Question.id))\
        .select_from(Participation)\
        .filter(not_(Participation.hidden))\
        .outerjoin(Team, Team.id == Participation.team_id)\
        .join(User, User.id == Participation.user_id)\
        .join(Contest, Contest.id == Participation.contest_id)\
        .join(Question, Question.participation_id == Participation.id)\
        .group_by(Contest.id, Team.id, User.id)

    question_answered = question_query.filter(
        Question.reply_timestamp.isnot(None)).all()
    question_ignored = question_query.filter(Question.ignored.is_(True)).all()
    question_pending = question_query.filter(
        Question.reply_timestamp.is_(None), Question.ignored.is_(False)).all()

    question_list = [
        (question_answered, 'answered'),
        (question_ignored, 'ignored'),
        (question_pending, 'pending'),
    ]

    status_list = " | ".join(map(lambda l: l[1], question_list))

    descs['questions_total'] = ('gauge', 'status = {}'.format(status_list))
    metrics['questions_total'] = {}
    for qs, status in question_list:
        for q in qs:
            cname, tname, uname, count = q
            key = (('contest', cname), ('team', tname), ('user', uname),
                   ('status', status))
            metrics['questions_total'][key] = count

    evals = sql_session.query(
        Contest.name, Task.name, Team.code, User.username,
        Dataset.description, Dataset.id == Task.active_dataset_id,
        Dataset.autojudge,
        func.coalesce(func.sum(Evaluation.execution_wall_clock_time), 0.0))\
        .select_from(Participation)\
        .filter(not_(Participation.hidden))\
        .outerjoin(Team, Team.id == Participation.team_id)\
        .join(User, User.id == Participation.user_id)\
        .join(Contest, Contest.id == Participation.contest_id)\
        .join(Submission, Submission.participation_id == Participation.id)\
        .join(Task, Task.id == Submission.task_id)\
        .join(SubmissionResult, SubmissionResult.submission_id == Submission.id)\
        .join(Dataset, Dataset.id == SubmissionResult.dataset_id)\
        .join(Evaluation, Evaluation.submission_id == Submission.id)\
        .filter(Evaluation.dataset_id == Dataset.id)\
        .group_by(Contest.id, Team.id, User.id, Task.id, Dataset.id)\
        .all()

    descs['wall_clock_time_total'] = (
        'gauge', 'dataset_status = live | active | inactive')
    metrics['wall_clock_time_total'] = {}

    for e in evals:
        cname, taskname, teamname, uname, ddesc, ds_live, ds_autojudge, wtime = e
        ds_status = get_dataset_status(ds_live, ds_autojudge)
        key = (('contest', cname), ('task', taskname), ('team', teamname),
               ('user', uname), ('dataset', ddesc), ('dataset_status',
                                                     ds_status))
        metrics['wall_clock_time_total'][key] = wtime

    return (metrics, descs)
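
compute_contest_metrics() only gathers numbers: metrics maps each metric name to a {label-tuple: value} dict and descs maps it to a (type, help) pair. Below is a sketch of how an exporter might render that pair into the Prometheus text exposition format; the renderer and the cms_ prefix are assumptions, only the data shapes come from the function above.

def render_prometheus(metrics, descs):
    """Hypothetical renderer for the (metrics, descs) pair returned by
    compute_contest_metrics(); label escaping is deliberately minimal."""
    lines = []
    for name, series in metrics.items():
        mtype, help_text = descs[name]
        lines.append('# HELP cms_{} {}'.format(name, help_text))
        lines.append('# TYPE cms_{} {}'.format(name, mtype))
        for labels, value in series.items():
            label_str = ','.join('{}="{}"'.format(k, v) for k, v in labels)
            lines.append('cms_{}{{{}}} {}'.format(name, label_str, value))
    return '\n'.join(lines) + '\n'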