Example 1
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
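        # select_related joins course and course.type into the main query;
        # prefetch_related loads the listed relations in separate, batched queries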
        evaluations = evaluations.select_related("course__type").prefetch_related(
            "course__degrees",
            "course__semester",
            "course__responsibles",
        )
        evaluations = Evaluation.annotate_with_participant_and_voter_counts(evaluations)

    annotate_distributions_and_grades(evaluations)

    return evaluations
Example 2
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
        evaluations = (evaluations
            .select_related("course__type")
            .prefetch_related(
                "course__degrees",
                "course__semester",
                "course__responsibles",
            )
        )
        evaluations = Evaluation.annotate_with_participant_and_voter_counts(evaluations)

    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(evaluation)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
            evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
        evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    return evaluations
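
Examples 1 and 2 are two revisions of the same function: in Example 1, the per-evaluation loop shown inline in Example 2 has been extracted into the helper annotate_distributions_and_grades. A minimal sketch of that helper, reconstructed from the loop body above (the actual implementation in the codebase may differ):

def annotate_distributions_and_grades(evaluations):
    # attach a rating distribution and the resulting average grade to each evaluation
    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(evaluation)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
            evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
        evaluation.avg_grade = distribution_to_grade(evaluation.distribution)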
Example 3
def index(request):
    query = (Evaluation.objects
        .annotate(participates_in=Exists(Evaluation.objects.filter(id=OuterRef('id'), participants=request.user)))
        .annotate(voted_for=Exists(Evaluation.objects.filter(id=OuterRef('id'), voters=request.user)))

        .filter(~Q(state="new"), course__evaluations__participants=request.user)
        .exclude(state="new")
        .prefetch_related(
            'course', 'course__semester', 'course__grade_documents', 'course__type',
            'course__evaluations', 'course__responsibles', 'course__degrees',
        )
        .distinct()
    )
    query = Evaluation.annotate_with_participant_and_voter_counts(query)
    evaluations = [evaluation for evaluation in query if evaluation.can_be_seen_by(request.user)]

    # the evaluations prefetched via course__evaluations are separate objects without the
    # participant and voter count annotations, so fetch the counts in a second annotated
    # query and copy them onto each inner evaluation below
    inner_evaluation_ids = [inner_evaluation.id for evaluation in evaluations for inner_evaluation in evaluation.course.evaluations.all()]
    inner_evaluation_query = Evaluation.objects.filter(pk__in=inner_evaluation_ids)
    inner_evaluation_query = Evaluation.annotate_with_participant_and_voter_counts(inner_evaluation_query)

    evaluations_by_id = {evaluation['id']: evaluation for evaluation in inner_evaluation_query.values()}

    for evaluation in evaluations:
        for inner_evaluation in evaluation.course.evaluations.all():
            inner_evaluation.num_voters = evaluations_by_id[inner_evaluation.id]['num_voters']
            inner_evaluation.num_participants = evaluations_by_id[inner_evaluation.id]['num_participants']

    for evaluation in evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(evaluation)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
                evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
            evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    evaluations = get_evaluations_with_course_result_attributes(evaluations)

    # evaluations must be sorted for regrouping them in the template
    evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name))

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        results_are_archived=semester.results_are_archived,
        grade_documents_are_deleted=semester.grade_documents_are_deleted,
        evaluations=[evaluation for evaluation in evaluations if evaluation.course.semester_id == semester.id]
    ) for semester in semesters]

    unfinished_evaluations_query = (
        Evaluation.objects
        .filter(participants=request.user, state__in=['prepared', 'editor_approved', 'approved', 'in_evaluation'])
        .exclude(voters=request.user)
        .prefetch_related('course__responsibles', 'course__type', 'course__semester')
    )

    unfinished_evaluations_query = Evaluation.annotate_with_participant_and_voter_counts(unfinished_evaluations_query)
    unfinished_evaluations = list(unfinished_evaluations_query)

    # available evaluations come first, ordered by time left for evaluation and the name
    # evaluations in other (visible) states follow by name
    def sorter(evaluation):
        return (
            evaluation.state != 'in_evaluation',
            evaluation.vote_end_date if evaluation.state == 'in_evaluation' else None,
            evaluation.full_name
        )
    unfinished_evaluations.sort(key=sorter)

    template_data = dict(
        semester_list=semester_list,
        can_download_grades=request.user.can_download_grades,
        unfinished_evaluations=unfinished_evaluations,
        evaluation_end_warning_period=settings.EVALUATION_END_WARNING_PERIOD,
    )

    return render(request, "student_index.html", template_data)
Example 4
def index(request):
    query = (
        Evaluation.objects
        .annotate(participates_in=Exists(Evaluation.objects.filter(id=OuterRef("id"), participants=request.user)))
        .annotate(voted_for=Exists(Evaluation.objects.filter(id=OuterRef("id"), voters=request.user)))
        .filter(~Q(state=Evaluation.State.NEW), course__evaluations__participants=request.user)
        .exclude(state=Evaluation.State.NEW)
        .prefetch_related(
            "course",
            "course__semester",
            "course__grade_documents",
            "course__type",
            "course__evaluations",
            "course__responsibles",
            "course__degrees",
        )
        .distinct()
    )
    query = Evaluation.annotate_with_participant_and_voter_counts(query)
    evaluations = [
        evaluation for evaluation in query
        if evaluation.can_be_seen_by(request.user)
    ]

    inner_evaluation_ids = [
        inner_evaluation.id for evaluation in evaluations
        for inner_evaluation in evaluation.course.evaluations.all()
    ]
    inner_evaluation_query = Evaluation.objects.filter(pk__in=inner_evaluation_ids)
    inner_evaluation_query = Evaluation.annotate_with_participant_and_voter_counts(inner_evaluation_query)

    evaluations_by_id = {
        evaluation["id"]: evaluation
        for evaluation in inner_evaluation_query.values()
    }

    for evaluation in evaluations:
        for inner_evaluation in evaluation.course.evaluations.all():
            inner_evaluation.num_voters = evaluations_by_id[inner_evaluation.id]["num_voters"]
            inner_evaluation.num_participants = evaluations_by_id[inner_evaluation.id]["num_participants"]

    annotate_distributions_and_grades(e for e in evaluations if e.state == Evaluation.State.PUBLISHED)
    evaluations = get_evaluations_with_course_result_attributes(evaluations)

    # evaluations must be sorted for regrouping them in the template
    evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name))

    semesters = Semester.objects.all()
    semester_list = [
        dict(
            semester_name=semester.name,
            id=semester.id,
            results_are_archived=semester.results_are_archived,
            grade_documents_are_deleted=semester.grade_documents_are_deleted,
            evaluations=[
                evaluation for evaluation in evaluations
                if evaluation.course.semester_id == semester.id
            ],
        ) for semester in semesters
    ]

    unfinished_evaluations_query = (
        Evaluation.objects
        .filter(
            participants=request.user,
            state__in=[
                Evaluation.State.PREPARED,
                Evaluation.State.EDITOR_APPROVED,
                Evaluation.State.APPROVED,
                Evaluation.State.IN_EVALUATION,
            ],
        )
        .exclude(voters=request.user)
        .prefetch_related("course__responsibles", "course__type", "course__semester")
    )

    unfinished_evaluations_query = Evaluation.annotate_with_participant_and_voter_counts(unfinished_evaluations_query)
    unfinished_evaluations = list(unfinished_evaluations_query)

    # available evaluations come first, ordered by time left for evaluation and the name
    # evaluations in other (visible) states follow by name
    def sorter(evaluation):
        return (
            evaluation.state != Evaluation.State.IN_EVALUATION,
            evaluation.vote_end_date
            if evaluation.state == Evaluation.State.IN_EVALUATION else None,
            evaluation.full_name,
        )

    unfinished_evaluations.sort(key=sorter)

    template_data = dict(
        semester_list=semester_list,
        can_download_grades=request.user.can_download_grades,
        unfinished_evaluations=unfinished_evaluations,
        evaluation_end_warning_period=settings.EVALUATION_END_WARNING_PERIOD,
    )

    return render(request, "student_index.html", template_data)
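
Both versions of index() use Django's Exists() together with OuterRef() to annotate each evaluation row with a boolean computed by a correlated subquery, so the membership checks run in the database instead of in Python. A minimal, self-contained sketch of that pattern, assuming the same Evaluation model as in the examples above (the helper name annotate_membership is illustrative, not part of the codebase):

from django.db.models import Exists, OuterRef

def annotate_membership(queryset, user):
    # Evaluation is the same model used by the views above
    # for each row, EXISTS(...) checks whether `user` is among its participants;
    # the result is exposed as a boolean participates_in attribute on each object
    participant_subquery = Evaluation.objects.filter(id=OuterRef("id"), participants=user)
    return queryset.annotate(participates_in=Exists(participant_subquery))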