Example 1
 def test_get_single_result_rating_result(self):
     single_result_course = mommy.make(Course,
                                       state='published',
                                       is_single_result=True)
     questionnaire = Questionnaire.objects.get(
         name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME)
     contribution = mommy.make(Contribution,
                               contributor=mommy.make(UserProfile),
                               course=single_result_course,
                               questionnaires=[questionnaire],
                               responsible=True,
                               can_edit=True,
                               comment_visibility=Contribution.ALL_COMMENTS)
     mommy.make(RatingAnswerCounter,
                question=questionnaire.questions.first(),
                contribution=contribution,
                answer=1,
                count=1)
     mommy.make(RatingAnswerCounter,
                question=questionnaire.questions.first(),
                contribution=contribution,
                answer=4,
                count=1)
     distribution = calculate_average_distribution(single_result_course)
     self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0))
     rating_result = get_single_result_rating_result(single_result_course)
     self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0))
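The two assertions above show the same data in two forms: rating_result.counts holds the raw per-answer counts, while the distribution normalizes those counts over the total number of answers (two answers here, so each count of 1 becomes 0.5). A minimal illustration of that normalization in plain Python; the helper name normalize_counts is invented for this sketch and is not part of EvaP:

def normalize_counts(counts):
    # Divide each raw count by the total number of answers:
    # (1, 0, 0, 1, 0) -> (0.5, 0, 0, 0.5, 0)
    total = sum(counts)
    return tuple(count / total for count in counts)

assert normalize_counts((1, 0, 0, 1, 0)) == (0.5, 0, 0, 0.5, 0)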
Example 2
 def test_get_single_result_rating_result(self):
     single_result_evaluation = baker.make(Evaluation,
                                           state='published',
                                           is_single_result=True)
     questionnaire = Questionnaire.objects.get(
         name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME)
     contribution = baker.make(
         Contribution,
         contributor=baker.make(UserProfile),
         evaluation=single_result_evaluation,
         questionnaires=[questionnaire],
         can_edit=True,
         textanswer_visibility=Contribution.GENERAL_TEXTANSWERS)
     baker.make(RatingAnswerCounter,
                question=questionnaire.questions.first(),
                contribution=contribution,
                answer=1,
                count=1)
     baker.make(RatingAnswerCounter,
                question=questionnaire.questions.first(),
                contribution=contribution,
                answer=4,
                count=1)
     distribution = calculate_average_distribution(single_result_evaluation)
     self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0))
     rating_result = get_single_result_rating_result(
         single_result_evaluation)
     self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0))
Example 3
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
        participant_counts = evaluations.annotate(
            num_participants=Count("participants")).values_list(
                "num_participants", flat=True)
        voter_counts = evaluations.annotate(
            num_voters=Count("voters")).values_list("num_voters", flat=True)
        course_evaluations_counts = evaluations.annotate(
            num_course_evaluations=Count("course__evaluations")).values_list(
                "num_course_evaluations", flat=True)
        evaluations = (
            evaluations.select_related("course__type").prefetch_related(
                "course__degrees",
                "course__semester",
                "course__responsibles",
            ))
        for evaluation, participant_count, voter_count, course_evaluations_count in zip(
                evaluations, participant_counts, voter_counts,
                course_evaluations_counts):
            if evaluation._participant_count is None:
                evaluation.num_participants = participant_count
                evaluation.num_voters = voter_count
            evaluation.course_evaluations_count = course_evaluations_count
    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(
                evaluation)
            evaluation.avg_grade = distribution_to_grade(
                evaluation.distribution)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(
                evaluation)
    return evaluations
Example 4
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
        participant_counts = evaluations.annotate(num_participants=Count("participants")).values_list("num_participants", flat=True)
        voter_counts = evaluations.annotate(num_voters=Count("voters")).values_list("num_voters", flat=True)
        course_evaluations_counts = evaluations.annotate(num_course_evaluations=Count("course__evaluations")).values_list("num_course_evaluations", flat=True)
        evaluations = (evaluations
            .select_related("course__type")
            .prefetch_related(
                "course__degrees",
                "course__semester",
                "course__responsibles",
            )
        )
        for evaluation, participant_count, voter_count, course_evaluations_count in zip(evaluations, participant_counts, voter_counts, course_evaluations_counts):
            if evaluation._participant_count is None:
                evaluation.num_participants = participant_count
                evaluation.num_voters = voter_count
            evaluation.course_evaluations_count = course_evaluations_count
    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(evaluation)
            evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
    return evaluations
Example 5
def get_evaluations_of_course(course, request):
    course_evaluations = []

    if course.evaluations.count() > 1:
        course_evaluations = [
            evaluation
            for evaluation in course.evaluations.filter(state="published")
            if evaluation.can_be_seen_by(request.user)
        ]
        if request.user.is_reviewer:
            course_evaluations += course.evaluations.filter(
                state__in=['in_evaluation', 'evaluated', 'reviewed'])

        course_evaluations = get_evaluations_with_course_result_attributes(
            course_evaluations)

        for course_evaluation in course_evaluations:
            if course_evaluation.is_single_result:
                course_evaluation.single_result_rating_result = get_single_result_rating_result(
                    course_evaluation)
            else:
                course_evaluation.distribution = calculate_average_distribution(
                    course_evaluation)
                course_evaluation.avg_grade = distribution_to_grade(
                    course_evaluation.distribution)

    return course_evaluations
Example 6
def index(request):
    # retrieve all courses which have evaluations that are not in state "new" and in which the user participates
    courses = Course.objects.filter(
        evaluations__participants=request.user,
        evaluations__state__in=['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
    ).distinct()
    # retrieve all evaluations which the user can see that are not new
    evaluations = [evaluation for course in courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(request.user)]
    for evaluation in evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(evaluation)
                evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
        evaluation.participates_in = request.user in evaluation.participants.all()
        evaluation.voted_for = request.user in evaluation.voters.all()
    evaluations = get_evaluations_with_course_result_attributes(evaluations)
    evaluations.sort(key=lambda evaluation: evaluation.full_name)  # evaluations must be sorted for regrouping them in the template

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        is_active_semester=semester.is_active_semester,
        results_are_archived=semester.results_are_archived,
        grade_documents_are_deleted=semester.grade_documents_are_deleted,
        evaluations=[evaluation for evaluation in evaluations if evaluation.course.semester_id == semester.id]
    ) for semester in semesters]

    template_data = dict(
        semester_list=semester_list,
        can_download_grades=request.user.can_download_grades,
    )
    return render(request, "student_index.html", template_data)
Example 7
File: views.py Project: Onderi/EvaP
def get_courses_with_prefetched_data(courses):
    if isinstance(courses, QuerySet):
        participant_counts = courses.annotate(
            num_participants=Count("participants")).values_list(
                "num_participants", flat=True)
        voter_counts = courses.annotate(
            num_voters=Count("voters")).values_list("num_voters", flat=True)
        courses = (courses.select_related("type").prefetch_related(
            "degrees", "semester",
            Prefetch("contributions",
                     queryset=Contribution.objects.filter(
                         responsible=True).select_related("contributor"),
                     to_attr="responsible_contributions")))
        for course, participant_count, voter_count in zip(
                courses, participant_counts, voter_counts):
            if course._participant_count is None:
                course.num_participants = participant_count
                course.num_voters = voter_count
            course.responsible_contributors = [
                contribution.contributor
                for contribution in course.responsible_contributions
            ]
    for course in courses:
        if not course.is_single_result:
            course.distribution = calculate_average_distribution(course)
            course.avg_grade = distribution_to_grade(course.distribution)
        else:
            course.single_result_rating_result = get_single_result_rating_result(
                course)
    return courses
Example 8
def index(request):
    user = request.user
    show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)

    contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
    own_courses = Course.objects.filter(
        Q(evaluations__state__in=contributor_visible_states) & (
            Q(responsibles=user) |
            Q(evaluations__contributions__contributor=user)
        )
    )
    own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]
    for evaluation in own_evaluations:
        evaluation.contributes_to = evaluation.contributions.filter(contributor=user).exists()

    displayed_evaluations = set(own_evaluations)
    if show_delegated:
        represented_users = user.represented_users.all()
        delegated_courses = Course.objects.filter(
            Q(evaluations__state__in=contributor_visible_states) & (
                Q(responsibles__in=represented_users) |
                Q(
                    evaluations__contributions__role=Contribution.Role.EDITOR,
                    evaluations__contributions__contributor__in=represented_users,
                )
            )
        )
        delegated_evaluations = set(evaluation for course in delegated_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user))
        for evaluation in delegated_evaluations:
            evaluation.delegated_evaluation = True
        displayed_evaluations |= delegated_evaluations - displayed_evaluations
    displayed_evaluations = list(displayed_evaluations)
    displayed_evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name))  # evaluations must be sorted for regrouping them in the template

    for evaluation in displayed_evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(evaluation)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
                evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
            evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        is_active=semester.is_active,
        evaluations=[evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id]
    ) for semester in semesters]

    template_data = dict(
        semester_list=semester_list,
        show_delegated=show_delegated,
        delegate_selection_form=DelegateSelectionForm(),
    )
    return render(request, "contributor_index.html", template_data)
Example 9
def index(request):
    # retrieve all courses which have evaluations that are not in state "new" and in which the user participates
    courses = Course.objects.filter(
        evaluations__participants=request.user,
        evaluations__state__in=[
            'prepared', 'editor_approved', 'approved', 'in_evaluation',
            'evaluated', 'reviewed', 'published'
        ]).distinct().prefetch_related('semester', 'grade_documents', 'type',
                                       'evaluations',
                                       'evaluations__participants',
                                       'evaluations__voters')
    # retrieve all evaluations which the user can see that are not new
    evaluations = [
        evaluation for course in courses
        for evaluation in course.evaluations.all()
        if evaluation.can_be_seen_by(request.user)
    ]
    for evaluation in evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(
                    evaluation)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(
                    evaluation)
                evaluation.distribution = normalized_distribution(
                    evaluation.single_result_rating_result.counts)
            evaluation.avg_grade = distribution_to_grade(
                evaluation.distribution)
        evaluation.participates_in = request.user in evaluation.participants.all()
        evaluation.voted_for = request.user in evaluation.voters.all()
    evaluations = get_evaluations_with_course_result_attributes(evaluations)
    evaluations.sort(
        key=lambda evaluation: (evaluation.course.name, evaluation.name)
    )  # evaluations must be sorted for regrouping them in the template

    semesters = Semester.objects.all()
    semester_list = [
        dict(semester_name=semester.name,
             id=semester.id,
             is_active=semester.is_active,
             results_are_archived=semester.results_are_archived,
             grade_documents_are_deleted=semester.grade_documents_are_deleted,
             evaluations=[
                 evaluation for evaluation in evaluations
                 if evaluation.course.semester_id == semester.id
             ]) for semester in semesters
    ]

    template_data = dict(
        semester_list=semester_list,
        can_download_grades=request.user.can_download_grades,
    )

    return render(request, "student_index.html", template_data)
Example 10
 def test_get_single_result_rating_result(self):
     single_result_evaluation = mommy.make(Evaluation, state='published', is_single_result=True)
     questionnaire = Questionnaire.objects.get(name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME)
     contribution = mommy.make(Contribution, contributor=mommy.make(UserProfile), evaluation=single_result_evaluation, questionnaires=[questionnaire], can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS)
     mommy.make(RatingAnswerCounter, question=questionnaire.questions.first(), contribution=contribution, answer=1, count=1)
     mommy.make(RatingAnswerCounter, question=questionnaire.questions.first(), contribution=contribution, answer=4, count=1)
     distribution = calculate_average_distribution(single_result_evaluation)
     self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0))
     rating_result = get_single_result_rating_result(single_result_evaluation)
     self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0))
Example 11
 def test_get_single_result_rating_result(self):
     single_result_course = mommy.make(Course, state='published', is_single_result=True)
     questionnaire = Questionnaire.objects.get(name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME)
     contribution = mommy.make(Contribution, contributor=mommy.make(UserProfile), course=single_result_course, questionnaires=[questionnaire], responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
     mommy.make(RatingAnswerCounter, question=questionnaire.question_set.first(), contribution=contribution, answer=1, count=1)
     mommy.make(RatingAnswerCounter, question=questionnaire.question_set.first(), contribution=contribution, answer=4, count=1)
     distribution = calculate_average_distribution(single_result_course)
     self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0))
     rating_result = get_single_result_rating_result(single_result_course)
     self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0))
Example 12
def index(request):
    user = request.user
    show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)

    contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
    own_courses = Course.objects.filter(
        Q(evaluations__state__in=contributor_visible_states) & (
            Q(responsibles=user) |
            Q(evaluations__contributions__contributor=user)
        )
    )
    own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]
    for evaluation in own_evaluations:
        evaluation.contributes_to = evaluation.contributions.filter(contributor=user).exists()

    displayed_evaluations = set(own_evaluations)
    if show_delegated:
        represented_users = user.represented_users.all()
        delegated_courses = Course.objects.filter(
            Q(evaluations__state__in=contributor_visible_states) & (
                Q(responsibles__in=represented_users) |
                Q(evaluations__contributions__can_edit=True, evaluations__contributions__contributor__in=represented_users)
            )
        )
        delegated_evaluations = set(evaluation for course in delegated_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user))
        for evaluation in delegated_evaluations:
            evaluation.delegated_evaluation = True
        displayed_evaluations |= delegated_evaluations - displayed_evaluations
    displayed_evaluations = list(displayed_evaluations)
    displayed_evaluations.sort(key=lambda evaluation: evaluation.full_name)  # evaluations must be sorted for regrouping them in the template

    for evaluation in displayed_evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(evaluation)
                evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
    displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        is_active_semester=semester.is_active_semester,
        evaluations=[evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id]
    ) for semester in semesters]

    template_data = dict(
        semester_list=semester_list,
        show_delegated=show_delegated,
        delegate_selection_form=DelegateSelectionForm(),
    )
    return render(request, "contributor_index.html", template_data)
Example 13
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
        evaluations = (evaluations
            .select_related("course__type")
            .prefetch_related(
                "course__degrees",
                "course__semester",
                "course__responsibles",
            )
        )
        evaluations = Evaluation.annotate_with_participant_and_voter_counts(evaluations)

    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(evaluation)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
            evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
        evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    return evaluations
Example 14
def get_courses_with_prefetched_data(courses):
    if isinstance(courses, QuerySet):
        courses = (courses
            .annotate(num_participants=Count("participants", distinct=True), num_voters=Count("voters", distinct=True))
            .select_related("type")
            .prefetch_related(
                "degrees",
                "semester",
                Prefetch("contributions", queryset=Contribution.objects.filter(responsible=True).select_related("contributor"), to_attr="responsible_contributions")
            )
        )
        for course in courses:
            course.responsible_contributors = [contribution.contributor for contribution in course.responsible_contributions]
    for course in courses:
        if not course.is_single_result:
            course.distribution = calculate_average_distribution(course)
            course.avg_grade = distribution_to_grade(course.distribution)
        else:
            course.single_result_rating_result = get_single_result_rating_result(course)
    return courses
Example 15
    def test_get_single_result_rating_result(self):
        single_result_evaluation = baker.make(Evaluation,
                                              state=Evaluation.State.PUBLISHED,
                                              is_single_result=True)
        questionnaire = Questionnaire.single_result_questionnaire()
        contribution = baker.make(
            Contribution,
            contributor=baker.make(UserProfile),
            evaluation=single_result_evaluation,
            questionnaires=[questionnaire],
            role=Contribution.Role.EDITOR,
            textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
        )
        make_rating_answer_counters(questionnaire.questions.first(),
                                    contribution, [1, 0, 0, 1, 0])

        cache_results(single_result_evaluation)
        distribution = calculate_average_distribution(single_result_evaluation)
        self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0))
        rating_result = get_single_result_rating_result(
            single_result_evaluation)
        self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0))
Example 16
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
        # these annotates and the zip below could be replaced by something like this, but it was 2x slower:
        # annotate(num_participants=Coalesce('_participant_count', Count("participants", distinct=True)))
        participant_counts = evaluations.annotate(
            num_participants=Count("participants")).order_by('pk').values_list(
                "num_participants", flat=True)
        voter_counts = evaluations.annotate(
            num_voters=Count("voters")).order_by('pk').values_list(
                "num_voters", flat=True)
        course_evaluations_counts = evaluations.annotate(
            num_course_evaluations=Count("course__evaluations")).order_by(
                'pk').values_list("num_course_evaluations", flat=True)
        evaluations = (
            evaluations.select_related("course__type").prefetch_related(
                "course__degrees",
                "course__semester",
                "course__responsibles",
            ))
        for evaluation, participant_count, voter_count, course_evaluations_count in zip(
                evaluations, participant_counts, voter_counts,
                course_evaluations_counts):
            if evaluation._participant_count is None:
                evaluation.num_participants = participant_count
                evaluation.num_voters = voter_count
            evaluation.course_evaluations_count = course_evaluations_count
    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(
                evaluation)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(
                evaluation)
            evaluation.distribution = normalized_distribution(
                evaluation.single_result_rating_result.counts)
        evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    return evaluations
Example 17
def get_evaluations_of_course(course, request):
    course_evaluations = []

    if course.evaluations.count() > 1:
        course_evaluations = [
            evaluation
            for evaluation in course.evaluations.filter(state=Evaluation.State.PUBLISHED)
            if evaluation.can_be_seen_by(request.user)
        ]
        if request.user.is_reviewer:
            course_evaluations += course.evaluations.filter(
                state__in=[Evaluation.State.IN_EVALUATION, Evaluation.State.EVALUATED, Evaluation.State.REVIEWED]
            )

        course_evaluations = get_evaluations_with_course_result_attributes(course_evaluations)

        for course_evaluation in course_evaluations:
            if course_evaluation.is_single_result:
                course_evaluation.single_result_rating_result = get_single_result_rating_result(course_evaluation)
            else:
                course_evaluation.distribution = calculate_average_distribution(course_evaluation)
                course_evaluation.avg_grade = distribution_to_grade(course_evaluation.distribution)

    return course_evaluations
Example 18
def evaluation_detail(request, semester_id, evaluation_id):
    semester = get_object_or_404(Semester, id=semester_id)
    evaluation = get_object_or_404(semester.evaluations,
                                   id=evaluation_id,
                                   course__semester=semester)

    if not evaluation.can_user_see_results_page(request.user):
        raise PermissionDenied

    evaluation_result = collect_results(evaluation)

    if request.user.is_reviewer:
        view = request.GET.get(
            'view', 'public')  # if parameter is not given, show public view.
    else:
        view = request.GET.get(
            'view', 'full')  # if parameter is not given, show own view.
    if view not in ['public', 'full', 'export']:
        view = 'public'

    view_as_user = request.user
    if view == 'export' and request.user.is_staff:
        view_as_user = UserProfile.objects.get(
            id=int(request.GET.get('contributor_id', request.user.id)))

    represented_users = [view_as_user]
    if view != 'export':
        represented_users += list(view_as_user.represented_users.all())
    # redirect to non-public view if there is none because the results have not been published
    if not evaluation.can_publish_rating_results and view == 'public':
        view = 'full'

    # remove text answers if the user may not see them
    for questionnaire_result in evaluation_result.questionnaire_results:
        for question_result in questionnaire_result.question_results:
            if isinstance(question_result, TextResult):
                question_result.answers = [
                    answer for answer in question_result.answers
                    if user_can_see_textanswer(view_as_user, represented_users,
                                               answer, view)
                ]
        # remove empty TextResults
        questionnaire_result.question_results = [
            result for result in questionnaire_result.question_results
            if not isinstance(result, TextResult) or len(result.answers) > 0
        ]

    # filter empty headings
    for questionnaire_result in evaluation_result.questionnaire_results:
        filtered_question_results = []
        for index, question_result in enumerate(
                questionnaire_result.question_results):
            # filter out if there are no more questions or the next question is also a heading question
            if isinstance(question_result, HeadingResult):
                if index == len(questionnaire_result.question_results) - 1 or isinstance(
                        questionnaire_result.question_results[index + 1], HeadingResult):
                    continue
            filtered_question_results.append(question_result)
        questionnaire_result.question_results = filtered_question_results

    # remove empty questionnaire_results and contribution_results
    for contribution_result in evaluation_result.contribution_results:
        contribution_result.questionnaire_results = [
            questionnaire_result for questionnaire_result in
            contribution_result.questionnaire_results
            if questionnaire_result.question_results
        ]
    evaluation_result.contribution_results = [
        contribution_result
        for contribution_result in evaluation_result.contribution_results
        if contribution_result.questionnaire_results
    ]

    add_warnings(evaluation, evaluation_result)

    # split evaluation_result into different lists
    general_questionnaire_results_top = []
    general_questionnaire_results_bottom = []
    contributor_contribution_results = []
    for contribution_result in evaluation_result.contribution_results:
        if contribution_result.contributor is None:
            for questionnaire_result in contribution_result.questionnaire_results:
                if questionnaire_result.questionnaire.is_below_contributors:
                    general_questionnaire_results_bottom.append(
                        questionnaire_result)
                else:
                    general_questionnaire_results_top.append(
                        questionnaire_result)
        elif view != 'export' or view_as_user.id == contribution_result.contributor.id:
            contributor_contribution_results.append(contribution_result)

    if not contributor_contribution_results:
        general_questionnaire_results_top += general_questionnaire_results_bottom
        general_questionnaire_results_bottom = []

    course_evaluations = []
    if evaluation.course.evaluations.count() > 1:
        course_evaluations = [
            evaluation for evaluation in evaluation.course.evaluations.filter(
                state="published")
            if evaluation.can_user_see_evaluation(request.user)
        ]
        if request.user.is_reviewer:
            course_evaluations += evaluation.course.evaluations.filter(
                state__in=['in_evaluation', 'evaluated', 'reviewed'])
        course_evaluations = get_evaluations_with_course_result_attributes(
            course_evaluations)
        for course_evaluation in course_evaluations:
            if course_evaluation.is_single_result:
                course_evaluation.single_result_rating_result = get_single_result_rating_result(
                    course_evaluation)
            else:
                course_evaluation.distribution = calculate_average_distribution(
                    course_evaluation)
                course_evaluation.avg_grade = distribution_to_grade(
                    course_evaluation.distribution)

    other_contributors = []
    if view == 'export':
        other_contributors = [
            contribution_result.contributor
            for contribution_result in evaluation_result.contribution_results
            if contribution_result.contributor not in [None, view_as_user]
        ]

    # if the evaluation is not published, the rendered results are not cached, so we need to attach distribution
    # information for rendering the distribution bar
    if evaluation.state != 'published':
        evaluation = get_evaluations_with_prefetched_data([evaluation])[0]

    template_data = dict(
        evaluation=evaluation,
        course=evaluation.course,
        course_evaluations=course_evaluations,
        general_questionnaire_results_top=general_questionnaire_results_top,
        general_questionnaire_results_bottom=general_questionnaire_results_bottom,
        contributor_contribution_results=contributor_contribution_results,
        is_reviewer=view_as_user.is_reviewer,
        is_contributor=evaluation.is_user_contributor(view_as_user),
        is_responsible_or_contributor_or_delegate=evaluation.is_user_responsible_or_contributor_or_delegate(view_as_user),
        can_download_grades=view_as_user.can_download_grades,
        view=view,
        view_as_user=view_as_user,
        other_contributors=other_contributors,
    )
    return render(request, "results_evaluation_detail.html", template_data)
Example 19
def evaluation_detail(request, semester_id, evaluation_id):
    semester = get_object_or_404(Semester, id=semester_id)
    evaluation = get_object_or_404(semester.evaluations, id=evaluation_id, course__semester=semester)

    if not evaluation.can_results_page_be_seen_by(request.user):
        raise PermissionDenied

    evaluation_result = collect_results(evaluation)

    if request.user.is_reviewer:
        view = request.GET.get('view', 'public')  # if parameter is not given, show public view.
    else:
        view = request.GET.get('view', 'full')  # if parameter is not given, show own view.
    if view not in ['public', 'full', 'export']:
        view = 'public'

    view_as_user = request.user
    if view == 'export' and request.user.is_staff:
        view_as_user = UserProfile.objects.get(id=int(request.GET.get('contributor_id', request.user.id)))

    represented_users = [view_as_user]
    if view != 'export':
        represented_users += list(view_as_user.represented_users.all())
    # redirect to non-public view if there is none because the results have not been published
    if not evaluation.can_publish_rating_results and view == 'public':
        view = 'full'

    # remove text answers if the user may not see them
    for questionnaire_result in evaluation_result.questionnaire_results:
        for question_result in questionnaire_result.question_results:
            if isinstance(question_result, TextResult):
                question_result.answers = [answer for answer in question_result.answers if can_textanswer_be_seen_by(view_as_user, represented_users, answer, view)]
        # remove empty TextResults
        questionnaire_result.question_results = [result for result in questionnaire_result.question_results if not isinstance(result, TextResult) or len(result.answers) > 0]

    # filter empty headings
    for questionnaire_result in evaluation_result.questionnaire_results:
        filtered_question_results = []
        for index, question_result in enumerate(questionnaire_result.question_results):
            # filter out if there are no more questions or the next question is also a heading question
            if isinstance(question_result, HeadingResult):
                if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult):
                    continue
            filtered_question_results.append(question_result)
        questionnaire_result.question_results = filtered_question_results

    # remove empty questionnaire_results and contribution_results
    for contribution_result in evaluation_result.contribution_results:
        contribution_result.questionnaire_results = [questionnaire_result for questionnaire_result in contribution_result.questionnaire_results if questionnaire_result.question_results]
    evaluation_result.contribution_results = [contribution_result for contribution_result in evaluation_result.contribution_results if contribution_result.questionnaire_results]

    add_warnings(evaluation, evaluation_result)

    # split evaluation_result into different lists
    general_questionnaire_results_top = []
    general_questionnaire_results_bottom = []
    contributor_contribution_results = []
    for contribution_result in evaluation_result.contribution_results:
        if contribution_result.contributor is None:
            for questionnaire_result in contribution_result.questionnaire_results:
                if questionnaire_result.questionnaire.is_below_contributors:
                    general_questionnaire_results_bottom.append(questionnaire_result)
                else:
                    general_questionnaire_results_top.append(questionnaire_result)
        elif view != 'export' or view_as_user.id == contribution_result.contributor.id:
            contributor_contribution_results.append(contribution_result)

    if not contributor_contribution_results:
        general_questionnaire_results_top += general_questionnaire_results_bottom
        general_questionnaire_results_bottom = []

    course_evaluations = []
    if evaluation.course.evaluations.count() > 1:
        course_evaluations = [evaluation for evaluation in evaluation.course.evaluations.filter(state="published") if evaluation.can_be_seen_by(request.user)]
        if request.user.is_reviewer:
            course_evaluations += evaluation.course.evaluations.filter(state__in=['in_evaluation', 'evaluated', 'reviewed'])
        course_evaluations = get_evaluations_with_course_result_attributes(course_evaluations)
        for course_evaluation in course_evaluations:
            if course_evaluation.is_single_result:
                course_evaluation.single_result_rating_result = get_single_result_rating_result(course_evaluation)
            else:
                course_evaluation.distribution = calculate_average_distribution(course_evaluation)
                course_evaluation.avg_grade = distribution_to_grade(course_evaluation.distribution)

    other_contributors = []
    if view == 'export':
        other_contributors = [contribution_result.contributor for contribution_result in evaluation_result.contribution_results if contribution_result.contributor not in [None, view_as_user]]

    # if the evaluation is not published, the rendered results are not cached, so we need to attach distribution
    # information for rendering the distribution bar
    if evaluation.state != 'published':
        evaluation = get_evaluations_with_prefetched_data([evaluation])[0]

    template_data = dict(
        evaluation=evaluation,
        course=evaluation.course,
        course_evaluations=course_evaluations,
        general_questionnaire_results_top=general_questionnaire_results_top,
        general_questionnaire_results_bottom=general_questionnaire_results_bottom,
        contributor_contribution_results=contributor_contribution_results,
        is_reviewer=view_as_user.is_reviewer,
        is_contributor=evaluation.is_user_contributor(view_as_user),
        is_responsible_or_contributor_or_delegate=evaluation.is_user_responsible_or_contributor_or_delegate(view_as_user),
        can_download_grades=view_as_user.can_download_grades,
        view=view,
        view_as_user=view_as_user,
        other_contributors=other_contributors,
    )
    return render(request, "results_evaluation_detail.html", template_data)
Example 20
def index(request):
    query = (Evaluation.objects
        .annotate(participates_in=Exists(Evaluation.objects.filter(id=OuterRef('id'), participants=request.user)))
        .annotate(voted_for=Exists(Evaluation.objects.filter(id=OuterRef('id'), voters=request.user)))

        .filter(~Q(state="new"), course__evaluations__participants=request.user)
        .exclude(state="new")
        .prefetch_related(
            'course', 'course__semester', 'course__grade_documents', 'course__type',
            'course__evaluations', 'course__responsibles', 'course__degrees',
        )
        .distinct()
    )
    query = Evaluation.annotate_with_participant_and_voter_counts(query)
    evaluations = [evaluation for evaluation in query if evaluation.can_be_seen_by(request.user)]

    inner_evaluation_ids = [inner_evaluation.id for evaluation in evaluations for inner_evaluation in evaluation.course.evaluations.all()]
    inner_evaluation_query = Evaluation.objects.filter(pk__in=inner_evaluation_ids)
    inner_evaluation_query = Evaluation.annotate_with_participant_and_voter_counts(inner_evaluation_query)

    evaluations_by_id = {evaluation['id']: evaluation for evaluation in inner_evaluation_query.values()}

    for evaluation in evaluations:
        for inner_evaluation in evaluation.course.evaluations.all():
            inner_evaluation.num_voters = evaluations_by_id[inner_evaluation.id]['num_voters']
            inner_evaluation.num_participants = evaluations_by_id[inner_evaluation.id]['num_participants']

    for evaluation in evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(evaluation)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
                evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
            evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    evaluations = get_evaluations_with_course_result_attributes(evaluations)

    # evaluations must be sorted for regrouping them in the template
    evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name))

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        results_are_archived=semester.results_are_archived,
        grade_documents_are_deleted=semester.grade_documents_are_deleted,
        evaluations=[evaluation for evaluation in evaluations if evaluation.course.semester_id == semester.id]
    ) for semester in semesters]

    unfinished_evaluations_query = (
        Evaluation.objects
        .filter(participants=request.user, state__in=['prepared', 'editor_approved', 'approved', 'in_evaluation'])
        .exclude(voters=request.user)
        .prefetch_related('course__responsibles', 'course__type', 'course__semester')
    )

    unfinished_evaluations_query = Evaluation.annotate_with_participant_and_voter_counts(unfinished_evaluations_query)
    unfinished_evaluations = list(unfinished_evaluations_query)

    # available evaluations come first, ordered by time left for evaluation and the name
    # evaluations in other (visible) states follow by name
    def sorter(evaluation):
        return (
            evaluation.state != 'in_evaluation',
            evaluation.vote_end_date if evaluation.state == 'in_evaluation' else None,
            evaluation.full_name
        )
    unfinished_evaluations.sort(key=sorter)

    template_data = dict(
        semester_list=semester_list,
        can_download_grades=request.user.can_download_grades,
        unfinished_evaluations=unfinished_evaluations,
        evaluation_end_warning_period=settings.EVALUATION_END_WARNING_PERIOD,
    )

    return render(request, "student_index.html", template_data)