Example #1
    def test_collect_results_after_user_merge(self):
        """ Asserts that merge_users leaves the results cache in a consistent state. Regression test for #907 """
        contributor = mommy.make(UserProfile)
        main_user = mommy.make(UserProfile)
        student = mommy.make(UserProfile)

        course = mommy.make(Course, state='published', participants=[student])
        questionnaire = mommy.make(Questionnaire)
        mommy.make(Question, questionnaire=questionnaire, type="G")
        mommy.make(Contribution,
                   contributor=contributor,
                   course=course,
                   questionnaires=[questionnaire])

        collect_results(course)

        merge_users(main_user, contributor)

        course_results = collect_results(course)

        for contribution_result in course_results.contribution_results:
            self.assertTrue(
                Contribution.objects.filter(
                    course=course,
                    contributor=contribution_result.contributor).exists())
Example #2
    def test_collect_results_after_user_merge(self):
        """ Asserts that merge_users leaves the results cache in a consistent state. Regression test for #907 """
        contributor = baker.make(UserProfile)
        main_user = baker.make(UserProfile)
        student = baker.make(UserProfile)

        evaluation = baker.make(Evaluation,
                                state='published',
                                participants=[student])
        questionnaire = baker.make(Questionnaire)
        baker.make(Question, questionnaire=questionnaire, type=Question.GRADE)
        baker.make(Contribution,
                   contributor=contributor,
                   evaluation=evaluation,
                   questionnaires=[questionnaire])

        collect_results(evaluation)

        merge_users(main_user, contributor)

        evaluation_results = collect_results(evaluation)

        for contribution_result in evaluation_results.contribution_results:
            self.assertTrue(
                Contribution.objects.filter(
                    evaluation=evaluation,
                    contributor=contribution_result.contributor).exists())
Example #3
    def test_cache_unpublished_evaluation(self):
        evaluation = mommy.make(Evaluation, state='published', _voter_count=0, _participant_count=0)
        collect_results(evaluation)
        evaluation.unpublish()
        evaluation.save()

        self.assertIsNone(caches['results'].get(get_collect_results_cache_key(evaluation)))
Example #4
    def test_caches_published_course(self):
        course = mommy.make(Course, state='published')

        self.assertIsNone(caches['results'].get(get_collect_results_cache_key(course)))

        collect_results(course)

        self.assertIsNotNone(caches['results'].get(get_collect_results_cache_key(course)))
Example #5
    def test_caches_published_evaluation(self):
        evaluation = mommy.make(Evaluation, state='published')

        self.assertIsNone(caches['results'].get(get_collect_results_cache_key(evaluation)))

        collect_results(evaluation)

        self.assertIsNotNone(caches['results'].get(get_collect_results_cache_key(evaluation)))
Example #6
    def test_cache_unpublished_evaluation(self):
        evaluation = mommy.make(Evaluation,
                                state='published',
                                _voter_count=0,
                                _participant_count=0)
        collect_results(evaluation)
        evaluation.unpublish()

        self.assertIsNone(caches['results'].get(
            get_collect_results_cache_key(evaluation)))
Example #7
    def test_caches_published_evaluation(self):
        evaluation = baker.make(Evaluation, state='published')

        self.assertIsNone(caches['results'].get(
            get_collect_results_cache_key(evaluation)))

        collect_results(evaluation)

        self.assertIsNotNone(caches['results'].get(
            get_collect_results_cache_key(evaluation)))
Example #8
    def test_cache_unpublished_course(self):
        course = mommy.make(Course,
                            state='published',
                            _voter_count=0,
                            _participant_count=0)
        collect_results(course)
        course.unpublish()

        self.assertIsNone(caches['results'].get(
            get_collect_results_cache_key(course)))
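Note: the cache tests above all exercise the same contract: collect_results stores its result in the 'results' cache under a key derived from the evaluation (or course, in older versions), only caches published objects, and can be forced to recompute. The following is a minimal sketch of that contract, not EvaP's actual implementation; the key format and the _calculate_results helper are assumptions for illustration.

from django.core.cache import caches

def get_collect_results_cache_key(evaluation):
    # assumed key format, for illustration only
    return 'evap.results.tools.collect_results-{:d}'.format(evaluation.id)

def collect_results(evaluation, force_recalculation=False):
    cache_key = get_collect_results_cache_key(evaluation)
    if not force_recalculation:
        cached_result = caches['results'].get(cache_key)
        if cached_result is not None:
            return cached_result
    result = _calculate_results(evaluation)  # hypothetical helper doing the actual aggregation
    if evaluation.state == 'published':
        # only published objects are cached, matching the assertions above
        caches['results'].set(cache_key, result, timeout=None)
    return result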
Example #9
    def test_text_answer_export(self):
        evaluation = baker.make(Evaluation, can_publish_text_results=True)
        questions = [baker.make(Question, questionnaire__type=t, type=Question.TEXT) for t in Questionnaire.Type.values]

        for idx in [0, 1, 2, 2, 0]:
            baker.make(
                TextAnswer,
                question=questions[idx],
                contribution__evaluation=evaluation,
                contribution__questionnaires=[questions[idx].questionnaire],
                state=TextAnswer.State.PUBLISHED
            )

        evaluation_result = collect_results(evaluation)
        filter_text_answers(evaluation_result)

        results = TextAnswerExcelExporter.InputData(evaluation_result.contribution_results)

        binary_content = BytesIO()
        TextAnswerExcelExporter(evaluation.name, evaluation.course.semester.name,
                                evaluation.course.responsibles_names,
                                results, None).export(binary_content)
        binary_content.seek(0)
        workbook = xlrd.open_workbook(file_contents=binary_content.read())
        sheet = workbook.sheets()[0]

        # Sheet headline
        self.assertEqual(sheet.row_values(0)[0], evaluation.name)
        self.assertEqual(sheet.row_values(1)[0], evaluation.course.semester.name)
        self.assertEqual(sheet.row_values(2)[0], evaluation.course.responsibles_names)

        # Questions are ordered by questionnaire type; within each question, the answers keep their original order
        self.assertEqual(sheet.row_values(3)[0], questions[0].text)
        self.assertEqual(sheet.row_values(5)[0], questions[1].text)
        self.assertEqual(sheet.row_values(6)[0], questions[2].text)
Example #10
    def test_calculation_unipolar_results(self):
        contributor1 = mommy.make(UserProfile)
        student = mommy.make(UserProfile)

        evaluation = mommy.make(Evaluation, state='published', participants=[student, contributor1], voters=[student, contributor1])
        questionnaire = mommy.make(Questionnaire)
        question = mommy.make(Question, questionnaire=questionnaire, type=Question.GRADE)
        contribution1 = mommy.make(Contribution, contributor=contributor1, evaluation=evaluation, questionnaires=[questionnaire])

        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=1, count=5)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=2, count=15)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=3, count=40)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=4, count=60)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=5, count=30)

        evaluation_results = collect_results(evaluation)

        self.assertEqual(len(evaluation_results.questionnaire_results), 1)
        questionnaire_result = evaluation_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

        self.assertEqual(question_result.count_sum, 150)
        self.assertAlmostEqual(question_result.average, float(109) / 30)
        self.assertEqual(question_result.counts, (5, 15, 40, 60, 30))
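The expected values follow directly from the counters created above; a quick sanity check in plain Python (not EvaP code):

import math

counts = (5, 15, 40, 60, 30)  # answers 1 through 5
weighted_sum = sum(answer * count for answer, count in enumerate(counts, start=1))
assert sum(counts) == 150 and weighted_sum == 545
assert math.isclose(weighted_sum / sum(counts), 109 / 30)  # average grade of about 3.63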
Example #11
    def test_calculation_results(self):
        contributor1 = mommy.make(UserProfile)
        student = mommy.make(UserProfile)

        course = mommy.make(Course, state='published', participants=[student, contributor1], voters=[student, contributor1])
        questionnaire = mommy.make(Questionnaire)
        question = mommy.make(Question, questionnaire=questionnaire, type="G")
        contribution1 = mommy.make(Contribution, contributor=contributor1, course=course, questionnaires=[questionnaire])

        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=1, count=5)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=2, count=15)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=3, count=40)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=4, count=60)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=5, count=30)

        course_results = collect_results(course)

        self.assertEqual(len(course_results.questionnaire_results), 1)
        questionnaire_result = course_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

        self.assertEqual(question_result.count_sum, 150)
        self.assertAlmostEqual(question_result.average, float(109) / 30)
        self.assertEqual(question_result.counts, (5, 15, 40, 60, 30))
Example #12
    def handle(self, *args, **options):
        self.stdout.write("Clearing results cache...")
        caches['results'].clear()
        total_count = Course.objects.count()

        self.stdout.write("Calculating results for all courses...")

        self.stdout.ending = None
        progress_bar = ProgressBar(self.stdout, total_count)

        for counter, course in enumerate(Course.objects.all()):
            progress_bar.update(counter + 1)
            collect_results(course)

        self.stdout.write("Prerendering result index page...\n")

        warm_up_template_cache(Course.objects.filter(state='published'))

        self.stdout.write("Results cache has been refreshed.\n")
Example #13
    def test_collect_results_after_user_merge(self):
        """ Asserts that merge_users leaves the results cache in a consistent state. Regression test for #907 """
        contributor = mommy.make(UserProfile)
        main_user = mommy.make(UserProfile)
        student = mommy.make(UserProfile)

        evaluation = mommy.make(Evaluation, state='published', participants=[student])
        questionnaire = mommy.make(Questionnaire)
        mommy.make(Question, questionnaire=questionnaire, type=Question.GRADE)
        mommy.make(Contribution, contributor=contributor, evaluation=evaluation, questionnaires=[questionnaire])

        collect_results(evaluation)

        merge_users(main_user, contributor)

        evaluation_results = collect_results(evaluation)

        for contribution_result in evaluation_results.contribution_results:
            self.assertTrue(Contribution.objects.filter(evaluation=evaluation, contributor=contribution_result.contributor).exists())
Example #14
    def filter_evaluations(semesters, evaluation_states, degrees, course_types,
                           contributor, include_not_enough_voters):
        course_results_exist = False
        evaluations_with_results = list()
        used_questionnaires = set()
        evaluations_filter = Q(course__semester__in=semesters,
                               state__in=evaluation_states,
                               course__degrees__in=degrees,
                               course__type__in=course_types)
        if contributor:
            evaluations_filter = evaluations_filter & (
                Q(course__responsibles__in=[contributor])
                | Q(contributions__contributor__in=[contributor]))
        evaluations = Evaluation.objects.filter(evaluations_filter).distinct()
        for evaluation in evaluations:
            if evaluation.is_single_result:
                continue
            if not evaluation.can_publish_rating_results and not include_not_enough_voters:
                continue
            results = OrderedDict()
            for contribution_result in collect_results(
                    evaluation).contribution_results:
                for questionnaire_result in contribution_result.questionnaire_results:
                    # RatingQuestion.counts is a tuple of integers or None; if the tuple is all zeros, we want to exclude it
                    if all(not question_result.question.is_rating_question
                           or question_result.counts is None
                           or sum(question_result.counts) == 0
                           for question_result in
                           questionnaire_result.question_results):
                        continue
                    if not contributor or contribution_result.contributor is None or contribution_result.contributor == contributor:
                        results.setdefault(
                            questionnaire_result.questionnaire.id,
                            []).extend(questionnaire_result.question_results)
                        used_questionnaires.add(
                            questionnaire_result.questionnaire)
            evaluation.course_evaluations_count = evaluation.course.evaluations.count()
            if evaluation.course_evaluations_count > 1:
                course_results_exist = True
                evaluation.weight_percentage = int((evaluation.weight / sum(
                    evaluation.weight for evaluation in evaluation.course.evaluations.all())) * 100)
                evaluation.course.avg_grade = distribution_to_grade(
                    calculate_average_course_distribution(evaluation.course))
            evaluations_with_results.append((evaluation, results))

        evaluations_with_results.sort(
            key=lambda cr: (cr[0].course.semester.id, cr[0].course.type.order, cr[0].full_name))
        used_questionnaires = sorted(used_questionnaires)

        return evaluations_with_results, used_questionnaires, course_results_exist
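A hypothetical call illustrating the expected arguments and return values of filter_evaluations (the variable values are assumptions):

evaluations_with_results, used_questionnaires, course_results_exist = filter_evaluations(
    semesters=[semester],                  # assumed Semester instance
    evaluation_states=['published'],
    degrees=degrees,                       # assumed iterable of Degree pks or instances
    course_types=course_types,             # assumed iterable of CourseType pks or instances
    contributor=None,                      # no per-contributor filtering
    include_not_enough_voters=False)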
Example #15
    def test_calculation_unipolar_results(self):
        contributor1 = baker.make(UserProfile)
        student = baker.make(UserProfile)

        evaluation = baker.make(Evaluation,
                                state='published',
                                participants=[student, contributor1],
                                voters=[student, contributor1])
        questionnaire = baker.make(Questionnaire)
        question = baker.make(Question,
                              questionnaire=questionnaire,
                              type=Question.GRADE)
        contribution1 = baker.make(Contribution,
                                   contributor=contributor1,
                                   evaluation=evaluation,
                                   questionnaires=[questionnaire])

        baker.make(RatingAnswerCounter,
                   question=question,
                   contribution=contribution1,
                   answer=1,
                   count=5)
        baker.make(RatingAnswerCounter,
                   question=question,
                   contribution=contribution1,
                   answer=2,
                   count=15)
        baker.make(RatingAnswerCounter,
                   question=question,
                   contribution=contribution1,
                   answer=3,
                   count=40)
        baker.make(RatingAnswerCounter,
                   question=question,
                   contribution=contribution1,
                   answer=4,
                   count=60)
        baker.make(RatingAnswerCounter,
                   question=question,
                   contribution=contribution1,
                   answer=5,
                   count=30)

        evaluation_results = collect_results(evaluation)

        self.assertEqual(len(evaluation_results.questionnaire_results), 1)
        questionnaire_result = evaluation_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

        self.assertEqual(question_result.count_sum, 150)
        self.assertAlmostEqual(question_result.average, float(109) / 30)
        self.assertEqual(question_result.counts, (5, 15, 40, 60, 30))
Example #16
    def test_results_cache_refreshed(self):
        contributor = baker.make(UserProfile, first_name="Peter")
        evaluation = baker.make(Evaluation, state="published")
        baker.make(Contribution,
                   contributor=contributor,
                   evaluation=evaluation)

        results_before = collect_results(evaluation)

        form_data = get_form_data_from_instance(UserForm, contributor)
        form_data["first_name"] = "Patrick"
        form = UserForm(form_data, instance=contributor)
        form.save()

        results_after = collect_results(evaluation)

        self.assertEqual(
            results_before.contribution_results[0].contributor.first_name,
            "Peter")

        self.assertEqual(
            results_after.contribution_results[0].contributor.first_name,
            "Patrick")
Example #17
    def save(self, *args, **kw):
        super().save(*args, **kw)
        new_evaluation_list = (
            list(self.instance.evaluations_participating_in.exclude(course__semester=Semester.active_semester()))
            + list(self.cleaned_data.get('evaluations_participating_in')))
        self.instance.evaluations_participating_in.set(new_evaluation_list)

        manager_group = Group.objects.get(name="Manager")
        grade_publisher_group = Group.objects.get(name="Grade publisher")
        reviewer_group = Group.objects.get(name="Reviewer")
        if self.cleaned_data.get('is_manager'):
            self.instance.groups.add(manager_group)
        else:
            self.instance.groups.remove(manager_group)

        if self.cleaned_data.get('is_grade_publisher'):
            self.instance.groups.add(grade_publisher_group)
        else:
            self.instance.groups.remove(grade_publisher_group)

        if self.cleaned_data.get('is_reviewer') and not self.cleaned_data.get('is_manager'):
            self.instance.groups.add(reviewer_group)
        else:
            self.instance.groups.remove(reviewer_group)

        self.instance.is_active = not self.cleaned_data.get('is_inactive')

        # refresh results cache
        for evaluation in Evaluation.objects.filter(
                contributions__contributor=self.instance).distinct():
            if any(attribute in self.changed_data
                   for attribute in ["first_name", "last_name", "title"]):
                collect_results(evaluation, force_recalculation=True)

        self.instance.save()
Example #18
def extract_evaluation_answer_data(request, evaluation):
    # TextAnswerExcelExporter expects a dict mapping each Question to a tuple of the contributor_name and the list of answer strings

    view, view_as_user, represented_users, contributor_id = evaluation_detail_parse_get_parameters(
        request, evaluation)

    evaluation_result = collect_results(evaluation)
    filter_text_answers(evaluation_result)
    remove_textanswers_that_the_user_must_not_see(evaluation_result,
                                                  view_as_user,
                                                  represented_users, view)

    results = TextAnswerExcelExporter.InputData(
        evaluation_result.contribution_results)

    return results, contributor_id
Example #19
    def test_calculation_bipolar_results(self):
        contributor1 = mommy.make(UserProfile)
        student = mommy.make(UserProfile)

        course = mommy.make(Course, state='published', participants=[student, contributor1], voters=[student, contributor1])
        questionnaire = mommy.make(Questionnaire)
        question = mommy.make(Question, questionnaire=questionnaire, type=Question.EASY_DIFFICULT)
        contribution1 = mommy.make(Contribution, contributor=contributor1, course=course, questionnaires=[questionnaire])

        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-3, count=5)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-2, count=5)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-1, count=15)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=0, count=30)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=1, count=25)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=2, count=15)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=3, count=10)

        course_results = collect_results(course)

        self.assertEqual(len(course_results.questionnaire_results), 1)
        questionnaire_result = course_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

        self.assertEqual(question_result.count_sum, 105)
        self.assertAlmostEqual(question_result.average, 2.58730158)
        self.assertEqual(question_result.counts, (5, 5, 15, 30, 25, 15, 10))
        self.assertEqual(question_result.minus_balance_count, 32.5)
        distribution = normalized_distribution(question_result.counts)
        self.assertAlmostEqual(distribution[0], 0.04761904)
        self.assertAlmostEqual(distribution[1], 0.04761904)
        self.assertAlmostEqual(distribution[2], 0.1428571)
        self.assertAlmostEqual(distribution[3], 0.28571428)
        self.assertAlmostEqual(distribution[4], 0.2380952)
        self.assertAlmostEqual(distribution[5], 0.1428571)
        self.assertAlmostEqual(distribution[6], 0.09523809)
Example #20
    def test_calculation_bipolar_results(self):
        contributor1 = mommy.make(UserProfile)
        student = mommy.make(UserProfile)

        evaluation = mommy.make(Evaluation, state='published', participants=[student, contributor1], voters=[student, contributor1])
        questionnaire = mommy.make(Questionnaire)
        question = mommy.make(Question, questionnaire=questionnaire, type=Question.EASY_DIFFICULT)
        contribution1 = mommy.make(Contribution, contributor=contributor1, evaluation=evaluation, questionnaires=[questionnaire])

        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-3, count=5)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-2, count=5)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-1, count=15)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=0, count=30)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=1, count=25)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=2, count=15)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=3, count=10)

        evaluation_results = collect_results(evaluation)

        self.assertEqual(len(evaluation_results.questionnaire_results), 1)
        questionnaire_result = evaluation_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

        self.assertEqual(question_result.count_sum, 105)
        self.assertAlmostEqual(question_result.average, 2.58730158)
        self.assertEqual(question_result.counts, (5, 5, 15, 30, 25, 15, 10))
        self.assertEqual(question_result.minus_balance_count, 32.5)
        distribution = normalized_distribution(question_result.counts)
        self.assertAlmostEqual(distribution[0], 0.04761904)
        self.assertAlmostEqual(distribution[1], 0.04761904)
        self.assertAlmostEqual(distribution[2], 0.1428571)
        self.assertAlmostEqual(distribution[3], 0.28571428)
        self.assertAlmostEqual(distribution[4], 0.2380952)
        self.assertAlmostEqual(distribution[5], 0.1428571)
        self.assertAlmostEqual(distribution[6], 0.09523809)
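The expected average is consistent with mapping each bipolar answer onto the unipolar grade scale by its distance from the neutral answer, i.e. grade = 1 + (4/3) * |answer|; this mapping is inferred from the asserted numbers, not quoted from EvaP's source:

import math

counts = (5, 5, 15, 30, 25, 15, 10)  # answers -3 through 3
weighted_distance = sum(abs(answer) * count for answer, count in zip(range(-3, 4), counts))
assert sum(counts) == 105 and weighted_distance == 125
average_grade = 1 + (4 / 3) * (weighted_distance / sum(counts))
assert math.isclose(average_grade, 2.58730158, rel_tol=1e-7)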
Example #21
def course_detail(request, semester_id, course_id):
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.course_set, id=course_id, semester=semester)

    if not course.can_user_see_results_page(request.user):
        raise PermissionDenied

    course_result = collect_results(course)

    if request.user.is_reviewer:
        public_view = request.GET.get('public_view') != 'false'  # if parameter is not given, show public view.
    else:
        public_view = request.GET.get('public_view') == 'true'  # if parameter is not given, show own view.

    # redirect to non-public view if there is none because the results have not been published
    if not course.can_publish_rating_results:
        public_view = False

    represented_users = list(request.user.represented_users.all()) + [request.user]

    # remove text answers if the user may not see them
    for questionnaire_result in course_result.questionnaire_results:
        for question_result in questionnaire_result.question_results:
            if isinstance(question_result, TextResult):
                question_result.answers = [answer for answer in question_result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]
        # remove empty TextResults
        questionnaire_result.question_results = [result for result in questionnaire_result.question_results if not isinstance(result, TextResult) or len(result.answers) > 0]

    # filter empty headings
    for questionnaire_result in course_result.questionnaire_results:
        filtered_question_results = []
        for index, question_result in enumerate(questionnaire_result.question_results):
            # filter out if there are no more questions or the next question is also a heading question
            if isinstance(question_result, HeadingResult):
                if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult):
                    continue
            filtered_question_results.append(question_result)
        questionnaire_result.question_results = filtered_question_results

    # remove empty questionnaire_results and contribution_results
    for contribution_result in course_result.contribution_results:
        contribution_result.questionnaire_results = [questionnaire_result for questionnaire_result in contribution_result.questionnaire_results if questionnaire_result.question_results]
    course_result.contribution_results = [contribution_result for contribution_result in course_result.contribution_results if contribution_result.questionnaire_results]

    add_warnings(course, course_result)

    # split course_result into different lists
    course_questionnaire_results_top = []
    course_questionnaire_results_bottom = []
    contributor_contribution_results = []
    for contribution_result in course_result.contribution_results:
        if contribution_result.contributor is None:
            for questionnaire_result in contribution_result.questionnaire_results:
                if questionnaire_result.questionnaire.is_below_contributors:
                    course_questionnaire_results_bottom.append(questionnaire_result)
                else:
                    course_questionnaire_results_top.append(questionnaire_result)
        else:
            contributor_contribution_results.append(contribution_result)

    if not contributor_contribution_results:
        course_questionnaire_results_top += course_questionnaire_results_bottom
        course_questionnaire_results_bottom = []

    course.distribution = calculate_average_distribution(course)
    course.avg_grade = distribution_to_grade(course.distribution)

    template_data = dict(
            course=course,
            course_questionnaire_results_top=course_questionnaire_results_top,
            course_questionnaire_results_bottom=course_questionnaire_results_bottom,
            contributor_contribution_results=contributor_contribution_results,
            reviewer=request.user.is_reviewer,
            contributor=course.is_user_contributor_or_delegate(request.user),
            can_download_grades=request.user.can_download_grades,
            public_view=public_view)
    return render(request, "results_course_detail.html", template_data)
Example #22
def merge_users(main_user, other_user, preview=False):
    """Merges other_user into main_user"""

    merged_user = dict()
    merged_user['username'] = main_user.username
    merged_user['is_active'] = main_user.is_active or other_user.is_active
    merged_user['title'] = main_user.title if main_user.title else other_user.title or ""
    merged_user['first_name'] = main_user.first_name if main_user.first_name else other_user.first_name or ""
    merged_user['last_name'] = main_user.last_name if main_user.last_name else other_user.last_name or ""
    merged_user['email'] = main_user.email if main_user.email else other_user.email or None

    merged_user['groups'] = Group.objects.filter(user__in=[main_user, other_user]).distinct()
    merged_user['is_superuser'] = main_user.is_superuser or other_user.is_superuser
    merged_user['delegates'] = UserProfile.objects.filter(represented_users__in=[main_user, other_user]).distinct()
    merged_user['represented_users'] = UserProfile.objects.filter(delegates__in=[main_user, other_user]).distinct()
    merged_user['cc_users'] = UserProfile.objects.filter(ccing_users__in=[main_user, other_user]).distinct()
    merged_user['ccing_users'] = UserProfile.objects.filter(cc_users__in=[main_user, other_user]).distinct()

    errors = []
    warnings = []
    if any(contribution.course in [contribution.course for contribution in main_user.get_sorted_contributions()] for contribution in other_user.get_sorted_contributions()):
        errors.append('contributions')
    if any(course in main_user.get_sorted_courses_participating_in() for course in other_user.get_sorted_courses_participating_in()):
        errors.append('courses_participating_in')
    if any(course in main_user.get_sorted_courses_voted_for() for course in other_user.get_sorted_courses_voted_for()):
        errors.append('courses_voted_for')

    if main_user.reward_point_grantings.all().exists() and other_user.reward_point_grantings.all().exists():
        warnings.append('rewards')

    merged_user['contributions'] = Contribution.objects.filter(contributor__in=[main_user, other_user]).order_by('course__semester__created_at', 'course__name_de')
    merged_user['courses_participating_in'] = Course.objects.filter(participants__in=[main_user, other_user]).order_by('semester__created_at', 'name_de')
    merged_user['courses_voted_for'] = Course.objects.filter(voters__in=[main_user, other_user]).order_by('semester__created_at', 'name_de')

    merged_user['reward_point_grantings'] = main_user.reward_point_grantings.all() if main_user.reward_point_grantings.all().exists() else other_user.reward_point_grantings.all()
    merged_user['reward_point_redemptions'] = main_user.reward_point_redemptions.all() if main_user.reward_point_redemptions.all().exists() else other_user.reward_point_redemptions.all()

    if preview or errors:
        return merged_user, errors, warnings

    # update last_modified_user for courses and grade documents
    Course.objects.filter(last_modified_user=other_user).update(last_modified_user=main_user)
    GradeDocument.objects.filter(last_modified_user=other_user).update(last_modified_user=main_user)

    # email must not exist twice. other_user can't be deleted before contributions have been changed
    other_user.email = ""
    other_user.save()

    # update values for main user
    for key, value in merged_user.items():
        attr = getattr(main_user, key)
        if hasattr(attr, "set"):
            attr.set(value)  # use the 'set' method for e.g. many-to-many relations
        else:
            setattr(main_user, key, value)  # use direct assignment for everything else
    main_user.save()

    # delete rewards
    other_user.reward_point_grantings.all().delete()
    other_user.reward_point_redemptions.all().delete()

    # refresh results cache
    for course in Course.objects.filter(contributions__contributor=main_user).distinct():
        collect_results(course, force_recalculation=True)

    # delete other_user
    other_user.delete()

    return merged_user, errors, warnings
Example #23
    def export(self,
               response,
               semesters,
               selection_list,
               include_not_enough_voters=False,
               include_unpublished=False,
               contributor=None):
        # the excel file we're creating here is rather complex. However, from the nature of a single
        # file, it doesn't make much sense to split up the code into different methods as they will
        # always be tightly coupled based on the layout of the sheet. We thus think that one big method
        # containing the business logic is okay here
        # pylint: disable=too-many-locals, too-many-nested-blocks, too-many-branches, too-many-statements
        workbook = xlwt.Workbook()
        self.init_styles(workbook)
        counter = 1
        course_results_exist = False

        for degrees, course_types in selection_list:
            self.sheet = workbook.add_sheet("Sheet " + str(counter))
            counter += 1
            self.row = 0
            self.col = 0

            evaluations_with_results = list()
            evaluation_states = ['published']
            if include_unpublished:
                evaluation_states.extend(['evaluated', 'reviewed'])

            used_questionnaires = set()
            evaluations_filter = Q(course__semester__in=semesters,
                                   state__in=evaluation_states,
                                   course__degrees__in=degrees,
                                   course__type__in=course_types)
            if contributor:
                evaluations_filter = evaluations_filter & (
                    Q(course__responsibles__in=[contributor])
                    | Q(contributions__contributor__in=[contributor]))
            evaluations = Evaluation.objects.filter(
                evaluations_filter).distinct()
            for evaluation in evaluations:
                if evaluation.is_single_result:
                    continue
                if not evaluation.can_publish_rating_results and not include_not_enough_voters:
                    continue
                results = OrderedDict()
                for contribution_result in collect_results(
                        evaluation).contribution_results:
                    for questionnaire_result in contribution_result.questionnaire_results:
                        if all(not question_result.question.is_rating_question
                               or question_result.counts is None
                               for question_result in
                               questionnaire_result.question_results):
                            continue
                        if not contributor or contribution_result.contributor is None or contribution_result.contributor == contributor:
                            results.setdefault(
                                questionnaire_result.questionnaire.id,
                                []).extend(
                                    questionnaire_result.question_results)
                            used_questionnaires.add(
                                questionnaire_result.questionnaire)
                evaluation.course_evaluations_count = evaluation.course.evaluations.count()
                if evaluation.course_evaluations_count > 1:
                    course_results_exist = True
                    evaluation.weight_percentage = int((evaluation.weight / sum(
                        evaluation.weight for evaluation in evaluation.course.evaluations.all())) * 100)
                    evaluation.course.avg_grade = distribution_to_grade(
                        calculate_average_course_distribution(evaluation.course))
                evaluations_with_results.append((evaluation, results))

            evaluations_with_results.sort(
                key=lambda cr: (cr[0].course.semester.id, cr[0].course.type.order, cr[0].full_name))
            used_questionnaires = sorted(used_questionnaires)

            export_name = "Evaluation"
            if contributor:
                export_name += "\n{}".format(contributor.full_name)
            elif len(semesters) == 1:
                export_name += "\n{}".format(semesters[0].name)
            degree_names = [
                degree.name for degree in Degree.objects.filter(pk__in=degrees)
            ]
            course_type_names = [
                course_type.name for course_type in CourseType.objects.filter(
                    pk__in=course_types)
            ]
            writec(
                self,
                _("{}\n\n{}\n\n{}").format(export_name,
                                           ", ".join(degree_names),
                                           ", ".join(course_type_names)),
                "headline")

            for evaluation, results in evaluations_with_results:
                title = evaluation.full_name
                if len(semesters) > 1:
                    title += "\n{}".format(evaluation.course.semester.name)
                responsible_names = [
                    responsible.full_name
                    for responsible in evaluation.course.responsibles.all()
                ]
                title += "\n{}".format(", ".join(responsible_names))
                writec(self, title, "evaluation")

            writen(self, _("Degrees"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(
                    self, "\n".join(
                        [d.name for d in evaluation.course.degrees.all()]),
                    "degree")

            writen(self, _("Course Type"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.course.type.name, "border_left_right")

            writen(self)
            for evaluation, results in evaluations_with_results:
                self.write_empty_cell_with_borders()

            for questionnaire in used_questionnaires:
                if contributor and questionnaire.type == Questionnaire.CONTRIBUTOR:
                    writen(
                        self, "{} ({})".format(questionnaire.name,
                                               contributor.full_name), "bold")
                else:
                    writen(self, questionnaire.name, "bold")
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

                filtered_questions = self.filter_text_and_heading_questions(
                    questionnaire.questions.all())

                for question in filtered_questions:
                    if question.is_heading_question:
                        writen(self, question.text, "italic")
                    else:
                        writen(self, question.text)

                    for evaluation, results in evaluations_with_results:
                        if questionnaire.id not in results or question.is_heading_question:
                            self.write_empty_cell_with_borders()
                            continue
                        qn_results = results[questionnaire.id]
                        values = []
                        count_sum = 0
                        approval_count = 0

                        for grade_result in qn_results:
                            if grade_result.question.id == question.id:
                                if grade_result.has_answers:
                                    values.append(grade_result.average *
                                                  grade_result.count_sum)
                                    count_sum += grade_result.count_sum
                                    if grade_result.question.is_yes_no_question:
                                        approval_count += grade_result.approval_count
                        if values:
                            avg = sum(values) / count_sum

                            if question.is_yes_no_question:
                                percent_approval = approval_count / count_sum if count_sum > 0 else 0
                                writec(self, "{:.0%}".format(percent_approval),
                                       self.grade_to_style(avg))
                            else:
                                writec(self, avg, self.grade_to_style(avg))
                        else:
                            self.write_empty_cell_with_borders()
                writen(self)
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

            writen(self, _("Overall Average Grade"), "bold")
            for evaluation, results in evaluations_with_results:
                avg = distribution_to_grade(
                    calculate_average_distribution(evaluation))
                if avg:
                    writec(self, avg, self.grade_to_style(avg))
                else:
                    self.write_empty_cell_with_borders()

            writen(self, _("Total voters/Total participants"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(
                    self, "{}/{}".format(evaluation.num_voters,
                                         evaluation.num_participants),
                    "total_voters")

            writen(self, _("Evaluation rate"), "bold")
            for evaluation, results in evaluations_with_results:
                # round down like in progress bar
                percentage_participants = int(
                    (evaluation.num_voters / evaluation.num_participants) *
                    100) if evaluation.num_participants > 0 else 0
                writec(self, "{}%".format(percentage_participants),
                       "evaluation_rate")

            if course_results_exist:
                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self, _("Evaluation weight"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self,
                               "{}%".format(evaluation.weight_percentage),
                               "evaluation_weight")
                    else:
                        self.write_empty_cell()

                writen(self, _("Course Grade"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        if evaluation.course.avg_grade:
                            writec(
                                self, evaluation.course.avg_grade,
                                self.grade_to_style(
                                    evaluation.course.avg_grade))
                        else:
                            self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self, None, "border_top")
                    else:
                        self.write_empty_cell()

        workbook.save(response)
Example #24
def merge_users(main_user, other_user, preview=False):
    """Merges other_user into main_user"""
    # There is a lot to do here. However, splitting it up into subtasks doesn't make much sense.
    # pylint: disable=too-many-statements

    merged_user = dict()
    merged_user['username'] = main_user.username
    merged_user['is_active'] = main_user.is_active or other_user.is_active
    merged_user['title'] = main_user.title or other_user.title or ""
    merged_user['first_name'] = main_user.first_name or other_user.first_name or ""
    merged_user['last_name'] = main_user.last_name or other_user.last_name or ""
    merged_user['email'] = main_user.email or other_user.email or None

    merged_user['groups'] = Group.objects.filter(
        user__in=[main_user, other_user]).distinct()
    merged_user['is_superuser'] = main_user.is_superuser or other_user.is_superuser
    merged_user['is_proxy_user'] = main_user.is_proxy_user or other_user.is_proxy_user
    merged_user['delegates'] = UserProfile.objects.filter(
        represented_users__in=[main_user, other_user]).distinct()
    merged_user['represented_users'] = UserProfile.objects.filter(
        delegates__in=[main_user, other_user]).distinct()
    merged_user['cc_users'] = UserProfile.objects.filter(
        ccing_users__in=[main_user, other_user]).distinct()
    merged_user['ccing_users'] = UserProfile.objects.filter(
        cc_users__in=[main_user, other_user]).distinct()

    errors = []
    warnings = []
    courses_main_user_is_responsible_for = main_user.get_sorted_courses_responsible_for()
    if any(course in courses_main_user_is_responsible_for
           for course in other_user.get_sorted_courses_responsible_for()):
        errors.append('courses_responsible_for')
    if any(contribution.evaluation in [
            contribution.evaluation
            for contribution in main_user.get_sorted_contributions()
    ] for contribution in other_user.get_sorted_contributions()):
        errors.append('contributions')
    if any(evaluation in main_user.get_sorted_evaluations_participating_in()
           for evaluation in
           other_user.get_sorted_evaluations_participating_in()):
        errors.append('evaluations_participating_in')
    if any(evaluation in main_user.get_sorted_evaluations_voted_for()
           for evaluation in other_user.get_sorted_evaluations_voted_for()):
        errors.append('evaluations_voted_for')

    if main_user.reward_point_grantings.all().exists() and other_user.reward_point_grantings.all().exists():
        warnings.append('rewards')

    merged_user['courses_responsible_for'] = Course.objects.filter(
        responsibles__in=[main_user, other_user]).order_by(
            'semester__created_at', 'name_de')
    merged_user['contributions'] = Contribution.objects.filter(
        contributor__in=[main_user, other_user]).order_by(
            'evaluation__course__semester__created_at', 'evaluation__name_de')
    merged_user['evaluations_participating_in'] = Evaluation.objects.filter(
        participants__in=[main_user, other_user]).order_by(
            'course__semester__created_at', 'name_de')
    merged_user['evaluations_voted_for'] = Evaluation.objects.filter(
        voters__in=[main_user, other_user]).order_by(
            'course__semester__created_at', 'name_de')

    merged_user['reward_point_grantings'] = main_user.reward_point_grantings.all() or other_user.reward_point_grantings.all()
    merged_user['reward_point_redemptions'] = main_user.reward_point_redemptions.all() or other_user.reward_point_redemptions.all()

    if preview or errors:
        return merged_user, errors, warnings

    # update responsibility
    for course in Course.objects.filter(responsibles__in=[other_user]):
        responsibles = list(course.responsibles.all())
        responsibles.remove(other_user)
        responsibles.append(main_user)
        course.responsibles.set(responsibles)

    # update last_modified_user for evaluations and grade documents
    Course.objects.filter(last_modified_user=other_user).update(
        last_modified_user=main_user)
    Evaluation.objects.filter(last_modified_user=other_user).update(
        last_modified_user=main_user)
    GradeDocument.objects.filter(last_modified_user=other_user).update(
        last_modified_user=main_user)

    # email must not exist twice. other_user can't be deleted before contributions have been changed
    other_user.email = ""
    other_user.save()

    # update values for main user
    for key, value in merged_user.items():
        attr = getattr(main_user, key)
        if hasattr(attr, "set"):
            attr.set(value)  # use the 'set' method for e.g. many-to-many relations
        else:
            setattr(main_user, key, value)  # use direct assignment for everything else
    main_user.save()

    # delete rewards
    other_user.reward_point_grantings.all().delete()
    other_user.reward_point_redemptions.all().delete()

    # refresh results cache
    for evaluation in Evaluation.objects.filter(
            contributions__contributor=main_user).distinct():
        collect_results(evaluation, force_recalculation=True)

    # delete other_user
    other_user.delete()

    return merged_user, errors, warnings
Example #25
def warmup_cache_on_publish(instance, target, **_kwargs):
    if target == 'published':
        from evap.results.tools import collect_results
        from evap.results.views import warm_up_template_cache
        collect_results(instance)
        warm_up_template_cache([instance])
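This receiver presumably hooks into django-fsm's post_transition signal, which fires after a state transition and passes the target state name, so results are computed and the template cache is warmed the moment an instance is published. A sketch of the assumed wiring (post_transition is django-fsm's public API; using Evaluation as the sender is an assumption):

from django_fsm.signals import post_transition

post_transition.connect(warmup_cache_on_publish, sender=Evaluation)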
Example #26
def evaluation_detail(request, semester_id, evaluation_id):
    # pylint: disable=too-many-locals
    semester = get_object_or_404(Semester, id=semester_id)
    evaluation = get_object_or_404(semester.evaluations,
                                   id=evaluation_id,
                                   course__semester=semester)

    view, view_as_user, represented_users, contributor_id = evaluation_detail_parse_get_parameters(
        request, evaluation)

    evaluation_result = collect_results(evaluation)
    remove_textanswers_that_the_user_must_not_see(evaluation_result,
                                                  view_as_user,
                                                  represented_users, view)
    exclude_empty_headings(evaluation_result)
    remove_empty_questionnaire_and_contribution_results(evaluation_result)
    add_warnings(evaluation, evaluation_result)

    top_results, bottom_results, contributor_results = split_evaluation_result_into_top_bottom_and_contributor(
        evaluation_result, view_as_user, view)

    course_evaluations = get_evaluations_of_course(evaluation.course, request)
    course_evaluations.sort(key=lambda evaluation: evaluation.name)

    contributors_with_omitted_results = []
    if view == 'export':
        contributors_with_omitted_results = [
            contribution_result.contributor
            for contribution_result in evaluation_result.contribution_results
            if contribution_result.contributor not in [None, view_as_user]
        ]

    # if the evaluation is not published, the rendered results are not cached, so we need to attach distribution
    # information for rendering the distribution bar
    if evaluation.state != 'published':
        evaluation = get_evaluations_with_course_result_attributes(
            get_evaluations_with_prefetched_data([evaluation]))[0]

    is_responsible_or_contributor_or_delegate = evaluation.is_user_responsible_or_contributor_or_delegate(
        view_as_user)

    template_data = dict(
        evaluation=evaluation,
        course=evaluation.course,
        course_evaluations=course_evaluations,
        general_questionnaire_results_top=top_results,
        general_questionnaire_results_bottom=bottom_results,
        contributor_contribution_results=contributor_results,
        is_reviewer=view_as_user.is_reviewer,
        is_contributor=evaluation.is_user_contributor(view_as_user),
        is_responsible_or_contributor_or_delegate=is_responsible_or_contributor_or_delegate,
        can_download_grades=view_as_user.can_download_grades,
        can_export_text_answers=(view in ("export", "full") and
                                 (view_as_user.is_reviewer or
                                  is_responsible_or_contributor_or_delegate)),
        view=view,
        view_as_user=view_as_user,
        contributors_with_omitted_results=contributors_with_omitted_results,
        contributor_id=contributor_id,
    )
    return render(request, "results_evaluation_detail.html", template_data)
Example #27
def evaluation_detail(request, semester_id, evaluation_id):
    semester = get_object_or_404(Semester, id=semester_id)
    evaluation = get_object_or_404(semester.evaluations,
                                   id=evaluation_id,
                                   course__semester=semester)

    if not evaluation.can_results_page_be_seen_by(request.user):
        raise PermissionDenied

    if request.user.is_reviewer:
        view = request.GET.get(
            'view', 'public')  # if parameter is not given, show public view.
    else:
        view = request.GET.get(
            'view', 'full')  # if parameter is not given, show own view.
    if view not in ['public', 'full', 'export']:
        view = 'public'

    view_as_user = request.user
    if view == 'export' and request.user.is_staff:
        view_as_user = UserProfile.objects.get(
            id=int(request.GET.get('contributor_id', request.user.id)))

    represented_users = [view_as_user]
    if view != 'export':
        represented_users += list(view_as_user.represented_users.all())
    # redirect to non-public view if there is none because the results have not been published
    if not evaluation.can_publish_rating_results and view == 'public':
        view = 'full'

    evaluation_result = collect_results(evaluation)
    remove_textanswers_that_the_user_must_not_see(evaluation_result,
                                                  view_as_user,
                                                  represented_users, view)
    filter_empty_headings(evaluation_result)
    remove_empty_questionnaire_and_contribution_results(evaluation_result)
    add_warnings(evaluation, evaluation_result)

    top_results, bottom_results, contributor_results = split_evaluation_result_into_top_bottom_and_contributor(
        evaluation_result, view_as_user, view)

    course_evaluations = get_evaluations_of_course(evaluation.course, request)
    course_evaluations.sort(key=lambda evaluation: evaluation.name)

    contributors_with_omitted_results = []
    if view == 'export':
        contributors_with_omitted_results = [
            contribution_result.contributor
            for contribution_result in evaluation_result.contribution_results
            if contribution_result.contributor not in [None, view_as_user]
        ]

    # if the evaluation is not published, the rendered results are not cached, so we need to attach distribution
    # information for rendering the distribution bar
    if evaluation.state != 'published':
        evaluation = get_evaluations_with_course_result_attributes(
            get_evaluations_with_prefetched_data([evaluation]))[0]

    template_data = dict(
        evaluation=evaluation,
        course=evaluation.course,
        course_evaluations=course_evaluations,
        general_questionnaire_results_top=top_results,
        general_questionnaire_results_bottom=bottom_results,
        contributor_contribution_results=contributor_results,
        is_reviewer=view_as_user.is_reviewer,
        is_contributor=evaluation.is_user_contributor(view_as_user),
        is_responsible_or_contributor_or_delegate=evaluation.is_user_responsible_or_contributor_or_delegate(view_as_user),
        can_download_grades=view_as_user.can_download_grades,
        view=view,
        view_as_user=view_as_user,
        contributors_with_omitted_results=contributors_with_omitted_results,
    )
    return render(request, "results_evaluation_detail.html", template_data)
Example #28
def course_detail(request, semester_id, course_id):
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.course_set, id=course_id, semester=semester)

    if not course.can_user_see_results_page(request.user):
        raise PermissionDenied

    course_result = collect_results(course)

    if request.user.is_reviewer:
        public_view = request.GET.get('public_view') != 'false'  # if parameter is not given, show public view.
    else:
        public_view = request.GET.get('public_view') == 'true'  # if parameter is not given, show own view.

    # fall back to the non-public view if there is no public view because the results have not been published
    if not course.can_publish_rating_results:
        public_view = False

    represented_users = list(request.user.represented_users.all()) + [request.user]

    # remove text answers if the user may not see them
    for questionnaire_result in course_result.questionnaire_results:
        for question_result in questionnaire_result.question_results:
            if isinstance(question_result, TextResult):
                question_result.answers = [
                    answer for answer in question_result.answers
                    if user_can_see_text_answer(
                        request.user, represented_users, answer, public_view)
                ]
        # remove empty TextResults
        questionnaire_result.question_results = [
            result for result in questionnaire_result.question_results
            if not isinstance(result, TextResult) or len(result.answers) > 0
        ]

    # filter empty headings
    for questionnaire_result in course_result.questionnaire_results:
        filtered_question_results = []
        for index, question_result in enumerate(questionnaire_result.question_results):
            # filter out if there are no more questions or the next question is also a heading question
            if isinstance(question_result, HeadingResult):
                if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult):
                    continue
            filtered_question_results.append(question_result)
        questionnaire_result.question_results = filtered_question_results

    # remove empty questionnaire_results and contribution_results
    for contribution_result in course_result.contribution_results:
        contribution_result.questionnaire_results = [
            questionnaire_result for questionnaire_result in
            contribution_result.questionnaire_results
            if questionnaire_result.question_results
        ]
    course_result.contribution_results = [
        contribution_result
        for contribution_result in course_result.contribution_results
        if contribution_result.questionnaire_results
    ]

    add_warnings(course, course_result)

    # split course_result into different lists
    course_questionnaire_results_top = []
    course_questionnaire_results_bottom = []
    contributor_contribution_results = []
    for contribution_result in course_result.contribution_results:
        if contribution_result.contributor is None:
            for questionnaire_result in contribution_result.questionnaire_results:
                if questionnaire_result.questionnaire.is_below_contributors:
                    course_questionnaire_results_bottom.append(
                        questionnaire_result)
                else:
                    course_questionnaire_results_top.append(
                        questionnaire_result)
        else:
            contributor_contribution_results.append(contribution_result)

    if not contributor_contribution_results:
        course_questionnaire_results_top += course_questionnaire_results_bottom
        course_questionnaire_results_bottom = []

    course.distribution = calculate_average_distribution(course)
    course.avg_grade = distribution_to_grade(course.distribution)

    template_data = dict(
        course=course,
        course_questionnaire_results_top=course_questionnaire_results_top,
        course_questionnaire_results_bottom=course_questionnaire_results_bottom,
        contributor_contribution_results=contributor_contribution_results,
        reviewer=request.user.is_reviewer,
        contributor=course.is_user_contributor_or_delegate(request.user),
        can_download_grades=request.user.can_download_grades,
        public_view=public_view)
    return render(request, "results_course_detail.html", template_data)
Example #29
def evaluation_detail(request, semester_id, evaluation_id):
    semester = get_object_or_404(Semester, id=semester_id)
    evaluation = get_object_or_404(semester.evaluations, id=evaluation_id, course__semester=semester)

    if not evaluation.can_results_page_be_seen_by(request.user):
        raise PermissionDenied

    evaluation_result = collect_results(evaluation)

    if request.user.is_reviewer:
        view = request.GET.get('view', 'public')  # if parameter is not given, show public view.
    else:
        view = request.GET.get('view', 'full')  # if parameter is not given, show own view.
    if view not in ['public', 'full', 'export']:
        view = 'public'

    view_as_user = request.user
    if view == 'export' and request.user.is_staff:
        view_as_user = UserProfile.objects.get(id=int(request.GET.get('contributor_id', request.user.id)))

    represented_users = [view_as_user]
    if view != 'export':
        represented_users += list(view_as_user.represented_users.all())
    # fall back to the full view if there is no public view because the results have not been published
    if not evaluation.can_publish_rating_results and view == 'public':
        view = 'full'

    # remove text answers if the user may not see them
    for questionnaire_result in evaluation_result.questionnaire_results:
        for question_result in questionnaire_result.question_results:
            if isinstance(question_result, TextResult):
                question_result.answers = [answer for answer in question_result.answers if can_textanswer_be_seen_by(view_as_user, represented_users, answer, view)]
        # remove empty TextResults
        questionnaire_result.question_results = [result for result in questionnaire_result.question_results if not isinstance(result, TextResult) or len(result.answers) > 0]

    # filter empty headings
    for questionnaire_result in evaluation_result.questionnaire_results:
        filtered_question_results = []
        for index, question_result in enumerate(questionnaire_result.question_results):
            # filter out if there are no more questions or the next question is also a heading question
            if isinstance(question_result, HeadingResult):
                if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult):
                    continue
            filtered_question_results.append(question_result)
        questionnaire_result.question_results = filtered_question_results

    # remove empty questionnaire_results and contribution_results
    for contribution_result in evaluation_result.contribution_results:
        contribution_result.questionnaire_results = [questionnaire_result for questionnaire_result in contribution_result.questionnaire_results if questionnaire_result.question_results]
    evaluation_result.contribution_results = [contribution_result for contribution_result in evaluation_result.contribution_results if contribution_result.questionnaire_results]

    add_warnings(evaluation, evaluation_result)

    # split evaluation_result into different lists
    general_questionnaire_results_top = []
    general_questionnaire_results_bottom = []
    contributor_contribution_results = []
    for contribution_result in evaluation_result.contribution_results:
        if contribution_result.contributor is None:
            for questionnaire_result in contribution_result.questionnaire_results:
                if questionnaire_result.questionnaire.is_below_contributors:
                    general_questionnaire_results_bottom.append(questionnaire_result)
                else:
                    general_questionnaire_results_top.append(questionnaire_result)
        elif view != 'export' or view_as_user.id == contribution_result.contributor.id:
            contributor_contribution_results.append(contribution_result)

    if not contributor_contribution_results:
        general_questionnaire_results_top += general_questionnaire_results_bottom
        general_questionnaire_results_bottom = []

    course_evaluations = []
    if evaluation.course.evaluations.count() > 1:
        course_evaluations = [evaluation for evaluation in evaluation.course.evaluations.filter(state="published") if evaluation.can_be_seen_by(request.user)]
        if request.user.is_reviewer:
            course_evaluations += evaluation.course.evaluations.filter(state__in=['in_evaluation', 'evaluated', 'reviewed'])
        course_evaluations = get_evaluations_with_course_result_attributes(course_evaluations)
        for course_evaluation in course_evaluations:
            if course_evaluation.is_single_result:
                course_evaluation.single_result_rating_result = get_single_result_rating_result(course_evaluation)
            else:
                course_evaluation.distribution = calculate_average_distribution(course_evaluation)
                course_evaluation.avg_grade = distribution_to_grade(course_evaluation.distribution)

    other_contributors = []
    if view == 'export':
        other_contributors = [contribution_result.contributor for contribution_result in evaluation_result.contribution_results if contribution_result.contributor not in [None, view_as_user]]

    # if the evaluation is not published, the rendered results are not cached, so we need to attach distribution
    # information for rendering the distribution bar
    if evaluation.state != 'published':
        evaluation = get_evaluations_with_prefetched_data([evaluation])[0]

    template_data = dict(
        evaluation=evaluation,
        course=evaluation.course,
        course_evaluations=course_evaluations,
        general_questionnaire_results_top=general_questionnaire_results_top,
        general_questionnaire_results_bottom=general_questionnaire_results_bottom,
        contributor_contribution_results=contributor_contribution_results,
        is_reviewer=view_as_user.is_reviewer,
        is_contributor=evaluation.is_user_contributor(view_as_user),
        is_responsible_or_contributor_or_delegate=evaluation.is_user_responsible_or_contributor_or_delegate(view_as_user),
        can_download_grades=view_as_user.can_download_grades,
        view=view,
        view_as_user=view_as_user,
        other_contributors=other_contributors,
    )
    return render(request, "results_evaluation_detail.html", template_data)
Example #30
def course_detail(request, semester_id, course_id):
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.courses, id=course_id, semester=semester)

    if not course.can_user_see_results_page(request.user):
        raise PermissionDenied

    course_result = collect_results(course)

    if request.user.is_reviewer:
        view = request.GET.get('view', 'public')  # if parameter is not given, show public view.
    else:
        view = request.GET.get('view', 'full')  # if parameter is not given, show own view.

    view_as_user = request.user
    if view == 'export' and request.user.is_staff:
        view_as_user = UserProfile.objects.get(id=int(request.GET.get('contributor_id', request.user.id)))

    represented_users = [view_as_user]
    if view != 'export':
        represented_users += list(view_as_user.represented_users.all())
    # fall back to the full view if there is no public view because the results have not been published
    if not course.can_publish_rating_results and view == 'public':
        view = 'full'

    # remove text answers if the user may not see them
    for questionnaire_result in course_result.questionnaire_results:
        for question_result in questionnaire_result.question_results:
            if isinstance(question_result, TextResult):
                question_result.answers = [answer for answer in question_result.answers if user_can_see_text_answer(view_as_user, represented_users, answer, view)]
        # remove empty TextResults
        questionnaire_result.question_results = [result for result in questionnaire_result.question_results if not isinstance(result, TextResult) or len(result.answers) > 0]

    # filter empty headings
    for questionnaire_result in course_result.questionnaire_results:
        filtered_question_results = []
        for index, question_result in enumerate(questionnaire_result.question_results):
            # filter out if there are no more questions or the next question is also a heading question
            if isinstance(question_result, HeadingResult):
                if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult):
                    continue
            filtered_question_results.append(question_result)
        questionnaire_result.question_results = filtered_question_results

    # remove empty questionnaire_results and contribution_results
    for contribution_result in course_result.contribution_results:
        contribution_result.questionnaire_results = [questionnaire_result for questionnaire_result in contribution_result.questionnaire_results if questionnaire_result.question_results]
    course_result.contribution_results = [contribution_result for contribution_result in course_result.contribution_results if contribution_result.questionnaire_results]

    add_warnings(course, course_result)

    # split course_result into different lists
    general_questionnaire_results_top = []
    general_questionnaire_results_bottom = []
    contributor_contribution_results = []
    for contribution_result in course_result.contribution_results:
        if contribution_result.contributor is None:
            for questionnaire_result in contribution_result.questionnaire_results:
                if questionnaire_result.questionnaire.is_below_contributors:
                    general_questionnaire_results_bottom.append(questionnaire_result)
                else:
                    general_questionnaire_results_top.append(questionnaire_result)
        elif view != 'export' or view_as_user.id == contribution_result.contributor.id:
            contributor_contribution_results.append(contribution_result)

    if not contributor_contribution_results:
        general_questionnaire_results_top += general_questionnaire_results_bottom
        general_questionnaire_results_bottom = []

    course.distribution = calculate_average_distribution(course)
    course.avg_grade = distribution_to_grade(course.distribution)

    other_contributors = []
    if view == 'export':
        other_contributors = [contribution_result.contributor for contribution_result in course_result.contribution_results if contribution_result.contributor not in [None, view_as_user]]

    template_data = dict(
        course=course,
        general_questionnaire_results_top=general_questionnaire_results_top,
        general_questionnaire_results_bottom=general_questionnaire_results_bottom,
        contributor_contribution_results=contributor_contribution_results,
        is_reviewer=view_as_user.is_reviewer,
        is_contributor=course.is_user_contributor(view_as_user),
        is_contributor_or_delegate=course.is_user_contributor_or_delegate(view_as_user),
        can_download_grades=view_as_user.can_download_grades,
        view=view,
        view_as_user=view_as_user,
        other_contributors=other_contributors,
    )
    return render(request, "results_course_detail.html", template_data)
Example #31
    def export(self, response, selection_list, include_not_enough_voters=False, include_unpublished=False):
        self.workbook = xlwt.Workbook()
        self.init_styles(self.workbook)
        counter = 1
        course_results_exist = False

        for degrees, course_types in selection_list:
            self.sheet = self.workbook.add_sheet("Sheet " + str(counter))
            counter += 1
            self.row = 0
            self.col = 0

            evaluations_with_results = list()
            evaluation_states = ['published']
            if include_unpublished:
                evaluation_states.extend(['evaluated', 'reviewed'])

            used_questionnaires = set()
            for evaluation in self.semester.evaluations.filter(state__in=evaluation_states, course__degrees__in=degrees, course__type__in=course_types).distinct():
                if evaluation.is_single_result:
                    continue
                if not evaluation.can_publish_rating_results and not include_not_enough_voters:
                    continue
                results = OrderedDict()
                for questionnaire_result in collect_results(evaluation).questionnaire_results:
                    if all(not question_result.question.is_rating_question or question_result.counts is None for question_result in questionnaire_result.question_results):
                        continue
                    results.setdefault(questionnaire_result.questionnaire.id, []).extend(questionnaire_result.question_results)
                    used_questionnaires.add(questionnaire_result.questionnaire)
                evaluation.course_evaluations_count = evaluation.course.evaluations.count()
                if evaluation.course_evaluations_count > 1:
                    course_results_exist = True
                    evaluation.weight_percentage = int((evaluation.weight / sum(evaluation.weight for evaluation in evaluation.course.evaluations.all())) * 100)
                    evaluation.course.avg_grade = distribution_to_grade(calculate_average_course_distribution(evaluation.course))
                evaluations_with_results.append((evaluation, results))

            evaluations_with_results.sort(key=lambda cr: (cr[0].course.type.order, cr[0].full_name))
            used_questionnaires = sorted(used_questionnaires)

            degree_names = [degree.name for degree in Degree.objects.filter(pk__in=degrees)]
            course_type_names = [course_type.name for course_type in CourseType.objects.filter(pk__in=course_types)]
            writec(self, _("Evaluation {}\n\n{}\n\n{}").format(self.semester.name, ", ".join(degree_names), ", ".join(course_type_names)), "headline")

            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.full_name, "evaluation")

            writen(self, _("Degrees"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, "\n".join([d.name for d in evaluation.course.degrees.all()]), "degree")

            writen(self, _("Course Type"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.course.type.name, "border_left_right")

            writen(self)
            for evaluation, results in evaluations_with_results:
                self.write_empty_cell_with_borders()

            for questionnaire in used_questionnaires:
                writen(self, questionnaire.name, "bold")
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

                filtered_questions = self.filter_text_and_heading_questions(questionnaire.questions.all())

                for question in filtered_questions:
                    if question.is_heading_question:
                        writen(self, question.text, "italic")
                    else:
                        writen(self, question.text)

                    for evaluation, results in evaluations_with_results:
                        if questionnaire.id not in results or question.is_heading_question:
                            self.write_empty_cell_with_borders()
                            continue
                        qn_results = results[questionnaire.id]
                        values = []
                        count_sum = 0
                        approval_count = 0

                        for grade_result in qn_results:
                            if grade_result.question.id == question.id:
                                if grade_result.has_answers:
                                    values.append(grade_result.average * grade_result.count_sum)
                                    count_sum += grade_result.count_sum
                                    if grade_result.question.is_yes_no_question:
                                        approval_count += grade_result.approval_count
                        if values:
                            avg = sum(values) / count_sum

                            if question.is_yes_no_question:
                                percent_approval = approval_count / count_sum if count_sum > 0 else 0
                                writec(self, "{:.0%}".format(percent_approval),
                                       self.grade_to_style(avg))
                            else:
                                writec(self, avg, self.grade_to_style(avg))
                        else:
                            self.write_empty_cell_with_borders()
                writen(self)
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

            writen(self, _("Overall Average Grade"), "bold")
            for evaluation, results in evaluations_with_results:
                avg = distribution_to_grade(calculate_average_distribution(evaluation))
                if avg:
                    writec(self, avg, self.grade_to_style(avg))
                else:
                    self.write_empty_cell_with_borders()

            writen(self, _("Total voters/Total participants"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, "{}/{}".format(evaluation.num_voters, evaluation.num_participants), "total_voters")

            writen(self, _("Evaluation rate"), "bold")
            for evaluation, results in evaluations_with_results:
                # round down like in progress bar
                percentage_participants = int((evaluation.num_voters / evaluation.num_participants) * 100) if evaluation.num_participants > 0 else 0
                writec(self, "{}%".format(percentage_participants), "evaluation_rate")

            if course_results_exist:
                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self, _("Evaluation weight"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self, "{}%".format(evaluation.weight_percentage), "evaluation_weight")
                    else:
                        self.write_empty_cell()

                writen(self, _("Course Grade"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        if evaluation.course.avg_grade:
                            writec(self, evaluation.course.avg_grade, self.grade_to_style(evaluation.course.avg_grade))
                        else:
                            self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self, None, "border_top")
                    else:
                        self.write_empty_cell()

        self.workbook.save(response)
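
Note that the weight_percentage computed above truncates toward zero, so the percentages of a course's evaluations need not sum to 100. A worked example of the arithmetic:

weights = [2, 1]
total = sum(weights)
# int() truncates: 66.67 -> 66 and 33.33 -> 33, so the column sums to 99
assert [int((weight / total) * 100) for weight in weights] == [66, 33]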
Example #32
    def test_cache_unpublished_course(self):
        course = mommy.make(Course, state='published', _voter_count=0, _participant_count=0)
        collect_results(course)
        course.unpublish()

        self.assertIsNone(caches['results'].get(get_collect_results_cache_key(course)))
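
A natural companion to this test would assert that the cache entry reappears once the course is published and collected again. A sketch, assuming Course has a publish() transition symmetric to the unpublish() used above (that transition is an assumption, not shown in these examples):

    def test_cache_repopulated_after_republish(self):
        course = mommy.make(Course, state='published', _voter_count=0, _participant_count=0)
        collect_results(course)
        course.unpublish()
        course.save()
        self.assertIsNone(caches['results'].get(get_collect_results_cache_key(course)))

        course.publish()  # assumed state transition, symmetric to unpublish()
        course.save()
        collect_results(course)
        self.assertIsNotNone(caches['results'].get(get_collect_results_cache_key(course)))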
Example #33
def evaluation_detail(request, semester_id, evaluation_id):
    semester = get_object_or_404(Semester, id=semester_id)
    evaluation = get_object_or_404(semester.evaluations, id=evaluation_id, course__semester=semester)

    if not evaluation.can_user_see_results_page(request.user):
        raise PermissionDenied

    evaluation_result = collect_results(evaluation)

    if request.user.is_reviewer:
        view = request.GET.get('view', 'public')  # if parameter is not given, show public view.
    else:
        view = request.GET.get('view', 'full')  # if parameter is not given, show own view.
    if view not in ['public', 'full', 'export']:
        view = 'public'

    view_as_user = request.user
    if view == 'export' and request.user.is_staff:
        view_as_user = UserProfile.objects.get(id=int(request.GET.get('contributor_id', request.user.id)))

    represented_users = [view_as_user]
    if view != 'export':
        represented_users += list(view_as_user.represented_users.all())
    # fall back to the full view if there is no public view because the results have not been published
    if not evaluation.can_publish_rating_results and view == 'public':
        view = 'full'

    # remove text answers if the user may not see them
    for questionnaire_result in evaluation_result.questionnaire_results:
        for question_result in questionnaire_result.question_results:
            if isinstance(question_result, TextResult):
                question_result.answers = [
                    answer for answer in question_result.answers
                    if user_can_see_textanswer(view_as_user, represented_users,
                                               answer, view)
                ]
        # remove empty TextResults
        questionnaire_result.question_results = [
            result for result in questionnaire_result.question_results
            if not isinstance(result, TextResult) or len(result.answers) > 0
        ]

    # filter empty headings
    for questionnaire_result in evaluation_result.questionnaire_results:
        filtered_question_results = []
        for index, question_result in enumerate(questionnaire_result.question_results):
            # filter out if there are no more questions or the next question is also a heading question
            if isinstance(question_result, HeadingResult):
                if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult):
                    continue
            filtered_question_results.append(question_result)
        questionnaire_result.question_results = filtered_question_results

    # remove empty questionnaire_results and contribution_results
    for contribution_result in evaluation_result.contribution_results:
        contribution_result.questionnaire_results = [
            questionnaire_result for questionnaire_result in
            contribution_result.questionnaire_results
            if questionnaire_result.question_results
        ]
    evaluation_result.contribution_results = [
        contribution_result
        for contribution_result in evaluation_result.contribution_results
        if contribution_result.questionnaire_results
    ]

    add_warnings(evaluation, evaluation_result)

    # split evaluation_result into different lists
    general_questionnaire_results_top = []
    general_questionnaire_results_bottom = []
    contributor_contribution_results = []
    for contribution_result in evaluation_result.contribution_results:
        if contribution_result.contributor is None:
            for questionnaire_result in contribution_result.questionnaire_results:
                if questionnaire_result.questionnaire.is_below_contributors:
                    general_questionnaire_results_bottom.append(
                        questionnaire_result)
                else:
                    general_questionnaire_results_top.append(
                        questionnaire_result)
        elif view != 'export' or view_as_user.id == contribution_result.contributor.id:
            contributor_contribution_results.append(contribution_result)

    if not contributor_contribution_results:
        general_questionnaire_results_top += general_questionnaire_results_bottom
        general_questionnaire_results_bottom = []

    course_evaluations = []
    if evaluation.course.evaluations.count() > 1:
        course_evaluations = [
            evaluation for evaluation in evaluation.course.evaluations.filter(
                state="published")
            if evaluation.can_user_see_evaluation(request.user)
        ]
        if request.user.is_reviewer:
            course_evaluations += evaluation.course.evaluations.filter(
                state__in=['in_evaluation', 'evaluated', 'reviewed'])
        course_evaluations = get_evaluations_with_course_result_attributes(
            course_evaluations)
        for course_evaluation in course_evaluations:
            if course_evaluation.is_single_result:
                course_evaluation.single_result_rating_result = get_single_result_rating_result(
                    course_evaluation)
            else:
                course_evaluation.distribution = calculate_average_distribution(
                    course_evaluation)
                course_evaluation.avg_grade = distribution_to_grade(
                    course_evaluation.distribution)

    other_contributors = []
    if view == 'export':
        other_contributors = [
            contribution_result.contributor
            for contribution_result in evaluation_result.contribution_results
            if contribution_result.contributor not in [None, view_as_user]
        ]

    # if the evaluation is not published, the rendered results are not cached, so we need to attach distribution
    # information for rendering the distribution bar
    if evaluation.state != 'published':
        evaluation = get_evaluations_with_prefetched_data([evaluation])[0]

    template_data = dict(
        evaluation=evaluation,
        course=evaluation.course,
        course_evaluations=course_evaluations,
        general_questionnaire_results_top=general_questionnaire_results_top,
        general_questionnaire_results_bottom=general_questionnaire_results_bottom,
        contributor_contribution_results=contributor_contribution_results,
        is_reviewer=view_as_user.is_reviewer,
        is_contributor=evaluation.is_user_contributor(view_as_user),
        is_responsible_or_contributor_or_delegate=evaluation.is_user_responsible_or_contributor_or_delegate(view_as_user),
        can_download_grades=view_as_user.can_download_grades,
        view=view,
        view_as_user=view_as_user,
        other_contributors=other_contributors,
    )
    return render(request, "results_evaluation_detail.html", template_data)
Example #34
    def export(self, response, course_types_list, include_not_enough_voters=False, include_unpublished=False):
        self.workbook = xlwt.Workbook()
        self.init_styles(self.workbook)
        counter = 1
        course_results_exist = False

        for course_types in course_types_list:
            self.sheet = self.workbook.add_sheet("Sheet " + str(counter))
            counter += 1
            self.row = 0
            self.col = 0

            evaluations_with_results = list()
            evaluation_states = ['published']
            if include_unpublished:
                evaluation_states.extend(['evaluated', 'reviewed'])

            used_questionnaires = set()
            for evaluation in self.semester.evaluations.filter(state__in=evaluation_states, course__type__in=course_types).all():
                if evaluation.is_single_result:
                    continue
                if not evaluation.can_publish_rating_results and not include_not_enough_voters:
                    continue
                results = OrderedDict()
                for questionnaire_result in collect_results(evaluation).questionnaire_results:
                    if all(not question_result.question.is_rating_question or question_result.counts is None for question_result in questionnaire_result.question_results):
                        continue
                    results.setdefault(questionnaire_result.questionnaire.id, []).extend(questionnaire_result.question_results)
                    used_questionnaires.add(questionnaire_result.questionnaire)
                evaluation.course_evaluations_count = evaluation.course.evaluations.count()
                if evaluation.course_evaluations_count > 1:
                    course_results_exist = True
                    evaluation.weight_percentage = int((evaluation.weight / sum(evaluation.weight for evaluation in evaluation.course.evaluations.all())) * 100)
                    evaluation.course.avg_grade = distribution_to_grade(calculate_average_course_distribution(evaluation.course))
                evaluations_with_results.append((evaluation, results))

            evaluations_with_results.sort(key=lambda cr: (cr[0].course.type.order, cr[0].full_name))
            used_questionnaires = sorted(used_questionnaires)

            course_type_names = [ct.name for ct in CourseType.objects.filter(pk__in=course_types)]
            writec(self, _("Evaluation {0}\n\n{1}").format(self.semester.name, ", ".join(course_type_names)), "headline")

            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.full_name, "evaluation")

            writen(self, _("Course Degrees"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, "\n".join([d.name for d in evaluation.course.degrees.all()]), "degree")

            writen(self, _("Course Type"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.course.type.name, "border_left_right")

            writen(self)
            for evaluation, results in evaluations_with_results:
                self.write_empty_cell_with_borders()

            for questionnaire in used_questionnaires:
                writen(self, questionnaire.name, "bold")
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

                filtered_questions = self.filter_text_and_heading_questions(questionnaire.questions.all())

                for question in filtered_questions:
                    if question.is_heading_question:
                        writen(self, question.text, "italic")
                    else:
                        writen(self, question.text)

                    for evaluation, results in evaluations_with_results:
                        if questionnaire.id not in results or question.is_heading_question:
                            self.write_empty_cell_with_borders()
                            continue
                        qn_results = results[questionnaire.id]
                        values = []
                        count_sum = 0
                        approval_count = 0

                        for grade_result in qn_results:
                            if grade_result.question.id == question.id:
                                if grade_result.has_answers:
                                    values.append(grade_result.average * grade_result.count_sum)
                                    count_sum += grade_result.count_sum
                                    if grade_result.question.is_yes_no_question:
                                        approval_count += grade_result.approval_count
                        if values:
                            avg = sum(values) / count_sum

                            if question.is_yes_no_question:
                                percent_approval = approval_count / count_sum if count_sum > 0 else 0
                                writec(self, "{:.0%}".format(percent_approval), self.grade_to_style(avg))
                            else:
                                writec(self, avg, self.grade_to_style(avg))
                        else:
                            self.write_empty_cell_with_borders()
                writen(self)
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

            writen(self, _("Overall Average Grade"), "bold")
            for evaluation, results in evaluations_with_results:
                avg = distribution_to_grade(calculate_average_distribution(evaluation))
                if avg:
                    writec(self, avg, self.grade_to_style(avg))
                else:
                    self.write_empty_cell_with_borders()

            writen(self, _("Total voters/Total participants"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, "{}/{}".format(evaluation.num_voters, evaluation.num_participants), "total_voters")

            writen(self, _("Evaluation rate"), "bold")
            for evaluation, results in evaluations_with_results:
                # round down like in progress bar
                percentage_participants = int((evaluation.num_voters / evaluation.num_participants) * 100) if evaluation.num_participants > 0 else 0
                writec(self, "{}%".format(percentage_participants), "evaluation_rate")

            if course_results_exist:
                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self, _("Evaluation weight"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self, "{}%".format(evaluation.weight_percentage), "evaluation_weight")
                    else:
                        self.write_empty_cell()

                writen(self, _("Course Grade"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        if evaluation.course.avg_grade:
                            writec(self, evaluation.course.avg_grade, self.grade_to_style(evaluation.course.avg_grade))
                        else:
                            self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self, None, "border_top")
                    else:
                        self.write_empty_cell()

        self.workbook.save(response)
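
For yes/no questions the exporter writes an approval percentage instead of the average, while the cell style still encodes the average grade via grade_to_style. The formatting itself is plain Python:

approval_count, count_sum = 8, 10
percent_approval = approval_count / count_sum if count_sum > 0 else 0
assert "{:.0%}".format(percent_approval) == "80%"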