Ejemplo n.º 1
0
    def test_calculate_average_course_distribution(self):
        """The course distribution mixes the evaluation's answers with a weighted single result."""
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=1, count=2)

        course = self.evaluation.course
        # The single result carries weight 3, so it dominates the course average.
        single_result = mommy.make(
            Evaluation,
            name_de="Single Result",
            name_en="Single Result",
            course=course,
            weight=3,
            is_single_result=True,
            vote_start_datetime=datetime.now(),
            vote_end_date=datetime.now().date(),
            state="published",
        )
        questionnaire = Questionnaire.single_result_questionnaire()
        question = questionnaire.questions.first()

        single_result_contribution = mommy.make(
            Contribution,
            evaluation=single_result,
            contributor=None,
            questionnaires=[questionnaire],
        )
        for grade in (2, 3):
            mommy.make(RatingAnswerCounter, question=question, contribution=single_result_contribution, answer=grade, count=1)

        distribution = calculate_average_course_distribution(course)
        for index, expected in enumerate((0.25, 0.375, 0.375, 0, 0)):
            self.assertEqual(distribution[index], expected)
Ejemplo n.º 2
0
    def test_calculate_average_course_distribution(self):
        """The course distribution mixes the evaluation's answers with a weighted single result."""
        make_rating_answer_counters(self.question_grade, self.contribution1, [2, 0, 0, 0, 0])

        course = self.evaluation.course
        # The single result carries weight 3, so it dominates the course average.
        single_result = baker.make(
            Evaluation,
            name_de="Single Result",
            name_en="Single Result",
            course=course,
            weight=3,
            is_single_result=True,
            vote_start_datetime=datetime.now(),
            vote_end_date=datetime.now().date(),
            state=Evaluation.State.PUBLISHED,
        )
        questionnaire = Questionnaire.single_result_questionnaire()
        question = questionnaire.questions.first()

        contribution = baker.make(
            Contribution,
            evaluation=single_result,
            contributor=None,
            questionnaires=[questionnaire],
        )
        make_rating_answer_counters(question, contribution, [0, 1, 1, 0, 0])

        # Distributions are computed from cached results, so both evaluations must be cached first.
        cache_results(single_result)
        cache_results(self.evaluation)

        distribution = calculate_average_course_distribution(course)
        for index, expected in enumerate((0.25, 0.375, 0.375, 0, 0)):
            self.assertEqual(distribution[index], expected)
Ejemplo n.º 3
0
    def test_calculate_average_course_distribution(self):
        """The course distribution mixes the evaluation's answers with a weighted single result."""
        baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=1, count=2)

        course = self.evaluation.course
        # The single result carries weight 3, so it dominates the course average.
        single_result = baker.make(
            Evaluation,
            course=course,
            name_de="Single Result",
            name_en="Single Result",
            weight=3,
            is_single_result=True,
            vote_start_datetime=datetime.now(),
            vote_end_date=datetime.now().date(),
            state="published",
        )
        questionnaire = Questionnaire.single_result_questionnaire()
        question = questionnaire.questions.first()

        contribution = baker.make(Contribution, evaluation=single_result, contributor=None, questionnaires=[questionnaire])
        for grade in (2, 3):
            baker.make(RatingAnswerCounter, question=question, contribution=contribution, answer=grade, count=1)

        # Distributions are computed from cached results, so both evaluations must be cached first.
        cache_results(single_result)
        cache_results(self.evaluation)

        distribution = calculate_average_course_distribution(course)
        self.assertEqual(distribution[0], 0.25)
        self.assertEqual(distribution[1], 0.375)
        self.assertEqual(distribution[2], 0.375)
        self.assertEqual(distribution[3], 0)
        self.assertEqual(distribution[4], 0)
Ejemplo n.º 4
0
    def filter_evaluations(semesters, evaluation_states, degrees, course_types, contributor, include_not_enough_voters):
        """Collect evaluations matching the given filters together with their rating results.

        Returns a tuple ``(evaluations_with_results, used_questionnaires, course_results_exist)``:
        - ``evaluations_with_results``: sorted list of ``(evaluation, {questionnaire_id: [question_results]})``
        - ``used_questionnaires``: sorted list of every questionnaire that contributed results
        - ``course_results_exist``: True if any matched course has more than one evaluation;
          such evaluations are annotated with ``weight_percentage`` and their course with ``avg_grade``
        """
        course_results_exist = False
        evaluations_with_results = []
        used_questionnaires = set()
        evaluations_filter = Q(
            course__semester__in=semesters,
            state__in=evaluation_states,
            course__degrees__in=degrees,
            course__type__in=course_types,
        )
        if contributor:
            # restrict to evaluations the contributor is responsible for or contributes to
            evaluations_filter &= Q(course__responsibles__in=[contributor]) | Q(contributions__contributor__in=[contributor])
        evaluations = Evaluation.objects.filter(evaluations_filter).distinct()
        for evaluation in evaluations:
            # single results have no questionnaire results of their own
            if evaluation.is_single_result:
                continue
            if not evaluation.can_publish_rating_results and not include_not_enough_voters:
                continue
            results = OrderedDict()
            for contribution_result in get_results(evaluation).contribution_results:
                for questionnaire_result in contribution_result.questionnaire_results:
                    # RatingQuestion.counts is a tuple of integers or None; if this tuple is all zero, we want to exclude it
                    if all(
                        not question_result.question.is_rating_question
                        or question_result.counts is None
                        or sum(question_result.counts) == 0
                        for question_result in questionnaire_result.question_results
                    ):
                        continue
                    if not contributor or contribution_result.contributor is None or contribution_result.contributor == contributor:
                        results.setdefault(questionnaire_result.questionnaire.id, []).extend(questionnaire_result.question_results)
                        used_questionnaires.add(questionnaire_result.questionnaire)
            evaluation.course_evaluations_count = evaluation.course.evaluations.count()
            if evaluation.course_evaluations_count > 1:
                course_results_exist = True
                # use a distinct generator variable so the enclosing loop variable is not shadowed
                total_weight = sum(sibling.weight for sibling in evaluation.course.evaluations.all())
                evaluation.weight_percentage = int((evaluation.weight / total_weight) * 100)
                evaluation.course.avg_grade = distribution_to_grade(calculate_average_course_distribution(evaluation.course))
            evaluations_with_results.append((evaluation, results))

        evaluations_with_results.sort(key=lambda cr: (cr[0].course.semester.id, cr[0].course.type.order, cr[0].full_name))
        used_questionnaires = sorted(used_questionnaires)

        return evaluations_with_results, used_questionnaires, course_results_exist
Ejemplo n.º 5
0
    def export(self, response, course_types_list, include_not_enough_voters=False, include_unpublished=False):
        """Write one xlwt sheet per course-type group in ``course_types_list`` and save the workbook to ``response``.

        Each sheet has one column per matching evaluation of ``self.semester`` and rows for
        degrees, course type, per-question averages (approval percentages for yes/no
        questions), overall average grade, voter counts and the evaluation rate. When any
        course has more than one evaluation, extra rows for evaluation weight and the
        course grade are appended.
        """
        self.workbook = xlwt.Workbook()
        self.init_styles(self.workbook)
        counter = 1
        course_results_exist = False

        for course_types in course_types_list:
            self.sheet = self.workbook.add_sheet("Sheet " + str(counter))
            counter += 1
            self.row = 0
            self.col = 0

            evaluations_with_results = list()
            evaluation_states = ['published']
            if include_unpublished:
                evaluation_states.extend(['evaluated', 'reviewed'])

            used_questionnaires = set()
            for evaluation in self.semester.evaluations.filter(state__in=evaluation_states, course__type__in=course_types).all():
                # single results get no column of their own; they only influence the course grade
                if evaluation.is_single_result:
                    continue
                if not evaluation.can_publish_rating_results and not include_not_enough_voters:
                    continue
                results = OrderedDict()
                for questionnaire_result in collect_results(evaluation).questionnaire_results:
                    # skip questionnaires that produced no rating answers at all
                    if all(not question_result.question.is_rating_question or question_result.counts is None for question_result in questionnaire_result.question_results):
                        continue
                    results.setdefault(questionnaire_result.questionnaire.id, []).extend(questionnaire_result.question_results)
                    used_questionnaires.add(questionnaire_result.questionnaire)
                evaluation.course_evaluations_count = evaluation.course.evaluations.count()
                if evaluation.course_evaluations_count > 1:
                    # courses with several evaluations additionally get weight and course-grade rows
                    course_results_exist = True
                    evaluation.weight_percentage = int((evaluation.weight / sum(evaluation.weight for evaluation in evaluation.course.evaluations.all())) * 100)
                    evaluation.course.avg_grade = distribution_to_grade(calculate_average_course_distribution(evaluation.course))
                evaluations_with_results.append((evaluation, results))

            evaluations_with_results.sort(key=lambda cr: (cr[0].course.type.order, cr[0].full_name))
            used_questionnaires = sorted(used_questionnaires)

            # header row: semester name plus the course types shown on this sheet
            course_type_names = [ct.name for ct in CourseType.objects.filter(pk__in=course_types)]
            writec(self, _("Evaluation {0}\n\n{1}").format(self.semester.name, ", ".join(course_type_names)), "headline")

            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.full_name, "evaluation")

            writen(self, _("Course Degrees"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, "\n".join([d.name for d in evaluation.course.degrees.all()]), "degree")

            writen(self, _("Course Type"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.course.type.name, "border_left_right")

            # empty separator row keeping the column borders intact
            writen(self)
            for evaluation, results in evaluations_with_results:
                self.write_empty_cell_with_borders()

            # one section per questionnaire: a title row, then one row per question
            for questionnaire in used_questionnaires:
                writen(self, questionnaire.name, "bold")
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

                filtered_questions = self.filter_text_and_heading_questions(questionnaire.questions.all())

                for question in filtered_questions:
                    if question.is_heading_question:
                        writen(self, question.text, "italic")
                    else:
                        writen(self, question.text)

                    for evaluation, results in evaluations_with_results:
                        if questionnaire.id not in results or question.is_heading_question:
                            self.write_empty_cell_with_borders()
                            continue
                        qn_results = results[questionnaire.id]
                        values = []
                        count_sum = 0
                        approval_count = 0

                        # aggregate all results for this question (one per contribution)
                        for grade_result in qn_results:
                            if grade_result.question.id == question.id:
                                if grade_result.has_answers:
                                    values.append(grade_result.average * grade_result.count_sum)
                                    count_sum += grade_result.count_sum
                                    if grade_result.question.is_yes_no_question:
                                        approval_count += grade_result.approval_count
                        if values:
                            # weighted average over all contributions
                            avg = sum(values) / count_sum

                            if question.is_yes_no_question:
                                # yes/no questions show the approval percentage, colored by the average grade
                                percent_approval = approval_count / count_sum if count_sum > 0 else 0
                                writec(self, "{:.0%}".format(percent_approval), self.grade_to_style(avg))
                            else:
                                writec(self, avg, self.grade_to_style(avg))
                        else:
                            self.write_empty_cell_with_borders()
                writen(self)
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

            writen(self, _("Overall Average Grade"), "bold")
            for evaluation, results in evaluations_with_results:
                avg = distribution_to_grade(calculate_average_distribution(evaluation))
                if avg:
                    writec(self, avg, self.grade_to_style(avg))
                else:
                    self.write_empty_cell_with_borders()

            writen(self, _("Total voters/Total participants"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, "{}/{}".format(evaluation.num_voters, evaluation.num_participants), "total_voters")

            writen(self, _("Evaluation rate"), "bold")
            for evaluation, results in evaluations_with_results:
                # round down like in progress bar
                percentage_participants = int((evaluation.num_voters / evaluation.num_participants) * 100) if evaluation.num_participants > 0 else 0
                writec(self, "{}%".format(percentage_participants), "evaluation_rate")

            # extra rows only for courses that have more than one evaluation
            if course_results_exist:
                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self, _("Evaluation weight"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self, "{}%".format(evaluation.weight_percentage), "evaluation_weight")
                    else:
                        self.write_empty_cell()

                writen(self, _("Course Grade"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        if evaluation.course.avg_grade:
                            writec(self, evaluation.course.avg_grade, self.grade_to_style(evaluation.course.avg_grade))
                        else:
                            self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self, None, "border_top")
                    else:
                        self.write_empty_cell()

        self.workbook.save(response)
Ejemplo n.º 6
0
    def export(self,
               response,
               selection_list,
               include_not_enough_voters=False,
               include_unpublished=False):
        """Write one xlwt sheet per ``(degrees, course_types)`` pair in ``selection_list``
        and save the workbook to ``response``.

        Each sheet has one column per matching evaluation of ``self.semester`` and rows for
        degrees, course type, per-question averages (approval percentages for yes/no
        questions), overall average grade, voter counts and the evaluation rate. When any
        course has more than one evaluation, extra rows for evaluation weight and the
        course grade are appended.
        """
        self.workbook = xlwt.Workbook()
        self.init_styles(self.workbook)
        counter = 1
        course_results_exist = False

        for degrees, course_types in selection_list:
            self.sheet = self.workbook.add_sheet("Sheet " + str(counter))
            counter += 1
            self.row = 0
            self.col = 0

            evaluations_with_results = list()
            evaluation_states = ['published']
            if include_unpublished:
                evaluation_states.extend(['evaluated', 'reviewed'])

            used_questionnaires = set()
            for evaluation in self.semester.evaluations.filter(
                    state__in=evaluation_states,
                    course__degrees__in=degrees,
                    course__type__in=course_types).distinct():
                # single results get no column of their own; they only influence the course grade
                if evaluation.is_single_result:
                    continue
                if not evaluation.can_publish_rating_results and not include_not_enough_voters:
                    continue
                results = OrderedDict()
                for questionnaire_result in collect_results(
                        evaluation).questionnaire_results:
                    # skip questionnaires that produced no rating answers at all
                    if all(not question_result.question.is_rating_question
                           or question_result.counts is None
                           for question_result in
                           questionnaire_result.question_results):
                        continue
                    results.setdefault(
                        questionnaire_result.questionnaire.id,
                        []).extend(questionnaire_result.question_results)
                    used_questionnaires.add(questionnaire_result.questionnaire)
                evaluation.course_evaluations_count = evaluation.course.evaluations.count(
                )
                if evaluation.course_evaluations_count > 1:
                    # courses with several evaluations additionally get weight and course-grade rows
                    course_results_exist = True
                    evaluation.weight_percentage = int(
                        (evaluation.weight / sum(evaluation.weight
                                                 for evaluation in evaluation.
                                                 course.evaluations.all())) *
                        100)
                    evaluation.course.avg_grade = distribution_to_grade(
                        calculate_average_course_distribution(
                            evaluation.course))
                evaluations_with_results.append((evaluation, results))

            evaluations_with_results.sort(
                key=lambda cr: (cr[0].course.type.order, cr[0].full_name))
            used_questionnaires = sorted(used_questionnaires)

            # header row: semester name plus the degrees and course types shown on this sheet
            degree_names = [
                degree.name for degree in Degree.objects.filter(pk__in=degrees)
            ]
            course_type_names = [
                course_type.name for course_type in CourseType.objects.filter(
                    pk__in=course_types)
            ]
            writec(
                self,
                _("Evaluation {}\n\n{}\n\n{}").format(
                    self.semester.name, ", ".join(degree_names),
                    ", ".join(course_type_names)), "headline")

            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.full_name, "evaluation")

            writen(self, _("Degrees"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(
                    self, "\n".join(
                        [d.name for d in evaluation.course.degrees.all()]),
                    "degree")

            writen(self, _("Course Type"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.course.type.name, "border_left_right")

            # empty separator row keeping the column borders intact
            writen(self)
            for evaluation, results in evaluations_with_results:
                self.write_empty_cell_with_borders()

            # one section per questionnaire: a title row, then one row per question
            for questionnaire in used_questionnaires:
                writen(self, questionnaire.name, "bold")
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

                filtered_questions = self.filter_text_and_heading_questions(
                    questionnaire.questions.all())

                for question in filtered_questions:
                    if question.is_heading_question:
                        writen(self, question.text, "italic")
                    else:
                        writen(self, question.text)

                    for evaluation, results in evaluations_with_results:
                        if questionnaire.id not in results or question.is_heading_question:
                            self.write_empty_cell_with_borders()
                            continue
                        qn_results = results[questionnaire.id]
                        values = []
                        count_sum = 0
                        approval_count = 0

                        # aggregate all results for this question (one per contribution)
                        for grade_result in qn_results:
                            if grade_result.question.id == question.id:
                                if grade_result.has_answers:
                                    values.append(grade_result.average *
                                                  grade_result.count_sum)
                                    count_sum += grade_result.count_sum
                                    if grade_result.question.is_yes_no_question:
                                        approval_count += grade_result.approval_count
                        if values:
                            # weighted average over all contributions
                            avg = sum(values) / count_sum

                            if question.is_yes_no_question:
                                # yes/no questions show the approval percentage, colored by the average grade
                                percent_approval = approval_count / count_sum if count_sum > 0 else 0
                                writec(self, "{:.0%}".format(percent_approval),
                                       self.grade_to_style(avg))
                            else:
                                writec(self, avg, self.grade_to_style(avg))
                        else:
                            self.write_empty_cell_with_borders()
                writen(self)
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

            writen(self, _("Overall Average Grade"), "bold")
            for evaluation, results in evaluations_with_results:
                avg = distribution_to_grade(
                    calculate_average_distribution(evaluation))
                if avg:
                    writec(self, avg, self.grade_to_style(avg))
                else:
                    self.write_empty_cell_with_borders()

            writen(self, _("Total voters/Total participants"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(
                    self, "{}/{}".format(evaluation.num_voters,
                                         evaluation.num_participants),
                    "total_voters")

            writen(self, _("Evaluation rate"), "bold")
            for evaluation, results in evaluations_with_results:
                # round down like in progress bar
                percentage_participants = int(
                    (evaluation.num_voters / evaluation.num_participants) *
                    100) if evaluation.num_participants > 0 else 0
                writec(self, "{}%".format(percentage_participants),
                       "evaluation_rate")

            # extra rows only for courses that have more than one evaluation
            if course_results_exist:
                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self, _("Evaluation weight"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self,
                               "{}%".format(evaluation.weight_percentage),
                               "evaluation_weight")
                    else:
                        self.write_empty_cell()

                writen(self, _("Course Grade"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        if evaluation.course.avg_grade:
                            writec(
                                self, evaluation.course.avg_grade,
                                self.grade_to_style(
                                    evaluation.course.avg_grade))
                        else:
                            self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self, None, "border_top")
                    else:
                        self.write_empty_cell()

        self.workbook.save(response)
Ejemplo n.º 7
0
    def export(self,
               response,
               semesters,
               selection_list,
               include_not_enough_voters=False,
               include_unpublished=False,
               contributor=None):
        # the excel file we're creating here is rather complex. However, from the nature of a single
        # file, it doesn't make much sense to split up the code into different methods as they will
        # always be tightly coupled based on the layout of the sheet. We thus think that one big method
        # containing the business logic is okay here
        # pylint: disable=too-many-locals, too-many-nested-blocks, too-many-branches, too-many-statements
        workbook = xlwt.Workbook()
        self.init_styles(workbook)
        counter = 1
        course_results_exist = False

        for degrees, course_types in selection_list:
            self.sheet = workbook.add_sheet("Sheet " + str(counter))
            counter += 1
            self.row = 0
            self.col = 0

            evaluations_with_results = list()
            evaluation_states = ['published']
            if include_unpublished:
                evaluation_states.extend(['evaluated', 'reviewed'])

            used_questionnaires = set()
            evaluations_filter = Q(course__semester__in=semesters,
                                   state__in=evaluation_states,
                                   course__degrees__in=degrees,
                                   course__type__in=course_types)
            if contributor:
                evaluations_filter = evaluations_filter & (
                    Q(course__responsibles__in=[contributor])
                    | Q(contributions__contributor__in=[contributor]))
            evaluations = Evaluation.objects.filter(
                evaluations_filter).distinct()
            for evaluation in evaluations:
                if evaluation.is_single_result:
                    continue
                if not evaluation.can_publish_rating_results and not include_not_enough_voters:
                    continue
                results = OrderedDict()
                for contribution_result in collect_results(
                        evaluation).contribution_results:
                    for questionnaire_result in contribution_result.questionnaire_results:
                        if all(not question_result.question.is_rating_question
                               or question_result.counts is None
                               for question_result in
                               questionnaire_result.question_results):
                            continue
                        if not contributor or contribution_result.contributor is None or contribution_result.contributor == contributor:
                            results.setdefault(
                                questionnaire_result.questionnaire.id,
                                []).extend(
                                    questionnaire_result.question_results)
                            used_questionnaires.add(
                                questionnaire_result.questionnaire)
                evaluation.course_evaluations_count = evaluation.course.evaluations.count(
                )
                if evaluation.course_evaluations_count > 1:
                    course_results_exist = True
                    evaluation.weight_percentage = int(
                        (evaluation.weight / sum(evaluation.weight
                                                 for evaluation in evaluation.
                                                 course.evaluations.all())) *
                        100)
                    evaluation.course.avg_grade = distribution_to_grade(
                        calculate_average_course_distribution(
                            evaluation.course))
                evaluations_with_results.append((evaluation, results))

            evaluations_with_results.sort(
                key=lambda cr: (cr[0].course.semester.id, cr[0].course.type.
                                order, cr[0].full_name))
            used_questionnaires = sorted(used_questionnaires)

            export_name = "Evaluation"
            if contributor:
                export_name += "\n{}".format(contributor.full_name)
            elif len(semesters) == 1:
                export_name += "\n{}".format(semesters[0].name)
            degree_names = [
                degree.name for degree in Degree.objects.filter(pk__in=degrees)
            ]
            course_type_names = [
                course_type.name for course_type in CourseType.objects.filter(
                    pk__in=course_types)
            ]
            writec(
                self,
                _("{}\n\n{}\n\n{}").format(export_name,
                                           ", ".join(degree_names),
                                           ", ".join(course_type_names)),
                "headline")

            for evaluation, results in evaluations_with_results:
                title = evaluation.full_name
                if len(semesters) > 1:
                    title += "\n{}".format(evaluation.course.semester.name)
                responsible_names = [
                    responsible.full_name
                    for responsible in evaluation.course.responsibles.all()
                ]
                title += "\n{}".format(", ".join(responsible_names))
                writec(self, title, "evaluation")

            # Sheet layout: one row per metadata field, one column per evaluation.
            # NOTE(review): writen/writec are helpers defined elsewhere in this
            # file — presumably "start a new row" / "write the next cell with a
            # style"; confirm against their definitions.
            writen(self, _("Degrees"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(
                    self, "\n".join(
                        [d.name for d in evaluation.course.degrees.all()]),
                    "degree")

            writen(self, _("Course Type"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.course.type.name, "border_left_right")

            # Spacer row: empty bordered cells keep the column borders continuous.
            writen(self)
            for evaluation, results in evaluations_with_results:
                self.write_empty_cell_with_borders()

            # One section per questionnaire: a bold header row, then one row per
            # remaining question with the aggregated result for each evaluation.
            for questionnaire in used_questionnaires:
                if contributor and questionnaire.type == Questionnaire.CONTRIBUTOR:
                    # Contributor questionnaires are labeled with the contributor's name.
                    writen(
                        self, "{} ({})".format(questionnaire.name,
                                               contributor.full_name), "bold")
                else:
                    writen(self, questionnaire.name, "bold")
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

                filtered_questions = self.filter_text_and_heading_questions(
                    questionnaire.questions.all())

                for question in filtered_questions:
                    if question.is_heading_question:
                        writen(self, question.text, "italic")
                    else:
                        writen(self, question.text)

                    for evaluation, results in evaluations_with_results:
                        # Heading questions carry no answers, and an evaluation may
                        # simply have no results for this questionnaire — either way,
                        # emit an empty (bordered) cell.
                        if questionnaire.id not in results or question.is_heading_question:
                            self.write_empty_cell_with_borders()
                            continue
                        qn_results = results[questionnaire.id]
                        values = []          # count-weighted averages, summed below
                        count_sum = 0        # total number of answers across grade_results
                        approval_count = 0   # "yes" answers, only for yes/no questions

                        # Merge all grade_results belonging to this question,
                        # weighting each average by its answer count so the
                        # combined average is proportional to participation.
                        for grade_result in qn_results:
                            if grade_result.question.id == question.id:
                                if grade_result.has_answers:
                                    values.append(grade_result.average *
                                                  grade_result.count_sum)
                                    count_sum += grade_result.count_sum
                                    if grade_result.question.is_yes_no_question:
                                        approval_count += grade_result.approval_count
                        if values:
                            avg = sum(values) / count_sum

                            if question.is_yes_no_question:
                                # Yes/no questions are displayed as an approval
                                # percentage, but the cell is still colored by the
                                # average grade.
                                percent_approval = approval_count / count_sum if count_sum > 0 else 0
                                writec(self, "{:.0%}".format(percent_approval),
                                       self.grade_to_style(avg))
                            else:
                                writec(self, avg, self.grade_to_style(avg))
                        else:
                            self.write_empty_cell_with_borders()
                # Spacer row after each questionnaire section.
                writen(self)
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

            writen(self, _("Overall Average Grade"), "bold")
            for evaluation, results in evaluations_with_results:
                # distribution_to_grade may yield a falsy value (no distribution);
                # leave the cell empty in that case.
                avg = distribution_to_grade(
                    calculate_average_distribution(evaluation))
                if avg:
                    writec(self, avg, self.grade_to_style(avg))
                else:
                    self.write_empty_cell_with_borders()

            writen(self, _("Total voters/Total participants"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(
                    self, "{}/{}".format(evaluation.num_voters,
                                         evaluation.num_participants),
                    "total_voters")

            writen(self, _("Evaluation rate"), "bold")
            for evaluation, results in evaluations_with_results:
                # round down like in progress bar
                percentage_participants = int(
                    (evaluation.num_voters / evaluation.num_participants) *
                    100) if evaluation.num_participants > 0 else 0
                writec(self, "{}%".format(percentage_participants),
                       "evaluation_rate")

            # Extra rows that only apply to courses with more than one evaluation:
            # the per-evaluation weight and the combined course grade. For
            # single-evaluation courses these cells stay empty (and borderless).
            if course_results_exist:
                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self, _("Evaluation weight"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self,
                               "{}%".format(evaluation.weight_percentage),
                               "evaluation_weight")
                    else:
                        self.write_empty_cell()

                writen(self, _("Course Grade"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        if evaluation.course.avg_grade:
                            writec(
                                self, evaluation.course.avg_grade,
                                self.grade_to_style(
                                    evaluation.course.avg_grade))
                        else:
                            self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                # Closing row: draw a top border to visually close the
                # course-grade columns.
                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self, None, "border_top")
                    else:
                        self.write_empty_cell()

            # Serialize the workbook into the (HTTP) response object.
            workbook.save(response)