Example 1
    def test_single_result_anonymization(self):
        questionnaire = Questionnaire.single_result_questionnaire()
        single_result = baker.make(Evaluation,
                                   is_single_result=True,
                                   course=self.course)
        single_result.general_contribution.questionnaires.set([questionnaire])
        question = Question.objects.get(questionnaire=questionnaire)

        choices = [
            choice for choice in CHOICES[question.type].values
            if choice != NO_ANSWER
        ]

        answer_counts = [random.randint(50, 100) for _ in choices]
        answer_count_before = sum(answer_counts)
        make_rating_answer_counters(question,
                                    single_result.general_contribution,
                                    answer_counts)

        management.call_command("anonymize", stdout=StringIO())

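        # The anonymize command may merge counters (leaving at most one per
        # choice) but must not change the total number of answers.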
        self.assertLessEqual(RatingAnswerCounter.objects.count(), len(choices))
        self.assertEqual(
            RatingAnswerCounter.objects.aggregate(Sum("count"))["count__sum"],
            answer_count_before)
Example 2
    def test_calculation_unipolar_results(self):
        contributor1 = baker.make(UserProfile)
        student = baker.make(UserProfile)

        evaluation = baker.make(
            Evaluation,
            state=Evaluation.State.PUBLISHED,
            participants=[student, contributor1],
            voters=[student, contributor1],
        )
        questionnaire = baker.make(Questionnaire)
        question = baker.make(Question,
                              questionnaire=questionnaire,
                              type=Question.GRADE)
        contribution1 = baker.make(Contribution,
                                   contributor=contributor1,
                                   evaluation=evaluation,
                                   questionnaires=[questionnaire])

        make_rating_answer_counters(question, contribution1,
                                    [5, 15, 40, 60, 30])

        cache_results(evaluation)
        evaluation_results = get_results(evaluation)

        self.assertEqual(len(evaluation_results.questionnaire_results), 1)
        questionnaire_result = evaluation_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

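        # With GRADE answers valued 1 to 5: (5*1 + 15*2 + 40*3 + 60*4 + 30*5) / 150
        # = 545 / 150 = 109 / 30.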
        self.assertEqual(question_result.count_sum, 150)
        self.assertAlmostEqual(question_result.average, float(109) / 30)
        self.assertEqual(question_result.counts, (5, 15, 40, 60, 30))
Example 3
    def test_result_calculation_with_no_contributor_rating_question(self):
        evaluation = baker.make(
            Evaluation,
            state=Evaluation.State.PUBLISHED,
            participants=[self.student1, self.student2],
            voters=[self.student1, self.student2],
        )
        questionnaire_text = baker.make(Questionnaire)
        baker.make(Question,
                   questionnaire=questionnaire_text,
                   type=Question.TEXT)
        baker.make(
            Contribution,
            contributor=baker.make(UserProfile),
            evaluation=evaluation,
            questionnaires=[questionnaire_text],
        )

        evaluation.general_contribution.questionnaires.set(
            [self.questionnaire])
        make_rating_answer_counters(self.question_grade,
                                    evaluation.general_contribution,
                                    [1, 0, 0, 0, 0])
        cache_results(evaluation)

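        # The single grade-1 answer on the general contribution is the only rating
        # answer, so the whole distribution lands on the first choice; the
        # text-only contributor questionnaire contributes nothing.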
        distribution = calculate_average_distribution(evaluation)
        self.assertEqual(distribution[0], 1)
Example 4
    def test_course_grade(self):
        degree = baker.make(Degree)
        course = baker.make(Course, degrees=[degree])
        evaluations = baker.make(
            Evaluation,
            course=course,
            name_en=iter(["eval0", "eval1", "eval2"]),
            name_de=iter(["eval0", "eval1", "eval2"]),
            state=Evaluation.State.PUBLISHED,
            _voter_count=5,
            _participant_count=10,
            _quantity=3,
        )

        grades_per_eval = [[1, 1, 0, 0, 0], [0, 1, 1, 0, 0], [1, 0, 1, 0, 0]]
        expected_average = 2.0
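        # Per-evaluation averages: (1 + 2) / 2 = 1.5, (2 + 3) / 2 = 2.5 and
        # (1 + 3) / 2 = 2.0; with the three evaluations weighted equally the
        # course grade is 2.0.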

        questionnaire = baker.make(Questionnaire)
        question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire)
        for grades, e in zip(grades_per_eval, evaluations):
            make_rating_answer_counters(question, e.general_contribution, grades)
            e.general_contribution.questionnaires.set([questionnaire])
        for evaluation in evaluations:
            cache_results(evaluation)

        sheet = self.get_export_sheet(course.semester, degree, [course.type.id])
        self.assertEqual(sheet.row_values(12)[1], expected_average)
        self.assertEqual(sheet.row_values(12)[2], expected_average)
        self.assertEqual(sheet.row_values(12)[3], expected_average)
Example 5
    def test_calculate_average_course_distribution(self):
        make_rating_answer_counters(self.question_grade, self.contribution1,
                                    [2, 0, 0, 0, 0])

        course = self.evaluation.course
        single_result = baker.make(
            Evaluation,
            name_de="Single Result",
            name_en="Single Result",
            course=course,
            weight=3,
            is_single_result=True,
            vote_start_datetime=datetime.now(),
            vote_end_date=datetime.now().date(),
            state=Evaluation.State.PUBLISHED,
        )
        single_result_questionnaire = Questionnaire.single_result_questionnaire()
        single_result_question = single_result_questionnaire.questions.first()

        contribution = baker.make(Contribution,
                                  evaluation=single_result,
                                  contributor=None,
                                  questionnaires=[single_result_questionnaire])
        make_rating_answer_counters(single_result_question, contribution,
                                    [0, 1, 1, 0, 0])
        cache_results(single_result)
        cache_results(self.evaluation)

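        # self.evaluation's only answers give it the distribution (1, 0, 0, 0, 0);
        # combined with the single result's (0, 0.5, 0.5, 0, 0) at weight 3 against
        # the evaluation's default weight of 1:
        # (1 * (1, 0, 0, 0, 0) + 3 * (0, 0.5, 0.5, 0, 0)) / 4 = (0.25, 0.375, 0.375, 0, 0)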
        distribution = calculate_average_course_distribution(course)
        self.assertEqual(distribution[0], 0.25)
        self.assertEqual(distribution[1], 0.375)
        self.assertEqual(distribution[2], 0.375)
        self.assertEqual(distribution[3], 0)
        self.assertEqual(distribution[4], 0)
Example 6
    def test_correct_grades_and_bottom_numbers(self):
        degree = baker.make(Degree)
        evaluation = baker.make(
            Evaluation,
            _voter_count=5,
            _participant_count=10,
            course__degrees=[degree],
            state=Evaluation.State.PUBLISHED,
        )
        questionnaire1 = baker.make(Questionnaire, order=1)
        questionnaire2 = baker.make(Questionnaire, order=2)
        question1 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire1)
        question2 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire2)

        make_rating_answer_counters(question1, evaluation.general_contribution, [1, 0, 1, 0, 0])
        make_rating_answer_counters(question2, evaluation.general_contribution, [0, 1, 0, 1, 0])

        evaluation.general_contribution.questionnaires.set([questionnaire1, questionnaire2])
        cache_results(evaluation)

        sheet = self.get_export_sheet(evaluation.course.semester, degree, [evaluation.course.type.id])

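        # [1, 0, 1, 0, 0] averages to (1 + 3) / 2 = 2.0 and [0, 1, 0, 1, 0] to
        # (2 + 4) / 2 = 3.0; with equal answer counts the overall grade is 2.5.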
        self.assertEqual(sheet.row_values(5)[1], 2.0)  # question 1 average
        self.assertEqual(sheet.row_values(8)[1], 3.0)  # question 2 average
        self.assertEqual(sheet.row_values(10)[1], 2.5)  # Average grade
        self.assertEqual(sheet.row_values(11)[1], "5/10")  # Voters / Participants
        self.assertEqual(sheet.row_values(12)[1], "50%")  # Voter percentage
Example 7
    def test_distribution_without_general_grade_question(self):
        make_rating_answer_counters(self.question_grade, self.contribution1,
                                    [1, 0, 1, 0, 0])
        make_rating_answer_counters(self.question_grade, self.contribution2,
                                    [0, 1, 0, 1, 0])
        make_rating_answer_counters(self.question_likert, self.contribution1,
                                    [0, 0, 3, 0, 3])
        make_rating_answer_counters(self.question_likert,
                                    self.general_contribution, [0, 0, 0, 0, 5])
        make_rating_answer_counters(self.question_likert_2,
                                    self.general_contribution, [0, 0, 3, 0, 0])

        cache_results(self.evaluation)

        # contribution1: 0.4 * (0.5, 0, 0.5, 0, 0) + 0.6 * (0, 0, 0.5, 0, 0.5) = (0.2, 0, 0.5, 0, 0.3)
        # contribution2: (0, 0.5, 0, 0.5, 0)
        # contributions: (6 / 8) * (0.2, 0, 0.5, 0, 0.3) + (2 / 8) * (0, 0.5, 0, 0.5, 0) = (0.15, 0.125, 0.375, 0.125, 0.225)

        # general_non_grade: (0, 0, 0.375, 0, 0.625)

        # total: 0.375 * (0.15, 0.125, 0.375, 0.125, 0.225) + 0.625 * (0, 0, 0.375, 0, 0.625) = (0.05625, 0.046875, 0.375, 0.046875, 0.475)

        distribution = calculate_average_distribution(self.evaluation)
        self.assertAlmostEqual(distribution[0], 0.05625)
        self.assertAlmostEqual(distribution[1], 0.046875)
        self.assertAlmostEqual(distribution[2], 0.375)
        self.assertAlmostEqual(distribution[3], 0.046875)
        self.assertAlmostEqual(distribution[4], 0.475)
Example 8
    def test_single_result_can_be_deleted_only_in_reviewed(self):
        responsible = baker.make(UserProfile)
        evaluation = baker.make(Evaluation, is_single_result=True)
        contribution = baker.make(
            Contribution,
            evaluation=evaluation,
            contributor=responsible,
            questionnaires=[Questionnaire.single_result_questionnaire()],
            role=Contribution.Role.EDITOR,
            textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
        )
        make_rating_answer_counters(Questionnaire.single_result_questionnaire().questions.first(), contribution)
        evaluation.skip_review_single_result()
        evaluation.publish()
        evaluation.save()

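        # While published, the single result must not be deletable by a manager;
        # unpublishing it (back to the reviewed state) allows deletion again.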
        self.assertTrue(Evaluation.objects.filter(pk=evaluation.pk).exists())
        self.assertFalse(evaluation.can_be_deleted_by_manager)

        evaluation.unpublish()
        self.assertTrue(evaluation.can_be_deleted_by_manager)

        RatingAnswerCounter.objects.filter(contribution__evaluation=evaluation).delete()
        evaluation.delete()
        self.assertFalse(Evaluation.objects.filter(pk=evaluation.pk).exists())
Example 9
    def test_course_type_ordering(self):
        degree = baker.make(Degree)
        course_type_1 = baker.make(CourseType, order=1)
        course_type_2 = baker.make(CourseType, order=2)
        semester = baker.make(Semester)
        evaluation_1 = baker.make(
            Evaluation,
            course=baker.make(Course, semester=semester, degrees=[degree], type=course_type_1),
            state=Evaluation.State.PUBLISHED,
            _participant_count=2,
            _voter_count=2,
        )
        evaluation_2 = baker.make(
            Evaluation,
            course=baker.make(Course, semester=semester, degrees=[degree], type=course_type_2),
            state=Evaluation.State.PUBLISHED,
            _participant_count=2,
            _voter_count=2,
        )

        cache_results(evaluation_1)
        cache_results(evaluation_2)

        questionnaire = baker.make(Questionnaire)
        question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire)

        evaluation_1.general_contribution.questionnaires.set([questionnaire])
        make_rating_answer_counters(question, evaluation_1.general_contribution)

        evaluation_2.general_contribution.questionnaires.set([questionnaire])
        make_rating_answer_counters(question, evaluation_2.general_contribution)

        binary_content = BytesIO()
        ResultsExporter().export(
            binary_content, [semester], [([degree.id], [course_type_1.id, course_type_2.id])], True, True
        )
        binary_content.seek(0)
        workbook = xlrd.open_workbook(file_contents=binary_content.read())

        self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_1.full_name + "\n")
        self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_2.full_name + "\n")

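        # Lowering course_type_2's order below course_type_1's should swap the two
        # result columns in a fresh export.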
        course_type_2.order = 0
        course_type_2.save()

        binary_content = BytesIO()
        ResultsExporter().export(
            binary_content, [semester], [([degree.id], [course_type_1.id, course_type_2.id])], True, True
        )
        binary_content.seek(0)
        workbook = xlrd.open_workbook(file_contents=binary_content.read())

        self.assertEqual(workbook.sheets()[0].row_values(0)[1], evaluation_2.full_name + "\n")
        self.assertEqual(workbook.sheets()[0].row_values(0)[2], evaluation_1.full_name + "\n")
Example 10
    @staticmethod
    def test_single_result_can_be_published():
        """Regression test for #1238"""
        responsible = baker.make(UserProfile)
        single_result = baker.make(Evaluation, is_single_result=True, _participant_count=5, _voter_count=5)
        contribution = baker.make(
            Contribution,
            evaluation=single_result,
            contributor=responsible,
            questionnaires=[Questionnaire.single_result_questionnaire()],
            role=Contribution.Role.EDITOR,
            textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
        )
        make_rating_answer_counters(Questionnaire.single_result_questionnaire().questions.first(), contribution)

        single_result.skip_review_single_result()
        single_result.publish()  # used to crash
Example 11
    def test_questionnaire_ordering(self):
        degree = baker.make(Degree)
        evaluation = baker.make(
            Evaluation,
            course__degrees=[degree],
            state=Evaluation.State.PUBLISHED,
            _participant_count=2,
            _voter_count=2,
        )

        questionnaire_1 = baker.make(Questionnaire, order=1, type=Questionnaire.Type.TOP)
        questionnaire_2 = baker.make(Questionnaire, order=4, type=Questionnaire.Type.TOP)
        questionnaire_3 = baker.make(Questionnaire, order=1, type=Questionnaire.Type.BOTTOM)
        questionnaire_4 = baker.make(Questionnaire, order=4, type=Questionnaire.Type.BOTTOM)

        question_1 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_1)
        question_2 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_2)
        question_3 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_3)
        question_4 = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire_4)

        evaluation.general_contribution.questionnaires.set(
            [questionnaire_1, questionnaire_2, questionnaire_3, questionnaire_4]
        )

        make_rating_answer_counters(question_1, evaluation.general_contribution)
        make_rating_answer_counters(question_2, evaluation.general_contribution)
        make_rating_answer_counters(question_3, evaluation.general_contribution)
        make_rating_answer_counters(question_4, evaluation.general_contribution)

        cache_results(evaluation)

        binary_content = BytesIO()
        ResultsExporter().export(
            binary_content,
            [evaluation.course.semester],
            [([course_degree.id for course_degree in evaluation.course.degrees.all()], [evaluation.course.type.id])],
            True,
            True,
        )
        binary_content.seek(0)
        workbook = xlrd.open_workbook(file_contents=binary_content.read())

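        # TOP questionnaires are listed before BOTTOM ones; within each type they
        # follow their order field (1 before 4).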
        self.assertEqual(workbook.sheets()[0].row_values(4)[0], questionnaire_1.name)
        self.assertEqual(workbook.sheets()[0].row_values(5)[0], question_1.text)

        self.assertEqual(workbook.sheets()[0].row_values(7)[0], questionnaire_2.name)
        self.assertEqual(workbook.sheets()[0].row_values(8)[0], question_2.text)

        self.assertEqual(workbook.sheets()[0].row_values(10)[0], questionnaire_3.name)
        self.assertEqual(workbook.sheets()[0].row_values(11)[0], question_3.text)

        self.assertEqual(workbook.sheets()[0].row_values(13)[0], questionnaire_4.name)
        self.assertEqual(workbook.sheets()[0].row_values(14)[0], question_4.text)
Example 12
    def test_calculation_bipolar_results(self):
        contributor1 = baker.make(UserProfile)
        student = baker.make(UserProfile)

        evaluation = baker.make(
            Evaluation,
            state=Evaluation.State.PUBLISHED,
            participants=[student, contributor1],
            voters=[student, contributor1],
        )
        questionnaire = baker.make(Questionnaire)
        question = baker.make(Question,
                              questionnaire=questionnaire,
                              type=Question.EASY_DIFFICULT)
        contribution1 = baker.make(Contribution,
                                   contributor=contributor1,
                                   evaluation=evaluation,
                                   questionnaires=[questionnaire])

        make_rating_answer_counters(question, contribution1,
                                    [5, 5, 15, 30, 25, 15, 10])

        cache_results(evaluation)
        evaluation_results = get_results(evaluation)

        self.assertEqual(len(evaluation_results.questionnaire_results), 1)
        questionnaire_result = evaluation_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

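        # The bipolar scale appears to map distance k from the center to grade
        # 1 + 4k/3, i.e. (5, 11/3, 7/3, 1, 7/3, 11/3, 5); the weighted sum is then
        # 271.66... / 105 ≈ 2.5873, and the distribution is the counts over 105.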
        self.assertEqual(question_result.count_sum, 105)
        self.assertAlmostEqual(question_result.average, 2.58730158)
        self.assertEqual(question_result.counts, (5, 5, 15, 30, 25, 15, 10))
        self.assertEqual(question_result.minus_balance_count, 32.5)
        distribution = normalized_distribution(question_result.counts)
        self.assertAlmostEqual(distribution[0], 0.04761904)
        self.assertAlmostEqual(distribution[1], 0.04761904)
        self.assertAlmostEqual(distribution[2], 0.1428571)
        self.assertAlmostEqual(distribution[3], 0.28571428)
        self.assertAlmostEqual(distribution[4], 0.2380952)
        self.assertAlmostEqual(distribution[5], 0.1428571)
        self.assertAlmostEqual(distribution[6], 0.09523809)
Example 13
    def test_unipolarized_unipolar(self):
        answer_counters = make_rating_answer_counters(
            self.question_likert, self.general_contribution, [5, 3, 1, 1, 0])

        result = RatingResult(self.question_likert, answer_counters)
        distribution = unipolarized_distribution(result)
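        # For a unipolar question the unipolarized distribution is just the
        # normalized counts: (5, 3, 1, 1, 0) / 10.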
        self.assertAlmostEqual(distribution[0], 0.5)
        self.assertAlmostEqual(distribution[1], 0.3)
        self.assertAlmostEqual(distribution[2], 0.1)
        self.assertAlmostEqual(distribution[3], 0.1)
        self.assertAlmostEqual(distribution[4], 0.0)
Example 14
    def test_yes_no_question_result(self):
        degree = baker.make(Degree)
        evaluation = baker.make(
            Evaluation,
            _voter_count=6,
            _participant_count=10,
            course__degrees=[degree],
            state=Evaluation.State.PUBLISHED,
        )
        questionnaire = baker.make(Questionnaire)
        question = baker.make(Question, type=Question.POSITIVE_YES_NO, questionnaire=questionnaire)

        make_rating_answer_counters(question, evaluation.general_contribution, [4, 2])

        evaluation.general_contribution.questionnaires.set([questionnaire])
        cache_results(evaluation)

        sheet = self.get_export_sheet(evaluation.course.semester, degree, [evaluation.course.type.id])
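        # 4 of the 6 answers are positive, shown as an approval rate of 67%.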
        self.assertEqual(sheet.row_values(5)[0], question.text)
        self.assertEqual(sheet.row_values(5)[1], "67%")
Example 15
    def test_unipolarized_bipolar(self):
        answer_counters = make_rating_answer_counters(
            self.question_bipolar, self.general_contribution,
            [0, 1, 4, 8, 2, 2, 3])

        result = RatingResult(self.question_bipolar, answer_counters)
        distribution = unipolarized_distribution(result)
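        # The 7 bipolar counts appear to be folded onto the 5-step scale: the 8
        # center answers land in the first bin (8/20 = 0.4), the extremes in the
        # last bin, and the in-between positions (grades 7/3 and 11/3) are split
        # linearly between neighbouring bins, giving (0.4, 0.2, 0.15, 0.1, 0.15).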
        self.assertAlmostEqual(distribution[0], 0.4)
        self.assertAlmostEqual(distribution[1], 0.2)
        self.assertAlmostEqual(distribution[2], 0.15)
        self.assertAlmostEqual(distribution[3], 0.1)
        self.assertAlmostEqual(distribution[4], 0.15)
Example 16
    def test_heading_question_filtering(self):
        degree = baker.make(Degree)
        evaluation = baker.make(
            Evaluation,
            course__degrees=[degree],
            state=Evaluation.State.PUBLISHED,
            _participant_count=2,
            _voter_count=2,
        )
        contributor = baker.make(UserProfile)
        evaluation.general_contribution.questionnaires.set([baker.make(Questionnaire)])

        questionnaire = baker.make(Questionnaire)
        baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=0)
        heading_question = baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=1)
        likert_question = baker.make(Question, type=Question.LIKERT, questionnaire=questionnaire, order=2)
        baker.make(Question, type=Question.HEADING, questionnaire=questionnaire, order=3)

        contribution = baker.make(
            Contribution, evaluation=evaluation, questionnaires=[questionnaire], contributor=contributor
        )
        make_rating_answer_counters(likert_question, contribution)

        cache_results(evaluation)

        binary_content = BytesIO()
        ResultsExporter().export(
            binary_content,
            [evaluation.course.semester],
            [([course_degree.id for course_degree in evaluation.course.degrees.all()], [evaluation.course.type.id])],
            True,
            True,
        )
        binary_content.seek(0)
        workbook = xlrd.open_workbook(file_contents=binary_content.read())

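        # Only the heading directly preceding a rating question is exported; the
        # heading at order 0 and the trailing heading are filtered out.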
        self.assertEqual(workbook.sheets()[0].row_values(4)[0], questionnaire.name)
        self.assertEqual(workbook.sheets()[0].row_values(5)[0], heading_question.text)
        self.assertEqual(workbook.sheets()[0].row_values(6)[0], likert_question.text)
        self.assertEqual(workbook.sheets()[0].row_values(7)[0], "")
Example 17
    def test_unipolarized_yesno(self):
        question_yesno = baker.make(Question,
                                    questionnaire=self.questionnaire,
                                    type=Question.POSITIVE_YES_NO)
        answer_counters = make_rating_answer_counters(
            question_yesno, self.general_contribution, [57, 43])

        result = RatingResult(question_yesno, answer_counters)
        distribution = unipolarized_distribution(result)
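        # Yes counts as the best grade and no as the worst, so the 57/43 split
        # lands in the first and last bins.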
        self.assertAlmostEqual(distribution[0], 0.57)
        self.assertEqual(distribution[1], 0)
        self.assertEqual(distribution[2], 0)
        self.assertEqual(distribution[3], 0)
        self.assertAlmostEqual(distribution[4], 0.43)
Example 18
    def test_exclude_used_but_unanswered_questionnaires(self):
        degree = baker.make(Degree)
        evaluation = baker.make(
            Evaluation,
            _voter_count=10,
            _participant_count=10,
            state=Evaluation.State.PUBLISHED,
            course__degrees=[degree],
        )
        used_questionnaire = baker.make(Questionnaire)
        used_question = baker.make(Question, type=Question.LIKERT, questionnaire=used_questionnaire)
        unused_questionnaire = baker.make(Questionnaire)
        unused_question = baker.make(Question, type=Question.LIKERT, questionnaire=unused_questionnaire)

        evaluation.general_contribution.questionnaires.set([used_questionnaire, unused_questionnaire])
        make_rating_answer_counters(used_question, evaluation.general_contribution)
        cache_results(evaluation)

        sheet = self.get_export_sheet(evaluation.course.semester, degree, [evaluation.course.type.id])
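        # The questionnaire that was assigned but never answered must not show up
        # anywhere in the exported sheet.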
        self.assertEqual(sheet.row_values(4)[0], used_questionnaire.name)
        self.assertEqual(sheet.row_values(5)[0], used_question.text)
        self.assertNotIn(unused_questionnaire.name, sheet.col_values(0))
        self.assertNotIn(unused_question.text, sheet.col_values(0))
Example 19
    def test_get_single_result_rating_result(self):
        single_result_evaluation = baker.make(Evaluation,
                                              state=Evaluation.State.PUBLISHED,
                                              is_single_result=True)
        questionnaire = Questionnaire.single_result_questionnaire()
        contribution = baker.make(
            Contribution,
            contributor=baker.make(UserProfile),
            evaluation=single_result_evaluation,
            questionnaires=[questionnaire],
            role=Contribution.Role.EDITOR,
            textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
        )
        make_rating_answer_counters(questionnaire.questions.first(),
                                    contribution, [1, 0, 0, 1, 0])

        cache_results(single_result_evaluation)
        distribution = calculate_average_distribution(single_result_evaluation)
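        # One grade-1 and one grade-4 answer: distribution (0.5, 0, 0, 0.5, 0),
        # raw counts (1, 0, 0, 1, 0).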
        self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0))
        rating_result = get_single_result_rating_result(
            single_result_evaluation)
        self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0))
Example 20
    def test_no_empty_rating_answer_counters_left(self):
        counters = []
        for question in chain(self.contributor_questions,
                              self.general_questions):
            counts = [
                1 for choice in CHOICES[question.type].values
                if choice != NO_ANSWER
            ]
            counters.extend(
                make_rating_answer_counters(question, self.contribution,
                                            counts, False))
        RatingAnswerCounter.objects.bulk_create(counters)

        old_count = RatingAnswerCounter.objects.count()

        management.call_command("anonymize", stdout=StringIO())

        new_count = RatingAnswerCounter.objects.count()
        self.assertLess(new_count, old_count)

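        # Counters may be dropped during anonymization, but none of the remaining
        # counters may be left with a count of zero.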
        for counter in RatingAnswerCounter.objects.all():
            self.assertGreater(counter.count, 0)
Example 21
    def test_answer_count_unchanged(self):
        answers_per_question = defaultdict(int)

        counters = []
        for question in chain(self.contributor_questions,
                              self.general_questions):
            counts = [
                random.randint(10, 100)
                for choice in CHOICES[question.type].values
                if choice != NO_ANSWER
            ]
            counters.extend(
                make_rating_answer_counters(question, self.contribution,
                                            counts, False))
            answers_per_question[question] += sum(counts)
        RatingAnswerCounter.objects.bulk_create(counters)

        management.call_command("anonymize", stdout=StringIO())

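        # The redistribution performed by anonymize must keep each question's
        # total answer count unchanged.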
        for question in chain(self.contributor_questions,
                              self.general_questions):
            answer_count = RatingAnswerCounter.objects.filter(
                question=question).aggregate(Sum("count"))["count__sum"]
            self.assertEqual(answers_per_question[question], answer_count)
Example 22
    def test_distribution_with_general_grade_question(self):
        counters = [
            *make_rating_answer_counters(self.question_grade,
                                         self.contribution1, [1, 0, 1, 0, 0],
                                         False),
            *make_rating_answer_counters(self.question_grade,
                                         self.contribution2, [0, 1, 0, 1, 0],
                                         False),
            *make_rating_answer_counters(self.question_likert,
                                         self.contribution1, [0, 0, 3, 0, 3],
                                         False),
            *make_rating_answer_counters(self.question_likert,
                                         self.general_contribution,
                                         [0, 0, 0, 0, 5], False),
            *make_rating_answer_counters(self.question_likert_2,
                                         self.general_contribution,
                                         [0, 0, 3, 0, 0], False),
            *make_rating_answer_counters(self.question_grade,
                                         self.general_contribution,
                                         [0, 10, 0, 0, 0], False),
        ]
        RatingAnswerCounter.objects.bulk_create(counters)

        cache_results(self.evaluation)

        # contributions and general_non_grade are as above
        # general_grade: (0, 1, 0, 0, 0)

        # total: 0.3 * (0.15, 0.125, 0.375, 0.125, 0.225) + 0.2 * (0, 1, 0, 0, 0) + 0.5 * (0, 0, 0.375, 0, 0.625) = (0.045, 0.2375, 0.3, 0.0375, 0.38)

        distribution = calculate_average_distribution(self.evaluation)
        self.assertAlmostEqual(distribution[0], 0.045)
        self.assertAlmostEqual(distribution[1], 0.2375)
        self.assertAlmostEqual(distribution[2], 0.3)
        self.assertAlmostEqual(distribution[3], 0.0375)
        self.assertAlmostEqual(distribution[4], 0.38)
Example 23
    def test_contributor_result_export(self):
        degree = baker.make(Degree)
        contributor = baker.make(UserProfile)
        other_contributor = baker.make(UserProfile)
        evaluation_1 = baker.make(
            Evaluation,
            course=baker.make(Course, degrees=[degree], responsibles=[contributor]),
            state=Evaluation.State.PUBLISHED,
            _participant_count=10,
            _voter_count=1,
        )
        evaluation_2 = baker.make(
            Evaluation,
            course=baker.make(Course, degrees=[degree], responsibles=[other_contributor]),
            state=Evaluation.State.PUBLISHED,
            _participant_count=2,
            _voter_count=2,
        )
        contribution = baker.make(Contribution, evaluation=evaluation_2, contributor=contributor)
        other_contribution = baker.make(Contribution, evaluation=evaluation_2, contributor=other_contributor)

        general_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.TOP)
        contributor_questionnaire = baker.make(Questionnaire, type=Questionnaire.Type.CONTRIBUTOR)
        general_question = baker.make(Question, type=Question.LIKERT, questionnaire=general_questionnaire)
        contributor_question = baker.make(Question, type=Question.LIKERT, questionnaire=contributor_questionnaire)

        evaluation_1.general_contribution.questionnaires.set([general_questionnaire])
        make_rating_answer_counters(general_question, evaluation_1.general_contribution, [2, 0, 0, 0, 0])
        evaluation_2.general_contribution.questionnaires.set([general_questionnaire])
        make_rating_answer_counters(general_question, evaluation_2.general_contribution, [0, 0, 0, 2, 0])

        contribution.questionnaires.set([contributor_questionnaire])
        make_rating_answer_counters(contributor_question, contribution, [0, 0, 2, 0, 0])
        other_contribution.questionnaires.set([contributor_questionnaire])
        make_rating_answer_counters(contributor_question, other_contribution, [0, 2, 0, 0, 0])

        cache_results(evaluation_1)
        cache_results(evaluation_2)

        binary_content = export_contributor_results(contributor).content
        workbook = xlrd.open_workbook(file_contents=binary_content)

        self.assertEqual(
            workbook.sheets()[0].row_values(0)[1],
            f"{evaluation_1.full_name}\n{evaluation_1.course.semester.name}\n{contributor.full_name}",
        )
        self.assertEqual(
            workbook.sheets()[0].row_values(0)[2],
            f"{evaluation_2.full_name}\n{evaluation_2.course.semester.name}\n{other_contributor.full_name}",
        )
        self.assertEqual(workbook.sheets()[0].row_values(4)[0], general_questionnaire.name)
        self.assertEqual(workbook.sheets()[0].row_values(5)[0], general_question.text)
        self.assertEqual(workbook.sheets()[0].row_values(5)[2], 4.0)
        self.assertEqual(
            workbook.sheets()[0].row_values(7)[0],
            f"{contributor_questionnaire.name} ({contributor.full_name})",
        )
        self.assertEqual(workbook.sheets()[0].row_values(8)[0], contributor_question.text)
        self.assertEqual(workbook.sheets()[0].row_values(8)[2], 3.0)
        self.assertEqual(workbook.sheets()[0].row_values(10)[0], "Overall Average Grade")
        self.assertEqual(workbook.sheets()[0].row_values(10)[2], 3.25)
Example 24
    def test_average_grade(self):
        question_grade2 = baker.make(Question,
                                     questionnaire=self.questionnaire,
                                     type=Question.GRADE)

        make_rating_answer_counters(self.question_grade, self.contribution1,
                                    [0, 1, 0, 0, 0])
        make_rating_answer_counters(self.question_grade, self.contribution2,
                                    [0, 0, 0, 2, 0])
        make_rating_answer_counters(question_grade2, self.contribution1,
                                    [1, 0, 0, 0, 0])
        make_rating_answer_counters(self.question_likert, self.contribution1,
                                    [0, 0, 4, 0, 0])
        make_rating_answer_counters(self.question_likert,
                                    self.general_contribution, [0, 0, 0, 0, 5])
        make_rating_answer_counters(self.question_likert_2,
                                    self.general_contribution, [0, 0, 3, 0, 0])
        make_rating_answer_counters(self.question_bipolar,
                                    self.general_contribution,
                                    [0, 0, 0, 0, 0, 0, 2])
        make_rating_answer_counters(self.question_bipolar_2,
                                    self.general_contribution,
                                    [0, 0, 4, 0, 0, 0, 0])

        cache_results(self.evaluation)

        contributor_weights_sum = (
            settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT +
            settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT)
        contributor1_average = (
            (settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT *
             ((2 * 1) + (1 * 1)) / (1 + 1)) +
            (settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT *
             3)) / contributor_weights_sum  # 2.4
        contributor2_average = 4
        contributors_average = (
            (4 * contributor1_average) +
            (2 * contributor2_average)) / (4 + 2)  # 2.9333333

        general_non_grade_average = (
            (5 * 5) + (3 * 3) + (2 * 5) +
            (4 * 7 / 3)) / (5 + 3 + 2 + 4)  # 3.80952380

        contributors_percentage = settings.CONTRIBUTIONS_WEIGHT / (
            settings.CONTRIBUTIONS_WEIGHT +
            settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT)  # 0.375
        general_non_grade_percentage = settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT / (
            settings.CONTRIBUTIONS_WEIGHT +
            settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT)  # 0.625

        total_grade = (contributors_percentage * contributors_average +
                       general_non_grade_percentage * general_non_grade_average
                       )  # 1.1 + 2.38095238 = 3.48095238

        average_grade = distribution_to_grade(
            calculate_average_distribution(self.evaluation))
        self.assertAlmostEqual(average_grade, total_grade)
        self.assertAlmostEqual(average_grade, 3.48095238)