Example #1
    def test_results_cache_refreshed(self):
        contributor = baker.make(UserProfile, first_name="Peter")
        evaluation = baker.make(Evaluation, state="published")
        baker.make(Contribution,
                   contributor=contributor,
                   evaluation=evaluation)

        cache_results(evaluation)
        results_before = get_results(evaluation)

        form_data = get_form_data_from_instance(UserForm, contributor)
        form_data["first_name"] = "Patrick"
        form = UserForm(form_data, instance=contributor)
        form.save()

        results_after = get_results(evaluation)

        self.assertCountEqual(
            (result.contributor.first_name
             for result in results_before.contribution_results
             if result.contributor),
            ("Peter", ),
        )

        self.assertCountEqual(
            (result.contributor.first_name
             for result in results_after.contribution_results
             if result.contributor),
            ("Patrick", ),
        )
Example #2
    def test_calculation_unipolar_results(self):
        contributor1 = baker.make(UserProfile)
        student = baker.make(UserProfile)

        evaluation = baker.make(
            Evaluation,
            state=Evaluation.State.PUBLISHED,
            participants=[student, contributor1],
            voters=[student, contributor1],
        )
        questionnaire = baker.make(Questionnaire)
        question = baker.make(Question,
                              questionnaire=questionnaire,
                              type=Question.GRADE)
        contribution1 = baker.make(Contribution,
                                   contributor=contributor1,
                                   evaluation=evaluation,
                                   questionnaires=[questionnaire])

        make_rating_answer_counters(question, contribution1,
                                    [5, 15, 40, 60, 30])

        cache_results(evaluation)
        evaluation_results = get_results(evaluation)

        self.assertEqual(len(evaluation_results.questionnaire_results), 1)
        questionnaire_result = evaluation_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

        self.assertEqual(question_result.count_sum, 150)
        self.assertAlmostEqual(question_result.average, float(109) / 30)
        self.assertEqual(question_result.counts, (5, 15, 40, 60, 30))
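The numbers asserted here follow from a plain weighted mean over the answer values 1 to 5; a quick sanity check of the arithmetic (plain Python, not project code):

    # Reproduce the asserted unipolar results by hand, assuming GRADE answers are valued 1..5.
    counts = (5, 15, 40, 60, 30)
    count_sum = sum(counts)                                                    # 150
    average = sum(value * count for value, count in enumerate(counts, start=1)) / count_sum
    assert count_sum == 150
    assert abs(average - 109 / 30) < 1e-9                                      # 545 / 150 == 109 / 30 ≈ 3.63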
Example #3
    def test_results_cache_after_user_merge(self):
        """Asserts that merge_users leaves the results cache in a consistent state. Regression test for #907"""
        contributor = baker.make(UserProfile)
        main_user = baker.make(UserProfile)
        student = baker.make(UserProfile)

        evaluation = baker.make(Evaluation,
                                state=Evaluation.State.PUBLISHED,
                                participants=[student])
        questionnaire = baker.make(Questionnaire)
        baker.make(Question, questionnaire=questionnaire, type=Question.GRADE)
        baker.make(Contribution,
                   contributor=contributor,
                   evaluation=evaluation,
                   questionnaires=[questionnaire])

        cache_results(evaluation)

        merge_users(main_user, contributor)

        evaluation_results = get_results(evaluation)

        for contribution_result in evaluation_results.contribution_results:
            self.assertTrue(
                Contribution.objects.filter(
                    evaluation=evaluation,
                    contributor=contribution_result.contributor).exists())
Example #4
    def test_calculation_unipolar_results(self):
        contributor1 = baker.make(UserProfile)
        student = baker.make(UserProfile)

        evaluation = baker.make(Evaluation, state='published', participants=[student, contributor1], voters=[student, contributor1])
        questionnaire = baker.make(Questionnaire)
        question = baker.make(Question, questionnaire=questionnaire, type=Question.GRADE)
        contribution1 = baker.make(Contribution, contributor=contributor1, evaluation=evaluation, questionnaires=[questionnaire])

        baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=1, count=5)
        baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=2, count=15)
        baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=3, count=40)
        baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=4, count=60)
        baker.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=5, count=30)

        cache_results(evaluation)
        evaluation_results = get_results(evaluation)

        self.assertEqual(len(evaluation_results.questionnaire_results), 1)
        questionnaire_result = evaluation_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

        self.assertEqual(question_result.count_sum, 150)
        self.assertAlmostEqual(question_result.average, float(109) / 30)
        self.assertEqual(question_result.counts, (5, 15, 40, 60, 30))
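This variant creates the RatingAnswerCounter rows explicitly; the make_rating_answer_counters helper used in the other examples presumably wraps the same loop. A minimal sketch of such a helper (an assumption for illustration, not the project's actual implementation):

    def make_rating_answer_counters(question, contribution, answer_counts):
        # Hypothetical helper: one counter per answer value (1-based),
        # mirroring the explicit baker.make calls in the test above.
        for answer, count in enumerate(answer_counts, start=1):
            baker.make(RatingAnswerCounter,
                       question=question,
                       contribution=contribution,
                       answer=answer,
                       count=count)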
Example #5
    def filter_evaluations(semesters, evaluation_states, degrees, course_types, contributor, include_not_enough_voters):
        course_results_exist = False
        evaluations_with_results = list()
        used_questionnaires = set()
        evaluations_filter = Q(
            course__semester__in=semesters,
            state__in=evaluation_states,
            course__degrees__in=degrees,
            course__type__in=course_types,
        )
        if contributor:
            evaluations_filter &= Q(course__responsibles__in=[contributor]) | Q(
                contributions__contributor__in=[contributor]
            )
        evaluations = Evaluation.objects.filter(evaluations_filter).distinct()
        for evaluation in evaluations:
            if evaluation.is_single_result:
                continue
            if not evaluation.can_publish_rating_results and not include_not_enough_voters:
                continue
            results = OrderedDict()
            for contribution_result in get_results(evaluation).contribution_results:
                for questionnaire_result in contribution_result.questionnaire_results:
                    # RatingQuestion.counts is a tuple of integers or None; skip this questionnaire
                    # result if none of its rating questions received any answers
                    if all(
                        not question_result.question.is_rating_question
                        or question_result.counts is None
                        or sum(question_result.counts) == 0
                        for question_result in questionnaire_result.question_results
                    ):
                        continue
                    if not contributor or contribution_result.contributor is None or contribution_result.contributor == contributor:
                        results.setdefault(questionnaire_result.questionnaire.id, []).extend(
                            questionnaire_result.question_results
                        )
                        used_questionnaires.add(questionnaire_result.questionnaire)
            evaluation.course_evaluations_count = evaluation.course.evaluations.count()
            if evaluation.course_evaluations_count > 1:
                course_results_exist = True
                course_weight_sum = sum(e.weight for e in evaluation.course.evaluations.all())
                evaluation.weight_percentage = int((evaluation.weight / course_weight_sum) * 100)
                evaluation.course.avg_grade = distribution_to_grade(calculate_average_course_distribution(evaluation.course))
            evaluations_with_results.append((evaluation, results))

        evaluations_with_results.sort(key=lambda cr: (cr[0].course.semester.id, cr[0].course.type.order, cr[0].full_name))
        used_questionnaires = sorted(used_questionnaires)

        return evaluations_with_results, used_questionnaires, course_results_exist
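The weight_percentage computed above is the evaluation's share of the summed course weights, truncated to an integer; for instance (hypothetical weights, not data from the project):

    # Two evaluations of one course weighted 2 and 1 yield 66 % and 33 %.
    weights = [2, 1]
    percentages = [int(weight / sum(weights) * 100) for weight in weights]   # [66, 33]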
Example #6
def evaluation_detail(request, semester_id, evaluation_id):
    # pylint: disable=too-many-locals
    semester = get_object_or_404(Semester, id=semester_id)
    evaluation = get_object_or_404(semester.evaluations, id=evaluation_id, course__semester=semester)

    view, view_as_user, represented_users, contributor_id = evaluation_detail_parse_get_parameters(request, evaluation)

    evaluation_result = get_results(evaluation)
    remove_textanswers_that_the_user_must_not_see(evaluation_result, view_as_user, represented_users, view)
    exclude_empty_headings(evaluation_result)
    remove_empty_questionnaire_and_contribution_results(evaluation_result)
    add_warnings(evaluation, evaluation_result)

    top_results, bottom_results, contributor_results = split_evaluation_result_into_top_bottom_and_contributor(
        evaluation_result, view_as_user, view
    )

    course_evaluations = get_evaluations_of_course(evaluation.course, request)
    course_evaluations.sort(key=lambda evaluation: evaluation.name)

    contributors_with_omitted_results = []
    if view == "export":
        contributors_with_omitted_results = [
            contribution_result.contributor
            for contribution_result in evaluation_result.contribution_results
            if contribution_result.contributor not in [None, view_as_user]
        ]

    # if the results are not cached, we need to attach distribution
    # information for rendering the distribution bar
    if evaluation.state not in STATES_WITH_RESULT_TEMPLATE_CACHING:
        prefetched = get_evaluations_with_prefetched_data([evaluation])
        evaluation = get_evaluations_with_course_result_attributes(prefetched)[0]

    is_responsible_or_contributor_or_delegate = evaluation.is_user_responsible_or_contributor_or_delegate(view_as_user)

    template_data = dict(
        evaluation=evaluation,
        course=evaluation.course,
        course_evaluations=course_evaluations,
        general_questionnaire_results_top=top_results,
        general_questionnaire_results_bottom=bottom_results,
        contributor_contribution_results=contributor_results,
        is_reviewer=view_as_user.is_reviewer,
        is_contributor=evaluation.is_user_contributor(view_as_user),
        is_responsible_or_contributor_or_delegate=is_responsible_or_contributor_or_delegate,
        can_download_grades=view_as_user.can_download_grades,
        can_export_text_answers=(
            view in ("export", "full") and (view_as_user.is_reviewer or is_responsible_or_contributor_or_delegate)
        ),
        view=view,
        view_as_user=view_as_user,
        contributors_with_omitted_results=contributors_with_omitted_results,
        contributor_id=contributor_id,
    )
    return render(request, "results_evaluation_detail.html", template_data)
Example #7
def extract_evaluation_answer_data(request, evaluation):
    # TextAnswerExporter wants a dict mapping each Question to a tuple of (contributor_name, list of answer strings)

    view, view_as_user, represented_users, contributor_id = evaluation_detail_parse_get_parameters(request, evaluation)

    evaluation_result = get_results(evaluation)
    filter_text_answers(evaluation_result)
    remove_textanswers_that_the_user_must_not_see(evaluation_result, view_as_user, represented_users, view)

    results = TextAnswerExporter.InputData(evaluation_result.contribution_results)

    return results, contributor_id
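The comment at the top of the function describes the structure that TextAnswerExporter.InputData is built from. Schematically (hypothetical contributor and answer texts, not taken from the project):

    # Shape described in the comment above: each Question maps to
    # (contributor_name, list of answer strings), e.g.
    #
    #     {
    #         <Question: "How was the lecture?">: ("Jane Doe", ["Great lecture", "More examples, please"]),
    #     }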
Example #8
    def test_text_answer_export(self):
        evaluation = baker.make(Evaluation,
                                state=Evaluation.State.PUBLISHED,
                                can_publish_text_results=True)
        questions = [
            baker.make(Question, questionnaire__type=t, type=Question.TEXT)
            for t in Questionnaire.Type.values
        ]

        for idx in [0, 1, 2, 2, 0]:
            baker.make(
                TextAnswer,
                question=questions[idx],
                contribution__evaluation=evaluation,
                contribution__questionnaires=[questions[idx].questionnaire],
                state=TextAnswer.State.PUBLISHED,
            )

        cache_results(evaluation)
        evaluation_result = get_results(evaluation)
        filter_text_answers(evaluation_result)

        results = TextAnswerExporter.InputData(
            evaluation_result.contribution_results)

        binary_content = BytesIO()
        TextAnswerExporter(evaluation.name, evaluation.course.semester.name,
                           evaluation.course.responsibles_names, results,
                           None).export(binary_content)
        binary_content.seek(0)
        workbook = xlrd.open_workbook(file_contents=binary_content.read())
        sheet = workbook.sheets()[0]

        # Sheet headline
        self.assertEqual(sheet.row_values(0)[0], evaluation.name)
        self.assertEqual(
            sheet.row_values(1)[0], evaluation.course.semester.name)
        self.assertEqual(
            sheet.row_values(2)[0], evaluation.course.responsibles_names)

        # Questions are ordered by questionnaire type; answers keep their original order.
        # questions[0] received two answers (rows 3-4), so questions[1] starts at row 5 and questions[2] at row 6.
        self.assertEqual(sheet.row_values(3)[0], questions[0].text)
        self.assertEqual(sheet.row_values(5)[0], questions[1].text)
        self.assertEqual(sheet.row_values(6)[0], questions[2].text)
Example #9
    def test_calculation_bipolar_results(self):
        contributor1 = baker.make(UserProfile)
        student = baker.make(UserProfile)

        evaluation = baker.make(
            Evaluation,
            state=Evaluation.State.PUBLISHED,
            participants=[student, contributor1],
            voters=[student, contributor1],
        )
        questionnaire = baker.make(Questionnaire)
        question = baker.make(Question,
                              questionnaire=questionnaire,
                              type=Question.EASY_DIFFICULT)
        contribution1 = baker.make(Contribution,
                                   contributor=contributor1,
                                   evaluation=evaluation,
                                   questionnaires=[questionnaire])

        make_rating_answer_counters(question, contribution1,
                                    [5, 5, 15, 30, 25, 15, 10])

        cache_results(evaluation)
        evaluation_results = get_results(evaluation)

        self.assertEqual(len(evaluation_results.questionnaire_results), 1)
        questionnaire_result = evaluation_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

        self.assertEqual(question_result.count_sum, 105)
        self.assertAlmostEqual(question_result.average, 2.58730158)
        self.assertEqual(question_result.counts, (5, 5, 15, 30, 25, 15, 10))
        self.assertEqual(question_result.minus_balance_count, 32.5)
        distribution = normalized_distribution(question_result.counts)
        self.assertAlmostEqual(distribution[0], 0.04761904)
        self.assertAlmostEqual(distribution[1], 0.04761904)
        self.assertAlmostEqual(distribution[2], 0.1428571)
        self.assertAlmostEqual(distribution[3], 0.28571428)
        self.assertAlmostEqual(distribution[4], 0.2380952)
        self.assertAlmostEqual(distribution[5], 0.1428571)
        self.assertAlmostEqual(distribution[6], 0.09523809)
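The asserted average follows from a weighted mean once the seven bipolar answers are mapped to grade values; the symmetric mapping (5, 11/3, 7/3, 1, 7/3, 11/3, 5) reproduces it exactly, though that mapping is an assumption here rather than a quote of the project code. The normalized distribution is simply each count divided by the total:

    # Reproduce the asserted bipolar numbers (the grade mapping is an assumption).
    counts = (5, 5, 15, 30, 25, 15, 10)
    grades = (5, 11 / 3, 7 / 3, 1, 7 / 3, 11 / 3, 5)    # middle answer is best, extremes worst
    count_sum = sum(counts)                              # 105
    average = sum(grade * count for grade, count in zip(grades, counts)) / count_sum
    distribution = [count / count_sum for count in counts]
    assert abs(average - 2.58730158) < 1e-6
    assert abs(distribution[0] - 0.04761904) < 1e-6      # 5 / 105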