Example 1
    def test_archiving_participations_does_not_change_results(self):
        distribution = calculate_average_distribution(self.evaluation)

        self.semester.archive_participations()
        self.refresh_evaluation()
        caches['results'].clear()

        new_distribution = calculate_average_distribution(self.evaluation)
        self.assertEqual(new_distribution, distribution)
Example 2
    def test_archiving_does_not_change_results(self):
        distribution = calculate_average_distribution(self.course)

        self.semester.archive()
        self.refresh_course()
        caches['results'].clear()

        new_distribution = calculate_average_distribution(self.course)
        self.assertEqual(new_distribution, distribution)
Example 3
    def test_average_grade(self):
        question_grade2 = mommy.make(Question, questionnaire=self.questionnaire, type=Question.GRADE)

        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=2, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=2)
        mommy.make(RatingAnswerCounter, question=question_grade2, contribution=self.contribution1, answer=1, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=4)
        mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=5, count=5)
        mommy.make(RatingAnswerCounter, question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3)
        mommy.make(RatingAnswerCounter, question=self.question_bipolar, contribution=self.general_contribution, answer=3, count=2)
        mommy.make(RatingAnswerCounter, question=self.question_bipolar_2, contribution=self.general_contribution, answer=-1, count=4)

        contributor_weights_sum = settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT + settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT
        contributor1_average = ((settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT * ((2 * 1) + (1 * 1)) / (1 + 1)) + (settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT * 3)) / contributor_weights_sum  # 2.4
        contributor2_average = 4
        contributors_average = ((4 * contributor1_average) + (2 * contributor2_average)) / (4 + 2)  # 2.9333333

        general_non_grade_average = ((5 * 5) + (3 * 3) + (2 * 5) + (4 * 7 / 3)) / (5 + 3 + 2 + 4)  # 3.80952380

        contributors_percentage = settings.CONTRIBUTIONS_WEIGHT / (settings.CONTRIBUTIONS_WEIGHT + settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT)  # 0.375
        general_non_grade_percentage = settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT / (settings.CONTRIBUTIONS_WEIGHT + settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT)  # 0.625

        total_grade = contributors_percentage * contributors_average + general_non_grade_percentage * general_non_grade_average  # 1.1 + 2.38095238 = 3.48095238

        average_grade = distribution_to_grade(calculate_average_distribution(self.evaluation))
        self.assertAlmostEqual(average_grade, total_grade)
        self.assertAlmostEqual(average_grade, 3.48095238)
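For reference, the asserted value can be reproduced by hand. A minimal standalone sketch, assuming the weight values the inline comments imply (contributor grade/non-grade weights 4 and 6, contributions/general non-grade weights 3 and 5; the authoritative values live in EvaP's settings):

CONTRIBUTOR_GRADE_W, CONTRIBUTOR_NON_GRADE_W = 4, 6  # assumed settings values
CONTRIBUTIONS_W, GENERAL_NON_GRADE_W = 3, 5          # assumed settings values

# contributor1 mixes two grade answers (2 and 1) with one likert average (3)
contributor1 = (CONTRIBUTOR_GRADE_W * ((2 * 1) + (1 * 1)) / (1 + 1)
                + CONTRIBUTOR_NON_GRADE_W * 3) / (CONTRIBUTOR_GRADE_W + CONTRIBUTOR_NON_GRADE_W)  # 2.4
contributors = ((4 * contributor1) + (2 * 4)) / (4 + 2)  # 2.9333...
# as in the test's comment, bipolar answers 3 and -1 enter as grades 5 and 7/3
general_non_grade = ((5 * 5) + (3 * 3) + (2 * 5) + (4 * 7 / 3)) / (5 + 3 + 2 + 4)  # 3.8095...
total = (CONTRIBUTIONS_W * contributors
         + GENERAL_NON_GRADE_W * general_non_grade) / (CONTRIBUTIONS_W + GENERAL_NON_GRADE_W)
assert abs(total - 3.48095238) < 1e-7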
Example 4
    def test_result_calculation_with_no_contributor_rating_question(self):
        evaluation = baker.make(
            Evaluation,
            state=Evaluation.State.PUBLISHED,
            participants=[self.student1, self.student2],
            voters=[self.student1, self.student2],
        )
        questionnaire_text = baker.make(Questionnaire)
        baker.make(Question,
                   questionnaire=questionnaire_text,
                   type=Question.TEXT)
        baker.make(
            Contribution,
            contributor=baker.make(UserProfile),
            evaluation=evaluation,
            questionnaires=[questionnaire_text],
        )

        evaluation.general_contribution.questionnaires.set(
            [self.questionnaire])
        make_rating_answer_counters(self.question_grade,
                                    evaluation.general_contribution,
                                    [1, 0, 0, 0, 0])
        cache_results(evaluation)

        distribution = calculate_average_distribution(evaluation)
        self.assertEqual(distribution[0], 1)
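The counts list passed to make_rating_answer_counters maps positionally onto answer values. A hedged sketch of the helper's apparent contract (the real implementation lives in EvaP's test utilities and may differ):

from model_bakery import baker
from evap.evaluation.models import RatingAnswerCounter  # assumed import path

def make_rating_answer_counters(question, contribution, answer_counts):
    # For the 5-element lists used here, index i holds the count for answer
    # i + 1; the 7-element bipolar lists in Example 21 start at answer -3.
    offset = 1 if len(answer_counts) == 5 else -3
    for index, count in enumerate(answer_counts):
        if count:
            baker.make(RatingAnswerCounter, question=question,
                       contribution=contribution,
                       answer=index + offset, count=count)

So [1, 0, 0, 0, 0] above creates a single counter for answer 1, and the test asserts that the whole distribution collapses onto that grade.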
Example 5
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
        participant_counts = evaluations.annotate(
            num_participants=Count("participants")).values_list(
                "num_participants", flat=True)
        voter_counts = evaluations.annotate(
            num_voters=Count("voters")).values_list("num_voters", flat=True)
        course_evaluations_counts = evaluations.annotate(
            num_course_evaluations=Count("course__evaluations")).values_list(
                "num_course_evaluations", flat=True)
        evaluations = (
            evaluations.select_related("course__type").prefetch_related(
                "course__degrees",
                "course__semester",
                "course__responsibles",
            ))
        for evaluation, participant_count, voter_count, course_evaluations_count in zip(
                evaluations, participant_counts, voter_counts,
                course_evaluations_counts):
            if evaluation._participant_count is None:
                evaluation.num_participants = participant_count
                evaluation.num_voters = voter_count
            evaluation.course_evaluations_count = course_evaluations_count
    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(
                evaluation)
            evaluation.avg_grade = distribution_to_grade(
                evaluation.distribution)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(
                evaluation)
    return evaluations
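This variant issues one extra query per annotated count and then zips the value lists against the prefetched queryset, which only lines up if all four querysets evaluate in the same deterministic order. A hedged usage sketch (the filter is illustrative):

# Works for a QuerySet; an already-materialised list skips the isinstance branch.
published = Evaluation.objects.filter(state="published")
published = get_evaluations_with_prefetched_data(published)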
Example 6
def index(request):
    user = request.user

    contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
    own_courses = Course.objects.filter(contributions__contributor=user, state__in=contributor_visible_states)

    represented_users = user.represented_users.all()
    delegated_courses = Course.objects.exclude(id__in=own_courses).filter(contributions__can_edit=True, contributions__contributor__in=represented_users, state__in=contributor_visible_states)

    all_courses = list(own_courses) + list(delegated_courses)
    all_courses.sort(key=lambda course: list(STATES_ORDERED.keys()).index(course.state))

    for course in all_courses:
        course.distribution = calculate_average_distribution(course)
        course.avg_grade = distribution_to_grade(course.distribution)

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        is_active_semester=semester.is_active_semester,
        courses=[course for course in all_courses if course.semester_id == semester.id]
    ) for semester in semesters]

    template_data = dict(semester_list=semester_list, delegated_courses=delegated_courses)
    return render(request, "contributor_index.html", template_data)
Example 7
    def test_get_single_result_rating_result(self):
        single_result_course = mommy.make(Course,
                                          state='published',
                                          is_single_result=True)
        questionnaire = Questionnaire.objects.get(
            name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME)
        contribution = mommy.make(Contribution,
                                  contributor=mommy.make(UserProfile),
                                  course=single_result_course,
                                  questionnaires=[questionnaire],
                                  responsible=True,
                                  can_edit=True,
                                  comment_visibility=Contribution.ALL_COMMENTS)
        mommy.make(RatingAnswerCounter,
                   question=questionnaire.questions.first(),
                   contribution=contribution,
                   answer=1,
                   count=1)
        mommy.make(RatingAnswerCounter,
                   question=questionnaire.questions.first(),
                   contribution=contribution,
                   answer=4,
                   count=1)
        distribution = calculate_average_distribution(single_result_course)
        self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0))
        rating_result = get_single_result_rating_result(single_result_course)
        self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0))
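The asserted distribution is just the answer counts normalised to sum to one; a minimal sketch of that relationship:

counts = (1, 0, 0, 1, 0)  # one answer "1", one answer "4"
total = sum(counts)
distribution = tuple(count / total for count in counts)
assert distribution == (0.5, 0, 0, 0.5, 0)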
Example 8
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
        participant_counts = evaluations.annotate(num_participants=Count("participants")).values_list("num_participants", flat=True)
        voter_counts = evaluations.annotate(num_voters=Count("voters")).values_list("num_voters", flat=True)
        course_evaluations_counts = evaluations.annotate(num_course_evaluations=Count("course__evaluations")).values_list("num_course_evaluations", flat=True)
        evaluations = (evaluations
            .select_related("course__type")
            .prefetch_related(
                "course__degrees",
                "course__semester",
                "course__responsibles",
            )
        )
        for evaluation, participant_count, voter_count, course_evaluations_count in zip(evaluations, participant_counts, voter_counts, course_evaluations_counts):
            if evaluation._participant_count is None:
                evaluation.num_participants = participant_count
                evaluation.num_voters = voter_count
            evaluation.course_evaluations_count = course_evaluations_count
    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(evaluation)
            evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
    return evaluations
Example 9
    def test_get_single_result_rating_result(self):
        single_result_evaluation = baker.make(Evaluation,
                                              state='published',
                                              is_single_result=True)
        questionnaire = Questionnaire.objects.get(
            name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME)
        contribution = baker.make(
            Contribution,
            contributor=baker.make(UserProfile),
            evaluation=single_result_evaluation,
            questionnaires=[questionnaire],
            can_edit=True,
            textanswer_visibility=Contribution.GENERAL_TEXTANSWERS)
        baker.make(RatingAnswerCounter,
                   question=questionnaire.questions.first(),
                   contribution=contribution,
                   answer=1,
                   count=1)
        baker.make(RatingAnswerCounter,
                   question=questionnaire.questions.first(),
                   contribution=contribution,
                   answer=4,
                   count=1)
        distribution = calculate_average_distribution(single_result_evaluation)
        self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0))
        rating_result = get_single_result_rating_result(single_result_evaluation)
        self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0))
Example 10
def get_evaluations_of_course(course, request):
    course_evaluations = []

    if course.evaluations.count() > 1:
        course_evaluations = [
            evaluation
            for evaluation in course.evaluations.filter(state="published")
            if evaluation.can_be_seen_by(request.user)
        ]
        if request.user.is_reviewer:
            course_evaluations += course.evaluations.filter(
                state__in=['in_evaluation', 'evaluated', 'reviewed'])

        course_evaluations = get_evaluations_with_course_result_attributes(
            course_evaluations)

        for course_evaluation in course_evaluations:
            if course_evaluation.is_single_result:
                course_evaluation.single_result_rating_result = get_single_result_rating_result(
                    course_evaluation)
            else:
                course_evaluation.distribution = calculate_average_distribution(
                    course_evaluation)
                course_evaluation.avg_grade = distribution_to_grade(
                    course_evaluation.distribution)

    return course_evaluations
Example 11
File: views.py Project: Onderi/EvaP
def get_courses_with_prefetched_data(courses):
    if isinstance(courses, QuerySet):
        participant_counts = courses.annotate(
            num_participants=Count("participants")).values_list(
                "num_participants", flat=True)
        voter_counts = courses.annotate(
            num_voters=Count("voters")).values_list("num_voters", flat=True)
        courses = (courses.select_related("type").prefetch_related(
            "degrees", "semester",
            Prefetch("contributions",
                     queryset=Contribution.objects.filter(
                         responsible=True).select_related("contributor"),
                     to_attr="responsible_contributions")))
        for course, participant_count, voter_count in zip(
                courses, participant_counts, voter_counts):
            if course._participant_count is None:
                course.num_participants = participant_count
                course.num_voters = voter_count
            course.responsible_contributors = [
                contribution.contributor
                for contribution in course.responsible_contributions
            ]
    for course in courses:
        if not course.is_single_result:
            course.distribution = calculate_average_distribution(course)
            course.avg_grade = distribution_to_grade(course.distribution)
        else:
            course.single_result_rating_result = get_single_result_rating_result(
                course)
    return courses
Example 12
    def test_result_calculation_with_no_contributor_rating_question_does_not_fail(
            self):
        evaluation = mommy.make(Evaluation,
                                state='published',
                                participants=[self.student1, self.student2],
                                voters=[self.student1, self.student2])
        questionnaire_text = mommy.make(Questionnaire)
        mommy.make(Question,
                   questionnaire=questionnaire_text,
                   type=Question.TEXT)
        mommy.make(Contribution,
                   contributor=mommy.make(UserProfile),
                   evaluation=evaluation,
                   questionnaires=[questionnaire_text])

        evaluation.general_contribution.questionnaires.set(
            [self.questionnaire])
        mommy.make(RatingAnswerCounter,
                   question=self.question_grade,
                   contribution=evaluation.general_contribution,
                   answer=1,
                   count=1)

        distribution = calculate_average_distribution(evaluation)
        self.assertEqual(distribution[0], 1)
Example 13
def index(request):
    # retrieve all courses which have evaluations that are not in state "new" and in which the user participates
    courses = Course.objects.filter(
        evaluations__participants=request.user,
        evaluations__state__in=['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
    ).distinct()
    # retrieve all evaluations which the user can see that are not new
    evaluations = [evaluation for course in courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(request.user)]
    for evaluation in evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(evaluation)
                evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
        evaluation.participates_in = request.user in evaluation.participants.all()
        evaluation.voted_for = request.user in evaluation.voters.all()
    evaluations = get_evaluations_with_course_result_attributes(evaluations)
    evaluations.sort(key=lambda evaluation: evaluation.full_name)  # evaluations must be sorted for regrouping them in the template

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        is_active_semester=semester.is_active_semester,
        results_are_archived=semester.results_are_archived,
        grade_documents_are_deleted=semester.grade_documents_are_deleted,
        evaluations=[evaluation for evaluation in evaluations if evaluation.course.semester_id == semester.id]
    ) for semester in semesters]

    template_data = dict(
        semester_list=semester_list,
        can_download_grades=request.user.can_download_grades,
    )
    return render(request, "student_index.html", template_data)
Example 14
    def test_distribution_without_general_grade_question(self):
        baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=1, count=1)
        baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=3, count=1)
        baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=1)
        baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=2, count=1)
        baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=3)
        baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=5, count=3)
        baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=5, count=5)
        baker.make(RatingAnswerCounter, question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3)
        cache_results(self.evaluation)

        # contribution1: 0.4 * (0.5, 0, 0.5, 0, 0) + 0.6 * (0, 0, 0.5, 0, 0.5) = (0.2, 0, 0.5, 0, 0.3)
        # contribution2: (0, 0.5, 0, 0.5, 0)
        # contributions: (6 / 8) * (0.2, 0, 0.5, 0, 0.3) + (2 / 8) * (0, 0.5, 0, 0.5, 0) = (0.15, 0.125, 0.375, 0.125, 0.225)

        # general_non_grade: (0, 0, 0.375, 0, 0.625)

        # total: 0.375 * (0.15, 0.125, 0.375, 0.125, 0.225) + 0.625 * (0, 0, 0.375, 0, 0.625) = (0.05625, 0.046875, 0.375, 0.046875, 0.475)

        distribution = calculate_average_distribution(self.evaluation)
        self.assertAlmostEqual(distribution[0], 0.05625)
        self.assertAlmostEqual(distribution[1], 0.046875)
        self.assertAlmostEqual(distribution[2], 0.375)
        self.assertAlmostEqual(distribution[3], 0.046875)
        self.assertAlmostEqual(distribution[4], 0.475)
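The commented vector arithmetic can be checked directly. A minimal sketch, with the 0.375/0.625 weights taken from the comments (they correspond to an assumed CONTRIBUTIONS : GENERAL_NON_GRADE settings ratio of 3 : 5):

contributions = [0.15, 0.125, 0.375, 0.125, 0.225]
general_non_grade = [0, 0, 0.375, 0, 0.625]
total = [0.375 * c + 0.625 * g for c, g in zip(contributions, general_non_grade)]
expected = [0.05625, 0.046875, 0.375, 0.046875, 0.475]
assert all(abs(t - e) < 1e-9 for t, e in zip(total, expected))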
Example 15
    def test_average_grade(self):
        question_grade2 = baker.make(Question, questionnaire=self.questionnaire, type=Question.GRADE)

        baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=2, count=1)
        baker.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=2)
        baker.make(RatingAnswerCounter, question=question_grade2, contribution=self.contribution1, answer=1, count=1)
        baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=4)
        baker.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=5, count=5)
        baker.make(RatingAnswerCounter, question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3)
        baker.make(RatingAnswerCounter, question=self.question_bipolar, contribution=self.general_contribution, answer=3, count=2)
        baker.make(RatingAnswerCounter, question=self.question_bipolar_2, contribution=self.general_contribution, answer=-1, count=4)
        cache_results(self.evaluation)

        contributor_weights_sum = settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT + settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT
        contributor1_average = ((settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT * ((2 * 1) + (1 * 1)) / (1 + 1)) + (settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT * 3)) / contributor_weights_sum  # 2.4
        contributor2_average = 4
        contributors_average = ((4 * contributor1_average) + (2 * contributor2_average)) / (4 + 2)  # 2.9333333

        general_non_grade_average = ((5 * 5) + (3 * 3) + (2 * 5) + (4 * 7 / 3)) / (5 + 3 + 2 + 4)  # 3.80952380

        contributors_percentage = settings.CONTRIBUTIONS_WEIGHT / (settings.CONTRIBUTIONS_WEIGHT + settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT)  # 0.375
        general_non_grade_percentage = settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT / (settings.CONTRIBUTIONS_WEIGHT + settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT)  # 0.625

        total_grade = contributors_percentage * contributors_average + general_non_grade_percentage * general_non_grade_average  # 1.1 + 2.38095238 = 3.48095238

        average_grade = distribution_to_grade(calculate_average_distribution(self.evaluation))
        self.assertAlmostEqual(average_grade, total_grade)
        self.assertAlmostEqual(average_grade, 3.48095238)
Example 16
    def test_distribution_without_general_grade_question(self):
        make_rating_answer_counters(self.question_grade, self.contribution1,
                                    [1, 0, 1, 0, 0])
        make_rating_answer_counters(self.question_grade, self.contribution2,
                                    [0, 1, 0, 1, 0])
        make_rating_answer_counters(self.question_likert, self.contribution1,
                                    [0, 0, 3, 0, 3])
        make_rating_answer_counters(self.question_likert,
                                    self.general_contribution, [0, 0, 0, 0, 5])
        make_rating_answer_counters(self.question_likert_2,
                                    self.general_contribution, [0, 0, 3, 0, 0])

        cache_results(self.evaluation)

        # contribution1: 0.4 * (0.5, 0, 0.5, 0, 0) + 0.6 * (0, 0, 0.5, 0, 0.5) = (0.2, 0, 0.5, 0, 0.3)
        # contribution2: (0, 0.5, 0, 0.5, 0)
        # contributions: (6 / 8) * (0.2, 0, 0.5, 0, 0.3) + (2 / 8) * (0, 0.5, 0, 0.5, 0) = (0.15, 0.125, 0.375, 0.125, 0.225)

        # general_non_grade: (0, 0, 0.375, 0, 0.625)

        # total: 0.375 * (0.15, 0.125, 0.375, 0.125, 0.225) + 0.625 * (0, 0, 0.375, 0, 0.625) = (0.05625, 0.046875, 0.375, 0.046875, 0.475)

        distribution = calculate_average_distribution(self.evaluation)
        self.assertAlmostEqual(distribution[0], 0.05625)
        self.assertAlmostEqual(distribution[1], 0.046875)
        self.assertAlmostEqual(distribution[2], 0.375)
        self.assertAlmostEqual(distribution[3], 0.046875)
        self.assertAlmostEqual(distribution[4], 0.475)
Example 17
    def test_average_grade(self):
        question_grade2 = mommy.make(Question, questionnaire=self.questionnaire, type="G")

        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=2, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=2)
        mommy.make(RatingAnswerCounter, question=question_grade2, contribution=self.contribution1, answer=1, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=4)
        mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=5, count=5)
        mommy.make(RatingAnswerCounter, question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3)

        contributor_weights_sum = settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT + settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT
        contributor1_average = ((settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT * ((2 * 1) + (1 * 1)) / (1 + 1)) + (settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT * 3)) / contributor_weights_sum  # 2.4
        contributor2_average = 4
        contributors_average = ((4 * contributor1_average) + (2 * contributor2_average)) / (4 + 2)  # 2.9333333

        course_non_grade_average = ((5 * 5) + (3 * 3)) / (5 + 3)  # 4.25

        contributors_percentage = settings.CONTRIBUTIONS_WEIGHT / (settings.CONTRIBUTIONS_WEIGHT + settings.COURSE_NON_GRADE_QUESTIONS_WEIGHT)  # 0.375
        course_non_grade_percentage = settings.COURSE_NON_GRADE_QUESTIONS_WEIGHT / (settings.CONTRIBUTIONS_WEIGHT + settings.COURSE_NON_GRADE_QUESTIONS_WEIGHT)  # 0.625

        total_grade = contributors_percentage * contributors_average + course_non_grade_percentage * course_non_grade_average  # 1.1 + 2.65625 = 3.75625

        average_grade = distribution_to_grade(calculate_average_distribution(self.course))
        self.assertAlmostEqual(average_grade, total_grade)
        self.assertAlmostEqual(average_grade, 3.75625)
Example 18
    def test_distribution_with_general_grade_question(self):
        make_rating_answer_counters(self.question_grade, self.contribution1,
                                    [1, 0, 1, 0, 0])
        make_rating_answer_counters(self.question_grade, self.contribution2,
                                    [0, 1, 0, 1, 0])
        make_rating_answer_counters(self.question_likert, self.contribution1,
                                    [0, 0, 3, 0, 3])
        make_rating_answer_counters(self.question_likert,
                                    self.general_contribution, [0, 0, 0, 0, 5])
        make_rating_answer_counters(self.question_likert_2,
                                    self.general_contribution, [0, 0, 3, 0, 0])
        make_rating_answer_counters(self.question_grade,
                                    self.general_contribution,
                                    [0, 10, 0, 0, 0])

        cache_results(self.evaluation)

        # contributions and general_non_grade are as above
        # general_grade: (0, 1, 0, 0, 0)

        # total: 0.3 * (0.15, 0.125, 0.375, 0.125, 0.225) + 0.2 * (0, 1, 0, 0, 0) + 0.5 * (0, 0, 0.375, 0, 0.625) = (0.045, 0.2375, 0.3, 0.0375, 0.38)

        distribution = calculate_average_distribution(self.evaluation)
        self.assertAlmostEqual(distribution[0], 0.045)
        self.assertAlmostEqual(distribution[1], 0.2375)
        self.assertAlmostEqual(distribution[2], 0.3)
        self.assertAlmostEqual(distribution[3], 0.0375)
        self.assertAlmostEqual(distribution[4], 0.38)
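When a grade question also appears in the general contribution, a third component enters the mix. A sketch of the commented three-way combination (the 0.3/0.2/0.5 weights come from the comment and correspond to an assumed 3 : 2 : 5 settings ratio):

contributions = [0.15, 0.125, 0.375, 0.125, 0.225]
general_grade = [0, 1, 0, 0, 0]
general_non_grade = [0, 0, 0.375, 0, 0.625]
total = [0.3 * c + 0.2 * g + 0.5 * n
         for c, g, n in zip(contributions, general_grade, general_non_grade)]
expected = [0.045, 0.2375, 0.3, 0.0375, 0.38]
assert all(abs(t - e) < 1e-9 for t, e in zip(total, expected))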
Example 19
    def test_distribution_with_general_grade_question(self):
        baker.make(RatingAnswerCounter,
                   question=self.question_grade,
                   contribution=self.contribution1,
                   answer=1,
                   count=1)
        baker.make(RatingAnswerCounter,
                   question=self.question_grade,
                   contribution=self.contribution1,
                   answer=3,
                   count=1)
        baker.make(RatingAnswerCounter,
                   question=self.question_grade,
                   contribution=self.contribution2,
                   answer=4,
                   count=1)
        baker.make(RatingAnswerCounter,
                   question=self.question_grade,
                   contribution=self.contribution2,
                   answer=2,
                   count=1)
        baker.make(RatingAnswerCounter,
                   question=self.question_likert,
                   contribution=self.contribution1,
                   answer=3,
                   count=3)
        baker.make(RatingAnswerCounter,
                   question=self.question_likert,
                   contribution=self.contribution1,
                   answer=5,
                   count=3)
        baker.make(RatingAnswerCounter,
                   question=self.question_likert,
                   contribution=self.general_contribution,
                   answer=5,
                   count=5)
        baker.make(RatingAnswerCounter,
                   question=self.question_likert_2,
                   contribution=self.general_contribution,
                   answer=3,
                   count=3)
        baker.make(RatingAnswerCounter,
                   question=self.question_grade,
                   contribution=self.general_contribution,
                   answer=2,
                   count=10)
        cache_results(self.evaluation)

        # contributions and general_non_grade are as above
        # general_grade: (0, 1, 0, 0, 0)

        # total: 0.3 * (0.15, 0.125, 0.375, 0.125, 0.225) + 0.2 * (0, 1, 0, 0, 0) + 0.5 * (0, 0, 0.375, 0, 0.625) = (0.045, 0.2375, 0.3, 0.0375, 0.38)

        distribution = calculate_average_distribution(self.evaluation)
        self.assertAlmostEqual(distribution[0], 0.045)
        self.assertAlmostEqual(distribution[1], 0.2375)
        self.assertAlmostEqual(distribution[2], 0.3)
        self.assertAlmostEqual(distribution[3], 0.0375)
        self.assertAlmostEqual(distribution[4], 0.38)
Example 20
def index(request):
    user = request.user
    show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)

    contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
    own_courses = Course.objects.filter(
        Q(evaluations__state__in=contributor_visible_states) & (
            Q(responsibles=user) |
            Q(evaluations__contributions__contributor=user)
        )
    )
    own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]
    for evaluation in own_evaluations:
        evaluation.contributes_to = evaluation.contributions.filter(contributor=user).exists()

    displayed_evaluations = set(own_evaluations)
    if show_delegated:
        represented_users = user.represented_users.all()
        delegated_courses = Course.objects.filter(
            Q(evaluations__state__in=contributor_visible_states) & (
                Q(responsibles__in=represented_users) |
                Q(
                    evaluations__contributions__role=Contribution.Role.EDITOR,
                    evaluations__contributions__contributor__in=represented_users,
                )
            )
        )
        delegated_evaluations = set(evaluation for course in delegated_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user))
        for evaluation in delegated_evaluations:
            evaluation.delegated_evaluation = True
        displayed_evaluations |= delegated_evaluations - displayed_evaluations
    displayed_evaluations = list(displayed_evaluations)
    displayed_evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name))  # evaluations must be sorted for regrouping them in the template

    for evaluation in displayed_evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(evaluation)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
                evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
            evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        is_active=semester.is_active,
        evaluations=[evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id]
    ) for semester in semesters]

    template_data = dict(
        semester_list=semester_list,
        show_delegated=show_delegated,
        delegate_selection_form=DelegateSelectionForm(),
    )
    return render(request, "contributor_index.html", template_data)
Example 21
    def test_average_grade(self):
        question_grade2 = baker.make(Question,
                                     questionnaire=self.questionnaire,
                                     type=Question.GRADE)

        make_rating_answer_counters(self.question_grade, self.contribution1,
                                    [0, 1, 0, 0, 0])
        make_rating_answer_counters(self.question_grade, self.contribution2,
                                    [0, 0, 0, 2, 0])
        make_rating_answer_counters(question_grade2, self.contribution1,
                                    [1, 0, 0, 0, 0])
        make_rating_answer_counters(self.question_likert, self.contribution1,
                                    [0, 0, 4, 0, 0])
        make_rating_answer_counters(self.question_likert,
                                    self.general_contribution, [0, 0, 0, 0, 5])
        make_rating_answer_counters(self.question_likert_2,
                                    self.general_contribution, [0, 0, 3, 0, 0])
        make_rating_answer_counters(self.question_bipolar,
                                    self.general_contribution,
                                    [0, 0, 0, 0, 0, 0, 2])
        make_rating_answer_counters(self.question_bipolar_2,
                                    self.general_contribution,
                                    [0, 0, 4, 0, 0, 0, 0])

        cache_results(self.evaluation)

        contributor_weights_sum = (
            settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT +
            settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT)
        contributor1_average = (
            (settings.CONTRIBUTOR_GRADE_QUESTIONS_WEIGHT *
             ((2 * 1) + (1 * 1)) / (1 + 1)) +
            (settings.CONTRIBUTOR_NON_GRADE_RATING_QUESTIONS_WEIGHT *
             3)) / contributor_weights_sum  # 2.4
        contributor2_average = 4
        contributors_average = (
            (4 * contributor1_average) +
            (2 * contributor2_average)) / (4 + 2)  # 2.9333333

        general_non_grade_average = (
            (5 * 5) + (3 * 3) + (2 * 5) +
            (4 * 7 / 3)) / (5 + 3 + 2 + 4)  # 3.80952380

        contributors_percentage = settings.CONTRIBUTIONS_WEIGHT / (
            settings.CONTRIBUTIONS_WEIGHT +
            settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT)  # 0.375
        general_non_grade_percentage = settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT / (
            settings.CONTRIBUTIONS_WEIGHT +
            settings.GENERAL_NON_GRADE_QUESTIONS_WEIGHT)  # 0.625

        total_grade = (contributors_percentage * contributors_average +
                       general_non_grade_percentage * general_non_grade_average
                       )  # 1.1 + 2.38095238 = 3.48095238

        average_grade = distribution_to_grade(
            calculate_average_distribution(self.evaluation))
        self.assertAlmostEqual(average_grade, total_grade)
        self.assertAlmostEqual(average_grade, 3.48095238)
Example 22
def index(request):
    # retrieve all courses which have evaluations that are not in state "new" and in which the user participates
    courses = Course.objects.filter(
        evaluations__participants=request.user,
        evaluations__state__in=[
            'prepared', 'editor_approved', 'approved', 'in_evaluation',
            'evaluated', 'reviewed', 'published'
        ]).distinct().prefetch_related('semester', 'grade_documents', 'type',
                                       'evaluations',
                                       'evaluations__participants',
                                       'evaluations__voters')
    # retrieve all evaluations which the user can see that are not new
    evaluations = [
        evaluation for course in courses
        for evaluation in course.evaluations.all()
        if evaluation.can_be_seen_by(request.user)
    ]
    for evaluation in evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(
                    evaluation)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(
                    evaluation)
                evaluation.distribution = normalized_distribution(
                    evaluation.single_result_rating_result.counts)
            evaluation.avg_grade = distribution_to_grade(
                evaluation.distribution)
        evaluation.participates_in = request.user in evaluation.participants.all()
        evaluation.voted_for = request.user in evaluation.voters.all()
    evaluations = get_evaluations_with_course_result_attributes(evaluations)
    evaluations.sort(
        key=lambda evaluation: (evaluation.course.name, evaluation.name)
    )  # evaluations must be sorted for regrouping them in the template

    semesters = Semester.objects.all()
    semester_list = [
        dict(semester_name=semester.name,
             id=semester.id,
             is_active=semester.is_active,
             results_are_archived=semester.results_are_archived,
             grade_documents_are_deleted=semester.grade_documents_are_deleted,
             evaluations=[
                 evaluation for evaluation in evaluations
                 if evaluation.course.semester_id == semester.id
             ]) for semester in semesters
    ]

    template_data = dict(
        semester_list=semester_list,
        can_download_grades=request.user.can_download_grades,
    )

    return render(request, "student_index.html", template_data)
Example 23
    def test_get_single_result_rating_result(self):
        single_result_evaluation = mommy.make(Evaluation, state='published', is_single_result=True)
        questionnaire = Questionnaire.objects.get(name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME)
        contribution = mommy.make(Contribution, contributor=mommy.make(UserProfile), evaluation=single_result_evaluation, questionnaires=[questionnaire], can_edit=True, textanswer_visibility=Contribution.GENERAL_TEXTANSWERS)
        mommy.make(RatingAnswerCounter, question=questionnaire.questions.first(), contribution=contribution, answer=1, count=1)
        mommy.make(RatingAnswerCounter, question=questionnaire.questions.first(), contribution=contribution, answer=4, count=1)
        distribution = calculate_average_distribution(single_result_evaluation)
        self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0))
        rating_result = get_single_result_rating_result(single_result_evaluation)
        self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0))
Example 24
    def test_get_single_result_rating_result(self):
        single_result_course = mommy.make(Course, state='published', is_single_result=True)
        questionnaire = Questionnaire.objects.get(name_en=Questionnaire.SINGLE_RESULT_QUESTIONNAIRE_NAME)
        contribution = mommy.make(Contribution, contributor=mommy.make(UserProfile), course=single_result_course, questionnaires=[questionnaire], responsible=True, can_edit=True, comment_visibility=Contribution.ALL_COMMENTS)
        mommy.make(RatingAnswerCounter, question=questionnaire.question_set.first(), contribution=contribution, answer=1, count=1)
        mommy.make(RatingAnswerCounter, question=questionnaire.question_set.first(), contribution=contribution, answer=4, count=1)
        distribution = calculate_average_distribution(single_result_course)
        self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0))
        rating_result = get_single_result_rating_result(single_result_course)
        self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0))
Example 25
    def write_overall_results(self, evaluations_with_results,
                              course_results_exist):
        evaluations = [e for e, __ in evaluations_with_results]

        self.write_cell(_("Overall Average Grade"), "bold")
        averages = (distribution_to_grade(calculate_average_distribution(e))
                    for e in evaluations)
        self.write_row(averages,
                       lambda avg: self.grade_to_style(avg) if avg else "border_left_right")

        self.write_cell(_("Total voters/Total participants"), "bold")
        voter_ratios = (f"{e.num_voters}/{e.num_participants}"
                        for e in evaluations)
        self.write_row(voter_ratios, style="total_voters")

        self.write_cell(_("Evaluation rate"), "bold")
        # round down like in progress bar
        participant_percentages = (
            f"{int((e.num_voters / e.num_participants) * 100) if e.num_participants > 0 else 0}%"
            for e in evaluations)
        self.write_row(participant_percentages, style="evaluation_rate")

        if course_results_exist:
            # Only query the number of evaluations once and keep track of it here.
            count_gt_1 = [e.course_evaluations_count > 1 for e in evaluations]

            # Borders only if there is a course grade below. Offset by one column
            self.write_empty_row_with_styles(["default"] + [
                "border_left_right" if gt1 else "default" for gt1 in count_gt_1
            ])

            self.write_cell(_("Evaluation weight"), "bold")
            weight_percentages = (f"{e.weight_percentage}%" if gt1 else None
                                  for e, gt1 in zip(evaluations, count_gt_1))
            self.write_row(weight_percentages,
                           lambda s: "evaluation_weight" if s is not None else "default")

            self.write_cell(_("Course Grade"), "bold")
            for evaluation, gt1 in zip(evaluations, count_gt_1):
                if not gt1:
                    self.write_cell()
                    continue

                avg = evaluation.course.avg_grade
                style = self.grade_to_style(avg) if avg is not None else "border_left_right"
                self.write_cell(avg, style)
            self.next_row()

            # Same reasoning as above.
            self.write_empty_row_with_styles(
                ["default"] +
                ["border_top" if gt1 else "default" for gt1 in count_gt_1])
Example 26
def index(request):
    user = request.user
    show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)

    contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
    own_courses = Course.objects.filter(
        Q(evaluations__state__in=contributor_visible_states) & (
            Q(responsibles=user) |
            Q(evaluations__contributions__contributor=user)
        )
    )
    own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]
    for evaluation in own_evaluations:
        evaluation.contributes_to = evaluation.contributions.filter(contributor=user).exists()

    displayed_evaluations = set(own_evaluations)
    if show_delegated:
        represented_users = user.represented_users.all()
        delegated_courses = Course.objects.filter(
            Q(evaluations__state__in=contributor_visible_states) & (
                Q(responsibles__in=represented_users) |
                Q(evaluations__contributions__can_edit=True, evaluations__contributions__contributor__in=represented_users)
            )
        )
        delegated_evaluations = set(evaluation for course in delegated_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user))
        for evaluation in delegated_evaluations:
            evaluation.delegated_evaluation = True
        displayed_evaluations |= delegated_evaluations - displayed_evaluations
    displayed_evaluations = list(displayed_evaluations)
    displayed_evaluations.sort(key=lambda evaluation: evaluation.full_name)  # evaluations must be sorted for regrouping them in the template

    for evaluation in displayed_evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(evaluation)
                evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
    displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        is_active_semester=semester.is_active_semester,
        evaluations=[evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id]
    ) for semester in semesters]

    template_data = dict(
        semester_list=semester_list,
        show_delegated=show_delegated,
        delegate_selection_form=DelegateSelectionForm(),
    )
    return render(request, "contributor_index.html", template_data)
Example 27
    def test_result_calculation_with_no_contributor_rating_question_does_not_fail(self):
        evaluation = mommy.make(Evaluation, state='published', participants=[self.student1, self.student2], voters=[self.student1, self.student2])
        questionnaire_text = mommy.make(Questionnaire)
        mommy.make(Question, questionnaire=questionnaire_text, type=Question.TEXT)
        mommy.make(Contribution, contributor=mommy.make(UserProfile), evaluation=evaluation, questionnaires=[questionnaire_text])

        evaluation.general_contribution.questionnaires.set([self.questionnaire])
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=evaluation.general_contribution, answer=1, count=1)

        distribution = calculate_average_distribution(evaluation)
        self.assertEqual(distribution[0], 1)
Example 28
def index(request):
    user = request.user
    show_delegated = get_parameter_from_url_or_session(request,
                                                       "show_delegated", True)

    contributor_visible_states = [
        'prepared', 'editor_approved', 'approved', 'in_evaluation',
        'evaluated', 'reviewed', 'published'
    ]
    own_evaluations = Evaluation.objects.filter(
        contributions__contributor=user, state__in=contributor_visible_states)

    displayed_evaluations = list(own_evaluations)
    if show_delegated:
        represented_users = user.represented_users.all()
        delegated_evaluations = Evaluation.objects.exclude(
            id__in=own_evaluations).filter(
                contributions__can_edit=True,
                contributions__contributor__in=represented_users,
                state__in=contributor_visible_states)
        for evaluation in delegated_evaluations:
            evaluation.delegated_evaluation = True
        displayed_evaluations += list(delegated_evaluations)
    displayed_evaluations.sort(
        key=lambda evaluation: list(STATES_ORDERED.keys()).index(evaluation.state))

    delegate_selection_form = DelegateSelectionForm()

    for evaluation in displayed_evaluations:
        evaluation.distribution = calculate_average_distribution(evaluation)
        evaluation.avg_grade = distribution_to_grade(evaluation.distribution)

    semesters = Semester.objects.all()
    semester_list = [
        dict(semester_name=semester.name,
             id=semester.id,
             is_active_semester=semester.is_active_semester,
             evaluations=[
                 evaluation for evaluation in displayed_evaluations
                 if evaluation.course.semester_id == semester.id
             ]) for semester in semesters
    ]

    template_data = dict(
        semester_list=semester_list,
        show_delegated=show_delegated,
        delegate_selection_form=delegate_selection_form,
    )
    return render(request, "contributor_index.html", template_data)
Example 29
def index(request):
    # retrieve all evaluations in which the user participates and that are not in state "new"
    evaluations = list(
        set(
            Evaluation.objects.filter(participants=request.user).exclude(
                state="new")))
    for evaluation in evaluations:
        evaluation.distribution = calculate_average_distribution(evaluation)
        evaluation.avg_grade = distribution_to_grade(evaluation.distribution)

    voted_evaluations = list(
        set(Evaluation.objects.filter(voters=request.user)))
    due_evaluations = list(
        set(
            Evaluation.objects.filter(
                participants=request.user,
                state='in_evaluation').exclude(voters=request.user)))

    # due evaluations come first, then everything else in chronological order
    # some states are handled as a group because they appear the same to students
    def sorter(evaluation):
        return (
            evaluation not in due_evaluations,
            evaluation.state not in ['prepared', 'editor_approved', 'approved'],
            evaluation.state != 'in_evaluation',
            evaluation.state not in ['evaluated', 'reviewed'],
            evaluation.name,
        )

    evaluations.sort(key=sorter)

    semesters = Semester.objects.all()
    semester_list = [
        dict(semester_name=semester.name,
             id=semester.id,
             is_active_semester=semester.is_active_semester,
             results_are_archived=semester.results_are_archived,
             grade_documents_are_deleted=semester.grade_documents_are_deleted,
             evaluations=[
                 evaluation for evaluation in evaluations
                 if evaluation.course.semester_id == semester.id
             ]) for semester in semesters
    ]

    template_data = dict(
        semester_list=semester_list,
        voted_evaluations=voted_evaluations,
        can_download_grades=request.user.can_download_grades,
    )
    return render(request, "student_index.html", template_data)
Example 30
def index(request):
    # retrieve all courses in which the user participates and that are not in state "new"
    courses = list(
        set(
            Course.objects.filter(participants=request.user).exclude(
                state="new")))
    for course in courses:
        course.distribution = calculate_average_distribution(course)
        course.avg_grade = distribution_to_grade(course.distribution)

    voted_courses = list(set(Course.objects.filter(voters=request.user)))
    due_courses = list(
        set(
            Course.objects.filter(
                participants=request.user,
                state='in_evaluation').exclude(voters=request.user)))

    # due courses come first, then everything else in chronological order
    # some states are handled as a group because they appear the same to students
    def sorter(course):
        return (
            course not in due_courses,
            course.state not in ['prepared', 'editor_approved', 'approved'],
            course.state != 'in_evaluation',
            course.state not in ['evaluated', 'reviewed'],
            course.name,
        )

    courses.sort(key=sorter)

    semesters = Semester.objects.all()
    semester_list = [
        dict(semester_name=semester.name,
             id=semester.id,
             is_active_semester=semester.is_active_semester,
             results_are_archived=semester.results_are_archived,
             courses=[
                 course for course in courses
                 if course.semester_id == semester.id
             ]) for semester in semesters
    ]

    template_data = dict(
        semester_list=semester_list,
        voted_courses=voted_courses,
        can_download_grades=request.user.can_download_grades,
    )
    return render(request, "student_index.html", template_data)
Example 31
def get_courses_with_prefetched_data(courses):
    if isinstance(courses, QuerySet):
        courses = (courses
            .annotate(num_participants=Count("participants", distinct=True), num_voters=Count("voters", distinct=True))
            .select_related("type")
            .prefetch_related(
                "degrees",
                "semester",
                Prefetch("contributions", queryset=Contribution.objects.filter(responsible=True).select_related("contributor"), to_attr="responsible_contributions")
            )
        )
        for course in courses:
            course.responsible_contributors = [contribution.contributor for contribution in course.responsible_contributions]
    for course in courses:
        if not course.is_single_result:
            course.distribution = calculate_average_distribution(course)
            course.avg_grade = distribution_to_grade(course.distribution)
        else:
            course.single_result_rating_result = get_single_result_rating_result(course)
    return courses
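Unlike Examples 5 and 11, which annotated each count on a separate queryset and zipped the results back together, this version puts both counts into one query; distinct=True is what makes that safe, since two many-to-many joins would otherwise multiply each other's rows and inflate both counts. A sketch of the combined form:

from django.db.models import Count

# Without distinct=True, each participant row would be repeated once per
# voter row (and vice versa), so both counts would come out too large.
courses = Course.objects.annotate(
    num_participants=Count("participants", distinct=True),
    num_voters=Count("voters", distinct=True),
)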
Example 32
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
        evaluations = (evaluations
            .select_related("course__type")
            .prefetch_related(
                "course__degrees",
                "course__semester",
                "course__responsibles",
            )
        )
        evaluations = Evaluation.annotate_with_participant_and_voter_counts(evaluations)

    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(evaluation)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
            evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
        evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    return evaluations
Example 33
    def test_distribution_with_general_grade_question(self):
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=1, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=3, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=2, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=3)
        mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=5, count=3)
        mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=5, count=5)
        mommy.make(RatingAnswerCounter, question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3)
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.general_contribution, answer=2, count=10)

        # contributions and general_non_grade are as above
        # general_grade: (0, 1, 0, 0, 0)

        # total: 0.3 * (0.15, 0.125, 0.375, 0.125, 0.225) + 0.2 * (0, 1, 0, 0, 0) + 0.5 * (0, 0, 0.375, 0, 0.625) = (0.045, 0.2375, 0.3, 0.0375, 0.38)

        distribution = calculate_average_distribution(self.evaluation)
        self.assertAlmostEqual(distribution[0], 0.045)
        self.assertAlmostEqual(distribution[1], 0.2375)
        self.assertAlmostEqual(distribution[2], 0.3)
        self.assertAlmostEqual(distribution[3], 0.0375)
        self.assertAlmostEqual(distribution[4], 0.38)
Example 34
def index(request):
    user = request.user

    contributor_visible_states = [
        'prepared', 'editor_approved', 'approved', 'in_evaluation',
        'evaluated', 'reviewed', 'published'
    ]
    own_courses = Course.objects.filter(contributions__contributor=user,
                                        state__in=contributor_visible_states)

    represented_users = user.represented_users.all()
    delegated_courses = Course.objects.exclude(id__in=own_courses).filter(
        contributions__can_edit=True,
        contributions__contributor__in=represented_users,
        state__in=contributor_visible_states)

    all_courses = list(own_courses) + list(delegated_courses)
    state_order = list(STATES_ORDERED.keys())
    all_courses.sort(key=lambda course: state_order.index(course.state))

    for course in all_courses:
        course.distribution = calculate_average_distribution(course) if course.can_user_see_grades(user) else None
        course.avg_grade = distribution_to_grade(course.distribution)

    semesters = Semester.objects.all()
    semester_list = [
        dict(semester_name=semester.name,
             id=semester.id,
             is_active_semester=semester.is_active_semester,
             courses=[
                 course for course in all_courses
                 if course.semester_id == semester.id
             ]) for semester in semesters
    ]

    template_data = dict(semester_list=semester_list,
                         delegated_courses=delegated_courses)
    return render(request, "contributor_index.html", template_data)
Example 35
def semester_detail(request, semester_id):
    semester = get_object_or_404(Semester, id=semester_id)

    visible_states = ['published']
    if request.user.is_reviewer:
        visible_states += ['in_evaluation', 'evaluated', 'reviewed']

    courses = semester.course_set.filter(state__in=visible_states).prefetch_related("degrees")

    courses = [
        course for course in courses
        if course.can_user_see_course(request.user)
    ]

    for course in courses:
        course.distribution = calculate_average_distribution(course) if course.can_user_see_grades(request.user) else None
        course.avg_grade = distribution_to_grade(course.distribution)

    CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))

    courses_by_degree = OrderedDict()
    for degree in Degree.objects.all():
        courses_by_degree[degree] = CourseTuple([], [])
    for course in courses:
        if course.is_single_result:
            section = calculate_results(course)[0]
            result = section.results[0]
            for degree in course.degrees.all():
                courses_by_degree[degree].single_results.append((course, result))
        else:
            for degree in course.degrees.all():
                courses_by_degree[degree].courses.append(course)

    template_data = dict(semester=semester,
                         courses_by_degree=courses_by_degree)
    return render(request, "results_semester_detail.html", template_data)
Example 36
    def test_get_single_result_rating_result(self):
        single_result_evaluation = baker.make(Evaluation, state=Evaluation.State.PUBLISHED, is_single_result=True)
        questionnaire = Questionnaire.single_result_questionnaire()
        contribution = baker.make(
            Contribution,
            contributor=baker.make(UserProfile),
            evaluation=single_result_evaluation,
            questionnaires=[questionnaire],
            role=Contribution.Role.EDITOR,
            textanswer_visibility=Contribution.TextAnswerVisibility.GENERAL_TEXTANSWERS,
        )
        make_rating_answer_counters(questionnaire.questions.first(), contribution, [1, 0, 0, 1, 0])

        cache_results(single_result_evaluation)
        distribution = calculate_average_distribution(single_result_evaluation)
        self.assertEqual(distribution, (0.5, 0, 0, 0.5, 0))
        rating_result = get_single_result_rating_result(single_result_evaluation)
        self.assertEqual(rating_result.counts, (1, 0, 0, 1, 0))
Example 37
def index(request):
    # retrieve all courses where the user is a participant, excluding courses that are still new
    courses = list(set(Course.objects.filter(participants=request.user).exclude(state="new")))
    for course in courses:
        course.distribution = calculate_average_distribution(course)
        course.avg_grade = distribution_to_grade(course.distribution)

    voted_courses = list(set(Course.objects.filter(voters=request.user)))
    due_courses = list(set(Course.objects.filter(participants=request.user, state='in_evaluation').exclude(voters=request.user)))

    # due courses come first, then everything else grouped by state, with the name as final tiebreaker
    # some states are handled as a group because they appear the same to students (see the note after this example)
    def sorter(course):
        return (
            course not in due_courses,
            course.state not in ['prepared', 'editor_approved', 'approved'],
            course.state != 'in_evaluation',
            course.state not in ['evaluated', 'reviewed'],
            course.name,
        )
    courses.sort(key=sorter)

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        is_active_semester=semester.is_active_semester,
        results_are_archived=semester.results_are_archived,
        grade_documents_are_deleted=semester.grade_documents_are_deleted,
        courses=[course for course in courses if course.semester_id == semester.id]
    ) for semester in semesters]

    template_data = dict(
        semester_list=semester_list,
        voted_courses=voted_courses,
        can_download_grades=request.user.can_download_grades,
    )
    return render(request, "student_index.html", template_data)
Example 38
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
        # these annotates and the zip below could be replaced by something like this, but it was 2x slower
        # (a fuller sketch follows after this example):
        # annotate(num_participants=Coalesce('_participant_count', Count("participants", distinct=True)))
        participant_counts = evaluations.annotate(
            num_participants=Count("participants")).order_by('pk').values_list(
                "num_participants", flat=True)
        voter_counts = evaluations.annotate(
            num_voters=Count("voters")).order_by('pk').values_list(
                "num_voters", flat=True)
        course_evaluations_counts = evaluations.annotate(
            num_course_evaluations=Count("course__evaluations")).order_by(
                'pk').values_list("num_course_evaluations", flat=True)
        evaluations = (
            evaluations.select_related("course__type").prefetch_related(
                "course__degrees",
                "course__semester",
                "course__responsibles",
            ))
        for evaluation, participant_count, voter_count, course_evaluations_count in zip(
                evaluations, participant_counts, voter_counts,
                course_evaluations_counts):
            if evaluation._participant_count is None:
                evaluation.num_participants = participant_count
                evaluation.num_voters = voter_count
            evaluation.course_evaluations_count = course_evaluations_count
    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(evaluation)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
            evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
        evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    return evaluations
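
For reference, the single-query alternative mentioned in the comment above (reported there as about 2x slower) would look roughly like the sketch below. The '_voter_count' field name is an assumption here, mirroring the '_participant_count' field from the comment:

from django.db.models import Count
from django.db.models.functions import Coalesce

# Sketch: fall back to a live count only when the denormalized field is unset.
evaluations = evaluations.annotate(
    num_participants=Coalesce('_participant_count', Count('participants', distinct=True)),
    num_voters=Coalesce('_voter_count', Count('voters', distinct=True)),
)
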
Example 39
def get_evaluations_of_course(course, request):
    course_evaluations = []

    if course.evaluations.count() > 1:
        course_evaluations = [
            evaluation
            for evaluation in course.evaluations.filter(state=Evaluation.State.PUBLISHED)
            if evaluation.can_be_seen_by(request.user)
        ]
        if request.user.is_reviewer:
            course_evaluations += course.evaluations.filter(
                state__in=[Evaluation.State.IN_EVALUATION, Evaluation.State.EVALUATED, Evaluation.State.REVIEWED]
            )

        course_evaluations = get_evaluations_with_course_result_attributes(course_evaluations)

        for course_evaluation in course_evaluations:
            if course_evaluation.is_single_result:
                course_evaluation.single_result_rating_result = get_single_result_rating_result(course_evaluation)
            else:
                course_evaluation.distribution = calculate_average_distribution(course_evaluation)
                course_evaluation.avg_grade = distribution_to_grade(course_evaluation.distribution)

    return course_evaluations
Example 40
    def test_distribution_without_general_grade_question(self):
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=1, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution1, answer=3, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=4, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_grade, contribution=self.contribution2, answer=2, count=1)
        mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=3, count=3)
        mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.contribution1, answer=5, count=3)
        mommy.make(RatingAnswerCounter, question=self.question_likert, contribution=self.general_contribution, answer=5, count=5)
        mommy.make(RatingAnswerCounter, question=self.question_likert_2, contribution=self.general_contribution, answer=3, count=3)

        # contribution1: 0.4 * (0.5, 0, 0.5, 0, 0) + 0.6 * (0, 0, 0.5, 0, 0.5) = (0.2, 0, 0.5, 0, 0.3)
        # contribution2: (0, 0.5, 0, 0.5, 0)
        # contributions: (6 / 8) * (0.2, 0, 0.5, 0, 0.3) + (2 / 8) * (0, 0.5, 0, 0.5, 0) = (0.15, 0.125, 0.375, 0.125, 0.225)

        # general_non_grade: (0, 0, 0.375, 0, 0.625)

        # total: 0.375 * (0.15, 0.125, 0.375, 0.125, 0.225) + 0.625 * (0, 0, 0.375, 0, 0.625) = (0.05625, 0.046875, 0.375, 0.046875, 0.475)

        distribution = calculate_average_distribution(self.evaluation)
        self.assertAlmostEqual(distribution[0], 0.05625)
        self.assertAlmostEqual(distribution[1], 0.046875)
        self.assertAlmostEqual(distribution[2], 0.375)
        self.assertAlmostEqual(distribution[3], 0.046875)
        self.assertAlmostEqual(distribution[4], 0.475)
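
The two-stage weighting in the comments above can be checked in isolation; a minimal sketch with plain tuples (the 6/8 vs. 2/8 mix comes from the answer counts, and the 0.375 / 0.625 split is assumed to come from weights 3 and 5):

import math

contribution1 = (0.2, 0, 0.5, 0, 0.3)
contribution2 = (0, 0.5, 0, 0.5, 0)
# Mix the contributor distributions by their answer counts (6 and 2) ...
contributions = tuple((6 * a + 2 * b) / 8 for a, b in zip(contribution1, contribution2))
general_non_grade = (0, 0, 0.375, 0, 0.625)
# ... then combine with the general non-grade distribution.
total = tuple(0.375 * c + 0.625 * g for c, g in zip(contributions, general_non_grade))

for got, want in zip(total, (0.05625, 0.046875, 0.375, 0.046875, 0.475)):
    assert math.isclose(got, want)
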
Example 41
def evaluation_detail(request, semester_id, evaluation_id):
    semester = get_object_or_404(Semester, id=semester_id)
    evaluation = get_object_or_404(semester.evaluations, id=evaluation_id, course__semester=semester)

    if not evaluation.can_user_see_results_page(request.user):
        raise PermissionDenied

    evaluation_result = collect_results(evaluation)

    if request.user.is_reviewer:
        view = request.GET.get('view', 'public')  # if parameter is not given, show public view.
    else:
        view = request.GET.get('view', 'full')  # if parameter is not given, show own view.
    if view not in ['public', 'full', 'export']:
        view = 'public'

    view_as_user = request.user
    if view == 'export' and request.user.is_staff:
        view_as_user = UserProfile.objects.get(id=int(request.GET.get('contributor_id', request.user.id)))

    represented_users = [view_as_user]
    if view != 'export':
        represented_users += list(view_as_user.represented_users.all())
    # fall back to the full view if the public view is unavailable because the results have not been published
    if not evaluation.can_publish_rating_results and view == 'public':
        view = 'full'

    # remove text answers if the user may not see them
    for questionnaire_result in evaluation_result.questionnaire_results:
        for question_result in questionnaire_result.question_results:
            if isinstance(question_result, TextResult):
                question_result.answers = [
                    answer for answer in question_result.answers
                    if user_can_see_textanswer(view_as_user, represented_users, answer, view)
                ]
        # remove empty TextResults
        questionnaire_result.question_results = [
            result for result in questionnaire_result.question_results
            if not isinstance(result, TextResult) or len(result.answers) > 0
        ]

    # filter empty headings
    for questionnaire_result in evaluation_result.questionnaire_results:
        filtered_question_results = []
        for index, question_result in enumerate(questionnaire_result.question_results):
            # filter out if there are no more questions or the next question is also a heading question
            if isinstance(question_result, HeadingResult):
                if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult):
                    continue
            filtered_question_results.append(question_result)
        questionnaire_result.question_results = filtered_question_results

    # remove empty questionnaire_results and contribution_results
    for contribution_result in evaluation_result.contribution_results:
        contribution_result.questionnaire_results = [
            questionnaire_result
            for questionnaire_result in contribution_result.questionnaire_results
            if questionnaire_result.question_results
        ]
    evaluation_result.contribution_results = [
        contribution_result
        for contribution_result in evaluation_result.contribution_results
        if contribution_result.questionnaire_results
    ]

    add_warnings(evaluation, evaluation_result)

    # split evaluation_result into different lists
    general_questionnaire_results_top = []
    general_questionnaire_results_bottom = []
    contributor_contribution_results = []
    for contribution_result in evaluation_result.contribution_results:
        if contribution_result.contributor is None:
            for questionnaire_result in contribution_result.questionnaire_results:
                if questionnaire_result.questionnaire.is_below_contributors:
                    general_questionnaire_results_bottom.append(questionnaire_result)
                else:
                    general_questionnaire_results_top.append(questionnaire_result)
        elif view != 'export' or view_as_user.id == contribution_result.contributor.id:
            contributor_contribution_results.append(contribution_result)

    if not contributor_contribution_results:
        general_questionnaire_results_top += general_questionnaire_results_bottom
        general_questionnaire_results_bottom = []

    course_evaluations = []
    if evaluation.course.evaluations.count() > 1:
        course_evaluations = [
            evaluation for evaluation in evaluation.course.evaluations.filter(state="published")
            if evaluation.can_user_see_evaluation(request.user)
        ]
        if request.user.is_reviewer:
            course_evaluations += evaluation.course.evaluations.filter(
                state__in=['in_evaluation', 'evaluated', 'reviewed'])
        course_evaluations = get_evaluations_with_course_result_attributes(course_evaluations)
        for course_evaluation in course_evaluations:
            if course_evaluation.is_single_result:
                course_evaluation.single_result_rating_result = get_single_result_rating_result(course_evaluation)
            else:
                course_evaluation.distribution = calculate_average_distribution(course_evaluation)
                course_evaluation.avg_grade = distribution_to_grade(course_evaluation.distribution)

    other_contributors = []
    if view == 'export':
        other_contributors = [
            contribution_result.contributor
            for contribution_result in evaluation_result.contribution_results
            if contribution_result.contributor not in [None, view_as_user]
        ]

    # if the evaluation is not published, the rendered results are not cached, so we need to attach distribution
    # information for rendering the distribution bar
    if evaluation.state != 'published':
        evaluation = get_evaluations_with_prefetched_data([evaluation])[0]

    template_data = dict(
        evaluation=evaluation,
        course=evaluation.course,
        course_evaluations=course_evaluations,
        general_questionnaire_results_top=general_questionnaire_results_top,
        general_questionnaire_results_bottom=general_questionnaire_results_bottom,
        contributor_contribution_results=contributor_contribution_results,
        is_reviewer=view_as_user.is_reviewer,
        is_contributor=evaluation.is_user_contributor(view_as_user),
        is_responsible_or_contributor_or_delegate=evaluation.is_user_responsible_or_contributor_or_delegate(view_as_user),
        can_download_grades=view_as_user.can_download_grades,
        view=view,
        view_as_user=view_as_user,
        other_contributors=other_contributors,
    )
    return render(request, "results_evaluation_detail.html", template_data)
Example 42
def course_detail(request, semester_id, course_id):
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.course_set, id=course_id, semester=semester)

    if not course.can_user_see_results_page(request.user):
        raise PermissionDenied

    course_result = collect_results(course)

    if request.user.is_reviewer:
        public_view = request.GET.get('public_view') != 'false'  # if parameter is not given, show public view.
    else:
        public_view = request.GET.get('public_view') == 'true'  # if parameter is not given, show own view.

    # fall back to the non-public view if the public view is unavailable because the results have not been published
    if not course.can_publish_rating_results:
        public_view = False

    represented_users = list(request.user.represented_users.all()) + [request.user]

    # remove text answers if the user may not see them
    for questionnaire_result in course_result.questionnaire_results:
        for question_result in questionnaire_result.question_results:
            if isinstance(question_result, TextResult):
                question_result.answers = [answer for answer in question_result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]
        # remove empty TextResults
        questionnaire_result.question_results = [result for result in questionnaire_result.question_results if not isinstance(result, TextResult) or len(result.answers) > 0]

    # filter empty headings
    for questionnaire_result in course_result.questionnaire_results:
        filtered_question_results = []
        for index, question_result in enumerate(questionnaire_result.question_results):
            # filter out if there are no more questions or the next question is also a heading question
            if isinstance(question_result, HeadingResult):
                if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult):
                    continue
            filtered_question_results.append(question_result)
        questionnaire_result.question_results = filtered_question_results

    # remove empty questionnaire_results and contribution_results
    for contribution_result in course_result.contribution_results:
        contribution_result.questionnaire_results = [questionnaire_result for questionnaire_result in contribution_result.questionnaire_results if questionnaire_result.question_results]
    course_result.contribution_results = [contribution_result for contribution_result in course_result.contribution_results if contribution_result.questionnaire_results]

    add_warnings(course, course_result)

    # split course_result into different lists
    course_questionnaire_results_top = []
    course_questionnaire_results_bottom = []
    contributor_contribution_results = []
    for contribution_result in course_result.contribution_results:
        if contribution_result.contributor is None:
            for questionnaire_result in contribution_result.questionnaire_results:
                if questionnaire_result.questionnaire.is_below_contributors:
                    course_questionnaire_results_bottom.append(questionnaire_result)
                else:
                    course_questionnaire_results_top.append(questionnaire_result)
        else:
            contributor_contribution_results.append(contribution_result)

    if not contributor_contribution_results:
        course_questionnaire_results_top += course_questionnaire_results_bottom
        course_questionnaire_results_bottom = []

    course.distribution = calculate_average_distribution(course)
    course.avg_grade = distribution_to_grade(course.distribution)

    template_data = dict(
            course=course,
            course_questionnaire_results_top=course_questionnaire_results_top,
            course_questionnaire_results_bottom=course_questionnaire_results_bottom,
            contributor_contribution_results=contributor_contribution_results,
            reviewer=request.user.is_reviewer,
            contributor=course.is_user_contributor_or_delegate(request.user),
            can_download_grades=request.user.can_download_grades,
            public_view=public_view)
    return render(request, "results_course_detail.html", template_data)
Example 43
def evaluation_detail(request, semester_id, evaluation_id):
    semester = get_object_or_404(Semester, id=semester_id)
    evaluation = get_object_or_404(semester.evaluations, id=evaluation_id, course__semester=semester)

    if not evaluation.can_results_page_be_seen_by(request.user):
        raise PermissionDenied

    evaluation_result = collect_results(evaluation)

    if request.user.is_reviewer:
        view = request.GET.get('view', 'public')  # if parameter is not given, show public view.
    else:
        view = request.GET.get('view', 'full')  # if parameter is not given, show own view.
    if view not in ['public', 'full', 'export']:
        view = 'public'

    view_as_user = request.user
    if view == 'export' and request.user.is_staff:
        view_as_user = UserProfile.objects.get(id=int(request.GET.get('contributor_id', request.user.id)))

    represented_users = [view_as_user]
    if view != 'export':
        represented_users += list(view_as_user.represented_users.all())
    # fall back to the full view if the public view is unavailable because the results have not been published
    if not evaluation.can_publish_rating_results and view == 'public':
        view = 'full'

    # remove text answers if the user may not see them
    for questionnaire_result in evaluation_result.questionnaire_results:
        for question_result in questionnaire_result.question_results:
            if isinstance(question_result, TextResult):
                question_result.answers = [answer for answer in question_result.answers if can_textanswer_be_seen_by(view_as_user, represented_users, answer, view)]
        # remove empty TextResults
        questionnaire_result.question_results = [result for result in questionnaire_result.question_results if not isinstance(result, TextResult) or len(result.answers) > 0]

    # filter empty headings
    for questionnaire_result in evaluation_result.questionnaire_results:
        filtered_question_results = []
        for index, question_result in enumerate(questionnaire_result.question_results):
            # filter out if there are no more questions or the next question is also a heading question
            if isinstance(question_result, HeadingResult):
                if index == len(questionnaire_result.question_results) - 1 or isinstance(questionnaire_result.question_results[index + 1], HeadingResult):
                    continue
            filtered_question_results.append(question_result)
        questionnaire_result.question_results = filtered_question_results

    # remove empty questionnaire_results and contribution_results
    for contribution_result in evaluation_result.contribution_results:
        contribution_result.questionnaire_results = [questionnaire_result for questionnaire_result in contribution_result.questionnaire_results if questionnaire_result.question_results]
    evaluation_result.contribution_results = [contribution_result for contribution_result in evaluation_result.contribution_results if contribution_result.questionnaire_results]

    add_warnings(evaluation, evaluation_result)

    # split evaluation_result into different lists
    general_questionnaire_results_top = []
    general_questionnaire_results_bottom = []
    contributor_contribution_results = []
    for contribution_result in evaluation_result.contribution_results:
        if contribution_result.contributor is None:
            for questionnaire_result in contribution_result.questionnaire_results:
                if questionnaire_result.questionnaire.is_below_contributors:
                    general_questionnaire_results_bottom.append(questionnaire_result)
                else:
                    general_questionnaire_results_top.append(questionnaire_result)
        elif view != 'export' or view_as_user.id == contribution_result.contributor.id:
            contributor_contribution_results.append(contribution_result)

    if not contributor_contribution_results:
        general_questionnaire_results_top += general_questionnaire_results_bottom
        general_questionnaire_results_bottom = []

    course_evaluations = []
    if evaluation.course.evaluations.count() > 1:
        course_evaluations = [evaluation for evaluation in evaluation.course.evaluations.filter(state="published") if evaluation.can_be_seen_by(request.user)]
        if request.user.is_reviewer:
            course_evaluations += evaluation.course.evaluations.filter(state__in=['in_evaluation', 'evaluated', 'reviewed'])
        course_evaluations = get_evaluations_with_course_result_attributes(course_evaluations)
        for course_evaluation in course_evaluations:
            if course_evaluation.is_single_result:
                course_evaluation.single_result_rating_result = get_single_result_rating_result(course_evaluation)
            else:
                course_evaluation.distribution = calculate_average_distribution(course_evaluation)
                course_evaluation.avg_grade = distribution_to_grade(course_evaluation.distribution)

    other_contributors = []
    if view == 'export':
        other_contributors = [contribution_result.contributor for contribution_result in evaluation_result.contribution_results if contribution_result.contributor not in [None, view_as_user]]

    # if the evaluation is not published, the rendered results are not cached, so we need to attach distribution
    # information for rendering the distribution bar
    if evaluation.state != 'published':
        evaluation = get_evaluations_with_prefetched_data([evaluation])[0]

    template_data = dict(
        evaluation=evaluation,
        course=evaluation.course,
        course_evaluations=course_evaluations,
        general_questionnaire_results_top=general_questionnaire_results_top,
        general_questionnaire_results_bottom=general_questionnaire_results_bottom,
        contributor_contribution_results=contributor_contribution_results,
        is_reviewer=view_as_user.is_reviewer,
        is_contributor=evaluation.is_user_contributor(view_as_user),
        is_responsible_or_contributor_or_delegate=evaluation.is_user_responsible_or_contributor_or_delegate(view_as_user),
        can_download_grades=view_as_user.can_download_grades,
        view=view,
        view_as_user=view_as_user,
        other_contributors=other_contributors,
    )
    return render(request, "results_evaluation_detail.html", template_data)
Example 44
def index(request):
    query = (Evaluation.objects
        .annotate(participates_in=Exists(Evaluation.objects.filter(id=OuterRef('id'), participants=request.user)))
        .annotate(voted_for=Exists(Evaluation.objects.filter(id=OuterRef('id'), voters=request.user)))

        .filter(~Q(state="new"), course__evaluations__participants=request.user)
        .exclude(state="new")
        .prefetch_related(
            'course', 'course__semester', 'course__grade_documents', 'course__type',
            'course__evaluations', 'course__responsibles', 'course__degrees',
        )
        .distinct()
    )
    query = Evaluation.annotate_with_participant_and_voter_counts(query)
    evaluations = [evaluation for evaluation in query if evaluation.can_be_seen_by(request.user)]

    inner_evaluation_ids = [inner_evaluation.id for evaluation in evaluations for inner_evaluation in evaluation.course.evaluations.all()]
    inner_evaluation_query = Evaluation.objects.filter(pk__in=inner_evaluation_ids)
    inner_evaluation_query = Evaluation.annotate_with_participant_and_voter_counts(inner_evaluation_query)

    evaluations_by_id = {evaluation['id']: evaluation for evaluation in inner_evaluation_query.values()}

    for evaluation in evaluations:
        for inner_evaluation in evaluation.course.evaluations.all():
            inner_evaluation.num_voters = evaluations_by_id[inner_evaluation.id]['num_voters']
            inner_evaluation.num_participants = evaluations_by_id[inner_evaluation.id]['num_participants']

    for evaluation in evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(evaluation)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
                evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
            evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    evaluations = get_evaluations_with_course_result_attributes(evaluations)

    # evaluations must be sorted for regrouping them in the template
    evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name))

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        results_are_archived=semester.results_are_archived,
        grade_documents_are_deleted=semester.grade_documents_are_deleted,
        evaluations=[evaluation for evaluation in evaluations if evaluation.course.semester_id == semester.id]
    ) for semester in semesters]

    unfinished_evaluations_query = (
        Evaluation.objects
        .filter(participants=request.user, state__in=['prepared', 'editor_approved', 'approved', 'in_evaluation'])
        .exclude(voters=request.user)
        .prefetch_related('course__responsibles', 'course__type', 'course__semester')
    )

    unfinished_evaluations_query = Evaluation.annotate_with_participant_and_voter_counts(unfinished_evaluations_query)
    unfinished_evaluations = list(unfinished_evaluations_query)

    # available evaluations come first, ordered by time left for evaluation, then by name
    # evaluations in other (visible) states follow, ordered by name (see the note after this example)
    def sorter(evaluation):
        return (
            evaluation.state != 'in_evaluation',
            evaluation.vote_end_date if evaluation.state == 'in_evaluation' else None,
            evaluation.full_name
        )
    unfinished_evaluations.sort(key=sorter)

    template_data = dict(
        semester_list=semester_list,
        can_download_grades=request.user.can_download_grades,
        unfinished_evaluations=unfinished_evaluations,
        evaluation_end_warning_period=settings.EVALUATION_END_WARNING_PERIOD,
    )

    return render(request, "student_index.html", template_data)
Example 45
    def export(self, response, course_types_list, include_not_enough_voters=False, include_unpublished=False):
        self.workbook = xlwt.Workbook()
        self.init_styles(self.workbook)
        counter = 1
        course_results_exist = False

        for course_types in course_types_list:
            self.sheet = self.workbook.add_sheet("Sheet " + str(counter))
            counter += 1
            self.row = 0
            self.col = 0

            evaluations_with_results = []
            evaluation_states = ['published']
            if include_unpublished:
                evaluation_states.extend(['evaluated', 'reviewed'])

            used_questionnaires = set()
            for evaluation in self.semester.evaluations.filter(state__in=evaluation_states, course__type__in=course_types).all():
                if evaluation.is_single_result:
                    continue
                if not evaluation.can_publish_rating_results and not include_not_enough_voters:
                    continue
                results = OrderedDict()
                for questionnaire_result in collect_results(evaluation).questionnaire_results:
                    if all(not question_result.question.is_rating_question or question_result.counts is None for question_result in questionnaire_result.question_results):
                        continue
                    results.setdefault(questionnaire_result.questionnaire.id, []).extend(questionnaire_result.question_results)
                    used_questionnaires.add(questionnaire_result.questionnaire)
                evaluation.course_evaluations_count = evaluation.course.evaluations.count()
                if evaluation.course_evaluations_count > 1:
                    course_results_exist = True
                    evaluation.weight_percentage = int((evaluation.weight / sum(e.weight for e in evaluation.course.evaluations.all())) * 100)
                    evaluation.course.avg_grade = distribution_to_grade(calculate_average_course_distribution(evaluation.course))
                evaluations_with_results.append((evaluation, results))

            evaluations_with_results.sort(key=lambda cr: (cr[0].course.type.order, cr[0].full_name))
            used_questionnaires = sorted(used_questionnaires)

            course_type_names = [ct.name for ct in CourseType.objects.filter(pk__in=course_types)]
            writec(self, _("Evaluation {0}\n\n{1}").format(self.semester.name, ", ".join(course_type_names)), "headline")

            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.full_name, "evaluation")

            writen(self, _("Course Degrees"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, "\n".join([d.name for d in evaluation.course.degrees.all()]), "degree")

            writen(self, _("Course Type"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, evaluation.course.type.name, "border_left_right")

            writen(self)
            for evaluation, results in evaluations_with_results:
                self.write_empty_cell_with_borders()

            for questionnaire in used_questionnaires:
                writen(self, questionnaire.name, "bold")
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

                filtered_questions = self.filter_text_and_heading_questions(questionnaire.questions.all())

                for question in filtered_questions:
                    if question.is_heading_question:
                        writen(self, question.text, "italic")
                    else:
                        writen(self, question.text)

                    for evaluation, results in evaluations_with_results:
                        if questionnaire.id not in results or question.is_heading_question:
                            self.write_empty_cell_with_borders()
                            continue
                        qn_results = results[questionnaire.id]
                        values = []
                        count_sum = 0
                        approval_count = 0

                        for grade_result in qn_results:
                            if grade_result.question.id == question.id:
                                if grade_result.has_answers:
                                    values.append(grade_result.average * grade_result.count_sum)
                                    count_sum += grade_result.count_sum
                                    if grade_result.question.is_yes_no_question:
                                        approval_count += grade_result.approval_count
                        if values:
                            avg = sum(values) / count_sum

                            if question.is_yes_no_question:
                                percent_approval = approval_count / count_sum if count_sum > 0 else 0
                                writec(self, "{:.0%}".format(percent_approval), self.grade_to_style(avg))
                            else:
                                writec(self, avg, self.grade_to_style(avg))
                        else:
                            self.write_empty_cell_with_borders()
                writen(self)
                for evaluation, results in evaluations_with_results:
                    self.write_empty_cell_with_borders()

            writen(self, _("Overall Average Grade"), "bold")
            for evaluation, results in evaluations_with_results:
                avg = distribution_to_grade(calculate_average_distribution(evaluation))
                if avg:
                    writec(self, avg, self.grade_to_style(avg))
                else:
                    self.write_empty_cell_with_borders()

            writen(self, _("Total voters/Total participants"), "bold")
            for evaluation, results in evaluations_with_results:
                writec(self, "{}/{}".format(evaluation.num_voters, evaluation.num_participants), "total_voters")

            writen(self, _("Evaluation rate"), "bold")
            for evaluation, results in evaluations_with_results:
                # round down like in progress bar
                percentage_participants = int((evaluation.num_voters / evaluation.num_participants) * 100) if evaluation.num_participants > 0 else 0
                writec(self, "{}%".format(percentage_participants), "evaluation_rate")

            if course_results_exist:
                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self, _("Evaluation weight"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self, "{}%".format(evaluation.weight_percentage), "evaluation_weight")
                    else:
                        self.write_empty_cell()

                writen(self, _("Course Grade"), "bold")
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        if evaluation.course.avg_grade:
                            writec(self, evaluation.course.avg_grade, self.grade_to_style(evaluation.course.avg_grade))
                        else:
                            self.write_empty_cell_with_borders()
                    else:
                        self.write_empty_cell()

                writen(self)
                for evaluation, results in evaluations_with_results:
                    if evaluation.course_evaluations_count > 1:
                        writec(self, None, "border_top")
                    else:
                        self.write_empty_cell()

        self.workbook.save(response)
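
A quick check of the truncating percentage arithmetic used above for evaluation weights and evaluation rates (hypothetical numbers, independent of the exporter):

# An evaluation with weight 2 in a course whose evaluations weigh 2 and 6:
weights = [2, 6]
weight_percentage = int((weights[0] / sum(weights)) * 100)
assert weight_percentage == 25  # int() truncates, i.e. rounds towards zero

# Evaluation rate, rounded down like in the progress bar:
num_voters, num_participants = 7, 12
assert int((num_voters / num_participants) * 100) == 58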