Example #1
def index(request):
    user = request.user
    show_delegated = get_parameter_from_url_or_session(request, "show_delegated", True)

    contributor_visible_states = ['prepared', 'editor_approved', 'approved', 'in_evaluation', 'evaluated', 'reviewed', 'published']
    own_courses = Course.objects.filter(
        Q(evaluations__state__in=contributor_visible_states) & (
            Q(responsibles=user) |
            Q(evaluations__contributions__contributor=user)
        )
    )
    own_evaluations = [evaluation for course in own_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user)]
    for evaluation in own_evaluations:
        evaluation.contributes_to = evaluation.contributions.filter(contributor=user).exists()

    displayed_evaluations = set(own_evaluations)
    if show_delegated:
        represented_users = user.represented_users.all()
        delegated_courses = Course.objects.filter(
            Q(evaluations__state__in=contributor_visible_states) & (
                Q(responsibles__in=represented_users) |
                Q(
                    evaluations__contributions__role=Contribution.Role.EDITOR,
                    evaluations__contributions__contributor__in=represented_users,
                )
            )
        )
        delegated_evaluations = set(evaluation for course in delegated_courses for evaluation in course.evaluations.all() if evaluation.can_be_seen_by(user))
        for evaluation in delegated_evaluations:
            evaluation.delegated_evaluation = True
        displayed_evaluations |= delegated_evaluations - displayed_evaluations
    displayed_evaluations = list(displayed_evaluations)
    displayed_evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name))  # evaluations must be sorted for regrouping them in the template

    for evaluation in displayed_evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(evaluation)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
                evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
            evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    displayed_evaluations = get_evaluations_with_course_result_attributes(displayed_evaluations)

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        is_active=semester.is_active,
        evaluations=[evaluation for evaluation in displayed_evaluations if evaluation.course.semester_id == semester.id]
    ) for semester in semesters]

    template_data = dict(
        semester_list=semester_list,
        show_delegated=show_delegated,
        delegate_selection_form=DelegateSelectionForm(),
    )
    return render(request, "contributor_index.html", template_data)
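The helper get_parameter_from_url_or_session() called at the top of Example #1 is not shown on this page. A minimal sketch of what it presumably does, assuming it reads a boolean flag from the query string, falls back to the value remembered in the session, and stores the result for later requests:

def get_parameter_from_url_or_session(request, parameter, default=False):
    # Hypothetical sketch -- the real helper lives elsewhere in the EvaP codebase.
    value = request.GET.get(parameter)
    if value is None:
        # No URL parameter given: fall back to the session, then the default.
        value = request.session.get(parameter, default)
    else:
        # Interpret the URL parameter as a boolean flag.
        value = {"true": True, "false": False}.get(value.lower(), default)
    request.session[parameter] = value  # remember the choice for this session
    return value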
Example #2
def index(request):
    # retrieve all courses which have evaluations that are not in state "new" and in which the user participates
    courses = Course.objects.filter(
        evaluations__participants=request.user,
        evaluations__state__in=[
            'prepared', 'editor_approved', 'approved', 'in_evaluation',
            'evaluated', 'reviewed', 'published'
        ]).distinct().prefetch_related('semester', 'grade_documents', 'type',
                                       'evaluations',
                                       'evaluations__participants',
                                       'evaluations__voters')
    # retrieve all evaluations which the user can see that are not new
    evaluations = [
        evaluation for course in courses
        for evaluation in course.evaluations.all()
        if evaluation.can_be_seen_by(request.user)
    ]
    for evaluation in evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(
                    evaluation)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(
                    evaluation)
                evaluation.distribution = normalized_distribution(
                    evaluation.single_result_rating_result.counts)
            evaluation.avg_grade = distribution_to_grade(
                evaluation.distribution)
        evaluation.participates_in = request.user in evaluation.participants.all()
        evaluation.voted_for = request.user in evaluation.voters.all()
    evaluations = get_evaluations_with_course_result_attributes(evaluations)
    evaluations.sort(
        key=lambda evaluation: (evaluation.course.name, evaluation.name)
    )  # evaluations must be sorted for regrouping them in the template

    semesters = Semester.objects.all()
    semester_list = [
        dict(semester_name=semester.name,
             id=semester.id,
             is_active=semester.is_active,
             results_are_archived=semester.results_are_archived,
             grade_documents_are_deleted=semester.grade_documents_are_deleted,
             evaluations=[
                 evaluation for evaluation in evaluations
                 if evaluation.course.semester_id == semester.id
             ]) for semester in semesters
    ]

    template_data = dict(
        semester_list=semester_list,
        can_download_grades=request.user.can_download_grades,
    )

    return render(request, "student_index.html", template_data)
Example #3
File: test_tools.py Project: janno42/EvaP
    def test_calculation_bipolar_results(self):
        contributor1 = baker.make(UserProfile)
        student = baker.make(UserProfile)

        evaluation = baker.make(
            Evaluation,
            state=Evaluation.State.PUBLISHED,
            participants=[student, contributor1],
            voters=[student, contributor1],
        )
        questionnaire = baker.make(Questionnaire)
        question = baker.make(Question,
                              questionnaire=questionnaire,
                              type=Question.EASY_DIFFICULT)
        contribution1 = baker.make(Contribution,
                                   contributor=contributor1,
                                   evaluation=evaluation,
                                   questionnaires=[questionnaire])

        make_rating_answer_counters(question, contribution1,
                                    [5, 5, 15, 30, 25, 15, 10])

        cache_results(evaluation)
        evaluation_results = get_results(evaluation)

        self.assertEqual(len(evaluation_results.questionnaire_results), 1)
        questionnaire_result = evaluation_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

        self.assertEqual(question_result.count_sum, 105)
        self.assertAlmostEqual(question_result.average, 2.58730158)
        self.assertEqual(question_result.counts, (5, 5, 15, 30, 25, 15, 10))
        self.assertEqual(question_result.minus_balance_count, 32.5)
        distribution = normalized_distribution(question_result.counts)
        self.assertAlmostEqual(distribution[0], 0.04761904)
        self.assertAlmostEqual(distribution[1], 0.04761904)
        self.assertAlmostEqual(distribution[2], 0.1428571)
        self.assertAlmostEqual(distribution[3], 0.28571428)
        self.assertAlmostEqual(distribution[4], 0.2380952)
        self.assertAlmostEqual(distribution[5], 0.1428571)
        self.assertAlmostEqual(distribution[6], 0.09523809)
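The asserted distribution values follow directly from dividing each answer count by count_sum (105). A standalone check of the arithmetic:

counts = (5, 5, 15, 30, 25, 15, 10)
total = sum(counts)  # 105
distribution = tuple(count / total for count in counts)
# (0.047619..., 0.047619..., 0.142857..., 0.285714..., 0.238095..., 0.142857..., 0.095238...)
assert abs(distribution[3] - 0.28571428) < 1e-7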
Example #4
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
        evaluations = (evaluations
            .select_related("course__type")
            .prefetch_related(
                "course__degrees",
                "course__semester",
                "course__responsibles",
            )
        )
        evaluations = Evaluation.annotate_with_participant_and_voter_counts(evaluations)

    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(evaluation)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
            evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
        evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    return evaluations
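Evaluation.annotate_with_participant_and_voter_counts() is not shown here, but the comment in Example #5 below names an equivalent (if slower) formulation. A sketch along those lines, assuming _participant_count and _voter_count are denormalized model fields that take precedence over a live count:

from django.db.models import Count, QuerySet
from django.db.models.functions import Coalesce

def annotate_with_participant_and_voter_counts(evaluation_query: QuerySet) -> QuerySet:
    # Prefer the stored counts when present; otherwise count the m2m relations.
    return evaluation_query.annotate(
        num_participants=Coalesce("_participant_count", Count("participants", distinct=True)),
        num_voters=Coalesce("_voter_count", Count("voters", distinct=True)),
    )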
Example #5
File: views.py Project: jmunden1/EvaP
def get_evaluations_with_prefetched_data(evaluations):
    if isinstance(evaluations, QuerySet):
        # these annotates and the zip below could be replaced by something like this, but it was 2x slower:
        # annotate(num_participants=Coalesce('_participant_count', Count("participants", distinct=True)))
        participant_counts = evaluations.annotate(
            num_participants=Count("participants")).order_by('pk').values_list(
                "num_participants", flat=True)
        voter_counts = evaluations.annotate(
            num_voters=Count("voters")).order_by('pk').values_list(
                "num_voters", flat=True)
        course_evaluations_counts = evaluations.annotate(
            num_course_evaluations=Count("course__evaluations")).order_by(
                'pk').values_list("num_course_evaluations", flat=True)
        evaluations = (
            evaluations.select_related("course__type").prefetch_related(
                "course__degrees",
                "course__semester",
                "course__responsibles",
            ))
        for evaluation, participant_count, voter_count, course_evaluations_count in zip(
                evaluations, participant_counts, voter_counts,
                course_evaluations_counts):
            if evaluation._participant_count is None:
                evaluation.num_participants = participant_count
                evaluation.num_voters = voter_count
            evaluation.course_evaluations_count = course_evaluations_count
    for evaluation in evaluations:
        if not evaluation.is_single_result:
            evaluation.distribution = calculate_average_distribution(
                evaluation)
        else:
            evaluation.single_result_rating_result = get_single_result_rating_result(
                evaluation)
            evaluation.distribution = normalized_distribution(
                evaluation.single_result_rating_result.counts)
        evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    return evaluations
Example #6
File: test_tools.py Project: fsr-itse/EvaP
    def test_calculation_bipolar_results(self):
        contributor1 = mommy.make(UserProfile)
        student = mommy.make(UserProfile)

        evaluation = mommy.make(Evaluation, state='published', participants=[student, contributor1], voters=[student, contributor1])
        questionnaire = mommy.make(Questionnaire)
        question = mommy.make(Question, questionnaire=questionnaire, type=Question.EASY_DIFFICULT)
        contribution1 = mommy.make(Contribution, contributor=contributor1, evaluation=evaluation, questionnaires=[questionnaire])

        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-3, count=5)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-2, count=5)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-1, count=15)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=0, count=30)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=1, count=25)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=2, count=15)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=3, count=10)

        evaluation_results = collect_results(evaluation)

        self.assertEqual(len(evaluation_results.questionnaire_results), 1)
        questionnaire_result = evaluation_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

        self.assertEqual(question_result.count_sum, 105)
        self.assertAlmostEqual(question_result.average, 2.58730158)
        self.assertEqual(question_result.counts, (5, 5, 15, 30, 25, 15, 10))
        self.assertEqual(question_result.minus_balance_count, 32.5)
        distribution = normalized_distribution(question_result.counts)
        self.assertAlmostEqual(distribution[0], 0.04761904)
        self.assertAlmostEqual(distribution[1], 0.04761904)
        self.assertAlmostEqual(distribution[2], 0.1428571)
        self.assertAlmostEqual(distribution[3], 0.28571428)
        self.assertAlmostEqual(distribution[4], 0.2380952)
        self.assertAlmostEqual(distribution[5], 0.1428571)
        self.assertAlmostEqual(distribution[6], 0.09523809)
Example #7
File: test_tools.py Project: Onderi/EvaP
    def test_calculation_bipolar_results(self):
        contributor1 = mommy.make(UserProfile)
        student = mommy.make(UserProfile)

        course = mommy.make(Course, state='published', participants=[student, contributor1], voters=[student, contributor1])
        questionnaire = mommy.make(Questionnaire)
        question = mommy.make(Question, questionnaire=questionnaire, type=Question.EASY_DIFFICULT)
        contribution1 = mommy.make(Contribution, contributor=contributor1, course=course, questionnaires=[questionnaire])

        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-3, count=5)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-2, count=5)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=-1, count=15)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=0, count=30)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=1, count=25)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=2, count=15)
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1, answer=3, count=10)

        course_results = collect_results(course)

        self.assertEqual(len(course_results.questionnaire_results), 1)
        questionnaire_result = course_results.questionnaire_results[0]
        self.assertEqual(len(questionnaire_result.question_results), 1)
        question_result = questionnaire_result.question_results[0]

        self.assertEqual(question_result.count_sum, 105)
        self.assertAlmostEqual(question_result.average, 2.58730158)
        self.assertEqual(question_result.counts, (5, 5, 15, 30, 25, 15, 10))
        self.assertEqual(question_result.minus_balance_count, 32.5)
        distribution = normalized_distribution(question_result.counts)
        self.assertAlmostEqual(distribution[0], 0.04761904)
        self.assertAlmostEqual(distribution[1], 0.04761904)
        self.assertAlmostEqual(distribution[2], 0.1428571)
        self.assertAlmostEqual(distribution[3], 0.28571428)
        self.assertAlmostEqual(distribution[4], 0.2380952)
        self.assertAlmostEqual(distribution[5], 0.1428571)
        self.assertAlmostEqual(distribution[6], 0.09523809)
Example #8
def norm_distribution(distribution):
    return normalized_distribution(distribution)
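normalized_distribution() itself does not appear on this page, but the assertions in Examples #3, #6, and #7 pin down its behavior: each count is divided by the total so the result sums to 1. A minimal sketch consistent with those tests:

def normalized_distribution(distribution):
    # Scale raw answer counts into fractions that sum to 1.
    distribution_sum = sum(distribution)
    if distribution_sum == 0:
        return None  # assumption: an all-zero distribution cannot be normalized
    return tuple(value / distribution_sum for value in distribution)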
Example #9
File: views.py Project: bissssss1/EvapP
def index(request):
    query = (Evaluation.objects
        .annotate(participates_in=Exists(Evaluation.objects.filter(id=OuterRef('id'), participants=request.user)))
        .annotate(voted_for=Exists(Evaluation.objects.filter(id=OuterRef('id'), voters=request.user)))

        .filter(course__evaluations__participants=request.user)
        .exclude(state="new")
        .prefetch_related(
            'course', 'course__semester', 'course__grade_documents', 'course__type',
            'course__evaluations', 'course__responsibles', 'course__degrees',
        )
        .distinct()
    )
    query = Evaluation.annotate_with_participant_and_voter_counts(query)
    evaluations = [evaluation for evaluation in query if evaluation.can_be_seen_by(request.user)]

    inner_evaluation_ids = [inner_evaluation.id for evaluation in evaluations for inner_evaluation in evaluation.course.evaluations.all()]
    inner_evaluation_query = Evaluation.objects.filter(pk__in=inner_evaluation_ids)
    inner_evaluation_query = Evaluation.annotate_with_participant_and_voter_counts(inner_evaluation_query)

    evaluations_by_id = {evaluation['id']: evaluation for evaluation in inner_evaluation_query.values()}

    for evaluation in evaluations:
        for inner_evaluation in evaluation.course.evaluations.all():
            inner_evaluation.num_voters = evaluations_by_id[inner_evaluation.id]['num_voters']
            inner_evaluation.num_participants = evaluations_by_id[inner_evaluation.id]['num_participants']

    for evaluation in evaluations:
        if evaluation.state == "published":
            if not evaluation.is_single_result:
                evaluation.distribution = calculate_average_distribution(evaluation)
            else:
                evaluation.single_result_rating_result = get_single_result_rating_result(evaluation)
                evaluation.distribution = normalized_distribution(evaluation.single_result_rating_result.counts)
            evaluation.avg_grade = distribution_to_grade(evaluation.distribution)
    evaluations = get_evaluations_with_course_result_attributes(evaluations)

    # evaluations must be sorted for regrouping them in the template
    evaluations.sort(key=lambda evaluation: (evaluation.course.name, evaluation.name))

    semesters = Semester.objects.all()
    semester_list = [dict(
        semester_name=semester.name,
        id=semester.id,
        results_are_archived=semester.results_are_archived,
        grade_documents_are_deleted=semester.grade_documents_are_deleted,
        evaluations=[evaluation for evaluation in evaluations if evaluation.course.semester_id == semester.id]
    ) for semester in semesters]

    unfinished_evaluations_query = (
        Evaluation.objects
        .filter(participants=request.user, state__in=['prepared', 'editor_approved', 'approved', 'in_evaluation'])
        .exclude(voters=request.user)
        .prefetch_related('course__responsibles', 'course__type', 'course__semester')
    )

    unfinished_evaluations_query = Evaluation.annotate_with_participant_and_voter_counts(unfinished_evaluations_query)
    unfinished_evaluations = list(unfinished_evaluations_query)

    # available evaluations come first, ordered by time left for evaluation and the name
    # evaluations in other (visible) states follow by name
    def sorter(evaluation):
        return (
            evaluation.state != 'in_evaluation',
            evaluation.vote_end_date if evaluation.state == 'in_evaluation' else None,
            evaluation.full_name
        )
    unfinished_evaluations.sort(key=sorter)

    template_data = dict(
        semester_list=semester_list,
        can_download_grades=request.user.can_download_grades,
        unfinished_evaluations=unfinished_evaluations,
        evaluation_end_warning_period=settings.EVALUATION_END_WARNING_PERIOD,
    )

    return render(request, "student_index.html", template_data)
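The Exists/OuterRef annotations at the top of Example #9 replace the per-row Python membership checks of Example #2 (request.user in evaluation.participants.all()) with correlated subqueries evaluated in the database. The pattern in isolation, assuming the same Evaluation model:

from django.db.models import Exists, OuterRef

def annotate_membership(queryset, user):
    # For each outer row, a correlated subquery checks whether an Evaluation
    # with the same id has `user` among its participants.
    participant_subquery = Evaluation.objects.filter(id=OuterRef("id"), participants=user)
    return queryset.annotate(participates_in=Exists(participant_subquery))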